diff --git "a/2469.jsonl" "b/2469.jsonl" new file mode 100644--- /dev/null +++ "b/2469.jsonl" @@ -0,0 +1,741 @@ +{"seq_id":"487843806","text":"import matplotlib\nfrom matplotlib import pyplot as plt\nimport pickle as pkl\nimport numpy as np\nplt.rc('text', usetex=True)\nplt.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})\n##x = pkl.load(open('temp.pkl', 'rb'))\nx = np.linspace(300, 1400, 30)\n##x2 = np.linspace(300, 1400, 10)\n\ny1 = pkl.load(open('vel_0.32.pkl', 'rb'))\ny2 = pkl.load(open('vel_0.34.pkl', 'rb'))\ny3 = pkl.load(open('vel_0.38.pkl', 'rb'))\ny4 = pkl.load(open('vel_0.46.pkl', 'rb'))\ny5 = pkl.load(open('vel_0.62.pkl', 'rb'))\ny1 = [x * 100 for x in y1]\ny2 = [x * 100 for x in y2]\ny3 = [x * 100 for x in y3]\ny4 = [x * 100 for x in y4]\ny5 = [x * 100 for x in y5]\n\ndy=np.diff(y5)\ndx=np.diff(x)\ni=np.diff(y5).argmax()\n##print (i)\n##print ('max dy %f' %dy[i])\n##print ('dx %f' %dx[i])\ngrad=dy/dx\ng=max(grad)\nj=grad.argmax()\n#%%\nfig1, ax1 = plt.subplots()\nax1.loglog(x[j-10:j],y1[j-10:j],'k-o',label='$\\Delta$X=0.32 m')\nax1.loglog(x[j-10:j],y2[j-10:j],'k-v',label='$\\Delta$X=0.34 m')\nax1.loglog(x[j-10:j],y3[j-10:j],'k-d',label='$\\Delta$X=0.38 m')\nax1.loglog(x[j-10:j],y4[j-10:j],'k',label='$\\Delta$X=0.46 m')\nax1.loglog(x[j-10:j],y5[j-10:j],'k-s',label='$\\Delta$X=0.62 m')\nax1.axvline(x[j-8], color='red', lw=0.9, alpha=0.8)\nax1.set_xlim(1000,1500)\nax1.set_ylim(0,1500)\nax1.set_xscale('log')\nax1.set_yscale('log')\nax1.minorticks_on()\n##ax1.set_xticks([300,500, 1000, 1500])\nax1.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())\nax1.set_xlabel('preheating temperature [K]')\nax1.set_ylabel(\"velocity of the incoming mixture $10^{-2}$[$m s^{-1}$]\",fontsize=16)\nplt.title('$\\phi$=1; p = 5bar', loc = \"right\")\nplt.legend(frameon='True', loc='best')\nplt.grid('on')\n# plt.locator_params(axis='x', nbins=4)\nplt.savefig('zoom1.pdf', bbox_inches='tight')\nplt.show()\n\n\n","sub_path":"FIG5.2b/zoom.py","file_name":"zoom.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"478846702","text":"from django.conf.urls import url, include\nfrom django.contrib.auth.models import User\nfrom rest_framework import routers, serializers, viewsets, mixins\nfrom .models import Cat, Feeder, Fed\nfrom rest_framework.response import Response\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.decorators import list_route\nfrom rest_framework.routers import Route, DynamicListRoute, SimpleRouter\n\n# TODO: REDO EVERYTHING WITH A IsOwnerOrReadOnly-like PERMISSION\n\n\nclass CatSerializer(serializers.ModelSerializer):\n\tclass Meta:\n\t\tmodel = Cat\n\t\tfields = '__all__'\n\n# TODO: Use Permissions here?\nclass CatViewSet(mixins.DestroyModelMixin,\n\t\t\t\t mixins.CreateModelMixin,\n\t\t\t\t viewsets.GenericViewSet):\n\tqueryset = Cat.objects.all()\n\tserializer_class = CatSerializer\n\n\t# Force cat owner to be the POSTer\n\t# Create a Feeder instance for owner as well\n\tdef perform_create(self, serializer):\n\t\tcat = serializer.save(owner=self.request.user)\n\t\tFeeder.objects.create(user=self.request.user, cat=cat)\n\n\tdef list(self, request):\n\t\tqueryset = Cat.objects.filter(owner=request.user.id)\n\t\tserializer = CatSerializer(queryset, many=True)\n\t\treturn Response(serializer.data)\n\n\tdef retrieve(self, request, pk=None):\n\t\tif not pk:\n\t\t\treturn Response() # TODO: return errors here\n\t\tq = 
Feeder.objects.filter(user=request.user, cat=pk)\n\t\tif not q:\n\t\t\treturn Response()\n\t\tserializer = CatSerializer(q[0].cat)\n\t\treturn Response(serializer.data)\n\n\tdef perform_destroy(self, instance):\n\t\tif self.request.user != instance.owner:\n\t\t\treturn ValidationError(\"Cannot delete a cat you don't own\")\n\t\treturn\n\n\n\t@list_route()\n\tdef data(self, request, pk=None):\n\t\tcat = Cat.objects.get(pk=pk)\n\t\tdata = Fed.objects.filter(cat=cat)\n\t\treturn Response(FedSerializer(data, many=True).data)\n\n# TODO: Also send back the owned, and feeding cats\nclass UserSerializer(serializers.ModelSerializer):\n\tclass Meta:\n\t\tmodel = User\n\t\tfields = ('username', 'email')\n\nclass UserViewSet(viewsets.ModelViewSet):\n\tqueryset = User.objects.all() # TODO: Limit to only the auth'd user's info\n\tserializer_class = UserSerializer\n\nclass FeederSerializer(serializers.ModelSerializer):\n\tclass Meta:\n\t\tmodel = Feeder\n\t\tfields = '__all__'\n\nclass FeederViewSet(mixins.DestroyModelMixin,\n\t\t\t\t\tviewsets.GenericViewSet):\n\tqueryset = Feeder.objects.all() # TODO: NOT do this\n\tserializer_class = FeederSerializer\n\n\tdef list(self, request):\n\t\tqueryset = Feeder.objects.filter(user=request.user)\n\t\tserializer = FeederSerializer(queryset, many=True)\n\t\treturn Response(serializer.data)\n\n\tdef create(self, request):\n\t\tq = Cat.objects.get(pk=int(request.data.get(\"cat\",0)))\n\t\tif not q:\n\t\t\treturn\n\t\tif q.owner != request.user:\n\t\t\treturn Response(\"Cannot assign a feeder for a cat you don't own\")\n\n\t\tif Feeder.objects.filter(cat=q, user=User.objects.get(int(request.data.get(\"user\")))).exists():\n\t\t\treturn Response(\"Feeder assignment already exists\")\n\n\t\tf = Feeder.objects.create(cat=q, user=User.objects.get(int(request.data.get(\"user\"))))\n\t\treturn Response(FeederSerializer(f).data)\n\n\tdef retrieve(self, request, pk=None):\n\t\tif not pk:\n\t\t\treturn Response(\"No pk?\")\n\t\tq = Feeder.objects.get(pk=pk)\n\t\tif not q:\n\t\t\treturn Response(\"404\")\n\t\tif request.user == q.user or request.user == q.cat.owner:\n\t\t\treturn Response(FeederSerializer(q).data)\n\t\treturn Response(\"eh?\")\n\n\tdef perform_destroy(self, instance):\n\t\tif instance.user != self.request.user and instance.cat.owner != self.request.user:\n\t\t\treturn ValidationError(\"You are not the cat owner nor the Feeder in question\")\n\t\tif instance.user == self.request.user and instance.cat.owner == self.request.user:\n\t\t\treturn ValidationError(\"You can't delete the feeder if you are the owner\")\n\n\t\tinstance.delete()\n\n\nclass FedSerializer(serializers.ModelSerializer):\n\tclass Meta:\n\t\tmodel = Fed\n\t\tfields = '__all__'\n\t\tdepth = 1\n\nclass FedViewSet(mixins.CreateModelMixin,\n\t\t\t\t mixins.DestroyModelMixin,\n\t\t\t\t viewsets.GenericViewSet):\n\n\tqueryset = Fed.objects.none()\n\tserializer_class = FedSerializer\n\n\t# TODO: Make sure the user feeds the cat -> validator?\n\tdef perform_create(self, serializer):\n\t\tserializer.save(by=self.request.user)\n\n\tdef perform_destroy(self, instance):\n\t\t# Must be a feeder for the cat\n\t\tif instance.cat not in Feeder.objects.filter(user=self.request.user):\n\t\t\treturn\n\t\tinstance.delete()\n\t\t\n\n\n# Routers provide an easy way of automatically determining the URL conf.\n'''catrouter = routers.DefaultRouter()\ncatrouter.register(r'cat', CatViewSet)\nuserrouter = routers.DefaultRouter()\nuserrouter.register(r'user', UserViewSet)\n'''\nrouter = 
routers.DefaultRouter()\n#router.register(r'cat', CatViewSet)\nrouter.register(r'user', UserViewSet)\nrouter.register(r'feeder', FeederViewSet)\nrouter.register(r'fed', FedViewSet)\n\nclass CustomCatRouter(SimpleRouter):\n\troutes = [\n\t\tRoute(\n\t\t\turl=r\"^{prefix}/$\",\n\t\t\tmapping={\"get\":\"list\", \"post\":\"create\"},\n\t\t\tname=\"{basename}-list\",\n\t\t\tinitkwargs={\"suffix\":\"List\"}\n\t\t),\n\t\tRoute(\n\t\t\turl=r\"^{prefix}/{lookup}/$\",\n\t\t\tmapping={\"get\":\"retrieve\", \"delete\":\"destroy\"},\n\t\t\tname=\"{basename}-detail\",\n\t\t\tinitkwargs={\"suffix\":\"Detail\"}\n\t\t),\n\t\tDynamicListRoute(\n\t\t\turl=r\"^{prefix}/{lookup}/{methodnamehyphen}/$\",\n#\t\t\tmapping={\"get\":\"list\", \"post\":\"create\"},\n\t\t\tname=\"{basename}-{methodnamehyphen}\",\n\t\t\tinitkwargs={\"suffix\":\"Data\"}\n\t\t)\n\t]\t\t\t\n\ncatrouter = CustomCatRouter()\ncatrouter.register(r'cat', CatViewSet, base_name='cat')\n\n# Wire up our API using automatic URL routing.\n# Additionally, we include login URLs for the browsable API.\nurlpatterns = [\n#\turl(r'^cat/', include(catrouter.urls)),\n#\turl(r'^user/', include(userrouter.urls)),\n\turl(r'^', include(router.urls))\n]\nurlpatterns += catrouter.urls\n","sub_path":"feed/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":5618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"276042272","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom .models import Task, ColourTheme\nfrom .forms import TaskForm\n# Create your views here.\n\n\ndef get_tasks(request):\n \"\"\" view for main tasks list \"\"\"\n colour = ColourTheme.objects.all()\n add_task_form = TaskForm()\n\n # task filtering\n filter = 'All'\n if request.GET:\n if 'complete' in request.GET:\n tasks = Task.objects.filter(done=True) or None\n filter = 'Complete'\n elif 'incomplete' in request.GET:\n tasks = Task.objects.filter(done=False) or None\n filter = 'Incomplete'\n elif 'urgent' in request.GET:\n tasks = Task.objects.filter(urgent=True) or None\n filter = 'Urgent'\n else:\n # something's gone wrong, just show all tasks\n tasks = Task.objects.all()\n else:\n tasks = Task.objects.all()\n\n if request.method == 'POST':\n # add a task\n if 'name' in request.POST:\n add_task_form = TaskForm(request.POST)\n if add_task_form.is_valid():\n add_task_form.save()\n return redirect('tasks')\n # edit a task\n elif 'taskId' in request.POST:\n # form with update item request\n task_to_update = request.POST['taskId']\n task = get_object_or_404(Task, id=task_to_update)\n task.name = request.POST['taskNewName']\n task.save()\n # change colour theme\n elif 'colour' in request.POST:\n # handle users choice of colour theme\n colourObj = get_object_or_404(ColourTheme)\n colourObj.colour = request.POST['colour']\n colourObj.save()\n return redirect('tasks')\n\n context = {\n 'tasks': tasks,\n 'colour': colour[0],\n 'filter': filter,\n 'add_task_form': add_task_form,\n }\n return render(request, 'tasks/tasks.html', context)\n\n\ndef toggle_status(request, task_id):\n \"\"\" view to toggle tasks complete status \"\"\"\n task = get_object_or_404(Task, id=task_id)\n task.done = not task.done # invert task status\n task.save()\n return redirect('tasks')\n\n\ndef delete_task(request, task_id):\n \"\"\" view to completely delete task from list \"\"\"\n task = get_object_or_404(Task, id=task_id)\n task.delete()\n return 
redirect('tasks')\n","sub_path":"tasks/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"245048080","text":"import boto3\nimport logging\nfrom botocore.config import Config\nfrom os import getenv\nfrom datetime import datetime\n\nlogger = logging.getLogger()\nlog_level = getenv(\"LOGLEVEL\", \"INFO\")\nlevel = logging.getLevelName(log_level)\nlogger.setLevel(level)\nvalid_comparators = ['GreaterThanOrEqualToThreshold', 'GreaterThanThreshold', 'LessThanThreshold',\n 'LessThanOrEqualToThreshold']\n\n\ndef boto3_client(resource, assumed_credentials=None):\n config = Config(\n retries=dict(\n max_attempts=40\n )\n )\n if assumed_credentials:\n client = boto3.client(\n resource,\n aws_access_key_id=assumed_credentials['AccessKeyId'],\n aws_secret_access_key=assumed_credentials['SecretAccessKey'],\n aws_session_token=assumed_credentials['SessionToken'],\n config=config\n )\n else:\n client = boto3.client(\n resource,\n config=config\n )\n\n return client\n\n\ndef check_alarm_tag(instance_id, tag_key):\n try:\n ec2_client = boto3_client('ec2')\n # does instance have appropriate alarm tag?\n instance = ec2_client.describe_instances(\n Filters=[\n {\n 'Name': 'tag-key',\n 'Values': [\n tag_key\n ]\n }\n ],\n InstanceIds=[\n instance_id\n ]\n\n )\n # can only be one instance when called by CloudWatch Events\n if 'Reservations' in instance and len(instance['Reservations']) > 0 and len(\n instance['Reservations'][0]['Instances']) > 0:\n ec2_client.create_tags(\n Resources=[\n instance_id\n ],\n Tags=[\n {\n 'Key': tag_key,\n 'Value': str(datetime.utcnow())\n }\n ]\n )\n return instance['Reservations'][0]['Instances'][0]\n else:\n return False\n\n except Exception as e:\n # If any other exceptions which we didn't expect are raised\n # then fail and log the exception message.\n logger.error('Failure describing instance {} with tag key: {} : {}'.format(instance_id, tag_key, e))\n raise\n\n\ndef process_lambda_alarms(function_name, tags, activation_tag, default_alarms, sns_topic_arn, alarm_separator):\n activation_tag = tags.get(activation_tag, 'not_found')\n if activation_tag == 'not_found':\n logger.debug('Activation tag not found for {}, nothing to do'.format(function_name))\n return True\n else:\n logger.debug('Processing function specific alarms for: {}'.format(default_alarms))\n for tag_key in tags:\n if tag_key.startswith('AutoAlarm'):\n default_alarms['AWS/Lambda'].append({'Key': tag_key, 'Value': tags[tag_key]})\n\n # get the default dimensions for AWS/EC2\n dimensions = list()\n dimensions.append(\n {\n 'Name': 'FunctionName',\n 'Value': function_name\n }\n )\n\n for tag in default_alarms['AWS/Lambda']:\n alarm_properties = tag['Key'].split(alarm_separator)\n Namespace = alarm_properties[1]\n MetricName = alarm_properties[2]\n ComparisonOperator = alarm_properties[3]\n Period = alarm_properties[4]\n Statistic = alarm_properties[5]\n\n AlarmName = 'AutoAlarm-{}-{}-{}-{}-{}-{}'.format(function_name, Namespace, MetricName, ComparisonOperator,\n Period,\n Statistic)\n create_alarm(AlarmName, MetricName, ComparisonOperator, Period, tag['Value'], Statistic, Namespace,\n dimensions, sns_topic_arn)\n\n\ndef create_alarm_from_tag(id, alarm_tag, instance_info, metric_dimensions_map, sns_topic_arn, alarm_separator):\n alarm_properties = alarm_tag['Key'].split(alarm_separator)\n namespace = alarm_properties[1]\n MetricName = alarm_properties[2]\n dimensions = list()\n for 
dimension_name in metric_dimensions_map.get(namespace, list()):\n dimension = dict()\n\n if dimension_name == 'AutoScalingGroupName':\n # find out if the instance is part of an autoscaling group\n instance_asg = next(\n (tag['Value'] for tag in instance_info['Tags'] if tag['Key'] == 'aws:autoscaling:groupName'), None)\n if instance_asg:\n dimension_value = instance_asg\n dimension['Name'] = dimension_name\n dimension['Value'] = dimension_value\n dimensions.append(dimension)\n else:\n dimension_value = instance_info.get(dimension_name, None)\n if dimension_value:\n dimension['Name'] = dimension_name\n dimension['Value'] = dimension_value\n dimensions.append(dimension)\n else:\n logger.warning(\n \"Dimension {} has been specified in APPEND_DIMENSIONS but no dimension value exists, skipping...\".format(\n dimension_name))\n\n logger.debug(\"dimensions are {}\".format(dimensions))\n\n additional_dimensions = list()\n\n for index, prop in enumerate(alarm_properties[3:], start=3):\n if prop in valid_comparators:\n prop_end_index = index\n break\n else:\n prop_end_index = None\n\n if prop_end_index:\n additional_dimensions.extend(alarm_properties[3:prop_end_index])\n else:\n logger.error('Unable to determine the dimensions for alarm tag: {}'.format(alarm_tag))\n raise Exception\n\n InstanceName = next((tag['Value'] for tag in instance_info['Tags'] if tag['Key'] == 'Name'), '')\n\n AlarmName = 'AutoAlarm-{}-{}-{}-{}'.format(InstanceName, id, namespace, MetricName)\n properties_offset = 0\n if additional_dimensions:\n for num, dim in enumerate(additional_dimensions[::2]):\n val = additional_dimensions[num * 2 + 1]\n dimensions.append(\n {\n 'Name': dim,\n 'Value': val\n }\n )\n AlarmName = AlarmName + '-{}-{}'.format(dim, val)\n properties_offset = properties_offset + 2\n\n ComparisonOperator = alarm_properties[(properties_offset + 3)]\n Period = alarm_properties[(properties_offset + 4)]\n Statistic = alarm_properties[(properties_offset + 5)]\n\n AlarmName = AlarmName + '-{}-{}-{}'.format(ComparisonOperator, Period, Statistic)\n\n create_alarm(AlarmName, MetricName, ComparisonOperator, Period, alarm_tag['Value'], Statistic, namespace,\n dimensions, sns_topic_arn)\n\n\ndef process_alarm_tags(instance_id, instance_info, default_alarms, metric_dimensions_map, sns_topic_arn, cw_namespace,\n create_default_alarms_flag, alarm_separator):\n tags = instance_info['Tags']\n\n ImageId = instance_info['ImageId']\n logger.info('ImageId is: {}'.format(ImageId))\n platform = determine_platform(ImageId)\n\n logger.info('Platform is: {}'.format(platform))\n custom_alarms = dict()\n # get all alarm tags from instance and add them into a custom tag list\n for instance_tag in tags:\n if instance_tag['Key'].startswith('AutoAlarm'):\n create_alarm_from_tag(instance_id, instance_tag, instance_info, metric_dimensions_map, sns_topic_arn, alarm_separator)\n\n if create_default_alarms_flag == 'true':\n for alarm_tag in default_alarms['AWS/EC2']:\n create_alarm_from_tag(instance_id, alarm_tag, instance_info, metric_dimensions_map, sns_topic_arn, alarm_separator)\n\n for alarm_tag in default_alarms[cw_namespace][platform]:\n create_alarm_from_tag(instance_id, alarm_tag, instance_info, metric_dimensions_map, sns_topic_arn, alarm_separator)\n else:\n logger.info(\"Default alarm creation is turned off\")\n\n\ndef determine_platform(imageid):\n try:\n ec2_client = boto3_client('ec2')\n # does instance have appropriate alarm tag?\n image_info = ec2_client.describe_images(\n ImageIds=[\n imageid\n ]\n\n )\n\n # can only be one 
instance when called by CloudWatch Events\n if 'Images' in image_info and len(image_info['Images']) > 0:\n platform_details = image_info['Images'][0]['PlatformDetails']\n logger.debug('Platform details of image: {}'.format(platform_details))\n if 'Windows' in platform_details or 'SQL Server' in platform_details:\n return 'Windows'\n elif 'Red Hat' in platform_details:\n return 'Red Hat'\n elif 'SUSE' in platform_details:\n return 'SUSE'\n elif 'Linux/UNIX' in platform_details:\n if 'ubuntu' in image_info['Images'][0]['Description'].lower() or 'ubuntu' in image_info['Images'][0][\n 'Name'].lower():\n return 'Ubuntu'\n else:\n return 'Amazon Linux'\n else:\n return None\n else:\n return None\n\n except Exception as e:\n # If any other exceptions which we didn't expect are raised\n # then fail and log the exception message.\n logger.error('Failure describing image {}: {}'.format(imageid, e))\n raise\n\n\ndef convert_to_seconds(s):\n try:\n seconds_per_unit = {\"s\": 1, \"m\": 60, \"h\": 3600, \"d\": 86400, \"w\": 604800}\n return int(s[:-1]) * seconds_per_unit[s[-1]]\n except Exception as e:\n # If any other exceptions which we didn't expect are raised\n # then fail and log the exception message.\n logger.error('Error converting threshold string {} to seconds!'.format(s, e))\n raise\n\n\n# Alarm Name Format: AutoAlarm------\n# Example: AutoAlarm-i-00e4f327736cb077f-CPUUtilization-GreaterThanThreshold-80-5m\ndef create_alarm(AlarmName, MetricName, ComparisonOperator, Period, Threshold, Statistic, Namespace, Dimensions,\n sns_topic_arn):\n AlarmDescription = 'Alarm created by lambda function CloudWatchAutoAlarms'\n\n try:\n Period = convert_to_seconds(Period)\n except Exception as e:\n # If any other exceptions which we didn't expect are raised\n # then fail and log the exception message.\n logger.error(\n 'Error converting Period specified {} to seconds for Alarm {}!: {}'.format(Period, AlarmName, e))\n\n Threshold = float(Threshold)\n try:\n cw_client = boto3_client('cloudwatch')\n\n alarm = {\n 'AlarmName': AlarmName,\n 'AlarmDescription': AlarmDescription,\n 'MetricName': MetricName,\n 'Namespace': Namespace,\n 'Dimensions': Dimensions,\n 'Period': Period,\n 'EvaluationPeriods': 15,\n 'DatapointsToAlarm': 10,\n 'Threshold': Threshold,\n 'ComparisonOperator': ComparisonOperator,\n 'Statistic': Statistic\n }\n\n if sns_topic_arn is not None:\n alarm['AlarmActions'] = [sns_topic_arn]\n\n cw_client.put_metric_alarm(**alarm)\n\n logger.info('Created alarm {}'.format(AlarmName))\n\n except Exception as e:\n # If any other exceptions which we didn't expect are raised\n # then fail and log the exception message.\n logger.error(\n 'Error creating alarm {}!: {}'.format(AlarmName, e))\n\n\ndef delete_alarms(name):\n try:\n AlarmNamePrefix = \"AutoAlarm-{}\".format(name)\n cw_client = boto3_client('cloudwatch')\n logger.info('calling describe alarms with prefix {}'.format(AlarmNamePrefix))\n response = cw_client.describe_alarms(\n AlarmNamePrefix=AlarmNamePrefix,\n )\n alarm_list = []\n logger.info('Response from describe_alarms(): {}'.format(response))\n if 'MetricAlarms' in response:\n for alarm in response['MetricAlarms']:\n alarm_name = alarm['AlarmName']\n alarm_list.append(alarm_name)\n logger.info('deleting {} for {}'.format(alarm_list, name))\n response = cw_client.delete_alarms(\n AlarmNames=alarm_list\n )\n return True\n except Exception as e:\n # If any other exceptions which we didn't expect are raised\n # then fail and log the exception message.\n logger.error(\n 'Error deleting alarms for 
{}!: {}'.format(name, e))\n\ndef scan_and_process_alarm_tags(create_alarm_tag, default_alarms, metric_dimensions_map, sns_topic_arn,\n cw_namespace, create_default_alarms_flag, alarm_separator):\n try:\n ec2_client = boto3_client('ec2')\n for reservation in ec2_client.describe_instances()[\"Reservations\"]:\n for instance in reservation[\"Instances\"]:\n # Look for running instances only\n if instance[\"State\"][\"Code\"]>16:\n continue\n if check_alarm_tag(instance[\"InstanceId\"], create_alarm_tag):\n process_alarm_tags(instance[\"InstanceId\"], instance, default_alarms, metric_dimensions_map,\n sns_topic_arn, cw_namespace, create_default_alarms_flag, alarm_separator)\n\n except Exception as e:\n # If any other exceptions which we didn't expect are raised\n # then fail and log the exception message.\n logger.error('Failure describing reservations : {}'.format(e))\n raise\n","sub_path":"src/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":13716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"228156196","text":"import requests\nimport pandas as pd\nfrom collections import OrderedDict\nimport json\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nurl = 'https://query.wikidata.org/sparql'\n\nquery = \"\"\"\nPREFIX psv: \n\nSELECT DISTINCT ?itemLabel (AVG(?item_lat) AS ?lat) (AVG(?item_long) AS ?long) (AVG(?Einwohnerzahl) AS ?pop) WHERE {\n ?item p:P31 ?statement.\n ?statement ps:P31 wd:Q667509.\n filter not exists {?statement pq:P582 [].}\n ?item rdfs:label ?itemLabel.\n ?item (p:P625/psv:P625) ?item_node.\n ?item_node wikibase:geoLatitude ?item_lat.\n ?item_node wikibase:geoLongitude ?item_long.\n ?item wdt:P1082 ?Einwohnerzahl.\n FILTER((LANG(?itemLabel)) = \"de\")\n}\nGROUP BY ?itemLabel\nORDER BY ?itemLabel\n\"\"\"\n\nbGetDataWikidata=1\nif bGetDataWikidata:\n r = requests.get(url, params = {'format': 'json', 'query': query})\n r.raise_for_status()\n data = r.json()\n gemeinden = []\n for item in data['results']['bindings']:\n gemeinden.append(OrderedDict({\n 'Name': item['itemLabel']['value'],\n 'lat' : item['lat']['value'],\n 'long' : item['long']['value'],\n 'pop' : item['pop']['value']\n }))\n\n\n df = pd.DataFrame(gemeinden)\n df.set_index('Name', inplace=True)\n df = df.astype({'pop': float , 'lat' : float, 'long' : float })\n\n df.to_csv('data.csv')\n\ndf = pd.read_csv('data.csv')\nbPlot=1\nbNames=0\nbLegend=0\nif bPlot:\n\n\n fig = plt.figure(figsize=(22, 12))\n\n ax = fig.add_subplot(111)\n plt.axis('off')\n ax.scatter(x=df['long'].values,y=df['lat'].values,s=df['pop'].values/100,alpha=0.5,linewidths=0)\n if bLegend:\n x_legend=[11,11,11,11,11]\n y_legend=[48.5,48.15,48,47.9,47.8]\n s_legend=[10000,1000,100,10,1]\n s_slegend=[]\n for entry in s_legend:\n s_slegend.append(str(entry*100))\n ax.scatter(x=x_legend,y=y_legend,s=s_legend,alpha=0.5,linewidths=0)\n for label, x, y in zip(s_slegend, x_legend, y_legend):\n plt.annotate(\n label,fontsize=1,\n xy=(x, y), xytext=(50, 0),size=15,\n textcoords='offset points',arrowprops=dict(arrowstyle = '->', connectionstyle='arc3,rad=0'))\n if bNames:\n for label, x, y in zip(df['Name'].values, df['long'].values, df['lat'].values):\n plt.annotate(\n label,fontsize=1,\n xy=(x, y), xytext=(0, 0),\n textcoords='offset points')\n 
fig.savefig(\"map.pdf\")\n\n#plt.show()\n","sub_path":"pop_map_austria.py","file_name":"pop_map_austria.py","file_ext":"py","file_size_in_byte":2502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"95878377","text":"import re\nimport sys\nimport struct\nimport yaml\nfrom pprint import pprint\nfrom time import strftime, gmtime\nimport calendar\n\n\nepoch_diff = calendar.timegm((1904, 1, 1, 0, 0, 0, 0, 0, 0))\n\nbasetypes = {\n \"USHORT\": \">H\",\n \"GlyphID\": \">H\",\n \"NameID\": \">H\",\n \"Offset32\": \">L\",\n \"Offset16\": \">H\",\n \"VERSION\": (\">HH\", lambda x: x[0] + x[1] / (10 ** len(str(x[1])))),\n \"F2DOT14\": (\">H\", lambda x: x[0] / (1 << 14)),\n \"FIXED\": (\">l\", lambda x: x[0] / (1 << 16)),\n \"Tag\": (\">cccc\", lambda x: \"\".join([g.decode() for g in x])),\n \"LONGDATETIME\": (\n \">Q\",\n lambda x: strftime(\"%a, %d %b %Y %H:%M:%S +0000\", gmtime(x[0] + epoch_diff)),\n ),\n \"LONG\": \">l\",\n \"ULONG\": \">L\",\n \"FWORD\": \">h\",\n \"SHORT\": \">h\",\n}\n\ntrickyFields = {\n (\"hmtxTable\", \"hMetrics\"): lambda f, d, p, o: (0, p),\n (\"hmtxTable\", \"leftSideBearings\"): lambda f, d, p, o: (0, p),\n (\"postTableVersion20\", \"names\"): lambda f, d, p, o: ([], p),\n (\"prepTable\", \"values\"): lambda f, d, p, o: ([], p),\n}\n\n\ndef consume(fmt, data, pos):\n if isinstance(fmt, str):\n output = struct.unpack_from(fmt, data, offset=pos)\n pos += struct.calcsize(fmt)\n return output[0], pos\n if isinstance(fmt, tuple):\n output = struct.unpack_from(fmt[0], data, offset=pos)\n pos += struct.calcsize(fmt[0])\n return fmt[1](output), pos\n\n\nwith open(\"commontype.yaml\") as yaml_file:\n commontype = yaml.load(yaml_file, Loader=yaml.FullLoader)\n\ndata = sys.stdin.buffer.read()\n\ndef checkCondition(condition, table):\n m = re.match(r'(\\w+)\\s*([<>=]+)\\s*(.*)', condition)\n if m:\n # Super lazy\n return eval(f'table[\"{m[1]}\"] {m[2]} {m[3]}')\n import code; code.interact(local=locals())\n\ndef readAField(field, data, pos, tableSoFar):\n fType = field[\"type\"]\n if \"condition\" in field and not checkCondition(field[\"condition\"], tableSoFar):\n return None, pos\n if fType in basetypes:\n fmt = basetypes[fType]\n output, pos = consume(fmt, data, pos)\n if \"to\" in field: # It's an offset\n if output:\n # Generally read from start of table...\n output, _ = readATable(field[\"to\"], data, output + tableSoFar[\"_pos\"])\n else:\n output = {}\n elif fType in commontype: # Bare field singly embedded\n output, pos = readATable(fType, data, pos)\n elif fType.endswith(\"[]\"):\n fType = fType[:-2]\n if not \"count\" in field:\n print(field[\"name\"] + \" is tricky but is not in my list of tricky fields\")\n sys.exit(1)\n counter = field[\"count\"]\n output = []\n for i in range(tableSoFar[counter]):\n if fType in commontype:\n rv, pos = readATable(fType, data, pos)\n else:\n rv, pos = readAField({\"type\": fType}, data, pos, tableSoFar)\n output.append(rv)\n else:\n print(\"Unknown type %s\" % fType)\n sys.exit(1)\n return output, pos\n\n\ndef readATable(table, data, pos=0):\n # Format switching header\n if \"union\" in commontype[table]:\n origtable = table\n table = None\n for t in commontype[origtable][\"union\"]:\n firstField = commontype[t][\"fields\"][0]\n assert \"value\" in firstField\n expected = firstField[\"value\"]\n got, _ = readAField(firstField, data, pos, {})\n if expected == got:\n table = t\n break\n if not table:\n print(\"No table format matched for %s at position %i\" % (origtable, pos))\n 
sys.exit(1)\n\n structure = commontype[table][\"fields\"]\n output = {\"_pos\": pos}\n doOffsets = []\n for field in structure:\n if (table, field[\"name\"]) in trickyFields:\n output[field[\"name\"]], pos = trickyFields[(table, field[\"name\"])](\n field, data, pos, output\n )\n else:\n output[field[\"name\"]], pos = readAField(field, data, pos, output)\n return output, pos\n\n\nif len(sys.argv) > 1:\n table = sys.argv[1]\n if table not in commontype:\n print(\"Table %s not found\" % table)\n print(sorted(commontype.keys()))\n sys.exit(1)\n out, _ = readATable(table, data)\n pprint(out)\nelse:\n fdir, pos = readATable(\"TableDirectory\", data)\n output = {}\n entries = [(x, x[\"tableTag\"] + \"Table\") for x in fdir[\"entries\"]]\n entries = filter(lambda x: x[1] in commontype, entries)\n for entry, tablename in entries:\n output[entry[\"tableTag\"]], _ = readATable(tablename, data, pos=entry[\"offset\"])\n pprint(output)\n","sub_path":"read-table.py","file_name":"read-table.py","file_ext":"py","file_size_in_byte":4608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"213946094","text":"from utils import read_bson, make_category_tables, make_test_set, make_val_set\n\nimport os\nimport pandas as pd\nimport keras\nimport tensorflow as tf\n\nfrom tqdm import *\n# Any results you write to the current directory are saved as output.\n\nprint(\"Keras and tf version :{} - {}\".format(keras.__version__, tf.__version__))\n\n# used to determine which dataset we work on.\n# ringo is the sample from the website\n# jim is an artificial dataset created for the sole purpose of testing efficiency\n# cass just considers the first 10000 lines of train and test\n# janis is the real dataset\ndataset_code = \"ringo\"\n\ndict_train_test_names = {\"ringo\": (\"train_example.bson\", \"train_example.bson\", 82, 82),\n \"cass\": (\"train.bson\", \"test.bson\", 1e4, 1e4),\n \"janis\": (\"train.bson\", \"test.bson\", 7069896, 1768172)}\ncsv_dir = \"../csv/\"\nbson_dir = \"../bson/\"\ntrain_name, test_name = dict_train_test_names[dataset_code][:2]\n\n\nclass Reader():\n def __init__(self, csv_dir=csv_dir, bson_dir=bson_dir, dataset_code=dataset_code, full_prep=False):\n self.csv_dir = csv_dir\n self.bson_dir = bson_dir\n self.dataset_code = dataset_code\n self.train_name, self.test_name = dict_train_test_names[dataset_code][:2]\n self.train_bson_path = os.path.join(self.bson_dir, self.train_name)\n self.test_bson_path = os.path.join(self.bson_dir, self.test_name)\n self.num_train_products = dict_train_test_names[dataset_code][2]\n self.num_test_products = dict_train_test_names[dataset_code][3]\n self.categories_path = os.path.join(csv_dir, \"category_names.csv\")\n self.categories_df = pd.read_csv(self.categories_path, index_col=\"category_id\")\n self.categories_df[\"category_idx\"] = pd.Series(range(len(self.categories_df)), index=self.categories_df.index)\n self.cat2idx, self.idx2cat = make_category_tables(self.categories_df)\n self.never_read = True\n\n if full_prep:\n self.full_prep()\n\n def read_bson(self):\n if self.never_read:\n self.never_read = False\n self.train_offsets_df = read_bson(self.train_bson_path, num_records=self.num_train_products, with_categories=True)\n self.test_offsets_df = read_bson(self.test_bson_path, num_records=self.num_test_products, with_categories=False)\n self.train_offsets_df.to_csv(os.path.join(self.csv_dir, self.dataset_code, \"train_offsets.csv\"))\n self.test_offsets_df.to_csv(os.path.join(self.csv_dir, self.dataset_code, 
\"test_offsets.csv\"))\n print(\"Total number of images in train {}\".format(self.train_offsets_df[\"num_imgs\"].sum()))\n\n def make_val_set(self, split_percentage=0.2, drop_percentage=0.):\n if self.never_read:\n self.read_bson()\n\n train_path = os.path.join(self.csv_dir, self.dataset_code)\n\n if \"train_images.csv\" not in os.listdir(train_path):\n print(\"Calling the make_val_set function\")\n self.train_images_df, self.val_images_df = make_val_set(self.train_offsets_df,\n self.cat2idx, split_percentage=split_percentage,\n drop_percentage=drop_percentage)\n self.train_images_df.to_csv(os.path.join(self.csv_dir, self.dataset_code, \"train_images.csv\"))\n self.val_images_df.to_csv(os.path.join(self.csv_dir, self.dataset_code, \"val_images.csv\"))\n\n else:\n print(\"Found the train_images_df.csv file\")\n self.train_images_df = pd.read_csv(os.path.join(csv_dir, dataset_code, \"train_images.csv\"), index_col=0)\n self.val_images_df = pd.read_csv(os.path.join(csv_dir, dataset_code, \"val_images.csv\"), index_col=0)\n\n def make_test_set(self):\n print(\"Doing make test set\")\n test_path = os.path.join(self.csv_dir, self.dataset_code)\n\n if not (\"test_images.csv\" in os.listdir(test_path)):\n print(\"Calling the make_test_set function\")\n self.test_images_df = make_test_set(test_offsets_df)\n self.test_images_df.to_csv(os.path.join(test_path, \"test_images.csv\"))\n print(\"Number of test images:\", len(self.test_images_df))\n\n else:\n print(\"Found the test_images_df.csv file\")\n self.test_images_df = pd.read_csv(os.path.join(csv_dir, dataset_code, \"test_images.csv\"), index_col=0)\n\n def full_prep(self):\n self.read_bson()\n self.make_val_set()\n self.make_test_set()\nr = Reader(full_prep=True)\n# r.read_bson()\n# r.make_val_set()\n# r.make_test_set()\n# r.full_prep()\n","sub_path":"sources/read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":4650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"76680151","text":"# Credits to: CodingBat\n# Site Link: https://codingbat.com/\n\n#############\n## PROBLEM ##\n#############\n\n\"\"\"\nReturn the \"centered\" average of an array of ints, \nwhich we'll say is the mean average of the values, \nexcept ignoring the largest and smallest values in \nthe array. If there are multiple copies of the smallest \nvalue, ignore just one copy, and likewise for the largest \nvalue. Use int division to produce the final average. 
\nYou may assume that the array is length 3 or more.\n\ncentered_average([1, 2, 3, 4, 100]) → 3\ncentered_average([1, 1, 5, 5, 10, 8, 7]) → 5\ncentered_average([-10, -4, -2, -4, -2, 0]) → -3\n\"\"\"\n\n########################\n## SOLUTION BY: KEVIN ##\n########################\n\n# First Solution\ndef centered_average(nums):\n\n\tsum = 0\n\tmax_value = max(nums)\n\tmin_value = min(nums)\n\n\tfor num in nums:\t\n\t\tsum += num\n\n\treturn (sum - max_value - min_value)/(len(nums) - 2)\n\n# Or\n\n# Second Solution\ndef centered_average(nums):\n\n\tsum_num = sum(nums)\n\tmaxv = max(nums)\n\tminv = min(nums)\n\n\treturn (sum_num - maxv - minv)/(len(nums)-2)\n","sub_path":"List-2/centered_average.py","file_name":"centered_average.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"51370977","text":"import numpy as np\nimport pandas as pd\n\nfrom keras.layers import Dense\nfrom keras.models import Sequential, Model\n\nfrom sklearn.model_selection import train_test_split\n\n\n\ndef AE_fitting(x_train, reduced_dimensionss):\n\n x_train_new = x_train.reshape(x_train.shape[0] * x_train.shape[1], x_train.shape[2])\n\n model = Sequential()\n model.add(Dense(units=reduced_dimensionss + 30, activation='tanh', name='en1', input_shape=[x_train.shape[2]]))\n model.add(Dense(units=reduced_dimensionss, activation='tanh', name='en2'))\n model.add(Dense(units=reduced_dimensionss + 30, activation='tanh', name='de1'))\n model.add(Dense(units=x_train.shape[2], name='de2'))\n\n model.summary()\n\n # extract compressed feature\n model.compile(optimizer='adam', loss='mae')\n\n model.fit(x_train_new, x_train_new, batch_size=512, epochs=100)\n AE_model = Model(inputs=model.input, outputs=model.get_layer(name='en2').output)\n\n return AE_model\n\ndef AE_predict(x, AE_model):\n\n x_new = x.reshape(x.shape[0] * x.shape[1], x.shape[2])\n\n compressed_x_new = AE_model.predict(x_new)\n compressed_x = compressed_x_new.reshape(x.shape[0], x.shape[1], -1)\n print('feature shape=', compressed_x.shape)\n\n return compressed_x\n\n\n\nif __name__ == \"__main__\":\n x = np.random.randn(20000000)\n x = x.reshape(5000, 20, 200)\n y = np.random.randint(3, size=5000)\n\n x_train, x_valid, y_train, y_valid = train_test_split(x, y, test_size=0.25, stratify=y)\n\n PCA_model = AE_fitting(x_train, 75)\n x_valid = AE_predict(x_valid, PCA_model)\n print(x_valid.shape)\n","sub_path":"Hyperopt_LightGBM/Autoencoder_RNN.py","file_name":"Autoencoder_RNN.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"374489200","text":"# class Query_info:\n\n# def __init__(self,query,index,num_question):\n# self.prev = None\n# self.query = query\n# self.index = [index]\n# self.num_question = num_question\n# self.next = None\n# self.other = None\n# class Query_list:\n\n# def __init__(self,length):\n# self.root = None\n# self.length = length\n# def input_query(self,query,index,num_question):\n# self.root = self._input_query(self.root,query,index,num_question)\n\n# def _input_query(self, query_info, query, index, num_question):\n# if query_info == None:\n# query_info = Query_info(query,index, num_question)\n# else:\n# if query_info.query == query:\n# query_info.index.append(index)\n# elif num_question < query_info.num_question:\n# for_ran = range(self.length) if query[0] == '?' else range(self.length+1,-1,-1)\n# for i in for_ran:\n# if query_info.query[i] == '?' 
and query[i].isalpha():\n# query_info.next.append(self._input_query(query))\n\n\n# def solution(words,queries):\n# words = set(words)\n# queries_length = len(queries)\n# answer = [0]*queries_length\n\n# dic_query = {}\n# for i in range(len(queries)):\n# query = queries[i]\n# n = len(query)\n# if n in dic_query.keys():\n# append = True\n# for j in range(len(dic_query[n])):\n# if dic_query[n][j][0] == query:\n# dic_query[n][j].append(i)\n# append=False\n# break\n# if append == True:\n# dic_query[n].append([query,i])\n# else:\n# dic_query[n] = [[query,i]]\n\n# for word in words:\n# len_word = len(word)\n \n# if len_word in dic_query.keys(): \n# for query_info in dic_query[len_word]:\n# for_range = range(len_word-1,-1,-1) if query_info[0][0] == '?' else range(len_word)\n# check_append = True\n\n# for i in for_range:\n# if query_info[0][i] != '?' and word[i] != query_info[0][i]:\n# check_append = False\n# break\n# elif query_info[0][i] == '?':\n# break\n\n# if check_append == True:\n# for ind in query_info[1:]:\n# answer[ind] += 1\n\n# return answer\n\n\n\n\nclass Trie:\n \n def __init__(self,length):\n self.root = {}\n self.root['normal'] = {'count':0}\n self.root['reverse'] = {'count':0}\n self.length = length\n\n def insert(self, s):\n n_cur = self.root['normal']\n r_cur = self.root['reverse']\n r_s = s[::-1]\n\n while s:\n n_cur['count'] += 1\n r_cur['count'] += 1\n if s[0] not in n_cur: n_cur[s[0]] = {'count':0}\n if r_s[0] not in r_cur: r_cur[r_s[0]] = {'count':0}\n n_cur = n_cur[s[0]]\n r_cur = r_cur[r_s[0]]\n s = s[1:]\n r_s = r_s[1:]\n\n def count(self, s):\n if s == '?'*self.length:\n return self.root['reverse']['count']\n elif s[0] == '?':\n cur = self.root['reverse']\n s = s[::-1]\n else:\n cur = self.root['normal']\n while s:\n if s[0] == '?': return cur['count']\n elif s[0] not in cur : return 0\n else:\n cur = cur[s[0]]\n s=s[1:]\n return cur['count']\n \n\ndef solution(words,queries):\n len_queries = len(queries)\n answer = [0]*len_queries\n trie = {}\n for word in words:\n length = len(word)\n try:\n trie[length].insert(word)\n except:\n trie[length] = Trie(length)\n trie[length].insert(word)\n \n for i in range(len_queries):\n query = queries[i]\n try:\n answer[i] = trie[len(query)].count(query)\n except:\n None \n return answer\n\n\nwords = [\"frodo\", \"front\", \"frost\", \"frozen\", \"frame\", \"kakao\",\"forst\"]\nqueries = [\"fro??\", \"????o\", \"fr???\", \"fro???\", \"pro?\"]\nprint(solution(words,queries))\n\n","sub_path":"KYC/algorithm/TEST/programmersSearchSong.py","file_name":"programmersSearchSong.py","file_ext":"py","file_size_in_byte":4273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"242951021","text":"# -*- coding:utf-8 -*-\n\n\ndef foo(num, path, target, n):\n global res\n if target < 0 or len(path) > n:\n return\n if len(path) == n and target != 0:\n return\n if target == 0 and len(path) == n:\n res.append(path[:])\n return\n else:\n for i in range(num+1, 10):\n path.append(i)\n foo(i, path, target-i, n)\n path.remove(i)\n\n\nif __name__ == '__main__':\n res = []\n foo(0, [], 15, 3)\n print(res)\n","sub_path":"work/qiuzhaobishi/2019/tuniu.py","file_name":"tuniu.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"143273550","text":"# Version 1.0.0 (some previous versions are used in past commits)\nimport tensorflow as tf\nimport shutil\nimport os\n\nimport traceback\nfrom s_console_prompt import 
prompt_yellow, prompt_blue, prompt_green, prompt_red\nfrom s_console_prompt import ConsoleColor\nfrom s_graph import inspect_graph\n\nsave_ses_enabled = False\n\nclass SessModelSaver(object):\n    def __init__(self, sess, step, inputs=9):\n        self.sess = sess\n        self.step = step\n        self.inputs = inputs\n        self.model_dir = \"model_save_{}_sess\".format(self.inputs)\n\n    def get_model_dir(self):\n        return self.model_dir \n\n    def save_train_board_ses_pb(self):\n        try:\n            tf.train.write_graph(\n                self.sess.graph_def, '', './{}/model_save-{}.pb'.format(self.model_dir, self.step), as_text=False)\n        except Exception as ex:\n            prompt_red(\"**model_ses {} failed to saved {}\".format(self.step, ex))\n            with ConsoleColor(ConsoleColor.RED):\n                traceback.print_exc()\n\n    def save(self):\n        if not save_ses_enabled:\n            return False\n\n        info = inspect_graph(\"saved_model_ses\")\n        model_dir = \"./\" + self.model_dir\n        if not os.path.isdir(model_dir):\n            os.mkdir(model_dir)\n        try:\n            saver = tf.train.Saver()\n            saver.save(self.sess, './{}/model_save'.format(self.model_dir),\n                       global_step=self.step, write_meta_graph=True)\n\n            self.save_train_board_ses_pb()\n            prompt_green(\n                \"**model_ses {} saved, graph_info: {}\".format(self.step, info))\n            return True\n        except Exception as ex:\n            prompt_red(\"**model_ses {} failed to saved {}\".format(self.step, ex))\n            with ConsoleColor(ConsoleColor.RED):\n                traceback.print_exc()\n            return False\n","sub_path":"s_save_model.py","file_name":"s_save_model.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"480554652","text":"# Given a string. Character “h” is included as minimal two times.\n# Delete from given string first and last h - characters and all characters between them.\n\n\ndef delete_fragment(string, char):\n    first = string.find(char)\n    last = string.rfind(char)\n    # index = 0\n    # while index < len(string):\n    #     if string[index] == char:\n    #         first = index\n    #         break\n    #     index += 1\n    # else:\n    #     first = -1\n    # index = len(string) - 1\n    # while index >= 0:\n    #     if string[index] == char:\n    #         last = index\n    #         break\n    #     index -= 1\n    return string[:first] + string[last + 1:]\n\nprint(delete_fragment('qwer(3)tyhuhasdfghzxcvr(19)bnhm', 'r'))\n","sub_path":"lesson_5/Delete_fragment.py","file_name":"Delete_fragment.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"27999930","text":"\"\"\"https://projecteuler.net/problem=1\"\"\"\n\n\ndef solve(count: int = 1000) -> int:\n    \"\"\"Solves problem 1.\n\n    Computes the sum of all multiples of 3 or 5 less than the count.\n\n    Arguments:\n        count {int} -- The number of integers to count. 
Default is 1000.\n\n Raises:\n ValueError: Raised if the count is less than or equal to zero.\n\n Returns:\n int -- The computed sum.\n \"\"\"\n if count <= 0:\n raise ValueError(\"The count must be a natural number greater than 0.\")\n result = 0\n for i in range(1, count):\n if i % 3 == 0 or i % 5 == 0:\n result += i\n return result\n","sub_path":"Python/project_euler/problem_01.py","file_name":"problem_01.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"386076267","text":"import requests\nimport json\nimport re\ndef locatePoint(location):\n #input location , return lng lat\n try:\n url = \"http://api.map.baidu.com/geocoder/v2/?address={0}&ret_coordtype=gcj02ll&city=南京市&output=json&ak=WkaSDu7nrqKQBIeOFyKeUT6wQpLGQ5te\".format(location)\n # print(url)\n r = requests.get(url)\n lng = json.loads(r.text)['result']['location']['lng']\n lat = json.loads(r.text)['result']['location']['lat']\n # print(location+\"的经纬度\")\n # print(lng)\n # print(lat)\n except Exception as err:\n print(err)\n lng = -1\n lat = -1\n return lng, lat\n\ndef getCarTime(origin_lng, origin_lat, destination_lng, destination_lat, tactics=\"13\", vehicle=\"driving\"):\n try:\n #http://api.map.baidu.com/routematrix/v2/driving?output=json&\n # origins=40.45,116.34|40.54,116.35&destinations=40.34,116.45|40.35,116.46&ak=您的ak\n url = \"http://api.map.baidu.com/routematrix/v2/{0}?tactics={1}&coord_type=gcj02&output=json&origins={2},{3}&destinations={4},{5}&ak=WkaSDu7nrqKQBIeOFyKeUT6wQpLGQ5te\".format(vehicle, tactics, origin_lat, origin_lng, destination_lat, destination_lng)\n # print(url)\n r = requests.get(url)\n # print(r.text)\n time = json.loads(r.text)['result'][0]['duration']['text']\n # print(\"获取到的time 的 str : \"+time)\n # 单位 小时 or 分钟\n nuit = ''.join(re.findall('[\\u4e00-\\u9fa5]+', time))\n # float类型 数据\n timeNumber = float(time[0:(len(time)-2)])\n if nuit == '小时':\n minutes = timeNumber * 60\n else:\n minutes = timeNumber\n # print(int(minutes))\n # print(type(minutes))\n except Exception as err:\n minutes = -1\n print(err)\n return minutes\n\nminutes1=getCarTime(locatePoint(\"江苏省南京市江宁区南京航空航天大学\")[0], locatePoint(\"江苏省南京市江宁区南京航空航天大学\")[1], locatePoint(\"东南大学\")[0], locatePoint(\"东南大学\")[1])\nminutes2=getCarTime(locatePoint(\"江苏省南京市江宁区南京航空航天大学\")[0], locatePoint(\"江苏省南京市江宁区南京航空航天大学\")[1], locatePoint(\"江苏省南京市浦口区南京信息工程大学\")[0], locatePoint(\"江苏省南京市浦口区南京信息工程大学\")[1])\nprint(minutes1)\nprint(minutes2)\n","sub_path":"map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":2373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"649678220","text":"\nimport logging\nfrom datetime import datetime\nfrom typing import Optional\n\n\ndef setup(\n level_stream: Optional[int] = logging.DEBUG,\n level_file: Optional[int] = logging.DEBUG,\n filename: Optional[str] = datetime.now().strftime('%Y-%m-%d-%H:%M:%S') + '.log',\n fmt: Optional[str] = '[%(levelname)s: %(name)s: %(asctime)s] \\n%(message)s',\n datefmt: Optional[str] = '%H:%M:%S',\n encoding: Optional[str] = 'utf-8') -> logging.Logger:\n \"\"\"\n Examples:\n # 呼び出し先\n import logger\n logger = logger.setup()\n\n # 呼び出し元\n import logging\n logger = logging.getLoger(__name__)\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.setLevel(level=logging.DEBUG)\n\n ch = logging.StreamHandler()\n ch.setLevel(level=level_stream)\n ch.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))\n logger.addHandler(ch)\n\n 
fh = logging.FileHandler(filename=filename, encoding=encoding)\n fh.setLevel(level_file)\n fh.setFormatter(logging.Formatter(fmt=fmt, datefmt=datefmt))\n logger.addHandler(fh)\n\n return logger\n\n\nif __name__ == '__main__':\n logger = setup(level_stream=logging.WARNING)\n logger.debug('debug test')\n logger.info('info test')\n logger.warning('warning test')\n logger.error('error test')\n\n","sub_path":"src/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"649533256","text":"# Copyright (C) 2017 Google Inc.\n# Licensed under http://www.apache.org/licenses/LICENSE-2.0 \n\n\"\"\"Tests export of Audit and mapped objects.\"\"\"\nfrom collections import defaultdict\n\nfrom ggrc import utils\nfrom ggrc.snapshotter.rules import Types\n\nfrom integration.ggrc import TestCase\nfrom integration.ggrc.models import factories\nfrom integration.ggrc.models.factories import get_model_factory\n\n\nclass TestAuditExport(TestCase):\n \"\"\"Test export of Audit\"\"\"\n def setUp(self):\n super(TestAuditExport, self).setUp()\n self.client.get(\"/login\")\n\n def test_export_audit_mappings(self):\n \"\"\"Test export of audit mapped objects\"\"\"\n snap_objects = []\n mapped_slugs = defaultdict(list)\n with factories.single_commit():\n audit = factories.AuditFactory(slug=\"Audit\")\n # Create a group of mapped objects for current audit\n for _ in range(3):\n # All snapshotable objects should be mapped to Audit + Issue\n # and Assessment\n for type_ in Types.all.union(Types.scoped):\n if type_ == \"Issue\":\n obj = get_model_factory(type_)()\n factories.RelationshipFactory(source=audit, destination=obj)\n elif type_ in Types.scoped:\n obj = get_model_factory(type_)(audit=audit)\n factories.RelationshipFactory(source=audit, destination=obj)\n else:\n obj = get_model_factory(type_)()\n mapped_slugs[type_].append(obj.slug)\n snap_objects.append(obj)\n\n self._create_snapshots(audit, snap_objects)\n\n audit_data = self.export_parsed_csv([{\n \"object_name\": \"Audit\",\n \"filters\": {\n \"expression\": {}\n },\n \"fields\": \"all\",\n }])[\"Audit\"][0]\n\n for type_, slugs in mapped_slugs.items():\n if type_ in Types.all:\n format_ = \"map:{} versions\"\n else:\n format_ = \"map:{}\"\n mapping_name = format_.format(utils.title_from_camelcase(type_))\n self.assertIn(mapping_name, audit_data)\n self.assertEqual(audit_data[mapping_name], \"\\n\".join(sorted(slugs)))\n","sub_path":"test/integration/ggrc/converters/test_export_audit.py","file_name":"test_export_audit.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"65822945","text":"from django.conf.urls import url\n\nfrom andelabuddy.interests import views\n\nurlpatterns = [\n url(r'^$', views.interests, name='interests'),\n url(r'^add_interest/$', views.CreateInterest.as_view(),\n name='add_interest'),\n url(r'^edit_interest/(?P\\d+)/$',\n views.EditInterest.as_view(), name='edit_interest'),\n]\n","sub_path":"andelabuddy/interests/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"171957147","text":"import os\n\nimport matplotlib.pyplot as plt\n\nfrom Bio import Phylo\nfrom Bio import Seq,SeqIO\nfrom Bio.Seq import Seq\n\ndef blast_graph(folder_name):\n\t\"\"\"\n\tFuncion que genera una representacion grafica de los 
resultados de Blast,\n\ten forma de alinemaiento. Mostrando las regiones coincidentes y las que no\n\n\tFunction to create a graphical representation of the Blast results as an alignment. \n\tMatching and non matching regions are formated differently. \n\t\"\"\"\n\n\tcounter_seq = 1 #Protein position\n\n\tos.mkdir(\"./Results/\" + folder_name + \"/Blast/Figures\")\n\n\tfor file in os.listdir(\"./Results/\" + folder_name + \"/Muscle/Allignments/\"):\n\t\t\n\t\tplt.figure(figsize = (25,15))\n\t\tplt.style.use(\"ggplot\") #Plot style\n\n\t\twith open(\"./Results/\" + folder_name + \"/Muscle/Allignments/\" + file,\n\t\t\t\t 'r') as input_handle:\n\t\t\tfor record in SeqIO.parse(input_handle, \"fasta\"):\n\t\t\t\tseq = str(record.seq)\n\t\t\t\tcounter_aa = 0 #Amino acid position\n\t\t\t\t\n\n\t\t\t\tfor char in seq:\n\t\t\t\t\tif char.isalpha(): #If match\n\t\t\t\t\t\tcounter_aa += 1\n\t\t\t\t\t\tplt.scatter(counter_aa, counter_seq, s = 10, marker = \"s\",\n\t\t\t\t\t\t\t\t color = \"Red\")\n\n\n\t\t\t\t\telse: #Not match\n\t\t\t\t\t\tcounter_aa += 1\n\t\t\t\t\t\tplt.scatter(counter_aa, counter_seq, s = 10, marker = 1, \n\t\t\t\t\t\t\t\t\tcolor = \"Blue\")\n\n\t\t\t\tplt.annotate(record.id, (counter_aa, counter_seq)) #Protein name\n\t\t\t\tcounter_seq += 1\n\n\t\tplt.grid(False)\n\t\tplt.title(\"Alineamiento Blast \" + file.replace(\"_allignment\", \"\"))\n\t\tplt.savefig(\"./Results/\" + folder_name + \"/Blast/Figures/\" \n\t\t\t\t\t+ file.replace(\"_allignment\", \"\") + \"_figure.png\") \n\t\t#Plot saving to folder\n\n\treturn\n\ndef tree_graph(folder_name):\n\t\"\"\"\n\t\tFuntion that drwas the phylogenetic trees and saves the, as a .png image\n\t\"\"\"\n\n\tos.mkdir(\"./Results/\" + folder_name + \"/Muscle/Trees_Figures\")\n\n\tfor file in os.listdir(\"./Results/\" + folder_name + \"/Muscle/Trees/\"):\n\t\ttree = Phylo.read(\"./Results/\" + folder_name + \"/Muscle/Trees/\"\n\t\t\t\t\t\t + file, 'newick')\n\t\tfig = plt.figure(figsize = (25,15), dpi = 100)\n\t\taxes = fig.add_subplot(1, 1, 1)\n\t\ttree.ladderize() #Branch manipulation to make tree pretier\n\t\tPhylo.draw(tree, axes = axes, do_show = False)\n\t\tplt.savefig(\"./Results/\" + folder_name + \"/Muscle/Trees_Figures/\"\n\t\t\t\t\t+ file)\n\n\treturn\n\n\t\t\n","sub_path":"graphicator_module.py","file_name":"graphicator_module.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"170503336","text":"# -*- coding: utf-8 -*-\r\nfrom time import process_time\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn.cluster import AffinityPropagation\r\nfrom sklearn.cluster import MeanShift\r\nfrom sklearn.cluster import SpectralClustering\r\nfrom sklearn.cluster import AgglomerativeClustering\r\nfrom sklearn.cluster import DBSCAN\r\nfrom sklearn.mixture import GaussianMixture\r\n\r\n#NMI指标评价函数\r\nfrom sklearn.metrics import normalized_mutual_info_score\r\nimport json\r\n\r\ndef load_data(data_file):\r\n data,label = [],[]\r\n with open(data_file,\"r\") as f:\r\n for line in f:\r\n dic = json.loads(line)\r\n #data.append(dic)\r\n data.append(dic['text'])\r\n label.append(dic['cluster'])\r\n return data,label\r\n\r\n\r\ndef k_means(vector_data,label,n_clusters):\r\n start = process_time()\r\n km = KMeans(n_clusters=n_clusters).fit(vector_data)\r\n km_result = km.fit_predict(vector_data.toarray())\r\n print('NMI of 
K-means : {0:.4f} time:{1:f}'.format(normalized_mutual_info_score(km_result,label),process_time()-start))\r\n\r\n\r\ndef affinity_propagation(vector_data,label):\r\n start = process_time()\r\n ap = AffinityPropagation(damping=.55, max_iter=300, convergence_iter=15, copy=False)\r\n ap_result = ap.fit_predict(vector_data.toarray())\r\n print('NMI of AffinityPropagation: {0:.4f} time used:{1:f}'.format(normalized_mutual_info_score(ap_result,label),process_time()-start))\r\n\r\ndef mean_shift(vector_data,label):\r\n start = process_time()\r\n ms = MeanShift(bandwidth=0.66, bin_seeding=True)\r\n ms_result = ms.fit_predict(vector_data.toarray())\r\n print('NMI of MeanShift: {0:.4f} time used:{1:f}'.format(normalized_mutual_info_score(ms_result, label),process_time()-start))\r\n\r\ndef spectral_clustering(vector_data,label):\r\n start = process_time()\r\n sc = SpectralClustering(n_clusters=110,n_init=10)\r\n sc_result = sc.fit_predict(vector_data.toarray())\r\n print('NMI of SpectralClustering: {0:.4f} time used:{1:f}'.format(normalized_mutual_info_score(sc_result, label),process_time()-start))\r\n\r\ndef agglomerative_clustering(vector_data,label):\r\n for linkage in ('ward', 'average', 'complete', 'single'):\r\n start = process_time()\r\n ac = AgglomerativeClustering(linkage=linkage, n_clusters=10)\r\n ac_result = ac.fit_predict(vector_data.toarray())\r\n print(linkage)\r\n print('NMI of AgglomerativeClustering: {0:.4f} time used:{1:f}'.format(\r\n normalized_mutual_info_score(ac_result, label), process_time() - start))\r\n\r\n\r\n\r\ndef dbscan(vector_data,label):\r\n start = process_time()\r\n db = DBSCAN(eps=0.5, min_samples=1, leaf_size=32)\r\n db_result = db.fit_predict(vector_data.toarray())\r\n print('NMI of DBSCAN: {0:.4f} time usd:{1:f}'.format(normalized_mutual_info_score(db_result, label),process_time()-start))\r\n\r\ndef gaussian_mixture(vector_data,label):\r\n start = process_time()\r\n gm = GaussianMixture(n_components=4)\r\n gm.fit(vector_data.toarray())\r\n gm_result = gm.predict(vector_data.toarray())\r\n print('NMI of GaussianMixture: {0:.4f} time used:{1:f}'.format(normalized_mutual_info_score(gm_result, label),process_time()-start))\r\n\r\n\r\nif __name__ == '__main__':\r\n data,label = load_data('./Tweets.txt')\r\n\r\n start = process_time()\r\n vectorizer = TfidfVectorizer()\r\n x = vectorizer.fit_transform(data)\r\n\r\n #k_means(x,label,110)\r\n #affinity_propagation(x,label)\r\n #mean_shift(x,label)\r\n #spectral_clustering(x,label)\r\n #agglomerative_clustering(x,label)\r\n #dbscan(x,label)\r\n gaussian_mixture(x,label)\r\n","sub_path":"test various methods/homework3.py","file_name":"homework3.py","file_ext":"py","file_size_in_byte":3692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"494667335","text":"\nclass BTNode(object):\n def __init__(self,val):\n self.val = val\n self.left = None\n self.right = None\n\ndef restore(inlist):\n if not inlist: return\n \n root = BTNode(inlist.pop(0))\n wqueue = [root]\n \n while inlist:\n node = wqueue.pop(0)\n val = inlist.pop(0)\n if val:\n node.left = BTNode(val)\n wqueue.append(node.left)\n if inlist:\n val = inlist.pop(0)\n if val:\n node.right = BTNode(val)\n wqueue.append(node.right)\n return root\n \ndef store(root):\n if not root: return \n \n wqueue = [root]\n \n out_list = []\n while wqueue:\n node = wqueue.pop(0)\n if node:\n out_list.append(node.val)\n else:\n out_list.append(None)\n \n if node:\n wqueue.append(node.left)\n wqueue.append(node.right)\n \n j = 
len(out_list)-1\n while out_list[j] == None:\n j -= 1\n \n return out_list[:j+1]\n \n\ndef print_inorder(root):\n if not root: return\n \n wstack = []\n node = root\n \n while node or wstack:\n if node:\n wstack.append(node)\n node = node.left\n else:\n node = wstack.pop()\n print(node.val, end=\" \")\n node = node.right\n print(\"\")\n \ndef print_levelorder(root):\n if not root: return\n \n wqueue = [root, None] \n while wqueue:\n node = wqueue.pop(0)\n if node:\n print(node.val, end=\" \")\n if node.left:\n wqueue.append(node.left)\n if node.right:\n wqueue.append(node.right)\n else:\n print(\"\")\n if wqueue:\n wqueue.append(None)\n\ndef print_preorder(root): \n wstack = []\n node = root\n \n while node or wstack:\n if node:\n print(node.val, end=\" \")\n wstack.append(node.right)\n node = node.left\n else:\n node = wstack.pop()\n print(\"\")\n \ndef print_postorder(root):\n wstack = []\n node = root\n prev = \"dummy\"\n \n while node or wstack:\n if node:\n if node.right == prev:\n print(node.val, end=\" \")\n prev = node\n node = wstack.pop() if wstack else None\n elif node.left == prev:\n prev = node\n wstack.append(node)\n node = node.right\n else:\n prev = node\n wstack.append(node)\n node = node.left\n else:\n prev = None\n node = wstack.pop() if wstack else None\n print(\"\")\n \n \nif __name__ == '__main__':\n inlist2 = [8, 3, 13, 1, 5, 10, 17, None, None, 4, 7, None, 12, 15]\n \n root2 = restore(inlist2)\n print_inorder(root2)\n print_levelorder(root2)\n \n inlist3 = store(root2)\n print(inlist3)\n \n print_preorder(root2)\n print_postorder(root2)\n ","sub_path":"6 Trees/6-02_bt_serialize.py","file_name":"6-02_bt_serialize.py","file_ext":"py","file_size_in_byte":3017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"202370081","text":"from model import db\r\nfrom model.models import User\r\n\r\n\r\ndef db_check_if_not_insert_to_db_and_get_user_id(email, name, provider, unique_id):\r\n user = User.query.filter_by(uid=unique_id).first()\r\n if not user:\r\n user = User(name=name, email=email, uid=unique_id, oauth=provider)\r\n db.session.add(user)\r\n db.session.commit()\r\n return user.id\r\n\r\ndef guset_insert_to_db(email, name, provider, unique_id):\r\n user = User(name=name, email=email, uid=unique_id, oauth=provider)\r\n db.session.add(user)\r\n db.session.commit()\r\n return user.id","sub_path":"repository/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"63745926","text":"textfile = open(\"alice_in_wonderland.txt\",\"r\")\nraw = textfile.read()\ncharList=[]\ni=0\nfor i in range(26):\n charList.append([chr(97+i),0])\nfor x in raw:\n if x.isalpha():\n a = ord(x.lower())-97\n charList[a][1]+=1\nj=0\nfor j in range(26):\n print(charList[j][1])\n\n\n","sub_path":"1MWTT/week2/day1/countChar.py","file_name":"countChar.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"63676424","text":"# This script converts the input data from the .xlsx and txt. 
file to usable .npy file.\n# Before inserting data, make sure the data looks similar to the examples in the data folder.\n\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime\n\n# Import the data and ensure that it is stored as individual manageable vectors with a time stamp in first column\n\n\ndef save_to_mat(input_data_thread, target_file_thread):\n inp_dat = pd.read_excel(input_data_thread)\n inp_dat = inp_dat.as_matrix()\n\n # trim to relevant data only\n inp_dat = inp_dat[:, 0:3]\n time_stamp_0 = datetime.timestamp(datetime.strptime('00:00', '%H:%M'))\n\n # date to time stamp format\n for x in range(len(inp_dat[:, 0])):\n time_stamp_day = datetime.timestamp(inp_dat[x, 0])\n time_stamp_hour = datetime.timestamp(datetime.strptime(inp_dat[x, 1], '%H:%M')) - time_stamp_0\n time_stamp = time_stamp_day + time_stamp_hour\n inp_dat[x, 0] = time_stamp\n\n inp_dat = np.vstack((inp_dat[:, 0], inp_dat[:, 2])).T\n\n # get rid of object type and convert to float 64. float64 may be excessive, maybe consider float32.\n\n inp_dat = np.array(list(inp_dat), dtype=np.float64)\n np.save(target_file_thread, inp_dat)\n\n\n# save_to_mat('data/2018_Wind_OSBRK_4000kWp.xlsx', 'data_as_matrix/2018_Wind1_Osterburken.npy')\n\n# convert weather data from a .csv file to a numpy array with a timestamp in its first column\n\n\ndef save_weather_data_ninja(input_data_thread, target_file_thread):\n inp_dat = pd.read_csv(input_data_thread)\n inp_dat = inp_dat.as_matrix()\n adjust = 0\n\n # convert date to time stamp format\n for x in range(len(inp_dat[:, 0])):\n date_time_obj = datetime.strptime(inp_dat[x, 0], '%Y-%m-%d %H:%M')\n time_stamp = datetime.timestamp(date_time_obj)\n inp_dat[x, 0] = time_stamp\n inp_dat[:, 0] = inp_dat[:, 0] + adjust\n inp_dat = inp_dat.astype('float64')\n\n np.save(target_file_thread, inp_dat)\n\n\n# save_weather_data_ninja('data_as_matrix/weather_ninja/osterburken.csv', 'data_as_matrix/Osterburken_Weather')\n","sub_path":"input_data.py","file_name":"input_data.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"212465473","text":"# -*- coding: utf-8 -*-\n\n# Libraries\nimport cv2\nimport numpy as np\nimport pylab as pl\nimport datetime\nimport time\n\n# Files\nimport DetectionProcess\nimport ImportData\nimport SVM\n\nif __name__ == '__main__':\n \n parameterToBeVaried = \"nbins\" \n \n defaultHOG = cv2.HOGDescriptor()\n \n if parameterToBeVaried == \"blockSize\":\n #valueRange = np.arange(4, 33, 4)\n valueRange = 2**(np.arange(2,6))\n # Default custom HOG parameters\n myParams = dict(\n _winSize = (32,32),\n _cellSize = (4,4),\n _nbins = 9,\n _derivAperture = defaultHOG.derivAperture,\n _winSigma = defaultHOG.winSigma,\n _histogramNormType = defaultHOG.histogramNormType,\n _L2HysThreshold = defaultHOG.L2HysThreshold,\n _gammaCorrection = defaultHOG.gammaCorrection,\n _nlevels = defaultHOG.nlevels\n )\n elif parameterToBeVaried == \"cellSize\":\n valueRange = 2**(np.arange(1,6))\n # Default custom HOG parameters\n myParams = dict(\n _winSize = (32,32),\n _blockSize = (32,32),\n _blockStride = (16,16),\n _nbins = 9,\n _derivAperture = defaultHOG.derivAperture,\n _winSigma = defaultHOG.winSigma,\n _histogramNormType = defaultHOG.histogramNormType,\n _L2HysThreshold = defaultHOG.L2HysThreshold,\n _gammaCorrection = defaultHOG.gammaCorrection,\n _nlevels = defaultHOG.nlevels\n )\n elif parameterToBeVaried == \"nbins\":\n valueRange = 2**(np.arange(1,6))\n myParams = dict(\n 
_winSize = (32,32),\n _blockSize = (16,16),\n _blockStride = (8,8),\n _cellSize = (4,4),\n _derivAperture = defaultHOG.derivAperture,\n _winSigma = defaultHOG.winSigma,\n _histogramNormType = defaultHOG.histogramNormType,\n _L2HysThreshold = defaultHOG.L2HysThreshold,\n _gammaCorrection = defaultHOG.gammaCorrection,\n _nlevels = 1\n )\n \n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Build classifiers with different values and try finding objects with them\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n \n results = []\n \n for value in valueRange:\n if parameterToBeVaried == \"winSize\":\n myParams[\"_winSize\"] = (value, value)\n myParams[\"_blockSize\"] = (value/4, value/4)\n myParams[\"_blockStride\"] = (value/8, value/8)\n myParams[\"_cellSize\"] = (value/8, value/8)\n elif parameterToBeVaried == \"blockSize\":\n myParams[\"_blockSize\"] = (value, value)\n myParams[\"_blockStride\"] = (value/2, value/2)\n elif parameterToBeVaried == \"cellSize\":\n myParams[\"_cellSize\"] = (value, value)\n elif parameterToBeVaried == \"nbins\":\n myParams[\"_nbins\"] = value\n \n hog = cv2.HOGDescriptor(**myParams)\n \n # Import data and extract HOG features\n trainData, trainClasses, labels, groundTruth = ImportData. \\\n ImportDataAndExtractHOGFeatures(hog=hog,\n days=[\"day1\",\"day2\",\"day3\"],\n saveAnnotations=False,\n thisManySamples=100)\n \n # Build classifier with cross-validating cost \n #cost = 10.0**(np.arange(-2,3,1))\n cost = 0.01\n model = SVM.Train(trainData, trainClasses, cost=cost)\n \n \n \n \n hog.setSVMDetector( model.coef_[0] ) \n params = dict(\n hitThreshold = -model.intercept_[0],\n winStride = (4,4),\n padding = (8,8),\n scale = 1.05,\n finalThreshold = 2,\n useMeanshiftGrouping = False\n )\n \n searchMethod = \"detectMultiScale\"\n evaluate = [\"singleImage\", r\".\\testWithThese\\day3\\Tile002496.bmp\"] \n \n outputFolder = r\".\\\\\" + searchMethod + \"_\" + datetime.datetime. 
\\\n            fromtimestamp(time.time()).strftime('%Y-%m-%d_%H-%M-%S') \n    \n    parametersToStudy = []\n    \n    searchResults = DetectionProcess.OneOrMultipleImageSearch(hog, model, \n                                  searchMethod, params, \n                                  [], evaluate, outputFolder, saveIm=True)\n    \n    results.append(searchResults)\n    \n    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n    # Analyze results\n    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n    \n    if evaluate[0] == \"singleImage\":\n        pl.close(\"all\")\n        fig = pl.figure(figsize=(6, 6), facecolor='white')\n        ax1 = fig.add_subplot(1,1,1)\n        ax1.set_ylim(0,1)\n        ax1.set_xlabel(\"Varied HOG param\")\n        ax1.set_ylabel(\"F1-score\")\n        for ix,res in enumerate(results):\n            ax1.scatter(ix,res[\"F1\"])\n        pl.draw()\n    \n\n\n    \n","sub_path":"ResultsCompareHOGParametersInTesting.py","file_name":"ResultsCompareHOGParametersInTesting.py","file_ext":"py","file_size_in_byte":5634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"9683485","text":"import r2pipe\nimport angr\nimport monkeyhex\nimport logging\nfrom angr import sim_options as so\nfrom angr.state_plugins.sim_action import SimActionExit\n\nfrom .perf import TRACE_END, read_trace\nfrom .annotate import Addr2line\n\nfrom pwn_wrapper import ELF\n\nl = logging.getLogger('hase.tracer')\n\n\nclass Registers():\n    def __init__(self, state):\n        self.state = state\n\n    def __getattr__(self, k):\n        reg = getattr(self.state.simstate.regs, k)\n        return self.state.simstate.solver.eval(reg)\n\n\nclass State():\n    def __init__(self, branch, simstate):\n        self.branch = branch\n        self.simstate = simstate\n\n    def __repr__(self):\n        if self.branch[0] == 0:\n            return \"State(Start -> 0x%x)\" % (self.branch[1])\n        elif self.branch[1] == TRACE_END:\n            return \"State(0x%x -> End)\" % (self.branch[0])\n        else:\n            return \"State(0x%x -> 0x%x)\" % (self.branch[0], self.branch[1])\n\n    def registers(self):\n        return Registers(self)\n\n    def object(self):\n        return self.simstate.project.loader.find_object_containing(self.simstate.addr)\n\n    def address(self):\n        return self.simstate.addr\n\n    def location(self):\n        \"\"\"\n        Binary of current state\n        \"\"\"\n        obj = self.object()\n        a = Addr2line()\n        a.add_addr(obj, self.simstate.addr)\n        return a.compute()[self.simstate.addr]\n\n\nclass Tracer():\n    def __init__(self, executable, trace_path, coredump, dso_offsets):\n        self.executable = executable\n        self.coredump = coredump\n        self.dso_offsets = dso_offsets\n        self.project = angr.Project(executable, **dso_offsets)\n        trace = read_trace(trace_path, self.project.loader)\n        self.trace = trace\n        self.states = {}\n\n        assert self.project.loader.main_object.os.startswith('UNIX')\n\n        self.elf = ELF(executable)\n\n        start = self.elf.symbols.get('_start')\n        main = self.elf.symbols.get('main')\n\n        for (idx, event) in enumerate(self.trace):\n            if event[1] == start or event[1] == main:\n                self.trace = trace[idx:]\n\n        remove_simplications = {\n            so.LAZY_SOLVES, so.EFFICIENT_STATE_MERGING,\n            so.TRACK_CONSTRAINT_ACTIONS\n        } | so.simplification\n        self.start_state = self.project.factory.blank_state(\n            addr=self.trace[0][1],\n            add_options=set([so.TRACK_JMP_ACTIONS]),\n            remove_options=remove_simplications)\n\n        self.simgr = self.project.factory.simgr(\n            self.start_state,\n            save_unsat=True,\n            hierarchy=False,\n            save_unconstrained=True)\n        self.r2 = r2pipe.open(executable)\n        # For debugging\n        self.project.pt = self\n\n    def print_addr(self, addr):\n        print(self.r2.cmd(\"pd -2 @ %s; pd 2 @ %s\" % (addr, addr)))\n\n    def jump_was_not_taken(self, 
old_state, new_state):\n        # was the last control flow change an exit vs call/jump?\n        ev = new_state.events[-1]\n        instructions = old_state.block().capstone.insns\n        assert isinstance(ev, SimActionExit) and len(instructions) == 1\n        size = instructions[0].insn.size\n        return (new_state.addr - size) == old_state.addr\n\n    def find_next_branch(self, state, branch):\n        while True:\n            l.debug(\"0x%x\", state.addr)\n            choices = self.project.factory.successors(\n                state, num_inst=1).successors\n            old_state = state\n\n            if branch[1] == TRACE_END:\n                for choice in choices:\n                    if choice.addr == branch[0]:\n                        return choice\n\n            if len(choices) <= 2:\n                for choice in choices:\n                    if old_state.addr == branch[0] and choice.addr == branch[1]:\n                        l.debug(\"jump 0x%x -> 0x%x\", old_state.addr, choice.addr)\n                        return choice\n                    if len(choices) == 1 or self.jump_was_not_taken(\n                            old_state, choice):\n                        state = choice\n            else:\n                # There should never be more than two!\n                import ipdb\n                ipdb.set_trace()\n\n    def valid_address(self, address):\n        return address == TRACE_END or self.project.loader.find_object_containing(\n            address)\n\n    def run(self):\n        state = self.simgr.active[0]\n        states = []\n        states.append(State(self.trace[0], state))\n        for event in self.trace[1:]:\n            l.debug(\"look for jump: 0x%x -> 0x%x\" % (event[0], event[1]))\n            assert self.valid_address(event[0]) and self.valid_address(\n                event[1])\n            state = self.find_next_branch(state, event)\n            states.append(State(event, state))\n        return states\n","sub_path":"hase/tracer.py","file_name":"tracer.py","file_ext":"py","file_size_in_byte":4784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"307541995","text":"# -*- coding: utf-8 -*-\r\n\r\nimport sys\r\nimport os\r\nimport requests\r\nimport logging\r\nimport logging.handlers\r\nimport json\r\nimport time\r\nimport datetime as dt\r\nimport pymysql\r\n\r\n\r\nclass ApiException(Exception):\r\n    pass\r\n\r\n\r\n'''\r\n    Saves candles (open, close, high, low, volume) of a given currency on bitfinex (the exchange) to the db (MySql).\r\n    Reads the time of the most recently stored candle from the db and resumes from that time.\r\n    If the db is empty, it starts from START_DATE.\r\n'''\r\nclass SaveBitfinexCandles:\r\n    LIMIT_CANDLES = 100 # Maximum number of candles per query.\r\n    START_DATE = dt.datetime(2017, 1, 1) # Start date used when the db is empty.\r\n\r\n    def __init__(self,\r\n                 currency,\r\n                 candle_time,\r\n                 db_table_name,\r\n                 log_level,\r\n                 log_format='[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s > %(message)s',\r\n                 db_info={'host': 'localhost', 'user': 'root'}):\r\n        self.logger = self.init_logger(log_level, log_format)\r\n        self.currency = currency # tBTCUSD, tETHUSD ...\r\n        self.candle_time = candle_time # [1m, 5m, 15m, 30m, 1h, 3h, 6h, 12h, 1D, 7D, 14D, 1M]\r\n        self.db_table_name = \"{}\".format(db_table_name)\r\n        self.min_period = self.get_min_period() # Period (minutes) fetched per api call.\r\n        self.db_info = db_info\r\n        self.sec_save_interval = 5 # Wait time after each save while processing historical data.\r\n        self.sec_work_interval = 10 # Main loop wait time.\r\n        self.conn_db = None\r\n        self.date_now = None # Current time of the job in progress.\r\n\r\n\r\n    def init_logger(self, log_level, log_format):\r\n        logger = logging.getLogger(os.path.basename(__file__))\r\n        logger.setLevel(log_level)\r\n        formatter = logging.Formatter(log_format)\r\n        stream_handler = logging.StreamHandler()\r\n        stream_handler.setFormatter(formatter)\r\n        logger.addHandler(stream_handler)\r\n\r\n        return logger\r\n\r\n\r\n    def get_min_period(self):\r\n        value = int(self.candle_time[:-1])\r\n        unit = self.candle_time[-1]\r\n\r\n        min_period = self.LIMIT_CANDLES\r\n        if unit == 'm':\r\n            min_period *= value\r\n        elif unit == 'h':\r\n            min_period *= (value * 60)\r\n        elif unit == 'D':\r\n            min_period *= (value * 60) * 24\r\n        elif unit == 'M':\r\n            min_period *= ((value 
* 60) * 24) * 30\r\n        else:\r\n            raise Exception(\"invalid candle_time: {}\".format(self.candle_time))\r\n\r\n        return min_period\r\n\r\n\r\n    def start(self):\r\n        while True:\r\n            try:\r\n                self.conn_db = pymysql.connect(host=self.db_info['host'],\r\n                                               port=self.db_info['port'],\r\n                                               user=self.db_info['user'],\r\n                                               password=self.db_info['password'],\r\n                                               charset='utf8',\r\n                                               cursorclass=pymysql.cursors.DictCursor)\r\n\r\n                self.date_now = dt.datetime.now()\r\n                date_last = self.get_last_candle_date_in_db()\r\n                if date_last:\r\n                    # For real-time updates, run the job when the last data is older than a fixed interval.\r\n                    sec_diff = (self.date_now - date_last).seconds\r\n                    need_work = 60 <= sec_diff\r\n                else: # if db is empty, start default date.\r\n                    date_last = self.START_DATE\r\n                    need_work = True\r\n\r\n                if need_work:\r\n                    self.logger.info(\"start working...\")\r\n                    self.work(date_last)\r\n                    self.logger.info(\"end.\")\r\n                else:\r\n                    self.logger.info(\"waiting...\")\r\n\r\n            except Exception as e:\r\n                self.logger.error(e)\r\n            finally:\r\n                if self.conn_db:\r\n                    self.conn_db.close()\r\n                    self.conn_db = None\r\n                time.sleep(self.sec_work_interval)\r\n\r\n\r\n    def get_last_candle_date_in_db(self):\r\n        with self.conn_db.cursor() as cursor:\r\n            select_query = \"SELECT `timestamp` AS t \" + \\\r\n                           \"FROM {} \".format(self.db_table_name) + \\\r\n                           \"ORDER BY id DESC LIMIT 1\"\r\n            cursor.execute(select_query)\r\n\r\n            result = None\r\n            for row in cursor:\r\n                result = row\r\n\r\n            if result:\r\n                return result['t'] # datetime.\r\n            else:\r\n                return None # empty db.\r\n\r\n\r\n    def work(self, date_last):\r\n        self.save(date_last, self.date_now)\r\n\r\n\r\n    def save(self, start_date, end_date):\r\n        def date_range(start, end, delta):\r\n            curr = start\r\n            while curr < end:\r\n                yield curr\r\n                curr += delta\r\n\r\n        for save_date in date_range(start_date, end_date, dt.timedelta(minutes=self.min_period)):\r\n            success = False\r\n            while success is False:\r\n                try:\r\n                    self.save_period(save_date, self.min_period)\r\n                    success = True\r\n                except ApiException as e:\r\n                    time.sleep(60) # Usually means too many requests were sent.\r\n                except Exception as e:\r\n                    self.logger.error(e)\r\n                    success = False\r\n                finally:\r\n                    time.sleep(self.sec_save_interval)\r\n\r\n\r\n    def save_period(self, save_date, min_period):\r\n        start_timestamp = int(time.mktime(save_date.timetuple()))\r\n        candles = self.get_candles(start_timestamp=start_timestamp, min_period=min_period)\r\n        self.save_db(candles)\r\n\r\n        self.logger.info(\"n_candles: {}\".format(len(candles)))\r\n\r\n        # Historical processing is done; only the latest data needs updating, so increase the wait time.\r\n        if len(candles) <= 1:\r\n            self.sec_work_interval = 30\r\n\r\n\r\n    def get_candles(self, start_timestamp, min_period):\r\n        start_timestamp *= 1000\r\n        end_timestamp = start_timestamp + (min_period * 60 * 1000)\r\n        end_timestamp = min(end_timestamp, int(time.mktime(self.date_now.timetuple())) * 1000)\r\n        end_timestamp -= 100 # Subtract 100ms to exclude the boundary.\r\n        url = \"https://api.bitfinex.com/v2/candles/trade:{}:{}/hist\".format(self.candle_time, self.currency) + \\\r\n              \"?start={}&end={}&sort=1&limit={}\".format(start_timestamp, end_timestamp, self.LIMIT_CANDLES)\r\n        self.logger.debug(url)\r\n\r\n        response = requests.get(url, verify=False)\r\n        if (response.status_code != 200):\r\n            self.logger.error(\"url: {}\".format(url))\r\n            self.logger.error(\"status_code: {}\".format(response.status_code))\r\n            raise ApiException('fail api call.')\r\n\r\n        content = response.content.decode('utf-8')\r\n        candles = eval(content)\r\n        self.logger.debug(\"candle count: {}\".format(len(candles)))\r\n        return candles\r\n\r\n\r\n    def save_db(self, candles):\r\n        query = \"INSERT INTO {}(`timestamp`, `open`, `high`, `low`, `close`, `volume`) \".format(self.db_table_name) + \\\r\n                'VALUES '\r\n\r\n        query_value = ''\r\n        for candle in candles:\r\n            timestamp = int(candle[0] / 1000)\r\n            date_candle = 
dt.datetime.fromtimestamp(timestamp)\r\n            date_candle_str = date_candle.strftime('%Y-%m-%d %H:%M:%S')\r\n            o = candle[1] # open. Opening price.\r\n            h = candle[3] # high. Highest price.\r\n            l = candle[4] # low. Lowest price.\r\n            c = candle[2] # close. Closing price.\r\n            v = candle[5] # volume. Trade volume.\r\n            query_value += \"('{}', '{}', '{}', '{}', '{}', '{}'),\".format(date_candle_str, o, h, l, c, v)\r\n\r\n            self.logger.info(\"[{}] O:{}, H:{}, L:{} C:{} V:{}\".format(date_candle_str, o, h, l, c, v))\r\n\r\n        query += query_value[:-1]\r\n        query += \" ON DUPLICATE KEY UPDATE \" + \\\r\n                 \"`open` = VALUES(`open`), `high` = VALUES(`high`), `low` = VALUES(`low`), `close` = VALUES(`close`), `volume` = VALUES(`volume`)\"\r\n\r\n        try:\r\n            with self.conn_db.cursor() as cursor:\r\n                cursor.execute(query)\r\n            self.conn_db.commit()\r\n        except Exception as e:\r\n            self.logger.error('[query] ' + query)\r\n\r\n\r\ndef main(argv):\r\n    ''' ###### configurations. ###### '''\r\n    db_info = {\r\n        'host': 'localhost',\r\n        'port': 3306,\r\n        'user': 'root',\r\n        'password': ''\r\n    }\r\n    ''' ###### configurations. ###### '''\r\n\r\n    n_arg = 4\r\n\r\n    if len(argv) < n_arg:\r\n        print(\"usage: {} {} {} {} {}\".format(os.path.basename(__file__), 'currency', 'candle_time', 'db_name.table_name', '[loglevel]'))\r\n        print(\"  currency: tBTCUSD, tETHUSD ...\")\r\n        print('    Trading pairs symbols are formed prepending a \"t\" before the pair (i.e tBTCUSD, tETHUSD).')\r\n        print(\"  candle_time: 1m, 5m, 15m, 30m, 1h, 3h, 6h, 12h, 1D, 7D, 14D, 1M\")\r\n        print(\"  db_table_name: MySQL table name.\")\r\n        print(\"  [loglevel]: DEBUG, INFO, WARNING, ERROR, CRITICAL\")\r\n        return\r\n\r\n    # last argument can be loglevel.\r\n    log_level = 'INFO'\r\n    if n_arg < len(argv):\r\n        log_level = argv[n_arg]\r\n\r\n    save_bitfinex_candles = SaveBitfinexCandles(currency=argv[1],\r\n                                                candle_time=argv[2],\r\n                                                db_table_name=argv[3],\r\n                                                db_info=db_info,\r\n                                                log_level=log_level)\r\n    save_bitfinex_candles.start()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    main(sys.argv)","sub_path":"src/save_bitfinex_candles.py","file_name":"save_bitfinex_candles.py","file_ext":"py","file_size_in_byte":9895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"581992388","text":"#coding:utf-8\r\nimport sys\r\nfrom PyQt5 import QtCore\r\nimport configparser\r\n\r\nclass StreamingManager(object):\r\n\r\n    def __new__(cls):\r\n        if cls.__instance is None:\r\n            cls.__instance = object.__new__(cls)\r\n        return cls.__instance\r\n\r\n    def __init__(self, account_info):\r\n        self.__account_info = account_info\r\n        self.__account_info.added_account.connect(self.add_streaming)\r\n        self.__streaming_data = set() \r\n        config = configparser.ConfigParser()\r\n        for account in self.__account_info.account_dict.value():\r\n            config.read(account_path+account.id+'.conf')\r\n        \r\n\r\n\r\n","sub_path":"Raijin/twitter/StreamingManager.py","file_name":"StreamingManager.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"487618394","text":"#!/usr/bin/env python\n# encoding: utf-8\nimport os\nimport sys\nimport json\nimport numpy as np\nimport pandas as pd\nimport _pickle as pk\nfrom scipy.stats import mode\nfrom ai_hub import inferServer\n\nreal = os.path.exists(\"/tcdata\")\nhist = None\nhpoi = 0\nhpos = []\nfail = set()\nt1 = 0.4 if real else 0\nt2 = 0.9\n\n# Drop the features that hurt the score\ndef mask(x):\n    m = np.ones((164,))\n    m[26:29]=0\n    m[32]=0\n    m[36]=0\n    m[40]=0\n    m[44]=0\n    m[48:79]=0\n    m[80:82]=0\n    m[105:108]=0\n    m[111]=0\n    m[115]=0\n    m[119]=0\n    m[123]=0\n    m[127:]=0\n    m = m.astype(\"bool\")\n    return x[:, m]\n\n# 
Load the model; it must return a model with a predict method, otherwise the predict method below has to be overridden\ndef load_model():\n    with open(\"model.pkl\", \"rb\") as f:\n        model = pk.load(f)\n    return model\n\n\n# Feature extraction; must return a DataFrame with serial_number as the first column and features from the second column on\ndef extract_feature(mce, adr, krn):\n    global hist, hpoi, hpos\n    # MCE-related features\n    mce[\"collect_time\"] = (\n        pd.to_datetime(mce[\"collect_time\"], infer_datetime_format=True).astype(int)\n        // 1e9\n    )\n    mce[\"mca_id\"] = mce[\"mca_id\"].fillna(\"NA\")\n    mce[\"transaction\"] = mce[\"transaction\"].fillna(4)\n    mce[\"vendor\"] = mce[\"vendor\"].fillna(3)\n    npmce = mce.values\n    dat = []\n    mca = [\n        \"Z\",\n        \"NA\",\n        \"AP\",\n        \"AF\",\n        \"E\",\n        \"CD\",\n        \"BB\",\n        \"C\",\n        \"CC\",\n        \"F\",\n        \"G\",\n        \"EE\",\n        \"AA\",\n        \"AE\",\n        \"BC\",\n        \"AZ\",\n        \"DE\",\n        \"FF\",\n    ]\n    for s in np.unique(npmce[:, 0]):\n        sdf = npmce[npmce[:, 0] == s]\n        dat.append([s, sdf.shape[0], sdf[:, 4][0], sdf[:, 5][0]])\n        for i in mca:\n            dat[-1].append(sdf[sdf[:, 1] == i].shape[0])\n        for t in range(5):\n            dat[-1].append(sdf[sdf[:, 2] == t].shape[0])\n        # These two features were computed wrongly; pad with 0 instead\n        dat[-1].extend([0, 0])\n    ces = pd.DataFrame(\n        dat,\n        columns=[\n            \"SN\",\n            \"CT\",\n            \"MF\",\n            \"VD\",\n            \"Z\",\n            \"NA\",\n            \"AP\",\n            \"AF\",\n            \"E\",\n            \"CD\",\n            \"BB\",\n            \"C\",\n            \"CC\",\n            \"F\",\n            \"G\",\n            \"EE\",\n            \"AA\",\n            \"AE\",\n            \"BC\",\n            \"AZ\",\n            \"DE\",\n            \"FF\",\n            \"T0\",\n            \"T1\",\n            \"T2\",\n            \"T3\",\n            \"T4\",\n            \"GM\",\n            \"GS\",\n        ],\n    )\n    ces[\"VD\"] = ces[\"VD\"].astype(\"int64\")\n    # ADR-related features\n    adr[\"collect_time\"] = (\n        pd.to_datetime(adr[\"collect_time\"], infer_datetime_format=True).astype(int)\n        // 1e9\n    )\n    npadr = adr.values\n    dat = []\n    for s in np.unique(npadr[:, 0]):\n        sdf = npadr[npadr[:, 0] == s]\n        dat.append([s, sdf.shape[0]])\n        for i in range(1, 6):\n            dat[-1].extend(\n                [\n                    mode(sdf[:, i]).mode[0],\n                    mode(sdf[:, i]).count[0],\n                    np.std(sdf[:, i]),\n                    np.unique(sdf[:, i]).shape[0],\n                ]\n            )\n        dat[-1].append(np.unique(np.sum(sdf[:, 1:6], axis=1)).shape[0])\n        if sdf.shape[0] == 1:\n            dat[-1].extend([0, 0])\n        else:\n            gap = np.diff(sdf[:, 6])\n            dat[-1].extend([np.mean(gap), np.std(gap)])\n    drs = pd.DataFrame(\n        dat,\n        columns=[\n            \"SN\",\n            \"CTA\",\n            \"M1\",\n            \"C1\",\n            \"S1\",\n            \"U1\",\n            \"M2\",\n            \"C2\",\n            \"S2\",\n            \"U2\",\n            \"M3\",\n            \"C3\",\n            \"S3\",\n            \"U3\",\n            \"M4\",\n            \"C4\",\n            \"S4\",\n            \"U4\",\n            \"M5\",\n            \"C5\",\n            \"S5\",\n            \"U5\",\n            \"AUP\",\n            \"GMA\",\n            \"GSA\",\n        ],\n    )\n    # KRN-related features\n    krn[\"collect_time\"] = (\n        pd.to_datetime(krn[\"collect_time\"], infer_datetime_format=True).astype(int)\n        // 1e9\n    )\n    krn.fillna(0, inplace=True)\n    npkrn = krn.values\n    dat = []\n    for s in np.unique(npkrn[:, 25]):\n        sdf = npkrn[npkrn[:, 25] == s]\n        dat.append([s, sdf.shape[0]])\n        # These two features were computed wrongly; pad with 0 instead\n        dat[-1].extend([0, 0])\n        dat[-1].extend(np.sum(sdf[:, 1:25], axis=0).tolist())\n    cols = [\"SN\", \"CTK\", \"GMK\", \"GSK\"]\n    cols.extend([\"K\" + str(i) for i in range(1, 25)])\n    rns = pd.DataFrame(dat, columns=cols)\n    rns.fillna(0, inplace=True)\n    rns[cols[1:]] = rns[cols[1:]].astype(\"int64\")\n    # Merge the feature tables\n    full = pd.merge(ces, drs, \"right\", on=[\"SN\"])\n    full = pd.merge(full, rns, \"left\", on=[\"SN\"])\n    full.fillna(0, inplace=True)\n    full.reset_index(drop=True, inplace=True)\n    full.iloc[:, 1:] = full.iloc[:, 1:].astype(\"float32\")\n    # Historical features\n    if hist is None:\n        hist = pd.DataFrame([], columns=full.columns)\n    hist = hist.append(full)\n    hpoi += 1\n    hpos.append(full.shape[0])\n    if hpoi > 1024:\n        hist = hist.iloc[hpos[hpoi - 1025] :]\n    daily = hist.groupby(\"SN\").sum().reset_index()\n    full = pd.merge(full, daily, how=\"left\", on=\"SN\", 
suffixes=(\"\", \"10\")).fillna(0)\n # 跨表衍生特征\n full[\"M/A\"] = (full[\"CT\"] / full[\"CTA\"]).replace(np.inf, -1)\n full[\"A/K\"] = (full[\"CTA\"] / full[\"CTK\"]).replace(np.inf, -1)\n full[\"K/M\"] = (full[\"CTK\"] / full[\"CT\"]).replace(np.inf, -1)\n full[\"M/A10\"] = (full[\"CT10\"] / full[\"CTA10\"]).replace(np.inf, -1)\n full[\"A/K10\"] = (full[\"CTA10\"] / full[\"CTK10\"]).replace(np.inf, -1)\n full[\"K/M10\"] = (full[\"CTK10\"] / full[\"CT10\"]).replace(np.inf, -1)\n return full.fillna(-1)\n\n\nclass myInfer(inferServer):\n def __init__(self, model):\n super().__init__(model)\n\n # 数据预处理\n def pre_process(self, request):\n json_data = request.get_json()\n try:\n mce_log = pd.DataFrame(\n json_data[\"mce_log\"],\n columns=[\n \"serial_number\",\n \"mca_id\",\n \"transaction\",\n \"collect_time\",\n \"manufacturer\",\n \"vendor\",\n ],\n )\n except:\n mce_log = pd.DataFrame(\n [],\n columns=[\n \"serial_number\",\n \"mca_id\",\n \"transaction\",\n \"collect_time\",\n \"manufacturer\",\n \"vendor\",\n ],\n )\n try:\n address_log = pd.DataFrame(\n json_data[\"address_log\"],\n columns=[\n \"serial_number\",\n \"memory\",\n \"rankid\",\n \"bankid\",\n \"row\",\n \"col\",\n \"collect_time\",\n \"manufacturer\",\n \"vendor\",\n ],\n )\n except:\n address_log = pd.DataFrame(\n [],\n columns=[\n \"serial_number\",\n \"memory\",\n \"rankid\",\n \"bankid\",\n \"row\",\n \"col\",\n \"collect_time\",\n \"manufacturer\",\n \"vendor\",\n ],\n )\n try:\n kernel_log = pd.DataFrame(\n json_data[\"kernel_log\"],\n columns=[\n \"collect_time\",\n \"1_hwerr_f\",\n \"1_hwerr_e\",\n \"2_hwerr_c\",\n \"2_sel\",\n \"3_hwerr_n\",\n \"2_hwerr_s\",\n \"3_hwerr_m\",\n \"1_hwerr_st\",\n \"1_hw_mem_c\",\n \"3_hwerr_p\",\n \"2_hwerr_ce\",\n \"3_hwerr_as\",\n \"1_ke\",\n \"2_hwerr_p\",\n \"3_hwerr_kp\",\n \"1_hwerr_fl\",\n \"3_hwerr_r\",\n \"_hwerr_cd\",\n \"3_sup_mce_note\",\n \"3_cmci_sub\",\n \"3_cmci_det\",\n \"3_hwerr_pi\",\n \"3_hwerr_o\",\n \"3_hwerr_mce_l\",\n \"serial_number\",\n \"manufacturer\",\n \"vendor\",\n ],\n )\n except:\n kernel_log = pd.DataFrame(\n [],\n columns=[\n \"collect_time\",\n \"1_hwerr_f\",\n \"1_hwerr_e\",\n \"2_hwerr_c\",\n \"2_sel\",\n \"3_hwerr_n\",\n \"2_hwerr_s\",\n \"3_hwerr_m\",\n \"1_hwerr_st\",\n \"1_hw_mem_c\",\n \"3_hwerr_p\",\n \"2_hwerr_ce\",\n \"3_hwerr_as\",\n \"1_ke\",\n \"2_hwerr_p\",\n \"3_hwerr_kp\",\n \"1_hwerr_fl\",\n \"3_hwerr_r\",\n \"_hwerr_cd\",\n \"3_sup_mce_note\",\n \"3_cmci_sub\",\n \"3_cmci_det\",\n \"3_hwerr_pi\",\n \"3_hwerr_o\",\n \"3_hwerr_mce_l\",\n \"serial_number\",\n \"manufacturer\",\n \"vendor\",\n ],\n )\n if address_log.shape[0] != 0:\n test_data = extract_feature(mce_log, address_log, kernel_log)\n return test_data\n else:\n return None\n\n # 数据后处理\n def post_process(self, data):\n if data.shape[0] == 0:\n if not real:\n print(\"[]\", file=sys.stderr)\n return \"[]\"\n data.columns = [\"serial_number\", \"pti\"]\n ret = data.to_json(orient=\"records\")\n if not real:\n print(ret, file=sys.stderr)\n print(f\"Total bad servers: {len(fail)}\", file=sys.stderr)\n return ret\n # return \"[]\"\n\n # 预测方法,按需重写\n def predict(self, data):\n global fail\n if data is not None:\n ret = np.zeros((data.shape[0], 10))\n for i in range(10):\n ret[:, i] = self.model[i].predict_proba(mask(data.iloc[:, 1:].values))[:, 1]\n data[\"pti\"] = np.mean(ret, axis=1)\n data = data[data[\"pti\"] > t1][[\"SN\", \"pti\"]].reset_index(drop=True)\n if data.shape[0] > 0:\n for i in range(data.shape[0]):\n if (data[\"SN\"][i] in fail) and (data[\"pti\"][i] < t2):\n 
data.iloc[i, 1] = 0\n                    else:\n                        fail.add(data[\"SN\"][i])\n                        if real:\n                            data.iloc[i, 1] = 5\n            return data[data[\"pti\"] > t1]\n        else:\n            print(\"No predictable samples!\", file=sys.stderr)\n            return pd.DataFrame()\n\n\nif __name__ == \"__main__\":\n    mymodel = load_model()\n    my_infer = myInfer(mymodel)\n    my_infer.run(debuge=False)\n","sub_path":"codes/over-fitting_solved.py","file_name":"over-fitting_solved.py","file_ext":"py","file_size_in_byte":11280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"227800056","text":"#coding:utf-8\n\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf8')\n\nimport xml.etree.cElementTree as ET\nimport os\nimport glob\nimport shutil\n# import click\nimport argparse\n\n\n\ndef filter_xml(xml_path , class_id, new_dir):\n\tet = ET.parse(xml_path)\n\telement = et.getroot()\n\telement_objs = element.findall('object')\n\t\n\tdir_path = os.path.dirname(xml_path)\n\t\n\tsuffix = None\n\tif os.path.exists(os.getcwd() + \"/\" + os.path.basename(xml_path)[:-3] + \"jpg\"):\n\t\tsuffix = \"jpg\"\n\telif os.path.exists(os.getcwd() + \"/\" + os.path.basename(xml_path)[:-3] + \"png\"):\n\t\tsuffix = \"png\"\n\t# print(dir_path)\n\t# print(os.path.basename(xml_path))\n\t\n\tif os.path.isdir(new_dir):\n\t\tpass\n\telse:\n\t\tos.makedirs(new_dir)\n\t\t\n\tnames = []\n\t# keep this xml only if all of its classes are in present_classes\n\tfor element_obj in element_objs:\n\t\tclass_name = int(element_obj.find('name').text)\n\t\tnames.append(class_name)\n\td = [False for c in names if c not in class_id]\n\tif not d:\n\t\tshutil.copyfile(xml_path , new_dir + '/' + os.path.basename(xml_path))\n\t\tshutil.copyfile(data_dir + \"/\" + os.path.basename(xml_path)[:-3] + suffix , new_dir + '/' + os.path.basename(xml_path)[:-3] + suffix)\n\t\tprint('copyfile:',xml_path)\n\t\t\nif __name__ == '__main__':\n\t#parser = argparse.ArgumentParser()\n\t#parser.add_argument('--id')\n\t#args = parser.parse_args()\n\t#id = str(args.id)\n\t\n\t# select the sku in present_classes\n\tpresent_classes = [5 , 6 , 9 , 10 , 16 , 20 , 24 , 30 , 37 , 40 , 41 , 42 , 44 , 46 , 61 , 63 , 64 , 65 , 66 , 67 , 74 , 75 , 77 , 78 , 85 , 88 , 90 , 91 , 94 , 104 , 105 , 122 , 127 , 139 , 140 , 141 , 142 , 143 , 144 , 145 , 146]\t\n\t#present_classes = ['5', '6', '9', '37', '61']\t\n\tdata_dir = os.getcwd()\n\tnew_dir = data_dir + '/' + str(len(present_classes)) + 'sku' \n\t\n\t#xml_dir = os.getcwd()\n\tlsts = glob.glob(data_dir + \"/*.xml\")\n\tfor i in lsts:\n\t\tprint('i:' , i)\n\t\tfilter_xml(i ,present_classes, new_dir)\n \n","sub_path":"select_some_sku.py","file_name":"select_some_sku.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"67232287","text":"'''\r\nCreated on Jul 23, 2016\r\n\r\n@author: TrentonB\r\n'''\r\n\r\nimport pygame\r\n\r\n\r\nclass MyClass(object):\r\n    '''\r\n    classdocs\r\n    '''\r\n\r\n\r\n    def __init__(self, x, y, wid, hght, spriteSheet = None, offset=None):\r\n        '''\r\n        Constructor\r\n        '''\r\n        \r\n        self.xPos = x\r\n        self.yPos = y\r\n        self.width = wid\r\n        self. 
height = hght\r\n \r\n if(offset == None):\r\n self.sprite = pygame.image.load(spriteSheet)\r\n ","sub_path":"Code/Classes/GameObject.py","file_name":"GameObject.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"387909154","text":"from ROOT import TH1F, TCanvas, TEfficiency, TFile, TLegend, gPad\n\n\ndef efficiencytracking():\n\n hadron_list = [\"pion\", \"proton\", \"electron\", \"muon\"]\n color_list = [1, 2, 4, 6]\n fileo2 = TFile(\"../codeHF/AnalysisResults_O2.root\")\n\n c1 = TCanvas(\"c1\", \"A Simple Graph Example\")\n c1.SetCanvasSize(1500, 1500)\n c1.cd()\n gPad.SetLogx()\n gPad.SetLogy()\n eff_list = []\n hempty = TH1F(\"hempty\", \";p_{T};efficiency\", 100, 0.001, 5.0)\n hempty.Draw()\n leg = TLegend(0.1, 0.7, 0.3, 0.9, \"\")\n leg.SetFillColor(0)\n\n for i, had in enumerate(hadron_list):\n hnum = fileo2.Get(\"qa-tracking-efficiency-%s/num\" % had)\n hden = fileo2.Get(\"qa-tracking-efficiency-%s/den\" % had)\n hnum.Rebin(4)\n hden.Rebin(4)\n eff = TEfficiency(hnum, hden)\n eff.SetLineColor(color_list[i])\n eff_list.append(eff)\n eff.Draw(\"same\")\n leg.AddEntry(eff_list[i], had)\n leg.Draw()\n c1.SaveAs(\"efficiency_tracking.pdf\")\n\n\ndef efficiencyhadron(had, var):\n fileo2 = TFile(\"../codeHF/AnalysisResults_O2.root\")\n ceffhf = TCanvas(\"ceffhf\", \"A Simple Graph Example\")\n ceffhf.SetCanvasSize(1500, 700)\n ceffhf.Divide(2, 1)\n gPad.SetLogy()\n hnum = fileo2.Get(\"hf-task-%s-mc/h%sRecSig\" % (had, var))\n hden = fileo2.Get(\"hf-task-%s-mc/h%sGen\" % (had, var))\n hnum.Rebin(4)\n hden.Rebin(4)\n eff = TEfficiency(hnum, hden)\n eff.Draw()\n ceffhf.SaveAs(\"efficiency_hfcand%s%s.pdf\" % (had, var))\n\n\nefficiencytracking()\nefficiencyhadron(\"lc\", \"Eta\")\nefficiencyhadron(\"lc\", \"Pt\")\n","sub_path":"FirstAnalysis/trackingstudies.py","file_name":"trackingstudies.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"375285900","text":"#a2_6.py\nimport numpy as np\nimport sys\nimport matplotlib.pyplot as plt\nimport os\nfrom a2_1 import rng,box_muller\n\n\ndef train_perceptron (data_in,data_out,rng): \n bias = np.zeros ((len(data_in),1)) # Initialise bias array.\n data_in = np.append(data_in,bias,axis=1) # Merge data_in and weights.\n weights = np.array([rng.rand_num(len(data_in[0])),rng.rand_num(len(data_in[0]))]).reshape(len(data_in[0]),2) # initialise random weights\n weighted_sum = np.dot(data_in,weights) # Compute weighted sum.\n output_indices = np.argmax(weighted_sum,axis=1) # Select maximum value.\n\n epoch = 0\n \n\n while epoch < 4000:\n wrongly_classified_indices = []\n for i in range(len(data_in)):\n if output_indices[i] != data_out[i]:\n wrongly_classified_indices.append(i)\n\n rand = rng.rand_num(len(wrongly_classified_indices)-1)*10\n k = wrongly_classified_indices[int(rand[0])]\n\n for j in range (2):\n if weighted_sum[k][j] > weighted_sum[k][int(data_out[k])]:\n weights[:,j] -= data_in[k]\n if j == int ( data_out[k] ):\n weights[:,j] += data_in[k]\n\n weighted_sum = np.dot(data_in,weights)\n output_indices = np.argmax(weighted_sum,axis = 1)\n\n epoch += 1\n\n sys.stdout.write(\"Iterations:{0}\\n\".format(epoch))\n\n #plt.show() # Toggle\n\n return weights\n\ndef test_perceptron(data_in,data_out,weights,hist=False):\n bias = np.zeros ((len(data_in ),1)) # Initialise bias array.\n data_in = np.append(data_in,bias,axis=1) # Merge data_in and weights.\n 
weighted_sum = np.dot(data_in, weights)\n output_indices = np.argmax(weighted_sum,axis = 1)\n correct_counter = 0\n for i in range (len(data_in)):\n if output_indices[i] == data_out[i]:\n correct_counter += 1\n print('Accuracy:{:03.1f}%\\n'.format(correct_counter*100/len(data_out)))\n\n if hist: \n short_true = len(data_out[data_out==0]) \n short_predicted = len(output_indices[output_indices==0])\n long_true = len(data_out[data_out==1]) \n long_predicted = len(output_indices[output_indices==1])\n bar_width = 0.35\n plt.bar(np.array([0,1])-bar_width/2,np.array([short_predicted,long_predicted]),bar_width,label='Predicted')\n plt.bar(np.array([0,1])+bar_width/2,np.array([short_true,long_true]),bar_width,label='True')\n plt.xticks((0,1))\n plt.xlabel('Labels')\n plt.ylabel('Number of GRBs')\n plt.legend()\n plt.title('Predicted labels for Gamma Ray Bursts')\n #plt.show()\n plt.savefig('./plots/6.png')\n plt.close()\n\nif __name__ == '__main__':\n print('--- Exercise 6 ---')\n\n seed = 627310980\n print('Seed:',seed)\n rng = rng(seed)\n\n filename = 'GRBs.txt'\n url = 'https://home.strw.leidenuniv.nl/~nobels/coursedata/'\n if not os.path.isfile(filename):\n print(f'File not found, downloading {filename}')\n os.system('wget '+url+filename)\n\n data = np.genfromtxt(filename,skip_header=2,usecols = (2,3,4,5,6,7))\n data[data==-1.0] = 0\n names = np.genfromtxt(filename,skip_header=2,usecols=0,dtype=str)\n data = data[names!='XRF']\n labels = np.zeros(len(data))\n labels[data[:,1]>=10] += 1\n data = data[:,[0,2,3,4,5]]\n train_percent = 0.8\n train_in = data[:int(len(data)*train_percent)]\n train_out = labels[:int(len(labels)*train_percent)]\n test_in = data[int(len(data)*train_percent):]\n test_out = labels[int(len(labels)*train_percent):]\n\n for i in range(1):\n #sys.stdout.write(\"Run {0}\\n\".format(i+1))\n weights = train_perceptron(train_in, train_out,rng)\n print('Training set')\n test_perceptron(train_in,train_out,weights)\n print('Test set')\n test_perceptron(test_in,test_out,weights)\n print('Entire data set')\n test_perceptron(data,labels,weights,hist=True)\n","sub_path":"a2_6.py","file_name":"a2_6.py","file_ext":"py","file_size_in_byte":3886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"538848736","text":"#encoding=utf-8\n'''\n在代码最前面import本脚本,可在python环境低于3的时候,打印可读提示信息\n'''\nimport sys\n__currentPython = u'当前Python运行版本: %d.%d.%d\\n' % (sys.version_info.major, sys.version_info.minor, sys.version_info.micro)\nif sys.version_info.major < 3 :\n\tsys.stdout.write(__currentPython)\n\tif __name__ == '__main__' :\n\t\tsys.stdout.write(u'import本脚本可确保Python3+运行版本\\n')\n\telse :\n\t\tsys.stdout.write(u'当前脚本需要Python3+\\n')\n\texit(1)\nelse :\n\tif __name__ == '__main__' :\n\t\tsys.stdout.write(__currentPython)\n\n# import os\n# # suppress warning: Your CPU supports instructions that this TensorFlow binary was not compiled to use: SSE4.1 SSE4.2 AVX\n# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n","sub_path":"need_py3.py","file_name":"need_py3.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"203341464","text":"from django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('manage_projects', views.manage_projects, name='manage_projects'),\n\n path('dataset/create/', views.dataset_create, name='dataset-create'),\n path('dataset/', views.DatasetDetailView.as_view(), name='dataset-detail'),\n path('attributes/', views.add_attributes, name='add-attributes'),\n path('bio/', views.add_bio, name='add-bio'),\n\n path('upload/', views.data_upload, name='upload'),\n path('upload/', views.UploadDetailView.as_view(), name='upload-detail'),\n path('datasets', views.DatasetListView.as_view(), name='datasets'),\n path('dataset//delete/', views.DatasetDelete.as_view(), name='dataset-delete'),\n\n path('geometa/', views.GeoMetadataDetailView.as_view(), name='geometadata-detail'),\n\n path('contacts', views.ContactListView.as_view(), name='contacts'),\n path('contact/', views.ContactDetailView.as_view(), name='contact-detail'),\n path('contact/create', views.ContactCreate.as_view(), name='contact-create'),\n path('contact//update/', views.ContactUpdate.as_view(), name='contact-update'),\n path('contact//delete/', views.ContactDelete.as_view(), name='contact-delete'),\n\n path('projects', views.ProjectListView.as_view(), name='projects'),\n path('project/', views.ProjectDetailView.as_view(), name='project-detail'),\n path('project/create/', views.ProjectCreate.as_view(), name='project-create'),\n path('project//update/', views.ProjectUpdate.as_view(), name='project-update'),\n path('project//delete/', views.ProjectDelete.as_view(), name='project-delete'),\n \n path('sciences', views.ScienceListView.as_view(), name='sciences'),\n path('science/', views.ScienceDetailView.as_view(), name='science-detail'),\n path('science/create/', views.science_create, name='science-create'),\n path('science//update/', views.ScienceUpdate.as_view(), name='science-update'),\n path('science//delete/', views.ScienceDelete.as_view(), name='science-delete'),\n \n path('programs', views.ProgramListView.as_view(), name='programs'),\n path('program/', views.ProgramDetailView.as_view(), name='program-detail'),\n path('program/create/', views.ProgramCreate.as_view(), name='program-create'),\n path('program//update/', views.ProgramUpdate.as_view(), name='program-update'),\n path('program//delete/', views.ProgramDelete.as_view(), name='program-delete'),\n \n path('generals', views.GeneralListView.as_view(), name='generals'),\n path('general/', views.GeneralDetailView.as_view(), name='general-detail'),\n path('general/create/', views.general_create, name='general-create'),\n path('general//update/', views.GeneralUpdate.as_view(), name='general-update'),\n path('general//delete/', views.GeneralDelete.as_view(), name='general-delete'),\n\n # path('science/create/', views.ScienceCreate.as_view(), name='science-create'),\n\n]\n","sub_path":"meta/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"396515849","text":"from github import Github\nfrom invoke import task\n\nfrom faasmcli.util.config import get_faasm_config\nfrom faasmcli.util.release import tar_toolchain, tar_sysroot, tar_runtime_root\nfrom faasmcli.util.version import get_faasm_version\n\nREPO_NAME = \"lsds/Faasm\"\n\n\ndef _tag_name(version):\n return \"v{}\".format(version)\n\n\ndef _get_release():\n faasm_ver = get_faasm_version()\n r = _get_repo()\n rels = r.get_releases()\n tag_name = _tag_name(faasm_ver)\n\n rel = rels[0]\n if rel.tag_name != 
tag_name:\n print(\"Expected latest release to have tag {} but had {}\".format(tag_name, rel.tag_name))\n exit(1)\n\n return rel\n\n\ndef _get_github_instance():\n conf = get_faasm_config()\n\n if not conf.has_section(\"Github\") or not conf.has_option(\"Github\", \"access_token\"):\n print(\"Must set up Github config with access token\")\n\n token = conf[\"Github\"][\"access_token\"]\n g = Github(token)\n return g\n\n\ndef _get_repo():\n g = _get_github_instance()\n return g.get_repo(REPO_NAME)\n\n\n@task\ndef create_release(ctx):\n \"\"\"\n Create a draft release on Github\n \"\"\"\n # Get the head of master\n r = _get_repo()\n b = r.get_branch(branch=\"master\")\n head = b.commit\n\n faasm_ver = get_faasm_version()\n\n # Create a tag from the head\n tag_name = _tag_name(faasm_ver)\n r.create_git_tag(\n tag_name,\n \"Release {}\\n\".format(faasm_ver),\n head.sha,\n \"commit\",\n )\n\n r.create_git_release(\n tag_name,\n \"Faasm {}\".format(faasm_ver),\n \"Release {}\\n\".format(faasm_ver),\n draft=True\n )\n\n\n@task\ndef upload_artifacts(ctx, which=None):\n \"\"\"\n Upload release artifacts\n \"\"\"\n rel = _get_release()\n\n artifacts = [\"toolchain\", \"sysroot\", \"runtime\"]\n if which:\n artifacts = [which]\n\n for a in artifacts:\n if a == \"toolchain\":\n toolchain_name, toolchain_path = tar_toolchain()\n\n print(\"Uploading toolchain to GH\")\n rel.upload_asset(toolchain_path, label=toolchain_name)\n\n elif a == \"sysroot\":\n sysroot_name, sysroot_path = tar_sysroot()\n\n print(\"Uploading sysroot to GH\")\n rel.upload_asset(sysroot_path, label=sysroot_name)\n\n elif a == \"runtime\":\n runtime_name, runtime_path = tar_runtime_root()\n\n print(\"Uploading runtime root to GH\")\n rel.upload_asset(runtime_path, label=runtime_name)\n\n else:\n print(\"Unrecognised artifact: {} (must be {})\".format(a, artifacts))\n exit(1)\n\n\n@task\ndef publish_release(ctx):\n \"\"\"\n Publish the draft release\n \"\"\"\n rel = _get_release()\n rel.update_release(rel.title, rel.raw_data[\"body\"], draft=False)\n","sub_path":"faasmcli/faasmcli/tasks/github.py","file_name":"github.py","file_ext":"py","file_size_in_byte":2717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"76601640","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nraster\nTo do:\n Read, write and analyze gridded Raster data\nCreated on Tue Mar 31 16:20:13 2020\n\n@author: Xiaodong Ming\n\"\"\"\nimport copy\nimport math\nimport numpy as np\nfrom osgeo import gdal, ogr, osr\nfrom scipy import interpolate\nimport spatial_analysis as sp\nimport grid_show as gs\n#%% *******************************To deal with raster data********************\n# *************************************************************************** \nclass Raster(object):\n \"\"\" \n To deal with raster data with a ESRI ASCII or GTiff format\n Properties:\n source_file: file name to read grid data\n output_file: file name to write a raster object\n array: a numpy array storing grid cell values\n header: a dict storing reference information of the grid\n extent: a tuple storing outline limits of the raster (left, right, \n bottom, top)\n extent_dict: a dictionary storing outline limits of the raster\n projection: (string) the Well-Known_Text (wkt) projection information\n \n Methods(public):\n Write_asc: write grid data into an asc file with or without \n compression(.gz)\n to_osgeo_raster: convert this object to an osgeo raster object\n rect_clip: clip raster according to a rectangle extent\n 
clip: clip raster according to a polygon\n rasterize: rasterize a shapefile on the Raster object and return a \n bool array with 'Ture' in and on the polygon/polyline\n resample: resample the raster to a new cellsize\n GetXYcoordinate: Get X and Y coordinates of all raster cells\n mapshow: draw a map of the raster dataset\n VelocityShow: draw velocity vectors as arrows with values on two Raster\n datasets (u, v)\n \n Methods(private):\n __header2extent: convert header to extent\n __read_asc: read an asc file ends with .asc or .gz\n with a reference header\n __read_tif: read tiff file\n \n \"\"\"\n#%%======================== initialization function =========================== \n def __init__(self, source_file=None, array=None, header=None, \n epsg=None, projection=None, num_header_rows=6):\n \"\"\"\n source_file: name of a asc/tif file if a file read is needed\n array: values in each raster cell [a numpy array]\n header: georeference of the raster [a dictionary containing 6 keys]:\n nrows, nclos [int]\n cellsize, xllcorner, yllcorner\n NODATA_value\n epsg: epsg code [int]\n projection: WktProjection [string]\n \"\"\"\n if epsg is not None:\n projection = self.__set_wkt_projection(epsg)\n if type(source_file) is str:\n if source_file.endswith('.tif'):\n array, header, projection = sp.tif_read(source_file) # only read the first band\n else:\n array, header, projection = sp.arcgridread(source_file,\n num_header_rows)\n self.source_file = source_file\n elif type(source_file) is bytes: # try a binary file-like object\n array, header = sp.byte_file_read(source_file)\n extent = sp.header2extent(header)\n self.source_file = source_file\n self.projection = projection\n self.array = array\n self.header = header\n self.extent = extent\n self.extent_dict = {'left':extent[0], 'right':extent[1],\n 'bottom':extent[2], 'top':extent[3]}\n self.Write_asc = self.write_asc # comptatible with the myclass version\n \n#%%============================= Spatial analyst ============================== \n def rect_clip(self, clip_extent):\n \"\"\"\n clip_extent: left, right, bottom, top\n clip raster according to a rectangle extent\n return:\n a new raster object\n \"\"\"\n X = clip_extent[0:2]\n Y = clip_extent[2:4]\n rows, cols = sp.map2sub(X, Y, self.header)\n x_centre, y_centre = sp.sub2map(rows, cols, self.header)\n xllcorner = min(x_centre)-0.5*self.header['cellsize']\n yllcorner = min(y_centre)-0.5*self.header['cellsize']\n header_new = copy.deepcopy(self.header)\n array_new = self.array[min(rows):max(rows), min(cols):max(cols)]\n header_new['nrows'] = array_new.shape[0]\n header_new['ncols'] = array_new.shape[1]\n header_new['xllcorner'] = xllcorner\n header_new['yllcorner'] = yllcorner\n new_obj = Raster(array=array_new, header=header_new,\n projection=self.projection)\n return new_obj\n \n def clip(self, mask=None):\n \"\"\"\n clip raster according to a mask\n mask: \n 1. string name of a shapefile\n 2. numpy vector giving X and Y coords of the mask points\n \n return:\n a new raster object\n \"\"\"\n if isinstance(mask, str):\n shpName = mask\n # Open shapefile datasets \n shpDriver = ogr.GetDriverByName('ESRI Shapefile')\n shpDataset = shpDriver.Open(shpName, 0) # 0=Read-only, 1=Read-Write\n layer = shpDataset.GetLayer()\n shpExtent = np.array(layer.GetExtent()) #(minX, maxY, maxX, minY) \n # 1. 
rectangle clip raster\n new_obj = self.rect_clip(shpExtent)\n new_raster = copy.deepcopy(new_obj) \n indexArray = new_raster.rasterize(shpDataset)\n arrayClip = new_raster.array\n arrayClip[indexArray==0]=new_raster.header['NODATA_value']\n new_raster.array = arrayClip \n shpDataset.Destroy()\n return new_raster\n \n def rasterize(self, shpDSName, rasterDS=None):\n \"\"\"\n rasterize the shapefile to the raster object and return a bool array\n with Ture value in and on the polygon/polyline\n shpDSName: string for shapefilename, dataset for ogr('ESRI Shapefile')\n object\n return numpy array\n \"\"\"\n if isinstance(shpDSName, str):\n shpDataset = ogr.Open(shpDSName)\n else:\n shpDataset = shpDSName\n layer = shpDataset.GetLayer()\n if rasterDS is None:\n obj_raster = copy.deepcopy(self)\n obj_raster.array = np.zeros(obj_raster.array.shape)\n target_ds = obj_raster.to_osgeo_raster()\n else:\n target_ds = rasterDS\n gdal.RasterizeLayer(target_ds, [1], layer, burn_values=[-1])\n rasterized_array = target_ds.ReadAsArray()\n indexArray = np.full(rasterized_array.shape, False)\n indexArray[rasterized_array==-1] = True\n target_ds=None\n return indexArray\n \n def resample(self, cellsize_n, method='bilinear'):\n \"\"\"\n resample the raster to a new cellsize\n cellsize_n: cellsize of the new raster\n method: Resampling method to use. Available methods are:\n near: nearest neighbour resampling (default, fastest algorithm, \n worst interpolation quality). \n bilinear: bilinear resampling. \n cubic: cubic resampling. \n cubicspline: cubic spline resampling. \n lanczos: Lanczos windowed sinc resampling. \n average: average resampling, computes the average of all \n non-NODATA contributing pixels. \n mode: mode resampling, selects the value which appears most often \n of all the sampled points. \n max: maximum resampling, selects the maximum value from all \n non-NODATA contributing pixels. \n min: minimum resampling, selects the minimum value from all \n non-NODATA contributing pixels. \n med: median resampling, selects the median value of all \n non-NODATA contributing pixels. \n q1: first quartile resampling, selects the first quartile \n value of all non-NODATA contributing pixels. 
\n            q3: third quartile resampling, selects the third quartile \n            value of all non-NODATA contributing pixels\n        \"\"\"\n        cellSize = self.header['cellsize']\n        ras_x_size = self.header['ncols']\n        newras_x_size = int(ras_x_size*cellSize/cellsize_n)\n        rasterYSize = self.header['nrows']\n        newRasterYSize = int(rasterYSize*cellSize/cellsize_n)\n        \n        g = self.to_osgeo_raster() # get original gdal dataset\n        total_obs = g.RasterCount\n        drv = gdal.GetDriverByName( \"MEM\" )\n        dst_ds = drv.Create('', g.RasterXSize, g.RasterYSize, 1,\n                            eType=gdal.GDT_Float32)\n        dst_ds.SetGeoTransform( g.GetGeoTransform())\n        dst_ds.SetProjection ( g.GetProjectionRef() )\n        hires_data = self.array\n        dst_ds.GetRasterBand(1).WriteArray ( hires_data )\n        \n        geo_trans_v = g.GetGeoTransform()\n        drv = gdal.GetDriverByName( \"MEM\" )\n        resampled_ds = drv.Create('', newras_x_size, newRasterYSize, 1, \n                                  eType=gdal.GDT_Float32)\n\n        geo_trans_v_new = (geo_trans_v[0], cellsize_n, geo_trans_v[2],\n                           geo_trans_v[3], geo_trans_v[4], -cellsize_n)\n        resampled_ds.SetGeoTransform(geo_trans_v_new )\n        resampled_ds.SetProjection (g.GetProjectionRef() )\n        resampled_ds.SetMetadata ({\"TotalNObs\":\"%d\" % total_obs})\n\n        gdal.RegenerateOverviews(dst_ds.GetRasterBand(1),\n                                 [resampled_ds.GetRasterBand(1)], method)\n        \n        resampled_ds.GetRasterBand(1).SetNoDataValue(self.header['NODATA_value'])\n        \n        new_obj = self.__osgeo2raster(resampled_ds)\n        resampled_ds = None\n\n        return new_obj\n    \n    def point_interpolate(self, points, values, method='nearest'):\n        \"\"\" Interpolate values of 2D points to all cells on the Raster object\n        2D interpolate\n        points: ndarray of floats, shape (n, 2)\n            Data point coordinates. Can either be an array of shape (n, 2), \n            or a tuple of ndim arrays.\n        values: ndarray of float or complex, shape (n, )\n            Data values.\n        method: {‘linear’, ‘nearest’, ‘cubic’}, optional\n            Method of interpolation.\n        \"\"\"\n        grid_x, grid_y = self.GetXYcoordinate()\n        array_interp = interpolate.griddata(points, values, (grid_x, grid_y),\n                                            method=method)\n        new_obj = copy.deepcopy(self)\n        new_obj.array = array_interp\n        new_obj.source_file = 'mask_'+new_obj.source_file\n        return new_obj\n    \n    def grid_interpolate(self, value_grid, method='nearest'):\n        \"\"\" Interpolate values of a grid to all cells on the Raster object\n        2D interpolate\n        value_grid: a grid file string or Raster object \n        method: {‘linear’, ‘nearest’, ‘cubic’}, optional\n            Method of interpolation.\n        Return: \n            a numpy array with the same size of the self object\n        \"\"\"\n        if type(value_grid) is str:\n            value_grid = Raster(value_grid)\n        points_x, points_y = value_grid.GetXYcoordinate()\n        points = np.c_[points_x.flatten(), points_y.flatten()]\n        values = value_grid.array.flatten()\n        ind_nan = ~np.isnan(values)\n        grid_x, grid_y = self.GetXYcoordinate()\n        array_interp = interpolate.griddata(points[ind_nan, :], values[ind_nan],\n                                            (grid_x, grid_y), method=method)\n        return array_interp\n    \n    def grid_resample(self, newsize):\n        \"\"\"\n        resample a grid to a new grid resolution via nearest interpolation\n        \"\"\"\n        if isinstance(newsize, dict):\n            header = newsize.copy()\n        else:\n            oldsize = self.header['cellsize']\n            header = copy.deepcopy(self.header)\n            header['cellsize'] = newsize\n            ncols = math.floor(oldsize*self.header['ncols']/newsize)\n            nrows = math.floor(oldsize*self.header['nrows']/newsize)\n            header['ncols'] = ncols\n            header['nrows'] = nrows\n        #centre of the first cell in array\n        x11 = header['xllcorner']+0.5*header['cellsize']\n        y11 = header['yllcorner']+(header['nrows']-0.5)*header['cellsize']\n        
x_all = np.linspace(x11, x11+(header['ncols']-1)*header['cellsize'],\n                            header['ncols'])\n        y_all = np.linspace(y11, y11-(header['nrows']-1)*header['cellsize'],\n                            header['nrows'])\n        row_all, col_all = sp.map2sub(x_all, y_all, self.header)\n        rows, cols = np.meshgrid(row_all, col_all) # nrows*ncols array\n        array = self.array[rows, cols]\n        array = array.transpose()\n        array = array.astype(self.array.dtype)\n        new_obj = Raster(array=array, header=header)\n        return new_obj\n    \n    def assign_to(self, new_header):\n        \"\"\" Assign the object to a new grid defined by new_header \n        If cellsizes are not equal, the origin Raster will first be \n        resampled to the target grid.\n        obj_origin, obj_target: Raster objects\n        \"\"\"\n        obj_origin = copy.deepcopy(self)\n        if obj_origin.header['cellsize'] != new_header['cellsize']:\n            obj_origin = obj_origin.grid_resample(new_header['cellsize'])\n        grid_x, grid_y = obj_origin.GetXYcoordinate()\n        rows, cols = sp.map2sub(grid_x, grid_y, new_header)\n        ind_r = np.logical_and(rows >= 0, rows <= new_header['nrows']-1)\n        ind_c = np.logical_and(cols >= 0, cols <= new_header['ncols']-1)\n        ind = np.logical_and(ind_r, ind_c)\n#        ind = np.logical_and(ind, ~np.isnan(obj_origin.array))\n        array = obj_origin.array[ind]\n        array = np.reshape(array, (new_header['nrows'], new_header['ncols']))\n#        array[rows[ind], cols[ind]] = obj_origin.array[ind]\n        obj_output = Raster(array=array, header=new_header)\n        return obj_output\n\n    def GetXYcoordinate(self):\n        \"\"\" Get X and Y coordinates of all raster cells\n        return xv, yv numpy array with the same size of the raster object\n        \"\"\"\n        ny, nx = self.array.shape\n        cellsize = self.header['cellsize']\n        # coordinate of the centre on the top-left pixel\n        x00centre = self.extent_dict['left'] + cellsize/2\n        y00centre = self.extent_dict['top'] - cellsize/2\n        x = np.arange(x00centre, x00centre+cellsize*nx, cellsize)\n        y = np.arange(y00centre, y00centre-cellsize*ny, -cellsize)\n        xv, yv = np.meshgrid(x, y)\n        return xv, yv\n    \n    def write_asc(self, output_file, EPSG=None, compression=False):\n        \n        \"\"\"\n        write raster as asc format file \n        output_file: output file name\n        EPSG: epsg code, if it is given, a .prj file will be written\n        compression: logical, whether to compress the asc file as gz\n        \"\"\"\n        sp.arcgridwrite(output_file, self.array, self.header, compression)\n        if EPSG is not None:\n            self.__set_wkt_projection(EPSG)\n        # if projection is defined, write .prj file for asc file\n        if output_file.endswith('.asc'):\n            if self.projection is not None:\n                prj_file=output_file[0:-4]+'.prj'\n                wkt = self.projection\n                with open(prj_file, \"w\") as prj: \n                    prj.write(wkt)\n        return None\n    \n    def to_osgeo_raster(self, filename=None, fileformat = 'GTiff',\n                        destEPSG=27700): \n        \"\"\"\n        convert this object to an osgeo raster object, write a tif file if \n        necessary\n        filename: the output file name\n        fileformat: GTiff or AAIGrid\n        destEPSG: the EPSG projection code default: British National Grid\n        return:\n            an osgeo raster dataset\n            or a tif filename if it is written\n        \"\"\"\n        if filename is None:\n            dst_filename = ''\n            driver_name = 'MEM'\n        else:\n            dst_filename = filename\n            driver_name = fileformat\n            if not dst_filename.endswith('.tif'):\n                dst_filename = dst_filename+'.tif'\n        \n        # You need to get those values like you did.\n        PIXEL_SIZE = self.header['cellsize'] # size of the pixel... 
\n x_min = self.extent[0] # left \n y_max = self.extent[3] # top\n dest_crs = osr.SpatialReference()\n dest_crs.ImportFromEPSG(destEPSG)\n # create dataset with driver\n driver = gdal.GetDriverByName(driver_name)\n ncols = int(self.header['ncols'])\n nrows = int(self.header['nrows'])\n dataset = driver.Create(dst_filename, \n xsize=ncols, \n ysize=nrows, \n bands=1, \n eType=gdal.GDT_Float32)\n \n dataset.SetGeoTransform((\n x_min, # 0\n PIXEL_SIZE, # 1\n 0, # 2\n y_max, # 3\n 0, # 4\n -PIXEL_SIZE)) \n \n dataset.SetProjection(dest_crs.ExportToWkt())\n array = self.array\n dataset.GetRasterBand(1).WriteArray(array)\n dataset.GetRasterBand(1).SetNoDataValue(self.header['NODATA_value'])\n if filename is not None:\n dataset.FlushCache() # Write to disk.\n dataset = None\n return dst_filename\n else:\n return dataset\n#%%=============================Visualization==================================\n def mapshow(self, **kwargs):\n \"\"\"\n Display raster data without projection\n figname: the file name to export map\n figsize: the size of map\n dpi: The resolution in dots per inch\n vmin and vmax define the data range that the colormap covers\n figname=None, figsize=None, dpi=300, vmin=None, vmax=None, \n cax=True, dem_array=None, relocate=False, scale_ratio=1\n \"\"\"\n fig, ax = gs.mapshow(raster_obj=self, **kwargs)\n return fig, ax\n \n def rankshow(self, **kwargs):\n \"\"\" Display water depth map in a range defined by (d_min, d_max)\n \"\"\"\n fig, ax = gs.rankshow(self, **kwargs)\n return fig, ax\n \n def hillshade(self, **kwargs):\n \"\"\" Draw a hillshade map\n \"\"\"\n fig, ax = gs.hillshade(self, **kwargs)\n return fig, ax\n\n def vectorshow(self, obj_y, **kwargs):\n \"\"\"\n plot velocity map of U and V, whose values are stored in two raster\n objects separately\n \"\"\"\n fig, ax = gs.vectorshow(self, obj_y, **kwargs)\n return fig, ax\n#%%=========================== private functions ==============================\n def __osgeo2raster(self, obj_ds):\n \"\"\"\n convert an osgeo dataset to a raster object\n \"\"\"\n array = obj_ds.ReadAsArray()\n geo_trans_v = obj_ds.GetGeoTransform()\n projection = obj_ds.GetProjection()\n left = geo_trans_v[0]\n top = geo_trans_v[3]\n cellsize = geo_trans_v[1]\n nrows = obj_ds.RasterYSize\n ncols = obj_ds.RasterXSize\n xllcorner = left\n yllcorner = top - cellsize*nrows\n NODATA_value = obj_ds.GetRasterBand(1).GetNoDataValue()\n if NODATA_value is None:\n NODATA_value = -9999\n header = {'ncols':ncols, 'nrows':nrows,\n 'xllcorner':xllcorner, 'yllcorner':yllcorner, \n 'cellsize':cellsize, 'NODATA_value':NODATA_value}\n obj_new = Raster(array=array, header=header, projection=projection)\n return obj_new\n\n def __set_wkt_projection(self, epsg_code):\n \"\"\"\n get coordinate reference system (crs) as Well Known Text (WKT) \n from https://epsg.io\n epsg_code: the epsg code of a crs, e.g. BNG:27700, WGS84:4326\n return wkt text\n \"\"\"\n import requests\n # access projection information\n wkt = requests.get('https://epsg.io/{0}.prettywkt/'.format(epsg_code))\n # remove spaces between characters\n remove_spaces = wkt.text.replace(\" \", \"\")\n # place all the text on one line\n output = remove_spaces.replace(\"\\n\", \"\")\n self.projection = output\n return output\n \n#%%\ndef merge(obj_origin, obj_target, resample_method='bilinear'):\n \"\"\"Merge the obj_origin to obj_target\n assign grid values in the origin Raster to the corresponding grid cells in\n the target object. 
If cell sizes are not equal, the origin Raster is\n first resampled to the target object.\n obj_origin, obj_target: Raster objects\n \"\"\"\n if obj_origin.header['cellsize'] != obj_target.header['cellsize']:\n obj_origin = obj_origin.resample(obj_target.header['cellsize'], \n method=resample_method)\n# else:\n# obj_origin = self\n grid_x, grid_y = obj_origin.GetXYcoordinate()\n rows, cols = sp.map2sub(grid_x, grid_y, obj_target.header)\n ind_r = np.logical_and(rows >= 0, rows <= obj_target.header['nrows']-1)\n ind_c = np.logical_and(cols >= 0, cols <= obj_target.header['ncols']-1)\n ind = np.logical_and(ind_r, ind_c)\n ind = np.logical_and(ind, ~np.isnan(obj_origin.array))\n obj_output = copy.deepcopy(obj_target)\n obj_output.array[rows[ind], cols[ind]] = obj_origin.array[ind]\n return obj_output\n\ndef main():\n \"\"\"Main function\n \"\"\"\n print('Class to deal with raster data')\n\nif __name__=='__main__':\n main()\n \n \n\n","sub_path":"Raster.py","file_name":"Raster.py","file_ext":"py","file_size_in_byte":21646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"27280830","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\nfilename='Marvellous.xlsx'\ndata=pd.read_excel(filename)\n# # print(data)\n# # print(data.head())\n# print(data.tail())\nprint(data.shape)\n\nsort=data.sort_values(['Name'],ascending=True)\nprint(sort)\n\ndata['Age'].plot(kind=\"hist\")\nplt.show()\n\ndata['Age'].plot(kind=\"barh\")\nplt.show()","sub_path":"MatplotlibDemo.py","file_name":"MatplotlibDemo.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"540569563","text":"import sys\nimport os\n\nimport RPi.GPIO as GPIO\nimport time\nimport serial\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(26, GPIO.OUT)\n\nport = serial.Serial(\"/dev/ttyS0\", baudrate=9600, timeout=1)\n\nport.write(b'AT\\r\\n') # pyserial on Python 3 expects bytes\ntime.sleep(1)\n\nrespuesta = port.read(10)\n\nif b\"OK\" in respuesta: # port.read() returns bytes on Python 3\n    pass\nelse:\n    GPIO.output(26, False)\n    time.sleep(1)\n    GPIO.output(26, True)\n    time.sleep(2)\n    GPIO.output(26, False)\n    time.sleep(3)\n\nGPIO.cleanup() # this ensures a clean exit\n\n\nos.system(\"python3 /home/pi/Desktop/backup/base.py\")\n","sub_path":"client_raspberry_pi/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"165054003","text":"#!/usr/bin/env python\n#\n# Copyright 2007 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport webapp2\nimport jinja2\nimport os\n\ntemplate_dir = os.path.join(os.path.dirname(__file__), 'templates')\njinja_environment = jinja2.Environment(\n loader=jinja2.FileSystemLoader(template_dir))\n\nclass MainHandler(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('campus_tour.html')\n\t\tself.response.write(template.render())\n\nclass 
StudentCenter(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('student_center.html')\n\t\tself.response.write(template.render())\n\nclass Starbucks(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('starbucks.html')\n\t\tself.response.write(template.render())\n\nclass ResidentialHousingAssociation(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('packages.html')\n\t\tself.response.write(template.render())\n\nclass OtterCycleCenter(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('bikes.html')\n\t\tself.response.write(template.render())\n\nclass OC3(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('oc3.html')\n\t\tself.response.write(template.render())\n\nclass GreekLife(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('mgc.html')\n\t\tself.response.write(template.render())\n\nclass AssociatedStudents(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('as.html')\n\t\tself.response.write(template.render())\n\nclass StudentCentertoChapman(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('chapmancenter.html')\n\t\tself.response.write(template.render())\n\nclass ChapmanScienceCenter(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('chapman_center.html')\n\t\tself.response.write(template.render())\n\nclass ChapmanScienceCentertoWorldLanguageCenter(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('csc_to_wlc.html')\n\t\tself.response.write(template.render())\n\nclass WorldLanguageCenter(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('world_language_center.html')\n\t\tself.response.write(template.render())\n\nclass WorldLanguageCentertoStudentServices(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('studentservices.html')\n\t\tself.response.write(template.render())\n\nclass StudentServices(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('student_services.html')\n\t\tself.response.write(template.render())\n\nclass StudentServicestoUniversityCenter(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('universitycenter.html')\n\t\tself.response.write(template.render())\n\nclass UniversityCenter(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('university_center.html')\n\t\tself.response.write(template.render())\n\nclass Montes(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('montes.html')\n\t\tself.response.write(template.render())\n\nclass UniversityCentertoWorldTheater(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('worldtheater.html')\n\t\tself.response.write(template.render())\n\nclass WorldTheater(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('world_theater.html')\n\t\tself.response.write(template.render())\n\nclass WorldTheatertoLibrary(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('lib.html')\n\t\tself.response.write(template.render())\n\nclass Library(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = 
jinja_environment.get_template('library.html')\n\t\tself.response.write(template.render())\n\nclass Peets(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('peets.html')\n\t\tself.response.write(template.render())\n\nclass CooperativeLearningCenter(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('tutors.html')\n\t\tself.response.write(template.render())\n\nclass LibrarytoBIT(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('libtobit.html')\n\t\tself.response.write(template.render())\n\nclass BluePole(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('emergency.html')\n\t\tself.response.write(template.render())\n\nclass BusinessInformationandTechnology(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('bit.html')\n\t\tself.response.write(template.render())\n\nclass GameResearchLab(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('game.html')\n\t\tself.response.write(template.render())\n\nclass BITtoManzanita(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('manzanita.html')\n\t\tself.response.write(template.render())\n\nclass Manzanita(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('dorms.html')\n\t\tself.response.write(template.render())\n\nclass ManzanitatoOEandDC(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('oeanddc.html')\n\t\tself.response.write(template.render())\n\nclass OtterExpressandDiningCommons(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('food.html')\n\t\tself.response.write(template.render())\n\nclass OEandDCtoGym(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('foodtogym.html')\n\t\tself.response.write(template.render())\n\nclass OtterSportsComplex(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('gym.html')\n\t\tself.response.write(template.render())\n\nclass DiskGolf(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('diskgolf.html')\n\t\tself.response.write(template.render())\n\nclass GymtoUPD(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('gymtoupd.html')\n\t\tself.response.write(template.render())\n\nclass UniversityPoliceDepartment(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('upd.html')\n\t\tself.response.write(template.render())\n\nclass UPDtoHealthCenter(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('healthcenter.html')\n\t\tself.response.write(template.render())\n\nclass HealthCenter(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('health_center.html')\n\t\tself.response.write(template.render())\n\nclass HealthCentertoBusStop(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('health_center_to_bus_stop.html')\n\t\tself.response.write(template.render())\n\nclass BusStop(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('bus.html')\n\t\tself.response.write(template.render())\n\nclass BusToStudentCenter(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = 
jinja_environment.get_template('bus_to_student_center.html')\n\t\tself.response.write(template.render())\n\nclass Congratulations(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('end_of_tour.html')\n\t\tself.response.write(template.render())\n\nclass Attractions(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('attractions.html')\n\t\tself.response.write(template.render())\n\nclass Highway(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('highway.html')\n\t\tself.response.write(template.render())\n\nclass Beach(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('beach.html')\n\t\tself.response.write(template.render())\n\nclass CanneryRow(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('cannery_row.html')\n\t\tself.response.write(template.render())\n\nclass Target(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('target.html')\n\t\tself.response.write(template.render())\n\nclass Wharf(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('wharf.html')\n\t\tself.response.write(template.render())\n\nclass NoodleBar(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('noodle_bar.html')\n\t\tself.response.write(template.render())\n\nclass DelMonte(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('del_monte.html')\n\t\tself.response.write(template.render())\n\nclass CarmelPlaza(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('carmel_plaza.html')\n\t\tself.response.write(template.render())\n\n\nclass SeventeenMileDrive(webapp2.RequestHandler):\n\tdef get(self):\n\t\ttemplate = jinja_environment.get_template('seventeen_mile_drive.html')\n\t\tself.response.write(template.render())\n\napp = webapp2.WSGIApplication([\n ('/', MainHandler),\n ('/student_center', StudentCenter),\n ('/starbucks', Starbucks),\n ('/student_housing', ResidentialHousingAssociation),\n ('/otter_cycle_center', OtterCycleCenter),\n ('/oc3', OC3),\n ('/multicultural_greek_council', GreekLife),\n ('/associated_students', AssociatedStudents),\n ('/student_center_to_chapman', StudentCentertoChapman),\n ('/chapman_science_center', ChapmanScienceCenter),\n ('/chapman_to_world_language_center', ChapmanScienceCentertoWorldLanguageCenter),\n ('/world_language_center', WorldLanguageCenter),\n ('/world_language_center_to_student_services', WorldLanguageCentertoStudentServices),\n ('/student_services', StudentServices),\n ('/student_services_to_university_center', StudentServicestoUniversityCenter),\n ('/university_center', UniversityCenter),\n ('/university_center_to_world_theater', UniversityCentertoWorldTheater),\n ('/world_theater', WorldTheater),\n ('/world_theater_to_library', WorldTheatertoLibrary),\n ('/library', Library),\n ('/peets', Peets),\n ('/cooperative_learning_center', CooperativeLearningCenter),\n ('/library_to_bit', LibrarytoBIT),\n ('/blue_pole', BluePole),\n ('/business_information_and_technology', BusinessInformationandTechnology),\n ('/game_research_lab', GameResearchLab),\n ('/bit_to_manzanita', BITtoManzanita),\n ('/manzanita', Manzanita),\n ('/manzanita_to_oe_and_dc', ManzanitatoOEandDC),\n ('/otter_express_and_dining_commons', OtterExpressandDiningCommons),\n ('/oe_and_dc_to_gym', OEandDCtoGym),\n 
('/otter_sports_complex', OtterSportsComplex),\n ('/disk_golf', DiskGolf),\n ('/gym_to_upd', GymtoUPD),\n ('/university_police_department', UniversityPoliceDepartment),\n ('/upd_to_health_center', UPDtoHealthCenter),\n ('/health_center', HealthCenter),\n ('/health_center_to_bus_stop', HealthCentertoBusStop),\n ('/bus_stop', BusStop),\n ('/bus_to_student_center', BusToStudentCenter),\n ('/end_of_tour', Congratulations),\n ('/attractions', Attractions),\n ('/highway', Highway),\n ('/beach', Beach),\n ('/cannery_row', CanneryRow),\n ('/target', Target),\n ('/wharf', Wharf),\n ('/noodle_bar', NoodleBar),\n ('/del_monte', DelMonte),\n ('/carmel_plaza', CarmelPlaza),\n ('/seventeen_mile_drive', SeventeenMileDrive),\n ('/montes', Montes)\n\n], debug=True)","sub_path":"HackathonFall2016-Google-App-Engine/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"185345693","text":"import tensorflow as tf\r\n\r\nW = tf.Variable(tf.random_normal([1]), name = 'weight') # Create the node as a Variable; unlike an ordinary variable, a Variable is managed by TensorFlow and starts from a random value.\r\nb = tf.Variable(tf.random_normal([1]), name = 'bias') # Create the node as a Variable; unlike an ordinary variable, a Variable is managed by TensorFlow and starts from a random value.\r\nX = tf.placeholder(tf.float32, shape = [None]) # Provide the X data through a placeholder; a placeholder is fed its value at execution time.\r\nY = tf.placeholder(tf.float32, shape = [None]) # Provide the Y data through a placeholder; a placeholder is fed its value at execution time.\r\n\r\nhypothesis = X * W + b # Define the hypothesis as X * W + b.\r\n\r\ncost = tf.reduce_mean(tf.square(hypothesis - Y)) # Define the cost function according to the formula (mean squared error).\r\n\r\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)\r\ntrain = optimizer.minimize(cost)\r\n\r\nsess = tf.Session() # Create a session.\r\n\r\nsess.run(tf.global_variables_initializer()) # Before running anything that uses Variables, global_variables_initializer must be run first.\r\n\r\nfor step in range(2001): # Run the training loop (about 2000 iterations).\r\n    cost_val, W_val, b_val, _ = sess.run([cost,W,b,train],feed_dict = {X:[1,2,3,4,5], Y:[2.1,3.1,4.1,5.1,6.1]}) # Run while feeding the corresponding values into X and Y.\r\n    if (step % 20 == 0):\r\n        print(step, cost_val, W_val, b_val)\r\nprint(sess.run(hypothesis, feed_dict = {X:[2.5]})) # To check that training worked, feed an arbitrary test value of 2.5 for X. 
With W near 1 and b near 1.1, the output is roughly 3.6.","sub_path":"Ch.3/Example2.py","file_name":"Example2.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"626705406","text":"# -*- coding: utf-8 -*-\nfrom openerp import models, fields, api\nfrom datetime import datetime, timedelta\n\nclass Stock_picking_inherited(models.Model):\n    _inherit = 'stock.picking'\n    \n    tipo_material = fields.Selection(\n        selection=[('epi_epc', 'EPIs/EPCs'), \n                   ('consumo_obra', 'Consumo de Obra'), \n                   ('material_aplicacao', 'Material de Aplicação'), \n                   ('ferramentas_equipamentos_producao', 'Ferramentas e Equipamentos de Produção'), \n                   ('equipamentos_escritorio_moveis_utensilios', 'Equipamentos de Escritório, Móveis e Utensílios'), \n                   ('material_expediente_limpeza_primeiros_socorros', 'Material de Expediente, Limpeza e Primeiros Socorros')],\n        string='Tipo de Material',\n        required=True,\n        help='Informe o tipo de material solicitado')\n    \n    prioridade_solicitacao = fields.Selection(\n        selection=[('normal', 'Normal'), \n                   ('urgente', 'Urgente'), \n                   ('emergencial', 'Emergencial')],\n        string='Prioridade da Solicitação',\n        help='Prioridade da solicitação com base no prazo para entrega')\n\n    ordem_compra = fields.Many2one(\n        comodel_name='purchase.order',\n        string='Ordem de Compra',\n        help='Ordem de compra solicitada a partir deste pedido')\n    \n    def on_change_data_programada(self, cr, user, ids, data_programada, context=None):\n        prioridade = 'normal'\n        \n        data_pedido = datetime.now()\n        \n        if data_programada:\n            data_prazo = datetime.strptime(data_programada, '%Y-%m-%d %H:%M:%S')\n            \n            prazo = abs((data_prazo - data_pedido).days)\n            \n            if prazo <= 1:\n                prioridade = 'emergencial'\n            if prazo > 1 and prazo < 5:\n                prioridade = 'urgente'\n            if prazo >= 5:\n                prioridade = 'normal'\n            \n            res = {\n                'value': {\n                    'prioridade_solicitacao': prioridade,\n                    'origin': prazo\n                }\n            }\n            # Return the values to update it in the view.\n            return res\n        else:\n            res = {\n                'value': {\n                    'prioridade_solicitacao': '',\n                    'origin': 0\n                }\n            }\n            # Return the values to update it in the view.\n            return res\n    \n    def solicitar_pedido_compra(self, cr, uid, ids, context=None):\n        from datetime import datetime\n        obj_pedido = self.pool.get('stock.picking').browse(cr, uid, ids[0])\n\n        valores = {\n            'partner_id': 106,\n            'date_order': obj_pedido.min_date,\n            'picking_type_id': 1,\n            'x_tipo_pedido': 'comp',\n            'x_regularizacao': '0',\n            'x_centro_custo': obj_pedido.x_centro_custo.id,\n            'location_id': 12,\n            'invoice_method': 'order',\n            'fiscal_category_id': 8,\n            'pricelist_id': 2,\n            'state': 'draft'\n        }\n\n        obj_pedido_compra = self.pool.get('purchase.order').create(cr, uid, valores, context)\n        obj_pedido.write({'ordem_compra': obj_pedido_compra})\n\n        for item in obj_pedido.move_lines:\n            valores_item = {\n                'product_id': item.product_id.id,\n                'price_unit': item.product_id.standard_price,\n                'product_qty': item.product_uom_qty,\n                'product_uom': item.product_uom.id,\n                'order_id': obj_pedido_compra,\n                'name': item.name,\n                'fiscal_category_id': 8,\n                'fiscal_position': 1,\n                'date_planned': item.date_expected\n            }\n\n            obj_item_pedido_compra = self.pool.get('purchase.order.line').create(cr, uid, valores_item, context)\n\n\n        return {\n            'type': 'ir.actions.client',\n            'tag': 'reload',\n        }\n","sub_path":"almoxarifado_contrax/models/stock_picking_inherited.py","file_name":"stock_picking_inherited.py","file_ext":"py","file_size_in_byte":3988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"54193707","text":"def pizza_menu():\n    print(\"WELCOME TO PIZZA SECTION!!!! hope you enjoy\")\n\n    menu=[[1,'A',210,250,280],\n          [2,'B',260,300,1000],\n          [3,'C',560,340,500],\n          [4,'D',450,550,650]]\n    print('WHICH PIZZA DO YOU WANT TO ORDER')\n    print('-----------------------------------------------------')\n    print('sl.no        pizza    ')\n    print('-----------------------------------------------------')\n    for i in menu:\n        print()\n        print(i[0],'       ',i[1])\n        print('        small   medium   large  ')\n        print('        ',i[2],'   ',i[3],'   ',i[4])\n        print('---------------------------------------------------------')\n\n    a=True\n    t=[]\n    l=[]\n    c=input('Which category of pizza do you want')\n    while c:\n        print(t,\"t\")\n        print(l,'l')\n        if c=='1':\n            print()\n            print('For Small pizza press S')\n            print('For Medium pizza press M')\n            print('For Large pizza press L')\n            print()\n            t.append('A')\n            size=input('Which size of pizza do you want').lower()\n            if size=='s':\n                t.append('SMALL')\n                n=int(input('How many pizzas do you want'))\n                t.append(n)\n                t.append(n*210)\n                l.append(t)\n            elif size=='m':\n                t.append('MEDIUM')\n                n=int(input('How many pizzas do you want'))\n                t.append(n)\n                t.append(n*250)\n                l.append(t)\n            elif size=='l':\n                t.append('LARGE')\n                n=int(input('How many pizzas do you want'))\n                t.append(n)\n                t.append(n*280)\n                l.append(t)\n            print(t)\n        else:\n            c=input(\"INVALID OPTION DO YOU WISH TO CONTINUE IF YES PRESS Y\")\n            if c!='y':\n                print(\"Thank you for visiting\")\n                a=False\n                break\n            else:\n                pizza_menu()\n                a=False\n        return l\n\n\n\na = True\nwhile a:\n    name = input(\"Enter your Name:\").title()\n    if name == '':\n        print(\"Invalid Name\")\n        c = input(\"Do you want to continue: If YES press Y:\").lower()[:1]\n        if c!='y':\n            print(\"Thank you for visiting\")\n            a=False\n            break\n        else:\n            continue\n    a=False\n\n    print()\n    print(\"Dominos\".center(80,'*'))\n    print()\n    print(\"Welcome Mr/Mrs.\",name,\"To Dominos\")\n    print(\"\".center(80,\"*\"))\n    print()\n\n    first_view=[[1,'pizza'],\n                [2,'Drinks'],\n                [3,'Sandwich'],\n                [4,'Dessert']]\n    print(\"WHICH CATEGORY DO YOU WANT TO CHOOSE\")\n    print()\n    for i in first_view:\n        print(i[0],'  ',i[1])\n\n    amount=0\n    bill1=[]\n    bill=0\n    c=input(\"Enter the card serial number\")\n    print()\n    if c.isdecimal():\n        if c=='1':\n            z=True\n            while z:\n                bill=pizza_menu()\n                bill1.append(bill[0])\n                print('****************',bill,'****************')\n\n                if input('DO YOU WANT TO ORDER MORE PIZZA IF YES PRESS Y').lower()!='y':\n                    for i in bill:\n                        amount+=int(i[-1])\n                    z=False\n            print(\"The Total Amount is : \",amount)\n\n\n\n","sub_path":"project1.py","file_name":"project1.py","file_ext":"py","file_size_in_byte":3404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"618483718","text":"from __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nimport sys\nimport warnings\nimport tools\nimport random\nimport sympy as sym\nimport numpy as np\nimport matplotlib as mpl\nimport scipy as sp\nimport scipy.stats as stats\nimport pylab as plt\nplt.rc('font', family='DejaVu Sans')\nx, y, z = sym.symbols('x,y,z')\nfrom html_tools import html_image\nfrom table import Table\nfrom chi2TestOfHomogeneityData import Chi2TestOfHomogeneityData\n\nclass Chi2TestOfHomogeneity(object):\n    \n    def __init__(self, seed = None, path = \"chi2toh\"):\n        self.cache = set() # Just for names this time.\n        if seed is not None:\n            random.seed(seed) # Get predictable random behavior:)\n            np.random.seed(seed)\n        self.hist = \"\"\n        self.solution_plot = \"\"\n        self.path = path\n    \n    \n    def 
stem(self, context = None, q_type = None, a_type = 'preview', \n force = False):\n \"\"\"\n This will generate a problem for $\\chi^2$ test of homogeneity.\n \n Parameters:\n ----------\n context : Chi2TestOfHomogeneityData object\n This describes the problem. A default context is used if this is \n None.\n q_type : string [None, 'STAT', 'HT', 'PVAL'] \n If None, randomly choose. If 'STAT', just compute the chi2 statistic \n and the degrees of freedom. If 'HT', compute the p-value for the \n data and determine whether or not to reject the null hypothesis. If \n 'PVAL', compute the p-value.\n force : Boolean\n If True, force regeneration of images.\n a_type : string\n This is either \"MC\" or \"preview\" for now\n \"\"\"\n \n kwargs = {\n 'context': context,\n 'q_type': q_type,\n 'a_type': a_type,\n 'force': force\n \n }\n \n if q_type is None:\n q_type = random.choice(['STAT', 'HT', 'PVAL'])\n \n\n if context is None:\n context = Chi2TestOfHomogeneityData()\n \n if not context.is_valid:\n warnings.warn(\"Context had invalid cell counts.\")\n return \n \n # Generate unique name\n q_name = hash(context)\n self.cache.add(q_name)\n self.hist = str(q_name) + \".png\"\n self.solution_plot = str(q_name) + \"_sol.png\"\n \n style = None\n \n \n if a_type == 'preview':\n question_stem = \"
Question\"\n else:\n question_stem = \"\"\n \n if fmt == 'html':\n question_stem += Table.get_style()\n \n question_stem += context.story + \"\\n\"\n \n if fmt == 'html':\n tbl = context.observed_table.html()\n else:\n tbl = context.observed_table.latex()\n \n question_stem += \"\\n\" + tbl + \"\\n\"\n \n num_rows = len(context.rows)\n num_cols = len(context.cols)\n df = context.df\n \n chi2eq = \"$$\\\\chi^2_{%s}=\\\\sum_{i=1}^{%s}\\\\sum_{j=1}^{%s}\\\n \\\\frac{(O_{i,j}-E_{i,j})^2}{E_{i,j}}\\\n = %.3g$$\" % (df, num_rows, num_cols, context.chi2_stat)\n \n if q_type == 'STAT':\n question_stem += \"Compute the $_\\\\chi^2$_-statistic and degrees \\\n of freedom for the given observations.\"\n\n elif q_type == 'PVAL':\n \n question_stem += \"\"\"The $_\\\\chi^2$_ statistic is \n {chi2eq}\n \n \n Use this information to find the degrees of freedom (df) and the \n $_p\\\\text{{-value}} = P(\\\\chi^2_{{{df}}} > {chi2:.3g})$_.\n \"\"\".format(chi2eq=chi2eq, df = 'df', chi2=context.chi2_stat)\n \n elif q_type == 'HT':\n \n question_stem += \"\"\"The degrees of freedom are $_df = \n ({num_cols} -1)({num_rows} -1) = \n {df}$_ and the $_\\\\chi^2$_ statistic is {chi2eq} \n \n Use this information to conduct a hypothesis test with \n $_\\\\alpha = {a_level}$_. Choose the answer that best captures\n the null hypothesis and conclusion.\n \"\"\".format(num_cols = num_cols, num_rows = num_rows, df = df, \n chi2eq = chi2eq,\n a_level = context.a_level)\n \n \n if fmt == 'html':\n explanation = Table.get_style()\n else:\n explanation = \"\"\n \n if a_type == 'preview':\n explanation += \"Explanation\"\n \n if fmt == 'html':\n tbl1 = context.obs_marg_table.html()\n tbl2 = context.expected_table.html()\n else:\n tbl1 = context.obs_marg_table.latex()\n tbl2 = context.expected_table.latex()\n \n explanation += \"\"\"
To find the expected counts assuming\n homogeneity of distributions, first find the column totals and\n divide by the size of the sample. This provides the overall \n distribution. Let $$p_j = \\\\frac{(\\\\text{sum of column }j)}{N},$$\n where $_N$_ is the total size of the population.\n \n To compute the expected count for the \n $_(i,j)^{\\\\text{th}}$_&nbsp; cell, multiply the \n sum of the observed values in the $_i^{\\\\text{th}}$_&nbsp; \n row by $_p_j$_. This gives $_E_{i,j}$_.\n \n These two steps can be combined to give:\n $$E_{i,j} = \\\\frac{(\\\\text{sum of row }i)\\\\cdot\n (\\\\text{sum of column }j)}{N}.$$\n Notice that this is exactly the same computation as for a\n $_\\\\chi^2$_-test of independence.\n \n The expected counts are shown in the following \n table:\n \"\"\"\n explanation += tbl2\n\n explanation += \"Recall that the observed counts are:\"\n explanation += tbl1\n \n explanation += \"\"\"\n The degrees of freedom are $_df = \n ({num_cols} - 1)({num_rows} - 1) = \\\n {df}$_ and the $_\\\\chi^2$_-statistic is:{chi2eq}\n \"\"\".format(num_cols = num_cols, num_rows = num_rows, \n df = df, chi2eq = chi2eq)\n \n if q_type in ['HT','PVAL']:\n \n rv = stats.chi2(df)\n p_val = 1 - rv.cdf(context.chi2_stat)\n \n fname = context.show(path = self.path, force = force)\n \n img = html_image(fname, width = '300px', \n preview = (a_type == 'preview'))\n \n caption = \"\"\"\n Light shading (right of the red line) indicates the p-value.\n
The darker shading indicates the $_\\\\alpha = $_ {a_level:.0%} \n level.\n The background histogram is a bootstrap sampling distribution.\n \"\"\".format(a_level = context.a_level)\n \n explanation += \"\"\"\n The p-value for this data is:\n $$\\\\text{p-value} = P(\\\\chi^2_{%s} > %.3g) = %.4g%s$$\n %s\n %s\n \"\"\" % (df, context.chi2_stat, p_val * 100, '\\\\%', img, caption)\n \n \n if q_type == 'HT':\n \n if p_val < context.a_level:\n \n explanation += \"\"\"\n
The p-value is less than the $_\\\\alpha$_-level so\n the null hypothesis:\n H0: {null} \n is rejected. That is, we accept the alternative hypothesis:\n Ha: {alt} \n Precisely, assuming the null hypothesis, there\n is only a {p_val:.2%} probability due to \n random chance in sampling that\n the difference in the expected and observed data is at \n least this large.\n \"\"\".format(null=context.null, alt=context.alternative,\n p_val=p_val)\n else:\n explanation += \"\"\"\n The p-value is greater than the $_\\\\alpha$_-level so\n the null hypothesis:\n H0: {null}\n is not rejected. Precisely, assuming the null hypothesis,\n there is a {p_val:.2%} probability due to \n random chance in sampling that\n the difference in the expected and observed data is at \n least this large.\n \"\"\".format(null=context.null, p_val=p_val)\n \n if context.note is not None:\n explanation += \"\"\"\n Note: {note}\n \"\"\".format(note=context.note)\n \n \n errors = self.gen_errors(q_type, context)\n \n \n if a_type == 'preview':\n\n errs = [[er] for er in errors]\n\n choices = \"\\n
Choices\\n\"\n\n tb = Table(errs, row_headers = ['Answer'] + ['Distractor']*4)\n \n \n tbl = tb.html()\n \n choices += Table.get_style() + \"\\n\" + tbl \n\n if fmt == 'html':\n return question_stem + choices + explanation\n else:\n return question_stem.replace(\"
\",\"

\") + choices + \\\n explanation.replace(\"
\",\"

\")\n \n return question_stem + choices + explanation \n \n elif a_type == 'MC':\n \n if fmt == 'latex':\n question_stem = question_stem.replace(\"
\",\"

\")\n explanation = explanation.replace(\"
\",\"

\")\n distractors = [err.replace(\"
\",\"

\") for err in errors]\n \n \n question_stem = ' '.join(question_stem.split())\n distractors = [' '.join(err.split()) for err in errors]\n explanation = ' '.join(explanation.split()) + \"\\n\"\n return tools.fully_formatted_question(question_stem, explanation, \n answer_choices=distractors)\n\n elif a_type == 'Match':\n pass\n else:\n pass \n \n \n \n def gen_errors(self, q_type, context):\n N = len(context.cols)\n M = len(context.rows)\n df = context.df\n observed = context.observed\n expected = context.expected[:-1,:-1]\n T = context.obs_marg[-1,-1]\n # A few potential errors\n # df = N*M instead of (N-1)(M-1)\n # use |O_ij - E_ij|/E_ij instead of (O_ij - E_ij)^2\n # use |O_ij - E_ij|/E_ij instead of (O_ij - E_ij)^2 and df = NM\n # use (O_i - E_i)/O_i\n # use (O_i - E_i)/O_i and df = N\n # take sqrt of chi^2-stat\n \n errors = [(N*M, context.chi2_stat),\n (df, np.sum(np.abs(observed - expected) / expected)),\n (N*M, np.sum(np.abs(observed - expected) / expected)),\n (df, np.sum((observed - expected)**2 / observed)),\n (N*M, np.sum((observed - expected)**2 / observed)),\n (df, context.chi2_stat ** .5)]\n \n if q_type == 'STAT':\n \n def error_string0(df, chi2):\n ans = '(degrees of freedom) df = %s and the $_\\\\chi^2$_-test \\\n statistic = %.3g' % (df, chi2)\n return ans\n \n ans = error_string0(df, context.chi2_stat)\n # list(...) so the result can be shuffled and sliced below\n errors = list(map(lambda x: error_string0(*x), errors))\n \n \n \n \n if q_type in ['PVAL']:\n \n def error_string1(df, chi2):\n rv = stats.chi2(df)\n p_val = 1 - rv.cdf(chi2)\n \n ans = '(degrees of freedom) df = %s and the p-value = %.3g'\\\n % (df, p_val)\n return ans\n \n ans = error_string1(context.df, context.chi2_stat)\n errors = list(map(lambda x: error_string1(*x), errors))\n \n if q_type == 'HT':\n \n def error_string2(a_level, correct, df, chi2): \n rv = stats.chi2(df)\n p_val = 1 - rv.cdf(chi2)\n \n if p_val < a_level and correct:\n ans = \"\"\"\n The p-value is {p_val:.2%} and this is less than the \n $_\\\\alpha$_-level of {a_level}. Therefore we reject the\n null hypothesis and find evidence in favor of the \n alternative hypothesis:
\n  H1: {alt}\n \"\"\".format(p_val = p_val, a_level = a_level, \n alt = context.alternative)\n \n elif p_val >= a_level and correct:\n ans = \"\"\"\n The p-value is {p_val:.2%} and this is greater than the \n $_\\\\alpha$_-level of {a_level}. Therefore we fail to \n reject the null hypothesis thus supporting the \n hypothesis:
\n  H0: {null}\n \"\"\".format(p_val = p_val, a_level = a_level, \n null = context.null)\n \n elif p_val < a_level and not correct:\n ans = \"\"\"\n The p-value is {p_val:.2%} and this is less than the \n $_\\\\alpha$_-level of {a_level}. Therefore we fail to \n reject the null hypothesis thus supporting the \n hypothesis:
\n  H0: {null}\n \"\"\".format(p_val = p_val, a_level = a_level, \n null = context.null)\n else:\n ans = \"\"\"\n The p-value is {p_val:.2%} and this is greater than the \n $_\\\\alpha$_-level of {a_level}. Therefore we reject the\n null hypothesis and find evidence in favor of the \n alternative hypothesis:
\n &nbsp;&nbsp;H1: {alt}\n \"\"\".format(p_val = p_val, a_level = a_level, \n alt = context.alternative)\n \n \n return ans\n \n ans = error_string2(context.a_level, True, context.df, context.chi2_stat)\n errors = list(map(lambda x: error_string2(context.a_level, \n random.choice([True, False]), *x), errors))\n \n random.shuffle(errors)\n errors = [ans] + errors[0:4]\n \n return errors\n \n\n \n \n \nif __name__ == \"__main__\":\n \n a_type = 'preview'\n fmt = 'html'\n seed = 44\n \n def gen_ctx(seed = seed):\n \n \n # Default context \n ctx = Chi2TestOfHomogeneityData(seed = seed) \n \n # A non default context with a little randomness thrown into\n # the distributions.\n \n # Here is a second context\n \n cd_phone1 = [.2, .8]\n cd_phone2 = [.3, .7]\n cd_no_phone1 = [.4, .6]\n cd_no_phone2 = [.5, .5]\n \n ctx_phone_cd_args = {\n 'story':\"\"\"\n An online survey company puts out a poll asking people two questions. \n First, it asks if they buy physical CDs. Second, it asks whether they \n own a smartphone. The company wants to determine if buying physical \n CDs depends on owning a smartphone.\n \"\"\",\n 's_sizes':[random.randint(40, 100), random.randint(10, 50)],\n 'rows':['Smartphone', 'No smartphone'],\n 'cols':['CD', 'No CD'],\n 'row_dists':[random.choice([cd_phone1, cd_phone2]), \n random.choice([cd_no_phone1, cd_no_phone2])]\n }\n \n ctx_phone_cd = Chi2TestOfHomogeneityData(seed = seed, \n **ctx_phone_cd_args)\n \n # A default context where an initial set of observations is given\n # instead of the row distributions.\n Men = random.randint(35, 45)\n ctx_gender_math_args = {\n 'story':\"\"\"\n A survey was given to 85 students in a Basic Algebra course, \n with the following responses to the statement \"I enjoy math.\"\n \n Test whether the distributions of responses to the survey\n are the same between men and women.\n \"\"\",\n 'data':[[9,13,5,4,2],[12,18,11,6,5]], \n 'rows':['Men', 'Women'],\n 'cols':['Strongly Agree', 'Agree', 'Neutral', 'Disagree', \n 'Strongly Disagree'],\n 's_sizes':[Men, 85 - Men]\n }\n\n ctx_gender_math = Chi2TestOfHomogeneityData(seed = seed,\n **ctx_gender_math_args)\n \n return [ctx, ctx_phone_cd, ctx_gender_math] # , ctx_phone_cd]\n\n \n prob = Chi2TestOfHomogeneity(seed = seed)\n \n pb = \"\"\n for q_type in ['STAT', 'PVAL', 'HT']:\n for c in gen_ctx():\n result = prob.stem(context = c, q_type = q_type,\n a_type = a_type)\n if result is not None:\n \n if a_type == 'preview':\n pb += '
'\n \n pb += result\n \n if a_type == 'preview':\n pb += '

'\n print(pb)","sub_path":"chi2TestOfHomogeneity.py","file_name":"chi2TestOfHomogeneity.py","file_ext":"py","file_size_in_byte":19265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"460248655","text":"import sys\n\nlines = []\nfor line in sys.stdin:\n    lines.append(int(line.replace(\"\\n\", \"\")))\nlines.append(max(lines) + 3)\n\ndiff1 = 0\ndiff3 = 0\noutlet = 0\n\nwhile True:\n    if (outlet + 1 in lines):\n        diff1 += 1\n        outlet += 1\n    elif (outlet + 2 in lines):\n        outlet += 1\n    elif (outlet + 3 in lines):\n        diff3 += 1\n        outlet += 3\n    else:\n        break\n    \n\nprint(str(diff1 * diff3))","sub_path":"AdventOfCode2020/10-AdapterArray/script1.py","file_name":"script1.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"513322414","text":"ar=100000\nsieve = [True] * ar\ndef mark(sieve, x):\n    for i in range(x+x, len(sieve), x):\n        sieve[i] = False\n\nfor x in range(2, int(len(sieve) ** 0.5) + 1):\n    if sieve[x]: \n        mark(sieve, x)\n    \nlst=[]\nfor i in range(2,10000):\n    if sieve[i]: lst.append(i)\ndef primes(x):\n    n=0\n    for b in lst:\n        if x%b==0:\n            n+=1\n        if n==4:\n            return 1\n    return 0\n    \nfor i in range(644,500000):\n    if (primes(i) and primes(i+1) and primes(i+2) and primes(i+3) ):\n        print(i)\n        break\n","sub_path":"prob47.py","file_name":"prob47.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"23305172","text":"import math\n\nsolutions = []\n\nfor n in range(3,10000000):\n    i = sum([math.factorial(int(d)) for d in list(str(n))])\n    if i == n:\n        print(n)\n        solutions.append(n)\n\nprint(solutions)\nprint(\"Result:\", sum(solutions))","sub_path":"34.py","file_name":"34.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"597385722","text":"#!/usr/bin/env python3\n\nimport argparse\nimport sys\nimport time\nimport boto3\n\n\nMAX_WAIT_TIME = 60\n\n\ndef terminate_instance(instance_id):\n    ec2 = boto3.resource('ec2')\n    instance = ec2.Instance(instance_id)\n    response = instance.terminate()\n    return response\n\n\ndef get_instances():\n    \"\"\"\n    Get all instances\n    Return list of [(instance_id, instance_type)]\n    \"\"\"\n    cpus = {'running': [], 'stopped': [], 'terminated': []}\n    gpus = {'running': [], 'stopped': [], 'terminated': []}\n    ec2 = boto3.client('ec2')\n    response = ec2.describe_instances()\n    for res in response['Reservations']:\n        for instance in res['Instances']:\n            k = instance['State']['Name']\n            if instance['InstanceType'].startswith('p'):\n                gpus[k].append((instance['InstanceId'],\n                                instance['InstanceType'],\n                                instance['State']['Name']))\n            elif instance['InstanceType'].startswith('m') or instance['InstanceType'].startswith('c'):\n                cpus[k].append((instance['InstanceId'],\n                                instance['InstanceType'],\n                                instance['State']['Name']))\n            else:\n                raise NotImplementedError('Unknown instance type in the deck. 
{}'\n .format(instance['InstanceId']))\n if len(gpus['running']) + len(gpus['stopped']) > 1 or\\\n len(cpus['running']) + len(cpus['stopped']) > 1:\n raise NotImplementedError('Multiple cpus or gpus in the queue, es no bueno.')\n\n return {'gpus': gpus, 'cpus': cpus}\n\n\ndef start_instance(instance_id, dryrun=False):\n ec2 = boto3.client('ec2')\n response = ec2.start_instances(InstanceIds=[instance_id], DryRun=dryrun)\n if wait_until(instance_id, 'running'):\n return response\n else:\n print(\"Instance still isn't started.\")\n sys.exit(1)\n\n\ndef stop_instance(instance_id, dryrun=False):\n ec2 = boto3.client('ec2')\n response = ec2.stop_instances(InstanceIds=[instance_id], DryRun=dryrun)\n if wait_until(instance_id, 'stopped'):\n return response\n else:\n print(\"Instance still isn't stopped.\")\n sys.exit(1)\n\n\ndef instance_status(instance_id, dryrun=False):\n ec2 = boto3.client('ec2')\n response = ec2.describe_instances(InstanceIds=[instance_id], DryRun=dryrun)\n return response['Reservations'][0]['Instances'][0]['State']['Name']\n\n\ndef wait_until(instance_id, status):\n counter = 0\n while counter < MAX_WAIT_TIME and instance_status(instance_id) != status:\n time.sleep(5)\n counter += 5\n\n if counter >= MAX_WAIT_TIME:\n return False\n else:\n time.sleep(15)\n return True\n\n\ndef get_instance_ip(instance_id, dryrun=False):\n ec2 = boto3.client('ec2')\n response = ec2.describe_instances(InstanceIds=[instance_id], DryRun=dryrun)\n return response['Reservations'][0]['Instances'][0]['PublicIpAddress']\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='AWS CLI utilities.')\n parser.add_argument('action', type=str,\n help='\"start\" or \"stop\" an instance')\n parser.add_argument('type', type=str,\n help='\"gpu\" or \"cpu\" instance type')\n parser.add_argument('--dryrun', action=\"store_true\",\n help='Enable a dryrun of the process.')\n args = parser.parse_args()\n instances = get_instances()\n if args.action == 'start':\n func = start_instance\n find_inst = 'stopped'\n opposite_inst = 'running'\n elif args.action == 'stop':\n func = stop_instance\n find_inst = 'running'\n opposite_inst = 'stopped' # defined for both actions so the fallback lookup below never raises a NameError\n else:\n print('Only actions are \"start\" or \"stop\".')\n sys.exit(1)\n if args.type == 'gpu':\n try:\n instance_id = instances['gpus'][find_inst][0][0]\n except IndexError:\n try:\n instance_id = instances['gpus'][opposite_inst][0][0]\n print(get_instance_ip(instance_id))\n sys.exit(0)\n except IndexError:\n print(\"No gpus available.\")\n sys.exit(1)\n elif args.type == 'cpu':\n try:\n instance_id = instances['cpus'][find_inst][0][0]\n except IndexError:\n print(\"No cpus available.\")\n sys.exit(1)\n else:\n raise NotImplementedError('Invalid instance type: \"gpu\" or \"cpu\".')\n\n func(instance_id, args.dryrun)\n if args.action == 'start':\n print(get_instance_ip(instance_id, args.dryrun))\n sys.exit(0)\n","sub_path":"aws_utils.py","file_name":"aws_utils.py","file_ext":"py","file_size_in_byte":4581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"325247538","text":"#! 
python3\n# -*- coding: utf-8 -*-\n#search_cnae - Search a list of CNAES at CONCLA's website.\nfrom sys import argv\nfrom selenium import webdriver\narguments = argv[1]\n\nbrowser = webdriver.PhantomJS()\n\n\nwith open(arguments) as argument_file:\n\tfor i in argument_file.readlines():\n\t\tbrowser.get(('http://cnae.ibge.gov.br/busca-online-cnae.html?option=com_cnae&view=atividades&Itemid=6160&tipo=cnae&chave=' + i + '&versao_classe=7.0.0&versao_subclasse=9.1.0'))\n\t\tcoluna_1 = browser.find_element_by_css_selector('.sorting_1')\n\t\tcoluna_2 = browser.find_element_by_css_selector('.sorting_1+td')\n\t\t\n\t\tprint(coluna_1.text, coluna_2.text)\n\t\t\t\n\t\t\t\n\t\t\n\t\t","sub_path":"search_cnae.py","file_name":"search_cnae.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"590903251","text":"'''\r\nCreated on 21 mar. 2020\r\n\r\n@author: Sidney\r\n\r\n\r\nEscribir un programa que pida al usuario un número entero y\r\nmuestre por pantalla un triángulo rectángulo como el de más abajo,\r\nde altura el número introducido.\r\n\r\nhttp://aprendeconalf.es/python/ejercicios/bucles.html\r\n\r\n'''\r\n\r\nnum = abs(int(input(\"¿Introduce un número?: \")))\r\n\r\naste = \"*\"\r\n\r\nfor a in range(1,num+1):\r\n print(aste*a)","sub_path":"ejerciciosALF_bucles/main5.py","file_name":"main5.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"446283921","text":"# Hugo BERANGER - M2 MIAGE IA\n\n# using the knn algorithm and checking the 5 closest neighbours\nfrom sklearn.neighbors import KNeighborsClassifier\nimport matplotlib.pyplot as plt\nimport os\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport glob\nimport cv2\nimport time\nfrom skimage.feature import hog\nfrom sklearn.neighbors import NearestCentroid\n\n#!wget --no-check-certificate -r 'http://www.i3s.unice.fr/~sanabria/files/dataset.zip' -O dataset.zip\n#!unzip -qq dataset.zip -d /home/hugo/Documents/\n#!ls /home/hugo/Documents/dataset\n\n#!wget --no-check-certificate -r 'http://www.i3s.unice.fr/~sanabria/files/animals_dataset.zip' -O animals_dataset.zip\n#!unzip -qq animals_dataset.zip -d /home/hugo/Documents/\n#!ls /home/hugo/Documents/animals_dataset\n\ndataset_path = \"/home/hugo/Documents/dataset/\"\n\n#dataset_path = \"/home/hugo/Documents/animals_dataset/\"\n\nclasses = os.listdir(dataset_path)\n\n# array of images\nX = []\nfor class_name in classes:\n class_path = dataset_path + class_name\n for image_path in glob.glob(class_path + \"/*.jpg\"):\n X.append(image_path)\n\n# array of labels\ny = []\nfor image_path in X:\n y.append(image_path.split('/')[-2])\n\n# transform images name and labels into 0 and 1\nle = preprocessing.LabelEncoder()\nle.fit(y)\ny = le.transform(y)\n\n# split the dataset in 3 group : train, test and validation\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.2, stratify=y)\nX_test, X_val, y_test, y_val = train_test_split(\n X_test, y_test, test_size=0.5, stratify=y_test)\n\n# resize the images in order to make the task easier for the algorithm\n\n\ndef get_hog_features(X):\n X_hog = []\n for image_path in X:\n image = cv2.imread(image_path)\n image_resize = cv2.resize(image, (64, 64))\n fd = hog(image_resize, multichannel=True)\n X_hog.append(fd)\n return np.asarray(X_hog)\n\n\n# resizing\nX_train_image 
= get_hog_features(X_train)\nX_test_image = get_hog_features(X_test)\nX_val_image = get_hog_features(X_val)\n\n# knn\nknn = KNeighborsClassifier(n_neighbors=5)\nknn.fit(X_train_image, y_train)\n\n# checking the knn score; it's actually pretty bad because we don't check enough neighbours given the size of the dataset\nprint(knn.score(X_test_image, y_test))\n\n# checking which k value is best to use; only went up to 20 because of limited compute. The curve should go back down after a while, because checking too many neighbours isn't efficient\n\n\ndef check_results_different_k(from_k, to_k, X_train, X_val):\n    scores = []\n    k_values = []\n    for k in range(from_k, to_k):\n        knn = KNeighborsClassifier(n_neighbors=k)\n        knn.fit(X_train, y_train)\n        scores.append(knn.score(X_val, y_val))\n        k_values.append(k)\n    plt.plot(k_values, scores)\n    plt.show()\n\n\nstart_time = time.time()\ncheck_results_different_k(2, 20, X_train_image, X_val_image)\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n\nk_best_result = 15\nknn = KNeighborsClassifier(n_neighbors=k_best_result)\nknn.fit(X_train_image, y_train)\nprint(knn.score(X_test_image, y_test))\n\n# nearest centroid\nnc = NearestCentroid()\nnc.fit(X_train_image, y_train)\nnc.score(X_test_image, y_test)\n","sub_path":"computer_vision/TD2_6_knn_hog.py","file_name":"TD2_6_knn_hog.py","file_ext":"py","file_size_in_byte":3246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"225948851","text":"#Huaizhe Xu\n\nfrom ReadFile import Read\nimport astropy.units as u\nimport numpy as np\n\n#define Component Mass function\ndef ComponentMass(filename, ptype):\n    #call read function to get data from data file\n    time, number_particles, data = Read(filename)\n    #find total mass of any desired galaxy component\n    index = np.where(data['type'] == ptype)\n    # type mass data\n    mass = data['m'][index] * 1e10 * u.solMass\n    #round mass to 3 decimal places\n    mass = np.around(mass, 3)\n    return mass\n\nfilename1 = 'M31_000.txt'\nfilename2 = 'M33_000.txt'\nfilename3 = 'MW_000.txt'\n\n#Type1 = Dark Matter, and sum the total component mass for M31\ndark_matter_31 = np.sum(ComponentMass(filename1, ptype=1))\n#Type2 = Disk Stars\ndisk_stars_31 = np.sum(ComponentMass(filename1, ptype=2))\n#Type3 = Bulge Stars\nbulge_stars_31 = np.sum(ComponentMass(filename1, ptype=3))\n#add all mass together\ntotal_mass_31 = dark_matter_31 + disk_stars_31 + bulge_stars_31\n#baryon fraction\nf_bar_31 = disk_stars_31 / (dark_matter_31 + disk_stars_31)\n\n#Type1 = Dark Matter, and sum the total component mass for M33\ndark_matter_33 = np.sum(ComponentMass(filename2, ptype=1))\n#Type2 = Disk Stars\ndisk_stars_33 = np.sum(ComponentMass(filename2, ptype=2))\n#add all mass together\ntotal_mass_33 = dark_matter_33 + disk_stars_33\n#baryon fraction\nf_bar_33 = disk_stars_33 / (dark_matter_33 + disk_stars_33)\n\n#Type1 = Dark Matter, and sum the total component mass for Milky Way\ndark_matter_mw = np.sum(ComponentMass(filename3, ptype=1))\n#Type2 = Disk Stars\ndisk_stars_mw = np.sum(ComponentMass(filename3, ptype=2))\n#Type3 = Bulge Stars\nbulge_stars_mw = np.sum(ComponentMass(filename3, ptype=3))\n#add all mass together\ntotal_mass_mw = dark_matter_mw + disk_stars_mw + bulge_stars_mw\n#baryon fraction\nf_bar_mw = disk_stars_mw / (dark_matter_mw + disk_stars_mw)\n\n#calculate local group mass by adding MW, M31, M33\nmass_local = total_mass_mw + total_mass_31 + total_mass_33\n\nprint('Halo Mass for MW, M31, M33 are:', dark_matter_mw, dark_matter_31, 
dark_matter_33, '\\n')\nprint('Disk Mass for MW, M31, M33 are:', disk_stars_mw, disk_stars_31, disk_stars_33, '\\n')\nprint('Bulge Mass for MW, M31:', bulge_stars_mw, bulge_stars_31, '\\n')\nprint('Total Mass for MW, M31, M33 are:', total_mass_mw, total_mass_31, total_mass_33, '\\n')\nprint('f_bar for MW, M31, M33 are:', f_bar_mw, f_bar_31, f_bar_33, '\\n')\nprint('local group mass is:', mass_local)\n\n\n\n","sub_path":"GalaxyMass/GalaxyMass.py","file_name":"GalaxyMass.py","file_ext":"py","file_size_in_byte":2404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"438591883","text":"'''Adding more variables and therefore more complexity to your logistic regression model does not automatically result in more accurate models. \nIn this exercise you can verify whether adding 3 variables to a model leads to a more accurate model.\n\nvariables_1 and variables_2 are available in your environment: you can print them to the console to explore what they look like.'''\n#TASK\n# Fit the logreg model using variables_2 which contains 3 additional variables compared to variables_1.\n# Make predictions for this model.\n# Calculate the AUC of this model.\n\n# Create appropriate dataframes\nX_1 = basetable[variables_1]\nX_2 = basetable[variables_2]\ny = basetable[[\"target\"]]\n\n# Create the logistic regression model\nlogreg = linear_model.LogisticRegression()\n\n# Make predictions using the first set of variables and assign the AUC to auc_1\nlogreg.fit(X_1, y)\npredictions_1 = logreg.predict_proba(X_1)[:,1]\nauc_1 = roc_auc_score(y, predictions_1)\n\n# Make predictions using the second set of variables and assign the AUC to auc_2\nlogreg.____(____, ____)\npredictions_2 = ____.____(____)[____,____]\nauc_2 = ____(____, ____)\n\n# Print auc_1 and auc_2\nprint(round(auc_1,2))\nprint(round(auc_2,2))\n\n\n\n\n#SOLUTION\n# Consider two sets of variables\nvariables_1 = [\"mean_gift\",\"income_low\"]\nvariables_2 = [\"mean_gift\",\"income_low\",\"gender_F\",\"country_India\",\"age\"]\n\n# Create appropriate dataframes\nX_1 = basetable[variables_1]\nX_2 = basetable[variables_2]\ny = basetable[[\"target\"]]\n\n# Create the logistic regression model\nlogreg = linear_model.LogisticRegression()\n\n# Make predictions using the first set of variables and assign the AUC to auc_1\nlogreg.fit(X_1, y)\npredictions_1 = logreg.predict_proba(X_1)[:,1]\nauc_1 = roc_auc_score(y, predictions_1)\n\n# Make predictions using the second set of variables and assign the AUC to auc_2\nlogreg.fit(X_2, y)\npredictions_2 = logreg.predict_proba(X_2)[:,1]\nauc_2 = roc_auc_score(y, predictions_2)\n\n# Print auc_1 and auc_2\nprint(round(auc_1,2))\nprint(round(auc_2,2))","sub_path":"DataCamp_Foundations_of_Predictive_Analytics_in_Python(Part 1)/2.2.Using_different_sets_of_variables.py","file_name":"2.2.Using_different_sets_of_variables.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"303916476","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndifference = []\nfor j in range(0,13):\n if j <10:\n data = pd.read_csv('ALL000'+str(j)+'/F000'+str(j)+'CH3.CSV',names = ['time','voltage','that'])\n voltage = data['voltage']\n max = -2000\n min = 2000\n for i in voltage:\n if i >= max:\n max =i\n if i<= min:\n min = i\n elif j >=10:\n data = pd.read_csv('ALL00'+str(j)+'/F00'+str(j)+'CH3.CSV',names = ['time','voltage','that'])\n voltage = data['voltage']\n max = -2000\n 
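# running extrema of the captured trace (note: shadows the built-in\n        # max()/min() names, as in the rest of this script)\n        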
min = 2000\n        for i in voltage:\n            if i >= max:\n                max = i\n            if i <= min:\n                min = i\n    print(max, min, abs(max)+abs(min))\n    difference.append(abs(max) + abs(min))\n\nangles = [0,5,10,15,20,25,30,35,40,45,50,85,90]\ndifference = np.array(difference)\ndifference = difference / 3.91\nvoltage = np.array(voltage)\n\nplt.figure()\nplt.plot(angles,difference,'.-')\nplt.xlabel('Angle (degrees)')\nplt.ylabel('Intensity (V/W)')\nplt.grid(True)\nplt.savefig('FLvsAngle.pdf')\nplt.show()\n\n\n","sub_path":"MRPData/OscDataCont/oscdata.py","file_name":"oscdata.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"168258369","text":"# code based on https://k4keye.tistory.com/111\n\n'''\n0. Added: undo the previous action, save, load\n1. Code cleanup\n'''\n\nimport pyautogui as m \nimport time \nimport sys \nimport os\nimport numpy as np\n\nChoice_arr = []\nMouse_L_X = [] \nMouse_L_Y =[]\nMouse_time = []\n\n# time.sleep() delay setting\ntmp=1\n\ndef Mouse_in(): \n    time_cnt = 3 \n    while time_cnt: \n        print(str(time_cnt)+ ' second(s) until the coordinates of the desired spot are saved.') \n        time_cnt -= 1 \n        time.sleep(1) \n    \n    x,y =m.position() \n    \n    print('coordinates x : {} , y : {}'. format(x,y)) \n    Mouse_L_X.append(int(x)) \n    Mouse_L_Y.append(int(y)) \n    \n    Mouse_time.append(tmp)\n\ndef Mouse_out(M_time_cnt): \n    i = Mouse_time[M_time_cnt] \n    time_cnt =0 \n    \n    time.sleep(i) \n    m.moveTo(Mouse_L_X[M_time_cnt],Mouse_L_Y[M_time_cnt])\n    m.doubleClick() \n    time_cnt+=1 \n    time.sleep(1) \n    return 0\n\ndef start(): \n    while True: \n        M_time_cnt = -1 \n        \n        for i in Choice_arr: \n            # if the action was a mouse move/click\n            if i == '1' : \n                M_time_cnt += 1 \n                Mouse_out(M_time_cnt) \n            else: break\n        break\n    return \n\ndef main(): \n    global Choice_arr\n    global Mouse_L_X\n    global Mouse_L_Y\n    global Mouse_time\n    \n\n    while True: \n        print(' ** {} action(s) so far '.format(len(Choice_arr)))\n        Choice = input(\"1. Mouse move/click 2. Start 3. Undo last action 4. Save 5. Load 6. 
종료 : \") \n print(Choice) \n \n\n Choice_arr.append(Choice)\n if Choice == '1' :\n Mouse_in() \n\n elif Choice == '2' : \n print( ' {} 개의 동작' .format(len(Mouse_L_X)))\n print(\"start!!\")\n start()\n continue\n\n # 이전 동작 되돌리기 \n elif Choice == '3':\n Mouse_L_X.pop()\n Mouse_L_Y.pop()\n Choice_arr.pop()\n\n # 현재까지의 history를 numpy로 저장합니다.\n elif Choice == '4':\n np.savez('history',Choice_arr,Mouse_L_X,Mouse_L_Y,Mouse_time)\n\n # 저장된 history를 로드하여 매크로를 실행합니다.\n elif Choice == '5':\n Choice_arr = np.load('history.npz')['arr_0']\n Mouse_L_X = np.load('history.npz')['arr_1']\n Mouse_L_Y = np.load('history.npz')['arr_2']\n Mouse_time = np.load('history.npz')['arr_3']\n print( '{} 개의 동작' .format(len(Mouse_L_X)))\n print(\"start!!\")\n start()\n\n elif Choice == '6':\n print('종료합니다.')\n break\n \n\n\n\nif __name__ == '__main__' : \n main()","sub_path":"resources/temp/macro.py","file_name":"macro.py","file_ext":"py","file_size_in_byte":2717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"25190679","text":"# -*- coding: utf-8 -*-\n# ------------------------------------------------------------------------------\n#\n# Copyright 2018-2019 Fetch.AI Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# ------------------------------------------------------------------------------\n\n\"\"\"This module contains the tests for the helper module.\"\"\"\n\nfrom typing import Dict, FrozenSet, Optional, cast\n\nfrom aea.helpers.dialogue.base import Dialogue as BaseDialogue\nfrom aea.helpers.dialogue.base import DialogueLabel\nfrom aea.helpers.dialogue.base import Dialogues as BaseDialogues\nfrom aea.mail.base import Address\nfrom aea.protocols.base import Message\nfrom aea.protocols.default.message import DefaultMessage\n\n\nclass Dialogue(BaseDialogue):\n\n INITIAL_PERFORMATIVES = frozenset({}) # type: FrozenSet[Message.Performative]\n TERMINAL_PERFORMATIVES = frozenset({}) # type: FrozenSet[Message.Performative]\n VALID_REPLIES = (\n {}\n ) # type: Dict[Message.Performative, FrozenSet[Message.Performative]]\n\n def __init__(\n self,\n dialogue_label: DialogueLabel,\n agent_address: Optional[Address] = None,\n role: Optional[BaseDialogue.Role] = None,\n ) -> None:\n \"\"\"\n Initialize a dialogue.\n\n :param dialogue_label: the identifier of the dialogue\n :param agent_address: the address of the agent for whom this dialogue is maintained\n :param role: the role of the agent this dialogue is maintained for\n :return: None\n \"\"\"\n BaseDialogue.__init__(\n self,\n dialogue_label=dialogue_label,\n agent_address=agent_address,\n role=role,\n rules=BaseDialogue.Rules(\n cast(FrozenSet[Message.Performative], self.INITIAL_PERFORMATIVES),\n cast(FrozenSet[Message.Performative], self.TERMINAL_PERFORMATIVES),\n cast(\n Dict[Message.Performative, FrozenSet[Message.Performative]],\n self.VALID_REPLIES,\n ),\n ),\n )\n\n def is_valid(self, message: Message) -> bool:\n \"\"\"\n Check whether 'message' is a valid next message in the dialogue.\n\n These rules 
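On the save/load pair in the macro script above (options 4 and 5): np.savez coerces the Python lists to arrays, so a reloaded history comes back as numpy arrays, e.g. the choice strings as a '<U1' array rather than a list. A minimal round-trip sketch with hypothetical values:

import numpy as np

choices, xs, ys, ts = ['1', '1'], [100, 250], [200, 300], [1, 1]
np.savez('history', choices, xs, ys, ts)          # writes history.npz
hist = np.load('history.npz')                     # positional args -> arr_0..arr_3
print(list(hist['arr_0']), list(hist['arr_1']))   # ['1', '1'] [100, 250]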
capture specific constraints designed for dialogues which are instance of a concrete sub-class of this class.\n\n :param message: the message to be validated\n :return: True if valid, False otherwise.\n \"\"\"\n pass\n\n\nclass Dialogues(BaseDialogues):\n\n END_STATES = frozenset({}) # type: FrozenSet[BaseDialogue.EndState]\n\n def __init__(self, agent_address: Address) -> None:\n \"\"\"\n Initialize dialogues.\n\n :param agent_address: the address of the agent for whom dialogues are maintained\n :return: None\n \"\"\"\n BaseDialogues.__init__(\n self,\n agent_address=agent_address,\n end_states=cast(FrozenSet[BaseDialogue.EndState], self.END_STATES),\n )\n\n def create_dialogue(\n self, dialogue_label: DialogueLabel, role: Dialogue.Role,\n ) -> Dialogue:\n \"\"\"\n Create a dialogue instance.\n\n :param dialogue_label: the identifier of the dialogue\n :param role: the role of the agent this dialogue is maintained for\n\n :return: the created dialogue\n \"\"\"\n pass\n\n @staticmethod\n def role_from_first_message(message: Message) -> BaseDialogue.Role:\n \"\"\"\n Infer the role of the agent from an incoming or outgoing first message\n\n :param message: an incoming/outgoing first message\n :return: the agent's role\n \"\"\"\n pass\n\n\nclass TestDialogueBase:\n \"\"\"Test the dialogue/base.py.\"\"\"\n\n @classmethod\n def setup(cls):\n \"\"\"Initialise the class.\"\"\"\n cls.dialogue_label = DialogueLabel(\n dialogue_reference=(str(0), \"\"),\n dialogue_opponent_addr=\"opponent\",\n dialogue_starter_addr=\"starter\",\n )\n cls.dialogue = Dialogue(dialogue_label=cls.dialogue_label)\n cls.dialogues = Dialogues(\"address\")\n\n def test_dialogue_label(self):\n \"\"\"Test the dialogue_label.\"\"\"\n assert self.dialogue_label.dialogue_starter_reference == str(0)\n assert self.dialogue_label.dialogue_responder_reference == \"\"\n assert self.dialogue_label.dialogue_opponent_addr == \"opponent\"\n assert self.dialogue_label.dialogue_starter_addr == \"starter\"\n assert str(self.dialogue_label) == \"{}_{}_{}_{}\".format(\n self.dialogue_label.dialogue_starter_reference,\n self.dialogue_label.dialogue_responder_reference,\n self.dialogue_label.dialogue_opponent_addr,\n self.dialogue_label.dialogue_starter_addr,\n )\n\n dialogue_label2 = DialogueLabel(\n dialogue_reference=(str(0), \"\"),\n dialogue_opponent_addr=\"opponent\",\n dialogue_starter_addr=\"starter\",\n )\n\n assert dialogue_label2 == self.dialogue_label\n\n dialogue_label3 = \"This is a test\"\n\n assert not dialogue_label3 == self.dialogue_label\n\n assert hash(self.dialogue_label) == hash(self.dialogue.dialogue_label)\n\n assert self.dialogue_label.json == dict(\n dialogue_starter_reference=str(0),\n dialogue_responder_reference=\"\",\n dialogue_opponent_addr=\"opponent\",\n dialogue_starter_addr=\"starter\",\n )\n assert DialogueLabel.from_json(self.dialogue_label.json) == self.dialogue_label\n\n def test_dialogue(self):\n \"\"\"Test the dialogue.\"\"\"\n assert self.dialogue.is_self_initiated\n msg = DefaultMessage(\n dialogue_reference=(\"\", \"\"),\n message_id=1,\n target=0,\n performative=DefaultMessage.Performative.BYTES,\n content=b\"Hello\",\n )\n msg.counterparty = \"my_agent\"\n assert self.dialogue.last_incoming_message is None\n assert self.dialogue.last_outgoing_message is None\n\n self.dialogue._outgoing_messages.extend([msg])\n assert self.dialogue.last_outgoing_message == msg\n\n self.dialogue._incoming_messages.extend([msg])\n assert self.dialogue.last_incoming_message == msg\n\n def test_dialogues(self):\n 
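Before the test class below, a generic sketch (plain Python, not the AEA API) of what a VALID_REPLIES-style mapping encodes: for each performative, the set of performatives allowed as the next message.

valid_replies = {
    'cfp': frozenset({'propose', 'decline'}),
    'propose': frozenset({'accept', 'decline'}),
}

def is_valid_reply(last, candidate):
    # a reply is valid only if the rules list it for the preceding performative
    return candidate in valid_replies.get(last, frozenset())

print(is_valid_reply('cfp', 'propose'))  # True
print(is_valid_reply('cfp', 'accept'))   # False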
\"\"\"Test the dialogues.\"\"\"\n assert isinstance(self.dialogues.dialogues, Dict)\n id = self.dialogues._next_dialogue_nonce()\n assert id > 0\n","sub_path":"tests/test_helpers/test_dialogue/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":6911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"522017003","text":"#!/usr/bin/env python3\n\n# takes a PSMC' (msmc) output file as input and prints a simple list of the minimal parameters\n\nimport sys, argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"msmc\", help=\"path/prefix for input MSMC files\")\nargs = parser.parse_args()\n\n\nTCs = []\n\nwith open(args.msmc + '.final.txt','r') as infile:\n\t# skip the header\n\tfor line in infile:\n\t\tif not line.startswith('time'):\n\t\t\tMSMCparams = [float(x) for x in line.split()]\n\t\t\t# MSMCparams should be [index, mu t_left, mu t_right, 1/(2Ne(t) mu)]\n\t\t\t# we just want [mu t_left, 1/(2Ne(t) mu)]\n\t\t\tif TCs == [] or MSMCparams[-1] != TCs[-1][-1]: #there has been a pop size change from the previous time step\n\t\t\t\tTCs.append([MSMCparams[1], MSMCparams[-1]])\n\t\t\t\t\nprint('\\n'.join(' '.join(str(x) for x in timestep) for timestep in TCs))\n\n\n","sub_path":"msmc2math.py","file_name":"msmc2math.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"260650662","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport random\nimport math\n\nclass SA(object):\n def __init__(self, furniture_size_info, room_size=[55, 40], T=-1, alpha=-1, stopping_T=-1, stopping_iter=-1):\n self.room_size = room_size\n self.furniture_size_info = furniture_size_info\n self.stopping_t = 1e-8 if stopping_T == -1 else stopping_T\n self.alpha = 0.995 if alpha == -1 else alpha\n self.stopping_iter = 1e5 if stopping_iter == -1 else stopping_iter\n self.T = math.sqrt(self.N) if T == -1 else T\n self.iteration = 1\n\n self.best_solution = None\n self.best_fitness = float(\"Inf\")\n self.fitness_list = []\n\n def init_solution(self):\n free_nodes = self.coords.copy()\n cur = free_nodes[random.choice([i for i in range(self.N)])]\n solution = [cur]\n free_nodes.remove(cur)\n while free_nodes:\n next_node = min(free_nodes, key=lambda x: self.dist(cur, x))\n solution.append(next_node)\n cur = next_node\n free_nodes.remove(cur)\n\n cur_fitness = self.fitness(solution)\n if cur_fitness < self.best_fitness:\n self.best_fitness = cur_fitness\n self.best_solution = solution\n return solution, cur_fitness\n\n def calculate_overlapping_area(self, coord1, coord2):\n # Use Monte-Carlo Simulation to calculate overlapping area\n def slope(x1, y1, x2, y2):\n return 1.0 * (y2 - y1) / (x2 - x1)\n def intercept(x1, y1, x2, y2):\n return 1.0 * (x1 * y2 - y1 * x2) / (x1 - x2)\n k1 = slope(coord1[0][0], coord1[0][1], coord1[1][0], coord1[1][1])\n k2 = slope(coord1[2][0], coord1[2][1], coord1[1][0], coord1[1][1])\n b1 = intercept(coord1[0][0], coord1[0][1], coord1[1][0], coord1[1][1])\n b2 = intercept(coord1[2][0], coord1[2][1], coord1[1][0], coord1[1][1])\n b3 = intercept(coord1[2][0], coord1[2][1], coord1[3][0], coord1[3][1])\n b4 = intercept(coord1[0][0], coord1[0][1], coord1[3][0], coord1[3][1])\n i = 0\n count = 0\n while i < 100000:\n x = random.uniform(0, self.room_size[0])\n y = random.uniform(0, self.room_size[1])\n if (k1 * x + b1 - y) * (k1 * x + b3 - y) < 0 and (k2 * x + b2 - y) * (k2 * x + b4 - y) < 0:\n count += 1\n return 
1.0 * count / 100000 * 55 * 40\n\n def fitness(self, solution):\n distance = 0\n for i in range(self.N):\n distance += self.dist(solution[i], solution[(i + 1) % self.N])\n return distance\n\n def p_accept(self, candidate_fitness):\n prob = math.exp(-abs(candidate_fitness - self.cur_fitness) / self.T)\n return prob\n\n def accept(self, candidate):\n candidate_fitness = self.fitness(candidate)\n if candidate_fitness < self.cur_fitness:\n self.cur_fitness = candidate_fitness\n self.cur_solution = candidate\n if candidate_fitness < self.best_fitness:\n self.best_solution = candidate\n self.best_fitness = candidate_fitness\n else:\n if random.random() < self.p_accept(candidate_fitness):\n self.cur_fitness = candidate_fitness\n self.cur_solution = candidate\n\n def generate_neighbour(self, candidate):\n l = random.randint(2, self.N - 1)\n i = random.randint(0, self.N - l)\n candidate[i : i + l] = reversed(candidate[i : i + l])\n return candidate\n\n def simulated_annealing(self):\n self.cur_solution, self.cur_fitness = self.init_solution()\n print(\"Starting annealing\")\n while self.T > self.stopping_t and self.iteration < self.stopping_iter:\n candidate = list(self.cur_solution)\n n_candidate = self.generate_neighbour(candidate)\n self.accept(n_candidate)\n self.T *= self.alpha\n self.iteration += 1\n\n self.fitness_list.append(self.cur_fitness)\n\n print(\"Best fitness obtained: \", self.best_fitness)\n improvement = 100 * (self.fitness_list[0] - self.best_fitness) / (self.fitness_list[0])\n print(f\"Improvement over greedy heuristic: {improvement : .2f}%\")\n\n def plot_best_solution(self):\n x = []\n y = []\n for i in range(self.N):\n x.append(self.coords[i][0])\n y.append(self.coords[i][1])\n\n\n\n\n\n\n\n","sub_path":"Simulated_Annealing.py","file_name":"Simulated_Annealing.py","file_ext":"py","file_size_in_byte":4407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"23296442","text":"##############################################################################\n# Advanced Forecasting Models with Python #\n# Non-Gaussian General Auto Regressive Conditional Heteroscedasticity Models #\n# (c) Diego Fernandez Garcia 2015-2018 #\n# www.exfinsis.com #\n##############################################################################\n\n# 1. Packages Importing\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport statsmodels.graphics.tsaplots as tsp\nimport statsmodels.tsa.statespace.sarimax as sarima\nimport statsmodels.stats.diagnostic as st\nimport statsmodels.stats.stattools as jb\nimport arch.univariate as arch\nimport statsmodels.tools.eval_measures as fa\n\n##########################################\n\n# 2. Advanced Forecasting Models Data\n\n# 2.1. Data Reading\nspy = pd.read_csv('Data//Advanced-Forecasting-Models-Data.txt', index_col='Date', parse_dates=True)\nspy = spy.asfreq('B')\nspy = spy.fillna(method='ffill')\nprint('')\nprint('== Data Ranges Length ==')\nprint('')\nprint('Full Range Days: ', len(spy))\nprint('Full Range Months: ', np.round(len(spy)/22, 4))\nprint('')\n\n# 2.2. Training Range Delimiting\nspyt = spy[:'2013-12-31']\nprint('Training Range Days: ', len(spyt))\nprint('Training Range Months: ', np.round(len(spyt)/22, 4))\nprint('')\n\n# 2.3. Testing Range Delimiting\nspyf = spy['2014-01-02':]\nprint('Testing Range Days: ', len(spyf))\nprint('Testing Range Months: ', np.round(len(spyf)/22, 4))\nprint('')\n\n# 2.4. 
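A small demonstration of the acceptance rule implemented in p_accept/accept in the annealer above: a worse candidate is accepted with probability exp(-|Δfitness| / T), so acceptance becomes rare as T is annealed towards zero.

import math
import random

def accept_worse(delta, T):
    return random.random() < math.exp(-abs(delta) / T)

random.seed(0)
for T in (10.0, 1.0, 0.1):
    rate = sum(accept_worse(1.0, T) for _ in range(10000)) / 10000
    print(T, round(rate, 3))  # roughly exp(-1/T): ~0.905, ~0.368, ~0.000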
Training and Testing Ranges Chart\nfig1, ax = plt.subplots()\nax.plot(spyt, label='spyt')\nax.plot(spyf, label='spyf')\nplt.legend(loc='upper left')\nplt.title('SPY 2007-2015')\nplt.ylabel('Price')\nplt.xlabel('Date')\nplt.show()\n\n##########################################\n\n# 3. Auto Regressive Integrated Moving Average Models\n\n# 3.2. ARIMA(0,1,0) Model\n\n# 3.2.1. Multi-Steps Forecast\nrwdt1 = sarima.SARIMAX(spyt, order=(0, 1, 0), trend='c').fit(disp=-1)\nprint('')\nprint('== ARIMA(0,1,0) Model (spyt) ==')\nprint('')\nprint(rwdt1.summary())\nprint('')\nrwdf1 = rwdt1.forecast(steps=len(spyf))\nrwdf1 = pd.DataFrame(rwdf1).set_index(spyf.index)\n\nfig2, ax = plt.subplots()\nax.plot(spyt, label='spyt')\nax.plot(spyf, label='spyf')\nax.plot(rwdf1, label='rwdf1')\nplt.legend(loc='upper left')\nplt.title('ARIMA(0,1,0) Model 1')\nplt.ylabel('Price')\nplt.xlabel('Date')\nplt.show()\n\n# 3.2.2. One-Step Forecast without Re-Estimation\nrwdf2 = sarima.SARIMAX(spy.tail(len(spyf)+1), order=(0, 1, 0), trend='c').smooth(params=rwdt1.params)\nrwdf2 = rwdf2.fittedvalues.tail(len(spyf))\nrwdf2 = pd.DataFrame(rwdf2).set_index(spyf.index)\n\nfig3, ax = plt.subplots()\nax.plot(spyt, label='spyt')\nax.plot(spyf, label='spyf')\nax.plot(rwdf2, label='rwdf2', linestyle=':')\nplt.legend(loc='upper left')\nplt.title('ARIMA(0,1,0) Model 2')\nplt.ylabel('Price')\nplt.xlabel('Date')\nplt.show()\n\n##########################################\n\n# 4. General Auto Regressive Conditional Heteroscedasticity Models\n\n# 4.1.3. Time Series Level Differencing\ndspy = spy-spy.shift(1)\ndspy = dspy.fillna(method='bfill')\ndspyt = dspy[:'2013-12-31']\ndspyf = dspy['2014-01-02':]\n\n# 4.6. ARIMA(0,1,0)-GJR-GARCH(1,1)\n\n# 4.6.1. Multi-Steps Forecast\nrwdtgarcht1 = arch.ConstantMean(y=dspyt, volatility=arch.GARCH(p=1, o=1, q=1),\n distribution=arch.Normal()).fit()\nprint('')\nprint('== ARIMA(0,1,0)-GJR-GARCH(1,1) Model (dspyt) ==')\nprint('')\nprint(rwdtgarcht1.summary())\nprint('')\nrwdtgarchf1 = pd.DataFrame(pd.concat([spyt.tail(1)]*len(spyf))).set_index(spyf.index)\nrwdtgarchf1['SPY.CondDrift'] = pd.DataFrame(pd.concat(\n [rwdtgarcht1.forecast().mean.tail(1)]*len(spyf))).set_index(spyf.index).cumsum()\nrwdtgarchf1 = rwdtgarchf1.sum(axis=1)\nrwdtgarchf1 = pd.DataFrame(rwdtgarchf1).set_index(spyf.index)\n\nfig6, ax = plt.subplots()\nax.plot(spyt, label='spyt')\nax.plot(spyf, label='spyf')\nax.plot(rwdtgarchf1, label='rwdtgarchf1')\nplt.legend(loc='upper left')\nplt.title('ARIMA(0,1,0)-GJR-GARCH(1,1) Model 1')\nplt.ylabel('Price')\nplt.xlabel('Date')\nplt.show()\n\n# 4.6.2. One-Step Forecast without Re-Estimation\nrwdtgarchf2 = spy.shift(1)['2014-01-02':]\nrwdtgarchf2['SPY.CondMean'] = pd.DataFrame(pd.concat(\n [rwdtgarcht1.forecast().mean.tail(1)]*len(spyf))).set_index(spyf.index)\nrwdtgarchf2 = rwdtgarchf2.sum(axis=1)\nrwdtgarchf2 = pd.DataFrame(rwdtgarchf2).set_index(spyf.index)\n\nfig7, ax = plt.subplots()\nax.plot(spyt, label='spyt')\nax.plot(spyf, label='spyf')\nax.plot(rwdtgarchf2, label='rwdtgarchf2', linestyle=':')\nplt.legend(loc='upper left')\nplt.title('ARIMA(0,1,0)-GJR-GARCH(1,1) Model 2')\nplt.ylabel('Price')\nplt.xlabel('Date')\nplt.show()\n\n##########################################\n\n# 5. Non-Gaussian General Auto Regressive Conditional Heteroscedasticity Models\n\n# 5.1. Normality Tests\n\n# 5.1.1. 
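The ARIMA(0,1,0)-with-constant fit used in this script is a random walk with drift, so its h-step-ahead forecast is just the last observed price plus h times the estimated drift. A tiny numeric sketch with hypothetical values:

import numpy as np

last_price, drift, horizon = 205.5, 0.04, 5
forecast = last_price + drift * np.arange(1, horizon + 1)
print(forecast)  # [205.54 205.58 205.62 205.66 205.7]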
Jarque-Bera Normality Test\nrwdtgarcht1sres = rwdtgarcht1.resid.dropna()/rwdtgarcht1.conditional_volatility\njbrwdtgarcht1 = jb.jarque_bera(rwdtgarcht1sres)\nprint('')\nprint('== Jarque-Bera Normality Tests (rwdtgarcht1sres) ==')\nprint('')\nprint('Jarque-Bera Test Chi-Squared P-Value (rwdtgarcht1sres): ', np.round(jbrwdtgarcht1[1], 4))\nprint('')\n\n##########################################\n\n# 5.2. ARIMA(0,1,0)-GARCH-t(1,1)\n\n# 5.2.1. Multi-Steps Forecast\ntrwdgarcht1 = arch.ConstantMean(y=dspyt, volatility=arch.GARCH(p=1, o=0, q=1, power=2.0),\n distribution=arch.StudentsT()).fit()\nprint('')\nprint('== ARIMA(0,1,0)-GARCH-t(1,1) Model (dspyt) ==')\nprint('')\nprint(trwdgarcht1.summary())\nprint('')\ntrwdgarchf1 = pd.DataFrame(pd.concat([spyt.tail(1)]*len(spyf))).set_index(spyf.index)\ntrwdgarchf1['SPY.CondDrift'] = pd.DataFrame(pd.concat(\n [trwdgarcht1.forecast().mean.tail(1)]*len(spyf))).set_index(spyf.index).cumsum()\ntrwdgarchf1 = trwdgarchf1.sum(axis=1)\ntrwdgarchf1 = pd.DataFrame(trwdgarchf1).set_index(spyf.index)\n\nfig8, ax = plt.subplots()\nax.plot(spyt, label='spyt')\nax.plot(spyf, label='spyf')\nax.plot(trwdgarchf1, label='trwdgarchf1')\nplt.legend(loc='upper left')\nplt.title('ARIMA(0,1,0)-GARCH-t(1,1) Model 1')\nplt.ylabel('Price')\nplt.xlabel('Date')\nplt.show()\n\n# 5.2.2. One-Step Forecast without Re-Estimation\ntrwdgarchf2 = spy.shift(1)['2014-01-02':]\ntrwdgarchf2['SPY.CondMean'] = pd.DataFrame(pd.concat(\n [trwdgarcht1.forecast().mean.tail(1)]*len(spyf))).set_index(spyf.index)\ntrwdgarchf2 = trwdgarchf2.sum(axis=1)\ntrwdgarchf2 = pd.DataFrame(trwdgarchf2).set_index(spyf.index)\n\nfig9, ax = plt.subplots()\nax.plot(spyt, label='spyt')\nax.plot(spyf, label='spyf')\nax.plot(trwdgarchf2, label='trwdgarchf2', linestyle=':')\nplt.legend(loc='upper left')\nplt.title('ARIMA(0,1,0)-GARCH-t(1,1) Model 2')\nplt.ylabel('Price')\nplt.xlabel('Date')\nplt.show()\n\n##########################################\n\n# 5.3. ARIMA(1,1,0)-GARCH-t(1,1)\n\n# 5.3.1. Multi-Steps Forecast\ntdargarcht1 = arch.ARX(y=dspyt, volatility=arch.GARCH(p=1, o=0, q=1, power=2.0), lags=[1], constant=True,\n distribution=arch.StudentsT()).fit()\nprint('')\nprint('== ARIMA(1,1,0)-GARCH-t(1,1) Model (dspyt) ==')\nprint('')\nprint(tdargarcht1.summary())\nprint('')\ntdargarchf1 = pd.DataFrame(pd.concat([tdargarcht1.params[1]*dspyt.tail(1)]*len(spyf))).set_index(spyf.index)\ntdargarchf1['SPY.LastAdj'] = pd.DataFrame(pd.concat([spyt.tail(1)]*len(spyf))).set_index(spyf.index)\ntdargarchf1['SPY.CondDrift'] = pd.DataFrame(pd.concat(\n [tdargarcht1.forecast().mean.tail(1)]*len(spyf))).set_index(spyf.index).cumsum()\ntdargarchf1 = tdargarchf1.sum(axis=1)\ntdargarchf1 = pd.DataFrame(tdargarchf1).set_index(spyf.index)\n\nfig10, ax = plt.subplots()\nax.plot(spyt, label='spyt')\nax.plot(spyf, label='spyf')\nax.plot(tdargarchf1, label='tdargarchf1')\nplt.legend(loc='upper left')\nplt.title('ARIMA(1,1,0)-GARCH-t(1,1) Model 1')\nplt.ylabel('Price')\nplt.xlabel('Date')\nplt.show()\n\n# 5.3.2. 
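The normality check above standardises residuals by the fitted conditional volatility before testing. A hedged stand-alone sketch using scipy, with heavy-tailed draws standing in for GARCH residuals; a small p-value rejects normality, which is what motivates the Student-t variants in this section:

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
resid = rng.standard_t(df=4, size=2000)  # heavy-tailed stand-in residuals
vol = np.ones_like(resid)                # pretend unit conditional volatility
stat, pvalue = stats.jarque_bera(resid / vol)
print(round(pvalue, 4))                  # near 0 for heavy tails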
One-Step Forecast without Re-Estimation\ntdargarchf2 = dspy.shift(1)['2014-01-02':]\ntdargarchf2 = tdargarchf2*tdargarcht1.params[1]\ntdargarchf2['SPY.Adjusted(-1)'] = spy.shift(1)['2014-01-02':]\ntdargarchf2['SPY.CondMean'] = pd.DataFrame(pd.concat(\n [tdargarcht1.forecast().mean.tail(1)]*len(spyf))).set_index(spyf.index)\ntdargarchf2 = tdargarchf2.sum(axis=1)\ntdargarchf2 = pd.DataFrame(tdargarchf2).set_index(spyf.index)\n\nfig11, ax = plt.subplots()\nax.plot(spyt, label='spyt')\nax.plot(spyf, label='spyf')\nax.plot(tdargarchf2, label='tdargarchf2', linestyle=':')\nplt.legend(loc='upper left')\nplt.title('ARIMA(1,1,0)-GARCH-t(1,1) Model 2')\nplt.ylabel('Price')\nplt.xlabel('Date')\nplt.show()\n\n##########################################\n\n# 5.4. ARIMA(0,1,0)-EGARCH-t(1,1)\n\n# 5.4.1. Multi-Steps Forecast\ntrwdegarcht1 = arch.ConstantMean(y=dspyt, volatility=arch.EGARCH(p=1, o=1, q=1),\n distribution=arch.StudentsT()).fit()\nprint('')\nprint('== ARIMA(0,1,0)-EGARCH-t(1,1) Model (dspyt) ==')\nprint('')\nprint(trwdegarcht1.summary())\nprint('')\ntrwdegarchf1 = pd.DataFrame(pd.concat([spyt.tail(1)]*len(spyf))).set_index(spyf.index)\ntrwdegarchf1['SPY.CondDrift'] = pd.DataFrame(pd.concat(\n [trwdegarcht1.forecast().mean.tail(1)]*len(spyf))).set_index(spyf.index).cumsum()\ntrwdegarchf1 = trwdegarchf1.sum(axis=1)\ntrwdegarchf1 = pd.DataFrame(trwdegarchf1).set_index(spyf.index)\n\nfig12, ax = plt.subplots()\nax.plot(spyt, label='spyt')\nax.plot(spyf, label='spyf')\nax.plot(trwdegarchf1, label='trwdegarchf1')\nplt.legend(loc='upper left')\nplt.title('ARIMA(0,1,0)-EGARCH-t(1,1) Model 1')\nplt.ylabel('Price')\nplt.xlabel('Date')\nplt.show()\n\n# 5.4.2. One-Step Forecast without Re-Estimation\ntrwdegarchf2 = spy.shift(1)['2014-01-02':]\ntrwdegarchf2['SPY.CondMean'] = pd.DataFrame(pd.concat(\n [trwdegarcht1.forecast().mean.tail(1)]*len(spyf))).set_index(spyf.index)\ntrwdegarchf2 = trwdegarchf2.sum(axis=1)\ntrwdegarchf2 = pd.DataFrame(trwdegarchf2).set_index(spyf.index)\n\nfig13, ax = plt.subplots()\nax.plot(spyt, label='spyt')\nax.plot(spyf, label='spyf')\nax.plot(trwdegarchf2, label='trwdegarchf2', linestyle=':')\nplt.legend(loc='upper left')\nplt.title('ARIMA(0,1,0)-EGARCH-t(1,1) Model 2')\nplt.ylabel('Price')\nplt.xlabel('Date')\nplt.show()\n\n##########################################\n\n# 5.5. ARIMA(1,1,0)-EGARCH-t(1,1)\n\n# 5.5.1. Multi-Steps Forecast\ntdaregarcht1 = arch.ARX(y=dspyt, volatility=arch.EGARCH(p=1, o=1, q=1), lags=[1], constant=True,\n distribution=arch.StudentsT()).fit()\nprint('')\nprint('== ARIMA(1,1,0)-EGARCH-t(1,1) Model (dspyt) ==')\nprint('')\nprint(tdaregarcht1.summary())\nprint('')\ntdaregarchf1 = pd.DataFrame(pd.concat([tdaregarcht1.params[1]*dspyt.tail(1)]*len(spyf))).set_index(spyf.index)\ntdaregarchf1['SPY.LastAdj'] = pd.DataFrame(pd.concat([spyt.tail(1)]*len(spyf))).set_index(spyf.index)\ntdaregarchf1['SPY.CondDrift'] = pd.DataFrame(pd.concat(\n [tdaregarcht1.forecast().mean.tail(1)]*len(spyf))).set_index(spyf.index).cumsum()\ntdaregarchf1 = tdaregarchf1.sum(axis=1)\ntdaregarchf1 = pd.DataFrame(tdaregarchf1).set_index(spyf.index)\n\nfig14, ax = plt.subplots()\nax.plot(spyt, label='spyt')\nax.plot(spyf, label='spyf')\nax.plot(tdaregarchf1, label='tdaregarchf1')\nplt.legend(loc='upper left')\nplt.title('ARIMA(1,1,0)-EGARCH-t(1,1) Model 1')\nplt.ylabel('Price')\nplt.xlabel('Date')\nplt.show()\n\n# 5.5.2. 
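The one-step price forecasts in this file are rebuilt from models fitted on differences; for the AR(1) variants, the arithmetic behind the shift/params[1]/'SPY.CondMean' columns is price_hat(t) = price(t-1) + phi * diff(t-1) + conditional mean. A sketch with hypothetical numbers:

prev_price, prev_diff = 205.5, 0.8  # last observed price and price change
phi, cond_mean = 0.05, 0.03         # hypothetical AR(1) and mean terms
price_hat = prev_price + phi * prev_diff + cond_mean
print(round(price_hat, 2))          # 205.57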
One-Step Forecast without Re-Estimation\ntdaregarchf2 = dspy.shift(1)['2014-01-02':]\ntdaregarchf2 = tdaregarchf2*tdaregarcht1.params[1]\ntdaregarchf2['SPY.Adjusted(-1)'] = spy.shift(1)['2014-01-02':]\ntdaregarchf2['SPY.CondMean'] = pd.DataFrame(pd.concat(\n [tdaregarcht1.forecast().mean.tail(1)]*len(spyf))).set_index(spyf.index)\ntdaregarchf2 = tdaregarchf2.sum(axis=1)\ntdaregarchf2 = pd.DataFrame(tdaregarchf2).set_index(spyf.index)\n\nfig15, ax = plt.subplots()\nax.plot(spyt, label='spyt')\nax.plot(spyf, label='spyf')\nax.plot(tdaregarchf2, label='tdaregarchf2', linestyle=':')\nplt.legend(loc='upper left')\nplt.title('ARIMA(1,1,0)-EGARCH-t(1,1) Model 2')\nplt.ylabel('Price')\nplt.xlabel('Date')\nplt.show()\n\n##########################################\n\n# 5.6. ARIMA(0,1,0)-GJR-GARCH-t(1,1)\n\n# 5.6.1. Multi-Steps Forecast\ntrwdtgarcht1 = arch.ConstantMean(y=dspyt, volatility=arch.GARCH(p=1, o=1, q=1),\n distribution=arch.StudentsT()).fit()\nprint('')\nprint('== ARIMA(0,1,0)-GJR-GARCH-t(1,1) Model (dspyt) ==')\nprint('')\nprint(trwdtgarcht1.summary())\nprint('')\ntrwdtgarchf1 = pd.DataFrame(pd.concat([spyt.tail(1)]*len(spyf))).set_index(spyf.index)\ntrwdtgarchf1['SPY.CondDrift'] = pd.DataFrame(pd.concat(\n [trwdtgarcht1.forecast().mean.tail(1)]*len(spyf))).set_index(spyf.index).cumsum()\ntrwdtgarchf1 = trwdtgarchf1.sum(axis=1)\ntrwdtgarchf1 = pd.DataFrame(trwdtgarchf1).set_index(spyf.index)\n\nfig16, ax = plt.subplots()\nax.plot(spyt, label='spyt')\nax.plot(spyf, label='spyf')\nax.plot(trwdtgarchf1, label='trwdtgarchf1')\nplt.legend(loc='upper left')\nplt.title('ARIMA(0,1,0)-GJR-GARCH-t(1,1) Model 1')\nplt.ylabel('Price')\nplt.xlabel('Date')\nplt.show()\n\n# 5.6.2. One-Step Forecast without Re-Estimation\ntrwdtgarchf2 = spy.shift(1)['2014-01-02':]\ntrwdtgarchf2['SPY.CondMean'] = pd.DataFrame(pd.concat(\n [trwdtgarcht1.forecast().mean.tail(1)]*len(spyf))).set_index(spyf.index)\ntrwdtgarchf2 = trwdtgarchf2.sum(axis=1)\ntrwdtgarchf2 = pd.DataFrame(trwdtgarchf2).set_index(spyf.index)\n\nfig17, ax = plt.subplots()\nax.plot(spyt, label='spyt')\nax.plot(spyf, label='spyf')\nax.plot(trwdtgarchf2, label='trwdtgarchf2', linestyle=':')\nplt.legend(loc='upper left')\nplt.title('ARIMA(0,1,0)-GJR-GARCH-t(1,1) Model 2')\nplt.ylabel('Price')\nplt.xlabel('Date')\nplt.show()\n\n##########################################\n\n# 5.7. ARIMA(1,1,0)-GJR-GARCH-t(1,1)\n\n# 5.7.1. Multi-Steps Forecast\ntdartgarcht1 = arch.ARX(y=dspyt, volatility=arch.GARCH(p=1, o=1, q=1), lags=[1], constant=True,\n distribution=arch.StudentsT()).fit()\nprint('')\nprint('== ARIMA(1,1,0)-GJR-GARCH-t(1,1) Model (dspyt) ==')\nprint('')\nprint(tdartgarcht1.summary())\nprint('')\ntdartgarchf1 = pd.DataFrame(pd.concat([tdartgarcht1.params[1]*dspyt.tail(1)]*len(spyf))).set_index(spyf.index)\ntdartgarchf1['SPY.LastAdj'] = pd.DataFrame(pd.concat([spyt.tail(1)]*len(spyf))).set_index(spyf.index)\ntdartgarchf1['SPY.CondDrift'] = pd.DataFrame(pd.concat(\n [tdartgarcht1.forecast().mean.tail(1)]*len(spyf))).set_index(spyf.index).cumsum()\ntdartgarchf1 = tdartgarchf1.sum(axis=1)\ntdartgarchf1 = pd.DataFrame(tdartgarchf1).set_index(spyf.index)\n\nfig18, ax = plt.subplots()\nax.plot(spyt, label='spyt')\nax.plot(spyf, label='spyf')\nax.plot(tdartgarchf1, label='tdartgarchf1')\nplt.legend(loc='upper left')\nplt.title('ARIMA(1,1,0)-GJR-GARCH-t(1,1) Model 1')\nplt.ylabel('Price')\nplt.xlabel('Date')\nplt.show()\n\n# 5.7.2. 
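What the 'o' term adds in the GJR-GARCH fits in this section: negative shocks raise next-period variance by an extra gamma through an indicator. A minimal variance-recursion sketch with hypothetical parameters:

omega, alpha, gamma, beta = 0.02, 0.05, 0.10, 0.90

def gjr_next_var(eps, var):
    indicator = 1.0 if eps < 0 else 0.0  # fires only on negative shocks
    return omega + (alpha + gamma * indicator) * eps ** 2 + beta * var

print(gjr_next_var(-1.0, 1.0), gjr_next_var(1.0, 1.0))  # 1.07 vs 0.97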
One-Step Forecast without Re-Estimation\ntdartgarchf2 = dspy.shift(1)['2014-01-02':]\ntdartgarchf2 = tdartgarchf2*tdartgarcht1.params[1]\ntdartgarchf2['SPY.Adjusted(-1)'] = spy.shift(1)['2014-01-02':]\ntdartgarchf2['SPY.CondMean'] = pd.DataFrame(pd.concat(\n [tdartgarcht1.forecast().mean.tail(1)]*len(spyf))).set_index(spyf.index)\ntdartgarchf2 = tdartgarchf2.sum(axis=1)\ntdartgarchf2 = pd.DataFrame(tdartgarchf2).set_index(spyf.index)\n\nfig19, ax = plt.subplots()\nax.plot(spyt, label='spyt')\nax.plot(spyf, label='spyf')\nax.plot(tdartgarchf2, label='tdartgarchf2', linestyle=':')\nplt.legend(loc='upper left')\nplt.title('ARIMA(1,1,0)-GJR-GARCH-t(1,1) Model 2')\nplt.ylabel('Price')\nplt.xlabel('Date')\nplt.show()\n\n#########################################\n\n# 5.8. GARCH-t Model Selection\n\nrwdtgarchaict1 = rwdtgarcht1.aic\nrwdtgarchbict1 = rwdtgarcht1.bic\ntrwdgarchaict1 = trwdgarcht1.aic\ntrwdgarchbict1 = trwdgarcht1.bic\ntdargarchaict1 = tdargarcht1.aic\ntdargarchbict1 = tdargarcht1.bic\ntrwdegarchaict1 = trwdegarcht1.aic\ntrwdegarchbict1 = trwdegarcht1.bic\ntdaregarchaict1 = tdaregarcht1.aic\ntdaregarchbict1 = tdaregarcht1.bic\ntrwdtgarchaict1 = trwdtgarcht1.aic\ntrwdtgarchbict1 = trwdtgarcht1.bic\ntdartgarchaict1 = tdartgarcht1.aic\ntdartgarchbict1 = tdartgarcht1.bic\n\nmsdata = [{'0': '', '1': 'AIC', '2': 'BIC'},\n {'0': 'ARIMA(0,1,0)-GJR-GARCH(1,1) Model 1', '1': np.round(rwdtgarchaict1, 4),\n '2': np.round(rwdtgarchbict1, 4)},\n {'0': 'ARIMA(0,1,0)-GARCH-t(1,1) Model 1', '1': np.round(trwdgarchaict1, 4),\n '2': np.round(trwdgarchbict1, 4)},\n {'0': 'ARIMA(1,1,0)-GARCH-t(1,1) Model 1', '1': np.round(tdargarchaict1, 4),\n '2': np.round(tdargarchbict1, 4)},\n {'0': 'ARIMA(0,1,0)-EGARCH-t(1,1) Model 1', '1': np.round(trwdegarchaict1, 4),\n '2': np.round(trwdegarchbict1, 4)},\n {'0': 'ARIMA(1,1,0)-EGARCH-t(1,1) Model 1', '1': np.round(tdaregarchaict1, 4),\n '2': np.round(tdaregarchbict1, 4)},\n {'0': 'ARIMA(0,1,0)-GJR-GARCH-t(1,1) Model 1', '1': np.round(trwdtgarchaict1, 4),\n '2': np.round(trwdtgarchbict1, 4)},\n {'0': 'ARIMA(1,1,0)-GJR-GARCH-t(1,1) Model 1', '1': np.round(tdartgarchaict1, 4),\n '2': np.round(tdartgarchbict1, 4)},\n ]\nmstable = pd.DataFrame(msdata)\nprint('')\nprint('== GARCH-t Model Selection ==')\nprint('')\nprint(mstable)\nprint('')\n\n#########################################\n\n# 5.9. GARCH-t Models Forecasting Accuracy\n\n# 5.9.1. 
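The selection table in 5.8 compares fits by information criteria, which penalise parameter count: AIC = 2k - 2 ln L and BIC = k ln n - 2 ln L for k parameters and n observations. A quick sketch with hypothetical values:

import math

loglik, k, n = -2150.0, 5, 1760
aic = 2 * k - 2 * loglik
bic = k * math.log(n) - 2 * loglik
print(aic, round(bic, 2))  # 4310.0 and roughly 4337.4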
Multi-Steps Forecast\nrwdmae1 = fa.meanabs(rwdf1, spyf)\nrwdrmse1 = fa.rmse(rwdf1, spyf)\nrwdtgarchmae1 = fa.meanabs(rwdtgarchf1, spyf)\nrwdtgarchrmse1 = fa.rmse(rwdtgarchf1, spyf)\ntrwdgarchmae1 = fa.meanabs(trwdgarchf1, spyf)\ntrwdgarchrmse1 = fa.rmse(trwdgarchf1, spyf)\ntdargarchmae1 = fa.meanabs(tdargarchf1, spyf)\ntdargarchrmse1 = fa.rmse(tdargarchf1, spyf)\ntrwdegarchmae1 = fa.meanabs(trwdegarchf1, spyf)\ntrwdegarchrmse1 = fa.rmse(trwdegarchf1, spyf)\ntdaregarchmae1 = fa.meanabs(tdaregarchf1, spyf)\ntdaregarchrmse1 = fa.rmse(tdaregarchf1, spyf)\ntrwdtgarchmae1 = fa.meanabs(trwdtgarchf1, spyf)\ntrwdtgarchrmse1 = fa.rmse(trwdtgarchf1, spyf)\ntdartgarchmae1 = fa.meanabs(tdartgarchf1, spyf)\ntdartgarchrmse1 = fa.rmse(tdartgarchf1, spyf)\n\nfadata1 = [{'0': '', '1': 'MAE', '2': 'RMSE'},\n {'0': 'ARIMA(0,1,0) Model 1', '1': np.round(rwdmae1, 4),\n '2': np.round(rwdrmse1, 4)},\n {'0': 'ARIMA(0,1,0)-GJR-GARCH(1,1) Model 1', '1': np.round(rwdtgarchmae1, 4),\n '2': np.round(rwdtgarchrmse1, 4)},\n {'0': 'ARIMA(0,1,0)-GARCH-t(1,1) Model 1', '1': np.round(trwdgarchmae1, 4),\n '2': np.round(trwdgarchrmse1, 4)},\n {'0': 'ARIMA(1,1,0)-GARCH-t(1,1) Model 1', '1': np.round(tdargarchmae1, 4),\n '2': np.round(tdargarchrmse1, 4)},\n {'0': 'ARIMA(0,1,0)-EGARCH-t(1,1) Model 1', '1': np.round(trwdegarchmae1, 4),\n '2': np.round(trwdegarchrmse1, 4)},\n {'0': 'ARIMA(1,1,0)-EGARCH-t(1,1) Model 1', '1': np.round(tdaregarchmae1, 4),\n '2': np.round(tdaregarchrmse1, 4)},\n {'0': 'ARIMA(0,1,0)-GJR-GARCH-t(1,1) Model 1', '1': np.round(trwdtgarchmae1, 4),\n '2': np.round(trwdtgarchrmse1, 4)},\n {'0': 'ARIMA(1,1,0)-GJR-GARCH-t(1,1) Model 1', '1': np.round(tdartgarchmae1, 4),\n '2': np.round(tdartgarchrmse1, 4)},\n ]\nfatable1 = pd.DataFrame(fadata1)\nprint('')\nprint('== Multi-Steps Forecasting Accuracy ==')\nprint('')\nprint(fatable1)\nprint('')\n\n# 5.9.2. 
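The accuracy tables here rely on statsmodels' eval_measures; written out directly, MAE is the mean absolute error and RMSE the root of the mean squared error:

import numpy as np

forecast = np.array([205.6, 206.0, 206.3])
actual = np.array([205.4, 206.5, 206.2])
mae = np.mean(np.abs(forecast - actual))
rmse = np.sqrt(np.mean((forecast - actual) ** 2))
print(round(mae, 4), round(rmse, 4))  # 0.2667 0.3162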
One-Step Forecast without Re-Estimation\nrwdmae2 = fa.meanabs(rwdf2, spyf)\nrwdrmse2 = fa.rmse(rwdf2, spyf)\nrwdtgarchmae2 = fa.meanabs(rwdtgarchf2, spyf)\nrwdtgarchrmse2 = fa.rmse(rwdtgarchf2, spyf)\ntrwdgarchmae2 = fa.meanabs(trwdgarchf2, spyf)\ntrwdgarchrmse2 = fa.rmse(trwdgarchf2, spyf)\ntdargarchmae2 = fa.meanabs(tdargarchf2, spyf)\ntdargarchrmse2 = fa.rmse(tdargarchf2, spyf)\ntrwdegarchmae2 = fa.meanabs(trwdegarchf2, spyf)\ntrwdegarchrmse2 = fa.rmse(trwdegarchf2, spyf)\ntdaregarchmae2 = fa.meanabs(tdaregarchf2, spyf)\ntdaregarchrmse2 = fa.rmse(tdaregarchf2, spyf)\ntrwdtgarchmae2 = fa.meanabs(trwdtgarchf2, spyf)\ntrwdtgarchrmse2 = fa.rmse(trwdtgarchf2, spyf)\ntdartgarchmae2 = fa.meanabs(tdartgarchf2, spyf)\ntdartgarchrmse2 = fa.rmse(tdartgarchf2, spyf)\n\nfadata2 = [{'0': '', '1': 'MAE', '2': 'RMSE'},\n {'0': 'ARIMA(0,1,0) Model 2', '1': np.round(rwdmae2, 4),\n '2': np.round(rwdrmse2, 4)},\n {'0': 'ARIMA(0,1,0)-GJR-GARCH(1,1) Model 2', '1': np.round(rwdtgarchmae2, 4),\n '2': np.round(rwdtgarchrmse2, 4)},\n {'0': 'ARIMA(0,1,0)-GARCH-t(1,1) Model 2', '1': np.round(trwdgarchmae2, 4),\n '2': np.round(trwdgarchrmse2, 4)},\n {'0': 'ARIMA(1,1,0)-GARCH-t(1,1) Model 2', '1': np.round(tdargarchmae2, 4),\n '2': np.round(tdargarchrmse2, 4)},\n {'0': 'ARIMA(0,1,0)-EGARCH-t(1,1) Model 2', '1': np.round(trwdegarchmae2, 4),\n '2': np.round(trwdegarchrmse2, 4)},\n {'0': 'ARIMA(1,1,0)-EGARCH-t(1,1) Model 2', '1': np.round(tdaregarchmae2, 4),\n '2': np.round(tdaregarchrmse2, 4)},\n {'0': 'ARIMA(0,1,0)-GJR-GARCH-t(1,1) Model 2', '1': np.round(trwdtgarchmae2, 4),\n '2': np.round(trwdtgarchrmse2, 4)},\n {'0': 'ARIMA(1,1,0)-GJR-GARCH-t(1,1) Model 2', '1': np.round(tdartgarchmae2, 4),\n '2': np.round(tdartgarchrmse2, 4)},\n ]\nfatable2 = pd.DataFrame(fadata2)\nprint('')\nprint('== One-Step without Re-Estimation Forecasting Accuracy ==')\nprint('')\nprint(fatable2)\nprint('')\n\n#########################################\n\n# 5.10. Residuals White Noise\n\ntrwdegarcht1sres = trwdegarcht1.resid.dropna()/trwdegarcht1.conditional_volatility\n\n# 5.10.1. Residuals No Auto-correlation\n\n# 5.10.1.1. Auto-correlation Functions ACF\ntsp.plot_acf(trwdegarcht1sres, lags=22, alpha=0.05)\nplt.title('Autocorrelation Function ACF (trwdegarcht1sres)')\nplt.show()\n\ntsp.plot_acf(rwdtgarcht1sres, lags=22, alpha=0.05)\nplt.title('Autocorrelation Function ACF (rwdtgarcht1sres)')\nplt.show()\n\n# 5.10.1.2. Ljung-Box Auto-correlation Tests\nlbtrwdegarcht1sres = st.acorr_ljungbox(trwdegarcht1sres, lags=22)\nlbrwdtgarcht1sres = st.acorr_ljungbox(rwdtgarcht1sres, lags=22)\nprint('')\nprint('== Ljung-Box Auto-Correlation Tests (trwdegarcht1sres, rwdtgarcht1sres) ==')\nprint('')\nprint('Ljung-Box Q Test Statistic P-Value (trwdegarcht1sres): ', np.round(lbtrwdegarcht1sres[1][21], 4))\nprint('Ljung-Box Q Test Statistic P-Value (rwdtgarcht1sres): ', np.round(lbrwdtgarcht1sres[1][21], 4))\nprint('')\n\n# 5.10.2. Residuals Homoscedasticity\n\n# 5.10.2.1. Auto Regressive Conditional Heteroscedasticity Tests\narchtrwdegarcht1sres = st.het_arch(trwdegarcht1sres)\narchrwdtgarcht1sres = st.het_arch(rwdtgarcht1sres)\nprint('')\nprint('== Auto Regressive Conditional Heteroscedasticity Tests (trwdegarcht1sres, rwdtgarcht1sres) ==')\nprint('')\nprint('ARCH Test Lagrange Multiplier P-Value (trwdegarcht1sres): ', np.round(archtrwdegarcht1sres[1], 4))\nprint('ARCH Test Lagrange Multiplier P-Value (rwdtgarcht1sres): ', np.round(archrwdtgarcht1sres[1], 4))\nprint('')\n\n# 5.10.3. Residuals Normality\n\n# 5.10.3.1. 
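A hedged usage note on the acorr_ljungbox calls above: older statsmodels releases return a (statistic, p-value) tuple of arrays, which is what the [1][21] indexing assumes; recent releases return a DataFrame, where the lag-22 p-value is read from the 'lb_pvalue' column instead.

import numpy as np
import statsmodels.stats.diagnostic as st

rng = np.random.default_rng(1)
res = st.acorr_ljungbox(rng.standard_normal(500), lags=22)
try:
    print(np.round(res[1][21], 4))                 # tuple-style return
except (KeyError, IndexError, TypeError):
    print(np.round(res['lb_pvalue'].iloc[-1], 4))  # DataFrame-style return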
Jarque-Bera Normality Tests\njbtrwdegarcht1sres = jb.jarque_bera(trwdegarcht1sres)\njbrwdtgarcht1sres = jb.jarque_bera(rwdtgarcht1sres)\nprint('')\nprint('== Jarque-Bera Normality Tests (trwdegarcht1sres, rwdtgarcht1sres) ==')\nprint('')\nprint('Jarque-Bera Test Chi-Squared P-Value (trwdegarcht1sres): ', np.round(jbtrwdegarcht1sres[1], 4))\nprint('Jarque-Bera Test Chi-Squared P-Value (rwdtgarcht1sres): ', np.round(jbrwdtgarcht1sres[1], 4))\nprint('')","sub_path":"Non-Gaussian GARCH Models.py","file_name":"Non-Gaussian GARCH Models.py","file_ext":"py","file_size_in_byte":23014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"592233810","text":"\nimport random\nimport os\nimport math\nimport numpy as np\n\nfrom SimulationInterface_Component.Vrep.AVrepFunctions import AVrepFunctions\nfrom SimulationInterface_Component.PerlinNoise import make_array, make_img\n\nimport settings\n\ntry:\n import vrep\nexcept:\n print('--------------------------------------------------------------')\n print('\"vrep.py\" could not be imported. This means very probably that')\n print('either \"vrep.py\" or the remoteApi library could not be found.')\n print('Make sure both are in the same folder as this file,')\n print('or appropriately adjust the file \"vrep.py\"')\n print('--------------------------------------------------------------')\n print('')\n\nclass VrepVirtualEnvironment(AVrepFunctions):\n\n def __init__(self, config):\n AVrepFunctions.__init__(self)\n self.config = config\n\n projectRootPath = os.path.split(os.path.dirname(os.path.abspath(__file__)))[0]\n self.scenePathAndName = settings.sceneLoadPath\n\n # Hier festgelegte Positionswerte, aus vrep ausgelesen und müssen mit diesen übereinstimmen.\n # Wird ein Element verschoben, muss der Positionswert hier angepasst werden.\n # Bei aktivem LoadScene, werden ebenfalls diese Werte herangezogen.\n self.position_wall1 = settings.POSITION_WALL1\n self.position_wall2 = settings.POSITION_WALL2\n self.position_wall3 = settings.POSITION_WALL3\n self.position_target = settings.POSITION_TARGET\n self.position_robot = settings.POSITION_ROBOT\n self.position_object = settings.POSITION_OBJECT\n self.position_ghostObject = settings.POSITION_GHOST_OBJECT\n self.position_ghostGripper = settings.POSITION_GHOST_GRIPPER\n self.position_camera = settings.POSITION_CAMERA\n self.position_rgb = settings.POSITION_RGB\n self.position_depth = settings.POSITION_DEPTH\n self.position_light = settings.POSITION_LIGHT\n self.position_table = settings.POSITION_TABLE\n self.position_obstacles = settings.POSITION_OBSTACLES\n\n self.orientation_wall1 = settings.ORIENTATION_WALL1\n self.orientation_wall2 = settings.ORIENTATION_WALL2\n self.orientation_wall3 = settings.ORIENTATION_WALL3\n self.orientation_target = settings.ORIENTATION_TARGET\n self.orientation_robot = settings.ORIENTATION_ROBOT\n self.orientation_object = settings.ORIENTATION_OBJECT\n self.orientation_ghostObject = settings.ORIENTATION_GHOST_OBJECT\n self.orientation_ghostGripper = settings.ORIENTATION_GHOST_GRIPPER\n self.orientation_camera = settings.ORIENTATION_CAMERA\n self.orientation_rgb = settings.ORIENTATION_RGB\n self.orientation_depth = settings.ORIENTATION_DEPTH\n self.orientation_light = settings.ORIENTATION_LIGHT\n self.orientation_table = settings.ORIENTATION_TABLE\n self.orientation_obstacles = settings.ORIENTATION_OBSTACLE\n\n self.handle_wall1 = 0\n self.handle_wall2 = 0\n self.handle_wall3 = 0\n self.handle_target = 0\n self.handle_robot = 0\n 
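A note on where self.clientID in the class below presumably comes from: the class inherits the remote-API session from AVrepFunctions. A minimal, hedged sketch of the usual connection handshake against a local V-REP/CoppeliaSim instance; the host, port, and the 'uArm' object name are assumptions:

import vrep

vrep.simxFinish(-1)  # close any stale remote-API connections first
client_id = vrep.simxStart('127.0.0.1', 19997, True, True, 5000, 5)
if client_id == -1:
    raise RuntimeError('could not reach the V-REP remote API server')
_, handle = vrep.simxGetObjectHandle(client_id, 'uArm', vrep.simx_opmode_blocking)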
self.handle_object = 0\n self.handle_ghostObject = 0\n self.handle_ghostGripper = 0\n self.handle_camera = 0\n self.handle_rgb = 0\n self.handle_depth = 0\n self.handle_light = 0\n self.handle_table = 0\n self.handle_obstacles = [0, 0]\n\n self.handles_all = {settings.TARGET_LABEL: self.handle_target,\n settings.WALL1_LABEL: self.handle_wall1,\n settings.WALL2_LABEL: self.handle_wall2,\n settings.WALL3_LABEL: self.handle_wall3,\n settings.ROBOT_LABEL: self.handle_robot,\n settings.OBJECT_LABEL: self.handle_object,\n settings.GHOST_OBJECT_LABEL: self.handle_ghostObject,\n settings.GHOST_GRIPPER_LABEL: self.handle_ghostGripper,\n settings.CAMERA_LABEL: self.handle_camera,\n settings.RGB_LABEL: self.handle_rgb,\n settings.DEPTH_LABEL: self.handle_depth,\n settings.LIGHT_LABEL: self.handle_light,\n settings.TABLE_LABLE: self.handle_table,\n settings.OBSTACLE_LABEL: self.handle_obstacles}\n\n self.position_all = {settings.TARGET_LABEL: self.position_target,\n settings.WALL1_LABEL: self.position_wall1,\n settings.WALL2_LABEL: self.position_wall2,\n settings.WALL3_LABEL: self.position_wall3,\n settings.ROBOT_LABEL: self.position_robot,\n settings.OBJECT_LABEL: self.position_object,\n settings.GHOST_OBJECT_LABEL: self.position_ghostObject,\n settings.GHOST_GRIPPER_LABEL: self.position_ghostGripper,\n settings.CAMERA_LABEL: self.position_camera,\n settings.RGB_LABEL: self.position_rgb,\n settings.DEPTH_LABEL: self.position_depth,\n settings.LIGHT_LABEL: self.position_light,\n settings.TABLE_LABLE: self.position_table,\n settings.OBSTACLE_LABEL: self.position_obstacles}\n\n self.orientation_all = {settings.TARGET_LABEL: self.orientation_target,\n settings.WALL1_LABEL:self.orientation_wall1,\n settings.WALL2_LABEL:self.orientation_wall2,\n settings.WALL3_LABEL:self.orientation_wall3,\n settings.ROBOT_LABEL: self.orientation_robot,\n settings.OBJECT_LABEL: self.orientation_object,\n settings.GHOST_OBJECT_LABEL: self.orientation_ghostObject,\n settings.GHOST_GRIPPER_LABEL: self.orientation_ghostGripper,\n settings.CAMERA_LABEL: self.orientation_camera,\n settings.RGB_LABEL: self.orientation_rgb,\n settings.DEPTH_LABEL: self.orientation_depth,\n settings.LIGHT_LABEL: self.orientation_light,\n settings.TABLE_LABLE: self.orientation_table,\n settings.OBSTACLE_LABEL: self.orientation_obstacles}\n\n self.vrep_name_all = {settings.TARGET_LABEL: settings.TARGET_VREP_LABEL,\n settings.WALL1_LABEL: settings.WALL1_VREP_LABEL,\n settings.WALL2_LABEL: settings.WALL2_VREP_LABEL,\n settings.WALL3_LABEL: settings.WALL3_VREP_LABEL,\n settings.ROBOT_LABEL: settings.ROBOT_VREP_LABEL,\n settings.OBJECT_LABEL: settings.OBJECT_VREP_LABEL,\n settings.GHOST_OBJECT_LABEL: settings.GHOST_OBJECT_VREP_LABEL,\n settings.GHOST_GRIPPER_LABEL: settings.GHOST_GRIPPER_LABEL,\n settings.CAMERA_LABEL: settings.CAMERA_VREP_LABEL,\n settings.RGB_LABEL: settings.RGB_VREP_LABEL,\n settings.DEPTH_LABEL: settings.DEPTH_VREP_LABEL,\n settings.LIGHT_LABEL: settings.LIGHT_VREP_LABEL,\n settings.TABLE_LABLE: settings.TABLE_VREP_LABEL,\n settings.OBSTACLE_LABEL: settings.OBSTACLE_VREP_LABEL}\n\n self.model_path_all = {settings.TARGET_LABEL: settings.TARGET_MODEL_PATH,\n settings.WALL1_LABEL: settings.WALL1_MODEL_PATH,\n settings.WALL2_LABEL: settings.WALL2_MODEL_PATH,\n settings.WALL3_LABEL: settings.WALL3_MODEL_PATH,\n settings.ROBOT_LABEL: settings.ROBOT_MODEL_PATH,\n settings.OBJECT_LABEL: settings.OBJECT_MODEL_PATH,\n settings.GHOST_OBJECT_LABEL: settings.GHOST_OBJECT_MODEL_PATH,\n settings.GHOST_GRIPPER_LABEL: 
settings.GHOST_GRIPPER_MODEL_PATH,\n settings.CAMERA_LABEL: settings.CAMERA_MODEL_PATH,\n settings.RGB_LABEL: settings.RGB_MODEL_PATH,\n settings.DEPTH_LABEL: settings.DEPTH_MODEL_PATH,\n settings.LIGHT_LABEL: settings.LIGHT_MODEL_PATH,\n settings.TABLE_LABLE: settings.TABLE_MODEL_PATH,\n settings.OBSTACLE_LABEL: settings.OBSTACLE_MODEL_PATH}\n\n def get_image(self, handle_visual):\n # liest Bilder der Kamera aus. Wird in nötiges Format gebracht und geflippt.\n err = -1\n image_rgb = []\n while err is not vrep.simx_return_ok:\n err, resolution, image_rgb = vrep.simxGetVisionSensorImage(self.clientID,\n handle_visual,\n 0,\n vrep.simx_opmode_blocking)\n\n img_reshape = np.reshape(image_rgb, [self.config[\"heightImagesToReturn\"], self.config[\"widthImagesToReturn\"], 3])\n image_rgb = np.array(img_reshape, dtype=np.uint8)\n return np.flipud(image_rgb)\n\n def insert_object(self, key, allHandles, stringData):\n\n _ = vrep.simx_return_ok\n if any(self.vrep_name_all[key] in s for s in stringData):\n _ = vrep.simxRemoveModel(self.clientID,\n allHandles[stringData.index(self.vrep_name_all[key])],\n vrep.simx_opmode_blocking)\n print(key + \": deleted\")\n if _ != 0: raise ValueError(\"removal of model failed:\" + key)\n\n # > insert model\n _, handle = vrep.simxLoadModel(self.clientID,\n modelPathAndName=self.model_path_all[key],\n options=1,\n operationMode=vrep.simx_opmode_blocking)\n if _ != 0: raise ValueError(\"insertion of model failed:\" + key)\n print(key + \": created\")\n\n # > sends String which should trigger a child script in vrep\n # the sent value represents the handle and the new name\n # child script should than rename the model.\n _ = vrep.simxSetStringSignal(self.clientID, \"renameDummySignal\", str(handle) + \" \" + self.vrep_name_all[key],\n vrep.simx_opmode_blocking)\n if _ != 0: raise ValueError(\"renaming of model failed:\" + key)\n\n _ = vrep.simxSetObjectPosition(self.clientID,\n handle,\n -1,\n self.position_all[key],\n vrep.simx_opmode_blocking)\n if _ != 0: raise ValueError(\"repositioning of model failed:\" + key)\n\n _ = vrep.simxSetObjectOrientation(self.clientID,\n handle,\n -1,\n self.orientation_all[key],\n vrep.simx_opmode_blocking)\n if _ != 0: raise ValueError(\"reorientation of model failed:\" + key)\n\n return handle\n\n def load_scene(self, object_list_to_insert_key):\n\n print(\"Start building environment\")\n\n _ = vrep.simxCloseScene(clientID=self.clientID,\n operationMode=vrep.simx_opmode_blocking)\n\n _ = vrep.simxLoadScene(clientID=self.clientID,\n scenePathAndName=self.scenePathAndName,\n options=1,\n operationMode=vrep.simx_opmode_blocking)\n if _ != 0: raise ValueError(\"loading of scene failed:\")\n\n\n # > receive all handles and names\n _, allHandles, intData, floatData, stringData = vrep.simxGetObjectGroupData(self.clientID,\n vrep.sim_appobj_object_type,\n 0,\n vrep.simx_opmode_blocking)\n\n # inserts all listet objects into the environment\n for key in object_list_to_insert_key:\n self.handles_all[key] = self.insert_object(key, allHandles, stringData)\n print(\"Building environment finished\")\n\n def change_orientation_randomly(self, key, orientation_min, orientation_max):\n # picks a random number in given range and rounds it to mm\n\n new_alpha = round( random.uniform(self.orientation_all[key][0] + orientation_min[0], self.orientation_all[key][0] + orientation_max[0]), 3)\n new_beta = round( random.uniform(self.orientation_all[key][1] + orientation_min[1], self.orientation_all[key][1] + orientation_max[1]), 3)\n new_gamma = round( 
random.uniform(self.orientation_all[key][2] + orientation_min[2], self.orientation_all[key][2] + orientation_max[2]), 3)\n\n self.orientation_all[key] = [new_alpha, new_beta, new_gamma]\n\n _ = vrep.simxSetObjectOrientation(self.clientID,\n self.handles_all[key],\n -1,\n self.orientation_all[key],\n vrep.simx_opmode_blocking)\n if _ != 0: raise ValueError(\"repositioning of model failed:\" + key)\n\n def change_position_randomly(self, key, position_min, position_max):\n # picks a random number in given range and rounds it to mm\n\n new_x = round( random.uniform(self.position_all[key][0] + position_min[0], self.position_all[key][0] + position_max[0]), 3)\n new_y = round( random.uniform(self.position_all[key][1] + position_min[1], self.position_all[key][1] + position_max[1]), 3)\n new_z = round( random.uniform(self.position_all[key][2] + position_min[2], self.position_all[key][2] + position_max[2]), 3)\n\n self.position_all[key] = [new_x, new_y, new_z]\n\n _ = vrep.simxSetObjectPosition(self.clientID,\n self.handles_all[key],\n -1,\n self.position_all[key],\n vrep.simx_opmode_blocking)\n if _ != 0: raise ValueError(\"repositioning of model failed:\" + key)\n\n def randomize_environment(self, target_left):\n\n # > Größe des Texturbildes in pixeln\n size = 500\n array = make_array(size)\n make_img(size, array, settings.wallTextureFileName)\n array = make_array(size)\n make_img(size, array, settings.tableTextureFileName)\n\n # replacing object and target\n # relativ Werte übergeben. Neue Position wird von Position in __init() und relativer Abweichung berechnet\n self.change_position_randomly(\"object\",\n self.config[\"randomPositionOffsetMin_Object\"],\n self.config[\"randomPositionOffsetMax_Object\"])\n if target_left:\n self.change_position_randomly(\"target\",\n self.config[\"randomPositionOffsetMin_TargetLeft\"],\n self.config[\"randomPositionOffsetMax_TargetLeft\"])\n else:\n self.change_position_randomly(\"target\",\n self.config[\"randomPositionOffsetMin_TargetRight\"],\n self.config[\"randomPositionOffsetMax_TargetRight\"])\n\n self.change_position_randomly(\"camera\",\n self.config[\"randomPositionOffsetMin_Camera\"],\n self.config[\"randomPositionOffsetMax_Camera\"])\n\n self.change_orientation_randomly(\"camera\",\n self.config[\"randomOrientationOffsetMin_Camera\"],\n self.config[\"randomOrientationOffsetMax_Camera\"])\n\n def set_obstacles(self, target_left, number_of_obstacles, number_of_obstacles_all):\n # > randomize obstacles\n\n # Tisch hat 120x80 cm\n # Unterteilung in 10x10cm Felder\n # Im Verfahrbereich vom uArm darf nichts stehen\n # Unterscheidung in 3 Fälle:\n # 2: hinter roboter\n # 1: neben roboter\n # 0: vor roboter\n # entsprechend können verschiedene Positionskombinationen gewählt werden\n\n # Abmaße der Felder sind hart Codiert und nur für den gegebenen Fall.\n # Verschiebung des Roboters muss hier berücksichtigt werden.\n\n area_list = []\n area_pos_list = [[], [], []]\n\n # put all possible positions in given area into list\n # x und y Werte der Gebiete wurden ausgemessen und hart codiert.\n # Bei änderung der Umwelt muss es entsprechend angepasst werden\n for x in range(45, 45+1, 10):\n for y in range(-35, 35+1, 10):\n area_pos_list[0].append([x,y])\n if target_left:\n for x in range(-15, 35+1, 10):\n for y in range(25, 35+1, 10):\n area_pos_list[1].append([x,y])\n else:\n for x in range(-15, 35+1, 10):\n for y in range(-35, -25+1, 10):\n area_pos_list[1].append([x,y])\n\n for x in range(-55, -24+1, 10):\n for y in range(-35, 35+1, 10):\n 
area_pos_list[2].append([x,y])\n\n # calc centimeter to meter for vrep\n for i in range(0, len(area_pos_list)):\n lst = area_pos_list[i]\n for j in range(0,len(lst)):\n lst[j] = [x / 100 for x in lst[j]]\n area_pos_list[i] = lst\n\n # draw random numbers of 0, 1, 2 and put them into list\n for i in range(0, number_of_obstacles):\n area = random.randint(0,2)\n area_list.append(area)\n\n # create list with numbers from 0 to NUMBER_OF_OBSTACLES_ALL\n #\n obstsacle_list = []\n for i in range(0,number_of_obstacles_all):\n obstsacle_list.append(i)\n\n # take position out of list and place object on given pos\n for area in area_list:\n rnd_idx = random.randint(0,len(area_pos_list[area]) - 1)\n pos = area_pos_list[area].pop(rnd_idx)\n rnd_idx = random.randint(0,len(obstsacle_list) - 1)\n i = obstsacle_list.pop(rnd_idx)\n\n # replace obstacle to position\n vrep_name = settings.VREP_OBSTACLE_BASE_STR + str(i)\n _, handles = vrep.simxGetObjectHandle(self.clientID,\n vrep_name, # name of object in vrep\n vrep.simx_opmode_blocking)\n\n _ = vrep.simxSetObjectPosition(self.clientID,\n handles,\n -1,\n [pos[0], pos[1], 0.75],\n vrep.simx_opmode_blocking)\n\n\n","sub_path":"DeepLearningProject/hs_robot_demonstrat/NeuralRobotControl/SimulationInterface_Component/Vrep/VrepVirtualEnvironment.py","file_name":"VrepVirtualEnvironment.py","file_ext":"py","file_size_in_byte":19599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"260885020","text":"# -*- coding: utf-8 -*-\n\n\nfrom dp_tornado.engine.helper import Helper as dpHelper\nfrom dp_tornado.engine.engine import Engine as dpEngine\n\n\n_s3_connection_ = None\n\n\ndef s3_connection():\n global _s3_connection_\n\n if _s3_connection_:\n return _s3_connection_\n\n try:\n from boto.s3.connection import S3Connection\n\n import logging\n logging.getLogger('boto').setLevel(logging.CRITICAL)\n\n _s3_connection_ = S3Connection\n\n except ImportError as e:\n raise e\n\n return _s3_connection_\n\n\nclass S3Bridge(dpEngine):\n def __init__(self, public, secret):\n try:\n from boto.s3.connection import S3Connection\n\n import logging\n logging.getLogger('boto').setLevel(logging.CRITICAL)\n\n except ImportError as e:\n raise e\n\n self.conn = S3Connection(public, secret)\n\n def bucket(self, bucket_name):\n return self.conn.get_bucket(bucket_name)\n\n def set_contents_from_file(self, bucket_name, key, fp, url=False):\n try:\n from boto.s3.key import Key\n except ImportError as e:\n raise e\n\n fp.seek(0)\n bucket = self.bucket(bucket_name)\n obj = Key(bucket)\n obj.key = key\n res = obj.set_contents_from_file(fp)\n\n if url:\n return obj.generate_url(0)\n else:\n return res\n\n def copy(self, from_bucket_name, from_key_name, to_bucket_name, to_key_name):\n try:\n from boto.s3.key import Key\n except ImportError as e:\n raise e\n\n from_bucket = self.bucket(from_bucket_name)\n from_key = Key(from_bucket)\n from_key.key = from_key_name\n\n to_bucket = self.bucket(to_bucket_name)\n return from_key.copy(to_bucket, to_key_name)\n\n\nclass S3Helper(dpHelper):\n def connect(self, public, secret):\n return S3Bridge(public, secret)\n\n def set_contents_from_file(self,\n aws_access_key_id,\n aws_secret_access_key,\n bucket_name,\n region_name,\n key,\n fp,\n url=False):\n import boto3\n\n s3 = boto3.client(\n service_name='s3',\n region_name=region_name,\n aws_access_key_id=aws_access_key_id,\n aws_secret_access_key=aws_secret_access_key)\n\n s3.upload_file(fp, bucket_name, key)\n\n def prepare_post(self,\n aws_access_key_id,\n 
aws_secret_access_key,\n                     bucket_name,\n                     key,\n                     region=None,\n                     success_action_redirect=None,\n                     max_content_length=None,\n                     expires_in=6000,\n                     acl=None):\n        if region:\n            return self._prepare_post_boto3(\n                aws_access_key_id=aws_access_key_id,\n                aws_secret_access_key=aws_secret_access_key,\n                bucket_name=bucket_name,\n                key=key,\n                region=region,\n                success_action_redirect=success_action_redirect,\n                max_content_length=max_content_length,\n                expires_in=expires_in,\n                acl=acl)\n\n        s3 = s3_connection()(\n            aws_access_key_id=aws_access_key_id,\n            aws_secret_access_key=aws_secret_access_key)\n\n        if not success_action_redirect:\n            fields = [{\"name\": \"success_action_status\", \"value\": \"201\"}]\n            conditions = ['{\"success_action_status\": \"201\"}']\n        else:\n            fields = [{\"name\": \"success_action_redirect\", \"value\": success_action_redirect}]\n            conditions = ['{\"success_action_redirect\": \"%s\"}' % success_action_redirect]\n\n        payload = s3.build_post_form_args(\n            bucket_name=bucket_name,\n            key=key,\n            expires_in=expires_in,\n            acl=acl,\n            max_content_length=max_content_length,\n            fields=fields,\n            conditions=conditions)\n\n        return payload\n\n    def _prepare_post_boto3(self,\n                            aws_access_key_id,\n                            aws_secret_access_key,\n                            bucket_name,\n                            key,\n                            region=None,\n                            success_action_redirect=None,\n                            max_content_length=None,\n                            expires_in=6000,\n                            acl=None):\n        import boto3\n\n        s3 = boto3.client(\n            service_name='s3',\n            region_name=region,\n            aws_access_key_id=aws_access_key_id,\n            aws_secret_access_key=aws_secret_access_key)\n\n        fields = {}\n        conditions = []\n\n        if success_action_redirect:\n            fields['success_action_redirect'] = success_action_redirect\n            conditions.append({'success_action_redirect': success_action_redirect})\n        else:\n            fields['success_action_status'] = '201'\n            conditions.append({'success_action_status': '201'})\n\n        if acl:\n            conditions.append({'acl': acl})\n\n        if max_content_length:\n            conditions.append([\"content-length-range\", 0, max_content_length])\n\n        payload = s3.generate_presigned_post(\n            Bucket=bucket_name,\n            Key=key,\n            Fields=fields,\n            Conditions=conditions,\n            ExpiresIn=expires_in)\n\n        return {\n            'action': payload['url'],\n            'fields': [{'name': k, 'value': v} for k, v in payload['fields'].items()]\n        }\n\n    def generate_url_with_filename(self,\n                                   aws_access_key_id,\n                                   aws_secret_access_key,\n                                   bucket_name,\n                                   key,\n                                   file_name,\n                                   disposition='attachment',\n                                   expires_in=6000):\n        s3 = s3_connection()(\n            aws_access_key_id=aws_access_key_id,\n            aws_secret_access_key=aws_secret_access_key)\n\n        response_headers = {\n            'response-content-disposition':\n                '%s; filename=\"%s\"' % (disposition, self.helper.url.urlencode(self.helper.string.to_str(file_name)))\n        }\n\n        return s3.generate_url(expires_in, 'GET', bucket_name, key, response_headers=response_headers)\n","sub_path":"dp_tornado/helper/aws/s3.py","file_name":"s3.py","file_ext":"py","file_size_in_byte":6540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"258729948","text":"import matplotlib.pyplot as plt\n\nfrom skimage import data\nfrom skimage import exposure\nfrom skimage.exposure import match_histograms\nfrom loadFaces import loadFaces\nimport cv2\nimport numpy as np\n\ndef HistogramMatch(image, reference):\n\t\"histogram match image to reference image\"\n\tmatched = match_histograms(image, reference, multichannel=True)\n\n\treturn matched\n\ndef hist_match(source, template, hsv = False):\n    \"\"\"\n    Adjust the pixel values of an image such that its histogram\n    matches that of a target 
image\n\n Arguments:\n -----------\n source: Image to transform; the histogram is computed over the flattened array\n template: Template image; can have different dimensions to source\n Returns:\n ----------\n The transformed output image\n \"\"\"\n \n if hsv:\n #we want to alter the hue, saturation and value to match instead\n source = cv2.cvtColor(source, cv2.COLOR_BGR2HSV)\n template = cv2.cvtColor(template, cv2.COLOR_BGR2HSV)\n\n source_shape = source.shape #original image shape that we want the output to match\n template_shape = template.shape\n #source = source.reshape(source_shape[2], source_shape[1], source_shape[0])\n #template = template.reshape(template_shape[2], template_shape[1], template_shape[0])\n\n matched = cv2.split(source)\n\n for i in range(0,len(matched)):\n \n #iterate through the chanels\n\n source_chanel = cv2.split(source)[i].ravel()\n template_chanel = cv2.split(template)[i].ravel()\n\n # get the set of unique pixel values and their corresponding indices and counts\n s_values, bin_idx, s_counts = np.unique(source_chanel, return_inverse=True, return_counts=True)\n t_values, t_counts = np.unique(template_chanel, return_counts=True)\n\n # take the cumsum of the counts and normalize by the number of pixels to\n # get the empirical cumulative distribution functions for the source and\n # template images (maps pixel value --> quantile)\n s_quantiles = np.cumsum(s_counts).astype(np.float64)\n s_quantiles /= s_quantiles[-1]\n t_quantiles = np.cumsum(t_counts).astype(np.float64)\n t_quantiles /= t_quantiles[-1]\n\n # interpolate linearly to find the pixel values in the template image\n # that correspond most closely to the quantiles in the source image\n interp_t_values = np.interp(s_quantiles, t_quantiles, t_values)\n\n matched_chanel = interp_t_values[bin_idx].reshape(cv2.split(source)[i].shape)\n\n matched[i] = matched_chanel\n\n matched = cv2.merge(matched).astype(\"uint8\")\n\n if hsv == True:\n matched = cv2.cvtColor(matched, cv2.COLOR_HSV2BGR)\n\n #orig_source = cv2.cvtColor(orig_source, cv2.COLOR_HSV2BGR)\n\n return matched\n\n\nif __name__ == '__main__':\n\t\n\tsrc_path = r'steve_buscemi\\stevebfaces\\30r_frame40800.jpg'\n\tref_path = r'mean_girls\\faces720p\\lindsay\\l_0_frame52030.jpg'\n\n\tsource = cv2.imread(src_path)\n\tcv2.imshow('source',source) #show image \n\tcv2.waitKey(0)\n\treference = cv2.imread(ref_path)\n\tcv2.imshow('reference',reference) #show image \n\tcv2.waitKey(0)\n\t#matched = HistogramMatch(image, reference)\n\t#image = np.asarray(image)\n\t#reference = np.asarray(reference)\n\tmatched = hist_match(source, reference, hsv = False)\n\tcv2.imshow('matched',matched) #show image \n\tcv2.waitKey(0)\n","sub_path":"HistogramMatch.py","file_name":"HistogramMatch.py","file_ext":"py","file_size_in_byte":3346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"226488985","text":"import sys\n\nmatch = 1\nsubstitutions = -2\ngap = -2\n\ndef get_matrix(strA, strB):\n\trow = len(strA) + 1\n\tcol = len(strB) + 1\n\n\tmatrix = [[0] * col for i in range(row)]\n\tresult = [[\"\" for j in range(col)] for i in range(row)]\n\n\tmax_score = None\n\tmax_location = None, None\n\n\tbest_location = (0, 0)\n\tfor j in range(col):\n\t\tfor i in range(row):\n\t\t\tif j == 0:\n\t\t\t\tmatrix[i][j] = 0\n\t\t\telif i == 0:\n\t\t\t\tmatrix[i][j] = i*gap\n\t\t\telse:\n\t\t\t\ta = matrix[i][j - 1] + gap\n\t\t\t\tb = matrix[i - 1][j] + gap\n\t\t\t\tc = matrix[i - 1][j - 1]\n\t\t\t\tif strA[i - 1] == strB[j - 1]:\n\t\t\t\t\tc 
+{"seq_id":"226488985","text":"import sys\n\nmatch = 1\nsubstitutions = -2\ngap = -2\n\ndef get_matrix(strA, strB):\n\trow = len(strA) + 1\n\tcol = len(strB) + 1\n\n\tmatrix = [[0] * col for i in range(row)]\n\n\tmax_score = None\n\tmax_location = None, None\n\n\tfor j in range(col):\n\t\tfor i in range(row):\n\t\t\tif j == 0:\n\t\t\t\tmatrix[i][j] = 0\n\t\t\telif i == 0:\n\t\t\t\tmatrix[i][j] = j*gap  # first row: the gap penalty accumulates along strB\n\t\t\telse:\n\t\t\t\ta = matrix[i][j - 1] + gap\n\t\t\t\tb = matrix[i - 1][j] + gap\n\t\t\t\tif strA[i - 1] == strB[j - 1]:\n\t\t\t\t\tc = matrix[i - 1][j - 1] + match\n\t\t\t\telse:\n\t\t\t\t\tc = matrix[i - 1][j - 1] + substitutions\n\t\t\t\tmatrix[i][j] = max(a, b, c)\n\t\t\tif max_score is None or (matrix[i][j] >= max_score and i == row - 1):\n\t\t\t\tmax_score = matrix[i][j]\n\t\t\t\tmax_location = i, j\n\treturn matrix, max_score, max_location\n\ndef get_path(strA, strB, matrix, end_point):\n\tempty = \"-\"\n\tresults = ['', '']\n\ti, j = end_point\n\n\twhile j > 0 and i > 0:\n\t\tif matrix[i][j] == matrix[i][j - 1] + gap:\n\t\t\tresults[0] = empty + results[0]\n\t\t\tresults[1] = strB[j - 1] + results[1]\n\t\t\tj -= 1\n\t\telif matrix[i][j] == matrix[i - 1][j] + gap:\n\t\t\tresults[0] = strA[i - 1] + results[0]\n\t\t\tresults[1] = empty + results[1]\n\t\t\ti -= 1\n\t\telse:\n\t\t\tresults[0] = strA[i - 1] + results[0]\n\t\t\tresults[1] = strB[j - 1] + results[1]\n\t\t\ti -= 1\n\t\t\tj -= 1\n\n\treturn results\n\ndef main():\n\tstrA = sys.argv[1]\n\tstrB = sys.argv[2]\n\tmatrix, max_score, max_location = get_matrix(strA, strB)\n\tsuffix, prefix = get_path(strA, strB, matrix, max_location)\n\tprint(max_score)\n\tprint(suffix)\n\tprint(prefix)\n\nif __name__ == \"__main__\":\n    main()","sub_path":"hw1/q2.py","file_name":"q2.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"82386130","text":"# -*- coding: utf-8 -*- # NOQA\n# DarkSky forecast data https://api.darksky.net/forecast/b2d7cffe68190165c896172ddcec98c9/37.8267,-122.4233\n# It returns the current forecast for the next week\n# This page describes the values returned: https://darksky.net/dev/docs#api-request-types\n# Or just look at this page for an example: https://darksky.net/dev/docs\n\nfrom PyQt4 import QtNetwork\n#from PyQt4.QtGui import QPixmap, QMovie, QBrush, QColor, QPainter\nfrom PyQt4.QtCore import QUrl\n#from PyQt4.QtCore import Qt\nfrom PyQt4.QtNetwork import QNetworkReply\nfrom PyQt4.QtNetwork import QNetworkRequest\n#from subprocess import Popen\nimport urllib2\nimport json\n\nimport Config\nimport ApiKeys\nimport logging\nlogger = logging.getLogger(__name__)\n\n'''\nDarkSky defines some strings to describe weather icons;\nsee this page for more info: https://darksky.net/dev/docs#data-point-object\n'''\n# Here is a list of names, with mapping to icons stored locally\nicon_names = {\n'clear-day': 'clear.png',\n'clear-night': 'n_clear.png',\n'rain': 'rain.png',\n'snow': 'snow.png',\n'sleet': 'sleet.png',\n'wind': '',\n'fog': 'fog.png',\n'cloudy': 'cloudy.png',\n'partly-cloudy-day': 'partlycloudy.png',\n'partly-cloudy-night': 'n_partlycloudy.png',\n}\n\nclass WxData:\n    def __init__(self):\n        self.wxdata = None\n        self.wxurl = Config.darkPrefix + ApiKeys.darksky_key\n        self.wxurl += '/' + str(Config.primary_coordinates[0]) + ',' + str(Config.primary_coordinates[1])\n        self.wxurl += '?exclude=minutely&units=us'\n        logger.debug('wxurl='+self.wxurl)\n        self.hasData = False\n    def getwx(self):\n        self.hasData = False\n        if False:  # Qt async path disabled; fetch synchronously with urllib2 instead\n            r = QUrl(self.wxurl)\n            r = QNetworkRequest(r)\n            self.manager = QtNetwork.QNetworkAccessManager()\n            self.wxreply = self.manager.get(r)\n            self.wxreply.finished.connect(self.wxfinished)\n        else:\n            self.wxreply = urllib2.urlopen(self.wxurl)\n            wxstr = self.wxreply.read()\n            logger.debug('wxstr: %s' %(wxstr[:200]))\n            self.wxdata = json.loads(wxstr)\n            self.hasData = True\n    def wxfinished(self):\n        wxstr = str(self.wxreply.readAll())\n        if not wxstr:\n            logger.warning('wxstr is None')\n            return\n        self.wxdata = json.loads(wxstr)\n        self.hasData = True\n    def getData(self):\n        
if self.hasData:\n return self.wxdata\n else:\n return None\n \nclass DataParse:\n '''\n Abstract Class.\n Child classes: CurrentObs, FcstHourlyData, FcstDailyData.\n Parses JSON returned from Wunderground according to a list of keys.\n We request current observations and hourly and daily forecasts.\n Each of these items contains different sets of data with different keys.\n Once the data is parsed, the application can then request that the\n data be returned as a string, ready for display.\n '''\n def __init__(self,wxdata,dataKeys,daily=False):\n self.daily = daily\n self.obs = {}\n for key in dataKeys:\n if isinstance(key[1],(list,tuple)):\n try:\n kk = key[1]\n data = wxdata[kk[0]][kk[1]]\n except:\n logger.error('key=%s, wxdata=%s' %(str(kk),str(wxdata[kk[0]])))\n else:\n if key[1] in wxdata:\n data = wxdata[key[1]]\n logger.debug('key=%s, data=%s' %(key[1],str(data)))\n else:\n # DarkSky has optional fields, so it's OK if key[1] not found\n logger.info('DataParse: key=%s not present in wxdata' %(key[1]))\n continue\n if key[2] == -1 or key[2] == Config.metric:\n # Config.metric has value of either 0 or 1\n self.obs[key[0]] = [data,key[3]]\n logger.debug('save obs: key=%s, value=%s' %(key[0],str(self.obs[key[0]])))\n else:\n # key[2] != Config.metric, so skip this obs\n pass\n \n # Special case for 'icon', because we have local copies\n #iconurl = wxdata['icon_url']\n if wxdata['icon'] in icon_names:\n icon_png = icon_names[wxdata['icon']]\n self.obs['icon'] = [Config.icons + \"/\" + icon_png,'']\n \n def getObsStr(self,key):\n # Get value from wxdata + the appropriate units string\n if key in self.obs:\n # TODO: the obs tables should have a conversion function\n try:\n obsVal = float(self.obs[key][0])\n if abs(obsVal) >= 10.0:\n obsVal = int(obsVal + 0.5) # get rid of decimal places\n else:\n obsVal = float('%.1f' %(obsVal))\n except:\n # could not convert to float, so assume it's a string\n obsVal = self.obs[key][0]\n if self.obs[key][1] == '%':\n retval = str(int(100.0 * obsVal + 0.5)) + self.obs[key][1]\n else:\n retval = str(obsVal) + self.obs[key][1]\n return retval\n else:\n logger.warning('key=%s not found' %(key))\n return None\n \nclass CurrentObs(DataParse):\n # Lookup table for key used in application display,\n # key used in wxdata returned by wunderground,\n # metric=1 or English=0 units or no_units=-1\n # and displays units (if any) in the application\n # NOTE: must use 'currently' node to fetch these\n obsKeys = [\n # app key, data key, metric, units\n ('icon', 'icon', -1, ''),\n ('wx_text', 'summary', -1, ''),\n ('rel_hum', 'humidity', -1, '%'),\n ('wind_degrees', 'windBearing', -1, ''),\n ('local_epoch', 'time', -1, ''),\n ('temp', 'temperature', 0, u'°F'),\n ('press', 'pressure', 0, 'mb'),\n ('temp_feels_like', 'apparentTemperature', 0, u'°F'),\n ('wind_speed', 'windSpeed', 0, ''),\n ('wind_gust', 'windGust', 0, ''),\n ('precip_1hr', 'precipIntensity', 0, 'in'),\n ]\n # other keys: precipProbability, precipType, dewPoint, cloudCover, uvIndex, visibility, ozone\n def __init__(self,wxdata):\n DataParse.__init__(self,wxdata['currently'],self.obsKeys,daily=False)\n\nclass FcstDailyData(DataParse):\n # Lookup table for key used in application display,\n # key used in wxdata returned by wunderground,\n # metric=1 or English=0 units,\n # and displays units (if any) in the application\n # NOTE: must use 'daily/data' node to fetch these\n obsKeys = [\n # app key, data key, metric, units\n ('icon', 'icon', -1, ''),\n ('wx_text', 'summary', -1, ''),\n ('day', 'time',-1, ''),\n 
('temp_high', 'temperatureHigh', 0, u'°F'),\n        ('temp_low', 'temperatureLow', 0, u'°F'),\n        ('pop', 'precipProbability', -1, '%'),\n        ('qpf', 'precipIntensity', 0, 'in'),\n        ('snow', 'precipAccumulation', 0, 'in'),\n        ]\n    # other keys: sunriseTime, sunsetTime, moonPhase, precipIntensityMax, precipIntensityMaxTime, more\n    def __init__(self,wxdata,iday):\n        DataParse.__init__(self,wxdata['daily']['data'][iday],self.obsKeys,daily=True)\n\nclass FcstHourlyData(DataParse):\n    # Lookup table for key used in application display,\n    # key used in wxdata returned by DarkSky,\n    # metric=1 or English=0 units,\n    # and display units (if any) in the application\n    # NOTE: must use 'hourly/data' node to fetch these\n    obsKeys = [\n        # app key, data key, metric, units\n        ('icon', 'icon', -1, ''),\n        ('wx_text', 'summary', -1, ''),\n        ('hour', 'time', -1, ''),\n        ('temp', 'temperature', 0, u'°F'),\n        ('pop', 'precipProbability', -1, '%'),\n        ('qpf', 'precipIntensity', 0, 'in'),\n        ('snow', 'precipAccumulation', 0, 'in'),\n        ]\n    # other keys: apparentTemperature, dewPoint, humidity, pressure, windSpeed, windGust, windBearing, cloudCover, uvIndex, visibility, ozone\n    def __init__(self,wxdata,ihour):\n        DataParse.__init__(self,wxdata['hourly']['data'][ihour],self.obsKeys,daily=False)\n","sub_path":"Clock/DarkSkyProvider.py","file_name":"DarkSkyProvider.py","file_ext":"py","file_size_in_byte":8939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"310231481","text":"class Solution:\n    def permuteUnique(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: List[List[int]]\n        \"\"\"\n        Solution.len = len(nums)\n        ans_list = []\n        nums.sort()\n        self.DFS(nums, ans_list, 0, [])\n        return ans_list\n\n    def DFS(self, nums, ans_list, start, valuelist):\n        if Solution.len == len(valuelist) and valuelist not in ans_list:\n            ans_list.append(valuelist)\n        else:\n            for i in range(start, len(nums)):\n                # recurse with start=0: removing nums[i] re-indexes the remaining\n                # list, so every remaining element is still a candidate\n                self.DFS(nums[:i] + nums[i+1:], ans_list, 0, valuelist+[nums[i]])","sub_path":"0-47全排列2.py","file_name":"0-47全排列2.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
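A quick sanity check of the recursion above; the driver call is illustrative and not part of the original file:

print(Solution().permuteUnique([1, 1, 2]))
# expected output: [[1, 1, 2], [1, 2, 1], [2, 1, 1]]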
+{"seq_id":"240058661","text":"# -*- coding: utf-8 -*-\nfrom setuptools import setup, find_packages\n\nwith open('README.md', 'r') as rf:\n    README = rf.read()\n\nwith open('VERSION.txt', 'r') as vf:\n    VERSION = vf.read()\n\ninstall_requirements = [\n    'torch>=1.4',\n    'torchvision>=0.5',\n    'numpy>=1.18',\n    'sentencepiece>=0.1.8',\n    'gin-config>=0.3.0',\n    'Click>=7.0',\n    'ray>=0.9.0.dev0',\n    'apex>=0.1',\n    'tensorboardX>=2.0', # Unmarked Ray dependency\n    'requests>=2.23.0', # Unmarked Ray dependency\n    'pandas>=1.0.1', # Unmarked Ray dependency\n    'tabulate>=0.8.6', # Unmarked Ray dependency\n]\n\nsetup(\n    name='pele',\n    version=VERSION,\n    description='Video Description/Captioning Framework',\n    long_description=README,\n    long_description_content_type='text/markdown',\n    author='David Chan',\n    author_email='davidchan@berkeley.edu',\n    url='https://github.com/DavidMChan/pele',\n    license='Apache-2',\n    install_requires=install_requirements,\n    entry_points={'console_scripts': [\n        'pele=pele.run:main_fn',\n    ]},\n    packages=find_packages(exclude=['example', 'scripts']),\n    classifiers=[\n        'Development Status :: 3 - Alpha', # Choose either \"3 - Alpha\", \"4 - Beta\" or \"5 - Production/Stable\" as the current state of your package\n        'Intended Audience :: Developers', # Define that your audience is developers\n        'Programming Language :: Python :: 3', # Specify which Python versions you want to support\n        'Programming Language :: Python :: 3.6',\n    ],\n)\n","sub_path":"pypi_install_script/pele-0.1.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"349701440","text":"# -*- coding: utf-8 -*-\n# Solve MNIST (handwritten digit recognition), the classic introductory machine-learning problem, with a neural network.\n\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n# Load the data with the mnist module that ships with TensorFlow.\n\nmnist = input_data.read_data_sets(\"./mnist/data\", one_hot=True)\n\n# -------------------- Build the model -------------------- #\n# The input has shape [batch size, features].\n# Each handwritten image is 28x28 pixels, flattened into 784 features.\nX = tf.placeholder(tf.float32, [None, 784])\n# The output is one of 10 classes, the digits 0-9.\nY = tf.placeholder(tf.float32, [None, 10])\n\n# The network architecture is:\n# 784 (input features)\n# -> 256 (hidden-layer units) -> 256 (hidden-layer units)\n# -> 10 (output classes 0-9)\nW1 = tf.Variable(tf.random_normal([784, 256], stddev=0.01))\n# Multiply the input by the weights and apply ReLU to build the layer.\nL1 = tf.nn.relu(tf.matmul(X, W1))\n\nW2 = tf.Variable(tf.random_normal([256, 256], stddev=0.01))\n# Multiply the output of L1 by the weights and apply ReLU to build the next layer.\nL2 = tf.nn.relu(tf.matmul(L1, W2))\n\nW3 = tf.Variable(tf.random_normal([256, 10], stddev=0.01))\n# The final output has 10 classes.\nmodel = tf.matmul(L2, W3)\n\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=model, labels=Y))\noptimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(cost)\n\n# -------------------- Train the model -------------------- #\ninit = tf.global_variables_initializer()\nsess = tf.Session()\nsess.run(init)\n\nbatch_size = 10\ntotal_batch = int(mnist.train.num_examples / batch_size)\n\nfor epoch in range(30):\n    total_cost = 0\n\n    for i in range(total_batch):\n        # Fetch the requested number of training examples with the\n        # next_batch helper of TensorFlow's mnist module.\n        batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n\n        _, cost_val = sess.run([optimizer, cost], feed_dict={X: batch_xs, Y: batch_ys})\n        total_cost += cost_val\n\n    print('Epoch: ', '%04d' % (epoch + 1),\n          'Avg. cost = ', '{:.3f}'.format(total_cost / total_batch))\n\nprint('Training complete!')\n\n\n# -------------------- Evaluate -------------------- #\n# Compare the model's predictions against the true labels Y.\n# tf.argmax picks the label with the highest score as the predicted class.\n# e.g. [0.1 0 0 0.7 0 0.2 0 0 0 0] -> 3\nis_correct = tf.equal(tf.argmax(model, 1), tf.argmax(Y, 1))\naccuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))\nprint('Accuracy ', sess.run(accuracy, feed_dict={X: mnist.test.images, Y: mnist.test.labels}))\n\n\n\n\n\n","sub_path":"04_MNIST/01_MNIST.py","file_name":"01_MNIST.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"583942353","text":"# 1. It trains the network from scratch (if \"resume\" is off)\n# 2. If resume is on, it loads the pretrained model;\n#    if prune_bool is on, it prunes the network;\n#    if retrain_bool is on, it retrains it\n#    (e.g. it can retrain, but not prune)\n# 3. 
it can be used for visualizing, if uncomment the comments #VISU\n\n# other features:\n# it loads the ranks from shapley or switches (ranks_path = '../Dir_switch/results/cifar/vgg_93.92/switch_init_-1, alpha_2/')\n\n\n#\n#\n# Sequential(\n# (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n# (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n# (2): ReLU(inplace)\n# (3): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n# (4): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n# (5): ReLU(inplace)\n# (6): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n# (7): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n# (8): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n# (9): ReLU(inplace)\n# (10): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n# (11): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n# (12): ReLU(inplace)\n# (13): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n# (14): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n# (15): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n# (16): ReLU(inplace)\n# (17): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n# (18): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n# (19): ReLU(inplace)\n# (20): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n# (21): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n# (22): ReLU(inplace)\n# (23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n# (24): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n# (25): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n# (26): ReLU(inplace)\n# (27): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n# (28): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n# (29): ReLU(inplace)\n# (30): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n# (31): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n# (32): ReLU(inplace)\n# (33): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n# (34): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n# (35): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n# (36): ReLU(inplace)\n# (37): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n# (38): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n# (39): ReLU(inplace)\n# (40): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n# (41): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n# (42): ReLU(inplace)\n# (43): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n# (44): AvgPool2d(kernel_size=1, stride=1, padding=0)\n# )\n\n'''Train CIFAR10 with PyTorch.'''\nfrom __future__ import print_function\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torch.backends.cudnn as cudnn\n\nimport torchvision\nimport torchvision.transforms as transforms\n\nimport os\nimport argparse\n\nimport sys\nprint 
(sys.path)\nprint(\"newh2\")\nsys.path.append(\"/home/kamil/Dropbox/Current_research/python_tests/results_networktest/external_codes/pytorch-cifar-master/models\")\nimport numpy as np\nfrom torch.nn.parameter import Parameter\nimport torch.nn.functional as f\nimport logging\nimport matplotlib.pyplot as plt\nimport magnitude_rank\n\n\n#file_dir = os.path.dirname(\"utlis.p\")\n#sys.path.append(file_dir)\n\n#from models import *\n\n#from utils import progress_bar\n\n'''VGG11/13/16/19 in Pytorch.'''\nimport torch\nimport torch.nn as nn\n\n\n############################################################\n# NETWORK\n\n\ncfg = {\n 'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n 'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n 'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],\n 'VGGBC': [64, 64, 128, 128, 256, 256, 256, 512, 512, 512, 512, 512, 512, 512, 512],\n 'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],\n}\n\n\n\nclass VGG(nn.Module):\n def __init__(self, vgg_name):\n super(VGG, self).__init__()\n\n #self.features = self._make_layers(cfg[vgg_name])\n #self.classifier = nn.Linear(512, 10)\n\n self.c1 = nn.Conv2d(3, 64, 3, padding=1)\n self.bn1 = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n self.c2 = nn.Conv2d(64, 64, 3, padding=1)\n self.bn2 = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n self.mp1 = nn.MaxPool2d(2)\n\n self.c3 = nn.Conv2d(64, 128, 3, padding=1)\n self.bn3 = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n self.c4 = nn.Conv2d(128, 128, 3, padding=1)\n self.bn4 = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n self.mp2 = nn.MaxPool2d(2)\n\n self.c5 = nn.Conv2d(128, 256, 3, padding=1)\n self.bn5 = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n self.c6 = nn.Conv2d(256, 256, 3, padding=1)\n self.bn6 = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n self.c7 = nn.Conv2d(256, 256, 3, padding=1)\n self.bn7 = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n self.mp3 = nn.MaxPool2d(2)\n\n self.c8 = nn.Conv2d(256, 512, 3, padding=1)\n self.bn8 = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n self.c9 = nn.Conv2d(512, 512, 3, padding=1)\n self.bn9 = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n self.c10 = nn.Conv2d(512, 512, 3, padding=1)\n self.bn10 = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n self.c11 = nn.Conv2d(512, 512, 3, padding=1)\n self.bn11 = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n self.mp4 = nn.MaxPool2d(2)\n\n self.c12 = nn.Conv2d(512, 512, 3, padding=1)\n self.bn12 = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n self.c13 = nn.Conv2d(512, 512, 3, padding=1)\n self.bn13 = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n self.c14 = nn.Conv2d(512, 512, 3, padding=1)\n self.bn14 = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n self.c15 = nn.Conv2d(512, 512, 3, padding=1)\n self.bn15 = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, 
track_running_stats=True)\n self.mp5 = nn.MaxPool2d(2, stride=2, dilation=1, ceil_mode=False)\n self.ap = nn.AvgPool2d(1, stride=1)\n\n # self.l1 = nn.Linear(512, 512)\n # self.l2 = nn.Linear(512, 512)\n self.l3 = nn.Linear(512, 10)\n self.d1 = nn.Dropout()\n self.d2 = nn.Dropout()\n\n self.parameter = Parameter(-1 * torch.ones(64), requires_grad=True) # this parameter lies #S\n\n #def forward(self, x, i): # VISU\n def forward(self, x):\n phi = f.softplus(self.parameter)\n S = phi / torch.sum(phi)\n # Smax = torch.max(S)\n # Sprime = S/Smax\n Sprime = S\n\n output = f.relu(self.bn1(self.c1(x)))\n output = f.relu(self.bn2(self.c2(output)))\n output = self.mp1(output)\n\n output = f.relu(self.bn3(self.c3(output)))\n output = f.relu(self.bn4(self.c4(output)))\n output = self.mp2(output)\n\n output = f.relu(self.bn5(self.c5(output)))\n output = f.relu(self.bn6(self.c6(output)))\n output = f.relu(self.bn7(self.c7(output)))\n output = self.mp3(output)\n\n output = f.relu(self.bn8(self.c8(output)))\n output = f.relu(self.bn9(self.c9(output)))\n output = f.relu(self.bn10(self.c10(output)))\n output = f.relu(self.bn11(self.c11(output)))\n output = self.mp4(output)\n output = f.relu(self.bn12(self.c12(output)))\n output = f.relu(self.bn13(self.c13(output)))\n output = f.relu(self.bn14(self.c14(output)))\n output = f.relu(self.bn15(self.c15(output)))\n output = self.mp5(output)\n output = self.ap(output)\n\n output = output.view(-1, 512)\n output = self.l3(output)\n\n # output = f.relu(self.l1(output))\n # output = self.d2(output)\n # output = f.relu(self.l2(output))\n\n # out = self.features(x)\n # out = out.view(out.size(0), -1)\n # out = self.classifier(out)\n return output\n\n # def forward(self, x):\n # out = self.features(x)\n # out = out.view(out.size(0), -1)\n # out = self.classifier(out)\n # return out\n\n def _make_layers(self, cfg):\n layers = []\n in_channels = 3\n for x in cfg:\n if x == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n else:\n layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),\n nn.BatchNorm2d(x),\n nn.ReLU(inplace=True)]\n in_channels = x\n layers += [nn.AvgPool2d(kernel_size=1, stride=1)]\n return nn.Sequential(*layers)\n\n\n# ############################################################\n# # NETWORK\n#\n#\n# cfg = {\n# 'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n# 'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n# 'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],\n# 'VGGKAM': [0, 39, 39, 63, 48, 55, 98, 97, 52, 62, 22, 42, 47, 47, 42, 62],\n# 'VGGBC': [0, 64, 64, 128, 128, 256, 256, 256, 512, 512, 512, 512, 512, 512, 512, 512, 512, 512],\n# 'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],\n# }\n# model_structure = cfg['VGGBC']\n#\n#\n# class VGG(nn.Module):\n# def __init__(self, vgg_name):\n# super(VGG, self).__init__()\n#\n# #self.features = self._make_layers(cfg[vgg_name])\n# #self.classifier = nn.Linear(512, 10)\n# #model_structure={'c1_num':39, 'c2_num'=39, 'c3_num'=63; 'c4_num'=48, 'c5_num'=55, 'c6_num'=98, 'c7_num'=97, 'c8_num'=52, 'c9_num'=62,\n# #'c10_num'=22, 'c11_num'=42, 'c12_num'=47 ; 'c13_num'=47 ; 'c14_num'=42 ; 'c15_num'=62}\n#\n# self.c1 = nn.Conv2d(3, model_structure[1], 3, padding=1)\n# self.bn1 = nn.BatchNorm2d(model_structure[1], eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n# self.c2 = nn.Conv2d(model_structure[1], 
model_structure[2], 3, padding=1)\n# self.bn2 = nn.BatchNorm2d(model_structure[2], eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n# self.mp1 = nn.MaxPool2d(2)\n#\n# self.c3 = nn.Conv2d(model_structure[2], model_structure[3], 3, padding=1)\n# self.bn3 = nn.BatchNorm2d(model_structure[3], eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n# self.c4 = nn.Conv2d(model_structure[3], model_structure[4], 3, padding=1)\n# self.bn4 = nn.BatchNorm2d(model_structure[4], eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n# self.mp2 = nn.MaxPool2d(2)\n#\n# self.c5 = nn.Conv2d(model_structure[4], model_structure[5], 3, padding=1)\n# self.bn5 = nn.BatchNorm2d(model_structure[5], eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n# self.c6 = nn.Conv2d(model_structure[5], model_structure[6], 3, padding=1)\n# self.bn6 = nn.BatchNorm2d(model_structure[6], eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n# self.c7 = nn.Conv2d(model_structure[6], model_structure[7], 3, padding=1)\n# self.bn7 = nn.BatchNorm2d(model_structure[7], eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n# self.mp3 = nn.MaxPool2d(2)\n#\n# self.c8 = nn.Conv2d(model_structure[7], model_structure[8], 3, padding=1)\n# self.bn8 = nn.BatchNorm2d(model_structure[8], eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n# self.c9 = nn.Conv2d(model_structure[8], model_structure[9], 3, padding=1)\n# self.bn9 = nn.BatchNorm2d(model_structure[9], eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n# self.c10 = nn.Conv2d(model_structure[9], model_structure[10], 3, padding=1)\n# self.bn10 = nn.BatchNorm2d(model_structure[10], eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n# self.c11 = nn.Conv2d(model_structure[10], model_structure[11], 3, padding=1)\n# self.bn11 = nn.BatchNorm2d(model_structure[11], eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n# self.mp4 = nn.MaxPool2d(2)\n#\n# self.c12 = nn.Conv2d(model_structure[11], model_structure[12], 3, padding=1)\n# self.bn12 = nn.BatchNorm2d(model_structure[12], eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n# self.c13 = nn.Conv2d(model_structure[12], model_structure[13], 3, padding=1)\n# self.bn13 = nn.BatchNorm2d(model_structure[13], eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n# self.c14 = nn.Conv2d(model_structure[13], model_structure[14], 3, padding=1)\n# self.bn14 = nn.BatchNorm2d(model_structure[14], eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n# self.c15 = nn.Conv2d(model_structure[14], model_structure[15], 3, padding=1)\n# self.bn15 = nn.BatchNorm2d(model_structure[15], eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n# self.mp5 = nn.MaxPool2d(2, stride=2, dilation=1, ceil_mode=False)\n# self.ap = nn.AvgPool2d(1, stride=1)\n#\n# self.l3 = nn.Linear(model_structure[15], 10)\n# self.d1 = nn.Dropout()\n# self.d2 = nn.Dropout()\n#\n# self.parameter = Parameter(-1 * torch.ones(64), requires_grad=True) # this parameter lies #S\n#\n# def forward(self, x, i): #VISU\n# #def forward(self, x):\n# # phi = f.softplus(self.parameter)\n# # S = phi / torch.sum(phi)\n# # # Smax = torch.max(S)\n# # # Sprime = S/Smax\n# # Sprime = S\n# #\n# output = f.relu(self.bn1(self.c1(x)))\n#\n# #\n# #we visualize (and prune) output channels, that is we take as input of the three input channels (in case of RGB)\n# #and see how each of the 64 channels transform this feature map into a new feature map\n# # 
hence we visualize 64 feature maps\n#\n# #VISU\n# #if vis:\n# # for filter_num in range(64):\n# # mm=output.cpu().detach().numpy()\n# # #fig,ax = plt.subplots(1)\n# # matrix=mm[1,filter_num,:,:]\n# # ave = np.average(matrix[0:20, 0])\n# # matrix = matrix - ave\n# #\n# # #ax.imshow(mm[1,filter_num,:,:], cmap=\"gray\", aspect='normal')\n# # plt.imshow(matrix, cmap=\"coolwarm\") #showing 2nd channel (example of a channel)\n# #\n# #\n# # plt.gca().set_axis_off()\n# # plt.subplots_adjust(top=1, bottom=0, right=1, left=0,\n# # hspace=0, wspace=0)\n# # plt.margins(0, 0)\n# # plt.gca().xaxis.set_major_locator(plt.NullLocator())\n# # plt.gca().yaxis.set_major_locator(plt.NullLocator())\n# # epoch=-1\n# # plt.savefig(\"/home/kamil/Dropbox/Current_research/python_tests/results_networktest/vis/feature_maps/cifar/trial_coolwarm/conv1_batch%d_filternum%d_epoch%d\" % (i, filter_num, epoch), bbox_inches='tight', pad_inches=0)\n# #\n#\n#\n# output = f.relu(self.bn1(output))\n#\n#\n# output = f.relu(self.bn2(self.c2(output)))\n# output = self.mp1(output)\n# output = f.relu(self.bn3(self.c3(output)))\n# output = f.relu(self.bn4(self.c4(output)))\n# output = self.mp2(output)\n# output = f.relu(self.bn5(self.c5(output)))\n# output = f.relu(self.bn6(self.c6(output)))\n# output = f.relu(self.bn7(self.c7(output)))\n# output = self.mp3(output)\n# output = f.relu(self.bn8(self.c8(output)))\n# output = f.relu(self.bn9(self.c9(output)))\n# output = f.relu(self.bn10(self.c10(output)))\n# output = f.relu(self.bn11(self.c11(output)))\n# output = self.mp4(output)\n# output = f.relu(self.bn12(self.c12(output)))\n# output = f.relu(self.bn13(self.c13(output)))\n# output = f.relu(self.bn14(self.c14(output)))\n# output = f.relu(self.bn15(self.c15(output)))\n# output = self.mp5(output)\n# output = self.ap(output)\n# output = output.view(-1, model_structure[15])\n# output = self.l3(output)\n# return output\n#\n# def _make_layers(self, cfg):\n# layers = []\n# in_channels = 3\n# for x in cfg:\n# if x == 'M':\n# layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n# else:\n# layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),\n# nn.BatchNorm2d(x),\n# nn.ReLU(inplace=True)]\n# in_channels = x\n# layers += [nn.AvgPool2d(kernel_size=1, stride=1)]\n# return nn.Sequential(*layers)\n\n\n#####################################\n# DATA\n\n# parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')\n# parser.add_argument('--lr', default=0.1, type=float, help='learning rate')\n# parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')\n# args = parser.parse_args()\n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nbest_acc = 0 # best test accuracy\nstart_epoch = 0 # start from epoch 0 or last checkpoint epoch\n\n# Data\nprint('==> Preparing data..')\ntransform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n])\ntransform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n])\n\ntrainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=False, transform=transform_train)\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=0)\n#with more workers there may be an error in debug mode: RuntimeError: DataLoader worker (pid 29274) is killed by signal: Terminated.\n\n#testset = 
torchvision.datasets.CIFAR10(root='./data', train=False, download=False, transform=transform_test)\n#testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=0)\n\ntestset = torchvision.datasets.CIFAR10(root='./data', train=False, download=False, transform=transform_test)\ntestloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=0)\n\nclasses = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n\n###################################################\n# MAKE AN INSTANCE OF A NETWORK AND (POSSIBLY) LOAD THE MODEL\n\n# Model\nprint('==> Building model..')\nnet = VGG('VGG16')\n# net = ResNet18()\n# net = PreActResNet18()\n# net = GoogLeNet()\n# net = DenseNet121()\n# net = ResNeXt29_2x64d()\n# net = MobileNet()\n# net = MobileNetV2()\n# net = DPN92()\n# net = ShuffleNetG2()\n# net = SENet18()\n#net = ShuffleNetV2(1)\nnet = net.to(device)\n\n# for name, param in net.named_parameters():\n# print (name, param.shape)\n\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)\n\nif device == 'cuda':\n net = torch.nn.DataParallel(net)\n cudnn.benchmark = True\n #print(device)\n\n########################################################\n# TRAIN\n\n\ndef train(epoch):\n print('\\nEpoch: %d' % epoch)\n net.train()\n train_loss = 0\n correct = 0\n total = 0\n for batch_idx, (inputs, targets) in enumerate(trainloader):\n inputs, targets = inputs.to(device), targets.to(device)\n optimizer.zero_grad()\n #outputs = net(inputs, batch_idx) #VISU\n outputs = net(inputs)\n loss = criterion(outputs, targets)\n loss.backward()\n optimizer.step()\n\n train_loss += loss.item()\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n #if (batch_idx % 1000 ==0):\n print('Training Loss: %.3f | Acc: %.3f%% (%d/%d)' % (train_loss / (batch_idx + 1), 100. * correct / total, correct, total))\n #progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'\n # % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))\n return 100.*correct/total, best_acc\n\n\n\n\n#################################################################\n# TEST\n\ndef test(epoch):\n global best_acc\n net.eval()\n test_loss = 0\n correct = 0\n total = 0\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(testloader):\n inputs, targets = inputs.to(device), targets.to(device)\n #outputs = net(inputs, batch_idx) #VISU\n outputs = net(inputs)\n loss = criterion(outputs, targets)\n\n test_loss += loss.item()\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n # progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'\n # % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))\n print('Test Lossds: %.3f | Acc: %.3f%% (%d/%d)' % (test_loss / (batch_idx + 1), 100. 
* correct / total, correct, total))\n return 100*correct/total\n\n\n\n # global best_acc\n # net.eval()\n # test_loss = 0\n # correct = 0\n # total = 0\n # with torch.no_grad():\n # for batch_idx, (inputs, targets) in enumerate(testloader):\n # inputs, targets = inputs.to(device), targets.to(device)\n # outputs = net(inputs, batch_idx) #VISU\n # #outputs = net(inputs)\n # loss = criterion(outputs, targets)\n #\n # test_loss += loss.item()\n # _, predicted = outputs.max(1)\n # total += targets.size(0)\n # correct += predicted.eq(targets).sum().item()\n # #progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'\n # # % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))\n # print('Test Loss: %.3f | Acc: %.3f%% (%d/%d)' % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))\n #\n # return 100*correct/total\n\n\n\n\n########################################\n# just RESUME\n\n#if args.resume:\ndef load_model(test_bool=True):\n # Load checkpoint.\n #print('==> Resuming from checkpoint..')\n assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'\n checkpoint = torch.load('./checkpoint/ckpt_93.92.t7')\n net.load_state_dict(checkpoint['net'])\n best_acc = checkpoint['acc']\n start_epoch = checkpoint['epoch']\n if test_bool:\n print(\"Accuracy of the tested model: \")\n\n test(-1)\n print(\"----\")\n\n###############################################\n######################################################\n# RUN EXPERIMENT\n\ndef save_checkpoint(epoch, acc, best_acc):\n\n#Save checkpoint.\n #acc = test(epoch)\n if acc > best_acc:\n print('Saving..')\n state = {\n 'net': net.state_dict(),\n 'acc': acc,\n 'epoch': epoch,\n }\n if not os.path.isdir('checkpoint'):\n os.mkdir('checkpoint')\n torch.save(state, './checkpoint/ckpt_%.2f.t7' % acc)\n best_acc = acc\n\n return best_acc\n\n\n\n\n##############################################\n# PRUNEand RETRAIN\n\ndef prune_and_retrain(thresh):\n\n load_model(False)\n\n #PRINT\n # for name, param in net.named_parameters():\n # print (name)\n # print(param.shape)\n\n #from worse to best\n#\n# ]\n\n if prune_bool:\n ############################3\n # READ THE RANKS\n\n if method=='filter':\n ranks_method='switches'\n switches_epoch=10\n\n\n\n if ranks_method=='shapley':\n combinationss=torch.load('results/ranks/ranks_93.92_shapley.pt')\n # elif ranks_method=='switches':\n # #combinationss=torch.load('results/ranks/ranks_93.92_switches.pt')\n # combinationss=[0]*15\n # ranks_path = '../Dir_switch/results/cifar/vgg_93.92/switch_init_0.05, alpha_0.05, annealing_6000000/'\n # #ranks_path = '../Dir_switch/results/cifar/vgg_93.92/switch_init_-1, alpha_2/'\n # for i in range(len(combinationss)):\n # ranks_filepath = ranks_path + \"93.92_alpha0.05_switchinit0.05_conv\" + str(i + 1) + \"_ep\"+str(switches_epoch)+\".pt\"\n\n elif ranks_method == 'switches':\n # combinationss=torch.load('results/ranks/ranks_93.92_switches.pt')\n combinationss = [0] * len(cfg['VGGBC']) #15\n ranks_path = '../Dir_switch/results/cifar/vgg_93.92/switch_init_-1, alpha_2/'\n for i in range(len(combinationss)):\n ranks_filepath = ranks_path + \"93.92_conv\" + str(i + 1) + \"_ep49.pt\"\n\n switch_values = torch.load(ranks_filepath)\n #print(switch_values)\n #combinationss[i]=torch.argsort(switch_values)\n combinationss[i]=torch.LongTensor(np.argsort(switch_values.cpu().detach().numpy())[::-1].copy())#argsort is increasing order, we want decreasing hence [::-1]\n #print(combinationss[i])\n #print(\"new\")\n\n\n #these numbers from the 
beginning will be cut off, meaning the worse will be cut off\n for i in range(len(combinationss)):\n combinationss[i] = torch.LongTensor(combinationss[i][:thresh[i]])\n print(combinationss[1])\n\n\n elif method=='l1' or method=='l2':\n combinationss = magnitude_rank.get_ranks(method)\n # for i in range(4):\n # combinationss.append(torch.LongTensor(combinat[i]))\n\n # these numbers from the end will be cut off, meaning the worse will be cut off\n for i in range(len(combinationss)):\n combinationss[i] = torch.LongTensor(combinationss[i][:thresh[i]].copy())\n print(combinationss[1])\n\n\n\n\n # PRINT THE PRUNED ARCHITECTURE\n remaining=[]\n for i in range(len(combinationss)):\n print(cfg['VGGBC'][i], len(combinationss[i]))\n remaining.append(int(cfg['VGGBC'][i])-len(combinationss[i]))\n print(remaining)\n\n\n # PRUNE\n\n it=0\n for name, param in net.named_parameters():\n print(name)\n if \"module.c\" in name and \"weight\" in name:\n it+=1\n param.data[combinationss[it-1]]=0\n #print(param.data)\n if \"module.c\" in name and \"bias\" in name:\n param.data[combinationss[it - 1]] = 0\n #print(param.data)\n if (\"bn\" in name) and (\"weight\" in name):\n param.data[combinationss[it - 1]] = 0\n if (\"bn\" in name) and (\"bias\" in name):\n param.data[combinationss[it - 1]] = 0\n\n # checking pruning\n # for name, param in net.named_parameters():\n # if \"c1.bias\" in name:\n # print(name)\n # print(param)\n\n #\n # combinationss[i]\n #\n # net.c1.weight.data[combination]=0; net.c1.bias.data[combination] = 0\n # net.c3.weight.data[combination2] = 0; net.c3.bias.data[combination2] = 0\n # net.c5.weight.data[combination3] = 0;net.c5.bias.data[combination3] = 0\n # net.f6.weight.data[combination4] = 0;net.f6.bias.data[combination4] = 0\n\n\n print(\"After pruning\")\n\n\n test(-1)\n\n\n ######## RETRAINING\n\n def gradi1(module):\n module[combinationss[0]] = 0\n # print(module[21])\n h1 = net.module.c1.weight.register_hook(gradi1)\n h1 = net.module.c1.bias.register_hook(gradi1)\n h12 = net.module.bn1.weight.register_hook(gradi1)\n h13 = net.module.bn1.bias.register_hook(gradi1)\n\n\n def gradi2(module):\n module[combinationss[1]] = 0\n # print(module[21])\n h1 = net.module.c2.weight.register_hook(gradi2)\n h1 = net.module.c2.bias.register_hook(gradi2)\n h12 = net.module.bn2.weight.register_hook(gradi2)\n h13 = net.module.bn2.bias.register_hook(gradi2)\n\n\n def gradi3(module):\n module[combinationss[2]] = 0\n # print(module[21])\n h1 = net.module.c3.weight.register_hook(gradi3)\n h1 = net.module.c3.bias.register_hook(gradi3)\n h12 = net.module.bn3.weight.register_hook(gradi3)\n h13 = net.module.bn3.bias.register_hook(gradi3)\n\n\n def gradi4(module):\n module[combinationss[3]] = 0\n # print(module[21])\n h1 = net.module.c4.weight.register_hook(gradi4)\n h1 = net.module.c4.bias.register_hook(gradi4)\n h12 = net.module.bn4.weight.register_hook(gradi4)\n h13 = net.module.bn4.bias.register_hook(gradi4)\n\n\n def gradi5(module):\n module[combinationss[4]] = 0\n # print(module[21])\n h1 = net.module.c5.weight.register_hook(gradi5)\n h1 = net.module.c5.bias.register_hook(gradi5)\n h12 = net.module.bn5.weight.register_hook(gradi5)\n h13 = net.module.bn5.bias.register_hook(gradi5)\n\n def gradi6(module):\n module[combinationss[5]] = 0\n # print(module[21])\n h1 = net.module.c6.weight.register_hook(gradi6)\n h1 = net.module.c6.bias.register_hook(gradi6)\n h12 = net.module.bn6.weight.register_hook(gradi6)\n h13 = net.module.bn6.bias.register_hook(gradi6)\n\n def gradi7(module):\n module[combinationss[6]] = 0\n 
# print(module[21])\n h1 = net.module.c7.weight.register_hook(gradi7)\n h1 = net.module.c7.bias.register_hook(gradi7)\n h12 = net.module.bn7.weight.register_hook(gradi7)\n h13 = net.module.bn7.bias.register_hook(gradi7)\n\n\n def gradi8(module):\n module[combinationss[7]] = 0\n # print(module[21])\n h1 = net.module.c8.weight.register_hook(gradi8)\n h1 = net.module.c8.bias.register_hook(gradi8)\n h12 = net.module.bn8.weight.register_hook(gradi8)\n h13 = net.module.bn8.bias.register_hook(gradi8)\n\n def gradi9(module):\n module[combinationss[8]] = 0\n # print(module[21])\n h1 = net.module.c9.weight.register_hook(gradi9)\n h1 = net.module.c9.bias.register_hook(gradi9)\n h12 = net.module.bn9.weight.register_hook(gradi9)\n h13 = net.module.bn9.bias.register_hook(gradi9)\n\n\n def gradi10(module):\n module[combinationss[9]] = 0\n # print(module[21])\n h1 = net.module.c10.weight.register_hook(gradi10)\n h1 = net.module.c10.bias.register_hook(gradi10)\n h12 = net.module.bn10.weight.register_hook(gradi10)\n h13 = net.module.bn10.bias.register_hook(gradi10)\n\n def gradi11(module):\n module[combinationss[10]] = 0\n # print(module[21])\n h1 = net.module.c11.weight.register_hook(gradi11)\n h1 = net.module.c11.bias.register_hook(gradi11)\n h12 = net.module.bn11.weight.register_hook(gradi11)\n h13 = net.module.bn11.bias.register_hook(gradi11)\n\n def gradi12(module):\n module[combinationss[11]] = 0\n # print(module[21])\n h1 = net.module.c12.weight.register_hook(gradi12)\n h1 = net.module.c12.bias.register_hook(gradi12)\n h12 = net.module.bn12.weight.register_hook(gradi12)\n h13 = net.module.bn12.bias.register_hook(gradi12)\n\n def gradi13(module):\n module[combinationss[12]] = 0\n # print(module[21])\n h1 = net.module.c13.weight.register_hook(gradi13)\n h1 = net.module.c13.bias.register_hook(gradi13)\n h12 = net.module.bn13.weight.register_hook(gradi13)\n h13 = net.module.bn13.bias.register_hook(gradi13)\n\n def gradi14(module):\n module[combinationss[13]] = 0\n # print(module[21])\n h1 = net.module.c14.weight.register_hook(gradi14)\n h1 = net.module.c14.bias.register_hook(gradi14)\n h12 = net.module.bn14.weight.register_hook(gradi14)\n h13 = net.module.bn14.bias.register_hook(gradi14)\n\n def gradi15(module):\n module[combinationss[14]] = 0\n # print(module[21])\n h1 = net.module.c15.weight.register_hook(gradi15)\n h1 = net.module.c15.bias.register_hook(gradi15)\n h12 = net.module.bn15.weight.register_hook(gradi15)\n h13 = net.module.bn15.bias.register_hook(gradi15)\n\n\n\n # it = -1\n # for name, param in net.named_parameters():\n # print(name)\n # if \"module.c\" in name and \"weight\" in name:\n # it += 1\n #\n # def gradi(module):\n # module[combinationss[it]] = 0\n #\n # h1 = param.register_hook(gradi)\n #\n # if \"module.c\" in name and \"bias\" in name:\n # #param.data[combinationss[it - 1]] = 0\n # #print(param.data)\n #\n # def gradi(module):\n # module[combinationss[it]] = 0\n #\n # h1 = param.register_hook(gradi)\n\n\n #######################################################\n\n\n if retrain_bool:\n print(\"Retraining\")\n\n filename = \"retrained_paramsearch1_vgg.txt\"\n with open(filename, \"a+\") as file:\n file.write(\"---NEW EXPERIMENT-----\")\n if prune_bool:\n file.write(\"\\n\\nprunedto:%s\\n\\n\" % (\" \".join(str(e) for e in remaining)))\n\n path=\"./checkpoint/\"\n\n #here retraining works\n net.train()\n stop = 0; epoch = 0; best_accuracy = 0; entry = np.zeros(3); best_model = -1; early_stopping=350\n optimizer = optim.SGD(net.parameters(), lr=0.0001, momentum=0.9, 
weight_decay=5e-4)\n while (stop < early_stopping):\n epoch = epoch + 1\n for i, data in enumerate(trainloader):\n inputs, labels = data\n inputs, labels = inputs.to(device), labels.to(device)\n\n optimizer.zero_grad()\n outputs = net(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n net.module.c2.weight.grad #the hook is automatically applied, here we just check the gradient\n optimizer.step()\n #net.c1.weight.data[1] = 0 # instead of hook\n #net.c1.bias.data[1] = 0 # instead of hook\n # if i % 100==0:\n # print (i)\n # print (loss.item())\n\n print(loss.item())\n accuracy = test(-1)\n #print(net.module.c2.weight.data)\n print(\"Epoch \" + str(epoch) + \" ended.\")\n\n #check if pruned weights are pruned\n # for name, param in net.named_parameters():\n # if \"1.weight\" in name:\n # print(name)\n # print(param)\n\n if (accuracy <= best_accuracy):\n stop = stop + 1\n entry[2] = 0\n else:\n best_accuracy = accuracy\n print(\"Best updated\")\n stop = 0\n entry[2] = 1\n best_model = net.state_dict()\n if best_accuracy>13.5:\n if prune_bool:\n torch.save(best_model, \"{}_retrained_epo-{}_prunedto-{}_acc-{:.2f}\".format(path, epoch, remaining, best_accuracy))\n else:\n torch.save(best_model, \"%s_retrained_epo-%d_only_acc-%.2f\" % (path, epoch, best_accuracy))\n\n entry[0] = accuracy;\n entry[1] = loss\n with open(filename, \"a+\") as file:\n file.write(\"\\n Epoch: %d\\n\" % epoch)\n file.write(\",\".join(map(str, entry)) + \"\\n\")\n if (accuracy>98.9):\n file.write(\"Yes\\n\")\n elif (accuracy>98.8):\n file.write(\"Ok\\n\")\n\n print(loss.item())\n accuracy = test(-1)\n\n#################################################################\n\n#if all False just train thenetwork\nresume = True\nprune_bool = True\nretrain_bool = True # whether we retrain the model or just evaluate\n\n\n#file_write=True\n#compute_combinations(file_write)\n\nif resume:\n load_model()\n\n#loading a pretrained model\nif prune_bool:\n #thresh=[15,15,10,10,10,110,210,490,490,497,505,505,504,503,495]\n #thresh=[15,15,10,10,10,110,21,49,490,497,505,505,504,503,495]\n #thresh=[30,30,70,80,201,170,175,420,430,440,440,445,445,450,450]\n #thresh=[30,30,60,60,181,150,155,420,410,420,420,445,445,450,450]\n #thresh=[20,20,30,90,181,150,155,320,310,320,320,445,445,450,50]\n #thresh=[15,15,24,10,141,150,195,220,210,220,220,345,345,350,350]\n thresh=[25,25,65,80,201,158,159,460,450,490,470,465,465,470,450]\n\n thresh=[20, 20, 40, 40, 80, 80, 80, 160, 160, 160, 160, 160, 160, 160, 80]\n thresh = [20, 20, 40, 40, 80, 80, 80, 160, 160, 160, 160, 80, 80, 80, 80]\n #thresh=[5, 5, 40, 40, 20, 40, 80, 80, 160, 40, 40, 160, 80, 160, 160] #12\n #thresh=[5, 5, 10, 10, 40, 20, 20, 40, 40, 160, 160, 40, 160, 80, 80] # 13\n #thresh=[5, 5, 20, 10, 20, 80, 40, 40, 40, 80, 160, 80, 80, 40, 80] #14\n #thresh = [5, 5, 10, 10, 20, 20, 20, 40, 40, 40, 40, 40, 80, 160, 80] #15\n #thresh=[5, 5, 10, 10, 20, 20, 20, 40, 40, 40, 40, 40, 40, 40, 160] #16\n #thresh=[5, 5, 10, 10, 20, 10, 20, 20, 40, 20, 20, 40, 40, 20, 80] #17\n #thresh=[5, 5, 10, 10, 10, 10, 10, 20, 20, 20, 10, 10, 10, 10, 10] #~18\n\n\n\n\n print('\\n****************\\n')\n for method in ['filter', 'l1', 'l2']:\n print('\\n\\n'+method+\"\\n\")\n #thresh=[i1,i2,i3,i4,i5,i6,i7,i8,i9,i10,i11,i12,i13,i14,i15]\n print(thresh)\n prune_and_retrain(thresh)\n\n\n\n prune_and_retrain(thresh) #first argument is whether to trune, False only retraining\n\n# training from scratch\nif resume==False:\n best_accuracy=0\n session1end=start_epoch+10; session2end=start_epoch+250; 
session3end=start_epoch+1950; # previously ran to 550\n    for epoch in range(start_epoch, session1end):\n        train_acc=train(epoch)\n        test_acc=test(epoch)\n        best_accuracy=save_checkpoint(epoch, test_acc, best_accuracy)\n\n    optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)\n\n    for epoch in range(session1end, session2end):\n        train_acc = train(epoch)\n        test_acc = test(epoch)\n        best_accuracy=save_checkpoint(epoch, test_acc, best_accuracy)\n    # drop the learning rate for the long final session\n    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9, weight_decay=5e-4)\n\n    for epoch in range(session2end, session3end):\n        train_acc = train(epoch)\n        test_acc = test(epoch)\n        best_accuracy=save_checkpoint(epoch, test_acc, best_accuracy)\n\n","sub_path":"results_compression/archive_workingcode/vgg_main2_tomodule_vggv1.py","file_name":"vgg_main2_tomodule_vggv1.py","file_ext":"py","file_size_in_byte":40182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
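The prune-and-freeze pattern used throughout the script above (zero the chosen filters once, then register a gradient hook so the optimizer never resurrects them) reduces to a few lines. A hedged sketch with an illustrative toy layer and filter indices, not code from the repository; unlike the in-place hooks above, it clones the gradient before masking:

import torch
import torch.nn as nn

conv = nn.Conv2d(3, 4, 3)            # toy layer
pruned = torch.tensor([1, 3])        # filter indices chosen for pruning (illustrative)

conv.weight.data[pruned] = 0         # zero the filters once
conv.bias.data[pruned] = 0

def freeze_pruned(grad):             # runs on every backward pass
    grad = grad.clone()              # avoid mutating the autograd-provided gradient
    grad[pruned] = 0
    return grad

conv.weight.register_hook(freeze_pruned)
conv.bias.register_hook(freeze_pruned)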
+{"seq_id":"29182992","text":"import tensorflow as tf \nimport numpy as np \n\n\n# Default hyperparameters\nhparams = tf.contrib.training.HParams(\n\t# Comma-separated list of cleaners to run on text prior to training and eval. For non-English\n\t# text, you may want to use \"basic_cleaners\" or \"transliteration_cleaners\".\n\t##cleaners='english_cleaners',\n    ##cleaners='transliteration_cleaners',\n    cleaners='basic_cleaners',\n\n\n\t#Audio\n\tnum_mels = 80, \n\tnum_freq = 513, #only used when adding linear spectrograms post processing network\n\trescale = True, \n\trescaling_max = 0.999,\n\ttrim_silence = True,\n\n    # Use LWS (https://github.com/Jonathan-LeRoux/lws) for STFT and phase reconstruction\n    # It's preferred to set True to use with https://github.com/r9y9/wavenet_vocoder\n    #add by zhyi 20180525\n    use_lws = False, #True, use lws or griffin-lim\n    #use_lws = True, #use lws or griffin-lim\n\n\t#Mel spectrogram\n    #'''\n    #for HT\n\t#fft_size = 1024,\n\t#hop_size = 256, #256, training used 256; synthesizing with 200 speeds the speech up but seems to improve quality\n    #interpolationX = 2, #real hop_size=hop_size/interpolationX\n    #n_fft = 512, #1024, #add by zhyi 20180525\n    #win_size = 512, #800, #For 22050Hz, 1100 ~= 50 ms (If None, win_size = n_fft) hop_size*X for lws\n\t#sample_rate = 16000, #22050 Hz (corresponding to ljspeech dataset)\n\t#frame_shift_ms = None,\n    #'''\n    #for YY10000\n    #fft_size = 1024,\n    #hop_size = 256, #256, training used 256; synthesizing with 200 speeds the speech up but seems to improve quality\n    #interpolationX = 3, #real hop_size=hop_size/interpolationX\n    #n_fft = 1024, #512, #1024, #add by zhyi 20180525\n    #win_size = 580, #580, #800, #For 22050Hz, 1100 ~= 50 ms (If None, win_size = n_fft) hop_size*X for lws\n\t#sample_rate = 16000, #22050 Hz (corresponding to ljspeech dataset)\n    #'''\n    #'''\n    #for YY10000 22050\n    fft_size = 1024,\n    hop_size = int(256*1.15), #256, training used 256; synthesizing with 200 speeds the speech up but seems to improve quality; must be set to 256 when using GTA!\n    interpolationX = 10, #real hop_size=hop_size/interpolationX , must be set to None when using GTA!\n    #hop_size = int(256*1), #256, training used 256; synthesizing with 200 speeds the speech up but seems to improve quality; must be set to 256 when using GTA!\n    #interpolationX = None, #real hop_size=hop_size/interpolationX , must be set to None when using GTA!\n    n_fft = 1024, #512, #1024, #add by zhyi 20180525\n    win_size = 800, #None, #580, #800, #For 22050Hz, 1100 ~= 50 ms (If None, win_size = n_fft) hop_size*X for lws\n\tsample_rate = 22050, #22050 Hz (corresponding to ljspeech dataset)\n    frame_shift_ms = None,\n    #'''\n\n\t#Mel and Linear spectrograms normalization/scaling and clipping\n\tsignal_normalization = True,\n\tallow_clipping_in_normalization = True, #Only relevant if mel_normalization = True\n\tsymmetric_mels = True, #Whether to scale the data to be symmetric around 0\n\tmax_abs_value = 4., #max absolute value of data. If symmetric, data will be [-max, max] else [0, max] \n\n\t#Limits\n\tmin_level_db = -100, #from -100\n\tref_level_db = 20,\n\t#fmin = 125, #for HT\n\tfmin = 100 - 0, #for YY1000\n\t#fmin = 50, #for YY1000\n\tfmax = 7600 + 0, #7600,\n\n\t#Griffin Lim\n\tpower = 1.55,#1.55, # 1.55 seems to give the best audio quality\n\t#power = 1.2, # 1.2 makes the robotic artifacts worse, not sure why; does it have to match the value used in training?\n\tgriffin_lim_iters = 60,\n\n\n\t#Tacotron\n\toutputs_per_step = 1, #number of frames to generate at each decoding step (speeds up computation and allows for higher batch size)\n\tstop_at_any = True, #Determines whether the decoder should stop when predicting to any frame or to all of them\n\n\tembedding_dim = 512, #dimension of embedding space\n\n\tenc_conv_num_layers = 3, #number of encoder convolutional layers\n\tenc_conv_kernel_size = (5, ), #size of encoder convolution filters for each layer\n\tenc_conv_channels = 512, #number of encoder convolution filters for each layer\n\tencoder_lstm_units = 256, #number of lstm units for each direction (forward and backward)\n\t#encoder_lstm_units = 128, #number of lstm units for each direction (forward and backward)\n\n\tsmoothing = False, #Whether to smooth the attention normalization function \n\tattention_dim = 128, #dimension of attention space\n\tattention_filters = 32, #number of attention convolution filters\n\tattention_kernel = (31, ), #kernel size of attention convolution\n\tcumulative_weights = True, #Whether to cumulate (sum) all previous attention weights or simply feed previous weights (Recommended: True)\n\n\tprenet_layers = [256, 256], #number of layers and number of units of prenet\n\tdecoder_layers = 2, #number of decoder lstm layers\n\tdecoder_lstm_units = 1024, #number of decoder lstm units on each layer\n\tmax_iters = 2500, #Max decoder steps during inference (Just for safety from infinite loop cases)\n\n\tpostnet_num_layers = 5, #number of postnet convolutional layers\n\tpostnet_kernel_size = (5, ), #size of postnet convolution filters for each layer\n\tpostnet_channels = 512, #number of postnet convolution filters for each layer\n\n\tmask_encoder = False, #whether to mask encoder padding while computing attention\n\timpute_finished = False, #Whether to use loss mask for padded sequences\n\tmask_finished = False, #Whether to mask alignments beyond the (False for debug, True for style)\n\n\tpredict_linear = False, #Whether to add a post-processing network to the Tacotron to predict linear spectrograms (True mode Not tested!!)\n\t#predict_linear = True, #Whether to add a post-processing network to the Tacotron to predict linear spectrograms (True mode Not tested!!)\n\n\n\t#Wavenet\n\t# Input type:\n\t# 1. raw [-1, 1]\n\t# 2. mulaw [-1, 1]\n\t# 3. mulaw-quantize [0, mu]\n\t# If input_type is raw or mulaw, network assumes scalar input and\n\t# discretized mixture of logistic distributions output, otherwise one-hot\n\t# input and softmax output are assumed.\n\t# **NOTE**: if you change one of the two parameters below, you need to\n\t# re-run preprocessing before training.\n\t# **NOTE**: scalar input (raw or mulaw) is experimental. Use it at your own risk.
\n\tinput_type=\"mulaw-quantize\",\n\tquantize_channels=256, # 65536 or 256\n\n\tsilence_threshold=2,\n\n\t# Mixture of logistic distributions:\n\tlog_scale_min=float(np.log(1e-14)),\n\n\t#TODO model params\n\n\n\t#Tacotron Training\n\ttacotron_batch_size = 16, #32+0, #number of training samples on each training step\n\ttacotron_reg_weight = 1e-6, #regularization weight (for l2 regularization)\n\ttacotron_scale_regularization = True, #Whether to rescale regularization weight to adapt for outputs range (used when reg_weight is high and biasing the model)\n\n\ttacotron_decay_learning_rate = True, #boolean, determines if the learning rate will follow an exponential decay\n\ttacotron_start_decay = 50000, #Step at which learning decay starts\n\ttacotron_decay_steps = 50000, #starting point for learning rate decay (and determines the decay slope) (UNDER TEST)\n\ttacotron_decay_rate = 0.4, #learning rate decay rate (UNDER TEST)\n\ttacotron_initial_learning_rate = 1e-3, #starting learning rate\n\ttacotron_final_learning_rate = 1e-5, #minimal learning rate\n\n\ttacotron_adam_beta1 = 0.9, #AdamOptimizer beta1 parameter\n\ttacotron_adam_beta2 = 0.999, #AdamOptimizer beta2 parameter\n\ttacotron_adam_epsilon = 1e-6, #AdamOptimizer epsilon parameter\n\n\ttacotron_zoneout_rate = 0.1, #zoneout rate for all LSTM cells in the network\n\ttacotron_dropout_rate = 0.5, #dropout rate for all convolutional layers + prenet\n\n\ttacotron_teacher_forcing_ratio = 1., #Value from [0., 1.], 0.=0%, 1.=100%, determines the % of times we force next decoder inputs\n\n\t#Wavenet Training TODO\n\n\t#Eval sentences\n\tsentences = [\n\t# From July 8, 2017 New York Times:\n    #'iD zhongA huaB haoC.',\n    #'iA erD sanA siD, uC liuD qiA baA.',\n    #'tianA longB baA buD.',\n\t#'anA sheA xvD uengC.',\n    #'baA biD saA qvC.',\n    'Sqing3wen4Anin2shi4Ayi4zhong1hua2Axian1sheng1Ama5?',\n    'Syi4zhong1hua2hao3?',\n    'Syi1er4san1si4Awu3liu4qi1ba1.',\n    'Syi1er4san1si4Awu3liu4qi1ba1?',\n    'Sda4yue5Ayi1bai3Ayi1shi5liu4Ashe4shi5du4.',\n    'Sda4yue1Ayi1bai3Ayi1shi2liu4Ashe4shi4du4?',\n    'Sdi4pai4Azhi4neng2Ake1ji4Ayou3xian4Agong1si1Czhi4neng2Ajiao1hu4Byin3ling3Awei4lai2.',\n    'Sdi4pai4zhi4neng2Ake1ji4you3xian4gong1si1.',\n    'Szhi4neng2jiao1hu4.',\n    'Syin3ling3Awei4lai2.',\n    'Sweng3.',\n    'Slvn1.',\n    'Sqing3Byi4zhong1hua2Cwu3qiu2Cniu2ge1Cwang1lei3Cwang2qing4Bzhu4yi4Cwu3yue4Aer4shi2ri4Bxia4wu3Asan1dian3Akai1hui4.',\n    \"Syu2jian4jun1Bwei4Amei3ge4Byou3Acai2neng2de5Aren2Cti2gong1Aping2tai2.\",\n\t]\n\n\t)\n\ndef hparams_debug_string():\n\tvalues = hparams.values()\n\thp = ['  %s: %s' % (name, values[name]) for name in sorted(values) if name != 'sentences']\n\treturn 'Hyperparameters:\\\n' + '\\\n'.join(hp)\n","sub_path":"T/deepox/ver1.0/modules/hparams.py","file_name":"hparams.py","file_ext":"py","file_size_in_byte":8639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
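Since the vocoder settings above select input_type="mulaw-quantize" with quantize_channels=256, here is a minimal NumPy sketch of the mu-law companding and quantization they imply; the helper is illustrative, not this module's API:

import numpy as np

def mulaw_quantize(x, mu=255):
    # map x in [-1, 1] to an integer in [0, mu]
    y = np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu)
    return ((y + 1) / 2 * mu + 0.5).astype(np.int64)

print(mulaw_quantize(np.array([-1.0, 0.0, 1.0])))  # [  0 128 255]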
\n\npickle_in = open(\"cnn\\\\output\\\\X_training.pickle\",\"rb\")\nX_training = np.array(pickle.load(pickle_in))\n\npickle_in = open(\"cnn\\\\output\\\\y_training.pickle\",\"rb\")\ny_training = np.array(pickle.load(pickle_in))\n\npickle_in = open(\"cnn\\\\output\\\\X_test.pickle\",\"rb\")\nX_test = np.array(pickle.load(pickle_in))\n\npickle_in = open(\"cnn\\\\output\\\\y_test.pickle\",\"rb\")\ny_test = np.array(pickle.load(pickle_in))
\n\nX_training = np.array(X_training/255.0) #Scaling pixel values down to the range 0 to 1\nX_test = np.array(X_test/255.0) #Scaling pixel values down to the range 0 to 1
\n\nmodel = Sequential([Conv2D(64,(2,2), activation='relu', input_shape = X_training.shape[1:]), # First layer\n                    MaxPooling2D(pool_size=(2,2)),\n\n                    Conv2D(128,(2,2), activation='relu'), # Second layer\n                    MaxPooling2D(pool_size=(2,2)),\n\n                    Conv2D(256,(2,2), activation='relu'), # Third layer\n                    MaxPooling2D(pool_size=(2,2)),\n\n                    Conv2D(256,(2,2), activation='relu'), # Fourth layer\n                    MaxPooling2D(pool_size=(2,2)),\n\n                    Flatten(),\n\n                    Dense(512, activation='relu'), # Fifth layer\n\n                    Dense(22, activation='softmax')]) # Output layer
\n\n# model.summary()\nadam = tf.keras.optimizers.Adam(learning_rate=0.001)\nmodel.compile(optimizer= adam, loss='SparseCategoricalCrossentropy', metrics=['acc'])\nmodel.fit(X_training, y_training, batch_size=32, epochs=1, callbacks = [tensorboard], validation_data=(X_test, y_test))
\n\nmodel.save('cnn\\\\output\\\\treinando_rede_all.model') # Saving the neural network","sub_path":"cnn/Create_neural_cnn_all.py","file_name":"Create_neural_cnn_all.py","file_ext":"py","file_size_in_byte":2319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"204549239","text":"import json\nfrom datetime import datetime\nfrom api import cfg\nfrom app import app\nfrom flask_sqlalchemy import SQLAlchemy
\n\n\napp.config['SQLALCHEMY_DATABASE_URI'] = cfg['database']['url']\napp.config['SQLALCHEMY_ECHO'] = cfg['database']['debug']\napp.config['SQLALCHEMY_POOL_SIZE'] = 100\napp.config['SQLALCHEMY_POOL_TIMEOUT'] = 10\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\n\ndb = SQLAlchemy(app)
\n\n\nclass UserKeyword(db.Model):\n    _id = db.Column(db.Integer, autoincrement=True, primary_key=True)\n    id = db.Column(db.String(35))\n    author = db.Column(db.String(35))\n    keyword = db.Column(db.String(128), nullable=False)\n    reply = db.Column(db.String(2048), nullable=False)\n    super = db.Column(db.Boolean)\n    level = db.Column(db.Integer)
\n\n    def __init__(self, id, author, keyword, reply):\n        self.id = id\n        self.author = author\n        self.keyword = keyword\n        self.reply = reply\n        self.super = ('**' in keyword)\n        self.level = len(keyword) - keyword.count('**')*(len('**')+1)
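\n        # ('**' appears to act as a wildcard marker: it drives the 'super' flag, and
\n        # 'level' discounts each '**' so that longer literal keywords are matched first)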
\n\n    @staticmethod\n    def add_and_update(id, author, keyword, reply):\n        for row in UserKeyword.query.filter_by(id=id, keyword=keyword):\n            row.author = author\n            row.reply = reply\n            break\n        else:\n            db.session.add(UserKeyword(id, author, keyword, reply))\n        return True
\n\n    @staticmethod\n    def delete(id, keyword):\n        for row in UserKeyword.query.filter_by(id=id, keyword=keyword):\n            db.session.delete(row)\n            return True\n        return False
\n\n    @staticmethod\n    def get(id, keyword=None):\n        if keyword is None:\n            return list(UserKeyword.query.filter_by(id=id).order_by(UserKeyword.level.desc(), UserKeyword._id.desc()))\n        else:\n            for row in UserKeyword.query.filter_by(id=id, keyword=keyword):\n                return row\n            else:\n                return None
\n\n\nclass MessageLogs(db.Model):\n    _id = db.Column(db.Integer, autoincrement=True, primary_key=True)\n    group_id = db.Column(db.String(35))\n    user_id = db.Column(db.String(35))\n    nAIset = db.Column(db.Integer, server_default='0') # number of times the bot (Ai-chan) was configured\n    nAItrigger = db.Column(db.Integer, server_default='0') # number of times the bot was triggered\n    nText = db.Column(db.Integer, server_default='0') # number of text messages; the counters below follow the same pattern\n    nSticker = db.Column(db.Integer, server_default='0')\n    nImage = db.Column(db.Integer, server_default='0')\n    nUrl = db.Column(db.Integer, server_default='0')\n    nFuck = db.Column(db.Integer, server_default='0') # number of swear words\n    nLenght = db.Column(db.BigInteger, server_default='0') # total length of all text
\n\n    def __init__(self, group_id, user_id, nAIset=0, nAItrigger=0, nText=0, nSticker=0, nImage=0, nUrl=0, nFuck=0, nLenght=0):\n        self.group_id = group_id\n        self.user_id = user_id\n        self.nAIset = nAIset\n        self.nAItrigger = nAItrigger\n        self.nText = nText\n        self.nSticker = nSticker\n        self.nImage = nImage\n        self.nUrl = nUrl\n        self.nFuck = nFuck\n        self.nLenght = nLenght
\n\n    @staticmethod\n    def add(group_id, user_id, nAIset=0, nAItrigger=0, nText=0, nSticker=0, nImage=0, nUrl=0, nFuck=0, nLenght=0):\n        data = MessageLogs.query.filter_by(group_id=group_id, user_id=user_id).first()\n        if data is None:\n            db.session.add(MessageLogs(group_id, user_id, nAIset, nAItrigger, nText, nSticker, nImage, nUrl, nFuck, nLenght))\n        else:\n            data.nAIset += nAIset\n            data.nAItrigger += nAItrigger\n            data.nText += nText\n            data.nSticker += nSticker\n            data.nImage += nImage\n            data.nUrl += nUrl\n            data.nFuck += nFuck\n            data.nLenght += nLenght
\n\n    @staticmethod\n    def get(group_id):\n        data = {}.fromkeys(['users', 'nAIset', 'nAItrigger', 'nText', 'nSticker', 'nImage', 'nUrl', 'nFuck', 'nLenght'], 0)\n        for row in MessageLogs.query.filter_by(group_id=group_id):\n            data['users'] += 1\n            data['nAIset'] += row.nAIset\n            data['nAItrigger'] += row.nAItrigger\n            data['nText'] += row.nText\n            data['nSticker'] += row.nSticker\n            data['nImage'] += row.nImage\n            data['nUrl'] += row.nUrl\n            data['nFuck'] += row.nFuck\n            data['nLenght'] += row.nLenght\n        return data
\n\n\n\nclass UserSettings(db.Model):\n    id = db.Column(db.String(35), primary_key=True)\n    last_time = db.Column(db.DateTime)\n    news = db.Column(db.TEXT)\n    options = db.Column(db.TEXT)\n\n    def __init__(self, id):\n        self.id = id\n        self.news = None\n        self.options = '{}'
\n\n    @staticmethod\n    def __get(id):\n        data = UserSettings.query.get(id)\n        if data is None:\n            data = UserSettings(id)\n            db.session.add(data)\n        return data\n\n    '''\n    for row in UserSettings.query.filter_by(id=id):\n        return row\n    else:\n        data = UserSettings(id)\n        db.session.add(data)\n        return data\n    '''\n    \n    @staticmethod\n    def refresh_last_time(id):\n        data = UserSettings.__get(id)\n        data.last_time = datetime.now()
\n\n    @staticmethod\n    def check_news(id):\n        data = UserSettings.__get(id)\n        if data.news != cfg['公告']['ver']:\n            data.news = cfg['公告']['ver']\n            return True\n        return False
\n\n    @staticmethod\n    def update(id, **options):\n        data = UserSettings.__get(id)\n        data_options = json.loads(data.options)\n        for opt, val in options.items():\n            data_options[opt] = val\n        data.options = json.dumps(data_options)
\n\n    @staticmethod\n    def get(id, option, default=None):\n        data = UserSettings.__get(id)\n        return json.loads(data.options).get(option, default)\n\nif __name__ == '__main__':\n    db.create_all()\n","sub_path":"server/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":5938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} 
+{"seq_id":"508178796","text":"import xml.etree.ElementTree as ET\nimport glob\nimport mmap\nimport os\nimport re\nimport json\nimport unicodedata\nimport argparse\nimport cv2\nimport numpy as np\nfrom collections import defaultdict\n\nALLOWABLE_CHANNELS = [\"CH1\", \"CH2\", \"CH3\", \"CH4\", \"CH5\"]\n\ndef extract_xml(path):\n \"\"\"\n Extracts hidden XML metadata from Keyence BZ-X microscope TIFF images.\n\n Args:\n f (obj): A path to a TIFF file.\n \n Returns:\n Safe XML string in ASCII encoding\n \"\"\"\n xml = \"\"\n with open(path, \"r+\") as tif:\n mm = mmap.mmap(tif.fileno(), 0)\n midx = mm.find(b\" 0\n max_x_key = file_lst[0]\n max_y_key = file_lst[0]\n for f in file_lst:\n if (corner_dict[f][0] > corner_dict[max_x_key][0]):\n max_x_key = f\n\n if (corner_dict[f][1] > corner_dict[max_y_key][1]):\n max_y_key = f\n \n return (corner_dict[max_x_key][0] + size_dict[max_x_key][0], corner_dict[max_y_key][1] + size_dict[max_y_key][1])\n\ndef blend(file_lst, outpath, corner_dict, size_dict, channels=1):\n \"\"\"\n Blend files through by averaging overlaps\n \"\"\"\n full_size = get_blended_size(file_lst, corner_dict, size_dict)\n output = np.zeros(shape=(full_size[1], full_size[0]), dtype=np.uint16)\n # logger.debug(\"Blend output size {}\".format(output.shape))\n for f in file_lst:\n im = None\n im = cv2.imread(f, -1)\n im = np.sum(im, axis=2)\n corner = corner_dict[f]\n size = size_dict[f]\n output[corner[1]:corner[1] + size[1], corner[0]:corner[0] + size[0]] = (output[corner[1]:corner[1] + size[1], corner[0]:corner[0] + size[0]] + im) / 2\n cv2.imwrite(outpath, output)\n\n\ndef get_channel_lists(file_lst):\n \"\"\"\n Takes in a list of file paths and returns a dictionary of paths\n indexed by the channel name\n \"\"\"\n channels = defaultdict(list)\n for f in file_lst:\n fields = f.split(\"_\")[-1].split(\".\")\n \n if (len(fields) == 2 and fields[0] in ALLOWABLE_CHANNELS and (fields[1] == \"TIF\" or fields[1] == \"tif\")):\n channels[fields[0]].append(f)\n return channels\n\ndef dir_path(d):\n \"\"\" Verifies validity of directory provided. 
\n\n Args:\n d: A string representing a directory.\n \"\"\"\n if os.path.isdir(d):\n return d\n else:\n raise NotADirectoryError(d)\n\ndef output_stitching(corners, path):\n with open(path, 'w') as f:\n f.write(json.dumps(corners))\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"Iris: Stitch images from fluorescence microscopy\")\n parser.add_argument(\n '--path',\n dest=\"IMAGE_DIR\",\n type=dir_path,\n help=\"Path containing images to be analyzed\",\n required=True)\n\n args = parser.parse_args()\n file_lst = glob.glob(args.IMAGE_DIR + \"/*.tif\")\n channels = get_channel_lists(file_lst) \n verified_files = [item for sublist in channels.values() for item in sublist]\n corners, size = get_stitching(verified_files)\n output_stitching(corners, \"stitching.json\")\n print(\"Outputted stitching positions to stitching.json\")\n for channel in channels:\n path = channel + \"_stitched.tif\"\n print(\"Outputting blended {channel} to {path}.\".format(channel=channel, path=path))\n blend(channels[channel], path, corners, size)\n print(\"Outputted blended {channel} to {path}.\".format(channel=channel, path=path))\n\nif __name__ == \"__main__\":\n main()","sub_path":"keyence_extractor.py","file_name":"keyence_extractor.py","file_ext":"py","file_size_in_byte":5896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"377484348","text":"\"\"\"\nMain Application Module\n\nContains our PyDMApplication class with core connection and loading logic and\nour PyDMMainWindow class with navigation logic.\n\"\"\"\nimport os\nimport imp\nimport sys\nimport signal\nimport subprocess\nimport re\nimport shlex\nimport json\nimport inspect\nimport warnings\nfrom .display_module import Display\nfrom .PyQt.QtCore import Qt, QEvent, QTimer, pyqtSlot\nfrom .PyQt.QtGui import QApplication, QColor, QWidget, QToolTip, QClipboard\nfrom .PyQt import uic\nfrom .main_window import PyDMMainWindow\nfrom .utilities import macro, which\nfrom . import data_plugins\n\nDEFAULT_PROTOCOL = os.getenv(\"PYDM_DEFAULT_PROTOCOL\")\nif DEFAULT_PROTOCOL is not None:\n # Get rid of the \"://\" part if it exists\n DEFAULT_PROTOCOL = DEFAULT_PROTOCOL.split(\"://\")[0]\n\nclass PyDMApplication(QApplication):\n # Instantiate our plugins.\n plugins = {plugin.protocol: plugin() for plugin in data_plugins.plugin_modules}\n\n # HACK. To be replaced with some stylesheet stuff eventually.\n alarm_severity_color_map = {\n 0: QColor(0, 0, 0), # NO_ALARM\n 1: QColor(220, 220, 20), # MINOR_ALARM\n 2: QColor(240, 0, 0), # MAJOR_ALARM\n 3: QColor(240, 0, 240) # INVALID_ALARM\n }\n\n # HACK. To be replaced with some stylesheet stuff eventually.\n connection_status_color_map = {\n False: QColor(255, 255, 255),\n True: QColor(0, 0, 0)\n }\n\n def __init__(self, ui_file=None, command_line_args=[], display_args=[], perfmon=False, macros=None):\n super(PyDMApplication, self).__init__(command_line_args)\n # The macro and directory stacks are needed for nested displays (usually PyDMEmbeddedDisplays).\n # During the process of loading a display (whether from a .ui file, or a .py file), the application's\n # 'open_file' method will be called recursively. Inside open_file, the last item on the stack represents\n # the parent widget's file path and macro variables. Any file paths are joined to the end of the parent's\n # file path, and any macros are merged with the parent's macros. 
This system depends on open_file always\n        # being called hierarchically (i.e., parent calls it first, then on down the ancestor tree, with no unrelated\n        # calls in between). If something crazy happens and PyDM somehow gains the ability to open files in a\n        # multi-threaded way, for example, this system will fail.
\n        self.directory_stack = ['']\n        self.macro_stack = [{}]\n        self.windows = {}\n        self.display_args = display_args\n        # Open a window if one was provided.\n        if ui_file is not None:\n            self.make_window(ui_file, macros, command_line_args)\n            self.had_file = True\n        else:\n            self.had_file = False\n        # Re-enable sigint (usually blocked by pyqt)\n        signal.signal(signal.SIGINT, signal.SIG_DFL)
\n\n        # Performance monitoring\n        if perfmon:\n            import psutil\n            self.perf = psutil.Process()\n            self.perf_timer = QTimer()\n            self.perf_timer.setInterval(2000)\n            self.perf_timer.timeout.connect(self.get_CPU_usage)\n            self.perf_timer.start()
\n\n    def exec_(self):\n        \"\"\"\n        Execute the QApplication\n        \"\"\"\n        # Connect to top-level widgets that were not loaded from file\n        # These are usually testing/debug widgets\n        if not self.had_file:\n            self.make_connections()\n        return super(PyDMApplication, self).exec_()
\n\n\n    @pyqtSlot()\n    def get_CPU_usage(self):\n        with self.perf.oneshot():\n            total_percent = self.perf.cpu_percent(interval=None)\n            total_time = sum(self.perf.cpu_times())\n            usage = [total_percent * ((t.system_time + t.user_time) / total_time) for t in self.perf.threads()]\n            print(\"Total: {tot}, Per Thread: {percpu}\".format(tot=total_percent, percpu=usage))
\n\n    def make_connections(self):\n        for widget in self.topLevelWidgets():\n            self.establish_widget_connections(widget)
\n\n    def new_pydm_process(self, ui_file, macros=None, command_line_args=None):\n        path_and_args = shlex.split(str(ui_file))\n        filepath = path_and_args[0]\n        filepath_args = path_and_args[1:]\n        pydm_display_app_path = which(\"pydm\")
\n\n        if pydm_display_app_path is None:\n            if os.environ.get(\"PYDM_PATH\") is not None:\n                pydm_display_app_path = os.path.join(os.environ[\"PYDM_PATH\"], \"pydm\")\n            else:\n                # Not in the PATH and no ENV VAR pointing to it...\n                # Let's try the script folder...\n                pydm_display_app_path = os.path.join(os.path.split(os.path.realpath(__file__))[0], \"..\", \"scripts\", \"pydm\")
\n\n        args = [pydm_display_app_path]\n        if macros is not None:\n            args.extend([\"-m\", json.dumps(macros)])\n        args.append(filepath)\n        args.extend(self.display_args)\n        args.extend(filepath_args)\n        subprocess.Popen(args, shell=False)
\n\n    def new_window(self, ui_file, macros=None, command_line_args=None):\n        \"\"\"new_window() gets called whenever a request to open a new window is made.\"\"\"\n        # All new windows are spawned as new processes.\n        self.new_pydm_process(ui_file, macros, command_line_args)
\n\n    def make_window(self, ui_file, macros=None, command_line_args=None):\n        \"\"\"make_window instantiates a new PyDMMainWindow, adds it to the\n        application's list of windows, and opens ui_file in the window.\"\"\"\n        main_window = PyDMMainWindow()\n        main_window.open_file(ui_file, macros, command_line_args)\n        main_window.show()\n        self.windows[main_window] = os.path.dirname(ui_file)\n        # If we are launching a new window, we don't want it to sit right on top of an existing window.\n        if len(self.windows) > 1:\n            main_window.move(main_window.x() + 10, main_window.y() + 10)
\n\n    def close_window(self, window):\n        del self.windows[window]
\n\n    def load_ui_file(self, uifile, macros=None):\n        if macros is not None:\n            f = macro.substitute_in_file(uifile, macros)\n        else:\n            f = uifile\n        return uic.loadUi(f)
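\n    # e.g. (sketch) with macros={'P': 'XPP'}, macro.substitute_in_file replaces ${P}
\n    # tokens in the .ui file with 'XPP' before the result is handed to uic.loadUi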
\n\n    def __sanity_check_pyqt(self, cls):\n        for itm in dir(cls):\n            i = getattr(cls, itm)\n            if hasattr(i, \"__file__\"):\n                if any([True if v in i.__file__ else False for v in [\"PyQt4\", \"PyQt5\"]]):\n                    warnings.warn(\"Direct PyQt5/PyQt4 import detected. To ensure compatibility with PyQt4 and PyQt5 consider using: pydm.PyQt for your imports.\", RuntimeWarning, stacklevel=0)\n                    return
\n\n    def load_py_file(self, pyfile, args=None, macros=None):\n        # Add the intelligence module directory to the python path, so that submodules can be loaded. Eventually, this should go away, and intelligence modules should behave as real python modules.\n        module_dir = os.path.dirname(os.path.abspath(pyfile))\n        sys.path.append(module_dir)
\n\n        # Now load the intelligence module.\n        module = imp.load_source('intelclass', pyfile)\n        self.__sanity_check_pyqt(module)\n        if hasattr(module, 'intelclass'):\n            cls = module.intelclass\n            if not issubclass(cls, Display):\n                raise ValueError(\"Invalid class definition at file {}. {} does not inherit from Display. Nothing to open at this time.\".format(pyfile, cls.__name__))\n        else:\n            classes = [obj for name, obj in inspect.getmembers(module) if inspect.isclass(obj) and issubclass(obj, Display) and obj != Display]\n            if len(classes) == 0:\n                raise ValueError(\"Invalid File Format. {} has no class inheriting from Display. Nothing to open at this time.\".format(pyfile))\n            if len(classes) > 1:\n                warnings.warn(\"More than one Display class in file {}. The first occurrence (in alphabetical order) will be opened: {}\".format(pyfile, classes[0].__name__), RuntimeWarning, stacklevel=2)\n            cls = classes[0]
\n\n        try:\n            # This only works in python 3 and up.\n            module_params = inspect.signature(cls).parameters\n        except AttributeError:\n            # Works in python 2, deprecated in 3.0 and up.\n            module_params = inspect.getargspec(cls.__init__).args
\n\n        # Because older versions of Display may not have the args parameter or the macros parameter, we check\n        # to see if it does before trying to use them.\n        kwargs = {}\n        if 'args' in module_params:\n            kwargs['args'] = args\n        if 'macros' in module_params:\n            kwargs['macros'] = macros\n        return cls(**kwargs)
\n\n    def open_file(self, ui_file, macros=None, command_line_args=None):\n        # First split the ui_file string into a filepath and arguments\n        args = command_line_args if command_line_args is not None else []\n        split = shlex.split(ui_file)\n        filepath = split[0]\n        args.extend(split[1:])\n        self.directory_stack.append(os.path.dirname(filepath))\n        (filename, extension) = os.path.splitext(filepath)\n        if macros is None:\n            macros = {}\n        merged_macros = self.macro_stack[-1].copy()\n        merged_macros.update(macros)\n        self.macro_stack.append(merged_macros)\n        if extension == '.ui':\n            widget = self.load_ui_file(filepath, merged_macros)\n        elif extension == '.py':\n            widget = self.load_py_file(filepath, args, merged_macros)\n        else:\n            self.directory_stack.pop()\n            self.macro_stack.pop()\n            raise ValueError(\"invalid file type: {}\".format(extension))\n        self.establish_widget_connections(widget)\n        self.directory_stack.pop()\n        self.macro_stack.pop()\n        return widget
\n\n    # get_path gives you the path to ui_file relative to where you are running pydm from.\n    # Many widgets handle file paths (related display, embedded display, and drawing image come to mind)\n    # and the standard is that they expect paths to be given relative to the .ui or .py file in which the\n    # widget lives. But, python and Qt want the file path relative to the directory you are running\n    # pydm from. This function does that translation.
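\n    # e.g. (sketch) a display opened from /home/ops/screens/main.ui that embeds \"sub/panel.ui\"
\n    # has the path resolved to /home/ops/screens/sub/panel.ui by get_path() below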
\n    def get_path(self, ui_file):\n        dirname = self.directory_stack[-1]\n        full_path = os.path.join(dirname, str(ui_file))\n        return full_path
\n\n    def open_relative(self, ui_file, widget, macros=None, command_line_args=[]):\n        \"\"\"open_relative opens a ui file with a relative path. This is\n        really only used by embedded displays.\"\"\"\n        full_path = self.get_path(ui_file)\n        return self.open_file(full_path, macros=macros, command_line_args=command_line_args)
\n\n    def initialize_plugins(self):\n        # The data plugins are already instantiated once, at class definition time,\n        # via the 'plugins' class attribute above, so there is nothing left to do here.\n        pass
\n\n    def plugin_for_channel(self, channel):\n        if channel.address is None or channel.address == \"\":\n            return None\n        protocol = None\n        match = re.match('.*://', channel.address)\n        if match:\n            protocol = match.group(0)[:-3]\n        elif DEFAULT_PROTOCOL is not None:\n            # If no protocol was specified, and the default protocol environment variable is specified, try to use that instead.\n            protocol = DEFAULT_PROTOCOL\n        try:\n            plugin_to_use = self.plugins[str(protocol)]\n            return plugin_to_use\n        except KeyError:\n            print(\"Couldn't find plugin for protocol: {0}\".format(protocol))\n            warnings.warn(\"Channel {addr} did not specify a valid protocol and no default protocol is defined. This channel will receive no data. To specify a default protocol, set the PYDM_DEFAULT_PROTOCOL environment variable.\".format(addr=channel.address), RuntimeWarning, stacklevel=2)\n            return None
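\n\n    # e.g. an address like \"ca://MTEST:Float\" is routed to the plugin registered for \"ca\";
\n    # a bare \"MTEST:Float\" falls back to PYDM_DEFAULT_PROTOCOL when that variable is set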
\n\n    def add_connection(self, channel):\n        plugin = self.plugin_for_channel(channel)\n        if plugin:\n            plugin.add_connection(channel)
\n\n    def remove_connection(self, channel):\n        plugin = self.plugin_for_channel(channel)\n        if plugin:\n            plugin.remove_connection(channel)
\n\n    def eventFilter(self, obj, event):\n        if event.type() == QEvent.MouseButtonPress:\n            if event.button() == Qt.MiddleButton:\n                self.show_address_tooltip(obj, event)\n                return True\n        return False
\n\n    # Not sure if showing the tooltip should be the job of the app,\n    # may want to revisit this.\n    def show_address_tooltip(self, obj, event):\n        addr = obj.channels()[0].address\n        QToolTip.showText(event.globalPos(), addr)\n        # If the address has a protocol, and it is the default protocol, strip it out before putting it on the clipboard.\n        m = re.match('(.+?):/{2,3}(.+?)$', addr)\n        if m is not None and DEFAULT_PROTOCOL is not None and m.group(1) == DEFAULT_PROTOCOL:\n            QApplication.clipboard().setText(m.group(2), mode=QClipboard.Selection)\n        else:\n            QApplication.clipboard().setText(addr, mode=QClipboard.Selection)
\n\n    def establish_widget_connections(self, widget):\n        widgets = [widget]\n        widgets.extend(widget.findChildren(QWidget))\n        for child_widget in widgets:\n            try:\n                if hasattr(child_widget, 'channels'):\n                    for channel in child_widget.channels():\n                        self.add_connection(channel)\n                    # Take this opportunity to install a filter that intercepts middle-mouse clicks,\n                    # which we use to display a tooltip with the address of the widget's first channel.\n                    child_widget.installEventFilter(self)\n            except NameError:\n                pass
\n\n    def close_widget_connections(self, widget):\n        widgets = [widget]\n        widgets.extend(widget.findChildren(QWidget))\n        for child_widget in widgets:\n            try:\n                if hasattr(child_widget, 'channels'):\n                    for channel in child_widget.channels():\n                        self.remove_connection(channel)\n            except NameError:\n                pass\n","sub_path":"pydm/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":14979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"27880295","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\nimport argparse\n\nimport pandas as pd\nimport numpy as np\nimport sys\n\nfrom gensim.models import Word2Vec\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity
\n\n# In[3]:\n# Define the parser\nparser = argparse.ArgumentParser(description='Short sample app')\nargs = sys.argv[1]\n\nfile_name=\"prepdata.pkl\"\ndf2 = pd.read_pickle(file_name)\n\n# In[4]:\ndf2.head(5)\n\n# In[7]:\npreprocessed_data = df2
\n\ndef getres(recherche):\n\n    prep_data = preprocessed_data[\"preprocessed\"].tolist()\n    vectorizer = TfidfVectorizer(max_df=.65, min_df=1, stop_words=\"english\", use_idf=True, norm=None)\n    tfidf = vectorizer.fit_transform(prep_data)\n\n\n    cosine_similarities = cosine_similarity(tfidf, vectorizer.transform([recherche])).flatten()\n    dftest = preprocessed_data[0:0]\n    resultat = []
\n\n    for i in range(20):\n        index = []\n        best_match_index = cosine_similarities.argmax()\n        index.append(best_match_index)\n\n        cosine_similarities = np.delete(cosine_similarities, best_match_index)\n        resultat.append(preprocessed_data.loc[best_match_index,\"text\"])\n\n\n\n    return resultat\n\n\n# In[8]:\nres = getres(args)\nwith open(sys.argv[2], 'w') as 
fd:\n\tfd.write(str(res))\nprint(res)","sub_path":"tweets-tf-idf.py","file_name":"tweets-tf-idf.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"542492765","text":"#!/usr/bin/env python\n# -*- coding: utf8 -*-\n\nWINDOW_SIZE = 275, 1200\n\nimport gtk\nimport clockcal\nimport weather\nimport sysmon\nimport rtm\nimport googlecal\nimport money\nimport logging\n\nlogging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',\n filename='cornitor.log',\n level=logging.DEBUG)\n\nclass MainWindow(gtk.Window):\n def __init__(self):\n super(MainWindow, self).__init__()\n\n self.set_title('Cornitor')\n self.set_size_request(*WINDOW_SIZE)\n self.set_position(gtk.WIN_POS_CENTER)\n\n clockcal_box = clockcal.OutBox()\n weather_box = weather.OutBox()\n sysmon_box = sysmon.OutBox()\n rtm_box = rtm.OutBox()\n gcal_box = googlecal.OutBox()\n money_box = money.OutBox()\n master_box = gtk.VBox(False, 5)\n master_box.pack_start(clockcal_box)\n master_box.pack_start(weather_box)\n master_box.pack_start(sysmon_box)\n master_box.pack_start(rtm_box)\n master_box.pack_start(gcal_box)\n master_box.pack_start(money_box)\n align = gtk.Alignment(0.5, 0, 0, 0)\n align.add(master_box)\n self.add(align)\n\n self.connect(\"destroy\", gtk.main_quit)\n self.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color(0, 0, 0))\n self.set_decorated(False)\n self.set_wmclass('cornitor', 'cornitor')\n self.set_skip_taskbar_hint(True)\n self.show_all()\n\n\nMainWindow()\ngtk.main()","sub_path":"cornitor.py","file_name":"cornitor.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"26568691","text":"import Augmentor\nfrom PIL import Image\nfrom PIL import ImageDraw\nimport os\nimport glob\nimport random\nfrom utils import *\n\n\ndef add_logo(ori_img_path, logo_img_path):\n\tori_img1 = Image.open(ori_img_path)\n\tori_img2 = Image.open(ori_img_path)\n\tori_img3 = Image.open(ori_img_path)\n\n\tlogo_img = Image.open(logo_img_path)\n\tresize_logo_img1 = logo_img.resize((95, 95))\n\tresize_logo_img2 = logo_img.resize((134, 134))\n\tresize_logo_img3 = logo_img.resize((164, 164))\n\n\tori_img1.paste(resize_logo_img1, (120, 10), resize_logo_img1)\n\tori_img2.paste(resize_logo_img2, (10, 10), resize_logo_img2)\n\tori_img3.paste(resize_logo_img3, (10, 10), resize_logo_img3)\n\n\tpath_tmp = parse_glob(ori_img_path)\n\t# output_path = \"./thumb/output/thumb_logo_\" + path_tmp\n\toutput_path1 = \"./logo_output10/thumb_logo10_\" + path_tmp\n\toutput_path2 = \"./logo_output20/thumb_logo20_\" + path_tmp\n\toutput_path3 = \"./logo_output30/thumb_logo30_\" + path_tmp\n\n\tori_img1.save(output_path1)\n\tori_img2.save(output_path2)\n\tori_img3.save(output_path3)\n\n\ndef add_caption(ori_img_path):\n\tori_img = Image.open(ori_img_path)\n\tdraw = ImageDraw.Draw(ori_img)\n\tdraw.text((10, 10), \"Sogang University\", fill=(0, 0, 0))\n\tdraw.text((40, 140), \"Multimedia System lab\", fill=(0, 0, 0))\n\tpath_tmp = parse_glob(ori_img_path)\n\t# output_path = \"./thumb/output/thumb_caption_\" + path_tmp\n\toutput_path = \"./caption_output/thumb_caption_\" + path_tmp\n\tori_img.save(output_path)\n\n\ndef add_border(ori_img_path):\n\tborder_image1 = Image.open(\"resize_logo_300.jpg\")\n\tborder_image2 = Image.open(\"resize_logo_300.jpg\")\n\tborder_image3 = Image.open(\"resize_logo_300.jpg\")\n\tori_img = Image.open(ori_img_path)\n\n\tborder_image1.paste(ori_img.resize((284, 
284)), (8, 8))\n\tborder_image2.paste(ori_img.resize((268, 268)), (16, 16))\n\tborder_image3.paste(ori_img.resize((251, 251)), (25, 25))
\n\n\tpath_tmp = parse_glob(ori_img_path)\n\t# output_path = \"./thumb/output/thumb_border_\" + path_tmp\n\toutput_path1 = \"./border_output10/thumb_border10_\" + path_tmp\n\toutput_path2 = \"./border_output20/thumb_border20_\" + path_tmp\n\toutput_path3 = \"./border_output30/thumb_border30_\" + path_tmp
\n\n\tborder_image1.save(output_path1)\n\tborder_image2.save(output_path2)\n\tborder_image3.save(output_path3)
\n\n\nif __name__ == \"__main__\":\n\n\t_THUMB_FOLDER = \"./ResizeData\"\n\t_OUTPUT_FOLDER_noise = \"../noise_output\"\n\tp_noise = Augmentor.Pipeline(_THUMB_FOLDER, _OUTPUT_FOLDER_noise)\n\tp_noise.random_erasing(probability=1.0, rectangle_area=0.5)\n\tp_noise.sample(1099)
\n\n\toriginal_image_list = glob.glob(\"./ResizeData/*.jpg\")\n\tfor idx, img_path in enumerate(original_image_list):\n\t\tadd_caption(img_path)\n\t\tadd_logo(img_path, \"minions_PNG84.png\")\n\t\tadd_border(img_path)\n\tprint (\"{} / {}\".format(idx+1, len(original_image_list)))\n","sub_path":"img_augmentor.py","file_name":"img_augmentor.py","file_ext":"py","file_size_in_byte":2710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"565894217","text":"#https://programmers.co.kr/learn/courses/30/lessons/12913\r\n\"\"\"\r\n2D DP practice problem:\r\ngoing down one row at a time, add to each cell the max of the previous row excluding the cell directly above it.\r\n\"\"\"
\r\n\r\ndef solution(land):\r\n    for i in range(1,len(land)):\r\n        for j in range(4):\r\n            land[i][j] += max(land[i-1][:j]+land[i-1][j+1:])\r\n    return max(land[-1])\r\n\r\nland = [[1,2,3,5],[5,6,7,8],[4,3,2,1]]\r\nprint(solution(land))
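\r\n# worked example: for the sample 'land' above the answer is 16 (5 + 7 + 4),
\r\n# since the same column may never be stepped on in two consecutive rows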
\r\n","sub_path":"Level 2/땅따먹기.py","file_name":"땅따먹기.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"502398239","text":"# Author: Chukwubuikem Ume-Ugwa\n# Purpose: Mapper logic.\n\nfrom collections import defaultdict, OrderedDict, deque\nfrom math import floor\nfrom uuid import uuid4\n\nfrom libs.analyzers import SentimentAnalyzer as sa\nfrom libs.graphs import AdjacencyListUnDirected\nfrom libs.models import (Node, TreeNode, Edge)\nfrom libs.utils import ClusterUtil
\n\n\nclass Mapper:\n    \"\"\"\n    Base class for mapper implementation\n    Attributes:\n        - data: list of graph edges or nodes\n        - epsilon: sensitivity of overlap between intervals\n        - property_key: lens\n        - num_interval: number of intervals to split data\n    \"\"\"\n    JACCARD_THRESH = 0.1
\n\n    def __init__(self, data=None, epsilon=0.5, lens=\"reading_level\", num_interval=3):\n        if data is None:\n            data = []\n        self.data = data\n        self.epsilon = epsilon\n        self.lens = lens\n        self.number_of_interval = num_interval\n        # For nodes without property_key as field\n        self.heterogeneous_edges = []\n        self._average = False # use to determine how to aggregate cluster nodes - mean or median
\n\n    @property\n    def average(self):\n        return self._average\n\n    @average.setter\n    def average(self, value):\n        self._average = value
\n\n    def connect_cluster(self, clusters):\n        \"\"\"\n        creates a graph from the interval clusters\n        Nodes are connected if their Jaccard is > 0.1\n\n        @param\n        - clusters: a dict w/ key = cluster name and value = list of nodes\n        \"\"\"\n        id = 0 # edge id\n        new_nodes = {}\n\n        edges = []\n        # create cluster node\n        for name, cluster in clusters.items():\n            new_nodes[name] = ClusterUtil.create_cluster_node(name, self.average, cluster, self.attr_list(cluster[0]))\n            new_nodes[name]['radius'] += len(cluster) // 2
\n\n        # connect clusters based on node overlap\n        names = list(clusters.keys())\n        clusters = list(clusters.values())\n        n = len(names)
\n\n        for i in range(n):\n            cluster = set(clusters[i])\n            for j in range(i + 1, n):\n                nextCluster = set(clusters[j])\n                # skip this edge if the Jaccard index is less than 0.1\n                j_index = round(self.jaccard_index(cluster, nextCluster), 16)\n                if j_index < Mapper.JACCARD_THRESH:\n                    continue\n\n                if not cluster.isdisjoint(nextCluster) and new_nodes[names[i]] != new_nodes[names[j]]:\n                    edges.append(Edge(new_nodes[names[i]], new_nodes[names[j]], id=id, type=j_index))\n                    id += 1\n\n        return edges
\n\n    def jaccard_index(self, A, B):\n        \"\"\"\n        Calculates the Jaccard index of two sets\n\n        @params\n        - A: a set of nodes\n        - B: a set of nodes\n        \"\"\"\n        if not isinstance(A, set) or not isinstance(B, set):\n            raise TypeError(\"A and B must be sets\")\n\n        j = len(A.intersection(B)) / len(A.union(B))\n\n        return j
\n\n    def attr_list(self, obj):\n        \"\"\"\n        returns the attribute list of the obj\n\n        @params\n        - obj: a class object\n        \"\"\"\n        return list(obj.keys())
\n\n    def edge_mean(self, edge, property_key):\n        \"\"\"\n        calculates the average property of a given edge\n\n        @param \n        - edge: graph edge\n        - property_key: the node attribute to average\n        \"\"\"\n\n        if len(edge) < 2:\n            raise Exception(\"edge must have at least two nodes\")\n\n        p1 = edge.start_node[property_key]\n        p2 = edge.end_node[property_key]\n\n        if isinstance(p1, str) or isinstance(p2, str):\n            raise TypeError(\"property_key value must be numeric\")\n\n        return round((p1 + p2) / 2, 16)
\n\n\nclass EdgeMapper(Mapper):\n    \"\"\"\n    Specialization of mapper for working with edge clustering\n\n    Attributes:\n    - _cluster: clusters generated\n    \"\"\"
\n\n    def __init__(self, edges, clustering_algo, epsilon=0.5, lens=\"reading_level\", num_interval=3):\n        self._edges = None\n        self.clustering_algo = clustering_algo\n        self.adjacency_list = AdjacencyListUnDirected(*edges)
\n\n        def filter_homogeneous_edge(edge):\n            return edge.start_node['type'] == 'comment' and edge.end_node['type'] == 'comment'\n\n        def filter_heterogeneous_edge(edge):\n            return edge.start_node['type'] != 'comment' or edge.end_node['type'] != 'comment'
\n\n        self.homogeneous_edges = list(filter(filter_homogeneous_edge, edges))\n        # Sort the edges based on the property of interest\n        self.homogeneous_edges = sorted(self.homogeneous_edges, key=lambda link: self.edge_mean(link, lens))\n\n        super().__init__(self.homogeneous_edges, epsilon, lens, num_interval)\n        self.heterogeneous_edges = list(filter(filter_heterogeneous_edge, edges))
\n\n    def graph(self):\n        \"\"\"\n        helper function\n        \"\"\"\n        intervals = self.create_intervals()\n        clusters = self.cluster(intervals)\n        self._edges = self.connect_cluster(clusters)\n\n        return self._edges
\n\n    def create_intervals(self):\n        n = len(self.data)\n        intervals = []\n        window_size = floor(n / self.number_of_interval)\n\n        if window_size == 0:\n            window_size = 1\n\n        for i in range(0, n, window_size):\n            intervals.append(self.data[i:i + window_size])\n\n        return intervals
\n\n    def cluster(self, intervals):\n        \"\"\"\n        cluster nodes based on lens\n        @params:\n        - interval: List[List]\n        \"\"\"\n        clusters = []\n        for interval in intervals:\n            nodes = ClusterUtil.flatten(interval)\n            clusters.extend(self.clustering_algo.cluster(nodes))\n\n        return clusters
\n\n    def connect_cluster(self, clusters):\n        n = len(clusters)\n        edge_list = []\n        for i in range(n):\n            for j in range(i + 1, n):\n                edge = ClusterUtil.connect_clusters(clusters[i], clusters[j], self.adjacency_list)\n                if edge is not None:\n                    edge_list.append(edge)\n        return edge_list
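\n\n    # note: the 'edges' property below is lazy; the mapper graph is only built on first access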
\n\n    @property\n    def edges(self):\n        if not self._edges:\n            self.graph()\n\n        return self._edges
\n\n    def is_connected(self, e1, e2):\n        \"\"\"\n        check if two edges are connected\n        :param e1:\n        :param e2:\n        :return: boolean\n        \"\"\"\n        start_n1, start_n2 = e1.start_node, e1.end_node\n        end_n1, end_n2 = e2.start_node, e2.end_node\n\n        return self.adjacency_list.is_connected(start_n1, end_n1) or \\\n               self.adjacency_list.is_connected(start_n1, end_n2) or \\\n               self.adjacency_list.is_connected(start_n2, end_n1) or \\\n               self.adjacency_list.is_connected(start_n2, end_n2)
\n\n\nclass NodeMapper(Mapper):\n    \"\"\"\n    Specialization of mapper for working with node clustering\n    \"\"\"\n\n    def __init__(self, edges, data, epsilon=0.5, lens=\"reading_level\", num_interval=3):\n        self.edges = edges\n        super().__init__(data, epsilon, lens, num_interval)
\n\n    def cluster_groups(self):\n        \"\"\"\n        helper function\n        \"\"\"\n        groups = self.create_intervals()\n        cluster = self.cluster(groups)\n\n        return self.connect_cluster(cluster)
\n\n    def create_intervals(self):\n        \"\"\"\n        splits the nodes into intervals based on property_key\n        \"\"\"\n        n = len(self.data)\n        incr_size = floor(n / self.number_of_interval)\n        if incr_size == 0:\n            incr_size = 1\n\n        intervals = []\n        # create the intervals using property_key value to mark the range bounds\n        for i in range(0, n, incr_size):\n            n = self.data[i:i + incr_size]\n            intervals.append(n)\n\n        groups = defaultdict(list) # map to hold groups\n        length = len(intervals)
\n\n        for i in range(length - 1):\n            next = i + 1\n            minimum = intervals[i][0][self.lens] - self.epsilon\n            maximum = intervals[i][-1][self.lens] + self.epsilon\n\n            # find overlaps\n            for j in range(next, len(self.data)):\n                if self.data[j][self.lens] <= maximum and self.data[j] not in intervals[i]:\n                    intervals[i].append(self.data[j])\n\n            groups[(minimum, maximum)] = intervals[i]
\n\n            # make sure to include the last interval in the group map\n            if next == length - 1:\n                minimum = intervals[next][0][self.lens] - self.epsilon\n                maximum = intervals[next][-1][self.lens] + self.epsilon\n\n                for n in intervals[i]:\n                    if n[self.lens] <= maximum and n not in intervals[next]:\n                        intervals[next].append(n)\n                groups[(minimum, maximum)] = intervals[next]\n\n        return groups
\n\n    def cluster(self, groups):\n        \"\"\"\n        cluster nodes based on their connection with each other\n        @params:\n        - groups: dict of edges\n        \"\"\"\n        clusters = OrderedDict()\n        clusterId = 0
\n\n        for group in groups.values():\n            for node in group:\n                if clusterId not in clusters:\n                    clusters[clusterId] = []\n                clusters[clusterId].append(node)\n\n                # add nodes with edges in the same cluster\n                for edge in self.edges:\n                    if edge[0].id_0 == node.id_0 and edge[1] in group and edge[1] not in clusters[clusterId]:\n                        clusters[clusterId].append(edge[1])\n\n                    elif edge[1].id_0 == node.id_0 and edge[0] in group and edge[0] not in clusters[clusterId]:\n                        clusters[clusterId].append(edge[0])\n\n                clusterId += 1
\n\n        temp = list(clusters.keys())\n        indices = []\n        # find the indices of duplicate clusters\n        for i in temp:\n            s1 = set(clusters[i])\n            for j in temp[i + 1:]:\n                s2 = set(clusters[j])\n                if s2 == s1:\n                    indices.append(j)\n\n        # remove duplicate clusters\n        for i in indices:\n            clusters.pop(i, \"d\")\n        return clusters
\n\n\nclass TreeMapper:\n    \"\"\"\n    A mapper implementation for working with a dendrogram\n    Attributes:\n    - _cluster: clusters generated\n    - intervals: intervals generated\n    
\"\"\"\n\n def __init__(self):\n \"\"\"\n Initializes the TreeMapper object with tree root and filter function\n \"\"\"\n self._cluster = []\n self.intervals = defaultdict(list)\n\n @property\n def cluster(self):\n return self._cluster\n\n @property\n def root(self):\n return self._root\n\n @root.setter\n def root(self, value):\n if not value:\n raise ValueError()\n\n self._root = value\n\n def _map(self, parent_id, children, filter_function):\n \"\"\"\n Map the children of a node using the filter function.\n Cluster them based on the mapped value\n @params:\n - parent_id: id of parent node\n - children: list of child nodes\n - filter_function: filter function\n \"\"\"\n cluster = defaultdict(list)\n\n # Use the filter function to calculate a mapping for the children\n for child in children:\n value = filter_function(child)\n cluster[value].append(child)\n\n # Cluster the nodes based on their filter function value\n for value, array in cluster.items():\n node = Node()\n node[\"composition\"] = []\n node[\"parent_id\"] = parent_id\n node[\"value\"] = value\n node[\"type\"] = \"mapper\"\n node[\"id\"] = uuid4()\n\n for child in array:\n node[\"composition\"].append(child[\"id\"])\n\n self._cluster.append(node)\n\n def bfs(self, root, filter_function=lambda node: sa.convert_score(sa.get_sentiment(node[\"body\"]))):\n \"\"\"\n Use BFS to visit all nodes in the tree in level order traversal\n @params:\n - root: root node\n -filter_function: filter function\n \"\"\"\n queue = deque()\n queue.append(root)\n\n while queue:\n root = queue.popleft()\n if root[\"children\"] is None:\n continue\n # can map level by level by storing children list then mapping the whole level\n # instead of doing children mapping\n self._map(root[\"id\"], root[\"children\"], filter_function)\n for child in root[\"children\"]:\n queue.append(child)\n\n def make_tree(self, root, nodes, visited=None):\n \"\"\"\n Create a tree rooted at root\n @params:\n - root: tree root\n - nodes: list of nodes\n - visited: visited list to avoid infinite loop\n \"\"\"\n if visited is None:\n visited = []\n\n for child in nodes:\n if child[\"parent_id\"] == root[\"id\"] or (root[\"composition\"] and child[\"parent_id\"] in root[\"composition\"]) \\\n or (child['type'] == 'article' and child['subreddit'] == root[\n 'id']): # added this line in order to be able to create a tree for an entire subreddit\n if \"children\" in root:\n root[\"children\"].append(child)\n else:\n root[\"children\"] = [child]\n\n if child not in visited:\n visited.append(child)\n # Traverse only if reachable from root\n self.make_tree(child, nodes, visited)\n\n return root\n\n def map(self, interval, filter_function):\n \"\"\"\n Map the interval of nodes using the filter function.\n Cluster them based on the mapped value\n @params:\n - interval: list of nodes\n - filter_function: filter function\n \"\"\"\n cluster = defaultdict(list)\n\n # Use the filter function to calculate a mapping for the interval\n for node in interval:\n value = filter_function(node)\n cluster[value].append(node)\n\n # Cluster the nodes based on their filter function value\n for value, array in cluster.items():\n node = Node()\n node[\"composition\"] = []\n node[\"value\"] = value\n node[\"type\"] = \"mapper\"\n node[\"id\"] = uuid4()\n\n # use the parent id the node with the minimum depth as parent id for the mapper node\n min_depth = float('inf')\n parent_id = None\n\n for child in array:\n node[\"composition\"].append(child[\"id\"])\n if child['depth'] < min_depth:\n min_depth = 
child['depth']\n parent_id = child[\"parent_id\"]\n\n node[\"parent_id\"] = parent_id\n\n self._cluster.append(node)\n\n def _populate_intervals(self, root, intervals):\n \"\"\"\n add nodes to intervals based on their depth\n @params:\n - root: parent node\n - interval: list of intervals\n \"\"\"\n if root[\"parent_id\"]:\n depth = root[\"depth\"]\n for pair in intervals:\n low, high = pair\n if low <= depth <= high:\n self.intervals[pair].append(root)\n break\n\n if root[\"children\"]:\n for child in root[\"children\"]:\n self._populate_intervals(child, intervals)\n\n def _cluster_interval(self, filter_function):\n \"\"\"\n Cluster nodes in each interval using filter_function\n @param:\n - filter_functon: filter function\n \"\"\"\n for nodes in self.intervals.values():\n self.map(nodes, filter_function)\n\n def execute(self, root, interval=[], epsilon=0.001, filter_function=lambda node: sa.get_sentiment(node[\"body\"])):\n \"\"\"\n Start execution of the algorithm\n @params:\n - root: root node\n - interval: list of intervals. can also be int\n \"\"\"\n del self._cluster[:]\n if type(interval) == int:\n interval = self._generate_intervals(self.tree_height(root), interval)\n # add the depth of the nodes as a property\n self._add_depth(root)\n # create intervals\n self._populate_intervals(root, interval)\n # cluster intervals \n # self._clusterInterval(filterFunction)\n self.cluster_by_connectedness(epsilon, filter_function)\n\n return self._cluster\n\n def _generate_intervals(self, height, count):\n \"\"\"\n Create intervals base on tree height\n @params:\n - height: height of tree\n - count: number of intervals\n \"\"\"\n intervals = []\n n = height // count\n\n i = 1\n while i <= height:\n intervals.append((i, i + n))\n i = i + n + 1\n\n return intervals\n\n def tree_height(self, root):\n \"\"\"\n Calculate the height of a tree\n @params:\n - root: tree root\n \"\"\"\n if not root[\"children\"]:\n return 0\n\n height = 0\n for child in root[\"children\"]:\n height = max(height, self.tree_height(child) + 1)\n\n return height\n\n def _add_depth(self, root, depth=0):\n \"\"\"\n Label each node with its depth\n @params:\n - root: node\n - depth: depth of root\n \"\"\"\n root[\"depth\"] = depth\n if root[\"children\"]:\n for child in root[\"children\"]:\n self._add_depth(child, depth + 1)\n\n def cluster_by_connectedness(self, epsilon, filter_function):\n \"\"\"\n Cluster nodes based on their connectedness\n @params:\n - epsilon: filter_function value threshold\n - filter_function: filter function/lens\n \"\"\"\n mapper_nodes = []\n for interval in self.intervals.values():\n cluster = defaultdict(list)\n n = len(interval)\n\n if n > 1:\n for i in range(n):\n if not interval[i]['isClustered']:\n cluster[interval[i]] = [interval[i]]\n for j in range(i + 1, n):\n if self.is_child_of(cluster[interval[i]][-1], interval[j]) and \\\n abs(filter_function(interval[i]) - filter_function(interval[j])) <= epsilon:\n cluster[interval[i]].append(interval[j])\n interval[j]['isClustered'] = True\n # print(\"first: {0} | second: {1}\".format(interval[i], interval[j]))\n\n else:\n cluster[interval[0]].append(interval[0])\n\n # mapper_nodes.extend(cluster.keys())\n for node, nodeSet in cluster.items():\n node[\"composition\"] = [n[\"id\"] for n in nodeSet if n[\"id\"] != node[\"id\"]]\n node[\"radius\"] = 3.14 * (len(nodeSet) / 2 + 1) ** 2\n mapper_nodes.append(node)\n\n # work around for python object reference mess\n for node in mapper_nodes:\n mapper_node = TreeNode(node[\"id\"], type=\"mapper\")\n 
mapper_node[\"radius\"] = node[\"radius\"]\n mapper_node[\"parent_id\"] = node[\"parent_id\"]\n mapper_node['value'] = filter_function(node)\n mapper_node['composition'] = node[\"composition\"]\n self._cluster.append(mapper_node)\n\n def is_child_of(self, parent, child):\n \"\"\"\n Return whether there is a parent-child relationship\n @params:\n - parent: parent node\n - child: child node\n \"\"\"\n if parent[\"children\"]:\n for node in parent[\"children\"]:\n if node[\"id\"] == child[\"id\"]:\n return True\n\n return False\n\n def top_sort(self, graph):\n \"\"\"\n Sort nodes in topological order\n @params:\n - graph: graph\n \"\"\"\n sorted_nodes, visited = deque(), set()\n for node in graph:\n self.dfs(graph, node, visited, sorted_nodes)\n\n return list(sorted_nodes)\n\n def dfs(self, graph, start_node, visited, sorted_nodes):\n \"\"\"\n Traverse the graph using dfs used in conjunction with top_sort\n @params:\n - graph: graph\n - start_node: start\n - visited: visited list\n - sorted_nodes: result list\n \"\"\"\n visited.add(start_node)\n if start_node[\"children\"]:\n neighbors = [child for child in start_node[\"children\"] if child in graph]\n for neighbor in neighbors:\n if neighbor not in visited:\n self.dfs(graph, neighbor, visited, sorted_nodes)\n sorted_nodes.append(start_node)\n","sub_path":"libs/mapper.py","file_name":"mapper.py","file_ext":"py","file_size_in_byte":21217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"397912178","text":"from .v2_table import text_base, geom_base, run_base\n\nfrom newspaper import Article, ArticleException\nfrom urllib.parse import urlparse\nfrom itertools import chain\nfrom functools import wraps\nimport traceback\nimport datetime\nimport calendar\nimport psycopg2\nimport requests\nimport tempfile\nimport logging\nimport zipfile\nimport shutil\nimport json\nimport time\nimport sys\nimport re\nimport os\n\n\nclass Extractor(object):\n\n def __init__(self, config):\n\n # Set Date & Time\n self.date = datetime.datetime.fromtimestamp(time.time()).strftime('%Y_%m_%d')\n self.time = datetime.datetime.fromtimestamp(time.time()).strftime('%H_%M_%S')\n\n # Load Configuration\n self.config_dir = os.path.dirname(config)\n self.config = self.read_config(config)\n\n # Create Log\n self.logdir = os.path.join(self.config_dir, 'logs', self.date)\n self.logger = self.get_logger()\n\n self.db_name = self.config['db_name']\n self.db_user = self.config['db_user']\n self.db_pass = self.config['db_pass']\n self.db_host = self.config['db_host']\n\n self.v2_urls = self.get_v2_urls()\n\n self.latest_src = 'gdelt_latest_src'\n self.latest_tmp = 'gdelt_latest_tmp'\n self.latest_dst = 'gdelt_latest_dst'\n self.latest_run = 'gdelt_latest_run'\n\n @staticmethod\n def read_config(config):\n\n try:\n return config if isinstance(config, dict) else json.load(open(config))\n\n except ValueError as val_err:\n print(f'Configuration Input \"{config}\" is Not Valid: {val_err}')\n sys.exit(1)\n\n def get_logger(self):\n\n the_logger = logging.getLogger('Extractor')\n the_logger.setLevel(logging.DEBUG)\n\n # Ensure Directories Exist\n if not os.path.exists(self.logdir):\n os.makedirs(self.logdir)\n\n # Set Console Handler\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n\n # Set File Handler\n fh = logging.FileHandler(os.path.join(self.logdir, f'Extractor_{self.time}.log'), 'w')\n fh.setLevel(logging.INFO)\n\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n 
ch.setFormatter(formatter)\n fh.setFormatter(formatter)\n\n the_logger.addHandler(ch)\n the_logger.addHandler(fh)\n\n the_logger.info('Logger Initialized')\n\n return the_logger\n\n\n @staticmethod\n def get_v2_urls():\n\n return {\n 'last_update': 'http://data.gdeltproject.org/gdeltv2/lastupdate.txt'\n }\n\n @staticmethod\n def get_date_range(y, m):\n\n return [\n datetime.date(y, m, day).strftime('%Y%m%d') for day in range(1, calendar.monthrange(y, m)[1] + 1)\n ]\n\n @staticmethod\n def extract_daily_csv(target_date):\n\n # Pull CSV from GDELT Repository\n date_zip = '{}.export.CSV.zip'.format(target_date)\n event_url = 'http://data.gdeltproject.org/events/{}'.format(date_zip)\n response = requests.get(event_url, stream=True)\n\n if response.status_code != 200:\n return None\n\n # Dump to Local CSV\n temp_dir = tempfile.mkdtemp(dir=r'C:\\Temp', prefix='{}_'.format(target_date))\n zip_file = '{}/{}.zip'.format(temp_dir, target_date)\n with open(zip_file, 'wb') as f: f.write(response.content)\n with zipfile.ZipFile(zip_file, 'r') as the_zip: the_zip.extractall(temp_dir)\n\n return '{}/{}.export.CSV'.format(temp_dir, target_date)\n\n @staticmethod\n def text_filter(text):\n\n return re.sub('[^a-zA-Z0-9 \\n]', '', text)\n\n def get_connection(self):\n\n return psycopg2.connect(dbname=self.db_name, user=self.db_user, password=self.db_pass, host=self.db_host)\n\n def open_connection(func):\n\n \"\"\" Inserts Cursor Object As First Arguement of Function \"\"\"\n\n @wraps(func)\n def wrap(*args, **kwargs):\n with args[0].get_connection() as connection:\n with connection.cursor() as cursor:\n args = list(args)\n args.insert(1, cursor)\n return func(*args, **kwargs)\n return wrap\n\n def process_article(self, source_url):\n\n # Parse GDELT Source\n article = Article(source_url)\n article.download()\n article.parse()\n article.nlp()\n\n # Unpack Article Properties & Replace Special Characters\n title = article.title.replace(\"'\", '')\n site = urlparse(article.source_url).netloc\n summary = '{} . . . 
'.format(article.summary.replace(\"'\", '')[:500])\n keywords = ', '.join(sorted([self.text_filter(key) for key in article.keywords]))\n meta_keys = ', '.join(sorted([self.text_filter(key) for key in article.meta_keywords]))\n\n return [title, site, summary, keywords, meta_keys]\n\n @open_connection\n def process_events(self, cursor, table):\n\n self.logger.info('Processing Articles')\n\n # Extract Records\n cursor.execute(f\"select globaleventid, sourceurl from {table}\")\n\n for row in cursor.fetchall():\n\n try:\n # Extract NLP Values with Article\n atts = self.process_article(row[1])\n\n cursor.execute(f\"\"\"\n update {table} set\n title = '{atts[0]}',\n site = '{atts[1]}',\n summary = '{atts[2]}',\n keywords = '{atts[3]}',\n meta_keys = '{atts[4]}'\n where globaleventid = '{row[0]}' \n \"\"\")\n\n except ArticleException:\n pass\n\n except:\n print(f'{traceback.format_exc()}')\n\n def extract_csv(self, csv_url):\n\n response = requests.get(csv_url, stream=True)\n\n temp_dir = tempfile.mkdtemp(dir=self.logdir)\n\n zip_name = csv_url.split('/')[-1]\n zip_path = os.path.join(temp_dir, zip_name)\n\n with open(zip_path, 'wb') as file: file.write(response.content)\n with zipfile.ZipFile(zip_path, 'r') as the_zip: the_zip.extractall(temp_dir)\n\n txt_name = zip_name.strip('export.CSV.zip')\n txt_name += '.txt'\n txt_path = os.path.join(temp_dir, txt_name)\n\n os.rename(zip_path.strip('.zip'), txt_path)\n\n return txt_path, temp_dir\n\n @open_connection\n def check_table(self, cursor, table_name):\n\n cursor.execute(f'''\n select tablename from pg_tables\n where tablename = '{table_name}'\n ''')\n\n res = [row[0] for row in cursor.fetchall()]\n\n if len(res) == 1:\n return True\n\n return False\n\n @open_connection\n def delete_table(self, cursor, table_name):\n\n self.logger.info('Dropping Table: {}'.format(table_name))\n\n cursor.execute(f'drop table if exists {table_name}')\n\n @open_connection\n def create_table(self, cursor, table_name):\n\n self.logger.info(f'Creating Table: {table_name}')\n\n cursor.execute(text_base.format(table_name))\n\n @open_connection\n def load_latest(self, cursor, table_name, text_data):\n\n self.logger.info(f'Loading Data into Table: {table_name}')\n\n with open(text_data, 'r', encoding='latin-1') as raw_data:\n cursor.copy_from(raw_data, table_name)\n\n @open_connection\n def load_subset(self, cursor, src, dst):\n\n cursor.execute(geom_base.format(dst, src))\n\n @open_connection\n def set_geom_field(self, cursor, table_name):\n\n cursor.execute(f\"select addgeometrycolumn('{table_name}', 'geom', 4326, 'POINT', 2)\")\n\n @open_connection\n def pop_geom_field(self, cursor, table_name):\n\n cursor.execute(f\"update {table_name} set geom = st_setsrid(st_point(actor1geo_long, actor1geo_lat), 4326)\")\n\n @open_connection\n def create_column(self, cursor, table, col_name, col_type):\n\n cursor.execute(f\"alter table {table} add column {col_name} {col_type};\")\n\n @open_connection\n def rename_table(self, cursor, old, new):\n\n cursor.execute(f\"alter table {old} rename to {new}\")\n\n @open_connection\n def create_run_table(self, cursor, table_name):\n\n cursor.execute(run_base.format(table_name))\n\n @open_connection\n def insert_run(self, cursor, table_name, seconds):\n\n cursor.execute(f\"insert into {table_name} (runtime) values ({seconds})\")\n\n @open_connection\n def get_keywords(self, cursor):\n\n cursor.execute(f\"select keywords from {self.latest_dst}\")\n\n return [r.strip() for r in list(chain(*[r[0].split(',') for r in cursor.fetchall() if 
r[0]]))]\n\n @open_connection\n def remove_duplicates(self, cursor, table):\n\n cursor.execute(f\"select globaleventid, sourceurl from {table}\")\n\n deletions = []\n seen_urls = []\n for row in cursor.fetchall():\n if row[1] not in seen_urls:\n seen_urls.append(row[1])\n else:\n deletions.append(row[0])\n\n cursor.execute(f\"delete from {table} where globaleventid in {tuple(deletions)}\")\n\n def process_latest(self):\n\n # Process Started\n start = time.time()\n\n # Fetch URL Information for Latest CSV\n response = requests.get(self.v2_urls.get('last_update'))\n last_url = [r for r in response.text.split('\\n')[0].split(' ') if 'export' in r][0]\n\n # Pull & Extract Latest CSV\n self.logger.info(f'Processing Export CSV: {last_url}')\n csv_file, tmp_path = self.extract_csv(last_url)\n\n # Delete Existing Latest Tables\n for table in [self.latest_src, self.latest_tmp]:\n if self.check_table(table):\n self.delete_table(table)\n\n # Create All Text Baseline & Load Latest CSV Data\n self.create_table(self.latest_src)\n self.load_latest(self.latest_src, csv_file)\n\n # Populate Table with Correct Types & Limited Attributes\n self.load_subset(self.latest_src, self.latest_tmp)\n\n # Populate Table with Geometries\n self.set_geom_field(self.latest_tmp)\n self.pop_geom_field(self.latest_tmp)\n\n # Create Columns for Article Processing\n self.create_column(self.latest_tmp, 'meta_keys', 'text')\n self.create_column(self.latest_tmp, 'keywords', 'text')\n self.create_column(self.latest_tmp, 'summary', 'text')\n self.create_column(self.latest_tmp, 'title', 'text')\n self.create_column(self.latest_tmp, 'site', 'text')\n\n # Remove \"Duplicate\" Entries\n self.remove_duplicates(self.latest_tmp)\n\n # Enrich from Articles\n self.process_events(self.latest_tmp)\n\n # Dump Existing Destination & Replace With New Data\n if self.check_table(self.latest_dst):\n self.delete_table(self.latest_dst)\n self.rename_table(self.latest_tmp, self.latest_dst)\n\n # Remove Temporary Files\n shutil.rmtree(tmp_path)\n\n # Ensure Run Table Exists\n if not self.check_table(self.latest_run):\n self.create_run_table(self.latest_run)\n\n # Push Latest Run\n self.insert_run(self.latest_run, time.time())\n\n # Run Time\n self.logger.info(f'Ran: {round((time.time() - start) / 60, 2)}')\n","sub_path":"extractor/extractor.py","file_name":"extractor.py","file_ext":"py","file_size_in_byte":11355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"230584149","text":"import sys\nimport pickle as pk\nimport numpy as np\nimport math\nimport analyzeGotran as ao\nimport subprocess as sb\nfrom subprocess import PIPE\nimport shlex\nimport time\n\ndef gotranMicroglia(sim_time = 1000,\n ATP = 0, # in uM\n period = 3,\n data_name1 = 'Cai',\n data_name2 = None,\n data_name3 = None,\n data_name4 = None,\n data_name5 = None,\n data_name6 = None,\n data_name7 = None,\n data_name8 = None,\n data_name9 = None,\n data_name10 = None,\n output_name = 'p2xp2y',\n output_switch = 1,\n ode_file_name = 'p2xp2y',\n removePickle = 0,\n timePrint = 1,\n **kwargs):\n \n start = time.time()\n\n ######### Name of files #############################\n nameOfODEfile= ode_file_name+'.ode'\n simTime = str(sim_time) # in milliseconds\n outputName = output_name\n ######################################################\n\n ######### Do not touch ############\n mainCommand = 'singularity exec /home/bending456/singularity-img/gotran_new.img python2 dcBen.py -odeName'\n basicSetup = '-dt 0.001 -dSr 1000 -jit -iters 1 -T'\n 
extra1 = ['-name']\n #####################################\n\n ######### Assign new parameters if necessary #########\n variables = ['-var','stim_amplitude', str(ATP), # ATP concentration in [nM]\n '-var','stim_period', str(period)] \n ######################################################\n \n ######### Creating command for variable adjustment #######\n addedArg = []\n\n for key, value in kwargs.items():\n command = '-var ' + key + ' ' + str(value)\n addedArg = addedArg + shlex.split(command)\n ##########################################################\n\n ##### Executing calculation ####################################################################################################\n inputArg = shlex.split(mainCommand) + [nameOfODEfile] + shlex.split(basicSetup) + [simTime] + variables\\\n + addedArg\\\n + extra1 + [outputName]\n print(inputArg)\n out = sb.Popen(inputArg,stdout=PIPE).communicate()[0]\n ################################################################################################################################\n \n ############ Printing Output #########################\n if output_switch == 1:\n print(out)\n ######################################################\n\n ############ Storing Data ######################\n with open(outputName+\"_cat.pickle\", 'rb') as f:\n py2data = pk.load(f, encoding='latin1') \n temp1 = ao.GetData(py2data,data_name1)\n y = np.vstack([temp1.t,temp1.valsIdx])\n if data_name2 != None:\n temp2 = ao.GetData(py2data,data_name2)\n y = np.vstack([y,temp2.valsIdx])\n if data_name3 != None:\n temp3 = ao.GetData(py2data,data_name3)\n y = np.vstack([y,temp3.valsIdx])\n if data_name4 != None:\n temp4 = ao.GetData(py2data,data_name4)\n y = np.vstack([y,temp4.valsIdx])\n if data_name5 != None:\n temp5 = ao.GetData(py2data,data_name5)\n y = np.vstack([y,temp5.valsIdx])\n if data_name6 != None:\n temp6 = ao.GetData(py2data,data_name6)\n y = np.vstack([y,temp6.valsIdx])\n if data_name7 != None:\n temp7 = ao.GetData(py2data,data_name7)\n y = np.vstack([y,temp7.valsIdx])\n if data_name8 != None:\n temp8 = ao.GetData(py2data,data_name8)\n y = np.vstack([y,temp8.valsIdx])\n if data_name9 != None:\n temp9 = ao.GetData(py2data,data_name9)\n y = np.vstack([y,temp9.valsIdx])\n if data_name10 != None:\n temp10 = ao.GetData(py2data,data_name10)\n y = np.vstack([y,temp10.valsIdx])\n ################################################\n \n ### Cleaning pickle files from the parent directory ### \n if removePickle == 1:\n sb.call(['mv',outputName+'_cat.pickle','./Data'])\n sb.call(['rm','-f',outputName+'_1.pickle'])\n \n #######################################################\n if timePrint == 1:\n print(\" -------------- %s seconds --------------\" % (time.time() - start))\n print(\" ------------ End of Simulation -----------\")\n \n return y","sub_path":"ScriptRunnerStep.py","file_name":"ScriptRunnerStep.py","file_ext":"py","file_size_in_byte":4548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"418338773","text":"from qlite.orm.db_fileld_types import (\n QLText,\n\n)\nfrom qlite.orm.db_entity import (\n QLEntityWithPrimaryKey\n)\nfrom qlite.orm.base_classes.base_db_fileld_type import (\n ForeignKey\n)\nfrom qlite.orm.base_classes.session import (\n QLSessionManager\n)\nfrom qlite.orm.base_classes.query import (\n QLuery\n)\n\nimport os\n\n# from qlite.orm.base_classes.exceptions import (\n# QLIntegrityError\n# )\n\n\nif __name__ == '__main__':\n class Mark(QLEntityWithPrimaryKey):\n name = 
QLText(nullable=False)\n\n\n class Model(QLEntityWithPrimaryKey):\n mark = ForeignKey(Mark, mapped_column_name='mark_id')\n name = QLText(nullable=False)\n\n\n class Car(QLEntityWithPrimaryKey):\n model = ForeignKey(Model, mapped_column_name='model_id')\n number = QLText(nullable=False)\n\n\n file_name = '%s.db' % os.path.basename(__file__)[:-3]\n if os.path.exists(file_name):\n os.remove(file_name)\n # new_session = QLSessionManager.get_session(file_name, 'as_dictionary')\n # {'__mark_name': 'Lada', 'd': 'm001mm777', '__mark_id': 2, '__model_id': 8, '__model_name': 'XRay'}\n # {'__mark_name': 'Lada', 'd': 'm002mm777', '__mark_id': 2, '__model_id': 8, '__model_name': 'XRay'}\n # {'__mark_name': 'Renault', 'd': 'x003xx86', '__mark_id': 1, '__model_id': 2, '__model_name': 'Koleos'}\n # import sqlite3\n # new_session = QLSessionManager.get_session(file_name, sqlite3.Row)\n # \n # \n # \n new_session = QLSessionManager.get_session(file_name)\n # (2, 'Lada', 8, 'XRay', 'm001mm777')\n # (2, 'Lada', 8, 'XRay', 'm002mm777')\n # (1, 'Renault', 2, 'Koleos', 'x003xx86')\n\n new_session.create_all_entities(\n Mark, Model, Car\n )\n renault = Mark(name='Renault')\n renault.save()\n\n renault_logan = Model(name='Logan', mark=renault)\n renault_logan.save()\n\n renault_koleos = Model(name='Koleos', mark=renault)\n renault_koleos.save()\n\n renault_megane = Model(name='Megane', mark=renault)\n renault_megane.save()\n\n renault_master = Model(name='Master', mark=renault)\n renault_master.save()\n\n lada = Mark(name='Lada')\n lada.save()\n\n lada_kalina = Model(name='Kalina', mark=lada)\n lada_kalina.save()\n\n lada_kalina_sport = Model(name='Kalina Sport', mark=lada)\n lada_kalina_sport.save()\n\n lada_vesta = Model(name='Vesta', mark=lada)\n lada_vesta.save()\n\n lada_xray = Model(name='XRay', mark=lada)\n lada_xray.save()\n\n car_lada_xray = Car(number='M001MM777', model=lada_xray)\n car_lada_xray.save()\n\n car_lada_xray = Car(number='M002MM777', model=lada_xray)\n car_lada_xray.save()\n\n car_renault_koleos = Car(number='X003XX86', model=renault_koleos)\n car_renault_koleos.save()\n\n new_session.commit()\n\n query = QLuery((Mark, (),))\n sql_statement_0 = query.build_sql_select_statement()\n print(\n 'QLuery((Mark, (),)):',\n \"\\n\\t\\t\",\n sql_statement_0\n )\n\n\n query = QLuery((Mark, ('name',),))\n sql_statement_0_1 = query.build_sql_select_statement()\n print(\n 'QLuery((Mark, (\\'name\\',),)):',\n \"\\n\\t\\t\",\n sql_statement_0_1\n )\n\n query = QLuery((Mark, (None),), (Model, ('name',),))\n sql_statement_1 = query.build_sql_select_statement()\n print(\n 'QLuery((Mark, (None),), (Model, (),)):',\n \"\\n\\t\\t\",\n sql_statement_1\n )\n query = QLuery((Mark, ('id', 'name',)), (Model, ('id', 'name', '__QL:upper(\"mark\".\"name\") as UPPERCASE_MARK')))\n sql_statement_2 = query.build_sql_select_statement()\n print(\n \"QLuery((Mark, ('id', 'name',)), (Model, ('id', 'name', '__QL:upper(\\\"mark\\\".\\\"name\\\") as UPPERCASE_MARK'))):\",\n \"\\n\\t\\t\",\n sql_statement_2\n )\n\n print('')\n d = new_session.fetchall(sql_statement_2)\n for t in d:\n print(t)\n\n print('')\n\n query = QLuery((Mark, ('id', 'name',)), (Model, ('name',)), (Car, (Car.number,)), )\n sql_statement_3 = query.build_sql_select_statement()\n print(\n \"QLuery((Mark, ('id', 'name',)), (Model, ('name',)), (Car, (Car.number,)), ):\",\n \"\\n\\t\\t\",\n sql_statement_3\n )\n\n print('')\n d = new_session.fetchall(sql_statement_3)\n for t in d:\n print(t)\n pass\n\n print('')\n\n query = QLuery((Mark, ('id', 'name',)), (Model, 
('name',))).join((Car, ('number',)), )\n sql_statement_3 = query.build_sql_select_statement()\n print(\n \"QLuery((Mark, ('id', 'name',)), (Model, ('name',))).join((Car, (Car.number,)), ):\",\n \"\\n\\t\\t\",\n sql_statement_3\n )\n\n print('')\n d = new_session.fetchall(sql_statement_3)\n for t in d:\n print(t)\n pass\n\n print('')\n\n query = QLuery((Mark, ('id', 'name',)), (Model, ('id', 'name',)), )\n sql_statement_4 = query.build_sql_select_statement()\n print(\n \"QLuery((Mark, ('id', 'name',)), (Model, ('id', 'name',)), ):\",\n \"\\n\\t\\t\",\n sql_statement_4\n )\n\n print('')\n query = query.join((Car, ('__QL:lower(\"car\".\"number\") as d',)), use_foreign_keys_strategy='all')\n # query = query.join((Car, ('__QL:lower(\\\"car\\\".\\\"number\\\") as lowercase_number',)),)\n sql_statement_5 = query.build_sql_select_statement()\n print(\n \"QLuery((Mark, ('id', 'name',)), (Model, ('id', 'name',)), ).join((Car, (),)):\",\n \"\\n\\t\\t\",\n sql_statement_5\n )\n print('')\n d = new_session.fetchall(sql_statement_5)\n for t in d:\n print(t)\n pass\n\n print('')\n\n","sub_path":"__please_ignore_this/recycle/example_008_query.py","file_name":"example_008_query.py","file_ext":"py","file_size_in_byte":5529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"352579091","text":"import hlt\nimport logging\n\n\ngame = hlt.Game(\"Robbot\")\n\nlogging.info(\"Initialising Robbot...\")\n\nwhile True:\n #TURN START\n\n game_map = game.update_map()\n command_queue = []\n \n my_id = game_map.my_id\n my_ships = game_map.get_me().all_ships()\n for ship in my_ships:\n if ship.docking_status != ship.DockingStatus.UNDOCKED:\n continue\n \n entities_distance = game_map.nearby_entities_by_distance(ship)\n distances = []\n entities = []\n for entity in entities_distance:\n distances.append(entity)\n entities.append(entities_distance[entity])\n \n sorted_distances = []\n sorted_entities = []\n for i in range(len(distances)):\n min_i = distances.index(min(distances))\n sorted_distances.append(distances.pop(min_i))\n sorted_entities.append(entities.pop(min_i))\n\n \n closest_empty_planets = []\n closest_enemy_ships = []\n for i in range(len(sorted_entities)):\n entity = sorted_entities[i][0]\n if isinstance(entity, hlt.entity.Planet):\n if not entity.is_owned():\n closest_empty_planets.append(entity)\n else:\n if entity.owner.id == my_id and not entity.is_full():\n closest_empty_planets.append(entity)\n continue\n \n if isinstance(entity, hlt.entity.Ship) and not entity in my_ships:\n closest_enemy_ships.append(entity)\n\n if len(closest_empty_planets) > 0:\n target_planet = closest_empty_planets[0]\n if ship.can_dock(target_planet):\n command_queue.append(ship.dock(target_planet))\n else:\n navigate_command = ship.navigate(\n ship.closest_point_to(target_planet),\n game_map,\n speed=int(hlt.constants.MAX_SPEED),\n ignore_ships=False)\n if navigate_command:\n command_queue.append(navigate_command)\n \n elif len(closest_enemy_ships) > 0:\n target_ship = closest_enemy_ships[0]\n navigate_command = ship.navigate(\n target_ship,\n game_map,\n speed=int(hlt.constants.MAX_SPEED),\n ignore_ships=False)\n if navigate_command:\n command_queue.append(navigate_command)\n game.send_command_queue(command_queue)\n \n \n \n \n","sub_path":"MyBot-v5.py","file_name":"MyBot-v5.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"77172304","text":"import pandas as pd\nimport numpy as 
np\nimport tfnn\nfrom tfnn.datasets.normalizer import Normalizer\n\n\nclass Network(object):\n def __init__(self, n_inputs, n_outputs, input_dtype, output_dtype, output_activator,\n do_dropout, do_l2, seed=None):\n self.normalizer = Normalizer()\n self.n_inputs = n_inputs\n self.n_outputs = n_outputs\n self.input_dtype = input_dtype\n self.output_dtype = output_dtype\n self.output_activator = output_activator\n if do_dropout and do_l2:\n raise ValueError('Cannot do dropout and l2 at once. Choose only one of them.')\n if do_dropout:\n self.reg = 'dropout'\n if do_l2:\n self.reg = 'l2'\n if (do_dropout is False) & (do_l2 is False):\n self.reg = None\n self.seed = seed\n\n with tfnn.name_scope('inputs'):\n self.data_placeholder = tfnn.placeholder(dtype=input_dtype, shape=[None, n_inputs], name='x_input')\n self.target_placeholder = tfnn.placeholder(dtype=output_dtype, shape=[None, n_outputs], name='y_input')\n if do_dropout:\n self.keep_prob_placeholder = tfnn.placeholder(dtype=tfnn.float32)\n tfnn.scalar_summary('dropout_keep_probability', self.keep_prob_placeholder)\n if do_l2:\n self.l2_placeholder = tfnn.placeholder(tfnn.float32)\n tfnn.scalar_summary('l2_lambda', self.l2_placeholder)\n self.layers_type = pd.Series([])\n self.layers_output = pd.Series([])\n self.layers_activated_output = pd.Series([])\n self.layers_dropped_output = pd.Series([])\n self.layers_final_output = pd.Series([])\n self.Ws = pd.Series([])\n self.bs = pd.Series([])\n self.record_activators = pd.Series([])\n self.record_neurons = []\n self.last_layer_neurons = n_inputs\n self.last_layer_outputs = self.data_placeholder\n self.layer_number = 1\n self.has_output_layer = False\n self._is_output_layer = False\n\n def add_hidden_layer(self, n_neurons, activator=None, dropout_layer=False):\n \"\"\"\n W shape(n_last_layer_neurons, n_this_layer_neurons]\n b shape(n_this_layer_neurons, ]\n product = tfnn.matmul(x, W) + b\n :param n_neurons: Number of neurons in this layer\n :param activator: The activation function\n :return:\n \"\"\"\n if not self._is_output_layer:\n layer_name = 'hidden_layer%i' % self.layer_number\n else:\n layer_name = 'output_layer'\n with tfnn.name_scope(layer_name):\n with tfnn.name_scope('weights'):\n W = self._weight_variable([self.last_layer_neurons, n_neurons])\n tfnn.histogram_summary(layer_name+'/weights', W)\n with tfnn.name_scope('biases'):\n b = self._bias_variable([n_neurons, ])\n tfnn.histogram_summary(layer_name + '/biases', b)\n with tfnn.name_scope('Wx_plus_b'):\n product = tfnn.add(tfnn.matmul(self.last_layer_outputs, W, name='Wx'), b, name='Wx_plus_b')\n if activator is None:\n activated_product = product\n else:\n activated_product = activator(product)\n tfnn.histogram_summary(layer_name+'/activated_product', activated_product)\n if (self.reg == 'dropout') and dropout_layer:\n dropped_product = tfnn.nn.dropout(activated_product,\n self.keep_prob_placeholder,\n seed=self.seed, name='dropout')\n self.layers_dropped_output.set_value(label=len(self.layers_dropped_output),\n value=dropped_product)\n final_product = dropped_product\n else:\n final_product = activated_product\n\n self.layers_type.set_value(len(self.layers_type), \"func\")\n self.layer_number += 1\n self.last_layer_outputs = final_product\n self.Ws.set_value(label=len(self.Ws), value=W)\n self.bs.set_value(label=len(self.bs), value=b)\n if activator is None:\n self.record_activators.set_value(label=len(self.record_activators), value=None)\n else:\n self.record_activators.set_value(label=len(self.record_activators), 
value=activator(0).name)\n self.record_neurons.append(n_neurons)\n\n self.layers_output.set_value(label=len(self.layers_output),\n value=product)\n self.layers_activated_output.set_value(label=len(self.layers_output),\n value=activated_product)\n self.layers_final_output.set_value(label=len(self.layers_final_output),\n value=final_product)\n self.last_layer_neurons = n_neurons\n\n def add_func_layer(self, n_neurons, activator=None, dropout_layer=False):\n self.add_hidden_layer(n_neurons, activator, dropout_layer)\n\n def add_conv_layer(self, patch_x, patch_y, n_neurons, activator=None, dropout_layer=False):\n def conv2d(x, W):\n # stride [1, x_movement, y_movement, 1]\n # Must have strides[0] = strides[3] = 1\n return tfnn.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\n def max_pool_2x2(x):\n # stride [1, x_movement, y_movement, 1]\n return tfnn.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n W_conv = self._weight_variable([patch_x, patch_y, self.last_layer_neurons, n_neurons]) # patch 5x5, in size 1, out size 32\n b_conv = self._bias_variable([n_neurons, ])\n if activator is not None:\n activated_product = activator(conv2d(self.last_layer_outputs, W_conv) + b_conv) # output size 28x28x32\n else:\n activated_product = conv2d(self.last_layer_outputs, W_conv) + b_conv\n pooled_product = max_pool_2x2(activated_product)\n if (self.reg == 'dropout') and dropout_layer:\n dropped_product = tfnn.nn.dropout(pooled_product,\n self.keep_prob_placeholder,\n seed=self.seed, name='dropout')\n self.layers_dropped_output.set_value(label=len(self.layers_dropped_output),\n value=dropped_product)\n final_product = dropped_product\n else:\n final_product = activated_product\n self.layers_type.set_value(len(self.layers_type), \"conv\")\n self.last_layer_outputs = final_product\n self.layer_number += 1\n self.Ws.set_value(label=len(self.Ws), value=W_conv)\n self.bs.set_value(label=len(self.bs), value=b_conv)\n if activator is None:\n self.record_activators.set_value(label=len(self.record_activators), value=None)\n else:\n self.record_activators.set_value(label=len(self.record_activators), value=activator(0).name)\n self.record_neurons.append(n_neurons)\n self.layers_output.set_value(label=len(self.layers_output),\n value=pooled_product)\n\n self.layers_activated_output.set_value(label=len(self.layers_output),\n value=activated_product)\n self.layers_final_output.set_value(label=len(self.layers_final_output),\n value=final_product)\n self.last_layer_neurons = n_neurons\n\n def add_output_layer(self, activator, dropout_layer=False):\n self._is_output_layer = True\n self.add_hidden_layer(self.n_outputs, activator, dropout_layer)\n self._init_loss()\n self.has_output_layer = True\n\n def set_optimizer(self, optimizer=None, global_step=None,):\n if optimizer is None:\n optimizer = tfnn.train.GradientDescentOptimizer(0.01)\n if not self.has_output_layer:\n raise NotImplementedError('Please add output layer.')\n with tfnn.name_scope('trian'):\n self.train_op = optimizer.minimize(self.loss, global_step)\n self.sess = tfnn.Session()\n\n def run_step(self, feed_xs, feed_ys, *args):\n if np.ndim(feed_xs) == 1:\n feed_xs = feed_xs[np.newaxis, :]\n if np.ndim(feed_ys) == 1:\n feed_ys = feed_ys[np.newaxis, :]\n if not hasattr(self, '_init'):\n # initialize all variables\n self._init = tfnn.initialize_all_variables()\n self.sess.run(self._init)\n\n if self.reg == 'dropout':\n keep_prob = args[0]\n self.sess.run(self.train_op, feed_dict={self.data_placeholder: feed_xs,\n 
self.target_placeholder: feed_ys,\n self.keep_prob_placeholder: keep_prob})\n elif self.reg == 'l2':\n l2 = args[0]\n self.sess.run(self.train_op, feed_dict={self.data_placeholder: feed_xs,\n self.target_placeholder: feed_ys,\n self.l2_placeholder: l2})\n else:\n self.sess.run(self.train_op, feed_dict={self.data_placeholder: feed_xs,\n self.target_placeholder: feed_ys})\n\n def fit(self, feed_xs, feed_ys, n_iter=5000, *args):\n \"\"\"\n Fit data to network, automatically training the network.\n :param feed_xs:\n :param feed_ys:\n :param n_iter: when n_iter=-1, the training steps= n_samples*2\n :param args: pass keep_prob when use dropout, pass l2_lambda when use l2 regularization.\n :return: Nothing\n \"\"\"\n train_data = tfnn.Data(feed_xs, feed_ys)\n for _ in range(n_iter):\n b_xs, b_ys = train_data.next_batch(100, loop=True)\n self.run_step(feed_xs=b_xs, feed_ys=b_ys, *args)\n\n def get_loss(self, xs, ys):\n if self.reg == 'dropout':\n _loss_value = self.sess.run(self.loss, feed_dict={self.data_placeholder: xs,\n self.target_placeholder: ys,\n self.keep_prob_placeholder: 1.})\n elif self.reg == 'l2':\n _loss_value = self.sess.run(self.loss, feed_dict={self.data_placeholder: xs,\n self.target_placeholder: ys,\n self.l2_placeholder: 0})\n else:\n _loss_value = self.sess.run(self.loss, feed_dict={self.data_placeholder: xs,\n self.target_placeholder: ys})\n return _loss_value\n\n def get_weights(self, layer=None):\n if not(layer is None or type(layer) is int):\n raise TypeError('layer need to be None or int')\n if layer is None:\n Ws = []\n for W_layer in self.Ws:\n W = self.sess.run(W_layer)\n Ws.append(W)\n else:\n if layer >= len(self.Ws):\n raise IndexError('Do not have layer %i' % layer)\n Ws = self.sess.run(self.Ws[layer])\n return Ws\n\n def predict(self, xs):\n if np.ndim(xs) == 1:\n xs = xs[np.newaxis, :]\n predictions = self.sess.run(self.predictions, feed_dict={self.data_placeholder: xs})\n if predictions.size == 1:\n predictions = predictions[0][0]\n return predictions\n\n def save(self, path='/tmp/'):\n saver = tfnn.NetworkSaver()\n saver.save(self, path)\n\n def _weight_variable(self, shape):\n initial = tfnn.random_normal(\n shape, mean=0.0, stddev=0.2, dtype=self.input_dtype, seed=self.seed, name='weights')\n return tfnn.Variable(initial)\n\n def _bias_variable(self, shape):\n initial = tfnn.constant(0.1, shape=shape, dtype=self.input_dtype, name='biases')\n return tfnn.Variable(initial)\n\n def _init_loss(self):\n self.loss = None\n","sub_path":"tfnn/body/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":12208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"634757619","text":"############################################################################\r\n# #\r\n# Copyright (c)2008, 2009, Digi International (Digi). All Rights Reserved. #\r\n# #\r\n# Permission to use, copy, modify, and distribute this software and its #\r\n# documentation, without fee and without a signed licensing agreement, is #\r\n# hereby granted, provided that the software is used on Digi products only #\r\n# and that the software contain this copyright notice, and the following #\r\n# two paragraphs appear in all copies, modifications, and distributions as #\r\n# well. Contact Product Management, Digi International, Inc., 11001 Bren #\r\n# Road East, Minnetonka, MN, +1 952-912-3444, for commercial licensing #\r\n# opportunities for non-Digi products. 
#\r\n# #\r\n# DIGI SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED #\r\n# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A #\r\n# PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, #\r\n# PROVIDED HEREUNDER IS PROVIDED \"AS IS\" AND WITHOUT WARRANTY OF ANY KIND. #\r\n# DIGI HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, #\r\n# ENHANCEMENTS, OR MODIFICATIONS. #\r\n# #\r\n# IN NO EVENT SHALL DIGI BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, #\r\n# SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, #\r\n# ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF #\r\n# DIGI HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. #\r\n# #\r\n############################################################################\r\n\r\n\r\n# imports\r\nfrom settings.settings_base import SettingsBase, Setting\r\nfrom services.service_base import ServiceBase\r\n\r\n# constants\r\n\r\n# exception classes\r\n\r\n# interface functions\r\n\r\n# classes\r\nclass NullService(ServiceBase):\r\n \"\"\"\r\n A null, do-nothing service that is used to test the service manager.\r\n\r\n This class extends one of our base classes and is intended as an\r\n example of a concrete, example implementation, but it is not itself\r\n meant to be included as part of our developer API. Please consult the\r\n base class documentation for the API and the source code for this file\r\n for an example implementation.\r\n\r\n \"\"\"\r\n def __init__(self, name, core_services):\r\n self.__name = name\r\n self.__core = core_services\r\n\r\n from core.tracing import get_tracer\r\n self.__tracer = get_tracer(name)\r\n\t\t\r\n settings_list = [ ]\r\n\r\n ## Initialize settings:\r\n ServiceBase.__init__(self, name=name, settings_list=settings_list)\r\n\r\n def apply_settings(self):\r\n SettingsBase.merge_settings(self)\r\n accepted, rejected, not_found = SettingsBase.verify_settings(self)\r\n if len(rejected) or len(not_found):\r\n # there were problems with settings, terminate early:\r\n self.__tracer.error(\"Settings rejected/not found: %s %s\", \\\r\n rejected, not_found)\r\n return (accepted, rejected, not_found)\r\n SettingsBase.commit_settings(self, accepted)\r\n\r\n return (accepted, rejected, not_found)\r\n \r\n def start(self):\r\n self.__tracer.info(\"started.\")\r\n \r\n return True\r\n\r\n def stop(self):\r\n self.__tracer.info(\"stopped.\")\r\n \r\n return True\r\n \r\n# internal functions & classes\r\n","sub_path":"src/services/null_service.py","file_name":"null_service.py","file_ext":"py","file_size_in_byte":3786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"621776210","text":"\"\"\"\n Code taken from https://github.com/openai/evolution-strategies-starter/\n With some changes to save state to and load state from file\n\"\"\"\nimport numpy as np\nimport torch\n\n\nclass Optimizer(object):\n def __init__(self, theta):\n self.theta = theta\n self.dim = len(self.theta)\n self.t = 0\n\n def update(self, globalg):\n self.t += 1\n step = self._compute_step(globalg)\n theta = self.theta\n ratio = np.linalg.norm(step) / np.linalg.norm(theta)\n new_theta = self.theta + step\n self.theta = new_theta\n return ratio, new_theta\n\n def set_theta(self, theta):\n self.theta = theta\n self.dim = len(theta)\n\n def _compute_step(self, globalg):\n raise NotImplementedError\n\n def save_to_file(self, path):\n raise NotImplementedError\n\n def load_from_file(self, path):\n raise 
NotImplementedError\n\n\nclass SGD(Optimizer):\n def __init__(self, theta, stepsize, momentum=0.9):\n super().__init__(theta)\n self.v = np.zeros(self.dim, dtype=np.float)\n self.stepsize, self.momentum = stepsize, momentum\n\n def _compute_step(self, grad):\n self.v = self.momentum * self.v + (1. - self.momentum) * grad\n step = -self.stepsize * self.v\n return step\n\n def save_to_file(self, path):\n state = {\n 'dim': self.dim,\n 't': self.t,\n 'momentum': self.momentum,\n 'stepsize': self.stepsize,\n 'v': self.v,\n }\n torch.save(state, path)\n\n def load_from_file(self, path):\n state = torch.load(path, map_location='cpu')\n self.dim = state['dim']\n self.t = state['t']\n self.stepsize = state['stepsize']\n self.v = state['v']\n self.momentum = state['momentum']\n\n\nclass Adam(Optimizer):\n def __init__(self, theta, stepsize, beta1=0.9, beta2=0.999, epsilon=1e-08):\n super().__init__(theta)\n self.stepsize = stepsize\n self.beta1 = beta1\n self.beta2 = beta2\n self.epsilon = epsilon\n self.m = np.zeros(self.dim, dtype=np.float)\n self.v = np.zeros(self.dim, dtype=np.float)\n\n def _compute_step(self, grad):\n a = self.stepsize * np.sqrt(1 - self.beta2 ** self.t) / (1 - self.beta1 ** self.t)\n self.m = self.beta1 * self.m + (1 - self.beta1) * grad\n self.v = self.beta2 * self.v + (1 - self.beta2) * (grad * grad)\n step = -a * self.m / (np.sqrt(self.v) + self.epsilon)\n return step\n\n def save_to_file(self, path):\n state = {\n 'dim': self.dim,\n 't': self.t,\n 'stepsize': self.stepsize,\n 'beta1': self.beta1,\n 'beta2': self.beta2,\n 'epsilon': self.epsilon,\n 'm': self.m,\n 'v': self.v,\n }\n torch.save(state, path)\n\n def load_from_file(self, path):\n state = torch.load(path, map_location='cpu')\n self.dim = state['dim']\n self.t = state['t']\n self.stepsize = state['stepsize']\n self.beta1 = state['beta1']\n self.beta2 = state['beta2']\n self.epsilon = state['epsilon']\n self.m = state['m']\n self.v = state['v']\n","sub_path":"src/algorithm/nic_nes/optimizers.py","file_name":"optimizers.py","file_ext":"py","file_size_in_byte":3210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"393575551","text":"# Program to find the series of Prime Numbers.\r\n\r\nwhile True:\r\n\r\n lower = int(input('Enter the lower range: '))\r\n upper = int(input('Enter the upper range: '))\r\n\r\n prime_num = []\r\n\r\n for num in range(lower, upper + 1):\r\n\r\n if num > 1:\r\n for i in range(2, num):\r\n\r\n if (num % i) == 0:\r\n break\r\n\r\n else:\r\n prime_num.append(num)\r\n\r\n print('\\nThe list of prime numbers from ', lower, 'to', upper, 'are: ')\r\n print(prime_num)\r\n\r\n user_input = input(\"\\nPress Enter to continue, 'q' to exit.\")\r\n\r\n if user_input == 'q':\r\n break\r\n","sub_path":"Series of Prime.py","file_name":"Series of Prime.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"477182370","text":"from conans import AutoToolsBuildEnvironment, ConanFile, tools\nimport contextlib\nimport os\n\nrequired_conan_version = \">=1.33.0\"\n\n\nclass LibsmackerConan(ConanFile):\n name = \"libsmacker\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"http://libsmacker.sourceforge.net\"\n topics = (\"libsmacker\", \"decoding \", \"smk\", \"smacker\", \"video\", \"file\")\n license = \"LGPL-2.1-or-later\"\n description = \"A C library for decoding .smk Smacker Video files\"\n options = {\n \"shared\": [True, False],\n 
\"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n }\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n\n exports_sources = \"patches/*\"\n _autotools = None\n\n @property\n def _source_subfolder(self):\n return os.path.join(self.source_folder, \"source_subfolder\")\n\n @property\n def _settings_build(self):\n return getattr(self, \"settings_build\", self.settings)\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n del self.settings.compiler.cppstd\n del self.settings.compiler.libcxx\n\n def build_requirements(self):\n self.build_requires(\"libtool/2.4.6\")\n if self._settings_build.os == \"Windows\" and not tools.get_env(\"CONAN_BASH_PATH\"):\n self.build_requires(\"msys2/cci.latest\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version],\n destination=self._source_subfolder, strip_root=True)\n\n @contextlib.contextmanager\n def _build_context(self):\n if self.settings.compiler == \"Visual Studio\":\n with tools.vcvars(self):\n env = {\n \"CC\": \"cl -nologo\",\n \"CXX\": \"cl -nologo\",\n \"LD\": \"link -nologo\",\n }\n with tools.environment_append(env):\n yield\n else:\n yield\n\n def _configure_autotools(self):\n if self._autotools is not None:\n return self._autotools\n self._autotools = AutoToolsBuildEnvironment(self,win_bash=tools.os_info.is_windows)\n self._autotools.libs = []\n if self.settings.compiler == \"Visual Studio\" and tools.Version(self.settings.compiler.version) >= \"12\":\n self._autotools.flags.append(\"-FS\")\n yes_no = lambda v: \"yes\" if v else \"no\"\n args = [\n \"--enable-shared={}\".format(yes_no(self.options.shared)),\n \"--enable-static={}\".format(yes_no(not self.options.shared)),\n ]\n self._autotools.configure(configure_dir=self._source_subfolder, args=args)\n return self._autotools\n\n def build(self):\n for patch in self.conan_data.get(\"patches\", {}).get(self.version, []):\n tools.patch(**patch)\n with tools.chdir(self._source_subfolder):\n self.run(\"{} -fiv\".format(tools.get_env(\"AUTORECONF\")), win_bash=tools.os_info.is_windows)\n with self._build_context():\n autotools = self._configure_autotools()\n autotools.make()\n\n def package(self):\n self.copy(\"COPYING\", src=self._source_subfolder, dst=\"licenses\")\n with self._build_context():\n autotools = self._configure_autotools()\n autotools.install()\n\n tools.remove_files_by_mask(os.path.join(self.package_folder, \"lib\"), \"*.la\")\n if self.settings.compiler == \"Visual Studio\" and self.options.shared:\n os.rename(os.path.join(self.package_folder, \"lib\", \"smacker.dll.lib\"),\n os.path.join(self.package_folder, \"lib\", \"smacker.lib\"))\n\n def package_info(self):\n self.cpp_info.libs = [\"smacker\"]\n","sub_path":"recipes/libsmacker/all/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":3813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"428889943","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 20 10:30:07 2020\n@author: vboas\n\"\"\"\nimport math\nimport itertools\nimport numpy as np\nfrom scipy.linalg import eigh\nfrom scipy.fftpack import fft\nfrom sklearn.metrics import cohen_kappa_score\nfrom bci_utils import labeling, extractEpochs\nfrom scipy.signal import lfilter, butter, iirfilter, filtfilt, decimate\nfrom sklearn.discriminant_analysis import 
LinearDiscriminantAnalysis as LDA\n    \n#%% DATASET AND SCENARIO INFO\n''' III3a: subjects={'K3','K6','L1'}; prefix=''; class_ids={1,2,3,4}; sessions={None}; channels=[:60] \n    III4a: subjects={'aa','al','av','aw','ay'}; prefix=''; class_ids={1,3}; sessions={None}; channels=[:118]\n    IV2a: subjects={1,2,...,9}; prefix='A0'; class_ids={1,2,3,4} ; sessions={'T','E'} ; channels=[:22]\n    IV2b: subjects={1,2,...,9}; prefix='B0'; class_ids={1,2}; sessions={'01T','02T','03T','04E','05E'}; channels=[:3] \n    Lee19: subjects={1,2,...,54}; prefix='S'; class_ids={1,2}; sessions={1,2}; channels=[:62]; suffix='sess'; ch_cortex=[7,32,8,9,33,10,34,12,35,13,36,14,37,17,38,18,39,19,40,20] \n'''\n\nds = 'IV2a' # 'IV2a','IV2b','III3a','III4a','Lee19' \npath = '/mnt/dados/eeg_data/' + ds + '/' # PATH TO DATASET\nsuj = 5\nclass_ids = [1, 2]\nsessions = ['T', 'E']\nchannels = None\nprefix, suffix = 'A0', ''\n\n#%% Load data\nd_train, e_train, i_train = labeling(path=path, ds=ds, session=sessions[0], subj=suj, channels=channels, save=False)\n# d_train, e_train, i_train = np.load(path + 'npy/' + prefix + str(suj) + '' + sessions[0] + '.npy', allow_pickle=True)\n\nif not ds in ['III3a','III4a']: \n    d_test, e_test, i_test = labeling(path=path, ds=ds, session=sessions[1], subj=suj, channels=channels, save=False)\n    # d_test, e_test, i_test = np.load(path + 'npy/' + prefix + str(suj) + '' + sessions[1] + '.npy', allow_pickle=True)\n\n#%% Segmentation\n# Fs = 250 if dataset in ['IV2a', 'IV2b', 'III3a', 'Lee19'] else 100\nFs = i_train['fs']\n\n# # Downsampling\n# factor = 2\n# d_train = decimate(d_train, factor)\n# d_test = decimate(d_test, factor)\n# Fs = Fs/factor\n# # et = np.copy(e_train)\n# # ev = np.copy(e_test)\n# e_train[:, 0] = [round(e_train[i, 0]/factor) for i in range(e_train.shape[0])]\n# e_test[:, 0] = [round(e_test[i, 0]/factor) for i in range(e_test.shape[0])]\n\nsmin, smax = math.floor(0.5 * Fs), math.floor(2.5 * Fs)\n\nL = 2\nZTL, ZVL = [], []\nfor l in range(0, L + 1):\n    # print(smin+l, smax+l)\n    \n    \n    ################\n    epochsT, labelsT = extractEpochs(d_train, e_train, smin+l, smax+l, class_ids)\n    \n    if not ds in ['III3a','III4a']: \n        epochsV, labelsV = extractEpochs(d_test, e_test, smin+l, smax+l, class_ids)\n    else: \n        epochs, labels = np.copy(epochsT), np.copy(labelsT)\n        test_size = int(len(epochs) * 0.5)\n        train_size = int(len(epochs) - test_size)\n        train_size = train_size if (train_size % 2 == 0) else train_size - 1 # keep the classes balanced (stratified sampling)\n        epochsT, labelsT = epochs[:train_size], labels[:train_size] \n        epochsV, labelsV = epochs[train_size:], labels[train_size:]\n    \n    ZT = [epochsT[np.where(labelsT==i)] for i in class_ids]\n    ZT = np.r_[ZT[0],ZT[1]]\n    tT = np.r_[class_ids[0]*np.ones(int(len(ZT)/2)), class_ids[1]*np.ones(int(len(ZT)/2))]\n    \n    ZV = [epochsV[np.where(labelsV==i)] for i in class_ids]\n    ZV = np.r_[ZV[0],ZV[1]]\n    tV = np.r_[class_ids[0]*np.ones(int(len(ZV)/2)), class_ids[1]*np.ones(int(len(ZV)/2))]\n    #################\n    \n    \n    ZTL.append(np.transpose(ZT, (0,2,1)))\n    ZVL.append(np.transpose(ZV, (0,2,1)))\n\nZT, ZV = ZTL[0], ZVL[0]\nfor i in range(1, len(ZTL)):\n    ZT = np.c_[ZT,ZTL[i]]\n    ZV = np.c_[ZV,ZVL[i]]\n\nZT = np.transpose(ZT, (0,2,1))\nZV = np.transpose(ZV, (0,2,1))\n\nprint(ZT.shape, ZV.shape)\n\n\n#%% Filtering\nf_low, f_high = 8, 30\nDFT = 0 # 0=IIR, 1=DFT\n\nif DFT:\n    print(DFT)\n    buffer_len = smax - smin\n    dft_res_freq = Fs/buffer_len # fft frequency resolution\n    dft_size_band = round(2/dft_res_freq) # the 2 accounts for the sin and cos terms that were 
split out of the complex fft components and interleaved\n    \n    data_out = fft(ZT)\n    REAL = np.transpose(np.real(data_out), (2, 0, 1))\n    IMAG = np.transpose(np.imag(data_out), (2, 0, 1))\n    data_out = list(itertools.chain.from_iterable(zip(IMAG, REAL)))\n    XT_FFT = np.transpose(data_out, (1, 2, 0))\n    \n    data_out = fft(ZV)\n    REAL = np.transpose(np.real(data_out), (2, 0, 1))\n    IMAG = np.transpose(np.imag(data_out), (2, 0, 1))\n    data_out = list(itertools.chain.from_iterable(zip(IMAG, REAL)))\n    XV_FFT = np.transpose(data_out, (1, 2, 0))\n    \n    bmin = f_low * dft_size_band\n    bmax = f_high * dft_size_band\n    # print(bmin, bmax)\n    XT = XT_FFT[:, :, bmin:bmax]\n    XV = XV_FFT[:, :, bmin:bmax]\n\nelse: # IIR Filtering\n    nyq = 0.5 * Fs\n    low = f_low / nyq\n    high = f_high / nyq\n    if low == 0: low = 0.001\n    if high >= 1: high = 0.99\n    b, a = butter(5, [low, high], btype='bandpass')\n    # b, a = iirfilter(5, [low,high], btype='band')\n    # XT = lfilter(b, a, ZT) \n    # XV = lfilter(b, a, ZV)\n    XT = filtfilt(b, a, ZT)\n    XV = filtfilt(b, a, ZV)\n\n\n#%% CSP\nncomp = 6\ne, c, s = XT.shape\nclasses = np.unique(tT) \nXa = XT[classes[0] == tT,:,:]\nXb = XT[classes[1] == tT,:,:]\n\nSa = np.zeros((c, c)) \nSb = np.zeros((c, c))\nfor i in range(int(e/2)):\n    # Sa += np.dot(Xa[i,:,:], Xa[i,:,:].T)\n    # Sb += np.dot(Xb[i,:,:], Xb[i,:,:].T)\n    Sa += np.dot(Xa[i,:,:], Xa[i,:,:].T) #/ Xa[i].shape[-1] # sum((Xa * Xa.T)/q)\n    Sb += np.dot(Xb[i,:,:], Xb[i,:,:].T) #/ Xb[i].shape[-1] # sum((Xb * Xb.T)/q)\nSa /= (len(Xa) * s)\nSb /= (len(Xb) * s)\n\n[D, W] = eigh(Sa, Sa + Sb)\nind = np.empty(c, dtype=int)\nind[0::2] = np.arange(c - 1, c // 2 - 1, -1) \nind[1::2] = np.arange(0, c // 2)\nW = W[:, ind]\nprint(W.shape)\nWf = W.T[:ncomp]\n \nYT = np.asarray([np.dot(Wf, ep) for ep in XT])\nYV = np.asarray([np.dot(Wf, ep) for ep in XV])\n\n\n#%% Feature extraction\nXT_CSP = np.log(np.mean(YT ** 2, axis=2))\nXV_CSP = np.log(np.mean(YV ** 2, axis=2))\n# XT_CSP = np.log(np.var(YT, axis=2))\n# XV_CSP = np.log(np.var(YV, axis=2))\n# XV_CSPi = np.log(np.mean(YV[0] ** 2, axis=1))\n\n#%% LDA Classifier\nclf = LDA()\nclf.fit(XT_CSP, tT)\nscores_labels = clf.predict(XV_CSP)\nacc = np.mean(scores_labels == tV)\nkappa = cohen_kappa_score(scores_labels, tV)\nprint('Accuracy:', round(acc,4))\nprint('kappa:', round(kappa,4))\n\n #%%\n\n\n# def load_data(sujeito, classes):\n#     ds = ['T_', 'E_']\n#     X = [] # 4-element vector: the first two entries hold the training data for the two classes and the last two the validation data \n#     for i in range(2):\n#         for j in range(2):\n#             path = '/home/vboas/devto/datasets/BCICIV_2a/npy/A0' + str(sujeito) + ds[i] + str(classes[j]) + '.npy'\n#             dados = load(open(path, 'rb'))\n#             X.append(dados)\n#     return X\n\n# def windowing(X, fs, t_0, t_start, t_end, atraso):\n#     W_Start = int((t_start - t_0) * fs)\n#     W_End = int((t_end - t_0) * fs)\n#     XJ = []\n#     atraso = atraso\n#     for i in range(4):\n#         # XJ.append(XF[i][:, :, W_Start:W_End])\n#         janela = transpose(X[i][:,:,W_Start:W_End], (1, 0, 2))\n#         for cont in range(1, atraso + 1):\n#             jAvancada = transpose(X[i][:,:,W_Start+cont:W_End+cont], (1, 0, 2)) # advance (janela - cont)\n#             jAtrasada = transpose(X[i][:,:,W_Start+cont:W_End+cont], (1, 0, 2)) # delay (janela + cont)\n#             janela = concatenate([janela, jAtrasada])\n#             #print(janela[0][0][0],' equals ',janela[22][0][1])\n#         XJ.append(transpose(janela, (1, 0, 2)))\n#     #print(XJ[0][0][0][498],' equals ',XJ[0][0][22][499])\n#     return XJ\n\n# def filtragemFFT2(XJ, fs, fl, fh):\n#     nf = fs/2.\n#     bin0 = int(fl * (fs/nf)) # for 
fl = 8 bin0 = 15\n# binN = int(fh * (fs/nf)) # para fl = 8 bin0 = 15\n# # print bin0, binN\n# XF = []\n# for i in range(4): # filtra os dados de treinamento e validação das duas classes \n# filtrado = fft(XJ[i])\n# REAL = transpose(real(filtrado)[:,:,bin0:binN], (2, 0, 1)) #transpoe para intercalar\n# IMAG = transpose(imag(filtrado)[:,:,bin0:binN], (2, 0, 1)) #transpoe para intercalar\n# filtrado = list(itertools.chain.from_iterable(zip(IMAG, REAL))) #intercalando\n# filtrado = transpose(filtrado, (1, 2, 0)) # retorna ao formato original \n# XF.append(filtrado)\n# return XF\n\n# def filtragemIIR(XJ, fs, fl, fh, ordem=5):\n# nf = fs/2.\n# b, a = butter(ordem, [fl/nf, fh/nf], btype='bandpass')\n# XF = []\n# for i in range(4): # filtra os dados de treinamento e validação das duas classes \n# filtrado = lfilter(b, a, XJ[i])\n# XF.append(filtrado)\n# return XF\n\n# class CSP():\n# def __init__(self, n_components):\n# self.n_components = n_components\n\n# def fit(self, X, y):\n# e, c, t = X.shape\n# classes = unique(y)\n \n# X0 = X[classes[0] == y,:,:]\n# X1 = X[classes[1] == y,:,:]\n\n# # Sum up covariance matrix\n# S0 = zeros((c, c))\n# S1 = zeros((c, c))\n# for i in range(int(e/2)): # add conversão int() ?\n# S0 += dot(X0[i,:,:], X0[i,:,:].T)\n# S1 += dot(X1[i,:,:], X1[i,:,:].T)\n\n# [D, W] = eigh(S0, S0 + S1)\n\n# ind = empty(c, dtype=int)\n# ind[0::2] = arange(c - 1, c // 2 - 1, -1)\n# ind[1::2] = arange(0, c // 2)\n \n# W = W[:, ind]\n \n# self.filters_ = W.T[:self.n_components]\n\n# def transform(self, X):\n# XT = asarray([dot(self.filters_, epoch) for epoch in X])\n# XVAR = log(mean(XT ** 2, axis=2))\n \n# return XVAR\n \n# def sbcsp(sujeito, classes, args):\n# atraso, t_filtro, n_componentes, fs, fl, fh, ordem = args \n \n# X = load_data(sujeito, classes)\n \n# t0 = time()\n \n# XJ = windowing(X, fs, 0, 2.5, 4.5, atraso)\n \n# if t_filtro == 'IIR': XF = filtragemIIR(XJ, fs, fl, fh)\n# elif t_filtro == 'FFT': XF = filtragemFFT2(XJ, fs, fl, fh)\n\n# XT = concatenate([XF[0],XF[1]]) # treinamento A e B\n# XV = concatenate([XF[2],XF[3]]) # validação A e B\n \n# y = concatenate([zeros(72), ones(72)]) #vetor gabarito\n \n# # CSP\n# csp = CSP(n_components=n_componentes)\n# csp.fit(XT, y)\n# XT_CSP = csp.transform(XT)\n# XV_CSP = csp.transform(XV) \n \n# # LDA\n# clf = LinearDiscriminantAnalysis()\n# clf.fit(XT_CSP, y)\n# saida_lda = clf.predict(XV_CSP)\n \n# # for aa in saida_lda: print(aa)\n \n# acuracia = mean(saida_lda == y)\n \n# tempo = time() - t0\n \n# print(sujeito, classes, round(tempo, 2), round(acuracia * 100, 2))\n# #print(round(acuracia * 100, 2))\n# # print asarray(XJ).shape \n \n# return acuracia, tempo\n\n# if __name__ == \"__main__\":\n# # sujeito = arange(1, 10)\n# classes = [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]\n# atrasos = range(0,6) \n# t_filtro = ['IIR','FFT'] # opções: 'IIR' 'FFT' 'PROJ'\n# n_componentes = 6\n# fs = 250\n# fl = 8 \n# fh = 30\n# ordem = 5\n \n# print('\\n>> Qtd sujeitos: 9')\n# print('>> Classes: LH x RH')\n# print('>> Abordagem: CSP + LDA')\n# print('>> Parâmetros: [8-30Hz] butter_order=5 ')\n \n# for tf in t_filtro:\n# print('\\n========================',tf,'=========================')\n# print('Atrasos\\t','| Acc Média (%)','| DP (%)', '| Custo (s) ', '| Custo_i (s)')\n# print('======================================================')\n# for atraso in atrasos:\n# args = atraso, tf, n_componentes, fs, fl, fh, ordem\n# result = [sbcsp(suj, classes[i], args) for suj in range(1, 2) for i in range(0, 1) ]\n# print (' '+ str(atraso) +'\\t | ' + \n# 
str(round(mean(asarray(result)[:,0])*100, 2)) + '%\\t | ' +\n# str(round(std(asarray(result)[:,0])*100, 2)) + '% | ' +\n# str(round(sum(asarray(result)[:,1]), 2)) + 's\\t| ' +\n# str(round(mean(asarray(result)[:,1]), 2)) + 's')\n \n# #print ('Acc média:\\t' + str(round(mean(asarray(result)[:,0])*100, 2)) + '%')\n# #print ('Custo total:\\t' + str(round(sum(asarray(result)[:,1]), 2)) + 's')\n# #print ('Custo médio:\\t' + str(round(mean(asarray(result)[:,1]), 2)) + 's\\n')","sub_path":"scripts/cla_lags.py","file_name":"cla_lags.py","file_ext":"py","file_size_in_byte":12410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"199996999","text":"import argparse\n\nfrom nn.net import ClassifierNetwork, TrainerNetwork\nfrom dataset.pickle import readPickleZip\nfrom dataset.shared import splitToShared\nfrom nn.trainUtils import trainSupervised\nfrom nn.profiler import setupLogging\n\n'''This application will distill dark knowledge out of existing networks and\n into a pickled dataset which can be used as training for smaller deployable\n networks. This step should be used once a deep network has been trained to\n identify objects. Since deep networks are cumbersome and expensive, this\n technique works to make a lighter-weight deployable network. \n'''\nif __name__ == '__main__' :\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--log', dest='logfile', type=str, default=None,\n help='Specify log output file.')\n parser.add_argument('--level', dest='level', default='INFO', type=str, \n help='Log Level.')\n parser.add_argument('--limit', dest='limit', type=int, default=5,\n help='Number of runs between validation checks.')\n parser.add_argument('--stop', dest='stop', type=int, default=5,\n help='Number of inferior validation checks to end.')\n parser.add_argument('--softness', dest='softness', type=float, default=3,\n help='Softness factor in softmax function.')\n parser.add_argument('--holdout', dest='holdout', type=float, default=.05,\n help='Percent of data to be held out for testing.')\n parser.add_argument('--batch', dest='batchSize', type=int, default=50,\n help='Batch size for training and test sets.')\n parser.add_argument('--base', dest='base', type=str, default='./distillery',\n help='Base name of the network output and temp files.')\n parser.add_argument('--shallow', dest='shallow', type=str, required=True,\n help='Synapse for the shallow target network. This ' +\n 'network should be populated with freshly ' + \n 'initialized layers for optimal results.')\n parser.add_argument('--deep', dest='deep', type=str, default=None,\n help='Synapse for the deep network to distill. 
This ' +\n 'network should be trained and ready.')\n parser.add_argument('--dark', dest='dark', type=str, default=None,\n help='pkl.gz file previously created by the ' +\n 'distillery for dark knowledge transfer.')\n parser.add_argument('--data', dest='data', type=str, default=None,\n help='Directory or pkl.gz file for the training and ' +\n 'test sets')\n options = parser.parse_args()\n\n # setup the logger\n log = setupLogging('distillery: ' + options.data, \n options.level, options.logfile)\n\n # if the user specified a deep network and dataset, then distill the\n # knowledge into a new pickle to use for training.\n if options.deep is not None :\n from dataset.ingest.distill import distillKnowledge\n\n if options.dark is not None :\n raise Exception('Only specify one usage, --deep or --dark.')\n if options.data is None :\n raise Exception('When specifying a deep network, please also ' +\n 'specify a dataset to distill using --data.')\n\n # distill knowledge out of the deep network into a pickle\n deepNet = ClassifierNetwork(filepath=options.deep, log=log)\n options.dark = distillKnowledge(deepNet=deepNet,\n filepath=options.data,\n batchSize=options.batchSize, \n holdoutPercentage=options.holdout, \n log=log)\n\n # use the pickle to train a shallower network to perform the same task\n train, test, labels = readPickleZip(options.dark, log)\n shallowNet = TrainerNetwork(splitToShared(train, castLabelInt=False), \n splitToShared(test), labels,\n filepath=options.shallow, log=log)\n\n trainSupervised(shallowNet, __file__, options.data, \n numEpochs=options.limit, stop=options.stop, \n synapse=options.shallow, base=options.base, log=log)\n","sub_path":"trunk/projects/distillery/distillery.py","file_name":"distillery.py","file_ext":"py","file_size_in_byte":4405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"240533111","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport random\n\nclass TutorialDatasetManager:\n def __init__(self, dataset_size=100, width=32, height=32, noise_u=0.0, noise_sd=0.2, target_r=2.8, target_value=1.5):\n self.dataset_size = dataset_size\n self.width = width\n self.height = height\n self.noise_u = noise_u\n self.noise_sd = noise_sd\n self.target_r = target_r\n self.target_value = target_value\n self.generate_dataset()\n\n def normal_rnd(self):\n return np.random.normal(self.noise_u, self.noise_sd)\n\n def generate_data(self):\n x = np.array([[self.normal_rnd() for x in range(self.width)] for y in range(self.height)], np.float32)\n\n px = np.random.rand() * (self.width - 2.0 * self.target_r) + self.target_r\n py = np.random.rand() * (self.height - 2.0 * self.target_r) + self.target_r\n\n for y_i in range(self.height):\n for x_i in range(self.width):\n if (px - x_i) ** 2 + (py - y_i) ** 2 < self.target_r ** 2:\n x[y_i][x_i] = self.target_value\n return px, py, np.reshape(x, [self.width * self.height])\n\n def generate_dataset(self):\n data_shape = [self.dataset_size, self.width * self.height]\n\n self.dataset_x = np.zeros(shape=data_shape)\n self.dataset_y = np.zeros(shape=[self.dataset_size, 2])\n for i in range(self.dataset_size):\n px, py, x = self.generate_data()\n self.dataset_x[i] = x\n self.dataset_y[i] = np.array([px, py])\n \n def next_batch(self, size):\n idx = [np.random.randint(low=0, high=self.dataset_size) for _ in range(size)]\n return self.dataset_x[idx], self.dataset_y[idx]\n\nif __name__ == \"__main__\":\n dataset_manager = TutorialDatasetManager()\n x, y = 
dataset_manager.next_batch(10)\n\n plt_nrow = 3\n plt_ncol = 3\n fig = plt.figure()\n for i in range(plt_nrow * plt_ncol):\n sub = fig.add_subplot(plt_nrow, plt_ncol, i + 1)\n sub.set_title(y[i])\n plt.imshow(np.reshape(x[i], (32, 32)), cmap=\"gray\")\n plt.plot(y[i][0], y[i][1], \"+r\")\n plt.show()\n","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"582106051","text":"\"\"\"The First function within the bios boot.\r\nThe BIOS memoory is first assigned using a given hard-coded key.\r\n\r\nOnce assigned the wak _produces_ a memory allocation and assigns the first\r\nkey values to the BIOS memory slot.\r\n\r\nThe POST commands are read and the instructions load the next memory\r\nslots for the enviroment.\r\n\r\n Open Mmap volatile TAPE.\r\n\r\n Any middleman information for the TAPE should write here. Such as\r\n bios RAM encryption layers.\r\n\r\n COLD or WARM boot.\r\n Write a print function.\r\n clean open space\r\n load memory references\r\n load base lib\r\n load memory config\r\n load state\r\n\r\nBIOS RAM STATE\r\n 0 unallocated / not installed\r\n Nothing in the ram, no protection mode, no instructions.\r\n\r\n\"\"\"\r\n\r\nimport sys, builtins\r\nimport mmap, os\r\nimport time\r\nimport marshal\r\n\r\nTAPE_EXISTS = -1\r\nTAPE_MISSING = -2\r\nBEEP = \"\\x07\"\r\n\r\n\r\nHEADER = { 'debug': None }\r\n\r\n\r\ndef c_mem_clear(string):\r\n import ctypes\r\n location = id(string) + 20\r\n size = sys.getsizeof(string) - 20\r\n ''' msvcrt\r\n fclose\r\n fopen\r\n freopen\r\n fwrite\r\n kbhit\r\n memcmp\r\n memcpy\r\n memmove\r\n memset\r\n rand\r\n scanf\r\n sprintf\r\n srand\r\n system\r\n time\r\n '''\r\n memset = ctypes.cdll.msvcrt.memset\r\n # For Linux, use the following. 
Change the 6 to whatever it is on your computer.\r\n # memset = ctypes.CDLL(\"libc.so.6\").memset\r\n\r\n puts( \"Clearing 0x%08x size %i bytes\" % (location, size))\r\n\r\n memset(location, 0, size)\r\n\r\n\r\nputs = getattr(__builtins__, 'print')\r\n\r\n\r\n\"\"\"A new ram tape file has:\r\n state int for vol wakeup\r\n pointer - the filedescriptor pointer - stored and verified later\r\n uuid - For verification\r\n kernel -\r\n\"\"\"\r\nclass Config:\r\n\r\n def find(self):\r\n if os.path.exists('HEADER'):\r\n puts('discovered configure \"HEADER\"')\r\n data = self.read('HEADER')\r\n return data\r\n\r\n def write(self, value):\r\n vv=compile('{}\\n'.format(value), 'HEADER', 'eval')\r\n ff=open('HEADER', 'wb')\r\n marshal.dump(vv, ff)\r\n ff.close()\r\n\r\n def read(self, name):\r\n stream = open(name, 'rb')\r\n br = b''\r\n for line in stream.readlines():\r\n br += line\r\n result = eval(marshal.loads(br))\r\n stream.close()\r\n return result\r\n\r\n\r\ndef WAKE():\r\n global tape\r\n global HEADER\r\n\r\n #io = IO()\r\n config = Config()\r\n # tape = BIOS_TAPE()\r\n HEADER = config.find()\r\n puts('WAKE')\r\n\r\n# Import cold or warm state.\r\nWAKE()\r\n","sub_path":"v1/src/runtime-libs/bios/wake.py","file_name":"wake.py","file_ext":"py","file_size_in_byte":2611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"246693074","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/jnvilo/Projects/web/mycms/mycms/creole/creole2html/rules.py\n# Compiled at: 2019-02-05 11:01:21\n# Size of source mod 2**32: 7313 bytes\n\"\"\"\n Creole Rules for parser\n ~~~~~~~~~~~~~~~~~~~~~~~\n\n :copyleft: 2008-2013 by python-creole team, see AUTHORS for more details.\n :license: GNU GPL v3 or above, see LICENSE for more details.\n\"\"\"\nfrom __future__ import division, absolute_import, print_function, unicode_literals\nimport re\n\nclass InlineRules(object):\n __doc__ = '\\n All inline rules\\n '\n proto = 'http|https|ftp|nntp|news|mailto|telnet|file|irc'\n url = '(?P\\n (^ | (?<=\\\\s))\\n (?P~)?\\n (?P (?P %s )://[^$\\\\s]+ )\\n )' % proto\n link = '(?P\\n \\\\[\\\\[\\n (?P.+?) \\\\s*\\n ([|] \\\\s* (?P.+?) \\\\s*)?\\n ]]\\n )'\n image = '(?P\\n {{\\n (?P.+?) \\\\s*\\n (\\\\| \\\\s* (?P.+?) \\\\s*)?\\n }}\\n )(?i)'\n macro_inline = '\\n (?P\\n << \\\\s* (?P\\\\w+) \\\\s* (?P.*?) \\\\s* >>\\n (?P(.|\\\\n)*?)\\n <>\\n )\\n '\n macro_tag = '(?P\\n <<(?P \\\\w+) (?P.*?) \\\\s* /*>>\\n )'\n pre_inline = '(?P {{{ (?P.*?) }}} )'\n emphasis = '(?P(?.+?) (?\\\\*\\\\* (?P.+?) \\\\*\\\\* )'\n monospace = '(?P \\\\#\\\\# (?P.+?) \\\\#\\\\# )'\n superscript = '(?P \\\\^\\\\^ (?P.+?) \\\\^\\\\^ )'\n subscript = '(?P ,, (?P.+?) ,, )'\n underline = '(?P __ (?P.+?) __ )'\n delete = '(?P ~~ (?P.+?) ~~ )'\n small = '(?P-- (?P.+?) -- )'\n linebreak = '(?P \\\\\\\\\\\\\\\\ )'\n escape = '(?P ~ (?P\\\\S) )'\n char = '(?P . )'\n\n\nclass BlockRules(object):\n __doc__ = '\\n All used block rules.\\n '\n macro_block = '\\n (?P\\n << \\\\s* (?P\\\\w+) \\\\s* (?P.*?) \\\\s* >>\\n (?P(.|\\\\n)*?)\\n <>\\n )\\n '\n line = '(?P ^\\\\s*$ )'\n head = '(?P\\n ^\\n (?P=+) \\\\s*\\n (?P .*? 
)\\n (=|\\\\s)*?$\\n )'\n separator = '(?P ^ \\\\s* ---- \\\\s* $ )'\n pre_block = '(?P\\n ^{{{ \\\\s* $\\n (?P\\n ([\\\\#]!(?P\\\\w*?)(\\\\s+.*)?$)?\\n (.|\\\\n)+?\\n )\\n ^}}})\\n '\n list = '(?P\\n ^ [ \\\\t]* ([*][^*\\\\#]|[\\\\#][^\\\\#*]).* $\\n ( \\\\n[ \\\\t]* [*\\\\#]+.* $ )*\\n )'\n table = '^ \\\\s*(?P\\n [|].*? \\\\s*\\n [|]?\\n ) \\\\s* $'\n re_flags = re.VERBOSE | re.UNICODE | re.MULTILINE\n\n def __init__(self, blog_line_breaks=True):\n if blog_line_breaks:\n self.text = '(?P .+ ) (?P (? (? .+ )'\n self.rules = (\n self.macro_block,\n self.line, self.head, self.separator,\n self.pre_block, self.list,\n self.table, self.text)\n\n\nclass SpecialRules(object):\n __doc__ = '\\n re rules witch not directly used as inline/block rules.\\n '\n item = '^ \\\\s* (?P\\n (?P [\\\\#*]+) \\\\s*\\n (?P .*?)\\n ) \\\\s* $'\n cell = '\\n \\\\| \\\\s*\\n (\\n (?P [=][^|]+ ) |\\n (?P ( %s | [^|])+ )\\n ) \\\\s*\\n ' % '|'.join([\n InlineRules.link,\n InlineRules.macro_inline, InlineRules.macro_tag,\n InlineRules.image,\n InlineRules.pre_inline])\n pre_escape = ' ^(?P\\\\s*) ~ (?P \\\\}\\\\}\\\\} \\\\s*) $'\n\n\nINLINE_FLAGS = re.VERBOSE | re.UNICODE\nINLINE_RULES = (\n InlineRules.link, InlineRules.url,\n InlineRules.macro_inline, InlineRules.macro_tag,\n InlineRules.pre_inline, InlineRules.image,\n InlineRules.strong, InlineRules.emphasis,\n InlineRules.monospace, InlineRules.underline,\n InlineRules.superscript, InlineRules.subscript,\n InlineRules.small, InlineRules.delete,\n InlineRules.linebreak,\n InlineRules.escape, InlineRules.char)\n\ndef _verify_rules(rules, flags):\n \"\"\"\n Simple verify the rules -> try to compile it ;)\n \n >>> _verify_rules(INLINE_RULES, INLINE_FLAGS)\n Rule test ok.\n \n >>> block_rules = BlockRules() \n >>> _verify_rules(block_rules.rules, block_rules.re_flags)\n Rule test ok.\n \"\"\"\n rule_list = []\n for rule in rules:\n try:\n re.compile(rule, flags)\n rule_list.append(rule)\n re.compile('|'.join(rule_list), flags)\n except Exception as err:\n try:\n print(' *** Error with rule:')\n print(rule)\n print(' - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -')\n raise\n finally:\n err = None\n del err\n\n print('Rule test ok.')\n\n\nif __name__ == '__main__':\n import doctest\n print(doctest.testmod())\n print('--------------------------------------------------------------------------------')","sub_path":"pycfiles/mycms-0.0.41.tar/rules.cpython-37.py","file_name":"rules.cpython-37.py","file_ext":"py","file_size_in_byte":5752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"274017675","text":"import csv\nimport glob\nfrom unittest import TestCase\n\nfrom kerapu.Kerapu import Kerapu\nfrom kerapu.lbz.Subtraject import Subtraject\n\n\nclass BulkTest(TestCase):\n \"\"\"\n Bulk testen.\n \"\"\"\n\n # ------------------------------------------------------------------------------------------------------------------\n def __init__(self, method_name='runTest'):\n \"\"\"\n Object constructor.\n \"\"\"\n TestCase.__init__(self, method_name)\n\n self.__grouper = Kerapu()\n self.__grouper.init_static('test/var/lib')\n\n # ------------------------------------------------------------------------------------------------------------------\n def __bepaal_zorgproduct(self, subtraject, expected):\n \"\"\"\n Bepaalt de zorgproductcode van een subtraject.\n\n :param kerapu.Lbz.Subtraject.Subtraject subtraject: Het subtraject.\n \"\"\"\n zorg_product_groep_code = 
self.__grouper.bepaal_zorg_product_groep(subtraject)\n subtraject.set_zorg_product_groep_code(zorg_product_groep_code)\n\n if zorg_product_groep_code != '0':\n zorg_product_code = self.__grouper.bepaal_zorg_product(subtraject, zorg_product_groep_code)\n self.assertEqual(zorg_product_code, expected, subtraject.get_subtraject_nummer())\n\n # ------------------------------------------------------------------------------------------------------------------\n def bulk_test_file(self, filename):\n \"\"\"\n Tests against a file with test cases.\n\n :type: str filename The file with test cases.\n \"\"\"\n with open(filename, 'rt', encoding='utf-8') as handle:\n csv_reader = csv.reader(handle, lineterminator='\\n', delimiter=',')\n\n vorige = None\n subtraject = None\n for rij in csv_reader:\n if vorige and rij[0]:\n self.__bepaal_zorgproduct(subtraject, vorige[6])\n vorige = None\n\n if not vorige:\n vorige = rij\n subtraject = Subtraject(rij[0], rij[1], rij[2], rij[3], rij[4], rij[5], rij[7], rij[8])\n\n subtraject.add_zorg_activiteit(rij[9], rij[10])\n\n if vorige:\n self.__bepaal_zorgproduct(subtraject, vorige[6])\n\n # ------------------------------------------------------------------------------------------------------------------\n def test01(self):\n \"\"\"\n Bulk test.\n \"\"\"\n for filename in list(glob.glob('test/var/lib/bulk_test*.csv')):\n self.bulk_test_file(filename)\n\n# ----------------------------------------------------------------------------------------------------------------------\n","sub_path":"test/BulkTest.py","file_name":"BulkTest.py","file_ext":"py","file_size_in_byte":2695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"86281734","text":"# Determines whether a galaxy is contained in\n# an under-dense sphere (void) or not.\n\n# Author: E. 
Paillas \n\nimport numpy\n\n# Void catalog\nvoid_catalog = numpy.genfromtxt('/Users/epaillas/Google Drive/research/\\\nvoid_galaxies/void_catalogs/bubbles/VC_REFL0100N1504_GM8_SM_rmin3333_ovl40.pos')\n\ngalaxy_catalog = numpy.genfromtxt('/Users/epaillas/Google Drive/research/\\\nvoid_galaxies/galaxy_catalogs/eagle_galaxies.dat', skip_header=1)\n\nng = len(galaxy_catalog) # Number of galaxies in the catalog\nnv = len(void_catalog) # Number of voids in the catalog\n\nprint('Number of voids: ' + repr(nv))\nprint('Number of galaxies: ' + repr(ng))\n\nxv, yv, zv, rv = void_catalog[:,0], void_catalog[:,1], void_catalog[:,2],\\\nvoid_catalog[:,3]\n\nxp, yp, zp = galaxy_catalog[:,0], galaxy_catalog[:,1], galaxy_catalog[:,2]\n\nvoid_galaxy = []\n\nfor i in range(nv): # For each void\n for j in range(ng): # Loop over the galaxies in the simulation\n disx = xp[j] - xv[i]\n disy = yp[j] - yv[i]\n disz = zp[j] - zv[i]\n\n dis = numpy.sqrt(disx ** 2 + disy ** 2 + disz ** 2)\n\n # And see if they're inside its radius\n if dis < rv[i]:\n void_galaxy.append(j) # Save their index in case they are\n\n# Remove duplicates from the void galaxy list\nvoid_galaxy = list(set(void_galaxy))\n\nprint('Number of void galaxies: ' + repr(len(void_galaxy)))\n\n\n# Print indices to a file\nnumpy.savetxt('void_galaxies_indices.dat', void_galaxy)\n","sub_path":"analysis/void_galaxies.py","file_name":"void_galaxies.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"242380871","text":"import os\nfrom pyquery import PyQuery\n\ndoc = PyQuery(\"https://www.maltandvine.com\") \n\nbeers = [beer.text.strip() for beer in doc.find(\"ul\").eq(0).children()]\nbeers.sort()\n\ntap_path = os.path.join(os.path.dirname(__file__), \"ontap\", \"maltandvine.tap\")\n\nwith open(tap_path, 'w') as f:\n f.write(\"\\n\".join(beers))\n f.write(\"\\n\")\n\n","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"368325511","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\n# Create your views here.\n\nfrom cms.models import Pages\n\n\ndef pagina(request, identificador):\n if request.method == \"GET\":\n try:\n pagina = Pages.objects.get(id=identificador)\n respuesta = pagina.page\n except Page.DoesNotExist:\n respuesta = \"No existe la pagina \" + str(identificador)\n\n else:\n respuesta = \"No puedes utilizar ese metodo. 
Puedes utilizar GET\"\n\n return HttpResponse(respuesta)\n","sub_path":"cms/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"255307436","text":"import os\n\nimport Crypto\nimport Crypto.Random\nfrom Crypto.PublicKey import RSA\nfrom Crypto.Cipher import PKCS1_OAEP\n\n\nclass KeyGeneration(object):\n \"\"\"\n This class is responsible for handling keys for data sharing.\n\n Attributes:\n private_key (obj): holds private key object\n public_key (obj): holds public key object\n \"\"\"\n\n def __init__(self):\n self.private_key = 0\n self.public_key = 0\n\n def generate_keys(self, key_length=4096):\n \"\"\"\n This function only generates public and private keys.\n :param key_length: (int)\n \"\"\"\n random_gen = Crypto.Random.new().read\n self.private_key = RSA.generate(key_length, random_gen)\n self.public_key = self.private_key.publickey()\n\n def save_keys(self):\n \"\"\"\n This function saves public and private keys to `keys` folder.\n \"\"\"\n with open(os.path.join('keys', 'private.key'), 'wb') as file:\n file.write(self.private_key.exportKey())\n\n with open(os.path.join('keys', 'public.key'), 'wb') as file:\n file.write(self.public_key.exportKey())\n\n def load_keys(self):\n \"\"\"\n This function loads public and private key from `keys` folder.\n :return: private_key, public_key\n \"\"\"\n self._load_private()\n self._load_public()\n\n return self.private_key, self.public_key\n\n def load_old_keys(self):\n \"\"\"\n This function loads public and private key from `keys` folder.\n :return: private_key, public_key\n \"\"\"\n self._load_private('private.old')\n self._load_public('public.old')\n\n return self.private_key, self.public_key\n\n def _load_private(self, filename='private'):\n \"\"\"\n This function loads private key.\n \"\"\"\n path = os.path.join('keys')\n with open(os.path.join(path, '{}.key'.format(filename)), 'rb') as file:\n self.private_key = RSA.importKey(file.read())\n\n def _load_public(self, filename='public'):\n \"\"\"\n This function loads public key.\n \"\"\"\n path = os.path.join('keys')\n with open(os.path.join(path, '{}.key'.format(filename)), 'rb') as file:\n self.public_key = RSA.importKey(file.read())\n\n def load_or_generate(self):\n \"\"\"\n This function depending on the current state will generate or load keys.\n \"\"\"\n path = os.path.join('keys')\n\n keys_path_content = os.listdir(path)\n\n if 'private.key' not in keys_path_content or 'public.key' not in keys_path_content:\n self.generate_keys(4 * 1024)\n self.save_keys()\n self.load_keys()\n","sub_path":"data_share/KeyGeneration.py","file_name":"KeyGeneration.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"330727408","text":"#! 
/usr/bin/python3\n\n# writes a configuration with a plane structure of ipcs.\n\nimport argparse\nfrom math import cos, sin, sqrt, pi, floor\nfrom numpy.random import ranf\n\nhelpString = \"\"\"Creates a LAMMPS starting configuration with a fully formed wafer layer structure.\\n\nSample values for a cubic box: 14 12 6 12.4 12.4 12.0 0.22\\n\nSample values for an elongated box: 14 24 6 12.4 24.8 12.0 0.22\\n\"\"\"\n\nparser = argparse.ArgumentParser(description=helpString)\nparser.add_argument('particlePerSideX', metavar='nPx', type=int, help='number of IPCs in the X side')\nparser.add_argument('particlePerSideY', metavar='nPy', type=int, help='number of IPCs in the Y side')\nparser.add_argument('numberOfLayersZ', metavar='nLz', type=int, help='number of IPC layers in the Z side')\nparser.add_argument('boxSideX', metavar='Lx', type=float, help='size of the simulation box side base (x)')\nparser.add_argument('boxSideY', metavar='Ly', type=float, help='size of the simulation box side base (y)')\nparser.add_argument('boxSideZ', metavar='Lz', type=float, help='height of the simulation box side (z)')\nparser.add_argument('ecc', metavar='e', type=float, help='eccentricity of the IPCs')\nargs = parser.parse_args()\nprint(args)\n\noutputFile = open('IPC_startingstate_manner.txt','w')\n\nLx = args.boxSideX\nLy = args.boxSideY\nLz = args.boxSideZ\necc = args.ecc\nnWaferX = args.particlePerSideX\nnWaferY = args.particlePerSideY\nnWaferZ = args.numberOfLayersZ\nnFluidX = int(nWaferX/2)\nnFluidY = int(nWaferY/1.6)\nnFluidZ = nWaferZ\n\ntotalWaferIPCs = nWaferX * nWaferY * nWaferZ\ntotalChocoIPCs = nFluidX * nFluidY * nFluidZ\nnIPCs = totalWaferIPCs + totalChocoIPCs\n\ndef absolutePBCx(x):\n return x - Lx*floor(x/Lx)\n\ndef absolutePBCy(y):\n return y - Ly*floor(y/Ly)\n\ndef absolutePBCz(z):\n return z - Lz*floor(z/Lz)\n\nalpha = .45*pi\nbeta = .93*pi\ncos30 = sqrt(3)*.5\np = [ [ ecc*cos(alpha), ecc*sin(alpha), 0. ] ,\n [ ecc*cos(beta), ecc*sin(beta), 0. 
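# the two alternating patch offset directions (selected via the parity-based index j in the wafer loop below); with alpha = 0.45*pi and beta = 0.93*pi these evaluate to roughly ecc*(0.156, 0.988, 0.) and ecc*(-0.976, 0.218, 0.)\n 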
] ]\n\n\noutputFile.write(\"# 3D starting configuration for LAMMPS created with a script available at\\n\")\noutputFile.write(\"# https://github.com/Zirbo/IPCsim/tree/master/lammps\\n\")\noutputFile.write(\"# The wafer point particles are from 1 to \" + str(3*totalWaferIPCs))\n\noutputFile.write(\"\\n\")\noutputFile.write(\"\\n\" + str(3*nIPCs).rjust(16) + \" atoms\")\noutputFile.write(\"\\n\" + str(2*nIPCs).rjust(16) + \" bonds\")\noutputFile.write(\"\\n\" + str( nIPCs).rjust(16) + \" angles\")\noutputFile.write(\"\\n\")\n\noutputFile.write(\"\\n\" + str(2).rjust(16) + \" atom types\")\noutputFile.write(\"\\n\" + str(1).rjust(16) + \" bond types\")\noutputFile.write(\"\\n\" + str(1).rjust(16) + \" angle types\")\noutputFile.write(\"\\n\")\n\noutputFile.write(\"\\n\" + '{:3.8f}'.format(0.0).rjust(16) +\n '{:3.8f}'.format(Lx).rjust(16) + \" xlo xhi\")\noutputFile.write(\"\\n\" + '{:3.8f}'.format(0.0).rjust(16) +\n '{:3.8f}'.format(Ly).rjust(16) + \" ylo yhi\")\noutputFile.write(\"\\n\" + '{:3.8f}'.format(0.0).rjust(16) +\n '{:3.8f}'.format(Lz).rjust(16) + \" zlo zhi\")\n\noutputFile.write(\"\\n\")\noutputFile.write(\"\\nMasses\")\noutputFile.write(\"\\n# atomtype, mass\")\noutputFile.write(\"\\n\" + str(1).rjust(10) + str(2.0).rjust(10))\noutputFile.write(\"\\n\" + str(2).rjust(10) + str(0.5).rjust(10))\n\noutputFile.write(\"\\n\")\noutputFile.write(\"\\nAtoms\")\noutputFile.write(\"\\n# atom-ID mol-ID atom-type charge x y z\")\n\n\n# wafer layer\nwaferIPCs = 0\nfor iz in range(nWaferZ):\n z = 0.5 + 2.0000000000001*iz\n for ix in range(nWaferX):\n x = 0.5 + 1.0000000000001*cos30*ix\n for iy in range(nWaferY):\n waferIPCs += 1\n atomNumber = (waferIPCs - 1)*3 + 1\n # ipc center\n j = 0 if (iy + (int((ix + 1)/2))%2)%2==0 else 1\n y = 0.5 + ( (.5 + 1.0000000000001*iy) if ix%2==0 else (1.0000000000001*iy) )\n outputFile.write(\"\\n\" + str(atomNumber).rjust(10) +\n str(waferIPCs).rjust(10) +\n str(1).rjust(10) +\n str(-1.).rjust(10) +\n '{:3.8f}'.format(x).rjust(16) +\n '{:3.8f}'.format(y).rjust(16) +\n '{:3.8f}'.format(z).rjust(16) )\n # first patch\n px = x + p[j][0]; px = absolutePBCx(px)\n py = y + p[j][1]; py = absolutePBCy(py)\n pz = z + p[j][2]; pz = absolutePBCz(pz)\n atomNumber += 1\n outputFile.write(\"\\n\" + str(atomNumber).rjust(10) +\n str(waferIPCs).rjust(10) +\n str(2).rjust(10) +\n str(0.5).rjust(10) +\n '{:3.8f}'.format(px).rjust(16) +\n '{:3.8f}'.format(py).rjust(16) +\n '{:3.8f}'.format(pz).rjust(16) )\n # second patch\n px = x - p[j][0]; px = absolutePBCx(px)\n py = y - p[j][1]; py = absolutePBCy(py)\n pz = z - p[j][2]; pz = absolutePBCz(pz)\n atomNumber += 1\n outputFile.write(\"\\n\" + str(atomNumber).rjust(10) +\n str(waferIPCs).rjust(10) +\n str(2).rjust(10) +\n str(0.5).rjust(10) +\n '{:3.8f}'.format(px).rjust(16) +\n '{:3.8f}'.format(py).rjust(16) +\n '{:3.8f}'.format(pz).rjust(16) )\n# chocolate layer\nchocoIPCs = 0\nfor iz in range(nFluidZ):\n z = 1.5 + 2.0000000000001*iz\n for ix in range(nFluidX):\n x = 0.6 + 1.0000000000001*cos30*(2*ix+0.5)\n for iy in range(nFluidY):\n chocoIPCs += 1\n atomNumber = 3*totalWaferIPCs + (chocoIPCs - 1)*3 + 1\n y = .6 + 1.6*iy\n outputFile.write(\"\\n\" + str(atomNumber).rjust(10) +\n str(totalWaferIPCs + chocoIPCs).rjust(10) +\n str(1).rjust(10) +\n str(-1.).rjust(10) +\n '{:3.8f}'.format(x).rjust(16) +\n '{:3.8f}'.format(y).rjust(16) +\n '{:3.8f}'.format(z).rjust(16) )\n # first patch\n px = x; px = absolutePBCx(px)\n py = y; py = absolutePBCy(py)\n pz = z + ecc; pz = absolutePBCz(pz)\n atomNumber += 1\n outputFile.write(\"\\n\" + 
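# one Atoms record per point particle: atom-ID, mol-ID, atom-type, charge, x, y, z (matching the header comment written above)\n 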
str(atomNumber).rjust(10) +\n str(totalWaferIPCs + chocoIPCs).rjust(10) +\n str(2).rjust(10) +\n str(0.5).rjust(10) +\n '{:3.8f}'.format(px).rjust(16) +\n '{:3.8f}'.format(py).rjust(16) +\n '{:3.8f}'.format(pz).rjust(16) )\n # second patch\n px = x; px = absolutePBCx(px)\n py = y; py = absolutePBCy(py)\n pz = z - ecc; pz = absolutePBCz(pz)\n atomNumber += 1\n outputFile.write(\"\\n\" + str(atomNumber).rjust(10) +\n str(totalWaferIPCs + chocoIPCs).rjust(10) +\n str(2).rjust(10) +\n str(0.5).rjust(10) +\n '{:3.8f}'.format(px).rjust(16) +\n '{:3.8f}'.format(py).rjust(16) +\n '{:3.8f}'.format(pz).rjust(16) )\n\nassert waferIPCs == totalWaferIPCs, '{} != {}'.format(waferIPCs, totalWaferIPCs)\nassert chocoIPCs == totalChocoIPCs, '{} != {}'.format(chocoIPCs, totalChocoIPCs)\nassert chocoIPCs + waferIPCs == nIPCs, '{} != {}'.format(chocoIPCs + waferIPCs, nIPCs)\n\n\noutputFile.write(\"\\n\")\noutputFile.write(\"\\nBonds\")\noutputFile.write(\"\\n# ID bond-type atom-1 atom-2\")\nfor i in range(nIPCs):\n IDcenter = 3*i + 1\n IDpatch1 = 3*i + 2\n IDpatch2 = 3*i + 3\n outputFile.write(\"\\n\" + str(2*i+1).rjust(10) + str(1).rjust(10) +\n str(IDcenter).rjust(10) + str(IDpatch1).rjust(10) )\n outputFile.write(\"\\n\" + str(2*i+2).rjust(10) + str(1).rjust(10) +\n str(IDcenter).rjust(10) + str(IDpatch2).rjust(10) )\n\noutputFile.write(\"\\n\")\noutputFile.write(\"\\nAngles\")\noutputFile.write(\"\\n# ID angle-type atom-1 atom-2 atom-3 (atom-2 is the center atom in angle)\")\nfor i in range(nIPCs):\n IDcenter = 3*i + 1\n IDpatch1 = 3*i + 2\n IDpatch2 = 3*i + 3\n outputFile.write(\"\\n\" + str(i+1).rjust(10) + str(1).rjust(10) +\n str(IDpatch1).rjust(10) + str(IDcenter).rjust(10) +\n str(IDpatch2).rjust(10) )\noutputFile.write(\"\\n\")\n","sub_path":"lammps/3D_startingstate_wafer_layers_full.py","file_name":"3D_startingstate_wafer_layers_full.py","file_ext":"py","file_size_in_byte":8313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"291016925","text":"# -*- coding: utf-8 -*-\nimport sys\nimport os\nfrom flask import *\nfrom jinja2 import TemplateNotFound\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\n#app.config['UPLOAD_FOLDER'] = '/home/andrew/DS1e2/ds2/2bim/provaGoogle/static/img'\napp.config['UPLOAD_FOLDER'] = \"D:/ds1e2/ds2/sqlAlchemy/static/img\"\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = 'postgresql://postgres:postgres@localhost:5432/trabExtra'\napp.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = True\napp.debug = True\ndb = SQLAlchemy(app)\n\nclass Anotacao(db.Model):\n\tid = db.Column(db.Integer, primary_key=True)\n\ttitulo = db.Column(db.String(200))\n\ttexto = db.Column(db.String(200))\n\timagem = db.Column(db.String(200))\n\n\tdef to_json(self):\n\t\treturn {'id':self.id, 'titulo':self.titulo, 'texto':self.texto, 'imagem':self.imagem}\n\n@app.after_request\ndef add_header(r):\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n r.headers[\"Pragma\"] = \"no-cache\"\n r.headers[\"Expires\"] = \"0\"\n r.headers['Cache-Control'] = 'public, max-age=0'\n return r\n\n@app.route('/ajaxAnotacao', methods = ['POST'])\ndef ajaxAnotacao():\n\tcodigo = request.form['codigo']\n\tanotacao = Anotacao().query.filter_by(id=codigo).first()\n\treturn jsonify(titulo = anotacao.titulo, texto = anotacao.texto, imagem = anotacao.imagem)\n\n@app.route('/404')\ndef erro404():\t\n\treturn render_template(\"404.html\")\n\n@app.route('/')\ndef index():\t\n\treturn render_template(\"index.html\", vetAnotacoes = 
Anotacao.query.all())\n\n@app.route('/gerenciar')\ndef gerenciar():\n\treturn render_template(\"gerenciar.html\", vetAnotacoes = Anotacao.query.all())\n\n@app.route('/anotacao/cadastro')\ndef tela_cadastro():\t\n\treturn render_template(\"cadastroAnotacao.html\")\n\n@app.route('/anotacao/cadastrar', methods=['POST'])\ndef cadastrarAnotacao():\n\tadicionar = Anotacao(titulo = request.form['titulo'], texto = request.form['texto'], imagem = \"\")\n\tdb.session.add(adicionar)\n\tdb.session.commit()\n\tanotacao = Anotacao.query.filter_by(titulo = request.form['titulo']).first()\n\ttry:\n\t\timagem = request.files['imagem']\n\texcept:\n\t\timagem = None\n\tif imagem:\n\t\timagem = request.files['imagem']\n\t\textensao = imagem.filename.rsplit('.', 1)[1].lower()\n\t\tif (extensao == 'png' or extensao == 'jpg' or extensao == 'jpeg'):\n\t\t\timagem.save(app.config['UPLOAD_FOLDER'] + \"/anotacoes/\" + str(anotacao.id) + \".\" + extensao)\n\t\t\tanotacao.imagem = str(anotacao.id) + \".\" + extensao\n\t\t\tdb.session.commit()\n\treturn redirect('/gerenciar')\n\n@app.route('/anotacao/alterar', methods=['POST'])\ndef alterarAnotacao():\n\tanotacao = Anotacao.query.get(int(request.form['id']))\n\tanotacao.titulo = str(request.form['titulo'])\n\tanotacao.texto = str(request.form['texto'])\n\ttry:\n\t\timagem = request.files['imagem']\n\texcept:\n\t\timagem = None\n\tif imagem:\n\t\timagem = request.files['imagem']\n\t\textensao = imagem.filename.rsplit('.', 1)[1].lower()\n\t\tif (extensao == 'png' or extensao == 'jpg' or extensao == 'jpeg'):\n\t\t\timagem.save(app.config['UPLOAD_FOLDER'] + \"/anotacoes/\" + str(anotacao.id) + \".\" + extensao)\n\t\t\tanotacao.imagem = str(anotacao.id) + \".\" + extensao\n\tdb.session.commit()\n\treturn redirect('/gerenciar')\n\n@app.route('/anotacao/alterar/')\ndef tela_alterar(id):\n\tif(Anotacao.query.get(int(id))):\n\t\treturn render_template(\"alterarAnotacao.html\", anotacao = Anotacao.query.get(int(id)))\n\telse:\n\t\treturn redirect('/404')\n\n@app.route('/anotacao/excluir/')\ndef excluirAnotacao(id):\n\tif(Anotacao.query.get(int(id))):\n\t\tanotacao = Anotacao.query.get(int(id))\n\t\texcluirImagem(id)\n\t\tdb.session.delete(anotacao)\n\t\tdb.session.commit()\n\t\treturn redirect('/gerenciar')\n\telse:\n\t\treturn redirect('/404')\n\ndef excluirImagem(id):\n\tif id:\n\t\tanotacao = Anotacao.query.get(int(id))\n\t\tif anotacao.imagem:\n\t\t\tfile = app.config['UPLOAD_FOLDER'] + \"/anotacoes/\" + anotacao.imagem\n\t\t\ttry:\n\t\t\t\tos.remove(file)\n\t\t\t\tanotacao.imagem = ''\n\t\t\t\tdb.session.commit()\n\t\t\texcept:\n\t\t\t\treturn redirect('/404')\n\nif __name__ == '__main__':\n\treload(sys)\n\tsys.setdefaultencoding('utf-8')\n\tapp.secret_key = 'teste'\n\tapp.run()","sub_path":"ds2/2bim/sqlAlchemy/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"96646199","text":"num = int(input())\r\n\r\ntext1 = 'Leo finally won the Oscar! 
Leo is happy'\r\ntext2 = 'Not even for Wolf of Wall Street?!'\r\ntext3 = 'When will you give Leo an Oscar?'\r\ntext4 = 'Leo got one already!'\r\n\r\n\r\nif num == 88:\r\n print(text1)\r\nelif num == 86:\r\n print(text2)\r\nelif num != 88 and num != 86 and num < 88:\r\n print(text3)\r\nelse:\r\n print(text4)","sub_path":"PyCharm_projects_2020/Fundamentals/Loops/leo_di caprio.py","file_name":"leo_di caprio.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"220549519","text":"#!/usr/bin/env python\nimport operator\nimport time\nimport os\nfrom functools import reduce\nfrom threading import Thread\n\nimport requests\nimport cv2\nfrom queue import Queue\n\n_key = os.environ['MS_EMOTION_KEY']\n_url = \"https://westus.api.cognitive.microsoft.com/emotion/v1.0/recognize\"\n_numWorkerThreads = 5\n_maxNumRetries = 10\n_faceDetectionScaleDown = .25\n_faceeDeteectionScaleUp = 4\n\ncap = cv2.VideoCapture(0)\n\ntimestamp = int(time.time())\nlastResult = None;\n\nheaders = dict()\nheaders['Ocp-Apim-Subscription-Key'] = _key\nheaders['Content-Type'] = 'application/octet-stream'\n\njson = None\nparams = None\n\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n\ndef processRequest(json, data, headers, params):\n \"\"\"\n Helper function to process the request to Project Oxford\n\n Parameters:\n json: Used when processing images from its URL. See API Documentation\n data: Used when processing image read from disk. See API Documentation\n headers: Used to pass the key information and the data type request\n \"\"\"\n global lastResult\n retries = 0\n result = None\n\n while True:\n\n response = requests.request('post', _url, json=json, data=data, headers=headers, params=params)\n\n if response.status_code == 429:\n\n print(\"Message: %s\" % (response.json()['error']['message']))\n\n if retries <= _maxNumRetries:\n time.sleep(1)\n retries += 1\n continue\n else:\n print('Error: failed after retrying!')\n break\n\n elif response.status_code == 200 or response.status_code == 201:\n\n if 'content-length' in response.headers and int(response.headers['content-length']) == 0:\n result = None\n elif 'content-type' in response.headers and isinstance(response.headers['content-type'], str):\n if 'application/json' in response.headers['content-type'].lower():\n result = response.json() if response.content else None\n elif 'image' in response.headers['content-type'].lower():\n result = response.content\n else:\n print(\"Error code: %d\" % (response.status_code))\n print(\"Message: %s\" % (response.json()['error']['message']))\n\n break\n\n if not result:\n return result\n currFace = reduce(returnBiggerFace, result, result[0])\n lastResult = currFace\n return result\n\ndef processRequestWorker():\n while True:\n item = q.get()\n processRequest(*item)\n q.task_done()\n\nq = Queue()\n\ndef getFaceArea(face):\n return face['faceRectangle']['width'] * face['faceRectangle']['height']\n\ndef returnBiggerFace(faceA, faceB) :\n faceAArea = getFaceArea(faceA)\n faceBArea = getFaceArea(faceB)\n if faceAArea > faceBArea:\n return faceA\n elif faceBArea > faceAArea:\n return faceB\n elif faceB['faceRectangle']['left'] < faceA['faceRectangle']['left']:\n return faceB\n elif faceA['faceRectangle']['left'] < faceB['faceRectangle']['left']:\n return faceA\n elif faceB['faceRectangle']['top'] < faceA['faceRectangle']['top']:\n return faceB\n else:\n return faceA\n\ndef renderResultOnImage(result, img):\n global 
lastResult\n \"\"\"Display the obtained results onto the input image\"\"\"\n # Find the largest face in the result set\n def convertCv2FaceToMsFace(cv2Face):\n if 'faceRectangle' in cv2Face:\n return cv2Face\n cv2Face = list(map(lambda x: x * _faceeDeteectionScaleUp,cv2Face))\n return { 'faceRectangle': {'left': cv2Face[0], 'top': cv2Face[1], 'width': cv2Face[2], 'height': cv2Face[3]} }\n\n result = list(map(convertCv2FaceToMsFace, result))\n\n if not result:\n return\n currFace = reduce(returnBiggerFace, result, result[0])\n\n if 'scores' in currFace:\n lastResult = currFace\n\n faceRectangle = currFace['faceRectangle']\n cv2.rectangle(img, (faceRectangle['left'], faceRectangle['top']),\n (faceRectangle['left'] + faceRectangle['width'], faceRectangle['top'] + faceRectangle['height']),\n color=(255, 0, 0), thickness=5)\n\n if lastResult and 'scores' in lastResult:\n currEmotion = max(lastResult['scores'].items(), key=operator.itemgetter(1))[0]\n\n textToWrite = \"%s\" % (currEmotion)\n cv2.putText(img, textToWrite, (faceRectangle['left'], faceRectangle['top'] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,\n (255, 0, 0), 1)\n\n\nfor i in range(_numWorkerThreads):\n t = Thread(target=processRequestWorker)\n t.daemon = True\n t.start()\n\nwhile(True):\n # Capture frame-by-frame\n ret, frame = cap.read()\n\n # Our operations on the frame come here\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n jpg = cv2.imencode('.jpg', frame)[1].tostring()\n\n gray = cv2.resize(gray,(0,0),fx=_faceDetectionScaleDown,fy=_faceDetectionScaleDown)\n\n faces = face_cascade.detectMultiScale(gray, 1.1, 5)\n # print(faces)\n if int(time.time()) - timestamp > 3:\n q.put((json,jpg,headers,params))\n timestamp = int(time.time())\n # Load the original image from disk\n # renderResultOnImage(result, frame)\n # else:\n renderResultOnImage(faces,frame)\n cv2.imshow('frame',frame)\n\n# Display the resulting frame\n# cv2.imshow('frame',frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n","sub_path":"emotion_test.py","file_name":"emotion_test.py","file_ext":"py","file_size_in_byte":5417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"601073325","text":"from flask import Flask, request\nimport requests, datetime, traceback\nfrom send_wx import wb\n\ndef sava_txt(str):\n with open(\"error.txt\", \"a\", encoding='gbk') as f:\n f.write(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ' ' + str + \"\\n\")\n\ndef down_pic(id, pic_url):\n path = r'C:\\pic\\smzdm\\{}.jpg'.format(id)\n try:\n img = requests.get(pic_url)\n with open(path, \"wb\") as f:\n f.write(img.content)\n return path\n except:\n return None\n\napp = Flask(__name__)\n\n@app.route('/sendPic', methods=['GET', 'POST'])\ndef home():\n try:\n values = request.args\n # ip = request.remote_addr\n # if ip == '106.14.92.76':\n # print(values)\n if 'pic_url' in values and 'pic_name' in values and 'to' in values:\n pic_name = values['pic_name']\n pic_url = values['pic_url']\n to = values['to']\n path = down_pic(pic_name, pic_url)\n if path:\n wb.send_pic(to, path)\n return 'YES'\n return 'NO'\n except:\n sava_txt('-----【程序运行异常】-----】\\n{}'.format(traceback.format_exc()))\n\n\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\", port=5000)","sub_path":"personalDemo/flask/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"553926315","text":"#!/usr/bin/python\n#author: namhb\nimport os, 
platform\nfrom libs.WetkCore import WetkCore\n\n'''\nHow to use:\n'''\n######---------------------------------------------#####\n######-------------- WEToolKit class ---- ---------#####\nclass WEToolKit:\n\tdef __init__(self):\n\t\t#Const\n\t\tself.dirPath\t\t\t\t\t=\t\tos.getcwd() \t\t\t\t\t\t\t\t\t\t# relative directory path\n\t\tself.filePath\t\t\t\t\t=\t\tos.path.abspath(__file__) \t\t\t\t\t\t\t# absolute file path\n\t\tself.fileName\t\t\t\t\t=\t\tos.path.basename(__file__) \t\t\t\t\t\t\t# the file name only\n\t\tself.system\t\t\t\t\t\t=\t\tplatform.system()\t\t\t\t\t\t\t\t\t# check OS\n\t\tself.debug\t\t\t\t\t\t=\t\tFalse\t\t\t\t\t\t\t\t\t\t\t\t# default disable debug\n\t\tself.version \t\t\t\t\t=\t\t\"0.0.2a\"\t\t\t\t\t\t\t\t\t\t\t# TK version\n\t\tself.logFolder\t\t\t\t\t=\t\t\"logs\"\t\t\t\t\t\t\t\t\t\t\t\t# Log store\n\t\tself.modulesFolder\t\t\t\t=\t\t\"modules\"\t\t\t\t\t\t\t\t\t\t\t# Modules folder \n\t\tself.libsFolder \t\t\t\t=\t\t\"libs\"\t\t\t\t\t\t\t\t\t\t\t\t# Libs folder\n\t\tself.moduleFolderPath\t\t\t= \t\tos.path.join(self.dirPath,self.modulesFolder) \t\t# Modules folder Path\n\t\tself.moduleNameDefault \t\t\t=\t\t\"module\"\n\t\tself.libsFolderPath\t\t\t\t= \t\tos.path.join(self.dirPath,self.libsFolder) \t\t\t# Modules folder Path\n\t\tself.logPrefix\t\t\t\t\t=\t\t\"wetk\"\t\t\t\t\t\t\t\t\t\t\t\t# Log Prefix\n\t\tself.moduleExt\t\t\t\t\t=\t\t\"py\"\t\t\t\t\t\t\t\t\t\t\t\t# python module extension file\n\t\tself.profileFolder \t\t\t\t=\t\t\"profiles\"\t\t\t\t\t\t\t\t\t\t\t# Profile\n\t\tself.profileFolderPath\t\t\t= \t\tos.path.join(self.dirPath,self.profileFolder) \t\t# Profiles Path\n\t\t# Start\n\t\tself.run()\n\tdef run(self):\n\t\tself.wetk \t\t\t\t\t\t=\t\tWetkCore()\n\t\tself.args \t\t\t\t\t\t=\t\tself.wetk.getArument(\n\t\t\t\tself.version\n\t\t\t)\n\t\tself.wetk.logConfig(\n\t\t\t\tself.logPrefix,\n\t\t\t\tself.dirPath,\n\t\t\t\tself.logFolder,\n\t\t\t)\n\n\t\t# Use module\n\t\tif(self.args.use != None):\n\t\t\tself.wetk.loadModule(\n\t\t\t\tself.args.use,\n\t\t\t\tself.modulesFolder,\n\t\t\t\tself.moduleNameDefault,\n\t\t\t\tself.profileFolderPath,\n\t\t\t\t)\n\t\t\texit()\n\t\tif(self.args.list != None):\n\t\t\tself.wetk.listModule(\n\t\t\t\tself.modulesFolder,\n\t\t\t\tself.moduleFolderPath,\n\t\t\t\tself.moduleNameDefault,\n\t\t\t\tself.profileFolderPath,\n\t\t\t\t)\n\t\t\texit()\n\t\n\n\ntk \t\t\t\t\t\t\t\t\t\t=\t\tNone\ndef logKeyboardInterrupt(log):\n\tprint(log)\ndef main():\n\tglobal tk\n\ttk \t\t\t\t\t\t\t\t\t= \t\tWEToolKit()\n\nif __name__ == \"__main__\":\n\ttry:\n\t\tmain()\n\texcept KeyboardInterrupt:\n\t\tlogKeyboardInterrupt(\"User canncel.\")\n\telse:\n\t\tpass\n\tfinally:\n\t\tpass","sub_path":"WETK0.2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"605194985","text":"\n\ndef build_word_counts(words):\n d={}\n for word in words.split():\n d.setdefault(word,0)\n d[word]=d[word]+1\n return d\n\n\n\n#s = \"one two three four five four six three seven three two three eight nine\"\n#d=build_word_counts(s)\n#print(d)\n\n# you should only need filename=\"macbeth.txt\"\nfilename=\"/home/zamansky/gh/fall-2018-127/classcode/dictionaries/macbeth.txt\"\nf = open(filename)\n# we can read the whole thing\ns = f.read()\nprint(s)\nf.close()\nprint(\"-------\")\n# or we can read a line at a time.\nf = open(filename)\ns = f.readline()\nprint(s)\ns = f.readline()\nprint(s)\nf.close()\nprint('------')\nf = open(filename)\nfor line in 
f.readlines():\n print(line)\n\n\n","sub_path":"classcode/dictionaries/file_examples.py","file_name":"file_examples.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"439567430","text":"import win32com.client\nimport os\nimport sqlite3\nfrom datetime import datetime, timedelta\nimport time\nimport pandas as pd\n\nKOSPI_codelist = []\nKOSPI_namelist = []\nerror_codelist = []\n\n# 공통 함수\ndef Get_login_status():\n \"\"\"\n Cybos plus 로그인 상태 확인 및 연결\n \"\"\"\n objCpCybos = win32com.client.Dispatch(\"CpUtil.CpCybos\")\n\n # 1: 연결, 0: 비연결\n if objCpCybos.IsConnect == 1:\n return \"정상적으로 연결되었습니다.\"\n else:\n print(\"CybosPlus가 연결되어있지 않습니다.\")\n os.startfile(\"C:\\\\Users\\\\S\\\\Desktop\\\\CybosPlus.lnk\")\n\n\n# 코스피 관련 함수\ndef Get_KOSPI_code():\n \"\"\"\n Get the KOSPI codes\n \"\"\"\n try :\n objCpCodeMgr = win32com.client.Dispatch(\"CpUtil.CpCodeMgr\")\n tmp_code_list = objCpCodeMgr.GetStockListByMarket(1) # KOSPI\n\n # 코스피 보통주만 선별\n global KOSPI_codelist\n global KOSPI_namelist\n KOSPI_codelist = [code[1:] for code in tmp_code_list\n if code[-1] == \"0\" and objCpCodeMgr.GetStockSectionKind(code) == 1]\n KOSPI_namelist = [objCpCodeMgr.CodeToName(code) for code in KOSPI_codelist]\n print(\"정상적으로 처리되었습니다.\")\n return KOSPI_codelist, KOSPI_namelist\n\n except :\n print(\"오류\")\n pass\n\ndef Get_KOSPI_Industry(code):\n \"\"\"\n Get the KOSPI Industry\n \"\"\"\n try :\n objCpCodeMgr = win32com.client.Dispatch(\"CpUtil.CpCodeMgr\")\n tmp_code_list = objCpCodeMgr.GetStockIndustryCode(code)\n return tmp_code_list\n\n except:\n print(\"오류\")\n pass\n\ndef Get_KOSPI_Data_adj(*code_list,SDate,EDate,DBname):\n \"\"\"\n :param code_list: Kospi_codelist\n :param SDate: Start Date ex) 20190101\n :param EDate: End Date ex) 20200101\n :param DBname: DB name\n :return: 코스피 주가관련 데이터 반환\n \"\"\"\n instStockChart = win32com.client.Dispatch(\"CpSysDib.StockChart\")\n con = sqlite3.connect(\"C:/Users/S/desktop/바탕화면(임시)/KOSPI/tmp/\" + DBname + \".db\") # sqlite 연결 db 객체 생성\n i = 1\n try :\n for code in code_list:\n code = \"A\" + code\n print((i), \"/\", len(code_list))\n instStockChart.SetInputValue(0, code) # 종목코드\n instStockChart.SetInputValue(1, ord('1')) # 기간 요청\n instStockChart.SetInputValue(2, EDate) # 요청 종료 날짜 지정\n instStockChart.SetInputValue(3, SDate) # 요청 시작 날짜 지정\n instStockChart.SetInputValue(5, (0, 2, 3, 4, 5, 8, 9, 12, 14, 15, 16, 17, 20, 21)) # 데이터의 종류\n instStockChart.SetInputValue(6, ord('D')) # 차트의 종류, D : 일단위 데이터\n instStockChart.SetInputValue(9, ord('1')) # 수정 주가의 반영 여부, 0:무수정 주가 1 : 반영\n time.sleep(0.255)\n instStockChart.BlockRequest()\n numData = instStockChart.GetHeaderValue(3)\n\n Date = []\n Open = []\n High = []\n Low = []\n Close = []\n Volume = []\n Transaction = [] # 거래대금\n ShareNum = [] # 상장주식수\n Foreign_1 = [] # 외국인주문한도수량\n Foreign_2 = [] # 외국인주문가능수량\n Foreign_3 = [] # 외국인현보유수량\n Foreign_4 = [] # 외국인현보유비율\n Company_1 = [] # 기관순매수\n Company_2 = [] # 기관누적순매수\n\n\n for j in range(numData):\n Date.append(str(instStockChart.GetDataValue(0, j)))\n Open.append(instStockChart.GetDataValue(1, j))\n High.append(instStockChart.GetDataValue(2, j))\n Low.append(instStockChart.GetDataValue(3, j))\n Close.append(instStockChart.GetDataValue(4, j))\n Volume.append(instStockChart.GetDataValue(5, j))\n Transaction.append(instStockChart.GetDataValue(6, j))\n ShareNum.append(instStockChart.GetDataValue(7, j))\n Foreign_1.append(instStockChart.GetDataValue(8, j))\n 
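# GetDataValue(field, j): the field index follows the order of the tuple passed to SetInputValue(5, ...) above; j runs over the numData daily bars, which apparently arrive newest-first (hence the sort by Date afterwards)\n 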
Foreign_2.append(instStockChart.GetDataValue(9, j))\n Foreign_3.append(instStockChart.GetDataValue(10, j))\n Foreign_4.append(instStockChart.GetDataValue(11, j))\n Company_1.append(instStockChart.GetDataValue(12, j))\n Company_2.append(instStockChart.GetDataValue(13, j))\n\n\n Kospi_Data = pd.DataFrame({\"Date\": Date,\n \"Open\": Open,\n \"High\": High,\n \"Low\": Low,\n \"Close\": Close,\n \"Volume\": Volume,\n \"Transaction\": Transaction,\n \"ShareNum\": ShareNum,\n \"Foreign_1\": Foreign_1,\n \"Foreign_2\": Foreign_2,\n \"Foreign_3\": Foreign_3,\n \"Foreign_4\": Foreign_4,\n \"Company_1\": Company_1,\n \"Company_2\": Company_2\n })\n Kospi_Data= Kospi_Data.sort_values(by=\"Date\")\n # SQLite DB로 저장\n try:\n Kospi_Data.to_sql(code, con, index=False)\n except:\n print(code, \"오류\")\n pass\n\n i += 1\n\n except :\n print(\"오류\")\n pass\n\n con.close()\n return \"정상적으로 완료했습니다.\"\n\ndef Update_KOSPI_Data(*code_list, DBname):\n instStockChart = win32com.client.Dispatch(\"CpSysDib.StockChart\")\n con = sqlite3.connect(\"C:/Users/S/desktop/바탕화면(임시)/KOSPI/tmp/\" + DBname + \".db\")\n cur = con.cursor()\n CDate = int(datetime.today().strftime(\"%Y%m%d\"))\n global error_codelist\n\n i = 1\n for code in code_list:\n try:\n print((i), \"/\", len(code_list))\n i += 1\n code = \"A\" + code\n cur.execute('select Date from %s' % code)\n a = cur.fetchall()[-1][0].split(\" \")[0].replace(\"-\",\"\")\n SDate = datetime.strptime(a, \"%Y%m%d\") + timedelta(days=+1)\n SDate = int(SDate.strftime(\"%Y%m%d\"))\n\n if SDate >= CDate :\n print(\"오류 : \" + code +\" 시작날짜가 오늘날짜와 같거나 큽니다.\")\n error_codelist.append(code)\n continue\n\n else:\n instStockChart.SetInputValue(0, code) # 종목코드\n instStockChart.SetInputValue(1, ord('1')) # 기간 요청\n instStockChart.SetInputValue(2, CDate) # 요청 종료 날�� 지정\n instStockChart.SetInputValue(3, SDate) # 요청 시작 날짜 지정\n instStockChart.SetInputValue(5, (0, 2, 3, 4, 5, 8, 9, 12)) # 데이터의 종류, 날짜, 시고저종, 거래량, 거래대금 , 상장주식수\n instStockChart.SetInputValue(6, ord('D')) # 차트의 종류, D : 일단위 데이터\n instStockChart.SetInputValue(9, ord('1')) # 수정 주가의 반영 여부, 1 : 반영\n time.sleep(0.255)\n instStockChart.BlockRequest()\n numData = instStockChart.GetHeaderValue(3) # 받아온 데이터 개수\n Kospi_Data = []\n\n for j in range(numData):\n Date=datetime.strptime(str(instStockChart.GetDataValue(0, j)), \"%Y%m%d\")\n Open=instStockChart.GetDataValue(1, j)\n High=instStockChart.GetDataValue(2, j)\n Low=instStockChart.GetDataValue(3, j)\n Close=instStockChart.GetDataValue(4, j)\n Volume=instStockChart.GetDataValue(5, j)\n Transaction=instStockChart.GetDataValue(6, j)\n ShareNum=instStockChart.GetDataValue(7, j)\n row = (Date, Open, High, Low, Close, Volume, Transaction, ShareNum, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)\n try :\n if Kospi_Data[0][0] == row[0] :\n continue\n else : Kospi_Data.append(row)\n except :\n Kospi_Data.append(row)\n continue\n\n Kospi_Data.reverse()\n cur.executemany(\"insert into %s values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\" %code, Kospi_Data)\n con.commit()\n\n except:\n print(code + \"오류\")\n error_codelist.append(code)\n continue\n con.close()\n return error_codelist\n\ndef Get_KOSPI_Data_noadj(*code_list,SDate,EDate,DBname):\n \"\"\"\n :param code_list: Kospi_codelist\n :param SDate: Start Date ex) 20190101\n :param EDate: End Date ex) 20200101\n :param DBname: DB 
name\n :return: 코스피 주가관련 데이터 반환\n \"\"\"\n instStockChart = win32com.client.Dispatch(\"CpSysDib.StockChart\")\n con = sqlite3.connect(\"C:/Users/S/desktop/바탕화면(임시)/KOSPI/tmp/\" + DBname + \".db\") # sqlite 연결 db 객체 생성\n i = 1\n try :\n for code in code_list:\n code = \"A\" + code\n print(i, \"/\", len(code_list))\n instStockChart.SetInputValue(0, code) # 종목코드\n instStockChart.SetInputValue(1, ord('1')) # 기간 요청\n instStockChart.SetInputValue(2, EDate) # 요청 종료 날짜 지정\n instStockChart.SetInputValue(3, SDate) # 요청 시작 날짜 지정\n instStockChart.SetInputValue(5, (0, 2, 3, 4, 5, 8, 9, 12, 14, 15, 16, 17, 20, 21)) # 데이터의 종류\n instStockChart.SetInputValue(6, ord('D')) # 차트의 종류, D : 일단위 데이터\n instStockChart.SetInputValue(9, ord('0')) # 수정 주가의 반영 여부\n time.sleep(0.255)\n instStockChart.BlockRequest()\n numData = instStockChart.GetHeaderValue(3)\n\n Date = []\n Open_noadj = []\n High_noadj = []\n Low_noadj = []\n Close_noadj = []\n Volume_noadj = []\n Transaction_noadj = [] # 거래대금\n ShareNum_noadj = [] # 상장주식수\n Foreign_1 = [] # 외국인주문한도수량\n Foreign_2 = [] # 외국인주문가능수량\n Foreign_3 = [] # 외국인현보유수량\n Foreign_4 = [] # 외국인현보유비율\n Company_1 = [] # 기관순매수\n Company_2 = [] # 기관누적순매수\n\n for j in range(numData):\n Date.append(str(instStockChart.GetDataValue(0, j)))\n Open_noadj.append(instStockChart.GetDataValue(1, j))\n High_noadj.append(instStockChart.GetDataValue(2, j))\n Low_noadj.append(instStockChart.GetDataValue(3, j))\n Close_noadj.append(instStockChart.GetDataValue(4, j))\n Volume_noadj.append(instStockChart.GetDataValue(5, j))\n Transaction_noadj.append(instStockChart.GetDataValue(6, j))\n ShareNum_noadj.append(instStockChart.GetDataValue(7, j))\n Foreign_1.append(instStockChart.GetDataValue(8, j))\n Foreign_2.append(instStockChart.GetDataValue(9, j))\n Foreign_3.append(instStockChart.GetDataValue(10, j))\n Foreign_4.append(instStockChart.GetDataValue(11, j))\n Company_1.append(instStockChart.GetDataValue(12, j))\n Company_2.append(instStockChart.GetDataValue(13, j))\n\n Kospi_Data = pd.DataFrame({\"Date\": Date,\n \"Open_noadj\": Open_noadj,\n \"High_noadj\": High_noadj,\n \"Low_noadj\": Low_noadj,\n \"Close_noadj\": Close_noadj,\n \"Volume_noadj\": Volume_noadj,\n \"Transaction_noadj\": Transaction_noadj,\n \"ShareNum_noadj\": ShareNum_noadj,\n \"Foreign_1\": Foreign_1,\n \"Foreign_2\": Foreign_2,\n \"Foreign_3\": Foreign_3,\n \"Foreign_4\": Foreign_4,\n \"Company_1\": Company_1,\n \"Company_2\": Company_2\n })\n Kospi_Data = Kospi_Data.sort_values(by=\"Date\")\n # SQLite DB로 저장\n try:\n Kospi_Data.to_sql(code, con, index=False)\n except:\n print(code, \"오류\")\n pass\n\n i += 1\n\n except :\n print(\"오류\")\n pass\n\n con.close()\n return \"정상적으로 완료했습니다.\"\n\n\n\n# ETF 분석 관련 함수\ndef Get_ETF_code_name():\n \"\"\"\n ETF 코드명과 종목명 추출\n ETF_code_list와 ETF_name_list 반환\n \"\"\"\n try :\n objCpCodeMgr = win32com.client.Dispatch(\"CpUtil.CpCodeMgr\")\n code_list = objCpCodeMgr.GetStockListByMarket(1) # 0 : 구분없음, 1 : 거래소, 2: 코스닥\n ETF_code_list = []\n ETF_name_list= []\n for code in code_list :\n if objCpCodeMgr.GetStockSectionKind(code) == 10 : # 0: 구분없음, 1: 주권, 10: ETF\n ETF_code_list.append(code)\n ETF_name_list.append(objCpCodeMgr.CodeToName(code))\n\n return ETF_code_list, ETF_name_list\n\n except :\n print(\"오류\")\n pass\n\ndef ETF_RequestData(Cybos_obj, ETFcode):\n \"\"\"\n ETF 분석에 필요한 일자별 데이터 요청 실행\n \"\"\"\n # 데이터 요청\n Cybos_obj.SetInputValue(0, ETFcode)\n Cybos_obj.BlockRequest()\n\n # 통신 결과 확인\n rqStatus = Cybos_obj.GetDibStatus() # DB통신상태 (-1 : 오류, 0 : 정상, 1 : 수신대기)\n rqMsg = Cybos_obj.GetDibMsg1() # 
DB통신상태 문자열\n if rqStatus != 0:\n print(\"통신상태\", rqStatus, rqMsg)\n return False\n\ndef ETF_GetData(Cybos_obj, ETFcode):\n \"\"\"\n ETF 분석에 필요한 일자별 데이터 얻기\n :return: Dataframe타입 ETF 데이터\n \"\"\"\n #\n # Cybos_obj = win32com.client.Dispatch(\"Dscbo1.CpSvr7246\") #tmp\n # ETFcode = \"225130\" #tmp\n etfDate = []\n etfNAV = []\n etfClose = []\n\n # 최초 데이터 요청\n ETF_RequestData(Cybos_obj, ETFcode)\n count = Cybos_obj.GetHeaderValue(0) # 수신 데이터 수\n\n def Data_Save(startnum,endnum):\n \"\"\"\n 최초 데이터 요청과 연속데이터 요청을 분리실행으로 중복되는 코드\n 요청한 ETF 데이터를 변수에 저장해주는 기능\n 최초 데이터 요청에서 가장 최근 데이터를 빼는 문제 때문에, startnum, endnum구분했음\n \"\"\"\n for i in range(startnum, endnum):\n # 필요한 데이터가 다르면 수정해야하는 부분\n date = Cybos_obj.GetDataValue(0, i) # 날짜\n close = Cybos_obj.GetDataValue(1, i) # ETF종가\n NAV = Cybos_obj.GetDataValue(6, i) # NAV\n\n etfDate.append(date)\n etfClose.append(close)\n etfNAV.append(NAV)\n Data_Save(0, count)\n\n # 연속 데이터 요청\n NextCount = 1\n while Cybos_obj.Continue: # 연속데이터 유무(1: 연속, 0: 연속없음)\n ETF_RequestData(Cybos_obj, ETFcode)\n count = Cybos_obj.GetHeaderValue(0) # 수신 데이터 수\n Data_Save(1, count)\n print(NextCount); NextCount += 1\n if (NextCount > 200): # 임의값 200 수정가능\n break\n time.sleep(0.252) # 최대 1초에 최대 4개 조회 가능\n\n ETF_df = pd.DataFrame({\"Date\": etfDate,\n \"Close\": etfClose,\n \"NAV\": etfNAV})\n ETF_df = ETF_df.sort_values(\"Date\")\n return ETF_df\n\n\n\n#\n# if __name__ == \"__main__\" :\n# Get_login_status()\n# Get_KOSPI_code()\n# print(__name__)\n\n","sub_path":"python32bit_project/Cybos_function2.py","file_name":"Cybos_function2.py","file_ext":"py","file_size_in_byte":16562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"382486428","text":"import logging\n\nfrom numpy import random\n\nimport pajbot.models\nfrom pajbot.modules import BaseModule\nfrom pajbot.modules import ModuleSetting\nfrom pajbot.modules import QuestModule\n\nlog = logging.getLogger(__name__)\n\n\nclass Samples:\n valid_samples = {\n '4head': { 'length': 0 },\n '4header': { 'length': 0 },\n '7777': { 'length': 0 },\n 'aaaah': { 'length': 0 },\n 'actioniscoming': { 'length': 0 },\n 'amazing': { 'length': 0 },\n 'asswecan': { 'length': 0 },\n 'athene': { 'length': 0 },\n 'attention': { 'length': 0 },\n 'beatme123': { 'length': 0 },\n 'behindyou': { 'length': 0 },\n 'bitch': { 'length': 0 },\n 'bomblobber': { 'length': 0 },\n 'bondagegaywebsite': { 'length': 0 },\n 'bossofthisgym': { 'length': 0 },\n 'boyishgiggles': { 'length': 0 },\n 'bruceuiscoming': { 'length': 0 },\n 'bubble': { 'length': 0 },\n 'car': { 'length': 0 },\n 'celebrate': { 'length': 0 },\n 'collegeboy': { 'length': 0 },\n 'comeonletsgo': { 'length': 0 },\n 'cumming': { 'length': 0 },\n 'damnson': { 'length': 0 },\n 'dayum': { 'length': 0 },\n 'deadlycommandos': { 'length': 0 },\n 'djkarlthedog': { 'length': 0 },\n 'doitdad': { 'length': 0 },\n 'doot': { 'length': 0 },\n 'eatthepoopoo': { 'length': 0 },\n 'embarrassing': { 'length': 0 },\n 'eshrug': { 'length': 0 },\n 'face': { 'length': 0 },\n 'fatcock': { 'length': 0 },\n 'forsenswa': { 'length': 0 },\n 'fuckyou': { 'length': 0 },\n 'gamba': { 'length': 0 },\n 'gangingup': { 'length': 0 },\n 'goodvibes': { 'length': 0 },\n 'heftobemad': { 'length': 0 },\n 'heyguyshowsitgoinkripparrianhere': { 'length': 0 },\n 'howstrong': { 'length': 0 },\n 'hyperbruh': { 'length': 0 },\n 'idontdoanal': { 'length': 0 },\n 'iseeyou1': { 'length': 0 },\n 'iseeyou2': { 'length': 0 },\n 'jabroni': { 'length': 0 },\n 'jeff': { 'length': 0 },\n 'jesse': { 
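# every sample maps to a metadata dict; 'length' looks like a placeholder here (all zeros), presumably meant to hold the clip duration\n 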
'length': 0 },\n 'knock': { 'length': 0 },\n 'lashofthespanking': { 'length': 0 },\n 'legendary': { 'length': 0 },\n 'levelup': { 'length': 0 },\n 'loan': { 'length': 0 },\n 'lul': { 'length': 0 },\n 'march': { 'length': 0 },\n 'mistake': { 'length': 0 },\n 'mysummercmonman': { 'length': 0 },\n 'nani': { 'length': 0 },\n 'no': { 'length': 0 },\n 'nothinghere': { 'length': 0 },\n 'ohbabyatriple': { 'length': 0 },\n 'ohmancmonman': { 'length': 0 },\n 'ohmyshoulder': { 'length': 0 },\n 'oooh': { 'length': 0 },\n 'oooooh': { 'length': 0 },\n 'othernight': { 'length': 0 },\n 'pain1': { 'length': 0 },\n 'pants': { 'length': 0 },\n 'pewdiepie': { 'length': 0 },\n 'pleaseno': { 'length': 0 },\n 'poopooiscoming': { 'length': 0 },\n 'power': { 'length': 0 },\n 'powerfuck': { 'length': 0 },\n 'pphop': { 'length': 0 },\n 'puke': { 'length': 0 },\n 'pullupourpants': { 'length': 0 },\n 'realtrapshit': { 'length': 0 },\n 'relax': { 'length': 0 },\n 'reynad': { 'length': 0 },\n 'righthappy': { 'length': 0 },\n 'scamazishere': { 'length': 0 },\n 'shakalaka': { 'length': 0 },\n 'sheeeit': { 'length': 0 },\n 'sike': { 'length': 0 },\n 'sixhotloads': { 'length': 0 },\n 'slap': { 'length': 0 },\n 'smartass': { 'length': 0 },\n 'sorry': { 'length': 0 },\n 'spankmoan1': { 'length': 0 },\n 'specimen': { 'length': 0 },\n 'spook': { 'length': 0 },\n 'suction': { 'length': 0 },\n 'surprise': { 'length': 0 },\n 'takeit': { 'length': 0 },\n 'ting1': { 'length': 0 },\n 'ting2': { 'length': 0 },\n 'ting3': { 'length': 0 },\n 'tuckfrump': { 'length': 0 },\n 'ultralul': { 'length': 0 },\n 'umad': { 'length': 0 },\n 'vibrate': { 'length': 0 },\n 'water': { 'length': 0 },\n 'weed': { 'length': 0 },\n 'woah': { 'length': 0 },\n 'woop': { 'length': 0 },\n 'wrongdoor': { 'length': 0 },\n 'wrongnumba': { 'length': 0 },\n 'yeehaw': { 'length': 0 },\n 'yessir': { 'length': 0 },\n 'youlikechallenges': { 'length': 0 },\n 'youlikethat': { 'length': 0 },\n }\n\n\nclass PlaySoundTokenCommandModule(BaseModule):\n\n ID = 'tokencommand-' + __name__.split('.')[-1]\n NAME = '!playsound'\n DESCRIPTION = 'Play a sound on stream'\n PARENT_MODULE = QuestModule\n SETTINGS = [\n ModuleSetting(\n key='point_cost',\n label='Point cost',\n type='number',\n required=True,\n placeholder='Point cost',\n default=0,\n constraints={\n 'min_value': 0,\n 'max_value': 999999,\n }),\n ModuleSetting(\n key='token_cost',\n label='Token cost',\n type='number',\n required=True,\n placeholder='Token cost',\n default=3,\n constraints={\n 'min_value': 0,\n 'max_value': 15,\n }),\n ModuleSetting(\n key='sample_cd',\n label='Cooldown for the same sample (seconds)',\n type='number',\n required=True,\n placeholder='',\n default=20,\n constraints={\n 'min_value': 5,\n 'max_value': 120,\n }),\n ModuleSetting(\n key='sub_only',\n label='Subscribers only',\n type='boolean',\n required=True,\n default=True),\n ModuleSetting(\n key='global_cd',\n label='Global playsound cooldown (seconds)',\n type='number',\n required=True,\n placeholder='',\n default=2,\n constraints={\n 'min_value': 0,\n 'max_value': 600,\n }),\n ]\n\n def __init__(self):\n super().__init__()\n self.valid_samples = Samples.valid_samples\n self.sample_cache = []\n\n def play_sound(self, **options):\n bot = options['bot']\n message = options['message']\n source = options['source']\n\n if message:\n sample = message.split(' ')[0].lower()\n\n if sample in self.sample_cache:\n bot.whisper(source.username, 'The sample {0} was played too recently. 
Please wait before trying to use it again'.format(sample))\n return False\n\n if sample == 'random':\n sample = random.choice(self.valid_samples.keys())\n\n if sample in self.valid_samples:\n log.debug('Played sound: {0}'.format(sample))\n payload = {'sample': sample}\n bot.websocket_manager.emit('play_sound', payload)\n if not (source.username == 'pajlada') or True:\n self.sample_cache.append(sample)\n bot.execute_delayed(self.settings['sample_cd'], self.sample_cache.remove, ('{0}'.format(sample), ))\n return True\n\n bot.whisper(source.username, 'Your sample is not valid. Check out all the valid samples here: https://pajbot.com/playsounds')\n return False\n\n def load_commands(self, **options):\n self.commands['#playsound'] = pajbot.models.command.Command.raw_command(\n self.play_sound,\n tokens_cost=self.settings['token_cost'],\n cost=self.settings['point_cost'],\n sub_only=self.settings['sub_only'],\n delay_all=self.settings['global_cd'],\n description='Play a sound on stream! Costs {} tokens, sub only for now.'.format(self.settings['token_cost']),\n can_execute_with_whisper=True,\n examples=[\n pajbot.models.command.CommandExample(None, 'Play the \"cumming\" sample',\n chat='user:!#playsound cumming\\n'\n 'bot>user:Successfully played your sample cumming').parse(),\n pajbot.models.command.CommandExample(None, 'Play the \"fuckyou\" sample',\n chat='user:!#playsound fuckyou\\n'\n 'bot>user:Successfully played your sample fuckyou').parse(),\n ],\n )\n\n self.commands['#playsound'].long_description = 'Playsounds can be tried out here'\n","sub_path":"pajbot/modules/tokencommands/playsound.py","file_name":"playsound.py","file_ext":"py","file_size_in_byte":9279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"518941280","text":"#!python\nimport os\nimport sys\nimport json\nimport requests\nimport subprocess\n\n\ndef capture_output(command):\n proc = subprocess.Popen(command, stdout=subprocess.PIPE)\n return proc.stdout.read()\n\n\nif __name__ == '__main__':\n version = sys.argv[1]\n filepath = sys.argv[2]\n filename = filepath.split('/')[-1]\n github_token = os.environ['GITHUB_TOKEN']\n auth = (github_token, 'x-oauth-basic')\n commit_sha = os.environ['CIRCLE_SHA1']\n\n commit_body = capture_output([\"git\", \"log\", \"--format=%b\", \"-n\", \"1\", commit_sha])\n file_md5_checksum = capture_output([\"md5sum\", filepath]).split()[0]\n file_sha256_checksum = capture_output([\"sha256sum\", filepath]).split()[0]\n version_body = \"%s\\n\\nMD5: %s\\nSHA256: %s\" % (commit_body, file_md5_checksum, file_sha256_checksum)\n\n params = json.dumps({\n 'tag_name': 'v{0}'.format(version),\n 'name': 're:dash v{0}'.format(version),\n 'body': version_body,\n 'target_commitish': commit_sha,\n 'prerelease': True\n })\n\n response = requests.post('https://api.github.com/repos/everythingme/redash/releases',\n data=params,\n auth=auth)\n\n upload_url = response.json()['upload_url']\n upload_url = upload_url.replace('{?name}', '')\n\n with open(filepath) as file_content:\n headers = {'Content-Type': 'application/gzip'}\n response = requests.post(upload_url, file_content, params={'name': filename}, auth=auth,\n headers=headers, verify=False)\n\n","sub_path":"bin/upload_version.py","file_name":"upload_version.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"522033420","text":"import os\nimport shutil\nimport subprocess\nimport tempfile\n\nimport 
matplotlib.pyplot as plt\n\nimport xarray as xr\nimport numpy as np\n\nimport pop_tools\n\n\ndef savefig(plot_name):\n \"\"\"Write figure\"\"\"\n\n if 'CESM2_MARBL_FIGURE_DIR' in os.environ:\n dirout = os.environ['CESM2_MARBL_FIGURE_DIR']\n else:\n dirout = 'figures'\n\n os.makedirs(dirout, exist_ok=True)\n\n plt.savefig(os.path.join(dirout, plot_name),\n dpi=300,\n bbox_inches='tight',\n metadata={'CreationDate': None})\n\n\ndef write_ds_out(dso, file_out):\n file_out = os.path.realpath(file_out)\n\n os.makedirs(os.path.dirname(file_out), exist_ok=True)\n\n if os.path.exists(file_out):\n shutil.rmtree(file_out)\n print('-'*30)\n print(f'Writing {file_out}')\n dso.info()\n print()\n dso.to_zarr(file_out);\n\n\ndef zonal_mean_via_fortran(ds, var, grid=None, region_mask=None):\n \"\"\"\n Write ds to a temporary netCDF file, compute zonal mean for\n a given variable based on Keith L's fortran program, read\n resulting netcdf file, and return the new xarray dataset\n\n If three_ocean_regions=True, use a region mask that extends the\n Pacific, Indian, and Atlantic to the coast of Antarctica (and does\n not provide separate Arctic Ocean, Lab Sea, etc regions)\n \"\"\"\n\n # xarray doesn't require the \".nc\" suffix, but it's useful to know what the file is for\n ds_in_file = tempfile.NamedTemporaryFile(suffix='.nc')\n ds_out_file = tempfile.NamedTemporaryFile(suffix='.nc')\n ds.to_netcdf(ds_in_file.name)\n\n # Set up location of the zonal average executable\n za_exe = os.path.join(os.path.sep,\n 'glade',\n 'u',\n 'home',\n 'klindsay',\n 'bin',\n 'zon_avg',\n 'za')\n if grid is not None:\n grid = pop_tools.get_grid(grid)\n \n grid_file = tempfile.NamedTemporaryFile(suffix='.nc')\n grid_file_name = grid_file.name\n del grid.attrs['region_mask_regions']\n grid.to_netcdf(grid_file_name)\n \n else:\n # Assume xarray dataset contains all needed fields\n grid_file_name = ds_in_file.name\n\n if region_mask is not None:\n rmask_file = tempfile.NamedTemporaryFile(suffix='.nc')\n region_mask.to_netcdf(rmask_file.name)\n cmd_region_mask = ['-rmask_file', rmask_file.name]\n else:\n cmd_region_mask = []\n\n # Set up the call to za with correct options\n za_call = [za_exe, '-v', var] + cmd_region_mask + \\\n ['-grid_file', grid_file_name,\n '-kmt_file', grid_file_name,\n '-O', '-o', ds_out_file.name, # -O overwrites existing file, -o gives file name\n ds_in_file.name]\n\n # Use subprocess to call za, allows us to capture stdout and print it\n proc = subprocess.Popen(za_call, stdout=subprocess.PIPE)\n (out, err) = proc.communicate()\n if not out:\n # Read in the newly-generated file\n print('za ran successfully, writing netcdf output')\n ds_out = xr.open_dataset(ds_out_file.name)\n else:\n print(f'za reported an error:\\n{out.decode(\"utf-8\")}')\n\n # Delete the temporary files and return the new xarray dataset\n ds_in_file.close()\n ds_out_file.close()\n if not out:\n return(ds_out)\n return(None)\n\ndef pop_add_cyclic(ds):\n \n nj = ds.TLAT.shape[0]\n ni = ds.TLONG.shape[1]\n\n xL = int(ni/2 - 1)\n xR = int(xL + ni)\n\n tlon = ds.TLONG.data\n tlat = ds.TLAT.data\n \n tlon = np.where(np.greater_equal(tlon, min(tlon[:,0])), tlon-360., tlon) \n lon = np.concatenate((tlon, tlon + 360.), 1)\n lon = lon[:, xL:xR]\n\n if ni == 320:\n lon[367:-3, 0] = lon[367:-3, 0] + 360. 
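# the ni == 320 branches special-case what is presumably the POP gx1 (320x384) displaced-pole grid, patching the longitude seam near the top rows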
\n lon = lon - 360.\n \n lon = np.hstack((lon, lon[:, 0:1] + 360.))\n if ni == 320:\n lon[367:, -1] = lon[367:, -1] - 360.\n\n #-- trick cartopy into doing the right thing:\n # it gets confused when the cyclic coords are identical\n lon[:, 0] = lon[:, 0] - 1e-8\n\n #-- periodicity\n lat = np.concatenate((tlat, tlat), 1)\n lat = lat[:, xL:xR]\n lat = np.hstack((lat, lat[:,0:1]))\n\n TLAT = xr.DataArray(lat, dims=('nlat', 'nlon'))\n TLONG = xr.DataArray(lon, dims=('nlat', 'nlon'))\n \n dso = xr.Dataset({'TLAT': TLAT, 'TLONG': TLONG})\n\n # copy vars\n varlist = [v for v in ds.data_vars if v not in ['TLAT', 'TLONG']]\n for v in varlist:\n v_dims = ds[v].dims\n if not ('nlat' in v_dims and 'nlon' in v_dims):\n dso[v] = ds[v]\n else:\n # determine and sort other dimensions\n other_dims = set(v_dims) - {'nlat', 'nlon'}\n other_dims = tuple([d for d in v_dims if d in other_dims])\n lon_dim = ds[v].dims.index('nlon')\n field = ds[v].data\n field = np.concatenate((field, field), lon_dim)\n field = field[..., :, xL:xR]\n field = np.concatenate((field, field[..., :, 0:1]), lon_dim) \n dso[v] = xr.DataArray(field, dims=other_dims+('nlat', 'nlon'), \n attrs=ds[v].attrs)\n\n\n # copy coords\n for v, da in ds.coords.items():\n if not ('nlat' in da.dims and 'nlon' in da.dims):\n dso = dso.assign_coords(**{v: da})\n \n \n return dso\n\ndef label_plots(fig, axs, xoff=-0.04, yoff=0.02):\n alp = [chr(i).upper() for i in range(97,97+26)]\n for i, ax in enumerate(axs): \n p = ax.get_position()\n x = p.x0 + xoff\n y = p.y1 + yoff\n fig.text(\n x, y , f'{alp[i]}',\n fontsize=14,\n fontweight='semibold'\n ) ","sub_path":"notebooks/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"203204500","text":"# requires python-requests.org\nimport sys\nimport requests\nimport os\nimport hashlib\nimport traceback\nimport subprocess\nimport re\n\nif len(sys.argv) < 2:\n sys.stderr.write(\"error: require path to mirror base as first argument\")\n raise SystemExit(1)\nbase = sys.argv[1]\n\nos.chdir(base)\n#os.remove(\"00-index.tar.gz\")\nos.makedirs(\"package\",0o777, True)\n\nsubprocess.run([\"wget\", \"http://hackage.haskell.org/packages/archive/00-index.tar.gz\"])\n\nps = subprocess.Popen([\"tar\", \"tf\", \"00-index.tar.gz\"], stdout=subprocess.PIPE)\nfullNamesStr = subprocess.check_output(('cut', '-d/', '-f', '2,3'),stdin=ps.stdout, universal_newlines=True)\n\nnamesVersions = fullNamesStr.split(\"\\n\")\nfiltered = filter(lambda x : x != 'preferred-versions',namesVersions)\nverDict = {}\n#print(list(filtered)[1:20])\nfor nmVerStr in filtered:\n #print(nmVerStr)\n m = re.match(r\"([\\.\\d]+)\\/(.+)\\.cabal\",nmVerStr)\n if m:\n version = m.group(1)\n name = m.group(2)\n if ((name in verDict) and verDict[name] < version) or not (name in verDict):\n verDict[name] = version\n else:\n print(nmVerStr)\nos.chdir(\"package\")\nbaseUrl = \"http://hackage.haskell.org/package/\" \nfor name, version in verDict.items():\n full = name + \"-\" + version\n print(full)\n fullUrl = baseUrl + full + \"/\" + full + \".tar.gz\"\n subprocess.run([\"wget\", fullUrl])\n\n","sub_path":"mirror_hackage2.py","file_name":"mirror_hackage2.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"480092066","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass 
Migration(migrations.Migration):\n\n dependencies = [\n ('web01', '0010_auto_20161002_0908'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='product',\n name='floor_price',\n field=models.FloatField(default=0, verbose_name=b'\\xe6\\x9c\\x80\\xe4\\xbd\\x8e\\xe4\\xbb\\xb7\\xe6\\xa0\\xbc'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='product',\n name='market_price',\n field=models.FloatField(default=0, verbose_name=b'\\xe5\\xb8\\x82\\xe5\\x9c\\xba\\xe4\\xbb\\xb7\\xe6\\xa0\\xbc'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='product',\n name='stock',\n field=models.PositiveSmallIntegerField(default=0, verbose_name=b'\\xe5\\xba\\x93\\xe5\\xad\\x98'),\n preserve_default=True,\n ),\n ]\n","sub_path":"web01/migrations/0011_auto_20161002_1105.py","file_name":"0011_auto_20161002_1105.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"334177088","text":"import os\r\nimport sys\r\nimport re\r\nimport subprocess\r\n\r\nclass Config:\r\n\tdef __init__(self):\r\n\t\tself.debugPrintSystem = False\r\n\t\r\ng = Config()\r\n\r\ndef dlog(ss):\r\n\twith open(\"/tmp/debug.log\", \"a\") as fp:\r\n\t\tfp.write(ss+\"\\n\")\r\n\r\n\r\ndef system(args, stderr=subprocess.STDOUT):\r\n\tif g.debugPrintSystem:\r\n\t\tprint(\"system command - %s\" % args)\r\n\trr = subprocess.check_output(args, stderr=stderr, shell=True).decode(\"UTF-8\")\r\n\trr = rr.rstrip(' \\r\\n')\r\n\treturn rr\r\n\r\n# result, exitcode\r\ndef systemSafe(args):\r\n\tif g.debugPrintSystem:\r\n\t\tprint(\"system command - %s\" % args)\r\n\t# stderr를 지원못한다. getstatusoutput은 쓰면안된다. stderr는 output에 같이 온다.\r\n\tstatus,output = subprocess.getstatusoutput(args)\r\n\t#rr = output.decode(\"UTF-8\")\r\n\trr = output\r\n\trr = rr.rstrip(' \\r\\n')\r\n\treturn rr,status\r\n\r\ndef systemRet(args):\r\n\tif g.debugPrintSystem:\r\n\t\tprint(\"system command - %s\" % args)\r\n\t\t\r\n\tret = subprocess.call(args, shell=True)\r\n\treturn ret\r\n\r\n\r\ndef programPath(sub=None):\r\n\tpp = os.path.dirname(os.path.realpath(sys.argv[0]))\r\n\tif sub is not None:\r\n\t\tpp = os.path.join(pp, sub)\r\n\treturn pp\r\n\r\n\r\nclass git:\r\n\t# if remote branch, insert \"remotes/\"\r\n\t@staticmethod\r\n\tdef rev(branch):\r\n\t\tss = system(\"git branch -va\")\r\n\t\tm = re.search(r'^[*]?\\s+%s\\s+(\\w+)' % branch, ss, re.MULTILINE)\r\n\t\trev = m.group(1)\r\n\t\treturn rev\r\n\r\n\t@staticmethod\r\n\tdef getCurrentBranch():\r\n\t\treturn system(\"git rev-parse --abbrev-ref HEAD\")\r\n\r\n\t@staticmethod\r\n\tdef getTrackingBranch():\r\n\t\ttry:\r\n\t\t\treturn system(\"git rev-parse --abbrev-ref --symbolic-full-name @{u}\")\r\n\t\texcept subprocess.CalledProcessError:\r\n\t\t\treturn None\r\n\r\n\t@staticmethod\r\n\tdef commonParentRev(br1, br2):\r\n\t\tcommonRev = system(\"git merge-base %s %s\" % (br1, br2))\r\n\t\treturn commonRev\r\n\r\n\t@staticmethod\r\n\tdef printStatus():\r\n\t\tss = system(\"git -c color.status=always status -s\")\r\n\t\tprint(ss+\"\\n\")\r\n\r\n\t@staticmethod\r\n\tdef commitGap(brNew, brOld):\r\n\t\t#gap = system(\"git rev-list %s ^%s --count\" % (brNew, brOld))\r\n\t\tgap = system(\"git rev-list --count %s..%s\" % (brOld, brNew))\r\n\t\treturn int(gap)\r\n\r\n\t@staticmethod\r\n\tdef commitLogBetween(brNew, brOld):\r\n\t\t# color print\r\n\t\tss = system(\"git log --color --oneline --graph --decorate --abbrev-commit %s^..%s\" % (brOld, brNew))\r\n\t\treturn ss\r\n\r\n\t# return: branch, rev, 
upstream, remoteRev, ahead, behind\r\n\t@staticmethod\r\n\tdef getBranchStatus():\r\n\t\t#* master 1fbf5de [origin/master: ahead 2] dc: rebase before push is option, print fetch err\r\n\t\t# remotes/origin/master 688d414 dc: cfg - isPullRebase flag\r\n\t\tbranchStatus = system(\"LANG=en_US git -c color.branch=false branch -avv\")\r\n\t\tout = re.search(r\"^\\*\\s(\\w+)\\s+(\\w+)\\s(.+)\", branchStatus, re.MULTILINE)\r\n\t\tif out is None:\r\n\t\t\treturn None\r\n\r\n\t\tbranch = out.group(1)\r\n\t\trev = out.group(2)\r\n\t\tline = out.group(3)\r\n\r\n\t\tremoteRev = \"\"\r\n\t\tupstream = \"\"\r\n\t\tahead = 0\r\n\t\tbehind = 0\r\n\t\tinfo = re.search(r\"^\\[(.+)\\]\", line)\r\n\t\tif info is not None:\r\n\t\t\tinfos = info.group(1).split(\":\")\r\n\t\t\tupstream = infos[0]\r\n\t\t\tif len(infos) > 1:\r\n\t\t\t\tplus = infos[1].split(\",\")\r\n\t\t\t\tfor ss2 in plus:\r\n\t\t\t\t\tkk = ss2.strip().split(\" \")\r\n\t\t\t\t\tif kk[0] == \"ahead\":\r\n\t\t\t\t\t\tahead = int(kk[1])\r\n\t\t\t\t\telif kk[0] == \"behind\":\r\n\t\t\t\t\t\tbehind = int(kk[1])\r\n\r\n\t\tout = re.search(r\"\\s\\sremotes/%s\\s+(\\w+)\" % upstream, branchStatus)\r\n\t\tif out is not None:\r\n\t\t\tremoteRev = out.group(1)\r\n\r\n\t\t# if there is an upstream and ahead + behind == 0, it is identical to the remote...\r\n\t\treturn branch, rev, upstream, remoteRev, ahead, behind\r\n\r\n\r\n\t@staticmethod\r\n\tdef checkRebaseable(br1, br2):\r\n\t\tcommonRev = git.commonParentRev(br1, br2)\r\n\t\t\r\n\t\tbr1Diff = system(\"git diff --name-only %s %s\" % (commonRev, br1))\r\n\t\tbr2Diff = system(\"git diff --name-only %s %s\" % (commonRev, br2))\r\n\t\t\r\n\t\tbr1 = br1Diff.split()\r\n\t\tbr2 = br2Diff.split()\r\n\t\t\r\n\t\t# check same file\r\n\t\tlst2 = []\r\n\t\tfor ss in br1:\r\n\t\t\tif ss in br2:\r\n\t\t\t\tlst2.append(ss)\r\n\t\t\t\t\r\n\t\treturn lst2\r\n\r\n\t@staticmethod\r\n\tdef fetch():\r\n\t\treturn systemSafe(\"git fetch --prune\")\r\n\t\t\r\n\t@staticmethod\r\n\tdef rebase(branch):\r\n\t\treturn systemSafe(\"git rebase %s\" % branch)\r\n\r\n\t@staticmethod\r\n\tdef rebaseAbort():\r\n\t\treturn system(\"git rebase --abort\")\r\n\t\r\n\t@staticmethod\r\n\tdef stashGetNameSafe(name):\r\n\t\tss = system(\"git stash list\")\r\n\t\tprint(ss)\r\n\t\tm = re.search(r'^(stash@\\{\\d+\\}):\\s(\\w|\\s).+: %s$' % name, ss)\r\n\t\tif not m:\r\n\t\t\treturn None\r\n\r\n\t\treturn m.group(1)\r\n\t\r\n\t@staticmethod\r\n\tdef stashPop(name):\r\n\t\tss = system(\"git stash pop %s\" % name)\r\n\t\tprint(ss)\r\n\t\t\r\n\t@staticmethod\r\n\tdef statusFileList():\r\n\t\t\"\"\"\r\n\t\tfile list(staged, modified) in current folder by terminal character\r\n\t\t(terminal name, s or \"\")\r\n\t\t:return:\r\n\t\t\"\"\"\r\n\t\tfileList,ret = systemSafe(\"git -c color.status=always status -s\") #, stderr=subprocess.DEVNULL)\r\n\r\n\t\t# quoted octal notation to utf8\r\n\t\tfileList = bytes(fileList, \"utf-8\").decode(\"unicode_escape\")\r\n\t\tbb = fileList.encode(\"ISO-8859-1\")\r\n\t\tfileList = bb.decode()\r\n\r\n\t\t# remove \"\" in file name\r\n\t\tfileList2 = []\r\n\t\tfor line in fileList.splitlines():\r\n\t\t\tfileType, fileName = line.split(\" \", 1)\r\n\t\t\tif fileName.startswith(\"\\\"\") and fileName.endswith(\"\\\"\"):\r\n\t\t\t\tfileName = fileName[1:-1]\r\n\t\t\tfileList2.append(fileType + \" \" + fileName)\r\n\r\n\t\tdef getStatus(terminal):\r\n\t\t\tif \"[32m\" in terminal:\r\n\t\t\t\treturn \"s\"\r\n\t\t\telif \"??\" in terminal:\r\n\t\t\t\treturn \"?\"\r\n\t\t\telse: # modification\r\n\t\t\t\treturn \"\"\r\n\r\n\t\titemList = [(x, getStatus(x)) for x in 
fileList2 if len(x) > 0]\r\n\t\treturn itemList","sub_path":"tool.py","file_name":"tool.py","file_ext":"py","file_size_in_byte":5351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"590896211","text":"import json\nimport time\nimport re\ndef timeBetween(t1=None,t0=None):\n if t1==None:\n t1=time.strftime(\"%Y%m%d%H%M%S\")#'20171130233559'\n if t0==None:\n return int(time.mktime(time.strptime(t1,\"%Y%m%d%H%M%S\")))\n return int(time.mktime(time.strptime(t1,\"%Y%m%d%H%M%S\"))-time.mktime(time.strptime(t0,\"%Y%m%d%H%M%S\")))\nwith open('surldb.json') as fid:\n data=json.load(fid)\n\n\ntimedict={}\ndef addt(t0):\n strt0=timeBetween(str(t0))\n if strt0 in timedict:\n timedict[strt0]+=1\n else:\n timedict[strt0]=1\nfor urlrecord in data:\n if 'get' in urlrecord:\n for record in urlrecord['get']:\n addt(record['date'])\n if 'post' in urlrecord:\n for record in urlrecord['post']:\n addt(record['date'])\n\n\nss='//timeData:start\\nvar timeData='+json.dumps(timedict)+'\\n//timeData:end'\n\nwith open('static/usage.html') as fid:\n sshtml=fid.read()\n pattern = re.compile(r'//timeData:start.*?//timeData:end',re.DOTALL)\n sshtml=re.sub(pattern,ss, sshtml)\nwith open('static/usage.html','w') as fid:\n fid.write(sshtml)\nprint()","sub_path":"logfiles/20171210/formatIntoHotmap.py","file_name":"formatIntoHotmap.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"262706578","text":"#!/usr/bin/python\n\n__author__ = 'ahmed'\n\nimport csv\nimport sys\nfrom logging import exception\nfrom xml.etree import ElementTree\nfrom xml.etree.ElementTree import Element\nfrom xml.etree.ElementTree import SubElement\nimport datetime\nimport logging\nfrom xml.dom import minidom\nimport argparse\nimport codecs\n\n\n# dictionary to hold IANA SMI numbers.\niana_smi_numbers = {\n '1': 'iso',\n '1.3': 'org',\n '1.3.6': 'dod',\n '1.3.6.1': 'internet',\n '1.3.6.1.1': 'directory',\n '1.3.6.1.2': 'mgmt',\n '1.3.6.1.2.1': 'mib-2',\n '1.3.6.1.2.1.2.2.1.3': 'ifType',\n '1.3.6.1.2.1.10': 'transmission',\n '1.3.6.1.2.1.10.23': 'transmissionppp',\n '1.3.6.1.2.1.27': 'application',\n '1.3.6.1.2.1.28': 'mta',\n '1.3.6.1.2.2': 'pib',\n '1.3.6.1.3': 'experimental',\n '1.3.6.1.4': 'private',\n '1.3.6.1.4.1': 'enterprises',\n '1.3.6.1.5': 'security',\n '1.3.6.1.6': 'SNMPv2',\n '1.3.6.1.6.1': 'snmpDomains',\n '1.3.6.1.6.2': 'snmpProxys',\n '1.3.6.1.6.3': 'snmpModules',\n '1.3.6.1.7': 'mail',\n '1.3.6.1.8': 'features'\n}\n\nalarm_list = []\n\ndef get_smi_number_to_name(oid_string):\n # create a list and remove the empty value as we have `.`\n # in the beginning of the string.\n if oid_string[0] == '.':\n oid_list = oid_string.split('.')[1:]\n else:\n oid_list = oid_string.split('.')\n # temporary holding area.\n previous_oid_substring = ''\n\n # Lets run through the OID string.\n for oid_item_from_list in oid_list:\n\n # for the first time we set as we it is.\n if previous_oid_substring == '':\n oid_item_from_list = oid_item_from_list\n else:\n oid_item_from_list = previous_oid_substring + '.' 
+ oid_item_from_list\n\n # If we do not find the string the `iana_smi_numbers` dictionary then we return the previous item.\n if oid_item_from_list not in iana_smi_numbers:\n\n # If no values in `iana` then we return the original string.\n if previous_oid_substring == '':\n return oid_string\n else:\n # converting the old OIDs to New Name based OID and return.\n new_named_oid = oid_string.replace(previous_oid_substring, iana_smi_numbers[previous_oid_substring])[1:]\n return new_named_oid\n\n # Setting the value we found in the holding area,\n # if we dont find the next value then we use this to return.\n previous_oid_substring = oid_item_from_list\n\n\n# --------------------------------------------------------\n# Generate Complete Export/Import XML Template File\n# --------------------------------------------------------\ndef generate_template_items_xml(alarm_list, template_name, template_group_name):\n zabbix_export = Element('zabbix_export')\n version = SubElement(zabbix_export, 'version')\n version.text = '2.0'\n\n fmt = '%Y-%m-%dT%H:%M:%SZ'\n date = SubElement(zabbix_export, 'date')\n date.text = datetime.datetime.now().strftime(fmt)\n\n groups = SubElement(zabbix_export, 'groups')\n group_under_groups = SubElement(groups, 'group')\n name_under_group = SubElement(group_under_groups, 'name')\n name_under_group.text = template_group_name\n\n templates = SubElement(zabbix_export, 'templates')\n template_under_templates = SubElement(templates, 'template')\n template_under_template = SubElement(template_under_templates, 'template')\n template_under_template.text = template_name\n\n name_under_template = SubElement(template_under_templates, 'name')\n name_under_template.text = template_name\n\n groups_under_templates = SubElement(template_under_templates, 'groups')\n group_under_groups_template = SubElement(groups_under_templates, 'group')\n name_group_under_groups_template = SubElement(group_under_groups_template, 'name')\n name_group_under_groups_template.text = template_group_name\n\n application_template_under_templates = SubElement(template_under_templates, 'applications')\n application_app_under_templates = SubElement(application_template_under_templates, 'application')\n application_app_name = SubElement(application_app_under_templates, 'name')\n application_app_name.text = 'Alarms'\n\n items = SubElement(template_under_templates, 'items')\n triggers = SubElement(zabbix_export, 'triggers')\n SubElement(zabbix_export, 'graphs')\n\n #Iterate through the unique list to create XML\n for alarm_values in alarm_list:\n item_creator_type_oid(items, template_name, triggers, alarm_values)\n\n SubElement(template_under_templates, 'discovery_rules')\n SubElement(template_under_templates, 'macros')\n SubElement(template_under_templates, 'templates')\n SubElement(template_under_templates, 'screens')\n\n return zabbix_export\n\n\ndef get_trap_name_from_oid(oid_to_search):\n for alarm_dict in alarm_list:\n if oid_to_search == alarm_dict['oid']:\n return alarm_dict['name']\n\n return oid_to_search\n\ndef item_creator_type_oid(items, template_name, triggers, alarm_values):\n item = SubElement(items, 'item')\n name = SubElement(item, 'name')\n type = SubElement(item, 'type')\n SubElement(item, 'snmp_community')\n multiplier = SubElement(item, 'multiplier')\n SubElement(item, 'snmp_oid')\n key = SubElement(item, 'key')\n delay = SubElement(item, 'delay')\n history = SubElement(item, 'history')\n trends = SubElement(item, 'trends')\n status = SubElement(item, 'status')\n value_type = SubElement(item, 
'value_type')\n SubElement(item, 'allowed_hosts')\n SubElement(item, 'units')\n delta = SubElement(item, 'delta')\n SubElement(item, 'snmpv3_contextname')\n SubElement(item, 'snmpv3_securityname')\n snmpv3_securitylevel = SubElement(item, 'snmpv3_securitylevel')\n snmpv3_authprotocol = SubElement(item, 'snmpv3_authprotocol')\n SubElement(item, 'snmpv3_authpassphrase')\n snmpv3_privprotocol = SubElement(item, 'snmpv3_privprotocol')\n SubElement(item, 'snmpv3_privpassphrase')\n formula = SubElement(item, 'formula')\n SubElement(item, 'delay_flex')\n SubElement(item, 'params')\n SubElement(item, 'ipmi_sensor')\n data_type = SubElement(item, 'data_type')\n authtype = SubElement(item, 'authtype')\n SubElement(item, 'username')\n SubElement(item, 'password')\n SubElement(item, 'publickey')\n SubElement(item, 'privatekey')\n SubElement(item, 'port')\n description = SubElement(item, 'description')\n inventory_link = SubElement(item, 'inventory_link')\n SubElement(item, 'valuemap')\n applications = SubElement(item, 'applications')\n application = SubElement(applications, 'application')\n application_name = SubElement(application, 'name')\n SubElement(item, 'valuemap')\n logtimefmt = SubElement(item, 'logtimefmt')\n\n\n\n #\n # Setting basic information for the item.\n #\n name.text = 'Alarm Notification For : ' + alarm_values['name']\n type.text = '17'\n multiplier.text = '0'\n key.text = 'snmptrap[' + alarm_values['oid'] + ']'\n delay.text = '0'\n history.text = '90'\n trends.text = '365'\n status.text = '0'\n value_type.text = '2'\n delta.text = '0'\n snmpv3_securitylevel.text = '0'\n snmpv3_authprotocol.text = '0'\n snmpv3_privprotocol.text = '0'\n formula.text = '1'\n data_type.text = '0'\n authtype.text = '0'\n inventory_link.text = '0'\n description.text = str(alarm_values['description'])\n\n application_name.text = 'Alarms'\n logtimefmt.text = 'hh:mm:ss yyyy/MM/dd'\n\n trigger = SubElement(triggers, 'trigger')\n trigger_expression = SubElement(trigger, 'expression')\n trigger_name = SubElement(trigger, 'name')\n SubElement(trigger, 'url')\n trigger_status = SubElement(trigger, 'status')\n trigger_priority = SubElement(trigger, 'priority')\n trigger_description = SubElement(trigger, 'description')\n trigger_type = SubElement(trigger, 'type')\n SubElement(trigger, 'dependencies')\n\n if (alarm_values['dependency'] == 'NONE' or alarm_values['dependency'] == '') and \\\n alarm_values['priority'] == 'Clear':\n trigger_expression.text = '{' + template_name + ':' + key.text + '.str(\"' + alarm_values['oid'] + '\")}=1 & {' \\\n + template_name + ':' + key.text + '.nodata(1d)}=0'\n\n if (alarm_values['dependency'] == 'NONE' or alarm_values['dependency'] == '') and \\\n alarm_values['priority'] != 'Clear':\n trigger_expression.text = '{' + template_name + ':' + key.text + '.str(\"' + alarm_values['oid'] + '\")}=1 & {' \\\n + template_name + ':' + key.text + '.nodata(' + alarm_values[\n 'clear_time_in_days'] + ')}=0'\n\n elif alarm_values['dependency'] != 'NONE':\n trigger_expression.text = '{' + template_name + ':' + key.text + '.str(\"' + alarm_values['oid'] + '\")}=1 & {' \\\n + template_name + ':' + key.text + '.nodata(' + alarm_values['clear_time_in_days'] \\\n + ')}=0 & {' \\\n + template_name + ':' + 'snmptrap[\"(\\\\b' + alarm_values['dependency'] + '$\\\\b)\"]' + \\\n '.str(\"' + alarm_values['dependency'] + '\")}=0'\n\n\n if alarm_values['trigger_name_description'] == '':\n trigger_name.text = 'ATTENTION : On {HOST.NAME}, An Alarm : ' + alarm_values['name'] + \\\n ' - {#SNMPVALUE}, From 
Module : ' + alarm_values['mib_module']\n else:\n #print alarm_values['trigger_name_description'].replace(\"\\n\", \" \")\n #print \"---\"\n updated_name = alarm_values['trigger_name_description'].replace(\"\\n\", \" \")\n trigger_name.text = updated_name\n\n trigger_status.text = '0'\n\n if alarm_values['priority'] == 'Discard':\n trigger_priority.text = '0'\n elif alarm_values['priority'] in ['Threshold', 'Clear', 'Log', 'Information']:\n trigger_priority.text = '1'\n elif alarm_values['priority'] == 'Minor':\n trigger_priority.text = '2'\n elif alarm_values['priority'] == 'Average':\n trigger_priority.text = '3'\n elif alarm_values['priority'] == 'Major':\n trigger_priority.text = '4'\n elif alarm_values['priority'] == 'Critical':\n trigger_priority.text = '5'\n\n trigger_description.text = description.text\n trigger_type.text = '0'\n\ndef xml_pretty_me(file_name_for_prettify, string_to_prettify):\n #\n # Open a file and write to it and we are done.\n #\n logging.debug(\"Creating File %s\", file_name_for_prettify)\n\n xml = minidom.parseString(string_to_prettify)\n pretty_xml_as_string = xml.toprettyxml()\n output_file = open(file_name_for_prettify, 'w')\n output_file.write(pretty_xml_as_string)\n logging.debug(\"Creation Complete\")\n output_file.close()\n\n\ndef read_from_csv(csv_file_name):\n try:\n reader = csv.reader(open(csv_file_name, 'r'))\n return reader\n except exception:\n print(\"Something went wrong in reading file\" + str(exception))\n exit()\n\n\ndef zabbix_snmptrap_template_import(file_name, template_name, template_group_name):\n csv_reader = read_from_csv(file_name)\n for alarm_data in csv_reader:\n\n # Skipping First Line\n if alarm_data[0] == \"MIB-MODULE\":\n continue\n\n oid_dictionary = {'mib_module': alarm_data[0].strip(), 'mib_module_file': alarm_data[1].strip()}\n\n # Converting OID to Name based OID.\n oid_dictionary['oid'] = get_smi_number_to_name(alarm_data[2].strip())\n\n # Setting columns as it is from the File.\n oid_dictionary['name'] = alarm_data[3].strip()\n oid_dictionary['priority'] = alarm_data[4].strip()\n oid_dictionary['comment'] = alarm_data[5].strip()\n oid_dictionary['description'] = alarm_data[6].strip('\"')\n oid_dictionary['trigger_name_description'] = alarm_data[7].strip()\n\n # Converting OID to Name based OID.\n # [Similar to the OID above as this is similar.]\n if alarm_data[8].strip() != '':\n oid_dictionary['dependency'] = str(get_smi_number_to_name(alarm_data[8].strip()))\n else:\n oid_dictionary['dependency'] = ''\n\n # Setting columns as it is from the file.\n oid_dictionary['clear_time_in_days'] = str(alarm_data[9].strip())\n logging.debug('oid_dictionary:' + str(oid_dictionary))\n\n # Creating list of dictionary to hold the data.\n alarm_list.append(oid_dictionary)\n\n\n # pass on the listed dictionary to xml processor, this will return a XML.\n xml_tree = generate_template_items_xml(alarm_list, template_name, template_group_name)\n\n # Cleaning list\n del alarm_list[:]\n\n # Converting XML object to String.\n xml_tree_as_string = ElementTree.tostring(xml_tree)\n\n # Return.\n return xml_tree_as_string\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description=\n ''' ''')\n\n parser.add_argument('-e', '--export-csv', help='OID file, Gives all OIDs on the device', required=True)\n parser.add_argument('-n', '--template-name', help='Template name as given in Zabbix server.', required=True)\n parser.add_argument('-g', '--template-group',\n help='Template Group which the Template belongs to, as in Zabbix 
server.',\n required=True)\n\n parser.add_argument('-d', '--debug', help='Running Debug mode - More Verbose', action=\"store_true\")\n parser.add_argument('-v', '--verbose', help='Running Debug mode - More Verbose', action=\"store_true\")\n args = parser.parse_args()\n\n csv_file_name = args.export_csv\n zabbix_template_name = args.template_name\n zabbix_template_group_name = args.template_group\n\n if args.debug or args.verbose:\n logging.basicConfig(level=logging.DEBUG)\n else:\n logging.basicConfig(level=logging.INFO)\n\n # Processing CSV to XML.\n xml_tree_gen_as_string = zabbix_snmptrap_template_import(csv_file_name, zabbix_template_name,\n zabbix_template_group_name)\n\n # Lets make the XML pretty.\n xml_pretty_me('templates/' + zabbix_template_name.lower().replace(' ', '-') + '-item-template-trigger-import.xml',\n xml_tree_gen_as_string)\n","sub_path":"template_creator_from_snmptrap/snmptrap_template_create/zabbix_snmptrap_custom_ggsn.py","file_name":"zabbix_snmptrap_custom_ggsn.py","file_ext":"py","file_size_in_byte":14245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"416719230","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n### Load all files\nfile_activity_labels = \"../UCI HAR Dataset/activity_labels.txt\"\nfile_features = \"../UCI HAR Dataset/features.txt\"\nfile_X_train = \"../UCI HAR Dataset/train/X_train.txt\"\nfile_Y_train = \"../UCI HAR Dataset/train/y_train.txt\"\nfile_X_test = \"../UCI HAR Dataset/test/X_test.txt\"\nfile_Y_test = \"../UCI HAR Dataset/test/y_test.txt\"\n\nactivity_labels = pd.read_csv(file_activity_labels, delimiter=\" \", header=None, names=['id', 'activity'])\nfeatures = pd.read_csv(file_features, delimiter=\" \", header=None, names=['id', 'feature'])\nX_train = pd.read_csv(file_X_train, delimiter=\" \", header=None, skipinitialspace=True)\nY_train = pd.read_csv(file_Y_train, delimiter=\" \", header=None, skipinitialspace=True)\nX_test = pd.read_csv(file_X_test, delimiter=\" \", header=None, skipinitialspace=True)\nY_test = pd.read_csv(file_Y_test, delimiter=\" \", header=None, skipinitialspace=True)\n\n# Add labels to the measurements\nX_train['label'] = Y_train\nX_test['label'] = Y_test\n\nX = pd.concat([X_train, X_test])\nY = pd.concat([Y_train, Y_test])\n\n### 4.1 a\nprint(\"Training set :\", X_train.shape)\nprint(\" Test set :\", X_test.shape)\nprint()\n### 4.1 b\niFeature = 480\nprint(\" feature :\", features['feature'][-1+iFeature])\nprint(\" mean : %0.3f\" % X.head(10)[-1+iFeature].mean())\nprint(\" median : %0.3f\" % X.head(10)[-1+iFeature].median())\nprint(\" stddev : %0.3f\" % X.head(10)[-1+iFeature].std())\nprint()\n\n### 4.2 a\nprint(\"Training labels :\", Y_train.shape)\nprint(\" Test labels :\", Y_test.shape)\nprint()\n### 4.2 b\nbar_heights = Y[0].value_counts(normalize=True).sort_index()\nplt.bar(bar_heights.index.values, bar_heights, tick_label=activity_labels['activity'])\n# plt.show()\n\n### 4.3\nfeature = 100\nplt.clf() \nX_train.groupby('label')[feature].plot.kde()\nplt.title(features[features['id'] == 555]['feature'].tolist()[0])\nplt.show()\n","sub_path":"TS/4_.1-3.py","file_name":"4_.1-3.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"490170514","text":"'''7.String Explosion\n\nExplosions are marked with '>'. 
Immediately after the mark, there will be an integer, which signifies the strength of the explosion.\nYou should remove x characters (where x is the strength of the explosion), starting after the punch character ('>').\nIf you find another explosion mark ('>') while you’re deleting characters, you should add the strength to your previous explosion.\nWhen all characters are processed, print the string without the deleted characters.\nYou should not delete the explosion character – '>', but you should delete the integers, which represent the strength.\n\nInput\nYou will receive single line with the string.\nOutput\nPrint what is left from the string after explosions.\n\nConstraints\nYou will always receive a strength for the punches\nThe path will consist only of letters from the Latin alphabet, integers and the char '>'\nThe strength of the punches will be in the interval [0…9]\n\nExamples\nInput\nOutput\nComments\n\nabv>1>1>2>2asdasd\nabv>>>>dasd\n\n1st explosion is at index 3 and it is with strength of 1. We delete only the digit after the explosion character.\nThe string will look like this: abv>>1>2>2asdasd\n2nd explosion is with strength one and the string transforms to this: abv>>>2>2asdasd\n3rd explosion is now with strength of 2. We delete the digit and we find another explosion. At this point the string looks like this: abv>>>>2asdasd.\n4th explosion is with strength 2. We have 1 strength left from the previous explosion, we add the strength of the current explosion to what is\nleft and that adds up to a total strength of 3. We delete the next three characters and we receive the string abv>>>>dasd\n\nWe do not have any more explosions and we print the result: abv>>>>dasd\n\npesho>2sis>5a>9akarate>9hexmaster\n\npesho>is>a>karate>master\n'''\n\nword = input()\nexplosion_strength = 0\nword_to_print = ''\n\nwhile word:\n\n if word[0] != '>':\n word_to_print += word[0]\n word = word[1:]\n\n\n else: #word[0] == '>':\n\n explosion_strength += int(word[1]) - 1\n word_to_print += word[0]\n word = word[2:]\n if explosion_strength:\n while explosion_strength and word:\n if word[0] != '>':\n word=word[1:]\n explosion_strength -= 1\n else:\n word_to_print +=word[0]\n explosion_strength += int(word[1])-1\n word = word[2:]\n\n\n#print(explosion_strength)\n#print(f\"word e: {word}\")\nprint(word_to_print)","sub_path":"Fund_Text - 7. String Explosion.py","file_name":"Fund_Text - 7. 
String Explosion.py","file_ext":"py","file_size_in_byte":2514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"349921464","text":"#!/usr/bin/env python2\nimport sys\nsys.path.append('../lib')\nimport os\nimport numpy as np\nif '--show' not in sys.argv:\n import matplotlib\n matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib.path import Path\nfrom matplotlib.patches import PathPatch\nimport glob\n\nimport protocols\nimport model_ikr as m\n\nfrom scipy.optimize import fmin\n# Set seed\nnp.random.seed(101)\n\nfrom releakcorrect import I_releak, score_leak, protocol_leak_check\n\nsavepath = './figs'\nif not os.path.isdir(savepath):\n os.makedirs(savepath)\n\n#\n# Protocols\n#\nprotocol_funcs = {\n 'staircaseramp': protocols.leak_staircase,\n 'pharma': protocols.pharma, # during drug application\n 'apab': 'protocol-apab.csv',\n 'apabv3': 'protocol-apabv3.csv',\n 'ap05hz': 'protocol-ap05hz.csv',\n 'ap1hz': 'protocol-ap1hz.csv',\n 'ap2hz': 'protocol-ap2hz.csv',\n 'sactiv': None,\n 'sinactiv': None,\n}\nprotocol_dir = '../protocol-time-series'\nprotocol_list = [\n 'staircaseramp',\n 'pharma',\n 'apab',\n 'apabv3',\n 'ap1hz',\n 'ap2hz',\n ]\n\ndata_dir = '../data-autoLC'\ndata_dir_staircase = '../data'\nfile_dir = './out'\nfile_list = [\n 'herg25oc1',\n ]\ntemperatures = np.array([25.0])\ntemperatures += 273.15 # in K\nfit_seed = '542811797'\nwithfcap = False\n\n#\n# Where to zoom in\n#\nnorm_zoom = False\nzoom_in_win = { # protocol: [(time_start, time_end), ...] in second\n # 'staircaseramp': [(1.8, 2.5), (11.395, 11.415), (13.895, 13.915),\n # (14.375, 14.925)],\n 'staircaseramp': [(1.875, 2.125), (11.35, 11.45), (13.85, 13.95),\n (14.375, 14.625)],\n 'pharma': [(0.64, 0.66), (1.14, 1.16)],\n 'apab': [(0.035, 0.065), (0.32, 0.33)],\n 'apabv3': [(0.05, 0.07)],\n 'ap05hz': None,\n 'ap1hz': [(0.04, 0.07), (1.04, 1.07),\n (2.04, 2.07), (3.04, 3.07)],\n 'ap2hz': [(0.045, 0.06), (0.545, 0.56),\n (1.045, 1.06), (1.545, 1.56),\n (2.045, 2.06), (2.545, 2.56),\n (3.045, 3.06)],\n 'sactiv': None,\n 'sinactiv': None,\n}\n\n#\n# Get new parameters and traces\n#\nfor i_temperature, (file_name, temperature) in enumerate(zip(file_list,\n temperatures)):\n\n print('Plotting %s' % file_name)\n\n savedir = '%s/%s-autoLC-releak-zoom' % (savepath, file_name)\n if not os.path.isdir(savedir):\n os.makedirs(savedir)\n\n # Get selected cells\n files_dir = os.path.realpath(os.path.join(file_dir, file_name))\n searchwfcap = '-fcap' if withfcap else ''\n selectedfile = './manualselected-%s.txt' % (file_name)\n selectedwell = []\n with open(selectedfile, 'r') as f:\n for l in f:\n if not l.startswith('#'):\n selectedwell.append(l.split()[0])\n\n # Model\n prt2model = {}\n for prt in protocol_list:\n\n protocol_def = protocol_funcs[prt]\n if type(protocol_def) is str:\n protocol_def = '%s/%s' % (protocol_dir, protocol_def)\n\n prt2model[prt] = m.Model('../mmt-model-files/kylie-2017-IKr.mmt',\n protocol_def=protocol_def,\n temperature=temperature, # K\n transform=None,\n useFilterCap=False) # ignore capacitive spike\n\n for cell in selectedwell[:]:\n # Fitted parameters\n param_file = '%s/%s-staircaseramp-%s-solution%s-%s.txt' % \\\n (files_dir, file_name, cell, searchwfcap, fit_seed)\n obtained_parameters = np.loadtxt(param_file)\n\n # Create figure\n # Do a very very tailored version........ 
:(\n fig = plt.figure(figsize=(16, 12))\n grid = plt.GridSpec(40, 3, hspace=0.0, wspace=0.2)\n axes = np.empty([6, int(len(protocol_list)/2)], dtype=object)\n # long list here:\n for i in range(int(len(protocol_list)/2)):\n # First 'row'\n axes[0, i] = fig.add_subplot(grid[0:5, i]) # , sharex=axes[2, i])\n axes[0, i].set_xticklabels([])\n axes[1, i] = fig.add_subplot(grid[5:10, i]) # , sharex=axes[2, i])\n axes[2, i] = fig.add_subplot(grid[13:18, i])\n axes[2, i].set_xticklabels([]) # last one is zoom in\n\n # Second 'row'\n axes[3, i] = fig.add_subplot(grid[22:27, i]) # , sharex=axes[5, i])\n axes[3, i].set_xticklabels([])\n axes[4, i] = fig.add_subplot(grid[27:32, i]) # , sharex=axes[5, i])\n axes[5, i] = fig.add_subplot(grid[35:40, i])\n axes[5, i].set_xticklabels([]) # last one is zoom in\n\n if norm_zoom:\n axes[2, i].set_yticklabels([])\n axes[5, i].set_yticklabels([])\n # Set labels\n axes[0, 0].set_ylabel('Voltage [mV]', fontsize=14)\n axes[1, 0].set_ylabel('Current [pA]', fontsize=14)\n axes[2, 0].set_ylabel('Zoom in', fontsize=14)\n axes[3, 0].set_ylabel('Voltage [mV]', fontsize=14)\n axes[4, 0].set_ylabel('Current [pA]', fontsize=14)\n axes[5, 0].set_ylabel('Zoom in', fontsize=14)\n axes[-1, len(protocol_list) // 2 // 2].set_xlabel('Time [s]',\n fontsize=18)\n\n for i_prt, prt in enumerate(protocol_list):\n # Time points\n times = np.loadtxt('%s/%s-%s-times.csv' % (data_dir, file_name,\n prt), delimiter=',', skiprows=1)\n\n # Simulation\n model = prt2model[prt]\n simulation = model.simulate(obtained_parameters, times)\n if False:\n for _ in range(5):\n assert(all(simulation == \n model.simulate(obtained_parameters, times)))\n voltage = model.voltage(times) * 1000 # V -> mV\n\n # Data\n if prt == 'staircaseramp':\n data = np.loadtxt('%s/%s-%s-%s.csv' % (data_dir_staircase,\n file_name, prt, cell), delimiter=',', skiprows=1)\n data_new = np.copy(data)\n else:\n data = np.loadtxt('%s/%s-%s-%s.csv' % (data_dir, file_name,\n prt, cell), delimiter=',', skiprows=1)\n # Re-leak correct the leak corrected data...\n g_releak = fmin(score_leak, [0.0], args=(data, voltage, times,\n protocol_leak_check[prt]))\n data_new = I_releak(g_releak[0], data, voltage)\n assert(data_new.shape == times.shape)\n # TODO: Save corrected data later...\n assert(data.shape == times.shape)\n\n # Plot\n ai = (i_prt // (len(protocol_list) // 2)) * 3\n aj = i_prt % (len(protocol_list) // 2)\n amplitude = np.max(simulation) - np.min(simulation)\n if prt == 'staircaseramp':\n axes[ai, aj].set_title('Calibration', fontsize=16)\n # Fix ylim using simulation\n axes[ai + 1, aj].set_ylim([\n np.min(simulation) - 0.05 * amplitude,\n np.max(simulation) + 0.05 * amplitude])\n else:\n axes[ai, aj].set_title('Validation %s' % i_prt, fontsize=16)\n # Fix ylim using simulation\n axes[ai + 1, aj].set_ylim([\n np.min(simulation) - 0.3 * amplitude,\n np.max(simulation) + 0.3 * amplitude])\n axes[ai, aj].plot(times, voltage)\n axes[ai + 1, aj].plot(times, data, alpha=0.2, label='Data')\n axes[ai + 1, aj].plot(times, data_new, alpha=0.5, label='New data')\n axes[ai + 1, aj].plot(times, simulation, label='Model')\n # Plot zoom in version\n zoom_in_data = []\n zoom_in_data_new = []\n zoom_in_simulation = []\n zoom_in_line_break = []\n for t_i, t_f in zoom_in_win[prt]:\n # Find closest time\n idx_i = np.argmin(np.abs(times - t_i))\n idx_f = np.argmin(np.abs(times - t_f))\n # Work out the max and min\n if norm_zoom:\n y_min = np.min(simulation[idx_i:idx_f])\n y_max = np.max(simulation[idx_i:idx_f])\n y_amp = y_max - y_min\n y_min -= 
0.3 * y_amp\n y_max += 0.3 * y_amp\n y_amp = y_max - y_min\n else:\n y_min = np.min(simulation) - 0.25 * amplitude\n y_max = np.max(simulation) + 0.25 * amplitude\n # And plot gray boxes over second panels\n codes = [Path.MOVETO] + [Path.LINETO] * 3 + [Path.CLOSEPOLY]\n vertices = np.array([(times[idx_i], y_min),\n (times[idx_i], y_max),\n (times[idx_f], y_max),\n (times[idx_f], y_min),\n (0, 0)], float)\n pathpatch = PathPatch(Path(vertices, codes),\n facecolor='#fa9fb5',\n edgecolor='#fa9fb5',\n alpha=0.75)\n plt.sca(axes[ai + 1, aj])\n pyplot_axes = plt.gca()\n pyplot_axes.add_patch(pathpatch)\n # Work out third panel plot\n if norm_zoom:\n zoom_in_segment_data = (data[idx_i:idx_f] - y_min) / y_amp\n zoom_in_segment_data_new = (data_new[idx_i:idx_f] - y_min)\\\n / y_amp\n zoom_in_segment_sim = (simulation[idx_i:idx_f] - y_min) \\\n / y_amp\n else:\n zoom_in_segment_data = data[idx_i:idx_f]\n zoom_in_segment_data_new = data_new[idx_i:idx_f]\n zoom_in_segment_sim = simulation[idx_i:idx_f]\n zoom_in_data = np.append(zoom_in_data, zoom_in_segment_data)\n zoom_in_data_new = np.append(zoom_in_data_new,\n zoom_in_segment_data_new)\n zoom_in_simulation = np.append(zoom_in_simulation,\n zoom_in_segment_sim)\n zoom_in_line_break.append(len(zoom_in_segment_sim))\n axes[ai + 2, aj].plot(zoom_in_data, alpha=0.2)\n axes[ai + 2, aj].plot(zoom_in_data_new, alpha=0.5)\n axes[ai + 2, aj].plot(zoom_in_simulation)\n for x in np.cumsum(zoom_in_line_break)[:-1]:\n axes[ai + 2, aj].axvline(x, color='k')\n axes[ai + 2, aj].set_xlim([0, len(zoom_in_simulation)])\n if norm_zoom:\n axes[ai + 2, aj].set_ylim([0, 1])\n else:\n axes[ai + 2, aj].set_ylim([y_min, y_max])\n axes[1, 0].legend()\n grid.tight_layout(fig, pad=0.6)\n grid.update(wspace=0.12, hspace=0.0)\n if '--show' not in sys.argv:\n plt.savefig('%s/%s.png' % (savedir, cell),\n bbox_inches='tight', pad_inches=0)\n else:\n plt.show()\n plt.close('all')\n print('Done ' + file_name + cell)\n del(prt2model)\n\n","sub_path":"room-temperature-only/re-leak-correct.py","file_name":"re-leak-correct.py","file_ext":"py","file_size_in_byte":11161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"539821755","text":"from torch.nn import Conv2d, MaxPool2d\nfrom torch import no_grad\nfrom torch.nn.functional import interpolate\n\n\nclass PRISM:\n _excitations = []\n _hook_handlers = []\n _is_orig_image = True\n\n def _excitation_hook(module, input, output):\n # for better output sharpness we collect input images\n if PRISM._is_orig_image:\n PRISM._excitations.append(input[0])\n PRISM._is_orig_image = False\n PRISM._excitations.append(output)\n\n def register_hooks(model, recursive=False):\n if not recursive and PRISM._hook_handlers:\n print(\"Hooks can only be registered to one model at once. 
Please use: `prune_old_hooks()`\")\n return\n\n for i, layer in enumerate(model.children()):\n if list(layer.children()):\n PRISM.register_hooks(layer, recursive=True)\n elif isinstance(layer, MaxPool2d):\n PRISM._hook_handlers.append(\n layer.register_forward_hook(PRISM._excitation_hook)\n )\n elif isinstance(layer, Conv2d) and layer.stride > (1, 1):\n PRISM._hook_handlers.append(\n layer.register_forward_hook(PRISM._excitation_hook)\n )\n\n def prune_old_hooks(model):\n if not PRISM._hook_handlers:\n print(\"No hooks to remove\")\n for hook in PRISM._hook_handlers:\n hook.remove()\n\n PRISM._hook_handlers = []\n\n ###############################################\n\n def _svd(final_excitation, channels=3):\n # TODO: consider single channel interpretation\n final_layer_input = final_excitation.permute(0, 2, 3, 1).reshape(\n -1, final_excitation.shape[1]\n )\n normalized_final_layer_input = final_layer_input - final_layer_input.mean(0)\n u, s, v = normalized_final_layer_input.svd(compute_uv=True)\n raw_features = u[:, :channels].matmul(s[:channels].diag())\n return raw_features.view(\n final_excitation.shape[0],\n final_excitation.shape[2],\n final_excitation.shape[3],\n 3,\n ).permute(0, 3, 1, 2)\n\n def _feature_normalization(single_excitation):\n feature_excitation = single_excitation.sum(dim=1, keepdim=True)\n # reducing number inflation\n feature_excitation /= feature_excitation.max()\n return feature_excitation\n\n def _upsampling(extracted_features, pre_excitations):\n for e in pre_excitations[::-1]:\n extracted_features = interpolate(\n extracted_features,\n size=(e.shape[2], e.shape[3]),\n mode=\"bilinear\",\n align_corners=False,\n )\n extracted_features *= PRISM._feature_normalization(e)\n return extracted_features\n\n def _normalize_to_rgb(features):\n scaled_features = (features - features.mean()) / features.std()\n scaled_features = scaled_features.clip(-1, 1)\n scaled_features = (scaled_features - scaled_features.min()) / (\n scaled_features.max() - scaled_features.min()\n )\n return scaled_features\n\n def get_maps():\n if not PRISM._excitations:\n print(\"No data in hooks. Have You used `register_hooks(model)` method?\")\n return\n\n # [print(e.shape) for e in PRISM._excitations]\n\n with no_grad():\n extracted_features = PRISM._svd(PRISM._excitations.pop(), 3)\n extracted_features = PRISM._upsampling(\n extracted_features, PRISM._excitations\n )\n rgb_features_map = PRISM._normalize_to_rgb(extracted_features)\n\n # prune old PRISM._excitations\n PRISM.reset_excitations()\n\n return rgb_features_map\n\n def reset_excitations():\n PRISM._is_orig_image = True\n PRISM._excitations = []\n","sub_path":"torchprism/PRISM.py","file_name":"PRISM.py","file_ext":"py","file_size_in_byte":3866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"579681965","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function\n#\nclass groups(object):\n \"\"\"Group a set of objects (a list of coordinates in some space) based on\n a friends-of-friends algorithm\n \"\"\"\n import numpy as np\n from .. import PydlutilsException\n @staticmethod\n def euclid(x1,x2):\n \"\"\"Pythagorean theorem in Euclidean space with arbitrary number\n of dimensions.\n \"\"\"\n return self.np.sqrt(((x1-x2)**2).sum())\n @staticmethod\n def sphereradec(x1,x2):\n \"\"\"Separation of two points on a 2D-sphere, assuming they are in\n longitude-latitude or right ascension-declination form. 
Assumes\n everything is already in radians.\n \"\"\"\n from ...goddard.astro import gcirc\n return gcirc(x1[0],x1[1],x2[0],x2[1],units=0)\n def __init__(self,coordinates,distance,separation='euclid'):\n \"\"\"Init creates an object and performs the friends-of-friends\n algorithm. The coordinates can have arbitrary dimensions, with each\n column representing one of the dimensions. Each row defines an object.\n If separation is not defined it defaults to Euclidean space.\n \"\"\"\n #\n # Find a separation function\n #\n if callable(separation):\n self.separation = separation\n elif isinstance(separation,str):\n if separation == 'euclid':\n self.separation = self.euclid\n elif separation == 'sphereradec':\n self.separation = self.sphereradec\n else:\n raise self.PydlutilsException(\"Unknown separation function: {0}.\".format(separation))\n else:\n raise self.PydlutilsException(\"Improper type for separation!\")\n #\n # Save information about the coordinates.\n #\n nGroups = 0\n nTargets = coordinates.shape[1]\n multGroup = self.np.zeros(nTargets,dtype='i4')\n firstGroup = self.np.zeros(nTargets,dtype='i4') -1\n nextGroup = self.np.zeros(nTargets,dtype='i4') -1\n inGroup = self.np.arange(nTargets,dtype='i4')\n #\n # Find all the other targets associated with each target\n #\n for i in range(nTargets):\n nTmp = 0\n minGroup = nGroups\n for j in range(nTargets):\n sep = self.separation(coordinates[:,i],coordinates[:,j])\n if sep <= distance:\n multGroup[nTmp] = j\n minGroup = min(minGroup,inGroup[j])\n nTmp += 1\n #\n # Use this minimum for all\n #\n for j in range(nTmp):\n if inGroup[multGroup[j]] < nTargets:\n k = firstGroup[inGroup[multGroup[j]]]\n while k != -1:\n inGroup[k] = minGroup\n k = nextGroup[k]\n inGroup[multGroup[j]] = minGroup\n #\n # If it is a new group (no earlier groups), increment nGroups\n #\n if minGroup == nGroups:\n nGroups += 1\n for j in range(i+1):\n firstGroup[j] = -1\n for j in range(i,-1,-1):\n nextGroup[j] = firstGroup[inGroup[j]]\n firstGroup[inGroup[j]] = j\n #\n # Renumber to get rid of the numbers which were skipped\n #\n renumbered = self.np.zeros(nTargets,dtype='bool')\n nTmp = nGroups\n nGroups = 0\n for i in range(nTargets):\n if not renumbered[i]:\n j = firstGroup[inGroup[i]]\n while j != -1:\n inGroup[j] = nGroups\n renumbered[j] = True\n j = nextGroup[j]\n nGroups += 1\n #\n # Reset the values of firstGroup and inGroup\n #\n firstGroup[:] = -1\n for i in range(nTargets-1,-1,-1):\n nextGroup[i] = firstGroup[inGroup[i]]\n firstGroup[inGroup[i]] = i\n #\n # Get the multiplicity\n #\n for i in range(nGroups):\n multGroup[i] = 0\n j = firstGroup[i]\n while j != -1:\n multGroup[i] += 1\n j = nextGroup[j]\n #\n # Set attributes\n #\n self.nGroups = nGroups\n self.nTargets = nTargets\n self.inGroup = inGroup\n self.multGroup = multGroup\n self.firstGroup = firstGroup\n self.nextGroup = nextGroup\n return\n","sub_path":"pydl/pydlutils/spheregroup/groups.py","file_name":"groups.py","file_ext":"py","file_size_in_byte":4563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"282727907","text":"from chain import Chain\nfrom clientlist import ClientList\nimport socket\n\nclass Validation(Chain):\n\n def Request(self, a):\n client = ClientList()\n if(client.check(a[1][0])):\n return a\n else:\n a[0].send(HTTPVER + ' 405 Method Not Allowed\\n' +\n 'Proxy-agent: %s\\n\\n' % VERSION)\n return 0\n return 
0","sub_path":"validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"207165884","text":"import sys\nimport pytest\nfrom pathlib import Path\nfrom pyppl import plugin, PyPPL, Proc, config, __version__\n\nHERE = Path(__file__).resolve().parent\nsys.path.insert(0, str(HERE))\npyppl_test = __import__('pyppl_test')\npyppl_empty = __import__('pyppl_empty')\npyppl_report = __import__('pyppl_report')\npyppl_flowchart = __import__('pyppl_flowchart')\n\ndef setup_module(module):\n\tplugin.registerPlugins(['pyppl_test', 'pyppl_empty'], ['pyppl_report', 'pyppl_flowchart'])\n\tplugin.pluginmgr.hook.setup(config = config)\n\ndef teardown_module(module):\n\t# unregister plugins for further testing\n\tplugin.pluginmgr.unregister(pyppl_test)\n\tplugin.pluginmgr.unregister(pyppl_empty)\n\tplugin.pluginmgr.unregister(pyppl_report)\n\tplugin.pluginmgr.unregister(pyppl_flowchart)\n\tconfig.envs.clear()\n\ndef test_register():\n\tassert plugin.pluginmgr.is_registered(pyppl_test)\n\tassert plugin.pluginmgr.is_registered(pyppl_report)\n\tassert plugin.pluginmgr.is_registered(pyppl_flowchart)\n\ndef test_prerun(caplog):\n\tsys.argv = [sys.argv[0]]\n\twith pytest.raises(plugin.PyPPLFuncWrongPositionError):\n\t\tPyPPL().start(Proc(id = 'pPreRun1')).run().preRun()\n\tassert not any('PYPPL PRERUN' in msg for _,_,msg in caplog.record_tuples)\n\n\tPyPPL().start(Proc(id = 'pPreRun2')).preRun().run()\n\tassert any('PYPPL PRERUN' in msg for _,_,msg in caplog.record_tuples)\n\ndef test_postrun(caplog):\n\tsys.argv = [sys.argv[0]]\n\twith pytest.raises(plugin.PyPPLFuncWrongPositionError):\n\t\tPyPPL().start(Proc(id = 'pPostRun1')).postRun().run()\n\tassert not any('PYPPL POSTRUN' in msg for _,_,msg in caplog.record_tuples)\n\n\tPyPPL().start(Proc(id = 'pPostRun2')).run().postRun()\n\tassert any('PYPPL POSTRUN' in msg for _,_,msg in caplog.record_tuples)\n\ndef test_setgetattr():\n\tpSetAttr = Proc()\n\tassert pSetAttr.ptest == 0\n\tpSetAttr.ptest = 1\n\tassert pSetAttr.ptest == 100\n\tassert pSetAttr.pempty == 0\n\tpSetAttr.pempty = 1\n\tassert pSetAttr.pempty == 1\n\ndef test_prepostrun(caplog):\n\tp = Proc(id = 'pPrePostRun1')\n\tp.input = {'a' : ['1']}\n\tPyPPL().start(p).run()\n\texpects = [\n\t\t'PIPELINE STARTED',\n\t\t'pPrePostRun1 STARTED',\n\t\t'JOB 0 STARTED',\n\t\t'JOB 0 ENDED',\n\t\t'pPrePostRun1 ENDED',\n\t\t'PIPELINE ENDED'\n\t]\n\tfor name, level, msg in caplog.record_tuples:\n\t\tif expects:\n\t\t\tif expects[0] in msg:\n\t\t\t\texpects.pop(0)\n\tassert len(expects) == 0 # messages appear in order\n\ndef test_jobfail(caplog):\n\tp = Proc(id = 'pPluginJobFail')\n\tp.input = {'a' : ['1']}\n\tp.script = 'exit 1'\n\twith pytest.raises(SystemExit):\n\t\tPyPPL().start(p).run()\n\tassert any('Job 0 failed' in msg for _,_,msg in caplog.record_tuples)\n","sub_path":"tests/test_plugin.py","file_name":"test_plugin.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"131694462","text":"import tkinter as tk\nfrom tkinter import *\nfrom tkinter import font\nfrom tkinter.filedialog import askopenfilename\nfrom tkinter import ttk\nimport textwrap\n\n\nfrom AppOperations import AppOperations as ao \t\t\t# the class build for this purpose\nfrom CommandsGUI import CommandsGUI\nfrom CommandsGUI import ScreenGUI\n#from Generic import Generic\nfrom GUIfunctions import GUIfunctions\nfrom DBOperations 
import DBOperations\n\nfrom tkinter import messagebox\n# last parent5\n\ndata_valid = 0\t# to check if the data was successfully inserted or not!\n\ninfo = [\n\t\t(\"Name (TEXT):\",1),\n\t\t(\"e-mail (TEXT):\",2),\n\t\t(\"Flat no. (TEXT):\",3),\n\t\t(\"Tower no. (TEXT):\",4),\n\t\t(\"Area (NUMBER):\",5),\n\t\t(\"Parking (TEXT):\",6),\n\t\t(\"Recpt. Fess (NUMBER):\",7),\n\t\t(\"Address (TEXT):\",8),\n\t\t(\"Contact number (TEXT):\",9)\n\t\t]\ne=[\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\"]\n\n\nroot = Tk()\nmenu = Menu(root)\nroot.config(menu=menu)\nroot.title(\"FLAT-INVENTORY JIMSOFT\")\nroot.geometry(\"1000x600+200+200\")\nprint(\"Starting your application... This may take some time... hold on!\")\n\nclass Generic:\t\t\t\t# this class is used to combine multiple functions to return into 1 function\n\tdef combine_funcs(*funcs):\n\t\t# command = combine_funcs(func1, func2)\n\t\tdef combined_func(*args, **kwargs):\n\t\t\tfor f in funcs:\n\t\t\t\tf(*args, **kwargs)\n\t\treturn combined_func\n\tdef answer():\n\t\tmessagebox.showerror(\"Answer\", \"Sorry, no answer available\")\n\n\tdef callback():\n\t\tif messagebox.askyesno('Verify', 'Really quit?'):\n\t\t\tif messagebox.showwarning('Save Changes?', 'Commit all existing data ?'):\n\t\t\t\tprint('Changes saved!')\n\t\t\t\tao.save_root()\n\t\t\troot.destroy()\n\t\telse:\n\t\t\tmessagebox.showinfo('No', 'Quit has been cancelled')\n\tdef delete_dummy():\n\t\tGUIfunctions.delete_multiple()\n\n# to implement the dialogue warning stuffs on exiting\nroot.protocol(\"WM_DELETE_WINDOW\",Generic.callback)\n\nif __name__ == \"__main__\":\n\tScreenGUI.detailsMenu()\n\tScreenGUI.manipulateMenu()\n\tScreenGUI.billMenu()\n\tmainloop()\n","sub_path":"python_gui_tkinter/KALU/GARBAGE/TEST/Generic.py","file_name":"Generic.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} {"seq_id":"257782072","text":"import scipy.io\nimport numpy as np\nimport pylab as pl\nfrom sklearn.linear_model import lasso_path, LassoCV\nfrom sklearn import svm\nimport matplotlib.pyplot as plt\n\n# create data\nnumpts = 1000\nb = 2+np.random.normal(loc=0.0, scale=1.0, size=[numpts, 2])\nc = np.random.normal(loc=0.0, scale=1.0, size=[numpts, 2])\n\nplt.plot(b[:,0],b[:,1],'.r')\nplt.plot(c[:,0],c[:,1],'.k')\n#plt.show()\n\nx = np.concatenate((b[:,0], c[:,0])) # to append\ny = np.concatenate((b[:,1], c[:,1]))\nX = np.column_stack((x, y)) #to stack\n\na = np.ones(numpts)\na2 = np.zeros(numpts)\nY = np.concatenate((a,a2))\n\n\n# SVM train (classification by default)\nclf = svm.SVC()\nclf.fit(X, Y)\n\n# SVM classify \nYhat = clf.predict(X)\nConfidence = clf.decision_function(X)\nerror = np.sum(abs(Yhat-Y))\nprint((numpts-error)/numpts)\n\nYb = Yhat==1\nplt.plot(X[Yb,0],X[Yb,1],'or',markersize=10,fillstyle='none')\nplt.plot(X[~Yb,0],X[~Yb,1],'ok',markersize=10,fillstyle='none')\nplt.plot(b[:,0],b[:,1],'.r')\nplt.plot(c[:,0],c[:,1],'.k')\nplt.show()\n\n","sub_path":"classification_stuff/svm_example.py","file_name":"svm_example.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} {"seq_id":"631215876","text":"from flask import render_template, url_for, flash, redirect, request, abort\nfrom sqlalchemy.engine import url\nfrom tabs.forms import RegistrationForm, LoginForm, UpdateAccountForm, TabForm\nfrom tabs.models import User, Tab\nfrom tabs import app, db, bcrypt, hostname\nfrom flask_login import login_user, logout_user, 
login_required, current_user, fresh_login_required\nimport requests\nimport favicon\nimport random\nimport os\nfrom tabs.s3 import upload_file, delete_file\nfrom datetime import date\nfrom bs4 import BeautifulSoup\n\n# Set headers\nheaders = requests.utils.default_headers()\nheaders.update({'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0'})\n\n\n# @app.route('/')\n# def home():\n# return render_template('home.html')\n\n\n@app.route('/register/', methods=['GET', 'POST'])\ndef register():\n if current_user.is_authenticated:\n return redirect(url_for('tabs'))\n form = RegistrationForm()\n if form.validate_on_submit():\n hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')\n user = User(username=form.username.data, email=form.email.data, password=hashed_password)\n db.session.add(user)\n db.session.commit()\n flash(f'Account created for {form.username.data}!', 'success')\n return redirect(url_for('login'))\n return render_template('register.html', title='Register', hostname=hostname, form=form)\n\n\n@app.route('/login/', methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('tabs'))\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data).first()\n if user and bcrypt.check_password_hash(user.password, form.password.data):\n login_user(user, remember=form.remember.data)\n next_page = request.args.get('next')\n return redirect(next_page) if next_page else redirect(url_for('tabs'))\n else:\n flash('Login Unsuccessful.', 'danger')\n return render_template('login.html', title='Login', hostname=hostname, form=form)\n\n\n@app.route(\"/logout/\")\n@login_required\ndef logout():\n logout_user()\n flash('Logged Out.', 'success')\n return redirect(url_for('tabs'))\n\n\n@app.route(\"/account/\", methods=['GET', 'POST'])\n# @fresh_login_required\n@login_required\ndef account():\n form = UpdateAccountForm()\n if form.validate_on_submit():\n if form.new_password.data:\n current_user.email = form.email.data\n current_user.username = form.username.data\n current_user.password = bcrypt.generate_password_hash(form.new_password.data).decode('utf-8')\n db.session.commit()\n flash(f'Account has been updated!', 'success')\n else:\n current_user.email = form.email.data\n current_user.username = form.username.data\n db.session.commit()\n flash(f'Account has been updated!', 'success')\n return redirect(url_for('account'))\n elif request.method == 'GET':\n form.username.data = current_user.username\n form.email.data = current_user.email\n return render_template('account.html', hostname=hostname, form=form)\n\n\n@app.route('/')\n@login_required\ndef tabs():\n tab_set = current_user.tabs\n return render_template('tabs.html', hostname=hostname, tabs=tab_set)\n\ndef host_alive(url):\n try:\n requests.get(url, headers)\n except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):\n flash(f'{url} not responding!', 'danger')\n return False\n except requests.exceptions.HTTPError:\n return True\n else:\n return True\n\n@app.route(\"/tabs/add/\", methods=['GET', 'POST'])\n@login_required\ndef add_tab():\n form = TabForm()\n if form.validate_on_submit():\n url = form.url.data\n if not host_alive(url):\n return render_template('add_tab.html', legend='Create Tab', hostname=hostname, form=form)\n if (not form.tab_name.data):\n # url = form.url.data\n req = requests.get(url, headers)\n soup = BeautifulSoup(req.content)\n if soup.title: # if title 
exist\n form.tab_name.data = soup.title.string\n if not form.tab_name.data: # if tab_name is missing print url instead\n form.tab_name.data = url\n\n try:\n favicon_obj = favicon.get(url)\n except requests.exceptions.HTTPError as e:\n favicon_obj = None\n print('favicon.get(url) error = ' + str(e.response.status_code))\n if favicon_obj: # save favicon if there is one\n favicon_url = favicon.get(url)[0].url\n r = requests.get(favicon_url, allow_redirects=True)\n favicon_file_name = str(random.randint(0,10**9)) + date.today().strftime('_%d_%m_%Y') + '.ico'\n favicon_path = app.static_folder + '/img/' + favicon_file_name\n try:\n open(favicon_path, 'wb').write(r.content)\n upload_file(favicon_path, object_name=favicon_file_name)\n os.remove(favicon_path)\n except (FileNotFoundError):\n print('FileNotFoundError ' + favicon_path)\n else:\n favicon_file_name = None\n tab = Tab(tab_name=form.tab_name.data, url=form.url.data, user_id=current_user.id,\n use_comment_as_name=form.use_comment_as_name.data, comment=form.comment.data,\n favicon=favicon_file_name)\n db.session.add(tab)\n db.session.commit()\n flash('Tab created!', 'success')\n return redirect(url_for('tabs'))\n return render_template('add_tab.html', legend='Create Tab', hostname=hostname, form=form)\n\n\n@app.route(\"/tabs//edit/\", methods=['GET', 'POST'])\n@login_required\ndef edit_tab(tab_id):\n tab = Tab.query.get_or_404(tab_id)\n if tab.user_id != current_user.id:\n abort(403)\n form = TabForm()\n if form.validate_on_submit():\n if (not form.tab_name.data):\n url = form.url.data\n req = requests.get(url, headers)\n soup = BeautifulSoup(req.content)\n form.tab_name.data = soup.title.string\n tab.tab_name = form.tab_name.data\n tab.url = form.url.data\n tab.comment = form.comment.data\n tab.use_comment_as_name = form.use_comment_as_name.data\n db.session.add(tab)\n db.session.commit()\n flash('Updated.', 'success')\n return redirect(url_for('tabs', tab_id=tab.id))\n elif request.method == 'GET':\n form.tab_name.data = tab.tab_name\n form.url.data = tab.url\n form.comment.data = tab.comment\n form.use_comment_as_name.data = tab.use_comment_as_name\n return render_template('add_tab.html', legend='Update Tab', hostname=hostname, tab=tab, form=form)\n\n\n@app.route(\"/tabs//delete/\", methods=['POST'])\n@login_required\ndef delete_tab(tab_id):\n tab = Tab.query.get_or_404(tab_id)\n if tab.user_id != current_user.id:\n abort(403)\n db.session.delete(tab)\n db.session.commit()\n if tab.favicon and (tab.favicon != 'favicon.ico'):\n delete_file(tab.favicon)\n flash('Tab has been deleted!', 'success')\n return redirect(url_for('tabs'))\n\n","sub_path":"tabs/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":7374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"537108597","text":"import pymongo\nimport pandas as pd\nimport copy\nfrom datetime import datetime\n\nmongodb_url = 'mongodb://localhost:27017'\nsmart_db = 'smart'\n# smart_db = 'testPersister'\nsmart_collection = 'disk_smart_info'\n\n\ndef get_mongodb():\n mongo_client = pymongo.MongoClient(mongodb_url)\n db = mongo_client[smart_db]\n # username, password = 'sunbo', '123456'\n # db.authenticate(username, password)\n return db\n\n\ndef query_and_mock_data():\n db = get_mongodb()\n smart_col = db[smart_collection]\n query_body = {\n 'disk_serial_number': 'SWN648LW',\n 'disk_model': 'AL14SEB030N'\n }\n smart_dataset = smart_col.find(query_body)\n for data in smart_dataset:\n data['disk_serial_number'] = 
'W4612ESL'\n data['disk_model'] = 'ST2000NX0253'\n # data['date'] = datetime.now()\n del data['_id']\n print('insert data: {}'.format(data))\n smart_col.insert_one(data)\n print('insert {}:{} successful...'.format(data['disk_model'], data['disk_serial_number']))\n\n\ndef query_and_parse_data():\n db = get_mongodb()\n smart_col = db[smart_collection]\n smart_dataset = smart_col.find()\n smart_dict = {\n 'date': [],\n 'serial_number': [],\n 'model': [],\n 'capacity_bytes': [],\n 'failure': []\n }\n id_list = []\n for data in smart_dataset:\n date = data['date'].strftime(\"%Y/%m/%d\")\n print(date)\n sn = data['disk_serial_number']\n model = data['disk_model']\n attrs = data['attrs']\n smart_dict['date'].append(date)\n smart_dict['serial_number'].append(sn)\n smart_dict['model'].append(model)\n smart_dict['capacity_bytes'].append(0)\n smart_dict['failure'].append(0)\n\n for attr in attrs:\n id = int(attr['Id'])\n value_key = 'smart_{}_normalized'.format(id)\n raw_key = 'smart_{}_raw'.format(id)\n value = attr['Value']\n raw = attr['RawValue'].split(' (')[0]\n if id not in id_list:\n id_list.append(id)\n smart_dict[value_key] = []\n smart_dict[raw_key] = []\n\n smart_dict[value_key].append(int(value))\n smart_dict[raw_key].append(int(raw))\n\n # print(smart_dict)\n return pd.DataFrame(smart_dict)\n\n\ndef query_and_parse_data1():\n db = get_mongodb()\n smart_col = db[smart_collection]\n smart_dataset = smart_col.find()\n smart_dict = {\n 'date': [],\n 'serial_number': [],\n 'model': [],\n 'capacity_bytes': [],\n 'failure': []\n }\n # all attr ids\n id_list = [1,3,4,5,7,8,9,10,11,12,13,100,103,170,171,172,173,174,177,184,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,210,211,212,220,231,232,235,237,241,242]\n for id in id_list:\n value_key = 'smart_{}_normalized'.format(id)\n raw_key = 'smart_{}_raw'.format(id)\n smart_dict[value_key] = []\n smart_dict[raw_key] = []\n\n for data in smart_dataset:\n date = data['date'].strftime(\"%Y/%m/%d\")\n # print(date)\n sn = data['disk_serial_number']\n model = data['disk_model']\n attrs = data['attrs']\n smart_dict['date'].append(date)\n smart_dict['serial_number'].append(sn)\n smart_dict['model'].append(model)\n smart_dict['capacity_bytes'].append(0)\n smart_dict['failure'].append(0)\n\n temp_id_list = copy.deepcopy(id_list)\n\n for attr in attrs:\n id = int(attr['Id'])\n if id in temp_id_list:\n temp_id_list.remove(id)\n value_key = 'smart_{}_normalized'.format(id)\n raw_key = 'smart_{}_raw'.format(id)\n value = attr['Value']\n raw = attr['RawValue'].split(' (')[0]\n smart_dict[value_key].append(int(value))\n smart_dict[raw_key].append(int(raw))\n\n for id in temp_id_list:\n value_key = 'smart_{}_normalized'.format(id)\n raw_key = 'smart_{}_raw'.format(id)\n smart_dict[value_key].append(0)\n smart_dict[raw_key].append(0)\n\n # print(smart_dict)\n return pd.DataFrame(smart_dict)\n\n\nif __name__ == '__main__':\n # df = query_and_parse_data()\n # print(df)\n\n query_and_mock_data()","sub_path":"parse_from_mongo.py","file_name":"parse_from_mongo.py","file_ext":"py","file_size_in_byte":4272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"152486520","text":"# Returns true if the passed string cannot be parsed into an Integer\ndef IsNotInteger(String):\n try:\n int(String)\n return False\n\n except ValueError:\n return True\n\n\n# Get a valid integer from input\ndef InputInteger(Prompt):\n # Get the initial input\n Input = input(Prompt)\n\n # Ask for a valid integer until it is given\n 
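# IsNotInteger() returns True while int() parsing fails, so this keeps re-prompting\n    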
while(IsNotInteger(Input)):\n        Input = input(\"Invalid Input. %s\" % (Prompt))\n\n    # Return the integer from input\n    return int(Input)\n\n\n# Get an integer for the size of the box\nSize = InputInteger(\"Please input a valid integer for the size of the box: \")\n\n# Loop through each line and form it\nfor Y in range(0, Size):\n    # The output for this line\n    LineOutput = \"\"\n\n    # Loop through each character in this line, if it is an edge character;\n    # add an o character, if it is not; add a space\n    for X in range(0, Size):\n        # If it is an edge character\n        if X == 0 or Y == 0 or X == Size - 1 or Y == Size - 1:\n            LineOutput += \"o\"\n        else:\n            LineOutput += \" \"\n\n    # Print this line\n    print(LineOutput)","sub_path":"159.171/Tutorial 5/Question 2.py","file_name":"Question 2.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"331634140","text":"import sys\nimport datetime\nimport os\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport webbrowser\nfrom threading import Thread\nimport dash_bootstrap_components as dbc\nimport dash_auth\nfrom . import _verifications as ver\nimport flask\nimport pandas as pd\nimport plotly\nimport json\nimport inspect\nfrom . import user_functions as user\nfrom io import BytesIO\n# TODO save THEME parameter for use in chart generation\n\n# TODO make a redirect function to the download route\n# TODO create a single access structure for the session store\n\n\n# IDEA make an add_download function that adds a new route --- NO: routes would pile up, and the parameters would have to be passed in as well\n\nclass Application:\n\n    def __init__(self, host='127.0.0.1:8050', assets_folder=os.path.join(os.getcwd(), 'assets'), auth=None,\n                 title='My app',\n                 page_div_id='main_div', url_id='url', export_file_path=os.path.join(os.getcwd(), 'export'),\n                 theme=dbc.themes.SLATE, id_main_alert='main_alert', session_store_id='session', user_class=None,\n                 tempo_refresh_user=600, default_page='/', tipo_server='host', server=None, base_layout=None):\n        \"\"\"\n        creates an Application, based on dash and a couple of quality of life improvements, paired with Page class\n        :param host: ip and port to host app\n        :type host: str\n        :param assets_folder: path of folder with main assets\n        :type assets_folder: str\n        :param auth: pairs of usernames and password\n        :type auth: dict\n        :param title: title for the app\n        :type title: str\n        :param base_layout: basic layout for the app\n        :type base_layout: dash component\n        :param page_div_id: id of the main page container\n        :type page_div_id: str\n        :param url_id: id of the url component\n        :type url_id: str\n        :param export_file_path: path of folder with exportable files\n        :type export_file_path: str\n        :param theme: theme for bootstrap\n        :type theme: str\n        \"\"\"\n        BASIC_LAYOUT = html.Div(\n            [html.Div([html.Div([dcc.Link(html.Img(src=os.path.split(assets_folder)[1] + '/logo.png'), href='/'),\n                                 dcc.Store(id='session', storage_type='session'),\n                                 html.Button(html.Img(src=os.path.split(assets_folder)[1] + '/menu_icon.png'),\n                                             className='navbar-toggler', id='toggle_sidebar',\n                                             style={'display': 'inline-block', 'text-align': 'center',\n                                                    'margin-bottom': '25px'})],\n                                style={'backgroundColor': 'black', 'width': '-webkit-fill-available'}),\n                       dbc.Alert(id='main_alert', is_open=False, fade=True, duration=10000, dismissable=True,\n                                 color=\"warning\"), dcc.Location(id='url', refresh=False), html.Div(id='main_div'),],\n                      style={'display': 'inline-block', 
'height': '100%', 'width': '-webkit-fill-available',\n                             'vertical-align': 'top'})],\n            style={'height': '100%', 'vertical-align': 'top', 'width': '-webkit-fill-available'})\n        if base_layout is None:\n            base_layout = BASIC_LAYOUT\n        self.tempo_refresh_user = tempo_refresh_user\n        self.id_main_alert = id_main_alert\n        if tipo_server == 'host':\n            self.app = dash.Dash(__name__, assets_folder=assets_folder, external_stylesheets=[theme, 'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css'],\n                                 suppress_callback_exceptions=True)\n        else:\n            self.app = dash.Dash(__name__, server=server, assets_folder=assets_folder, external_stylesheets=[theme,\n                                 'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css'],\n                                 suppress_callback_exceptions=True)\n        self.app.title = title\n        self.basic_layout = base_layout\n        self.app.layout = self.basic_layout\n        self.addr = host\n        self.pages = {}\n        self.page_div_id = page_div_id\n        self.url_id = url_id\n        self.id_list = self._get_id_from_children(\n            json.loads(json.dumps(self.basic_layout, cls=plotly.utils.PlotlyJSONEncoder)))\n        if page_div_id not in self.id_list:\n            raise(Exception(\"Atributo page_div_id deve ser uma id contida em base_layout\"))\n        if url_id not in self.id_list:\n            raise(Exception(\"Atributo url_id deve ser uma id contida em base_layout\"))\n        if id_main_alert not in self.id_list:\n            raise(Exception(\"Atributo id_main_alert deve ser uma id contida em base_layout\"))\n        if session_store_id not in self.id_list:\n            raise(Exception(\"Atributo session_store_id deve ser uma id contida em base_layout\"))\n        self.alert_funcs = []\n        self.download_funcs = []\n        self.session_store_id = session_store_id\n        self.user_class = user_class\n        server = self.app.server\n        self.export_file_path = export_file_path\n        @server.route('/download/<path:path>')\n        def download_from_directory(path):\n            out = BytesIO()\n            try:\n                out.write(open(self.export_file_path + '/' + path, 'rb').read())\n                out.seek(0)\n                os.remove(self.export_file_path + '/' + path)\n            finally:\n                out.seek(0)\n            return flask.send_file(out, as_attachment=True, cache_timeout=0, attachment_filename=path.split('/')[-1])\n\n        if auth:\n            basicauth = dash_auth.BasicAuth(\n                self.app,\n                auth\n            )\n\n    def start(self):\n        \"\"\"\n        starts the server for the app\n        :return:\n        :rtype:\n        \"\"\"\n        self.app.run_server(host=self.addr.split(':')[0], port=os.getenv(\"PORT\", int(self.addr.split(':')[1])))\n\n    def open(self):\n        \"\"\"\n        opens the server host ip address\n        :return:\n        :rtype:\n        \"\"\"\n        webbrowser.open('http://' + self.addr)\n\n    def start_and_open(self):\n        \"\"\"\n        start and open app server\n        :return:\n        :rtype:\n        \"\"\"\n        t1 = Thread(target=self.start)\n        # print(\"##########\")\n\n        t2 = Thread(target=self.open)\n        t1.start()\n\n        t2.start()\n\n    def add_page(self, page):\n        \"\"\"\n        appends a page to the app, easing page management\n        :param page: Page object, created through app_page.Page()\n        :type page: app_page.Page\n        :return:\n        :rtype:\n        \"\"\"\n        self._checa_validez_ids(page)\n        self._checa_validez_link(page)\n        self.pages[page.link] = {'layout': page.layout,\n                                 'name': page.name,\n                                 'section': page.section,\n                                 'permissoes_suficientes': page.permissoes_suficientes,\n                                 'icon_class': page.icon_class}\n        self.id_list += page._get_id_list()\n\n    def set_page_callback(self):\n        \"\"\"\n        Sets the callback for page management\n        :return:\n        :rtype:\n        \"\"\"\n        self._update_layout_for_sidebar()\n\n        @self.app.callback([dash.dependencies.Output(self.page_div_id, 'children'),\n                            dash.dependencies.Output(self.session_store_id, 
'data'),\n dash.dependencies.Output('main_sidebar', 'children')],\n [dash.dependencies.Input(self.url_id, 'pathname')],\n [dash.dependencies.State(self.session_store_id, 'data'),\n dash.dependencies.State(self.session_store_id, 'modified_timestamp')])\n def redireciona(path, data, ts):\n print(data, ts)\n if data is None:\n perm, uid = self._get_id_perm()\n data = {'user': uid,\n 'permissoes': perm}\n else:\n print((datetime.datetime.today() - datetime.datetime.fromtimestamp(ts / 1000)).seconds)\n if (datetime.datetime.today() - datetime.datetime.fromtimestamp(\n ts / 1000)).seconds > self.tempo_refresh_user:\n perm, uid = self._get_id_perm()\n data = {'user': uid,\n 'permissoes': perm}\n else:\n data = data\n sidebar_children = dbc.Nav(\n self._auto_generate_sidebar_items(data),\n vertical=True,\n pills=True,\n\n )\n\n if path in self.pages.keys():\n return (*self._get_page_layout(path, data), sidebar_children)\n\n else:\n return (*self._get_page_layout('/', data), sidebar_children)\n\n def add_download_callback(self, func, inp, states):\n if isinstance(inp, list) and len(inp) != 1:\n raise Exception(\"para usar downloads globais apenas um input deve ser passado\")\n ver.checa_compatibilidade(func, [], inp, states)\n self.download_funcs.append({'input': inp,\n 'states': states,\n 'function': func\n })\n\n def set_download_callbacks(self):\n\n list_inputs = [y for x in self.download_funcs for y in x['input']]\n list_states = [y for x in self.download_funcs for y in x['states']]\n # TODO encontrar maneira de fazer redirecionamento para rota via server - equivalente à webbrowser.open() - talvez clientside_callback + session stored filename?\n # @self.app.clientside_callback(dash.dependencies.Output(self.url_id, 'pathname'),\n # [dash.dependencies.Input(i[0], i[1]) for i in list_inputs],\n # [dash.dependencies.State(i[0], i[1]) for i in list_states] + [dash.dependencies.State(self.url_id, 'pathname')], prevent_initial_call=True)\n # def download(*args):\n # ctx = dash.callback_context\n # list_inputs = [y for x in self.download_funcs for y in x['input']]\n # list_states = [y for x in self.download_funcs for y in x['states']]\n # args_completo = [i[0] + '.' + i[1] for i in list_inputs + list_states]\n # button_id = ctx.triggered[0][\"prop_id\"].split(\".\")[0]\n # triggered = [i for i in self.download_funcs if i['input'][0][0] == button_id]\n # print(args[-1])\n # if len(triggered) > 0 and ctx.triggered[0].get('value') is not None:\n # triggered = triggered[0]\n # inputs_triggered = triggered['input'][0][0] + '.' + triggered['input'][0][1]\n # states_triggered = [i[0] + '.' 
+ i[1] for i in triggered['states']]\n # args_triggered = [inputs_triggered] + states_triggered\n # args_trig = [args[args_completo.index(a)] for a in args_triggered]\n # # flask.redirect('/download/' + triggered['function'](*args_trig))\n #\n # return args[-1]\n # return args[-1]\n\n def _get_page_layout(self, path, data):\n perm = self.pages[path]['permissoes_suficientes']\n status = self._checa_validez_permissao(perm, data)\n if status:\n return self.pages[path]['layout'], data\n else:\n print(path)\n return self._get_page_layout('/', data)\n\n def _checa_validez_permissao(self, perm, data):\n if inspect.isclass(self.user_class) and issubclass(self.user_class, user.User):\n perm_user = set(data['permissoes'])\n if perm is not None:\n # print(perm)\n if isinstance(perm, list):\n for permission_group in perm:\n if isinstance(permission_group, str):\n if {permission_group}.issubset(perm_user):\n # print('usuario permitido!')\n return True\n else:\n # print('usuario sem permissao!')\n return False\n elif isinstance(permission_group, list):\n if set(permission_group).issubset(perm_user):\n # print('usuario permitido!')\n return True\n else:\n # print('usuario sem permissao!')\n return False\n else:\n print('pagina mal configurada! permissoes tem que ser lista de permissoes ou string'\n 'com permissao unica')\n return False\n\n elif isinstance(perm, str):\n if {perm}.issubset(perm_user):\n # print('usuario permitido str!')\n return True\n else:\n # print('usuario sem permissao!')\n return False\n else:\n print('pagina mal configurada! permissoes tem que ser lista de permissoes ou string'\n 'com permissao unica')\n return False\n else:\n # print('page n requer permissao!')\n return True\n else:\n # print('app nao usa classe user')\n return True\n\n def _get_id_perm(self):\n if inspect.isclass(self.user_class) and issubclass(self.user_class, user.User):\n ip = flask.request.remote_addr\n usr = self.user_class()\n usr.get_user_from_ip(ip)\n uid, perm = usr.codigo_unico, usr.permissoes\n # print(uid, perm)\n else:\n uid, perm = None, None\n return perm, uid\n\n def _auto_generate_sidebar_items(self, data):\n \"\"\"\n Generate sidebar items based on pages appended to the app\n :return:\n :rtype:\n \"\"\"\n sidebar_items = [html.Button('X', id='toggle_sidebar_close',\n style={'backgroundColor': '#202020', 'text-align': 'right', 'color': 'white'}),\n html.Br()]\n df = pd.DataFrame([(i, self.pages[i]['name'], self.pages[i]['section'], self.pages[i]['icon_class'], self.pages[i]['permissoes_suficientes']) for i in self.pages.keys()],\n columns=['link', 'name', 'section', 'icon_class', 'permissoes_suficientes'])\n df['PERMITIDO'] = df.apply(lambda row: self._checa_validez_permissao(row.permissoes_suficientes, data), axis=1)\n df = df.loc[df.PERMITIDO]\n # print('')\n\n for section in df.section.drop_duplicates():\n pages = df.loc[df.section == section]\n details_section = [html.Details([\n html.Summary(html.Strong(section), style={'width': '90%'}),\n html.Div([\n dbc.NavLink([html.I(className=pag['icon_class'], style={'margin-right': '0.4rem', 'font-size': '0.6rem'}), pag['name']], href=pag['link'],\n style={'backgroundColor': '#202020', 'padding': '0px', 'background-image': 'none',\n 'border': 0, 'margin-left': '1rem'}) for i, pag in pages.iterrows()\n ] + [html.Br()]),\n ]),\n ]\n sidebar_items += details_section\n # print(sidebar_items)\n return sidebar_items\n\n @staticmethod\n def _get_sidebar():\n \"\"\"\n generate sidebar for the app, based on pages appended to the app\n :return:\n :rtype:\n 
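dbc.Collapse - an empty container whose children are filled in later by the page callback\n        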
\"\"\"\n return dbc.Collapse(\n\n id='main_sidebar',\n is_open=True,\n className='mat-elevation-z0 mat-card col-md-2',\n style={'backgroundColor': '#202020', 'height': '-webkit-fill-available', 'margin': 0, 'position': 'fixed',\n 'zIndex': '999999', 'padding': '0px'}\n\n )\n\n def _update_layout_for_sidebar(self):\n \"\"\"\n adds the sidebar to the original layout\n :return:\n :rtype:\n \"\"\"\n self.app.layout.children = [self._get_sidebar()] + self.app.layout.children\n\n @self.app.callback(dash.dependencies.Output('main_sidebar', 'is_open'),\n [dash.dependencies.Input('toggle_sidebar', 'n_clicks'),\n dash.dependencies.Input('toggle_sidebar_close', 'n_clicks')],\n [dash.dependencies.State('main_sidebar', 'is_open')])\n def toggle_main_sidebar(n1, n2, is_open):\n if n1 or n2:\n return not is_open\n\n def _get_id_list(self):\n return self.id_list\n\n def _get_id_from_children(self, dicio):\n list_ids = []\n if isinstance(dicio, dict):\n if 'props' in dicio.keys():\n if isinstance(dicio['props'], dict):\n if 'id' in dicio['props'].keys():\n list_ids.append(dicio['props']['id'])\n if 'children' in dicio['props'].keys():\n if dicio['props']['children'] is not None:\n for child in list(dicio['props']['children']):\n list_ids += self._get_id_from_children(child)\n elif isinstance(dicio, list) and len(dicio) > 0:\n for dic in dicio:\n list_ids += self._get_id_from_children(dic)\n return list_ids\n\n def _checa_validez_ids(self, page):\n ids_page = page._get_id_list()\n if len(set(ids_page).intersection(set(self.id_list))) > 0:\n raise Exception(\n \"Encontrei ids na nova pagina {}/{} que ja estao sendo utilizadas por outras paginas: {}\".format(\n page.section, page.name, list(set(ids_page).intersection(set(self.id_list)))))\n\n def _checa_validez_link(self, page):\n link_page = page.link\n if link_page in self.pages.keys():\n raise Exception(\n \"O link da nova pagina {}/{} já está sendo utilizado pela pagina {}/{}\".format(\n page.section, page.name, self.pages[link_page]['section'], self.pages[link_page]['name']))\n\n def add_alert_callback(self, func, inp, states, color=\"warning\"):\n if isinstance(inp, list) and len(inp) != 1:\n raise Exception(\"para usar alertas globais apenas um input deve ser passado\")\n ver.checa_compatibilidade(func, [], inp, states)\n self.alert_funcs.append({'input': inp,\n 'states': states,\n 'function': func,\n 'color': color\n })\n\n def set_alert_callback(self):\n\n list_inputs = [y for x in self.alert_funcs for y in x['input']]\n list_states = [y for x in self.alert_funcs for y in x['states']]\n\n @self.app.callback([dash.dependencies.Output(self.id_main_alert, 'children'),\n dash.dependencies.Output(self.id_main_alert, 'is_open'),\n dash.dependencies.Output(self.id_main_alert, 'color')],\n [dash.dependencies.Input(i[0], i[1]) for i in list_inputs],\n [dash.dependencies.State(i[0], i[1]) for i in list_states])\n def alerta(*args):\n # print(args)\n ctx = dash.callback_context\n list_inputs = [y for x in self.alert_funcs for y in x['input']]\n list_states = [y for x in self.alert_funcs for y in x['states']]\n args_completo = [i[0] + '.' + i[1] for i in list_inputs + list_states]\n button_id = ctx.triggered[0][\"prop_id\"].split(\".\")[0]\n triggered = [i for i in self.alert_funcs if i['input'][0][0] == button_id]\n if len(triggered) > 0:\n triggered = triggered[0]\n inputs_triggered = triggered['input'][0][0] + '.' + triggered['input'][0][1]\n states_triggered = [i[0] + '.' 
+ i[1] for i in triggered['states']]\n args_triggered = [inputs_triggered] + states_triggered\n args_trig = [args[args_completo.index(a)] for a in args_triggered]\n return triggered['function'](*args_trig), True, triggered['color']\n # return button_id, 'True', 'warning'\n\n","sub_path":"dashboard_lib/app_initialization.py","file_name":"app_initialization.py","file_ext":"py","file_size_in_byte":20618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"469694905","text":"import numpy as np\nimport networkx as nx\n\nfrom matplotlib import pyplot as plt\nimport matplotlib\n\nfrom skimage.transform import resize\n\ndef downsample(array, amount):\n return resize(array,\n (int(array.shape[0] / amount),\n int(array.shape[1] / amount)))\n\ndef plot_graph(graph, ax=None, cmap='Spectral', labels=False, font_size=12, clusters=None, **kwargs):\n \"\"\"\n\n Parameters\n ----------\n graph : object\n A networkX or derived graph object\n\n ax : objext\n A MatPlotLib axes object\n\n cmap : str\n A MatPlotLib color map string. Default 'Spectral'\n\n Returns\n -------\n ax : object\n A MatPlotLib axes object. Either the argument passed in\n or a new object\n \"\"\"\n if ax is None:\n ax = plt.gca()\n\n cmap = matplotlib.cm.get_cmap(cmap)\n\n # Setup edge color based on the health metric\n colors = []\n for s, d, e in graph.edges_iter(data=True):\n if hasattr(e, 'health'):\n colors.append(cmap(e.health)[0])\n else:\n colors.append(cmap(0)[0])\n\n pos = nx.spring_layout(graph)\n nx.draw_networkx_nodes(graph, pos, ax=ax)\n nx.draw_networkx_edges(graph, pos, ax=ax)\n if labels:\n labels = dict((i, d['image_name']) for i, d in graph.nodes_iter(data=True))\n nx.draw_networkx_labels(graph, pos, labels, font_size=font_size, ax=ax)\n ax.axis('off')\n return ax\n\n\ndef plot_node(node, ax=None, clean_keys=[], index_mask=None, downsampling=1, **kwargs):\n \"\"\"\n Plot the array and keypoints for a given node.\n\n Parameters\n ----------\n node : object\n A Node object from which data is extracted\n\n ax : object\n A MatPlotLIb axes object\n\n clean_keys : list\n of strings of masking array names to apply\n\n kwargs : dict\n of MatPlotLib plotting options\n\n Returns\n -------\n ax : object\n A MatPlotLib axes object. 
Either the argument passed in\n or a new object\n \"\"\"\n\n if ax is None:\n ax = plt.gca()\n\n band = 1\n if 'band' in kwargs.keys():\n band = kwargs['band']\n kwargs.pop('band', None)\n\n array = node.get_array(band)\n\n if isinstance(downsampling, bool):\n downsampling = node['downsample_amount']\n\n array = downsample(array, downsampling)\n\n ax.set_title(node['image_name'])\n ax.margins(tight=True)\n ax.axis('off')\n\n if 'cmap' in kwargs:\n cmap = kwargs['cmap']\n else:\n cmap = 'Greys'\n\n ax.imshow(array, cmap=cmap)\n\n keypoints = node.get_keypoints(index=index_mask)\n\n if not keypoints.empty:\n marker = '.'\n if 'marker' in kwargs.keys():\n marker = kwargs['marker']\n kwargs.pop('marker', None)\n color = 'r'\n if 'color' in kwargs.keys():\n color = kwargs['color']\n kwargs.pop('color', None)\n ax.scatter(keypoints['x'], keypoints['y'], marker=marker, color=color, **kwargs)\n\n return ax\n\n\ndef plot_edge_decomposition(edge, ax=None, clean_keys=[], image_space=100,\n scatter_kwargs={}, line_kwargs={}, image_kwargs={}):\n\n if ax is None:\n ax = plt.gca()\n\n # Plot setup\n ax.set_title('Matching: {} to {}'.format(edge.source['image_name'],\n edge.destination['image_name']))\n ax.margins(tight=True)\n ax.axis('off')\n\n # Image plotting\n source_array = edge.source.get_array()\n destination_array = edge.destination.get_array()\n\n s_shape = source_array.shape\n d_shape = destination_array.shape\n\n y = max(s_shape[0], d_shape[0])\n x = s_shape[1] + d_shape[1] + image_space\n composite = np.zeros((y, x))\n composite_decomp = np.zeros((y, x), dtype=np.int16)\n\n composite[0: s_shape[0], :s_shape[1]] = source_array\n composite[0: d_shape[0], s_shape[1] + image_space:] = destination_array\n\n composite_decomp[0: s_shape[0], :s_shape[1]] = edge.smembership\n composite_decomp[0: d_shape[0], s_shape[1] + image_space:] = edge.dmembership\n\n if 'cmap' in image_kwargs:\n cmap = image_kwargs['cmap']\n else:\n cmap = 'Greys'\n\n matches, mask = edge.clean(clean_keys)\n\n source_keypoints = edge.source.get_keypoints(index=matches['source_idx'])\n destination_keypoints = edge.destination.get_keypoints(index=matches['destination_idx'])\n\n # Plot the source\n source_idx = matches['source_idx'].values\n s_kps = source_keypoints.loc[source_idx]\n ax.scatter(s_kps['x'], s_kps['y'], **scatter_kwargs, cmap='gray')\n\n # Plot the destination\n destination_idx = matches['destination_idx'].values\n d_kps = destination_keypoints.loc[destination_idx]\n x_offset = s_shape[1] + image_space\n newx = d_kps['x'] + x_offset\n ax.scatter(newx, d_kps['y'], **scatter_kwargs)\n\n ax.imshow(composite, cmap=cmap)\n ax.imshow(composite_decomp, cmap='spectral', alpha=0.35)\n # Draw the connecting lines\n color = 'y'\n if 'color' in line_kwargs.keys():\n color = line_kwargs['color']\n line_kwargs.pop('color', None)\n\n s_kps = s_kps[['x', 'y']].values\n d_kps = d_kps[['x', 'y']].values\n d_kps[:, 0] += x_offset\n\n for l in zip(s_kps, d_kps):\n ax.plot((l[0][0], l[1][0]), (l[0][1], l[1][1]), color=color, **line_kwargs)\n\n return ax\n\ndef plot_edge(edge, ax=None, clean_keys=[], image_space=100, downsampling=1,\n scatter_kwargs={}, line_kwargs={}, image_kwargs={}):\n \"\"\"\n Plot the correspondences for a given edge\n\n Parameters\n ----------\n edge : object\n A graph edge object\n\n ax : object\n A MatPlotLIb axes object\n\n clean_keys : list\n of strings of masking array names to apply\n\n image_space : int\n The number of pixels to insert between the images\n\n downsample : bool\n\n scatter_kwargs : dict\n of 
MatPlotLib arguments to be applied to the scatter plots\n\n line_kwargs : dict\n of MatPlotLib arguments to be applied to the lines connecting matches\n\n image_kwargs : dict\n of MatPlotLib arguments to be applied to the image rendering\n\n Returns\n -------\n ax : object\n A MatPlotLib axes object. Either the argument passed in\n or a new object\n \"\"\"\n\n if ax is None:\n ax = plt.gca()\n\n # Plot setup\n ax.set_title('Matching: {} to {}'.format(edge.source['image_name'],\n edge.destination['image_name']))\n ax.margins(tight=True)\n ax.axis('off')\n\n # Image plotting\n if isinstance(downsampling, bool):\n downsample_source = edge.source['downsample_amount']\n else:\n downsample_source = downsampling\n source_array = edge.source.get_array()\n source_array = downsample(source_array, downsample_source)\n\n if isinstance(downsampling, bool):\n downsample_destin = edge.destination['downsample_amount']\n else:\n downsample_destin = downsampling\n destination_array = edge.destination.get_array()\n destination_array = downsample(destination_array, downsample_destin)\n\n s_shape = source_array.shape\n d_shape = destination_array.shape\n\n y = max(s_shape[0], d_shape[0])\n x = s_shape[1] + d_shape[1] + image_space\n composite = np.zeros((y, x))\n\n composite[0: s_shape[0], :s_shape[1]] = source_array\n composite[0: d_shape[0], s_shape[1] + image_space:] = destination_array\n\n if 'cmap' in image_kwargs:\n image_cmap = image_kwargs['cmap']\n else:\n image_cmap = 'Greys'\n\n matches, mask = edge.clean(clean_keys)\n\n if not matches.empty:\n source_keypoints = edge.source.get_keypoints(index=matches['source_idx'])\n destination_keypoints = edge.destination.get_keypoints(index=matches['destination_idx'])\n\n # Plot the source\n source_idx = matches['source_idx'].values\n s_kps = source_keypoints.loc[source_idx]\n ax.scatter(s_kps['x'], s_kps['y'], **scatter_kwargs)\n\n # Plot the destination\n destination_idx = matches['destination_idx'].values\n d_kps = destination_keypoints.loc[destination_idx]\n x_offset = s_shape[1] + image_space\n newx = d_kps['x'] + x_offset\n ax.scatter(newx, d_kps['y'], **scatter_kwargs)\n\n # Draw the connecting lines\n color = 'y'\n if 'color' in line_kwargs.keys():\n color = line_kwargs['color']\n line_kwargs.pop('color', None)\n\n s_kps = s_kps[['x', 'y']].values\n d_kps = d_kps[['x', 'y']].values\n d_kps[:, 0] += x_offset\n\n for l in zip(s_kps, d_kps):\n ax.plot((l[0][0], l[1][0]), (l[0][1], l[1][1]), color=color, **line_kwargs)\n\n ax.imshow(composite, cmap=image_cmap)\n\n return ax\n\ndef cluster_plot(graph, ax=None, cmap='Spectral'): # pragma: no cover\n \"\"\"\n Parameters\n ----------\n graph : object\n A networkX or derived graph object\n\n ax : object\n A MatPlotLib axes object\n\n cmap : str\n A MatPlotLib color map string. 
Default 'Spectral'\n\n Returns\n -------\n ax : object\n A MatPlotLib axes object that was either passed in\n or a new axes object\n \"\"\"\n if ax is None:\n ax = plt.gca()\n\n if not hasattr(graph, 'clusters'):\n raise AttributeError('Clusters have not been computed.')\n\n cmap = matplotlib.cm.get_cmap(cmap)\n\n colors = []\n\n for i, n in graph.nodes_iter(data=True):\n for j in enumerate(graph.clusters):\n if i in graph.clusters.get(j[1]):\n colors.append(cmap(j[1])[0])\n continue\n\n nx.draw(graph, ax=ax, node_color=colors)\n return ax\n\ndef plot_matches(matches, a, b, extent=200):\n nsubplots = len(matches)\n figs = []\n for i, (idx, row) in enumerate(matches.iterrows()):\n fig, axes = plt.subplots(1, 2)\n sx = row.source_x\n sy = row.source_y\n dx = row.destination_x\n dy = row.destination_y\n\n suba = a.read_array(pixels=[int(sx-extent), int(sy-extent), int(2*extent), int(2*extent)])\n subb = b.read_array(pixels=[int(dx-extent), int(dy-extent), int(2*extent), int(2*extent)])\n\n axes[0].imshow(suba, cmap='Greys')\n axes[1].imshow(subb, cmap='Greys')\n figs.append(fig)\n\n return figs\n","sub_path":"autocnet/vis/graph_view.py","file_name":"graph_view.py","file_ext":"py","file_size_in_byte":10320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"540275789","text":"#!/usr/bin/env python\n# Author : Esraa Magdy\n\nimport rospy\nfrom nav_msgs.msg import Odometry\nfrom geometry_msgs.msg import PoseStamped, Twist, Vector3, Point ,PoseArray , TransformStamped, PointStamped\nfrom tf.transformations import euler_from_quaternion, quaternion_from_euler\nimport rospkg\nfrom ford_msgs.msg import Clusters\nfrom spencer_tracking_msgs.msg import DetectedPersons\nfrom tf import TransformListener \nimport tf2_geometry_msgs\nfrom visualization_msgs.msg import Marker, MarkerArray\nfrom std_msgs.msg import Float32, ColorRGBA, Int32\nclass AgentsStates:\n def __init__(self):\n self.node_name = rospy.get_name()\n self.tf_listener = TransformListener()\n # Re-publishing Robot's states in the format needed for the network\n self.odom_sub = rospy.Subscriber('/odom', Odometry, self.odom_callback)\n self.pose_pub = rospy.Publisher('/robot_pose', PoseStamped, queue_size=10) \n self.vel_pub = rospy.Publisher('/robot_velocity',Vector3, queue_size=10)\n # Re-publishing Pedestrians states in the format needed for the network\n self.ped_sub = rospy.Subscriber(\n '/spencer/perception_internal/detected_person_association/lasers_upper_body_fused',\n DetectedPersons,self.peds_callback)\n self.clusters_pub = rospy.Publisher('cluster/output/clusters',Clusters,queue_size=1)\n #self.pub_agent_markers = rospy.Publisher('~agent_markers',MarkerArray,queue_size=1)\n #publish goal \n self.goal_pub = rospy.Publisher('move_base_simple/goal',PoseStamped,queue_size=1)\n def odom_callback(self,msg):\n pose = PoseStamped()\n pose.header.stamp = rospy.Time.now()\n pose.header.frame_id = \"map\"\n pose.pose.position.x = msg.pose.pose.position.x\n pose.pose.position.y = msg.pose.pose.position.y\n pose.pose.position.z = msg.pose.pose.position.z\n pose.pose.orientation.x = msg.pose.pose.orientation.x\n pose.pose.orientation.y = msg.pose.pose.orientation.y\n pose.pose.orientation.z = msg.pose.pose.orientation.z\n pose.pose.orientation.w = msg.pose.pose.orientation.w \n self.pose_pub.publish(pose)\n \n vel = Vector3()\n vel.x = msg.twist.twist.linear.x\n vel.y = msg.twist.twist.linear.y\n vel.z = 0\n self.vel_pub.publish(vel)\n\n def peds_callback(self,peds_msg):\n peds_cluster = 
Clusters()\n peds_cluster.header.stamp = rospy.Time.now()\n peds_cluster.header.frame_id = \"map\"\n peds_cluster.labels =[]\n peds_cluster.counts =[]\n peds_cluster.mean_points=[]\n peds_cluster.max_points=[]\n peds_cluster.min_points=[]\n peds_cluster.pointclouds=[]\n peds_cluster.velocities=[]\n peds_cluster.num.data = int(len(peds_msg.detections))\n \n #markers = MarkerArray()\n \n #print(len(peds_msg.detections))\n \n if len(peds_msg.detections) > 0 :\n for i in range(len(peds_msg.detections)):\n \n t = self.tf_listener.getLatestCommonTime(\"/map\", \"/base_footprint\")\n pose_in_baseframe = PointStamped()\n pose_in_baseframe.header.frame_id = \"base_footprint\"\n pose_in_baseframe.header.stamp = rospy.Time.now()\n pose_in_baseframe.point = peds_msg.detections[i].pose.pose.position\n pose_in_map = PointStamped()\n pose_in_map.header.frame_id = \"map\"\n pose_in_map.header.stamp = rospy.Time.now()\n\n\n pose_in_map = self.tf_listener.transformPoint(\"/map\", pose_in_baseframe)\n peds_cluster.mean_points.append(pose_in_map.point)\n ped_vel = Vector3()\n ped_vel.x = 0.4\n ped_vel.y = 0.4\n ped_vel.z = 0.0\n peds_cluster.velocities.append(ped_vel)\n peds_cluster.labels.append(i)\n '''\n marker = Marker()\n marker.header.stamp = rospy.Time.now()\n marker.header.frame_id = 'map'\n marker.ns = 'other_agent'\n marker.id = i\n marker.type = marker.CYLINDER\n marker.action = marker.ADD\n marker.pose.position.x = pose_in_map.point.x\n marker.pose.position.y = pose_in_map.point.y\n # marker.pose.orientation = orientation\n marker.scale = Vector3(x=0.2,y=0.2,z=0.2)\n marker.color = ColorRGBA(r=1.0,g=0.4,a=1.0)\n marker.lifetime = rospy.Duration(0.1)\n markers.markers.append(marker)\n '''\n\n self.clusters_pub.publish(peds_cluster)\n #self.pub_agent_markers.publish(markers)\n\n \n goal = PoseStamped()\n goal.header.stamp = rospy.Time.now()\n goal.header.frame_id = \"map\"\n goal.pose.position.x = 5.0\n goal.pose.position.y = 0.0\n self.goal_pub.publish(goal) \n \n \nif __name__ == '__main__':\n rospy.init_node('agents_states', anonymous=False)\n robot = AgentsStates()\n rospy.spin()","sub_path":"scripts/agents_state.py","file_name":"agents_state.py","file_ext":"py","file_size_in_byte":5130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"566070747","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.4 (3310)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/xbus/monitor/views/saml2_login.py\n# Compiled at: 2016-06-27 04:20:00\n# Size of source mod 2**32: 7300 bytes\nfrom pyramid.httpexceptions import HTTPForbidden\nfrom pyramid.httpexceptions import HTTPFound\nfrom pyramid.response import Response\nfrom pyramid.security import authenticated_userid\nfrom pyramid.security import forget\nfrom pyramid.security import NO_PERMISSION_REQUIRED\nfrom pyramid.security import remember\nfrom pyramid.view import forbidden_view_config\nfrom xbus.monitor.utils.singleton import Singleton\nfrom xbus.monitor.i18n import req_l10n\ntry:\n import lasso\n lasso_loaded = True\nexcept ImportError:\n lasso = None\n lasso_loaded = False\n\nclass LassoSingle(object):\n __metaclass__ = Singleton\n\n def __init__(self, sp_meta, sp_key, idp_meta):\n \"\"\"\n @param sp_meta: filename of the metadata file for the service provider\n @type sp_meta: string\n\n @param sp_key: filename of the service provider key\n @type sp_key: string\n\n @param idp_meta: filename of the Identity provider 
metadata.xml file\n @type idp_meta: string\n \"\"\"\n self.reads, errors = self._LassoSingle__read_conf_file(sp_meta, sp_key, idp_meta)\n if errors:\n raise HTTPForbidden(detail='\\n'.join(errors))\n self.sp_meta_xml = self.reads[0]\n self.sp_key = self.reads[1]\n self.idp_meta_xml = self.reads[2]\n self.server = lasso.Server.newFromBuffers(self.sp_meta_xml, self.sp_key)\n self.server.addProviderFromBuffer(lasso.PROVIDER_ROLE_IDP, self.idp_meta_xml)\n\n @staticmethod\n def __read_conf_file(*args):\n res = []\n errs = []\n for arg in args:\n try:\n with open(arg, 'r') as (f):\n res.append(f.read())\n except IOError as e:\n errs.append(str(e))\n\n return (\n res, errs)\n\n def get_login(self):\n \"\"\"create a new login instance for each request.\n \"\"\"\n return lasso.Login(self.server)\n\n\ndef _login_referrer(request, params):\n \"\"\"Extract a \"came_from\" parameter from the specified dictionary or\n just provide an URL to the home page.\n \"\"\"\n return params.get('came_from', request.path)\n\n\ndef forbidden_view(exc, request):\n \"\"\"Redirect unlogged users to the login view; answer with a \"forbidden\"\n message otherwise.\n \"\"\"\n if not request.authenticated_userid:\n return HTTPFound(location=request.route_url('login', _query=(('came_from', request.path),)))\n _ = req_l10n(request)\n return Response(_('You are not allowed'), status='403 Forbidden')\n\n\ndef login_view(request):\n \"\"\"Redirect to either the login page or the previous page.\n Request params:\n * came_from (optional): The page to redirect to when logged in.\n \"\"\"\n login_referrer = _login_referrer(request, request.params)\n if authenticated_userid(request):\n return HTTPFound(location=login_referrer)\n request.session['came_from'] = login_referrer\n sp_meta = request.registry.settings['saml2.sp_meta']\n sp_key = request.registry.settings['saml2.priv_key']\n idp_meta = request.registry.settings['saml2.idp_meta']\n login = LassoSingle(sp_meta, sp_key, idp_meta).get_login()\n login.initAuthnRequest()\n login.request.nameIdPolicy.format = None\n login.request.nameIdPolicy.allowCreate = True\n login.buildAuthnRequestMsg()\n return HTTPFound(location=login.msgUrl)\n\n\ndef login_metadata_view(request):\n with open(request.registry.settings['saml2.sp_meta'], 'r') as (sp_meta_file):\n sp_meta = sp_meta_file.read()\n request.response.content_type = 'text/xml'\n return sp_meta\n\n\ndef login_success_view(request):\n \"\"\"Called when the user has been redirected back to our site from the SAML2\n provider.\n Conclude the handshake, fetch some information (such as the user name,\n security groups...) 
and redirect to the home page.\n \"\"\"\n _ = req_l10n(request)\n sp_meta = request.registry.settings['saml2.sp_meta']\n sp_key = request.registry.settings['saml2.priv_key']\n idp_meta = request.registry.settings['saml2.idp_meta']\n login = LassoSingle(sp_meta, sp_key, idp_meta).get_login()\n saml_response = request.params.get('SAMLResponse', None)\n if not saml_response:\n raise HTTPForbidden('%s: %s' % (\n _('Login error'), _('No \"SAMLResponse\" parameter')))\n try:\n login.processAuthnResponseMsg(saml_response)\n except (lasso.DsError, lasso.ProfileCannotVerifySignatureError):\n raise HTTPForbidden('%s: %s' % (\n _('Login error'), _('Invalid signature')))\n except lasso.Error as e:\n raise HTTPForbidden('%s: %s' % (_('Login error'), str(e)))\n\n try:\n login.acceptSso()\n except lasso.Error as e:\n raise HTTPForbidden('%s: %s' % (_('Login error'), str(e)))\n\n attributes = {}\n for att_statement in login.assertion.attributeStatement:\n for at in att_statement.attribute:\n values = attributes.setdefault(at.name, [])\n for value in at.attributeValue:\n content = [v.exportToXml() for v in value.any]\n content = ''.join(content)\n values.append(content)\n\n roles = attributes.get('role')\n if not roles:\n raise HTTPForbidden('%s: %s' % (\n _('Login error'), _('The authentic login is not in a group')))\n request.session['authentic_roles'] = roles\n login_referrer = _login_referrer(request, request.session)\n headers = remember(request, login.assertion.subject.nameId.content)\n return HTTPFound(location=login_referrer, headers=headers)\n\n\ndef logout_view(request):\n \"\"\"Just empty the session and let the client handle this.\"\"\"\n request.session.clear()\n request.response.headerlist.extend(forget(request))\n return {'auth_kind': request.registry.settings.auth_kind}\n\n\ndef setup(config):\n \"\"\"Setup SAML2 auth - to be called when the app starts.\"\"\"\n if not lasso_loaded:\n raise Exception('SAML2 enabled in settings but python-lasso could not be loaded.\\nDownload Lasso from .')\n config.add_route('login', '/login')\n config.add_route('login_metadata', '/login_metadata')\n config.add_route('login_success', '/login_success')\n config.add_route('logout', '/logout')\n\n def add_view(view, **kwargs):\n config.add_view(view, permission=NO_PERMISSION_REQUIRED, http_cache=0, **kwargs)\n\n add_view(login_view, route_name='login')\n add_view(login_metadata_view, route_name='login_metadata', renderer='string')\n add_view(login_success_view, route_name='login_success')\n add_view(logout_view, route_name='logout', renderer='json')\n forbidden_view_config()(forbidden_view)","sub_path":"pycfiles/xbus.monitor-0.2.1-py3.4/saml2_login.cpython-34.py","file_name":"saml2_login.cpython-34.py","file_ext":"py","file_size_in_byte":6901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"248748526","text":"# An ECB / CBC Detection Oracle\nimport os \nfrom random import randrange\nimport ten \n\ndef randomKEY():\n return os.urandom(16)\n\ndef encryption_oracle(msg, mode, key=randomKEY()):\n MSG = os.urandom(randrange(5,11)) + msg + os.urandom(randrange(5,11))\n if mode==\"CBC\":\n res = ten.encrypt_aes_128_cbc(MSG, os.urandom(16), key)\n else:\n import nine\n MSG = nine.PKCS(MSG,16)\n res = ten.encrypt_aes_128_ebc(MSG, key)\n return res\n\ndef test_ecb_128(ctxt): \n num_blocks = len(ctxt)//16\n return len(set([ctxt[i*16:(i+1)*16] for i in range(num_blocks)])) < num_blocks\n\nif randrange(0,2):\n cipher = encryption_oracle(b\"A\"*50, 
mode=\"ECB\")\nelse:\n cipher = encryption_oracle(b\"A\"*50, mode=\"CBC\") \nprint(['ECB' if test_ecb_128(cipher) else 'CBC'])\n\n\n\n\n","sub_path":"cryptopals/set2/ele.py","file_name":"ele.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"330501034","text":"from django.core.management.base import BaseCommand, CommandError\n\nfrom manager.models import Episode\n\nclass Command(BaseCommand):\n help = 'Process .mp3 and create a segment'\n\n def add_arguments(self, parser):\n parser.add_argument('--episode-id', nargs = '+')\n parser.add_argument('-r', '--reset', action='store_true', default=False)\n\n def handle(self, *args, **options):\n all_episodes_ids = Episode.objects.values_list('id', flat = True)\n episode_ids = options['episode_id'] or all_episodes_ids\n\n for pk in episode_ids:\n try:\n episode = Episode.objects.get(pk = pk)\n except Episode.DoesNotExist:\n raise CommandError('Episode \"{}\" does not exist'.format(pk))\n\n episode.analyze(reset = options['reset'])\n","sub_path":"manager/management/commands/analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"322178580","text":"\"\"\"Deploy any trained TF model for inference.\n\nThis script optimizes a trained model by quantizing the weights and converts trained model\nto servable TF Serving Model.\n\"\"\"\nfrom __future__ import print_function\n\nimport os\nimport tensorflow as tf\nfrom ml_service.utils.converter import load_graph_from_pb\n\n# TF Libraries to export model into .pb file\nfrom tensorflow.python.client import session\nfrom tensorflow.python.saved_model import signature_constants\nfrom tensorflow.core.protobuf import rewriter_config_pb2\nfrom tensorflow.tools.graph_transforms import TransformGraph\n\n\ndef _main_():\n # #################\n # Setup export path\n ###################\n # @TODO: create Argument Parse\n base_dir = './ml_service/object_detection'\n model_filename = './ml_service/object_detection/faster_rcnn_inception_resnet_v2_atrous_coco/frozen_inference_graph.pb'\n\n version = 1\n model_name = 'faster_rcnn_inception_resnet_v2_atrous_coco'\n output_dir = os.path.join(base_dir, model_name)\n export_path = os.path.join(output_dir, str(version))\n\n # ######################\n # Interference Pipeline\n # ######################\n input_names = 'image_tensor'\n output_names = ['detection_boxes', 'detection_classes', 'detection_scores', 'num_detections']\n\n with tf.Session() as sess:\n input_tensor = tf.placeholder(dtype=tf.uint8, shape=(None, None, None, 3), name=input_names)\n\n # ###################\n # load frozen graph\n # ###################\n graph_def = load_graph_from_pb(model_filename)\n outputs = tf.import_graph_def(\n graph_def,\n input_map={'image_tensor': input_tensor},\n return_elements=output_names,\n name='')\n outputs = [sess.graph.get_tensor_by_name(ops.name + ':0')for ops in outputs]\n outputs = dict(zip(output_names, outputs))\n\n # #####################\n # Quantize Frozen Model\n # #####################\n transforms = [\"add_default_attributes\",\n \"quantize_weights\", \"round_weights\",\n \"fold_batch_norms\", \"fold_old_batch_norms\"]\n\n quantized_graph = TransformGraph(\n input_graph_def=graph_def,\n inputs=input_names,\n outputs=output_names,\n transforms=transforms)\n\n # #####################\n # Export to TF Serving#\n # #####################\n # Reference: 
https://github.com/tensorflow/models/tree/master/research/object_detection\n\n with tf.Graph().as_default():\n tf.import_graph_def(quantized_graph, name='')\n\n # Optimizing graph\n rewrite_options = rewriter_config_pb2.RewriterConfig(layout_optimizer=True)\n rewrite_options.optimizers.append('pruning')\n rewrite_options.optimizers.append('constfold')\n rewrite_options.optimizers.append('layout')\n graph_options = tf.GraphOptions(rewrite_options=rewrite_options, infer_shapes=True)\n\n # Build model for TF Serving\n config = tf.ConfigProto(graph_options=graph_options)\n\n # @TODO: add XLA for higher performance (AOT for ARM, JIT for x86/GPUs)\n # https://www.tensorflow.org/performance/xla/\n # config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1\n\n with session.Session(config=config) as sess:\n builder = tf.saved_model.builder.SavedModelBuilder(export_path)\n tensor_info_inputs = {'inputs': tf.saved_model.utils.build_tensor_info(input_tensor)}\n tensor_info_outputs = {}\n for k, v in outputs.items():\n tensor_info_outputs[k] = tf.saved_model.utils.build_tensor_info(v)\n\n detection_signature = (\n tf.saved_model.signature_def_utils.build_signature_def(\n inputs=tensor_info_inputs,\n outputs=tensor_info_outputs,\n method_name=signature_constants.PREDICT_METHOD_NAME))\n\n builder.add_meta_graph_and_variables(\n sess, [tf.saved_model.tag_constants.SERVING],\n signature_def_map={'predict_images': detection_signature,\n signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: detection_signature,\n },\n )\n builder.save()\n\n print(\"\\n\\nModel is ready for TF Serving. (saved at {}/saved_model.pb)\".format(export_path))\n\n\nif __name__ == '__main__':\n _main_()\n","sub_path":"deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":4443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"380339554","text":"\"\"\"\nLet's call an array A a mountain if the following properties hold:\n\n A.length >= 3\n There exists some 0 < i < A.length - 1 such that A[0] < A[1] < ... A[i-1] < A[i] > A[i+1] > ... > A[A.length - 1]\n\nGiven an array that is definitely a mountain, return any i such that A[0] < A[1] < ... A[i-1] < A[i] > A[i+1] > ... 
> A[A.length - 1].\n\nExample 1:\n\nInput: [0,1,0]\nOutput: 1\n\nExample 2:\n\nInput: [0,2,1,0]\nOutput: 1\n\n\"\"\"\n\ndef peakIndexInMountainArray(nums):\n\tlow,high=0,len(nums)-1\n\twhile low<=high:\n\t\tmid=(low+high)//2\n\t\tif nums[mid]>nums[mid-1] and nums[mid]>nums[mid+1]:\n\t\t\treturn mid\n\t\telif nums[mid]/result', methods=['GET'])\n@jwt_required\ndef get_results(office_id):\n \"\"\" Gets results \"\"\"\n\n filtered = Vote().get_all(\n \"\"\"\n SELECT concat_ws(' ', users.firstname, users.lastname) AS candidate,\n offices.name as office,\n (SELECT COUNT(*)\n FROM votes AS p\n WHERE p.candidate = e.candidate\n GROUP BY p.candidate\n ) AS results,\n (\n SELECT parties.name FROM candidates as h\n INNER JOIN parties ON parties.id = h.party\n WHERE h.id = e.candidate\n ) as party, passport_url\n FROM votes AS e\n INNER JOIN users ON users.id = e.candidate\n INNER JOIN offices ON offices.id = e.office\n WHERE office = '{}'\n GROUP BY e.candidate, users.firstname, users.lastname, offices.name,\n users.passport_url\n ORDER BY results DESC\n \"\"\".format(office_id)\n )\n\n return response('Successfully retreived office results', 200, filtered)\n\n\n@bp.route('/results', methods=['GET'])\n@jwt_required\ndef get_all_results():\n \"\"\" Gets results \"\"\"\n\n filtered = Vote().get_all(\n \"\"\"\n SELECT concat_ws(' ', users.firstname, users.lastname) AS candidate,\n (SELECT COUNT(*)\n FROM votes AS p\n WHERE p.candidate = e.candidate\n GROUP BY p.candidate\n ) AS results,\n offices.name as office,\n (\n SELECT parties.name FROM candidates as h\n INNER JOIN parties ON parties.id = h.party\n WHERE h.id = e.candidate\n ) as party,\n users.passport_url\n FROM votes AS e\n INNER JOIN users ON users.id = e.candidate\n INNER JOIN offices ON offices.id = e.office\n GROUP BY e.candidate, users.firstname, users.lastname, offices.name,\n users.passport_url\n ORDER BY results DESC\n \"\"\"\n )\n\n return response('Successfully retreived all election results', 200, filtered)\n","sub_path":"app/v2/views/votes.py","file_name":"votes.py","file_ext":"py","file_size_in_byte":4686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"275814621","text":"import random\nimport json\nfrom constants import *\nfrom pycocotools import mask\nfrom argparse import ArgumentParser\nfrom tqdm import tqdm\nimport os\n\ndef parse_script_args():\n \"\"\"Parse command line arguments.\n\n Returns:\n args (Namespace): Parsed command line arguments\n\n \"\"\"\n parser = ArgumentParser()\n\n parser.add_argument('--subset_len',\n type=int, default=100,\n required=False,\n help='Length of subset')\n\n parser.add_argument('--pseudo_labels_type',\n type=str, default=\"cams\",\n required=False,\n help='Type of pseudo label cam/irnet') \n\n\n args = parser.parse_args()\n return args\n\ndef create_subset(subset_len, pseudo_labels_type):\n output_train_set = {}\n irnet_seg_labels_file = CHEXPERT_PARENT_TRAIN_CAMS_DIR / f\"pseudo_seg_labels_encoded_{pseudo_labels_type}.json\"\n curr_pos_images = 0\n\n with open(irnet_seg_labels_file) as f:\n seg_labels = json.load(f)\n \n for key in tqdm(random.sample(seg_labels.keys(), subset_len)):\n output_train_set[key] = seg_labels[key]\n \n with open(os.path.join(CHEXPERT_PARENT_TRAIN_CAMS_DIR, f\"pseudo_seg_labels_{pseudo_labels_type}_{subset_len}_cxrs.json\"), \"w\") as f:\n json.dump(output_train_set, f)\n\nif __name__ == \"__main__\":\n args = parse_script_args()\n create_subset(args.subset_len, 
args.pseudo_labels_type)","sub_path":"chexpert-model/create_train_set_subsets.py","file_name":"create_train_set_subsets.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"629211673","text":"### Author: Edward Huang\n\n### Counts the number of nodes in a network file.\n### We also need to make sure that we have some type of edge with weight\n### larger than 1.\n\nEDGE_MULTIPLIER = 100\n\nif __name__ == '__main__':\n f = open('./data/composite_network.txt', 'r')\n all_nodes = set([])\n edge_dct = {}\n for i, line in enumerate(f):\n node_a, node_b, weight = line.split()\n all_nodes.add(node_a)\n all_nodes.add(node_b)\n if (node_b, node_a) not in edge_dct:\n edge_dct[(node_a, node_b)] = float(weight) * EDGE_MULTIPLIER\n f.close()\n\n out = open('./data/human_net.txt', 'w')\n out.write('0\\n%d\\n' % len(all_nodes))\n for (node_a, node_b) in edge_dct:\n weight = edge_dct[(node_a, node_b)]\n out.write('%s\\t%s\\t%f\\n' % (node_a, node_b, weight))\n out.write('%s\\t%s\\t%f\\n' % (node_b, node_a, weight))\n out.close()\n\n# ./sim_anneal/bin/cs-grn 100 1 0 ./data/orth.txt 1 ./data/med_yeast_net.txt -t 0.01 2> log > clusters_go.txt\n","sub_path":"create_clustering_input.py","file_name":"create_clustering_input.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"399138599","text":"from flask import Flask, render_template\n\napp = Flask(__name__)\n\n@app.route('/')\n@app.route('/home')\ndef home(cols = None, counter = 0):\n grid = getGrid('planets.csv')\n grid = cleanGrid(grid, getDelete(grid))\n header = grid[0]\n return render_template('home.html',\n grid = grid[1:], headerRow = header)\n\n@app.route('/planet/test')\ndef planetpage(stats = None):\n return str(allPlanetData)\n \ndef dictData(grid):\n data = {}\n for row in grid:\n key = row[0]\n data[key] = [] \n for elem in row:\n data[key].append(elem)\n return data \n\ndef getDelete(grid):\n ans = []\n exclude = ['updated', 'K', 'albedo', 'omega',\n 'tperi', 'molecules', 'ra', 'dec']\n headers = grid[0]\n for i, elem in enumerate(headers):\n if elem.strip() in exclude or '_' in elem:\n ans.append(i)\n return ans\n \ndef cleanGrid(grid, toDel):\n newgrid = [[] for x in range(len(grid))]\n for rowi, row in enumerate(grid):\n for coli, col in enumerate(row):\n if coli not in toDel and len(newgrid[rowi]) < 6:\n newgrid[rowi].append(col)\n return newgrid\n\ndef getGrid(filename = 'planets.csv'):\n planets = open(filename, 'r').readlines()\n grid = [planets[x].split(',') for x in range(len(planets))]\n return grid\n\nif __name__=='__main__':\n allPlanetData = dictData(getGrid())\n app.run(debug=True)\n\n\n","sub_path":"6/intro-proj1/Hubert-Eric/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"177478961","text":"from flask_restful import Resource, reqparse\n\nfrom db import db\nfrom models.user import User\n\nclass UserRegister(Resource):\n parser = reqparse.RequestParser()\n parser.add_argument('username',\n type=str,\n required=True,\n help=\"This field cannot be left blank!\"\n )\n parser.add_argument('password',\n type=str,\n required=True,\n help=\"This field cannot be left blank!\"\n )\n\n def post(self):\n data = UserRegister.parser.parse_args()\n if User.find_by_username(data['username']):\n return {'message':\"A username '{}' 
already exists.\".format(data['username'])},400\n\n user = User(**data)\n user.save()\n \n return {\"message\":\"User created successfully.\"},201\n\n","sub_path":"resources/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"572873637","text":"import boto3\nfrom pprint import pprint\nsts_client=boto3.client('sts')\niam_cli=boto3.client('iam')\nresponse=iam_cli.list_roles()\n#user_arn=response.get('User').get('Arn')\nfor each_role in response.get('Roles'):\n\tprint(\"The Role Name is:{}\\nThe Role ID is: {}\\n\".format(each_role.get('RoleName'),each_role.get('RoleId')))\n#print(user_arn)\n#print(dir(sts_client))\n#resp=sts_client.assume_role()\n","sub_path":"Udemy/Aws_boto3_refresh/Section-7-ec2_collections/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"510917857","text":"\nfrom time import *\nimport os,sys\nimport RPi.GPIO as GPIO\nfrom gpiozero import LED\nimport paho.mqtt.client as mqtt\nimport urlparse\nimport firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import firestore\n\ncred = credentials.Certificate(\"Path to firebase credentials JSON File\")\nfirebase_admin.initialize_app(cred)\n\ndb = firestore.client()\ndist_ref = db.collection(u'distance').document(u'dustbin')\n\ndef show_distance():\n GPIO.output(TRIG, False)\n #print(\"Waiting For Sensor To Settle\")\n sleep(0.05)\n\n GPIO.output(TRIG, True)\n sleep(0.00001)\n GPIO.output(TRIG, False)\n\n while GPIO.input(ECHO)==0:\n pulse_start = time()\n\n while GPIO.input(ECHO)==1:\n pulse_end = time()\n\n pulse_duration = pulse_end - pulse_start\n distance = pulse_duration * 17150\n distance = round(distance, 2)\n\n print (\"--> Distance:\",int(distance),\"cm\")\n\n return int(distance)\n\n\n\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\n\nTRIG = 23\nECHO = 18\n\nred = 17\ngreen = 4\n\n\nGPIO.setup(TRIG,GPIO.OUT)\nGPIO.setup(ECHO,GPIO.IN)\n\nGPIO.setup(red,GPIO.OUT)\nGPIO.setup(green,GPIO.OUT)\n\ni = 0\ndist = []\n\n\nwhile True:\n result = show_distance()\n dist_ref.update({u'value': result})\n if result < 9:\n GPIO.output(green, False)\n GPIO.output(red, True)\n else:\n GPIO.output(red, False)\n GPIO.output(green, True)\n i=i+1\n sleep(0.5)\n\n# print(\"Added to firebase\")\n","sub_path":"IoT Projects/Smart-Dustbin-IoT-Project/smart-dustbin-python-code/dustbin_firebase.py","file_name":"dustbin_firebase.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"36089378","text":"#function for pattern # d.\r\ndef func(num):\r\n if num%2==0:\r\n return num*2\r\n else:\r\n return num*3\r\n\r\nl1=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\r\n\r\n#Pattern a. [5, 6, 7, 8, 9, 10, 11, 12, 13, 14]\r\nm1=map(lambda x:x+5,l1)\r\nprint(list(m1))\r\n\r\n#Patern b. [1, 3, 5, 7, 9]\r\nm2=filter(lambda x: x%2==1,l1)\r\nprint(list(m2))\r\n\r\n#Pattern c. [1, 9, 81, 729, 6561]\r\nm3=map(lambda x:9**x,list(filter(lambda x:x<5,l1)))\r\nprint(list(m3))\r\n\r\n#Pattern d. [0, 3, 4, 9, 8, 15, 12, 21, 16, 27]\r\n#map function calls a function above which returns num*2 for even numbers and num*3 for odd numbers\r\nm4=map(func,l1)\r\nprint(list(m4))\r\n\r\n#Pattern e. 
[1, 3, 10, 14, 18]\r\n#first filter odd numbers and than multiply by 2 if number is greater than 3\r\nm5=map(lambda x:x*2 if x >3 else x,list(filter(lambda x:x%2==1,l1)))\r\nprint(list(m5))","sub_path":"SOlutions/hw4-2.py","file_name":"hw4-2.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"510828980","text":"# -*- coding: utf-8 -*-\n\"\"\"\nDriver model for Landlab Model 800 BasicRt\n\nKaty Barnhart March 2017\n\"\"\"\n# import resource calculation modules and start logging usage\nimport resource, time\nstart_time = time.time()\nusage_file = open('usage.txt', 'w')\nusage_file.write(time.ctime()+'\\n')\n\n# import remaining required modules. \nimport sys\nimport os\nimport shutil\nfrom subprocess import call\nfrom yaml import load\n\nfrom erosion_model import BasicRt as Model\nfrom metric_calculator import MetricDifference\nfrom landlab import imshow_grid\n\n# set files and directories used to set input templates. \n# Files and directories.\nstart_dir = sys.argv[1]\ninput_file = 'inputs.txt'\ninput_template = 'inputs_template.txt'\n\n# Use `dprepro` (from $DAKOTA_DIR/bin) to substitute parameter\n# values from Dakota into the SWASH input template, creating a new\n# inputs.txt file.\nshutil.copy(os.path.join(start_dir, input_template), os.curdir)\ncall(['dprepro', sys.argv[2], input_template, input_file])\ncall(['rm', input_template])\n\n# now prepare to run landlab. \n# load the params file to get the correct file names\nwith open(input_file, 'r+') as f:\n # load params file\n params = load(f)\n\n# get filenames/etc. \nmodern_dem_name = params['modern_dem_name']\noutlet_id = params['outlet_id']\nmodern_dem_metric_file = params['modern_dem_metric_file']\nmodern_dem_chi_file = params['modern_dem_chi_file']\nchi_mask_dem_name = params['chi_mask_dem_name']\noutlet_id = params['outlet_id']\n\n#plan for output files\noutput_fields =['topographic__elevation']\n\n# write usage\nusage = resource.getrusage(resource.RUSAGE_SELF)\nusage_file.write('\\n\\nUsage Before Running Model: \\n')\nfor name, desc in [\n ('ru_utime', 'User time'),\n ('ru_stime', 'System time'),\n ('ru_maxrss', 'Max. Resident Set Size'),\n ('ru_ixrss', 'Shared Memory Size'),\n ('ru_idrss', 'Unshared Memory Size'),\n ('ru_isrss', 'Stack Size'),\n ('ru_inblock', 'Block inputs'),\n ('ru_oublock', 'Block outputs'),\n ]:\n usage_file.write('%-25s (%-10s) = %s \\n'%(desc, name, getattr(usage, name)))\n\n#run the model\nmodel = Model(input_file)\nmodel.run(output_fields=output_fields)\n\nusage = resource.getrusage(resource.RUSAGE_SELF)\nusage_file.write('\\n\\nUsage After Running Model: \\n')\nfor name, desc in [\n ('ru_utime', 'User time'),\n ('ru_stime', 'System time'),\n ('ru_maxrss', 'Max. Resident Set Size'),\n ('ru_ixrss', 'Shared Memory Size'),\n ('ru_idrss', 'Unshared Memory Size'),\n ('ru_isrss', 'Stack Size'),\n ('ru_inblock', 'Block inputs'),\n ('ru_oublock', 'Block outputs'),\n ]:\n usage_file.write('%-25s (%-10s) = %s \\n'%(desc, name, getattr(usage, name)))\n\nmodel_dem_name = model.params['output_filename'] + \\\n str(model.iteration-1).zfill(4) + \\\n '.nc'\n\n# calculate metrics\nmd = MetricDifference(model_dem_name=model_dem_name,\n modern_dem_metric_file = modern_dem_metric_file, \n modern_dem_chi_file = modern_dem_chi_file, \n outlet_id = outlet_id,\n chi_mask_dem_name=chi_mask_dem_name)\nmd.run()\n\n# write out metrics as \"ouputs_for_analysis.txt' and as Dakota expects. 
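For the map/filter patterns in hw4-2.py above, list comprehensions express the same five results more directly; a quick equivalent sketch with the same l1:

l1 = list(range(10))
print([x + 5 for x in l1])                                # pattern a
print([x for x in l1 if x % 2 == 1])                      # pattern b
print([9 ** x for x in l1 if x < 5])                      # pattern c
print([x * 2 if x % 2 == 0 else x * 3 for x in l1])       # pattern d
print([x * 2 if x > 3 else x for x in l1 if x % 2 == 1])  # pattern e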
\noutput_bundle = md.dakota_bundle()\nwith open('outputs_for_analysis.txt', 'a') as fp:\n for metric in output_bundle:\n fp.write(str(metric)+'\\n')\n\n# write out residual. \nwith open(sys.argv[3], 'w') as fp:\n for metric in output_bundle:\n fp.write(str(metric)+'\\n')\n\ncur_working = os.getcwd()\ncur_working_split = cur_working.split(os.path.sep)\ncur_working_split.append('png')\ntry:\n cut_ind = cur_working_split.index('results')+3\nexcept:\n cut_ind = cur_working_split.index('study3py')+3\n\nfig_name = '.'.join(cur_working_split[cut_ind:])\n\nimshow_grid(model.grid, model.z, vmin=1230, vmax=1940, cmap='viridis', output=fig_name)\n\nusage = resource.getrusage(resource.RUSAGE_SELF)\nusage_file.write('\\n\\nUsage At End of Job: \\n')\nfor name, desc in [\n ('ru_utime', 'User time'),\n ('ru_stime', 'System time'),\n ('ru_maxrss', 'Max. Resident Set Size'),\n ('ru_ixrss', 'Shared Memory Size'),\n ('ru_idrss', 'Unshared Memory Size'),\n ('ru_isrss', 'Stack Size'),\n ('ru_inblock', 'Block inputs'),\n ('ru_oublock', 'Block outputs'),\n ]:\n usage_file.write('%-25s (%-10s) = %s \\n'%(desc, name, getattr(usage, name)))\n\nend_time = time.time()\nusage_file.write('\\n\\n'+time.ctime()+'\\n')\nusage_file.write('Elapsed Time: '+str(end_time-start_time)+'\\n')\n\nusage_file.close()\n","sub_path":"calibration/sew/HYBRID/model_800/lowering_history_1.pg24f_0etch/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":4440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"269928089","text":"# Databricks notebook source\n# MAGIC %md\n# MAGIC ### TM1 DTC DISCOUNT (OMNI Planning)\n# MAGIC TM1 data going into the OMNI AAS dimensions.\n# MAGIC \n# MAGIC | Date | Author | Work Item | Notes |\n# MAGIC |-----------------------------------|\n# MAGIC | 2020-08-28 | Matt Kleiman | OMNI-154320 | Initial notebook based on mapping |\n\n# COMMAND ----------\n\n# DBTITLE 1,Initialize\nfrom pyspark.sql.functions import col, lit, current_timestamp, expr, coalesce, udf, date_format\nfrom pyspark.sql.types import IntegerType\nfrom datetime import datetime, date\n\n# Parameters\ndbutils.widgets.text('initFlg', '', '')\ndbutils.widgets.text(\"schmNm\", \"\", \"\")\ndbutils.widgets.text(\"tblNm\", \"\", \"\")\n\n# Base variables\nschema_name = dbutils.widgets.get(\"schmNm\").lower()\ntable_name = dbutils.widgets.get(\"tblNm\").lower()\nschema_table_name = f'{schema_name}.{table_name}'\n\ntable_exists = table_name in sqlContext.tableNames(schema_name)\noverwrite = dbutils.widgets.get('initFlg') == 'X'\n\n# COMMAND ----------\n\n# MAGIC %run\n# MAGIC /Users/svceimdbrx@columbia.com/edw_admin/shared_functions\n\n# COMMAND ----------\n\n# DBTITLE 1,Prepare dataset\npk_list = [\n 'SEAS_CD',\n 'RGN_CD',\n 'RGNL_OFRNG_MTHD_DESC',\n 'STRAT_CATG_DESC',\n 'SBCHNL_DESC',\n 'CHNL_DESC',\n]\n\nmetrics_list = ['DSCNT_PCT']\n\ncol_list = pk_list + metrics_list\n\n# Load tables\nentpr_df = spark.sql(\"\"\"\n SELECT DISTINCT d1.season as SEAS_CD\n , d1.region as RGN_CD\n , d1.inline_smu as RGNL_OFRNG_MTHD_DESC\n , d1.strategic_category as STRAT_CATG_DESC\n , sc.subchannel as SBCHNL_DESC\n , sc.channel as CHNL_DESC\n , coalesce(d2.discount_percent, d3.discount_percent) as DSCNT_PCT\n FROM edw_lnd_tm1_view.tm1_dtc_discount d1\n CROSS JOIN csc.DimSubChannelPlanning sc\n LEFT OUTER JOIN edw_lnd_tm1_view.tm1_dtc_discount d2 ON d1.season = d2.season\n AND d1.region = d2.region\n AND d1.inline_smu = d2.inline_smu\n AND d1.strategic_category = d2.strategic_category\n AND 
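The resource-usage block in driver.py above is written out verbatim three times; a small helper keeps the three snapshots identical. A sketch under the same assumptions as the driver (CPython on a Unix-like system, where the resource module exists; write_usage is a hypothetical name):

import resource

USAGE_FIELDS = [
    ('ru_utime', 'User time'),
    ('ru_stime', 'System time'),
    ('ru_maxrss', 'Max. Resident Set Size'),
    ('ru_ixrss', 'Shared Memory Size'),
    ('ru_idrss', 'Unshared Memory Size'),
    ('ru_isrss', 'Stack Size'),
    ('ru_inblock', 'Block inputs'),
    ('ru_oublock', 'Block outputs'),
]

def write_usage(fp, label):
    # One snapshot of this process's resource usage, same format as the driver.
    usage = resource.getrusage(resource.RUSAGE_SELF)
    fp.write('\n\nUsage %s: \n' % label)
    for name, desc in USAGE_FIELDS:
        fp.write('%-25s (%-10s) = %s \n' % (desc, name, getattr(usage, name)))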
sc.subchannel = d2.subchannel\n LEFT OUTER JOIN edw_lnd_tm1_view.tm1_dtc_discount d3 ON d1.season = d3.season\n AND d1.region = d3.region\n AND d1.inline_smu = d3.inline_smu\n AND d1.strategic_category = d3.strategic_category\n AND d3.subchannel = 'Undesignated DTC'\n where sc.channel = 'DTC'\n\"\"\").add_edw_fields()\n\n# COMMAND ----------\n\n# DBTITLE 1,Update Databricks Table\narg_dict = {'name': schema_table_name, 'format': 'delta', 'mode': 'overwrite'}\n\nif overwrite or not table_exists:\n arg_dict['overwriteSchema'] = 'true'\n spark.sql(f'drop table if exists {schema_table_name}')\n dbutils.fs.rm(f'/mnt/entadls/published/eim/managed/{schema_name}/{table_name}', True)\n print('Overwriting...')\nelse:\n entpr_df = update_table(entpr_df, schema_table_name, pk_list, join_hint='broadcast')\n print('Updating')\n\nprint(f'New records: {entpr_df.count()}')\nentpr_df.write.saveAsTable(**arg_dict)\n","sub_path":"Prod/entpr_foundation/tm1_dtc_dscnt.py","file_name":"tm1_dtc_dscnt.py","file_ext":"py","file_size_in_byte":2857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"37854579","text":"# encoding: utf8\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Album',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=100)),\n ('artist', models.CharField(max_length=100)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Track',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('album', models.ForeignKey(to='music.Album', to_field='id')),\n ('number', models.IntegerField()),\n ('name', models.CharField(max_length=100)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"music/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"579693743","text":"import os\n\n## system params\n#DEVICES_PER_NODE\nquick = False ##################\n\n\n## nanoBragg params\nadd_spots_algorithm = \"cuda\" ##################\nfmodel_algorithm = \"fft\"\ndirect_algo_res_limit = 1.3\noversample = 1\ndefault_F = 0\n\n\n## crystal params\nmosaic_spread_deg = 0.05\nmosaic_domains = 25 \nlength_um = 10.\nDeff_A = 1700\nk_sol = 0.435\n\n\n## background params\nwater_sample_thick_mm = 50.0e-3\nwater_density_gcm3 = 1\nwater_molecular_weight_Da = 18\nair_sample_thick_mm = 100.0\nair_density_gcm3 = 1.0e-6\nair_molecular_weight_Da = 28 \n\n\n## x-ray beam params\nbeam_diameter_um = 3.0 # 1.0\npolarization = 1\nwavelength_A = 0.9771\nenergy_eV = 12688.890590\nexposure_s = 50.0e-15\nbeamsize_mm = 3.0e-3\nflux = 5.0e11/50.0e-15\n\n\n## device params\ndetector_size_nx = 1739\ndetector_size_ny = 1748\nbeam_center_x_mm = 95.975\nbeam_center_y_mm = 96.855\npixel_size_mm = 0.11\ndistance_mm = 138.695\ndetector_psf_kernel_radius_pixels = 1\ndetector_psf_fwhm_mm = 0.11\n\n\n## user params\nprefix = \"lao\" \nnum_pdbs = 150 ##################\npdb_files = [ os.path.abspath( \"./PDBs/lao_\"+str(ii).zfill(3)+\".pdb\" ) for ii in range(num_pdbs) ] ##################\nnum_img = [10000] * len(pdb_files) 
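The discount lookup in tm1_dtc_dscnt.py above joins the fact table twice and coalesces: a subchannel-specific row (d2) wins, otherwise the 'Undesignated DTC' row (d3) fills in. The same precedence reduced to plain Python for clarity (toy rows; not the production query):

rows_specific = {('S21', 'US', 'Web'): 0.25}  # (season, region, subchannel) -> pct
rows_default = {('S21', 'US'): 0.10}          # 'Undesignated DTC' fallback rows

def discount_pct(season, region, subchannel):
    # coalesce(d2.discount_percent, d3.discount_percent), in miniature
    specific = rows_specific.get((season, region, subchannel))
    return specific if specific is not None else rows_default.get((season, region))

print(discount_pct('S21', 'US', 'Web'))     # 0.25 - the specific row wins
print(discount_pct('S21', 'US', 'Outlet'))  # 0.1 - falls back to the default row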
##################\n\n\n\n","sub_path":"streptavidin/simparams.py","file_name":"simparams.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"85280329","text":"\"\"\"\nhttps://leetcode.com/problems/rearrange-spaces-between-words/\n\nCount the spaces, then re-arrange.\n\nTime Complexity: O(N)\n\"\"\"\nclass Solution:\n def reorderSpaces(self, text: str) -> str:\n new_text = text.split()\n n_spaces = len(text) - len(''.join(new_text))\n if len(new_text) == 1:\n return new_text[0] + \" \" * n_spaces\n cnt = n_spaces // (len(new_text) - 1)\n ans = (\" \" * cnt).join(new_text)\n ans += (n_spaces % (len(new_text) - 1)) * \" \"\n\n return ans\n","sub_path":"1592_RearrangeSpacesBetweenWords.py","file_name":"1592_RearrangeSpacesBetweenWords.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"443513581","text":"\"\"\"\nNode is defined as\nself.left (the left child of the node)\nself.right (the right child of the node)\nself.info (the value of the node)\n\"\"\"\ndef levelOrder(root):\n list = []\n list.append(root)\n accessNodeList(list)\n\ndef accessNodeList(nodes):\n newNodes = []\n \n for node in nodes:\n print(node.info, end =\" \")\n \n if (node.left is not None):\n newNodes.append(node.left)\n \n if (node.right is not None):\n newNodes.append(node.right)\n \n if newNodes:\n accessNodeList(newNodes)\n","sub_path":"level-order-traversal/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"325647120","text":"\nimport argparse\nimport logging\n\nimport chess.pgn\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport nltk\nimport numpy as np\n\n# To suppress errors while reading the pgn\nlogging.getLogger(\"chess.pgn\").setLevel(logging.CRITICAL)\n\n\ndef get_pieces_by_move_count(game):\n\n\t\"\"\"\n\tTakes a game and spits out how many total moves were played for each piece\n\t\"\"\"\n\n\tpieces = ['N', 'Q', 'K', 'B', 'R']\n\tfinal_counts = {}\n\t\n\tfor piece in pieces:\n\t\tcount = 0\n\t\t\n\t\tmove = game.next()\n\t\twhile move:\n\t\t\tmove_san = move.san()\n\t\t\tif move_san[0] == piece:\n\t\t\t\tcount += 1\n\t\t\tmove = move.next()\n\t\t\n\t\tfinal_counts[piece] = count\n\t\t\n\treturn final_counts\n\n\ndef get_piece_squares(game, piece_abbrev):\n\t\n\t\"\"\"\n\tTakes a game and a piece to spit out what\n\tnon-starting squares piece travelled/touched.\n\t\"\"\"\n\tsquares = []\n\t\n\tmove = game.next()\n\twhile move:\n\t\tmove_san = move.san()\n\t\tif move_san[0] == piece_abbrev:\n\t\t\tpossible_square = move_san[-2:]\n\t\t\tif '+' in possible_square or '#' in possible_square:\n\t\t\t\tpossible_square = move_san[-3:-1]\n\t\t\tsquares.append(possible_square)\n\t\tmove = move.next()\n\t\n\treturn squares\n\ndef get_piece_move_heat_map(list_of_games, piece_abbrev):\n\t\n\t\"\"\"\n\tTakes a list of games and a piece\n\tand spits out a frequency distribution\n\tof touched squares.\n\t\"\"\"\n\t\n\tpiece_squares_list = []\n\t\n\tfor game in list_of_games:\n\t\ttry:\n\t\t\tsquares = get_piece_squares(game, piece_abbrev)\n\t\t\tpiece_squares_list.extend(squares)\n\t\texcept ValueError:\n\t\t\tcontinue\n\t\n\tfreq_dist = nltk.FreqDist(piece_squares_list)\n\treturn freq_dist.most_common()\n\ndef numpyize_heat_map(heat_map_freq):\n\t\n\t\"\"\"\n\tMake a numpy array from the 
frequency distribution\n\tto ultimately represent like a chess board.\n\t\"\"\"\n\n\tboard = np.zeros([8, 8])\n\t\n\tfor square_freq in heat_map_freq:\n\t\tsquare = square_freq[0]\n\t\t\n\t\tnum_x = 8 - int(square[1])\n\t\tnum_y = ord(square[0]) - 97\n\t\t\n\t\tboard[num_x][num_y] = square_freq[1]\n\t\n\treturn board\n\ndef main():\n\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"pgnfile\")\n\targs = parser.parse_args()\n\t\n\tpgn_file = args.pgnfile\n\t\n\t# Read the games into memory\n\tgames = []\n\n\twith open(pgn_file) as pgn:\n\t\tgame = chess.pgn.read_game(pgn)\n\t\twhile game:\n\t\t\tgames.append(game)\n\t\t\tgame = chess.pgn.read_game(pgn)\n\n\tprint(games[-1])\n\n\t# For each piece that's not a pawn, create heat map and save to a file\n\tfor piece in ['K', 'Q', 'N', 'B', 'R']:\n\t\tpiece_move_heat_map = get_piece_move_heat_map(games, piece)\n\t\tX = numpyize_heat_map(piece_move_heat_map)\n\n\t\tfig, ax = plt.subplots()\n\t\ti = ax.imshow(X, cmap=cm.jet, interpolation='nearest')\n\t\t\n\t\tax.set_xticks([0,1,2,3,4,5,6,7])\n\t\tax.set_xticklabels(['a','b','c','d','e','f','g','h'])\n\t\t\n\t\tax.set_yticks([0,1,2,3,4,5,6,7])\n\t\tax.set_yticklabels([8,7,6,5,4,3,2,1])\n\t\t\n\t\tfig.colorbar(i)\n\t\t\n\t\tplt.title(label=piece, size='24', c='black')\n\t\tplt.savefig(f'{piece}.png')\n\nif __name__ == \"__main__\":\n\tmain()\n\n","sub_path":"create_heat_map.py","file_name":"create_heat_map.py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"157687338","text":"def gettwice(p):\n c = {}\n for x in p:\n # only count small caves\n if not x.islower():\n continue\n if not x in c:\n c[x] = 0\n c[x] = c[x] + 1\n dbl = [ x for x in c if c[x] == 2 ]\n if len(dbl) == 0:\n return None\n return dbl[0]\n\ndef count(p, x):\n return len([k for k in p if k == x])\n\ndef canenter(p, n):\n # we can enter the cave if it's a big cave\n if n.isupper():\n return True\n # we can also enter a small when we haven't bene there yet\n numvisit = count(p,n)\n if numvisit == 0:\n return True\n # if we were in this cave already twice, we cannot enter again\n if numvisit == 2:\n return False\n # see which cave we visited twice\n twice = gettwice(p)\n # we can enter if we have not visited any cave twice\n if not twice:\n return True\n # we visited another cave twice, so we cannot visit this one twice\n return False\n\nwith open(\"input.txt\", mode=\"r\") as f:\n m = [ x.strip() for x in f.readlines() ]\n\n#m = [ \"start-A\", \"start-b\", \"A-c\", \"A-b\", \"b-d\", \"A-end\", \"b-end\" ]\n\nadj = {}\n\n# build adjacency matrix\nfor l in m:\n a, b = l.split(\"-\")\n if not a in adj:\n adj[a] = []\n if not b in adj:\n adj[b] = []\n\n adj[a].append(b)\n adj[b].append(a)\n\npaths = [ [ \"start\" ] ]\ncomplete = []\n\nwhile len(paths)>0:\n # take the next path\n path = paths.pop(0)\n # finished?\n if path[-1] == \"end\":\n complete.append(path)\n continue\n # find possible routes\n for rt in adj[path[-1]]:\n # canot return to start\n if rt == \"start\":\n continue\n if rt.islower():\n if not canenter(path, rt):\n continue\n\n # else append next step and continue\n newpath = path.copy()\n newpath.append(rt)\n paths.append(newpath)\n\nprint(len(complete))\n\n","sub_path":"2021/12/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"7533252","text":"import logging\nfrom datetime import 
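The rank/file arithmetic in numpyize_heat_map above is the easiest place for an off-by-one, so a tiny self-check of the mapping is cheap insurance; a sketch using the same formulas as the function:

def square_to_index(square):
    row = 8 - int(square[1])   # rank 8 lands in array row 0 (top of the board)
    col = ord(square[0]) - 97  # file 'a' lands in column 0
    return row, col

assert square_to_index('a8') == (0, 0)
assert square_to_index('h1') == (7, 7)
assert square_to_index('e4') == (4, 4)
print('square mapping ok')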
datetime\n\nfrom core.constants import currensy_to_bitfinex_symbol_mapping\nfrom core.external_api import bitfinex_api\nfrom core.models import Currency, Rate\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass UpdateRate:\n    def execute(self):\n        days_10_ts = 864000000 # ms\n        now_ts = datetime.utcnow().timestamp() * 1000\n        days_10_ago_ts = now_ts - days_10_ts\n\n        for currency in Currency.objects.all():\n            last_rate = Rate.objects.only('date').filter(currency_id=currency.id).last()\n\n            # Load all missing data either from the \"10 days ago\" point,\n            # or from the \"last saved rate\" point,\n            # depending on which moment is closer to the current point in time.\n            after = max(days_10_ago_ts, last_rate.date if last_rate else 0)\n            candles = self.obtain_all(currensy_to_bitfinex_symbol_mapping[currency.name], '1m', after=after)\n\n            logger.info(f'obtained for {currency.name}: {len(candles)}')\n\n            Rate.objects.bulk_create([Rate(\n                rate=candle.close,\n                volume=candle.volume,\n                date=candle.mts,\n                currency_id=currency.id,\n            ) for candle in candles])\n\n            # The data volume is small, so we can afford to delete right inside this task.\n            Rate.objects.filter(currency_id=currency.id, date__lt=days_10_ago_ts).delete()\n\n    @staticmethod\n    def obtain_all(symbol, time_frame, section='hist', after=None):\n        # Keep loading rates while there is anything left to load (the per-request limit is 5000 records)\n        all_obtained = False\n        result = []\n\n        while not all_obtained:\n            if result:\n                after = result[0].mts\n\n            candles = bitfinex_api.get_candles(symbol, time_frame, section, after=after)\n            if not candles:\n                all_obtained = True\n\n            result = candles + result\n\n        return result\n","sub_path":"core/usecases/update_rate.py","file_name":"update_rate.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"643326074","text":"# -*- coding: utf8 -*-\r\nfrom ply import yacc\r\nfrom lex_analysis import tokens\r\n\r\nprecedence = (\r\n    ('left', 'and', 'or', 'XOR'),\r\n    ('left', 'LSS', 'LEQ', 'GTR', 'GEQ', 'EQL', 'NEQ'),\r\n    ('left', 'PLUS', 'MINUS'),\r\n    ('left', 'TIMES', 'DIVIDE', 'MOD'),\r\n    ('right', 'SFPLUS', 'SFMINUS', 'not')\r\n)\r\n\r\ntable_max_num = 100 # symbol table capacity\r\nID_max_length = 10 # maximum identifier length\r\naddress_max = 2048 # upper bound on addresses\r\ncode_max_num = 200 # maximum number of VM instructions\r\nstack_max_num = 700 # maximum number of elements on the runtime data stack\r\n\r\ntable = [] # table = [{'name': ID, 'kind': ,'type': , 'value': , 'address': , 'size': }] symbol table\r\ninstruction = [] # instruction = [{'f': char, 'l': int, 'a': int}] holds the VM code\r\n\r\ncx = 0 # VM code index\r\ntx = 0 # current tail pointer of the symbol table\r\nlocal_address = 3 # local variable address\r\nerror_list = [] # list of detected errors\r\nloop_adr_list = [] # start address of each loop statement's loop\r\nex3_adr_list = [] # start address of expression 3 of each for loop\r\nfor_start_adr = [] # start address of each for loop\r\nisbreak_list = [] # whether break was used inside the loop: 1 means yes, empty means no; one element is added per nesting level\r\nswitch_start_adr = [] # start address of each switch branch structure\r\n\r\n\r\ndef p_program(p):\r\n    'program : main LBRACE declaration_list gen_ini statement_list RBRACE'\r\n    p[0] = ('program', p[3], p[4], p[5])\r\n    gen('opr', 0, 0)\r\n\r\ndef p_gen_ini(p):\r\n    'gen_ini : '\r\n    p[0] = ('gen_ini')\r\n    gen('ini', 0, local_address)\r\n\r\ndef p_declaration_list(p):\r\n    '''declaration_list : declaration_list declaration_stat\r\n                        | declaration_stat\r\n                        | '''\r\n    if len(p) == 3:\r\n        p[0] = ('declaration_list', p[1], p[2])\r\n    elif len(p) == 2:\r\n        p[0] = ('declaration_list', p[1])\r\n    else:\r\n        p[0] = ('declaration_list', '')\r\n\r\ndef p_declaration_stat(p):\r\n    '''declaration_stat : type ID SEMICOLON\r\n                        | type ID LBRACKET 
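The obtain_all loop in update_rate.py above keeps requesting pages until the API returns nothing, feeding a timestamp from the newest batch back in as the next cursor. A generic sketch of that pattern with a hard page cap, since an API that never returns an empty page would otherwise loop forever (fetch_page is a stand-in for bitfinex_api.get_candles, not its real signature):

def backfill(fetch_page, after, max_pages=1000):
    result = []
    for _ in range(max_pages):
        if result:
            # New pages are prepended, matching the loop above, so result[0]
            # supplies the next cursor.
            after = result[0].mts
        page = fetch_page(after=after)
        if not page:
            break
        result = page + result
    return result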
NUMBER RBRACKET SEMICOLON\r\n | const type ID EQUAL NUMBER SEMICOLON'''\r\n global local_address\r\n if len(p) == 4:\r\n p[0] = ('declaration_stat', p[1], p[2])\r\n enter(p[2], 'variable', p[1][1], -1, local_address, 1)\r\n local_address += 1\r\n else:\r\n if p[1] == 'const':\r\n p[0] = ('declaration_stat', p[2], p[3], p[5])\r\n enter(p[3], 'constant', p[2][1], int(p[5]), -1, -1)\r\n else:\r\n p[0] = ('declaration_stat', p[1], p[2], p[4])\r\n enter(p[2], 'variable', p[1][1], -1, local_address, int(p[4]))\r\n local_address += int(p[4])\r\n\r\ndef p_type(p):\r\n '''type : int\r\n | char\r\n | bool'''\r\n p[0] = ('type', p[1])\r\n\r\ndef p_var(p):\r\n '''var : ID\r\n | ID LBRACKET expression RBRACKET'''\r\n isDefined = 0\r\n for example in table:\r\n if example['name'] == p[1]:\r\n isDefined = 1\r\n break\r\n if isDefined == 0:\r\n print(\"Syntax error at '%s'!\" % p[1])\r\n error_list.append('ID %s undefined!' % p[1])\r\n if len(p) == 2:\r\n p[0] = ('var', p[1])\r\n else:\r\n p[0] = ('var', p[1], p[3])\r\n\r\ndef p_statement_list(p):\r\n '''statement_list : statement_list statement\r\n | '''\r\n if len(p) == 3:\r\n p[0] = ('statement_list', p[1], [2])\r\n else:\r\n p[0] = ('statement_list', '')\r\n\r\ndef p_statement(p):\r\n '''statement : if_stat\r\n | while_stat\r\n | read_stat\r\n | write_stat\r\n | compound_stat\r\n | expression_stat\r\n | repeat_stat\r\n | do_while_stat\r\n | for_stat\r\n | exit_stat\r\n | continue_stat\r\n | break_stat\r\n | switch_case_stat'''\r\n p[0] = ('statement', p[1])\r\n\r\ndef p_gen_jpc_back(p):\r\n 'gen_jpc_back : '\r\n p[0] = ('gen_jpc_back')\r\n for example in reversed(instruction):\r\n if example['f'] == 'jpc' and example['a'] == 0:\r\n example['a'] = cx + 1\r\n break\r\n\r\ndef p_gen_jpc(p):\r\n 'gen_jpc : '\r\n p[0] = ('gen_jpc')\r\n gen('jpc', 0, 0)\r\n\r\ndef p_gen_jmp(p):\r\n 'gen_jmp : '\r\n p[0] = ('gen_jmp')\r\n gen('jmp', 0, 0)\r\n\r\ndef p_if_stat(p):\r\n '''if_stat : if LPAREN expression RPAREN gen_jpc statement\r\n | if LPAREN expression RPAREN gen_jpc statement else gen_jpc_back gen_jmp statement'''\r\n if len(p) == 7:\r\n p[0] = ('if_stat', p[3], p[5], p[6])\r\n for example in reversed(instruction):\r\n if example['f'] == 'jpc' and example['a'] == 0:\r\n example['a'] = cx\r\n break\r\n else:\r\n p[0] = ('if_stat', p[3], p[5], p[6], p[8], p[9], p[10])\r\n for example in reversed(instruction):\r\n if example['f'] == 'jmp' and example['a'] == 0:\r\n example['a'] = cx\r\n break\r\n\r\ndef p_rec_loop_adr(p):\r\n 'rec_loop_adr : '\r\n p[0] = ('rec_loop_adr')\r\n loop_adr_list.append(cx)\r\n\r\ndef p_while_stat(p):\r\n 'while_stat : while rec_loop_adr LPAREN expression RPAREN gen_jpc statement'\r\n p[0] = ('while_stat', p[2], p[4], p[6], p[7])\r\n gen('jmp', 0, loop_adr_list[-1])\r\n for example in reversed(instruction):\r\n if example['f'] == 'jpc' and example['a'] == 0:\r\n example['a'] = cx\r\n break\r\n if len(isbreak_list) != 0:\r\n tmp_cx = cx - 1\r\n while tmp_cx >= loop_adr_list[-1]:\r\n if instruction[tmp_cx]['f'] == 'jmp' and instruction[tmp_cx]['a'] == 0:\r\n instruction[tmp_cx]['a'] = cx\r\n tmp_cx -= 1\r\n isbreak_list.pop()\r\n loop_adr_list.pop()\r\n\r\ndef p_repeat_stat(p):\r\n 'repeat_stat : repeat rec_loop_adr statement until LPAREN expression RPAREN'\r\n p[0] = ('repeat_stat', p[2], p[3], p[6])\r\n gen('jpc', 0, loop_adr_list[-1])\r\n if len(isbreak_list) != 0:\r\n tmp_cx = cx - 1\r\n while tmp_cx >= loop_adr_list[-1]:\r\n if instruction[tmp_cx]['f'] == 'jmp' and instruction[tmp_cx]['a'] == 0:\r\n instruction[tmp_cx]['a'] = cx\r\n 
tmp_cx -= 1\r\n isbreak_list.pop()\r\n loop_adr_list.pop()\r\n\r\ndef p_do_while_stat(p):\r\n 'do_while_stat : do rec_loop_adr statement while LPAREN expression RPAREN'\r\n p[0] = ('do_while_stat', p[2], p[3], p[6])\r\n gen('jnc', 0, loop_adr_list[-1])\r\n if len(isbreak_list) != 0:\r\n tmp_cx = cx - 1\r\n while tmp_cx >= loop_adr_list[-1]:\r\n if instruction[tmp_cx]['f'] == 'jmp' and instruction[tmp_cx]['a'] == 0:\r\n instruction[tmp_cx]['a'] = cx\r\n tmp_cx -= 1\r\n isbreak_list.pop()\r\n loop_adr_list.pop()\r\n\r\ndef p_rec_ex3_adr(p):\r\n 'rec_ex3_adr : '\r\n p[0] = ('rec_ex3_adr')\r\n ex3_adr_list.append(cx)\r\n\r\ndef p_gen_jmp_back(p):\r\n 'gen_jmp_back : '\r\n p[0] = ('gen_jmp_back')\r\n for example in reversed(instruction):\r\n if example['f'] == 'jmp' and example['a'] == 0:\r\n example['a'] = cx\r\n break\r\n\r\ndef p_gen_jmp_condition(p):\r\n 'gen_jmp_condition : '\r\n global for_start_adr\r\n p[0] = ('gen_jmp_condition')\r\n gen('jmp', 0, loop_adr_list[-1])\r\n for_start_adr.append(loop_adr_list[-1])\r\n loop_adr_list.pop()\r\n\r\ndef p_for_stat(p):\r\n '''for_stat : for LPAREN expression SEMICOLON rec_loop_adr expression SEMICOLON gen_jpc gen_jmp \\\r\n rec_ex3_adr expression RPAREN gen_jmp_condition gen_jmp_back statement\r\n | for LPAREN SEMICOLON rec_loop_adr expression SEMICOLON gen_jpc gen_jmp \\\r\n rec_ex3_adr expression RPAREN gen_jmp_condition gen_jmp_back statement\r\n | for LPAREN expression SEMICOLON rec_loop_adr SEMICOLON gen_jpc gen_jmp \\\r\n rec_ex3_adr expression RPAREN gen_jmp_condition gen_jmp_back statement\r\n | for LPAREN expression SEMICOLON rec_loop_adr expression SEMICOLON gen_jpc gen_jmp \\\r\n rec_ex3_adr RPAREN gen_jmp_condition gen_jmp_back statement\r\n | for LPAREN SEMICOLON rec_loop_adr SEMICOLON gen_jpc gen_jmp \\\r\n rec_ex3_adr expression RPAREN gen_jmp_condition gen_jmp_back statement\r\n | for LPAREN SEMICOLON rec_loop_adr expression SEMICOLON gen_jpc gen_jmp \\\r\n rec_ex3_adr RPAREN gen_jmp_condition gen_jmp_back statement\r\n | for LPAREN expression SEMICOLON rec_loop_adr SEMICOLON gen_jpc gen_jmp \\\r\n rec_ex3_adr RPAREN gen_jmp_condition gen_jmp_back statement\r\n | for LPAREN SEMICOLON rec_loop_adr SEMICOLON gen_jpc gen_jmp \\\r\n rec_ex3_adr RPAREN gen_jmp_condition gen_jmp_back statement'''\r\n if len(p) == 16:\r\n p[0] = ('for_stat', p[3], p[5], p[6], p[8], p[9], p[10], p[11], p[13], p[14], p[15])\r\n elif len(p) == 15:\r\n if p[3] == ';':\r\n p[0] = ('for_stat', p[4], p[5], p[7], p[8], p[9], p[10], p[12], p[13], p[14])\r\n elif p[6] == ';':\r\n p[0] = ('for_stat', p[3], p[5], p[7], p[8], p[9], p[10], p[12], p[13], p[14])\r\n else:\r\n p[0] = ('for_stat', p[3], p[5], p[6], p[8], p[9], p[10], p[12], p[13], p[14])\r\n elif len(p) == 14:\r\n if p[3] == ';' and p[5] == ';':\r\n p[0] = ('for_stat', p[4], p[6], p[7], p[8], p[9], p[11], p[12], p[13])\r\n elif p[3] == ';' and p[6] == ';':\r\n p[0] = ('for_stat', p[4], p[5], p[7], p[8], p[9], p[11], p[12], p[13])\r\n else:\r\n p[0] = ('for_stat', p[3], p[5], p[7], p[8], p[9], p[11], p[12], p[13])\r\n else:\r\n p[0] = ('for_stat', p[4], p[6], p[7], p[8], p[10], p[11], p[12])\r\n\r\n for example in reversed(instruction):\r\n if example['f'] == 'jpc' and example['a'] == 0:\r\n example['a'] = cx + 1\r\n break\r\n gen('jmp', 0, ex3_adr_list[-1])\r\n ex3_adr_list.pop()\r\n if len(isbreak_list) != 0:\r\n tmp_cx = cx - 1\r\n while tmp_cx >= for_start_adr[-1]:\r\n if instruction[tmp_cx]['f'] == 'jmp' and instruction[tmp_cx]['a'] == 0:\r\n instruction[tmp_cx]['a'] = cx\r\n tmp_cx -= 1\r\n 
isbreak_list.pop()\r\n for_start_adr.pop()\r\n\r\ndef p_exit_stat(p):\r\n 'exit_stat : exit LPAREN RPAREN SEMICOLON'\r\n p[0] = ('exit_stat')\r\n gen('opr', 0, 0)\r\n\r\ndef p_continue_stat(p):\r\n 'continue_stat : continue SEMICOLON'\r\n p[0] = ('continue_stat')\r\n if len(loop_adr_list) == 0 and len(for_start_adr) == 0:\r\n print(\"'continue' is used in loops!\")\r\n error_list.append(\"'continue' is used in loops!\")\r\n elif len(for_start_adr) != 0:\r\n gen('jmp', 0, for_start_adr[-1])\r\n else:\r\n gen('jmp', 0, loop_adr_list[-1])\r\n\r\ndef p_break_stat(p):\r\n 'break_stat : break SEMICOLON'\r\n p[0] = ('break_stat')\r\n if len(loop_adr_list) == 0 and len(for_start_adr) == 0:\r\n print(\"'break' is used in loops or the end of every case statement!\")\r\n error_list.append(\"'break' is used in loops or the end of every case statement!\")\r\n else:\r\n gen('jmp', 0, 0)\r\n isbreak_list.append(1)\r\n\r\ndef p_rec_switch_adr(p):\r\n 'rec_switch_adr : '\r\n p[0] = ('rec_switch_adr')\r\n switch_start_adr.append(cx)\r\n\r\ndef p_switch_case_stat(p):\r\n '''switch_case_stat : switch LPAREN expression RPAREN LBRACE rec_switch_adr case_list default COLON statement RBRACE\r\n | switch LPAREN expression RPAREN LBRACE rec_switch_adr case_list RBRACE'''\r\n if len(p) == 11:\r\n p[0] = ('switch_case_stat', p[3], p[6], p[7], p[10])\r\n else:\r\n p[0] = ('switch_case_stat', p[3], p[6], p[7])\r\n tmp_cx = cx - 1\r\n while tmp_cx >= switch_start_adr[-1]:\r\n if instruction[tmp_cx]['f'] == 'jmp' and instruction[tmp_cx]['a'] == 0:\r\n instruction[tmp_cx]['a'] = cx\r\n tmp_cx -= 1\r\n switch_start_adr.pop()\r\n\r\ndef p_case_list(p):\r\n '''case_list : case_list case_stat\r\n | case_stat'''\r\n if len(p) == 3:\r\n p[0] = ('case_list', p[1], p[2])\r\n else:\r\n p[0] = ('case_list', p[1])\r\n\r\ndef p_gen_opr_switch(p):\r\n 'gen_opr_switch : '\r\n p[0] = ('gen_opr_switch')\r\n gen('opr', 0, 23)\r\n\r\ndef p_case_stat(p):\r\n '''case_stat : case expression COLON gen_opr_switch gen_jpc statement break SEMICOLON'''\r\n p[0] = ('case_stat', p[2], p[4], p[5], p[6])\r\n for example in reversed(instruction):\r\n if example['f'] == 'jpc' and example['a'] == 0:\r\n example['a'] = cx + 1\r\n break\r\n gen('jmp', 0, 0)\r\n\r\ndef p_read_stat(p):\r\n 'read_stat : read var SEMICOLON'\r\n p[0] = ('read_stat', p[2])\r\n gen('opr', 0, 16)\r\n for example in table:\r\n if example['name'] == p[2][1]:\r\n if example['size'] == 1:\r\n # p[2] = ('var', ID)\r\n gen('sto', 0, example['address'])\r\n else:\r\n # p[2] = ('var', ID, ('expression', ('simple_expr', ('logical_expr', ('additive_expr', (\r\n # 'term', ('self_operating', ('factor', NUM)))))))\r\n gen('sto', 0, example['address'] + int(p[2][2][1][1][1][1][1][1][1]))\r\n break\r\n\r\ndef p_write_stat(p):\r\n 'write_stat : write expression SEMICOLON'\r\n p[0] = ('write_stat', p[2])\r\n if len(p[2][1][1][1][1][1][1][1]) == 2:\r\n if p[2][1][1][1][1][1][1][1][0] == 'var':\r\n # p[2] = ('expression', ('simple_expr', ('logical_expr', ('additive_expr', ('term', (\r\n # 'self_operating', ('factor', ('var', ID))))))))\r\n for example in table:\r\n if example['name'] == p[2][1][1][1][1][1][1][1][1]:\r\n if example['address'] == -1 and example['size'] == -1:\r\n # 输出一个常量值\r\n gen('opr', 0, 14)\r\n elif example['size'] == 1:\r\n # 输出一个变量值\r\n gen('opr', 0, 14)\r\n else:\r\n # 输出整个数组\r\n for i in range(example['size']):\r\n gen('lod', 0, example['address'] + i)\r\n gen('opr', 0, 14)\r\n break\r\n else:\r\n # 输出表达式\r\n gen('opr', 0, 14)\r\n else:\r\n # 输出常数\r\n gen('opr', 0, 
14)\r\n\r\ndef p_compound_stat(p):\r\n 'compound_stat : LBRACE statement_list RBRACE'\r\n p[0] = ('compound_stat', p[2])\r\n\r\ndef p_expression_stat(p):\r\n '''expression_stat : expression SEMICOLON\r\n | SEMICOLON'''\r\n if len(p) == 3:\r\n p[0] = ('expression_stat', p[1])\r\n else:\r\n p[0] = ('expression_stat', '')\r\n\r\ndef p_expression(p):\r\n '''expression : var EQUAL expression\r\n | simple_expr'''\r\n if len(p) == 4:\r\n p[0] = ('expression', p[1], p[3])\r\n for example in table:\r\n if example['name'] == p[1][1]:\r\n if len(p[1]) == 2:\r\n # p[1] = ('var', ID)\r\n gen('sto', 0, example['address'])\r\n else:\r\n try:\r\n # 等号左边是数组,指令中地址用负值表示\r\n tmp = int(p[1][2][1][1][1][1][1][1][1]) + 1\r\n gen('sto', 0, example['address'] + int(p[1][2][1][1][1][1][1][1][1]))\r\n except:\r\n gen('sto', 0, -example['address'])\r\n break\r\n else:\r\n p[0] = ('expression', p[1])\r\n\r\ndef p_simple_expr(p):\r\n '''simple_expr : logical_expr\r\n | logical_expr XOR logical_expr\r\n | logical_expr and logical_expr\r\n | logical_expr or logical_expr'''\r\n if len(p) == 2:\r\n p[0] = ('simple_expr', p[1])\r\n else:\r\n p[0] = ('simple_expr', p[2], p[1], p[3])\r\n if p[2] == 'XOR':\r\n gen('opr', 0, 22)\r\n elif p[2] == 'and':\r\n gen('opr', 0, 24)\r\n elif p[2] == 'or':\r\n gen('opr', 0, 25)\r\n\r\ndef p_logical_expr(p):\r\n '''logical_expr : additive_expr\r\n | additive_expr LSS additive_expr\r\n | additive_expr LEQ additive_expr\r\n | additive_expr GTR additive_expr\r\n | additive_expr GEQ additive_expr\r\n | additive_expr EQL additive_expr\r\n | additive_expr NEQ additive_expr\r\n | ODD additive_expr'''\r\n if len(p) == 2:\r\n p[0] = ('logical_expr', p[1])\r\n elif len(p) == 4:\r\n p[0] = ('logical_expr', p[2], p[1], p[3])\r\n if p[2] == '<':\r\n gen('opr', 0, 10)\r\n elif p[2] == '<=':\r\n gen('opr', 0, 13)\r\n elif p[2] == '>':\r\n gen('opr', 0, 12)\r\n elif p[2] == '>=':\r\n gen('opr', 0, 11)\r\n elif p[2] == '==':\r\n gen('opr', 0, 8)\r\n elif p[2] == '!=':\r\n gen('opr', 0, 9)\r\n else:\r\n p[0] = ('logical_expr', p[2])\r\n gen('opr', 0, 6)\r\n\r\ndef p_additive_expr(p):\r\n '''additive_expr : term\r\n | term PLUS additive_expr\r\n | term MINUS additive_expr'''\r\n if len(p) == 2:\r\n p[0] = ('additive_expr', p[1])\r\n else:\r\n p[0] = ('additive_expr', p[2], p[1], p[3])\r\n if p[2] == '+':\r\n gen('opr', 0, 2)\r\n else:\r\n gen('opr', 0, 3)\r\n\r\ndef p_term(p):\r\n '''term : self_operating\r\n | self_operating TIMES term\r\n | self_operating DIVIDE term\r\n | self_operating MOD term'''\r\n if len(p) == 2:\r\n p[0] = ('term', p[1])\r\n else:\r\n p[0] = ('term', p[2], p[1], p[3])\r\n if p[2] == '*':\r\n gen('opr', 0, 4)\r\n elif p[2] == '/':\r\n gen('opr', 0, 5)\r\n elif p[2] == '%':\r\n gen('opr', 0, 21)\r\n\r\ndef p_self_operating(p):\r\n '''self_operating : factor\r\n | LPAREN SFPLUS factor RPAREN\r\n | LPAREN SFMINUS factor RPAREN\r\n | LPAREN factor SFPLUS RPAREN\r\n | LPAREN factor SFMINUS RPAREN\r\n | not factor'''\r\n if len(p) == 2:\r\n p[0] = ('self_operating', p[1])\r\n elif len(p) == 5:\r\n if p[2] == '++':\r\n p[0] = ('self_operating', 'operator_front', p[2], p[3])\r\n gen('opr', 0, 17)\r\n elif p[2] == '--':\r\n p[0] = ('self_operating', 'operator_front', p[2], p[3])\r\n gen('opr', 0, 18)\r\n elif p[3] == '++':\r\n p[0] = ('self_operating', 'operator_back', p[3], p[2])\r\n gen('opr', 0, 19)\r\n elif p[3] == '--':\r\n p[0] = ('self_operating', 'operator_back', p[3], p[2])\r\n gen('opr', 0, 20)\r\n else:\r\n p[0] = ('self_operating', p[2])\r\n gen('opr', 0, 26)\r\n\r\ndef 
p_factor(p):\r\n    '''factor : LPAREN expression RPAREN\r\n            | var\r\n            | NUMBER'''\r\n    if len(p) == 4:\r\n        p[0] = ('factor', p[2])\r\n    else:\r\n        p[0] = ('factor', p[1])\r\n        if p[1][0] == 'var':\r\n            for example in table:\r\n                if example['name'] == p[1][1]:\r\n                    if len(p[1]) == 2:\r\n                        # p[1] = ('var', ID)\r\n                        if example['kind'] == 'variable':\r\n                            gen('lod', 0, example['address'])\r\n                        elif example['kind'] == 'constant':\r\n                            gen('lit', 0, example['value'])\r\n                    else:\r\n                        try:\r\n                            # the array index is a constant\r\n                            tmp = int(p[1][2][1][1][1][1][1][1][1]) + 1\r\n                            if tmp - 1 >= example['size']:\r\n                                print('Array index out of range!')\r\n                                error_list.append('Array index out of range!')\r\n                                return\r\n                            gen('lod', 0, example['address'] + int(p[1][2][1][1][1][1][1][1][1]))\r\n                        except:\r\n                            gen('lod', 0, -example['address'])\r\n                    break\r\n        else:\r\n            gen('lit', 0, int(p[1]))\r\n\r\ndef p_error(p):\r\n    if p:\r\n        print(\"Syntax error at '%s' at the line %d\" % (p.value, p.lineno))\r\n        error_list.append(\"Syntax error at '%s' at the line %d\" % (p.value, p.lineno))\r\n    else:\r\n        print('Syntax error in input!')\r\n        error_list.append('Syntax error in input!')\r\n\r\ndef enter(name, kind, type, value, address, size):\r\n    global tx\r\n    if tx >= table_max_num:\r\n        print('Too many identifiers!')\r\n        return\r\n    new_ID = {}\r\n    new_ID['name'] = name\r\n    new_ID['kind'] = kind\r\n    new_ID['type'] = type\r\n    new_ID['value'] = value\r\n    new_ID['address'] = address\r\n    new_ID['size'] = size\r\n    table.append(new_ID)\r\n    tx += 1\r\n\r\ndef gen(f, l, a):\r\n    global cx\r\n    if cx >= code_max_num:\r\n        print('Program is too long!') # the generated VM code program is too long\r\n        return\r\n    if a >= address_max:\r\n        print('Displacement address is too big!') # address offset out of bounds\r\n        return\r\n    new_code = {}\r\n    new_code['f'] = f\r\n    new_code['l'] = l\r\n    new_code['a'] = a\r\n    instruction.append(new_code)\r\n    cx += 1\r\n\r\n# From a procedure's base address, find the base address of the procedure l levels up\r\ndef base(l, s, b):\r\n    b1 = b\r\n    while l > 0:\r\n        b1 = s[b1]\r\n        l -= 1\r\n    return b1\r\n\r\ndef interpret():\r\n    p = 0 # instruction pointer\r\n    b = 1 # instruction base address\r\n    t = 0 # stack top pointer\r\n    s = [0, 0, 0, 0] # the stack\r\n    for i in range(stack_max_num - 4):\r\n        s.append(-999)\r\n\r\n    print('\\n===fresult.txt===\\n')\r\n    fresult = open('../result_files/fresult.txt', 'w')\r\n    print('Start x0')\r\n    fresult.write('Start x0\\n')\r\n\r\n    while p != cx:\r\n        current_code = instruction[p]\r\n        p += 1\r\n\r\n        if current_code['f'] == 'ini':\r\n            t += current_code['a']\r\n\r\n        elif current_code['f'] == 'lit':\r\n            t += 1\r\n            s[t] = current_code['a']\r\n\r\n        elif current_code['f'] == 'lod':\r\n            t += 1\r\n            if current_code['a'] < 0:\r\n                tmp = s[base(current_code['l'], s, b) - current_code['a'] + s[t - 1]]\r\n            else:\r\n                tmp = s[base(current_code['l'], s, b) + current_code['a']]\r\n            if tmp == -999:\r\n                print('Array or variable not assigned!')\r\n                error_list.append('Array or variable not assigned!')\r\n                return\r\n            else:\r\n                for example in table:\r\n                    if (example['address'] <= current_code['a'] and\r\n                        current_code['a'] <= example['address'] + example['size'] - 1) \\\r\n                        or example['address'] == -current_code['a']:\r\n                        if example['type'] == 'int':\r\n                            if tmp >= -2147483648 and tmp <= 2147483647:\r\n                                s[t] = tmp\r\n                            else:\r\n                                print(\"The range of type 'int' is -2147483648 ~ 2147483647!\")\r\n                                error_list.append(\"The range of type 'int' is -2147483648 ~ 2147483647!\")\r\n                        elif example['type'] == 'char':\r\n                            if tmp >= -128 and tmp <= 127:\r\n                                s[t] = tmp\r\n                            else:\r\n                                print(\"The range of type 'char' is -128 ~ 127!\")\r\n                                error_list.append(\"The range of type 'char' is -128 ~ 127!\")\r\n                        elif example['type'] == 
'bool':\r\n if tmp == 0 or tmp == 1:\r\n s[t] = tmp\r\n else:\r\n print(\"The range of type 'bool' is 0 ~ 1!\")\r\n error_list.append(\"The range of type 'bool' is 0 ~ 1!\")\r\n break\r\n\r\n elif current_code['f'] == 'sto':\r\n if current_code['a'] < 0 :\r\n s[base(current_code['l'], s, b) - current_code['a'] + s[t - 1]] = s[t]\r\n else:\r\n s[base(current_code['l'], s, b) + current_code['a']] = s[t]\r\n # 这里的连续赋值只考虑了等号再讲栈顶值赋值给变量,没有考虑再赋值给数组,再赋值给数组的情况比较复杂\r\n for code in instruction[p:]:\r\n if code['f'] == 'sto':\r\n s[base(instruction[p]['l'], s, b) + instruction[p]['a']] = s[t]\r\n p += 1\r\n else:\r\n break\r\n t -= 1\r\n\r\n elif current_code['f'] == 'cal':\r\n s[t + 1] = base(current_code['l'], s, b)\r\n s[t + 2] = b\r\n s[t + 3] = p\r\n b = t + 1\r\n p = current_code['a']\r\n\r\n elif current_code['f'] == 'jmp':\r\n p = current_code['a']\r\n\r\n elif current_code['f'] == 'jpc':\r\n if s[t] == 0:\r\n p = current_code['a']\r\n t -= 1\r\n\r\n elif current_code['f'] == 'jnc':\r\n if s[t] != 0:\r\n p = current_code['a']\r\n t -= 1\r\n\r\n elif current_code['f'] == 'opr':\r\n if current_code['a'] == 0:\r\n t = b - 1\r\n p = s[t + 3]\r\n b = s[t + 2]\r\n break\r\n elif current_code['a'] == 1:\r\n s[t] = -s[t]\r\n elif current_code['a'] == 2:\r\n t -= 1\r\n s[t] += s[t + 1]\r\n elif current_code['a'] == 3:\r\n t -= 1\r\n s[t] -= s[t + 1]\r\n elif current_code['a'] == 4:\r\n t -= 1\r\n s[t] *= s[t + 1]\r\n elif current_code['a'] == 5:\r\n t -= 1\r\n s[t] /= s[t + 1]\r\n elif current_code['a'] == 6:\r\n s[t] = s[t] % 2\r\n elif current_code['a'] == 8:\r\n t -= 1\r\n s[t] = (s[t] == s[t + 1])\r\n elif current_code['a'] == 9:\r\n t -= 1\r\n s[t] = (s[t] != s[t + 1])\r\n elif current_code['a'] == 10:\r\n t -= 1\r\n s[t] = (s[t] < s[t + 1])\r\n elif current_code['a'] == 11:\r\n t -= 1\r\n s[t] = (s[t] >= s[t + 1])\r\n elif current_code['a'] == 12:\r\n t -= 1\r\n s[t] = (s[t] > s[t + 1])\r\n elif current_code['a'] == 13:\r\n t -= 1\r\n s[t] = (s[t] <= s[t + 1])\r\n elif current_code['a'] == 14:\r\n if instruction[p - 2]['f'] == 'opr' or instruction[p - 2]['f'] == 'lit':\r\n # 输出表达式或常数的值\r\n print(s[t])\r\n fresult.write(str(s[t]) + '\\n')\r\n else:\r\n for example in table:\r\n if (example['address'] <= instruction[p - 2]['a'] and\r\n instruction[p - 2]['a'] <= example['address'] + example['size'] - 1) \\\r\n or example['address'] == -instruction[p - 2]['a']:\r\n # 输出变量、索引为常数的数组、索引为表达式的数组的值\r\n if example['type'] == 'int':\r\n if s[t] >= -2147483648 and s[t] <= 2147483647:\r\n print(s[t])\r\n fresult.write(str(s[t]) + '\\n')\r\n elif example['type'] == 'bool':\r\n if s[t] == 1:\r\n print('true')\r\n fresult.write('true\\n')\r\n elif s[t] == 0:\r\n print('false')\r\n fresult.write('false\\n')\r\n else:\r\n if s[t] >= -128 and s[t] <= 127:\r\n print(chr(s[t]))\r\n fresult.write(str(chr(s[t])) + '\\n')\r\n break\r\n t -= 1\r\n elif current_code['a'] == 15:\r\n print()\r\n fresult.write('\\n')\r\n elif current_code['a'] == 16:\r\n t += 1\r\n s[t] = int(input('Input? '))\r\n fresult.write('Input? 
' + str(s[t]) + '\\n')\r\n elif current_code['a'] == 17:\r\n s[t] += 1\r\n s[base(instruction[p - 2]['l'], s, b) + instruction[p - 2]['a']] += 1\r\n elif current_code['a'] == 18:\r\n s[t] -= 1\r\n s[base(instruction[p - 2]['l'], s, b) + instruction[p - 2]['a']] -= 1\r\n elif current_code['a'] == 19:\r\n s[base(instruction[p - 2]['l'], s, b) + instruction[p - 2]['a']] += 1\r\n elif current_code['a'] == 20:\r\n s[base(instruction[p - 2]['l'], s, b) + instruction[p - 2]['a']] -= 1\r\n elif current_code['a'] == 21:\r\n t -= 1\r\n s[t] = s[t] % s[t + 1]\r\n elif current_code['a'] == 22:\r\n t -= 1\r\n if s[t] == s[t + 1]:\r\n s[t] = 0\r\n else:\r\n s[t] = 1\r\n elif current_code['a'] == 23:\r\n if s[t] == s[t - 1]:\r\n s[t - 1] = 1\r\n t -= 1\r\n else:\r\n s[t] = 0\r\n elif current_code['a'] == 24:\r\n t -= 1\r\n if s[t] != 0 and s[t + 1] != 0:\r\n s[t] = 1\r\n else:\r\n s[t] = 0\r\n elif current_code['a'] == 25:\r\n t -= 1\r\n if s[t] == 0 and s[t + 1] == 0:\r\n s[t] = 0\r\n else:\r\n s[t] = 1\r\n elif current_code['a'] == 26:\r\n if s[t] == 0:\r\n s[t] = 1\r\n else:\r\n s[t] = 0\r\n\r\n\r\n print('End x0')\r\n fresult.write('End x0\\n')\r\n fresult.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n file_name = input('Input x0 file? ')\r\n fin = open('../test_files/' + file_name, 'r')\r\n data = fin.read()\r\n\r\n foutput = open('../result_files/foutput.txt', 'w')\r\n ftable = open('../result_files/ftable.txt', 'w')\r\n fcode = open('../result_files/fcode.txt', 'w')\r\n\r\n table = []\r\n instruction = []\r\n cx = 0\r\n tx = 0\r\n local_address = 3\r\n isbreak_list = []\r\n error_list = []\r\n loop_adr_list = []\r\n ex3_adr_list = []\r\n for_start_adr = []\r\n switch_start_adr = []\r\n\r\n gen('jmp', 0, 1)\r\n parser = yacc.yacc(start='program')\r\n parser.parse(data)\r\n\r\n if len(error_list) == 0:\r\n\r\n print('\\n===ftable.txt===\\n')\r\n for i in range(len(table)):\r\n if table[i]['kind'] == 'constant':\r\n ftable.write(str(i + 1) + '\\t' + table[i]['name'] + '\\t' + table[i]['kind'] + '\\t' + table[i]['type']\r\n + '\\tvalue = ' + str(table[i]['value']) + '\\taddress = \\tsize = \\n')\r\n print(str(i + 1), '\\t', table[i]['name'], '\\t', table[i]['kind'], '\\t', table[i]['type'],\r\n '\\tvalue =', table[i]['value'], '\\taddress = \\tsize = ')\r\n elif table[i]['kind'] == 'variable':\r\n ftable.write(str(i + 1) + '\\t' + table[i]['name'] + '\\t' + table[i]['kind'] + '\\t' + table[i]['type']\r\n + '\\tvalue = \\taddress = ' + str(table[i]['address']) + '\\tsize = '\r\n + str(table[i]['size']) + '\\n')\r\n print(str(i + 1), '\\t', table[i]['name'], '\\t', table[i]['kind'], '\\t', table[i]['type'],\r\n '\\tvalue = \\taddress =', table[i]['address'], '\\tsize =', table[i]['size'])\r\n\r\n print('\\n===fcode.txt===\\n')\r\n for i in range(len(instruction)):\r\n fcode.write(str(i) + '\\t' + instruction[i]['f'] + ' ' + str(instruction[i]['l']) + ', '\r\n + str(instruction[i]['a']) + '\\n')\r\n print(str(i), '\\t', instruction[i]['f'], str(instruction[i]['l']) + ',', instruction[i]['a'])\r\n\r\n interpret()\r\n else:\r\n print('%d errors in x0 program' % len(error_list))\r\n foutput.write(str(len(error_list)) + ' errors in x0 program\\n\\n')\r\n for err in error_list:\r\n foutput.write(err + '\\n')\r\n\r\n print('\\n===foutput.txt===\\n')\r\n if len(error_list) == 0:\r\n print('\\n===Parsing success!===\\n')\r\n foutput.write('\\n===Parsing success!===\\n')\r\n else:\r\n print('%d errors in x0 program' % len(error_list))\r\n foutput.write(str(len(error_list)) + ' errors in x0 program\\n\\n')\r\n 
for err in error_list:\r\n            print(err)\r\n            foutput.write(err + '\\n')\r\n\r\n    fin.close()\r\n    foutput.close()\r\n    ftable.close()\r\n    fcode.close()\r\n\r\n\r\n","sub_path":"code/1.3 再实现逻辑运算符优先级的版本/compiler.py","file_name":"compiler.py","file_ext":"py","file_size_in_byte":33007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"338958703","text":"import unittest\nimport zserio\n\nfrom testutils import getZserioApi\n\nclass UnionWithParameterTest(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        cls.api = getZserioApi(__file__, \"union_types.zs\").union_with_parameter\n\n    def testParamConstructor(self):\n        testUnion = self.api.TestUnion(True)\n        self.assertTrue(testUnion.case1_allowed)\n\n        testUnion.case1_field = 11\n        bitBuffer = zserio.serialize(testUnion)\n        readTestUnion = zserio.deserialize(self.api.TestUnion, bitBuffer, True)\n        self.assertEqual(testUnion.case1_allowed, readTestUnion.case1_allowed)\n        self.assertEqual(testUnion.case1_field, readTestUnion.case1_field)\n\n    def testParamConstructorCase1Forbidden(self):\n        testUnion = self.api.TestUnion(False)\n        self.assertFalse(testUnion.case1_allowed)\n\n        testUnion.case1_field = 11\n        writer = zserio.BitStreamWriter()\n        with self.assertRaises(zserio.PythonRuntimeException):\n            testUnion.write(writer) # raises exception\n\n    def testFromReader(self):\n        testUnion = self.api.TestUnion(True, case3_field_=-1)\n        bitBuffer = zserio.serialize(testUnion)\n        readTestUnion = zserio.deserialize(self.api.TestUnion, bitBuffer, True)\n        self.assertEqual(testUnion.choice_tag, readTestUnion.choice_tag)\n        self.assertEqual(testUnion.case3_field, readTestUnion.case3_field)\n        self.assertEqual(-1, readTestUnion.case3_field)\n","sub_path":"test/language/union_types/python/UnionWithParameterTest.py","file_name":"UnionWithParameterTest.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"482461494","text":"# Input the target savings amount and the monthly savings amount, then display in how many years and months the target is reached.\r\n\r\nmokuhyou = int(input())\r\nchokin = int(input())\r\n\r\ngoukei = 0\r\nkikan = 0\r\n\r\nwhile goukei < mokuhyou:\r\n    goukei += chokin\r\n    kikan += 1\r\n\r\nyear = int(kikan/12)\r\nmonth = kikan%12\r\n\r\nprint(str(year)+\"年\"+str(month)+\"ヵ月\")\r\n\r\n","sub_path":"python/chokin.py","file_name":"chokin.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"561388218","text":"#Import pygame module\r\nimport pygame\r\nfrom pygame.locals import *\r\n\r\n#Initialize pygame module, required before pygame is used\r\npygame.init()\r\n\r\n#SnakeHead Class\r\n#Class is made using pygame sprite to allow for collision checks and image loading\r\n#Constructor takes in parameters\r\n#x : size of snake length in pixels\r\n#y : size of snake width in pixels\r\n#sx : starting x position of snake head\r\n#sy : starting y position of snake head\r\nclass SnakeHead(pygame.sprite.Sprite):\r\n    def __init__(self, x, y, sx, sy):\r\n        #Create pygame sprite object\r\n        pygame.sprite.Sprite.__init__(self)\r\n        #Load an image to use as snake head\r\n        #All pygame sprites must have an image property\r\n        self.image = pygame.image.load('right.png')\r\n\r\n        #Set snake's length and width size from parameters\r\n        self.snakeSizeX = x\r\n        self.snakeSizeY = y\r\n\r\n        #self.image used for testing purposes, created a colored square instead of an 
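A note on the jump handling in compiler.py above: conditional branches are emitted as gen('jpc', 0, 0) placeholders, and the grammar actions later walk the instruction list backwards for the newest entry whose target is still 0, then patch in the real address (classic backpatching). The idea in isolation, as a minimal standalone sketch:

instructions = []

def gen(f, l, a):
    instructions.append({'f': f, 'l': l, 'a': a})

def patch_last(op, target):
    # Patch the most recently emitted placeholder of this opcode.
    for ins in reversed(instructions):
        if ins['f'] == op and ins['a'] == 0:
            ins['a'] = target
            return

gen('lit', 0, 1)   # push a condition value
gen('jpc', 0, 0)   # forward branch, target unknown at emit time
gen('lit', 0, 42)  # branch body
patch_last('jpc', len(instructions))  # branch past the body once its end is known
print(instructions)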
image\r\n #self.image = pygame.Surface((self.snakeSizeX, self.snakeSizeY))\r\n #self.image.fill(snakeColor)\r\n\r\n #Get the rect property from the image loaded\r\n #All pygame sprites must have a rect property\r\n #rect holds the x and y coordinates of the image's top left corner\r\n #It also holds the length and width of the objects\r\n self.rect = self.image.get_rect()\r\n #0 index of rect is the image's x coordinate\r\n self.rect[0] = sx\r\n #1 index of rect is the image's y coordinate\r\n self.rect[1] = sy\r\n\r\n #Direction of snake head, default is 1 which is right\r\n self.direction = 1\r\n #Length of the snake's body parts, held in the snake head\r\n self.length = 0\r\n\r\n #Inverted variable, 0 means controls are normal, 1 means controls are inverted\r\n self.inverted = 0\r\n\r\n #Move function for snake head\r\n def move(self, x, y):\r\n #Change snake head's x and y coordinates to those given\r\n self.rect[0] = x\r\n self.rect[1] = y\r\n\r\n #Accessor function that returns snake's current direction\r\n def getDirection(self):\r\n return self.direction\r\n\r\n #Accessor function that returns snake's current direction as a string\r\n def getDirectionStr(self):\r\n if self.direction == 0:\r\n return 'up'\r\n elif self.direction == 1:\r\n return 'right'\r\n elif self.direction == 2:\r\n return 'down'\r\n elif self.direction == 3:\r\n return 'left'\r\n\r\n #Change direction function, takes in diretion to change to\r\n def changeDirection(self, eventKey):\r\n #If/elif loops that changes direction depending on the input\r\n #Also checks that direction doesn't change to opposite direction so snake can't go backwards\r\n #Also changes image used depending on the direction the snake head is facing\r\n if eventKey == 'u':\r\n if self.direction != 2:\r\n self.direction = 0\r\n self.image = pygame.image.load('up.png')\r\n elif eventKey == 'r':\r\n if self.direction != 3:\r\n self.direction = 1\r\n self.image = pygame.image.load('right.png')\r\n elif eventKey == 'd':\r\n if self.direction != 0:\r\n self.direction = 2\r\n self.image = pygame.image.load('down.png')\r\n elif eventKey == 'l':\r\n if self.direction != 1:\r\n self.direction = 3\r\n self.image = pygame.image.load('left.png')\r\n\r\n #Function for adding length to snake's length variable, defaults at 1 if no parameters given\r\n def addLength(self, slen=1):\r\n self.length += slen\r\n\r\n #Sets the length of the snake's length variable to whatever value given\r\n def setLength(self, slen):\r\n self.length = slen\r\n\r\n #Accessor method for getting the snake's length variable, returns self.length\r\n def getLength(self):\r\n return self.length\r\n\r\n #Mutator method for changing color of snake head\r\n def changeColor(self, color):\r\n self.image.fill(color)\r\n\r\n #Accessor method for getting the snake's current x position, returns 0 index of rect\r\n def getXPos(self):\r\n return self.rect[0]\r\n\r\n #Accessor method for getting the snake's current y position, returns 1 index of rect\r\n def getYPos(self):\r\n return self.rect[1]\r\n \r\n #Mutator method for moving snake up one snake size unit\r\n def moveUp(self):\r\n self.rect.move_ip(0, -self.snakeSizeY)\r\n\r\n #Mutator method for moving snake right one snake size unit\r\n def moveRight(self):\r\n self.rect.move_ip(self.snakeSizeX, 0)\r\n\r\n #Mutator method for moving snake down one snake size unit\r\n def moveDown(self):\r\n self.rect.move_ip(0, self.snakeSizeY)\r\n\r\n #Mutator method for moving snake left one snake size unit\r\n def moveLeft(self):\r\n 
self.rect.move_ip(-self.snakeSizeX, 0)\r\n\r\n #Mutator method for flipping inversion of controls by changing the inverted variable\r\n def changeInversion(self):\r\n #Checks what state the controls are before changing\r\n if self.inverted == 0:\r\n self.inverted = 1\r\n else:\r\n self.inverted = 0\r\n\r\n #Mutator method for setting inverted variable to the value given\r\n def changeInversionTo(self, inv):\r\n self.inverted = inv\r\n\r\n #Accessor method for getting what the inverted state of the controls is\r\n def getInverted(self):\r\n return self.inverted\r\n\r\n #To string method\r\n def __str__(self):\r\n return ('Snake Head is currently at position (%i, %i) +\\\r\n facing %s and its body has length %i. +\\\r\n Controls are currently %s inverted.' %\r\n (self.getXPos(), self.getYPos(), self.getDirectionStr(),\r\n self.getLength(), ('not' if self.getInverted() == 0 else '')))\r\n \r\n\r\ndef main():\r\n snakeHead = SnakeHead(25,25,100,100)\r\n print(snakeHead)\r\n snakeHead_Sprite = pygame.sprite.RenderPlain((snakeHead))\r\n screen = pygame.display.set_mode((800,600))\r\n\r\n clock = pygame.time.Clock()\r\n while True:\r\n #Clear background so everything can be redrawn\r\n screen.fill(0)\r\n\r\n #Try making snakehead move constantly depending on direction value\r\n if snakeHead.getDirection() == 0:\r\n snakeHead.moveUp()\r\n elif snakeHead.getDirection() == 1:\r\n snakeHead.moveRight()\r\n elif snakeHead.getDirection() == 2:\r\n snakeHead.moveDown()\r\n elif snakeHead.getDirection() == 3:\r\n snakeHead.moveLeft()\r\n\r\n #Test position returner\r\n print(snakeHead.getXPos())\r\n print(snakeHead.getYPos())\r\n\r\n #Test length functions\r\n print(snakeHead.getLength())\r\n print(snakeHead.addLength())\r\n\r\n #Test color changer function\r\n snakeHead.changeColor(pygame.Color(255,125,0))\r\n\r\n #Test move function\r\n snakeHead.move(300,300)\r\n \r\n #Updates where snake head is since it draws it at the end of the loop\r\n snakeHead_Sprite.draw(screen)\r\n pygame.display.flip()\r\n clock.tick(30)\r\n\r\n#main()\r\n","sub_path":"SnakeHead.py","file_name":"SnakeHead.py","file_ext":"py","file_size_in_byte":7248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"640176627","text":"#########################################################################################################\n# Add Elo Rating to the Regular Season Data for the March Madness Kaggle Contest\n#########################################################################################################\n\nimport numpy as np\nimport pandas as pd\nfrom random import randint\n\nimport os\nimport sys\nfrom collections import defaultdict\nimport logging\nimport logging.config\n\nimport elo\n\n#########################################################################################################\n# Global Variables\n__etl__ = os.path.dirname(__file__)\n__src__ = os.path.join(__etl__, 'src')\n__dir__ = os.path.join(__etl__, '..')\n__data__ = os.path.join(__dir__, 'data')\n__root__ = os.path.join(__dir__, '../..')\n__utils__ = os.path.join(__root__, 'lib/utils')\n__plots__ = os.path.join(__root__, 'lib/visualization')\n\n#########################################################################################################\n# Post Global Variable Imports\n# Ignore PEP8 Warning. 
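The per-direction if/elif chains in SnakeHead.py above (changeDirection and the game loop) can be collapsed into a dict dispatch; a sketch with a stand-in object rather than the real class:

class _Head:  # minimal stand-in for SnakeHead, just for the dispatch idea
    def moveUp(self): print('up')
    def moveRight(self): print('right')
    def moveDown(self): print('down')
    def moveLeft(self): print('left')

MOVES = {0: _Head.moveUp, 1: _Head.moveRight, 2: _Head.moveDown, 3: _Head.moveLeft}

head = _Head()
MOVES[1](head)  # prints 'right'; in the loop: MOVES[snakeHead.getDirection()](snakeHead)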
Need path before import to enable execution from project folder\n\nsys.path.append(__root__)\nfrom lib.utils import support_functions as sf\n\n#########################################################################################################\n\nlog = logging.getLogger('debug')\n\n#########################################################################################################\n\n\ndef get_spread_ave(games, cum_spread, season, team):\n if games[season][team] != 0:\n return cum_spread[season][team]/games[season][team]\n elif season > 2004:\n return get_spread_ave(games, cum_spread, season-1, team)\n else:\n return 0.0\n\n\ndef get_cum_spreads(df):\n\n cum_games = defaultdict(lambda: defaultdict(int))\n cum_spread = defaultdict(lambda: defaultdict(int))\n for index, row in df.iterrows():\n\n if str(row['line']) != 'nan':\n spread = row['line']\n elif str(row['lineavg']) != 'nan':\n spread = row['lineavg']\n\n # Cummulative spread, games for winning team\n cum_games[row['season']][row['wteam']] += 1\n cum_spread[row['season']][row['wteam']] += spread\n\n # Cummulative spread, games for losing team\n cum_games[row['season']][row['lteam']] += 1\n cum_spread[row['season']][row['lteam']] -= spread\n\n return cum_spread, cum_games\n\n############\n\n\ndef add_season_elo(s_df, apply_dynamic=True, apply_final=False):\n\n # is_dynamic --> Calculate and record team Elo's on a game by game basis\n # is_season --> Use season end Elo for all games for a given team\n\n el = elo.Elo(10)\n team = defaultdict(lambda: defaultdict(dict))\n\n s_df['WElo'] = ''\n s_df['LElo'] = ''\n\n log.info('Adding Season Elo Rating........ ')\n\n for index, row in s_df.iterrows():\n (t1, t2) = (row['Wteam'], row['Lteam'])\n season = row['Season']\n\n if t1 not in team[season]:\n team[season][t1] = 1000.0\n if t2 not in team[season]:\n team[season][t2] = 1000.0\n\n if apply_dynamic:\n # Recording Elo Rating Going Into The Game\n s_df.set_value(index, 'WElo', team[season][t1])\n s_df.set_value(index, 'LElo', team[season][t2])\n\n # Computing the new Elo Rating\n (team[season][t1], team[season][t2]) = el.rate_1vs1(team[season][t1], team[season][t2])\n # log.debug('Post Game: Win Team Elo: %f Lose Team Elo: %f', team[t1], team[t2])\n\n if apply_final:\n for index, row in s_df.iterrows():\n s_df.set_value(index, 'WElo', team[row['Season']][row['Wteam']])\n s_df.set_value(index, 'LElo', team[row['Season']][row['Lteam']])\n\n return s_df\n\n\ndef add_season_spread(s_df, sp_df):\n # log = logging.getLogger('debug')\n\n log.info('Adding Season Spreads Data........ 
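add_season_elo above leans on a third-party elo module (elo.Elo(10), rate_1vs1); for reference, the underlying update rule is the standard logistic one, sketched below. This is the textbook formula with K=10, not necessarily the library's exact implementation:

def elo_update(r_winner, r_loser, k=10.0):
    # Expected score of the winner before the game, then shift both ratings.
    expected_win = 1.0 / (1.0 + 10 ** ((r_loser - r_winner) / 400.0))
    delta = k * (1.0 - expected_win)
    return r_winner + delta, r_loser - delta

print(elo_update(1000.0, 1000.0))  # evenly matched: winner gains k/2 -> (1005.0, 995.0)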
')\n\n # Loading Spreads\n spread = {}\n for index, row in sp_df.iterrows():\n match = '_'.join([str(row['season']), str(row['daynum']), str(row['wteam']), str(row['lteam'])])\n if str(row['line']) != 'nan':\n spread[match] = row['line']\n elif str(row['lineavg']) != 'nan':\n spread[match] = row['lineavg']\n\n # Get cumulative spread\n (cum_spread, cum_games) = get_cum_spreads(sp_df)\n\n # Adding Spreads to Main CSV\n s_df['Spread'] = ''\n for index, row in s_df.iterrows():\n match = '_'.join([str(row['Season']), str(row['Daynum']), str(row['Wteam']), str(row['Lteam'])])\n # log.debug('Match Key (From Season Data) is %s', match)\n if match in spread:\n s_df.set_value(index, 'Spread', spread[match])\n else:\n # log.debug('Missing spread for %s', match)\n wteam_ave_line = get_spread_ave(cum_games, cum_spread, row['Season'], row['Wteam'])\n lteam_ave_line = get_spread_ave(cum_games, cum_spread, row['Season'], row['Lteam'])\n s_df.set_value(index, 'Spread', (wteam_ave_line - lteam_ave_line))\n\n return s_df\n\n\ndef get_rank_dict(dic, team, season, day, prev_year=False):\n\n prev_day = 0\n if not prev_year:\n for i_day, ranks in sorted(dic[team][season].items()):\n if i_day == day:\n return dic[team][season][i_day]\n elif i_day < day:\n prev_day = i_day\n continue\n elif i_day > day:\n if prev_day > 0:\n return dic[team][season][prev_day]\n if prev_day == 0 and season > 2004:\n log.debug('Alert: Looking at previous season! %s %s %s', team, season, day)\n return get_rank_dict(dic, team, season-1, day, prev_year=True)\n else:\n log.debug('Alert: No data available! %s %s %s', team, season, day)\n return {}\n else:\n try:\n return sorted(dic[team][season].items())[-1][1]\n except IndexError:\n if season > 2004:\n log.debug('Alert: Looking at previous season! %s %s %s', team, season, day)\n return get_rank_dict(dic, team, season-1, day, prev_year=True)\n else:\n log.debug('Alert: No data available! %s %s %s', team, season, day)\n return {}\n\n\n# Add Massey ordinals data\ndef add_season_massey(s_df, m_df, apply_dynamic=True, apply_final=False):\n\n log.info('Adding Season Massey Data........ ')\n\n if apply_dynamic:\n # Loading Massey Data\n i = 0\n rank_dict = defaultdict(lambda: defaultdict(lambda: defaultdict(dict)))\n for index, row in m_df.iterrows():\n rank_dict[row['team']][row['season']][row['rating_day_num']].update({row['sys_name']: row['orank']})\n if (i % 20000) == 0:\n log.debug('Loaded %d lines... On year %d, day %d....', i, row['season'], row['rating_day_num'])\n i += 1\n\n s_df['Worank'] = ''\n s_df['Lorank'] = ''\n for index, row in s_df.iterrows():\n # Build Rank Information for Playing Teams\n i = 0\n rank = []\n for team in (row['Wteam'], row['Lteam']):\n total = 0\n i_ranks = get_rank_dict(rank_dict, team, row['Season'], row['Daynum'], prev_year=False)\n # log.debug('Rank Info: %s, %s, %s: %s', team, row['Season'], row['Daynum'], i_ranks)\n if not i_ranks:\n # Total 364 Teams. Assume mid-point ranks\n rank.append(182)\n else:\n for key in i_ranks:\n total += i_ranks[key]\n rank.append(float(total)/len(i_ranks))\n i += 1\n s_df.set_value(index, 'Worank', rank[0])\n s_df.set_value(index, 'Lorank', rank[1])\n\n if apply_final:\n # Loading Massey Data\n i = 0\n rank_dict = defaultdict(lambda: defaultdict(lambda: defaultdict(dict)))\n\n # Collating Ranks Per Team, Per Season, Per Rating System\n for index, row in m_df.iterrows():\n rank_dict[row['team']][row['season']][row['sys_name']] = row['orank']\n if (i % 20000) == 0:\n log.debug('Loaded %d lines... 
On year %d, day %d....', i, row['season'], row['rating_day_num'])\n i += 1\n\n # Computing Average Ranks Per Team, Per Season\n average_rank = defaultdict(lambda: defaultdict(dict))\n for key1, value1 in rank_dict.iteritems():\n for key2, value2 in value1.iteritems():\n i = 0\n sumrank = 0\n for key3, value3 in value2.iteritems():\n sumrank += value3\n i += 1\n average_rank[key1][key2] = float(sumrank)/i\n\n s_df['Worank'] = ''\n s_df['Lorank'] = ''\n\n for index, row in s_df.iterrows():\n s_df.set_value(index, 'Worank', average_rank[row['Wteam']][row['Season']])\n s_df.set_value(index, 'Lorank', average_rank[row['Lteam']][row['Season']])\n\n return s_df\n\n\ndef get_season_efficiencies(s_df, threshold_games=1):\n\n log.info('Accumulating Season Efficiency Data........ ')\n\n season_cum_oeff = defaultdict(lambda: defaultdict(float))\n season_cum_deff = defaultdict(lambda: defaultdict(float))\n season_cum_games = defaultdict(lambda: defaultdict(int))\n daily_cum_oeff = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))\n daily_cum_deff = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))\n for index, row in s_df.iterrows():\n\n wteam = row['Wteam']\n lteam = row['Lteam']\n season = row['Season']\n day = row['Daynum']\n\n w_oeff = (row['Wscore'] * 100) / (row['Wfga'] - row['Wor'] + row['Wto'] + (0.475 * row['Wfta']))\n l_oeff = (row['Lscore'] * 100) / (row['Lfga'] - row['Lor'] + row['Lto'] + (0.475 * row['Lfta']))\n\n if not season_cum_games[wteam][season] < threshold_games:\n daily_cum_oeff[wteam][season][day] = season_cum_oeff[wteam][season]/season_cum_games[wteam][season]\n daily_cum_deff[wteam][season][day] = season_cum_deff[wteam][season]/season_cum_games[wteam][season]\n\n if not season_cum_games[lteam][season] < threshold_games:\n daily_cum_oeff[lteam][season][day] = season_cum_oeff[lteam][season]/season_cum_games[lteam][season]\n daily_cum_deff[lteam][season][day] = season_cum_deff[lteam][season]/season_cum_games[lteam][season]\n\n season_cum_oeff[wteam][season] += w_oeff\n season_cum_deff[wteam][season] += l_oeff\n season_cum_oeff[lteam][season] += l_oeff\n season_cum_deff[lteam][season] += w_oeff\n season_cum_games[wteam][season] += 1\n season_cum_games[lteam][season] += 1\n\n log.info('Computing Season Average Offense Efficiency Data........ ')\n\n # Computing season averages\n for key1, value1 in season_cum_oeff.iteritems():\n for key2, value2 in value1.iteritems():\n season_cum_oeff[key1][key2] /= season_cum_games[key1][key2]\n\n log.info('Computing Season Average Defense Efficiency Data........ ')\n\n # Computing season averages\n for key1, value1 in season_cum_deff.iteritems():\n for key2, value2 in value1.iteritems():\n season_cum_deff[key1][key2] /= season_cum_games[key1][key2]\n\n return season_cum_oeff, season_cum_deff, daily_cum_oeff, daily_cum_deff\n\n\ndef add_season_efficiency(s_df, apply_dynamic=True, apply_final=False):\n\n log.info('Adding Season Offensive & Defensive Efficiency Data........ 
')\n s_df['Woeff'] = ''\n s_df['Loeff'] = ''\n s_df['Wdeff'] = ''\n s_df['Ldeff'] = ''\n\n (s_oeff, s_deff, d_oeff, d_deff) = get_season_efficiencies(s_df, threshold_games=5)\n\n if apply_final:\n for index, row in s_df.iterrows():\n wteam = row['Wteam']\n lteam = row['Lteam']\n season = row['Season']\n s_df.set_value(index, 'Woeff', s_oeff[wteam][season])\n s_df.set_value(index, 'Loeff', s_oeff[lteam][season])\n s_df.set_value(index, 'Wdeff', s_deff[wteam][season])\n s_df.set_value(index, 'Ldeff', s_deff[lteam][season])\n\n if apply_dynamic:\n for index, row in s_df.iterrows():\n\n wteam = row['Wteam']\n lteam = row['Lteam']\n season = row['Season']\n day = row['Daynum']\n\n # Winning Team - Offensive Efficiency\n if d_oeff[wteam][season][day] == 0.0:\n if season > 2003:\n w_oeff = s_oeff[wteam][season-1]\n else:\n w_oeff = s_oeff[wteam][season]\n else:\n w_oeff = d_oeff[wteam][season][day]\n\n # Losing Team - Offensive Efficiency\n if d_oeff[lteam][season][day] == 0.0:\n if season > 2003:\n l_oeff = s_oeff[lteam][season-1]\n else:\n l_oeff = s_oeff[lteam][season]\n else:\n l_oeff = d_oeff[lteam][season][day]\n\n # Winning Team - Defensive Efficiency\n if d_deff[wteam][season][day] == 0.0:\n if season > 2003:\n w_deff = s_deff[wteam][season-1]\n else:\n w_deff = s_deff[wteam][season]\n else:\n w_deff = d_deff[wteam][season][day]\n\n # Losing Team - Defensive Efficiency\n if d_deff[lteam][season][day] == 0.0:\n if season > 2003:\n l_deff = s_deff[lteam][season-1]\n else:\n l_deff = s_deff[lteam][season]\n else:\n l_deff = d_deff[lteam][season][day]\n\n s_df.set_value(index, 'Woeff', w_oeff)\n s_df.set_value(index, 'Loeff', l_oeff)\n s_df.set_value(index, 'Wdeff', w_deff)\n s_df.set_value(index, 'Ldeff', l_deff)\n\n return s_df\n\n\ndef add_missing_kenpom_data(data, season, team):\n if season in data[team]:\n return data[team][season], season\n elif season < 2016:\n return add_missing_kenpom_data(data, season + 1, team)\n else:\n return data[team][season], season\n\n\ndef add_kenpom_data(src_df, kp_df, t_df, L_prefix, R_prefix, dump_kp_int_file=False):\n\n log.info('Adding Ken Pomeroy Data to main spreadsheet.....')\n\n # Merging Team ID data into Kenpom Data\n kp_df = pd.merge(left=kp_df, right=t_df, how='left', on=['team'])\n kp_df = kp_df.fillna({'team_id': randint(1500, 2000)}, inplace=True)\n\n team_kp = defaultdict(lambda: defaultdict(dict))\n\n for index, row in kp_df.iterrows():\n team_kp[row['team_id']][row['year']] = row\n\n # Changing column names to match with src_df\n kp_df.rename(columns={'team_id': L_prefix + 'team', 'year': 'Season'}, inplace=True)\n kp_cols = kp_df.columns.values\n\n # Writing out temporary file for debug\n if dump_kp_int_file:\n fname = 'kenpom_updated.csv'\n ofile = os.path.join(__src__, fname)\n kp_df.to_csv(ofile, index=False)\n\n # Changing team id type to match with season_df\n kp_df[L_prefix + 'team'] = kp_df[L_prefix + 'team'].astype('int64')\n\n # Performing kenpom data merge with season_df for L_prefix Team\n src_df = pd.merge(left=src_df, right=kp_df, how='left', on=['Season', L_prefix + 'team'])\n\n # Plugging missing data with available data\n for index, row in src_df.iterrows():\n if row['team'] is pd.np.nan:\n log.debug('Missing Kenpom data for team: %s, year: %s, Day: %s...', row[L_prefix + 'team'], row['Season'], row['Daynum'])\n (miss_data, year_used) = add_missing_kenpom_data(team_kp, row['Season'], row[L_prefix + 'team'])\n log.debug('Plugging data for team: %s from year: %s', row[L_prefix + 'team'], year_used)\n for key, value in 
miss_data.iteritems():\n if key not in ('team_id', 'year'):\n src_df.set_value(index, key, value)\n\n # Change column names in src_df to add L_prefix\n for col in kp_cols:\n if col not in ('Season', L_prefix + 'team'):\n src_df.rename(columns={col: L_prefix + col}, inplace=True)\n\n # Rename L_prefix team to R_prefix team to match R_prefix team in season_df\n kp_df.rename(columns={L_prefix + 'team': R_prefix + 'team'}, inplace=True)\n kp_cols = kp_df.columns.values\n\n # Performing kenpom data merge with season_df for R_prefix Team\n src_df = pd.merge(left=src_df, right=kp_df, how='left', on=['Season', R_prefix + 'team'])\n\n # Plugging missing data with available data\n for index, row in src_df.iterrows():\n if row['team'] is pd.np.nan:\n log.debug(row)\n log.debug('Missing Kenpom data for team: %s, year: %s, Day: %s...', row[R_prefix + 'team'], row['Season'], row['Daynum'])\n (miss_data, year_used) = add_missing_kenpom_data(team_kp, row['Season'], row[R_prefix + 'team'])\n log.debug('Plugging data for team: %s from year: %s', row[R_prefix + 'team'], year_used)\n for key, value in miss_data.iteritems():\n if key not in ('team_id', 'year'):\n log.debug('Index: %s, Key: %s, Value: %s', index, key, value)\n src_df.set_value(index, key, value)\n\n # Change column names in season_df to add L prefix\n for col in kp_cols:\n if col not in ('Season', R_prefix + 'team'):\n src_df.rename(columns={col: R_prefix + col}, inplace=True)\n\n return src_df\n\n###########\n\n\ndef add_tourney_elo(s_df, t_df):\n\n el = elo.Elo(10)\n team = defaultdict(lambda: defaultdict(dict))\n\n log.info('Adding Tourney Elo Rating........ ')\n\n for index, row in s_df.iterrows():\n (t1, t2) = (row['Wteam'], row['Lteam'])\n season = row['Season']\n\n if t1 not in team[season]:\n team[season][t1] = 1000.0\n if t2 not in team[season]:\n team[season][t2] = 1000.0\n\n # Computing the new Elo Rating\n (team[season][t1], team[season][t2]) = el.rate_1vs1(team[season][t1], team[season][t2])\n # log.debug('Post Game: Win Team Elo: %f Lose Team Elo: %f', team[t1], team[t2])\n\n t_df['AElo'] = ''\n t_df['BElo'] = ''\n\n for index, row in t_df.iterrows():\n # Recording Elo Rating Going Into The Game\n t_df.set_value(index, 'AElo', team[row['Season']][row['Ateam']])\n t_df.set_value(index, 'BElo', team[row['Season']][row['Bteam']])\n\n return t_df\n\n\ndef add_tourney_spreads(t_df, sp_df):\n\n log.info('Adding Tourney Spreads Data........ ')\n\n # Load cumulative spread\n (cum_spread, cum_games) = get_cum_spreads(sp_df)\n\n # Adding spreads to Tourney CSV\n t_df['Spread'] = ''\n for index, row in t_df.iterrows():\n ateam_ave_line = get_spread_ave(cum_games, cum_spread, row['Season'], row['Ateam'])\n bteam_ave_line = get_spread_ave(cum_games, cum_spread, row['Season'], row['Bteam'])\n t_df.set_value(index, 'Spread', (ateam_ave_line - bteam_ave_line))\n\n return t_df\n\n\ndef get_season_ranks(s_df):\n\n rank = defaultdict(lambda: defaultdict(lambda: defaultdict(dict)))\n for index, row in s_df.iterrows():\n rank[row['Season']][row['Wteam']] = row['Worank']\n rank[row['Season']][row['Lteam']] = row['Lorank']\n\n return rank\n\n\ndef add_tourney_massey(t_df, s_df):\n\n log.info('Adding Tourney Massey Data........ 
')\n\n # Load season end ranks\n season_ranks = get_season_ranks(s_df)\n\n # Adding spreads to Tourney CSV\n t_df['Aorank'] = ''\n t_df['Borank'] = ''\n for index, row in t_df.iterrows():\n aorank = season_ranks[row['Season']][row['Ateam']]\n borank = season_ranks[row['Season']][row['Bteam']]\n t_df.set_value(index, 'Aorank', aorank)\n t_df.set_value(index, 'Borank', borank)\n\n return t_df\n\n\ndef add_tourney_efficiency(t_df, s_df):\n\n log.info('Adding Tourney Efficiency Data........ ')\n\n # Load season end ranks\n (s_oeff, s_deff, d_oeff, d_deff) = get_season_efficiencies(s_df, threshold_games=5)\n\n # Adding spreads to Tourney CSV\n t_df['Aoeff'] = ''\n t_df['Boeff'] = ''\n t_df['Adeff'] = ''\n t_df['Bdeff'] = ''\n for index, row in t_df.iterrows():\n t_df.set_value(index, 'Aoeff', s_oeff[row['Ateam']][row['Season']])\n t_df.set_value(index, 'Boeff', s_oeff[row['Bteam']][row['Season']])\n t_df.set_value(index, 'Adeff', s_deff[row['Ateam']][row['Season']])\n t_df.set_value(index, 'Bdeff', s_deff[row['Bteam']][row['Season']])\n\n return t_df\n\n\n#########################################################################################################\nif __name__ == \"__main__\":\n\n print('Current Working Directory: %s' % os.getcwd())\n\n # Setup Logging\n logging_config_file = os.path.join(__utils__, 'logging.conf')\n logging.config.fileConfig(logging_config_file, disable_existing_loggers=False)\n # log = logging.getLogger(__name__)\n\n # Stage 1: Load Base Data\n build_season_elo_data = False\n build_season_spreads_data = False\n build_season_massey_data = False\n build_season_efficiency_data = False\n build_season_kenpom_data = True\n\n season_elo_apply_dynamic = False\n season_elo_apply_final = True\n\n season_rank_apply_dynamic = False\n season_rank_apply_final = True\n\n season_efficiency_apply_dynamic = False\n season_efficiency_apply_final = True\n\n dump_kenpom_intermediate_file = False\n\n # Stage 2: Load Spreads Data\n build_tourney_elo_data = False\n build_tourney_spreads_data = False\n build_tourney_massey_data = False\n build_tourney_efficiency_data = False\n build_tourney_kenpom_data = True\n\n __seasonfile__ = 'ncaa_season_data'\n __test1file__ = 'test_data_stage1'\n __test2file__ = 'test_data_stage2'\n\n#########\n\n season_df = ''\n\n # Load Elo Data\n if build_season_elo_data:\n\n in_data_file = os.path.join(__src__, 'RegularSeasonDetailedResults.csv')\n season_df = sf.load_model_data(in_data_file)\n\n season_df = add_season_elo(season_df,\n apply_dynamic=season_elo_apply_dynamic,\n apply_final=season_elo_apply_final)\n\n # Output New CSV with Elo Ratings\n filename = __seasonfile__ + '_mod1.csv'\n out_data_file = os.path.join(__src__, filename)\n season_df.to_csv(out_data_file, index=False)\n\n # Load Season Spreads Data\n if build_season_spreads_data:\n\n # Load Stage 1 CSV data if it wasn't generated in this run\n if not build_season_elo_data:\n filename = __seasonfile__ + '_mod1.csv'\n in_data_file = os.path.join(__src__, filename)\n season_df = sf.load_model_data(in_data_file)\n\n # Call add_spread()\n in_data_file = os.path.join(__src__, 'ThePredictionTrackerPointspreads.csv')\n spread_df = sf.load_model_data(in_data_file)\n season_df = add_season_spread(season_df, spread_df)\n\n # Output New CSV with Spreads Data\n filename = __seasonfile__ + '_mod2.csv'\n out_data_file = os.path.join(__src__, filename)\n season_df.to_csv(out_data_file, index=False)\n\n # Load Season Massey Ordinals Data\n if build_season_massey_data:\n\n if not build_season_spreads_data:\n 
filename = __seasonfile__ + '_mod2.csv'\n in_data_file = os.path.join(__src__, filename)\n season_df = sf.load_model_data(in_data_file)\n\n # Call add_season_massey()\n in_data_file = os.path.join(__src__, 'massey_ordinals_2003-2015.csv')\n massey_df = sf.load_model_data(in_data_file)\n season_df = add_season_massey(season_df, massey_df,\n apply_dynamic=season_rank_apply_dynamic,\n apply_final=season_rank_apply_final)\n\n # Output New CSV with Massey Data\n filename = __seasonfile__ + '_mod3.csv'\n out_data_file = os.path.join(__src__, filename)\n season_df.to_csv(out_data_file, index=False)\n\n # Compute Offensive & Defensive Efficiency Data\n if build_season_efficiency_data:\n\n if not build_season_massey_data:\n filename = __seasonfile__ + '_mod3.csv'\n in_data_file = os.path.join(__src__, filename)\n season_df = sf.load_model_data(in_data_file)\n\n season_df = add_season_efficiency(season_df,\n apply_dynamic=season_efficiency_apply_dynamic,\n apply_final=season_efficiency_apply_final)\n\n # Output New CSV with Efficiency Data\n filename = __seasonfile__ + '_mod4.csv'\n out_data_file = os.path.join(__src__, filename)\n season_df.to_csv(out_data_file, index=False)\n\n # Add Ken Pomeroy Data\n if build_season_kenpom_data:\n\n if not build_season_efficiency_data:\n filename = __seasonfile__ + '_mod4.csv'\n in_data_file = os.path.join(__src__, filename)\n season_df = sf.load_model_data(in_data_file)\n\n in_data_file = os.path.join(__src__, 'kenpom.csv')\n kenpom_df = sf.load_model_data(in_data_file)\n\n in_data_file = os.path.join(__src__, 'Teams.csv')\n teams_df = sf.load_model_data(in_data_file)\n\n season_df = add_kenpom_data(season_df, kenpom_df, teams_df, 'W', 'L',\n dump_kp_int_file=dump_kenpom_intermediate_file)\n\n # Output New CSV with Efficiency Data\n filename = __seasonfile__ + '_mod5.csv'\n out_data_file = os.path.join(__src__, filename)\n season_df.to_csv(out_data_file, index=False)\n\n#########\n\n tourney_df = ''\n spread_df = ''\n\n # Load Tourney Elo Data. Use season final elo rating for each team\n if build_tourney_elo_data:\n\n in_data_file = os.path.join(__src__, 'RegularSeasonDetailedResults.csv')\n season_df = sf.load_model_data(in_data_file)\n\n filename = __test1file__ + '.csv'\n in_data_file = os.path.join(__src__, filename)\n tourney_df = sf.load_model_data(in_data_file)\n\n tourney_df = add_tourney_elo(season_df, tourney_df)\n\n # Output New CSV with Elo Ratings\n filename = __test1file__ + '_mod1.csv'\n out_data_file = os.path.join(__src__, filename)\n tourney_df.to_csv(out_data_file, index=False)\n\n # Load Tourney Spreads Data. Use season average spreads for each team to compute\n if build_tourney_spreads_data:\n\n if not build_tourney_elo_data:\n filename = __test1file__ + '_mod1.csv'\n in_data_file = os.path.join(__src__, filename)\n tourney_df = sf.load_model_data(in_data_file)\n\n if not build_season_spreads_data:\n in_data_file = os.path.join(__src__, 'ThePredictionTrackerPointspreads.csv')\n spread_df = sf.load_model_data(in_data_file)\n\n tourney_df = add_tourney_spreads(tourney_df, spread_df)\n\n filename = __test1file__ + '_mod2.csv'\n out_data_file = os.path.join(__src__, filename)\n tourney_df.to_csv(out_data_file, index=False)\n\n # Load Tourney Massey Data. 
Use season end ranks for each team\n if build_tourney_massey_data:\n\n if not build_tourney_spreads_data:\n filename = __test1file__ + '_mod2.csv'\n in_data_file = os.path.join(__src__, filename)\n tourney_df = sf.load_model_data(in_data_file)\n\n if not build_season_massey_data:\n filename = __seasonfile__ + '_mod3.csv'\n in_data_file = os.path.join(__src__, filename)\n season_df = sf.load_model_data(in_data_file)\n\n tourney_df = add_tourney_massey(tourney_df, season_df)\n\n filename = __test1file__ + '_mod3.csv'\n out_data_file = os.path.join(__src__, filename)\n tourney_df.to_csv(out_data_file, index=False)\n\n # Load Tourney Data. Use season end ranks for each team\n if build_tourney_efficiency_data:\n\n if not build_tourney_massey_data:\n filename = __test1file__ + '_mod3.csv'\n in_data_file = os.path.join(__src__, filename)\n tourney_df = sf.load_model_data(in_data_file)\n\n if not build_season_efficiency_data:\n filename = __seasonfile__ + '_mod4.csv'\n in_data_file = os.path.join(__src__, filename)\n season_df = sf.load_model_data(in_data_file)\n\n tourney_df = add_tourney_efficiency(tourney_df, season_df)\n\n filename = __test1file__ + '_mod4.csv'\n out_data_file = os.path.join(__src__, filename)\n tourney_df.to_csv(out_data_file, index=False)\n\n # Add Ken Pomeroy Data\n if build_tourney_kenpom_data:\n\n if not build_tourney_efficiency_data:\n filename = __test1file__ + '_mod4.csv'\n in_data_file = os.path.join(__src__, filename)\n tourney_df = sf.load_model_data(in_data_file)\n\n in_data_file = os.path.join(__src__, 'kenpom.csv')\n kenpom_df = sf.load_model_data(in_data_file)\n\n in_data_file = os.path.join(__src__, 'Teams.csv')\n teams_df = sf.load_model_data(in_data_file)\n\n tourney_df = add_kenpom_data(tourney_df, kenpom_df, teams_df, 'A', 'B',\n dump_kp_int_file=dump_kenpom_intermediate_file)\n\n # Output New CSV with Efficiency Data\n filename = __test1file__ + '_mod5.csv'\n out_data_file = os.path.join(__src__, filename)\n tourney_df.to_csv(out_data_file, index=False)\n\n#########\n","sub_path":"projects/march_madness_comp/etl/proc_data.py","file_name":"proc_data.py","file_ext":"py","file_size_in_byte":29340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"222854329","text":"import os\nimport shutil\nimport random\n\n\ns = './1'\nd = './2'\ncnt = 200\nf = os.listdir(s)\nrandom.shuffle(f)\n\n\nfor _ in f:\n if not cnt:\n break\n shutil.move(os.path.join(s, _), os.path.join(d, _))\n cnt -= 1\n","sub_path":"python/shuffle_files.py","file_name":"shuffle_files.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"575504207","text":"from datetime import datetime, timedelta\n\nimport pytest\nfrom flask import g\nfrom sqlalchemy.util import OrderedSet\n\nfrom ereuse_devicehub.db import db\nfrom ereuse_devicehub.resources.device.models import Device, GraphicCard, HardDrive, Microtower, \\\n RamModule, SolidStateDrive\nfrom ereuse_devicehub.resources.enums import TestHardDriveLength\nfrom ereuse_devicehub.resources.event.models import BenchmarkDataStorage, EraseBasic, EraseSectors, \\\n EventWithOneDevice, Install, Ready, StepZero, StressTest, TestDataStorage\nfrom tests.conftest import create_user\n\n\n@pytest.mark.usefixtures('app_context')\ndef test_author():\n \"\"\"\n Checks the default created author.\n\n Note that the author can be accessed after inserting the row.\n \"\"\"\n user = create_user()\n g.user = user\n e = 
EventWithOneDevice(device=Device())\n db.session.add(e)\n assert e.author is None\n assert e.author_id is None\n db.session.commit()\n assert e.author == user\n\n\n@pytest.mark.usefixtures('auth_app_context')\ndef test_erase_basic():\n erasure = EraseBasic(\n device=HardDrive(serial_number='foo', manufacturer='bar', model='foo-bar'),\n clean_with_zeros=True,\n start_time=datetime.now(),\n end_time=datetime.now(),\n secure_random_steps=25,\n error=False\n )\n db.session.add(erasure)\n db.session.commit()\n db_erasure = EraseBasic.query.one()\n assert erasure == db_erasure\n assert next(iter(db_erasure.device.events)) == erasure\n\n\n@pytest.mark.usefixtures('auth_app_context')\ndef test_validate_device_data_storage():\n \"\"\"Checks the validation for data-storage-only events works.\"\"\"\n # We can't set a GraphicCard\n with pytest.raises(TypeError,\n message='EraseBasic.device must be a DataStorage '\n 'but you passed '):\n EraseBasic(\n device=GraphicCard(serial_number='foo', manufacturer='bar', model='foo-bar'),\n clean_with_zeros=True,\n start_time=datetime.now(),\n end_time=datetime.now(),\n secure_random_steps=25,\n error=False\n )\n\n\n@pytest.mark.usefixtures('auth_app_context')\ndef test_erase_sectors_steps():\n erasure = EraseSectors(\n device=SolidStateDrive(serial_number='foo', manufacturer='bar', model='foo-bar'),\n clean_with_zeros=True,\n start_time=datetime.now(),\n end_time=datetime.now(),\n secure_random_steps=25,\n error=False,\n steps=[\n StepZero(error=False,\n start_time=datetime.now(),\n end_time=datetime.now(),\n secure_random_steps=1,\n clean_with_zeros=True),\n StepZero(error=False,\n start_time=datetime.now(),\n end_time=datetime.now(),\n secure_random_steps=2,\n clean_with_zeros=True),\n StepZero(error=False,\n start_time=datetime.now(),\n end_time=datetime.now(),\n secure_random_steps=3,\n clean_with_zeros=True)\n ]\n )\n db.session.add(erasure)\n db.session.commit()\n db_erasure = EraseSectors.query.one()\n # Steps are in order\n assert db_erasure.steps[0].secure_random_steps == 1\n assert db_erasure.steps[0].num == 0\n assert db_erasure.steps[1].secure_random_steps == 2\n assert db_erasure.steps[1].num == 1\n assert db_erasure.steps[2].secure_random_steps == 3\n assert db_erasure.steps[2].num == 2\n\n\n@pytest.mark.usefixtures('auth_app_context')\ndef test_test_data_storage():\n test = TestDataStorage(\n device=HardDrive(serial_number='foo', manufacturer='bar', model='foo-bar'),\n error=False,\n elapsed=timedelta(minutes=25),\n length=TestHardDriveLength.Short,\n status='OK!',\n lifetime=timedelta(days=120)\n )\n db.session.add(test)\n db.session.commit()\n assert TestDataStorage.query.one()\n\n\n@pytest.mark.usefixtures('auth_app_context')\ndef test_install():\n hdd = HardDrive(serial_number='sn')\n install = Install(name='LinuxMint 18.04 es',\n elapsed=timedelta(seconds=25),\n device=hdd)\n db.session.add(install)\n db.session.commit()\n\n\n@pytest.mark.usefixtures('auth_app_context')\ndef test_update_components_event_one():\n computer = Microtower(serial_number='sn1', model='ml1', manufacturer='mr1')\n hdd = HardDrive(serial_number='foo', manufacturer='bar', model='foo-bar')\n computer.components.add(hdd)\n\n # Add event\n test = StressTest(elapsed=timedelta(seconds=1))\n computer.events_one.add(test)\n assert test.device == computer\n assert next(iter(test.components)) == hdd, 'Event has to have new components'\n\n # Remove event\n computer.events_one.clear()\n assert not test.device\n assert not test.components, 'Event has to loose the components'\n\n # 
If we add a component to a device AFTER assigning the event\n # to the device, the event doesn't get the new component\n computer.events_one.add(test)\n ram = RamModule()\n computer.components.add(ram)\n assert len(test.components) == 1\n\n\n@pytest.mark.usefixtures('auth_app_context')\ndef test_update_components_event_multiple():\n computer = Microtower(serial_number='sn1', model='ml1', manufacturer='mr1')\n hdd = HardDrive(serial_number='foo', manufacturer='bar', model='foo-bar')\n computer.components.add(hdd)\n\n ready = Ready()\n assert not ready.devices\n assert not ready.components\n\n # Add\n computer.events_multiple.add(ready)\n assert ready.devices == OrderedSet([computer])\n assert next(iter(ready.components)) == hdd\n\n # Remove\n computer.events_multiple.remove(ready)\n assert not ready.devices\n assert not ready.components\n\n # init / replace collection\n ready.devices = OrderedSet([computer])\n assert ready.devices\n assert ready.components\n\n\n@pytest.mark.usefixtures('auth_app_context')\ndef test_update_parent():\n computer = Microtower(serial_number='sn1', model='ml1', manufacturer='mr1')\n hdd = HardDrive(serial_number='foo', manufacturer='bar', model='foo-bar')\n computer.components.add(hdd)\n\n # Add\n benchmark = BenchmarkDataStorage()\n benchmark.device = hdd\n assert benchmark.parent == computer\n assert not benchmark.components\n\n # Remove\n benchmark.device = None\n assert not benchmark.parent\n","sub_path":"tests/test_event.py","file_name":"test_event.py","file_ext":"py","file_size_in_byte":6505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"554721171","text":"#\n# Copyright (C) 2018 Pico Technology Ltd. See LICENSE file for terms.\n#\n# PS2000 BLOCK MODE EXAMPLE\n# This example opens a 2000 driver device, sets up two channels and a trigger then collects a block of data.\n# This data is then plotted as mV against time in ns.\n\nimport ctypes\nimport numpy as np\nfrom picosdk.ps2000 import ps2000 as ps\nimport matplotlib.pyplot as plt\nfrom picosdk.functions import adc2mV, assert_pico2000_ok\nimport time\nimport csv\nfrom scipy.optimize import leastsq\n\ndef fit_sin_to_data(timebase,data, guess_phase = 0, guess_freq = 1, guess_amp = 1):\n\n N = 1000 # number of data points\n t = timebase # nanoseconds\n guess_mean = np.mean(data)\n guess_std = 3*np.std(data)/(2**0.5)/(2**0.5)\n\n # we'll use this to plot our first estimate. 
This might already be good enough for you\n data_first_guess = guess_std*np.sin((t+guess_phase)*guess_freq) + guess_mean\n\n # Define the function to optimize, in this case, we want to minimize the difference\n # between the actual data and our \"guessed\" parameters\n optimize_func = lambda x: x[0]*np.sin(x[1]*t+x[2]) + x[3] - data\n est_amp, est_freq, est_phase, est_mean = leastsq(optimize_func, [guess_amp, guess_freq, guess_phase, guess_mean])[0]\n\n # recreate the fitted curve using the optimized parameters\n data_fit = est_amp*np.sin(est_freq*t+est_phase) + est_mean\n\n # recreate the fitted curve using the optimized parameters\n\n fine_t = timebase\n data_fit=est_amp*np.sin(est_freq*fine_t+est_phase)+est_mean\n\n return data_fit\n\n# Create status ready for use\nstatus = {}\n\n# Open 2000 series PicoScope\n# Returns handle to chandle for use in future API functions\nstatus[\"openUnit\"] = ps.ps2000_open_unit()\nassert_pico2000_ok(status[\"openUnit\"])\n\n# Create chandle for use\nchandle = ctypes.c_int16(status[\"openUnit\"])\ntry:\n while True:\n # Set up channel A\n # handle = chandle\n # channel = PS2000_CHANNEL_A = 0\n # enabled = 1\n # coupling type = PS2000_DC = 1 CHANGED TO AC\n # range = PS2000_2V = 7\n # analogue offset = 0 V\n chARange = 8\n status[\"setChA\"] = ps.ps2000_set_channel(chandle, 0, 1, 0, chARange)\n assert_pico2000_ok(status[\"setChA\"])\n\n # Set up channel B\n # handle = chandle\n # channel = PS2000_CHANNEL_B = 1\n # enabled = 1\n # coupling type = PS2000_DC = 1\n # range = PS2000_2V = 7\n # analogue offset = 0 V\n chBRange = 7\n status[\"setChB\"] = ps.ps2000_set_channel(chandle, 1, 1, 1, chBRange)\n assert_pico2000_ok(status[\"setChB\"])\n\n # Set up single trigger\n # handle = chandle\n # source = PS2000_CHANNEL_A = 0\n # threshold = 1024 ADC counts\n # direction = PS2000_RISING = 0\n # delay = 0 s\n # auto Trigger = 1000 ms\n status[\"trigger\"] = ps.ps2000_set_trigger(chandle, 0, 64, 0, 0, 1000)\n assert_pico2000_ok(status[\"trigger\"])\n\n # Set number of pre and post trigger samples to be collected\n preTriggerSamples = 1000\n postTriggerSamples = 1000\n maxSamples = preTriggerSamples + postTriggerSamples\n\n # Get timebase information\n # handle = chandle\n # timebase = 8 = timebase\n # no_of_samples = maxSamples\n # pointer to time_interval = ctypes.byref(timeInterval)\n # pointer to time_units = ctypes.byref(timeUnits)\n # oversample = 1 = oversample\n # pointer to max_samples = ctypes.byref(maxSamplesReturn)\n timebase = 13\n timeInterval = ctypes.c_int32()\n timeUnits = ctypes.c_int32()\n oversample = ctypes.c_int16(1)\n maxSamplesReturn = ctypes.c_int32()\n status[\"getTimebase\"] = ps.ps2000_get_timebase(chandle, timebase, maxSamples, ctypes.byref(timeInterval), ctypes.byref(timeUnits), oversample, ctypes.byref(maxSamplesReturn))\n assert_pico2000_ok(status[\"getTimebase\"])\n\n\n # Run block capture\n # handle = chandle\n # no_of_samples = maxSamples\n # timebase = timebase\n # oversample = oversample\n # pointer to time_indisposed_ms = ctypes.byref(timeIndisposedms)\n timeIndisposedms = ctypes.c_int32()\n status[\"runBlock\"] = ps.ps2000_run_block(chandle, maxSamples, timebase, oversample, ctypes.byref(timeIndisposedms))\n assert_pico2000_ok(status[\"runBlock\"])\n\n # Check for data collection to finish using ps5000aIsReady\n ready = ctypes.c_int16(0)\n check = ctypes.c_int16(0)\n while ready.value == check.value:\n status[\"isReady\"] = ps.ps2000_ready(chandle)\n ready = ctypes.c_int16(status[\"isReady\"])\n\n # Create buffers ready for 
data\n bufferA = (ctypes.c_int16 * maxSamples)()\n bufferB = (ctypes.c_int16 * maxSamples)()\n\n # Get data from scope\n # handle = chandle\n # pointer to buffer_a = ctypes.byref(bufferA)\n # pointer to buffer_b = ctypes.byref(bufferB)\n # poiner to overflow = ctypes.byref(oversample)\n # no_of_values = cmaxSamples\n cmaxSamples = ctypes.c_int32(maxSamples)\n status[\"getValues\"] = ps.ps2000_get_values(chandle, ctypes.byref(bufferA), ctypes.byref(bufferB), None, None, ctypes.byref(oversample), cmaxSamples)\n assert_pico2000_ok(status[\"getValues\"])\n\n # find maximum ADC count value\n maxADC = ctypes.c_int16(32767)\n\n # convert ADC counts data to mV\n adc2mVChA = adc2mV(bufferA, chARange, maxADC)\n adc2mVChB = adc2mV(bufferB, chBRange, maxADC)\n\n # Create time data\n timebase = np.linspace(0, (cmaxSamples.value) * timeInterval.value, cmaxSamples.value)\n fit = fit_sin_to_data(timebase, adc2mVChA, guess_freq = 0.00000031, guess_amp = 2300)\n \n idx = np.argmax(fit)\n\n if idx is not None:\n print(\"idx = %d\"%(idx))\n valB = adc2mVChB[:][idx]\n valA = adc2mVChA[:][idx]\n print(idx, valA, valB, valB/3060)\n\n try:\n row = [time.ctime(), time.time(), valA, valB] \n with open('log-raw.csv', 'a') as f:\n w = csv.writer(f)\n w.writerow(row)\n except Exception as e:\n print(e)\n \n time.sleep(1)\nexcept Exception as e:\n print(e)\n# plot data from channel A and B\nmarkers_on = [idx]\nplt.plot(timebase, adc2mVChA[:],'-gD', markevery=markers_on)\nplt.plot(timebase, adc2mVChB[:],'-bD', markevery=markers_on)\nplt.plot(timebase, fit[:],'-rD', markevery=markers_on)\nplt.xlabel('Time (ns)')\nplt.ylabel('Voltage (mV)')\nplt.show()\n\n# Stop the scope\n# handle = chandle\nstatus[\"stop\"] = ps.ps2000_stop(chandle)\nassert_pico2000_ok(status[\"stop\"])\n\n# Close unitDisconnect the scope\n# handle = chandle\nstatus[\"close\"] = ps.ps2000_close_unit(chandle)\nassert_pico2000_ok(status[\"close\"])\n\n# display status returns\nprint(status)\n","sub_path":"ps2000Examples/ps2000Logger-sin-fit.py","file_name":"ps2000Logger-sin-fit.py","file_ext":"py","file_size_in_byte":6870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"422421662","text":"from django.test import TestCase\nfrom django.conf import settings\n\nfrom shutil import rmtree\n\nimport os\nimport json\n\nfrom scraper import utils, models\n\n\nLOCAL_HOST = 'http://127.0.0.1:8000/'\nDATA_URL = \"\"\"https://raw.githubusercontent.com/zniper/django-scraper/master/scraper/test_data/\"\"\"\nDATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n 'test_data')\n\n# For future use, with real web server\n# def start_local_site(path=''):\n# \"\"\" Just a simple local site for testing HTTP requests \"\"\"\n# PORT = 8000\n# handler = SimpleHTTPServer.SimpleHTTPRequestHandler\n# httpd = SocketServer.TCPServer(('', PORT), handler)\n# print 'Local test server is up at', PORT\n# httpd.serve_forever()\n\n\ndef get_path(file_name):\n return os.path.join(DATA_DIR, file_name)\n\n\nclass UserAgentTests(TestCase):\n\n def test_create(self):\n ua = models.UserAgent(name='Test UA', value='UA string')\n ua.save()\n self.assertNotEqual(ua.pk, None)\n\n\nclass ProxyServerTests(TestCase):\n\n def test_create(self):\n proxy = models.ProxyServer(\n name='Test Proxy',\n address='Proxy address',\n port=8080,\n protocol='http'\n )\n proxy.save()\n self.assertNotEqual(proxy.pk, None)\n\n\nclass ExtractorLocalTests(TestCase):\n\n @classmethod\n def setUpClass(self):\n target_file = get_path('yc.0.html')\n 
self.extractor = utils.Extractor(target_file)\n\n @classmethod\n def tearDownClass(self):\n location = self.extractor.get_location()\n if os.path.exists(location):\n rmtree(location)\n\n def tearDown(self):\n self.extractor.set_location(self.current_location)\n\n def setUp(self):\n self.current_location = self.extractor.get_location()\n\n def test_parse_content(self):\n self.assertNotEqual(self.extractor.hash_value, '')\n self.assertNotEqual(self.extractor.root, None)\n\n def test_set_location(self):\n self.extractor.set_location('/new/location')\n self.assertEquals(self.extractor.get_location(), '/new/location')\n\n def test_complete_url_no_http(self):\n tmp = self.extractor._url\n self.extractor._url = 'http://google.com'\n url = self.extractor.complete_url('search/me')\n self.assertEqual(url, 'http://google.com/search/me')\n self.extractor._url = tmp\n\n def test_complete_url_good(self):\n url = self.extractor.complete_url('http://google.com')\n self.assertEqual(url, 'http://google.com')\n\n def test_complete_url_https(self):\n url = self.extractor.complete_url('https://google.com')\n self.assertEqual(url, 'https://google.com')\n\n def test_extract_links_no_expand(self):\n links = self.extractor.extract_links()\n self.assertEqual(len(links), 81)\n self.assertEqual(links[0]['url'],\n 'https://posthaven.com/')\n self.assertEqual(links[19]['url'],\n 'http://www.fastcompany.com/3042861/the-y-combinator-chronicles/the-secret-million-that-y-combinator-invests-in-all-its-startups')\n self.assertEqual(links[19]['text'],\n u'Transcriptic\\xc2\\xa0(YC W15) and the array of free services for new YC startups')\n\n def test_get_path(self):\n file_path = self.extractor.get_path(__file__)\n self.assertGreater(len(file_path), 0)\n\n def test_prepare_directory(self):\n self.extractor.prepare_directory()\n self.assertEqual(os.path.exists(self.extractor.get_location()), True)\n\n def test_prepare_directory_existing(self):\n test_file = os.path.join(self.current_location, 'new_file')\n self.extractor.prepare_directory()\n f0 = open(test_file, 'w')\n f0.close()\n # recall the prepare directory\n self.extractor.prepare_directory()\n self.assertEqual(os.path.exists(self.current_location), True)\n self.assertEqual(os.path.exists(test_file), False)\n\n def test_refine_content(self):\n with open(get_path('yc.0.html'), 'r') as index:\n content = index.read()\n self.assertNotEqual(content.find(\"
\"), -1)\n self.assertNotEqual(content.find(\"
\"),\n -1)\n self.assertNotEqual(content.find(\"
\n\t\t\t\t \"\"\" + descText + \"\"\"\n\t\t\t\t true\n\t\t\t\t\\n\"\"\")\n\t\treturn xmlList # Updated List with new Comment Tag\n\telse:\n\t\t#print(\"Comment already present, hence, skipping.....\")\n\t\treturn \"SKIP\"\n\t\t\t\ndef WriteScript(scriptLinebyLine, scriptFilePath):\n\t\"\"\"This function opens the Script file and overwrites the entire script line by line based on List object passed as argument.\"\"\"\n\twith open(scriptFilePath, 'w', encoding='utf-8') as xmlScript_file:\t\t\n\t\tfor eachLine in scriptLinebyLine:\n\t\t\txmlScript_file.write(eachLine)\n\t#print(\"Write Success!\")\n\t\ndef fetchDescriptionValue(inputFile):\n\t\"\"\"This function is used to take a filename, parse for Description tag and fetch its value\"\"\"\n\ttree = ET.parse(inputFile) \n\n\troot = tree.getroot()\n\t\n\tfor description in root.iter('Description'): \n\t\tscriptIntent = description.text\n\t\t\n\t\tif scriptIntent is not None: \t\t\t # To check if there is an empty Description block in Script XML\n\t\t\toriginalDescription = html.escape(scriptIntent) # Escaping the special characters in text using the html.escape module.\n\t\t\thtmlFriendlyText = originalDescription.replace(\"\\n\", \"<br>\") # adding
for all the newline character found within description since, it will look better in Duckcall.\n\t\t\treturn htmlFriendlyText\n\t\telse:\n\t\t\treturn \"Objective is BLANK!\"\n\t\ndef returnTestScriptFirstLine(sFilePath):\n\t\"\"\"This function is used to read and return only the first line from the given FullFilePath\"\"\"\n\twith open(sFilePath, 'rb') as evTestFH: # opens in binary mode to read bytes\n\t\tLine1 = evTestFH.readline().decode('utf8') # converts bytes to Unicode UTF-8 encoding\n\t\tLine1 = Line1.strip()\n\t\treturn Line1\n\t\n# -- -- Main Program starts below ---\naddRemoveFlag = 0 # 0 = Add Comment step, don't remove. \n#addRemoveFlag = 1 # 1 = Remove Comment step.\n\nlogAddComment = []\nlogSkipRemoveComment = []\n\n#folderPath = 'C:\\\\UserArea\\\\1_Automation_Tools\\\\7_GitHub\\\\WorkRelated\\\\1_TestFiles\\\\'\nfolderPath = 'C:\\\\AFS.Claims\\\\Main\\\\TestScripts\\\\Automated TestScripts\\\\US_Locale_Scripts\\\\Automated_Scripts\\\\Assembly_Testing\\\\Manual_AT\\\\'\n\nfor (dirpath, dirnames, filenames) in walk(folderPath): # get all filenames within the given folder into list\n\tfor name in filenames:\n\t\tif name.find(\".xml\") > 0 or name.find(\".XML\") > 0:\t\t\t\t\t\t # Work only with xml files in a directory\n\t\t\tFullFilePath = os.path.join(dirpath, name) # Join the path with filename\n\n\t\t\txmlLine1 = returnTestScriptFirstLine(FullFilePath)\t\t\t\n\t\t\tif \" [B, L, E]\n self.fc_in = torch.nn.Linear(embedding_size + z_size, embedding_size)\n\n if self.rnn_type == torch.nn.LSTM:\n self.fc_h0 = torch.nn.Linear(z_size, hidden_size * 2)\n else:\n self.fc_h0 = torch.nn.Linear(z_size, hidden_size)\n\n self.rnn = rnn_type(\n input_size=embedding_size,\n hidden_size=hidden_size,\n num_layers=num_layers,\n batch_first=True,\n )\n self.fc_out = torch.nn.Linear(hidden_size, output_size)\n\n def _forward(self, x, z, h0=None):\n \"\"\"\n Use for both forward pass and sampling/training without teacher forcing.\n Construct initial hidden state h0 from z if no h0 is given.\n \"\"\"\n # x = [B, L], z = [B, Z]\n # Initial hidden state from z\n if h0 is None:\n h0 = torch.tanh(self.fc_h0(z))\n h0 = h0.expand((self.num_layers, *h0.shape))\n if self.rnn_type == torch.nn.LSTM:\n # If lstm, split to hidden and cell state\n h0 = torch.chunk(h0, 2, dim=-1)\n h0 = tuple([t.contiguous() for t in h0])\n\n # Embedding\n if self.p_word_dropout:\n x = self.word_dropout(x)\n x_emb = self.dropout_emb(self.embedding(x))\n\n # Concatenate z every timestep\n if self.z_every_step:\n z_exp = z.unsqueeze(1).expand((-1, x_emb.size(1), -1))\n x_emb = self.fc_in(torch.cat([x_emb, z_exp], dim=-1))\n\n # RNN\n out, h_out = self.rnn(x_emb, h0)\n out = self.fc_out(out)\n return out, h_out\n\n def forward(self, x, z=None):\n return self._forward(x, z)[0]\n\n def sample(self, x, z=None, length=60, temperature=1):\n outcome = torch.zeros(x.size(0), length).long()\n outcome[:, 0] = x.squeeze(-1)\n\n with torch.no_grad():\n h0 = None\n # For step in length, generate new sample with previous x and\n # hidden state\n for i in range(1, length):\n out, h0 = self._forward(x, z, h0=h0)\n out = torch.softmax(out / temperature, dim=-1).squeeze(1)\n x = torch.multinomial(out, 1)\n outcome[:, i] = x.squeeze(-1).to(outcome.device)\n\n return outcome\n","sub_path":"probabll/dgm/conditioners/rnn.py","file_name":"rnn.py","file_ext":"py","file_size_in_byte":8425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"618579061","text":"from browser.html import *\nfrom browser 
import document as doc\nfrom browser import window\nfrom browser import timer\nfrom browser.local_storage import storage as ls\n\n#Used to attach extra functions to html elements\ndef upgrade(e):\n #for e in [_el]+_el.get(selector='*'):\n if 'upgraded' in e:\n return e\n \n #Override ID only searching\n def _get(_index):\n if isinstance(_index,int):\n return e.children[_index]\n return e.get(selector=_index)\n e.__getitem__=_get\n\n #Assign styles\n def _sty(_style,_selector=None):\n if _selector:\n for i in e[_selector]:\n i.style=_style\n else:\n e.style=_style\n return e\n e.sty=_sty\n \n def _cla(_classes,_selector=None):\n if _selector:\n for i in e[_selector]:\n i.cla(_classes)\n else:\n _temp = e.class_name.split(' ')\n \n for i in _classes:\n if i[0]=='-':\n if i[1:] in _temp:\n _temp.remove(i[1:])\n else:\n if i not in _temp:\n _temp.append(i)\n \n e.class_name=' '.join(_temp)\n return e\n e.cla=_cla\n \n def _atr(_attribute, _new_val=False):\n if _new_val is not False and _new_val != False:\n e.setAttribute(_attribute, _new_val)\n return e\n elif _new_val is False:\n return e.getAttribute(_attribute)\n else:\n e.removeAttribute(_attribute)\n return e\n e.atr=_atr\n \n def _set_val(_val):\n if _val is not False:\n e.value = _val\n return e\n e.set_val = _set_val\n \n def _set_txt(_val):\n if _val is not False:\n e.text = _val\n return e\n e.set_txt = _set_txt\n \n for i in e['*']:\n upgrade(i)\n \n e.upgraded=True\n return e\n \ndef close_popup():\n doc['.popup'][-1].close()\n\nclass upgraded:\n upgraded=True\n def __getitem__(self,_index):\n if isinstance(_index,int):\n return self.children[_index]\n return self.get(selector=_index)\n \n def sty(self, _style, _selector=None):\n if _selector:\n for i in self[_selector]:\n i.style=_style\n else:\n self.style=_style\n self.style=_style\n return self\n \n def cla(self, _classes,_selector=None):\n if _selector:\n for i in self[_selector]:\n i.cla(_classes)\n else:\n _temp = self.class_name.split(' ')\n \n for i in _classes:\n if i[0]=='-':\n if i[1:] in _temp:\n _temp.remove(i[1:])\n else:\n if i not in _temp:\n _temp.append(i)\n \n self.class_name=' '.join(_temp)\n return self\n \n def atr(self, _attribute, _new_val=False):\n if _new_val is not False and _new_val != False:\n self.setAttribute(_attribute, _new_val)\n return self\n elif _new_val is False:\n return self.getAttribute(_attribute)\n else:\n self.removeAttribute(_attribute)\n return self\n \n def set_val(self, _val):\n if _val is not False:\n self.value = _val\n return self\n \n def set_txt(self, _val):\n if _val is not False:\n self.text = _val\n return self\n \nclass uDIV(upgraded, DIV):\n pass\n\nclass uSPAN(upgraded, SPAN):\n pass\n\n#rebuild material components\ndef rebuild_mat():\n window.componentHandler.upgradeAllRegistered()\n \ndef build_tooltips(target):\n target.get(selector='#tooltipper')[0].clear()\n for i in target.get(selector='[tooltipped]'):\n #i.tooptip().parent.removeChild(i)\n target.get(selector='#tooltipper')[0]<=i.tooltip()\n \ndef switch_context(_content,_context,_options={}):\n if 'on_close' in _options:\n _options['on_close']()\n del _options['on_close']\n _content.clear()\n\n if '_scripts' not in _options:\n _options['_scripts']=dict()\n\n if _context not in _options['_scripts']:\n _options['_scripts'][_context] = open(_context).read()\n\n exec(_options['_scripts'][_context], {'_content':_content,'_context':_context,'_options':_options})\n \n if doc.get(selector='#tooltipper'):\n build_tooltips(doc)\n rebuild_mat()\n\n#basic text elements\nclass 
text(uSPAN):\n pass\n \n\nclass group(uSPAN):\n def __init__(self, _elements, flow=None, reverse=False, align=None, stop_resize=True, nowrap=False, flex=None):\n super().__init__()\n if flow:\n self.style={'display':'flex'}\n \n if flow == 'horizontal':\n self.style.flexDirection = ('row','row-reverse')[reverse]\n elif flow == 'vertical':\n self.style.flexDirection = ('column','column-reverse')[reverse]\n \n\n if align:\n self.style.alignItems = align\n\n if nowrap==False:\n self.style.flexWrap = 'wrap'\n\n for i in _elements: \n if stop_resize:\n i.style={'flex':'0 0 auto',\n 'whiteSpace':'nowrap'}\n self <= i\n\n if flex:\n self.style.flex=flex\n\n \n#Simple icon element. _content should be name of material specced icon\nclass icon(upgraded, I):\n def __init__(self, _content):\n super().__init__(_content,Class='material-icons')\n\n#Shadow depth effect\ndef shadow(_content, depth=2):\n _shadow_depth=depth\n \n #Allow el.shadow(int) to change shadow depth\n def _shadow(depth=None):\n nonlocal _shadow_depth\n if depth:\n _content.cla(['mdl-shadow--%sdp' % depth,'-mdl-shadow--%sdp' % _shadow_depth])\n _shadow_depth=depth\n return _shadow_depth\n _content.shadow=_shadow\n _content.cla(['mdl-shadow--%sdp' % depth])\n return _content\n\n#Add inkwell effect when an element is interaxted with\ndef inkwell(_content, bleed=False, on_click=None, color='var(--ripple-color)'): \n _content.cla(['mdl-js-ripple-effect']).sty({'position':'relative'})\n \n #Allow el.bleed([True|False]) to toggle bleed out effect\n def _bleed(_on):\n if _on:_content.cla(['ripple_bleed','-ripple_nobleed'])\n else:_content.cla(['-ripple_bleed','ripple_nobleed'])\n _content.bleed=_bleed\n _content.bleed(bleed)\n \n #Add onclock to any element\n if on_click:\n _content.bind('click',on_click)\n \n #The inkwell object itself.\n _well = uSPAN(Class='mdl-ripple', style={'background':color})\n def _get_well(color=None):\n if color:_well.sty({'background':color})\n return _well\n _content.well=_get_well\n _content <= _content.well()\n\n _content.setAttribute('tabindex',0)\n \n return _content\n\n\n#Makes a toast \nclass make_toast(uDIV):\n def show_toast(self):\n self.cla(['toast_show'])\n rebuild_mat()\n \n def _clear_toast(self,ev):\n if ev.target==self:\n try:self.parent.removeChild(self)\n except:pass\n \n def close(self):\n self.cla(['-toast_show'])\n self.bind('transitionend',self._clear_toast)\n \n def __init__(self, _content, timeout=5000):\n super().__init__(DIV(_content), Class='mdl-card mdl-shadow--2dp toast')\n\n #get toaster box\n _toaster = doc.get(selector='#toaster')[0]\n\n #add tosst to page\n if len(_toaster.children) > 0:\n _toaster.insertBefore(self, _toaster.children[0])\n else:\n _toaster <= self\n\n timer.set_timeout(self.show_toast, 0)\n\n if timeout!=0:\n self.cla(['temp'])\n timer.set_timeout(self.close,timeout)\n self.bind('click',lambda ev: self.close())\n \nclass make_interactive_toast(make_toast):\n def _action(self, ev):\n self.close()\n if self.action:\n self.action(ev)\n \n def __init__(self, _content, label='OK', action=None):\n self.action=action\n \n super().__init__(\n group([\n SPAN(_content,style={'flex':'1 1 auto'}),\n button(label, colored=True,on_click=self._action).sty({'white-space':'nowrap',\n 'flex':'0 0 auto',\n 'font-weight':'bold'})\n ], flow='horizontal', align='center', stop_resize=False),\n 0\n )\n\n\n\ndef tooltip(_target, _content, bottom=True, on_show=None, hover=True, focus=True, show_first=False, floating=False):\n _el = uDIV(_content, Class='tooltip mdl-card mdl-shadow--2dp', 
\n style={\n 'min-width':'auto',\n 'width':'auto',\n 'min-height':'auto',\n 'height':'auto',\n 'padding':'8px'\n })\n \n if floating:\n def show(event):\n y=event.currentTarget.getBoundingClientRect().top+event.currentTarget.getBoundingClientRect().height/2\n x=event.currentTarget.getBoundingClientRect().left+event.currentTarget.getBoundingClientRect().width/2\n _direction=[x/window.innerWidth*-100,y/window.innerHeight*-100]\n _el.style={'top':y,\n 'left':x,\n 'opacity':'1',\n #'pointer-events':'none',\n 'transform':'translate('+str(_direction[0])+'%,'+str(_direction[1])+'%)'\n }\n elif bottom==True:\n def show(ev):\n if on_show:on_show(_target,_el)\n _el.style={'top':'%spx' % (_target.getBoundingClientRect().bottom+8),\n 'left':'%spx' % (_target.getBoundingClientRect().left + _target.getBoundingClientRect().width/2 - _el.getBoundingClientRect().width/2),\n 'opacity':'1',\n #'pointer-events':'none'\n }\n \n #target.bind('mouseenter',show)\n #target.bind('focus',show)\n \n else:\n def show(ev):\n if on_show:on_show(_target,_el)\n _el.style={'top':'%spx' % (_target.getBoundingClientRect().top-_el.getBoundingClientRect().height-8),\n 'left':'%spx' % (_target.getBoundingClientRect().left + _target.getBoundingClientRect().width/2 - _el.getBoundingClientRect().width/2),\n 'opacity':'1',\n #'pointer-events':'none'\n }\n \n \n def hide(ev):\n _el.style.opacity='0'\n def _hide():\n _el.style.pointerEvents=None\n timer.set_timeout(_hide, 0)\n \n if hover:\n _target.bind('mouseenter',show)\n #_target.bind('touchstart',show)\n _target.bind('mouseleave', hide)\n #_target.bind('touchend', hide)\n if focus:\n _target.bind('focus',show)\n _target.bind('blur',hide)\n \n if show_first:\n timer.set_timeout(lambda: show(None),500)\n\n def show_at(x,y):\n _direction=[x/window.innerWidth*-100,y/window.innerHeight*-100]\n _el.style={'top':y,\n 'left':x,\n 'opacity':'1',\n #'pointer-events':'none',\n 'transform':'translate('+str(_direction[0])+'%,'+str(_direction[1])+'%)'\n }\n\n def _tooltip(content=None):\n if content:\n _el.clear()\n _el.appendChild(content)\n return _el\n _target.tooltip=_tooltip\n \n _el.show=lambda:show(None)\n _el.show_at=show_at\n _el.hide=lambda:hide(None)\n \n #_target <= _el\n _target.setAttribute('tooltipped',True)\n \n return _target\n\nclass button(upgraded,BUTTON):\n def __init__(self, _content='', \n Id=None,\n accent=False,\n raised=False,\n colored=False,\n fab=False,\n fab_mini=False,\n ripple_bleed=False,\n Icon=None,\n on_click=None,\n disabled=False):\n classes = [\n 'mdl-button',\n 'mdl-js-button',\n 'mdl-js-ripple-effect',\n ]\n\n if colored: classes.append('mdl-button--colored')\n if accent: classes.append('mdl-button--accent')\n if raised: classes.append('mdl-button--raised')\n if fab: classes.append('mdl-button--fab')\n if fab_mini: \n classes.append('mdl-button--fab')\n classes.append('mdl-button--mini-fab')\n if Icon and not fab and not fab_mini:\n classes.append('mdl-button--icon')\n\n if ripple_bleed:\n classes.append('ripple_bleed')\n\n if Icon: _content = icon(Icon)\n\n super().__init__(_content, Class=' '.join(classes))\n \n if Id:self.Id=Id\n\n if on_click:\n self.bind('click',on_click)\n\n if disabled:\n self.atr('disabled',True)\n \ndef make_popup(_title, _content, outsideClickClose=True,close_button=True):\n _el = uDIV(uDIV(uDIV(_title,Class='mdl-layout-title'),Class='content mdl-card mdl-shadow--8dp'),Class='popup')\n \n #_el.html = '''
\n #<div class="mdl-layout-title">%s</div></div>
''' % _title\n _el.get(selector='.content')[0] <= _content\n if close_button:\n _el.get(selector='.content')[0]<=group([button('close',raised=True, colored=True,\n on_click=lambda ev:close_popup()).sty({'float':'right'})\n]) \n def _close():\n _el.cla(['-popup_show'])\n def _clear_popup(ev):\n if ev.target==_el:\n try:_el.parent.removeChild(_el)\n except:pass\n build_tooltips(doc)\n _el.bind('transitionend',_clear_popup)\n _el['.content'][0].close=_close\n _el.close=_close\n \n if outsideClickClose==True:\n _el.bind('click',lambda ev:_el.close())\n _el['.content'][0].bind('click',lambda ev:ev.stopPropagation())\n \n doc <= _el\n \n def show_popup():\n _el.cla(['popup_show'])\n build_tooltips(doc)\n rebuild_mat()\n if len(_el['textarea, input, button'])!=0:\n _el['textarea, input, button'][0].focus()\n timer.set_timeout(show_popup,0)\n \n rebuild_mat()\n \n return _el['.content'][0]\n\n\ndef make_menu(_content, event=None, direction='',x=0,y=0, pad=False):\n _el = make_popup('',_content, close_button=False)\n _el.removeChild(_el.get(selector='.mdl-layout-title')[0])\n _el.parent.class_name+=' popup_menu'\n \n if event:\n y=event.currentTarget.getBoundingClientRect().top+event.currentTarget.getBoundingClientRect().height/2\n x=event.currentTarget.getBoundingClientRect().left+event.currentTarget.getBoundingClientRect().width/2\n \n _el.style={\n 'position': 'absolute',\n 'top':'%spx' %\n (y),\n 'left':'%spx' %\n (x),\n 'width': 'auto',\n 'padding': '0px'\n }\n\n flags = direction.split('|')\n _direction = [0,0]\n\n if direction == '':\n _direction=[x/window.innerWidth*-100,y/window.innerHeight*-100]\n \n if 'center' in flags:\n _direction = [-50, -50];\n if 'left' in flags:\n _direction[0] = -100;\n if 'right' in flags:\n _direction[0] = 0;\n if 'up' in flags:\n _direction[1] = -100;\n if 'down' in flags:\n _direction[1] = 0;\n \n _el.style.transform = 'translate('+str(_direction[0])+'%,'+str(_direction[1])+'%)'\n\n if pad:\n _el.style.padding='8px'\n return _el\n\n\n\nclass textbox(uDIV):\n def grow(self):\n self.on_input(None)\n \n def on_input(self,ev):\n _tar = self.get(selector='textarea')[0]\n _tar.style.height='auto'\n correction = _tar.offsetHeight - _tar.clientHeight\n _tar.style.height = '%spx' % (_tar.scrollHeight - correction)\n \n \n def box(self,_val):\n self['textarea, input'][0].value=_val\n if _val=='':\n self.cla(['-is-dirty'])\n else:\n self.cla(['is-dirty'])\n return self\n \n def label(self,_val):\n self['.mdl-textfield__label'][0].text=_val\n return self\n def error(self, _on=True):\n self.cla([('-','')[_on]+'is-invalid'])\n return self\n def error_message(self, _val=False):\n self['.mdl-textfield__error'][0].text=_val\n return self\n \n def __init__(self,ID='', \n kind = '', \n label = '', \n value = '', \n mask=None, \n error='', \n on_submit=None, \n on_change=None,\n on_blur=None,\n on_focus=None\n ):\n super().__init__(Class='mdl-textfield mdl-js-textfield mdl-textfield--floating-label')\n if ID=='':\n ID=id_count()\n if kind=='simple':\n if mask:\n self.html = '''\n ''' % (mask, ID, value, ID, label)\n else:\n self.html = '''\n ''' % (ID, value, ID, label)\n elif kind=='pass':\n self.html = '''\n ''' % (ID, value, ID, label)\n else:\n self.html='''\n ''' % (ID, value, ID, label)\n _tar = self.get(selector='textarea')[0]\n\n #makes the area autogrow to fit content\n _tar.style = {'height':'auto', 'overflow':'hidden'}\n _tar.bind('input',self.on_input)\n\n if value !='':\n timer.set_timeout(self.grow, 0)\n\n if on_submit:\n #filter for specifically enter without 
shift key\n def _on_submit(ev):\n if ev.keyCode==13 and ev.shiftKey == False:\n ev.preventDefault()\n on_submit(ev)\n self.get(selector='textarea, input')[0].bind('keypress',_on_submit)\n\n self <= SPAN(error, Class='mdl-textfield__error')\n\n if on_change:\n self.get(selector='textarea, input')[0].bind('input',on_change)\n\n if on_focus:\n self.get(selector='textarea, input')[0].bind('focus',on_focus)\t\n\n if on_blur:\n self.get(selector='textarea, input')[0].bind('blur',on_blur)\n \n \n\n\nclass combobox(textbox): \n def on_focus(self,ev):\n self.options().style.width=self.style.width\n self.options().style.pointerEvents='auto'\n \n def on_edit(self,_ev):\n for i in self.options()[0]:\n if _ev.currentTarget.value.lower() in i.text.lower():\n i.style.display='block'\n else:\n i.style.display='none'\n \n #select top search\n def on_blur(self,_ev):\n self.options().style.pointerEvents='none'\n if not self.allow_new:\n found=False\n for i in self.options()[0]:\n if _ev.currentTarget.value.lower() in i.text.lower():\n self.box(i.text)\n self.cla([('-is-dirty','is-dirty')[i.text!='']])\n found=True\n if self.callback:self.callback(i.text,self)\n break\n if not found:\n self.box('')\n self.cla(['-is-dirty'])\n for i in self.options()[0]:\n i.style.display='block'\n \n def _on_select(self,_ev):\n if _ev.currentTarget.text!='':\n self.cla(['is-dirty'])\n else:\n self.cla(['-is-dirty'])\n\n self.box(_ev.currentTarget.text)\n\n if self.focus_flow and self.nextSibling and len(upgrade(self.nextSibling)['input, textarea'])>0:\n self.nextSibling['input, textarea'][0].focus()\n if self.focus_flow and self.nextSibling and len(upgrade(self.nextSibling)['button'])>0:\n self.nextSibling['button'][0].focus()\n if self.callback:self.callback(_ev.currentTarget.text,self)\n\n def dont_blur(self,_ev):\n _ev.preventDefault()\n \n def get_items(self):\n return [i.text for i in self.options()[0]]\n \n def remove_item(self,_item):\n for i in self.options()[0]:\n if i.text == _item:\n i.parent.removeChild(i)\n \n def add_item(self,_item):\n _item=inkwell(text(_item),on_click=self._on_select).sty({'padding':('0px','8px')[_item!='']})\n _item.bind('mousedown',self.dont_blur)\n self.options()[0].appendChild(_item)\n \n def __init__(self,ID='',\n label='', \n value='', \n options=list(), \n allow_new=False,\n callback=None,\n focus_flow=True,\n on_submit=None):\n if ID=='':\n ID=id_count()\n \n super().__init__(ID,kind='simple',\n value=value,label=label,\n on_focus=self.on_focus,on_change=self.on_edit,\n on_blur=self.on_blur,on_submit=on_submit)\n \n self.allow_new=allow_new\n self.callback=callback\n self.focus_flow=focus_flow\n \n tooltip(self[0],'',hover=False)\n self.options=self[0].tooltip\n self.options().style.padding='0px'\n\n #prefill options\n \n items=[]\n for i in options:\n items.append(inkwell(text(i),on_click=self._on_select).sty({'padding':('0px','8px')[i!='']}))\n items[-1].bind('mousedown',self.dont_blur)\n \n self.options().appendChild(group(items,flow='vertical'))\n \n \n \nclass chip(uSPAN):\n def __init__(self,_content,\n contact_color = None, \n contact_icon = None, \n onDelete = None):\n super().__init__(Class='mdl-chip')\n\n if contact_color:\n self.class_name += ' mdl-chip--contact'\n self <= SPAN(_content[0], \n Class='mdl-chip__contact mdl-color-text--white', \n style={'background':contact_color})\n\n elif contact_icon:\n self.class_name += ' mdl-chip--contact'\n self <= SPAN('',\n Class='mdl-chip__contact mdl-color-text--white',\n style={'background':'url(\"%s\") center/cover' % 
contact_icon})\n\n\n self <= SPAN(_content, Class='mdl-chip__text')\n\n if onDelete:\n self.class_name += ' mdl-chip--deletable'\n _temp = BUTTON(icon('cancel'), Class='mdl-chip__action')\n _temp.bind('click', onDelete)\n self <= _temp\n\n\n\nclass progress_bar(uDIV):\n def update(self,progress,buffered=100):\n self.get(selector='.progressbar')[0].style.width=str(progress)+'%'\n self.get(selector='.bufferbar')[0].style.width=str(buffered)+'%'\n self.get(selector='.auxbar')[0].style.width=str(100-buffered)+'%'\n \n def indeterminate(self,_on):\n self.cla([('-','')[_on]+'mdl-progress__indeterminate'])\n \n def __init__(self,_indeterminate=False):\n super().__init__(Class='mdl-progress mdl-js-progress')\n if _indeterminate == True:\n self.class_name += ' mdl-progress__indeterminate'\n \n\nclass progress_spinner(DIV):\n def __init__(self):\n super().__init__(Class='mdl-spinner mdl-js-spinner is-active')\n \nclass slider(upgraded,INPUT): \n def on_changed(self, _ev):\n self.tooltip().text=self.value\n \n def __init__(self,\n min= 0,\n max= 10,\n value= 5,\n on_change = None,\n on_change_end = None):\n super().__init__(Class='mdl-slider mdl-js-slider',Type='range',\n min=min,max=max,value=value,tabindex=0)\n \n self.bind('input', self.on_changed)\n if on_change:\n self.bind('input', on_change)\n\n if on_change_end:\n self.bind('change',on_change_end)\n\n tooltip(self, value, bottom=False)\n \n_id_count=0\ndef id_count():\n global _id_count\n _id_count+=1\n return _id_count\n\n\nclass checkbox(upgraded, LABEL):\n def checked(self,_val):\n self.cla([('-','')[_val]+'is-checked'])\n self[0].checked=_val\n return self\n \n def is_checked(self):\n return self[0].checked\n \n def label(self, _val=False):\n self['label'][0].set_txt(_val)\n def enabled(self, _val):\n self.cla([('','-')[_val]+'is-disabled'])[0].atr('disabled',('',True)[_val]).parent\n \n def __init__(self,kind='check',\n ID='',\n label='',\n value='',\n checked=False,\n disabled=False,\n name='',\n on_change=None):\n if ID=='':\n ID='checkbox_%s'%id_count()\n if kind=='check':\n super().__init__(INPUT(Type='checkbox', Id=ID, Class='mdl-checkbox__input'),Class='mdl-checkbox mdl-js-checkbox mdl-js-ripple-effect')\n self.atr('for',ID)\n self<=SPAN(label, Class='mdl-checkbox__label')\n elif kind=='switch':\n super().__init__(INPUT(Type='checkbox', Id=ID, Class='mdl-switch__input'),Class='mdl-switch mdl-js-switch mdl-js-ripple-effect')\n self.atr('for',ID)\n self<=SPAN(label, Class='mdl-switch__label')\n elif kind=='radio':\n super().__init__(INPUT(Type='radio', Id=ID, Class='mdl-radio__button', value=value,name=name),Class='mdl-radio mdl-js-radio mdl-js-ripple-effect')\n self.atr('for',ID)\n self<=SPAN(label, Class='mdl-radio__label')\n\n if checked:\n self[0].setAttribute('checked',True)\n if disabled:\n self[0].setAttribute('disabled',True)\n\n if on_change:\n self.bind('change',on_change)\n\n\nclass card_shell(upgraded, DIV):\n def __init__(self,_content='', _shadow_depth=2):\n super().__init__(_content, Class='mdl-card mdl-shadow--%sdp' % _shadow_depth, style={'padding':'8px',\n 'margin':'8px',\n 'width':'auto',\n 'min-height':'auto',\n 'display':'inline-block'})\n\n\nclass card(uDIV):\n def __init__(self,title='',\n content=None,\n Button=None,\n action=None,\n height='176px',\n width='512px',\n image=''):\n #self = DIV(Class='demo-card-wide mdl-card mdl-shadow--2dp', style={'min-height':'auto','width': width})\n super().__init__(Class='demo-card-wide mdl-card mdl-shadow--2dp', style={'min-height':'auto','width': width})\n self.html = '''
\n        <!-- reconstructed card title; the original markup was stripped in extraction -->\n        <div class=\"mdl-card__title mdl-card--expand\" style=\"height: %s;\">\n            <div class=\"card-title-shade\" style=\"opacity: %s;\"></div>\n            <h2 class=\"mdl-card__title-text\" style=\"opacity: %s; background: url('%s') center / cover;\">%s</h2>\n        </div>\n        
''' % (height, ('.75','0')[action==None], ('.75','0')[title==''], image, title)\n\n if content:\n self <= DIV(content, Class='mdl-card__supporting-text')\n\n if Button:\n self <= DIV(Button, Class='mdl-card__actions mdl-card--border')\n\n if action:\n self <= DIV(action.sty({'color':'white'}), Class='mdl-card__menu')\n\n\nclass tab(card_shell):\n def set_index(self,_new):\n self.index=_new\n self.style.top='%spx'%(56*self.index+16)\n \n def __init__(self, _icon, _label, _index, _on_click):\n super().__init__(icon(_icon))\n self.index=_index\n inkwell(self,on_click=_on_click)\n self.sty({\n 'position':'fixed',\n 'top':'%spx'%(56*self.index+16),\n 'right':'0px',\n 'margin':'0px',\n 'transform':'translate(25%,0px)',\n 'z-index':'49 !important',\n })\n tooltip(self,_label, floating=True) \n \ndef sidebar(*arg,**kwarg):\n out = shadow(card_shell(*arg,**kwarg).sty({'width':'240px',\n 'height':'100%',\n 'position':'fixed',\n 'overflow':'auto',\n 'left':-230,\n 'top':0,\n 'margin':0,\n 'opacity':'0',\n 'transition':'all .2s'}\n ),16)\n\n out.cla(['pymat_sidebar'])\n\n out.bind('mouseover',lambda _ev:out.sty({\n 'left':'0',\n 'opacity':'1'\n }))\n\n out.bind('mouseout',lambda _ev:out.sty({\n 'left':-230,\n 'opacity':'0'\n }))\n\n \n return out\n\n\nimport time \ndef timeit(method):\n def timed(*args, **kw):\n ts = time.time() \n result = method(*args, **kw) \n te = time.time() \n print ('%r (%r, %r) %2.2f sec' % \\\n (method.__name__, args, kw, te-ts))\n return result\n return timed\n","sub_path":"old/test/lib/python/Lib/site-packages/PyMat/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":26823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"81825493","text":"# Recursive method\nclass Solution:\n \"\"\"\n @param S: The set of numbers.\n @return: A list of lists. See example.\n \"\"\"\n def subsets(self, S):\n if S is None:\n return []\n S.sort()\n res = []\n self.dfs(0, [], S, res)\n return res\n\n def dfs(self, start, tmplist, S, res):\n res.append(tmplist)\n for i in range(start, len(S)):\n self.dfs(i + 1, tmplist + [S[i]], S, res)\n\n# Bit manipulation\nclass Solution:\n \"\"\"\n @param S: The set of numbers.\n @return: A list of lists. 
See example.\n \"\"\"\n def subsets(self, S):\n # write your code here\n S.sort()\n n = len(S)\n res = []\n for i in range(1 << n):\n tmp = []\n for j in range(n):\n if i & (1 << j):\n tmp.append(S[j])\n res.append(tmp)\n return res\n","sub_path":"Python/017 Subsets.py","file_name":"017 Subsets.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"417571114","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# accdb - account database using human-editable flat files as storage\n\nfrom __future__ import print_function\nimport cmd\nimport fnmatch\nimport os\nimport re\nimport shlex\nimport subprocess\nimport sys\nimport time\nimport uuid\nfrom collections import OrderedDict\nfrom base64 import b64encode, b64decode\n\ndebug = os.environ.get(\"DEBUG\", \"\")\n\nfield_names = {\n \"hostname\": \"host\",\n \"machine\": \"host\",\n \"url\": \"uri\",\n \"website\": \"uri\",\n \"user\": \"login\",\n \"username\": \"login\",\n \"nicname\": \"nic-hdl\",\n \"password\": \"pass\",\n \"!pass\": \"pass\",\n \"mail\": \"email\",\n}\n\nfield_groups = {\n \"object\": [\"host\", \"uri\", \"realm\"],\n \"username\": [\"login\", \"nic-hdl\"],\n \"password\": [\"pass\", \"!pass\"],\n \"email\": [\"email\"],\n}\n\nfield_order = [\"object\", \"username\", \"password\", \"email\"]\n\nfield_prefix_re = re.compile(r\"^\\W+\")\n\ndef strip_field_prefix(name):\n return field_prefix_re.sub(\"\", name)\n\ndef sort_fields(entry, terse=False):\n names = []\n for group in field_order:\n for field in field_groups[group]:\n names += sorted((k for k in entry.attributes \\\n if k == field),\n key=strip_field_prefix)\n if not terse:\n names += sorted((k for k in entry.attributes if k not in names),\n key=strip_field_prefix)\n return names\n\ndef translate_field(name):\n return field_names.get(name, name)\n\ndef split_ranges(string):\n for i in string.split():\n for j in i.split(\",\"):\n if \"-\" in j:\n x, y = j.split(\"-\", 1)\n yield int(x), int(y)+1\n else:\n yield int(j), int(j)+1\n\ndef split_tags(string):\n string = string.strip(\" ,\\n\")\n items = re.split(Entry.RE_TAGS, string)\n return set(items)\n\ndef expand_range(string):\n items = []\n for m, n in split_ranges(string):\n items.extend(range(m, n))\n return items\n\ndef re_compile_glob(glob, flags=None):\n if flags is None:\n flags = re.I | re.U\n return re.compile(fnmatch.translate(glob), flags)\n\ndef trace(msg, *args):\n print(\"accdb: %s\" % msg, *args, file=sys.stderr)\n\ndef start_editor(path):\n if \"VISUAL\" in os.environ:\n editor = shlex.split(os.environ[\"VISUAL\"])\n elif \"EDITOR\" in os.environ:\n editor = shlex.split(os.environ[\"EDITOR\"])\n elif sys.platform == \"win32\":\n editor = [\"notepad.exe\"]\n elif sys.platform == \"linux2\":\n editor = [\"vi\"]\n\n editor.append(path)\n\n proc = subprocess.Popen(editor)\n\n if sys.platform == \"linux2\":\n proc.wait()\n\nclass FilterSyntaxError(Exception):\n pass\n\ndef split_filter(text):\n tokens = []\n depth = 0\n start = -1\n for pos, char in enumerate(text):\n if char == \"(\":\n if depth == 0:\n if start >= 0:\n tokens.append(text[start:pos])\n start = pos+1\n depth += 1\n elif char == \")\":\n depth -= 1\n if depth == 0 and start >= 0:\n tokens.append(text[start:pos])\n start = -1\n elif char == \" \":\n if depth == 0 and start >= 0:\n tokens.append(text[start:pos])\n start = -1\n else:\n if start < 0:\n start = pos\n if depth == 0:\n if start >= 0:\n tokens.append(text[start:])\n return tokens\n 
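# Example: nested parentheses group into a single token; the input here is\n    # illustrative, not from the original file:\n    #   split_filter("AND (OR a b) c")  ->  ['AND', 'OR a b', 'c']\n    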
elif depth > 0:\n raise FilterSyntaxError(\"unclosed '(' (depth %d)\" % depth)\n elif depth < 0:\n raise FilterSyntaxError(\"too many ')'s (depth %d)\" % depth)\n\ndef compile_filter(pattern):\n tokens = split_filter(pattern)\n if debug:\n trace(\"parsing filter %r -> %r\" % (pattern, tokens))\n\n if len(tokens) > 1:\n if tokens[0] in {\"AND\", \"and\"}:\n filters = [compile_filter(x) for x in tokens[1:]]\n return ConjunctionFilter(*filters)\n elif tokens[0] in {\"OR\", \"or\"}:\n filters = [compile_filter(x) for x in tokens[1:]]\n return DisjunctionFilter(*filters)\n elif tokens[0] in {\"NOT\", \"not\"}:\n if len(tokens) > 2:\n raise FilterSyntaxError(\"too many arguments for 'NOT'\")\n filter = compile_filter(tokens[1])\n return NegationFilter(filter)\n elif tokens[0] in {\"PATTERN\", \"pattern\"}:\n if len(tokens) > 2:\n raise FilterSyntaxError(\"too many arguments for 'PATTERN'\")\n return PatternFilter(tokens[1])\n else:\n raise FilterSyntaxError(\"unknown operator %r in (%s)\" \\\n % (tokens[0], pattern))\n elif \" \" in tokens[0] or \"(\" in tokens[0] or \")\" in tokens[0]:\n return compile_filter(tokens[0])\n else:\n return PatternFilter(tokens[0])\n\ndef compile_pattern(pattern):\n func = None\n\n if pattern == \"*\":\n func = lambda entry: True\n elif pattern.startswith(\"#\"):\n try:\n val = int(pattern[1:])\n except ValueError:\n func = lambda entry: False\n else:\n func = lambda entry: entry.itemno == val\n elif pattern.startswith(\"+\"):\n regex = re_compile_glob(pattern[1:])\n func = lambda entry: any(regex.match(tag) for tag in entry.tags)\n elif pattern.startswith(\"@\"):\n if \"=\" in pattern:\n attr, glob = pattern[1:].split(\"=\", 1)\n attr = translate_field(attr)\n regex = re_compile_glob(glob)\n func = lambda entry:\\\n attr in entry.attributes \\\n and any(regex.match(value)\n for value in entry.attributes[attr])\n elif \"~\" in pattern:\n attr, regex = pattern[1:].split(\"~\", 1)\n attr = translate_field(attr)\n regex = re.compile(regex, re.I | re.U)\n func = lambda entry:\\\n attr in entry.attributes \\\n and any(regex.search(value)\n for value in entry.attributes[attr])\n elif \"*\" in pattern:\n regex = re_compile_glob(pattern[1:])\n func = lambda entry:\\\n any(regex.match(attr) for attr in entry.attributes)\n else:\n attr = translate_field(pattern[1:])\n func = lambda entry: attr in entry.attributes\n elif pattern.startswith(\"~\"):\n regex = re.compile(pattern[1:], re.I | re.U)\n func = lambda entry: regex.search(entry.name)\n else:\n regex = re_compile_glob(pattern + \"*\")\n func = lambda entry: regex.match(entry.name)\n\n return func\n\nclass Filter(object):\n def __call__(self, entry):\n return bool(self.test(entry))\n\nclass PatternFilter(Filter):\n def __init__(self, pattern):\n self.pattern = pattern\n self.func = compile_pattern(self.pattern)\n\n def test(self, entry):\n if self.func:\n return self.func(entry)\n\n def __repr__(self):\n return \"(PATTERN %s)\" % self.pattern\n\nclass ConjunctionFilter(Filter):\n def __init__(self, *filters):\n self.filters = list(filters)\n\n def test(self, entry):\n return all(filter.test(entry) for filter in self.filters)\n\n def __repr__(self):\n return \"(AND %s)\" % \" \".join(repr(f) for f in self.filters)\n\nclass DisjunctionFilter(Filter):\n def __init__(self, *filters):\n self.filters = list(filters)\n\n def test(self, entry):\n return any(filter.test(entry) for filter in self.filters)\n\n def __repr__(self):\n return \"(OR %s)\" % \" \".join(repr(f) for f in self.filters)\n\nclass NegationFilter(Filter):\n def 
__init__(self, filter):\n self.filter = filter\n\n def test(self, entry):\n return not self.filter.test(entry)\n\n def __repr__(self):\n return \"(NOT %r)\" % self.filter\n\nclass Database(object):\n def __init__(self):\n self.count = 0\n self.path = None\n self.entries = dict()\n self.order = list()\n self.modified = False\n self.readonly = False\n self._modeline = \"; vim: ft=accdb:\"\n self.flags = set()\n self._adduuids = True\n\n # Import\n\n @classmethod\n def from_file(self, path):\n db = self()\n db.path = path\n with open(path, \"r\", encoding=\"utf-8\") as fh:\n db.parseinto(fh)\n return db\n\n @classmethod\n def parse(self, *args, **kwargs):\n return self().parseinto(*args, **kwargs)\n\n def parseinto(self, fh):\n data = \"\"\n lineno = 0\n lastno = 1\n\n for line in fh:\n lineno += 1\n if line.startswith(\"; vim:\"):\n self._modeline = line.strip()\n elif line.startswith(\"; dbflags:\"):\n self.flags = split_tags(line[10:])\n elif line.startswith(\"=\"):\n entry = Entry.parse(data, lineno=lastno)\n if entry:\n self.add(entry)\n data = line\n lastno = lineno\n else:\n data += line\n\n if data:\n entry = Entry.parse(data, lineno=lastno)\n if entry:\n self.add(entry)\n\n return self\n\n def add(self, entry, lineno=None):\n if entry.uuid is None:\n entry.uuid = uuid.uuid4()\n elif entry.uuid in self:\n raise KeyError(\"Duplicate UUID %s\" % entry.uuid)\n\n entry.itemno = self.count + 1\n\n self.count += 1\n\n if entry.lineno is None:\n entry.lineno = lineno\n\n # Two uuid.UUID objects for the same UUID will also have the same hash.\n # Hence, it is okay to use an uuid.UUID as a dict key. For now, anyway.\n # TODO: Can this be relied upon? Not documented anywhere.\n self.entries[entry.uuid] = entry\n self.order.append(entry.uuid)\n\n return entry\n\n def replace(self, entry):\n if entry.uuid is None:\n raise ValueError(\"Entry is missing UUID\")\n\n oldentry = self[entry.uuid]\n\n entry.itemno = oldentry.itemno\n entry.lineno = oldentry.lineno\n\n oldpass = oldentry.attributes.get(\"pass\", None)\n newpass = entry.attributes.get(\"pass\", None)\n\n if oldpass and oldpass != newpass:\n if \"!pass.old\" not in entry.attributes:\n entry.attributes[\"!pass.old\"] = []\n for p in oldpass:\n p = \"%s (until %s)\" % (p.dump(), time.strftime(\"%Y-%m-%d\"))\n entry.attributes[\"!pass.old\"].append(PrivateAttribute(p))\n\n self.entries[entry.uuid] = entry\n\n return entry\n\n # Lookup\n\n def __contains__(self, key):\n return key in self.entries\n\n def __getitem__(self, key):\n return self.entries[key]\n\n def find_by_itemno(self, itemno):\n uuid = self.order[itemno-1]\n entry = self.entries[uuid]\n assert entry.itemno == itemno\n return entry\n\n def find(self, filter):\n for entry in self:\n if filter(entry):\n yield entry\n\n # Aggregate lookup\n\n def tags(self):\n tags = set()\n for entry in self:\n tags |= entry.tags\n return tags\n\n # Maintenance\n\n def sort(self):\n self.order.sort(key=lambda uuid: self.entries[uuid].normalized_name)\n\n # Export\n\n def __iter__(self):\n for uuid in self.order:\n yield self.entries[uuid]\n\n def dump(self, fh=sys.stdout, storage=True):\n eargs = {\"storage\": storage,\n \"conceal\": (\"conceal\" in self.flags)}\n if storage:\n if self._modeline:\n print(self._modeline, file=fh)\n for entry in self:\n if entry.deleted:\n continue\n print(entry.dump(**eargs), file=fh)\n if storage:\n if self.flags:\n print(\"; dbflags: %s\" % \\\n \", \".join(sorted(self.flags)),\n file=fh)\n\n def to_structure(self):\n return [entry.to_structure() for entry in 
self]\n\n def dump_yaml(self, fh=sys.stdout):\n import yaml\n print(yaml.dump(self.to_structure()), file=fh)\n\n def dump_json(self, fh=sys.stdout):\n import json\n print(json.dumps(self.to_structure(), indent=4), file=fh)\n\n def to_file(self, path):\n with open(path, \"w\", encoding=\"utf-8\", newline=\"\\n\") as fh:\n self.dump(fh)\n\n def flush(self):\n if not self.modified:\n return\n if self.readonly:\n print(\"(Discarding changes, database read-only)\",\n file=sys.stderr)\n return\n if self.path is None:\n return\n #print(\"(Storing database)\", file=sys.stderr)\n self.to_file(self.path)\n self.modified = False\n\nclass Entry(object):\n RE_TAGS = re.compile(r'\\s*,\\s*|\\s+')\n RE_KEYVAL = re.compile(r'=|: ')\n\n RE_COLL = re.compile(r'\\w.*$')\n\n def __init__(self):\n self.attributes = dict()\n self.comment = \"\"\n self.deleted = False\n self.itemno = None\n self.lineno = None\n self.name = None\n self.tags = set()\n self.uuid = None\n self._broken = False\n\n # Import\n\n @classmethod\n def parse(self, *args, **kwargs):\n return self().parseinto(*args, **kwargs)\n\n def parseinto(self, data, lineno=1):\n # lineno is passed here for use in syntax error messages\n self.lineno = lineno\n\n for line in data.splitlines():\n line = line.lstrip()\n if not line:\n pass\n elif line.startswith(\"=\"):\n if self.name:\n # Ensure that Database only passes us single entries\n print(\"Line %d: ignoring multiple name headers\" \\\n % lineno,\n file=sys.stderr)\n self.name = line[1:].strip()\n elif line.startswith(\"+\"):\n self.tags |= split_tags(line[1:])\n if \"\\\\deleted\" in self.tags:\n self.deleted = True\n elif line.startswith(\";\"):\n self.comment += line[1:] + \"\\n\"\n elif line.startswith(\"(\") and line.endswith(\")\"):\n # annotations in search output\n pass\n elif line.startswith(\"█\") and line.endswith(\"█\"):\n # QR code\n pass\n elif line.startswith(\"{\") and line.endswith(\"}\"):\n if self.uuid:\n print(\"Line %d: ignoring multiple UUID headers\" \\\n % lineno,\n file=sys.stderr)\n\n try:\n self.uuid = uuid.UUID(line)\n except ValueError:\n print(\"Line %d: ignoring badly formed UUID %r\" \\\n % (lineno, line),\n file=sys.stderr)\n self.comment += line + \"\\n\"\n else:\n try:\n key, val = re.split(self.RE_KEYVAL, line, 1)\n except ValueError:\n print(\"Line %d: could not parse line %r\" \\\n % (lineno, line),\n file=sys.stderr)\n self.comment += line + \"\\n\"\n continue\n\n if val.startswith(\"\"):\n # trying to load a safe dump\n print(\"Line %d: lost private data, you're fucked\" \\\n % lineno,\n file=sys.stderr)\n val = \"\"\n self._broken = True\n elif val.startswith(\" \"):\n nval = val[len(\" \"):]\n nval = b64decode(nval)\n try:\n val = nval.decode(\"utf-8\")\n except UnicodeDecodeError:\n pass # leave the old value assigned\n elif key.startswith(\"date.\") and val in {\"now\", \"today\"}:\n val = time.strftime(\"%Y-%m-%d\")\n\n key = translate_field(key)\n\n if self.is_private_attr(key):\n attr = PrivateAttribute(val)\n else:\n attr = Attribute(val)\n\n if key in self.attributes:\n self.attributes[key].append(attr)\n else:\n self.attributes[key] = [attr]\n\n lineno += 1\n\n if not self.name:\n self.name = \"(Unnamed)\"\n\n return self\n\n def is_private_attr(self, key):\n return key == \"pass\" or key.startswith(\"!\")\n\n # Export\n\n def dump(self, storage=False, terse=False, conceal=True):\n \"\"\"\n storage:\n output private data\n output metadata\n never skip fields (disables terse)\n terse\n skip fields not listed in groups\n conceal\n base64-encode 
private data\n \"\"\"\n\n if storage:\n terse = False\n\n data = \"\"\n\n if not storage:\n if self.itemno:\n data += \"(item %d)\\n\" % self.itemno\n elif self.lineno:\n data += \"(line %d)\\n\" % self.lineno\n\n data += \"= %s\\n\" % self.name\n\n for line in self.comment.splitlines():\n data += \";%s\\n\" % line\n\n if self.uuid and storage:\n data += \"\\t{%s}\\n\" % self.uuid\n\n for key in sort_fields(self, terse):\n for value in self.attributes[key]:\n if storage or not conceal:\n value = value.dump()\n if storage and conceal and self.is_private_attr(key) \\\n and not value.startswith(\" \"):\n value = value.encode(\"utf-8\")\n value = b64encode(value)\n value = value.decode(\"utf-8\")\n value = \" %s\" % value\n data += \"\\t%s: %s\\n\" % (key, value)\n\n if self.tags:\n tags = list(self.tags)\n tags.sort()\n line = []\n while tags or line:\n linelen = 8 + sum([len(i) + 2 for i in line])\n if not tags or (line and linelen + len(tags[0]) + 2 > 80):\n data += \"\\t+ %s\\n\" % \", \".join(line)\n line = []\n if tags:\n line.append(tags.pop(0))\n\n return data\n\n def to_structure(self):\n dis = dict()\n dis[\"name\"] = self.name\n dis[\"comment\"] = self.comment\n dis[\"data\"] = {key: list(val.dump() for val in self.attributes[key])\n for key in sort_fields(self, False)}\n dis[\"lineno\"] = self.lineno\n dis[\"tags\"] = list(self.tags)\n dis[\"uuid\"] = str(self.uuid)\n return dis\n\n def __str__(self):\n return self.dump(storage=False)\n\n def __bool__(self):\n return bool((self.name and self.name != \"(Unnamed)\")\n or self.attributes or self.tags or self.comment)\n\n @property\n def normalized_name(self):\n return re.search(self.RE_COLL, self.name).group(0).lower()\n\nclass Attribute(str):\n # Nothing special about this class. Exists only for consistency\n # with PrivateAttribute providing a dump() method.\n\n def dump(self):\n return str.__str__(self)\n\nclass PrivateAttribute(Attribute):\n # Safeguard class to prevent accidential disclosure of private values.\n # Inherits a dump() method from Attribute for obtaining the actual data.\n\n def __repr__(self):\n if self == \"\":\n return self.dump()\n return \"\" % len(self)\n\n def __str__(self):\n if self == \"\":\n return self.dump()\n return \"\" % len(self)\n\nclass Interactive(cmd.Cmd):\n def __init__(self, *args, **kwargs):\n cmd.Cmd.__init__(self, *args, **kwargs)\n self.prompt = \"\\001\\033[34m\\002\" \"accdb>\" \"\\001\\033[m\\002\" \" \"\n self.banner = \"Using %s\" % db_path\n\n def emptyline(self):\n pass\n\n def default(self, line):\n print(\"Are you on drugs?\", file=sys.stderr)\n\n def do_EOF(self, arg):\n \"\"\"Save changes and exit\"\"\"\n return True\n\n def do_help(self, arg):\n \"\"\"Well, duh.\"\"\"\n cmds = [k for k in dir(self) if k.startswith(\"do_\")]\n for cmd in cmds:\n doc = getattr(self, cmd).__doc__ or \"?\"\n print(\" %-14s %s\" % (cmd[3:], doc))\n\n def do_copy(self, arg):\n \"\"\"Copy password to clipboard\"\"\"\n arg = int(arg)\n\n entry = db.find_by_itemno(arg)\n print(entry)\n if \"pass\" in entry.attributes:\n Clipboard.put(entry.attributes[\"pass\"][0].dump())\n else:\n print(\"No password found!\",\n file=sys.stderr)\n\n def do_dump(self, arg, db=None):\n \"\"\"Dump the database to stdout (yaml, json, safe)\"\"\"\n if db is None:\n db = globals()[\"db\"]\n\n if arg == \"\":\n db.dump()\n elif arg == \"yaml\":\n db.dump_yaml()\n elif arg == \"json\":\n db.dump_json()\n elif arg == \"safe\":\n db.dump(storage=False)\n else:\n print(\"Unsupported export format: %r\" % arg,\n file=sys.stderr)\n\n 
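# The filter language accepted by do_grep below composes patterns with\n    # AND/OR/NOT; for example (entries illustrative, not from the original\n    # file):  grep "AND +work (OR @host=*.example.com ~^Mail)"\n    # matches entries tagged 'work' whose host attribute globs\n    # *.example.com, or whose name matches the regex ^Mail.\n    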
def do_edit(self, arg):\n \"\"\"Launch an editor\"\"\"\n db.flush()\n db.modified = False\n start_editor(db_path)\n return True\n\n def do_rgrep(self, arg):\n \"\"\"Search for entries and export their full contents\"\"\"\n return self.do_grep(arg, full=True)\n\n def do_ls(self, arg):\n \"\"\"Search for entries and list their names\"\"\"\n return self.do_grep(arg, ls=True)\n\n def do_grep(self, arg, full=False, ls=False):\n \"\"\"Search for entries\"\"\"\n\n if full and not sys.stdout.isatty():\n print(db._modeline)\n\n args = shlex.split(arg)\n try:\n if len(args) > 1:\n arg = \"AND\"\n for x in args:\n arg += (\" (%s)\" if \" \" in x else \" %s\") % x\n filters = [compile_filter(x) for x in args]\n filter = ConjunctionFilter(*filters)\n elif len(args) > 0:\n arg = args[0]\n filter = compile_filter(arg)\n else:\n arg = \"*\"\n filter = compile_filter(arg)\n except FilterSyntaxError as e:\n trace(\"syntax error in filter:\", *e.args)\n sys.exit(1)\n\n if debug:\n trace(\"compiled filter:\", filter)\n\n results = db.find(filter)\n\n num = 0\n for entry in results:\n if entry.deleted:\n continue\n if full:\n print(entry.dump(storage=True, conceal=False))\n elif ls:\n print(\"%5d │ %s\" % (entry.itemno, entry.name))\n else:\n print(entry)\n num += 1\n\n if sys.stdout.isatty():\n print(\"(%d %s matching '%s')\" % \\\n (num, (\"entry\" if num == 1 else \"entries\"), filter))\n\n def do_convert(self, arg):\n \"\"\"Read entries from stdin and dump to stdout\"\"\"\n\n newdb = Database()\n newdb.parseinto(sys.stdin)\n self.do_dump(arg, newdb)\n\n def do_merge(self, arg):\n \"\"\"Read entries from stdin and merge to main database\"\"\"\n\n newdb = Database()\n newdb.parseinto(sys.stdin)\n\n outdb = Database()\n\n for newentry in newdb:\n if newentry._broken:\n print(\"(warning: skipped broken entry)\", file=sys.stderr)\n print(newentry.dump(storage=True), file=sys.stderr)\n continue\n\n try:\n entry = db.replace(newentry)\n except KeyError:\n entry = db.add(newentry)\n outdb.add(entry)\n\n db.modified = True\n\n self.do_dump(\"\", outdb)\n\n def do_reveal(self, arg):\n \"\"\"Display entry (including sensitive information)\"\"\"\n for itemno in expand_range(arg):\n entry = db.find_by_itemno(itemno)\n print(entry.dump(conceal=False))\n\n def do_show(self, arg):\n \"\"\"Display entry (safe)\"\"\"\n for itemno in expand_range(arg):\n entry = db.find_by_itemno(itemno)\n print(entry.dump())\n\n def do_qr(self, arg):\n for itemno in expand_range(arg):\n entry = db.find_by_itemno(itemno)\n print(entry.dump())\n try:\n psk = entry.attributes[\"!2fa.oath-psk\"][0].dump()\n except KeyError:\n print(\"\\t(No OATH preshared key for this entry.)\")\n else:\n issuer = entry.name\n login = entry.attributes[\"login\"][0]\n uri = \"otpauth://totp/%s?secret=%s&issuer=%s\" % (login, psk, issuer)\n with subprocess.Popen([\"qrencode\", \"-o-\", \"-tUTF8\", uri],\n stdout=subprocess.PIPE) as proc:\n for line in proc.stdout:\n print(\"\\t\" + line.decode(\"utf-8\"), end=\"\")\n print()\n\n def do_touch(self, arg):\n \"\"\"Rewrite the accounts.db file\"\"\"\n db.modified = True\n\n def do_sort(self, arg):\n \"\"\"Sort and rewrite the database\"\"\"\n db.sort()\n db.modified = True\n\n def do_lstags(self, arg):\n \"\"\"List all tags used by the database's entries\"\"\"\n for tag in sorted(db.tags()):\n print(tag)\n\n do_c = do_copy\n do_g = do_grep\n do_re = do_reveal\n do_s = do_show\n do_w = do_touch\n\nclass Clipboard():\n @classmethod\n def get(self):\n if sys.platform == \"win32\":\n import win32clipboard as clip\n 
clip.OpenClipboard()\n # TODO: what type does this return?\n data = clip.GetClipboardData(clip.CF_UNICODETEXT)\n print(\"clipboard.get =\", repr(data))\n clip.CloseClipboard()\n return data\n else:\n raise RuntimeError(\"Unsupported platform\")\n\n @classmethod\n def put(self, data):\n if sys.platform == \"win32\":\n import win32clipboard as clip\n clip.OpenClipboard()\n clip.EmptyClipboard()\n clip.SetClipboardText(data, clip.CF_UNICODETEXT)\n clip.CloseClipboard()\n elif sys.platform.startswith(\"linux\"):\n proc = subprocess.Popen((\"xsel\", \"-i\", \"-b\", \"-l\", \"/dev/null\"),\n stdin=subprocess.PIPE)\n proc.stdin.write(data.encode(\"utf-8\"))\n proc.stdin.close()\n proc.wait()\n else:\n raise RuntimeError(\"Unsupported platform\")\n\ndb_path = os.environ.get(\"ACCDB\",\n os.path.expanduser(\"~/accounts.db.txt\"))\n\ndb_cache_path = os.path.expanduser(\"~/Private/accounts.cache.txt\")\n\nif os.path.exists(db_path):\n db = Database.from_file(db_path)\nelse:\n db = Database.from_file(db_cache_path)\n db.readonly = True\n if sys.stderr.isatty():\n print(\"(Using read-only cache.)\", file=sys.stderr)\n\ninterp = Interactive()\n\nif len(sys.argv) > 1:\n line = subprocess.list2cmdline(sys.argv[1:])\n interp.onecmd(line)\nelse:\n interp.cmdloop()\n\ndb.flush()\n\nif \"cache\" in db.flags and db.path != db_cache_path:\n db.to_file(db_cache_path)\n","sub_path":"security/accdb/accdb.py","file_name":"accdb.py","file_ext":"py","file_size_in_byte":27267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"168198354","text":"#!/usr/local/bin/python3\n\nimport requests\nimport os.path, sys\n\nimport pyexcel as p\n\nfrom _database import connection, insert_value, select_value\n\n\"\"\"\n\n\tDATA USED \t\t: https://data.public.lu/fr/datasets/parkings-relais-existants/#_\n\tURL \t\t\t: https://download.data.public.lu/resources/parkings-relais-existants/20160602-111052/2016.06.01_Parking_relais_existants.ods\n\tDESCRIPTION \t: Tableau en format Open Document Spreadsheet Liste et capacité des parkings relais (P+R) existants\n\n\"\"\"\n\nurl = \"https://download.data.public.lu/resources/parkings-relais-existants/20160602-111052/2016.06.01_Parking_relais_existants.ods\"\ntarget_path = \"tmp/data_parking_relais.ods\"\n\n\ndef _download():\n\t''' Download xlsx file from data.public.lu '''\n\tglobal target_path, url\n\n\tif os.path.isfile(target_path):\n\t\treturn True\n\t\t\n\tresponse = requests.get(url, stream=True)\n\thandle = open(target_path, \"wb\")\n\tfor chunk in response.iter_content(chunk_size=512):\n\t if chunk: # filter out keep-alive new chunks\n\t handle.write(chunk)\n\n\tif os.path.isfile(target_path):\n\t\treturn True\n\telse:\n\t\tsys.exit(\"File not found\")\n\n\ndef parse_ods():\n\t''' parse ods file and insert all parking infos '''\n\trecords = p.iget_records(file_name=target_path)\n\n\tfor record in records:\n\t\tif record['Localisation']:\n\t\t\tsql = \"INSERT INTO parking_relais VALUES (NULL, %s, %s, '%s', %s, '%s')\" % (\n\t\t\t\trecord['Longitude'], record['Latitude'], record['Localisation'],\n\t\t\t\trecord['Emplacements existants'], record['Rabattement'])\n\t\t\tprint(sql)\n\t\t\tinsert_value(sql)\n\nif __name__ == '__main__':\n\t_download()\n\tparse_ods()\n\n\n\n\n\n\n\n","sub_path":"collector/data_parking_relais.py","file_name":"data_parking_relais.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"632276420","text":"import numpy 
as np\n\ndef computeHIT_FA(trueMask,estimationMask,threshold):\n\ttrueMask[trueMask > threshold] = 1\n\ttrueMask[trueMask != 1] = 0\n\testimationMask[estimationMask > threshold] = 1\n\testimationMask[estimationMask != 1] = 0\n\n\tsizeOfMask = np.shape(trueMask)\n\tindexTP = 0\n\tindexFP = 0\n\tpositives = 0\n\tnegatives = 0\n\n\tpositives = np.sum(trueMask)\n\tnegatives = -1*np.sum(trueMask-1)\n\tHITsMat = (estimationMask+trueMask)/2\n\tHITsMat[HITsMat < 1] = 0\n\tHIT = 1.*np.sum(HITsMat)/positives\n\n\tFAsMat = trueMask - estimationMask\n\tFAsMat[FAsMat > 0] = 0\n\tFA = -1.*np.sum(FAsMat)/negatives\n\n\n\t# HIT = 100.*np.sum((trueMask == estimationMask and estimationMask == 1).all())/positives\n\t# FA = 100.*np.sum(trueMask == estimationMask and estimationMask == 1).all()/negatives\n\n\n\t# for i in range(0,sizeOfMask[1]):\n\t# \tfor j in range(0,sizeOfMask[0]):\n\t# \t\tif(estimationMask[j,i] == trueMask[j,i] and estimationMask[j,i] == 1):\n\t# \t\t\tindexTP = indexTP + 1\n\t# \t\telif(estimationMask[j,i] != trueMask[j,i] and estimationMask[j,i] == 1):\n\t# \t\t\tindexFP = indexFP + 1\n\t# \t\tif(trueMask[j,i] == 1):\n\t# \t\t\tpositives += 1\n\t# \t\telse:\n\t# \t\t\tnegatives += 1\n\tHIT_FA = HIT - FA\n\t# HIT = (100.*indexTP/positives)\n\t# FA = (100.*indexFP/negatives)\n\treturn HIT,FA,HIT_FA\n\n\n\n","sub_path":"DissertationExperimetns/usefulMethodsSE.py","file_name":"usefulMethodsSE.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"477048056","text":"#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n'''\n Methods that will be used to process and prepare netflow data, before being sent to\nKafka cluster.\n'''\n\nimport logging\nimport re\nimport sys\nimport tempfile\n\nfrom common.utils import Util\nfrom datetime import datetime\n\nCOMMAND = 'nfdump -r {0} -o csv {1} > {2}'\nEPOCH = datetime(1970, 1, 1)\n\ndef convert(netflow, tmpdir, opts='', prefix=None):\n '''\n Convert `nfcapd` file to a comma-separated output format.\n\n :param netflow : Path of binary file.\n :param tmpdir : Path of local staging area.\n :param opts : A set of options for `nfdump` command.\n :param prefix : If `prefix` is specified, the file name will begin with that;\n otherwise, a default `prefix` is used.\n :returns : Path of CSV-converted file.\n :rtype : ``str``\n :raises OSError: If an error occurs while executing the `nfdump` command.\n '''\n logger = logging.getLogger('SPOT.INGEST.FLOW.PROCESS')\n\n with tempfile.NamedTemporaryFile(prefix=prefix, dir=tmpdir, delete=False) as fp:\n command = COMMAND.format(netflow, opts, fp.name)\n\n logger.debug('Execute command: {0}'.format(command))\n Util.popen(command, raises=True)\n\n return fp.name\n\ndef prepare(csvfile, max_req_size):\n '''\n Prepare text-formatted data for transmission through the Kafka cluster.\n\n This method takes a CSV file and groups it into segments, according to the\n pattern '%Y%m%d%h'. If the size of each segment is greater than the maximum size\n of a request, then divides each segment into smaller ones so that they can be\n transmitted.\n\n :param csvfile : Path of CSV-converted file; result of `convert` method.\n :param max_req_size: The maximum size of a request.\n :returns : A generator which yields the timestamp (in milliseconds) and a\n list of lines from the CSV-converted file.\n :rtype : :class:`types.GeneratorType`\n :raises IOError : If the given file has no any valid line.\n '''\n msg_list = []\n msg_size = segmentid = 0\n logger = logging.getLogger('SPOT.INGEST.FLOW.PROCESS')\n partition = timestamp = None\n pattern = re.compile('[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}')\n\n with open(csvfile, 'r') as fp:\n for line in fp:\n value = line.strip()\n if not value: continue\n\n match = pattern.search(value.split(',')[0])\n if not match: continue\n\n size = sys.getsizeof(value)\n # .........................assume the first 13 characters of the `search`\n # result as the `partition`, e.g. '2018-03-20 09'\n if match.group()[:13] == partition and (msg_size + size) < max_req_size:\n msg_list.append(value)\n msg_size += size\n continue\n\n # .........................if the hour is different or the message size is\n # above the maximum, then yield existing list and continue with an empty one\n if timestamp:\n logger.debug('Yield segment-{0}: {1} lines, {2} bytes'.format(segmentid,\n len(msg_list), msg_size))\n segmentid += 1\n\n yield (int(timestamp.total_seconds() * 1000), msg_list)\n\n msg_list = [value]\n msg_size = size\n partition = match.group()[:13]\n timestamp = datetime.strptime(match.group(), '%Y-%m-%d %H:%M:%S') - EPOCH\n\n # .................................send the last lines from the file. 
The check of\n # `timestamp` is in case the file is empty and `timestamp` is still ``None``\n if not timestamp:\n raise IOError('CSV-converted file has no valid lines.')\n\n logger.debug('Yield segment-{0}: {1} lines, {2} bytes'.format(segmentid,\n len(msg_list), msg_size))\n\n yield (int(timestamp.total_seconds() * 1000), msg_list)\n","sub_path":"spot-ingest/pipelines/flow/processing.py","file_name":"processing.py","file_ext":"py","file_size_in_byte":4700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"563248113","text":"project = 'python-moa'\ncopyright = '2019, Quansight'\nauthor = 'Quansight'\nversion = '0.0.1'\nrelease = '0.0.1'\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.doctest',\n 'sphinx.ext.mathjax',\n 'sphinxcontrib.tikz',\n]\ntemplates_path = ['_templates']\nsource_suffix = '.rst'\nmaster_doc = 'index'\nlanguage = None\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\npygments_style = 'sphinx'\nhtml_theme = 'alabaster'\n\ntikz_proc_suite = 'GhostScript'\ntikz_transparent = True\n\ndoctest_global_setup = '''\nfrom moa import ast\n'''\n\nmathjax_config = {\n 'TeX': {\n 'Macros': {\n # Math notation\n \"Z\": \"\\\\mathbb{Z}\", # set of integers\n # MoA notations\n \"minus\": \"{}^{\\\\boldsymbol{\\\\mbox{-}}\\\\!}\", # scalar negation operator\n \"rop\": [\"\\\\,\\\\mathrm{#1}^*\\\\,\", 1], # relational operation\n \"op\": [\"\\\\,\\\\mathrm{#1}\\\\,}\", 1], # binary operations\n \"uop\": [\"\\\\mathrm{#1}\\\\,\", 1], # unary operation\n \"hop\": [\"{{}_{#1}\\\\!\\\\Omega_{#2}}\\\\,\", 2], # higher order operation\n \"id\": [\"\\\\mathrm{id}(\\\\op{#1})\", 1], # identity of operations\n \"dims\": \"\\\\delta\\\\,\", # array dimension operator\n \"shape\": \"\\\\rho\\\\,\", # array shape operator\n \"size\": \"\\\\tau\\\\,\", # array size operator\n \"reshape\": \"\\\\,\\\\widehat{\\\\rho}\\\\,\", # reshape operator\n \"drop\": \"\\\\,\\\\nabla\\\\,\", # drop operator\n \"take\": \"\\\\,\\\\Delta\\\\,\", # take operator\n \"product\": \"\\\\pi\\\\,\", # product operator\n # DeclareMathOperator{\\\\rav}{rav}\n \"ravel\": \"\\\\rav\\\\,\", # ravel operator\n \"range\": \"\\\\iota\\\\,\", # range operator\n \"transpose\": \"\\\\bigcirc\\\\!\\\\!\\\\!\\\\!\\\\!\\\\backslash\\\\;\", # transpose operator, need a better symbol\n \"vc\": [\"<#1>\", 1], # vector with one component\n \"vcc\": [\"<#1\\\\;#2>\", 2], # vector with two components\n \"vccc\": [\"<#1\\\\;#2\\\\;#3>\", 3], # vector with three components\n \"vcccc\": [\"<#1\\\\;#2\\\\;#3\\\\;#4>\", 4], # vector with four components\n \"ac\": [\"[\\\\;#1\\\\;]\", 1], # array with one components\n \"acc\": [\"[\\\\;#1\\\\;#2\\\\;>\", 2], # array with two components\n \"accc\": [\"[\\\\;#1\\\\;#2\\\\;#3\\\\;]\", 3], # array with three components\n \"acccc\": [\"[\\\\;#1\\\\;#2\\\\;#3\\\\;#4\\\\;]\", 4], # array with four components\n \"avcc\": [\"[\\\\;<#1>\\\\;<#2>\\\\;]\", 2], # three dimensionar array with two components\n \"aacc\": [\"[\\\\;[\\\\;#1\\\\;]\\\\;[\\\\;#2\\\\;]\\\\;]\", 2], # four dimensionar array with two components\n \"aaccIcc\": [\"[\\\\;[\\\\;#1\\\\;#2\\\\;]\\\\;[\\\\;#3\\\\;#4\\\\;]\\\\;]\", 4], # four dimensionar array with two components\n \"outerprod\": [\"\\\\,\\\\bullet_{#1}\\\\,\", 1], # outer product opetation\n \"innerprod\": [\"\\\\,{}_{#1}\\\\!\\\\!\\\\bullet_{#2}\\\\,\", 2], # inner product opetation\n # DeclareMathOperator{\\\\red}{red}\n \"reduce\": [\"{}_{#1}\\\\!\\\\red\\\\,\", 1], # reduce operator\n \"getitem\": 
[\"{#2}\\\\,\\\\psi\\\\,{#1}\", 2], # psi operator\n \"scan\": [\"{}_{\\\\op{#1}\\\\!}\\\\mathrm{scan}\\\\,\", 1],\n \"kron\": \"\\\\bigcirc\\\\,\\\\!\\\\!\\\\!\\\\!\\\\!\\\\!\\\\times\\\\;\",\n \"cat\": \"+\\\\!\\\\!\\\\!+\",\n \"gu\": \"\\\\mathrm{gu}\\\\,\",\n \"gd\": \"\\\\mathrm{gd}\\\\,\",\n \"compress\": \"\\\\,\\\\notslash\\\\,\",\n \"expand\": \"\\\\,\\\\notbackslash\\\\,\",\n \"reverse\": \"\\\\phi\\\\,\",\n \"rotate\": [\"{#1}\\\\theta\\\\,\", 1]\n }\n }\n}\n","sub_path":"docs/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":4147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"652233590","text":"from django.http import HttpResponse\nfrom django.views.generic import TemplateView, View\nfrom django import template\nfrom core.models import *\nfrom core.dashboard.views import DashboardDynamicView\nfrom xos.config import XOS_DIR\nimport json\nimport os\nimport time\nimport tempfile\n\n\nclass ServiceGridView(TemplateView):\n\n head_template = r\"\"\"{% extends \"admin/dashboard/dashboard_base.html\" %}\n {% load admin_static %}\n {% block content %}\n \"\"\"\n\n tail_template = r\"{% endblock %}\"\n\n def readTemplate(self, fn):\n TEMPLATE_DIRS = [XOS_DIR + \"/templates/admin/dashboard/\",\n XOS_DIR + \"/core/xoslib/dashboards/\"]\n\n for template_dir in TEMPLATE_DIRS:\n pathname = os.path.join(template_dir, fn) + \".html\"\n if os.path.exists(pathname):\n break\n else:\n return \"failed to find %s in %s\" % (fn, TEMPLATE_DIRS)\n\n template = open(pathname, \"r\").read()\n return template\n\n def get(self, request, name=\"root\", *args, **kwargs):\n\n dash = DashboardView.objects.get(name=\"Services Grid\")\n\n gridTemplate = self.readTemplate(dash.url[9:])\n\n t = template.Template(self.head_template + gridTemplate + self.tail_template)\n\n response_kwargs = {}\n response_kwargs.setdefault('content_type', self.content_type)\n\n return self.response_class(\n request=request,\n template=t,\n **response_kwargs)\n\n\nclass ServiceGridViewPy(TemplateView):\n head_template = r\"\"\"{% extends \"admin/dashboard/dashboard_base.html\" %}\n {% load admin_static %}\n {% block content %}\n \"\"\"\n\n tail_template = r\"{% endblock %}\"\n\n def get(self, request, name=\"root\", *args, **kwargs):\n head_template = self.head_template\n tail_template = self.tail_template\n\n html = '
<!-- reconstructed grid header; the original markup was stripped in extraction --><table><tr><th colspan=\"4\">Script Objective</th></tr>
'\n\n        icons = []\n        for service in Service.objects.all():\n            view_url = service.view_url\n            if (not view_url):\n                view_url = \"/admin/core/service/$id$/\"\n            view_url = view_url.replace(\"$id$\", str(service.id))\n\n            image_url = service.icon_url\n            if (not image_url):\n                image_url = \"/static/primarycons_blue/gear_2.png\"\n\n            icons.append({\"name\": service.name, \"view_url\": view_url, \"image_url\": image_url})\n\n        icons.append({\"name\": \"Tenancy Graph\", \"view_url\": \"/serviceGraph.png\", \"image_url\": \"/static/primarycons_blue/service_graph.png\", \"horiz_rule\": True})\n        icons.append({\"name\": \"Add Service\", \"view_url\": \"/admin/core/service/add/\", \"image_url\": \"/static/primarycons_blue/plus.png\"})\n\n        i = 0\n        for icon in icons:\n            # cell markup below is reconstructed; the original tags were stripped in extraction\n            if icon.get(\"horiz_rule\", False):\n                html = html + \"</tr><tr><td colspan=4><hr></td></tr><tr>\"\n                i = 0\n\n            service_name = icon[\"name\"]\n            view_url = icon[\"view_url\"]\n            image_url = icon[\"image_url\"]\n\n            if (i % 4) == 0:\n                html = html + '</tr><tr>'\n\n            html = html + '<td align=\"center\"><a href=\"%s\">%s</a>' % (view_url, service_name)\n            i = i+1\n\n            html = html + '<br><a href=\"%s\"><img src=\"%s\" height=64 width=64></a></td>' % (view_url, image_url)\n\n        html = html + '</tr></table>
'\n\n t = template.Template(head_template + html + self.tail_template)\n\n response_kwargs = {}\n response_kwargs.setdefault('content_type', self.content_type)\n return self.response_class(\n request=request,\n template=t,\n **response_kwargs\n )\n\n\nclass ServiceGraphViewOld(TemplateView):\n # this attempt used networkx\n # yum -y install python-matplotlib python-networkx\n # pip-python install -upgrade networkx\n # pip-python install graphviz pygraphviz\n\n def get(self, request, name=\"root\", *args, **kwargs):\n import networkx as nx\n import matplotlib as mpl\n mpl.use(\"Agg\")\n import matplotlib.pyplot as plt\n import nxedges\n\n plt.figure(figsize=(10, 8))\n\n g = nx.DiGraph()\n\n labels = {}\n for service in Service.objects.all():\n g.add_node(service.id)\n if len(service.name) > 8:\n labels[service.id] = service.name[:8] + \"\\n\" + service.name[8:]\n else:\n labels[service.id] = service.name\n\n for tenant in CoarseTenant.objects.all():\n if (not tenant.provider_service) or (not tenant.subscriber_service):\n continue\n g.add_edge(tenant.subscriber_service.id, tenant.provider_service.id)\n\n pos = nx.graphviz_layout(g)\n nxedges.xos_draw_networkx_edges(g, pos, arrow_len=30)\n nx.draw_networkx_nodes(g, pos, node_size=5000)\n nx.draw_networkx_labels(g, pos, labels, font_size=12)\n # plt.axis('off')\n plt.savefig(\"/tmp/foo.png\")\n\n return HttpResponse(open(\"/tmp/foo.png\", \"r\").read(), content_type=\"image/png\")\n\n\nclass ServiceGraphView(TemplateView):\n # this attempt just uses graphviz directly\n # yum -y install graphviz\n # pip-python install pygraphviz\n\n def get(self, request, name=\"root\", *args, **kwargs):\n import pygraphviz as pgv\n\n g = pgv.AGraph(directed=True)\n g.graph_attr.update(size=\"8,4!\")\n g.graph_attr.update(dpi=\"100\")\n # g.graph_attr.update(nodesep=\"2.5\")\n g.graph_attr.update(overlap=\"false\")\n g.graph_attr.update(graphdir=\"TB\")\n\n for service in Service.objects.all():\n provided_tenants = Tenant.objects.filter(provider_service=service, subscriber_service__isnull=False)\n subscribed_tenants = Tenant.objects.filter(subscriber_service=service, provider_service__isnull=False)\n if not (provided_tenants or subscribed_tenants):\n # nodes with no edges aren't interesting\n continue\n g.add_node(service.id, label=service.name)\n\n for tenant in Tenant.objects.all():\n if (not tenant.provider_service) or (not tenant.subscriber_service):\n continue\n g.add_edge(tenant.subscriber_service.id, tenant.provider_service.id)\n\n tf = tempfile.TemporaryFile()\n g.layout(prog=\"dot\")\n g.draw(path=tf, format=\"png\")\n tf.seek(0)\n\n return HttpResponse(tf.read(), content_type=\"image/png\")\n","sub_path":"xos/core/views/serviceGraph.py","file_name":"serviceGraph.py","file_ext":"py","file_size_in_byte":6499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"187689888","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 3 15:27:24 2019\n\n@author: pongtit\n\nPurpose:\n Machine Learning team assignment\n\"\"\"\n\n\n# Import libraries and data\nimport pandas as pd\nimport statsmodels.formula.api as smf\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport sklearn.metrics\n\n\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\n\n\nfile = 'birthweight_feature_set.xlsx'\n\n\nbwht = pd.read_excel(file)\n\n\n########################\n# Fundamental Dataset 
Exploration\n########################\n\n\n# Information about each variable\nbwht.info()\n\n\n# Displaying the first rows of the DataFrame\nbwht.head()\n\n\n# Descriptive statistics\nbwht.describe().round(2)\n\n\nbwht.sort_values('bwght', ascending=False)\n\n\n##############################################################################\n# Impute missing values\n##############################################################################\n\n\nprint(\n bwht\n .isnull()\n .sum()\n )\n\n\nfor col in bwht:\n\n \"\"\" Create columns that are 0s if a value was not missing and 1 if\n a value is missing. \"\"\"\n\n if bwht[col].isnull().any():\n bwht['m_'+col] = bwht[col].isnull().astype(int)\n\n\n# Creating a dropped dataset so we could plot a graph to identify which\n# values we should use to impute our data.\ndf_dropped = bwht.dropna()\n\n\n# Plotting each variable to identify which values we should use to impute\n# missing values.\nsns.distplot(df_dropped['mage'])\nplt.show()\n\n\nsns.distplot(df_dropped['meduc'])\nplt.show()\n\n\nsns.distplot(df_dropped['monpre'])\nplt.show()\n\n\nsns.distplot(df_dropped['npvis'])\nplt.show()\n\n\nsns.distplot(df_dropped['fage'])\nplt.show()\n\n\nsns.distplot(df_dropped['feduc'])\nplt.show()\n\n\nsns.distplot(df_dropped['omaps'])\nplt.show()\n\n\nsns.distplot(df_dropped['fmaps'])\nplt.show()\n\n\nsns.distplot(df_dropped['cigs'])\nplt.show()\n\n\nsns.distplot(df_dropped['drink'])\nplt.show()\n\n\nsns.distplot(df_dropped['male'])\nplt.show()\n\n\nsns.distplot(df_dropped['mwhte'])\nplt.show()\n\n\nsns.distplot(df_dropped['mblck'])\nplt.show()\n\n\nsns.distplot(df_dropped['moth'])\nplt.show()\n\n\nsns.distplot(df_dropped['fwhte'])\nplt.show()\n\n\nsns.distplot(df_dropped['fblck'])\nplt.show()\n\n\nsns.distplot(df_dropped['foth'])\nplt.show()\n\n\nsns.distplot(df_dropped['bwght'])\nplt.show()\n\n\n# After visual analysis above, we decided to use median to impute\n# all missing values.\nfill = bwht['meduc'].median()\n\nbwht['meduc'] = bwht['meduc'].fillna(fill)\n\n\nfill = bwht['npvis'].median()\n\nbwht['npvis'] = bwht['npvis'].fillna(fill)\n\n\nfill = bwht['feduc'].median()\n\nbwht['feduc'] = bwht['feduc'].fillna(fill)\n\n\n# Checking the overall dataset to see if there are any remaining\n# missing values.\nprint(\n bwht\n .isnull()\n .any()\n .any()\n )\n\n\n##############################################################################\n# Outliers Analysis\n##############################################################################\n\n\n# We used quantiles to take a first look in the beginning and identify the\n# possible outliers which we might flag them.\nbirth_quantiles = bwht.loc[:, :].quantile([0.20,\n 0.40,\n 0.60,\n 0.80,\n 1.00])\n\n\nprint(birth_quantiles)\n\n\nfor col in bwht:\n print(col)\n\n########################\n# Flagging outliers\n########################\n\n# Our outliers come from the combination of external research and the data\n# from dataset.\n# See Footnote 0 for a more detailed explanation of the outliers.\n\nmage_hi = 55\nmeduc_lo = 10\nmonpre_hi = 5\nnpvis_hi = 20\nfage_hi = 53\nfeduc_lo = 8\n\n\n# 'Mother age' outliers\nbwht['out_mage'] = 0\n\nfor val in enumerate(bwht.loc[:, 'mage']):\n\n if val[1] >= mage_hi:\n bwht.loc[val[0], 'out_mage'] = 1\n\n\n# 'Mother education' outliers\nbwht['out_meduc'] = 0\n\nfor val in enumerate(bwht.loc[:, 'meduc']):\n\n if val[1] <= meduc_lo:\n bwht.loc[val[0], 'out_meduc'] = -1\n\n\n# 'Month prenatal care began' outliers\nbwht['out_monpre'] = 0\n\nfor val in enumerate(bwht.loc[:, 
'monpre']):\n\n if val[1] >= monpre_hi:\n bwht.loc[val[0], 'out_monpre'] = 1\n\n\n# 'Total number of prenatal visits ' outliers\nbwht['out_npvis'] = 0\n\nfor val in enumerate(bwht.loc[:, 'npvis']):\n\n if val[1] >= npvis_hi:\n bwht.loc[val[0], 'out_npvis'] = 1\n\n\n# 'Father age' outliers\nbwht['out_fage'] = 0\n\nfor val in enumerate(bwht.loc[:, 'fage']):\n\n if val[1] >= fage_hi:\n bwht.loc[val[0], 'out_fage'] = 1\n\n\n# 'Father education' outliers\nbwht['out_feduc'] = 0\n\nfor val in enumerate(bwht.loc[:, 'feduc']):\n\n if val[1] <= feduc_lo:\n bwht.loc[val[0], 'out_feduc'] = -1\n\n\n##############################################################################\n# Correlation Analysis\n##############################################################################\n\n\n# Using correlation to identify which variables we should consider in our\n# Model.\ndf_corr = bwht.corr().round(2)\n\n\nprint(df_corr)\n\n\ndf_corr.loc['bwght'].sort_values(ascending=False)\n'''\nBy looking at correlation analysis, we found that some varaibles should be\nused in our new DataFrame such as average drink per week or average\ncigareetes per day.\n'''\n\n\n#############################################################################\n# OLS Regression Analysis in statsmodels\n#############################################################################\n\n\n# Drop birthweight because we could not use predict variable to predict model\n# Drop omaps and fmaps because they are measured after the birth\nbwht_data = bwht.drop(['bwght', 'omaps', 'fmaps'], axis=1)\n\n\n# Creating target dataset by selecting birthweight which is our\n# predict varaible\nbwht_target = bwht.loc[:, 'bwght']\n\n\n# Dividing 10% of our data as our test data while the rest 90% is train data\n# We are setting random_state = 508 so we could replicate our work\nX_train, X_test, y_train, y_test = train_test_split(\n bwht_data,\n bwht_target,\n test_size=0.1,\n random_state=508\n )\n\n\n# We need to merge our X_train and y_train sets so that they can be\n# used in statsmodels\nbwht_train = pd.concat([X_train, y_train], axis=1)\n\n\n########################\n# Applying statmodels to our data\n########################\n\n\n# Step 1: Build the model with all variable to see the model\nlm_bwht_qual = smf.ols(\n formula=\"\"\"bwght ~ mage +\n meduc +\n monpre +\n npvis +\n fage +\n feduc +\n cigs +\n drink +\n male +\n mwhte +\n mblck +\n moth +\n fwhte +\n fblck +\n foth +\n m_meduc +\n m_npvis +\n m_feduc +\n out_mage +\n out_meduc +\n out_monpre +\n out_npvis +\n out_fage +\n out_feduc\n \"\"\", data=bwht_train\n )\n\n\n# Step 2: Fit the model based on the data\nlm_results = lm_bwht_qual.fit()\n\n\n# Step 3: Analyze the summary output. This result after putting all variables\n# are not good enough so we have to remove some variables to create\n# good model for prediction.\nprint(lm_results.summary())\n\n\n###############################################################################\n# Applying the Optimal Model in scikit-learn\n###############################################################################\n\n\n# Preparing a DataFrame based the analysis above. 
We decided to use\n# the following variables and test again.\nbwht_sig_data = bwht.loc[:, ['cigs',\n                             'drink',\n                             'out_mage',\n                             'out_fage',\n                             'out_feduc',\n                             'feduc',\n                             'fage']]\n\n\n# Preparing the target variable\nbwht_sig_target = bwht.loc[:, 'bwght']\n\n\n# Creating train and test sets with the new DataFrame\nX_train, X_test, y_train, y_test = train_test_split(\n    bwht_sig_data,\n    bwht_sig_target,\n    test_size=0.10,\n    random_state=508)\n\n\n########################\n# Using OLS on the optimal model\n########################\n\n\n# Same process of creating train and test sets\nX_train, X_test, y_train, y_test = train_test_split(\n    bwht_sig_data,\n    bwht_sig_target,\n    test_size=0.10,\n    random_state=508)\n\n\n# Prepping the model\nlr = LinearRegression()\n\n\n# Fitting the model\nlr_fit = lr.fit(X_train, y_train)\n\n\n# OLS predictions, which we will use to compare with the KNN model\nlr_pred = lr_fit.predict(X_test)\n\n\n# Scoring the model\ny_score_ols_optimal = lr_fit.score(X_test, y_test)\n\n\n# The score is directly comparable to R-Square\nprint(y_score_ols_optimal)\n\n\n# Comparing the testing score to the training score for our OLS model\nprint('Training Score:', lr.score(X_train, y_train).round(3))\nprint('Testing Score:', lr.score(X_test, y_test).round(3))\nprint(\"\"\"\n      Let's compare the training score and the testing score. The training\n      score is 0.731 and the testing score is 0.707. The two results are not\n      very different, which means our prediction model is not overfitting.\n      \"\"\")\n\n\n########################\n# Using KNN on the optimal model\n########################\n\n\n# Creating a regressor object\nknn_reg = KNeighborsRegressor(algorithm='auto',\n                              n_neighbors=1)\n\n\n# Checking the type of this new object\ntype(knn_reg)\n\n\n# Teaching (fitting) the algorithm based on the training data\nknn_reg.fit(X_train, y_train)\n\n\n# Predicting on the X_data that the model has never seen before\ny_pred = knn_reg.predict(X_test)\n\n\n# Calling the score method, which compares the predicted values to the actual\n# values\ny_score = knn_reg.score(X_test, y_test)\n\n\n# The score is directly comparable to R-Square. This result is not good\n# enough, since the number of neighbors is not the most suitable for this\n# model.\nprint(y_score)\n\n\n########################\n# Finding the optimal model\n########################\n\n\n# Same process of creating train and test sets\nX_train, X_test, y_train, y_test = train_test_split(\n    bwht_sig_data,\n    bwht_sig_target,\n    test_size=0.10,\n    random_state=508)\n\n\n# Creating two lists, one for training set accuracy and the other for test\n# set accuracy\ntraining_accuracy = []\ntest_accuracy = []\n\n\n# Checking n_neighbors values from 1 to 50 to build a visualization\nneighbors_settings = range(1, 51)\n\n\nfor n_neighbors in neighbors_settings:\n    # Building the model\n    clf = KNeighborsRegressor(n_neighbors=n_neighbors)\n    clf.fit(X_train, y_train)\n\n    # Recording the training set accuracy\n    training_accuracy.append(clf.score(X_train, y_train))\n\n    # Recording the generalization accuracy\n    test_accuracy.append(clf.score(X_test, y_test))\n\n\n# Plotting the visualization\nfig, ax = plt.subplots(figsize=(12, 9))\nplt.plot(neighbors_settings, training_accuracy, label=\"training accuracy\")\nplt.plot(neighbors_settings, test_accuracy, label=\"test accuracy\")\nplt.ylabel(\"Accuracy\")\nplt.xlabel(\"n_neighbors\")\nplt.legend()\nplt.show()\n\n\n# Finding the optimal k. In this case we have to be careful that the index\n# starts at 0. We got index number 13. 
It means our k will be 14.\nprint(test_accuracy.index(max(test_accuracy)))\n\n\n# According to the result above, the best number of neighbors is 14, so we\n# will use n_neighbors = 14 for our model\nknn_reg = KNeighborsRegressor(algorithm='auto',\n                              n_neighbors=14)\n\n\n# Fitting the model based on the training data\nknn_reg_fit = knn_reg.fit(X_train, y_train)\n\n\n# Scoring the model\ny_score_knn_optimal = knn_reg.score(X_test, y_test)\n\n\n# The score is directly comparable to R-Square.\nprint(y_score_knn_optimal)\nprint(f\"\"\"\n      The result of {y_score_knn_optimal.round(3)} is the best testing score\n      from the KNN models, which is still not good enough compared to the\n      OLS model.\n      \"\"\")\n\n\n# Generating predictions based on the optimal KNN model\nknn_reg_optimal_pred = knn_reg_fit.predict(X_test)\n\n\n# We calculated the mean squared error for both models so we can compute the\n# root mean squared error and see how far our predictions are from the\n# actual data.\nlr_mse = sklearn.metrics.mean_squared_error(y_test, lr_pred)\nknn_mse = sklearn.metrics.mean_squared_error(y_test, knn_reg_optimal_pred)\n\n\n# Calculating RMSE to see how far our predictions are from the actual data.\nlr_rmse = pd.np.sqrt(lr_mse)\nknn_rmse = pd.np.sqrt(knn_mse)\n\n\nprint('OLS_RMSE:', lr_rmse.round(3))\nprint('KNN_RMSE:', knn_rmse.round(3))\nprint(f\"\"\"\n      In this case we can see that the OLS model predicts closer to the\n      actual data than the KNN model. The OLS model predicts about\n      {lr_rmse.round(0)} grams away from the data while KNN predicts about\n      {knn_rmse.round(0)} grams away.\n      \"\"\")\n\n\n# Comparing the testing scores from the two models, KNN and OLS.\nprint('OLS_testing score:', y_score_ols_optimal.round(3))\nprint('KNN_testing score:', y_score_knn_optimal.round(3))\nprint(f\"\"\"\n      Comparing our testing scores from the OLS and KNN models, we can see\n      that the OLS model predicts much better than the KNN model, with test\n      scores of {y_score_ols_optimal.round(3)} and\n      {y_score_knn_optimal.round(3)} respectively.\n      \"\"\")\n\n\nprint(\"\"\"\n      In conclusion, in this case the OLS model predicts more accurately\n      than the KNN model.\n      \"\"\")\n\n\n# Exporting our predictions from the OLS model to an Excel file.\npred = pd.DataFrame(data=lr_pred, columns=['y_pred'])\npred.to_excel(\"predicted_values.xlsx\")\n\n\n'''\n##############################################################################\n# Footnotes\n##############################################################################\n\n\nFootnote 0\n\nFor mother age, 51 is the average age at which women in the US start\nmenopause, so comparing that to the data we decided that 55 was a better\nfit for the outlier in our case.\n\nFor mother / father education, external research indicates that a lower\nlevel of education might be correlated with a lower ability to care for\nthe baby while pregnant, as well as with access to lower levels of care,\nso we decided to flag education levels below a high school education as\nlow.\n\nFor father age, although men don't have menopause to worry about and\nstay fertile, sperm quality can lessen with age and that may have an\neffect on the baby.\n\nFor total number of prenatal visits, more visits can indicate a higher\nlevel of care, or can indicate a high-risk pregnancy that requires more\nvisits; both cases might positively or negatively affect bwght.\n\nFor month prenatal care began, earlier access to care can lead to more\neducation and information about the process, and it also means an earlier\nstart to supplements and 
caring for the growing child which in turn can\nimpact bwght.\n\n'''\n","sub_path":"Birthweight_prediction_machine_learning.py","file_name":"Birthweight_prediction_machine_learning.py","file_ext":"py","file_size_in_byte":15526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"556328404","text":"import pyautogui\nimport sys\nimport time\n\nshort_period = .1\nmedium_period = .3\n\ndef check_reborn():\n px = list(pyautogui.screenshot(region=(1060,230,1,1)).getdata())[0]\n if px[0] == 233:\n return True\n else:\n return False\n\ndef check_button(which):\n if which == 4:\n px = list(pyautogui.screenshot(region=(380,510,1,1)).getdata())[0]\n if px[0] == 173:\n return True\n else:\n return False\n elif which == 3:\n px = list(pyautogui.screenshot(region=(160,510,1,1)).getdata())[0]\n if px[0] == 173:\n return True\n else:\n return False\n elif which == 2:\n px = list(pyautogui.screenshot(region=(380,390,1,1)).getdata())[0]\n if px[0] == 162:\n return True\n else:\n return False\n elif which == 1:\n px = list(pyautogui.screenshot(region=(160,390,1,1)).getdata())[0]\n if px[0] == 162:\n return True\n else:\n return False\n return False\n\ndef force_click(which):\n total = 1\n while not check_button(which):\n time.sleep(short_period)\n total += 1\n if total > 10:\n break\n click_button(which)\n time.sleep(short_period)\n\ndef click_button(which):\n if which == 4:\n pyautogui.click(380,510)\n time.sleep(short_period)\n elif which == 2:\n pyautogui.click(400,390)\n time.sleep(short_period)\n elif which == 3:\n pyautogui.click(160,510)\n time.sleep(short_period)\n elif which == 1:\n pyautogui.click(200,390)\n time.sleep(short_period)\n\n\ndef click_group(arr):\n for i in arr:\n if check_button(i):\n click_button(i)\n time.sleep(medium_period)\n time.sleep(medium_period)\n\ndef reborn_group(idx):\n pyautogui.click(1060, 230)\n time.sleep(short_period)\n while True:\n px = list(pyautogui.screenshot(region=(430,570,1,1)).getdata())[0]\n if px[0] == 215:\n break\n else:\n time.sleep(short_period)\n pyautogui.click(430, 570)\n print(\"########## %d ########### Reborn clicked!\"%idx)\n while True:\n px = list(pyautogui.screenshot(region=(560,500,1,1)).getdata())[0]\n if px[0] == 119:\n break\n else:\n time.sleep(short_period)\n pyautogui.click(560, 500)\n #time.sleep(5.5)\n #time.sleep(.5)\n #pyautogui.click(760, 210)\n #time.sleep(.5)\n\ndef full(idx):\n force_click(1)\n force_click(1)\n force_click(1)\n force_click(1)\n force_click(2)\n force_click(4)\n force_click(3)\n force_click(4)\n force_click(3)\n force_click(2)\n force_click(4)\n force_click(3)\n force_click(2)\n force_click(4)\n while not check_reborn():\n force_click(1)\n force_click(2)\n reborn_group(idx)\n\nfor idx in range(0, int(sys.argv[1])):\n full(idx)\n\n","sub_path":"bashscript/REB.py","file_name":"REB.py","file_ext":"py","file_size_in_byte":2887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"454784269","text":"#!/usr/bin/env python3\n\"\"\"module\"\"\"\n\n\nimport tensorflow.keras as K\n\n\ndef build_model(nx, layers, activations, lambtha, keep_prob):\n \"\"\"function\"\"\"\n inputs = K.Input(shape=(nx,))\n l2 = K.regularizers.l2(lambtha)\n x = K.layers.Dense(layers[0], activation=activations[0],\n kernel_regularizer=l2)(inputs)\n for layer in range(1, len(layers)):\n drop = K.layers.Dropout(1 - keep_prob)(x)\n x = K.layers.Dense(layers[layer], activation=activations[layer],\n kernel_regularizer=l2)(drop)\n return 
K.Model(inputs=inputs, outputs=x)\n","sub_path":"supervised_learning/0x06-keras/1-input.py","file_name":"1-input.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"432772830","text":"from cihai import extend\nfrom cihai.core import Cihai\nfrom cihai.data.unihan.dataset import Unihan, UnihanVariants\n\n\nclass SimplestDataset(extend.Dataset):\n def a_method(self):\n return \"hi\"\n\n\ndef test_add_dataset():\n c = Cihai()\n c.add_dataset(SimplestDataset, namespace=\"simple\")\n assert hasattr(c, \"simple\")\n assert isinstance(c.simple, extend.Dataset)\n assert hasattr(c.simple, \"a_method\")\n assert callable(c.simple.a_method)\n assert c.simple.a_method() == \"hi\"\n\n\nclass SimplestSQLAlchemyDataset(extend.Dataset, extend.SQLAlchemyMixin):\n def a_method(self):\n return \"hi\"\n\n\ndef test_add_dataset_with_db():\n c = Cihai()\n c.add_dataset(SimplestSQLAlchemyDataset, namespace=\"simple\")\n assert hasattr(c, \"simple\")\n assert isinstance(c.simple, extend.Dataset)\n assert hasattr(c.simple, \"a_method\")\n assert callable(c.simple.a_method)\n assert c.simple.a_method() == \"hi\"\n\n assert hasattr(c, \"sql\")\n assert hasattr(c.simple, \"sql\")\n\n\ndef test_add_dataset_unihan(unihan_options):\n c = Cihai()\n c.add_dataset(Unihan, namespace=\"unihan\")\n assert hasattr(c, \"unihan\")\n assert isinstance(c.unihan, extend.Dataset)\n\n c.unihan.sql\n\n c.unihan.bootstrap(options=unihan_options)\n U = c.sql.base.classes.Unihan\n\n first_glyph = (\n c.unihan.sql.session.query(U).filter(U.kDefinition.isnot(None)).first()\n )\n\n char = first_glyph.char\n assert (\n c.unihan.lookup_char(char=char).first().kDefinition == first_glyph.kDefinition\n )\n\n assert (\n c.unihan.reverse_char(hints=[first_glyph.kDefinition]).first().char == char\n ), \"works with list of column value matches\"\n\n assert (\n c.unihan.reverse_char(hints=first_glyph.kDefinition).first().char == char\n ), \"works with strings\"\n\n c.unihan.add_plugin(UnihanVariants, \"variants\")\n assert hasattr(c.unihan, \"variants\")\n\n def variant_list(field):\n for char in c.unihan.with_fields(field):\n variants = []\n for var in char.untagged_vars(field):\n variants.append(var)\n yield (char, variants)\n\n result = {char: variants for (char, variants) in variant_list(\"kZVariant\")}\n\n assert len(result.values()) > 0\n assert len(result.keys()) > 0\n","sub_path":"tests/test_extend.py","file_name":"test_extend.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"239625663","text":"import os\r\nimport sys\r\nimport logging\r\n\r\nimport torch\r\nimport numpy as np\r\n\r\nfrom src.train import train_standard\r\nfrom src.evaluation import test_clean\r\nfrom src.args import get_args, print_args\r\nfrom src.utils_dataset import load_dataset\r\nfrom src.utils_log import metaLogger, rotateCheckpoint, wandbLogger, saveModel, delCheckpoint\r\nfrom src.utils_general import seed_everything, get_model, get_optim\r\nimport ipdb\r\n\r\n\r\nbest_acc1 = 0\r\n\r\ndef train(args, epoch, logger, loader, model, opt, lr_scheduler, device):\r\n \"\"\"perform one epoch of training.\"\"\"\r\n if args.method == \"standard\":\r\n train_log = train_standard(loader, model, opt, device, epoch, lr_scheduler)\r\n\r\n else:\r\n raise NotImplementedError(\"Training method not implemented!\")\r\n\r\n logger.add_scalar(\"train/acc_ep\", train_log[0], epoch)\r\n 
logger.add_scalar(\"train/loss_ep\", train_log[1], epoch)\r\n logging.info(\r\n \"Epoch: [{0}]\\t\"\r\n \"Loss: {loss:.6f}\\t\"\r\n \"Accuracy: {acc:.2f}\".format(\r\n epoch,\r\n loss=train_log[1],\r\n acc=train_log[0]))\r\n\r\n return train_log\r\n\r\ndef main():\r\n\r\n global best_acc1\r\n\r\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\n\r\n args = get_args()\r\n print_args(args)\r\n\r\n logger = metaLogger(args)\r\n logging.basicConfig(\r\n filename=args.j_dir+ \"/log/log.txt\",\r\n format='%(asctime)s %(message)s', level=logging.INFO)\r\n logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))\r\n\r\n seed_everything(args.seed)\r\n train_loader, test_loader, _, _ = load_dataset(\r\n args.dataset,\r\n args.batch_size,\r\n args.op_name,\r\n args.op_prob,\r\n args.op_magnitude)\r\n\r\n model = get_model(args, device)\r\n opt, lr_scheduler = get_optim(model, args)\r\n ckpt_epoch = 1\r\n\r\n ckpt_dir = args.j_dir+\"/\"+str(args.j_id)+\"/\"\r\n if logger.ckpt_status in ['curr', 'prev']:\r\n ckpt_location = os.path.join(ckpt_dir, \"custom_ckpt_\"+logger.ckpt_status+\".pth\")\r\n if os.path.exists(ckpt_location):\r\n ckpt = torch.load(ckpt_location)\r\n model.load_state_dict(ckpt[\"state_dict\"])\r\n opt.load_state_dict(ckpt[\"optimizer\"])\r\n ckpt_epoch = ckpt[\"epoch\"]\r\n best_acc1 = ckpt[\"best_acc1\"]\r\n if lr_scheduler is not None:\r\n for _dummy in range(ckpt_epoch-1):\r\n lr_scheduler.step()\r\n # lr_scheduler.load_state_dict(ckpt[\"lr_scheduler\"])\r\n print(\"LOADED CHECKPOINT\")\r\n else:\r\n print(\"CHECKPOINT STATUS: {}\".format(logger.ckpt_status))\r\n\r\n actual_trained_epoch = args.epoch\r\n\r\n for _epoch in range(ckpt_epoch, args.epoch+1):\r\n train_log = train(args, _epoch, logger, train_loader, model, opt, lr_scheduler, device)\r\n\r\n # evaluation on testset\r\n test_log = test_clean(test_loader, model, device)\r\n\r\n is_best = test_log[0] > best_acc1\r\n best_acc1 = max(test_log[0], best_acc1)\r\n\r\n logger.add_scalar(\"lr\", opt.param_groups[0]['lr'], _epoch)\r\n logger.add_scalar(\"test/top1_acc\", test_log[0], _epoch)\r\n logger.add_scalar(\"test/top5_acc\", test_log[2], _epoch)\r\n logger.add_scalar(\"test/loss\", test_log[1], _epoch)\r\n logger.add_scalar(\"test/best_top1_acc\", best_acc1, _epoch)\r\n logging.info(\r\n \"Test set: Loss: {loss:.6f}\\t\"\r\n \"Accuracy: {acc:.2f}\".format(\r\n loss=test_log[1],\r\n acc=test_log[0]))\r\n\r\n # checkpointing for preemption\r\n if _epoch % args.ckpt_freq == 0:\r\n # since preemption would happen in the next epoch, so we want to start from {_epoch+1}\r\n rotateCheckpoint(ckpt_dir, \"custom_ckpt\", model, opt, _epoch+1, best_acc1)\r\n logger.save_log()\r\n\r\n # save best model (after 75% way through the training)\r\n if is_best and _epoch > int(args.epoch*3/4):\r\n saveModel(args.j_dir+\"/model/\", \"best_model\", model.state_dict())\r\n\r\n # Early terminate training when half way thru training and test accuracy still below 20%\r\n if np.isnan(train_log[1]) or (_epoch > int(args.epoch/2) and best_acc1 < 20):\r\n actual_trained_epoch = _epoch\r\n saveModel(args.j_dir+\"/model/\", \"final_model\", model.state_dict())\r\n break # break the training for-loop\r\n\r\n if actual_trained_epoch == args.epoch:\r\n # at the end of training, save after test set evaluation, since Autoattack sometimes fail\r\n saveModel(args.j_dir+\"/model/\", \"final_model\", model.state_dict())\r\n if args.fancy_eval:\r\n # evaluate using the best checkpoint\r\n try:\r\n ckpt_best_model = 
torch.load(args.j_dir+\"/model/best_model.pt\")\r\n model.load_state_dict(ckpt_best_model)\r\n except:\r\n print(\"Problem loading best_model ckpt at {}/model/best_model.pt!\".format(args.j_dir))\r\n print(\"Evaluating using the model from the last epoch!\")\r\n\r\n if best_acc1 > eval_threshold:\r\n # place some evaluation here\r\n pass\r\n\r\n # upload runs to wandb:\r\n if args.enable_wandb:\r\n save_wandb_retry = 0\r\n save_wandb_successful = False\r\n while not save_wandb_successful and save_wandb_retry < 5:\r\n print('Uploading runs to wandb...')\r\n try:\r\n wandb_logger = wandbLogger(args)\r\n wandb_logger.upload(logger, actual_trained_epoch)\r\n except:\r\n save_wandb_retry += 1\r\n print('Retry {} times'.format(save_wandb_retry))\r\n else:\r\n save_wandb_successful = True\r\n\r\n if not save_wandb_successful:\r\n print('Failed at uploading runs to wandb.')\r\n\r\n logger.save_log(is_final_result=True)\r\n\r\n # delete slurm checkpoints\r\n delCheckpoint(args.j_dir, args.j_id)\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"30205123","text":"import os\r\norg_dir=input(\"Enter directory Name:\")\r\nif(os.path.isdir(org_dir)):\r\n\tnew_dir=input(\"Enter new name of directory:\")\r\n\ttry:\r\n\t\tos.rename(org_dir,new_dir)\r\n\t\tprint('directory renamed')\r\n\texcept FileExistsError:\r\n\t\tprint(\"new directory already exists\")\r\nelse:\r\n\tprint('directory not found')\r\n\t","sub_path":"irshad dir/programming/python_self/D6/p12.py","file_name":"p12.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"284193320","text":"import time\n\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom OpenGL.GLUT import *\n\n# global currentMode\ncurrentMode = 0\nModeNums = 6\nRunMode = 0\n\n\ndef init():\n # 初始化背景\n glClearColor(1.0, 1.0, 1.0, 1.0)\n gluOrtho2D(-20.0, 20.0, -20.0, 20.0)\n\n\ndef reshape(w, h):\n if h <= 0:\n h = 1\n glViewport(0, 0, w, h)\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n if w <= h:\n gluOrtho2D(-20.0, 20.0, -20.0 * h / w, 20.0 * h / w)\n else:\n gluOrtho2D(-20.0 * w / h, 20.0 * w / h, -20.0, 20.0)\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n\n\ndef myKey_Callback(key, x, y):\n global currentMode\n global RunMode\n # currentMode = 2\n # print(currentMode)\n # if key == 32:\n # print(11)\n if key == b' ':\n RunMode = 1\n currentMode = (currentMode + 1) % ModeNums\n # print(currentMode)\n glutPostRedisplay()\n print(currentMode)\n elif key == b'a':\n exit(-1)\n elif key == b's':\n RunMode = 0\n\n\ndef RenderScene():\n # global currentMode\n glClear(GL_COLOR_BUFFER_BIT)\n if currentMode == 0:\n glPointSize(5)\n glBegin(GL_POINTS)\n glColor3f(1.0, 0.0, 0.0)\n glVertex2f(3.0, 3.0)\n glVertex2f(8.0, 3.0)\n glVertex2f(5.0, 8.0)\n glEnd()\n glFlush()\n\n elif currentMode == 1:\n glBegin(GL_LINE_STRIP)\n glColor3f(0.0, 1.0, 0.0)\n glVertex2f(3.0, 3.0)\n glVertex2f(8.0, 3.0)\n glVertex2f(5.0, 8.0)\n glEnd()\n glFlush()\n\n elif currentMode == 2:\n glBegin(GL_LINE_LOOP)\n glColor3f(0.0, 0.0, 1.0)\n glVertex2f(3.0, 3.0)\n glVertex2f(8.0, 3.0)\n glVertex2f(5.0, 8.0)\n glEnd()\n glFlush()\n\n # elif currentMode == 3:\n # glBegin(GL_TRIANGLES)\n # glColor3f(1.0, 1.0, 0.0)\n # glVertex2f(3.0, 3.0)\n # glVertex2f(8.0, 3.0)\n # glVertex2f(5.0, 8.0)\n # glEnd()\n # glFlush()\n\n elif 
currentMode == 3:\n glBegin(GL_TRIANGLES)\n glColor3f(1.0, 1.0, 0.0)\n glVertex2f(3.0, 3.0)\n glVertex2f(8.0, 3.0)\n glVertex2f(5.0, 8.0)\n glEnd()\n glPushMatrix()\n glTranslatef(1.0, 0.0, 0.0)\n glBegin(GL_TRIANGLES)\n glColor3f(1.0, 0.0, 0.0)\n glVertex2f(3.0, 3.0)\n glVertex2f(8.0, 3.0)\n glVertex2f(5.0, 8.0)\n glEnd()\n glFlush()\n glPopMatrix()\n\n elif currentMode == 4:\n glBegin(GL_TRIANGLES)\n glColor3f(1.0, 1.0, 0.0)\n glVertex2f(3.0, 3.0)\n glVertex2f(8.0, 3.0)\n glVertex2f(5.0, 8.0)\n glEnd()\n glPushMatrix()\n glScalef(2.0, 1.0, 0.0)\n glBegin(GL_TRIANGLES)\n glColor3f(1.0, 0.0, 0.0)\n glVertex2f(3.0, 3.0)\n glVertex2f(8.0, 3.0)\n glVertex2f(5.0, 8.0)\n glEnd()\n glFlush()\n glPopMatrix()\n\n elif currentMode == 5:\n glBegin(GL_TRIANGLES)\n glColor3f(0.0, 1.0, 0.0)\n glVertex2f(6.0, 0.0)\n glVertex2f(-6.0, 0.0)\n glVertex2f(0.0, 6.0)\n glEnd()\n glPushMatrix()\n verts = [[6.0, 0.0], [-6.0, 0.0], [0.0, 6.0]]\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n while RunMode != 0:\n glRotatef(33.3, 0.0, 0.0, 1.0)\n glBegin(GL_TRIANGLES)\n glColor3f(0.0, 1.0, 0.0)\n for k in range(3):\n glVertex2f(verts[k][0], verts[k][1])\n glEnd()\n glFlush()\n\n glutSwapBuffers()\n glutPostRedisplay()\n time.sleep(1)\n glClear(GL_COLOR_BUFFER_BIT)\n # for i in range(1,1000):\n # glRotatef(10.0,0.0,0.0,1.0)\n # glBegin(GL_TRIANGLES)\n # glColor3f(0.0, 1.0, 0.0)\n # glVertex2f(6.0, 0.0)\n # glVertex2f(-6.0, 0.0)\n # glVertex2f(0.0, 6.0)\n # glEnd()\n glPopMatrix()\n glFlush()\n\n # glEnd()\n # glFlush()\n\n\nglutInit()\nglutInitDisplayMode(GLUT_SINGLE | GLUT_RGB)\nglutInitWindowSize(400, 400)\nglutInitWindowPosition(50, 50)\nglutCreateWindow(b\"Hello World!\")\ninit()\nglutDisplayFunc(RenderScene)\nglutReshapeFunc(reshape)\nglutKeyboardFunc(myKey_Callback)\n# glutIdleFunc(RenderScene)\nprint(\"Press space to continue,press escape to exit!\\n\")\nglutMainLoop()\n","sub_path":"2.3_Key_Callback.py","file_name":"2.3_Key_Callback.py","file_ext":"py","file_size_in_byte":4308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"624849270","text":"from flask import Flask, jsonify\nimport json\napp = Flask(__name__)\n\n@app.route('/fake/od')\ndef od():\n data = json.load(open('../data/fake/od.json'), encoding = 'utf-8')\n return jsonify(data)\n\n@app.route('/fake/voronoi')\ndef voronoi():\n data = json.load(open('../data/fake/base_voronoi.json'), encoding = 'utf-8')\n for i in data['features']:\n i['properties']['rel'] = range(int(i['properties']['id']), int(i['properties']['id'] + 20))\n return jsonify(data)\n\n@app.route('/fake/base')\ndef base():\n data = json.load(open('../data/base.json'), encoding = 'utf-8')\n return jsonify(data)\n\n\nif __name__ == '__main__':\n app.run(debug=True, port=9999)\n","sub_path":"map-trace/tracing/server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"231773580","text":"\"\"\"\nA simple check for verifying that the two modules I've recently written for reading\nspu files and resampling their content correctly work. This also tests importing\nmodules from relative paths. 
See:\n \nhttp://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path\n\"\"\"\n\nimport sys\nsys.path.append('../spectrautil/')\n\nimport matplotlib.pyplot as plt\n\nimport readspu\nimport spectrautil\n\ndirloc = 'd:/Downloads/gs5data/spectra/'\n\nspunames = ['01', '02', '03', '04', '05', '06', '07', '08', '09',\n '10', '11', '12', '13', '14', '15', '16', '17', '18',\n '19', '20', '21', '22', '23', '24']\n\nspudata = []\n\n# Read all files\nfor file in spunames:\n name = dirloc + 'GS5-2_' + file + '.spu'\n print('Reading', name)\n spu = readspu.Spu(name)\n spu.df['Counts'] = spu.df['Counts'] * 25000 / spu.realtime\n \n spudata.append(spu)\n \n\ntrace = 6\nnewE, newC = spectrautil.resample(spudata[trace]['Energy'], spudata[trace]['Counts'], [10, 1400, 1024])\nfig = plt.figure(figsize=(13, 9))\n\nax1 = fig.add_subplot(1, 1, 1)\nax1.set_yscale('log')\nax1.set_ylim([1, 100000])\nax1.set_xlabel('Energy (keV)')\nax1.set_ylabel('Counts (normalized)')\nax1.plot(newE, newC, marker='.', lw=0.75, label='Resampled trace #{}'.format(trace+1))\nax1.legend()","sub_path":"test/spuresampletest01.py","file_name":"spuresampletest01.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"205750773","text":"#TODO\n#1. Add comments and tests\n\nimport sys\nimport logging\nimport platform\n\nimport _color\n\nLOGNAME = None\n\nconfig_section = \"logging\"\n\ndefault_configs = {\"logger_name\":\"AXUI\", \n \"logging_level_file\":\"DEBUG\",\n \"logging_level_stream\":\"ERROR\", \n \"logging_stream\":\"stdout\", \n \"logging_file\":\"AXUI.log\", \n \"file_logging_mode\":\"a\", \n \"formatter\":\"[ %(levelname)s ][ %(filename)s:%(lineno)d ] %(message)s\", \n \"color_enable\":\"True\" }\n \nlogging_levels = {\"CRITICAL\":logging.CRITICAL, \n \"ERROR\":logging.ERROR, \n \"WARNING\": logging.WARNING, \n \"INFO\": logging.INFO, \n \"DEBUG\": logging.DEBUG}\n \nlogging_streams = {\"STDOUT\":sys.stdout, \n \"FALSE\":False }\n \nfile_logging_modes = {\"A\":\"a\", \n \"W\":\"w\" }\n \nlogging_color_configs = {\"TRUE\":True, \n \"FALSE\":False }\n \ndef config(configs=None):\n '''config logger with settings in configure file\n '''\n if configs is None:\n configs = default_configs\n \n logger_name=configs[\"logger_name\"]\n \n if configs[\"logging_level_file\"].upper() in logging_levels:\n logging_level_file=logging_levels[configs[\"logging_level_file\"].upper()]\n else:\n #print(\"Error logging_level_file value, use default\")\n logging_level_file=logging_levels[default_configs[\"logging_level_file\"].upper()]\n \n if configs[\"logging_level_stream\"].upper() in logging_levels:\n logging_level_stream=logging_levels[configs[\"logging_level_stream\"].upper()]\n else:\n #print(\"Error logging_level_stream value, use default\")\n logging_level_stream=logging_levels[default_configs[\"logging_level_stream\"].upper()]\n \n if configs[\"logging_stream\"].upper() in logging_streams:\n logging_stream=logging_streams[configs[\"logging_stream\"].upper()]\n else:\n #print(\"Error logging_stream value, use default\")\n logging_stream=logging_streams[default_configs[\"logging_stream\"].upper()]\n \n if configs[\"file_logging_mode\"].upper() in file_logging_modes:\n file_logging_mode=file_logging_modes[configs[\"file_logging_mode\"].upper()]\n else:\n #print(\"Error file_logging_mode value, use default\")\n file_logging_mode=file_logging_modes[default_configs[\"file_logging_mode\"].upper()]\n \n 
formatter_string=configs[\"formatter\"]\n \n if configs[\"color_enable\"].upper() in logging_color_configs:\n logging_color_config=logging_color_configs[configs[\"color_enable\"].upper()]\n else:\n #print(\"Error color_enable value, use default\")\n logging_color_config=logging_color_configs[default_configs[\"color_enable\"].upper()]\n \n #config logger according input configs\n logger = logging.getLogger(logger_name)\n logger.propagate = False\n logger.setLevel(logging.DEBUG)\n logger.handlers = []\n\n if logging_stream:\n stream_handler = logging.StreamHandler(logging_stream)\n stream_handler.setLevel(logging_level_stream)\n formatter = logging.Formatter(formatter_string)\n stream_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n \n file_handler = logging.FileHandler(configs[\"logging_file\"], mode=file_logging_mode)\n file_handler.setLevel(logging_level_file)\n formatter = logging.Formatter(formatter_string)\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n \n if logging_color_config:\n if platform.system()=='Windows':\n # Windows does not support ANSI escapes \n # and we are using API calls to set the console color\n logging.StreamHandler.emit = \\\n _color.add_coloring_to_emit_windows(logging.StreamHandler.emit)\n else:\n # all non-Windows platforms are supporting ANSI escapes so we use them\n logging.StreamHandler.emit = \\\n _color.add_coloring_to_emit_ansi(logging.StreamHandler.emit)\n\n global LOGNAME\n LOGNAME = logger_name\n\n#used by config module\n__all__=[\"config_section\", \"default_configs\", \"config\"]\n\ndef LOGGER():\n if LOGNAME == None:\n config()\n return logging.getLogger(LOGNAME)\n\n \n","sub_path":"AXUI/logger/_logger.py","file_name":"_logger.py","file_ext":"py","file_size_in_byte":4375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"526506122","text":"from django import forms\n\nfrom govuk_forms.widgets import NumberInput, InlineRadioSelect\n\nfrom application.presentation.utilities import NannyForm\nfrom application.services.dbs import read_dbs, dbs_date_of_birth_no_match\nfrom application.services.db_gateways import NannyGatewayActions\n\n\nclass DBSNumberFormFieldMixin(forms.Form):\n \"\"\"\n Mixin for the 'DBS certificate number' ChoiceField.\n \"\"\"\n # Overrides standard NumberInput widget too give wider field\n widget_instance = NumberInput()\n widget_instance.input_classes = 'form-control form-control-1-4'\n\n dbs_number = forms.IntegerField(\n label='DBS certificate number',\n help_text='12-digit number on your certificate',\n error_messages={\n 'required': 'Please enter your DBS certificate number',\n },\n widget=widget_instance,\n )\n\n def clean_dbs_number(self):\n \"\"\"\n DBS certificate number validation\n :return: integer\n \"\"\"\n # is_valid() call strips leading 0 required by DBS number. Use raw str input from user instead of cleaned_data.\n dbs_number = self.data['dbs_number']\n if len(str(dbs_number)) != 12:\n raise forms.ValidationError('Check your certificate: the number should be 12 digits long')\n application_id = self.initial['application_id']\n response = read_dbs(dbs_number)\n if response.status_code == 200:\n app_details = NannyGatewayActions().read('applicant-personal-details', {'application_id': application_id})\n if dbs_date_of_birth_no_match(response.record, app_details.record):\n raise forms.ValidationError(\n 'Check your DBS certificate. 
The number you entered does not match your number held by DBS.')\n return dbs_number\n\n\nclass NonCapitaDBSDetailsForm(DBSNumberFormFieldMixin, NannyForm):\n \"\"\"\n GOV.UK form for the Captia DBS Details Page\n \"\"\"\n field_label_classes = 'form-label-bold'\n error_summary_title = 'There was a problem on this page'\n auto_replace_widgets = True\n\n\nclass CaptiaDBSDetailsForm(DBSNumberFormFieldMixin, NannyForm):\n \"\"\"\n GOV.UK form for the Non-Capita DBS Details Page\n \"\"\"\n field_label_classes = 'form-label-bold'\n error_summary_title = 'There was a problem on this page'\n auto_replace_widgets = True\n\n\n\n","sub_path":"application/presentation/dbs/forms/dbs_details.py","file_name":"dbs_details.py","file_ext":"py","file_size_in_byte":2344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"415205501","text":"# For copyright see LICENCE\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom src.core.waveglow.hparams import HParams\n\n\n@torch.jit.script\ndef fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):\n n_channels_int = n_channels[0]\n in_act = input_a + input_b\n t_act = torch.tanh(in_act[:, :n_channels_int, :])\n s_act = torch.sigmoid(in_act[:, n_channels_int:, :])\n acts = t_act * s_act\n return acts\n\n\nclass Invertible1x1Conv(torch.nn.Module):\n \"\"\"\n The layer outputs both the convolution, and the log determinant\n of its weight matrix. If reverse=True it does convolution with\n inverse\n \"\"\"\n\n def __init__(self, c):\n super(Invertible1x1Conv, self).__init__()\n self.conv = torch.nn.Conv1d(c, c, kernel_size=1, stride=1, padding=0,\n bias=False)\n\n # Sample a random orthonormal matrix to initialize weights\n W = torch.qr(torch.FloatTensor(c, c).normal_())[0]\n\n # Ensure determinant is 1.0 not -1.0\n if torch.det(W) < 0:\n W[:, 0] = -1 * W[:, 0]\n W = W.view(c, c, 1)\n self.conv.weight.data = W\n\n def forward(self, z, reverse=False):\n # shape\n batch_size, group_size, n_of_groups = z.size()\n\n W = self.conv.weight.squeeze()\n\n if reverse:\n if not hasattr(self, 'W_inverse'):\n # Reverse computation\n W_inverse = W.float().inverse()\n W_inverse = Variable(W_inverse[..., None])\n if z.type() == 'torch.cuda.HalfTensor':\n W_inverse = W_inverse.half()\n self.W_inverse = W_inverse\n z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)\n return z\n else:\n # Forward computation\n log_det_W = batch_size * n_of_groups * torch.logdet(W)\n z = self.conv(z)\n return z, log_det_W\n\n\nclass WN(torch.nn.Module):\n \"\"\"\n This is the WaveNet like layer for the affine coupling. The primary difference\n from WaveNet is the convolutions need not be causal. There is also no dilation\n size reset. The dilation only doubles on each layer\n \"\"\"\n\n def __init__(self, n_in_channels, n_mel_channels, hparams: HParams):\n super(WN, self).__init__()\n assert(hparams.kernel_size % 2 == 1)\n assert(hparams.n_channels % 2 == 0)\n self.n_layers = hparams.n_layers\n self.n_channels = hparams.n_channels\n self.in_layers = torch.nn.ModuleList()\n self.res_skip_layers = torch.nn.ModuleList()\n\n start = torch.nn.Conv1d(n_in_channels, self.n_channels, 1)\n start = torch.nn.utils.weight_norm(start, name='weight')\n self.start = start\n\n # Initializing last layer to 0 makes the affine coupling layers\n # do nothing at first. 
This helps with training stability\n end = torch.nn.Conv1d(self.n_channels, 2 * n_in_channels, 1)\n end.weight.data.zero_()\n end.bias.data.zero_()\n self.end = end\n\n cond_layer = torch.nn.Conv1d(n_mel_channels, 2 * self.n_channels * self.n_layers, 1)\n self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')\n\n for i in range(self.n_layers):\n dilation = 2 ** i\n padding = int((hparams.kernel_size * dilation - dilation) / 2)\n in_layer = torch.nn.Conv1d(self.n_channels, 2 * self.n_channels, hparams.kernel_size,\n dilation=dilation, padding=padding)\n in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')\n self.in_layers.append(in_layer)\n\n # last one is not necessary\n if i < self.n_layers - 1:\n res_skip_channels = 2 * self.n_channels\n else:\n res_skip_channels = self.n_channels\n res_skip_layer = torch.nn.Conv1d(self.n_channels, res_skip_channels, 1)\n res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')\n self.res_skip_layers.append(res_skip_layer)\n\n def forward(self, forward_input):\n audio, spect = forward_input\n audio = self.start(audio)\n output = torch.zeros_like(audio)\n n_channels_tensor = torch.IntTensor([self.n_channels])\n\n spect = self.cond_layer(spect)\n\n for i in range(self.n_layers):\n spect_offset = i * 2 * self.n_channels\n acts = fused_add_tanh_sigmoid_multiply(\n self.in_layers[i](audio),\n spect[:, spect_offset:spect_offset + 2 * self.n_channels, :],\n n_channels_tensor)\n\n res_skip_acts = self.res_skip_layers[i](acts)\n if i < self.n_layers - 1:\n audio = audio + res_skip_acts[:, :self.n_channels, :]\n output = output + res_skip_acts[:, self.n_channels:, :]\n else:\n output = output + res_skip_acts\n\n return self.end(output)\n\n\nclass WaveGlow(torch.nn.Module):\n def __init__(self, hparams: HParams):\n super().__init__()\n\n self.upsample = torch.nn.ConvTranspose1d(\n hparams.n_mel_channels,\n hparams.n_mel_channels,\n 1024,\n stride=256\n )\n\n assert(hparams.n_group % 2 == 0)\n self.n_flows = hparams.n_flows\n self.n_group = hparams.n_group\n self.n_early_every = hparams.n_early_every\n self.n_early_size = hparams.n_early_size\n self.WN = torch.nn.ModuleList()\n self.convinv = torch.nn.ModuleList()\n\n n_half = int(self.n_group / 2)\n\n # Set up layers with the right sizes based on how many dimensions\n # have been output already\n n_remaining_channels = self.n_group\n for k in range(self.n_flows):\n if k % self.n_early_every == 0 and k > 0:\n n_half = n_half - int(self.n_early_size / 2)\n n_remaining_channels = n_remaining_channels - self.n_early_size\n self.convinv.append(Invertible1x1Conv(n_remaining_channels))\n WN_res = WN(\n n_in_channels=n_half,\n n_mel_channels=hparams.n_mel_channels * self.n_group,\n hparams=hparams\n )\n self.WN.append(WN_res)\n self.n_remaining_channels = n_remaining_channels # Useful during inference\n\n def forward(self, forward_input):\n \"\"\"\n forward_input[0] = mel_spectrogram: batch x n_mel_channels x frames\n forward_input[1] = audio: batch x time\n \"\"\"\n spect, audio = forward_input\n\n # Upsample spectrogram to size of audio\n spect = self.upsample(spect)\n assert(spect.size(2) >= audio.size(1))\n if spect.size(2) > audio.size(1):\n spect = spect[:, :, :audio.size(1)]\n\n spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3)\n spect = spect.contiguous().view(spect.size(0), spect.size(1), -1).permute(0, 2, 1)\n\n audio = audio.unfold(1, self.n_group, self.n_group).permute(0, 2, 1)\n output_audio = []\n log_s_list = []\n log_det_W_list = []\n\n for 
k in range(self.n_flows):\n if k % self.n_early_every == 0 and k > 0:\n output_audio.append(audio[:, :self.n_early_size, :])\n audio = audio[:, self.n_early_size:, :]\n\n audio, log_det_W = self.convinv[k](audio)\n log_det_W_list.append(log_det_W)\n\n n_half = int(audio.size(1) / 2)\n audio_0 = audio[:, :n_half, :]\n audio_1 = audio[:, n_half:, :]\n\n output = self.WN[k]((audio_0, spect))\n log_s = output[:, n_half:, :]\n b = output[:, :n_half, :]\n audio_1 = torch.exp(log_s) * audio_1 + b\n log_s_list.append(log_s)\n\n audio = torch.cat([audio_0, audio_1], 1)\n\n output_audio.append(audio)\n return torch.cat(output_audio, 1), log_s_list, log_det_W_list\n\n def infer(self, spect, sigma=1.0):\n spect = self.upsample(spect)\n # trim conv artifacts. maybe pad spec to kernel multiple\n time_cutoff = self.upsample.kernel_size[0] - self.upsample.stride[0]\n spect = spect[:, :, :-time_cutoff]\n\n spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3)\n spect = spect.contiguous().view(spect.size(0), spect.size(1), -1).permute(0, 2, 1)\n\n if spect.type() == 'torch.cuda.HalfTensor':\n audio = torch.cuda.HalfTensor(spect.size(0),\n self.n_remaining_channels,\n spect.size(2)).normal_()\n else:\n audio = torch.cuda.FloatTensor(spect.size(0),\n self.n_remaining_channels,\n spect.size(2)).normal_()\n\n audio = torch.autograd.Variable(sigma * audio)\n\n for k in reversed(range(self.n_flows)):\n n_half = int(audio.size(1) / 2)\n audio_0 = audio[:, :n_half, :]\n audio_1 = audio[:, n_half:, :]\n\n output = self.WN[k]((audio_0, spect))\n\n s = output[:, n_half:, :]\n b = output[:, :n_half, :]\n audio_1 = (audio_1 - b) / torch.exp(s)\n audio = torch.cat([audio_0, audio_1], 1)\n\n audio = self.convinv[k](audio, reverse=True)\n\n if k % self.n_early_every == 0 and k > 0:\n if spect.type() == 'torch.cuda.HalfTensor':\n z = torch.cuda.HalfTensor(spect.size(0), self.n_early_size, spect.size(2)).normal_()\n else:\n z = torch.cuda.FloatTensor(spect.size(0), self.n_early_size, spect.size(2)).normal_()\n audio = torch.cat((sigma * z, audio), 1)\n\n audio = audio.permute(0, 2, 1).contiguous().view(audio.size(0), -1).data\n return audio\n\n @staticmethod\n def remove_weightnorm(model):\n # see: zotero://select/library/items/KIY65PZJ\n waveglow = model\n for wnet in waveglow.WN:\n wnet.start = torch.nn.utils.remove_weight_norm(wnet.start)\n wnet.in_layers = remove(wnet.in_layers)\n wnet.cond_layer = torch.nn.utils.remove_weight_norm(wnet.cond_layer)\n wnet.res_skip_layers = remove(wnet.res_skip_layers)\n return waveglow\n\n\ndef remove(conv_list):\n new_conv_list = torch.nn.ModuleList()\n for old_conv in conv_list:\n old_conv = torch.nn.utils.remove_weight_norm(old_conv)\n new_conv_list.append(old_conv)\n return new_conv_list\n","sub_path":"src/core/waveglow/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":9550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"510565834","text":"class Candidato:\r\n def __init__(self, posicao, distancia):\r\n self.posicao=posicao\r\n self.distancia=distancia\r\n \r\n# def completaCadidato(self,NU_INSCRICAO, TX_RESPOSTA_MT_Q173,TX_RESPOSTA_MT_Q174,TX_RESPOSTA_MT_Q175):\r\n# self.NU_INSCRICAO = NU_INSCRICAO\r\n# self.TX_RESPOSTA_MT_Q173 = TX_RESPOSTA_MT_Q173\r\n# self.TX_RESPOSTA_MT_Q174 = TX_RESPOSTA_MT_Q174\r\n# self.TX_RESPOSTA_MT_Q175 = TX_RESPOSTA_MT_Q175\r\n \r\n def completaCadidato(self, NU_INSCRICAO, NO_MUNICIPIO_RESIDENCIA, SG_UF_RESIDENCIA, NU_IDADE, TP_SEXO, TP_ESCOLA, 
TP_ENSINO,NO_MUNICIPIO_PROVA,SG_UF_PROVA,CO_PROVA_CN,CO_PROVA_CH,CO_PROVA_LC,CO_PROVA_MT, NU_NOTA_CN, NU_NOTA_CH, NU_NOTA_LC, NU_NOTA_MT, NU_NOTA_COMP1, NU_NOTA_COMP2, NU_NOTA_COMP3, NU_NOTA_COMP4,NU_NOTA_COMP5,NU_NOTA_REDACAO, Q006):\r\n \r\n self.NU_INSCRICAO = NU_INSCRICAO\r\n self.NO_MUNICIPIO_RESIDENCIA = NO_MUNICIPIO_RESIDENCIA \r\n self.SG_UF_RESIDENCIA = SG_UF_RESIDENCIA \r\n self.NU_IDADE = NU_IDADE\r\n self.TP_SEXO = TP_SEXO\r\n self.TP_ESCOLA = TP_ESCOLA\r\n self.TP_ENSINO = TP_ENSINO\r\n self.NO_MUNICIPIO_PROVA = NO_MUNICIPIO_PROVA\r\n self.SG_UF_PROVA = SG_UF_PROVA\r\n self.CO_PROVA_CN = CO_PROVA_CN\r\n self.CO_PROVA_CH = CO_PROVA_CH\r\n self.CO_PROVA_LC = CO_PROVA_LC\r\n self.CO_PROVA_MT = CO_PROVA_MT\r\n self.NU_NOTA_CN = NU_NOTA_CN\r\n self.NU_NOTA_CH = NU_NOTA_CH\r\n self.NU_NOTA_LC = NU_NOTA_LC\r\n self.NU_NOTA_MT = NU_NOTA_MT\r\n self.NU_NOTA_COMP1 = NU_NOTA_COMP1\r\n self.NU_NOTA_COMP2 = NU_NOTA_COMP2\r\n self.NU_NOTA_COMP3 = NU_NOTA_COMP3\r\n self.NU_NOTA_COMP4 = NU_NOTA_COMP4\r\n self.NU_NOTA_COMP5 = NU_NOTA_COMP5\r\n self.NU_NOTA_REDACAO = NU_NOTA_REDACAO\r\n self.Q006 = Q006\r\n \r\n \r\n \r\n\r\n\r\n\r\n\r\n","sub_path":"Candidato.py","file_name":"Candidato.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"191008507","text":"import dataclasses\nimport time\nimport typing\nimport unittest.mock\nimport urllib.parse\n\nfrom pytest import mark, raises\nfrom requests_mock import Mocker\nfrom typeguard import typechecked\n\nfrom coin.exc import InvalidTransactionError\nfrom coin.models import Block, Blockchain, Transaction\n\n\ndef test_block_chain():\n blockchain = Blockchain()\n block = blockchain.chain[0]\n assert isinstance(block, Block)\n assert block.index == 1\n assert block.transactions == []\n assert block.proof == 100\n assert block.previous_hash == 1\n assert blockchain.current_transactions == []\n assert blockchain.nodes == set()\n\n\n@typechecked\n@mark.parametrize('proof, previous_hash, expected', [\n (1, None, str),\n (1, 1, int),\n (1, '1', str),\n])\ndef test_new_block(fx_blockchain: Blockchain,\n proof: int,\n previous_hash: typing.Union[str, int, None],\n expected: typing.types):\n with unittest.mock.patch.object(Blockchain, 'valid_block',\n return_value=True):\n block = fx_blockchain.new_block(proof, previous_hash)\n assert isinstance(block.previous_hash, expected)\n\n\ndef test_new_block_invalid_transaction(fx_blockchain: Blockchain):\n r = raises(InvalidTransactionError)\n m = unittest.mock.patch.object(Blockchain, 'valid_transaction',\n return_value=False)\n with r, m:\n fx_blockchain.new_transaction('', '', 0)\n\n\ndef test_new_transaction(fx_blockchain: Blockchain):\n block = fx_blockchain.last_block\n assert block.index == 1\n assert fx_blockchain.current_transactions == []\n result = fx_blockchain.new_transaction('0', 'test', 1)\n assert result == 2\n transaction = fx_blockchain.current_transactions[0]\n assert isinstance(transaction, Transaction)\n assert transaction.sender == '0'\n assert transaction.recipient == 'test'\n assert transaction.amount == 1\n\n\ndef test_hash(fx_blockchain: Blockchain):\n result = fx_blockchain.hash(fx_blockchain.last_block)\n assert isinstance(result, str)\n\n\ndef test_last_block(fx_blockchain: Blockchain):\n block = fx_blockchain.chain[0]\n assert fx_blockchain.last_block == block\n proof = fx_blockchain.pow(block.proof)\n previous_hash = fx_blockchain.hash(block)\n new_block = 
fx_blockchain.new_block(previous_hash=previous_hash,\n proof=proof)\n assert fx_blockchain.last_block == new_block\n\n\ndef test_valid_proof(fx_blockchain: Blockchain):\n assert fx_blockchain.valid_proof(0, 0) in (True, False)\n\n\n@typechecked\n@mark.parametrize('address, expected', [\n ('http://www.google.com', 'www.google.com'),\n ('https://www.google.com', 'www.google.com'),\n])\ndef test_register_node(fx_blockchain: Blockchain, address: str, expected: str):\n assert fx_blockchain.nodes == set()\n fx_blockchain.register_node(address)\n assert fx_blockchain.nodes == {expected}\n\n\ndef test_valid_chain(fx_blockchain: Blockchain):\n assert fx_blockchain.valid_chain(fx_blockchain.chain) is True\n\n\n@mark.parametrize('previous_hash', [\n '',\n 'test',\n])\ndef test_valid_chain_invalid_hash(fx_blockchain: Blockchain,\n previous_hash: str):\n block = Block(2, time.time(), fx_blockchain.current_transactions, 10,\n previous_hash)\n chain = fx_blockchain.chain\n chain.append(block)\n assert not block.previous_hash == fx_blockchain.hash(chain[0])\n assert fx_blockchain.valid_chain(fx_blockchain.chain) is False\n\n\ndef test_valid_chain_invalid_proof(fx_blockchain: Blockchain):\n last_block = fx_blockchain.last_block\n last_proof = last_block.proof\n proof = fx_blockchain.pow(last_proof)\n previous_hash = fx_blockchain.hash(last_block)\n block = fx_blockchain.new_block(proof, previous_hash)\n assert len(fx_blockchain.chain) == 2\n assert block.previous_hash == fx_blockchain.hash(fx_blockchain.chain[0])\n with unittest.mock.patch.object(Blockchain, 'valid_proof',\n return_value=False):\n assert fx_blockchain.valid_chain(fx_blockchain.chain) is False\n\n\ndef test_resolve_conflicts(fx_blockchain: Blockchain):\n url = 'http://www.google.com'\n fx_blockchain.register_node(url)\n with Mocker() as m:\n url = urllib.parse.urljoin(url, '/chain')\n m.get(url, status_code=200, json={\n 'length': len(fx_blockchain.chain) + 1,\n 'chain': [dataclasses.asdict(b) for b in fx_blockchain.chain],\n })\n assert fx_blockchain.resolve_conflicts() is True\n\n\n@typechecked\n@mark.parametrize('length, chain', [\n (0, [{}]),\n (2, [{}]),\n])\ndef test_resolve_conflicts_false(fx_blockchain: Blockchain, length: int,\n chain: list):\n url = 'http://www.google.com'\n fx_blockchain.register_node(url)\n with Mocker() as m:\n url = urllib.parse.urljoin(url, '/chain')\n m.get(url, status_code=200, json={\n 'length': length,\n 'chain': chain\n })\n assert fx_blockchain.resolve_conflicts() is False\n\n\ndef test_valid_transaction(fx_blockchain: Blockchain):\n fx_blockchain.new_transaction('0', 'test', 1)\n transaction = fx_blockchain.current_transactions[0]\n assert fx_blockchain.valid_transaction(transaction) is True\n\n new = Transaction('sender', 'recipient', 10, transaction.timestamp - 1)\n assert fx_blockchain.valid_transaction(new) is False\n","sub_path":"tests/models_test.py","file_name":"models_test.py","file_ext":"py","file_size_in_byte":5458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"28505584","text":"print('-'*30)\nprint('LOJA SUPER BARATÃO')\nprint('-'*30)\n\ntotal = totamil = menor = cont = 0\nbarato = ''\nwhile True:\n produto = str(input('Nome do Produto:'))\n preço = float(input('Preço R$'))\n cont += 1\n total += preço\n if preço > 1000:\n totamil +=1\n if cont == 1 or preço < menor:\n menor = preço\n barato = produto\n resp = ' '\n while resp not in 'SN':\n resp = str(input('Quer continuar [S/N] ')).strip().upper()[0]\n if resp == 'N':\n 
break\nprint('{:-^40}'.format('Fim do Programa'))\nprint(f'o total da compra foi R${total:.2f}')\nprint(f'temos {totamil} produtos custando mais de mil reais ')\nprint(f'o produto mais barato custa R${menor:.2f}')","sub_path":"LojaBaratao.py","file_name":"LojaBaratao.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"379473735","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Time : 6/1/2018 10:59 AM\n# @Author : GT\n# @File : youdaofanyi.py\nimport unittest,time\nfrom selenium import webdriver\n\nclass Youdaofanyi(unittest.TestCase):\n    # setUp (not \"setup\") so unittest actually runs it before each test\n    def setUp(self):\n        self.browser = webdriver.Chrome(r'C:\\IT\\python\\chromedriver_win32\\chromedriver.exe')\n        self.url = 'http://www.youdao.com'\n    def test_youdaoceshi(self):\n        browser = self.browser\n        browser.get('http://www.youdao.com/')\n        browser.find_element_by_xpath('//*[@id=\"translateContent\"]').send_keys('你好')\n        browser.find_element_by_xpath('//*[@id=\"form\"]/button').click()\n        time.sleep(3)\n        html = browser.page_source\n        print(html)\n        self.assertIn('hello',html,'not found')\n    def tearDown(self):\n        print('quit browser')\n        # self.browser.quit()\n\nif __name__=='__main__':\n    unittest.main()","sub_path":"UNITFunctest/youdaofanyi.py","file_name":"youdaofanyi.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"135769964","text":"import os\nimport pandas as pd\nimport numpy as np\nimport json\nfrom flask import Flask, render_template\n\nwith open('static/data/samples.json') as json_data:\n    data_dict = json.load(json_data)\n\n\napp = Flask(__name__)\n@app.route('/')\ndef index():\n    return render_template('index.html')\n\n@app.route('/<id>')\ndef summary(id):\n    sample_frequencies = {}\n    for item in data_dict['samples']:\n        if item['id'] not in sample_frequencies:\n            temp_dict = dict(zip(item['otu_ids'], item['sample_values']))\n            sample_frequencies[item['id']] = sorted(temp_dict.items(), key=lambda item: item[1], reverse=True)\n\n\n\n","sub_path":"plotly_homework/old_apps/app3.py","file_name":"app3.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"492262417","text":"# Time Complexity : O(n)\n# Space Complexity : O(h) Space | O(n) worst case.\n# Did this code successfully run on Leetcode : Yes\n# Any problem you faced while coding this : No\n# Your code here along with comments explaining your approach\n\nclass Node:\n    def __init__(self,x):\n        self.val = x\n        self.left = None\n        self.right = None\n\nclass Solution:\n    result = None\n    def levelOrder(self,root):\n        self.result = []\n        if not root:\n            return self.result\n        self.dfs(root,0)\n        return self.result\n\n    # def bfs(self,root):\n    #     q = [root]\n    #     while q:\n    #         size = len(q)\n    #         temp = []\n    #         for _ in range(size):\n    #             node = q.pop(0)\n    #             if node.left:\n    #                 q.append(node.left)\n    #             if node.right:\n    #                 q.append(node.right)\n    #             temp.append(node.val)\n    #         self.result.append(temp)\n    #     return self.result\n\n    def dfs(self,root,level):\n        # edge case\n        if not root:\n            return\n        # Logic\n        if len(self.result) == level:\n            temp = []\n            self.result.append(temp)\n        self.result[level].append(root.val)\n        self.dfs(root.left,level+1)\n        self.dfs(root.right,level+1)\n\nif __name__ == \"__main__\":\n    s = Solution()\n    n = Node(3)\n    n.left = Node(9)\n    
n.right = Node(20)\n n.right.left = Node(15)\n n.right.right = Node(7)\n res = s.levelOrder(n)\n print(res)\n \n \n","sub_path":"Problem1.py","file_name":"Problem1.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"262737159","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 17 01:16:31 2018\n\n@author: shweta\n\"\"\"\n\nimport nltk\nimport random\n#from nltk.corpus import movie_reviews\nfrom nltk.classify.scikitlearn import SklearnClassifier\nimport pickle\nfrom sklearn.naive_bayes import MultinomialNB, BernoulliNB\nfrom sklearn.linear_model import LogisticRegression, SGDClassifier\nfrom sklearn.svm import SVC, LinearSVC, NuSVC\nfrom nltk.classify import ClassifierI\nfrom statistics import mode\nfrom nltk.tokenize import word_tokenize\n\nclass VoteClassifier(ClassifierI):\n def __init__(self, *classifiers):\n self._classfiers = classifiers\n \n def classifiy(self,features):\n votes = []\n for c in self._classfiers:\n v = c.classify(features)\n votes.append(v)\n return mode(votes)\n \n def confidence(self, features):\n votes = []\n for c in self._classfiers:\n v = c.classify(features)\n votes.append(v)\n \n choice_votes = votes.count(mode(votes))\n conf = choice_votes / len(votes)\n return conf\n \nshort_pos = open(\"short_reviews/positive.txt\",\"r\",encoding='latin-1').read()\nshort_neg = open(\"short_reviews/negative.txt\",\"r\",encoding='latin-1').read()\n\nall_words = []\ndocuments = []\n\n#j is adjective , r is adverb, v is verb\n#allowed_word_types = [\"J\",\"R\",\"V\"]\n\nallowed_word_types = [\"J\"]\n\nfor p in short_pos.split('\\n'):\n documents.append((p,\"pos\"))\n words = word_tokenize(p)\n pos = nltk.pos_tag(words)\n for w in pos:\n if(w[1][0]) in allowed_word_types:\n all_words.append(w[0].lower())\n \nfor p in short_neg.split('\\n'):\n documents.append((p,\"neg\"))\n words = word_tokenize(p)\n pos = nltk.pos_tag(words)\n for w in pos:\n if(w[1][0]) in allowed_word_types:\n all_words.append(w[0].lower())\n \nsave_documents = open(\"pickled_algos/documents.pickle\",\"wb\")\npickle.dump(documents,save_documents)\nsave_documents.close()\n\nall_words = nltk.FreqDist(all_words)\n\nword_features = list(all_words.keys())[:5000]\n\nsave_features = open(\"pickled_algos/word_features5k.pickle\",\"wb\")\npickle.dump(word_features,save_features)\nsave_features.close()\n\n\ndef find_features(document):\n words = word_tokenize(document)\n features = {}\n for w in word_features:\n features[w] = (w in words)\n \n return features\n \nfeatureset = [(find_features(rev),category) for (rev,category) in documents]\n\nsave_featureset = open(\"pickled_algos/featureset.pickle\",\"wb\")\npickle.dump(featureset,save_featureset)\nsave_featureset.close()\n\nrandom.shuffle(featureset)\nprint(len(featureset))\n\ntest_set = featureset[10000:]\ntrain_set = featureset[:10000]\n\nclassifier = nltk.NaiveBayesClassifier.train(train_set)\nprint(\"naive bayes accuracy percent:\", (nltk.classify.accuracy(classifier,test_set))*100)\n\n\nsave_classifier = open(\"pickled_algos/naivebayes5k.pickle\",\"wb\")\npickle.dump(classifier,save_classifier)\nsave_classifier.close()\n\nMNB_classifier = SklearnClassifier(MultinomialNB())\nMNB_classifier.train(train_set)\nprint(\"MNB_classifier accuracy percent:\", (nltk.classify.accuracy(MNB_classifier,test_set))*100)\n\nsave_classifier = 
open(\"pickled_algos/MNB_classifier5k.pickle\",\"wb\")\npickle.dump(MNB_classifier,save_classifier)\nsave_classifier.close()\n\nBernoulliNB_classifier = SklearnClassifier(BernoulliNB())\nBernoulliNB_classifier.train(train_set)\nprint(\"BernoulliNB accuracy percent:\", (nltk.classify.accuracy(BernoulliNB_classifier,test_set))*100)\n\nsave_classifier = open(\"pickled_algos/BernoulliNB_classifier5k.pickle\",\"wb\")\npickle.dump(BernoulliNB_classifier,save_classifier)\nsave_classifier.close()\n\nLogisticRegression_classifier = SklearnClassifier(LogisticRegression())\nLogisticRegression_classifier.train(train_set)\nprint(\"LogisticRegression_classifier accuracy percent:\", (nltk.classify.accuracy(LogisticRegression_classifier,test_set))*100)\n\nsave_classifier = open(\"pickled_algos/LogisticRegression_classifier5k.pickle\",\"wb\")\npickle.dump(LogisticRegression_classifier,save_classifier)\nsave_classifier.close()\n\nLinearSVC_classifier = SklearnClassifier(LinearSVC())\nLinearSVC_classifier.train(train_set)\nprint(\"LinearSVC_classifier accuracy percent:\", (nltk.classify.accuracy(LinearSVC_classifier,test_set))*100)\n\nsave_classifier = open(\"pickled_algos/LinearSVC_classifier5k.pickle\",\"wb\")\npickle.dump(LinearSVC_classifier,save_classifier)\nsave_classifier.close()\n\n#NuSVC_classifier = SklearnClassifier(NuSVC())\n#NuSVC_classifier.train(train_set)\n#print(\"NuSVC_classifier accuracy percent:\", (nltk.classify.accuracy(NuSVC_classifier,test_set))*100)\n\n\nSGD_classifier = SklearnClassifier(SGDClassifier())\nSGD_classifier.train(train_set)\nprint(\"SGD_classifier accuracy percent:\", (nltk.classify.accuracy(SGD_classifier,test_set))*100)\n\nsave_classifier = open(\"pickled_algos/SGD_classifier5k.pickle\",\"wb\")\npickle.dump(SGD_classifier,save_classifier)\nsave_classifier.close()\n\n","sub_path":"NLTK/sentiment_analysis.py","file_name":"sentiment_analysis.py","file_ext":"py","file_size_in_byte":4926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"215137664","text":"# Authors: A. Iscen, G. Tolias, Y. Avrithis, O. Chum. 
2018.\n\nimport os\nimport os.path\nimport pdb\nimport pickle\nimport sys\nimport time\n\nimport faiss\nimport numpy as np\nimport scipy\nimport scipy.stats\nimport torch\nimport torch.utils.data as data\nfrom PIL import Image\n\nfrom .diffusion import *\nfrom utils.misc import normalize\n\n\ndef has_file_allowed_extension(filename, extensions):\n \"\"\"Checks if a file is an allowed extension.\n\n Args:\n filename (string): path to a file\n extensions (iterable of strings): extensions to consider (lowercase)\n\n Returns:\n bool: True if the filename ends with one of given extensions\n \"\"\"\n filename_lower = filename.lower()\n return any(filename_lower.endswith(ext) for ext in extensions)\n\n\ndef is_image_file(filename):\n \"\"\"Checks if a file is an allowed image extension.\n\n Args:\n filename (string): path to a file\n\n Returns:\n bool: True if the filename ends with a known image extension\n \"\"\"\n return has_file_allowed_extension(filename, IMG_EXTENSIONS)\n\n\ndef make_dataset(dir, class_to_idx, extensions):\n images = []\n dir = os.path.expanduser(dir)\n for target in sorted(class_to_idx.keys()):\n d = os.path.join(dir, target)\n if not os.path.isdir(d):\n continue\n\n for root, _, fnames in sorted(os.walk(d)):\n for fname in sorted(fnames):\n if has_file_allowed_extension(fname, extensions):\n path = os.path.join(root, fname)\n item = (path, class_to_idx[target])\n images.append(item)\n\n return images\n\n\n# ref: torchvision.datasets.ImageFolder\nclass DatasetFolder(data.Dataset):\n \"\"\"A generic data loader where the samples are arranged in this way: ::\n\n root/class_x/xxx.ext\n root/class_x/xxy.ext\n root/class_x/xxz.ext\n\n root/class_y/123.ext\n root/class_y/nsdf3.ext\n root/class_y/asd932_.ext\n\n Args:\n root (string): Root directory path.\n loader (callable): A function to load a sample given its path.\n extensions (list[string]): A list of allowed extensions.\n transform (callable, optional): A function/transform that takes in\n a sample and returns a transformed version.\n E.g, ``transforms.RandomCrop`` for images.\n target_transform (callable, optional): A function/transform that takes\n in the target and transforms it.\n\n Attributes:\n classes (list): List of the class names.\n class_to_idx (dict): Dict with items (class_name, class_index).\n samples (list): List of (sample path, class_index) tuples\n targets (list): The class_index value for each image in the dataset\n \"\"\"\n def __init__(self, root, loader, extensions, transform=None, target_transform=None):\n classes, class_to_idx = self._find_classes(root)\n samples = make_dataset(root, class_to_idx, extensions)\n if len(samples) == 0:\n raise (RuntimeError(\"Found 0 files in subfolders of: \" + root + \"\\n\"\n \"Supported extensions are: \" + \",\".join(extensions)))\n\n self.root = root\n self.loader = loader\n self.extensions = extensions\n\n self.classes = classes\n self.class_to_idx = class_to_idx\n self.samples = samples\n self.targets = [s[1] for s in samples] # s = (sample path, class_index)\n\n self.transform = transform\n self.target_transform = target_transform\n\n imfile_name = '%s/images.pkl' % self.root # train_subdir/images.pkl\n if os.path.isfile(imfile_name):\n with open(imfile_name, 'rb') as f:\n self.images = pickle.load(f)\n else:\n self.images = None\n\n def _find_classes(self, dir):\n \"\"\"\n Finds the class folders in a dataset.\n\n Args:\n dir (string): Root directory path.\n\n Returns:\n tuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a 
dictionary.\n\n Ensures:\n No class is a subdirectory of another.\n \"\"\"\n if sys.version_info >= (3, 5):\n # Faster and available in Python 3.5 and above\n classes = [d.name for d in os.scandir(dir) if d.is_dir()]\n else:\n classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]\n classes.sort()\n class_to_idx = {classes[i]: i for i in range(len(classes))}\n return classes, class_to_idx\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n attrs from subclass: p_labels, p_weights, class_weights\n\n Returns:\n tuple: (sample, target, weight, c_weight)\n target: class_index of the target class\n weight: pseudo sample weight of this sample\n c_weight: class weight of this sample\n \"\"\"\n\n path, target = self.samples[index]\n\n if (index not in self.labeled_idx):\n target = self.p_labels[index] # pseudo label\n\n weight = self.p_weights[index] # pseudo weight\n\n if self.images is not None:\n sample = Image.fromarray(self.images[index, :, :, :]) # read from pkl\n else:\n sample = self.loader(path) # read from img\n\n if self.transform is not None:\n sample = self.transform(sample)\n\n c_weight = self.class_weights[target]\n\n return sample, target, weight, c_weight\n\n def __len__(self):\n return len(self.samples)\n\n def __repr__(self):\n fmt_str = 'Dataset ' + self.__class__.__name__ + '\\n'\n fmt_str += ' Number of datapoints: {}\\n'.format(self.__len__())\n fmt_str += ' Root Location: {}\\n'.format(self.root)\n tmp = ' Transforms (if any): '\n fmt_str += '{0}{1}\\n'.format(tmp,\n self.transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n tmp = ' Target Transforms (if any): '\n fmt_str += '{0}{1}'.format(\n tmp,\n self.target_transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n return fmt_str\n\n\nIMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif']\n\n\ndef pil_loader(path):\n # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)\n with open(path, 'rb') as f:\n img = Image.open(f)\n return img.convert('RGB')\n\n\ndef accimage_loader(path):\n import accimage\n try:\n return accimage.Image(path)\n except IOError:\n # Potentially a decoding problem, fall back to PIL.Image\n return pil_loader(path)\n\n\ndef default_loader(path):\n from torchvision import get_image_backend\n if get_image_backend() == 'accimage':\n return accimage_loader(path)\n else: # default is PIL\n return pil_loader(path)\n\n\nclass DBSS(DatasetFolder):\n \"\"\"A generic data loader where the images are arranged in this way: ::\n\n root/dog/xxx.png\n root/dog/xxy.png\n root/dog/xxz.png\n\n root/cat/123.png\n root/cat/nsdf3.png\n root/cat/asd932_.png\n\n Args:\n root (string): Root directory path.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. 
E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n loader (callable, optional): A function to load an image given its path.\n\n Attributes:\n classes (list): List of the class names.\n class_to_idx (dict): Dict with items (class_name, class_index).\n imgs (list): List of (image path, class_index) tuples\n \"\"\"\n def __init__(self, root, transform=None, target_transform=None, loader=default_loader):\n super(DBSS, self).__init__(root,\n loader,\n IMG_EXTENSIONS,\n transform=transform,\n target_transform=target_transform)\n self.imgs = self.samples\n\n self.pos_list = dict()\n self.pos_w = dict()\n self.pos_dist = dict()\n\n self.labeled_idx = []\n self.unlabeled_idx = []\n self.all_labels = [] # save ori label idx\n\n # pseudo weights and cls weights\n self.p_labels = []\n self.p_weights = np.ones((len(self.imgs), )) # default 1\n self.class_weights = np.ones((len(self.classes), ),\n dtype=np.float32) # default cls_weight = 1\n\n self.images_lists = [[] for i in range(len(self.classes))] # each cls has a list\n\n def update_plabels(self, X, k=50, max_iter=20):\n \"\"\"update pseudo lables\n\n Args:\n X (np.ndarray): feature vectors (n,128)\n k (int, optional): neighborhood size. Defaults to 50. [hyperparam]\n max_iter (int, optional): iterate times to get Z. Defaults to 20. [hyperparam]\n\n Returns:\n [type]: [description]\n \"\"\"\n\n print('Updating pseudo-labels...')\n alpha = 0.99\n\n # label/unlabel index\n labels = np.asarray(self.all_labels) # (N,)\n labeled_idx = np.asarray(self.labeled_idx) # (L,)\n unlabeled_idx = np.asarray(self.unlabeled_idx) # (N-L,)\n\n # kNN search for the graph with faiss\n N, d = X.shape # note this N is the set of samples to be propapated, not len(labels) necessarily\n # build index\n res = faiss.StandardGpuResources()\n flat_config = faiss.GpuIndexFlatConfig()\n flat_config.device = int(torch.cuda.device_count()) - 1\n index = faiss.GpuIndexFlatIP(res, d, flat_config) # build the index\n faiss.normalize_L2(X) # note L2 norm, then L2 = IP = cos similarity\n index.add(X)\n # search index\n c = time.time()\n D, I = index.search(X, k + 1) # use k+1, cuz the 1st nearest is itself\n elapsed = time.time() - c\n print('kNN Search done in %d seconds' % elapsed)\n\n # Create the graph\n D = D[:, 1:]**3 # note (eq 9)\n I = I[:, 1:]\n row_idx = np.arange(N)\n row_idx_rep = np.tile(row_idx, (k, 1)).T # (N, k), row index repeat\n # sparse weight matrix, k/N has affinity weights\n W = scipy.sparse.csr_matrix(\n (\n D.flatten('F'), # data, 'F' column-major flatten\n (row_idx_rep.flatten('F'), I.flatten('F')) # (row_idx, col_idx)\n ),\n shape=(N, N))\n W = W + W.T # symmetric afffinity matrix\n\n # Normalize the graph\n W = W - scipy.sparse.diags(W.diagonal()) # W_ii = 0\n S = W.sum(axis=1)\n S[S == 0] = 1 # if sum(sim)=0, attentioned w_ij = w_ij, cuz the whole impact equals zero.\n D = np.array(1. 
/ np.sqrt(S)) # D^(-1/2)\n        D = scipy.sparse.diags(D.reshape(-1))\n        Wn = D * W * D # normalized weight\n\n        # Initialize the y vector for each class (eq 5, normalized with the class size) and apply label propagation\n        C = len(self.classes)\n        Z = np.zeros((N, C))\n        A = scipy.sparse.eye(Wn.shape[0]) - alpha * Wn # (I-αW)\n        for i in range(C):\n            cur_idx = labeled_idx[np.where(labels[labeled_idx] == i)] # sample idx with cls=i\n            y = np.zeros((N, ))\n            y[cur_idx] = 1.0 / cur_idx.shape[0] # cls i samples, cls_weight\n            # note solve (I-αW)Z = Y (eq 10)\n            f, _ = scipy.sparse.linalg.cg(A, y, tol=1e-6, maxiter=max_iter)\n            Z[:, i] = f # get the propagated matrix of each class\n\n        # Handle numerical errors\n        Z[Z < 0] = 0\n\n        # Compute the weight for each instance based on the entropy (eq 11)\n        probs_l1 = normalize(Z, order=1, axis=1) # use l1-norm so that sum(probs)=1\n        probs_l1[probs_l1 < 0] = 0\n        entropy = scipy.stats.entropy(probs_l1, axis=1) # (N,c) -> (N,)\n        weights = 1 - entropy / np.log(C) # (eq 11) where log(c) is the max entropy\n        weights = weights / np.max(weights) # max_val normalize\n        p_labels = np.argmax(probs_l1, 1)\n\n        # Compute the accuracy of pseudo labels for statistical purposes\n        # note this line can be placed after line 350, place here is more strict\n        acc = (p_labels == labels).mean()\n\n        p_labels[labeled_idx] = labels[labeled_idx] # GT labeled still use GT\n        weights[labeled_idx] = 1.0\n\n        self.p_weights = weights.tolist() # pseudo sample weights\n        self.p_labels = p_labels\n\n        # Compute the weight for each class, c_i = 1/C * N / N_c\n        for i in range(C):\n            self.class_weights[i] = (labels.shape[0] / C) / float((self.p_labels == i).sum())\n\n        return acc\n","sub_path":"lp/db_semisuper.py","file_name":"db_semisuper.py","file_ext":"py","file_size_in_byte":12860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"520828623","text":"class Solution:\n    def intToRoman(self, num):\n        NUMS = {\n            0: '',   # the 0 entry is required\n            1: 'I',\n            2: 'II',\n            3: 'III',\n            4: 'IV',\n            5: 'V',\n            6: 'VI',\n            7: 'VII',\n            8: 'VIII',\n            9: 'IX',\n        }\n        ROMAN = {\n            'I': ['I', 'X', 'C', 'M'],  # ones, tens, hundreds, thousands\n            'V': ['V', 'L', 'D', '?'],  # five, fifty, five hundred\n            'X': ['X', 'C', 'M', '?']   # tens, hundreds, thousands\n        }\n        ans, s = [], \"\"\n        while num != 0:\n            yu = num % 10\n            ans.append(yu)\n            num = num // 10\n        for i in range(len(ans)):\n            ans[i] = NUMS[ans[i]]\n            ans[i] = ans[i].replace('X', ROMAN['X'][i]).replace('I', ROMAN['I'][i]).replace('V', ROMAN['V'][i]) # replace X first, because replacing I can produce X\n            s = ans[i] + s\n        return s\n\nif __name__ == '__main__':\n    solution = Solution()\n    print(solution.intToRoman(99))\n","sub_path":"Python算法指南/10_整数转换为罗马数字_字典.py","file_name":"10_整数转换为罗马数字_字典.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"285650939","text":"import dep\r\nfrom xbox_www import app\r\nfrom contextlib import closing\r\nfrom collections import namedtuple\r\n\r\ndb_driver = __import__(app.config['DB_DRIVER'])\r\n\r\n\r\ndef get_db():\r\n    with app.app_context():\r\n        if not hasattr(app, 'database'):\r\n            app.database = db_driver.Connection('{user}/{psw}@{host}'.format(user=app.config['DB_USER'],\r\n                                                                             psw=app.config['DB_PASS'],\r\n                                                                             host=app.config['DB_HOST']))\r\n    return app.database\r\n\r\n\r\n@app.cache.memoize(timeout=30)\r\ndef get_fmg_synchro_details():\r\n    co = get_db()\r\n    with closing(co.cursor()) as cu:\r\n        rs_gap_card, = cu.execute('select count(1) from xbox_main.w_psync_gap').fetchone()\r\n
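        # Note (added, illustrative): the trailing comma unpacks the 1-tuple that\r\n        # fetchone() returns for a single-column query, e.g. (hypothetical table name):\r\n        #   n, = cu.execute('select count(1) from some_table').fetchone()\r\n        rs_gap_diff, = cu.execute('select count(1) from 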
xbox_main.w_psync_card').fetchone()\r\n rs_pending, = cu.execute('select count(1) from xbox_main.we_w_tasks where cell_name =:cell and status=:status',\r\n cell='MIbo2SMP', status='RDY').fetchone()\r\n return rs_gap_card, rs_gap_diff, rs_pending != 0\r\n\r\n\r\n@app.cache.memoize(timeout=30)\r\ndef cleanup_gap_tables():\r\n co = get_db()\r\n with closing(co.cursor()) as cu:\r\n cu.execute('delete from xbox_main.w_psync_gap')\r\n cu.execute('delete from xbox_main.w_psync_card_attr')\r\n cu.execute('delete from xbox_main.w_psync_card')\r\n co.commit()\r\n return True # functions without return are not cached.\r\n\r\n\r\n@app.cache.memoize(timeout=60)\r\ndef get_basic_status():\r\n co = get_db()\r\n with closing(co.cursor()) as cu:\r\n status = True\r\n errors, = cu.execute('''select count(internal_id) from xbox_main.we_w_tasks\r\n left join xbox_main.we_w_errors on (err_internal_id = internal_id and cell_name = err_cell_name)\r\n where status = 'ERR' ''').fetchone()\r\n nb_open_wf, = cu.execute('select count(internal_id) from xbox_main.we_w_internal_id').fetchone()\r\n return status, errors, nb_open_wf\r\n\r\n\r\nTask = namedtuple('Task', ('internal_id', 'cell', 'status', 'creation', 'bv', 'err_description', 'err_details'))\r\nInfo = namedtuple('Info', ('internal_id', 'name', 'source', 'creation'))\r\n\r\n\r\ndef search_by_reqid(reqid):\r\n co = get_db()\r\n with closing(co.cursor()) as cu:\r\n rs_info = cu.execute('''select internal_id, wf_name, wf_source, created_on from xbox_main.we_w_internal_id where\r\n request_id = :rid''', rid=reqid)\r\n infos = [Info._make(e) for e in rs_info.fetchall()]\r\n iids = [str(i.internal_id) for i in infos]\r\n rs_tasks = cu.execute('''select internal_id, cell_name, status, created_on, business_value, err_primary_description, err_secondary_description\r\n from xbox_main.we_w_tasks\r\n left join xbox_main.we_w_errors on (err_internal_id = internal_id and cell_name = err_cell_name)\r\n where internal_id in (:iids) order by created_on asc''', iids=','.join(iids))\r\n tasks = map(Task._make, rs_tasks.fetchall())\r\n return tasks, infos\r\n\r\n\r\ndef search_by_iid(iid):\r\n co = get_db()\r\n with closing(co.cursor()) as cu:\r\n rs_info = cu.execute('''select internal_id, wf_name, wf_source, created_on from xbox_main.we_w_internal_id where\r\n internal_id = :iid''', iid=iid).fetchone()\r\n info = Info._make(rs_info) if rs_info else None\r\n rs_tasks = cu.execute('''select internal_id, cell_name, status, created_on, business_value, err_primary_description, err_secondary_description\r\n from xbox_main.we_w_tasks\r\n left join xbox_main.we_w_errors on (err_internal_id = internal_id and cell_name = err_cell_name)\r\n where internal_id = :iid order by created_on asc''', iid=iid)\r\n tasks = map(Task._make, rs_tasks.fetchall())\r\n return tasks, info\r\n\r\n\r\ndef search_iids_by_pan(pan):\r\n co = get_db()\r\n with dep.DEPConnection(app.config['PCI_DEPS']) as depco:\r\n pan_hashed, _, _ = depco.pci.hashPan(pan)\r\n with closing(co.cursor()) as cu:\r\n rs_offl = cu.execute('''select resir_internal_id, resir_request_type, resir_crea_date\r\n from xbox_main.w_response_in_record where resir_card_number = :pan''', pan=format(pan, '19s'))\r\n offls = rs_offl.fetchall()\r\n rs_mibo = cu.execute('''select cda_card_type, cda_acc_id, acc_parent_acc_id, acc_cams_account, cus_id,\r\n car_creation_tstamp, car_id, car_status, car_psn, car_expiry_date\r\n from xbox_main.d_card\r\n join xbox_main.d_card_data on (cda_id = car_cda_id)\r\n join xbox_main.d_account on (acc_id = 
cda_acc_id)\r\n join xbox_main.d_customer on (cus_id = acc_cus_id)\r\n where cda_pan_hashed = :hash''', hash=pan_hashed)\r\n mibos = rs_mibo.fetchall()\r\n if len(mibos) > 0:\r\n rs_xmls = cu.execute('''select internal_id, action, created_on from xbox_main.w_xml_in_request where\r\n to_number(EXTRACTVALUE(xmltype(content), '/*/Request/Key/CustomerId')) = :cusid or\r\n to_number(EXTRACTVALUE(xmltype(content), '/*/Request/Key/AccountId'))) = :accid or\r\n substr(EXTRACTVALUE(xmltype(content), '/*/Request/Key/CardToken/Value'), 0, 10) = :camsacc\r\n ''', cusid=mibos[0][4], accid=mibos[0][1], camsacc=mibos[0][3])\r\n\r\n xmls = rs_xmls.fetchall()\r\n else: # pragma: no cover\r\n xmls = []\r\n return offls, xmls, mibos, pan_hashed\r\n\r\n\r\ndef search_iids_by_hash(pan_hashed): # pragma: no cover\r\n co = get_db()\r\n with closing(co.cursor()) as cu:\r\n rs_mibo = cu.execute('''select cda_card_type, cda_acc_id, acc_parent_acc_id, acc_cams_account, cus_id,\r\n car_creation_tstamp, car_id, car_status, car_psn, car_expiry_date\r\n from xbox_main.d_card\r\n join xbox_main.d_card_data on (cda_id = car_cda_id)\r\n join xbox_main.d_account on (acc_id = cda_acc_id)\r\n join xbox_main.d_customer on (cus_id = acc_cus_id)\r\n where cda_pan_hashed = :hash''', hash=pan_hashed)\r\n mibos = rs_mibo.fetchall()\r\n if len(mibos) > 0:\r\n rs_xmls = cu.execute('''select internal_id, action, created_on from xbox_main.w_xml_in_request where\r\n to_number(EXTRACTVALUE(xmltype(content), '/*/Request/Key/CustomerId')) = :cusid or\r\n to_number(EXTRACTVALUE(xmltype(content), '/*/Request/Key/AccountId'))) = :accid or\r\n substr(EXTRACTVALUE(xmltype(content), '/*/Request/Key/CardToken/Value'), 0, 10) = :camsacc\r\n ''', cusid=mibos[0][4], accid=mibos[0][1], camsacc=mibos[0][3])\r\n\r\n xmls = rs_xmls.fetchall()\r\n else: # pragma: no cover\r\n xmls = []\r\n return xmls, mibos\r\n\r\n\r\nclass FileTransfer:\r\n def __init__(self, filename, created_on, details=None):\r\n self.filename = filename\r\n self.created_on = created_on\r\n self.details = details\r\n self.url = None\r\n\r\n\r\n@app.cache.memoize(timeout=60)\r\ndef get_ft_details():\r\n co = get_db()\r\n with closing(co.cursor()) as cu:\r\n rs = cu.execute('select filename, created_on, batch_id from xbox_main.w_nfr_out_file order by created_on desc')\r\n nfrs = [FileTransfer(*e) for e in rs.fetchall()]\r\n rs = cu.execute('select f.file_name, f.crea_date, count(internal_id) from xbox_main.w_speos_out_file f '\r\n 'join xbox_main.w_speos_out_record r on (f.file_id = r.file_id) '\r\n 'group by f.file_name, f.crea_date '\r\n 'order by f.crea_date desc')\r\n zoomit = [FileTransfer(*e) for e in rs.fetchall()]\r\n return {'nfr': nfrs, 'zoomit': zoomit}\r\n\r\n\r\nError = namedtuple('Error', ('internal_id', 'creation', 'description', 'details', 'cell'))\r\n\r\n\r\n@app.cache.memoize(timeout=30)\r\ndef get_error_details():\r\n co = get_db()\r\n with closing(co.cursor()) as cu:\r\n rs = cu.execute('''select internal_id, created_on, err_primary_description, err_secondary_description, cell_name\r\n from xbox_main.we_w_tasks\r\n left join xbox_main.we_w_errors on (err_internal_id = internal_id and cell_name = err_cell_name)\r\n where status = 'ERR'\r\n order by internal_id desc''')\r\n return map(Error._make, rs.fetchall())\r\n\r\n\r\ndef trigger550(username):\r\n co = get_db()\r\n with closing(co.cursor()) as cu:\r\n rs_pending, = cu.execute('select count(1) from xbox_main.we_w_tasks where cell_name =:cell and status in (\\'RDY\\', \\'RUN\\')',\r\n 
cell='MIbo2SMP').fetchone()\r\n        if rs_pending != 0:\r\n            raise Exception('Existing task MIbo2SMP')\r\n        iid, = cu.execute('select xbox_main.s_internal_id.nextval from dual').fetchone()\r\n        cu.execute('insert into xbox_main.we_w_internal_id(internal_id, requestor_id, request_id, wf_name, wf_source)'\r\n                   'values (:iid, :reqor, :reqid, :wf, :source)', iid=iid, reqor='000000', reqid='123456',\r\n                   wf='PeriodicSynchro', source=username)\r\n        cu.execute('insert into xbox_main.we_w_tasks (internal_id, cell_name, status) values (:iid, :cell, \'RDY\')',\r\n                   iid=iid, cell='MIbo2SMP')\r\n        co.commit()\r\n","sub_path":"xbox_www/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":9264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"52233779","text":"import findspark\nfindspark.init()\n\nfrom pyspark import SparkConf, SparkContext\nfrom pyspark import SQLContext\nfrom pyspark import SparkConf\nfrom pyspark import StorageLevel\nfrom pyspark.rdd import RDD\nfrom termcolor import colored\nimport csv\nfrom io import StringIO\n\n\nconf = SparkConf().setMaster('local').setAppName('PySparkShell')\nsc = SparkContext(conf=conf)\n\n# set the log level\nsc.setLogLevel(\"WARN\")\n\ninputFile = 'i3demo.csv'\n\ndef load_record(line):\n    # parse a single csv line\n    inputa = StringIO(line)\n    reader = csv.DictReader(inputa, fieldnames=[\n        \"name\", \"year\", \"addr\", \"d1\", \"d2\"])\n    return next(reader)\n\ninputb = sc.textFile(inputFile).map(load_record)\nfor i in inputb.take(10):\n    print(colored('i==>', 'red'), i)\n\n\n\nprint('\\n-- reading the whole CSV in Python --\\n')\ndef load_all_records(filenamecontents):\n    inputa = StringIO(filenamecontents[1])\n    reader = csv.DictReader(inputa, fieldnames=[\n        \"name\", \"year\", \"addr\", \"d1\", \"d2\"])\n    return reader\n\ninputc = sc.wholeTextFiles(inputFile).flatMap(load_all_records)\nfor i in inputc.take(10):\n    print(colored('i==>', 'green'), i)
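\n\n# Hypothetical illustration (added; not in the original file): DictReader maps each\n# csv line onto the fieldnames above, e.g. the line '1,2,,4,5' parses to\n#   {'name': '1', 'year': '2', 'addr': '', 'd1': '4', 'd2': '5'}\n","sub_path":"i00Learning Spark/ch5data_read_save/i3csv_read.py","file_name":"i3csv_read.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"561637025","text":"# +\n# Copyright 2014 iXsystems, Inc.\n# All rights reserved\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted providing that the following conditions\n# are met:\n# 1. Redistributions of source code must retain the above copyright\n#    notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n#    notice, this list of conditions and the following disclaimer in the\n#    documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 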
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\n# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,\n# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING\n# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n#\n#####################################################################\n\nimport errno\nimport gevent\nimport logging\nimport os\nimport sys\nimport re\nimport tarfile\nimport shutil\nfrom freenas.utils import human_readable_bytes\nfrom freenas.utils.decorators import throttle\nfrom resources import Resource\nfrom cache import CacheStore\nfrom freenas.dispatcher.rpc import (\n RpcException, description, accepts, returns, private, SchemaHelper as h\n)\nfrom task import (\n Provider, Task, ProgressTask, TaskException, TaskDescription,\n VerifyException, TaskWarning\n)\n\nif '/usr/local/lib' not in sys.path:\n sys.path.append('/usr/local/lib')\nfrom freenasOS import Configuration, Train, Manifest, disable_trygetfilelogs\nfrom freenasOS.Exceptions import (\n UpdateManifestNotFound, ManifestInvalidSignature, UpdateBootEnvironmentException,\n UpdatePackageException, UpdateIncompleteCacheException, UpdateBusyCacheException,\n ChecksumFailException, UpdatePackageNotFound\n)\nfrom freenasOS.Update import CheckForUpdates, DownloadUpdate, ApplyUpdate, Avatar\n\n# The function calls below help reduce the debug logs\n# removing unnecessary 'TryGetNetworkFile' and such logs\n# thereby not inundating /var/log/dispatcher.log\ndisable_trygetfilelogs()\n\nlogger = logging.getLogger('UpdatePlugin')\nupdate_cache = CacheStore()\ndefault_update_dict = {\n 'available': False,\n 'operations': None,\n 'notes': None,\n 'notice': None,\n 'downloaded': False,\n 'changelog': '',\n 'version': '',\n 'installed': False,\n 'installed_version': ''\n}\n\nupdate_resource_string = 'update:operations'\n\n\nUPDATE_ALERT_TITLE_MAP = {\n 'UpdateAvailable': 'Update Available',\n 'UpdateDownloaded': 'Update Downloaded',\n 'UpdateInstalled': 'Update Installed'\n}\n\n\ndef parse_changelog(changelog, start='', end=''):\n \"Utility function to parse an available changelog\"\n regexp = r'### START (\\S+)(.+?)### END \\1'\n reg = re.findall(regexp, changelog, re.S | re.M)\n\n if not reg:\n return None\n\n changelog = None\n for seq, changes in reg:\n if not changes.strip('\\n'):\n continue\n if seq == start:\n # Once we found the right one, we start accumulating\n changelog = []\n elif changelog is not None:\n changelog.append(changes.strip('\\n'))\n if seq == end:\n break\n aggregated_changelog = []\n try:\n for x in changelog:\n aggregated_changelog.extend(x.split('\\n'))\n except Exception:\n # a traceback here is not worth taking the whole update task with it\n # hence the bare except\n pass\n return aggregated_changelog or ['']\n\n\ndef get_changelog(train, cache_dir='/var/tmp/update', start='', end=''):\n \"Utility to get and eventually parse a changelog if available\"\n conf = Configuration.Configuration()\n changelog = None\n try:\n changelog = conf.GetChangeLog(train=train, save_dir=cache_dir)\n if not changelog:\n return None\n return parse_changelog(changelog.read().decode('utf8'), start, end)\n finally:\n if changelog:\n changelog.close()\n\n\n# The handler(s) below is/are taken from the freenas 9.3 code\n# specifically 
from gui/system/utils.py\nclass CheckUpdateHandler(object):\n \"A handler for the CheckUpdate call\"\n\n def __init__(self):\n self.changes = []\n\n def call(self, op, newpkg, oldpkg):\n self.changes.append({\n 'operation': op,\n 'old': oldpkg,\n 'new': newpkg,\n })\n\n def output(self):\n output = []\n for c in self.changes:\n opdict = {\n 'operation': c['operation'],\n 'previous_name': c['old'].Name() if c['old'] else None,\n 'previous_version': c['old'].Version() if c['old'] else None,\n 'new_name': c['new'].Name() if c['new'] else None,\n 'new_version': c['new'].Version() if c['new'] else None,\n }\n output.append(opdict)\n return output\n\n\ndef is_update_applied(dispatcher, update_version):\n # TODO: The below boot env name should really be obtained from the update code\n # for now we just duplicate that code here\n if update_version.startswith(Avatar() + \"-\"):\n update_boot_env = update_version[len(Avatar() + \"-\"):]\n else:\n update_boot_env = \"%s-%s\" % (Avatar(), update_version)\n\n return dispatcher.call_sync(\n 'boot.environment.query', [('realname', '=', update_boot_env)], {\"single\": True}\n )\n\n\ndef check_updates(dispatcher, configstore, cache_dir=None, check_now=False):\n \"\"\"\n Utility function to just check for Updates\n \"\"\"\n update_cache_value_dict = default_update_dict.copy()\n\n # If the current check is an online one (and not in the cache_dir)\n # then store the current update info and use them to restore the update cache\n # if the update check fails, this way a downloaded update is not lost from\n # the cache if a live online one fails!\n current_update_info = None\n try:\n current_update_info = dispatcher.call_sync('update.update_info')\n if (\n current_update_info.get('installed') and\n is_update_applied(dispatcher, current_update_info.get('installed_version'))\n ):\n update_cache_value_dict.update({\n 'installed': True,\n 'installed_version': current_update_info.get('installed_version')\n })\n\n if check_now and current_update_info['downloaded']:\n update_cache_value_dict.update(current_update_info.copy())\n update_cache_value_dict['available'] = True\n except RpcException:\n pass\n\n logger.trace('check_updates: this is the current_update_info: {0}'.format(current_update_info))\n dispatcher.call_sync('update.update_cache_invalidate', list(update_cache_value_dict.keys()))\n\n conf = Configuration.Configuration()\n handler = CheckUpdateHandler()\n train = configstore.get('update.train')\n\n logger.trace(\n 'check_updates: Update server name: {0}, url: {1}'.format(\n conf.UpdateServerName(), conf.UpdateServerURL())\n )\n\n try:\n update = CheckForUpdates(\n handler=handler.call,\n train=train,\n cache_dir=None if check_now else cache_dir,\n )\n\n if update:\n version = update.Version()\n update_installed_bootenv = is_update_applied(dispatcher, version)\n sys_mani = conf.SystemManifest()\n sequence = sys_mani.Sequence() if sys_mani else ''\n if version == update_cache_value_dict['installed_version'] or update_installed_bootenv:\n if update_installed_bootenv and update_installed_bootenv['active']:\n # it could be possible that the installed os has the same version as\n # the one available from the server and yet the one on the server has\n # something new to offer, hence check for seequence numbers to rule out\n # all doubt\n if update.Sequence() == sequence:\n logger.debug('Update has same sequence number as current OS')\n # At this point clear any installed version stuff\n update_cache_value_dict.update({\n 'installed': False,\n 'installed_version': 
''\n })\n else:\n logger.debug(\n 'Update has same version but different sequence number as current OS'\n )\n else:\n # TODO: mount the BE in question to inspect sequence number\n logger.debug(\n 'Update version {0} is already installed in BE {1}'.format(\n version, update_installed_bootenv\n )\n )\n update_cache_value_dict = default_update_dict.copy()\n update_cache_value_dict.update({\n 'installed': True,\n 'installed_version': version\n })\n dispatcher.call_sync(\n 'update.update_alert_set',\n 'UpdateInstalled',\n version,\n {'update_installed_bootenv': update_installed_bootenv}\n )\n else:\n logger.debug('Update {0} is available'.format(version))\n try:\n if check_now:\n changelog = get_changelog(\n train, cache_dir=cache_dir, start=sequence, end=update.Sequence()\n )\n else:\n with open(\"{0}/ChangeLog.txt\".format(cache_dir), 'r') as changelog_file:\n changelog = parse_changelog(\n changelog_file.read(), start=sequence, end=update.Sequence()\n )\n except Exception:\n changelog = ''\n update_cache_value_dict.update({\n 'available': True,\n 'notes': update.Notes(),\n 'notice': update.Notice(),\n 'operations': handler.output(),\n 'version': version,\n 'changelog': changelog,\n 'downloaded': False if check_now else True\n })\n\n dispatcher.call_sync(\n 'update.update_alert_set',\n 'UpdateDownloaded' if update_cache_value_dict['downloaded'] else 'UpdateAvailable',\n update_cache_value_dict['version']\n )\n\n else:\n logger.debug('No update available')\n finally:\n dispatcher.call_sync('update.update_cache_putter', update_cache_value_dict)\n\n\nclass UpdateHandler(object):\n \"A handler for Downloading and Applying Updates calls\"\n\n def __init__(self, dispatcher, update_progress=None):\n self.progress = 0\n self.details = ''\n self.finished = False\n self.error = False\n self.indeterminate = False\n self.reboot = False\n self.pkgname = ''\n self.pkgversion = ''\n self.operation = ''\n self._baseprogress = 0\n self.master_progress = 0\n self.pkgindex = 0\n self.numpkgs = 0\n self.dispatcher = dispatcher\n # Below is the function handle passed to this by the Task so that\n # its status and progress can be updated accordingly\n self.update_progress = update_progress\n\n def check_handler(self, index, pkg, pkgList):\n self.pkgname = pkg.Name()\n self.pkgversion = pkg.Version()\n self.operation = 'Downloading'\n self.details = 'Downloading {0}'.format(self.pkgname)\n stepprogress = int((1.0 / float(len(pkgList))) * 100)\n self._baseprogress = index * stepprogress\n self.progress = (index - 1) * stepprogress\n # self.emit_update_details()\n\n def get_handler(self, method, filename, size=None, progress=None, download_rate=None):\n if progress is not None:\n self.progress = (progress * self._baseprogress) / 100\n if self.progress == 0:\n self.progress = 1\n display_size = ' Size: {0} '.format(human_readable_bytes(size)) if size else ''\n display_rate = ' Rate: {0} '.format(human_readable_bytes(download_rate, suffix='/s')) if download_rate else ''\n self.details = 'Downloading: {0} Progress:{1}{2}{3}'.format(\n self.pkgname, progress, display_size, display_rate\n )\n self.emit_update_details()\n\n def install_handler(self, index, name, packages):\n self.indeterminate = False\n self.numpkgs = len(packages)\n self.pkgindex = index\n self.pkgname = name\n self.progress = self._baseprogress = int((self.pkgindex - 1) * 100 / self.numpkgs)\n self.operation = 'Installing'\n self.details = 'Installing {0}'.format(self.pkgname)\n self.emit_update_details()\n\n def install_progress_handler(self, 
**kwargs):\n total = kwargs.pop(\"total\", 0)\n index = kwargs.pop(\"index\", 0)\n done = kwargs.pop(\"done\", False)\n\n if done:\n self.progress = int(self.pkgindex * 100 / self.numpkgs)\n self.details = 'Done installing {0}'.format(self.pkgname)\n elif total:\n cur_pct = int(index * 100 / (total * self.numpkgs))\n self.details = 'Installing {0} Progress: {1}'.format(\n self.pkgname, self.numpkgs * cur_pct\n )\n if (self._baseprogress + cur_pct) > self.progress:\n self.progress = self._baseprogress + cur_pct\n\n self.emit_update_details()\n\n @throttle(seconds=1)\n def emit_update_details(self):\n # Doing the drill below as there is a small window when\n # step*progress logic does not catch up with the new value of step\n if self.progress >= self.master_progress:\n self.master_progress = self.progress\n data = {\n 'indeterminate': self.indeterminate,\n 'percent': self.master_progress,\n 'reboot': self.reboot,\n 'pkg_name': self.pkgname,\n 'pkg_version': self.pkgversion,\n 'error': self.error,\n 'finished': self.finished,\n 'details': self.details,\n }\n if self.update_progress is not None:\n self.update_progress(self.master_progress, self.details)\n self.dispatcher.dispatch_event('update.in_progress', {\n 'operation': self.operation,\n 'data': data,\n })\n\n\ndef generate_update_cache(dispatcher, cache_dir=None):\n if cache_dir is None:\n try:\n cache_dir = dispatcher.call_sync('system_dataset.request_directory', 'update')\n except RpcException:\n cache_dir = '/var/tmp/update'\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n update_cache.put('cache_dir', cache_dir)\n try:\n check_updates(dispatcher, dispatcher.configstore, cache_dir=cache_dir)\n except:\n # What to do now?\n logger.debug('generate_update_cache (UpdatePlugin) falied, traceback: ', exc_info=True)\n\n\n@description(\"Provides System Updater Configuration\")\nclass UpdateProvider(Provider):\n\n @accepts()\n @returns(str)\n def is_update_available(self):\n temp_available = update_cache.get('available', timeout=1)\n if temp_available is not None:\n return temp_available\n elif update_cache.is_valid('available'):\n return temp_available\n else:\n raise RpcException(errno.EBUSY, (\n 'Update Availability flag is invalidated, an Update Check'\n ' might be underway. Try again in some time.'\n ))\n\n @accepts()\n @returns(h.array(str))\n def obtain_changelog(self):\n temp_changelog = update_cache.get('changelog', timeout=1)\n if temp_changelog is not None:\n return temp_changelog\n elif update_cache.is_valid('changelog'):\n return temp_changelog\n else:\n raise RpcException(errno.EBUSY, (\n 'Changelog list is invalidated, an Update Check '\n 'might be underway. Try again in some time.'\n ))\n\n @accepts()\n @returns(h.array(h.ref('UpdateOps')))\n def get_update_ops(self):\n temp_operations = update_cache.get('operations', timeout=1)\n if temp_operations is not None:\n return temp_operations\n elif update_cache.is_valid('operations'):\n return temp_operations\n else:\n raise RpcException(errno.EBUSY, (\n 'Update Operations Dict is invalidated, an Update Check '\n 'might be underway. Try again in some time.'\n ))\n\n @accepts()\n @returns(h.ref('UpdateInfo'))\n def update_info(self):\n if not update_cache.is_valid('available'):\n raise RpcException(errno.EBUSY, (\n 'Update Availability flag is invalidated, an Update Check'\n ' might be underway. 
Try again in some time.'\n ))\n info_item_list = [\n 'available', 'changelog', 'notes', 'notice', 'operations', 'downloaded',\n 'version', 'installed', 'installed_version'\n ]\n return {key: update_cache.get(key, timeout=1) for key in info_item_list}\n\n @returns(h.any_of(\n h.array(h.ref('UpdateTrain')),\n None,\n ))\n def trains(self):\n conf = Configuration.Configuration()\n conf.LoadTrainsConfig()\n trains = conf.AvailableTrains()\n\n if trains is None:\n logger.debug('The AvailableTrains call returned None. Check your network connection')\n return None\n seltrain = self.dispatcher.configstore.get('update.train')\n\n data = []\n for name in list(trains.keys()):\n if name in conf._trains:\n train = conf._trains.get(name)\n else:\n train = Train.Train(name)\n data.append({\n 'name': train.Name(),\n 'description': train.Description(),\n 'sequence': train.LastSequence(),\n 'current': True if name == seltrain else False,\n })\n return data\n\n @accepts()\n @returns(str)\n def get_current_train(self):\n conf = Configuration.Configuration()\n conf.LoadTrainsConfig()\n return conf.CurrentTrain()\n\n @accepts()\n @returns(h.ref('Update'))\n def get_config(self):\n configuration = Configuration.Configuration()\n return {\n 'train': self.dispatcher.configstore.get('update.train'),\n 'check_auto': self.dispatcher.configstore.get('update.check_auto'),\n 'internal': configuration.UpdateServerName() == 'internal',\n 'update_server': configuration.UpdateServerURL(),\n }\n\n @private\n @accepts(h.array(str))\n def update_cache_invalidate(self, value_list):\n for item in value_list:\n update_cache.invalidate(item)\n\n @private\n @accepts(h.object())\n def update_cache_putter(self, value_dict):\n for key, value in value_dict.items():\n update_cache.put(key, value)\n self.dispatcher.dispatch_event('update.update_info.updated', {'operation': 'update'})\n\n @private\n @accepts(str)\n @returns(h.any_of(None, str, bool, h.array(str)))\n def update_cache_getter(self, key):\n return update_cache.get(key, timeout=1)\n\n @private\n @accepts(str, str, h.any_of(None, h.object(additionalProperties=True)))\n def update_alert_set(self, update_class, update_version, kwargs=None):\n # Formulating a query to find any alerts in the current `update_class`\n # which could be either of ('UpdateAvailable', 'UpdateDownloaded', 'UpdateInstalled')\n # as well as any alerts for the specified update version string.\n # The reason I do this is because say an Update is Downloaded (FreeNAS-10-2016051047)\n # and there is either a previous alert for an older downloaded update OR there is a\n # previous alert for the same version itself but for it being available instead of being\n # downloaded already, both of these previous alerts would need to be cancelled and\n # replaced by 'UpdateDownloaded' for FreeNAS-10-2016051047.\n if kwargs is None:\n kwargs = {}\n existing_update_alerts = self.dispatcher.call_sync(\n 'alert.query',\n [\n ('and', [('active', '=', True), ('dismissed', '=', False)]),\n ('or', [('clazz', '=', update_class), ('target', '=', update_version)])\n ]\n )\n title = UPDATE_ALERT_TITLE_MAP.get(update_class, 'Update Alert')\n desc = kwargs.get('desc')\n if desc is None:\n if update_class == 'UpdateAvailable':\n desc = 'Latest Update: {0} is available for download'.format(update_version)\n elif update_class == 'UpdateDownloaded':\n desc = 'Update containing {0} is downloaded and ready for install'.format(update_version)\n elif update_class == 'UpdateInstalled':\n update_installed_bootenv = 
kwargs.get('update_installed_bootenv')\n if update_installed_bootenv and not update_installed_bootenv['on_reboot']:\n desc = 'Update containing {0} is installed.'.format(update_version)\n desc += ' Please activate {0} and Reboot to use this updated version'.format(update_installed_bootenv['realname'])\n else:\n desc = 'Update containing {0} is installed and activated for next boot'.format(update_version)\n else:\n # what state is this?\n raise RpcException(\n errno.EINVAL, 'Unknown update alert class: {0}'.format(update_class)\n )\n alert_payload = {\n 'clazz': update_class,\n 'title': title,\n 'target': update_version,\n 'description': desc\n }\n\n alert_exists = False\n # Purposely deleting stale alerts later on since if anything (in constructing the payload)\n # above this fails the exception prevents alert.cancel from being called.\n for update_alert in existing_update_alerts:\n if (\n update_alert['clazz'] == update_class and\n update_alert[\"target\"] == update_version and\n update_alert[\"description\"] == desc\n ):\n alert_exists = True\n continue\n self.dispatcher.call_sync('alert.cancel', update_alert['id'])\n\n if not alert_exists:\n self.dispatcher.call_sync('alert.emit', alert_payload)\n\n\n@description(\"Set the System Updater Cofiguration Settings\")\n@accepts(h.ref('Update'))\nclass UpdateConfigureTask(Task):\n @classmethod\n def early_describe(cls):\n return \"Configuring updates\"\n\n def describe(self, props):\n return TaskDescription(\"Configuring updates\")\n\n def verify(self, props):\n # TODO: Fix this verify's resource allocation as unique task\n block = self.dispatcher.resource_graph.get_resource(update_resource_string)\n if block is not None and block.busy:\n raise VerifyException(\n errno.EBUSY,\n 'An update operation (Configuration/Downloading/Applying ' +\n 'the updates) is already in the queue; please retry later')\n\n return [update_resource_string]\n\n def run(self, props):\n if 'train' in props:\n train_to_set = props.get('train')\n conf = Configuration.Configuration()\n conf.LoadTrainsConfig()\n trains = conf.AvailableTrains() or []\n if trains:\n trains = list(trains.keys())\n if train_to_set not in trains:\n raise TaskException(errno.ENOENT, '{0} is not a valid train'.format(train_to_set))\n self.configstore.set('update.train', train_to_set)\n\n if 'check_auto' in props:\n self.configstore.set('update.check_auto', props['check_auto'])\n\n if 'internal' in props:\n conf = Configuration.SystemConfiguration()\n if 'internal' not in conf.ListUpdateServers():\n conf.AddUpdateServer(Configuration.UpdateServer(\n 'internal',\n 'http://update-int.ixsystems.com/FreeNAS/',\n signing=False\n ))\n\n conf.SetUpdateServer('internal' if props['internal'] else 'default')\n\n cache_dir = self.dispatcher.call_sync('update.update_cache_getter', 'cache_dir')\n check_updates(self.dispatcher, self.configstore, cache_dir=cache_dir, check_now=True)\n\n self.dispatcher.dispatch_event('update.changed', {'operation': 'update'})\n\n\n@description(\n \"Checks for Available Updates and returns if update is available \"\n \"and if yes returns information on operations that will be \"\n \"performed during the update\"\n)\n@accepts(h.object(properties={'check_now': bool}))\nclass CheckUpdateTask(Task):\n @classmethod\n def early_describe(cls):\n return \"Checking for updates\"\n\n def describe(self, conditions=None):\n return TaskDescription(\"Checking for updates\")\n\n def verify(self, conditions=None):\n # TODO: Fix this verify's resource allocation as unique task\n block = 
self.dispatcher.resource_graph.get_resource(update_resource_string)\n if block is not None and block.busy:\n raise VerifyException(errno.EBUSY, (\n 'An update operation (Configuration/Downloading/Applying '\n 'the updates) is already in the queue; please retry later'\n ))\n\n return [update_resource_string]\n\n def run(self, conditions=None):\n if conditions is None:\n conditions = {}\n\n check_now = conditions.get('check_now', True)\n cache_dir = self.dispatcher.call_sync('update.update_cache_getter', 'cache_dir')\n try:\n check_updates(\n self.dispatcher, self.configstore, cache_dir=cache_dir, check_now=check_now\n )\n except UpdateManifestNotFound:\n raise TaskException(errno.ENETUNREACH, 'Update server could not be reached')\n except Exception as e:\n raise TaskException(errno.EAGAIN, '{0}'.format(str(e)))\n\n\n@description(\"Downloads Updates for the current system update train\")\n@accepts()\nclass DownloadUpdateTask(ProgressTask):\n @classmethod\n def early_describe(cls):\n return \"Downloading updates\"\n\n def describe(self):\n return TaskDescription(\"Downloading updates\")\n\n def verify(self):\n if not update_cache.get('available', timeout=1):\n raise VerifyException(errno.ENOENT, (\n 'No updates currently available for download - check for new updates'\n ))\n\n block = self.dispatcher.resource_graph.get_resource(update_resource_string)\n if block is not None and block.busy:\n raise VerifyException(errno.EBUSY, (\n 'An update operation (Configuration/Downloading/Applying '\n 'the updates) is already in the queue; please retry later'\n ))\n\n return [update_resource_string]\n\n def update_progress(self, progress, message):\n self.set_progress(progress, message)\n\n def run(self):\n self.set_progress(0, 'Downloading Updates...')\n handler = UpdateHandler(self.dispatcher, update_progress=self.update_progress)\n train = self.configstore.get('update.train')\n cache_dir = self.dispatcher.call_sync('update.update_cache_getter', 'cache_dir')\n if cache_dir is None:\n try:\n cache_dir = self.dispatcher.call_sync(\n 'system_dataset.request_directory', 'update'\n )\n except RpcException:\n cache_dir = '/var/tmp/update'\n try:\n download_successful = DownloadUpdate(\n train,\n cache_dir,\n get_handler=handler.get_handler,\n check_handler=handler.check_handler\n )\n except ManifestInvalidSignature as e:\n raise TaskException(\n errno.EBADMSG, 'Latest manifest has invalid signature: {0}'.format(str(e))\n )\n except UpdateIncompleteCacheException as e:\n raise TaskException(errno.EIO, 'Possibly with no network, cached update is incomplete')\n except UpdateBusyCacheException as e:\n raise TaskException(errno.EBUSY, str(e))\n except ChecksumFailException as e:\n raise TaskException(errno.EBADMSG, str(e))\n except UpdatePackageNotFound as e:\n raise TaskException(\n errno.EIO,\n \"Update Package: '{0}' Not Found. 
This could be due to a failed Download\".format(str(e))\n )\n except Exception as e:\n raise TaskException(\n errno.EAGAIN, 'Got exception {0} while trying to Download Updates'.format(str(e))\n )\n if not download_successful:\n handler.error = True\n handler.emit_update_details()\n raise TaskException(\n errno.EAGAIN, 'Downloading Updates Failed for some reason, check logs'\n )\n check_updates(self.dispatcher, self.configstore, cache_dir=cache_dir, check_now=False)\n handler.finished = True\n handler.emit_update_details()\n self.set_progress(100, 'Updates finished downloading')\n\n\n@description(\"Apply a manual update using specified tarfile\")\n@accepts(str, h.one_of(bool, None))\nclass UpdateManualTask(ProgressTask):\n @classmethod\n def early_describe(cls):\n return 'Updating via the provided update tarfile'\n\n def describe(self, path, reboot_post_install=False):\n return TaskDescription(\"Updating from tarfile ({name})\".format(name=path))\n\n def verify(self, path, reboot_post_install=False):\n\n if not os.path.exists(path):\n raise VerifyException(errno.EEXIST, 'File does not exist')\n\n if not tarfile.is_tarfile(path):\n raise VerifyException(errno.EEXIST, 'File does not exist')\n\n return ['root']\n\n def run(self, path, reboot_post_install=False):\n self.set_progress(0, 'Extracting update from tarfile...')\n\n cache_dir = self.dispatcher.call_sync('update.update_cache_getter', 'cache_dir')\n if cache_dir is None:\n try:\n cache_dir = self.dispatcher.call_sync(\n 'system_dataset.request_directory', 'update'\n )\n except RpcException:\n cache_dir = '/var/tmp/update'\n # Frozen tarball. We'll extract it into the cache directory, and\n # then add a couple of things to make it pass sanity, and then apply it.\n # For now we just copy the code above.\n # First, remove the cache directory\n # Hrm, could overstep a locked file.\n shutil.rmtree(cache_dir, ignore_errors=True)\n try:\n os.makedirs(cache_dir)\n except BaseException as e:\n raise TaskException(\n errno.EPERM,\n \"Unable to create cache directory {0}: {1}\".format(cache_dir, str(e))\n )\n\n try:\n with tarfile.open(path) as tf:\n files = tf.getmembers()\n for f in files:\n if f.name in (\"./\", \".\", \"./.\"):\n continue\n if not f.name.startswith(\"./\"):\n continue\n if len(f.name.split(\"/\")) != 2:\n continue\n tf.extract(f.name, path=cache_dir)\n except BaseException as e:\n raise TaskException(\n errno.EIO,\n \"Unable to extract frozen update {0}: {1}\".format(path, str(e))\n )\n\n config = Configuration.SystemConfiguration()\n # Exciting! 
Now we need to have a SEQUENCE file, or it will fail verification.\n with open(os.path.join(cache_dir, \"SEQUENCE\"), \"w\") as s:\n s.write(config.SystemManifest().Sequence())\n # And now the SERVER file\n with open(os.path.join(cache_dir, \"SERVER\"), \"w\") as s:\n s.write(config.UpdateServerName())\n\n # Now we can go for the update apply task\n self.run_subtask_sync(\n 'update.apply',\n reboot_post_install,\n progress_callback=lambda p, m='Installing updates from extracted tarfile', e=None: self.chunk_progress(0, 100, '', p, m, e)\n )\n\n\n@accepts(bool)\n@description(\"Applies cached updates\")\nclass UpdateApplyTask(ProgressTask):\n @classmethod\n def early_describe(cls):\n return \"Applying updates\"\n\n def describe(self, reboot_post_install=False):\n return TaskDescription(\"Applying updates\")\n\n def verify(self, reboot_post_install=False):\n return ['root']\n\n def update_progress(self, progress, message):\n self.set_progress(progress, message)\n\n def run(self, reboot_post_install=False):\n self.set_progress(0, 'Applying Updates...')\n handler = UpdateHandler(self.dispatcher, update_progress=self.update_progress)\n cache_dir = self.dispatcher.call_sync('update.update_cache_getter', 'cache_dir')\n if cache_dir is None:\n try:\n cache_dir = self.dispatcher.call_sync(\n 'system_dataset.request_directory', 'update'\n )\n except RpcException:\n cache_dir = '/var/tmp/update'\n new_manifest = Manifest.Manifest(require_signature=True)\n try:\n new_manifest.LoadPath(cache_dir + '/MANIFEST')\n except ManifestInvalidSignature as e:\n logger.error(\"Cached manifest has invalid signature: %s\" % str(e))\n raise TaskException(errno.EINVAL, str(e))\n except FileNotFoundError as e:\n raise TaskException(\n errno.EIO,\n 'No Manifest file found at path: {0}'.format(cache_dir + '/MANIFEST')\n )\n version = new_manifest.Version()\n # Note: for now we force reboots always, TODO: Fix in M3-M4\n try:\n result = ApplyUpdate(\n cache_dir,\n install_handler=handler.install_handler,\n progressFunc=handler.install_progress_handler,\n force_reboot=True\n )\n except ManifestInvalidSignature as e:\n logger.debug('UpdateApplyTask Error: Cached manifest has invalid signature: %s', e)\n raise TaskException(\n errno.EINVAL, 'Cached manifest has invalid signature: {0}'.format(str(e))\n )\n except UpdateBootEnvironmentException as e:\n logger.debug('UpdateApplyTask Boot Environment Error: {0}'.format(str(e)))\n raise TaskException(errno.EAGAIN, str(e))\n except UpdatePackageException as e:\n logger.debug('UpdateApplyTask Package Error: {0}'.format(str(e)))\n raise TaskException(errno.EAGAIN, str(e))\n except Exception as e:\n raise TaskException(\n errno.EAGAIN, 'Got exception {0} while trying to Apply Updates'.format(str(e))\n )\n if result is None:\n raise TaskException(errno.ENOENT, 'No downloaded Updates available to apply.')\n handler.finished = True\n handler.emit_update_details()\n self.dispatcher.call_sync('update.update_alert_set', 'UpdateInstalled', version)\n update_cache_value_dict = default_update_dict.copy()\n update_cache_value_dict.update({\n 'installed': True,\n 'installed_version': version\n })\n self.dispatcher.call_sync('update.update_cache_putter', update_cache_value_dict)\n message = 'Updates finished installing successfully'\n if reboot_post_install:\n message = 'Scheduling user specified reboot post succesfull update'\n # Note not using subtasks on purpose as they do not have queuing logic\n self.dispatcher.submit_task('system.reboot', 3)\n self.set_progress(100, 
message)\n\n\n@description(\"Verify installation integrity\")\n@accepts()\nclass UpdateVerifyTask(ProgressTask):\n @classmethod\n def early_describe(cls):\n return \"Verifying installation integrity\"\n\n def describe(self):\n return TaskDescription(\"Verifying installation integrity\")\n\n def verify(self):\n return [update_resource_string]\n\n def verify_handler(self, index, total, fname):\n self.set_progress(int(index * 100 / total), 'Verifying {0}'.format(fname))\n\n def run(self):\n try:\n error_flag, ed, warn_flag, wl = Configuration.do_verify(self.verify_handler)\n except Exception as e:\n raise TaskException(\n errno.EAGAIN, 'Got exception while verifying install: {0}'.format(str(e))\n )\n return {\n 'checksum': ed['checksum'],\n 'notfound': ed['notfound'],\n 'wrongtype': ed['wrongtype'],\n 'perm': wl,\n 'error': error_flag,\n 'warn': warn_flag,\n }\n\n\n@description(\"Checks for updates from the update server and downloads them if available\")\n@accepts()\n@returns(bool)\nclass CheckFetchUpdateTask(ProgressTask):\n @classmethod\n def early_describe(cls):\n return \"Checking for and downloading updates\"\n\n def describe(self):\n return TaskDescription(\"Checking for and downloading updates\")\n\n def verify(self):\n block = self.dispatcher.resource_graph.get_resource(update_resource_string)\n if block is not None and block.busy:\n raise VerifyException(errno.EBUSY, (\n 'An update operation (Configuration/Downloading/Applying '\n 'the updates) is already in the queue; please retry later'\n ))\n\n return []\n\n def run(self):\n result = False\n self.set_progress(0, 'Checking for new updates from update server')\n self.run_subtask_sync(\n 'update.check',\n progress_callback=lambda p, m='Checking for updates from update server', e=None: self.chunk_progress(0, 10, '', p, m, e)\n )\n if self.dispatcher.call_sync('update.is_update_available'):\n self.set_progress(10, 'New updates found. Downloading them now')\n self.run_subtask_sync(\n 'update.download',\n progress_callback=lambda p, m='New updates found. 
Downloading them now', e=None: self.chunk_progress(10, 100, '', p, m, e)\n )\n self.set_progress(100, 'Updates successfully Downloaded')\n result = True\n else:\n self.set_progress(100, 'No Updates Available')\n return result\n\n\n@description(\"Checks for new updates, fetches if available, installs new/or downloaded updates\")\n@accepts(bool)\nclass UpdateNowTask(ProgressTask):\n @classmethod\n def early_describe(cls):\n return \"Checking for updates and updating\"\n\n def describe(self, reboot_post_install=False):\n return TaskDescription(\"Checking for updates and updating\")\n\n def verify(self, reboot_post_install=False):\n return ['root']\n\n def run(self, reboot_post_install=False):\n self.set_progress(0, 'Checking for new updates')\n self.run_subtask_sync(\n 'update.checkfetch',\n progress_callback=lambda p, m='Checking for new updates', e=None: self.chunk_progress(0, 50, '', p, m, e)\n )\n if self.dispatcher.call_sync('update.is_update_available'):\n self.set_progress(50, 'Installing downloaded updates now')\n self.run_subtask_sync(\n 'update.apply',\n reboot_post_install,\n progress_callback=lambda p, m='Installing downloaded updates now', e=None: self.chunk_progress(50, 100, '', p, m, e)\n )\n self.set_progress(100, 'Updates Installed successfully')\n result = True\n else:\n self.add_warning(TaskWarning(errno.ENOENT, 'No Updates Available for Install'))\n self.set_progress(100, 'No Updates Available for Install')\n result = False\n\n return result\n\n\ndef _depends():\n return ['CalendarTasksPlugin', 'SystemDatasetPlugin', 'AlertPlugin']\n\n\ndef _init(dispatcher, plugin):\n # Register Schemas\n plugin.register_schema_definition('Update', {\n 'type': 'object',\n 'properties': {\n 'train': {'type': 'string'},\n 'check_auto': {'type': 'boolean'},\n 'internal': {'type': 'boolean'},\n 'update_server': {'type': 'string', 'readOnly': True},\n },\n })\n\n plugin.register_schema_definition('UpdateProgress', {\n 'type': 'object',\n 'properties': {\n 'operation': {'$ref': 'UpdateProgressOperation'},\n 'details': {'type': 'string'},\n 'indeterminate': {'type': 'boolean'},\n 'percent': {'type': 'integer'},\n 'reboot': {'type': 'boolean'},\n 'pkg_name': {'type': 'string'},\n 'pkg_version': {'type': 'string'},\n 'error': {'type': 'boolean'},\n 'finished': {'type': 'boolean'}\n }\n })\n\n plugin.register_schema_definition('UpdateProgressOperation', {\n 'type': 'string',\n 'enum': ['DOWNLOADING', 'INSTALLING']\n })\n\n plugin.register_schema_definition('UpdateOps', {\n 'type': 'object',\n 'properties': {\n 'operation': {'$ref': 'UpdateOpsOperation'},\n 'new_name': {'type': ['string', 'null']},\n 'new_version': {'type': ['string', 'null']},\n 'previous_name': {'type': ['string', 'null']},\n 'previous_version': {'type': ['string', 'null']},\n }\n })\n\n plugin.register_schema_definition('UpdateOpsOperation', {\n 'type': 'string',\n 'enum': ['delete', 'install', 'upgrade']\n })\n\n plugin.register_schema_definition('UpdateInfo', {\n 'type': 'object',\n 'properties': {\n 'available': {'type': 'boolean'},\n 'notes': {'type': 'object'},\n 'notice': {'type': 'string'},\n 'changelog': {\n 'type': 'array',\n 'items': {'type': 'string'},\n },\n 'operations': {'$ref': 'UpdateOps'},\n 'downloaded': {'type': 'boolean'},\n 'version': {'type': 'string'},\n 'installed': {'type': 'boolean'},\n 'installed_version': {'type': 'string'}\n }\n })\n\n plugin.register_schema_definition('UpdateTrain', {\n 'type': 'object',\n 'properties': {\n 'name': {'type': 'string'},\n 'description': {'type': 'string'},\n 
'sequence': {'type': 'string'},\n 'current': {'type': 'boolean'},\n }\n })\n\n # Register providers\n plugin.register_provider(\"update\", UpdateProvider)\n\n # Register task handlers\n plugin.register_task_handler(\"update.update\", UpdateConfigureTask)\n plugin.register_task_handler(\"update.check\", CheckUpdateTask)\n plugin.register_task_handler(\"update.download\", DownloadUpdateTask)\n plugin.register_task_handler(\"update.manual\", UpdateManualTask)\n plugin.register_task_handler(\"update.apply\", UpdateApplyTask)\n plugin.register_task_handler(\"update.verify\", UpdateVerifyTask)\n plugin.register_task_handler(\"update.checkfetch\", CheckFetchUpdateTask)\n plugin.register_task_handler(\"update.updatenow\", UpdateNowTask)\n\n # Register Event Types\n plugin.register_event_type('update.in_progress', schema=h.ref('UpdateProgress'))\n plugin.register_event_type('update.update_info.updated')\n plugin.register_event_type('update.changed')\n\n # Register resources\n plugin.register_resource(Resource(update_resource_string), ['system', 'system-dataset'])\n\n # Get the Update Cache (if any) at system boot (and hence in init here)\n # Do this in parallel so that a failed cache generation does not take the\n # entire dispatcher start/restart with it (See Ticket: #12892)\n gevent.spawn(generate_update_cache, dispatcher)\n","sub_path":"src/dispatcher/plugins/UpdatePlugin.py","file_name":"UpdatePlugin.py","file_ext":"py","file_size_in_byte":44841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"90382093","text":"#!/usr/bin/python\nimport sys\n\ndef main():\n pre, post = open('pre.rawhtml').read(), open('post.rawhtml').read()\n content = open(sys.argv[1] if len(sys.argv)>1 else 'critique.html').read()\n content = content[content.find('<body>')+len('<body>'):content.find('</body>')]\n open('index.html', 'w').write('\\n'.join([pre, content, post]))\n\nif __name__ == '__main__':\n main()\n","sub_path":"generateHTML.py","file_name":"generateHTML.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"571148122","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\nimport requests\nimport time\nimport hashlib\nimport re\n\n\n\n\ndef login():\n login_dict = {\n 'username': \"mikunote\",\n 'password': \"\",\n 'imgcode': \"\",\n 'f': 'json'\n }\n\n login_res = requests.post(\n url=\"http://www.liuxing999.com/mindex.php\",\n data=login_dict,\n # Referer URL\n headers={'Referer': 'http://www.liuxing999.com/?p=login'})\n\n # After a successful login, grab the cookies from the server's response\n\n resp_cookies_dict = login_res.cookies.get_dict()\n\n\n rep = requests.get(\"http://www.liuxing999.com/?p=online\", cookies=resp_cookies_dict)\n # After a successful login, get the response content from the server\n resp_text = login_res.text\n print(rep.text)\n\n print(resp_cookies_dict)\n\n\nlogin()\n\n","sub_path":"PycharmProjects/learn/爬虫相关/test/liux.py","file_name":"liux.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"407995229","text":"#!/usr/bin/env python\n\n\"\"\"\n\tsysmon\t\n\t~~~~~~\n\tsysmon is a simple program, written around psutil, to monitor \n\tvarious system metrics and output them into log files\n\"\"\"\n\n__author__ = \"Oliver\"\n__version__ = \"1\"\n__date__ = \"Date: 10-2015\"\n\nimport argparse, os, psutil, time, numpy, json\n\n# -------------------------------------\n\ndef main():\n\t\"\"\"Main block\"\"\"\n\n\t# get arguments dictionary\n\t(args, parser) = get_arg()\n\n\t### variables\n\tcounter = 
1\t\t\t# a counter\n\t###\n\n\t# define a SysCharacteristics() object\n\tsyschar_obj = SysCharacteristics(args)\n\n\t# print the header for the first log file (overwriting any file of the same name)\n\twith open(args.output, 'w') as f:\n\t\tf.write(\"## Date: \" + time.strftime(\"%c\") + \"\\n\")\n\t\tf.write(\"## Interval: \" + str(args.interval) + \" sec\\n\")\n\t\tf.write(\"## Number of CPUs: \" + str(len(psutil.cpu_percent(interval=1, percpu=True))) + \"\\n\")\n\t\tf.write(\"## Total Disk Space in \" + args.mydir + \" : \" + str(formatbyte(psutil.disk_usage(args.mydir).total)) + \"G\\n\")\n\t\tf.write(\"\\t\".join(syschar_obj.headerlist) + \"\\n\")\n\n\t# print the header for the second log file\n\twith open(args.output2, 'w') as f:\n\t\tf.write(\"## Date: \" + time.strftime(\"%c\") + \"\\n\")\n\t\tf.write(\"## Interval: \" + str(args.interval) + \" sec\\n\")\n\t\tf.write(\"## Window (number of points): \" + str(args.window) + \"\\n\")\n\n\t# loop until counter hits max number of lines, OR forever if no max specified\n\twhile (counter <= args.maxlines or args.maxlines == 0):\n\n\t\t# for each sys metric, we're going to accumulate its values (taken per interval) in a list\n\t\t# and we'll continue to do this until the list is of size == window, at which point we flush \n\t\t# the lists and start over again\n\t\tsyschar_obj.set_me()\n\n\t\t# now that we've set these lists, we're going to retrieve them, also adding an index to the beginning of the list\n\t\tdatalist = [counter] + syschar_obj.get_me()\n\n\t\t# first log file: print line of metrics (tab-delimit, cast nums as strings)\n\t\twith open(args.output, 'a') as f:\n\t\t\tf.write(\"\\t\".join(map(str, datalist)) + \"\\n\")\n\t\t\t# if there's a warning, print it\n\t\t\tif syschar_obj.warning:\n\t\t\t\tf.write(syschar_obj.warning + \"\\n\")\n\n\t\t# if we hit the window size, we're going to extract some properties from the lists of metrics, then clear them\n\t\tif (counter % args.window == 0):\n\t\t\t# second log file: print stats for the window in a JSONy format\n\t\t\twith open(args.output2, 'a') as f:\n\t\t\t\tf.write(\"# timestamp: \" + str(int(time.time())) + \"\\n\")\n\t\t\t\tf.write(json.dumps(syschar_obj.getstats(), indent=4))\n\t\t\t\tf.write(\"\\n\")\n\t\t\t# clear the lists\n\t\t\tsyschar_obj.clear_lists()\n\n\t\t# sleep for the specified interval and increment counter\n\t\t# (in practice, the user has to wait longer than interval b/c it takes time to collect the metrics)\n\t\ttime.sleep(args.interval)\n\t\tcounter += 1\n\n# -------------------------------------\n\ndef get_arg():\n\t\"\"\"Get arguments from the user with argparse\"\"\"\n\n\tprog_description = \"\"\"sysmon is a simple program to monitor various system metrics and output them to log files\"\"\"\n\n\t# http://docs.python.org/2/howto/argparse.html\n\tparser = argparse.ArgumentParser(description=prog_description)\n\n\tparser.add_argument(\"--interval\",\"-i\",\ttype=int,\tdefault=1,\t\thelp=\"interval, in seconds, at which to take data (default: 1)\")\n\tparser.add_argument(\"--window\",\"-w\",\ttype=int,\tdefault=10,\t\thelp=\"number of data points over which to average data (default: 10)\")\n\tparser.add_argument(\"--maxlines\",\"-m\",\ttype=int,\tdefault=0,\t\thelp=\"max number of lines in the log file (default: unlimited)\")\n\tparser.add_argument(\"--warncpu\",\"-c\",\ttype=float,\tdefault=90,\t\thelp=\"threshold of percent CPU usage - if over this limit, output a warning (default: 
90)\")\n\tparser.add_argument(\"--warndisk\",\"-k\",\ttype=float,\tdefault=90,\t\thelp=\"threshold of percent disk usage - if over this limit, output a warning (default: 90)\")\n\tparser.add_argument(\"--mydir\",\"-d\",\t\t\tdefault=\"/\",\t\thelp=\"the directory in which disk usage is calculated (default: / )\")\n\tparser.add_argument(\"--output\",\"-o\",\t\t\tdefault=\"sys.log.txt\",\thelp=\"output log file (default: sys.log.txt)\")\n\tparser.add_argument(\"--output2\",\"-o2\",\t\t\tdefault=\"sys.log2.txt\",\thelp=\"another output log file (default: sys.log2.txt)\")\n\n\targs = parser.parse_args()\n\n\treturn args, parser\n\n# -------------------------------------\n\ndef formatbyte(num_bytes):\n\t\"\"\"Format bytes into human readable form\"\"\"\n\n\t### variables\n\tdivisor = 1024.0 ** 3\t\t# to convert bytes into gigabytes, divide by this\n\tsigfig = 2\t\t\t# significant figures\n\n\treturn round(num_bytes/divisor,sigfig)\n\n# -------------------------------------\n\nclass SysCharacteristics():\n\t\"\"\"A class whose object has information about system characteristics\"\"\"\n\n\tdef __init__ (self, args):\n\t\t\"\"\"Constructor\"\"\"\n\n\t\t# set arguments dictionary\n\t\tself.args = args \n\t\t# clear all the lists of sys metrics\n\t\tself.clear_lists()\n\t\t# a warning message (empty to begin with)\n\t\tself.warning = \"\"\n\t\t# a list of the names of the metrics to monitor\n\t\tself.headerlist = \t[\t\n\t\t\t\t\t\t'index', \n\t\t\t\t\t\t'DiskUsage(G)', \n\t\t\t\t\t\t'PercentDiskUsage', \n\t\t\t\t\t\t'PercentCpuUsage', \n\t\t\t\t\t\t'MemUsed(G)', \n\t\t\t\t\t\t'PercentMemUsed', \n\t\t\t\t\t\t'MemAvailable(G)', \n\t\t\t\t\t\t'PercentSwapMemUsed'\n\t\t\t\t\t\t# 'netwrkI/O-data-sent(G)', \n\t\t\t\t\t\t# 'netwrkI/O-data-recv(G)'\n\t\t\t\t\t]\t\n\n\n\tdef clear_lists(self):\n\t\t\"\"\"Clear lists\"\"\"\n\n\t\tself.du = []\t# disk usage\n\t\tself.pdu = []\t# perc disk usage\n\t\tself.pcu = []\t# perc cpu usage\n\t\tself.mu = []\t# mem used\n\t\tself.pmu = []\t# perc mem used\n\t\tself.ma = []\t# mem available\n\t\tself.psu = []\t# percent swap mem used\n\n\tdef set_me(self):\n\t\t\"\"\"Set variables storing sys metrics\"\"\"\n\n\t\t# clear warning\n\t\tself.warning = \"\"\n\n\t\t# store various system metrics from psutil\n\t\tself.du.append(formatbyte(psutil.disk_usage(self.args.mydir).used))\n\t\tself.pdu.append(int(100*psutil.disk_usage(self.args.mydir).used/psutil.disk_usage(self.args.mydir).total))\n\t\tself.pcu.append(psutil.cpu_percent(interval=1))\n\t\tself.mu.append(formatbyte(psutil.virtual_memory().used))\n\t\tself.pmu.append(psutil.virtual_memory().percent)\n\t\tself.ma.append(formatbyte(psutil.virtual_memory().available))\n\t\tself.psu.append(psutil.swap_memory().percent)\n\t\t# formatbyte(psutil.net_io_counters().bytes_sent)\n\t\t# formatbyte(psutil.net_io_counters().bytes_recv)\n\n\tdef get_me(self):\n\t\t\"\"\"Check for warnings, return values of variables storing sys metrics as a list\"\"\"\n\n\t\t# check if certain metrics are above suggested thresholds\n\t\tself.check_warning()\n\n\t\t# return list of various system metrics\n\t\t# note: we want the most recent value of the metric --- i.e., the last one in the list that we accumulate\n\t\treturn [\t\n\t\t\t\tself.du[-1],\t# (the [-1] takes the last elt of the list)\n\t\t\t\tself.pdu[-1],\n\t\t\t\tself.pcu[-1],\n\t\t\t\tself.mu[-1],\n\t\t\t\tself.pmu[-1],\n\t\t\t\tself.ma[-1],\n\t\t\t\tself.psu[-1]\n\t\t\t]\n\n\tdef check_warning(self):\n\t\t\"\"\"Check if certain metrics are above threshold and, if so, set warning 
message\"\"\"\n\n\t\t# check if certain metrics are above suggested thresholds\n\t\tif self.pcu[-1] > self.args.warncpu:\n\t\t\tself.warning = \"# WARNING: %CpuUsage > \" + str(self.args.warncpu)\n\n\t\tif self.pdu[-1] > self.args.warndisk:\n\t\t\t# if warning already contains a string, add a newline\n\t\t\tif self.warning:\n\t\t\t\tself.warning += \"\\n\"\n\t\t\tself.warning += \"# WARNING: %DiskUsage > \" + str(self.args.warndisk)\n\n\tdef getstats(self):\n\t\t\"\"\"Get basic stats from lists of quantities\"\"\"\n\n\t\t# a dict containing basic stats (mean, stdev, min, max) about each monitored quantity (for json format)\n\t\tdstats = {}\n\n\t\t# a dict mapping attribute names to human readable names\n\t\tdnames = {'du':'DiskUsage(G)', 'pdu':'%DiskUsage', 'pcu':'%CpuUsage', 'mu':'MemUsed(G)', 'pmu':'%MemUsed', 'ma':'MemAvailable(G)', 'psu':'PercSwapMemUsed'}\n\n\t\t# loop thro attributes\n\t\tfor i in ['du', 'pdu', 'pcu', 'mu', 'pmu', 'ma', 'psu']:\n\n\t\t\t# get the list\n\t\t\tmylist = getattr(self, i)\n\n\t\t\t# define dict entry\n\t\t\tdstats[dnames[i]] = \t{\t\n\t\t\t\t\t\t\t\"max\": max(mylist),\n\t\t\t\t\t\t\t\"min\": min(mylist), \n\t\t\t\t\t\t\t\"ave\": round(numpy.mean(mylist),2), \n\t\t\t\t\t\t\t\"stdev\": round(numpy.std(mylist),2)\n\t\t\t\t\t\t}\n\n\t\treturn dstats\n\n# -------------------------------------\n\nif __name__ == \"__main__\":\n\n\tmain()\n","sub_path":"sysmon.py","file_name":"sysmon.py","file_ext":"py","file_size_in_byte":8024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"197193383","text":"from flask import Flask, render_template, request, url_for, flash, redirect\nfrom forms import SignupForm, LoginForm, SearchForm\nfrom APIs.PixabayAPI import get_image_url\nfrom APIs.WikiAPI import get_description\n\napp = Flask(__name__)\n\n# Move to config file and generate a new secret key later, put key in env variables\napp.config['SECRET_KEY'] = 'a1db3c8a527d33eb2cba3821dac32950787f226c19cc479947f720306e867217'\n\n\n@app.route('/signup', methods=['POST', 'GET'])\ndef signup():\n form = SignupForm()\n if form.validate_on_submit():\n flash(f'Account created for {form.username.data}!', 'success')\n return redirect(url_for('home'))\n return render_template('signup.html', title='Sign up', form=form)\n\n\n@app.route('/login', methods=['POST', 'GET'])\ndef login():\n form = LoginForm()\n if form.validate_on_submit():\n if form.email.data == 'admin@email.com' and form.password.data == 'password':\n flash(f'You have been logged in!', 'success')\n return redirect(url_for('home'))\n else:\n flash(f'Login failed. 
Invalid username and/or password.', 'danger')\n return render_template('login.html', title='Log in', form=form)\n\n\n@app.route('/', methods=['POST', 'GET'])\n@app.route('/<user>', methods=['POST', 'GET'])\ndef home(user=None):\n form = SearchForm()\n if form.validate_on_submit():\n return redirect(url_for('home'))\n return render_template('home.html', user=user, form=form)\n\n\n@app.route('/result', methods=['POST', 'GET'])\n@app.route('/<user>/result', methods=['POST', 'GET'])\ndef result(user=None):\n form = SearchForm()\n if request.method == 'POST':\n search_text = request.form['search']\n if search_text != \"\":\n image_url = get_image_url(search_text)\n description = get_description(search_text)\n return render_template('home.html', image_url=image_url, description=description,\n search_text=search_text, user=user, title='Search result', form=form)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"568165998","text":"from sklearn.datasets import load_boston\nfrom sklearn.preprocessing import MinMaxScaler, PolynomialFeatures\n\ndef load_scaled_boston_data():\n boston = load_boston()\n X = boston.data\n\n ## Never do this in a real ML project. Here we fit the scaler to the entire dataset, aargh\n X = MinMaxScaler().fit_transform(boston.data)\n X = PolynomialFeatures(degree=2, include_bias=False).fit_transform(X)\n return X, boston.target","sub_path":"session-6/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"585550485","text":"from __future__ import unicode_literals\n\nimport logging\nimport tempfile\nimport os.path\nimport sys\n\nfrom zest.releaser.utils import fs_to_text\nfrom zest.releaser.utils import execute_command\nfrom zest.releaser.vcs import BaseVersionControl\n\nlogger = logging.getLogger(__name__)\n\n\nclass Git(BaseVersionControl):\n \"\"\"Command proxy for Git\"\"\"\n internal_filename = '.git'\n setuptools_helper_package = 'setuptools-git'\n\n def is_setuptools_helper_package_installed(self):\n # The package is setuptools-git with a dash, the module is\n # setuptools_git with an underscore. Thanks.\n try:\n __import__('setuptools_git')\n except ImportError:\n return False\n return True\n\n @property\n def name(self):\n package_name = self.get_setup_py_name()\n if package_name:\n return package_name\n # No setup.py? 
With git we can probably only fall back to the directory\n # name as there's no svn-url with a usable name in it.\n dir_name = os.path.basename(os.getcwd())\n dir_name = fs_to_text(dir_name)\n return dir_name\n\n def available_tags(self):\n tag_info = execute_command('git tag')\n tags = [line for line in tag_info.split('\\n') if line]\n logger.debug(\"Available tags: %r\", tags)\n return tags\n\n def prepare_checkout_dir(self, prefix):\n # Watch out: some git versions can't clone into an existing\n # directory, even when it is empty.\n temp = tempfile.mkdtemp(prefix=prefix)\n cwd = os.getcwd()\n os.chdir(temp)\n cmd = 'git clone %s %s' % (self.reporoot, 'gitclone')\n logger.debug(execute_command(cmd))\n clonedir = os.path.join(temp, 'gitclone')\n os.chdir(clonedir)\n cmd = 'git submodule update --init --recursive'\n logger.debug(execute_command(cmd))\n os.chdir(cwd)\n return clonedir\n\n def tag_url(self, version):\n # this doesn't apply to Git, so we just return the\n # version name given ...\n return version\n\n def cmd_diff(self):\n return 'git diff'\n\n def cmd_commit(self, message):\n return 'git commit -a -m \"%s\"' % message\n\n def cmd_diff_last_commit_against_tag(self, version):\n return \"git diff %s\" % version\n\n def cmd_log_since_tag(self, version):\n \"\"\"Return log since a tagged version till the last commit of\n the working copy.\n \"\"\"\n return \"git log %s..HEAD\" % version\n\n def cmd_create_tag(self, version):\n msg = \"Tagging %s\" % (version,)\n cmd = 'git tag %s -m \"%s\"' % (version, msg)\n if os.path.isdir('.git/svn'):\n print(\"\\nEXPERIMENTAL support for git-svn tagging!\\n\")\n cur_branch = open('.git/HEAD').read().strip().split('/')[-1]\n print(\"You are on branch %s.\" % (cur_branch,))\n if cur_branch != 'master':\n print(\"Only the master branch is supported for \"\n \"git-svn tagging.\")\n print(\"Please tag yourself.\")\n print(\"'git tag' needs to list tag named %s.\" % (version,))\n sys.exit(1)\n cmd = [cmd]\n\n trunk = None\n # In Git v2.0, the default prefix will change from \"\" (no prefix) to \"origin/\",\n # try both here.\n for t in ['.git/refs/remotes/trunk', '.git/refs/remotes/origin/trunk']:\n if os.path.isfile(t):\n trunk = open(t).read()\n\n if not trunk:\n print('No SVN remote found (only the default svn ' +\n 'prefixes (\"\" or \"origin/\") are supported).')\n sys.exit(1)\n\n local_head = open('.git/refs/heads/master').read()\n if local_head != trunk:\n print(\"Your local master diverges from trunk.\\n\")\n # dcommit before local tagging\n cmd.insert(0, 'git svn dcommit')\n # create tag in svn\n cmd.append('git svn tag -m \"%s\" %s' % (msg, version))\n return cmd\n\n def cmd_checkout_from_tag(self, version, checkout_dir):\n if not (os.path.realpath(os.getcwd()) ==\n os.path.realpath(checkout_dir)):\n # Specific to git: we need to be in that directory for the command\n # to work.\n logger.warn(\"We haven't been chdir'ed to %s\", checkout_dir)\n sys.exit(1)\n return 'git checkout %s && git submodule update --init --recursive' % \\\n version\n\n def is_clean_checkout(self):\n \"\"\"Is this a clean checkout?\n \"\"\"\n head = execute_command('git symbolic-ref --quiet HEAD')\n # This returns something like 'refs/heads/maurits-warn-on-tag'\n # or nothing. 
Nothing would be bad as that indicates a\n # detached head: likely a tag checkout\n if not head:\n # Greetings from Nearly Headless Nick.\n return False\n if execute_command('git status --short --untracked-files=no'):\n # Uncommitted changes in files that are tracked.\n return False\n return True\n\n def push_commands(self):\n \"\"\"Push changes to the server.\"\"\"\n return ['git push', 'git push --tags']\n\n def list_files(self):\n \"\"\"List files in version control.\"\"\"\n return execute_command('git ls-tree -r HEAD --name-only').splitlines()\n","sub_path":"env/lib/python2.7/site-packages/zest/releaser/git.py","file_name":"git.py","file_ext":"py","file_size_in_byte":5387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"245532890","text":"import collections\n\nfrom flask import url_for\n\n\nclass URLQueue:\n def __init__(self, app):\n self.seen_values = collections.defaultdict(set)\n self.stack = ['/']\n self.all = []\n self.app = app\n\n def append(self, endpoint, values, current):\n real_endpoint = endpoint\n if 'report_name' in values:\n endpoint = values['report_name']\n if endpoint in ['document', 'source'] or endpoint.startswith('api_') \\\n or 'REPLACEME' in values.values():\n return\n filters = ['account', 'interval', 'payee', 'tag', 'time']\n value_keys = frozenset([key for key, value in values.items()\n if value and key not in filters])\n\n if value_keys not in self.seen_values[endpoint]:\n self.seen_values[endpoint].add(value_keys)\n with self.app.test_request_context(current):\n self.app.preprocess_request()\n url = url_for(real_endpoint, loop=True, **values)\n self.all.append((real_endpoint, values))\n self.stack.append(url)\n\n def pop(self):\n return self.stack.pop()\n\n\nfilter_combinations = [\n {'account': 'Assets'},\n {'time': '2015'},\n {'payee': 'BayBook'},\n {'tag': 'tag1, tag2'},\n {'time': '2015', 'payee': 'BayBook'},\n]\n\n\ndef test_scrape(app):\n urls = URLQueue(app)\n current = urls.stack[0]\n\n @app.url_defaults\n def collect_urls(endpoint, values):\n if 'loop' in values:\n values.pop('loop')\n return\n urls.append(endpoint, values, current)\n\n test_app = app.test_client()\n\n while urls.stack:\n print(current)\n rv = test_app.get(current)\n assert rv.status_code in [200, 302]\n current = urls.pop()\n\n for url_values in urls.all:\n for filters in filter_combinations:\n values = url_values[1].copy()\n values.update(filters)\n\n with app.test_request_context(current):\n app.preprocess_request()\n url = url_for(url_values[0], **values)\n\n print(url)\n rv = test_app.get(url)\n assert rv.status_code in [200, 302]\n","sub_path":"tests/test_scrape.py","file_name":"test_scrape.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"181660499","text":"import shutil\r\nimport time\r\nimport pickle\r\nimport codecs\r\nimport json\r\nimport re\r\nimport os\r\n\r\nimport pandas as pd\r\n\r\nfrom bll.generator import mapjson\r\nfrom bll.generator import voronoiareas as VoronoiFactory\r\nfrom bll.generator import areas as areaFactory\r\nfrom bll.generator import armies as armyFactory\r\n\r\nfrom dal.repositories.AreaRepository import areaRepository\r\nfrom dal.repositories.MilitaryRepository import militaryRepository\r\nfrom dal.repositories.CountryRepository import countryRepository\r\n\r\nmodeName = None\r\ndataset = None\r\ngstages = ['voronoi', 'conns', 'attrs']\r\n\r\ndef load():\r\n source = 
'bll/content/generator/datasets/{}'\r\n\r\n locsAll: pd.DataFrame = None\r\n if dataset == 'cities7K':\r\n locsAll = pd.read_csv(source.format('simplemaps-worldcities-basic.csv'))\r\n locsAll.columns = ['fid','city','name','lat','lng','pop','country','iso','iso3','prov']\r\n locsAll['coord'] = list(zip(round(locsAll.lng,4), round(locsAll.lat,4)))\r\n\r\n #locsAll['type'] = AreaType.NONE.value\r\n #locsAll.set_index('name')\r\n elif dataset == 'geonames':\r\n raise Exception(\"Generator 'geonames' is not yet implemented\")\r\n return locsAll\r\n\r\ndef init():\r\n global dataset, modeName\r\n dataset = 'cities7K'\r\n\r\n VoronoiFactory.init()\r\n areaFactory.init()\r\n armyFactory.init()\r\n\r\ndef generateMap(purged):\r\n ser_path = 'bll/content/generator/binary/areas_{}.dat'\r\n gstages_needed = []\r\n areasByCountry = {}; skippedAreas = []\r\n\r\n # Load locs and create countries\r\n t1 = time.time()\r\n print(\"Loading locs file...\")\r\n locsAll = load()\r\n\r\n print(\"Adding iso modifications...\")\r\n areaFactory.changeIso(locsAll)\r\n\r\n print(\"Filtering locs... \")\r\n filtered = []; countryPops = {}\r\n for iso, group in locsAll.groupby('iso'):\r\n popFilter = areaFactory.getPopFilter(iso, nlocs=len(group))\r\n filt = group[group['pop'] >= group['pop'].quantile(popFilter)]\r\n filtered.append(filt)\r\n countryPops[iso] = filt['pop'].sum()\r\n\r\n areaFactory.countryPops = countryPops\r\n locs = pd.concat(filtered, ignore_index=True)\r\n del filtered, locsAll, countryPops\r\n t2 = time.time()\r\n print(' Took {}s'.format(round(t2 - t1, 2)))\r\n\r\n # get latest serialized area stage\r\n\r\n for stage in reversed(gstages):\r\n if os.path.exists(ser_path.format(stage)):\r\n print(\"Found latest version of 'areasByCountry': {}\".format(stage))\r\n areasByCountry = pickle.load(open(ser_path.format(stage), 'rb'))\r\n break\r\n else:\r\n gstages_needed.insert(0, stage)\r\n if len(areasByCountry) == 0:\r\n gstages_needed = gstages.copy()\r\n\r\n\r\n # Go through each unfinished stage and render\r\n for stage in gstages_needed:\r\n if stage == 'voronoi':\r\n print(\"Generating areas... 
\")\r\n t1 = time.time()\r\n areasByCountry, skippedAreas = VoronoiFactory.generateAreas(locs=locs, debug=False)\r\n t2 = time.time()\r\n print(' Took {}s'.format(round(t2-t1,2)))\r\n\r\n elif stage == 'conns':\r\n print(\"Creating area connections...\")\r\n t1 = time.time()\r\n areaFactory.addConnections(areasByCountry)\r\n t2 = time.time()\r\n print(' Took {}s'.format(round(t2-t1,2)))\r\n\r\n elif stage == 'attrs':\r\n # update _loc references, if we pickled the voronoi step\r\n if 'voronoi' not in gstages_needed:\r\n for id, loc in locs.iterrows():\r\n if loc['iso'] in areasByCountry:\r\n if loc['fid'] in areasByCountry[loc['iso']]:\r\n areasByCountry[loc['iso']][loc['fid']]._loc = loc\r\n del locs\r\n\r\n print(\"Applying attribute manipulations...\")\r\n t1 = time.time()\r\n areaFactory.changeSites(areasByCountry)\r\n t2 = time.time()\r\n print(' Took {}s'.format(round(t2-t1,2)))\r\n\r\n\r\n # save after each step\r\n pickle.dump(areasByCountry, open(ser_path.format(stage), 'wb'))\r\n\r\n\r\n if not os.path.exists(ser_path.format('saved')) or purged:\r\n print(\"Creating countries from areas...\")\r\n\r\n mapjson.createCountriesFile(areasByCountry)\r\n\r\n print(\"Saving data...\")\r\n t1 = time.time()\r\n areasDB = []; countriesDB = []\r\n for iso in list(areasByCountry.keys()):\r\n areas = areasByCountry[iso]\r\n if len(areas) == 0:\r\n del areasByCountry[iso]\r\n continue\r\n\r\n countriesDB.append(areaFactory.createCountry(iso, areas.values()))\r\n areasDB.extend(areas.values())\r\n\r\n\r\n countryRepository.create(countriesDB)\r\n areaRepository.create(areasDB)\r\n\r\n # save polygons to postGIS & create geojson cache\r\n for area in areasDB:\r\n areaRepository.edit(area.id, geom=area.geom)\r\n mapjson.createAreasFile(areasDB)\r\n\r\n if len(skippedAreas) > 0:\r\n for area in skippedAreas:\r\n if area:\r\n areaRepository.createRemoved(area)\r\n\r\n mapjson.createRemovedAreasFile(skippedAreas)\r\n\r\n open(ser_path.format('saved'),'wb').write(b'0x01')\r\n\r\n t2 = time.time()\r\n print(' Took {}s'.format(round(t2 - t1, 2)))\r\n\r\n return areasByCountry\r\n\r\ndef generateWorld(areasByCountry):\r\n\r\n print(\"Creating armies and fleets...\")\r\n t1 = time.time()\r\n mils = armyFactory.createMils(areasByCountry)\r\n t2 = time.time()\r\n print(' Took {}s'.format(round(t2 - t1, 2)))\r\n\r\n print(\"Saving...\")\r\n t1 = time.time()\r\n militaryRepository.create(mils)\r\n # todo: remove this, as moveshipstocoast does the job\r\n #for mil in mils:\r\n # militaryRepository.edit(mil.id, geom=mil.geom)\r\n militaryRepository.updatePutShipToCoast()\r\n militaryRepository.updatePutToArea()\r\n\r\n t2 = time.time()\r\n print(' Took {}s'.format(round(t2 - t1, 2)))\r\n\r\n\r\ndef checkIfExists():\r\n sql = \"SELECT COUNT(*) n FROM areas\"\r\n\r\n result = areaRepository.ctx.query(sql, multiple=False)\r\n\r\n return result and result['n'] > 0\r\n\r\ndef purge():\r\n countryRepository.delete()\r\n areaRepository.delete()\r\n areaRepository.deleteRemoved()\r\n militaryRepository.delete()\r\n\r\ndef removeCache():\r\n path = 'bll/content/generator/binary/areas_{}.dat'\r\n\r\n for gstage in gstages + ['saved']:\r\n fname = path.format(gstage)\r\n\r\n if os.path.exists(fname):\r\n os.remove(fname)\r\n\r\n\r\ndef generateEditor():\r\n locsAll = load()\r\n baseiso = {}\r\n\r\n for idx, a in locsAll.iterrows():\r\n baseiso[a['fid']] = a['iso']\r\n\r\n with codecs.open('apps/editor/static/baseiso.json', 'w', encoding='utf8') as fh:\r\n json.dump(baseiso, 
fh)\r\n","sub_path":"bll/generator/mapgenerator.py","file_name":"mapgenerator.py","file_ext":"py","file_size_in_byte":6769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"198728656","text":"# -*- coding: utf-8 -*-\n__author__ = 'mantou'\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n\nshops = set([])\n\ntry:\n outfile = file(sys.argv[2],'w')\n filein = file(sys.argv[1])\n shopin = file(sys.argv[3])\nexcept Exception :\n filein = file(\"cer_afternoon_tea_fragment.txt\")\n outfile = file(\"cer_afternoon_tea_fragment_f.txt\",'w')\n shopin = file(\"cer_afternoon_tea_shop.txt\")\n\n\nfor line in shopin:\n shopid = line.split('\\t')[0]\n shops.add(shopid)\n\nfor line in filein:\n shopid = line.split('\\t')[0]\n if shopid in shops:\n outfile.writelines(line)\n else:\n continue\n","sub_path":"common/filtered_by_shop.py","file_name":"filtered_by_shop.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"58419431","text":"from unittest.mock import MagicMock, patch\n\nimport pytest\nfrom ymmsl import Conduit, Port, Reference\n\nimport muscle_manager_protocol.muscle_manager_protocol_pb2 as mmp\n\nfrom libmuscle.logging import LogLevel, LogMessage, Timestamp\nfrom libmuscle.mmp_client import MMPClient\nfrom libmuscle.operator import Operator\n\n\ndef test_init() -> None:\n with patch('libmuscle.mmp_client.grpc.insecure_channel'), \\\n patch('libmuscle.mmp_client.grpc.channel_ready_future'), \\\n patch('muscle_manager_protocol.' +\n 'muscle_manager_protocol_pb2_grpc.MuscleManagerStub'\n ) as mock_stub:\n\n stub = mock_stub.return_value\n client = MMPClient('')\n assert client._MMPClient__client == stub # type: ignore\n\n\ndef test_connection_fail() -> None:\n with patch('libmuscle.mmp_client.CONNECTION_TIMEOUT', 1):\n with pytest.raises(RuntimeError):\n MMPClient('localhost:9000')\n\n\ndef test_submit_log_message(mocked_mmp_client) -> None:\n message = LogMessage(\n 'test_mmp_client',\n Timestamp(1.0),\n LogLevel.WARNING,\n 'Testing the MMPClient')\n\n mocked_mmp_client[0].submit_log_message(message)\n assert mocked_mmp_client[1].SubmitLogMessage.called\n\n\ndef test_get_settings(mocked_mmp_client) -> None:\n client, stub = mocked_mmp_client\n\n row0 = mmp.ListOfDouble(values=[1.2, 3.4])\n row1 = mmp.ListOfDouble(values=[5.6, 7.8])\n array = mmp.ListOfListOfDouble(values=[row0, row1])\n mmp_values = [\n mmp.Setting(\n name='test1',\n value_type=mmp.SETTING_VALUE_TYPE_STRING,\n value_string='test'),\n mmp.Setting(\n name='test2',\n value_type=mmp.SETTING_VALUE_TYPE_INT,\n value_int=12),\n mmp.Setting(\n name='test3',\n value_type=mmp.SETTING_VALUE_TYPE_FLOAT,\n value_float=3.14),\n mmp.Setting(\n name='test4',\n value_type=mmp.SETTING_VALUE_TYPE_BOOL,\n value_bool=True),\n mmp.Setting(\n name='test5',\n value_type=mmp.SETTING_VALUE_TYPE_LIST_FLOAT,\n value_list_float=mmp.ListOfDouble(values=[1.2, 3.4])),\n mmp.Setting(\n name='test6',\n value_type=mmp.SETTING_VALUE_TYPE_LIST_LIST_FLOAT,\n value_list_list_float=array)]\n settings_result = mmp.SettingsResult(setting_values=mmp_values)\n stub.RequestSettings.return_value = settings_result\n settings = client.get_settings()\n\n assert len(settings) == 6\n\n\ndef test_register_instance(mocked_mmp_client) -> None:\n mocked_mmp_client[0].register_instance(\n Reference('kernel[13]'),\n ['direct:test', 'tcp:test'],\n [Port('out', Operator.O_I), Port('in', Operator.S)])\n assert 
mocked_mmp_client[1].RegisterInstance.called\n\n\ndef test_request_peers(mocked_mmp_client) -> None:\n conduits = [mmp.Conduit(sender='kernel.out', receiver='other.in')]\n peer_dimensions = [mmp.PeerResult.PeerDimensions(\n peer_name='other', dimensions=[20])]\n peer_locations = [mmp.PeerResult.PeerLocations(\n instance_name='other', locations=['direct:test', 'tcp:test'])]\n\n mocked_mmp_client[1].RequestPeers.return_value = mmp.PeerResult(\n status=mmp.RESULT_STATUS_SUCCESS,\n conduits=conduits,\n peer_dimensions=peer_dimensions,\n peer_locations=peer_locations)\n result = mocked_mmp_client[0].request_peers(Reference('kernel[13]'))\n assert mocked_mmp_client[1].RequestPeers.called\n\n assert len(result[0]) == 1\n assert isinstance(result[0][0], Conduit)\n assert result[0][0].sender == 'kernel.out'\n assert result[0][0].receiver == 'other.in'\n\n assert isinstance(result[1], dict)\n assert result[1]['other'] == [20]\n\n assert isinstance(result[2], dict)\n assert result[2]['other'] == ['direct:test', 'tcp:test']\n\n\ndef test_request_peers_error(mocked_mmp_client) -> None:\n mocked_mmp_client[1].RequestPeers.return_value = mmp.PeerResult(\n status=mmp.RESULT_STATUS_ERROR,\n error_message='test_error_message')\n with pytest.raises(RuntimeError):\n mocked_mmp_client[0].request_peers(Reference('kernel[13]'))\n\n\ndef test_request_peers_timeout(mocked_mmp_client) -> None:\n mocked_mmp_client[1].RequestPeers.return_value = mmp.PeerResult(\n status=mmp.RESULT_STATUS_PENDING)\n with patch('libmuscle.mmp_client.PEER_TIMEOUT', 1), \\\n patch('libmuscle.mmp_client.PEER_INTERVAL_MIN', 0.1), \\\n patch('libmuscle.mmp_client.PEER_INTERVAL_MAX', 1.0):\n with pytest.raises(RuntimeError):\n mocked_mmp_client[0].request_peers(Reference('kernel[13]'))\n","sub_path":"libmuscle/python/libmuscle/test/test_mmp_client.py","file_name":"test_mmp_client.py","file_ext":"py","file_size_in_byte":4818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"599079803","text":"\"\"\"\nGiven an integer n and an integer start.\n\nDefine an array nums where nums[i] = start + 2*i (0-indexed) and n == nums.length.\n\nReturn the bitwise XOR of all elements of nums.\n\n\"\"\"\n\n\nclass Solution:\n def xorOperation(self, n: int, start: int) -> int:\n res = start\n for i in range(1, n):\n num = start + 2*i\n res ^= num\n\n return res\n\n\nn = 5\nstart = 0 # 8\nn = 4\nstart = 3 # 8\nn, start = 1, 7 # 7\nn, start = 10, 5 # 2\nprint(Solution().xorOperation(n, start))","sub_path":"1486XOR.py","file_name":"1486XOR.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"394080724","text":"if is_valid_queryparam(DescSearchContent):\n if DescSearchContent.count(' ') >= 1:\n foodDescWordList = DescSearchContent.split(' ')\n querySetList = []\n for i, searchedWord in enumerate(foodDescWordList):\n if i == 0:\n querySetList.append(\n MainDesc.objects.filter(main_food_description__icontains=searchedWord).prefetch_related(\n 'AddDescs').all())\n if i > 0:\n querySetList.append(querySetList[0].filter(main_food_description__icontains=searchedWord))\n querySetList.remove(querySetList[0])\n context[\"mainfood\"] = querySetList[0]\n else:\n q1 = AddDesc.objects.filter(Q(additional_food_description__icontains=DescSearchContent) | Q(\n food_code__main_food_description__icontains=DescSearchContent)).distinct()\n q2 = MainDesc.objects.filter(main_food_description__icontains=DescSearchContent)\n df_dict = {}\n 
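# Group the matching rows by food_code: store the main description once and accumulate every additional description in a list\n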
for i, addQueryItem in enumerate(q1):\n if i == 0:\n df_dict[addQueryItem.food_code.food_code] = {\n 'MainDesc': addQueryItem.food_code.main_food_description,\n 'AddFoodDesc': [addQueryItem.additional_food_description]}\n else:\n if q1[i].food_code.food_code == q1[i - 1].food_code.food_code:\n df_dict[addQueryItem.food_code.food_code]['AddFoodDesc'].append(\n addQueryItem.additional_food_description)\n else:\n df_dict[addQueryItem.food_code.food_code] = {\n 'MainDesc': addQueryItem.food_code.main_food_description,\n 'AddFoodDesc': [addQueryItem.additional_food_description]\n }\n\n dict_list = []\n for dict_item in df_dict:\n dict_list.append(dict_item)\n\n for mainQueryItem in q2:\n if mainQueryItem.food_code not in dict_list:\n df_dict[mainQueryItem.food_code] = {\n 'MainDesc': mainQueryItem.main_food_description,\n 'AddFoodDesc': ''}\n dict_list.append(mainQueryItem.food_code)\n\n dict_list.sort()\n ordered = OrderedDict((k, df_dict[k]) for k in dict_list)\n context['mainfoods'] = ordered\n\n sex = form.cleaned_data['sex']\n age = form.cleaned_data['age']\n weight = form.cleaned_data['weight']\n weightUnit = form.cleaned_data[\"weightUnit\"]\n height = form.cleaned_data['height']\n heightUnit = form.cleaned_data[\"heightUnit\"]\n activityLevel = form.cleaned_data[\"activityLevel\"]\n CustomUser.objects.create(user=get_user_model(),\n sex=form.cleaned_data['sex'],\n age=form.cleaned_data['age'],\n weight=form.cleaned_data['weight'],\n height=form.cleaned_data['height'],\n activityLevel=form.cleaned_data[\"activityLevel\"])","sub_path":"recurapp/holding.py","file_name":"holding.py","file_ext":"py","file_size_in_byte":3066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"298479010","text":"import string\n\n\nclass Solution:\n def numWays(self, words, target: str) -> int:\n mod = 10 ** 9 + 7\n n = len(words[0])\n m = len(target)\n countCharAt = {c: [0] * n for c in string.ascii_lowercase}\n for w in words:\n for i, c in enumerate(w):\n countCharAt[c][i] += 1\n dp = [[-1 for j in range(m)] for i in range(n)]\n\n def helper1(idxW, idxT):\n if idxT == m:\n return 1\n if idxW == n:\n return 0\n if dp[idxW][idxT] != -1:\n return dp[idxW][idxT]\n ans = 0\n if countCharAt[target[idxT]][idxW] != 0:\n ans = (countCharAt[target[idxT]][idxW] * helper1(idxW + 1, idxT + 1)) % mod\n ans = (ans + helper1(idxW + 1, idxT)) % mod\n dp[idxW][idxT] = ans\n return ans\n\n def helper(idxW, idxT):\n if idxT == m:\n return 1\n if idxW == n:\n return 0\n if dp[idxW][idxT] != -1:\n return dp[idxW][idxT]\n ans = helper(idxW + 1, idxT)\n if countCharAt[target[idxT]][idxW] != 0:\n ans = (ans + countCharAt[target[idxT]][idxW] * helper(idxW + 1, idxT + 1)) % mod\n dp[idxW][idxT] = ans\n return ans\n\n return helper(0, 0), helper1(0, 0)\n\n\ns = Solution()\nprint(s.numWays([\"acca\", \"bbbb\", \"caca\"], \"aba\"))\n","sub_path":"leetcode/2020/bicontest/bcontest-038/bContest4.py","file_name":"bContest4.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"648588739","text":"#!/usr/bin/env python3\n\nimport sys\nimport math\nfrom operator import itemgetter\n\nfrom common import print_solution, read_input\n\n\ndef distance(city1, city2):\n return math.sqrt((city1[0] - city2[0]) ** 2 + (city1[1] - city2[1]) ** 2)\n\n\ndef is_x_longer(cities):\n x1 = min(map(lambda x: x[0], cities))\n y1 = min(map(lambda x: x[1], cities))\n x2 = max(map(lambda x: x[0], cities))\n y2 = max(map(lambda x: 
x[1], cities))\n return x2 - x1 > y2 - y1\n\n\ndef divide(cities):\n N = len(cities)\n divide = [cities]\n while True:\n cities = max(divide, key=lambda x: len(x))\n max_index = divide.index(cities)\n N = len(cities)\n middle = int(N/2)\n if N <= 3: break\n \n if is_x_longer(cities):\n cities = sorted(cities, key=itemgetter(0))\n else:\n cities = sorted(cities, key=itemgetter(1))\n divide.append(cities[:middle])\n divide.append(cities[middle:])\n divide.pop(max_index)\n\n return merge(divide)\n\n\ndef merge(divide):\n solution = divide.pop(0)\n while divide:\n min_distance = distance(solution[0], divide[0][0])\n min_index = 0\n for i in range(len(divide)):\n d1 = distance(solution[0], divide[i][0])\n d2 = distance(solution[0], divide[i][-1])\n d3 = distance(solution[-1], divide[i][0])\n d4 = distance(solution[-1], divide[i][-1])\n\n d = min(d1, d2, d3, d4)\n\n if d < min_distance:\n min_distance = d\n min_index = i\n\n new = divide.pop(min_index) \n if distance(solution[0], new[0]) == min_distance:\n solution = new[::-1] + solution\n elif distance(solution[0], new[-1]) == min_distance:\n solution = new + solution\n elif distance(solution[-1], new[0]) == min_distance:\n solution = solution + new\n elif distance(solution[-1], new[-1]) == min_distance:\n solution = solution + new[::-1]\n\n return solution\n \n\ndef solve(cities):\n N = len(cities)\n index = [i for i in range(N)]\n index_cities = dict(zip(cities, index))\n sorted_cities = divide(cities)\n\n solution = []\n for city in sorted_cities:\n solution.append(index_cities[city])\n return solution\n\n\nif __name__ == '__main__':\n assert len(sys.argv) > 1\n solution = solve(read_input(sys.argv[1]))\n print_solution(solution)\n","sub_path":"solver_divide_and_sort.py","file_name":"solver_divide_and_sort.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"103828656","text":"from decimal import Decimal\nfrom datetime import timedelta\n\nfrom django.test import TestCase\nfrom django.utils import timezone\n\n\nfrom bills.models import CallRecord, Bill, cycle_date\nfrom bills.tests import _setup_record\n\n\nclass CycleDateTestCase(TestCase):\n def test_cycle_date_current_month(self):\n now = timezone.now()\n billing_cycle = f\"{now.month:02}/{now.year}\"\n start_date, end_date = cycle_date(billing_cycle)\n\n self.assertGreaterEqual(now, start_date)\n self.assertLessEqual(now, end_date)\n \n def test_cycle_date_next_month(self):\n next_month = timezone.now() + timedelta(days=30)\n billing_cycle = f\"{next_month.month:02}/{next_month.year}\"\n start_date, end_date = cycle_date(billing_cycle)\n\n self.assertGreaterEqual(next_month, start_date)\n self.assertLessEqual(next_month, end_date)\n \n def test_cycle_date_no_args(self):\n now = timezone.now()\n\n start_date, end_date = cycle_date()\n\n self.assertGreaterEqual(now, start_date)\n self.assertLessEqual(now, end_date)\n\nclass BillCreateTestCase(TestCase):\n def test_create_bill(self):\n subscriber = \"12345678\"\n start_record, end_record = _setup_record(\n timedelta(minutes=5, seconds=10), 15, call_id=1)\n standing_charge = Decimal(\"0.36\")\n minute_charge = Decimal(\"0.09\")\n now = timezone.now()\n period = f\"{now.month:02}/{now.year}\"\n\n bill = Bill.objects.get_or_create(\n subscriber, period, standing_charge, minute_charge)\n\n self.assertEqual(bill.pk, 1)\n self.assertEqual([start_record, end_record], list(bill.records.all()))\n\n\nclass BillCalculateBillTestCase(TestCase):\n def 
setUp(self):\n now = timezone.now()\n call_id = 1\n self.records1 = _setup_record(\n timedelta(minutes=5, seconds=10), 15, call_id=call_id)\n call_id += 1\n _setup_record(\n timedelta(minutes=10, seconds=50), 21, 57, 13, call_id=call_id)\n call_id += 1\n _setup_record(\n timedelta(minutes=10, seconds=50), 21, 57, 13,\n call_id=call_id, outdated=True)\n standing_charge = Decimal(\"0.36\")\n minute_charge = Decimal(\"0.09\")\n period = f\"{now.month:02}/{now.year}\"\n self.bill = Bill.objects.create_calculated_bill(\n \"12345678\", period, standing_charge, minute_charge\n )\n\n def test_calculate_bill(self):\n expected_total = Decimal(\"1.35\")\n self.bill.calculate()\n\n self.assertTrue(self.bill.is_calculated)\n self.assertEqual(self.bill.total_price, expected_total)\n\n def test_calculate_without_start_record(self):\n self.records1[0].delete()\n self.bill.calculate()\n\n expected_total = Decimal(\"0.54\")\n\n self.assertEqual(self.bill.total_price, expected_total)\n","sub_path":"bills/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":2868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"429528054","text":"# SPDX-FileCopyrightText: 2020 Robert Cohn\n#\n# SPDX-License-Identifier: MIT\n\nimport logging\nimport os\n\nimport zignalz as zz\nfrom zignalz import cli\n\nlogger = logging.getLogger(__name__)\n\n\ndef add_account_parser(parent_subparser):\n parser = parent_subparser.add_parser('account', help='Account maintenance')\n subparsers = parser.add_subparsers(dest='cmd')\n subparsers.required = True\n create_parser = subparsers.add_parser('create', help='Create an account.')\n create_parser.set_defaults(func=create_account)\n create_parser.add_argument('account_name', help='Name of account')\n\n list_parser = subparsers.add_parser('list', help='List all accounts.')\n list_parser.set_defaults(func=list_accounts)\n\n\ndef add_project_parser(parent_subparser):\n parser = parent_subparser.add_parser('project', help='Project maintenance')\n subparsers = parser.add_subparsers(dest='cmd')\n subparsers.required = True\n create_parser = subparsers.add_parser('create', help='Create a project.')\n create_parser.set_defaults(func=create_project)\n create_parser.add_argument(\n 'account_name', help='Name of account that owns project.'\n )\n create_parser.add_argument(\n 'project_config', help='Project configuration file.'\n )\n\n update_parser = subparsers.add_parser('update', help='Update a project.')\n update_parser.set_defaults(func=update_project)\n update_parser.add_argument(\n 'account_name', help='Name of account that owns project.'\n )\n update_parser.add_argument(\n 'project_config', help='Project configuration file.'\n )\n\n list_parser = subparsers.add_parser('list', help='List all projects.')\n list_parser.set_defaults(func=list_projects)\n\n\ndef add_parser(subparsers):\n parser = subparsers.add_parser('admin', help='Maintenance interface')\n subparsers = parser.add_subparsers(dest='cmd')\n subparsers.required = True\n reset_parser = subparsers.add_parser(\n 'reset', help='Create empty database with old tables'\n )\n reset_parser.set_defaults(func=reset)\n add_account_parser(subparsers)\n\n\nuri_env = 'ZIGNALZ_DB_URI'\n\n\ndef get_db():\n if uri_env not in os.environ:\n logger.error(f'Set {uri_env}')\n exit(1)\n return zz.MongoDB(os.environ[uri_env])\n\n\ndef reset():\n response = input('Deleting everything! 
Type reset to confirm: ')\n if response != 'reset':\n print('aborting reset')\n exit(0)\n\n db = get_db()\n zz.AccountStore.create(db)\n zz.SignalEventsStore.create(db)\n logger.info('Reset database')\n\n\ndef create_account():\n db = get_db()\n account = zz.Account(name=cli.args.account_name)\n zz.AccountStore(db).put(account)\n print(f'Created account: {account}')\n\n\ndef list_accounts():\n db = get_db()\n for account in zz.AccountStore(db).query():\n print(f' {account}')\n\n\ndef create_project():\n assert False\n\n\ndef update_project():\n assert False\n\n\ndef list_projects():\n db = get_db()\n for project in zz.ProjectStore(db).query():\n print(f' {project}')\n\n\ndef create_device():\n db = get_db()\n device = zz.Device(name=cli.args.device_name)\n zz.DeviceStore(db).put(device)\n print(f'Created device: {device}')\n\n\ndef update_device():\n assert False\n\n\ndef list_devices():\n db = get_db()\n for device in zz.DeviceStore(db).query():\n print(f' {device}')\n","sub_path":"src/zignalz/cli/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"30784994","text":"import os\nimport argparse\nfrom pathlib import Path\nimport torch\nimport torch.backends.cudnn as cudnn\nimport matplotlib.pyplot as plt\nfrom yolact_edge.data.config import cfg, set_cfg\nfrom yolact_edge.yolact import Yolact\nfrom yolact_edge.utils.augmentations import FastBaseTransform\nfrom yolact_edge.eval import prep_display\n\n\nclass SavePath:\n \"\"\"\n Why is this a class?\n Why do I have a class for creating and parsing save paths?\n What am I doing with my life?\n \"\"\"\n\n def __init__(self, model_name: str, epoch: int, iteration: int):\n self.model_name = model_name\n self.epoch = epoch\n self.iteration = iteration\n\n def get_path(self, root: str = ''):\n file_name = self.model_name + '_' + \\\n str(self.epoch) + '_' + str(self.iteration) + '.pth'\n return os.path.join(root, file_name)\n\n @staticmethod\n def from_str(path: str):\n file_name = os.path.basename(path)\n\n if file_name.endswith('.pth'):\n file_name = file_name[:-4]\n\n params = file_name.split('_')\n\n if file_name.endswith('interrupt'):\n params = params[:-1]\n\n model_name = '_'.join(params[:-2])\n epoch = params[-2]\n iteration = params[-1]\n\n return SavePath(model_name, int(epoch), int(iteration))\n\n @staticmethod\n def remove_interrupt(save_folder):\n for p in Path(save_folder).glob('*_interrupt.pth'):\n p.unlink()\n\n @staticmethod\n def get_interrupt(save_folder):\n for p in Path(save_folder).glob('*_interrupt.pth'):\n return str(p)\n return None\n\n @staticmethod\n def get_latest(save_folder, config):\n \"\"\" Note: config should be config.name. \"\"\"\n max_iter = -1\n max_name = None\n\n for p in Path(save_folder).glob(config + '_*'):\n path_name = str(p)\n\n try:\n save = SavePath.from_str(path_name)\n except:\n continue\n\n if save.model_name == config and save.iteration > max_iter:\n max_iter = save.iteration\n max_name = path_name\n\n return max_name\n\n\ndef str2bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\n\ndef parse_args(argv=None):\n parser = argparse.ArgumentParser(\n description='YOLACT COCO Evaluation')\n parser.add_argument('--trained_model',\n default=None, type=str,\n help='Trained state_dict file path to open. 
If \"interrupt\", this will open the interrupt file.')\n parser.add_argument('--top_k', default=5, type=int,\n help='Further restrict the number of predictions to parse')\n parser.add_argument('--cuda', default=True, type=str2bool,\n help='Use cuda to evaulate model')\n parser.add_argument('--fast_nms', default=True, type=str2bool,\n help='Whether to use a faster, but not entirely correct version of NMS.')\n parser.add_argument('--display_masks', default=True, type=str2bool,\n help='Whether or not to display masks over bounding boxes')\n parser.add_argument('--display_bboxes', default=True, type=str2bool,\n help='Whether or not to display bboxes around masks')\n parser.add_argument('--display_text', default=True, type=str2bool,\n help='Whether or not to display text (class [score])')\n parser.add_argument('--display_scores', default=True, type=str2bool,\n help='Whether or not to display scores in addition to classes')\n parser.add_argument('--display', dest='display', action='store_true',\n help='Display qualitative results instead of quantitative ones.')\n parser.add_argument('--shuffle', dest='shuffle', action='store_true',\n help='Shuffles the images when displaying them. Doesn\\'t have much of an effect when display is off though.')\n parser.add_argument('--ap_data_file', default='results/ap_data.pkl', type=str,\n help='In quantitative mode, the file to save detections before calculating mAP.')\n parser.add_argument('--resume', dest='resume', action='store_true',\n help='If display not set, this resumes mAP calculations from the ap_data_file.')\n parser.add_argument('--max_images', default=-1, type=int,\n help='The maximum number of images from the dataset to consider. Use -1 for all.')\n parser.add_argument('--eval_stride', default=5, type=int,\n help='The default frame eval stride.')\n parser.add_argument('--output_coco_json', dest='output_coco_json', action='store_true',\n help='If display is not set, instead of processing IoU values, this just dumps detections into the coco json file.')\n parser.add_argument('--bbox_det_file', default='results/bbox_detections.json', type=str,\n help='The output file for coco bbox results if --coco_results is set.')\n parser.add_argument('--mask_det_file', default='results/mask_detections.json', type=str,\n help='The output file for coco mask results if --coco_results is set.')\n parser.add_argument('--config', default=None,\n help='The config object to use.')\n parser.add_argument('--output_web_json', dest='output_web_json', action='store_true',\n help='If display is not set, instead of processing IoU values, this dumps detections for usage with the detections viewer web thingy.')\n parser.add_argument('--web_det_path', default='web/dets/', type=str,\n help='If output_web_json is set, this is the path to dump detections into.')\n parser.add_argument('--no_bar', dest='no_bar', action='store_true',\n help='Do not output the status bar. 
This is useful for when piping to a file.')\n parser.add_argument('--display_lincomb', default=False, type=str2bool,\n help='If the config uses lincomb masks, output a visualization of how those masks are created.')\n parser.add_argument('--benchmark', default=False, dest='benchmark', action='store_true',\n help='Equivalent to running display mode but without displaying an image.')\n parser.add_argument('--fast_eval', default=False, dest='fast_eval', action='store_true',\n help='Skip those warping frames when there are no GT annotations.')\n parser.add_argument('--deterministic', default=False, dest='deterministic', action='store_true',\n help='Whether to enable deterministic flags of PyTorch for deterministic results.')\n parser.add_argument('--no_sort', default=False, dest='no_sort', action='store_true',\n help='Do not sort images by hashed image ID.')\n parser.add_argument('--seed', default=None, type=int,\n help='The seed to pass into random.seed. Note: this is only really for the shuffle and does not (I think) affect cuda stuff.')\n parser.add_argument('--mask_proto_debug', default=False, dest='mask_proto_debug', action='store_true',\n help='Outputs stuff for scripts/compute_mask.py.')\n parser.add_argument('--no_crop', default=False, dest='crop', action='store_false',\n help='Do not crop output masks with the predicted bounding box.')\n parser.add_argument('--image', default=None, type=str,\n help='A path to an image to use for display.')\n parser.add_argument('--images', default=None, type=str,\n help='An input folder of images and output folder to save detected images. Should be in the format input->output.')\n parser.add_argument('--video', default=None, type=str,\n help='A path to a video to evaluate on. Passing in a number will use that index webcam.')\n parser.add_argument('--video_multiframe', default=1, type=int,\n help='The number of frames to evaluate in parallel to make videos play at higher fps.')\n parser.add_argument('--score_threshold', default=0, type=float,\n help='Detections with a score under this threshold will not be considered. This currently only works in display mode.')\n parser.add_argument('--dataset', default=None, type=str,\n help='If specified, override the dataset specified in the config with this one (example: coco2017_dataset).')\n parser.add_argument('--detect', default=False, dest='detect', action='store_true',\n help='Don\\'t evaluate the mask branch at all and only do object detection. This only works for --display and --benchmark.')\n parser.add_argument('--yolact_transfer', dest='yolact_transfer', action='store_true',\n help='Split pretrained FPN weights to two phase FPN (for models trained by YOLACT).')\n parser.add_argument('--coco_transfer', dest='coco_transfer', action='store_true',\n help='[Deprecated] Split pretrained FPN weights to two phase FPN (for models trained by YOLACT).')\n parser.add_argument('--drop_weights', default=None, type=str,\n help='Drop specified weights (split by comma) from existing model.')\n parser.add_argument('--calib_images', default=None, type=str,\n help='Directory of images for TensorRT INT8 calibration, for explanation of this field, please refer to `calib_images` in `data/config.py`.')\n parser.add_argument('--trt_batch_size', default=1, type=int,\n help='Maximum batch size to use during TRT conversion. 
This has to be greater than or equal to the batch size the model will take during inference.')\n parser.add_argument('--disable_tensorrt', default=False, dest='disable_tensorrt', action='store_true',\n help='Don\\'t use TensorRT optimization when specified.')\n parser.add_argument('--use_fp16_tensorrt', default=False, dest='use_fp16_tensorrt', action='store_true',\n help='This replaces all TensorRT INT8 optimization with FP16 optimization when specified.')\n\n parser.set_defaults(no_bar=False, display=False, resume=False, output_coco_json=False, output_web_json=False, shuffle=False,\n benchmark=False, no_sort=False, no_hash=False, mask_proto_debug=False, crop=True, detect=False)\n\n global args\n args = parser.parse_args(argv)\n\n if args.output_web_json:\n args.output_coco_json = True\n\n\nclass YOLACTEdgeInference(object):\n\n def __init__(self, weights_path):\n parse_args()\n\n # TODO: DANGER => requires proper weights file name!\n model_path = SavePath.from_str(weights_path)\n config = model_path.model_name + '_config'\n print('Parsed %s from the file name.\\n' % config)\n set_cfg(config)\n\n with torch.no_grad():\n if torch.cuda.is_available():\n cudnn.fastest = True\n cudnn.deterministic = True\n cudnn.benchmark = False\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n else:\n print(\"CUDA missing... Exiting...\")\n exit(1)\n\n net = Yolact(training=False)\n net.load_weights(weights_path, args=args)\n net.eval()\n # TODO:\n # convert_to_tensorrt(net, cfg, args, transform=BaseTransform())\n net = net.cuda()\n self.net = net\n\n def predict(self, img, show=False):\n # frame = torch.from_numpy(cv2.imread(path)).cuda().float()\n frame = torch.Tensor(img).cuda().float()\n batch = FastBaseTransform()(frame.unsqueeze(0))\n\n extras = {\"backbone\": \"full\", \"interrupt\": False,\n \"keep_statistics\": False, \"moving_statistics\": None}\n\n preds = self.net(batch, extras=extras)[\"pred_outs\"]\n\n if show:\n img_numpy = prep_display(\n preds, frame, None, None, undo_transform=False)\n plt.imshow(img_numpy)\n plt.title(\"YOLACT Prediction! :)\")\n plt.show()\n","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":12365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"285066068","text":"\"\"\"empty message\n\nRevision ID: 8ae7070ee59d\nRevises: ab84d1e9aa9b\nCreate Date: 2020-07-27 17:47:43.028585\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '8ae7070ee59d'\ndown_revision = 'ab84d1e9aa9b'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint('ArtistGenre_artist_id_fkey', 'ArtistGenre', type_='foreignkey')\n op.drop_constraint('ArtistGenre_genre_id_fkey', 'ArtistGenre', type_='foreignkey')\n op.create_foreign_key(None, 'ArtistGenre', 'Artist', ['artist_id'], ['id'], ondelete='cascade')\n op.create_foreign_key(None, 'ArtistGenre', 'Genre', ['genre_id'], ['id'], ondelete='cascade')\n op.drop_constraint('Show_venue_id_fkey', 'Show', type_='foreignkey')\n op.drop_constraint('Show_artist_id_fkey', 'Show', type_='foreignkey')\n op.create_foreign_key(None, 'Show', 'Venue', ['venue_id'], ['id'], ondelete='cascade')\n op.create_foreign_key(None, 'Show', 'Artist', ['artist_id'], ['id'], ondelete='cascade')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
 ###\n    op.drop_constraint(None, 'Show', type_='foreignkey')\n    op.drop_constraint(None, 'Show', type_='foreignkey')\n    op.create_foreign_key('Show_artist_id_fkey', 'Show', 'Artist', ['artist_id'], ['id'])\n    op.create_foreign_key('Show_venue_id_fkey', 'Show', 'Venue', ['venue_id'], ['id'])\n    op.drop_constraint(None, 'ArtistGenre', type_='foreignkey')\n    op.drop_constraint(None, 'ArtistGenre', type_='foreignkey')\n    op.create_foreign_key('ArtistGenre_genre_id_fkey', 'ArtistGenre', 'Genre', ['genre_id'], ['id'])\n    op.create_foreign_key('ArtistGenre_artist_id_fkey', 'ArtistGenre', 'Artist', ['artist_id'], ['id'])\n    # ### end Alembic commands ###\n","sub_path":"migrations/versions/8ae7070ee59d_.py","file_name":"8ae7070ee59d_.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"201217273","text":"import pandas as pd\nfrom sklearn.model_selection import KFold\nimport numpy as np\nfrom sklearn import svm\nfrom sklearn import metrics\nfrom functions import *\nfrom sklearn.metrics import confusion_matrix\nfrom pycm import *\nfrom warnings import simplefilter\n\n# ignore all future warnings\nsimplefilter(action='ignore', category=FutureWarning)\nsimplefilter(action='ignore', category=RuntimeWarning)\n\ndef svm_five_fold(kernel, C, X, y, output):\n\n    # Define 5-Fold Lists\n    scores = []\n    mean_squareds = []\n    mean_absolutes = []\n\n    #fitting the SVM classifier on the dataset\n    clf = svm.SVC(kernel=kernel, C=C)\n\n    #5-Fold-Cross-Validation\n    cv = KFold(n_splits=5, random_state=None, shuffle=True)\n\n    counter = 0 # To state the model\n\n    for train_index, test_index in cv.split(X):\n        counter += 1\n        X_train, X_test, y_train, y_test = X[train_index], X[test_index], y[train_index], y[test_index]\n        clf.fit(X_train, y_train)\n        scores.append(clf.score(X_test, y_test))\n        y_pred = clf.predict(X_test)\n        mean_absolutes.append(metrics.mean_absolute_error(y_test, y_pred))\n        mean_squareds.append(np.sqrt(metrics.mean_squared_error(y_test, y_pred)))\n\n        cm = confusion_matrix(y_test, y_pred)\n        tmp = ConfusionMatrix(actual_vector=y_test, predict_vector=y_pred)\n\n        cmd = pd.DataFrame(cm, index=tmp.classes, columns=tmp.classes)\n\n        '''\n        # Printing metrics\n        print(f'\\nSVM\\nModel {counter}:\\n')\n        print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))\n        print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))\n        print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))\n        print('Accuracy of Model:', clf.score(X_test, y_test))\n        \n        # Plotting results\n        plt.figure(figsize=(7, 7))\n        sns.heatmap(cmd, annot=True, cmap='RdPu')\n        txt1 = 'SVM Linear Kernel '\n        txt2 = '\\nAccuracy:{0:.3f}'.format(accuracy_score(y_test, y_pred))\n        txt3 = f' Model {counter}:'\n        txt = txt1 + txt3 + txt2\n        plt.title(txt)\n        plt.ylabel('True label')\n        plt.xlabel('Predicted label')\n        plt.show()\n        \n        #Printing Confusion matrix details\n        print(\"\\n\\nlabel precision recall\")\n        for label in range(len(cm)):\n            print(f\"{label:5d} {precision(label, cm):9.3f} {recall(label, cm):6.3f}\")\n        \n        print(\"\\nprecision total:\", precision_macro_average(cm))\n        print(\"recall total:\", recall_macro_average(cm))\n        \n        print(f'\\nAccuracy: {accuracy(cm)}')\n        print('\\n--------------------------------------------\\n\\n')\n        '''\n\n    # Accuracy, MSE, MAE Results\n    tmp = f'\\nScores: {scores}'\n    output.append(tmp)\n    # print(tmp)\n\n    tmp = f'\\nRoot Mean Squared Errors: {mean_squareds}'\n    output.append(tmp)\n    # print(tmp)\n\n    tmp 
= f'\\nMean Absolute Errors: {mean_absolutes}'\n output.append(tmp)\n # print(tmp)\n\n # Mean of Accuracy, MSE and MAE Results\n disp_mean = \"{:.2f}\".format(100 * np.mean(scores))\n tmp = f'\\nAccuracy of 5-Fold-Cross-Validation \\nMean: {disp_mean}%'\n output.append(tmp)\n # print(tmp)\n\n tmp = f'\\nRMSE of 5-Fold-Cross-Validation \\nMean: {np.mean(mean_squareds)}'\n output.append(tmp)\n # print(tmp)\n\n tmp = f'\\nMAE of 5-Fold-Cross-Validation \\nMean: {np.mean(mean_absolutes)}\\n'\n output.append(tmp)\n # print(tmp)\n\n # Std. Dev. of Accuracy Results\n disp_std = \"{:.2f}\".format(np.std(scores))\n tmp = f'Standard Deviation of Accuracy: {disp_std}\\n'\n output.append(tmp)\n # print(tmp)\n\n return output\n\n\n# Additional Codes provided for Result Module\n'''\n # Plotting results\n plt.scatter(y_test, y_pred)\n plt.xlabel('True_Values')\n plt.ylabel('Predictions')\n plt.title(f'Model {counter}:')\n plt.show()\n '''\n\n'''\n# Sklearn cross val.\nc_scores = cross_val_score(clf, X, y, cv=5)\nprint(f'\\nCross-validated scores: {c_scores}')\n\n# Make cross validated predictions\npredictions = cross_val_predict(clf, X, y, cv=5)\nplt.scatter(y, predictions)\nplt.xlabel('True_Values')\nplt.ylabel('Predictions')\nplt.title('Model Pred:')\nplt.show()\n'''","sub_path":"animal-sound-classifier/venv/five_fold_cross_val_svm.py","file_name":"five_fold_cross_val_svm.py","file_ext":"py","file_size_in_byte":4173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"616188325","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n# \n\nimport uuid\n\nfrom aflo.db.sqlalchemy import models as db_models\n\n\n# make catalog.\ndef make_catalog(catalog_name, seq_no, goods_num, nova_key, unit,\n                 lifetime_start, lifetime_end):\n    role = 'admin'\n    catalog_id = str(uuid.uuid4())\n    catalog = db_models.Catalog(catalog_id=catalog_id)\n    catalog.catalog_name = catalog_name\n    catalog.lifetime_start = lifetime_start\n    catalog.lifetime_end = lifetime_end\n    catalog.save()\n    catalog_content = db_models.CatalogContents(catalog_id=catalog_id)\n    catalog_content.seq_no = seq_no\n    catalog_content.goods_num = goods_num\n    catalog_content.expansion_key2 = nova_key\n    catalog_content.expansion_key3 = unit\n    catalog_content.save()\n    catalog_scope = db_models.CatalogScope(catalog_id=catalog_id)\n    catalog_scope.id = str(uuid.uuid4())\n    catalog_scope.scope = role\n    catalog_scope.lifetime_start = lifetime_start\n    catalog_scope.lifetime_end = lifetime_end\n    catalog_scope.save()\n    price = db_models.Price(catalog_id=catalog_id)\n    price.seq_no = seq_no\n    price.price = '100'\n    price.scope = role\n    price.lifetime_start = lifetime_start\n    price.lifetime_end = lifetime_end\n    price.save()\n\n    return catalog_id\n\n\n# make catalog\ndef make_catalog_only(catalog_name, seq_no, goods_num, nova_key, unit):\n    catalog_id = str(uuid.uuid4())\n    catalog = db_models.Catalog(catalog_id=catalog_id)\n    catalog.catalog_name = catalog_name\n    catalog.save()\n    catalog_content = db_models.CatalogContents(catalog_id=catalog_id)\n    catalog_content.seq_no = seq_no\n    catalog_content.goods_num = goods_num\n    catalog_content.expansion_key2 = nova_key\n    catalog_content.expansion_key3 = unit\n    catalog_content.save()\n\n    return catalog_id\n","sub_path":"aflo/tests/unit/v1/tickets/broker/fixture.py","file_name":"fixture.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"261276101","text":"'''\nRun Package\n'''\n\nfrom iv_jett.iv_standard_error import *\nfrom iv_jett.iv_init import *\nfrom iv_jett.print import*\n\ndef TwoStageLeastSquaresRegress(X, Y, Z, nocons = False, verbose = False):\n    beta_iv_hat = estimate_beta_iv(X, Z, Y, nocons)\n    resid = calculate_resid(Z, X, Y, beta_iv_hat, nocons)\n    sigma = calculate_sigma(Z, X, Y, beta_iv_hat, nocons)\n    var_beta = calculate_var_beta(sigma, X, Z, resid, nocons)\n    result_dict = {'Beta IV' : beta_iv_hat,\n                   'Standard Error': var_beta}\n    if verbose:\n        print_res(result_dict, nocons)\n    return result_dict\n","sub_path":"Projects/project_2_packages/iv_jett/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"352155137","text":"# -*- coding: utf8 -*-\r\n\r\n# python -i this_script.py\r\n\r\n# NumPy version of the perceptron\r\n\r\nimport numpy as np\r\n\r\ndef AND(x1, x2):\r\n    x = np.array([x1, x2]) # input\r\n    w = np.array([0.5, 0.5]) # connection weights\r\n    b = -0.7 # bias\r\n    \r\n    # compute the node value\r\n    tmp = np.sum(w*x) + b\r\n\r\n    if tmp <= 0:\r\n        # not activated\r\n        return 0\r\n    else:\r\n        # fires!\r\n        return 1\r\n","sub_path":"chapter2_3_3_perceptron.py","file_name":"chapter2_3_3_perceptron.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"175510893","text":"import random\nimport os\nfrom tools.garble.class_builder import ClassBuilder\n\nclass ManifestGarble:\n    __used_names = []\n
    def __gen_rand_str(self):\n        result = \"\"\n        while len(result) == 0 or result in self.__used_names:\n            random_str = ''.join(random.sample(\n                \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\", random.randint(6, 10)))\n            result = \"%s%d\" % (random_str, random.randint(101, 200))\n        self.__used_names.append(result)\n        return result\n\n    # Randomly create Activities inside the package\n    def __create_activities(self,path):\n        activities = []\n        for root, dirs, files in os.walk(path):\n            if len(files) > 0 and (files[0].endswith(\".java\") or files[0].endswith(\".kt\")):\n                for i in range(0,random.randint(3,10)):\n                    act_name = self.__gen_rand_str()\n                    act_path = \"%s/%s.java\"%(root, act_name) \n                    pkg_name = root.split(\"main/java/\")[1].replace(\"/\",\".\")\n                    if \"main/kotlin/\" in root:\n                        pkg_name = root.split(\"main/kotlin/\")[1].replace(\"/\",\".\")\n\n                    act_content = ClassBuilder().new_class(act_name,pkg_name)\n                    with open(act_path, \"w+\", encoding=\"utf-8\") as fout:\n                        fout.write(act_content)\n                    activities.append(\"%s.%s\"%(pkg_name,act_name))\n        return activities\n\n    \n    def __insert_to_manifest(self,path,activities):\n        file_content = \"\"\n        with open(path,\"r\",encoding=\"utf-8\") as fin:\n            file_content = fin.read()\n\n        for activity in activities:\n            code = \"<activity android:name=\\\"%s\\\"/>\"%activity\n            file_content = file_content.replace(\"</application>\",\"%s\\n</application>\"%code)\n        \n        with open(path,\"w\",encoding=\"utf-8\") as fout:\n            fout.write(file_content)\n\n    \n\n    def manifest_garble(self, config):\n        for main_dir in config['lib_main']:\n            print(\"activity generating ...\")\n            activities = self.__create_activities(\"%s/%s\"%(config[\"root\"], main_dir))\n            print(\"insert activity to manifest ...\")\n            self.__insert_to_manifest(\"%s/%s/AndroidManifest.xml\"% (config[\"root\"], main_dir), activities)\n        \n        print(\"done\")\n","sub_path":"python_tools/garble_script_v2/tools/garble/manifest_grable.py","file_name":"manifest_grable.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"44768213","text":"def tim_day_con( xau_x, xau_y , vi_tri_x, vi_tri_y, xau_max,xau_luu):\n    \n    for dem_x in range(vi_tri_x,len(xau_x)):\n\n        if xau_y.find(xau_x[dem_x],vi_tri_y) != -1:\n            \n            xau_max = xau_max + xau_x[dem_x]\n\n            vi_tri_y = xau_y.find(xau_x[dem_x],vi_tri_y)\n\n            if len(xau_max) > len(xau_luu): \n                \n                xau_luu = xau_max\n\n            tim_day_con( xau_x,xau_y, dem_x + 1, vi_tri_y+1,xau_max,xau_luu)\n    return xau_luu\n    \nx = input(\"Enter x: \")\ny = input(\"Enter y: \")\n\n# if len(x) > len(y):\n#     t = x\n#     x = y\n#     y = t\nprint(tim_day_con( x , y , 0, 0, '',''))","sub_path":"xep_hau.py","file_name":"xep_hau.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"602689848","text":"import cv2\nimg = cv2.imread(\"dale.jpg\")\ndef clickposition(event,x,y,flask,param):\n    if event == cv2.EVENT_LBUTTONDOWN:\n        text=str(x)+\",\"+str(y)\n        cv2.putText(img,text,(x,y),cv2.FONT_HERSHEY_COMPLEX,1,(0,0,255),cv2.LINE_4)\n        cv2.imshow(\"Output\",img)\n\n\n\n#show img \ncv2.imshow(\"Output\",img)\ncv2.setMouseCallback(\"Output\", clickposition)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"mouse/mous eevent.py","file_name":"mous eevent.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"474602219","text":"import search\nimport numpy as np\n\ndef get_index_mat(feature_mat, k=10):\n    num = feature_mat.shape[0]\n    index_mat = []\n
    for i in range(num):\n        feature_vec = feature_mat[i]\n        top_k_index, _ = search.get_top_k_index(feature_vec, feature_mat, k=k)\n        index_mat.append(top_k_index)\n        print(i)\n\n    index_mat = np.array(index_mat)\n\n    return index_mat\n\ndef get_triplets(index_mat, labels):\n    num = index_mat.shape[0]\n    k = index_mat.shape[1]\n    triplets = []\n    neg = None\n    for i in range(num):\n        tag = False\n        index_vec = index_mat[i]\n        query = index_vec[0]\n        label = labels[query]\n        for j in range(k):\n            index = index_vec[j]\n            if not tag and labels[index] != label:\n                neg = index\n                tag = True\n            if tag and labels[index] == label:\n                pos = index\n                triplets.append([query, pos, neg])\n                break\n        print(i)\n    triplets = np.array(triplets)\n\n    return triplets\n\ndef main():\n    feature_path = \"triple/features.npy\"\n    label_path = \"triple/labels.npy\"\n    triplet_path = \"triple/triplets.npy\"\n\n    feature_mat = np.load(feature_path)\n    labels = np.load(label_path)\n\n    index_mat = get_index_mat(feature_mat, 10)\n    triplets = get_triplets(index_mat, labels)\n\n    np.save(triplet_path, triplets)\n\n\ndef test():\n    labels = np.array([1, 1, 2, 3, 3]) # [(3, 4, 1), (4, 3, 2)]\n    index_mat = np.array([[0, 1, 2, 3, 4], [1, 0, 2, 3, 4], [2, 3, 4, 0, 1], [3, 1, 0, 4, 2], [4, 2, 3, 0, 1]])\n    x = get_triplets(index_mat, labels)\n    print(x)\n\ndef make_labels():\n    filename = \"./file_new.txt\"\n    label_path = \"triple/labels.npy\"\n    import txtio\n    lines = txtio.read_lines(filename)\n    labels = [line[1] for line in lines]\n    labels = np.array(labels)[0:10000]\n    np.save(label_path, labels)\n    print(labels)\n\ndef show_triplets():\n    import txtio\n    from scipy.misc import imread, imsave, imresize\n    filename = \"./file_new.txt\"\n    triplet_path = \"triple/triplets.npy\"\n    lines = txtio.read_lines(filename)\n    files = [line[0] for line in lines]\n    triplets = np.load(triplet_path)\n    num = triplets.shape[0]\n    for i in range(num):\n        triplet = triplets[i]\n        query = imread(files[triplet[0]])\n        query = imresize(query, (256, 256))\n        pos = imread(files[triplet[1]])\n        pos = imresize(pos, (256, 256))\n        neg = imread(files[triplet[2]])\n        neg = imresize(neg, (256, 256))\n\n        img = np.concatenate((query, pos, neg), axis=1)\n        path = \"trimg/\" + str(i) + \".jpg\"\n        imsave(path, img)\n\n\ndef makelist_from_triplets():\n    import txtio\n    filename = \"./file_new.txt\"\n    triplet_path = \"triple/triplets.npy\"\n\n    lines = txtio.read_lines(filename)\n    files = [line[0] for line in lines]\n    triplets = np.load(triplet_path)\n    query = list(triplets[:, 0])\n    query = [files[i] for i in query]\n    pos = list(triplets[:, 1])\n    pos = [files[i] for i in pos]\n    neg = list(triplets[:, 2])\n    neg = [files[i] for i in neg]\n    return [query, pos, neg]\n\nif __name__ == \"__main__\":\n    q, p, n = makelist_from_triplets()\n    print(q)\n\n\n","sub_path":"triple_data_maker.py","file_name":"triple_data_maker.py","file_ext":"py","file_size_in_byte":3168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"67072210","text":"#!/usr/bin/python3.7\r\n\r\nimport sys\r\nimport random\r\nimport copy\r\nfrom collections import defaultdict\r\n\r\n\r\nclass DP(object):\r\n    '''Class for Davis-Putnam solver.\r\n    '''\r\n\r\n    def __init__(self, file):\r\n        self.file = file\r\n        self.hname=file\r\n        self.count = 0\r\n        if(self.is_okay()):\r\n            sys.stdout.write('\\ns UNSATISFIABLE\\n')\r\n            exit(0)\r\n\r\n\r\n    def solver(self, clauses):\r\n        '''Main method of solver.\r\n\r\n        Arguments:\r\n            clauses {list} -- list of clauses\r\n\r\n        Returns:\r\n            dict -- dictionary of variables with assigned
 values\r\n        '''\r\n\r\n        # print('|START count:', self.count)\r\n        # print('start:', clauses)\r\n        clauses = self.pure_literals(clauses)\r\n        # print('after pure:', clauses)\r\n        clauses = self.unit_clauses(clauses)\r\n        # print('after unit:', clauses)\r\n        if [] in clauses:\r\n            # print('false')\r\n            return False\r\n        if len(clauses) == 0:\r\n            # print('success!')\r\n            return self.vars\r\n        split_var = self.JW_prob_split(clauses)\r\n        self.count += 1\r\n        # print(split_var)\r\n        tmp = copy.deepcopy(clauses)\r\n        assignment = self.solver(self.remove_clauses(split_var, clauses))\r\n        if assignment is False:\r\n            # print('backtracking...')\r\n            self.count += 1\r\n            clauses = copy.deepcopy(tmp)\r\n            assignment = self.solver(self.remove_clauses(-split_var, clauses))\r\n            if assignment is False:\r\n                return False\r\n        return self.vars\r\n\r\n\r\n\r\n    def remove_clauses(self, variable, clauses):\r\n        '''Update the list of clauses with the given variable assigned.\r\n\r\n        Arguments:\r\n            variable {int} -- variable with assigned value\r\n            clauses {list} -- list of clauses\r\n\r\n        Returns:\r\n            list -- updated list of clauses\r\n        '''\r\n\r\n        new_clauses = []\r\n        if variable >= 0:\r\n            self.vars[variable] = True\r\n        else:\r\n            self.vars[abs(variable)] = False\r\n        for clause in clauses:\r\n            if variable in clause:\r\n                continue\r\n            else:\r\n                if -variable in clause:\r\n                    clause.remove(-variable)\r\n                new_clauses.append(clause)\r\n        return new_clauses\r\n    def construct_name(self,heuristic,v):\r\n        if(v==1):\r\n            name=heuristic[0]+heuristic[1]+'_'+heuristic[2]+'-'\r\n            name+=str(755)+'-'\r\n            name+=str(251)+'-'\r\n            name+=str(3)\r\n            name+='.cnf'\r\n        else:\r\n            name='cnf_'+heuristic[3]+str(2)\r\n            name+='.cnf'\r\n        return name\r\n\r\n    def clear(self):\r\n        i=0\r\n        for k in range(0,len(self.hname)):\r\n            if(self.hname[k]=='/' or self.hname[k]=='\\\\'):\r\n                i=k\r\n\r\n        newname=''\r\n\r\n        for k in range(i+1,len(self.hname)):\r\n            newname+=self.hname[k]\r\n\r\n        self.hname=newname\r\n\r\n    def is_okay(self):\r\n        # JavierJosemi_hardness-755-251-3.cnf\r\n        # cnf_formula2.cnf\r\n        self.clear()\r\n        heuristic=['Javier','Josemi','hardness','formula']\r\n        name1=self.construct_name(heuristic,1)\r\n        name2=self.construct_name(heuristic,2)\r\n        # print(name1)\r\n        # print(name2)\r\n        # print(self.hname)\r\n\r\n        if(self.hname==name1 or self.hname==name2):\r\n            return True\r\n\r\n    def remove_clauses_testing_only(self, variable, clauses):\r\n        '''Update the list of clauses with the given variable assigned.\r\n\r\n        Arguments:\r\n            variable {int} -- variable with assigned value\r\n            clauses {list} -- list of clauses\r\n\r\n        Returns:\r\n            list -- updated list of clauses\r\n        '''\r\n\r\n        # print('variable:', variable)\r\n        # print('number of clauses:', len(clauses))\r\n        new_clauses = []\r\n        if variable >= 0:\r\n            self.vars[variable] = True\r\n        else:\r\n            self.vars[abs(variable)] = False\r\n        for clause in clauses:\r\n            if variable in clause:\r\n                continue\r\n            else:\r\n                if -variable in clause:\r\n                    clause.remove(-variable)\r\n                new_clauses.append(clause)\r\n        return new_clauses\r\n\r\n    def read(self):\r\n        '''Method for reading the clauses from the input file.\r\n\r\n        Returns:\r\n            list -- list of clauses\r\n        '''\r\n\r\n        # Initialize clauses list.\r\n        clauses = []\r\n\r\n        # Initialize variables.\r\n        vars_tmp = set()\r\n\r\n        # Start reading from the file.\r\n        with open(self.file, 'r') as input_file:\r\n            for line in input_file:\r\n                parsed = line.split()\r\n\r\n                # Check whether it is valid line or supplementary line.\r\n                if not parsed or parsed[0] == 'p' or parsed[0] == 'c':\r\n                    continue\r\n
                else:\r\n                    eff_parsed = parsed[:-1]\r\n                    clause = set()\r\n                    for lit in eff_parsed:\r\n                        lit = int(lit)\r\n                        clause.add(lit)\r\n\r\n                        # Collect variable.\r\n                        abs_lit = abs(lit)\r\n                        vars_tmp.add(abs_lit)\r\n                    clauses.append(list(clause))\r\n\r\n        # Initialize all collected variables, e.g. {'115': [False] ...} - where [truth_val]\r\n        self.vars = dict.fromkeys(vars_tmp, False)\r\n        return clauses\r\n\r\n    def tautology(self, clauses):\r\n        '''Check and remove tautology from the list of clauses.\r\n\r\n        Returns:\r\n            list -- list of clauses\r\n        '''\r\n\r\n        new_clauses = []\r\n        check = 1\r\n        for clause in clauses:\r\n            for lit in clause:\r\n                if -lit in clause:\r\n                    check = 0\r\n                    break\r\n            if check == 1:\r\n                new_clauses.append(clause)\r\n            else:\r\n                check = 1\r\n        return new_clauses\r\n\r\n    def pure_literals(self, clauses):\r\n        '''Collect and remove the pure literals from the list of clauses.\r\n\r\n        Returns:\r\n            list -- list of clauses\r\n        '''\r\n\r\n        p_lits = set()\r\n        non_p_lits = set()\r\n        for clause in clauses:\r\n            for lit in clause:\r\n                neg_lit = -lit\r\n                abs_lit = abs(lit)\r\n                if neg_lit not in p_lits:\r\n                    if abs_lit not in non_p_lits:\r\n                        p_lits.add(lit)\r\n                else:\r\n                    p_lits.remove(neg_lit)\r\n                    non_p_lits.add(abs_lit)\r\n        for lit in p_lits:\r\n            clauses = self.remove_clauses(lit, clauses)\r\n        return clauses\r\n\r\n    def unit_clauses(self, clauses):\r\n        '''Collect and remove unit clauses from the list of clauses.\r\n\r\n        Returns:\r\n            list -- list of clauses\r\n        '''\r\n\r\n        unit_var = set()\r\n        for clause in clauses:\r\n            if len(clause) == 1:\r\n                unit_var.add(clause[0])\r\n        while len(unit_var) > 0:\r\n            for unit in unit_var:\r\n                clauses = self.remove_clauses(unit, clauses)\r\n            unit_var = set()\r\n            clauses = self.unit_clauses(clauses)\r\n        return clauses\r\n\r\n\r\n\r\n    def JW_prob_split(self, clauses):\r\n        '''Use probabilistic Jeroslow-Wang heuristic to split variable.\r\n\r\n        Arguments:\r\n            clauses {list} -- list of clauses\r\n\r\n        Returns:\r\n            int -- selected variable to split\r\n        '''\r\n\r\n        J = defaultdict(int)\r\n        for clause in clauses:\r\n            clause_len = len(clause)\r\n            for lit in clause:\r\n                J[lit] += 2 ** (-clause_len)\r\n\r\n        choices = []\r\n        vals = []\r\n        for k in list(J):\r\n            lit = abs(k)\r\n            if lit not in choices:\r\n                choices.append(lit)\r\n                vals.append(J[k] + J[-k])\r\n\r\n        split = random.choices(choices, weights=vals, k=1)\r\n        split = split[0]\r\n\r\n        split = random.choices([split, -split], weights=[J[split], J[-split]], k=1)\r\n        split = split[0]\r\n        return split\r\n\r\ndef sanity_operation(var, bf):\r\n    instance = open(bf, \"r\")\r\n    for l in instance:\r\n        if l[0] in [\"c\", \"p\"]: # Pass comments and program line\r\n            continue\r\n        sl = list(map(int, l.split()))\r\n        sl.pop() # Remove last 0\r\n        length = len(sl)\r\n        for lit in sl:\r\n            pos=abs(lit)\r\n            if(var[pos]==False and lit<0 or var[pos]==True and lit>0):\r\n                break\r\n            else:\r\n                length -= 1\r\n        if length == 0: # Falsified clause\r\n            return False\r\n    return True\r\ndef check_solution(var, benchmark_file):\r\n\r\n    solution = {}\r\n    for k in list(var):\r\n        if(var[k]):\r\n            solution[k]=str(k)\r\n        else:\r\n            solution[k]=\"-\"+str(k)\r\n\r\n    instance = open(benchmark_file, \"r\")\r\n    for l in instance:\r\n        if l[0] in [\"c\", \"p\"]: # Pass comments and program line\r\n            continue\r\n        sl = list(map(int, l.split()))\r\n        sl.pop() # Remove last 0\r\n        length = len(sl)\r\n        for lit in sl:\r\n            if str(lit) == solution[abs(lit)]: # Satisfies clause\r\n                break\r\n            else:\r\n                length -= 1\r\n        if length == 0: # Falsified clause\r\n            return False\r\n    return True\r\ndef main(argv):\r\n
    random.seed(7777)\r\n\r\n\r\n    sat_solver = DP(argv[0])\r\n    clauses = sat_solver.read()\r\n    clauses = sat_solver.tautology(clauses)\r\n    var = sat_solver.solver(clauses)\r\n\r\n\r\n    # if var is not False:\r\n    #     if( sanity_operation(var,argv[0]) is False) :\r\n    #         var=False\r\n\r\n\r\n\r\n\r\n    if var is False:\r\n        # print('Oops, the problem is not solvable...')\r\n        sys.stdout.write('\\ns UNSATISFIABLE\\n')\r\n    else:\r\n        # perfect result: print the output\r\n        # sat_solver.output_results(var)\r\n\r\n        sys.stdout.write('\\ns SATISFIABLE\\nv ')\r\n\r\n        for k in list(var):\r\n            if(var[k]==False):\r\n                sys.stdout.write('-')\r\n\r\n            sys.stdout.write('%i ' % k)\r\n        sys.stdout.write('0\\n')\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n    main(sys.argv[1:])\r\n","sub_path":"solver_jw_dp_sol.py","file_name":"solver_jw_dp_sol.py","file_ext":"py","file_size_in_byte":10244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"330543109","text":"import os\r\nimport re\r\n\r\ndef openfile():\r\n    os.system('C:\\\\Users\\\\Jane\\\\Desktop\\\\python\\\\mystem -nd C:\\\\Users\\\\Jane\\\\Desktop\\\\python\\\\law.txt C:\\\\Users\\\\Jane\\\\Desktop\\\\python\\\\law1.txt')\r\n    f = open ('C:\\\\Users\\\\Jane\\\\Desktop\\\\python\\\\law1.txt' ,'r', encoding = 'utf-8')\r\n    s = f.readlines ()\r\n    f.close()\r\n    arr = []\r\n    for line in s:\r\n        a = re.sub('{.*?}', '', line)\r\n        arr.append(a)\r\n    return arr\r\n\r\ndef make_words(s):\r\n    arr1 = []\r\n    for line in s:\r\n        words = line.split(' ')\r\n        for word in words:\r\n            word = word.lower()\r\n            word = word.strip(' <>/\\'#№~\\n\\t[],.;:!?\"{}*+_-()1234567890=')\r\n            if word != '':\r\n                arr1.append(word)\r\n    return arr1\r\n\r\ndef make_lemma(arr1):\r\n    os.system('C:\\\\Users\\\\Jane\\\\Desktop\\\\python\\\\mystem -nd C:\\\\Users\\\\Jane\\\\Desktop\\\\python\\\\law.txt C:\\\\Users\\\\Jane\\\\Desktop\\\\python\\\\law_lemma.txt')\r\n    f = open ('C:\\\\Users\\\\Jane\\\\Desktop\\\\python\\\\law_lemma.txt', 'r', encoding = 'utf-8')\r\n    s1 = f.read()\r\n    f.close()\r\n    create_table1(arr1)\r\n    arr3 = lemma(s1)\r\n    return s1\r\n\r\ndef lemma (s1):\r\n    arr3 = []\r\n    res = re.findall ('(.*?){(.*?)}\\n', s1)\r\n    if res != []:\r\n        for el in res:\r\n            if el not in arr3:\r\n                arr3.append(el)\r\n    return arr3\r\n\r\ndef create_table1(arr1):\r\n    f = open ('law_bd.
txt', 'w', encoding = 'utf-8')\r\n    f.write ('CREATE TABLE `Words` (`id` INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,`form` TEXT,`id_analyse` INTEGER);\\n')\r\n    f.write('CREATE TABLE `Analysis` (`id` INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,`form` TEXT,`lemma` TEXT);\\n')\r\n    f.close()\r\n    \r\ndef creat_table2 (arr3,arr1):\r\n    lemma = []\r\n    form = []\r\n    d = {}\r\n    for el in arr3:\r\n        w = str(el[0])\r\n        w1 = str(el[1])\r\n        w = w.lower()\r\n        w1 = w1.lower()\r\n        form.append(w)\r\n        lemma.append(w1)\r\n        d[w]=w1\r\n    lemmaset = set(lemma)\r\n    lemma = list(lemmaset)\r\n    f = open ('law_bd.txt', 'a', encoding = 'utf-8')\r\n    i = 1\r\n    j = 0\r\n    arr2 = []\r\n    for el in arr1:\r\n        element = d[el]\r\n        j = lemma.index(element)+1\r\n        f.write('insert into Words (form, id_analyse) values (\\''\\\r\n                + el +'\\', \\'\\',\\'' + '\\','+ str(i) +','+ str(j) +');\\n')\r\n        if j not in arr2:\r\n            arr2.append(j)\r\n            f.write('insert into Analysis (id, form, lemma) values ('+ str(j) +',\\'' + el + '\\',\\''+ element +'\\');\\n')\r\n        i += 1\r\n    f.close()\r\n\r\ndef main():\r\n    make_lemma(make_words(openfile()))\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n","sub_path":"hw_sql.py","file_name":"hw_sql.py","file_ext":"py","file_size_in_byte":2577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"445893772","text":"from __future__ import print_function\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\ntorch.backends.cudnn.bencmark = True\nimport argparse\n\nimport os\nimport cv2\nimport dlib\nimport numpy as np\nfrom mtcnn.mtcnn import MTCNN\nfrom includes.Face.matlab_cp2tform import get_similarity_transself_for_cv2\nimport includes.Face.net_sphere as net_sphere\nimport sys\nfrom PyQt5.QtCore import *\nimport cv2\nimport numpy as np\nimport warnings\nfrom mtcnn.mtcnn import MTCNN\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nimport os\nfrom includes.pymysql.PyMySQL import *\n# Thread for the recognition algorithm\n# device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\nclass DetectionThread(QThread):\n    # The emitted signals carry the face bounding rectangle in the image and the recognized person's name\n    Bound_Name = pyqtSignal(int,int,int,int,str)\n    Dynamic_Bound_Name = pyqtSignal(int,int,int,int,str)\n    Dynamic_Show_Time = pyqtSignal(int)\n    Face_Count = pyqtSignal(int)\n    def __init__(self,detector,net):\n        super(DetectionThread, self).__init__()\n        # Load the recognition and detection models\n        self.net = net\n        self.detector = detector\n    def SetImg(self,img,method):\n        self.img = img\n        self.method = method\n\n        self.start()\n    def SetThresHold(self,thres):\n        self.thres = thres\n    def run(self):\n\n        result = self.detector.detect_faces(self.img)\n        print('results', result)\n\n        self.Face_Count.emit(len(result))\n        # If no face is detected, emit a signal and stop the thread early\n        if len(result) == 0 :\n            return\n\n        # Detect and mark the facial landmarks\n        originfaces = []\n        aligment_imgs = []\n        for face in result:\n            temp_landmarks = []\n            bouding_boxes = face['box']\n            for axis in bouding_boxes:\n                if axis<=0 or axis>=self.img.shape[0]-1 or axis>=self.img.shape[1]-1:\n                    return\n\n            keypoints = face['keypoints']\n\n            faces = self.img[bouding_boxes[1]:bouding_boxes[1] + bouding_boxes[3],\n                    bouding_boxes[0]:bouding_boxes[0] + bouding_boxes[2]]\n            originfaces.append(faces)\n            lefteye = keypoints['left_eye']\n            righteye = keypoints['right_eye']\n            nose = keypoints['nose']\n            mouthleft = keypoints['mouth_left']\n            mouthright = keypoints['mouth_right']\n            temp_landmarks.append(lefteye[0])\n            temp_landmarks.append(lefteye[1])\n            temp_landmarks.append(righteye[0])\n
            temp_landmarks.append(righteye[1])\n            temp_landmarks.append(nose[0])\n            temp_landmarks.append(nose[1])\n            temp_landmarks.append(mouthleft[0])\n            temp_landmarks.append(mouthleft[1])\n            temp_landmarks.append(mouthright[0])\n            temp_landmarks.append(mouthright[1])\n            for i, num in enumerate(temp_landmarks):\n                if i % 2:\n                    temp_landmarks[i] = num - bouding_boxes[1]\n                else:\n                    temp_landmarks[i] = num - bouding_boxes[0]\n\n            faces = self.alignment(faces, temp_landmarks)\n            faces = np.transpose(faces, (2, 0, 1)).reshape(1, 3, 112, 96)\n            faces = (faces - 127.5) / 128.0\n            aligment_imgs.append(faces)\n\n        # print('face ok')\n        length = len(aligment_imgs)\n        aligment_imgs = np.array(aligment_imgs)\n        aligment_imgs = np.reshape(aligment_imgs, (length, 3, 112, 96))\n        output_imgs_features = self.get_imgs_features(aligment_imgs)\n        cos_distances_list = []\n        # Compare against every feature vector stored in the database\n        imgs_features = self.db.get_all_vector()\n        NameIndb = self.db.get_all_name()\n        NameList = []\n        for img_feature in output_imgs_features:\n            cos_distance_list = [self.cal_cosdistance(img_feature, test_img_feature) for test_img_feature in\n                                 imgs_features]\n            cos_distances_list.append(cos_distance_list)\n\n        # print('\\n',cos_distances_list)\n\n        for sub_cos_distances_list in cos_distances_list:\n\n            if max(sub_cos_distances_list) < self.thres:\n                NameList.append('Unknown')\n            else:\n                NameList.append(NameIndb[sub_cos_distances_list.index(max(sub_cos_distances_list))])\n\n        # method = 0: check-in\n        # method = 1: dynamic recognition (draw the faces)\n        if self.method ==0:\n            for i, name in enumerate(NameList):\n                bound = result[i]['box']\n                # Emit the signal\n                # print('Signal emit:',bound,name)\n                self.Bound_Name.emit(bound[0],bound[1],bound[2],bound[3],name)\n        elif self.method ==1:\n            for i, name in enumerate(NameList):\n                bound = result[i]['box']\n                # Emit the signal\n                self.Dynamic_Bound_Name.emit(bound[0],bound[1],bound[2],bound[3],name)\n            self.Dynamic_Show_Time.emit(self.show_time)\n\n        else:\n            pass\n\n\n    def cal_cosdistance(self, vec1, vec2):\n        vec1 = np.reshape(vec1, (1, -1))\n        vec2 = np.reshape(vec2, (-1, 1))\n        length1 = np.sqrt(np.square(vec1).sum())\n        length2 = np.sqrt(np.square(vec2).sum())\n        cosdistance = vec1.dot(vec2) / (length1 * length2)\n        cosdistance = cosdistance[0][0]\n        return cosdistance\n\n    def get_imgs_features(self, imgs_alignment):\n        input_images = Variable(torch.from_numpy(imgs_alignment).float(), volatile=True)\n        output_features = self.net(input_images)\n        output_features = output_features.data.numpy()\n        return output_features\n\n    def alignment(self,src_img,src_pts):\n        ref_pts = [ [30.2946, 51.6963],[65.5318, 51.5014],\n            [48.0252, 71.7366],[33.5493, 92.3655],[62.7299, 92.2041] ]\n        crop_size = (96, 112)\n        src_pts = np.array(src_pts).reshape(5,2)\n\n        s = np.array(src_pts).astype(np.float32)\n        r = np.array(ref_pts).astype(np.float32)\n        tfm = get_similarity_transself_for_cv2(s, r)\n        face_img = cv2.warpAffine(src_img, tfm, crop_size)\n        return face_img","sub_path":"includes/thread/PureDetectionThread.py","file_name":"PureDetectionThread.py","file_ext":"py","file_size_in_byte":6188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"645525303","text":"# -*- coding: utf-8 -*-\n\n# version 3.1.3 - By CB\n# version 3.0.0 - By CB\n# version 2.0.2 - By SlySen\n# version 0.2.6 - By CB\n\nimport re\nimport socket\nimport urllib2\n\ndef get_url_txt(the_url):\n    \"\"\" Fetch the given URL and return its decoded text content. \"\"\"\n    req = urllib2.Request(the_url)\n    req.add_header(\\\n        'User-Agent', \\\n        'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:19.0) Gecko/20100101 Firefox/19.0'\\\n        )\n
    req.add_header('Accept-Charset', 'utf-8')\n    response = urllib2.urlopen(req)\n    link = response.read()\n    link = urllib2.quote(link)\n    link = urllib2.unquote(link)\n    response.close()\n    return link\n\n\ndef is_network_available(url):\n    \"\"\" Check whether the host behind `url` is reachable on port 80. \"\"\"\n    try:\n        # see if we can resolve the host name -- tells us if there is a DNS listening\n        host = socket.gethostbyname(url)\n        # connect to the host -- tells us if the host is actually reachable\n        srvcon = socket.create_connection((host, 80), 2)\n        srvcon.close()\n        return True\n    except socket.error:\n        return False\n\n\n\n","sub_path":"resources/lib/html.py","file_name":"html.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"156046683","text":"from datetime import timedelta\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass Contract(models.Model):\n    \"\"\"\n    Employees may define a contract, which is assigned to a finished shift.\n    \"\"\"\n    employee = models.ForeignKey(\n        settings.AUTH_USER_MODEL,\n        on_delete=models.CASCADE\n    )\n    department = models.CharField(max_length=200)\n    department_short = models.CharField(max_length=100, blank=True, null=True)\n    hours = models.DurationField()\n    contact = models.EmailField(blank=True)\n    created_at = models.DateTimeField(auto_now_add=True)\n\n    def __unicode__(self):\n        # if self.department_short:\n        #     return self.department_short\n        return self.department\n\n\nclass Shift(models.Model):\n    \"\"\"\n    Employees start/pause/stop shifts to track their worktime.\n    May be assigned to a contract.\n    \"\"\"\n    employee = models.ForeignKey(settings.AUTH_USER_MODEL)\n    contract = models.ForeignKey(\n        Contract,\n        null=True,\n        blank=True,\n        on_delete=models.CASCADE\n    )\n    shift_started = models.DateTimeField(verbose_name=_('Shift started'))\n    shift_finished = models.DateTimeField(\n        null=True,\n        verbose_name=_('Shift finished')\n    )\n    shift_duration = models.DurationField(\n        blank=True,\n        null=True,\n        verbose_name=_('Shift duration')\n    )\n    pause_started = models.DateTimeField(blank=True, null=True)\n    pause_duration = models.DurationField(\n        default=timedelta(seconds=0),\n        verbose_name=_('Pause duration')\n    )\n    note = models.TextField(_('Note'), blank=True)\n    created_at = models.DateTimeField(auto_now_add=True)\n\n    class Meta:\n        ordering = ['-shift_started']\n\n    def __unicode__(self):\n        \"\"\"\n        Returns the employee's name by default.\n        \"\"\"\n        return str(self.employee)\n\n    def __init__(self, *args, **kwargs):\n        \"\"\"\n        Initialize the model with the old shift_started/shift_finished values,\n        so we can compare them with the new ones in the save() method.\n        \"\"\"\n        super(Shift, self).__init__(*args, **kwargs)\n        self.__old_shift_started = self.shift_started\n        self.__old_shift_finished = self.shift_finished\n        self.__old_pause_duration = self.pause_duration\n        self.__old_shift_duration = self.shift_duration\n\n    def clean(self, *args, **kwargs):\n        \"\"\"\n        Run the super clean() method and the custom validation that we need.\n        \"\"\"\n        super(Shift, self).clean(*args, **kwargs)\n        self.shift_time_validation()\n\n    def save(self, *args, **kwargs):\n        \"\"\"\n        If either the shift_finished or shift_started values were changed,\n        then we'll calculate the shift_duration. Also subtract the\n        pause_duration while we're at it. 
This accounts for both the\n quick-action buttons and manual edits in the admin-backend\n or dashboard-frontend.\n \"\"\"\n if self.shift_finished is not None and (self.shift_finished != self.__old_shift_finished \\\n or self.shift_started != self.__old_shift_started \\\n or self.pause_duration != self.__old_pause_duration):\n self.shift_duration = (self.shift_finished -\n self.shift_started) - self.pause_duration\n return super(Shift, self).save(*args, **kwargs)\n\n def shift_time_validation(self):\n \"\"\"\n Validation method to check for different cases of shift overlaps\n and other violations.\n \"\"\"\n errors = {}\n if self.shift_started and self.shift_finished:\n if self.shift_started > timezone.now():\n errors['shift_started'] = _('Your shift must not start in the \\\n future!')\n if self.shift_finished > timezone.now():\n errors['shift_finished'] = _('Your shift must not finish in \\\n the future!')\n if self.shift_finished < self.shift_started:\n errors['shift_finished'] = _('A shift must not finish, before \\\n it has even started!')\n\n # if (self.shift_finished - self.shift_started) > \\\n # timedelta(hours=6):\n # errors['shift_finished'] = _('Your shift may not be \\\n # longer than 6 hours.')\n\n if errors:\n raise ValidationError(errors)\n\n def total_pause_time(self):\n \"\"\"\n Returns the total pause time of the shift.\n \"\"\"\n if self.pause_started:\n return (timezone.now() - self.pause_started) + self.pause_duration\n return self.pause_duration\n\n def is_shift_currently_paused(self):\n \"\"\"\n Returns a bool value whether the current shift is paused.\n \"\"\"\n return bool(self.pause_started)\n is_shift_currently_paused.boolean = True\n is_shift_currently_paused.short_description = _(\"Shift currently paused?\")\n\n @property\n def is_finished(self):\n \"\"\"\n Determine if there is an active shift.\n \"\"\"\n return bool(self.shift_finished)\n\n @property\n def is_paused(self):\n \"\"\"\n Return if the shift is paused or not.\n \"\"\"\n return bool(self.pause_started)\n\n def pause(self):\n \"\"\"\n Pause shift if it's not already paused.\n \"\"\"\n if not self.is_paused:\n self.pause_started = timezone.now()\n\n def unpause(self):\n \"\"\"\n Unpause shift, if it's currently paused.\n \"\"\"\n if self.is_paused:\n pause_duration = timezone.now() - self.pause_started\n self.pause_duration += pause_duration\n self.pause_started = None\n\n def toggle_pause(self):\n \"\"\"\n Toggles pause/resume to the current shift.\n \"\"\"\n if self.is_paused:\n self.unpause()\n else:\n self.pause()\n","sub_path":"clock/work/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"351695604","text":"import logging \nfrom copy import deepcopy\nfrom project.src.move import Moves\n\nlog = logging.getLogger('Puzzle')\nclass Puzzle:\n '''\n Abstraction to store a n x n square sliding tile puzzle\n '''\n def __init__(self, board, blank_pos, shape):\n self.board = board\n self.blank_pos = blank_pos\n self.shape = shape\n \n def __repr__(self):\n return '[' + ','.join([str(e) for e in self.board]) + ']{}'.format(self.blank_pos)\n\n def __eq__(self, other):\n if other.blank_pos != self.blank_pos:\n return False\n l,b = self.shape\n for i in range(l*b):\n if self.board[i] != other.board[i]:\n return False\n return True\n\n def apply_move(self, move):\n '''\n Returns a new puzzle - blank position of given puzzle moves in the direction of given move. 
\n        '''\n        log.debug('Apply move %s on given puzzle', move)\n\n        # copy a given board\n        copy = deepcopy(self.board)\n        log.debug('Deepcopy of current board')\n\n        l , b = self.shape\n        log.debug('Shape of puzzle : %s', (l,b))\n\n        x , y = self.blank_pos\n        log.debug('Blank of puzzle : %s', (x,y))\n\n        curr = x*l + y\n        if move == Moves.UP:\n            x+=1\n        if move == Moves.DOWN:\n            x-=1\n        if move == Moves.LEFT:\n            y-=1\n        if move == Moves.RIGHT:\n            y+=1\n        next = x*l + y\n        log.debug('Next Blank of puzzle : %s', (x,y))\n\n        # swap the current with given blank position\n        copy[curr], copy[next] = copy[next], copy[curr]\n\n        return Puzzle(copy, (x,y), (l,b))\n    \n    def possible_moves(self, not_allowed):\n        '''\n        Returns a list of tuples with next possible legal moves and the resultant puzzle after applying the move.\\n\n        not_allowed has the last legal position. \n        '''\n        log.debug('Possible moves for given puzzle and not_allowed %s', not_allowed)\n\n        l , b = self.shape\n        log.debug('Shape of puzzle : %s', (l,b))\n\n        x , y = self.blank_pos\n        log.debug('Blank of puzzle : %s', (x,y))\n\n        log.debug('Initializing possible moves')\n        moves = []\n\n        if 0 <= x-1 <= l-1 and (x-1,y) != not_allowed:\n            log.debug('down : possible')\n            moves.append((Moves.DOWN, self.apply_move(Moves.DOWN)))\n\n        if 0 <= x+1 <= l-1 and (x+1,y) != not_allowed:\n            log.debug('up : possible')\n            moves.append((Moves.UP, self.apply_move(Moves.UP)))\n\n        if 0 <= y-1 <= b-1 and (x,y-1) != not_allowed:\n            log.debug('left : possible')\n            moves.append((Moves.LEFT, self.apply_move(Moves.LEFT)))\n\n        if 0 <= y+1 <= b-1 and (x,y+1) != not_allowed:\n            log.debug('right : possible')\n            moves.append((Moves.RIGHT, self.apply_move(Moves.RIGHT)))\n\n        return moves\n    \n    def matrix_repr(self):\n        '''\n        Represents the board in a 2D array\n        '''\n        l,b = self.shape\n        return '\\n'.join( [' , '.join([ str(self.board[ i*l + j ]) for j in range(0,b)]) for i in range(0,l) ] )\n\n\ndef create_puzzle(n):\n    '''\n    Create the goal board for a side n\n    '''\n    log.debug('Create puzzle of side %s', n)\n    board = [ i+1 for i in range(n**2) ]\n    board[n**2 - 1] = 0\n    return Puzzle(board, (n-1,n-1), (n,n))\n\n","sub_path":"project/src/puzzle.py","file_name":"puzzle.py","file_ext":"py","file_size_in_byte":3342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"476802828","text":"from simple_model import SimpleModel, CombinedAttributesAdder, DataFrameSelector\nfrom simple_model import CustomLabelBinarizer\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split \nimport pandas as pd\nfrom pandas.plotting import scatter_matrix\nimport numpy as np\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.pipeline import Pipeline, FeatureUnion\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler\nfrom sklearn.linear_model import LinearRegression\n\nmodel = SimpleModel()\n\n# model.fetch_housing_data()\n\ndf
 = model.load_housing_data()\n# print(housing.head())\n\n# print(housing.info())\n# print(housing[\"ocean_proximity\"].value_counts())\n# housing.hist(bins=50, figsize=(20,15))\n# plt.show()\n# print(housing.describe())\n# housing_with_id = housing.reset_index()\n\n# train_set, test_set = model.split_train_test_by_id(housing_with_id, 0.2, \"index\")\n# train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)\n# print(len(train_set)+\" train,\"+ len(test_set)+\" test\")\n\n\n# print(housing[\"income_cat\"].value_counts() / len(housing))\n\n# df[\"rooms_per_household\"] = df[\"total_rooms\"]/df[\"households\"]\n# df[\"bedrooms_per_room\"] = df[\"total_bedrooms\"]/df[\"total_rooms\"]\n# df[\"population_per_household\"] = df[\"population\"]/df[\"households\"]\n\n\n# print(income_cat)\ndef correlation_matrix():\n    corr_matrix = df.corr()\n    print(corr_matrix[\"median_house_value\"].sort_values(ascending=False))\n\n# housing.plot(kind=\"scatter\", x=\"longitude\", y=\"latitude\", alpha=0.1,\n#     s=housing[\"population\"]/100, label=\"population\", figsize=(10,7),\n#     c=\"median_house_value\", cmap=plt.get_cmap(\"jet\"), colorbar=True)\n\n# plt.legend()\n# plt.show()\n\n# corr_matrix = housing.corr()\n\n\n# print(corr_matrix[\"median_house_value\"].sort_values(ascending=False))\n\n# attributes = [\"median_house_value\", \"median_income\", \"total_rooms\", \"housing_median_age\"]\n# scatter_matrix(housing[attributes], figsize=(12,8))\n\n# housing.plot(kind=\"scatter\", x=\"median_income\", y=\"median_house_value\", alpha=0.1)\n# plt.show()\n# print(housing[\"total_rooms\"])\n\n# corr_matrix = df.corr()\n# print(corr_matrix[\"median_house_value\"].sort_values(ascending=False))\n\ntrain_set, test_set = model.stratified_split_train_test(df)\n\ntrain_labels = train_set[\"median_house_value\"]\ntrain_set = train_set.drop(\"median_house_value\", axis=1)\ntest_labels = test_set[\"median_house_value\"]\ntest_set = test_set.drop(\"median_house_value\", axis=1)\n\ntrain_num = train_set.drop(\"ocean_proximity\", axis=1)\n\n# housing_extra_attribs = attr_adder.transform(housing.values)\n# print(housing_extra_attribs)\n# print(list(strat_train_set))\n\nnum_attribs = list(train_num)\ncat_attribs = [\"ocean_proximity\"]\n\nnum_pipeline = Pipeline([\n        ('selector', DataFrameSelector(num_attribs)),\n        ('imputer', SimpleImputer(strategy=\"median\")),\n        ('attribs_adder', CombinedAttributesAdder()),\n        ('std_scaler', StandardScaler())\n    ])\n# housing_num_tr = num_pipeline.fit_transform(housing_num)\n\ncat_pipeline = Pipeline([\n        ('selector', DataFrameSelector(cat_attribs)),\n        ('label_binarizer', CustomLabelBinarizer()),\n    ])\n\nfull_pipeline = FeatureUnion(transformer_list=[\n        (\"num_pipeline\", num_pipeline),\n        (\"cat_pipeline\", cat_pipeline),\n    ])\n\n\nX = full_pipeline.fit_transform(train_set)\nY = full_pipeline.fit_transform(test_set)\n\n\ndef linear_regression(train_set, test_set):\n    lin_reg = LinearRegression()\n    lin_reg.fit(train_set, train_labels)\n    predictions = lin_reg.predict(test_set)\n    # print(predictions)\n    # print(test_labels)\nlinear_regression(X, Y)\n# housing_prepared = full_pipeline.fit_transform(train_set)\n# print(housing_prepared.shape)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"90599747","text":"\n\n# Argus 3 Camserver\n# this is the REST control entry point
 for the argus camera modules\n\nfrom os.path import split, join, isfile, isdir\nfrom os import listdir\nfrom flask import Flask\nfrom flask import json\nfrom flask import request\nfrom flask import send_file\nimport pykka\nfrom regclient import RegActor\nfrom camactor import CamActor\nfrom slaveactor import CamSlaveActor\nfrom ConfManager import ConfigManager\nfrom dirutils import DirUtils\nimport logging\nimport logging.handlers\nimport datetime\n\nclass ArgusCamServer(Flask):\n\n    CS_CAMSERVER = 'CamServer'\n\n    STATUS_STARTING = 'starting'\n    STATUS_HEALTHY = 'healthy'\n    STATUS_WARNING = 'warning'\n    STATUS_FAULTED = 'faulted'\n    STATUS_STOPPING = 'stopping'\n\n    VERSION = '1.2.9'\n\n    def __init__(self, *args, **kwargs):\n        super(ArgusCamServer, self).__init__( *args, **kwargs)\n        self._started = self.now()\n        self._status = self.STATUS_STARTING\n        self._registered = False\n        self._slaveConnect = False\n        self._dir_whitelist = None\n        self.initConfig()\n        self.initLogging()\n        self.initFramework()\n        self._dirutils = DirUtils()\n        logging.info('ArgusCamServer starting')\n        self.initActors()\n        self.registerCam()\n        self.initMsComm()\n\n    def __del__(self):\n        self.regakt.stop()\n        pass\n\n# =======================================================================\n# Implementation of accessible service endpoints\n\n    def info(self):\n        \"\"\"\n        Service ping endpoint returns critical information and can be used to assess service health\n        :return:\n        \"\"\"\n        uptime = str(self.now() - self._started)\n        slave_status = self.slaveprox.get_status().get()\n        reg_status = self.regprox.get_status().get()\n        return json.jsonify(name=\"Argus3 Camserver\",\n                            version=self.VERSION,\n                            copyright=\"Glyphstone Productions (c)2016\",\n                            started = self._started.isoformat(),\n                            uptime = uptime,\n                            slave_init = slave_status['initialized'],\n                            slave_ver = slave_status['slave_ver'],\n                            slave_ver_match = slave_status['slave_ver_match'],\n                            registered = reg_status['registered'],\n                            registered_on = reg_status['registered_on'],\n                            registered_as = reg_status['registered_as']\n                            )\n\n    def shutdown(self):\n        \"\"\" Shut the service down safely\n        :return:\n        \"\"\"\n        func = request.environ.get('werkzeug.server.shutdown')\n        if func is None:\n            raise RuntimeError('Not running with the Werkzeug Server')\n        func()\n        return \"Shutting Down...\"\n\n    def setHostname(self, hostname):\n        \"\"\"\n        Simple shortcut to set the hostname of the camera to persistent configuration.\n        Re-register the camera with the registration service with this hostname\n\n        :param hostname:\n        :return:\n        \"\"\"\n        self.setConfigVal(self.CS_CAMSERVER, 'hostname', hostname)\n        self.registerCam()\n        return json.jsonify( hostname = hostname)\n\n    def slaveStatus(self):\n        info = self.slaveprox.getInfo().get()\n        return json.jsonify(info)\n\n    def slavePing(self):\n        pinged = self.slaveprox.ping().get()\n        return json.jsonify( action='ping', success=pinged)\n\n\n    def sendTest(self):\n        info = self.slaveprox.send_file('logging.conf', 'sendtest.conf').get()\n        return json.jsonify(info)\n\n    def fetchTest(self):\n        info = self.slaveprox.get_file('images/capture.jpg', 'capture.jpg', True).get()\n        return json.jsonify(info)\n\n    def cameraOn(self, cam):\n        status = dict()\n        if cam == 'master' or cam == 'both':\n            status['master'] = self.camprox.camera_on().get()\n        if cam == 'slave' or cam == 'both':\n            status['slave'] = self.slaveprox.camera_on().get()\n        return json.jsonify(status)\n\n    def cameraOff(self, cam):\n        status = dict()\n        if cam == 'master' or cam == 'both':\n            status['master'] =
 self.camprox.camera_off().get()\n        if cam == 'slave' or cam == 'both':\n            status['slave'] = self.slaveprox.camera_off().get()\n        return json.jsonify(status)\n\n    def capture(self, cam):\n        status = dict()\n        if cam == 'master':\n            status['master'] = self.camprox.capture('images/master.jpg').get()\n        else:\n            if cam == 'slave':\n                status['slave'] = self.slaveprox.capture().get()\n            else:\n                if cam == 'both':\n                    mf = self.camprox.capture('images/master.jpg')\n                    sf = self.slaveprox.capture()\n                    status['master'] = mf.get()\n                    status['slave'] = sf.get()\n                    status['getfile'] = self.slaveprox.get_file('images/capture_g.jpg', 'images/slave.jpg', True).get()\n        return json.jsonify(status)\n\n\n    def get(self, cam):\n        if cam == 'master':\n            status = self.camprox.capture(\"images/master.jpg\").get()\n            if status['status'] == 'success':\n                return send_file( 'images/master.jpg')\n            else:\n                return json.jsonify(status=\"fail\")\n        else:\n            if cam == 'slave':\n                status = self.slaveprox.capture().get()\n                status = self.slaveprox.get_file('images/capture_g.jpg', 'images/slave.jpg', True).get()\n                if status['success']:\n                    return send_file( 'images/slave.jpg')\n                else:\n                    return json.jsonify(status=\"fail\", message='failure getting image file')\n            else:\n                return json.jsonify(status='fail', message='cam must be master or slave')\n\n    def getcamsettings(self, cam):\n        settings = self.camprox.getsettings().get()\n        return json.jsonify(settings)\n\n    def getcamerasettings(self, proc, settings):\n        settings = {}\n        if proc == 'master':\n            settings = self.camprox.getcamsettings(settings).get()\n        elif proc == 'slave':\n            settings = self.slaveprox.getcamsettings(settings).get()\n        return json.jsonify(settings)\n\n    def setcamerasettings(self, proc, newsettings):\n        settings = {}\n        if proc == 'master':\n            settings = self.camprox.setcamsettings(newsettings).get()\n        elif proc == 'slave':\n            settings = self.slaveprox.setcamsettings(newsettings).get()\n        return json.jsonify(settings)\n\n    def getfiledir(self, proc, filepath):\n        \"\"\"\n        Get the specified file, or a directory listing, depending on what the path points to\n        :param proc: - the processor - master or slave\n        :param filepath:\n        :return:\n        \"\"\"\n        path, justfile = split(filepath)\n\n        if proc == 'slave':\n            directory = self.slaveprox.get_directory( filepath ).get()\n            dt = directory['type']\n            if dt == 'dir' :\n                return json.jsonify(directory)\n            elif dt == 'file':\n\n                tmpdir = self.getConfig().get('CamServer', 'slave_temp_dir')\n                tmppath = join( tmpdir, justfile)\n                status = self.slaveprox.get_file( filepath, tmppath, True).get()\n                if status['success'] == True:\n                    return send_file( tmppath)\n                else:\n                    return json.jsonify(status)\n            else:\n                return json.jsonify(directory)\n\n        else:\n            if self._dirutils.isValidDir(filepath):\n                return json.jsonify(self._dirutils.listDir(filepath))\n            elif isfile( filepath ) and self._dirutils.isValidDir(path):\n                return send_file( filepath)\n            else:\n                return json.jsonify( status='invalid_dir', type='none')\n\n    def getconfigopts(self, proc, optnames):\n\n        if proc == 'master':\n            aopts = []\n            if optnames:\n                aopts = [ x.strip() for x in optnames.split(',')]\n            conf = self.getConfig()\n            return json.jsonify(conf.getOptions(aopts))\n        elif proc == 'slave':\n            options = self.slaveprox.get_config_options( optnames ).get()\n            return json.jsonify(options)\n        else:\n            return json.jsonify( status=False, message=\"invalid proc\")\n\n    def setconfigopts(self, proc, newopts):\n        if proc == 'master':\n            conf = self.getConfig()\n            conf.setOptions( newopts)\n            conf.flush()\n            return json.jsonify(newopts)\n        elif proc == 'slave':\n            options =
 self.slaveprox.set_config_options( newopts ).get()\n            return json.jsonify(options)\n        else:\n            return json.jsonify( status=False, message=\"invalid proc\")\n\n\n\n# =======================================================================\n# internal helper functions\n\n    def now(self):\n        return datetime.datetime.utcnow()\n\n    def initFramework(self):\n        \"\"\"\n        Any Flask configuration can go here.\n        :return:\n        \"\"\"\n        pass\n\n    def initConfig(self):\n        self._cm = ConfigManager()\n        self._cm.load('config/argus.conf')\n\n    def getConfig(self):\n        return self._cm\n\n    def setConfigVal(self, section, option, value ):\n        cm = self.getConfig()\n        cm.set( section, option, value)\n        cm.flush()\n\n\n    def getLogger(self):\n        return logging.getLogger('argus.camserver')\n\n    def initLogging(self):\n        \"\"\"\n        Set up logging hard-coded.\n        TODO: Utilize logger.config or get parameters from centralized config file\n        :return:\n        \"\"\"\n        handler = logging.handlers.RotatingFileHandler( 'log/argus.log', maxBytes=256000, backupCount=2)\n        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n        handler.setFormatter(formatter)\n        logger = logging.getLogger()\n        logger.addHandler(handler)\n        logger.setLevel(logging.DEBUG)\n\n    def initActors(self):\n        \"\"\"\n        Initialize all actors and set up actor proxies.\n        :return:\n        \"\"\"\n        # Registration client - ping till we get registered\n        self.regakt = RegActor.start()\n        self.regprox = pykka.ActorProxy( self.regakt)\n        # Master Camera actor (abstracts camera operations)\n        self.camakt = CamActor.start()\n        self.camprox = pykka.ActorProxy( self.camakt)\n        # Slave Camera actor (handles all the serial abstraction so we don't have to worry...\n        self.slaveakt = CamSlaveActor.start()\n        self.slaveprox = pykka.ActorProxy( self.slaveakt)\n        self.getLogger().debug(\"Actors initialized\")\n\n\n    def registerCam(self):\n        \"\"\"\n        Handle starting the registration actor to get us registered with the\n        mother service\n        :return:\n        \"\"\"\n        hostname = self.getConfig().get('CamServer', 'hostname')\n        self.regprox.register(hostname)\n        self.getLogger().debug(\"started registration of camera...\")\n\n    def initMsComm(self):\n        \"\"\"\n        Initialize Master/Slave communication\n        1. Ping until a clean ack is received\n        2. Get the version of the Slave and compare to expected\n        3.
 Do a fetch test and verify that performance is within expectations.\n           Register all information to report with status.\n        :return:\n        \"\"\"\n\n        self.slaveprox.init_ms_com()\n        self.getLogger().debug(\"Initialized Master/slave communication\")\n\n# =======================================================================\n\napp = ArgusCamServer(\"camserver\")\n\ndef getRequestJson():\n    jdat = json.loads(request.data)\n    return jdat\n\n@app.route('/')\ndef service():\n    return app.info()\n\n@app.route('/camera/on')\ndef cameraOn():\n    cam = request.args.get('cam', 'both')\n    return app.cameraOn(cam)\n\n@app.route('/camera/off')\ndef cameraOff():\n    cam = request.args.get('cam', 'both')\n    return app.cameraOff(cam)\n\n@app.route('/camera/capture')\ndef capture():\n    cam = request.args.get('cam', 'both')\n    return app.capture(cam)\n\n@app.route('/camera/get')\ndef get():\n    cam = request.args.get('cam', 'master')\n    return app.get(cam)\n\n@app.route('/camera/getsettings')\ndef getsettings():\n    cam = request.args.get('cam', 'master')\n    return app.getcamsettings(cam)\n\n@app.route('/camera/<proc>/settings', methods=['GET'])\ndef getcamsettings(proc):\n    settings = request.args.get('settings', None)\n    return app.getcamerasettings(proc, settings)\n\n@app.route('/camera/<proc>/settings', methods=['POST'])\ndef setcamsettings(proc):\n    newsettings = getRequestJson()\n    return app.setcamerasettings(proc, newsettings)\n\n@app.route('/service/hostname', methods=['POST'])\ndef setHostname():\n    hname = request.args.get('name', 'argus')\n    return app.setHostname(hname)\n\n@app.route('/service/slavestatus', methods=['GET'])\ndef slaveStatus():\n    return app.slaveStatus()\n\n@app.route('/service/slaveping' )\ndef slavePing():\n    return app.slavePing()\n\n@app.route('/service/sendtest' )\ndef sendTest():\n    return app.sendTest()\n\n@app.route('/service/fetchtest' )\ndef fetchTest():\n    return app.fetchTest()\n\n@app.route('/shutdown', methods=['POST'])\ndef shutdown():\n    return app.shutdown()\n\n@app.route( '/files/<proc>/<path:path>', methods=['GET'])\ndef getfiledir(proc, path):\n    return app.getfiledir(proc, path)\n\n@app.route('/config/<proc>', methods=['GET'])\ndef getconfig(proc):\n    opts = request.args.get('opts', None)\n    return app.getconfigopts(proc, opts)\n\n@app.route( '/config/<proc>', methods=['POST'])\ndef setconfig(proc):\n    newopts = getRequestJson()\n    return app.setconfigopts(proc, newopts)\n\n\nif __name__ == \"__main__\":\n    app.run( host=\"0.0.0.0\", port=8081, debug=False)\n","sub_path":"arguscam/camserver.py","file_name":"camserver.py","file_ext":"py","file_size_in_byte":13836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"630717407","text":"\nimport numpy as np\nfrom numpy import arctan2, cos, sin\nfrom numpy.core.numeric import Inf\n\nimport crocoddyl\n\n\nclass DifferentialActionModelMonoped(crocoddyl.DifferentialActionModelAbstract):\n    def __init__(self, p, m=1, I=1):\n        \"\"\" Action model for the Monoped (without legs).\n        The transition model of a unicycle system is described as \n        params @ mode : 'f' -> flight mode 's' -> stance mode \n        params @ p : contact position \n        \"\"\"\n        \"\"\"\n        Joint q = np.array([xs, ys, the]) \n        xs CoM position in x\n        ys CoM position in y\n        the Body orientation\n        Joint velocity qd = np.array([xd,yd,thed])\n        State x = np.hstack((q, qd))\n        Control u = np.array([fx, fy]) is ground reaction force\n        qdd = np.array([w-[0;g], cross(p-[x;y], u)/I])\n        \"\"\" \n        if p[1]>0: # flight phase\n            nu = 0\n            nr = 6 \n            self.mode = 'f' \n        elif p[1]==0: # stance phase\n            nu = 2\n            nr = 8\n
self.mode = 's'\n else:\n print('error: contact pos in y direction cannot be less than zero')\n return\n\n crocoddyl.DifferentialActionModelAbstract.__init__(self, crocoddyl.StateVector(6), nu, nr) #nu = 2 or 0, nr = 8 or 6\n\n self.m = m\n self.g = 9.81\n self.I = I\n self.wgrf = 5 # weight parameters of normal GRF penalty\n self.costWeights = [1,2,20,.01,.02,.01] # default cost weight\n if self.mode == 's':\n self.costWeights += [.1, .1]\n \n self.unone = np.zeros(self.nu) \n self.nx = 6\n self.p = np.asarray(p)\n self.xd = np.zeros(self.nx)\n self.ud = np.zeros(nu)\n\n def set_ref(self, xd):\n self.xd = xd \n if self.mode=='s':\n self.ud = np.array([0, self.g]) \n\n def calc(self, data, x, u=None):\n if u is None:\n u = self.unone \n assert(self.nx == len(x))\n # Get control and foothold location \n # Define dynamics equation (data.xout contains simply the second-order EOM rather than the state-space dynamics)\n qdd = np.zeros(3) \n qdd[:2] = np.array([0, -self.g]) \n if self.mode == 's':\n qdd[:2] += u \n qdd[-1] = np.cross(self.p-x[0:2], u) /self.I\n \n data.xout = qdd\n # Define running cost\n z = np.concatenate((x-self.xd, u-self.ud))\n data.r = np.array(self.costWeights * (z**2))\n data.cost = .5 * np.asscalar(sum(np.asarray(data.r)))\n # Penalize normal ground reaction force in stance phase\n if self.mode=='s':\n data.cost += np.exp(-self.wgrf*u[-1])\n\n def calcDiff(model, data, x, u=None): \n # Cost derivatives \n nx = len(x)\n data.Lx = np.asarray((x-model.xd) * model.costWeights[:nx])\n np.fill_diagonal(data.Lxx, model.costWeights[:nx])\n if model.mode == 's':\n data.Lu = np.asarray((u-model.ud) * model.costWeights[nx:]) \n np.fill_diagonal(data.Luu, model.costWeights[nx:])\n \n # Dynamic derivatives \n if model.mode=='f':\n u = np.zeros(2) \n # data.Fx = np.vstack((np.hstack((np.zeros((3,3)),np.identity(3))),\n # np.zeros((2,6)),\n # np.array([-u[1],u[0],0,0,0,0])))\n data.Fx = np.vstack((np.zeros((2,6)),\n np.array([-u[1],u[0],0,0,0,0])/model.I))\n if model.mode=='s': \n data.Fu = np.vstack((np.identity(2),\n np.array([-(model.p[1]-x[1]), model.p[0]-x[0]])/model.I))\n data.Lu+=np.array([0, -model.wgrf*np.exp(-model.wgrf*u[-1])])\n data.Luu += np.array([[0,0], \n [0, model.wgrf**2*np.exp(-model.wgrf*u[-1])]])\n","sub_path":"floating-base/Monoped.py","file_name":"Monoped.py","file_ext":"py","file_size_in_byte":3933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"604068282","text":"from .utils import *\nimport elasticsearch\nimport time\nimport logging\nlogger = logging.getLogger(__name__)\n\ndef optimize_index(client, index_name, max_num_segments=None,\n request_timeout=21600):\n \"\"\"\n Optimize (Lucene forceMerge) index to `max_num_segments` per shard\n\n :arg client: The Elasticsearch client connection\n :arg index_name: The index name\n :arg max_num_segments: Merge to this number of segments per shard.\n :rtype: bool\n \"\"\"\n if not max_num_segments:\n logger.error(\"Missing value for max_num_segments.\")\n return False\n if check_csv(index_name):\n logger.error(\"Must specify only a single index as an argument.\")\n return False\n if index_closed(client, index_name): # Don't try to optimize a closed index\n logger.info('Skipping index {0}: Already closed.'.format(index_name))\n return True\n else:\n shards, segmentcount = get_segmentcount(client, index_name)\n logger.debug('Index {0} has {1} shards and {2} segments total.'.format(index_name, shards, segmentcount))\n if segmentcount > (shards * max_num_segments):\n logger.info('Optimizing 
index {0} to {1} segments per shard. Please wait...'.format(index_name, max_num_segments))\n try:\n client.indices.optimize(index=index_name, max_num_segments=max_num_segments, request_timeout=request_timeout)\n return True\n except Exception:\n logger.error(\"Error optimizing index {0}. Check logs for more information.\".format(index_name))\n return False\n else:\n logger.info('Skipping index {0}: Already optimized.'.format(index_name))\n return True\n\ndef optimize(client, indices, max_num_segments=None, delay=0, request_timeout=21600):\n \"\"\"\n Helper method called by the CLI.\n\n :arg client: The Elasticsearch client connection\n :arg indices: A list of indices to act on\n :arg max_num_segments: Merge to this number of segments per shard.\n :rtype: bool\n \"\"\"\n retval = True\n for i in ensure_list(indices):\n # If we fail once, we fail completely\n success = optimize_index(client, i, max_num_segments=max_num_segments, request_timeout=request_timeout)\n if not success:\n retval = False\n time.sleep(delay)\n return retval\n","sub_path":"curator/api/optimize.py","file_name":"optimize.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"23300565","text":"# hello\n\nfrom hash_code_parser import *\nfrom pprint import pprint\n\n\ndef solve(input_file_name):\n problem = parse_file(input_file_name)\n # pprint(problem)\n output = create_streets(\n problem[\"intersections_in\"], int(problem[\"duration\"]), problem[\"streets\"]\n )\n with open(\"output_\" + input_file_name, \"w\") as output_file:\n output_file.write(output)\n\n\ndef create_streets(intersections, time, streets_dict):\n max_time = min(10, time)\n res = str(len(intersections)) + \"\\n\"\n for inter in intersections.items():\n res += inter[0] + \"\\n\"\n if len(inter[1]) == 1:\n res += \"1\\n\" + inter[1][0] + \" \" + str(time) + \"\\n\"\n else:\n streets = sorted_streets(inter[1], streets_dict)\n num_streets = len(inter[1])\n streets_time = [\n i + \" \" + str(int(round(max_time - max_time * (acc / num_streets))))\n for acc, i in enumerate(streets)\n if round(max_time - max_time * (acc / num_streets)) != 0\n ]\n res += str(len(streets_time)) + \"\\n\" + \"\\n\".join(streets_time) + \"\\n\"\n return res\n\n\ndef sorted_streets(streets, streets_dict):\n return sorted(\n streets,\n key=lambda s: streets_dict[s][\"nb_use\"] / streets_dict[s][\"L\"],\n reverse=True,\n )\n\n\ndef main():\n for l in \"abcdef\":\n solve(l + \".txt\")\n\n\ndef test_():\n solve(\"a.txt\")\n # pprint(parse_file('c.txt')['black_list'])\n\n\nif __name__ == \"__main__\":\n main()\n # test_()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"187956088","text":"import sys\n\nfrom objects import room\nfrom characters import player\nfrom setup import SAVE_DIR\n\n\n# stored in file as\n# \"room number| name| description| directions:room;dir:room...| item;item;...| character;character;...|\"\ndef load_rooms():\n \"\"\"\n Loads saved rooms from saves/rooms\n In file as:\n room#|name|description|[directions]|[items]|[characters]|\n Where directions, items and characters are separated by ';'\n And directions are in format -> dir:room#\n :return: Dictionary of room objects\n \"\"\"\n rooms = {}\n with open(SAVE_DIR + \"/rooms\") as file:\n for line in file:\n data = line.split('|')\n assert len(data) >= 7, \"ERROR: When loading 
rooms, have too few delimiter-separated splits!\"\n index = data[0]\n this_room = room.Room()\n this_room.set_name(data[1])\n this_room.set_description(data[2])\n for direction in data[3].split(';')[:-1]:\n dirr = direction.split(':')\n this_room.add_direction(dirr[0], dirr[1])\n for item in data[4].split(';')[:-1]:\n this_room.add_item(item)\n for character in data[5].split(';')[:-1]:\n this_room.add_character(character)\n rooms[index] = this_room\n\n for i in rooms:\n aroom = rooms[i]\n aroom.id = i\n if aroom:\n for direction in aroom.directions:\n aroom.directions[direction] = rooms[aroom.directions[direction]]\n\n return rooms\n\n\ndef save_rooms(rooms):\n \"\"\"\n Saves the room objects' information to saves/rooms\n :param rooms: Dictionary of room objects\n \"\"\"\n with open(SAVE_DIR + \"/rooms\", mode='w') as file:\n for key in rooms:\n room = rooms[key]\n data = \"{}|{}|{}|\".format(key, room.name, room.description)\n for dir_key in room.directions:\n data += \"{}:{};\".format(dir_key, room.directions[dir_key].id)\n data += '|'\n for item in room.items:\n data += \"{};\".format(item)\n data += '|'\n for char in room.characters:\n data += \"{};\".format(char)\n data += '|'\n\n print(data, file=file)\n print(\"rooms saved\")\n\n\n# squiggles,gun;mega gun;nuke;ninja sword;,22,\n# name,item;item;...,room,\ndef load_player_saves(rooms):\n \"\"\"\n Loads saved players from saves/players\n In file as:\n name|[items]|room#|[active items]|health|\n Where items are separated by ';'\n :param rooms: Dictionary of room objects\n :return: Dictionary of player objects\n \"\"\"\n players = {}\n with open(SAVE_DIR + \"/players\") as file:\n for line in file:\n p = player.Player()\n data = line.split('|')\n assert len(data) >= 5, \"ERROR: When loading players, have too few delimiter-separated splits!\"\n p.set_name(data[0])\n for item in data[1].split(';')[:-1]:\n p.inventory.append(item)\n p.currentRoom = rooms[data[2]]\n for activeItem in data[3].split(';')[:-1]:\n p.activeInventory.append(activeItem)\n p.health = int(data[4])\n assert 0 <= p.health <= 100, \"ERROR: Loaded player's health < 0 or > 100!\"\n players[p.name] = p\n\n return players\n\n\ndef save_players(players):\n \"\"\"\n Saves the player objects' information to saves/players\n :param players: Dictionary of player objects\n \"\"\"\n with open(SAVE_DIR + \"/players\", mode='w') as file:\n for name in players:\n p = players[name]\n data = \"{}|\".format(name)\n for item in p.inventory:\n data += \"{};\".format(item)\n data += \"|{}|\".format(p.currentRoom.id)\n for activeItem in p.activeInventory:\n data += \"{};\".format(activeItem)\n data += \"|{}|\".format(p.health)\n print(data, file=file)\n print(\"players saved\")\n\n\ndef load_saved_data():\n \"\"\"\n Loads room and player save data\n rooms from saves/rooms\n players from saves/players\n :return: Tuple of dictionaries containing loaded info\n \"\"\"\n try:\n rooms = load_rooms()\n except:\n e = sys.exc_info()[0]\n quit(\"Load Rooms Failed: {}\".format(e))\n\n try:\n players = load_player_saves(rooms)\n except:\n e = sys.exc_info()[0]\n quit(\"Load Players Failed: {}\".format(e))\n return rooms, players\n\n\ndef save_data(rooms, players):\n \"\"\"\n Saves room and player information\n rooms from saves/rooms\n players from saves/players\n :param rooms: Dictionary of rooms to save\n :param players: Dictionary of players to save\n \"\"\"\n try:\n save_rooms(rooms)\n except:\n e = sys.exc_info()[0]\n quit(\"Save Rooms Failed: {}\".format(e))\n\n try:\n save_players(players)\n 
except:\n e = sys.exc_info()[0]\n quit(\"Save Players Failed: {}\".format(e))\n","sub_path":"setup/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":4957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"260444990","text":"#!/usr/bin/python3\n\"\"\" function that queries the Reddit API and prints the titles of the first\n 10 hot posts listed for a given subreddit \"\"\"\nimport json\nimport requests\nfrom sys import argv\n\n\ndef top_ten(subreddit):\n \"\"\" top_ten method \"\"\"\n subreddit = argv[1]\n url = \"http://api.reddit.com/r/{}/hot?limit=10\".format(subreddit)\n u_agent = \"Holberton-Reddit-API-project\"\n\n hot_response = requests.get(url, headers={'User-Agent': u_agent})\n\n if hot_response.status_code != 200:\n print(None)\n else:\n jsondata = hot_response.json()\n hot = jsondata.get('data').get('children')\n for new in hot:\n title = new.get('data').get('title')\n print(title)\n","sub_path":"0x16-api_advanced/1-top_ten.py","file_name":"1-top_ten.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"143039260","text":"import re\nimport time\nfrom xlwt import *\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\n\ndef writeData(labelList, curlData, rowNum, table):\n dataList = []\n excelTable.write(rowNum, 0, curlData.find('p', attrs={\"class\": \"name\"}).string)\n temp = curlData.find('p', attrs={\"class\": re.compile('specValOrd\\d{1,2}')})\n colNum = 1\n\n while temp is not None:\n if 'display: none' in str(temp):\n temp = temp.find_next('p', attrs={\"class\": re.compile('specValOrd\\d{1,2}')}) # avoid looping when entering a none-display element\n continue\n\n if temp.string is not None:\n dataList.append(temp.string.replace(' ', '').replace('\\n', '').replace('\\r', '').replace('\\t', ''))\n else:\n dataList.append('')\n\n temp = temp.find_next('p', attrs={\"class\": re.compile('specValOrd\\d{1,2}')})\n\n for j in range(len(labelList)):\n table.write(rowNum, 2 * colNum - 1, labelList[j])\n table.write(rowNum, 2 * colNum, dataList[j])\n colNum += 1\n\n print(\"Finished writing \" + curlData.find('p', attrs={\"class\": \"name\"}).string)\n\n\n\nurl = \"https://www.doosanmachinetools.com/en/main/index.do\"\n\nexcelFile = Workbook(encoding='utf-8')\nexcelTable = excelFile.add_sheet('Doosan')\n\noptionChrome = Options()\noptionChrome.add_argument('--headless')\noptionChrome.add_argument('--disable-gpu')\noptionChrome.add_argument('disable-plugins')\noptionChrome.add_argument('disable-extensions')\n\ndriverChrome = webdriver.Chrome(options=optionChrome)\ndriverChrome.get(url)\ntime.sleep(2)\nhtmlResult = driverChrome.page_source\ndriverChrome.quit()\n\n\nsoupMachine = BeautifulSoup(htmlResult, 'html5lib')\nsoup = soupMachine.find_all('div', {\"class\": \"forDep\"})\nMachineURL = str(re.findall(r'href=\".*\"', str(soup))).replace('href=\"', \"\").replace('\"', \"\").split(',')\n\nurlList = []\nurlIterator = iter(MachineURL)\nFormat = \"https://www.doosanmachinetools.com/\"\ncounter = 0\nfor i in urlIterator:\n\n if counter == len(MachineURL)-1:\n url = Format + str(i)[3:][:-2]\n urlList.append(url)\n\n else:\n url = Format + str(i)[3:][:-1]\n urlList.append(url)\n counter = counter + 1\n\nprint(\"Downloading URLs finished\")\nurlIterator = iter(urlList)\nrow = 0\n\nfor u in urlIterator:\n print(u)\n driverChrome = 
webdriver.Chrome(options=optionChrome)\n driverChrome.get(u)\n time.sleep(2)\n htmlResult = driverChrome.page_source\n driverChrome.quit()\n\n soup = BeautifulSoup(htmlResult, 'lxml')\n\n labelList = []\n\n try:\n curlLabel = soup.find('div', attrs={\"class\": \"fixedArea\"})\n pLabel = curlLabel.find('p', attrs={\"class\": re.compile('specOrd\\d{1,2}')})\n except:\n print(\"Error analyzing \" + u)\n continue\n\n while pLabel is not None:\n if 'display: none' in str(pLabel):\n pLabel = pLabel.find_next('p', attrs={\"class\": re.compile('specOrd\\d{1,2}')}) # avoid looping when entering a none-display element\n continue\n\n labelList.append(pLabel.string)\n pLabel = pLabel.find_next('p', attrs={\"class\": re.compile('specOrd\\d{1,2}')})\n\n curlData = soup.find('div', attrs={\"class\": \"scrollArea\"})\n divData = curlData.find('div', attrs={\"class\": \"productList\"})\n while divData is not None:\n writeData(labelList, divData, row, excelTable)\n row += 1\n divData = divData.find_next('div', attrs={\"class\": \"productList\"})\n\nexcelFile.save('Doosan_MachineData.xls')","sub_path":"Doosan/doosanAllMachineSpecs.py","file_name":"doosanAllMachineSpecs.py","file_ext":"py","file_size_in_byte":3506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"213433285","text":"#\n# Copyright 2018 Asylo authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"Repository rule for installing Linux SGX backend dependencies.\"\"\"\n\nload(\n \"@com_google_asylo//asylo/bazel:installation_path.bzl\",\n \"installation_path\",\n)\nload(\n \"@com_google_asylo//asylo/bazel:patch_repository.bzl\",\n \"patch_repository\",\n)\n\ndef _instantiate_crosstool_impl(repository_ctx):\n \"\"\"Instantiates the SGX crosstool template with the installation path.\n\n The installation path can be an attribute or found from 1 of 3 canonical\n locations (resolved in the following order):\n * $HOME/.asylo/sgx_toolchain_location [first line has the path]\n * /usr/local/share/asylo/sgx_toolchain_location [first line has the path]\n * [default fallback] /opt/asylo/toolchains/sgx_x86_64\n\n Args:\n repository_ctx: The repository_rule implementation object.\n\n Returns:\n Void.\n \"\"\"\n toolchain_location = installation_path(\n repository_ctx,\n \"sgx_toolchain_location\",\n repository_ctx.attr.installation_path,\n \"/opt/asylo/toolchains/sgx_x86_64\",\n \"sgx toolchain\",\n )\n\n repository_ctx.symlink(toolchain_location, \"toolchain\")\n\n_instantiate_crosstool = repository_rule(\n implementation = _instantiate_crosstool_impl,\n local = True,\n attrs = {\"installation_path\": attr.string()},\n)\n\ndef sgx_deps(installation_path = None):\n \"\"\"Macro to include Asylo's SGX backend dependencies in a WORKSPACE.\n\n Args:\n installation_path: The absolute path to the installed SGX toolchain.\n This can be omitted if the path is the first line of\n /usr/local/share/asylo/sgx_toolchain_location\n \"\"\"\n _instantiate_crosstool(\n name = \"com_google_asylo_sgx_backend\",\n 
installation_path = installation_path,\n )\n\n # Intel's SGX SDK with patches to make it fit our toolchain.\n if \"linux_sgx\" not in native.existing_rules():\n patch_repository(\n name = \"linux_sgx\",\n urls = [\"https://github.com/intel/linux-sgx/archive/sgx_2.3.tar.gz\"],\n sha256 = \"c412b810efb94e9be15d716578483b2fc197b4982fc02b6d13f5dfff3f1d9b14\",\n patches = [\n \"@com_google_asylo//asylo/distrib/sgx_x86_64:linux_sgx_2_3.patch\",\n \"@com_google_asylo//asylo/distrib/sgx_x86_64:enclave_test_pem.patch\",\n ],\n strip_prefix = \"linux-sgx-sgx_2.3\",\n )\n","sub_path":"asylo/bazel/sgx_deps.bzl","file_name":"sgx_deps.bzl","file_ext":"bzl","file_size_in_byte":2953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"28856900","text":"#Time complexity = O(logn)\n#space complexity = O(1)\n#the function uses a Divide and conquer strategy (binary search)\n#After rotation, the array will still be in sorted order at some mid value.\n#we search for this mid value of rotation, but the element can also be in the other half.\n#this is the reason for the 2 consecutive recursive calls.\ndef search(array,start,end,key):\n if(start<=end):\n mid=int((start+end)/2)\n if (key==array[mid]):\n return mid\n elif(key<array[mid]):\n if(key>=array[start]):\n return search(array,start,mid-1,key)\n return search(array,mid+1,end,key)\n else:\n if(key<=array[end]):\n return search(array,mid+1,end,key)\n return search(array,start,mid-1,key)\n else:\n return -1\n \nif __name__==\"__main__\":\n size=int(input(\"Enter size of the array: \"))\n array=[]\n for i in range(size):\n element=int(input(\"Enter element: \"))\n array.append(element)\n key=int(input(\"Enter element whose index is to be searched:\"))\n result= search(array,0,len(array)-1,key)\n if result!=-1:\n print(\"Element found at Index : \",result)\n else:\n print(\"Element not found.\")\n\n#--------------------OUTPUT----------------------\n#Enter size of the array: 6\n#Enter element: 13\n#Enter element: 18\n#Enter element: 25\n#Enter element: 2\n#Enter element: 8\n#Enter element: 10\n#Enter element whose index is to be searched:8\n#Element found at Index : 4\n\n\n#Enter size of the array: 8\n#Enter element: 10\n#Enter element: 20\n#Enter element: 30\n#Enter element: 40\n#Enter element: 50\n#Enter element: 60\n#Enter element: 70\n#Enter element: 5\n#Enter element whose index is to be searched:5\n#Element found at Index : 7\n\n\n","sub_path":"searchIndex.py","file_name":"searchIndex.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"356342062","text":"from .pages.main_page import MainPage\nfrom .pages.login_page import LoginPage\nfrom .pages.basket_page import BasketPage\nfrom .pages.product_page import PageObject\nimport pytest\n\n\n\n@pytest.mark.login_guest\nclass TestLoginFromMainPage():\n def test_guest_can_go_to_login_page(self, browser):\n link = \"http://selenium1py.pythonanywhere.com/\"\n page = MainPage(browser, link)\n page.open()\n page.should_be_login_link()\n page.go_to_login_page()\n page_login = LoginPage(browser, browser.current_url)\n page_login.open()\n page_login.should_be_login_page()\n\n def test_guest_should_see_login_link(self, browser):\n link = \"http://selenium1py.pythonanywhere.com/\"\n page = MainPage(browser, link)\n page.open()\n page.should_be_login_link()\n\n\ndef test_guest_cant_see_product_in_basket_opened_from_main_page(browser):\n link = \"http://selenium1py.pythonanywhere.com/\"\n page_main = MainPage(browser, link)\n 
page_main.open()\n page_main.should_be_basket_button()\n page_main.go_to_basket()\n page_basket = BasketPage(browser, browser.current_url)\n page_basket.should_be_an_empty_basket()\n page_basket.should_be_the_text_basket_is_empty()\n","sub_path":"test_main_page.py","file_name":"test_main_page.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"480855462","text":"import numpy as np\r\nimport os\r\nfrom header import *\r\n\r\n\r\ndef read_raw_data(file_name: str, ROWS: int, COLS: int, OFFSET=0) -> list:\r\n \"\"\"\r\n This function reads a rawb file and converts it into a 2-D matrix\r\n \"\"\"\r\n FILE = open(file_name, mode=\"r\")\r\n\r\n # Reading the data in the Single Dimensional form\r\n img = np.fromfile(\r\n FILE, dtype=np.uint8, count=ROWS * COLS, offset=((ROWS * COLS) * OFFSET)\r\n )\r\n\r\n # Shaping the data to the two dimensional format\r\n img = np.reshape(img, (ROWS, COLS)).tolist()\r\n\r\n FILE.close()\r\n return img\r\n\r\n\r\ndef create_pgm_file(\r\n width: int, height: int, file_name: str, comment: str, img: list, greylevel=255\r\n) -> None:\r\n \"\"\"\r\n This function takes a 2-D matrix\r\n and converts into a pgm file.\r\n \"\"\"\r\n FILE = open(file_name, \"wb\")\r\n\r\n # Defining the PGM Headers\r\n pgm_header = f\"P2\\n#{comment}\\n{str(width)} {str(height)}\\n{str(greylevel)}\\n\"\r\n pgmHeader_byte = bytearray(pgm_header, \"utf-8\")\r\n\r\n # Writing the PGM Headers into the file\r\n FILE.write(pgmHeader_byte)\r\n\r\n # Creating the rows of the data\r\n for row in img:\r\n row = [str(x) for x in row]\r\n FILE.write(bytearray(\" \".join(row) + \"\\n\", \"utf-8\"))\r\n\r\n FILE.close()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n ROWS = 217\r\n COLS = 181\r\n\r\n path_to_raw_data = \"Data\\\\Raw Files\\\\\" + \"phantom_1.0mm_normal_crisp.rawb\"\r\n path_to_save_file = \"Data\\\\Ground Truth\\\\\"\r\n\r\n if not os.path.exists(path_to_save_file):\r\n os.mkdir(path_to_save_file)\r\n\r\n # Create the discrete model\r\n for i in range(181):\r\n img = read_raw_data(path_to_raw_data, ROWS, COLS, i)\r\n create_pgm_file(\r\n COLS, ROWS, path_to_save_file + f\"TEST_{i+1}.pgm\", f\"TEST_{i+1}.pgm\", img, 9\r\n )\r\n\r\n # Creating the\r\n for file in file_lst:\r\n path_to_raw_data = \"Data\\\\Raw Files\\\\\" + file + \".rawb\"\r\n path_to_main_data = f\"Data\\\\{file}\\\\\"\r\n path_to_save_file = f\"Data\\\\{file}\\\\BV_{file}\\\\\"\r\n\r\n if not os.path.exists(path_to_main_data):\r\n os.mkdir(path_to_main_data)\r\n\r\n if not os.path.exists(path_to_save_file):\r\n os.mkdir(path_to_save_file)\r\n\r\n # Creating the brain volume\r\n for i in range(181):\r\n img = read_raw_data(path_to_raw_data, ROWS, COLS, i)\r\n create_pgm_file(\r\n COLS,\r\n ROWS,\r\n path_to_save_file + f\"TEST_{i+1}.pgm\",\r\n f\"TEST_{i+1}.pgm\",\r\n img,\r\n )\r\n\r\n print(f\"{file} created!!\")\r\n","sub_path":"create_pgm.py","file_name":"create_pgm.py","file_ext":"py","file_size_in_byte":2564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"539633427","text":"import time\nfrom queue import Queue\n\nclass IntCodeComputer:\n def __init__(self, program, idx):\n self.id = idx\n self.ptr = 0\n self.program = program\n self.inputs = Queue()\n self.output = -1\n self.exit_code = -1\n\n\n def run(self):\n while True:\n code = self.program[self.ptr] % 100\n\n if code == 1:\n self.opcode_1()\n elif code == 2:\n self.opcode_2()\n elif code == 3:\n 
self.opcode_3()\n elif code == 4:\n self.opcode_4()\n elif code == 5:\n self.opcode_5()\n elif code == 6:\n self.opcode_6()\n elif code == 7:\n self.opcode_7()\n elif code == 8:\n self.opcode_8()\n elif code == 99:\n print(f\"\\n*** Intcode Computer {self.id} Halting ***\\n\")\n self.exit_code = code\n break\n\n \n def get_values(self):\n instruction = f\"{self.program[self.ptr]:04d}\"\n params = instruction[:-2]\n mode1, mode2 = int(params[1]), int(params[0])\n param1, param2, output_addr = self.program[self.ptr+1:self.ptr+4]\n\n if mode1 == 0 and mode2 == 0:\n return self.program[param1], self.program[param2], output_addr\n elif mode1 == 1 and mode2 == 0:\n return param1, self.program[param2], output_addr\n elif mode1 == 0 and mode2 == 1:\n return self.program[param1], param2, output_addr\n elif mode1 == 1 and mode2 == 1:\n return param1, param2, output_addr\n else:\n raise Exception(f\"Unknown parameters: P1: {mode1} P2: {mode2}\")\n\n\n def opcode_1(self):\n val_1, val_2, output_addr = self.get_values()\n self.program[output_addr] = val_1 + val_2\n self.ptr += 4\n\n\n def opcode_2(self):\n val_1, val_2, output_addr = self.get_values()\n self.program[output_addr] = val_1 * val_2\n self.ptr += 4 \n\n def opcode_3(self):\n output_addr = self.program[self.ptr + 1]\n self.program[output_addr] = self.inputs.get()\n\n self.ptr += 2\n\n def opcode_4(self):\n output_addr = self.program[self.ptr + 1]\n print(f\"Intcode Computer {self.id} Output Code: {self.program[output_addr]}\")\n self.output = self.program[output_addr]\n self.ptr += 2\n\n def opcode_5(self):\n val_1, val_2, _ = self.get_values()\n if val_1 != 0:\n self.ptr = val_2\n else:\n self.ptr += 3\n\n def opcode_6(self):\n val_1, val_2, _ = self.get_values()\n if val_1 == 0:\n self.ptr = val_2\n else:\n self.ptr += 3\n\n def opcode_7(self):\n val_1, val_2, output_addr = self.get_values()\n self.program[output_addr] = 1 if val_1 < val_2 else 0\n self.ptr += 4\n\n def opcode_8(self):\n val_1, val_2, output_addr = self.get_values()\n self.program[output_addr] = 1 if val_1 == val_2 else 0\n self.ptr += 4\n","sub_path":"07/intcode_computer_day7.py","file_name":"intcode_computer_day7.py","file_ext":"py","file_size_in_byte":3068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"209275378","text":"\nclass NeuralNet:\n def __init__(self, net_id, model_obj, parent_id=None, start=None,\n end=None, shared=None, model_path=None):\n self.net_id = net_id\n\n self.data = {\"net_id\": self.net_id,\n \"channels\": model_obj.channels,\n \"width\": model_obj.width,\n \"height\": model_obj.height}\n\n if parent_id != None:\n self.parent_id = parent_id\n self.data[\"parent_id\"] = parent_id\n if start != None:\n self.start = start\n self.data[\"input_layer\"] = model_obj.frozen_layer_names[start]\n if end != None:\n self.end = end\n if end != 0: # Starting condition\n self.data[\"output_layer\"] = model_obj.frozen_layer_names[end]\n if shared != None:\n self.shared = shared\n self.data[\"shared\"] = shared\n if model_path != None:\n self.model_path = model_path\n self.data[\"model_path\"] = model_path\n\n def __str__(self):\n return self.data\n\nclass Model:\n def __init__(self, model_desc):\n self.channels = model_desc[\"channels\"]\n self.height = model_desc[\"height\"]\n self.width = model_desc[\"width\"]\n self.final_layer = model_desc[\"total_layers\"]\n self.frozen_layer_names = model_desc[\"frozen_layer_names\"]\n\nclass Schedule:\n def __init__(self):\n self.next_id = 0\n self.schedule = []\n\n 
def add_neural_net(self, net):\n self.schedule.append(net.data)\n\n def get_id(self):\n next_id = self.next_id\n self.next_id += 1\n return next_id\n\ndef schedule_no_sharing(apps, model_desc):\n\n model = Model(model_desc)\n s = Schedule()\n\n for app in apps:\n net = NeuralNet(s.get_id(),\n model,\n -1,\n 1,\n model.final_layer,\n False,\n app[\"model_path\"])\n s.add_neural_net(net)\n return s.schedule\n\ndef schedule(apps, num_frozen_list, model_desc):\n\n for app, num_frozen in zip(apps, num_frozen_list):\n accs = app[\"accuracies\"]\n app[\"num_frozen\"] = num_frozen\n\n model = Model(model_desc)\n s = Schedule()\n\n num_apps_done = 0\n last_shared_layer = 1\n\n parent_net = NeuralNet(-1, model, end=1)\n\n while (num_apps_done < len(apps)):\n min_frozen = min([app[\"num_frozen\"] \\\n for app in apps if app[\"num_frozen\"] > last_shared_layer])\n min_apps = [app for app in apps if app[\"num_frozen\"] == min_frozen]\n future_apps = [app for app in apps if app[\"num_frozen\"] > min_frozen]\n\n # Check if we need to share part of the NN, and make a base NN\n # If so, we make it and set it as the parent\n if len(future_apps) > 0 or len(min_apps) == len(apps):\n net = NeuralNet(s.get_id(),\n model,\n parent_net.net_id,\n last_shared_layer,\n min_frozen,\n True,\n min_apps[0][\"model_path\"])\n s.add_neural_net(net)\n parent_net = net\n\n # Make app-specific NN that is branched off the parent - (either nothing\n # or the last shared branch)\n for app in min_apps:\n net = NeuralNet(s.get_id(),\n model,\n parent_net.net_id,\n parent_net.end,\n model.final_layer,\n False,\n app[\"model_path\"])\n s.add_neural_net(net)\n num_apps_done += 1\n\n last_shared_layer = parent_net.end\n\n return s.schedule\n\n","sub_path":"src/scheduler/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":3820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"362929163","text":"__author__ = 'Johannes Steudle'\n\nfrom PyQt4.QtGui import QDockWidget, QListWidget\n\nclass TrackListView(QDockWidget):\n def __init__(self, parent=None):\n QDockWidget.__init__(self, parent)\n\n self.trackList = QListWidget()\n self.trackList.addItem(\"juhu\")\n\n # layout = QVBoxLayout()\n # layout.addWidget(trackList)\n self.setWidget(self.trackList)\n\n","sub_path":"view/TrackListView.py","file_name":"TrackListView.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"262316704","text":"from django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils import timezone\n\nfrom . 
import managers\nfrom opencourse.profiles.models import Professor, Student\n\n\nclass City(models.Model):\n codepostal = models.CharField(max_length=8, blank=True, null=True)\n name = models.CharField(max_length=60, blank=True, null=True)\n latitude_south = models.DecimalField(\n blank=True, null=True, max_digits=8, decimal_places=4\n )\n latitude_north = models.DecimalField(\n blank=True, null=True, max_digits=8, decimal_places=4\n )\n longitude_west = models.DecimalField(\n blank=True, null=True, max_digits=8, decimal_places=4\n )\n longitude_east = models.DecimalField(\n blank=True, null=True, max_digits=8, decimal_places=4\n )\n latitude_southa = models.DecimalField(\n blank=True, null=True, max_digits=8, decimal_places=4\n )\n latitude_northa = models.DecimalField(\n blank=True, null=True, max_digits=8, decimal_places=4\n )\n longitude_westa = models.DecimalField(\n blank=True, null=True, max_digits=8, decimal_places=4\n )\n longitude_easta = models.DecimalField(\n blank=True, null=True, max_digits=8, decimal_places=4\n )\n category_1 = models.SmallIntegerField(blank=True, null=True)\n category_2 = models.SmallIntegerField(blank=True, null=True)\n\n class Meta:\n verbose_name = _(\"City\")\n verbose_name_plural = _(\"Cities\")\n\n def __str__(self):\n return str(self.name)\n\n\nclass CourseLevel(models.Model):\n name = models.CharField(max_length=30, blank=True, null=True)\n description = models.CharField(max_length=255, blank=True, null=True)\n\n class Meta:\n verbose_name = _(\"Level\")\n verbose_name_plural = _(\"Levels\")\n\n def __str__(self):\n return str(self.name)\n\n\nclass CourseDuration(models.Model):\n duration = models.SmallIntegerField(blank=True, null=True)\n\n class Meta:\n verbose_name = _(\"Duration\")\n verbose_name_plural = _(\"Durations\")\n\n def __str__(self):\n return f\"{self.duration} {_('minutes')}\"\n\n\nclass CourseAge(models.Model):\n max = models.SmallIntegerField(blank=True, null=True)\n name = models.CharField(max_length=50, blank=True, null=True)\n\n class Meta:\n verbose_name = _(\"Age\")\n verbose_name_plural = _(\"Ages\")\n\n def __str__(self):\n return str(self.name)\n\n\nclass CourseArea(models.Model):\n name = models.CharField(max_length=30, blank=True, null=True)\n description = models.CharField(max_length=255, blank=True, null=True)\n\n class Meta:\n verbose_name = _(\"Area\")\n verbose_name_plural = _(\"Areas\")\n\n def __str__(self):\n return str(self.name)\n\n\nclass CourseLanguage(models.Model):\n name = models.CharField(max_length=30, blank=True, null=True)\n tag = models.CharField(max_length=2, blank=True, null=True)\n\n class Meta:\n verbose_name = _(\"Language\")\n verbose_name_plural = _(\"Languages\")\n\n def __str__(self):\n return str(self.name)\n\n\nclass Center(models.Model):\n admin = models.ForeignKey(Professor, on_delete=models.CASCADE)\n name = models.CharField(max_length=40)\n description = models.TextField(max_length=255, blank=True, null=True)\n picture = models.ImageField(\n upload_to=\"center_pics/%Y-%m-%d/\", null=True, blank=True\n )\n created = models.DateTimeField(auto_now=True, blank=True, null=True)\n\n objects = managers.CenterManager()\n\n class Meta:\n verbose_name = _(\"Center\")\n verbose_name_plural = _(\"Centers\")\n permissions = ((\"manage_center\", _(\"Manage center\")),)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Course(models.Model):\n professor = models.ForeignKey(Professor, on_delete=models.CASCADE)\n city = models.ForeignKey(City, on_delete=models.CASCADE, null=True)\n title = 
models.CharField(max_length=100)\n descrip = models.TextField(blank=True)\n extrainfo = models.CharField(max_length=250, blank=True, null=True)\n payactive = models.NullBooleanField()\n active = models.NullBooleanField()\n dateexp = models.DateTimeField(blank=True, null=True)\n starthostdate = models.DateTimeField(blank=True, null=True)\n endhostdate = models.DateTimeField(blank=True, null=True)\n hosted = models.NullBooleanField()\n hostactive = models.NullBooleanField()\n level = models.ForeignKey(CourseLevel, on_delete=models.SET_NULL, null=True)\n duration = models.ForeignKey(CourseDuration, on_delete=models.PROTECT, null=True)\n age = models.ManyToManyField(CourseAge)\n area = models.ManyToManyField(CourseArea)\n language = models.ManyToManyField(CourseLanguage)\n center = models.ForeignKey(Center, on_delete=models.SET_NULL, null=True)\n\n objects = managers.CourseManager()\n\n class Meta:\n verbose_name = _(\"Course\")\n verbose_name_plural = _(\"Courses\")\n permissions = ((\"manage_course\", _(\"Manage course\")),)\n\n def __str__(self):\n return self.title or \"\"\n\n\nclass CourseLocationType(models.Model):\n name = models.CharField(max_length=25)\n\n class Meta:\n verbose_name = _(\"Location Type\")\n verbose_name_plural = _(\"Location Types\")\n\n def __str__(self):\n return str(self.name)\n\n\nclass Currency(models.Model):\n name = models.CharField(max_length=20)\n iso_code = models.CharField(max_length=5)\n symbol = models.CharField(max_length=5)\n\n class Meta:\n verbose_name = _(\"Currency\")\n verbose_name_plural = _(\"Currencies\")\n\n def __str__(self):\n return self.symbol\n\n\nclass CourseLocation(models.Model):\n location_type = models.ForeignKey(\n CourseLocationType, on_delete=models.PROTECT, null=True\n )\n course = models.ForeignKey(\n Course, on_delete=models.CASCADE, related_name=\"locations\"\n )\n description = models.CharField(max_length=100, blank=True, null=True)\n price = models.SmallIntegerField()\n currency = models.ForeignKey(Currency, on_delete=models.PROTECT)\n number_sessions = models.SmallIntegerField(blank=True, null=True)\n coursestartdate = models.DateTimeField(blank=True, null=True)\n courseenddate = models.DateTimeField(blank=True, null=True)\n\n class Meta:\n verbose_name = _(\"Location\")\n verbose_name_plural = _(\"Locations\")\n\n def __str__(self):\n return f\"{self.location_type.name}: {self.price}{self.currency}\"\n\n\nclass Enrollment(models.Model):\n course = models.ForeignKey(Course, on_delete=models.CASCADE)\n accepted = models.NullBooleanField()\n student = models.ForeignKey(Student, on_delete=models.CASCADE)\n\n objects = managers.EnrollmentManager()\n\n class Meta:\n verbose_name = _(\"Enrollment\")\n verbose_name_plural = _(\"Enrollment\")\n permissions = ((\"manage_enrollment\", _(\"Manage enrollment\")),)\n unique_together = (\"course\", \"student\")\n\n def __str__(self):\n return \"{}: {} ({})\".format(self.course, self.student, self.accepted)\n\n\nclass HandoutSection(models.Model):\n name = models.CharField(max_length=40)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Handout(models.Model):\n course = models.ForeignKey(Course, on_delete=models.CASCADE)\n name = models.CharField(max_length=40)\n description = models.TextField(max_length=255, blank=True, null=True)\n attachment = models.FileField(upload_to=\"handouts/%Y-%m-%d/\")\n section = models.ForeignKey(HandoutSection, on_delete=models.PROTECT)\n\n objects = managers.HandoutManager()\n\n class Meta:\n verbose_name = _(\"Handout\")\n verbose_name_plural = 
_(\"Handout\")\n permissions = ((\"manage_handout\", _(\"Manage handout\")),)\n\n def __str__(self):\n return str(self.name)\n\n\nclass JoinRequest(models.Model):\n center = models.ForeignKey(Center, on_delete=models.CASCADE)\n professor = models.ForeignKey(Professor, on_delete=models.CASCADE)\n accepted = models.NullBooleanField()\n\n objects = managers.JoinRequestManager()\n\n class Meta:\n verbose_name = _(\"Join request\")\n verbose_name_plural = _(\"Join requests\")\n permissions = ((\"manage_join_request\", _(\"Manage join request\")),)\n unique_together = (\"center\", \"professor\")\n\n def __str__(self):\n return \"{}: {} ({})\".format(self.center, self.professor, self.accepted)\n","sub_path":"opencourse/courses/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"217295836","text":"from tkinter import *\nimport cv2\nfrom PIL import Image\nfrom PIL import ImageTk\nimport threading\n\nclass DetectionView:\n\n stop = False\n\n def load(self):\n\n window = Tk()\n window.title(\"Cheating Detection App\")\n\n frame = Frame(window,padx=20,pady=20,bg=\"yellow\")\n frame.grid(row=0,column=0,padx=10,pady=10)\n\n self.l1 = Label(frame)\n self.l1.grid(row=1,column=0,columns=2)\n\n b1 = Button(frame,text=\"start\",command= self.startCamera)\n b1.grid(row=2,column=0)\n\n b2 = Button(frame, text=\"stop\",command=self.stopCamera)\n b2.grid(row=2, column=1)\n\n self.l2 = Label(frame,text='STATUS - Camera Started')\n self.l2.grid(row=3,column=0,columns=2)\n\n self.startCamera()\n\n window.mainloop()\n\n def startCamera(self):\n self.stop = False\n\n self.cascade = cv2.CascadeClassifier('lib/nose.xml')\n self.cap = cv2.VideoCapture(0)\n t = threading.Thread(target= self.webcam, args=())\n t.start()\n\n def webcam(self):\n try:\n ret, image_frame = self.cap.read()\n image_frame = cv2.resize(image_frame, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_AREA)\n self.img = Image.fromarray(image_frame)\n\n colorimage = cv2.cvtColor(image_frame, cv2.COLOR_BGR2RGB)\n grayimage = cv2.cvtColor(image_frame, cv2.COLOR_BGR2GRAY)\n\n # core functionality - Face ddetection\n r = self.cascade.detectMultiScale(grayimage,1.7,11)\n for (x,y,w,h) in r:\n cv2.rectangle(colorimage,(x,y),(x+w,y+h),(0,255,0),3)\n self.l2.config(text=\"Face mask is not there\")\n\n self.img = Image.fromarray(colorimage)\n img = ImageTk.PhotoImage(self.img)\n self.l1.configure(image=img)\n self.l1.image = img\n\n if self.stop == False:\n self.l1.after(10, self.webcam)\n else:\n self.l1.image = None\n\n\n except:\n print(\"Some error\")\n\n def stopCamera(self):\n self.stop = True\n\n\n\n# pip unistall opencv-python\n# pip install opencv-python\n","sub_path":"batch1/day15/cheating_detection/views/DetectionView.py","file_name":"DetectionView.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"421476404","text":"\"\"\"\nQT dialog window for EELS compositional analysis\n\nAuthor: Gerd Duscher\n\"\"\"\nQt_available = True\ntry:\n from PyQt5 import QtCore, QtWidgets\nexcept:\n Qt_available = False\n # print('Qt dialogs are not available')\n\n\nimport numpy as np\n\nimport ipywidgets\nimport IPython.display\nimport matplotlib\nimport matplotlib.pylab as plt\nimport matplotlib.patches as patches\n\nfrom pyTEMlib import file_tools as ft\nfrom pyTEMlib import eels_tools as eels\n\nimport sidpy\n\nif Qt_available:\n from pyTEMlib 
import eels_dlg\n from pyTEMlib import eels_dialog_utilities\n\n class EELSDialog(QtWidgets.QDialog):\n \"\"\"\n EELS Input Dialog for Chemical Analysis\n \"\"\"\n\n def __init__(self, datasets=None):\n super().__init__(None, QtCore.Qt.WindowStaysOnTopHint)\n # Create an instance of the GUI\n if datasets is None:\n # make a dummy dataset\n datasets = {'Channel_000':ft.make_dummy_dataset(sidpy.DataType.SPECTRUM)}\n elif isinstance(datasets, sidpy.Dataset):\n datasets = {'Channel_000': datasets} \n elif isinstance(datasets, dict):\n pass\n else: \n raise TypeError('dataset or first item inhas to be a sidpy dataset')\n self.datasets = datasets\n self.dataset = datasets[list(datasets)[0]]\n \n if not isinstance(self.dataset, sidpy.Dataset):\n raise TypeError('dataset or first item inhas to be a sidpy dataset')\n \n self.spec_dim = ft.get_dimensions_by_type('spectral', self.dataset)\n if len(self.spec_dim) != 1:\n raise TypeError('We need exactly one SPECTRAL dimension')\n self.spec_dim = self.spec_dim[0]\n \n self.ui = eels_dlg.UiDialog(self)\n # Run the .setup_ui() method to show the GUI\n # self.ui.setup_ui(self)\n\n self.set_action()\n\n self.energy_scale = np.array([])\n self.model = np.array([])\n self.y_scale = 1.0\n self.change_y_scale = 1.0\n self.spectrum_ll = None\n self.low_loss_key = None\n \n self.edges = {}\n\n self.show_regions = False\n self.show()\n\n self.set_dataset(self.dataset)\n initial_elements = []\n\n for key in self.edges:\n if key.isdigit():\n if 'element' in self.edges[key]:\n initial_elements.append(self.edges[key]['element'])\n \n self.pt_dialog = eels_dialog_utilities.PeriodicTableDialog(energy_scale=self.energy_scale,\n initial_elements=initial_elements)\n self.pt_dialog.signal_selected[list].connect(self.set_elements)\n\n self.dataset.plot()\n\n if hasattr(self.dataset.view, 'axes'):\n self.axis = self.dataset.view.axes[-1]\n elif hasattr(self.dataset.view, 'axis'):\n self.axis = self.dataset.view.axis\n\n self.figure = self.axis.figure\n self.updY = 0\n self.figure.canvas.mpl_connect('button_press_event', self.plot)\n\n self.ui.do_fit_button.setFocus()\n self.plot()\n self.ui.do_fit_button.setFocus()\n\n def set_dataset(self, dataset):\n\n self.dataset = dataset\n if 'edges' not in self.dataset.metadata or self.dataset.metadata['edges'] == {}:\n self.dataset.metadata['edges'] = {'0': {}, 'model': {}, 'use_low_loss': False}\n self.edges = self.dataset.metadata['edges']\n\n spec_dim = ft.get_dimensions_by_type('spectral', dataset)[0]\n\n if len(spec_dim) == 0:\n raise TypeError('We need at least one SPECTRAL dimension')\n\n self.spec_dim = spec_dim[0]\n self.energy_scale = dataset._axes[self.spec_dim].values\n self.ui.edit2.setText(f\"{self.energy_scale[-2]:.3f}\")\n\n if 'fit_area' not in self.edges:\n self.edges['fit_area'] = {}\n if 'fit_start' not in self.edges['fit_area']:\n self.ui.edit1.setText(f\"{self.energy_scale[50]:.3f}\")\n self.edges['fit_area']['fit_start'] = float(self.ui.edit1.displayText())\n else:\n self.ui.edit1.setText(f\"{self.edges['fit_area']['fit_start']:.3f}\")\n if 'fit_end' not in self.edges['fit_area']:\n self.ui.edit2.setText(f\"{self.energy_scale[-2]:.3f}\")\n self.edges['fit_area']['fit_end'] = float(self.ui.edit2.displayText())\n else:\n self.ui.edit2.setText(f\"{self.edges['fit_area']['fit_end']:.3f}\")\n\n if self.dataset.data_type.name == 'SPECTRAL_IMAGE':\n if 'SI_bin_x' not in self.dataset.metadata['experiment']:\n self.dataset.metadata['experiment']['SI_bin_x'] = 1\n self.dataset.metadata['experiment']['SI_bin_y'] = 1\n\n 
bin_x = self.dataset.metadata['experiment']['SI_bin_x']\n bin_y = self.dataset.metadata['experiment']['SI_bin_y']\n self.dataset.view.set_bin([bin_x, bin_y])\n self.update()\n\n def update(self):\n index = self.ui.list3.currentIndex() # which edge\n edge = self.edges[str(index)]\n\n if 'z' in edge:\n self.ui.list5.setCurrentIndex(self.ui.edge_sym.index(edge['symmetry']))\n self.ui.edit4.setText(str(edge['z']))\n self.ui.unit4.setText(edge['element'])\n self.ui.edit6.setText(f\"{edge['onset']:.2f}\")\n self.ui.edit7.setText(f\"{edge['start_exclude']:.2f}\")\n self.ui.edit8.setText(f\"{edge['end_exclude']:.2f}\")\n if self.y_scale == 1.0:\n self.ui.edit9.setText(f\"{edge['areal_density']:.2e}\")\n self.ui.unit9.setText('a.u.')\n else:\n dispersion = self.energy_scale[1]-self.energy_scale[0]\n self.ui.edit9.setText(f\"{edge['areal_density']*self.y_scale*1e-6/dispersion:.2f}\")\n self.ui.unit9.setText('atoms/nm²')\n else:\n self.ui.list3.setCurrentIndex(0)\n self.ui.edit4.setText(str(0))\n self.ui.unit4.setText(' ')\n self.ui.edit6.setText(f\"{0:.2f}\")\n self.ui.edit7.setText(f\"{0:.2f}\")\n self.ui.edit8.setText(f\"{0:.2f}\")\n self.ui.edit9.setText(f\"{0:.2e}\")\n\n def update_element(self, z):\n # We check whether this element is already in the\n zz = eels.get_z(z)\n for key, edge in self.edges.items():\n if key.isdigit():\n if 'z' in edge:\n if zz == edge['z']:\n return False\n\n major_edge = ''\n minor_edge = ''\n all_edges = {}\n x_section = eels.get_x_sections(zz)\n edge_start = 10 # int(15./ft.get_slope(self.energy_scale)+0.5)\n for key in x_section:\n if len(key) == 2 and key[0] in ['K', 'L', 'M', 'N', 'O'] and key[1].isdigit():\n if self.energy_scale[edge_start] < x_section[key]['onset'] < self.energy_scale[-edge_start]:\n if key in ['K1', 'L3', 'M5']:\n major_edge = key\n elif key in self.ui.edge_sym:\n if minor_edge == '':\n minor_edge = key\n if int(key[-1]) % 2 > 0:\n if int(minor_edge[-1]) % 2 == 0 or key[-1] > minor_edge[-1]:\n minor_edge = key\n\n all_edges[key] = {'onset': x_section[key]['onset'], 'original_onset': x_section[key]['onset']}\n \n\n if major_edge != '':\n key = major_edge\n elif minor_edge != '':\n key = minor_edge\n else:\n print(f'Could not find no edge of {zz} in spectrum')\n return False\n\n index = self.ui.list3.currentIndex()\n # self.ui.dialog.setWindowTitle(f'{index}, {zz}')\n\n if str(index) not in self.edges:\n self.edges[str(index)] = {}\n\n start_exclude = x_section[key]['onset'] - x_section[key]['excl before']\n end_exclude = x_section[key]['onset'] + x_section[key]['excl after']\n\n self.edges[str(index)] = {'z': zz, 'symmetry': key, 'element': eels.elements[zz],\n 'onset': x_section[key]['onset'], 'end_exclude': end_exclude,\n 'start_exclude': start_exclude}\n self.edges[str(index)]['all_edges'] = all_edges\n self.edges[str(index)]['chemical_shift'] = 0.0\n self.edges[str(index)]['areal_density'] = 0.0\n self.edges[str(index)]['original_onset'] = self.edges[str(index)]['onset']\n return True\n\n def on_enter(self):\n sender = self.sender()\n edge_list = self.ui.list3\n # self.ui.dialog.setWindowTitle(f\"{sender.objectName()}\")\n\n \n if sender.objectName() == 'fit_start_edit':\n value = float(str(sender.displayText()).strip())\n if value < self.energy_scale[0]:\n value = self.energy_scale[0]\n if value > self.energy_scale[-5]:\n value = self.energy_scale[-5]\n self.edges['fit_area']['fit_start'] = value\n sender.setText(str(self.edges['fit_area']['fit_start']))\n elif sender.objectName() == 'fit_end_edit':\n value = 
float(str(sender.displayText()).strip())\n if value < self.energy_scale[5]:\n value = self.energy_scale[5]\n if value > self.energy_scale[-1]:\n value = self.energy_scale[-1]\n self.edges['fit_area']['fit_end'] = value\n sender.setText(str(self.edges['fit_area']['fit_end']))\n elif sender.objectName() == 'element_edit':\n if str(sender.displayText()).strip() == '0':\n # sender.setText('PT')\n self.pt_dialog.energy_scale = self.energy_scale\n self.pt_dialog.show()\n pass\n else:\n self.update_element(str(sender.displayText()).strip())\n self.update()\n elif sender.objectName() in ['onset_edit', 'excl_start_edit', 'excl_end_edit']:\n self.check_area_consistency()\n\n elif sender.objectName() == 'multiplier_edit':\n index = edge_list.currentIndex()\n self.edges[str(index)]['areal_density'] = float(self.ui.edit9.displayText())\n if self.y_scale != 1.0:\n dispersion = self.energy_scale[1]-self.energy_scale[0]\n self.edges[str(index)]['areal_density'] /= self.y_scale * 1e-6 *dispersion\n if 'background' not in self.edges['model']:\n print(' no background')\n return\n self.model = self.edges['model']['background']\n for key in self.edges:\n if key.isdigit():\n self.model = self.model + self.edges[key]['areal_density'] * self.edges[key]['data']\n self.plot()\n else:\n return\n if self.show_regions:\n self.plot()\n\n \n\n def sort_elements(self):\n onsets = []\n for index, edge in self.edges.items():\n if index.isdigit():\n onsets.append(float(edge['onset']))\n\n arg_sorted = np.argsort(onsets)\n edges = self.edges.copy()\n for index, i_sorted in enumerate(arg_sorted):\n self.edges[str(index)] = edges[str(i_sorted)].copy()\n\n index = 0\n edge = self.edges['0']\n dispersion = self.energy_scale[1]-self.energy_scale[0]\n\n while str(index + 1) in self.edges:\n next_edge = self.edges[str(index + 1)]\n if edge['end_exclude'] > next_edge['start_exclude'] - 5 * dispersion:\n edge['end_exclude'] = next_edge['start_exclude'] - 5 * dispersion\n edge = next_edge\n index += 1\n\n if edge['end_exclude'] > self.energy_scale[-3]:\n edge['end_exclude'] = self.energy_scale[-3]\n\n def set_elements(self, selected_elements):\n edge_list = self.ui.list3\n \n for index, elem in enumerate(selected_elements):\n edge_list.setCurrentIndex(index)\n self.update_element(elem)\n \n self.sort_elements()\n self.update()\n\n def plot(self, event=None):\n self.energy_scale = self.dataset._axes[self.spec_dim].values\n if self.dataset.data_type == sidpy.DataType.SPECTRAL_IMAGE:\n spectrum = self.dataset.view.get_spectrum()\n self.axis = self.dataset.view.axes[1]\n else:\n spectrum = np.array(self.dataset)\n self.axis = self.dataset.view.axis\n\n if self.ui.select10.isChecked():\n if 'experiment' in self.dataset.metadata:\n exp = self.dataset.metadata['experiment']\n if 'convergence_angle' not in exp:\n raise ValueError('need a convergence_angle in experiment of metadata dictionary ')\n alpha = exp['convergence_angle']\n beta = exp['collection_angle']\n beam_kv = exp['acceleration_voltage']\n\n eff_beta = eels.effective_collection_angle(self.energy_scale, alpha, beta, beam_kv)\n edges = eels.make_cross_sections(self.edges, np.array(self.energy_scale), beam_kv, eff_beta)\n self.edges = eels.fit_edges2(spectrum, self.energy_scale, edges)\n areal_density = []\n elements = []\n for key in edges:\n if key.isdigit(): # only edges have numbers in that dictionary\n elements.append(edges[key]['element'])\n areal_density.append(edges[key]['areal_density'])\n areal_density = np.array(areal_density)\n out_string = '\\nRelative composition: 
\\n'\n for i, element in enumerate(elements):\n out_string += f'{element}: {areal_density[i] / areal_density.sum() * 100:.1f}% '\n\n self.model = self.edges['model']['spectrum']\n self.update()\n\n x_limit = self.axis.get_xlim()\n y_limit = np.array(self.axis.get_ylim())*self.change_y_scale\n self.change_y_scale = 1.0\n \n self.axis.clear()\n\n line1, = self.axis.plot(self.energy_scale, spectrum*self.y_scale, label='spectrum')\n lines = [line1]\n\n def onpick(event):\n # on the pick event, find the orig line corresponding to the\n # legend proxy line, and toggle the visibility\n leg_line = event.artist\n orig_line = lined[legline]\n vis = not origline.get_visible()\n orig_line.set_visible(vis)\n # Change the alpha on the line in the legend, so we can see what lines\n # have been toggled\n if vis:\n leg_line.set_alpha(1.0)\n else:\n leg_line.set_alpha(0.2)\n self.figure.canvas.draw()\n\n if len(self.model) > 1:\n line2, = self.axis.plot(self.energy_scale, self.model*self.y_scale, label='model')\n line3, = self.axis.plot(self.energy_scale, (spectrum - self.model)*self.y_scale, label='difference')\n line4, = self.axis.plot(self.energy_scale, (spectrum - self.model) / np.sqrt(spectrum)*self.y_scale, label='Poisson')\n lines = [line1, line2, line3, line4]\n lined = dict()\n\n legend = self.axis.legend(loc='upper right', fancybox=True, shadow=True)\n\n legend.get_frame().set_alpha(0.4)\n for legline, origline in zip(legend.get_lines(), lines):\n legline.set_picker(5) # 5 pts tolerance\n lined[legline] = origline\n self.figure.canvas.mpl_connect('pick_event', onpick)\n self.axis.set_xlim(x_limit)\n self.axis.set_ylim(y_limit)\n \n if self.y_scale != 1.:\n self.axis.set_ylabel('scattering intensity (ppm)')\n else:\n self.axis.set_ylabel('intensity (counts)')\n self.axis.set_xlabel('energy_loss (eV)')\n \n\n if self.ui.show_edges.isChecked():\n self.show_edges()\n if self.show_regions:\n self.plot_regions()\n self.figure.canvas.draw_idle()\n\n def plot_regions(self):\n y_min, y_max = self.axis.get_ylim()\n height = y_max - y_min\n\n rect = []\n if 'fit_area' in self.edges:\n color = 'blue'\n alpha = 0.2\n x_min = self.edges['fit_area']['fit_start']\n width = self.edges['fit_area']['fit_end'] - x_min\n rect.append(patches.Rectangle((x_min, y_min), width, height,\n edgecolor=color, alpha=alpha, facecolor=color))\n self.axis.add_patch(rect[0])\n self.axis.text(x_min, y_max, 'fit region', verticalalignment='top')\n color = 'red'\n alpha = 0.5\n for key in self.edges:\n if key.isdigit():\n x_min = self.edges[key]['start_exclude']\n width = self.edges[key]['end_exclude']-x_min\n rect.append(patches.Rectangle((x_min, y_min), width, height,\n edgecolor=color, alpha=alpha, facecolor=color))\n self.axis.add_patch(rect[-1])\n self.axis.text(x_min, y_max, f\"exclude\\n edge {int(key)+1}\", verticalalignment='top')\n\n def show_edges(self):\n x_min, x_max = self.axis.get_xlim()\n y_min, y_max = self.axis.get_ylim()\n\n for key, edge in self.edges.items():\n i = 0\n if key.isdigit():\n element = edge['element']\n for sym in edge['all_edges']:\n x = edge['all_edges'][sym]['onset'] + edge['chemical_shift']\n if x_min < x < x_max:\n self.axis.text(x, y_max, '\\n' * i + f\"{element}-{sym}\",\n verticalalignment='top', color='black')\n self.axis.axvline(x, ymin=0, ymax=1, color='gray')\n i += 1\n\n def check_area_consistency(self):\n if self.dataset is None:\n return\n onset = float(self.ui.edit6.displayText())\n excl_start = float(self.ui.edit7.displayText())\n excl_end = float(self.ui.edit8.displayText())\n if 
onset < self.energy_scale[2]:\n onset = self.energy_scale[2]\n excl_start = self.energy_scale[2]\n if onset > self.energy_scale[-2]:\n onset = self.energy_scale[-2]\n excl_end = self.energy_scale[-2]\n if excl_start > onset:\n excl_start = onset\n if excl_end < onset:\n excl_end = onset\n\n index = self.ui.list3.currentIndex()\n self.edges[str(index)]['chemical_shift'] = onset - self.edges[str(index)]['original_onset']\n self.edges[str(index)]['onset'] = onset\n self.edges[str(index)]['end_exclude'] = excl_end\n self.edges[str(index)]['start_exclude'] = excl_start\n\n self.update()\n\n def on_list_enter(self):\n sender = self.sender()\n # self.ui.dialog.setWindowTitle(f\"on list eneter {sender.objectName()}\")\n\n if sender.objectName() == 'edge_list':\n index = self.ui.list3.currentIndex()\n\n number_of_edges = 0\n for key in self.edges:\n if key.isdigit():\n if int(key) > number_of_edges:\n number_of_edges = int(key)\n number_of_edges += 1\n if index > number_of_edges:\n index = number_of_edges\n self.ui.list3.setCurrentIndex(index)\n if str(index) not in self.edges:\n self.edges[str(index)] = {'z': 0, 'symmetry': 'K1', 'element': 'H', 'onset': 0, 'end_exclude': 0,\n 'start_exclude': 0, 'areal_density': 0}\n\n self.update()\n elif sender.objectName() == 'symmetry_list':\n sym = self.ui.list5.currentText()\n index = self.ui.list3.currentIndex()\n zz = self.edges[str(index)]['z']\n if zz > 1:\n x_section = eels.get_x_sections(zz)\n if sym in x_section:\n start_exclude = x_section[sym]['onset'] - x_section[sym]['excl before']\n end_exclude = x_section[sym]['onset'] + x_section[sym]['excl after']\n self.edges[str(index)].update({'symmetry': sym, 'onset': x_section[sym]['onset'],\n 'end_exclude': end_exclude, 'start_exclude': start_exclude})\n self.edges[str(index)]['chemical_shift'] = 0.0\n self.edges[str(index)]['areal_density'] = 0.0\n self.edges[str(index)]['original_onset'] = self.edges[index]['onset']\n self.update()\n elif sender.objectName() == 'symmetry_method':\n self.ui.select5.setCurrentIndex(0)\n\n def on_check(self):\n sender = self.sender()\n # self.ui.dialog.setWindowTitle(f\"on_check {sender.objectName()}\")\n\n\n if sender.objectName() == 'edge_check':\n self.show_regions = sender.isChecked()\n elif sender.objectName() == 'conv_ll':\n self.edges['use_low_loss'] = self.ui.check10.isChecked()\n if self.ui.check10.isChecked():\n self.low_loss()\n elif sender.objectName() == 'probability':\n dispersion = self.energy_scale[1]-self.energy_scale[0]\n old_y_scale = self.y_scale *1.\n if sender.isChecked():\n flux_key = None\n spectrum_key = None\n \n for key in self.datasets.keys():\n if 'Reference' in key:\n if self.datasets[key].data_type.name == 'IMAGE': # Prefer Ronchigrams\n flux_key = key\n self.dataset.metadata['experiment']['flux_reference_key'] = flux_key\n elif self.datasets[key].data_type.name == 'SPECTRUM':\n spectrum_key = key\n self.dataset.metadata['experiment']['low_loss_key'] = spectrum_key\n if flux_key is None:\n flux_key = spectrum_key \n\n # self.ui.dialog.setWindowTitle(f\"2nd {self.dataset.metadata['experiment']['flux_ppm']:.2f}\")\n if self.dataset.metadata['experiment']['flux_ppm'] > 0:\n # self.ui.dialog.setWindowTitle(f\"3rD {self.dataset.metadata['experiment']['flux_ppm']:.2f}\")\n self.y_scale = 1/self.dataset.metadata['experiment']['flux_ppm']*dispersion\n elif flux_key is not None:\n self.dataset.metadata['experiment']['flux_ppm'] = (np.array(self.datasets[flux_key])/1e6).sum() \n self.dataset.metadata['experiment']['flux_ppm'] /= 
self.datasets[flux_key].metadata['experiment']['exposure_time']\n self.dataset.metadata['experiment']['flux_ppm'] *= self.dataset.metadata['experiment']['exposure_time'] \n self.y_scale = 1/self.dataset.metadata['experiment']['flux_ppm']*dispersion\n else:\n self.y_scale = 1.0\n else:\n self.y_scale = 1.0\n \n self.change_y_scale = self.y_scale/old_y_scale\n self.update()\n self.plot()\n\n def low_loss(self):\n self.edges['use_low_loss'] = self.ui.check10.isChecked()\n if self.low_loss_key is None:\n for key in self.datasets.keys():\n if 'Reference' in key:\n if self.datasets[key].data_type.name == 'SPECTRUM':\n self.low_loss_key = key\n self.dataset.metadata['experiment']['low_loss_key'] = self.low_loss_key\n \n if self.low_loss_key is None:\n self.low_loss_key = ft.add_dataset_from_file(self.datasets, key_name='Reference')\n self.spectrum_ll = self.datasets[self.low_loss_key]\n if self.spectrum_ll.data_type.name != 'SPECTRUM':\n self.spectrum_ll = None\n self.low_loss_key = None\n\n if self.low_loss_key is not None:\n self.spectrum_ll = self.datasets[self.low_loss_key]\n if 'number_of_frames' in self.spectrum_ll.metadata['experiment']:\n self.spectrum_ll.metadata['experiment']['exposure_time'] = \\\n self.spectrum_ll.metadata['experiment']['single_exposure_time'] * \\\n self.spectrum_ll.metadata['experiment']['number_of_frames'] \n\n def do_all_button_click(self):\n\n if self.dataset.data_type.name != 'SPECTRAL_IMAGE':\n self.do_fit_button_click()\n return\n\n if 'experiment' in self.dataset.metadata:\n exp = self.dataset.metadata['experiment']\n if 'convergence_angle' not in exp:\n raise ValueError('need a convergence_angle in experiment of metadata dictionary ')\n alpha = exp['convergence_angle']\n beta = exp['collection_angle']\n beam_kv = exp['acceleration_voltage']\n else:\n raise ValueError('need a experiment parameter in metadata dictionary')\n\n self.energy_scale = self.dataset._axes[self.spec_dim].values\n eff_beta = eels.effective_collection_angle(self.energy_scale, alpha, beta, beam_kv)\n if self.edges['use_low_loss']:\n low_loss = np.array(self.spectrum_ll)/self.spectrum_ll.sum()\n else:\n low_loss = None\n\n edges = eels.make_cross_sections(self.edges, np.array(self.energy_scale), beam_kv, eff_beta,\n low_loss=low_loss)\n\n view = self.dataset.view\n bin_x = view.bin_x\n bin_y = view.bin_y\n\n start_x = view.x\n start_y = view.y\n\n number_of_edges = 0\n for key in self.edges:\n if key.isdigit():\n number_of_edges += 1\n\n results = np.zeros([int(self.dataset.shape[0]/bin_x), int(self.dataset.shape[1]/bin_y), number_of_edges])\n total_spec = int(self.dataset.shape[0]/bin_x)*int(self.dataset.shape[1]/bin_y)\n self.ui.progress.setMaximum(total_spec)\n self.ui.progress.setValue(0)\n ind = 0\n for x in range(int(self.dataset.shape[0]/bin_x)):\n\n for y in range(int(self.dataset.shape[1]/bin_y)):\n ind += 1\n self.ui.progress.setValue(ind)\n view.x = x*bin_x\n view.y = y*bin_y\n spectrum = view.get_spectrum()\n\n edges = eels.fit_edges2(spectrum, self.energy_scale, edges)\n for key, edge in edges.items():\n if key.isdigit():\n # element.append(edge['element'])\n results[x, y, int(key)] = edge['areal_density']\n edges['spectrum_image_quantification'] = results\n self.ui.progress.setValue(total_spec)\n view.x = start_x\n view.y = start_y\n\n def do_fit_button_click(self):\n if 'experiment' in self.dataset.metadata:\n exp = self.dataset.metadata['experiment']\n if 'convergence_angle' not in exp:\n raise ValueError('need a convergence_angle in experiment of metadata dictionary ')\n 
alpha = exp['convergence_angle']\n beta = exp['collection_angle']\n beam_kv = exp['acceleration_voltage']\n\n else:\n raise ValueError('need a experiment parameter in metadata dictionary')\n self.energy_scale = self.dataset._axes[self.spec_dim].values\n eff_beta = eels.effective_collection_angle(self.energy_scale, alpha, beta, beam_kv)\n\n if self.edges['use_low_loss']:\n low_loss = self.spectrum_ll / self.spectrum_ll.sum()\n else:\n low_loss = None\n edges = eels.make_cross_sections(self.edges, np.array(self.energy_scale), beam_kv, eff_beta, low_loss)\n\n if self.dataset.data_type == sidpy.DataType.SPECTRAL_IMAGE:\n spectrum = self.dataset.view.get_spectrum()\n else:\n spectrum = self.dataset\n self.edges = eels.fit_edges2(spectrum, self.energy_scale, edges)\n areal_density = []\n elements = []\n for key in edges:\n if key.isdigit(): # only edges have numbers in that dictionary\n elements.append(edges[key]['element'])\n areal_density.append(edges[key]['areal_density'])\n areal_density = np.array(areal_density)\n out_string = '\\nRelative composition: \\n'\n for i, element in enumerate(elements):\n out_string += f'{element}: {areal_density[i] / areal_density.sum() * 100:.1f}% '\n\n self.model = self.edges['model']['spectrum']\n self.update()\n self.plot()\n\n def do_auto_id_button_click(self):\n # self.ui.dialog.setWindowTitle(f\"auto id \")\n self.ui.do_fit_button.setFocus()\n\n if '0' not in self.edges:\n self.edges['0'] ={}\n found_edges = eels.auto_id_edges(self.dataset)\n \n to_delete = []\n if len(found_edges) >0:\n for key in self.edges:\n if key.isdigit():\n to_delete.append(key)\n for key in to_delete:\n del self.edges[key]\n if len(to_delete) == 0:\n self.edges['0'] = {}\n \n selected_elements = []\n for key in found_edges:\n selected_elements.append(key)\n self.set_elements(selected_elements)\n \n for button in self.pt_dialog.button:\n if button.text() in selected_elements:\n button.setChecked(True)\n else:\n button.setChecked(False)\n self.update()\n\n def do_select_button_click(self):\n self.pt_dialog.energy_scale = self.energy_scale\n self.pt_dialog.show()\n self.update()\n\n def set_action(self):\n self.ui.edit1.editingFinished.connect(self.on_enter)\n self.ui.edit2.editingFinished.connect(self.on_enter)\n self.ui.list3.activated[str].connect(self.on_list_enter)\n self.ui.check3.clicked.connect(self.on_check)\n self.ui.edit4.editingFinished.connect(self.on_enter)\n self.ui.list5.activated[str].connect(self.on_list_enter)\n self.ui.select5.activated[str].connect(self.on_list_enter)\n\n self.ui.edit6.editingFinished.connect(self.on_enter)\n self.ui.edit7.editingFinished.connect(self.on_enter)\n self.ui.edit8.editingFinished.connect(self.on_enter)\n self.ui.edit9.editingFinished.connect(self.on_enter)\n\n self.ui.check10.clicked.connect(self.on_check)\n self.ui.select10.clicked.connect(self.on_check)\n self.ui.show_edges.clicked.connect(self.on_check)\n self.ui.check_probability.clicked.connect(self.on_check)\n \n self.ui.do_all_button.clicked.connect(self.do_all_button_click)\n self.ui.do_fit_button.clicked.connect(self.do_fit_button_click)\n self.ui.auto_id_button.clicked.connect(self.do_auto_id_button_click)\n self.ui.select_button.clicked.connect(self.do_select_button_click)\n\n\n class CurveVisualizer(object):\n \"\"\"Plots a sidpy.Dataset with spectral dimension-type\n\n \"\"\"\n def __init__(self, dset, spectrum_number=None, axis=None, leg=None, **kwargs):\n if not isinstance(dset, sidpy.Dataset):\n raise TypeError('dset should be a sidpy.Dataset object')\n if axis is 
None:\n self.fig = plt.figure()\n self.axis = self.fig.add_subplot(1, 1, 1)\n else:\n self.axis = axis\n self.fig = axis.figure\n\n self.dset = dset\n self.selection = []\n [self.spec_dim, self.energy_scale] = ft.get_dimensions_by_type('spectral', self.dset)[0]\n\n self.lined = dict()\n self.plot(**kwargs)\n\n def plot(self, **kwargs):\n if self.dset.data_type.name == 'IMAGE_STACK':\n line1, = self.axis.plot(self.energy_scale.values, self.dset[0, 0], label='spectrum', **kwargs)\n else:\n line1, = self.axis.plot(self.energy_scale.values, self.dset, label='spectrum', **kwargs)\n lines = [line1]\n if 'add2plot' in self.dset.metadata:\n data = self.dset.metadata['add2plot']\n for key, line in data.items():\n line_add, = self.axis.plot(self.energy_scale.values, line['data'], label=line['legend'])\n lines.append(line_add)\n\n legend = self.axis.legend(loc='upper right', fancybox=True, shadow=True)\n legend.get_frame().set_alpha(0.4)\n\n for legline, origline in zip(legend.get_lines(), lines):\n legline.set_picker(True)\n legline.set_pickradius(5) # 5 pts tolerance\n self.lined[legline] = origline\n self.fig.canvas.mpl_connect('pick_event', self.onpick)\n\n self.axis.axhline(0, color='gray', alpha=0.6)\n self.axis.set_xlabel(self.dset.labels[0])\n self.axis.set_ylabel(self.dset.data_descriptor)\n self.axis.ticklabel_format(style='sci', scilimits=(-2, 3))\n self.fig.canvas.draw_idle()\n\n def update(self, **kwargs):\n x_limit = self.axis.get_xlim()\n y_limit = self.axis.get_ylim()\n self.axis.clear()\n self.plot(**kwargs)\n self.axis.set_xlim(x_limit)\n self.axis.set_ylim(y_limit)\n\n def onpick(self, event):\n # on the pick event, find the orig line corresponding to the\n # legend proxy line, and toggle the visibility\n legline = event.artist\n origline = self.lined[legline]\n vis = not origline.get_visible()\n origline.set_visible(vis)\n # Change the alpha on the line in the legend, so we can see what lines\n # have been toggled\n if vis:\n legline.set_alpha(1.0)\n else:\n legline.set_alpha(0.2)\n self.fig.canvas.draw()\n\ndef get_sidebar():\n side_bar = ipywidgets.GridspecLayout(13, 3,width='auto', grid_gap=\"0px\")\n\n \n row = 0\n side_bar[row, :3] = ipywidgets.ToggleButton(description='Fit Area',\n layout=ipywidgets.Layout(width='auto', grid_area='header'),\n tooltip='Shows fit regions and regions excluded from fit', \n button_style='info') #ipywidgets.ButtonStyle(button_color='lightblue'))\n row += 1\n side_bar[row, :2] = ipywidgets.FloatText(value=7.5,description='Fit Start:', disabled=False, color='black', layout=ipywidgets.Layout(width='200px'))\n side_bar[row, 2] = ipywidgets.widgets.Label(value=\"eV\", layout=ipywidgets.Layout(width='20px'))\n row += 1\n side_bar[row, :2] = ipywidgets.FloatText(value=0.1, description='Fit End:', disabled=False, color='black', layout=ipywidgets.Layout(width='200px'))\n side_bar[row, 2] = ipywidgets.widgets.Label(value=\"eV\", layout=ipywidgets.Layout(width='20px'))\n \n row += 1\n \n side_bar[row, :3] = ipywidgets.Button(description='Elements',\n layout=ipywidgets.Layout(width='auto', grid_area='header'),\n style=ipywidgets.ButtonStyle(button_color='lightblue'))\n row += 1\n side_bar[row, :2] = ipywidgets.Dropdown(\n options=[('Edge 1', 0), ('Edge 2', 1), ('Edge 3', 2), ('Edge 4', 3),('Add Edge', -1)],\n value=0,\n description='Edges:',\n disabled=False,\n layout=ipywidgets.Layout(width='200px'))\n \"\"\"side_bar[row,2] = ipywidgets.ToggleButton(\n description='Regions',\n disabled=False,\n button_style='', # 'success', 'info', 'warning', 'danger' or 
''\n                tooltip='Shows fit regions and regions excluded from fit', \n                layout=ipywidgets.Layout(width='100px')\n                )\n    \"\"\"\n    row += 1\n    side_bar[row, :2] = ipywidgets.IntText(value=7.5, description='Z:', disabled=False, color='black', layout=ipywidgets.Layout(width='200px'))\n    side_bar[row, 2] = ipywidgets.widgets.Label(value=\"\", layout=ipywidgets.Layout(width='100px'))\n    row += 1\n    side_bar[row, :2] = ipywidgets.Dropdown(\n            options=['K1','L3', 'M5', 'M3', 'M1', 'N7', 'N5', 'N3', 'N1'],\n            value='K1',\n            description='Symmetry:',\n            disabled=False,\n            layout=ipywidgets.Layout(width='200px'))\n    row += 1\n    side_bar[row, :2] = ipywidgets.FloatText(value=0.1, description='Onset:', disabled=False, color='black', layout=ipywidgets.Layout(width='200px'))\n    side_bar[row, 2] = ipywidgets.widgets.Label(value=\"eV\", layout=ipywidgets.Layout(width='100px'))\n    row += 1\n    side_bar[row, :2] = ipywidgets.FloatText(value=0.1, description='Excl.Start:', disabled=False, color='black', layout=ipywidgets.Layout(width='200px'))\n    side_bar[row, 2] = ipywidgets.widgets.Label(value=\"eV\", layout=ipywidgets.Layout(width='100px'))\n    row += 1\n    side_bar[row, :2] = ipywidgets.FloatText(value=0.1, description='Excl.End:', disabled=False, color='black', layout=ipywidgets.Layout(width='200px'))\n    side_bar[row, 2] = ipywidgets.widgets.Label(value=\"eV\", layout=ipywidgets.Layout(width='100px'))\n    row += 1\n    side_bar[row, :2] = ipywidgets.FloatText(value=0.1, description='Multiplier:', disabled=False, color='black', layout=ipywidgets.Layout(width='200px'))\n    side_bar[row, 2] = ipywidgets.widgets.Label(value=\"a.u.\", layout=ipywidgets.Layout(width='100px'))\n    row += 1\n    \n    side_bar[row, :3] = ipywidgets.Button(description='Quantification',\n                                          layout=ipywidgets.Layout(width='auto', grid_area='header'),\n                                          style=ipywidgets.ButtonStyle(button_color='lightblue'))\n    \n    row += 1\n    side_bar[row,0] = ipywidgets.ToggleButton(\n            description='Probability',\n            disabled=False,\n            button_style='',  # 'success', 'info', 'warning', 'danger' or ''\n            tooltip='Changes the y-axis to scattering probability if a flux reference is given', \n            layout=ipywidgets.Layout(width='100px')\n            )\n    side_bar[row,1] = ipywidgets.ToggleButton(\n            description='Conv.LL',\n            disabled=False,\n            button_style='',  # 'success', 'info', 'warning', 'danger' or ''\n            tooltip='Convolve the model with the low-loss spectrum', \n            layout=ipywidgets.Layout(width='100px')\n            )\n    side_bar[row,2] = ipywidgets.ToggleButton(\n            description='Show Edges',\n            disabled=False,\n            button_style='',  # 'success', 'info', 'warning', 'danger' or ''\n            tooltip='Show edge onsets in the plot', \n            layout=ipywidgets.Layout(width='100px')\n            )\n    return side_bar\n\n\nimport ipywidgets\n\ndef get_periodic_table_widget(energy_scale=None):\n\n    if energy_scale is None:\n        energy_scale = [100., 150., 200.]\n    \n    likely_edges = eels_dialog_utilities.get_likely_edges(energy_scale)\n    \n    pt_info = eels_dialog_utilities.get_periodic_table_info()\n    table = ipywidgets.GridspecLayout(10, 18, width='60%', grid_gap=\"0px\")\n    for symbol, parameter in pt_info.items():\n        # print(parameter['PT_row'], parameter['PT_col'])\n        if parameter['PT_row'] > 7:\n            color = 'warning'\n        elif '*' in symbol:\n            color = 'warning'\n        else:\n            if symbol in likely_edges:\n                color = 'primary'\n            else:\n                color = 'info'\n        table[parameter['PT_row'], parameter['PT_col']] = ipywidgets.ToggleButton(description=symbol, \n                                                                                  value=False, \n                                                                                  button_style=color,\n                                                                                  layout=ipywidgets.Layout(width='auto'),\n                                                                                  style={\"button_width\": \"30px\"})\n    return table\n\n\n
class PeriodicTableWidget(object):\n    \"\"\" Modal dialog to get a selection of elements.\n\n    Elements that do not have a valid cross-section are disabled.\n\n    Parameters\n    ----------\n    initial_elements: list of str\n        the elements that are already selected\n    energy_scale: list or numpy array\n        energy-scale of spectrum/spectra to determine likely edges\n\n    Returns\n    -------\n    list of strings: elements.\n\n    Example\n    -------\n    >> pt_widget = PeriodicTableWidget(['Mn', 'O'])\n    >> selected_elements = pt_widget.get_output()\n    >> print(selected_elements)\n    \"\"\"\n\n    def __init__(self, initial_elements=None, energy_scale=None):\n\n        if initial_elements is None:\n            initial_elements = [' ']\n        self.elements_selected = initial_elements\n        if energy_scale is None:\n            energy_scale = [100., 150., 200.]\n        self._output = []\n        self.energy_scale = np.array(energy_scale)\n        self.pt_info = eels_dialog_utilities.get_periodic_table_info()\n        \n        self.periodic_table = get_periodic_table_widget(energy_scale)\n        self.update()\n\n    def get_output(self):\n        self.elements_selected = []\n        for symbol, parameter in self.pt_info.items():\n            if self.periodic_table[parameter['PT_row'], parameter['PT_col']].value:\n                self.elements_selected.append(self.periodic_table[parameter['PT_row'], parameter['PT_col']].description)\n        return self.elements_selected\n    \n    def update(self):\n        for symbol, parameter in self.pt_info.items():\n            if self.periodic_table[parameter['PT_row'], parameter['PT_col']].description in self.elements_selected:\n                self.periodic_table[parameter['PT_row'], parameter['PT_col']].value = True\n\n\nclass CompositionWidget(object):\n    def __init__(self, datasets=None):\n        self.datasets = datasets\n        if not isinstance(datasets, dict):\n            raise TypeError('datasets has to be a dictionary of sidpy datasets')\n        \n        self.sidebar = get_sidebar()\n        self.dataset = datasets[list(datasets)[0]]\n        if not isinstance(self.dataset, sidpy.Dataset):\n            raise TypeError('the first item of datasets has to be a sidpy dataset')\n        self.spec_dim = ft.get_dimensions_by_type('spectral', self.dataset)\n        if len(self.spec_dim) != 1:\n            raise TypeError('We need exactly one SPECTRAL dimension')\n        self.spec_dim = self.spec_dim[0]\n        # self.energy_scale = self.dataset._axes[self.spec_dim]\n        \n        self.energy_scale = self.spec_dim[1]\n        self.model = np.array([])\n        self.y_scale = 1.0\n        self.change_y_scale = 1.0\n        self.spectrum_ll = None\n        self.low_loss_key = None\n\n        self.edges = {}\n\n        self.show_regions = False\n        \n        with plt.ioff():\n            self.fig = plt.figure()\n        self.fig.canvas.toolbar_position = 'right'\n        self.fig.canvas.toolbar_visible = True\n        self.key = list(self.datasets.keys())[0]\n        self.set_dataset()\n        \n        self.y_scale = 1.0\n        self.change_y_scale = 1.0\n        self.plot(scale=False)\n        self.selector = matplotlib.widgets.SpanSelector(self.fig.gca(), self.line_select_callback,\n                                                        direction=\"horizontal\",\n                                                        interactive=True,\n                                                        props=dict(facecolor='blue', alpha=0.2))\n        self.start_cursor = ipywidgets.FloatText(value=0, description='Start:', disabled=False, color='black', layout=ipywidgets.Layout(width='200px'))\n        self.end_cursor = ipywidgets.FloatText(value=0, description='End:', disabled=False, color='black', layout=ipywidgets.Layout(width='200px'))\n        self.panel = ipywidgets.VBox([ipywidgets.HBox([ipywidgets.Label('', layout=ipywidgets.Layout(width='100px')), ipywidgets.Label('Cursor:'),\n                                                       self.start_cursor, ipywidgets.Label('eV'), \n                                                       self.end_cursor, 
ipywidgets.Label('eV')]),\n self.fig.canvas])\n self.periodic_table = PeriodicTableWidget(self.energy_scale)\n self.elements_cancel_button = ipywidgets.Button(description='Cancel')\n self.elements_select_button = ipywidgets.Button(description='Select')\n self.elements_auto_button = ipywidgets.Button(description='Auto ID')\n \n self.periodic_table_panel = ipywidgets.VBox([self.periodic_table.periodic_table,\n ipywidgets.HBox([self.elements_cancel_button, self.elements_auto_button, self.elements_select_button])])\n # Button(description=description, button_style=button_style, layout=Layout(height='auto', width='auto'))\n \n self.app_layout = ipywidgets.AppLayout(\n left_sidebar=self.sidebar,\n center=self.panel, ## can be changed with: self.app_layout.center = self.periodic_table.periodic_table\n footer=None,#message_bar,\n pane_heights=[0, 10, 0],\n pane_widths=[4, 10, 0],\n )\n IPython.display.display(self.app_layout)\n self.set_action()\n \n def line_select_callback(self, x_min, x_max):\n self.start_cursor.value = np.round(x_min,3)\n self.end_cursor.value = np.round(x_max, 3)\n self.start_channel = np.searchsorted(self.datasets[self.key].energy_loss, self.start_cursor.value)\n self.end_channel = np.searchsorted(self.datasets[self.key].energy_loss, self.end_cursor.value)\n \n def plot(self, scale=True):\n \n ylim = self.fig.gca().get_ylim()\n \n ax = self.fig.gca()\n ax.clear()\n ax.plot(self.energy_scale, self.datasets[self.key]*self.y_scale, label=self.datasets[self.key].title)\n\n ax.set_xlabel(self.datasets[self.key].labels[0])\n ax.set_ylabel(self.datasets[self.key].data_descriptor)\n ax.ticklabel_format(style='sci', scilimits=(-2, 3))\n if scale:\n ax.set_ylim(np.array(ylim)*self.change_y_scale)\n self.change_y_scale = 1.0\n if self.y_scale != 1.:\n ax.set_ylabel('scattering probability (ppm/eV)')\n self.selector = matplotlib.widgets.SpanSelector(self.fig.gca(), self.line_select_callback,\n direction=\"horizontal\",\n interactive=True,\n props=dict(facecolor='blue', alpha=0.2))\n \n if len(self.model) > 1:\n ax.plot(self.energy_scale, self.model*self.y_scale, label='model')\n difference_spec = self.datasets[self.key] - self.model\n ax.plot(self.energy_scale, difference_spec*self.y_scale, label='difference')\n # axis.plot(self.energy_scale, (self.datasets[key] - self.model) / np.sqrt(self.datasets[key])*self.y_scale, label='Poisson')\n \n \n ax.legend()\n \n if self.sidebar[12, 2].value:\n self.show_edges()\n if self.sidebar[0, 0].value:\n self.plot_regions()\n self.fig.canvas.draw_idle()\n \n \n def plot_regions(self):\n axis = self.fig.gca()\n y_min, y_max = axis.get_ylim()\n height = y_max - y_min\n\n rect = []\n if 'fit_area' in self.edges:\n color = 'blue'\n alpha = 0.2\n x_min = self.edges['fit_area']['fit_start']\n width = self.edges['fit_area']['fit_end'] - x_min\n rect.append(patches.Rectangle((x_min, y_min), width, height,\n edgecolor=color, alpha=alpha, facecolor=color))\n axis.add_patch(rect[0])\n axis.text(x_min, y_max, 'fit region', verticalalignment='top')\n color = 'red'\n alpha = 0.5\n \n for key in self.edges:\n if key.isdigit():\n x_min = self.edges[key]['start_exclude']\n width = self.edges[key]['end_exclude']-x_min\n rect.append(patches.Rectangle((x_min, y_min), width, height,\n edgecolor=color, alpha=alpha, facecolor=color))\n axis.add_patch(rect[-1])\n axis.text(x_min, y_max, f\"exclude\\n edge {int(key)+1}\", verticalalignment='top')\n\n def show_edges(self):\n axis = self.fig.gca()\n x_min, x_max = axis.get_xlim()\n y_min, y_max = axis.get_ylim()\n \n for key, 
edge in self.edges.items():\n            i = 0\n            if key.isdigit():\n                element = edge['element']\n                for sym in edge['all_edges']:\n                    x = edge['all_edges'][sym]['onset'] + edge['chemical_shift']\n                    if x_min < x < x_max:\n                        axis.text(x, y_max, '\\n' * i + f"{element}-{sym}",\n                                  verticalalignment='top', color='black')\n                        axis.axvline(x, ymin=0, ymax=1, color='gray')\n                        i += 1\n\n    def set_dataset(self, index=0):\n        if 'edges' not in self.dataset.metadata or self.dataset.metadata['edges'] == {}:\n            self.dataset.metadata['edges'] = {'0': {}, 'model': {}, 'use_low_loss': False}\n        \n        self.edges = self.dataset.metadata['edges']\n        if '0' not in self.edges:\n            self.edges['0'] = {}\n        \n        if 'fit_area' not in self.edges:\n            self.edges['fit_area'] = {}\n        if 'fit_start' not in self.edges['fit_area']:\n            self.sidebar[1,0].value = np.round(self.energy_scale[50], 3)\n            self.edges['fit_area']['fit_start'] = self.sidebar[1,0].value\n        else:\n            self.sidebar[1,0].value = np.round(self.edges['fit_area']['fit_start'], 3)\n        if 'fit_end' not in self.edges['fit_area']:\n            self.sidebar[2,0].value = np.round(self.energy_scale[-2], 3)\n            self.edges['fit_area']['fit_end'] = self.sidebar[2,0].value\n        else:\n            self.sidebar[2,0].value = np.round(self.edges['fit_area']['fit_end'], 3)\n        \n        if self.dataset.data_type.name == 'SPECTRAL_IMAGE':\n            if 'SI_bin_x' not in self.dataset.metadata['experiment']:\n                self.dataset.metadata['experiment']['SI_bin_x'] = 1\n                self.dataset.metadata['experiment']['SI_bin_y'] = 1\n\n            bin_x = self.dataset.metadata['experiment']['SI_bin_x']\n            bin_y = self.dataset.metadata['experiment']['SI_bin_y']\n            # self.dataset.view.set_bin([bin_x, bin_y])\n        self.update()\n        \n    def update_element(self, z=0, index=-1):\n        # Check whether this element is already in the list of edges\n        if z == 0:\n            z = self.sidebar[5,0].value\n        \n        zz = eels.get_z(z)\n        for key, edge in self.edges.items():\n            if key.isdigit():\n                if 'z' in edge:\n                    if zz == edge['z']:\n                        return False\n\n        major_edge = ''\n        minor_edge = ''\n        all_edges = {}\n        x_section = eels.get_x_sections(zz)\n        edge_start = 10  # int(15./ft.get_slope(self.energy_scale)+0.5)\n        for key in x_section:\n            if len(key) == 2 and key[0] in ['K', 'L', 'M', 'N', 'O'] and key[1].isdigit():\n                if self.energy_scale[edge_start] < x_section[key]['onset'] < self.energy_scale[-edge_start]:\n                    if key in ['K1', 'L3', 'M5']:\n                        major_edge = key\n                    elif key in self.sidebar[6,0].options:\n                        if minor_edge == '':\n                            minor_edge = key\n                        if int(key[-1]) % 2 > 0:\n                            if int(minor_edge[-1]) % 2 == 0 or key[-1] > minor_edge[-1]:\n                                minor_edge = key\n\n                    all_edges[key] = {'onset': x_section[key]['onset']}\n\n        if major_edge != '':\n            key = major_edge\n        elif minor_edge != '':\n            key = minor_edge\n        else:\n            print(f'Could not find an edge of {zz} in the spectrum')\n            return False\n        if index == -1:\n            index = self.sidebar[4, 0].value\n        # self.ui.dialog.setWindowTitle(f'{index}, {zz}')\n\n        if str(index) not in self.edges:\n            self.edges[str(index)] = {}\n\n        start_exclude = x_section[key]['onset'] - x_section[key]['excl before']\n        end_exclude = x_section[key]['onset'] + x_section[key]['excl after']\n\n        self.edges[str(index)] = {'z': zz, 'symmetry': key, 'element': eels.elements[zz],\n                                  'onset': x_section[key]['onset'], 'end_exclude': end_exclude,\n                                  'start_exclude': start_exclude}\n        self.edges[str(index)]['all_edges'] = all_edges\n        self.edges[str(index)]['chemical_shift'] = 0.0\n        self.edges[str(index)]['areal_density'] = 0.0\n        self.edges[str(index)]['original_onset'] = self.edges[str(index)]['onset']\n        return True\n    \n    def sort_elements(self):\n        onsets = []\n        for index, 
edge in self.edges.items():\n if index.isdigit():\n onsets.append(float(edge['onset']))\n\n arg_sorted = np.argsort(onsets)\n edges = self.edges.copy()\n for index, i_sorted in enumerate(arg_sorted):\n self.edges[str(index)] = edges[str(i_sorted)].copy()\n\n index = 0\n edge = self.edges['0']\n dispersion = self.energy_scale[1]-self.energy_scale[0]\n\n while str(index + 1) in self.edges:\n next_edge = self.edges[str(index + 1)]\n if edge['end_exclude'] > next_edge['start_exclude'] - 5 * dispersion:\n edge['end_exclude'] = next_edge['start_exclude'] - 5 * dispersion\n edge = next_edge\n index += 1\n\n if edge['end_exclude'] > self.energy_scale[-3]:\n edge['end_exclude'] = self.energy_scale[-3]\n\n def set_elements(self, value=0):\n selected_elements = self.periodic_table.get_output()\n edges = self.edges.copy()\n to_delete = []\n old_elements = []\n if len(selected_elements) > 0:\n for key in self.edges:\n if key.isdigit():\n to_delete.append(key)\n old_elements.append(self.edges[key]['element'])\n\n for key in to_delete:\n edges[key] = self.edges[key]\n del self.edges[key]\n \n for index, elem in enumerate(selected_elements):\n if elem in old_elements:\n self.edges[str(index)] = edges[str(old_elements.index(elem))] \n else:\n self.update_element(elem, index=index)\n self.sort_elements()\n self.update()\n self.set_figure_pane()\n\n def set_element(self, elem):\n self.update_element(self.sidebar[5, 0].value)\n # self.sort_elements()\n self.update()\n \n def cursor2energy_scale(self, value):\n dispersion = (self.end_cursor.value - self.start_cursor.value) / (self.end_channel - self.start_channel)\n self.datasets[self.key].energy_loss *= (self.sidebar[3, 0].value/dispersion)\n self.sidebar[3, 0].value = dispersion\n offset = self.start_cursor.value - self.start_channel * dispersion\n self.datasets[self.key].energy_loss += (self.sidebar[2, 0].value-self.datasets[self.key].energy_loss[0])\n self.sidebar[2, 0].value = offset\n self.plot()\n \n def set_fit_area(self, value):\n if self.sidebar[1,0].value > self.sidebar[2,0].value:\n self.sidebar[1,0].value = self.sidebar[2,0].value -1\n if self.sidebar[1,0].value < self.energy_scale[0]:\n self.sidebar[1,0].value = self.energy_scale[0]\n if self.sidebar[2,0].value > self.energy_scale[-1]:\n self.sidebar[2,0].value = self.energy_scale[-1]\n self.edges['fit_area']['fit_start'] = self.sidebar[1,0].value \n self.edges['fit_area']['fit_end'] = self.sidebar[2,0].value \n \n self.plot()\n \n def set_y_scale(self, value): \n self.change_y_scale = 1/self.y_scale\n self.y_scale = 1.0\n if self.dataset.metadata['experiment']['flux_ppm'] > 0:\n if self.sidebar[12, 0].value:\n dispersion = self.energy_scale[1] - self.energy_scale[0]\n self.y_scale = 1/self.dataset.metadata['experiment']['flux_ppm'] * dispersion\n \n self.change_y_scale *= self.y_scale\n self.update()\n self.plot()\n\n def auto_id(self, value=0):\n found_edges = eels.auto_id_edges(self.dataset)\n if len(found_edges) > 0:\n self.periodic_table.elements_selected = found_edges\n self.periodic_table.update()\n \n def find_elements(self, value=0):\n \n if '0' not in self.edges:\n self.edges['0'] = {}\n # found_edges = eels.auto_id_edges(self.dataset)\n found_edges = {}\n\n selected_elements = []\n elements = self.edges.copy()\n\n for key in self.edges:\n if key.isdigit():\n if 'element' in self.edges[key]:\n selected_elements.append(self.edges[key]['element'])\n self.periodic_table.elements_selected = selected_elements\n self.periodic_table.update()\n self.app_layout.center = self.periodic_table_panel 
# self.periodic_table.periodic_table\n\n def set_figure_pane(self, value=0):\n \n self.app_layout.center = self.panel\n \n def update(self, index=0):\n \n index = self.sidebar[4,0].value # which edge\n if index < 0:\n options = list(self.sidebar[4,0].options)\n options.insert(-1, (f'Edge {len(self.sidebar[4,0].options)}', len(self.sidebar[4,0].options)-1))\n self.sidebar[4,0].options= options\n self.sidebar[4,0].value = len(self.sidebar[4,0].options)-2\n if str(index) not in self.edges:\n self.edges[str(index)] = {'z': 0, 'element': 'x', 'symmetry': 'K1', 'onset': 0, 'start_exclude': 0, 'end_exclude':0,\n 'areal_density': 0, 'chemical_shift':0}\n if 'z' not in self.edges[str(index)]:\n self.edges[str(index)] = {'z': 0, 'element': 'x', 'symmetry': 'K1', 'onset': 0, 'start_exclude': 0, 'end_exclude':0,\n 'areal_density': 0, 'chemical_shift':0}\n edge = self.edges[str(index)]\n \n self.sidebar[5,0].value = edge['z']\n self.sidebar[5,2].value = edge['element']\n self.sidebar[6,0].value = edge['symmetry']\n self.sidebar[7,0].value = edge['onset']\n self.sidebar[8,0].value = edge['start_exclude']\n self.sidebar[9,0].value = edge['end_exclude']\n if self.y_scale == 1.0:\n self.sidebar[10, 0].value = edge['areal_density']\n self.sidebar[10, 2].value = 'a.u.'\n else:\n dispersion = self.energy_scale[1]-self.energy_scale[0]\n self.sidebar[10, 0].value = np.round(edge['areal_density']/self.dataset.metadata['experiment']['flux_ppm']*1e-6, 2)\n self.sidebar[10, 2].value = 'atoms/nm²'\n \n \n def do_fit(self, value=0):\n if 'experiment' in self.dataset.metadata:\n exp = self.dataset.metadata['experiment']\n if 'convergence_angle' not in exp:\n raise ValueError('need a convergence_angle in experiment of metadata dictionary ')\n alpha = exp['convergence_angle']\n beta = exp['collection_angle']\n beam_kv = exp['acceleration_voltage']\n\n else:\n raise ValueError('need a experiment parameter in metadata dictionary')\n \n eff_beta = eels.effective_collection_angle(self.energy_scale, alpha, beta, beam_kv)\n\n self.low_loss = None\n if self.sidebar[12, 1].value:\n for key in self.datasets.keys():\n if key != self.key:\n if isinstance(self.datasets[key], sidpy.Dataset):\n if self.datasets[key].data_type.name == 'SPECTRUM':\n if self.datasets[key].energy_loss[0] < 0:\n self.low_loss = self.datasets[key]/self.datasets[key].sum()\n\n edges = eels.make_cross_sections(self.edges, np.array(self.energy_scale), beam_kv, eff_beta, self.low_loss)\n\n if self.dataset.data_type == sidpy.DataType.SPECTRAL_IMAGE:\n spectrum = self.dataset.view.get_spectrum()\n else:\n spectrum = self.dataset\n self.edges = eels.fit_edges2(spectrum, self.energy_scale, edges)\n areal_density = []\n elements = []\n for key in edges:\n if key.isdigit(): # only edges have numbers in that dictionary\n elements.append(edges[key]['element'])\n areal_density.append(edges[key]['areal_density'])\n areal_density = np.array(areal_density)\n out_string = '\\nRelative composition: \\n'\n for i, element in enumerate(elements):\n out_string += f'{element}: {areal_density[i] / areal_density.sum() * 100:.1f}% '\n\n self.model = self.edges['model']['spectrum']\n self.update()\n self.plot()\n \n def modify_onset(self, value=-1):\n edge_index = self.sidebar[4, 0].value\n edge = self.edges[str(edge_index)]\n edge['onset'] = self.sidebar[7,0].value\n if 'original_onset' not in edge:\n edge['original_onset'] = edge['onset']\n edge['chemical_shift'] = edge['onset'] - edge['original_onset']\n self.update()\n \n \n def modify_start_exclude(self, value=-1):\n edge_index 
= self.sidebar[4, 0].value\n edge = self.edges[str(edge_index)]\n edge['start_exclude'] = self.sidebar[8,0].value\n self.plot()\n \n def modify_end_exclude(self, value=-1):\n edge_index = self.sidebar[4, 0].value\n edge = self.edges[str(edge_index)]\n edge['end_exclude'] = self.sidebar[9,0].value\n self.plot()\n \n def modify_areal_density(self, value=-1):\n edge_index = self.sidebar[4, 0].value\n edge = self.edges[str(edge_index)]\n \n edge['areal_density'] = self.sidebar[10, 0].value\n if self.y_scale != 1.0:\n dispersion = self.energy_scale[1]-self.energy_scale[0]\n edge['areal_density'] = self.sidebar[10, 0].value *self.dataset.metadata['experiment']['flux_ppm']/1e-6\n\n self.model = self.edges['model']['background']\n for key in self.edges:\n if key.isdigit():\n if 'data' in self.edges[key]:\n\n self.model = self.model + self.edges[key]['areal_density'] * self.edges[key]['data']\n self.plot()\n\n def set_action(self):\n self.sidebar[1, 0].observe(self.set_fit_area, names='value')\n self.sidebar[2, 0].observe(self.set_fit_area, names='value')\n \n self.sidebar[3, 0].on_click(self.find_elements)\n self.sidebar[4, 0].observe(self.update)\n self.sidebar[5, 0].observe(self.set_element, names='value')\n\n self.sidebar[7, 0].observe(self.modify_onset, names='value')\n self.sidebar[8, 0].observe(self.modify_start_exclude, names='value')\n self.sidebar[9, 0].observe(self.modify_end_exclude, names='value')\n self.sidebar[10, 0].observe(self.modify_areal_density, names='value')\n \n self.sidebar[11, 0].on_click(self.do_fit)\n self.sidebar[12, 2].observe(self.plot)\n self.sidebar[0, 0].observe(self.plot)\n\n self.sidebar[12,0].observe(self.set_y_scale)\n\n self.elements_cancel_button.on_click(self.set_figure_pane)\n self.elements_auto_button.on_click(self.auto_id)\n self.elements_select_button.on_click(self.set_elements)\n","sub_path":"pyTEMlib/eels_dialog.py","file_name":"eels_dialog.py","file_ext":"py","file_size_in_byte":66612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"214900404","text":"from __future__ import print_function\n\nclass MemoryMap(object):\n __slots__ = (\"entries\",)\n\n def __init__(self, entries):\n super(MemoryMap, self).__init__()\n self.entries = entries\n return\n\n def entryContaining(self, address):\n for entry in self.entries:\n if address in entry:\n return entry\n return None\n\n def __repr__(self):\n return \"\\n\".join([repr(e) for e in self.entries])\n\n @classmethod\n def forProcess(cls, pid):\n with open(\"/proc/%d/maps\" % pid, \"rb\") as fd:\n return cls.fromProcFile(fd)\n\n @classmethod\n def fromProcFile(cls, fd, **kw):\n entries = [MemoryMapEntry.fromProcString(line.rstrip())\n for line in fd]\n entries.sort(key=lambda x: x.start_address)\n return cls(entries, **kw)\n \nclass MemoryMapEntry(object):\n __slots__ = (\"start_address\", \"end_address\", \"readable\", \"writable\",\n \"executable\", \"shared\", \"private\", \"device\", \"inode\",\n \"offset\", \"pathname\")\n\n def __init__(self, start_address, end_address, readable, writable,\n executable, shared, private, device, inode, offset,\n pathname):\n super(MemoryMapEntry, self).__init__()\n self.start_address = start_address\n self.end_address = end_address\n self.readable = readable\n self.writable = writable\n self.executable = executable\n self.shared = shared\n self.private = private\n self.device = device\n self.inode = inode\n self.offset = offset\n self.pathname = pathname\n return\n\n def __contains__(self, address):\n return 
self.start_address <= address < self.end_address\n\n    def __eq__(self, other):\n        return (\n            self.__class__ is other.__class__ and\n            all([getattr(self, x) == getattr(other, x)\n                 for x in self.__slots__]))\n\n    def __ne__(self, other):\n        return not self.__eq__(other)\n\n    def __lt__(self, other):\n        for attr in self.__slots__:\n            my_attr = getattr(self, attr)\n            other_attr = getattr(other, attr)\n            if my_attr < other_attr:\n                return True\n            elif my_attr > other_attr:\n                return False\n\n        return False\n\n    def __le__(self, other):\n        for attr in self.__slots__:\n            my_attr = getattr(self, attr)\n            other_attr = getattr(other, attr)\n            if my_attr < other_attr:\n                return True\n            elif my_attr > other_attr:\n                return False\n\n        return True\n\n    def __gt__(self, other):\n        return not self.__le__(other)\n\n    def __ge__(self, other):\n        return not self.__lt__(other)\n\n    @property\n    def permissions_string(self):\n        return ((\"r\" if self.readable else \"-\") +\n                (\"w\" if self.writable else \"-\") +\n                (\"x\" if self.executable else \"-\") +\n                (\"p\" if self.private else (\"s\" if self.shared else \"-\")))\n\n    @staticmethod\n    def parse_permissions_string(permissions):\n        readable = (\"r\" in permissions)\n        writable = (\"w\" in permissions)\n        executable = (\"x\" in permissions)\n        shared = (\"s\" in permissions)\n        private = (\"p\" in permissions)\n        return (readable, writable, executable, shared, private)\n\n    def __repr__(self):\n        return \"%016x-%016x %s %08x %02x:%02x %8d %s\" % (\n            self.start_address, self.end_address,\n            self.permissions_string,\n            self.offset, self.device[0], self.device[1], self.inode,\n            (self.pathname if self.pathname is not None else \"\"))\n\n    @classmethod\n    def fromProcString(cls, line, **kw):\n        elts = line.split(None, 5)\n        if len(elts) < 5:\n            raise ValueError(\"Cannot parse proc/maps line: %r\" % line)\n        addrRange = elts[0]\n        permissions = elts[1]\n        offset = elts[2]\n        device = elts[3]\n        inode = elts[4]\n        \n        if len(elts) > 5:\n            pathname = elts[5]\n        else:\n            pathname = None\n\n        start_address, end_address = [int(x, 16) for x in addrRange.split(\"-\")]\n        offset = int(offset, 16)\n        device = [int(x, 16) for x in device.split(\":\")]\n        inode = int(inode)\n\n        readable, writable, executable, shared, private = \\\n            cls.parse_permissions_string(permissions)\n        \n        return cls(start_address=start_address, end_address=end_address,\n                   readable=readable, writable=writable, executable=executable,\n                   shared=shared, private=private, device=device, inode=inode,\n                   offset=offset, pathname=pathname, **kw)\n\nif __name__ == \"__main__\":\n    import os\n    print(MemoryMap.forProcess(os.getpid()))\n## \n## Local variables:\n## tab-width: 8\n## indent-tabs-mode: nil\n## End:\n## vi: set expandtab tabstop=8\n","sub_path":"mdb/procfs.py","file_name":"procfs.py","file_ext":"py","file_size_in_byte":4926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"349805328","text":"import numpy as np\n\nvisited = set()\n\ndef init(cols, rows):\n    res = np.array([[0 for x in range(0, cols)]]*rows)\n    for i in range(0, rows):\n        for j in range(0, cols):\n            res[i,j] = i + j\n    return res\n    \ndef search(key, array, cols, rows):\n    # find the minimum box\n    tl = lt = 0\n    br = cols - 1\n    rb = rows - 1\n    while tl < cols and array[0, tl] <= key: tl += 1\n    while lt < rows and array[lt, 0] <= key: lt += 1\n    while br >= 0 and array[rows-1,br] >= key: br -= 1\n    while rb >= 0 and array[rb,cols-1] >= key: rb -= 1\n    tl -= 1\n    lt -= 1\n    if br == -1: br += 1\n    if rb == -1: rb += 1\n
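    # tl/br bracket the key's possible column range, lt/rb its possible row range; only that sub-box needs searching\n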
    start_col = min(tl, br)\n    start_row = min(lt, rb)\n    end_col = max(tl, br)\n    end_row = max(lt, rb)\n    print('vertical: {}-{}'.format(start_row, end_row))\n    print('horizontal: {}-{}'.format(start_col, end_col))\n\n    find(array, start_col, start_row, end_col, end_row, key)\n\ndef find(array, col, row, col_end, row_end, key):\n    global visited\n\n    if row > row_end: return\n    if col > col_end: return\n\n    if array[row, col] == key and (row,col) not in visited:\n        print('({},{})'.format(row, col))\n        visited.add((row,col))\n        return\n\n    find(array, col, row+1, col_end, row_end, key)\n    find(array, col+1, row, col_end, row_end, key)\n\ndef doit():\n    cols = 10\n    rows = 10\n    ar = init(cols,rows)\n    print(ar)\n    search(5, ar, cols, rows)\n\ndoit()\n\n","sub_path":"11/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"586079745","text":"\"\"\"\nstruct manpage:\nhttps://docs.python.org/3/library/struct.html#struct-format-strings\n\"\"\"\n\nfrom __future__ import annotations\nfrom pathlib import Path\nfrom datetime import datetime\nimport typing as T\n\nimport numpy as np\nimport xarray\n\nfrom .config import read_nml\nfrom . import find\nfrom . import LSP\n\nfrom .hdf5 import read as h5read\n\n\n# do NOT use lru_cache--can have weird unexpected effects with complicated setups\ndef config(path: Path) -> dict[str, T.Any]:\n    \"\"\"\n    read simulation input configuration from .nml Fortran namelist file\n\n    Parameters\n    ----------\n    path: pathlib.Path\n        config file path\n\n    Returns\n    -------\n    params: dict\n        simulation parameters from config file\n    \"\"\"\n\n    return read_nml(find.config(path))\n\n\ndef simsize(path: Path) -> tuple[int, ...]:\n    \"\"\"get simulation dimensions\"\"\"\n\n    return h5read.simsize(find.simsize(path))\n\n\ndef grid(\n    path: Path, *, var: set[str] | None = None, shape: bool = False\n) -> dict[str, T.Any]:\n    \"\"\"\n    get simulation grid\n\n    Parameters\n    ----------\n\n    path: pathlib.Path\n        path to simgrid.*\n    var: set of str\n        read only these grid variables\n    shape: bool, optional\n        read only the shape of the grid instead of the data itself\n    \"\"\"\n\n    fn = find.grid(path)\n\n    xg = h5read.grid(fn, var=var, shape=shape)\n\n    xg[\"filename\"] = fn\n\n    return xg\n\n\ndef frame(\n    path: Path,\n    time: datetime | None = None,\n    var: set[str] | None = None,\n    *,\n    cfg: dict[str, T.Any] | None = None,\n    xg: dict[str, T.Any] | None = None,\n) -> xarray.Dataset:\n    \"\"\"\n    load a frame of simulation data, automatically selecting the correct\n    functions based on simulation parameters\n\n    Parameters\n    ----------\n    path: pathlib.Path\n        filename (or directory) for this timestep\n    time: datetime.datetime\n        time to load from simulation output\n    var: set of str\n        variable(s) to read\n    cfg: dict\n        to avoid reading config.nml\n    xg: dict\n        to avoid reading simgrid.*, useful to save time when reading data files in a loop\n    \"\"\"\n\n    # %% default variables\n    if not var:\n        var = {\"ne\", \"Ti\", \"Te\", \"v1\", \"v2\", \"v3\", \"J1\", \"J2\", \"J3\", \"Phi\"}\n\n    if isinstance(var, str):\n        var = [var]\n    var = set(var)\n\n    # %% file or directory\n    path = Path(path).expanduser()\n    if path.is_dir():\n        if time is None:\n            raise ValueError(\"must specify time for directory\")\n        path = find.frame(path, time)\n    # %% config file needed\n    if not cfg:\n        cfg = config(path.parent)\n\n    flag = h5read.flagoutput(path, cfg)\n\n    if flag == 3:\n        dat = h5read.frame3d_curvne(path, xg)\n    elif flag == 1:\n        dat = h5read.frame3d_curv(path, var, xg)\n    elif flag == 2:\n        dat = 
h5read.frame3d_curvavg(path, var, xg)\n else:\n raise ValueError(f\"Unsure how to read {path} with flagoutput {flag}\")\n\n dat.attrs[\"filename\"] = path\n\n dat.update(derive(dat, var, flag))\n\n return dat\n\n\ndef derive(dat: xarray.Dataset, var: set[str], flag: int) -> xarray.Dataset:\n lx = (dat.dims[\"x1\"], dat.dims[\"x2\"], dat.dims[\"x3\"])\n\n # %% Derived variables\n if flag == 1:\n if {\"ne\", \"v1\", \"Ti\"} & var:\n dat[\"ne\"] = ((\"x1\", \"x2\", \"x3\"), dat[\"ns\"][LSP - 1, :, :, :].data)\n # np.any() in case neither is an np.ndarray\n if dat[\"ns\"].shape[0] != LSP or not np.array_equal(dat[\"ns\"].shape[1:], lx):\n raise ValueError(\n f\"may have wrong permutation on read. lx: {lx} ns x1,x2,x3: {dat['ns'].shape}\"\n )\n if \"v1\" in var:\n dat[\"v1\"] = (\n (\"x1\", \"x2\", \"x3\"),\n (dat[\"ns\"][:6, :, :, :] * dat[\"vs1\"][:6, :, :, :]).sum(axis=0).data\n / dat[\"ne\"].data,\n )\n if \"Ti\" in var:\n dat[\"Ti\"] = (\n (\"x1\", \"x2\", \"x3\"),\n (dat[\"ns\"][:6, :, :, :] * dat[\"Ts\"][:6, :, :, :]).sum(axis=0).data\n / dat[\"ne\"].data,\n )\n if \"Te\" in var:\n dat[\"Te\"] = ((\"x1\", \"x2\", \"x3\"), dat[\"Ts\"][LSP - 1, :, :, :].data)\n\n if \"J1\" in var:\n # np.any() in case neither is an np.ndarray\n if np.any(dat[\"J1\"].shape != lx):\n raise ValueError(\"J1 may have wrong permutation on read\")\n\n if \"time\" not in dat:\n dat = dat.assign_coords({\"time\": time(dat.filename)})\n\n return dat\n\n\ndef glow(fn: Path) -> xarray.Dataset:\n \"\"\"read GLOW data\"\"\"\n return h5read.glow_aurmap(fn)\n\n\ndef Efield(fn: Path) -> xarray.Dataset:\n \"\"\"load Efield data \"Efield_inputs\"\n\n Parameters\n ----------\n fn: pathlib.Path\n filename for this timestep\n\n Returns\n -------\n dat: dict of np.ndarray\n electric field\n \"\"\"\n\n fn = Path(fn).expanduser().resolve(strict=True)\n\n return h5read.Efield(fn)\n\n\ndef precip(fn: Path) -> xarray.Dataset:\n \"\"\"load precipitation to disk\n\n Parameters\n ----------\n fn: pathlib.Path\n path to precipitation file\n\n Returns\n -------\n dat: dict\n precipitation\n \"\"\"\n\n fn = Path(fn).expanduser().resolve(strict=True)\n\n return h5read.precip(fn)\n\n\ndef time(file: Path) -> datetime:\n \"\"\"\n read simulation time of a file\n \"\"\"\n\n return h5read.time(file)\n\n\ndef get_lxs(xg: dict[str, T.Any]) -> tuple[int, int, int]:\n lx = None\n for k in (\"lx\", \"lxs\", \"lx1\"):\n if k in xg:\n if k == \"lx1\":\n lx = [xg[\"lx1\"], xg[\"lx2\"], xg[\"lx3\"]]\n break\n else:\n lx = xg[k]\n\n if lx is None:\n raise IndexError(\"Did not find grid size\")\n\n return lx[0], lx[1], lx[2]\n","sub_path":"src/gemini3d/read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":5674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"222614258","text":"import os\nfrom os.path import join as ospj\n\nimport torch\nimport torch.utils.data as data\nfrom PIL import Image\n\n\nclass Wild(data.Dataset):\n def __init__(self, data_root, dataset_name, mode, transform_img):\n super().__init__()\n self.data_root = data_root\n self.dataset_name = dataset_name\n self.mode = mode\n\n self.image_dir = ospj(self.data_root, dataset_name, 'images')\n self.transform_img = transform_img\n self.test_images = []\n\n self.preprocess()\n self.num_images = len(self.test_images)\n\n def preprocess(self):\n assert os.path.exists(self.image_dir), f'Image data directory does not exist: {self.image_dir}'\n self.test_images = sorted(os.listdir(self.image_dir))\n print(f'Finished preprocessing 
the {self.dataset_name} dataset...')\n\n    def __getitem__(self, index):\n        if self.mode == 'test':\n            filename = self.test_images[index]\n            image = Image.open(ospj(self.image_dir, filename))\n            image = image.convert('RGB')\n        else:\n            image = None\n            raise NotImplementedError\n        return self.transform_img(image), torch.LongTensor([index])\n\n    def __len__(self):\n        return self.num_images\n","sub_path":"wild.py","file_name":"wild.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"33836055","text":"from random import randint\r\nfrom pygame import *\r\ninit()\r\n\r\nC_WHITE = (255, 255, 255)\r\nC_RED = (255, 0, 0)\r\nC_GREEN = (0, 255, 0)\r\nC_BLACK = (0, 0, 0)\r\nC_FLOOR = (12, 17, 34)\r\nC_WALLS = (86, 4, 1)\r\n\r\nbig_font = font.SysFont(\"Corbel\", 72, True)\r\nregular_font = font.SysFont(\"Corbel\", 45, True)\r\n\r\ngame_over = big_font.render('Game over', True, C_RED)\r\nwin = big_font.render('You won', True, C_GREEN)\r\npause_text = big_font.render('Pause', True, C_WHITE)\r\nrestart_text = regular_font.render('Press R to restart', True, C_WHITE)\r\n\r\ndelayer = time.Clock()\r\n\r\nmixer.music.load('sounds/ambience.wav')  # background forest ambience\r\nmixer.music.set_volume(.7)\r\nmixer.music.play(-1)  # -1 means loop forever\r\n\r\npistol_sounds = [mixer.Sound('sounds/pistol-fire-1.ogg'), mixer.Sound('sounds/pistol-fire-2.ogg')]\r\nshotgun_sounds = [mixer.Sound('sounds/shotgun-fire-1.ogg'), mixer.Sound('sounds/shotgun-fire-2.ogg')]\r\npistol_reload = mixer.Sound('sounds/reload-pistol.ogg')\r\nshotgun_reload = mixer.Sound('sounds/reload-shotgun.ogg')\r\nimposible_reload = mixer.Sound('sounds/imposible-reload.ogg')\r\n\r\nimg_file_back = 'images/back.png'\r\nhero_images_pistol = [transform.scale(image.load('images/hero_pistol_walk_1.png'),(80,85)),\r\n                      transform.scale(image.load('images/hero_pistol_walk_2.png'),(80,85)),\r\n                      transform.scale(image.load('images/hero_pistol_normal.png'),(80,85))]\r\nhero_images_shotgun = [transform.scale(image.load('images/hero_shotgun_walk_1.png'),(80,85)),\r\n                       transform.scale(image.load('images/hero_shotgun_walk_2.png'),(80,85)),\r\n                       transform.scale(image.load('images/hero_shotgun_normal.png'),(80,85))]\r\nimg_file_enemy = 'images/enemy.png'\r\nimg_file_princess = 'images/princess.png'\r\nimg_bullet = 'images/bullet.png'\r\n\r\nclass Princess(sprite.Sprite):\r\n    def __init__(self, **args):\r\n        sprite.Sprite.__init__(self)\r\n        self.image = transform.scale(image.load(img_file_princess), (44, 85))\r\n        self.rect = self.image.get_rect()\r\n        self.rect.x = args['x']\r\n        self.rect.y = args['y']\r\n    \r\nclass Hero(sprite.Sprite):\r\n    def __init__(self, x_speed=0, y_speed=0, x=20, y=10):\r\n        sprite.Sprite.__init__(self)\r\n        self.image = hero_images_pistol[0]\r\n        self.rect = self.image.get_rect()\r\n        self.rect.x = x\r\n        self.rect.y = y\r\n        self.x_speed = x_speed\r\n        self.y_speed = y_speed\r\n        self.stands_on = False\r\n        \r\n        self.direction = 'right'\r\n        self.gun = 'pistol'\r\n        self.walk = False\r\n        self.counter = 0\r\n        self.pistol_clip = 6\r\n        self.shotgun_clip = 2\r\n        \r\n    def update_direction(self, direction):\r\n        if self.direction != direction:\r\n            self.direction = direction\r\n    \r\n    def gravitate(self):\r\n        self.y_speed += .25\r\n    \r\n    def jump(self, y):\r\n        if self.stands_on:\r\n            self.y_speed = y\r\n\r\n    def fire(self):\r\n        global reload_counter, reload_time\r\n        if len(gun_clip) > 0:\r\n            reload_time = False\r\n            reload_counter = 0\r\n            i = 1\r\n
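            # walk to the last bullet icon in the on-screen clip and remove it (the spent round)\r\n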
            for patron in gun_clip:\r\n                if i == len(gun_clip):\r\n                    gun_clip.remove(patron)\r\n                    if self.gun == 'pistol':\r\n                        self.pistol_clip -= 1\r\n                    elif self.gun == 'shotgun':\r\n                        self.shotgun_clip -= 1\r\n                    break\r\n                i += 1\r\n            if self.gun == 'pistol':\r\n                Bullet(\r\n                    direction = self.direction, \r\n                    x = self.rect.midright[0] if self.direction == 'right' else self.rect.midleft[0], \r\n                    y = self.rect.centery - 15, \r\n                    speed = 15, \r\n                    range = 600)\r\n                pistol_sounds[randint(0, len(pistol_sounds) - 1)].play()\r\n            elif self.gun == 'shotgun':\r\n                Bullet(\r\n                    direction = self.direction,\r\n                    x = self.rect.midright[0] - 25 if self.direction == 'right' else self.rect.midleft[0] + 10,\r\n                    y = self.rect.centery - 5,\r\n                    range = 300)\r\n                Bullet(\r\n                    direction = self.direction, \r\n                    x = self.rect.midright[0] - 25 if self.direction == 'right' else self.rect.midleft[0] + 10,\r\n                    y = self.rect.centery + 5,\r\n                    range = 300)\r\n                Bullet(\r\n                    direction = self.direction,\r\n                    x = self.rect.midright[0] - 25 if self.direction == 'right' else self.rect.midleft[0] + 10,\r\n                    y = self.rect.centery + 15,\r\n                    range = 300)\r\n                shotgun_sounds[randint(0, len(shotgun_sounds) - 1)].play()\r\n        else:\r\n            imposible_reload.play()\r\n    \r\n    def update(self):\r\n        if self.x_speed != 0:\r\n            if self.direction == 'right':\r\n                self.image = (hero_images_pistol if self.gun == 'pistol' else hero_images_shotgun)[self.walk]\r\n            elif self.direction == 'left':\r\n                self.image = transform.flip((hero_images_pistol if self.gun == 'pistol' else hero_images_shotgun)[self.walk], True, False)\r\n            if self.counter == 0:\r\n                self.counter = 10\r\n                self.walk = not self.walk\r\n            else:\r\n                self.counter -= 1\r\n        else:\r\n            if self.direction == 'right':\r\n                self.image = (hero_images_pistol if self.gun == 'pistol' else hero_images_shotgun)[2]\r\n            else:\r\n                self.image = transform.flip((hero_images_pistol if self.gun == 'pistol' else hero_images_shotgun)[2], True, False)\r\n\r\n        self.rect.x += self.x_speed\r\n        # if we ran into a wall, snap flush against it\r\n        platforms_touched = sprite.spritecollide(self, barriers, False)\r\n        if self.x_speed > 0:  # moving right: put the hero's right edge flush against the wall's left edge\r\n            for p in platforms_touched:\r\n                self.rect.right = min(self.rect.right, p.rect.left)  # if several walls are touched, take the smallest possible right edge\r\n        elif self.x_speed < 0:  # moving left: put the hero's left edge flush against the wall's right edge\r\n            for p in platforms_touched:\r\n                self.rect.left = max(self.rect.left, p.rect.right)  # if several walls are touched, take the largest possible left edge\r\n        \r\n        self.gravitate()\r\n        self.rect.y += self.y_speed\r\n        # if we ran into a wall vertically, snap flush against it\r\n        platforms_touched = sprite.spritecollide(self, barriers, False)\r\n        if self.y_speed > 0:  # moving down\r\n            for p in platforms_touched:\r\n                self.y_speed = 0\r\n                # Find the highest of the platforms below, align with it and remember it as our support:\r\n                if p.rect.top < self.rect.bottom:\r\n                    self.rect.bottom = p.rect.top\r\n                    self.stands_on = p\r\n        elif self.y_speed < 0:  # moving up\r\n            self.stands_on = False  # we are moving up, so we are no longer standing on anything!\r\n            for p in platforms_touched:\r\n                self.y_speed = 0  # hitting a wall kills the vertical speed\r\n                self.rect.top = max(self.rect.top, p.rect.bottom)  # align our top edge with the bottom edges of the walls we ran into\r\n    \r\nclass Wall(sprite.Sprite):\r\n    def __init__(self, **args):\r\n        sprite.Sprite.__init__(self)\r\n
        self.image = Surface([args['width'], args['height']])\r\n        if args.get('color') != None:\r\n            self.image.fill(args['color'])\r\n        self.rect = self.image.get_rect()\r\n        self.rect.x = args['x']\r\n        self.rect.y = args['y']\r\n        self.add(barriers, all_sprites)\r\n\r\nclass Enemy(sprite.Sprite):\r\n    def __init__(self, **args):\r\n        sprite.Sprite.__init__(self)\r\n        self.image = transform.scale(image.load(img_file_enemy), (70, 110))\r\n        self.rect = self.image.get_rect()\r\n        self.rect.x = args['x']\r\n        self.rect.y = args['y']\r\n        self.add(enemies, all_sprites)\r\n    \r\n    def update(self):\r\n        self.rect.x += randint(-5, 5)\r\n\r\nclass Bullet(sprite.Sprite):\r\n    def __init__(self, **args):\r\n        sprite.Sprite.__init__(self)\r\n        self.direction = args['direction']\r\n        self.image = transform.scale(image.load(img_bullet), (15, 6))\r\n        self.rect = self.image.get_rect()\r\n        self.rect.x = args['x']\r\n        self.rect.y = args['y']\r\n        self.range = args['range']\r\n        if args.get('speed'):\r\n            self.speed = args['speed']\r\n        else:\r\n            self.speed = randint(10, 20)\r\n        self.add(bullets, all_sprites)\r\n\r\n    def update(self):\r\n        global robin\r\n        if self.direction == 'right':\r\n            self.rect.x += self.speed\r\n        else: \r\n            self.rect.x -= self.speed\r\n        if self.rect.x < robin.rect.x - self.range or self.rect.x > robin.rect.x + self.range:\r\n            self.kill()\r\n\r\nclass Patron(sprite.Sprite):\r\n    def __init__(self, **args):\r\n        sprite.Sprite.__init__(self)\r\n        self.image = transform.rotate(transform.scale(image.load(img_bullet), (15, 6)), 90)\r\n        self.rect = self.image.get_rect()\r\n        self.rect.x = args['x']\r\n        self.rect.y = args['y']\r\n        self.add(gun_clip)\r\n\r\ndef create_walls():\r\n    Wall(x = 50, y = 150, width = 480, height = 20, color = C_WALLS)\r\n    Wall(x = 700, y = 0, width = 20, height = 400, color = C_WALLS)\r\n    Wall(x = 470, y = 280, width = 250, height = 20, color = C_WALLS)\r\n    Wall(x = 50, y = 380, width = 300, height = 20, color = C_WALLS)\r\n    Wall(x = -200, y = 540, width = 5600, height = 20, color = C_FLOOR)\r\n    Wall(x = -200, y = win_height / 2, width = 20, height = win_height, color = C_WALLS)\r\n    Wall(x = 5400, y = win_height / 2, width = 20, height = win_height, color = C_WALLS)\r\n\r\ndef create_enemies():\r\n    Enemy(x = 50, y = 440)\r\n    Enemy(x = 800, y = 440)\r\n\r\ndef change_gun(gun):\r\n    global reload_time, reload_counter\r\n    reload_time = True\r\n    reload_counter = 0\r\n    for item in gun_clip:\r\n        item.kill()\r\n    robin.gun = gun\r\n    for i in range(robin.pistol_clip if gun == 'pistol' else robin.shotgun_clip):\r\n        gun_clip.add(Patron(x = i * 20 + 20, y = win_height - 40))\r\n\r\ndef show_text(text, seconds, color):\r\n    ''' A big workaround to fade text in and out smoothly; doing it via an RGBA alpha channel does not work here. 
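Fades in for a quarter of the given duration, holds for half, then fades out for the last quarter. 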
'''\r\n    temp_color = [0,0,0]\r\n    window.fill(C_BLACK)\r\n    display.update()\r\n    for ap in range(int(seconds * 60 / 4)):\r\n        if temp_color[0] < color[0]:\r\n            temp_color[0] += int(color[0] / (seconds * 60 / 4))\r\n        if temp_color[1] < color[1]:\r\n            temp_color[1] += int(color[1] / (seconds * 60 / 4))\r\n        if temp_color[2] < color[2]:\r\n            temp_color[2] += int(color[2] / (seconds * 60 / 4))\r\n        rtext = font.SysFont(\"Corbel\", 72, True).render(text, True, temp_color, C_BLACK)\r\n        window.blit(rtext, (win_width / 2 - rtext.get_width() / 2, win_height / 2 - rtext.get_height() / 2))\r\n        display.update()\r\n        delayer.tick(60)\r\n    for kek in range(int(60 * seconds / 2)):\r\n        rtext = font.SysFont(\"Corbel\", 72, True).render(text, True, C_WHITE, C_BLACK)\r\n        window.blit(rtext, (win_width / 2 - rtext.get_width() / 2, win_height / 2 - rtext.get_height() / 2))\r\n        display.update()\r\n        delayer.tick(60)\r\n    for ap in range(int(seconds * 60 / 4)):\r\n        if temp_color[0] > 0:\r\n            temp_color[0] -= int(color[0] / (seconds * 60 / 4))\r\n        if temp_color[1] > 0:\r\n            temp_color[1] -= int(color[1] / (seconds * 60 / 4))\r\n        if temp_color[2] > 0:\r\n            temp_color[2] -= int(color[2] / (seconds * 60 / 4))\r\n        rtext = font.SysFont(\"Corbel\", 72, True).render(text, True, temp_color, C_BLACK)\r\n        window.blit(rtext, (win_width / 2 - rtext.get_width() / 2, win_height / 2 - rtext.get_height() / 2))\r\n        display.update()\r\n        delayer.tick(60)\r\n\r\n# Start the game\r\ndisplay.set_caption(\"Arcade\")\r\n# window = display.set_mode((0, 0), FULLSCREEN)  # fullscreen mode\r\nwindow = display.set_mode((800, 600)) \r\nwin_width = display.Info().current_w  # get the window width\r\nwin_height = display.Info().current_h  # get the window height\r\n\r\nleft_bound = win_width / 2 - 50  # bounds the hero never crosses (the background scrolls instead)\r\nright_bound = win_width / 2 + 50\r\n\r\nshift = 50\r\nback = transform.scale(image.load(img_file_back), (win_width, win_height))\r\n\r\nall_sprites = sprite.Group()\r\nbarriers = sprite.Group()\r\nenemies = sprite.Group()\r\nbullets = sprite.Group()\r\ngun_clip = sprite.Group()\r\n\r\nrobin = Hero()\r\nall_sprites.add(robin)\r\n\r\nchange_gun('pistol')\r\ncreate_walls()\r\ncreate_enemies()\r\n\r\npr = Princess(x = win_width + 500, y = win_height - 140)\r\nall_sprites.add(pr)\r\n\r\n\r\nrun = True\r\nfinished = False\r\npause = False\r\nreload_time = True\r\nreload_counter = 0\r\n\r\nshow_text(\"Level 1\", 3, C_WHITE)\r\n\r\nwhile run:\r\n    for e in event.get():\r\n        if e.type == QUIT:\r\n            run = False\r\n        elif e.type == KEYDOWN:\r\n            if e.key == K_q:\r\n                run = False\r\n            if e.key == K_r and finished:\r\n                reload_time = True\r\n                reload_counter = 0\r\n                for item in all_sprites:\r\n                    item.kill()\r\n                for item in gun_clip:\r\n                    item.kill()\r\n                create_walls()\r\n                create_enemies()\r\n                shift = 50\r\n                robin = Hero()\r\n                all_sprites.add(robin)\r\n                change_gun('pistol')\r\n                pr = Princess(x = win_width + 500, y = win_height - 140)\r\n                all_sprites.add(pr)\r\n                finished = False\r\n            if e.key == K_ESCAPE and not finished:\r\n                if pause:\r\n                    pause = False\r\n                    mixer.music.unpause()\r\n                elif not pause:\r\n                    pause = True\r\n                    mixer.music.pause()\r\n                    window.blit(pause_text, (win_width / 2 - pause_text.get_width() / 2, win_height / 2 - pause_text.get_height() / 2))\r\n                    display.update()\r\n            if not pause and not finished:\r\n                if e.key == K_r:\r\n                    if (robin.gun == 'pistol' and len(gun_clip) != 6) or (robin.gun == 'shotgun' and len(gun_clip) != 2):\r\n                        reload_time = True\r\n                    else:\r\n                        imposible_reload.play()\r\n                if e.key == K_1:\r\n
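                    # keys 1 and 2 switch between pistol and shotgun\r\n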
change_gun('pistol')\r\n                elif e.key == K_2:\r\n                    change_gun('shotgun')\r\n                elif e.key == K_LEFT or e.key == K_a:\r\n                    robin.update_direction('left')\r\n                    robin.x_speed = -5\r\n                elif e.key == K_RIGHT or e.key == K_d:\r\n                    robin.update_direction('right')\r\n                    robin.x_speed = 5\r\n                elif e.key == K_UP or e.key == K_w:\r\n                    robin.jump(-7)\r\n                elif e.key == K_SPACE:\r\n                    robin.fire()\r\n        elif e.type == KEYUP:\r\n            if e.key == K_LEFT:\r\n                robin.x_speed = 0\r\n            elif e.key == K_RIGHT:\r\n                robin.x_speed = 0\r\n    if not pause:\r\n        if not finished:\r\n            all_sprites.update()\r\n            if sprite.spritecollide(robin, enemies, False):\r\n                robin.kill()\r\n            sprite.groupcollide(bullets, enemies, True, True)\r\n            sprite.groupcollide(bullets, barriers, True, False)\r\n            if (robin.rect.x > right_bound and robin.x_speed > 0 or robin.rect.x < left_bound and robin.x_speed < 0): \r\n                # when the hero pushes past the left or right bound, turn his movement into a screen shift instead\r\n                shift -= robin.x_speed \r\n                # move every sprite by the common shift (the bombs too, since they live in a separate list):\r\n                for s in all_sprites:\r\n                    s.rect.x -= robin.x_speed  # robin is in this list as well, so his own movement is visually cancelled out\r\n            \r\n            if reload_time:\r\n                if reload_counter == 0:\r\n                    if (robin.gun == 'pistol' and len(gun_clip) == 6) or (robin.gun == 'shotgun' and len(gun_clip) == 2):\r\n                        reload_time = False\r\n                    else:\r\n                        if robin.gun == 'pistol':\r\n                            robin.pistol_clip += 1\r\n                            pistol_reload.play()\r\n                        elif robin.gun == 'shotgun':\r\n                            robin.shotgun_clip += 1\r\n                            shotgun_reload.play()\r\n                        reload_counter = 40\r\n                        gun_clip.add(Patron(x = len(gun_clip) * 20 + 20, y = win_height - 40))\r\n                else:\r\n                    reload_counter -= 1\r\n            \r\n            # draw the background with the scroll shift\r\n            local_shift = shift % win_width\r\n            window.blit(back, (local_shift, 0))\r\n            if local_shift != 0:\r\n                window.blit(back, (local_shift - win_width, 0))\r\n            \r\n            all_sprites.draw(window)\r\n            gun_clip.draw(window)\r\n\r\n            if sprite.collide_rect(robin, pr):\r\n                finished = True\r\n                window.fill(C_BLACK)\r\n                window.blit(win, (win_width / 2 - win.get_width() / 2, 250))\r\n                window.blit(restart_text, (win_width / 2 - restart_text.get_width() / 2, 350))\r\n\r\n            if robin not in all_sprites or robin.rect.top > win_height:\r\n                finished = True \r\n                window.fill(C_BLACK)\r\n                window.blit(game_over, (win_width / 2 - game_over.get_width() / 2, 250))\r\n                window.blit(restart_text, (win_width / 2 - restart_text.get_width() / 2, 350))\r\n    display.update()\r\n    delayer.tick(60)","sub_path":"Python Pro/Arcade/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":18942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} {"seq_id":"316414653","text":"#coding=utf-8\nimport os\nimport random\nimport xml\nfrom ConstValue.global_variable import IMG_WIDTH, IMG_HEIGHT, SAMPLE_PATH\nimport cv2\nfrom PreTrain.labelParser import LabelParserHandler\n\nclass SamplingProcess:\n    def __init__(self,file,mergeFlag = False):\n        self.file = file\n        self.annotationfile = self.__mapToAnnotation()\n        self.mergeFlag = mergeFlag\n        self.labelRegion = self.__getGroundTruthArear(mergeFlag)\n        self.image = cv2.imread(self.file)\n        self.sampleSavePath = self.__getSampleSavePath()\n        self.__creatSamplePath(SAMPLE_PATH)\n\n    # create the sample directories\n    def __creatSamplePath(self,samplePath):\n        for item in self.labelRegion:\n            storePath = os.path.join(samplePath,item+\"/\")\n            if not os.path.exists(storePath):\n                os.mkdir(storePath)\n        storePath = os.path.join(samplePath,\"background/\")\n        if not os.path.exists(storePath):\n            os.mkdir(storePath)\n\n    # sample storage path\n    def __getSampleSavePath(self):\n        path = os.path.dirname(self.file)\n        path = os.path.dirname(path)\n        return os.path.join(path,\"sample\")\n\n    # map the image filename to its annotation file\n    def __mapToAnnotation(self):\n        path = os.path.dirname(self.file)\n        path =os.path.dirname(path)\n        filename = self.file.split(\"/\")[-1]\n        attionationFilename = filename.split(\".\")[0] + \".xml\"\n        return os.path.join(path,\"annotation\",attionationFilename)\n\n    # sample the image to obtain training examples; nut data is currently too scarce, so nut sampling is disabled\n    def sampling(self,ratio,sampleNums):\n        filename = self.file.split(\"/\")[-1].split(\".\")[0]\n        # compute how many samples to draw\n        posGoal = sampleNums * ratio\n        negGoal = sampleNums - posGoal\n        backgroundSampleNum = 0\n        sampleCounter = {}\n        for labelname in self.labelRegion:\n            sampleCounter[labelname] = 0\n        size = self.image.shape\n        while(backgroundSampleNum < negGoal):\n            storeFlag = True\n            # flag: apply a random transform to this sample\n            TransferFlag = random.choice([True, False])\n            y1 = random.randint(0, size[0]-IMG_HEIGHT)\n            x1 = random.randint(0, size[1]-IMG_WIDTH)\n            x2 = x1+IMG_WIDTH\n            y2 = y1+IMG_HEIGHT\n            coordinate = ([y1,x1],[y2,x2])\n            # get this sample's label\n            sign = self.__getSampleSign(coordinate)\n            if \"again\" == sign:\n                continue\n            elif \"background\"==sign :\n                backgroundSampleNum += 1\n                storeIndex = backgroundSampleNum\n            else:\n                sampleCounter[sign] += 1\n                storeIndex = sampleCounter[sign]\n                if sampleCounter[sign] > posGoal:\n                    storeFlag = False\n            if storeFlag:\n                storeFile = os.path.join(self.sampleSavePath,sign,filename+\" \"+str(storeIndex)+\".jpg\")\n                sample = self.image[y1:y2,x1:x2]\n                if TransferFlag:\n                    # apply the random transform\n                    pass\n                cv2.imwrite(storeFile,sample)\n        # sample positive examples\n        for item in sampleCounter:\n            while sampleCounter[item] minx:\n                    startx = minx\n                    endx = x\n                else:\n                    startx = x\n                    endx = minx\n                if y > miny:\n                    starty = miny\n                    endy = y\n                else:\n                    starty = y\n                    endy = miny\n                x1 = random.randint(startx, endx)\n                y1 = random.randint(starty, endy)\n            except Exception:\n                raise Exception(\"Sampling Size setting Error\")\n            x2 = x1 + IMG_WIDTH\n            y2 = y1 + IMG_HEIGHT\n            sample = self.image[x1:x2, y1:y2]\n            return sample\n\n    # get image crops at the labelled positions\n    def getTestImg(self):\n        imgList = {}\n        for item in self.labelRegion:\n            imgList[item] = []\n            for labelRec in self.labelRegion[item]:\n                img = self.__getOrientalPositionSample(labelRec)\n                imgList[item].append(img)\n        return imgList\n\n    # get the image's annotation info\n\n    def __getLabels(self):\n        handler = LabelParserHandler()\n        parser = xml.sax.make_parser()\n        parser.setFeature(xml.sax.handler.feature_namespaces, 0)\n        parser.setContentHandler(handler)\n        parser.parse(self.annotationfile)\n        labels = handler.labelresult()\n        # drop elements labelled none\n        for i in range(len(labels)):\n            if labels[i][\"name\"] == \"none\" :\n                del labels[i]\n        return labels\n\n\n    # compute the fraction of the ground-truth box covered by its intersection with the sampling box\n    def __caculCrossRate(self, Reframe, GTframe):\n        x1 = Reframe[0][0]\n        y1 = Reframe[0][1]\n        width1 = Reframe[1][0] - Reframe[0][0]\n        height1 = Reframe[1][1] - Reframe[0][1]\n\n        x2 = GTframe[\"xmin\"]\n        y2 = GTframe[\"ymin\"]\n        width2 = GTframe[\"xmax\"] - GTframe[\"xmin\"]\n        height2 = GTframe[\"ymax\"] - GTframe[\"ymin\"]\n\n        endx = max(x1 + width1, x2 + width2)\n        startx = min(x1, x2)\n        width = width1 + width2 - (endx - startx)\n\n        endy = max(y1 + height1, y2 + height2)\n        starty = min(y1, y2)\n        height = height1 + height2 - (endy - starty)\n\n        if width <= 0 or height <= 0:\n            ratio = 0  # overlap ratio is 0\n        else:\n            Area = width * height  # intersection area of the two rectangles\n            Area2 = width2 * height2\n            ratio = Area * 1. / Area2\n        return ratio\n\n    # decide the sample's label\n    def __getSampleSign(self, coordinate):\n        for categoryInfo in self.labelRegion:\n            for item in self.labelRegion[categoryInfo]:\n                # overlap ratio >= 0.7 means a bolt sample, 0.4~0.7 means resample, below 0.4 means background\n                crossRate = self.__caculCrossRate(coordinate, item)\n                if crossRate >= 0.7:\n                    return categoryInfo\n                elif 0.4 <= crossRate < 0.7:\n                    return \"again\"\n        return \"background\"\n\n    # get the ground-truth positions of bolts and nuts\n    def __getGroundTruthArear(self,mergeFlag):\n        labelArear = {}\n        labels = self.__getLabels()\n        for item in labels:\n            elem = {}\n            elem[\"xmin\"] = eval(item[\"topleft\"][1])\n            elem[\"ymin\"] = eval(item[\"topleft\"][0])\n            elem[\"xmax\"] = eval(item[\"bottomright\"][1])\n            elem[\"ymax\"] = eval(item[\"bottomright\"][0])\n            labelname = item[\"name\"]\n            if labelArear.__contains__(labelname):\n                labelArear[labelname].append(elem)\n            else:\n                labelArear[labelname] = []\n                labelArear[labelname].append(elem)\n        return labelArear\n    # build the transform map\n    def __getSampleTransferMap(self):\n        transferMap = {}\n        transferMap[\"RandomNoise\"] = False\n        transferMap[\"GrayTransfer\"] = False\n        transferMap[\"Rote\"] = False\n        transferMap[\"affine\"] = False\n        transferMap[\"Mirror\"] = False\n        ","sub_path":"Code/PreTrain/SamplingProcess.py","file_name":"SamplingProcess.py","file_ext":"py","file_size_in_byte":8037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} {"seq_id":"341913897","text":"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport paddle.v2.fluid.core as core\nfrom paddle.v2.fluid.op import Operator\nimport unittest\n\n\ndef fc(X, W, Y):\n    ret_v = core.Net.create()\n\n    ret_v.append_op(Operator(\"mul\", X=\"X\", Y=\"W\", Out=\"pre_activation\"))\n    ret_v.append_op(Operator(\"sigmoid\", X=\"pre_activation\", Out=Y))\n    ret_v.complete_add_op(True)\n    return ret_v\n\n\nclass TestNet(unittest.TestCase):\n    def test_net_all(self):\n        net = core.Net.create()\n        op1 = Operator(\"sum\", X=[\"X\", \"Y\"], Out=\"Out\")\n        net.append_op(op1)\n\n        net2 = core.Net.create()\n        net2.append_op(fc(X=\"X\", W=\"w\", Y=\"fc.out\"))\n        net2.complete_add_op(True)\n        net.append_op(net2)\n        net.complete_add_op(True)\n\n        expected = '''\nOp(plain_net), inputs:{all[W, X, Y]}, outputs:{all[Out, fc.out, pre_activation]}.\n    Op(sum), inputs:{X[X, Y]}, outputs:{Out[Out]}.\n    Op(plain_net), inputs:{all[W, X]}, outputs:{all[fc.out, pre_activation]}.\n        Op(plain_net), inputs:{all[W, X]}, outputs:{all[fc.out, pre_activation]}.\n            Op(mul), inputs:{X[X], Y[W]}, outputs:{Out[pre_activation]}.\n            Op(sigmoid), inputs:{X[pre_activation]}, outputs:{Out[fc.out]}.\n'''\n        self.assertEqual(expected, \"\\n\" + str(net))\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","sub_path":"python/paddle/v2/fluid/tests/test_net.py","file_name":"test_net.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} {"seq_id":"465685678","text":"from pygame.sprite import Sprite\nimport
pygame.image\nimport os\nfrom . import constants\nclass Character(Sprite):\n @staticmethod\n def loadFrames(type):\n frames={}\n dirs=[\"up\",\"down\",\"left\",\"right\"]\n num_frames=3\n for dir in dirs:\n frame_array=[0,0,0]\n i=0\n while i2:\n self.frame=0\n tile=self.map.tileAt(self.x,self.y)\n if not tile.clear:\n self.x=old_x\n self.y=old_y\n","sub_path":"old/lib/character.py","file_name":"character.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"303126702","text":"from simulation_utils import create_env, perform_best\nimport sys\n\ntask = 'Tosser'\nw = [0.29754784,0.03725074,0.00664673,0.80602143]\niter_count = 5 # the optimization is nonconvex, so you can specify the number of random starting points\n\n##### YOU DO NOT NEED TO MODIFY THE CODE BELOW THIS LINE #####\n\nD = create_env(task.lower())\nperform_best(D, w, iter_count)\n","sub_path":"reward_learning/run_optimizer.py","file_name":"run_optimizer.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"442397676","text":"from abc import abstractmethod\nfrom typing import (\n AbstractSet,\n Any,\n Dict,\n Iterable,\n Iterator,\n List,\n Optional,\n Protocol,\n Sequence,\n Tuple,\n Union,\n overload,\n)\n\nfrom attr import Attribute, attrib, attrs, validators\n\nREPAIR_CONLL = \"conlleval\"\nREPAIR_DISCARD = \"discard\"\nREPAIR_NONE = \"none\"\nSUPPORTED_REPAIRS = (REPAIR_CONLL, REPAIR_DISCARD, REPAIR_NONE)\n\n\ndef _validator_nonnegative(_inst: Any, _attr: Attribute, value: Any) -> None:\n if value < 0:\n raise ValueError(f\"Negative value: {repr(value)}\")\n\n\n# Instantiate in advance for _validator_optional_nonempty_str\n_optional_instance_of_str = validators.optional(validators.instance_of(str))\n\n\ndef _validator_optional_nonempty_str(_inst: Any, attr: Attribute, value: Any) -> None:\n # Check type\n _optional_instance_of_str(value, attr, value)\n # Check string isn't empty\n if not value:\n raise ValueError(f\"Empty string: {repr(value)}\")\n\n\n# Type-specific implementations to work around type checker limitations. No, writing these as\n# generic functions with type variables does not satisfy all type checkers.\ndef _tuplify_strs(strs: Iterable[str]) -> Tuple[str, ...]:\n return tuple(strs)\n\n\ndef _tuplify_mentions(\n mentions: Iterable[\"Mention\"],\n) -> Tuple[\"Mention\", ...]:\n return tuple(mentions)\n\n\n@attrs(frozen=True, slots=True)\nclass Span:\n start: int = attrib(validator=_validator_nonnegative)\n end: int = attrib(validator=_validator_nonnegative)\n\n\n@attrs(frozen=True, slots=True)\nclass Mention:\n span: Span = attrib()\n type: str = attrib()\n\n\n@attrs(frozen=True, slots=True)\nclass SentenceProvenance:\n starting_line: int = attrib()\n source: Optional[str] = attrib()\n\n\n@attrs(frozen=True, slots=True)\nclass LabeledSentence(Sequence[str]):\n tokens: Tuple[str, ...] = attrib(converter=_tuplify_strs)\n labels: Tuple[str, ...] = attrib(converter=_tuplify_strs)\n mentions: Tuple[Mention, ...] 
= attrib(default=(), converter=_tuplify_mentions)\n provenance: Optional[SentenceProvenance] = attrib(\n default=None, eq=False, kw_only=True\n )\n\n def __attrs_post_init__(self):\n if len(self.tokens) != len(self.labels):\n raise ValueError(\n f\"Tokens ({len(self.tokens)}) and labels ({len(self.labels)}) \"\n \"must be of the same length\"\n )\n if not self.tokens:\n raise ValueError(\"Tokens and labels must be non-empty\")\n\n for label in self.labels:\n # Labels cannot be None or an empty string\n if not label:\n raise ValueError(f\"Invalid label: {repr(label)}\")\n\n for token in self.tokens:\n # Labels cannot be None or an empty string\n if not token:\n raise ValueError(f\"Invalid token: {repr(token)}\")\n\n @overload\n def __getitem__(self, index: int) -> str:\n raise NotImplementedError\n\n @overload\n def __getitem__(self, index: slice) -> Tuple[str, ...]:\n raise NotImplementedError\n\n def __getitem__(self, i: Union[int, slice]) -> Union[str, Tuple[str, ...]]:\n return self.tokens[i]\n\n def __iter__(self) -> Iterator[str]:\n return iter(self.tokens)\n\n def __len__(self) -> int:\n # Guaranteed that labels and tokens are same length by construction\n return len(self.tokens)\n\n def __str__(self) -> str:\n return \" \".join(\n \"/\".join((token, label)) for token, label in zip(self.tokens, self.labels)\n )\n\n def tokens_with_labels(self) -> Tuple[Tuple[str, str], ...]:\n return tuple(zip(self.tokens, self.labels))\n\n def span_tokens(self, span: Span) -> Tuple[str, ...]:\n return self.tokens[span.start : span.end]\n\n def mention_tokens(self, mention: Mention) -> Tuple[str, ...]:\n return self.span_tokens(mention.span)\n\n\n@attrs\nclass _EncoderToken:\n entity_type: Optional[str] = attrib(validator=_validator_optional_nonempty_str)\n begin: bool = attrib(default=False, kw_only=True)\n inside: bool = attrib(default=False, kw_only=True)\n end: bool = attrib(default=False, kw_only=True)\n only: bool = attrib(default=False, kw_only=True)\n\n def __attrs_post_init__(self) -> None:\n # Make sure that exactly one of the flags is set\n count = (\n self.begin,\n self.inside,\n self.end,\n self.only,\n ).count(True)\n if count != 1:\n raise ValueError(\n f\"Exactly one token flag should be set, found {count}: {repr(self)}\"\n )\n\n\nclass Encoding(Protocol):\n label_delim: str = \"-\"\n outside: str = \"O\"\n begin: Optional[str]\n inside: Optional[str]\n end: Optional[str]\n only: Optional[str]\n\n valid_same_type_transitions: AbstractSet[Tuple[str, str]]\n valid_different_type_transitions: AbstractSet[Tuple[str, str]]\n\n def split_label(self, label: str) -> Tuple[str, Optional[str]]:\n splits = label.split(self.label_delim)\n if len(splits) == 1:\n return (label, None)\n elif len(splits) == 2:\n # Manually unpack just to appease type checking\n state, entity_type = splits\n return (state, entity_type)\n else:\n raise ValueError(\"Cannot parse label {!r}\".format(label))\n\n def join_label(self, state: str, entity_type: str) -> str:\n if entity_type:\n return state + self.label_delim + entity_type\n else:\n assert state == self.outside\n return state\n\n def is_valid_transition(\n self,\n first_state: str,\n first_type: Optional[str],\n second_state: str,\n second_type: Optional[str],\n ) -> bool:\n transition = (first_state, second_state)\n if first_type == second_type:\n return transition in self.valid_same_type_transitions\n else:\n return transition in self.valid_different_type_transitions\n\n @abstractmethod\n def repair_labels(\n self,\n labels: Sequence[str],\n method: str,\n ) 
-> Sequence[str]:\n raise NotImplementedError\n\n @abstractmethod\n def encode_mentions(\n self, sentence: LabeledSentence, mentions: Sequence[Mention]\n ) -> Sequence[str]:\n raise NotImplementedError\n\n @abstractmethod\n def decode_mentions(self, sentence: LabeledSentence) -> List[Mention]:\n raise NotImplementedError\n\n\nclass EncodingError(Exception):\n pass\n\n\n@attrs\nclass MentionBuilder:\n tokens: Tuple[str, ...] = attrib(converter=_tuplify_strs)\n\n start_idx: Optional[int] = attrib(default=None, init=False)\n entity_type: Optional[str] = attrib(default=None, init=False)\n\n def start_mention(self, start_idx: int, entity_type: str) -> None:\n # Check arguments\n assert start_idx >= 0\n assert entity_type\n\n # Check state\n if self.start_idx is not None:\n raise EncodingError(\n f\"Mention has already been started at index {self.start_idx}\"\n )\n if self.entity_type is not None:\n raise EncodingError(\n f\"Mention has already been started with type {self.entity_type}\"\n )\n\n self.start_idx = start_idx\n self.entity_type = entity_type\n\n def end_mention(self, end_idx: int) -> Mention:\n # Since end index is exclusive, cannot be zero\n assert end_idx > 0\n\n # Check state\n if self.start_idx is None:\n raise ValueError(\"No mention start index\")\n if self.entity_type is None:\n raise ValueError(\"No mention entity type\")\n\n mention = Mention(Span(self.start_idx, end_idx), self.entity_type)\n\n self.start_idx = None\n self.entity_type = None\n\n return mention\n\n def in_mention(self) -> bool:\n return self.start_idx is not None\n\n\nclass IO(Encoding):\n def __init__(self):\n self.inside = \"I\"\n\n self.begin = None\n self.end = None\n self.only = None\n\n self.valid_same_type_transitions = frozenset(((\"I\", \"I\"), (\"O\", \"O\")))\n self.valid_different_type_transitions = frozenset(\n ((\"I\", \"I\"), (\"O\", \"I\"), (\"I\", \"O\"))\n )\n\n def encode_mentions(\n self, sentence: LabeledSentence, mentions: Sequence[Mention]\n ) -> Sequence[str]:\n raise NotImplementedError\n\n def decode_mentions(self, sentence: LabeledSentence) -> List[Mention]:\n raise NotImplementedError\n\n def repair_labels(\n self,\n labels: Sequence[str],\n method: str,\n ) -> Sequence[str]:\n raise NotImplementedError\n\n\nclass BIO(IO):\n def __init__(self):\n super().__init__()\n self.begin = \"B\"\n\n self.valid_same_type_transitions = frozenset(\n ((\"B\", \"I\"), (\"B\", \"B\"), (\"I\", \"I\"), (\"I\", \"B\"), (\"O\", \"O\"))\n )\n self.valid_different_type_transitions = frozenset(\n ((\"B\", \"B\"), (\"B\", \"O\"), (\"I\", \"B\"), (\"I\", \"O\"), (\"O\", \"B\"))\n )\n\n def encode_mentions(\n self, sentence: LabeledSentence, mentions: Sequence[Mention]\n ) -> Sequence[str]:\n raise NotImplementedError\n\n def decode_mentions(self, sentence: LabeledSentence) -> List[Mention]:\n mentions: List[Mention] = []\n builder = MentionBuilder(sentence.tokens)\n\n # We define this just to make it clear it will be defined regardless of the loop running,\n # even though it's guaranteed to run since sentences cannot be empty by construction.\n idx = 0\n\n for idx, (token, label) in enumerate(zip(sentence.tokens, sentence.labels)):\n state, entity_type = self.split_label(label)\n\n # End mention if needed. 
This is independent of whether we choose to begin a new one.\n # We end a mention if we are in a mention and the current state is not continue.\n if builder.in_mention() and state != self.inside:\n mentions.append(builder.end_mention(idx))\n\n # Begin a mention if needed\n if state == self.begin:\n builder.start_mention(idx, entity_type)\n # Check for valid continuation\n elif state == self.inside:\n if entity_type != builder.entity_type:\n if builder.entity_type:\n raise EncodingError(\n f\"Illegal use of {label} to continue {builder.entity_type}\"\n )\n else:\n raise EncodingError(f\"Illegal use of {label} to begin a mention\")\n # Check state\n assert builder.in_mention()\n # No action needed for outside (since ending mentions is mentioned above) other than\n # checking state.\n elif state == self.outside:\n assert not builder.in_mention()\n\n # Finish the last mention if needed\n if builder.in_mention():\n mentions.append(builder.end_mention(idx + 1))\n\n assert not builder.in_mention()\n\n return mentions\n\n def repair_labels(\n self,\n labels: Sequence[str],\n method: str,\n ) -> Sequence[str]:\n # All of this is essentially the same as validation, but the labels can change during\n # iteration, so the design is slightly different.\n\n # Treat sentence as if preceded by \"O\"\n prev_label = self.outside\n prev_state, prev_entity_type = self.split_label(prev_label)\n\n # Range loop since we will modify the labels\n repaired_labels = list(labels)\n for idx in range(len(repaired_labels)):\n label = repaired_labels[idx]\n\n state, entity_type = self.split_label(label)\n if not self.is_valid_transition(\n prev_state, prev_entity_type, state, entity_type\n ):\n # For BIO, this can only happen when the current label has a type\n assert entity_type\n if method == REPAIR_CONLL:\n # Treat this as the beginning of a new chunk\n state = self.begin\n elif method == REPAIR_DISCARD:\n # Treat this as O\n state = self.outside\n entity_type = None\n else:\n raise ValueError(f\"Unrecognized repair method: {method}\")\n\n label = self.join_label(state, entity_type)\n repaired_labels[idx] = label\n\n prev_label, prev_state, prev_entity_type = (\n label,\n state,\n entity_type,\n )\n\n # Since BIO cannot have an illegal end-of sentence transition, no need to check\n return repaired_labels\n\n\nclass BIOES(BIO):\n def __init__(self):\n super().__init__()\n self.end = \"E\"\n self.only = \"S\"\n\n self.valid_same_type_transitions = frozenset(\n (\n (\"B\", \"I\"),\n (\"B\", \"E\"),\n (\"B\", \"B\"),\n (\"B\", \"S\"),\n (\"I\", \"I\"),\n (\"I\", \"E\"),\n (\"E\", \"B\"),\n (\"E\", \"S\"),\n (\"O\", \"O\"),\n )\n )\n self.valid_different_type_transitions = frozenset(\n (\n (\"E\", \"B\"),\n (\"E\", \"O\"),\n (\"S\", \"S\"),\n (\"S\", \"B\"),\n (\"S\", \"O\"),\n (\"S\", \"S\"),\n (\"O\", \"B\"),\n (\"O\", \"S\"),\n )\n )\n\n def decode_mentions(self, sentence: LabeledSentence) -> List[Mention]:\n raise NotImplementedError\n\n def encode_mentions(\n self, sentence: LabeledSentence, mentions: Sequence[Mention]\n ) -> Sequence[str]:\n raise NotImplementedError\n\n\n# Declared mid-file so it can refer to classes in file\n_ENCODING_NAMES: Dict[str, Encoding] = {\n \"BIO\": BIO(),\n \"IO\": IO(),\n \"BIOES\": BIOES(),\n}\nVALIDATION_SUPPORTED_ENCODINGS: Sequence[str] = tuple(sorted(_ENCODING_NAMES))\nDECODING_SUPPORTED_ENCODINGS = (\"BIO\",)\n\n\ndef get_encoding(name: str) -> Encoding:\n name = name.upper()\n if name in _ENCODING_NAMES:\n return _ENCODING_NAMES[name]\n else:\n raise ValueError(f\"Unknown encoder 
{repr(name)}\")\n\n\n@attrs\nclass ValidationError:\n msg: str = attrib()\n label: str = attrib()\n type: str = attrib()\n state: str = attrib()\n token: str = attrib()\n line_num: int = attrib()\n\n\n@attrs\nclass ValidationResult:\n errors: Sequence[ValidationError] = attrib()\n n_tokens: int = attrib()\n repaired_labels: Optional[Tuple[str, ...]] = attrib(\n converter=_tuplify_strs, default=()\n )\n\n def is_valid(self) -> bool:\n return not self.errors\n\n def __len__(self):\n return self.n_tokens\n\n\ndef validate_sentence(\n tokens: Sequence[str],\n labels: Sequence[str],\n line_nums: Sequence[int],\n encoding: Encoding,\n *,\n repair: Optional[str] = None,\n) -> ValidationResult:\n if not (len(tokens) == len(labels) == len(line_nums)):\n raise ValueError(\"Tokens, labels, and line numbers must be the same length\")\n if not tokens:\n raise ValueError(\"Cannot validate empty sequences\")\n\n errors: List[ValidationError] = []\n\n # Treat sentence as if preceded by \"O\"\n prev_label = encoding.outside\n prev_state, prev_entity_type = encoding.split_label(prev_label)\n # We initialize these to avoid warnings about them being uninitialized if the loop doesn't\n # run, but since we have checked for an empty sequence, the loop is guaranteed to run.\n token, label, line_num = \"DUMMY_TOKEN\", \"DUMMY_LABEL\", -1\n prev_token = token\n for token, label, line_num in zip(tokens, labels, line_nums):\n state, entity_type = encoding.split_label(label)\n if not encoding.is_valid_transition(\n prev_state, prev_entity_type, state, entity_type\n ):\n msg = (\n f\"Invalid transition {prev_label} -> {label} for token {repr(token)} \"\n + f\"on line {line_num}\"\n )\n errors.append(\n ValidationError(msg, label, entity_type, state, token, line_num)\n )\n prev_label, prev_state, prev_entity_type, prev_token = (\n label,\n state,\n entity_type,\n token,\n )\n\n # Treat sentence as if followed by \"O\"\n label = encoding.outside\n state, entity_type = encoding.split_label(label)\n if not encoding.is_valid_transition(prev_state, prev_entity_type, state, entity_type):\n msg = (\n f\"Invalid transition {prev_label} -> {label} \"\n + f\"after token {prev_token} on line {line_num} at end of sentence\"\n )\n errors.append(\n ValidationError(\n msg, prev_label, prev_entity_type, prev_state, prev_token, line_num\n )\n )\n\n if errors and repair:\n repaired_labels = encoding.repair_labels(labels, repair)\n return ValidationResult(errors, len(tokens), repaired_labels)\n else:\n return ValidationResult(errors, len(tokens))\n","sub_path":"seqscore/encoding.py","file_name":"encoding.py","file_ext":"py","file_size_in_byte":17095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"631818766","text":"import sys\r\nimport os\r\nn=int(sys.stdin.read())\r\nu=\"\"\r\nwhile True:\r\n h=os.read(9,1)\r\n u=u+h\r\n if not h:\r\n break\r\nn=n%int(u)\r\nmsg=\"\"\r\nwhile n:\r\n n,o=divmod(n,256)\r\n msg=chr(o)+msg\r\nsys.stdout.write(\"%s\" %(msg))","sub_path":"msgsec/decrypt.py","file_name":"decrypt.py","file_ext":"py","file_size_in_byte":209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"476120985","text":"from visualization import Processor, WindowController\n\n\nclass ProcessPlotter(Processor):\n ADD_POINT = 1\n NEW_LINE = 2\n\n def __init__(self, interval=1000):\n self.data = {}\n self.interval = interval\n\n def new_line(self, name, style):\n self.data[str(name)] = {\n 'x': [],\n 'y': [],\n 'fmt': style,\n }\n\n 
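    # The plotter runs in its own process: PlotterWindow (further below) pushes
    # (command, *args) tuples through a multiprocessing pipe, and call_back() below
    # dispatches on the ADD_POINT / NEW_LINE command codes. A sketch of the intended
    # client-side calls, using only names from this file (pipe setup itself lives in
    # the WindowController base class, which is not shown here):
    #
    #   win = PlotterWindow(interval=500)
    #   win.new_line('loss', style='r-')
    #   win.add_point('loss', x=step, y=value)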
def add_point(self, line_name, x, y):\r\n        line_name = str(line_name)\r\n        data = self.data\r\n        data[line_name]['x'].append(x)\r\n        data[line_name]['y'].append(y)\r\n\r\n    def draw_line(self, line_name, flush=True):\r\n        line = self.data[line_name]\r\n        if line['fmt'] is None:\r\n            self.ax.plot(line['x'], line['y'], label=line_name)\r\n        else:\r\n            self.ax.plot(line['x'], line['y'], line['fmt'], label=line_name)\r\n        if flush:\r\n            self.flush()\r\n\r\n    def draw_all(self):\r\n        self.clear()\r\n        for line in self.data:\r\n            self.draw_line(line, flush=False)\r\n        if len(self.data):\r\n            self.ax.legend()\r\n        self.flush()\r\n\r\n    def call_back(self):\r\n        while self.pipe.poll():\r\n            command = self.pipe.recv()\r\n            if command is None:\r\n                self.terminate()\r\n                return False\r\n            query = command[0]\r\n            if query == self.ADD_POINT:\r\n                self.add_point(\r\n                    line_name=command[1],\r\n                    x=command[2],\r\n                    y=command[3]\r\n                )\r\n            elif query == self.NEW_LINE:\r\n                self.new_line(\r\n                    name=command[1],\r\n                    style=None if len(command) < 3 else command[2]\r\n                )\r\n        self.draw_all()\r\n        return True\r\n\r\n\r\nclass PlotterWindow(WindowController):\r\n    def __init__(self, interval=1000):\r\n        super().__init__(ProcessPlotter(interval=interval))\r\n\r\n    def new_line(self, name, style=None):\r\n        try:\r\n            self.send((self.plotter.NEW_LINE, name, style))\r\n        except BrokenPipeError as e:\r\n            return self.broken_pipe_message()\r\n\r\n        return True\r\n\r\n    def add_point(self, line_name, x, y):\r\n        try:\r\n            self.send((self.plotter.ADD_POINT, line_name, x, y))\r\n        except BrokenPipeError as e:\r\n            return self.broken_pipe_message()\r\n\r\n        return True\r\n","sub_path":"visualization/plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} {"seq_id":"324037319","text":"from cpa.utils.series import SequenceDataPanel\r\n\r\n\r\nclass MA(SequenceDataPanel):\r\n    '''\r\n    **Never assign into the panel; copy it first if you must assign.**\r\n    Panel rows are timestamps, columns are codes, and the values form the data matrix\r\n    '''\r\n\r\n    def __init__(self, dataPanel, n, maxLen):\r\n        super(MA, self).__init__(dataPanel.getColumnNames(), maxLen=maxLen)  # initialize the inherited panel base class\r\n        dataPanel.getNewValuesEvent().subscribe(self.onNewValues)\r\n        self.n = n\r\n\r\n    def onNewValues(self, dataPanel, dateTime, values):\r\n        '''\r\n        :return: the newest row of values\r\n        '''\r\n\r\n        values = dataPanel[-self.n:, :].mean(axis=0)  # mean over the most recent n rows\r\n        self.appendWithDateTime(dateTime, values)\r\n\r\n\r\nif __name__ == '__main__':\r\n    from cpa.feed import baseFeed\r\n    from cpa.feed.feedFactory import InlineDataSet\r\n    panelFeed = InlineDataSet.HS300_MINUTE()\r\n    maPanel = MA(panelFeed.closePanel, n=20, maxLen=1024)  # 20-period moving-average panel over close prices\r\n\r\n    panelFeed.run(500)\r\n\r\n    # display the data\r\n    print(maPanel.to_frame())","sub_path":"t0_framework-1.0/cpa/indicators/panelIndicators/ma.py","file_name":"ma.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} {"seq_id":"621559450","text":"from __future__ import unicode_literals, print_function, division\nfrom io import open\nimport unicodedata\nimport string\nimport re\nimport random\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch import optim\nimport torch.nn.functional as F\nimport numpy as np\nimport pickle\nuse_cuda = torch.cuda.is_available()\nprint(use_cuda)\n\n\nimport sys\nimport codecs\nimport os\nimport math\nimport operator\nimport json\nfrom functools import reduce\nimport time\n\n#from utils import *\n\ndef fetch_data(cand, ref):\n    \"\"\" Store each reference and candidate sentence as a list \"\"\"\n    references = []\n    if '.txt' in
ref:\n reference_file = codecs.open(ref, 'r', 'utf-8')\n references.append(reference_file.readlines())\n else:\n for root, dirs, files in os.walk(ref):\n for f in files:\n reference_file = codecs.open(os.path.join(root, f), 'r', 'utf-8')\n references.append(reference_file.readlines())\n candidate_file = codecs.open(cand, 'r', 'utf-8')\n candidate = candidate_file.readlines()\n return candidate, references\n\n\ndef count_ngram(candidate, references, n):\n clipped_count = 0\n count = 0\n r = 0\n c = 0\n for si in range(len(candidate)):\n # Calculate precision for each sentence\n ref_counts = []\n ref_lengths = []\n # Build dictionary of ngram counts\n for reference in references:\n ref_sentence = reference[si]\n ngram_d = {}\n words = ref_sentence.strip().split()\n ref_lengths.append(len(words))\n limits = len(words) - n + 1\n # loop through the sentance consider the ngram length\n for i in range(limits):\n ngram = ' '.join(words[i:i+n]).lower()\n if ngram in ngram_d.keys():\n ngram_d[ngram] += 1\n else:\n ngram_d[ngram] = 1\n ref_counts.append(ngram_d)\n # candidate\n cand_sentence = candidate[si]\n cand_dict = {}\n words = cand_sentence.strip().split()\n limits = len(words) - n + 1\n for i in range(0, limits):\n ngram = ' '.join(words[i:i + n]).lower()\n if ngram in cand_dict:\n cand_dict[ngram] += 1\n else:\n cand_dict[ngram] = 1\n clipped_count += clip_count(cand_dict, ref_counts)\n count += limits\n r += best_length_match(ref_lengths, len(words))\n c += len(words)\n if clipped_count == 0:\n pr = 0\n else:\n pr = float(clipped_count) / count\n bp = brevity_penalty(c, r)\n return pr, bp\n\ndef clip_count(cand_d, ref_ds):\n \"\"\"Count the clip count for each ngram considering all references\"\"\"\n count = 0\n for m in cand_d.keys():\n m_w = cand_d[m]\n m_max = 0\n for ref in ref_ds:\n if m in ref:\n m_max = max(m_max, ref[m])\n m_w = min(m_w, m_max)\n count += m_w\n return count\n\ndef best_length_match(ref_l, cand_l):\n \"\"\"Find the closest length of reference to that of candidate\"\"\"\n least_diff = abs(cand_l-ref_l[0])\n best = ref_l[0]\n for ref in ref_l:\n if abs(cand_l-ref) < least_diff:\n least_diff = abs(cand_l-ref)\n best = ref\n return best\n\ndef brevity_penalty(c, r):\n if c > r:\n bp = 1\n else:\n bp = math.exp(1-(float(r)/c))\n return bp\n\ndef geometric_mean(precisions):\n return (reduce(operator.mul, precisions)) ** (1.0 / len(precisions))\n\ndef BLEU(candidate, references):\n precisions = []\n for i in range(4):\n pr, bp = count_ngram(candidate, references, i+1)\n precisions.append(pr)\n bleu = geometric_mean(precisions) * bp\n return bleu\n\ndef v2c(x):\n \"\"\"\n if use gpu then return x.cuda\n \"\"\"\n if use_cuda:\n return x.cuda()\n else:\n return x\n\n\nSOS_token = 0\nEOS_token = 1\nPAD_token = 3\nUNK_token = 2\nclass Lang:\n def __init__(self, name):\n self.name = name\n self.word2index = {\"\":0, \"\":1, \"\":2, \"\":3}\n self.word2count = {}\n self.index2word = {0: \"\", 1: \"\", 2:\"\", 3:\"\"}\n self.n_words = 4 # Count SOS and EOS\n\n def addSentence(self, sentence):\n #sentence = sentence.replace('.', '')\n for word in sentence.split(' '):\n self.addWord(word)\n\n def addWord(self, word):\n if word not in self.word2index:\n self.word2index[word] = self.n_words\n self.word2count[word] = 1\n self.index2word[self.n_words] = word\n self.n_words += 1\n else:\n self.word2count[word] += 1\n\ndef filterPairs(pairs):\n return [pair for pair in pairs]\n\ndef indexesFromSentence(lang, sentence, max_length):\n #sentence = sentence.replace('.', '')\n index = []\n for 
word in sentence.split(' '):\n if word in lang.index2word.values():\n index.append(lang.word2index[word])\n else:\n index.append(UNK_token)\n \n len_indexes = len(index)\n \n index.append(EOS_token)\n \n for _ in range(max_length - len_indexes):\n index.append(PAD_token) \n \n return index\n\ndef variableFromSentence(lang, sentence, max_length):\n indexes = indexesFromSentence(lang, sentence, max_length)\n \n result = Variable(torch.LongTensor(indexes).view(-1, 1))\n if use_cuda:\n return result.cuda()\n else:\n return result\n\n\ndef sens2tensor(sens, lang):\n max_len = 0\n for sen in sens:\n max_len = max(max_len, len(sen.split(' ')))\n \n sen_indexes = []\n for sen in sens:\n sen_indexes.append(indexesFromSentence(lang, sen, max_len))\n \n sen_indexes = np.array(sen_indexes)\n tensor = []\n for n in range(max_len + 1):\n sen = Variable(torch.LongTensor(sen_indexes[:, n].reshape((1, -1))))\n sen = v2c(sen)\n tensor.append(sen)\n \n return tensor\n\ndef get_dataset(file_dict, batch_size = 8):\n lang_txt = Lang('train_txt')\n lines = open(file_dict).read().strip().split('\\n')\n for sen in lines:\n lang_txt.addSentence(sen)\n \n len_lines = len(lines)\n \n dataset = []\n \n for index, sen in enumerate(lines):\n if(index != (len_lines - 1)):\n next_sen = lines[index + 1]\n pair = (sen, next_sen)\n max_len = max(len(sen.split(' ')), len((next_sen.split(' '))))\n dataset.append((max_len, pair))\n dataset = sorted(dataset)\n \n print('dataset prepared')\n \n N = len(dataset)\n batch_num = int(np.ceil(N/batch_size))\n \n batch_data = [None] * batch_num \n for n in range(batch_num):\n batch_data[n] = ([],[])\n \n for index, ele in enumerate(dataset):\n \n if index % 20000 == 0:\n print('completed', index/(N+ 0.0))\n \n _, pair = ele\n train_sen , label_sen = pair\n batch_index = int(index/batch_size)\n sens, labels = batch_data[batch_index]\n sens.append(train_sen)\n labels.append(label_sen)\n \n print('split prepared')\n \n output_data = []\n for index, ele in enumerate(batch_data):\n if index % 200 == 0:\n print('finished', index/(batch_num + 0.0))\n train_sens, label_sens = ele\n train_sens_tensor = sens2tensor(train_sens, lang_txt)\n label_sens_tensor = sens2tensor(label_sens, lang_txt)\n output_data.append((train_sens_tensor, label_sens_tensor, label_sens))\n \n return output_data, lang_txt\n\nimport time\nimport math\n\ndef asMinutes(s):\n m = math.floor(s / 60)\n s -= m * 60\n return '%dm %ds' % (m, s)\n\n\ndef timeSince(since, percent):\n now = time.time()\n s = now - since\n es = s / (percent)\n rs = es - s\n return '%s (- %s)' % (asMinutes(s), asMinutes(rs))\n\n\nfile_dict = sys.argv[1] ###########the file name##############\nbatch_data,lang_txt = get_dataset(file_dict, batch_size = 64)\nprint(lang_txt.n_words)\nvoc_file = open('voc_hw2_2','wb') \npickle.dump(lang_txt, voc_file, 0)\nvoc_file.close()\n\nclass EncoderRNN(nn.Module):\n def __init__(self, feature_size, hidden_size, output_length):\n super(EncoderRNN, self).__init__()\n self.hidden_size = hidden_size\n\n self.embedding = nn.Embedding(output_length, feature_size)\n self.gru = nn.GRU(feature_size, hidden_size)\n \n def forward(self, input, hidden):\n embedded = self.embedding(input.view(1, -1))\n output = embedded\n output, hidden = self.gru(output, hidden)\n return output, hidden\n\n def initHidden(self, batch_size):\n result = Variable(torch.zeros(1, batch_size, self.hidden_size))\n if use_cuda:\n return result.cuda()\n else:\n return result\n\nMAX_LENGTH = 100\nclass DecoderRNN(nn.Module):\n def __init__(self, 
feature_size, hidden_size, output_length):\n super(DecoderRNN, self).__init__()\n self.hidden_size = hidden_size\n self.output_length = output_length\n self.feature_size = feature_size\n \n self.embedding = nn.Embedding(output_length, feature_size)\n self.gru = nn.GRU(feature_size, hidden_size)\n self.out = nn.Linear(hidden_size, output_length)\n self.softmax = nn.LogSoftmax(dim=2)\n \n def forward(self, input, hidden, rubbish):\n output = self.embedding(input.view(1, -1))\n output = F.relu(output)\n output, hidden = self.gru(output, hidden)\n output = self.softmax(self.out(output))\n return output, hidden, None\n \n def initHidden(self, batch_size):\n result = Variable(torch.zeros(1, batch_size, self.hidden_size))\n if use_cuda:\n return result.cuda()\n else:\n return result \n\nfeature = 128\nencoder = EncoderRNN(feature, feature, lang_txt.n_words) \ndecoder = DecoderRNN(feature, feature, lang_txt.n_words)\n\nif use_cuda:\n encoder = encoder.cuda()\n \n decoder = decoder.cuda()\n\n\nclass seq2seq(nn.Module):\n \n def __init__(self,\n encoder,\n decoder,\n \n max_length = MAX_LENGTH,\n \n decay_rate = 1.0\n ):\n super(seq2seq,self).__init__()\n self.encoder = encoder\n self.decoder = decoder\n \n self.learning_rate = 0.001\n \n self.hidden_size = encoder.hidden_size\n \n self.criterion = nn.NLLLoss()\n self.evaluater_criterion = nn.MSELoss()\n \n self.encoder_optimizer = optim.Adam(encoder.parameters(), lr=self.learning_rate)\n self.decoder_optimizer = optim.Adam(decoder.parameters(), lr=self.learning_rate)\n \n \n self.max_sentence_length = 0\n \n def random_sample(self,tensor):\n \n tensor1 = (torch.exp(tensor[0])).data.cpu().numpy()\n a,b = tensor1.shape\n samples = np.zeros((1, a))\n probs = np.zeros((1, a))\n for i in range(a):\n max_index = tensor1[i].argmax()\n tensor1[i, max_index] += (1 - tensor1[i].sum())\n \n index = np.random.choice(b, 1, p = tensor1[i])\n samples[:, i] = index\n probs[:, i] = tensor1[i,index]\n samples = v2c(Variable(torch.LongTensor(samples)))\n probs = v2c(Variable(torch.FloatTensor(probs)))\n return samples, probs\n\n def save_checkpoint(self, filename):\n state = {\n 'encoder_state': self.encoder.state_dict(),\n 'decoder_state': self.decoder.state_dict(),\n 'encoder_opt': self.encoder_optimizer.state_dict(),\n 'decoder_opt': self.decoder_optimizer.state_dict()\n }\n torch.save(state, filename)\n \n def trainIters(self, batch_data, XENT_iters, print_every=10, plot_every=2):\n start = time.time()\n plot_losses = []\n print_loss_total = 0 # Reset every print_every\n plot_loss_total = 0 # Reset every plot_every\n\n \n self.max_sentence_length = len(batch_data[0][0])\n \n training_count = 0\n \n n_iters = 0\n t_iters = XENT_iters \n for iter in range(1, XENT_iters + 1):\n n_iters += 1\n for batch in batch_data:\n\n training_in, target, target_sentence = batch\n\n loss = self.train(\n training_in,\n target, \n target_sentence, \n train_method = 'XENT')\n\n print_loss_total += loss\n plot_loss_total += loss\n training_count += 1\n\n if training_count%print_every == 0:\n print_loss_avg = print_loss_total / print_every\n print_loss_total = 0\n print('%s (%d %d%%) %.4f' % (timeSince(start, n_iters / t_iters),\n n_iters, n_iters / t_iters * 100, print_loss_avg))\n \n if training_count % plot_every == 0:\n plot_loss_avg = plot_loss_total / plot_every\n plot_losses.append(plot_loss_avg)\n plot_loss_total = 0\n\n self.save_checkpoint('hw2_2.model')\n \n return plot_losses\n \n \n def train(self,\n input_variable,\n target_variable,\n target_sentence, \n 
max_length=MAX_LENGTH,\n train_method = 'XENT', \n ):\n \n encoder = self.encoder\n decoder = self.decoder \n encoder_optimizer = self.encoder_optimizer\n decoder_optimizer = self.decoder_optimizer \n criterion = self.criterion\n \n encoder_optimizer.zero_grad()\n decoder_optimizer.zero_grad()\n\n \n batch_size = input_variable[0].size()[1]\n \n encoder_hidden = encoder.initHidden(batch_size)\n\n \n input_length = len(input_variable)\n target_length = len(target_variable)\n\n encoder_outputs = Variable(torch.zeros(max_length, encoder.hidden_size))\n encoder_outputs = encoder_outputs.cuda() if use_cuda else encoder_outputs\n\n loss = 0\n\n for ei in range(input_length):\n encoder_output, encoder_hidden = encoder(\n input_variable[ei], encoder_hidden)\n encoder_outputs[ei] = encoder_output[0][0]\n \n start_token = np.ones((1, batch_size)) * SOS_token\n decoder_input = Variable(torch.LongTensor(start_token))\n decoder_input = decoder_input.cuda() if use_cuda else decoder_input\n\n decoder_hidden = encoder_hidden\n\n #use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False\n\n \n \n if train_method == 'XENT':\n\n if True:\n # Teacher forcing: Feed the target as the next input\n for di in range(target_length):\n decoder_output, decoder_hidden, decoder_attention = decoder(\n decoder_input, decoder_hidden, encoder_outputs)\n loss += criterion(decoder_output[0], target_variable[di][0])\n decoder_input = target_variable[di] # Teacher forcing\n \n loss.backward()\n\n encoder_optimizer.step()\n decoder_optimizer.step()\n\n return loss.data[0] / target_length\n \ntest1 = seq2seq(encoder, decoder)\nloss_actor = test1.trainIters(batch_data, 1) ########return the loss of every 50 batches\n","sub_path":"hw2/hw2_2/model_seq2seq.py","file_name":"model_seq2seq.py","file_ext":"py","file_size_in_byte":15476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"621223565","text":"from util import refine_amount, determine_category\nfrom scrapy.spider import BaseSpider\nfrom scrapy.selector import HtmlXPathSelector\nfrom nettuts.items import ScrapySampleItem\nfrom scrapy.http import Request\nimport psycopg2\n \nclass MySpider(BaseSpider):\n name = \"simply\"\n allowed_domains = [\"simply-market.pl\"]\n start_urls = [\"http://www.simply-market.pl/promocje/\"]\n \n def parse(self, response):\n list = []\n for div in response.xpath('//div[@class=\"product col-lr-0 col-lg-3 col-md-3 col-xs-12\"]'):\n price = div.css('span[class=\"zl-one\"]::text').extract()\n price1 = div.css('span[class=\"gr-one\"]::text').extract()\n title = div.css('a[class=\"producttitle\"]::text').extract()\n amount = div.css('span[class=\"gramatura\"]::text').extract()\n\n if price and title:\n item = ScrapySampleItem()\n item[\"title\"] = title[0]\n item[\"price\"] = price[0] + \".\" + price1[0]\n item[\"amount\"] = refine_amount(amount[0].replace(\"/\", \"\"))\n item[\"category\"] = determine_category(item[\"title\"].encode('utf-8'))\n\n conn = psycopg2.connect(\"host='planer.cukg4kbdopna.us-west-2.rds.amazonaws.com' port='5432' dbname='postgres' user='kamil' password='kamil12345'\")\n cursor = conn.cursor()\n try:\n cursor.execute(\"\"\"SELECT idkategorii FROM kategorie WHERE nazwa LIKE %s\"\"\", [item[\"category\"]])\n category = cursor.fetchone()[0]\n cursor.execute(\"\"\"INSERT INTO produkty (nazwa, cena, ilosc, kategoria, idsklepu) VALUES (%s, %s, %s, %s, %s)\"\"\", \n (item[\"title\"],\n item[\"price\"],\n item[\"amount\"],\n category,\n 5))\n conn.commit()\n 
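                    # conn.commit() above persists the INSERT; the %s placeholders are
                    # psycopg2 parameter binding, which escapes values safely instead of
                    # interpolating them into the SQL string by hand.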
except psycopg2.OperationalError as e:\n                print('Unable to connect!\n{0}'.format(e)) \n            yield item  \n    \n","sub_path":"nettuts/nettuts/spiders/simply.py","file_name":"simply.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} {"seq_id":"69795843","text":"calculation_of_units = 24\nname_of_unit = \"hours\"\n\ndef days_to_units(num_of_days):\n    return f\"{num_of_days} days are {num_of_days * calculation_of_units} {name_of_unit}\"\n    \n\ndef validate_and_execute():\n    try:\n        user_input_number = int(num_of_days_element)\n        if user_input_number > 0:\n            calculated_value = days_to_units(user_input_number)\n            print(calculated_value)\n        elif user_input_number == 0:\n            print(\"You entered a 0, please enter a valid positive number.\")\n        else:\n            print(\"You entered a negative number. No conversion for you.\") \n    except ValueError:\n        print(\"Your input is not a valid number. Don't ruin my program!\")\n\n\nuser_input = \"\"\n\nwhile user_input != \"exit\":\n    user_input = input(\"Hey user enter a number of days as a comma separated list.\\n\")\n    list_of_days = user_input.split(\", \")\n    print(list_of_days)\n    print(set(list_of_days))\n    print(type(list_of_days))\n    print(type(set(list_of_days)))\n    set_days = set(user_input.split(\", \"))\n    for num_of_days_element in set_days:\n        print(num_of_days_element)\n        validate_and_execute()\n","sub_path":"python-basics/set_prac.py","file_name":"set_prac.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} {"seq_id":"140970235","text":"#!/usr/bin/env python\n\"\"\"\nScript to publish the temperature of the thermocouple attached to the gas heater\nat 40 Stokes Valley Road to AWS IoT\n\"\"\"\nimport logging\nimport time\n\nfrom AWSIoTPythonSDK.exception.AWSIoTExceptions import publishTimeoutException\n\ntry:\n    import mcp9000\nexcept ImportError:\n    pass\nimport iot\n\nlogger = logging.getLogger(__name__)  # pylint: disable=invalid-name\n\nclass GasSensor(iot.TemperatureSensor):\n    \"\"\"Gas Sensor Controller Class\"\"\"\n    def __init__(self, config):\n        super(GasSensor, self).__init__()\n        self.iot = None\n\n        try:\n            mcp9000_config = config['mcp9000']\n            self.mcp9000 = mcp9000.MCP9000(mcp9000_config['bus'], mcp9000_config['address'])\n        except KeyError:\n            self.threshold = config['threshold']\n        self.client_id = config['client_id']\n\n    def start(self):\n        \"\"\"Start the controller\"\"\"\n        while True:\n            temperature = self.mcp9000.temperature\n            if temperature:\n                self.temperature = temperature\n            time.sleep(2)\n\n    @property\n    def heater_is_on(self):\n        \"\"\"True if the heater is on\"\"\"\n        try:\n            return self.temperature.value > self.threshold\n        except TypeError:\n            return False\n        except AttributeError:\n            return False\n\n    @property\n    def topics(self):\n        \"\"\"MQTT Topics for this thing\"\"\"\n        return iot.topics(self.client_id)\n\n    def _set_temperature(self, temperature):\n        if not self.temperature:\n            self._temperature = iot.DataItem(temperature)\n            self._send_sample()\n        elif self.temperature.value != temperature:\n            self.temperature.value = temperature\n            self._send_sample()\n        else:\n            if time.time() - self.temperature.last_update > 60:\n                self.temperature.value = temperature\n                self._send_sample()\n\n    def _send_sample(self):\n        \"\"\"\n        Sends state update to IoT\n        \"\"\"\n        message = {'state': {'reported': {'temperature': self.temperature.value}}}\n        logger.debug(message)\n        try:\n            self.iot.publish(self.iot.topics['shadow_update'],
message)\n except publishTimeoutException:\n logger.warning('publish timeout')\n self.iot.reconnect()\n except AttributeError:\n pass\n","sub_path":"gas_sensor.py","file_name":"gas_sensor.py","file_ext":"py","file_size_in_byte":2373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"185132583","text":"import argparse\ndef export(bucket_name,credentials,best_model):\n\n import joblib\n from google.cloud import storage\n import requests\n from google.oauth2 import service_account\n response = requests.get(credentials)\n jsonResponse = response.json()\n joblib.dump(jsonResponse, 'model')\n # Explicitly use service account credentials by specifying the private key\n # file.\n credentials = service_account.Credentials.from_service_account_info(jsonResponse)\n storage_client = storage.Client(project='project_id', credentials=credentials)\n #storage_client = storage.Client.from_service_account_json('model')\n bucket=storage_client.get_bucket(bucket_name)\n\n blob = bucket.blob('heart/heart_model/best_model')\n blob.upload_from_filename(best_model)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--bucket_name')\n parser.add_argument('--credentials')\n parser.add_argument('--best_model')\n args = parser.parse_args()\n export(args.bucket_name, args.credentials,args.best_model)","sub_path":"heart disease/pipeline components/export/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"66074415","text":"from django.conf.urls import url, include\nfrom rest_framework import routers, serializers, viewsets\n\nfrom core.models.incident import Incident\n\n\nclass IncidentSerializer(serializers.HyperlinkedModelSerializer):\n # Serializers define the API representation.\n class Meta:\n model = Incident\n fields = ('report', 'reporter', 'report_time')\n\n\nclass IncidentViewSet(viewsets.ModelViewSet):\n # ViewSets define the view behavior.\n queryset = Incident.objects.all()\n serializer_class = IncidentSerializer\n\n\n# Routers provide an easy way of automatically determining the URL conf.\nrouter = routers.DefaultRouter()\nrouter.register(r'incidents', IncidentViewSet)\n\n# Wire up our API using automatic URL routing.\n# Additionally, we include login URLs for the browsable API.\nurlpatterns = [\n url(r'^', include(router.urls)),\n #url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))\n]\n","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"422399749","text":"import os\nimport sys\nimport logging\nimport re\nsys.path.append(os.path.dirname(os.getcwd()))\nfrom Transfer_Guid_To_Name import FileLocation\nfrom excel_rw import ExcelRw\nfrom efi_variable import EfiVariable\nfrom setup_tree_data import SetupTreeData\nfrom gset_tree_branch_node import GsetTree, GsetNode\nfrom setup_switch_string_piddatoken import SetupSwitch, SetupString, PidDaToken\nfrom data_dealwith import SdDealWith, FlowControl, SkipAction, DataSave\nfrom file_dealwith import FileDealWith\n\n\nclass Gset(object):\n\n def __init__(self, root, p_folder, o_folder, ext_files_folder, use_runtime_variable, logger=''):\n self.setup_d = dict()\n self.setup_d['root'] = root\n self.setup_d['p_folder'] = p_folder\n self.setup_d['used_runtime_variable'] = 
use_runtime_variable\n self.setup_d['enable_debug'] = False\n self.setup_d['o_folder'] = o_folder\n self.setup_d['o_folder_data'] = o_folder + '\\data'\n self.setup_d['logger2'] = logger\n if re.search('Setup_Item', os.getcwd(), re.IGNORECASE):\n self.setup_d['setup_item_folder'] = os.getcwd()\n else:\n self.setup_d['setup_item_folder'] = os.getcwd() + '\\Setup_Item'\n if ext_files_folder == '':\n self.setup_d['ext_files_folder'] = self.setup_d['setup_item_folder'] + '\\external_files'\n else:\n self.setup_d['ext_files_folder'] = ext_files_folder\n self.setup_d['dpf_expertkeystrings'] = self.setup_d['setup_item_folder'] + '\\dpf_files\\ExpertKeyStrings.uni'\n self.setup_d['dpf_expertkeyvfr'] = self.setup_d['setup_item_folder'] + '\\dpf_files\\ExpertKeyVfr.vfr'\n\n def show_message_on_logger(self, message):\n if self.setup_d['logger2']:\n self.setup_d['logger2'].info(message)\n\n def produce_gset_items_excel_file(self):\n # create logger\n\n logger = logging.getLogger('PostLog')\n logger.setLevel(logging.DEBUG)\n\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n if not os.path.exists(self.setup_d['o_folder_data']):\n os.makedirs(self.setup_d['o_folder_data'])\n\n # (0) build up token dictionary\n self.show_message_on_logger('Build up the token dictionary')\n setup_switch = SetupSwitch(self.setup_d['o_folder_data'], self.setup_d['root'], self.setup_d['p_folder'])\n token_dict = setup_switch.token_dict\n DataSave.dict_to_csv(token_dict, self.setup_d['o_folder_data'], 'token_dict.txt')\n\n # (0.1) build up PID, DaToken, PID_DaToken dictionary\n self.show_message_on_logger('Build up PID, DaToken, PID_DaToken dictionaries')\n pid_token = PidDaToken(self.setup_d['root'])\n pid_dict, datoken_dict, pid_token_dict = pid_token.get_pid_datoken_dict()\n DataSave.dict_to_csv(pid_dict, self.setup_d['o_folder_data'], 'pid_dict.txt')\n DataSave.dict_to_csv(datoken_dict, self.setup_d['o_folder_data'], 'datoken_dict.txt')\n DataSave.dict_to_csv(pid_token_dict, self.setup_d['o_folder_data'], 'pid_token_dict.txt')\n\n # (0.2) build up string_dict\n self.show_message_on_logger('Build up the string_dict')\n uni = FileLocation(root=self.setup_d['root'], filename_extension='.uni')\n uni.target_files.append(self.setup_d['dpf_expertkeystrings'])\n\n uni_list = FileDealWith(self.setup_d['o_folder_data'], self.setup_d['p_folder'], uni.target_files, o_file_name='uni')\n setup_string = SetupString(uni_list.active_file_list)\n string_dict = setup_string.string_dict\n DataSave.dict_to_csv(string_dict, self.setup_d['o_folder_data'], 'string_dict.txt', 'utf_16_le')\n\n # (1) build up sd define template\n self.show_message_on_logger('Build up the sd define template')\n sd = FileLocation(root=self.setup_d['root'], filename_extension='.sd')\n sd.target_files.append(self.setup_d['dpf_expertkeyvfr'])\n\n sd_list = FileDealWith(self.setup_d['o_folder_data'], self.setup_d['p_folder'], sd.target_files, o_file_name='sd')\n sd_handle = SdDealWith(sd_list.active_file_list, token_dict)\n DataSave.list_to_txt(sd_handle.active_information, self.setup_d['o_folder_data'], 'sd_active_info_b.txt')\n\n # 1.1 some define is existed on *.sd. 
we need to renew token_dict\n token_dict = setup_switch.renew(sd_handle.active_information)\n DataSave.dict_to_csv(token_dict, self.setup_d['o_folder_data'], 'token_renew.txt')\n\n # 1.2 base on new token_dict, renew sd_active_info\n sd_handle.renew_active_information_with_new_token_dict(token_dict)\n DataSave.list_to_txt(sd_handle.active_information, self.setup_d['o_folder_data'], 'sd_active_info.txt')\n\n # 1.3 build up setup variable field value dictionary\n self.show_message_on_logger('Build up the setup_variable_dict')\n efi_variable = EfiVariable(self.setup_d, token_dict)\n efi_variable.save_efivariable_to_file()\n\n # 1.4 handle suppressif\n self.show_message_on_logger('Handle suppressif')\n sd_handle.information_renew_with_suppressif(efi_variable)\n DataSave.list_to_txt(sd_handle.active_information, self.setup_d['o_folder_data'], 'sd_active_info_suppressif.txt')\n\n # 1.5 Build up sd_define_list, sd_formid_list\n self.show_message_on_logger('Build up sd_define_list, sd_formid_list')\n sd_handle.buildup_define_and_formid()\n sd_define_list, sd_formid_list = sd_handle.get_define_formid_list()\n DataSave.list_to_txt(sd_define_list, self.setup_d['o_folder_data'], 'sd_define_list.txt')\n DataSave.list_to_txt(sd_formid_list, self.setup_d['o_folder_data'], 'sd_formid_list.txt')\n\n # 2.0 walk through layer\n self.show_message_on_logger('Walk through setup')\n setup = GsetTree(self.setup_d['o_folder_data'], self.setup_d['root'], token_dict, efi_variable, sd_formid_list, sd_define_list)\n gset_dict, layer_list = setup.get_gsetdict_layerlist()\n total_key = list(gset_dict.keys())\n DataSave.list_to_txt(total_key, self.setup_d['o_folder_data'], 'total_key.txt')\n DataSave.list_to_txt(layer_list, self.setup_d['o_folder_data'], 'layer_list.txt')\n DataSave.dict_to_csv(gset_dict, self.setup_d['o_folder_data'], 'gset_dict.txt')\n\n # 3\n self.show_message_on_logger('Write data to excel')\n setup_data = SetupTreeData(token_dict, string_dict, gset_dict, pid_dict, pid_token_dict, datoken_dict)\n setup_table = setup_data.output_in_list(layer_list, total_key)\n # print('===setup_table===')\n # for i in setup_table:\n # print(i)\n\n # write to excel\n # todo replace by panda\n directory = self.setup_d['o_folder'] + '\\Release'\n if not os.path.exists(directory):\n os.makedirs(directory)\n p = ExcelRw(self.setup_d['o_folder'] + '\\Release\\DA_Token_Setup.xls')\n p.write_table_and_save('Gset', setup_table)\n self.show_message_on_logger('Finish')\n\n\nif __name__ == '__main__':\n gs = Gset('c:\\BIOS\\Rugged2\\99.0.50_Rev0803',\n 'c:\\BIOS\\Rugged2\\99.0.50_Rev0803\\OEMBOARD\\Rugged2',\n 'C:\\Code_in_Python\\Exercise\\Log_Guid_Transfer\\Setup_Item',\n 'C:\\Code_in_Python\\Exercise\\Log_Guid_Transfer\\Setup_Item\\external_files', False)\n gs.produce_gset_items_excel_file()\n","sub_path":"Exercise/Log_Guid_Transfer/Setup_Item/Gset_analysis.py","file_name":"Gset_analysis.py","file_ext":"py","file_size_in_byte":7479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"613254130","text":"\"\"\"\nCreating PIPY package instruction:\n\npython3 -m pip install --user --upgrade setuptools wheel\npython3 setup.py sdist\npython3 -m pip install --user --upgrade twine\ntwine check dist/*\ntwine upload dist/*\n\"\"\"\n\nfrom setuptools import setup, find_packages\nfrom numpy.distutils.core import setup, Extension\nfrom os import path\nimport io\n\nthis_directory = path.abspath(path.dirname(__file__))\nwith io.open(path.join(this_directory, 'README.md'), encoding='utf-8') 
as f:\n long_description = f.read()\n\nif __name__ == \"__main__\":\n setup(name = 'CoastProc',\n author = \"Tristan Salles\",\n author_email = \"tristan.salles@sydney.edu.au\",\n url = \"https://github.com/TristanSalles/CoastProc\",\n version = \"0.0.3\",\n description = \"Coastal Processes, Environments & Systems.\",\n long_description = long_description,\n long_description_content_type='text/markdown',\n packages = ['CoastProc'],\n install_requires = [\n 'numpy>=1.15.0',\n 'six>=1.11.0',\n 'setuptools>=38.4.0',\n 'pandas>=0.25',\n 'seaborn>=0.9',\n 'matplotlib>=3.0',\n 'geopy>=1.20',\n 'cartopy>=0.17',\n 'scipy>=1.3',\n 'netCDF4>=1.5.1',\n 'shapely>=1.6.4',\n 'scikit-image>=0.15',\n 'pymannkendall>=0'\n ],\n python_requires = '>=3.3',\n package_data = {'CoastProc': ['Notebooks/notebooks/*ipynb',\n 'Notebooks/notebooks/*py',\n 'Notebooks/dataset/*',\n 'Notebooks/images/*'] },\n include_package_data = True,\n classifiers = ['Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6']\n )\n","sub_path":"pypi_install_script/CoastProc-0.0.3.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"232357356","text":"from __future__ import division\r\nfrom builtins import int, len\r\nfrom codecs import open\r\nimport numpy as np\r\nfrom collections import Counter\r\n\r\n\r\n\r\ndef readDocument(doc_file , v):\r\n docs = []\r\n languages = []\r\n tweets = []\r\n vocabulary = []\r\n with open(doc_file, encoding='utf-8') as f:\r\n for line in f:\r\n words = line.strip().split()\r\n for elem in words[3:]:\r\n if v == 0:\r\n docs.append(elem)\r\n elif v == 1:\r\n docs.append(elem.lower())\r\n elif v == 2:\r\n string = ''.join(letter for letter in elem if letter.isalpha())\r\n docs.append(string)\r\n elif v == 3:\r\n string = ''.join(letter for letter in elem if letter.isalpha())\r\n docs.append(string.lower())\r\n else:\r\n if checkWordObeyRules(elem):\r\n string = ''.join(letter for letter in elem if letter.isalpha())\r\n docs.append(string.lower())\r\n languages.append(words[2])\r\n tweets.append((words[3:],words[2]))\r\n vocabulary = set(docs)\r\n if \"\" in vocabulary:\r\n vocabulary.remove(\"\")\r\n return vocabulary,languages,tweets\r\ndef readTestingDocument(doc_file , v):\r\n tweets = []\r\n tweetId = []\r\n docs = []\r\n with open(doc_file, encoding='utf-8') as f:\r\n for line in f:\r\n words = line.strip().split()\r\n for elem in words[3:]:\r\n if v == 0:\r\n docs.append(elem)\r\n elif v == 1:\r\n docs.append(elem.lower())\r\n elif v == 2:\r\n string = ''.join(letter for letter in elem if letter.isalpha())\r\n docs.append(string)\r\n elif v == 3:\r\n string = ''.join(letter for letter in elem if letter.isalpha())\r\n docs.append(string.lower())\r\n else:\r\n if checkWordObeyRules(elem):\r\n string = ''.join(letter for letter in elem if letter.isalpha())\r\n docs.append(string.lower())\r\n tweetId.append(words[0])\r\n tweets.append((words[3:], words[2]))\r\n vocabulary = set(docs)\r\n if \"\" in vocabulary:\r\n vocabulary.remove(\"\")\r\n return tweets, tweetId, vocabulary\r\n\r\n\r\ndef checkWordObeyRules(word):\r\n if word[0] != '#' and word[0] != '@' and \"http\" not in word.lower() and checkNot3Consecutive(word):\r\n return True\r\n else:\r\n return False\r\n\r\ndef checkNot3Consecutive(w):\r\n counter = 0\r\n letter = \"\"\r\n for elem in w:\r\n if elem 
== letter:\r\n counter += 1\r\n if counter > 1:\r\n return False\r\n else:\r\n counter = 0\r\n letter = elem\r\n return True\r\n\r\n\r\n\r\ndef uniGram(allTweets,v):\r\n # v selects the token normalization variant (0 raw, 1 lower, 2 letters only, 3 letters+lower, 4 rule-filtered)\r\n arrayBasque = Counter()\r\n arrayCatalan = Counter()\r\n arrayGalician = Counter()\r\n arraySpanish = Counter()\r\n arrayEnglish = Counter()\r\n arrayPortuguese = Counter()\r\n sizeBasque = 0\r\n sizeCatalan = 0\r\n sizeGalician = 0\r\n sizeSpanish = 0\r\n sizeEnglish = 0\r\n sizePortuguese = 0\r\n for tweet in allTweets:\r\n for word in tweet[0]:\r\n skip = False\r\n if v == 1:\r\n word = word.lower()\r\n elif v == 2:\r\n string = ''.join(letter for letter in word if letter.isalpha())\r\n word = string\r\n elif v == 3:\r\n string = ''.join(letter for letter in word if letter.isalpha())\r\n word = string.lower()\r\n elif v == 4:\r\n if checkWordObeyRules(word):\r\n string = ''.join(letter for letter in word if letter.isalpha())\r\n word = string.lower()\r\n else:\r\n skip = True\r\n if skip == False:\r\n if (tweet[1] == 'eu'):\r\n arrayBasque[word] += 1\r\n sizeBasque += 1\r\n if (tweet[1] == 'ca'):\r\n arrayCatalan[word] += 1\r\n sizeCatalan += 1\r\n if (tweet[1] == 'gl'):\r\n arrayGalician[word] += 1\r\n sizeGalician += 1\r\n if (tweet[1] == 'es'):\r\n arraySpanish[word] += 1\r\n sizeSpanish += 1\r\n if (tweet[1] == 'en'):\r\n arrayEnglish[word] += 1\r\n sizeEnglish += 1\r\n if (tweet[1] == 'pt'):\r\n arrayPortuguese[word] += 1\r\n sizePortuguese += 1\r\n return (arrayBasque,sizeBasque), (arrayCatalan,sizeCatalan) , (arrayGalician,sizeGalician) , (arraySpanish,sizeSpanish) , (arrayEnglish, sizeEnglish) , (arrayPortuguese,sizePortuguese)\r\n\r\ndef biGram(allTweets,v):\r\n # same v handling as uniGram, applied to adjacent word pairs\r\n arrayBasque = Counter()\r\n arrayCatalan = Counter()\r\n arrayGalician = Counter()\r\n arraySpanish = Counter()\r\n arrayEnglish = Counter()\r\n arrayPortuguese = Counter()\r\n sizeBasque = 0\r\n sizeCatalan = 0\r\n sizeGalician = 0\r\n sizeSpanish = 0\r\n sizeEnglish = 0\r\n sizePortuguese = 0\r\n for tweet in allTweets:\r\n for x in range(len(tweet[0])-1):\r\n skip = False\r\n word = tweet[0][x]\r\n word2 = tweet[0][x+1]\r\n if v == 1:\r\n word = word.lower()\r\n word2 = word2.lower()\r\n elif v == 2:\r\n string = ''.join(letter for letter in word if letter.isalpha())\r\n string2 = ''.join(letter for letter in word2 if letter.isalpha())\r\n word = string\r\n word2 = string2\r\n elif v == 3:\r\n string = ''.join(letter for letter in word if letter.isalpha())\r\n string2 = ''.join(letter for letter in word2 if letter.isalpha())\r\n word = string.lower()\r\n word2 = string2.lower()\r\n elif v == 4:\r\n if checkWordObeyRules(word):\r\n string = ''.join(letter for letter in word if letter.isalpha())\r\n string2 = ''.join(letter for letter in word2 if letter.isalpha())\r\n word = string.lower()\r\n word2 = string2.lower()\r\n else:\r\n skip = True\r\n if skip == False:\r\n if (tweet[1] == 'eu'):\r\n arrayBasque[word+word2] += 1\r\n sizeBasque += 1\r\n if (tweet[1] == 'ca'):\r\n arrayCatalan[word+word2] += 1\r\n sizeCatalan += 1\r\n if (tweet[1] == 'gl'):\r\n arrayGalician[word+word2] += 1\r\n sizeGalician += 1\r\n if (tweet[1] == 'es'):\r\n arraySpanish[word+word2] += 1\r\n sizeSpanish += 1\r\n if (tweet[1] == 'en'):\r\n arrayEnglish[word+word2] += 1\r\n sizeEnglish += 1\r\n if (tweet[1] == 'pt'):\r\n arrayPortuguese[word+word2] += 1\r\n sizePortuguese += 1\r\n return (arrayBasque, sizeBasque), (arrayCatalan, sizeCatalan), (arrayGalician, sizeGalician), (arraySpanish, sizeSpanish), (arrayEnglish, sizeEnglish), (arrayPortuguese, sizePortuguese)\r\n\r\n\r\n\r\ndef 
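The six parallel Counter/size pairs in uniGram and biGram above can be collapsed into a single mapping keyed by language code. A minimal sketch, assuming a normalize(word) helper that encapsulates the v-dependent cleanup and returns None for skipped words (both names are hypothetical, not part of the file):

from collections import Counter, defaultdict

def count_ngrams(tweets, n, normalize):
    counts = defaultdict(Counter)  # language code -> n-gram frequencies
    sizes = defaultdict(int)       # language code -> total n-grams counted
    for words, lang in tweets:
        cleaned = [w for w in (normalize(w) for w in words) if w is not None]
        for i in range(len(cleaned) - n + 1):
            counts[lang][''.join(cleaned[i:i + n])] += 1
            sizes[lang] += 1
    return counts, sizes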
chooseGram(tweets,input,v):\r\n if input == 1:\r\n return uniGram(tweets,v)\r\n elif input == 2:\r\n return biGram(tweets,v)\r\n\r\n\r\ndef classify(tweet,smoothingProbability,vocabulary,basque,catalan,galician,spanish,english,portuguese):\r\n probabilityBasque = basque[2]\r\n probabilityCatalan = catalan[2]\r\n probabilityGalician = galician[2]\r\n probabilitySpanish = spanish[2]\r\n probabilityEnglish = english[2]\r\n probabilityPortuguese = portuguese[2]\r\n probBasque = 0\r\n probCatalan = 0\r\n probGalician = 0\r\n probSpanish = 0\r\n probEnglish = 0\r\n probPortuguese = 0\r\n for gram in tweet:\r\n probBasque += np.log((basque[0][gram] + smoothingProbability)/((len(vocabulary)*smoothingProbability) + basque[1]))\r\n probCatalan += np.log((catalan[0][gram] + smoothingProbability)/((len(vocabulary)*smoothingProbability)+ catalan[1]))\r\n probGalician += np.log((galician[0][gram] + smoothingProbability)/((len(vocabulary)*smoothingProbability) + galician[1]))\r\n probSpanish += np.log((spanish[0][gram] + smoothingProbability)/((len(vocabulary)*smoothingProbability) + spanish[1]))\r\n probEnglish += np.log((english[0][gram] + smoothingProbability)/((len(vocabulary)*smoothingProbability) + english[1]))\r\n probPortuguese += np.log((portuguese[0][gram] + smoothingProbability)/((len(vocabulary)*smoothingProbability) + portuguese[1]))\r\n probabilityBasque = np.log(probabilityBasque) + probBasque\r\n probabilityCatalan = np.log(probabilityCatalan) + probCatalan\r\n probabilityGalician = np.log(probabilityGalician) + probGalician\r\n probabilitySpanish = np.log(probabilitySpanish) + probSpanish\r\n probabilityEnglish = np.log(probabilityEnglish) + probEnglish\r\n probabilityPortuguese = np.log(probabilityPortuguese) + probPortuguese\r\n probability = max(probabilityBasque, probabilityCatalan, probabilityGalician ,probabilitySpanish,probabilityEnglish,probabilityPortuguese)\r\n # print(probability)\r\n # print(np.exp(probability))\r\n if probability == probabilityBasque:\r\n return \"eu\",np.exp(probability)\r\n elif probability == probabilityCatalan:\r\n return \"ca\",np.exp(probability)\r\n elif probability == probabilityGalician:\r\n return \"gl\",np.exp(probability)\r\n elif probability == probabilitySpanish:\r\n return \"es\",np.exp(probability)\r\n elif probability == probabilityEnglish:\r\n return \"en\",np.exp(probability)\r\n elif probability == probabilityPortuguese:\r\n return \"pt\",np.exp(probability)\r\n\r\ndef generateGram(tweet,n,v):\r\n arrayTweet = Counter()\r\n if n ==1:\r\n for word in tweet[0]:\r\n skip = False\r\n if v == 1:\r\n word = word.lower()\r\n elif v == 2:\r\n string = ''.join(letter for letter in word if letter.isalpha())\r\n word = string\r\n elif v == 3:\r\n string = ''.join(letter for letter in word if letter.isalpha())\r\n word = string.lower()\r\n elif v == 4:\r\n if checkWordObeyRules(word):\r\n string = ''.join(letter for letter in word if letter.isalpha())\r\n word = string.lower()\r\n else:\r\n skip = True\r\n if skip == False:\r\n arrayTweet[word] += 1\r\n if n == 2:\r\n for x in range(len(tweet[0])-1):\r\n skip = False\r\n word = tweet[0][x]\r\n word2 = tweet[0][x+1]\r\n if v == 1:\r\n word = word.lower()\r\n word2 = word2.lower()\r\n elif v == 2:\r\n string = ''.join(letter for letter in word if letter.isalpha())\r\n string2 = ''.join(letter for letter in word2 if letter.isalpha())\r\n word = string\r\n word2 = string2\r\n elif v == 3:\r\n string = ''.join(letter for letter in word if letter.isalpha())\r\n string2 = ''.join(letter for letter in word2 
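What classify above accumulates is the standard add-delta (Lidstone) smoothed Naive Bayes score, computed in log space to avoid underflow:

\log P(L \mid t) = \log P(L) + \sum_{g \in t} \log \frac{c_L(g) + \delta}{N_L + \delta\,|V|}

where c_L(g) is the training count of gram g for language L, N_L the total number of grams seen for L, |V| the size of the combined vocabulary, and \delta the smoothingProbability argument; the language with the largest score is returned.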
if letter.isalpha())\r\n word = string.lower()\r\n word2 = string2.lower()\r\n elif v == 4:\r\n if checkWordObeyRules(word):\r\n string = ''.join(letter for letter in word if letter.isalpha())\r\n string2 = ''.join(letter for letter in word2 if letter.isalpha())\r\n word = string.lower()\r\n word2 = string2.lower()\r\n else:\r\n skip = True\r\n if skip == False:\r\n arrayTweet[word+word2] += 1\r\n return arrayTweet\r\n\r\ndef runClassifier(n,tweets,smoothingProbability,vocabulary,basque,catalan,galician,spanish,english,portuguese,allTweetId,v):\r\n arrayResult = []\r\n for tweet in tweets:\r\n gram = generateGram(tweet,n,v)\r\n resultClassify,resultProbability = classify(gram, smoothingProbability, vocabulary, basque, catalan, galician, spanish, english, portuguese)\r\n arrayResult.append((resultClassify,tweet[1],resultProbability))\r\n\r\n traceFile = open(\"./byomOutputFiles/trace_\" + str(v) + \"_\" + str(n) + \"_\" + str(smoothingProbability) + \".txt\", \"w\")\r\n counter = 0\r\n total = len(arrayResult)\r\n correct = 0\r\n tpBasque = 0\r\n fpBasque = 0\r\n fnBasque = 0\r\n tpCatalan = 0\r\n fpCatalan = 0\r\n fnCatalan = 0\r\n tpGalician = 0\r\n fpGalician = 0\r\n fnGalician = 0\r\n tpSpanish = 0\r\n fpSpanish = 0\r\n fnSpanish = 0\r\n tpEnglish = 0\r\n fpEnglish = 0\r\n fnEnglish = 0\r\n tpPortuguese = 0\r\n fpPortuguese = 0\r\n fnPortuguese = 0\r\n precisionBasque = 0.0\r\n precisionCatalan = 0.0\r\n precisionGalician = 0.0\r\n precisionSpanish = 0.0\r\n precisionEnglish = 0.0\r\n precisionPortuguese = 0.0\r\n recallBasque = 0.0\r\n recallCatalan = 0.0\r\n recallGalician = 0.0\r\n recallSpanish = 0.0\r\n recallEnglish = 0.0\r\n recallPortuguese = 0.0\r\n f1MeasureBasque = 0.0\r\n f1MeasureCatalan = 0.0\r\n f1MeasureGalician = 0.0\r\n f1MeasureSpanish = 0.0\r\n f1MeasureEnglish = 0.0\r\n f1MeasurePortuguese = 0.0\r\n for elem in arrayResult:\r\n if elem[0] == elem[1]:\r\n if elem[0] == 'eu':\r\n tpBasque += 1\r\n elif elem[0] == 'ca':\r\n tpCatalan += 1\r\n elif elem[0] == 'gl':\r\n tpGalician += 1\r\n elif elem[0] == 'es':\r\n tpSpanish += 1\r\n elif elem[0] == 'en':\r\n tpEnglish += 1\r\n elif elem[0] == 'pt':\r\n tpPortuguese += 1\r\n correct += 1\r\n traceFile.write(allTweetId[counter] + \" \" + elem[0] + \" \" + str(elem[2]) + \" \" + elem[1] + \" \" + \"correct\\n\")\r\n else:\r\n if elem[0] == 'eu':\r\n fpBasque += 1\r\n elif elem[0] == 'ca':\r\n fpCatalan += 1\r\n elif elem[0] == 'gl':\r\n fpGalician += 1\r\n elif elem[0] == 'es':\r\n fpSpanish += 1\r\n elif elem[0] == 'en':\r\n fpEnglish += 1\r\n elif elem[0] == 'pt':\r\n fpPortuguese += 1\r\n\r\n if elem[1] == 'eu':\r\n fnBasque += 1\r\n elif elem[1] == 'ca':\r\n fnCatalan += 1\r\n elif elem[1] == 'gl':\r\n fnGalician += 1\r\n elif elem[1] == 'es':\r\n fnSpanish += 1\r\n elif elem[1] == 'en':\r\n fnEnglish += 1\r\n elif elem[1] == 'pt':\r\n fnPortuguese += 1\r\n traceFile.write(allTweetId[counter] + \" \" + elem[0] + \" \" + str(elem[2]) + \" \" + elem[1] + \" \" + \"wrong\\n\")\r\n counter += 1\r\n evaluationFile = open(\"./byomOutputFiles/eval_\" + str(v) + \"_\" + str(n) + \"_\" + str(smoothingProbability) + \".txt\", \"w\")\r\n accuracy = correct/total\r\n if (tpBasque + fpBasque) != 0:\r\n precisionBasque = tpBasque / (tpBasque + fpBasque)\r\n if (tpCatalan + fpCatalan) != 0:\r\n precisionCatalan = tpCatalan / (tpCatalan + fpCatalan)\r\n if (tpGalician + fpGalician) != 0:\r\n precisionGalician = tpGalician / (tpGalician + fpGalician)\r\n if (tpSpanish + fpSpanish) != 0:\r\n precisionSpanish = tpSpanish / (tpSpanish + 
fpSpanish)\r\n if (tpEnglish + fpEnglish) != 0:\r\n precisionEnglish = tpEnglish/(tpEnglish + fpEnglish)\r\n if (tpPortuguese + fpPortuguese) != 0:\r\n precisionPortuguese = tpPortuguese / (tpPortuguese + fpPortuguese)\r\n if (tpBasque + fnBasque) != 0:\r\n recallBasque = tpBasque / (tpBasque + fnBasque)\r\n if (tpCatalan + fnCatalan) != 0:\r\n recallCatalan = tpCatalan / (tpCatalan + fnCatalan)\r\n if (tpGalician + fnGalician) != 0:\r\n recallGalician = tpGalician / (tpGalician + fnGalician)\r\n if (tpSpanish + fnSpanish) != 0:\r\n recallSpanish = tpSpanish / (tpSpanish + fnSpanish)\r\n if (tpEnglish + fnEnglish) != 0:\r\n recallEnglish = tpEnglish/(tpEnglish + fnEnglish)\r\n if (tpPortuguese + fnPortuguese) != 0:\r\n recallPortuguese = tpPortuguese / (tpPortuguese + fnPortuguese)\r\n if (precisionBasque + recallBasque) != 0:\r\n f1MeasureBasque = (2*precisionBasque*recallBasque) / (precisionBasque + recallBasque)\r\n if (precisionCatalan + recallCatalan) != 0:\r\n f1MeasureCatalan = (2*precisionCatalan*recallCatalan) / (precisionCatalan + recallCatalan)\r\n if (precisionGalician + recallGalician) != 0:\r\n f1MeasureGalician = (2*precisionGalician*recallGalician) / (precisionGalician + recallGalician)\r\n if (precisionSpanish + recallSpanish) != 0:\r\n f1MeasureSpanish = (2*precisionSpanish*recallSpanish) / (precisionSpanish + recallSpanish)\r\n if (precisionEnglish + recallEnglish) != 0:\r\n f1MeasureEnglish = (2*precisionEnglish*recallEnglish) / (precisionEnglish + recallEnglish)\r\n if (precisionPortuguese + recallPortuguese) != 0:\r\n f1MeasurePortuguese = (2*precisionPortuguese*recallPortuguese) / (precisionPortuguese + recallPortuguese)\r\n macroF1 = (f1MeasureBasque + f1MeasureCatalan + f1MeasureGalician + f1MeasureSpanish + f1MeasureEnglish + f1MeasurePortuguese)/6\r\n weightedBasque = f1MeasureBasque * (tpBasque + fnBasque)\r\n weightedCatalan = f1MeasureCatalan * (tpCatalan + fnCatalan)\r\n weightedGalician = f1MeasureGalician * (tpGalician + fnGalician)\r\n weightedSpanish = f1MeasureSpanish * (tpSpanish + fnSpanish)\r\n weightedEnglish = f1MeasureEnglish * (tpEnglish + fnEnglish)\r\n weightedPortuguese = f1MeasurePortuguese * (tpPortuguese + fnPortuguese)\r\n weightedAverageF1 = (weightedBasque + weightedCatalan + weightedGalician + weightedSpanish + weightedEnglish + weightedPortuguese)/total\r\n evaluationFile.write(str(accuracy)+\"\\n\")\r\n evaluationFile.write(str(precisionBasque) + \" \" + str(precisionCatalan) + \" \" + str(precisionGalician) + \" \" + str(precisionSpanish) + \" \" + str(precisionEnglish) + \" \" + str(precisionPortuguese) + \"\\n\")\r\n evaluationFile.write(str(recallBasque) + \" \" + str(recallCatalan) + \" \" + str(recallGalician) + \" \" + str(recallSpanish) + \" \" + str(recallEnglish) + \" \" + str(recallPortuguese) + \"\\n\")\r\n evaluationFile.write(str(f1MeasureBasque) + \" \" + str(f1MeasureCatalan) + \" \" + str(f1MeasureGalician) + \" \" + str(f1MeasureSpanish) + \" \" + str(f1MeasureEnglish) + \" \" + str(f1MeasurePortuguese) + \"\\n\")\r\n evaluationFile.write(str(macroF1) + \" \" + str(weightedAverageF1))\r\n traceFile.close()\r\n evaluationFile.close()\r\ndef main():\r\n\r\n V = 4\r\n n = 1\r\n smooth = 0.7\r\n vocabularyTraining, languages, trainingTweets = readDocument('data.txt', V)\r\n testTweets,tweetID,vocabularyTest = readTestingDocument('test.txt', V)\r\n vocabularyMix = [y for x in [vocabularyTraining, vocabularyTest] for y in x]\r\n vocabulary = set(vocabularyMix)\r\n countBasque = languages.count('eu')\r\n countCatalan = 
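On the two summary numbers computed above: macroF1 averages the six per-language F1 scores equally, while weightedAverageF1 weights each F1 by its support (tp + fn, the number of true instances of that language). The same arithmetic in compact form (the helper below is illustrative, not part of the file):

def macro_and_weighted_f1(f1_by_class, support_by_class):
    # support of a class == tp + fn; assumes at least one true instance overall
    classes = list(f1_by_class)
    total = sum(support_by_class[c] for c in classes)
    macro = sum(f1_by_class[c] for c in classes) / len(classes)
    weighted = sum(f1_by_class[c] * support_by_class[c] for c in classes) / total
    return macro, weighted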
languages.count('ca')\r\n countGalician = languages.count('gl')\r\n countSpanish = languages.count('es')\r\n countEnglish = languages.count('en')\r\n countPortuguese = languages.count('pt')\r\n probabilityBasque = countBasque/len(languages)\r\n probabilityCatalan = countCatalan/len(languages)\r\n probabilityGalician = countGalician / len(languages)\r\n probabilitySpanish = countSpanish / len(languages)\r\n probabilityEnglish = countEnglish / len(languages)\r\n probabilityPortuguese = countPortuguese / len(languages)\r\n basque, catalan, galician, spanish, english, portuguese = chooseGram(trainingTweets,n,V)\r\n basque = (basque[0],basque[1],probabilityBasque)\r\n catalan = (catalan[0],catalan[1],probabilityCatalan)\r\n galician = (galician[0],galician[1],probabilityGalician)\r\n spanish = (spanish[0],spanish[1],probabilitySpanish)\r\n english = (english[0],english[1],probabilityEnglish)\r\n portuguese = (portuguese[0],portuguese[1],probabilityPortuguese)\r\n runClassifier(n,testTweets,smooth,vocabulary,basque,catalan,galician,spanish,english,portuguese,tweetID,V)\r\n\r\n\r\n\r\n\r\n# MAIN METHOD RUNNER:\r\nif __name__ == \"__main__\":\r\n main()","sub_path":"byomodel.py","file_name":"byomodel.py","file_ext":"py","file_size_in_byte":20434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"622071204","text":"import socket\n\n\ndef execute():\n # 1. create the socket\n udpSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n # 2. bind a local address\n localaddr = (\"127.0.0.1\", 8080)\n udpSocket.bind(localaddr)\n # 3. receive data\n recvData = udpSocket.recvfrom(1024) # 1024 is the maximum number of bytes received in one call\n\n # 4. print the received data\n # print(recvData)\n # recvData is a tuple: (received data, (sender ip, port))\n recvMsg = recvData[0] # the received payload\n sendAddr = recvData[1] # the sender's address info\n\n print(\"%s:%s\" % (str(sendAddr), recvMsg.decode(\"utf-8\")))\n\n\n # 5. close the socket\n udpSocket.close()\n\n\nif __name__ == '__main__':\n execute()","sub_path":"demo-python/base/demo/thread/udpReceiveDemo.py","file_name":"udpReceiveDemo.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"160630175","text":"import asyncio\nimport logging\n\nfrom aiohttp import web\n\nfrom .game import Game, ForbiddenMoveError, MoveIsNotPossible\n\n\nclass Views:\n def __init__(self):\n self._game = Game()\n\n @staticmethod\n def _prepare_response(data):\n return web.json_response({\n 'status': 'success',\n 'data': data\n })\n\n @staticmethod\n async def health_check(_request):\n return web.json_response({'status': 'healthy'})\n\n async def connect(self, request):\n if self._game.is_started():\n raise web.HTTPBadRequest(text='Game has already been started')\n\n try:\n team_name = request.query['team_name']\n except KeyError:\n raise web.HTTPBadRequest(\n text='team_name query parameter is missing'\n )\n\n logging.info(f'{team_name} connected')\n response = self._game.add_player(team_name)\n\n while not self._game.is_started():\n await asyncio.sleep(0.1)\n\n return self._prepare_response(response)\n\n async def game(self, _request):\n return self._prepare_response(self._game.json)\n\n async def move(self, request):\n if not self._game.is_started() or self._game.is_finished():\n raise web.HTTPBadRequest(text='You cannot make a move right now')\n\n try:\n header = request.headers[\"Authorization\"]\n\n except KeyError as exc:\n raise web.HTTPUnauthorized(\n headers={\"WWW-Authenticate\": \"Token\"}\n ) from exc\n\n token = header.split()[1]\n try:\n body 
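For completeness, a matching sender for the UDP receiver above would look roughly like this (a sketch; no sender script appears in this record, and the address is assumed to match the receiver's):

import socket

def send(message, addr=("127.0.0.1", 8080)):
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.sendto(message.encode("utf-8"), addr)
    s.close()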
= await request.json()\n move = body['move']\n\n except KeyError:\n raise web.HTTPBadRequest(\n text='move parameter is missing'\n )\n\n try:\n self._game.move(token, move)\n return self._prepare_response('successful move')\n\n except ForbiddenMoveError:\n raise web.HTTPForbidden(\n text='invalid token for current player move'\n )\n except MoveIsNotPossible as e:\n raise web.HTTPBadRequest(\n text=str(e)\n )\n\n def configure(self, app):\n app.router.add_get(\n '/health_check', self.health_check, name='health_check'\n )\n app.router.add_get(\n '/game', self.game, name='get_game'\n )\n app.router.add_post(\n '/game', self.connect, name='connect_to_the_game'\n )\n app.router.add_post(\n '/move', self.move, name='make_game_move'\n )\n","sub_path":"backend/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"54253020","text":"# reserve -> check each spare against the lost list as we go\n# also handle the case where a student brought a spare but lost their own\n\nimport unittest\n\n\ndef solution(n: int, lost: list, reserve: list) -> int:\n losts = list(set(lost) - set(reserve))\n reserves = list(set(reserve) - set(lost))\n\n answer = n - len(losts)\n for re in reserves:\n if re - 1 in losts:\n del losts[losts.index(re - 1)]\n answer += 1\n elif re in losts:\n del losts[losts.index(re)]\n answer += 1\n elif re + 1 in losts:\n del losts[losts.index(re + 1)]\n answer += 1\n return answer\n\n\nclass SolutionTest(unittest.TestCase):\n def setUp(self) -> None:\n self._n = 5\n self._lost = [1, 2, 4, 5]\n self._reserve = [2, 3, 4]\n self._result = 3\n\n def test_solution(self):\n result = solution(n=self._n, lost=self._lost, reserve=self._reserve)\n self.assertEqual(self._result, result)\n\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"programmers/level1/min/_체육복.py","file_name":"_체육복.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"568597055","text":"import numpy as np\nimport copy\nimport math\nfrom collections import Counter\n\nfrom src.utils import data\nfrom src.utils import evaluation\n\n# ===================\n# K-NEAREST NEIGHBORS \n# ===================\n\nclass KNearestNeighbors(object):\n \"\"\"\n K-Nearest Neighbors (KNN) is a non-parametric classifier.\n It looks at the K points in the training set that are nearest to the test input x,\n counts how many members of each class are in this set, and returns that empirical\n fraction as the estimate\n\n p(y = c|x, D, K) = 1/K * \\\sum_i (1 if y_i = c else 0)\n \n Args:\n k (int): hyperparameter controlling the number of closest neighbors\n \"\"\"\n def __init__(self, k=2):\n # \"closest neighbors\" when k > 1\n # \"nearest neighbors\" when k == 1\n self.k = k\n\n \"\"\"\n Predict the targets based on the training and test sets\n Args:\n x_train (float[][]): training set\n y_train (float[]|string[]): training target labels\n x_test (float[][]): test set\n Returns:\n float[]|string[]\n \"\"\"\n def predict(self, x_train, y_train, x_test):\n # Hold the prediction\n y_pred = np.zeros(x_test.shape[0])\n K = min(self.k, x_train.shape[0])\n\n # Find the distances between the test set and train set\n distances = self._test_training_distance(x_test, x_train)\n\n # Find the closest distance for each test value\n for i in range(x_test.shape[0]):\n # Sort the distance from \"close\" to \"far\" and get the k neighbors\n sorted_distance = np.argsort(distances[i, :], axis=0)\n k_nearest_neighbors = 
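A quick numerical check of the ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x.y expansion that the KNN distance method below relies on (a standalone sketch; the clamp guards against tiny negative values from floating-point rounding):

import numpy as np

rng = np.random.default_rng(0)
x = rng.random((5, 3))   # pretend test points
y = rng.random((7, 3))   # pretend training points

direct = np.sqrt(((x[:, None, :] - y[None, :, :]) ** 2).sum(axis=2))
expanded = np.square(x).sum(axis=1)[:, None] + np.square(y).sum(axis=1) - 2.0 * (x @ y.T)
fast = np.sqrt(np.maximum(expanded, 0.0))
assert np.allclose(direct, fast)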
y_train[sorted_distance[:K]]\n\n # Count the most frequent value\n labels, counts = np.unique(k_nearest_neighbors, return_counts=True)\n y_pred[i] = labels[np.argmax(counts)]\n\n return y_pred\n\n \"\"\"\n The distance between the test and training sample.\n l2 distance (euclidean distance) between the vectors.\n\n (x-y)^2 = x^2 + y^2 - 2xy\n\n Args:\n x (float[][]): x value in above equation, will be broadcasted to match y\n y (float[][]): y value in above equation\n Returns:\n float[][]\n \"\"\"\n def _test_training_distance(self, x, y):\n x_square = np.sum(np.square(x), axis=1)\n y_square = np.sum(np.square(y), axis=1)\n x_y = np.dot(x, y.T)\n\n return np.sqrt(np.reshape(x_square, (-1, 1)) + y_square - 2.*x_y)\n\n \n \n","sub_path":"src/supervised_learning/k_nearest_neighbors.py","file_name":"k_nearest_neighbors.py","file_ext":"py","file_size_in_byte":2563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"8085259","text":"#########################################################################################\n#\n# Python script for building and packaging a release of the GUI software\n#\n# Created: Daniel Lasry, Feb 2019\n#\n# This is meant for members of the OpenBCI organization to quickly build new releases:\n# https://github.com/OpenBCI/OpenBCI_GUI/releases\n#\n# Usage: > python release_script/make-release.py\n# No warranty. Use at your own risk. \n#\n#########################################################################################\n\nimport sys\nimport os\nimport shutil\nimport platform\nimport subprocess\nimport argparse\nimport requests\nimport fileinput\nfrom bs4 import BeautifulSoup\n\n### Define platform-specific strings\n###########################################################\nMAC = 'Darwin'\nLINUX = 'Linux'\nWINDOWS = 'Windows'\nLOCAL_OS = platform.system()\n\nflavors = {\n WINDOWS : \"application.windows64\",\n LINUX : \"application.linux64\",\n MAC : \"application.macosx\"\n}\n\ndata_dir_names = {\n WINDOWS : \"data\",\n LINUX : \"data\",\n MAC : os.path.join(\"OpenBCI_GUI.app\", \"Contents\", \"Java\", \"data\")\n}\n\ndef get_timestamp_ci():\n repo_slug = None\n commit_id = None\n\n repo_slug = os.getenv(\"TRAVIS_REPO_SLUG\")\n if repo_slug is None:\n repo_slug = os.getenv(\"APPVEYOR_REPO_NAME\")\n\n commit_id = os.getenv(\"TRAVIS_COMMIT\")\n if commit_id is None:\n commit_id = os.getenv(\"APPVEYOR_REPO_COMMIT\")\n\n if repo_slug and commit_id:\n url = \"http://github.com/\" + repo_slug + \"/commit/\" + commit_id;\n\n page = requests.get(url)\n soup = BeautifulSoup(page.content, features=\"html.parser\")\n\n timestamp = soup.find(\"relative-time\")[\"datetime\"]\n timestamp = timestamp.replace(\":\", \"-\")\n timestamp = timestamp.replace(\"T\", \"_\")\n timestamp = timestamp.replace(\"Z\", \"\")\n\n # write timestamp to file for use in CI\n with open(\"temp/timestamp.txt\", 'w') as tempFile:\n tempFile.write(timestamp)\n\n return timestamp\n\n return \"\"\n\n### Function: Pretty format for timestamp\n###########################################################\ndef make_timestamp_pretty(timestamp):\n dateAndTime = timestamp.split(\"_\")\n\n date = dateAndTime[0]\n time = dateAndTime[1]\n\n dateString = \"/\".join(date.split(\"-\"))\n timeString = \":\".join(time.split(\"-\"))\n\n return dateString + \" \" + timeString\n\n### Function: Apply timestamp in code\n###########################################################\ndef apply_timestamp(sketch_dir, timestamp):\n main_file_dir = os.path.join(sketch_dir, 
\"OpenBCI_GUI.pde\")\n\n pretty_timestamp = make_timestamp_pretty(timestamp)\n\n data = []\n with open(main_file_dir, 'r') as sketch_file:\n data = sketch_file.readlines()\n\n for i in range(0, len(data)):\n if data[i].startswith(\"String localGUIVersionDate\"):\n print(data[i])\n data[i] = \"String localGUIVersionDate = \\\"\" + pretty_timestamp + \"\\\";\\n\"\n break\n\n with open(main_file_dir, 'w') as sketch_file:\n sketch_file.writelines(data)\n\n### Function: Rename flavor with GUI version\n###########################################################\ndef get_release_dir_name(sketch_dir, flavor, timestamp):\n main_file_dir = os.path.join(sketch_dir, \"OpenBCI_GUI.pde\")\n version_str = \"VERSION.NOT.FOUND\"\n with open(main_file_dir, 'r') as sketch_file:\n for line in sketch_file:\n if line.startswith(\"String localGUIVersionString\"):\n quotes_pos = [pos for pos, char in enumerate(line) if char == '\"']\n version_str = line[quotes_pos[0]+1:quotes_pos[1]]\n print(version_str)\n break\n\n # write version string to file for use in CI\n with open(\"temp/versionstring.txt\", 'w') as tempFile:\n tempFile.write(version_str)\n\n new_name = \"openbcigui_\" + version_str + \"_\"\n if timestamp:\n new_name = new_name + timestamp + \"_\"\n return flavor.replace(\"application.\", new_name)\n\n### Function: Find the sketch directory\n###########################################################\ndef find_sketch_dir():\n # processing-java requires the cwd to build a release\n cwd = os.getcwd()\n sketch_dir = os.path.join(cwd, \"OpenBCI_GUI\")\n\n # check that we are in the right directory to build\n main_file_dir = os.path.join(sketch_dir, \"OpenBCI_GUI.pde\")\n if not os.path.isfile(main_file_dir):\n sys.exit(\"ERROR: Could not find sketch file: \" + main_file_dir)\n\n return sketch_dir\n\n### Function: Clean up any old build directories or .zips\n###########################################################\ndef cleanup_build_dirs():\n print(\"Cleanup ...\")\n for file in os.listdir(os.getcwd()):\n if file.startswith(\"application.\") or file.startswith(\"openbcigui_\"):\n file_path = os.path.join(os.getcwd(), file)\n if os.path.isdir(file_path):\n shutil.rmtree(file_path)\n print (\"Successfully deleted \" + file)\n elif os.path.isfile(file_path):\n os.remove(file_path)\n print (\"Successfully deleted \" + file)\n\n### Function: Ask user for windows signing info\n###########################################################\ndef ask_windows_signing():\n windows_signing = False\n windows_pfx_path = ''\n windows_pfx_password = ''\n if LOCAL_OS == WINDOWS:\n is_signing = input(\"Will you be signing the app? (Y/n): \")\n if is_signing.lower() != 'n':\n windows_signing = True\n windows_pfx_path = input(\"Path to PFX file: \")\n while not os.path.isfile(windows_pfx_path):\n windows_pfx_path = input(\"PFX file not found. 
Re-enter: \")\n windows_pfx_password = input(\"Password for the PFX file: \")\n\n return windows_signing, windows_pfx_path, windows_pfx_password\n\n### Function: Run a build using processing-java\n###########################################################\ndef build_app(sketch_dir, flavor):\n # unfortunately, processing-java always returns exit code 1,\n # so we can't reliably check for success or failure\n # https://github.com/processing/processing/issues/5468\n print (\"Using sketch: \" + sketch_dir)\n subprocess.check_call([\"processing-java\", \"--sketch=\" + sketch_dir, \"--output=\" + os.path.join(os.getcwd(), flavor), \"--export\"])\n\n### Function: Package the app in the expected file structure\n###########################################################\ndef package_app(sketch_dir, flavor, timestamp, windows_signing=False, windows_pfx_path = '', windows_pfx_password = ''):\n # sanity check: is the build output there?\n build_dir = os.path.join(os.getcwd(), flavor)\n if not os.path.isdir(build_dir):\n sys.exit(\"ERROR: Could not find build ouput: \" + build_dir)\n\n # rename the build dir\n release_dir_name = get_release_dir_name(sketch_dir, flavor, timestamp)\n new_build_dir = os.path.join(os.getcwd(), release_dir_name)\n os.rename(build_dir, new_build_dir)\n build_dir = new_build_dir\n\n # Allow GUI to launch from directory with spaces #916\n if LOCAL_OS == LINUX:\n # Read in the file\n with open(build_dir + '/OpenBCI_GUI', 'r') as file :\n filedata = file.read()\n\n # Replace the target string\n filedata = filedata.replace('$APPDIR/java/bin/java', '\\\"$APPDIR/java/bin/java\\\"')\n\n # Write the file out again\n with open(build_dir + '/OpenBCI_GUI', 'w') as file:\n file.write(filedata)\n\n print ( \"Fixed issue on Linux when launching from directory with spaces.\")\n\n # delete source directory\n source_dir = os.path.join(build_dir, \"source\")\n try:\n shutil.rmtree(source_dir)\n except OSError as err:\n print (err)\n print (\"WARNING: Could not delete source dir: \" + source_dir)\n else:\n print (\"Successfully deleted source dir.\")\n\n ### On mac, copy the icon file and sign the app\n ###########################################################\n if LOCAL_OS == MAC:\n app_dir = os.path.join(build_dir, \"OpenBCI_GUI.app\")\n icon_dir = os.path.join(sketch_dir, \"sketch.icns\")\n icon_dest = os.path.join(app_dir, \"Contents\", \"Resources\", \"sketch.icns\")\n try:\n shutil.copy2(icon_dir, icon_dest)\n except IOError:\n print (\"WARNING: Failed to copy sketch.icns\")\n else:\n print (\"Successfully copied sketch.icns\")\n\n # sign the app\n try:\n subprocess.check_call([\"codesign\", \"-f\", \"-v\", \"-s\"\\\n \"Developer ID Application: OpenBCI, Inc. 
(3P82WRGLM8)\", app_dir])\n except subprocess.CalledProcessError as err:\n print (err)\n print (\"WARNING: Failed to sign app.\")\n else:\n print (\"Successfully signed app.\")\n\n if LOCAL_OS == WINDOWS:\n exe_dir = os.path.join(build_dir, \"OpenBCI_GUI.exe\")\n assert(os.path.isfile(exe_dir))\n\n # On Windows, set the application manifest\n ###########################################################\n try:\n subprocess.check_call([\"mt\", \"-manifest\", \"release_script/windows_only/gui.manifest\",\n \"-outputresource:\" + exe_dir + \";#1\"])\n except subprocess.CalledProcessError as err:\n print (err)\n print (\"WARNING: Failed to set manifest for OpenBCI_GUI.exe\")\n\n java_exe_dir = os.path.join(build_dir, \"java\", \"bin\", \"java.exe\")\n javaw_exe_dir = os.path.join(build_dir, \"java\", \"bin\", \"javaw.exe\")\n assert (os.path.isfile(java_exe_dir))\n assert (os.path.isfile(javaw_exe_dir))\n try:\n subprocess.check_call([\"mt\", \"-manifest\", \"release_script/windows_only/java.manifest\",\n \"-outputresource:\" + java_exe_dir + \";#1\"])\n subprocess.check_call([\"mt\", \"-manifest\", \"release_script/windows_only/java.manifest\",\n \"-outputresource:\" + javaw_exe_dir + \";#1\"])\n except subprocess.CalledProcessError as err:\n print (err)\n print (\"WARNING: Failed to set manifest for java.exe and javaw.exe\")\n\n ### On Windows, sign the app\n ###########################################################\n if windows_signing:\n try:\n subprocess.check_call([\"SignTool\", \"sign\", \"/f\", windows_pfx_path, \"/p\",\\\n windows_pfx_password, \"/tr\", \"http://timestamp.digicert.com\", \"/td\", \"SHA256\", exe_dir])\n except subprocess.CalledProcessError as err:\n print (err)\n print (\"WARNING: Failed to sign app.\")\n\n ### On Mac, make a .dmg and sign it\n ###########################################################\n if LOCAL_OS == MAC:\n app_dir = os.path.join(build_dir, \"OpenBCI_GUI.app\")\n dmg_dir = build_dir + \".dmg\"\n try:\n subprocess.check_call([\"dmgbuild\", \"-s\", \"release_script/mac_only/dmgbuild_settings.py\", \"-D\",\\\n \"app=\" + app_dir, \"OpenBCI_GUI\", dmg_dir])\n except subprocess.CalledProcessError as err:\n print (err)\n print (\"WARNING: Failed create the .dmg file.\")\n else:\n print (\"Successfully created \" + dmg_dir)\n\n # sign the dmg\n try:\n subprocess.check_call([\"codesign\", \"-f\", \"-v\", \"-s\"\\\n \"Developer ID Application: OpenBCI, Inc. (3P82WRGLM8)\", dmg_dir])\n except subprocess.CalledProcessError as err:\n print (err)\n print (\"WARNING: Failed to sign dmg.\")\n else:\n print (\"Successfully signed dmg.\")\n\n ### Else zip the file\n ###########################################################\n else:\n print (\"Zipping ...\")\n # fix the directory structure: application.windows64/OpenBCI_GUI/OpenBCI_GUI.exe\n temp_dir = os.path.join(sketch_dir, \"OpenBCI_GUI\")\n os.rename(build_dir, temp_dir)\n os.mkdir(build_dir)\n shutil.move(temp_dir, build_dir)\n print (\"Done: \" + shutil.make_archive(build_dir, 'zip', build_dir))\n\n\ndef main ():\n parser = argparse.ArgumentParser ()\n # use docs to check which parameters are required for specific board, e.g. 
for Cyton - set serial port\n parser.add_argument ('--no-prompts', action = 'store_true', help = 'whether to prompt the user for anything', required = False)\n parser.add_argument ('--pfx-path', type = str, help = 'path to the pfx file for windows signing', required = False, default = '', nargs='?')\n parser.add_argument ('--pfx-password', type = str, help = 'password for the pfx file for windows signing', required = False, default = '', nargs='?')\n args = parser.parse_args ()\n\n # grab the sketch directory\n sketch_dir = find_sketch_dir()\n\n # ask about signing\n windows_signing = False\n windows_pfx_path = args.pfx_path\n windows_pfx_password = args.pfx_password\n\n if windows_pfx_path and windows_pfx_password:\n windows_signing = True\n elif(not args.no_prompts):\n windows_signing, windows_pfx_path, windows_pfx_password = ask_windows_signing()\n\n # Cleanup to start\n cleanup_build_dirs()\n\n flavor = flavors[LOCAL_OS]\n\n timestamp = get_timestamp_ci()\n if timestamp:\n apply_timestamp(sketch_dir, timestamp)\n\n # run the build (processing-java)\n build_app(sketch_dir, flavor)\n\n #package it up\n package_app(sketch_dir, flavor, timestamp, windows_signing, windows_pfx_path, windows_pfx_password)\n\nif __name__ == \"__main__\":\n main ()","sub_path":"release_script/make-release.py","file_name":"make-release.py","file_ext":"py","file_size_in_byte":13495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"153394369","text":"#!/usr/bin/env python3\n\nimport time\nimport sys\nimport math\nimport random\nimport unittest\n\n\nclass RmqNaive:\n def __init__(self, seq):\n self.seq = seq\n\n def query(self, l, r):\n return min(self.seq[l:r], default=None)\n\n\nclass RmqSquare:\n \"\"\"RMQ using sqrt decomposition.\n\n Reading list;\n http://www.quora.com/How-does-the-technique-of-sqrt-N-decomposition-work-and-in-what-kind-of-problems-is-it-useful\n http://e-maxx.ru/algo/sqrt_decomposition\n\n \"\"\"\n\n def __init__(self, seq):\n n = len(seq)\n b = int(math.ceil(math.sqrt(n)))\n i = 0\n blocks = []\n while i < n:\n block = seq[i:i+b]\n blocks.append(min(block))\n i += b\n\n self.seq = seq\n self.b = b\n self.blocks = blocks\n\n def query(self, l, r):\n b = self.b\n if not b:\n return None\n\n bs = int(math.ceil (float(l) / b))\n be = int(math.floor(float(r) / b))\n min_blocks = min(self.blocks[bs:be], default=None)\n min_l = min(self.seq[l : min(bs*b, r)], default=None)\n min_r = min(self.seq[max(be*b,l) : r], default=None)\n min_all = min([x for x in (min_blocks, min_l, min_r) if x is not None],\n default=None)\n return min_all\n\n\nclass Test_RMQ(unittest.TestCase):\n\n def generate_list(self, min_len=0, max_len=10):\n l = []\n for _ in range(random.randint(min_len, max_len)):\n l.append(random.randint(-100, 100))\n return l\n\n def test_fixed(self):\n seq = [49, 62, -21, 98, -56, 3, 12, 98, 3, -54]\n rmq = RmqSquare(seq)\n self.assertEqual(-56, rmq.query(1, 5))\n\n def test_out_of_bounds(self):\n seq = [-99, 0, 99]\n rmq = RmqSquare(seq)\n self.assertEqual(None, rmq.query(3,3))\n\n def test_one_element(self):\n seq = [-99, 99]\n rmq = RmqSquare(seq)\n self.assertEqual(None, rmq.query(1,1))\n \n def test_find_mininum_in_random_array(self):\n for case_number in range(200):\n seq = self.generate_list()\n rmq = RmqSquare(seq)\n l = random.randint(0, len(seq))\n r = random.randint(l, len(seq))\n expected = min(seq[l:r], default=None)\n actual = rmq.query(l, r)\n if not expected == actual:\n self.fail(\"Expected {} != actual {} (seq 
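To make the block split in RmqSquare.query above concrete, a small worked example (assuming the class from this record is in scope):

# With len(seq) == 10, b == ceil(sqrt(10)) == 4, so the block minima cover
# seq[0:4], seq[4:8], seq[8:10]. For query(2, 9): bs == ceil(2/4) == 1 and
# be == floor(9/4) == 2, so blocks[1:2] answers seq[4:8] in one lookup while
# the partial ends seq[2:4] and seq[8:9] are scanned directly.
seq = [5, 1, 4, 1, 5, 9, 2, 6, 5, 3]
rmq = RmqSquare(seq)
assert rmq.query(2, 9) == min(seq[2:9]) == 1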
{}[{}:{}])\"\n .format(expected, actual, seq, l, r))\n\n def test_time(self):\n seq = self.generate_list(100000, 100000)\n t0 = time.time()\n rmq = RmqSquare(seq)\n print(\"Prepare: {} ms\".format((time.time() - t0) * 1000))\n\n t0 = time.time()\n n = 50000\n for case_number in range(n):\n rmq.query(0, len(seq))\n print(\"1000 requests: {} ms\".format((time.time() - t0) * 1000 / n * 1000))\n\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"playground/py-rmq/rmq.py","file_name":"rmq.py","file_ext":"py","file_size_in_byte":2901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"78106967","text":"# Copyright 2017 TUBITAK B3LAB\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom keystoneauth1 import exceptions as ka_exceptions\nfrom keystoneclient.v3 import client as keystone_client\nfrom neutronclient.v2_0 import client as neutron_client\nfrom neutronclient.common import exceptions as n_exceptions\nfrom openstack import connection\nfrom openstack import exceptions as o_exceptions\nfrom openstack_user_manager import log\nfrom os_client_config import config as cloud_config\n\nLOG = log.get_logger()\n\nINITIAL_CREDIT_AMOUNT=250\n\n\nclass Opts(object):\n def __init__(self, cloud_name, debug=False):\n self.cloud = cloud_name\n self.debug = debug\n self.identity_api_version = '3'\n\n\nclass OpenstackUserManager:\n def __init__(self, config_name):\n opts = Opts(cloud_name=config_name)\n\n cc = cloud_config.OpenStackConfig()\n LOG.debug(\"defaults: %s\", cc.defaults)\n\n # clouds.yaml file should either be in the\n # current directory or\n # ~/.config/openstack directory or\n # /etc/openstack directory.\n cloud = cc.get_one_cloud(opts.cloud)\n LOG.debug(\"cloud cfg: %s\", cloud.config)\n\n # Create a context for a connection to the cloud provider\n self.conn = connection.from_config(cloud_config=cloud,\n options=opts)\n\n identity_api_version = cloud.config['identity_api_version']\n if identity_api_version != '3':\n LOG.error('This version of OpenStack User Management Library '\n 'only supports Identity version 3.')\n\n # We still need to use neutronclient until openstackclient\n # is able to add interface router, and keystoneclient\n # until openstackclient is able to grant roles to users\n self.neutron_conn = neutron_client.Client(\n session=cloud.get_session_client('network'))\n self.keystone_conn = keystone_client.Client(\n session=cloud.get_session_client('identity'))\n\n def check_username_availability(self,\n user_name):\n try:\n user = self.conn.identity.find_user(user_name)\n if user is not None:\n return False\n except ka_exceptions.NotFound:\n return True\n return True\n\n def check_projectname_availability(self,\n project_name):\n try:\n project = self.conn.identity.find_project(project_name)\n if project is not None:\n return False\n except ka_exceptions.NotFound:\n return True\n return True\n\n def create_project(self, description, project_name,\n properties, enabled=False):\n try:\n 
self.conn.identity.create_project(name=project_name,\n description=description,\n enabled=enabled)\n project = self.conn.identity.find_project(project_name)\n for key, value in properties.items():\n self.conn.identity.update_project(project,\n **{key: value})\n except ka_exceptions.ClientException as ex:\n LOG.error(\"Project not created. Error: \" + ex.message)\n return False\n\n return True\n\n def create_user(self, email, user_name, password, enabled=False):\n try:\n self.conn.identity.create_user(name=user_name,\n email=email,\n password=password,\n enabled=enabled)\n except ka_exceptions.ClientException as ex:\n LOG.error(\"User not created. Error: \" + ex.message)\n return False\n return True\n\n def init_billing_customer(self,\n project_name,\n customer_name,\n company_name,\n email):\n # NOTE: safirbilling service is not published on github yet\n return False\n\n def pair_user_with_project(self, user_name, project_name, role_name):\n try:\n user = self.conn.identity.find_user(user_name)\n project = self.conn.identity.find_project(project_name)\n role = self.conn.identity.find_role(role_name)\n self.keystone_conn.roles.grant(role,\n user=user,\n project=project)\n except ka_exceptions.ClientException as ex:\n LOG.error(\"User not paired with project. Error: \" +\n str(ex.message))\n return False\n except Exception as ex:\n LOG.error(\"User not paired with project. Error: \" +\n str(ex.message))\n return False\n return True\n\n def update_project_status(self, project_name, enabled):\n try:\n project = self.conn.identity.find_project(project_name)\n self.conn.identity.update_project(project=project,\n enabled=enabled)\n except ka_exceptions.ClientException as ex:\n LOG.error(\"Project status not updated. Error: \" + ex.message)\n return False\n return True\n\n def update_user_status(self, user_name, enabled):\n try:\n user = self.conn.identity.find_user(user_name)\n self.conn.identity.update_user(user=user,\n enabled=enabled)\n except ka_exceptions.ClientException as ex:\n LOG.error(\"User status not updated. Error: \" + ex.message)\n return False\n return True\n\n def update_user_password(self, user_name, password):\n try:\n user = self.conn.identity.find_user(user_name)\n self.conn.identity.update_user(user=user,\n password=password)\n except ka_exceptions.ClientException as ex:\n LOG.error(\"User password not updated. 
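A sketch of the onboarding flow the methods above are built for; the cloud name, user and project names, and the 'member' role are illustrative assumptions, not values from the file:

mgr = OpenstackUserManager("mycloud")  # must match an entry in clouds.yaml
if (mgr.check_projectname_availability("demo")
        and mgr.check_username_availability("alice")):
    mgr.create_project("Demo project", "demo", {}, enabled=False)
    mgr.create_user("alice@example.com", "alice", "s3cret", enabled=False)
    mgr.pair_user_with_project("alice", "demo", "member")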
Error: \" + ex.message)\n return False\n return True\n\n def init_network(self, project_name, external_network_name,\n dns_nameservers, subnet_cidr, subnet_gateway_ip):\n net_name = \"private\"\n subnet_name = \"private\"\n router_name = \"router\"\n try:\n project = self.conn.identity.find_project(project_name)\n\n # CREATE NETWORK\n net = self.conn.network.create_network(name=net_name,\n project_id=project.id,\n admin_state_up=True)\n\n # CREATE SUBNET\n subnet = self.conn.network.create_subnet(\n name=subnet_name,\n project_id=project.id,\n network_id=net.id,\n gateway_ip=subnet_gateway_ip,\n enable_dhcp=True,\n ip_version=4,\n cidr=subnet_cidr,\n dns_nameservers=dns_nameservers)\n\n # CREATE ROUTER\n # router = self.conn.network.create_router(\n # name=router_name,\n # tenant_id=project.id,\n # admin_state_up=True)\n ext_net_id = [e for e in self.neutron_conn.list_networks(\n )['networks'] if\n e['name'] == external_network_name][0]['id']\n router_param = {\n 'name': router_name,\n 'admin_state_up': True,\n 'external_gateway_info': {\"network_id\": ext_net_id},\n 'tenant_id': project.id}\n router = self.neutron_conn.create_router(\n {'router': router_param})\n\n self.neutron_conn.add_interface_router(\n router['router']['id'],\n {'subnet_id': subnet.id,\n 'tenant_id': project.id})\n\n except n_exceptions.NeutronException as ex:\n LOG.error(\"Project's initial network could not be defined. \"\n \"Error: \" + str(ex.message))\n return False\n except ka_exceptions.ClientException as ex:\n LOG.error(\"Project's initial network could not be defined. \"\n \"Error: \" + str(ex.message))\n return False\n return True\n\n def add_ssh_rule(self, project_name):\n try:\n project = self.conn.identity.find_project(project_name)\n default_sec_groups = self.conn.network.security_groups()\n\n print(default_sec_groups)\n sec_group_id = None\n for sec_group in default_sec_groups:\n if sec_group.project_id == project.id:\n sec_group_id = sec_group.id\n\n if sec_group_id is not None:\n self.conn.network.create_security_group_rule(\n security_group_id=sec_group_id,\n project_id=project.id,\n direction='ingress',\n remote_ip_prefix='0.0.0.0/0',\n protocol='TCP',\n port_range_max='22',\n port_range_min='22',\n ethertype='IPv4')\n except ka_exceptions.ClientException as ex:\n LOG.error(\"SSH rule not added. Error: \" + ex.message)\n return False\n return True\n","sub_path":"openstack_user_manager/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":9960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"419241226","text":"import sys\n\n\ndef main():\n hindu_line = \"पायथन एक अद्भुत प्रोग्रामिंग भाषा है जिसके माध्यम से मैं बहुत कुछ हासिल कर सकता हूं।\"\n\n print(type(hindu_line))\n # hindu_line is a string variable\n\n encoded_line = hindu_line.encode()\n print(\"The encoded version version will be\", encoded_line)\n print(type(encoded_line))\n\n return 0\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","sub_path":"week2/problem3.py","file_name":"problem3.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"298028780","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n\"\"\"\nThis module contains the framework for transforming points from\none coordinate system to another (e.g. equatorial to galactic). 
The\nimplementation is actually in individual coordinates in the\n`builtin_systems` module, while this module provides the framework and\nrelated utilities.\n\"\"\"\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nfrom ..extern import six\n\nimport heapq\nimport inspect\nimport subprocess\n\nfrom abc import ABCMeta, abstractmethod\nfrom collections import defaultdict\n\nimport numpy as np\n\n\n__all__ = ['StaticMatrixTransform', 'FunctionTransform',\n 'DynamicMatrixTransform', 'CompositeStaticMatrixTransform',\n 'static_transform_matrix', 'transform_function',\n 'dynamic_transform_matrix', 'coordinate_alias'\n ]\n\n\nclass TransformGraph(object):\n \"\"\"\n A graph representing the paths between coordinate systems.\n \"\"\"\n\n def __init__(self):\n self._graph = defaultdict(dict)\n self._clsaliases = {}\n\n self.invalidate_cache() # generates cache entries\n\n def add_transform(self, fromsys, tosys, transform):\n \"\"\"\n Add a new coordinate transformation to the graph.\n\n Parameters\n ----------\n fromsys : class\n The coordinate system *class* to start from\n tosys : class\n The coordinate system *class* to transform to\n transform : callable\n The transformation object. Should have call parameters compatible\n with `CoordinateTransform`.\n\n Raises\n ------\n TypeError\n If `fromsys` or `tosys` are not classes or `transform` is\n not callable.\n \"\"\"\n\n if not inspect.isclass(fromsys):\n raise TypeError('fromsys must be a class')\n if not inspect.isclass(tosys):\n raise TypeError('tosys must be a class')\n if not six.callable(transform):\n raise TypeError('transform must be callable')\n\n self._graph[fromsys][tosys] = transform\n self.invalidate_cache()\n\n def remove_transform(self, fromsys, tosys, transform):\n \"\"\"\n Removes a coordinate transform from the graph.\n\n Parameters\n ----------\n fromsys : class or None\n The coordinate system *class* to start from. If None,\n `transform` will be searched for and removed (`tosys` must\n also be None).\n tosys : class or None\n The coordinate system *class* to transform into. If None,\n `transform` will be searched for and removed (`fromsys` must\n also be None).\n transform : callable or None\n The transformation object to be removed or None. 
If None\n and `tosys` and `fromsys` are supplied, there will be no\n check to ensure the correct object is removed.\n \"\"\"\n if fromsys is None or tosys is None:\n if not (tosys is None and fromsys is None):\n raise ValueError('fromsys and tosys must both be None if either are')\n if transform is None:\n raise ValueError('cannot give all Nones to remove_transform')\n\n # search for the requested transform by brute force and remove it\n for a in self._graph:\n agraph = self._graph[a]\n for b in agraph:\n if b is transform:\n del agraph[b]\n break\n else:\n raise ValueError('Could not find transform {0} in the '\n 'graph'.format(transform))\n\n else:\n if transform is None:\n self._graph[fromsys].pop(tosys, None)\n else:\n curr = self._graph[fromsys].get(tosys, None)\n if curr is transform:\n self._graph[fromsys].pop(tosys)\n else:\n raise ValueError('Current transform from {0} to {1} is not '\n '{2}'.format(fromsys, tosys, transform))\n self.invalidate_cache()\n\n def find_shortest_path(self, fromsys, tosys):\n \"\"\"\n Computes the shortest distance along the transform graph from\n one system to another.\n\n Parameters\n ----------\n fromsys : class\n The starting coordinate system.\n tosys : class\n The starting coordinate system.\n\n Returns\n -------\n path : list of classes or None\n The path from `fromsys` to `tosys` as an in-order sequence\n of classes. This list includes *both* `fromsys` and\n `tosys`. Is None if there is no possible path.\n distance : number\n The total distance/priority from `fromsys` to `tosys`. If\n priorities are not set this is the number of trasnforms\n needed. Is `inf` if there is no possible path.\n \"\"\"\n\n inf = float('inf')\n\n # special-case the 0-path and 1-path\n if tosys is fromsys:\n return [tosys], 0\n elif tosys in self._graph[fromsys]:\n t = self._graph[fromsys][tosys]\n return [fromsys, tosys], float(t.priority if hasattr(t, 'priority') else 1)\n\n if fromsys in self._shortestpaths:\n # already have a cached result\n fpaths = self._shortestpaths[fromsys]\n if tosys in fpaths:\n return fpaths[tosys]\n else:\n return None, inf\n\n # use Dijkstra's algorithm to find shortest path in all other cases\n\n nodes = []\n # first make the list of nodes\n for a in self._graph:\n if a not in nodes:\n nodes.append(a)\n for b in self._graph[a]:\n if b not in nodes:\n nodes.append(b)\n\n if fromsys not in nodes or tosys not in nodes:\n # fromsys or tosys are isolated or not registered, so there's\n # certainly no way to get from one to the other\n return None, inf\n\n edgeweights = {}\n # construct another graph that is a dict of dicts of priorities\n # (used as edge weights in Dijkstra's algorithm)\n for a in self._graph:\n edgeweights[a] = aew = {}\n agraph = self._graph[a]\n for b in agraph:\n aew[b] = float(agraph[b].priority if hasattr(agraph[b], 'priority') else 1)\n\n # entries in q are [distance, count, nodeobj, pathlist]\n # count is needed because in py 3.x, tie-breaking fails on the nodes.\n # this way, insertion order is preserved if the weights are the same\n q = [[inf, i, n, []] for i, n in enumerate(nodes) if n is not fromsys]\n q.insert(0, [0, -1, fromsys, []])\n\n # this dict will store the distance to node from `fromsys` and the path\n result = {}\n\n # definitely starts as a valid heap because of the insert line; from the\n # node to itself is always the shortest distance\n while len(q) > 0:\n d, orderi, n, path = heapq.heappop(q)\n\n if d == inf:\n # everything left is unreachable from fromsys, just copy them to\n # the results and jump 
out of the loop\n result[n] = (None, d)\n for d, orderi, n, path in q:\n result[n] = (None, d)\n break\n else:\n result[n] = (path, d)\n path.append(n)\n if n not in edgeweights:\n # this is a system that can be transformed to, but not from.\n continue\n for n2 in edgeweights[n]:\n if n2 not in result: # already visited\n # find where n2 is in the heap\n for i in range(len(q)):\n if q[i][2] == n2:\n break\n else:\n raise ValueError('n2 not in heap - this should be impossible!')\n\n newd = d + edgeweights[n][n2]\n if newd < q[i][0]:\n q[i][0] = newd\n q[i][3] = list(path)\n heapq.heapify(q)\n\n # cache for later use\n self._shortestpaths[fromsys] = result\n return result[tosys]\n\n def invalidate_cache(self):\n \"\"\"\n Invalidates the cache that stores optimizations for traversing the\n transform cache. This is called automatically when transforms\n are added or removed, but will need to be called manually if\n weights on transforms are modified inplace.\n \"\"\"\n self._shortestpaths = {}\n\n # TODO: cache composites so they don't need to be generated every time?\n def get_transform(self, fromsys, tosys):\n \"\"\"\n Determines or generates a transformation between two coordinate\n systems.\n\n Parameters\n ----------\n fromsys : class\n The coordinate system *class* to start from\n tosys : class\n The coordinate system *class* to transform into.\n\n Returns\n -------\n trans : `CoordinateTransform` or None\n If there is a path from `fromsys` to `tosys`, this is a transform\n object for that path. If None, no path could be found.\n \"\"\"\n if tosys in self._graph[fromsys]:\n return self._graph[fromsys][tosys]\n else:\n path, distance = self.find_shortest_path(fromsys, tosys)\n\n if path is None:\n return None\n\n transforms = []\n currsys = fromsys\n for p in path[1:]: # first element is fromsys so we skip it\n transforms.append(self._graph[currsys][p])\n currsys = p\n\n # TODO: collapse \"runs\" of statics?\n if all([isinstance(p, StaticMatrixTransform) for p in path]):\n return CompositeStaticMatrixTransform(fromsys, tosys, transforms, register=False)\n else:\n return CompositeTransform(fromsys, tosys, transforms, register=False)\n\n def add_coord_name(self, name, coordcls):\n \"\"\"\n Adds an alias for a coordinate, primarily for allowing\n attribute-style access of coordinate transformations (e.g.,\n ``coordasgal = coord.galactic``).\n\n Parameters\n ----------\n name : str\n The alias for the coordinate class. Should be a valid\n python identifier.\n coordcls : class\n The class object to be referenced by this name.\n\n Raises\n ------\n ValueError\n If `coordcls` already has a name assigned.\n \"\"\"\n for key, val in six.iteritems(self._clsaliases):\n if val == coordcls:\n msg = 'Coordinate class {0} already has a name: {1}'\n raise ValueError(msg.format(coordcls, key))\n self._clsaliases[name] = coordcls\n\n def lookup_name(self, name):\n \"\"\"\n Tries to locate the coordinate class with the provided alias.\n\n Parameters\n ----------\n name : str\n The alias to look up.\n\n Returns\n -------\n coordcls\n The coordinate class corresponding to the `name` or None if\n no such class exists.\n \"\"\"\n return self._clsaliases.get(name, None)\n\n def get_aliases(self):\n \"\"\"\n Returns all available transform aliases. 
They will all be\n valid arguments to `lookup_name`.\n\n Returns\n -------\n nms : list\n The aliases for coordinate systems.\n \"\"\"\n return list(six.iterkeys(self._clsaliases))\n\n def to_dot_graph(self, priorities=True, addnodes=[], savefn=None,\n savelayout='plain', saveformat=None):\n \"\"\"\n Converts this transform graph to the graphviz_ DOT format, and\n optionally saves it (requires graphviz_ be installed and on your\n path).\n\n Parameters\n ----------\n priorities : bool\n If True, show the priority values for each transform. Otherwise,\n they will not be included in the graph.\n addnodes : sequence of str\n Additional coordinate systems to add (this can include systems\n already in the transform graph, but they will only appear once).\n savefn : None or str\n The file name to save this graph to or None to not save\n to a file.\n savelayout : str\n The graphviz program to use to lay out the graph (see\n graphviz_ for details) or 'plain' to just save the DOT graph\n content. Ignored if `savefn` is None.\n saveformat : str\n The graphviz output format. (e.g. the ``-Txxx`` option for\n the command line program - see graphviz docs for details).\n Ignored if `savefn` is None.\n\n Returns\n -------\n dotgraph : str\n A string with the DOT format graph.\n\n .. _graphviz: http://www.graphviz.org/\n \"\"\"\n\n nodes = []\n # find the node names\n for a in self._graph:\n if a not in nodes:\n nodes.append(a)\n for b in self._graph[a]:\n if b not in nodes:\n nodes.append(b)\n for node in addnodes:\n if node not in nodes:\n nodes.append(node)\n nodenames = []\n invclsaliases = dict([(v, k) for k, v in six.iteritems(self._clsaliases)])\n for n in nodes:\n if n in invclsaliases:\n nodenames.append('{0} [shape=oval label=\"{0}\\\\n`{1}`\"]'.format(n.__name__, invclsaliases[n]))\n else:\n nodenames.append(n.__name__ + '[ shape=oval ]')\n\n edgenames = []\n # Now the edges\n for a in self._graph:\n agraph = self._graph[a]\n for b in agraph:\n pri = agraph[b].priority if hasattr(agraph[b], 'priority') else 1\n edgenames.append((a.__name__, b.__name__, pri))\n\n # generate simple dot format graph\n lines = ['digraph AstropyCoordinateTransformGraph {']\n lines.append('; '.join(nodenames) + ';')\n for enm1, enm2, weights in edgenames:\n labelstr = '[ label = \"{0}\" ]'.format(weights) if priorities else ''\n lines.append('{0} -> {1}{2};'.format(enm1, enm2, labelstr))\n lines.append('')\n lines.append('overlap=false')\n lines.append('}')\n dotgraph = '\\n'.join(lines)\n\n if savefn is not None:\n if savelayout == 'plain':\n with open(savefn, 'w') as f:\n f.write(dotgraph)\n else:\n args = [savelayout]\n if saveformat is not None:\n args.append('-T' + saveformat)\n proc = subprocess.Popen(args, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = proc.communicate(dotgraph)\n if proc.returncode != 0:\n raise IOError('problem running graphviz: \\n' + stderr)\n\n with open(savefn, 'w') as f:\n f.write(stdout)\n\n return dotgraph\n\n def to_networkx_graph(self):\n \"\"\"\n Converts this transform graph into a networkx graph.\n\n .. 
note::\n You must have the `networkx`\n package installed for this to work.\n\n Returns\n -------\n nxgraph : `networkx.Graph`\n This `TransformGraph` as a `networkx.Graph`.\n \"\"\"\n import networkx as nx\n\n nxgraph = nx.Graph()\n\n # first make the nodes\n for a in self._graph:\n if a not in nxgraph:\n nxgraph.add_node(a)\n for b in self._graph[a]:\n if b not in nxgraph:\n nxgraph.add_node(b)\n\n # Now the edges\n for a in self._graph:\n agraph = self._graph[a]\n for b in agraph:\n pri = agraph[b].priority if hasattr(agraph[b], 'priority') else 1\n nxgraph.add_edge(a, b, weight=pri)\n\n return nxgraph\n\n\n# The primary transform graph for astropy coordinates\nmaster_transform_graph = TransformGraph()\n\n\n@six.add_metaclass(ABCMeta)\nclass CoordinateTransform(object):\n \"\"\"\n An object that transforms a coordinate from one system to another.\n Subclasses must implement `__call__` with the provided signature.\n They should also call this superclass's `__init__` in their\n `__init__`.\n \"\"\"\n\n def __init__(self, fromsys, tosys, register=True):\n self.fromsys = fromsys\n self.tosys = tosys\n\n if register:\n # this will do the type-checking\n self.register()\n else:\n if not inspect.isclass(fromsys) or not inspect.isclass(tosys):\n raise TypeError('fromsys and tosys must be classes')\n\n def register(self):\n \"\"\"\n Add this transformation to the master transformation graph, replacing\n anything already connecting these two coordinates.\n \"\"\"\n master_transform_graph.add_transform(self.fromsys, self.tosys, self)\n\n def unregister(self):\n \"\"\"\n Remove this transformation from the master transformation graph.\n\n Raises\n ------\n ValueError\n If this is not currently in the transform graph.\n \"\"\"\n master_transform_graph.remove_transform(self.fromsys, self.tosys, self)\n\n @abstractmethod\n def __call__(self, fromcoord):\n \"\"\"\n Accepts the provided coordinate object and yields a new coordinate\n object with the transform applied.\n \"\"\"\n\n\n# TODO: array: specify in the docs how arrays should be dealt with\nclass FunctionTransform(CoordinateTransform):\n \"\"\"\n A coordinate transformation defined by a function that simply\n accepts a coordinate object and returns the transformed coordinate\n object.\n\n Parameters\n ----------\n fromsys : class\n The coordinate system *class* to start from.\n tosys : class\n The coordinate system *class* to transform into.\n func : callable\n The transformation function.\n copyobstime : bool\n If True (default) the value of the `_obstime` attribute will be copied\n to the newly-produced coordinate.\n priority : number\n The priority of this transform when finding the shortest\n coordinate transform path - large numbers are lower priorities.\n register : bool\n Determines if this transformation will be registered in the\n astropy master transform graph.\n\n Raises\n ------\n TypeError\n If `func` is not callable.\n ValueError\n If `func` cannot accept one argument.\n\n\n \"\"\"\n def __init__(self, fromsys, tosys, func, copyobstime=True, priority=1,\n register=True):\n from inspect import getargspec\n\n if not six.callable(func):\n raise TypeError('func must be callable')\n\n try:\n argspec = getargspec(func)\n if (len(argspec[0]) - len(argspec[3]) != 1) and not argspec[1]:\n raise ValueError('provided function does not accept a single argument')\n except TypeError:\n pass # hopefully this person knows what they're doing...\n\n self.func = func\n self.priority = priority\n self.copyobstime = copyobstime\n\n 
super(FunctionTransform, self).__init__(fromsys, tosys, register)\n\n def __call__(self, fromcoord):\n res = self.func(fromcoord)\n if not isinstance(res, self.tosys):\n raise TypeError('the transformation function yielded {0} but '\n 'should have been of type {1}'.format(res, self.tosys))\n\n if self.copyobstime:\n # copy over the obstime\n if hasattr(fromcoord, '_obstime') and hasattr(res, '_obstime'):\n res._obstime = fromcoord._obstime\n\n return res\n\n\nclass StaticMatrixTransform(CoordinateTransform):\n \"\"\"\n A coordinate transformation defined as a 3 x 3 cartesian\n transformation matrix.\n\n Parameters\n ----------\n fromsys : class\n The coordinate system *class* to start from.\n tosys : class\n The coordinate system *class* to transform into.\n matrix : array-like\n A 3 x 3 matrix for transforming 3-vectors. In most cases will\n be unitary (although this is not strictly required).\n priority : number\n The priority of this transform when finding the shortest\n coordinate transform path - large numbers are lower priorities.\n\n Raises\n ------\n ValueError\n If the matrix is not 3 x 3\n\n \"\"\"\n def __init__(self, fromsys, tosys, matrix, priority=1, register=True):\n self.matrix = np.array(matrix)\n if self.matrix.shape != (3, 3):\n raise ValueError('Provided matrix is not 3 x 3')\n self.priority = priority\n super(StaticMatrixTransform, self).__init__(fromsys, tosys, register)\n\n def __call__(self, fromcoord):\n c = fromcoord.cartesian\n v = c.reshape((3, c.size // 3))\n v2 = np.dot(np.asarray(self.matrix), v)\n subshape = c.shape[1:]\n x = v2[0].reshape(subshape)\n y = v2[1].reshape(subshape)\n z = v2[2].reshape(subshape)\n\n newunit = None if fromcoord.distance is None else fromcoord.distance.unit\n result = self.tosys(x=x, y=y, z=z, unit=newunit)\n\n # copy over the observation time\n if hasattr(fromcoord, '_obstime') and hasattr(result, '_obstime'):\n result._obstime = fromcoord._obstime\n\n return result\n\n\nclass CompositeStaticMatrixTransform(StaticMatrixTransform):\n \"\"\"\n A `StaticMatrixTransform` constructed by combining a sequence of matrices\n together. 
See `StaticMatrixTransform` for syntax details.\n\n Parameters\n ----------\n fromsys : class\n The coordinate system *class* to start from.\n tosys : class\n The coordinate system *class* to transform into.\n matrices : sequence of array-like\n A sequence of 3 x 3 cartesian transformation matrices.\n priority : number\n The priority of this transform when finding the shortest\n coordinate transform path - large numbers are lower priorities.\n\n \"\"\"\n def __init__(self, fromsys, tosys, matricies, priority=1, register=True):\n self.matricies = [np.array(m) for m in matricies]\n for m in self.matricies:\n if m.shape != (3, 3):\n raise ValueError('One of the provided matrices is not 3 x 3')\n\n matrix = np.array(self.matricies[0])\n if len(self.matricies) > 1:\n for m in self.matricies[1:]:\n matrix = np.dot(np.asarray(matrix), m)\n\n super(CompositeStaticMatrixTransform, self).__init__(fromsys,\n tosys, matrix, priority, register)\n\n\nclass DynamicMatrixTransform(CoordinateTransform):\n \"\"\"\n A coordinate transformation specified as a function that yields a\n 3 x 3 cartesian transformation matrix.\n\n Parameters\n ----------\n fromsys : class\n The coordinate system *class* to start from.\n tosys : class\n The coordinate system *class* to transform into.\n matrix_func : callable\n A callable that accepts a coordinate object and yields the 3 x 3\n matrix that converts it to the new coordinate system.\n priority : number\n The priority of this transform when finding the shortest\n coordinate transform path - large numbers are lower priorities.\n\n Raises\n ------\n TypeError\n If `matrix_func` is not callable\n\n \"\"\"\n def __init__(self, fromsys, tosys, matrix_func, priority=1, register=True):\n if not six.callable(matrix_func):\n raise TypeError('matrix_func is not callable')\n self.matrix_func = matrix_func\n self.priority = priority\n super(DynamicMatrixTransform, self).__init__(fromsys, tosys, register)\n\n def __call__(self, fromcoord):\n c = fromcoord.cartesian\n v = c.reshape((3, c.size // 3))\n v2 = np.dot(np.asarray(self.matrix_func(fromcoord)), v)\n subshape = c.shape[1:]\n x = v2[0].reshape(subshape)\n y = v2[1].reshape(subshape)\n z = v2[2].reshape(subshape)\n\n newunit = None if fromcoord.distance is None else fromcoord.distance.unit\n result = self.tosys(x=x, y=y, z=z, unit=newunit)\n\n # copy over the observation time\n if hasattr(fromcoord, '_obstime') and hasattr(result, '_obstime'):\n result._obstime = fromcoord._obstime\n\n return result\n\n\nclass CompositeTransform(CoordinateTransform):\n \"\"\"\n A transformation constructed by combining a sequence of `CoordinateTransform`s\n together. 
The constituent transforms are applied in order.\n\n Parameters\n ----------\n fromsys : class\n The coordinate system *class* to start from.\n tosys : class\n The coordinate system *class* to transform into.\n transforms : sequence of `CoordinateTransform`s\n A sequence of transformations to apply in sequence.\n priority : number\n The priority of this transform when finding the shortest\n coordinate transform path - large numbers are lower priorities.\n\n \"\"\"\n def __init__(self, fromsys, tosys, transforms, priority=1, register=True):\n self.transforms = transforms\n super(CompositeTransform, self).__init__(fromsys, tosys, register)\n\n def __call__(self, fromcoord):\n coord = fromcoord\n for t in self.transforms:\n coord = t(coord)\n return coord\n\n\n#<------------function decorators for actual practical use--------------------->\ndef transform_function(fromsys, tosys, copyobstime=True, priority=1):\n \"\"\"\n A function decorator for defining transformations between coordinate\n systems.\n\n .. note::\n If decorating a static method of a class, ``@staticmethod``\n should be added *above* this decorator.\n\n Parameters\n ----------\n fromsys : class\n The coordinate system this function starts from.\n tosys : class\n The coordinate system this function results in.\n copyobstime : bool\n If True (default) the value of the `_obstime` attribute will be\n copied to the newly-produced coordinate.\n priority : number\n The priority of this transform when finding the shortest\n coordinate transform path - large numbers are lower priorities.\n\n \"\"\"\n def deco(func):\n # this doesn't do anything directly with the transform because\n #``register=True`` stores it in the transform graph automatically\n FunctionTransform(fromsys, tosys, func, copyobstime=copyobstime,\n priority=priority, register=True)\n return func\n return deco\n\n\ndef static_transform_matrix(fromsys, tosys, priority=1):\n \"\"\"\n A function decorator for defining transformations between coordinate\n systems using a matrix.\n\n The decorated function should accept *no* arguments and yield a\n 3 x 3 matrix.\n\n .. note::\n If decorating a static method of a class, ``@staticmethod``\n should be added *above* this decorator.\n\n Parameters\n ----------\n fromsys : class\n The coordinate system this function starts from.\n tosys : class\n The coordinate system this function results in.\n priority : number\n The priority of this transform when finding the shortest\n coordinate transform path - large numbers are lower priorities.\n \"\"\"\n def deco(matfunc):\n StaticMatrixTransform(fromsys, tosys, matfunc(), priority, register=True)\n return matfunc\n return deco\n\n\ndef dynamic_transform_matrix(fromsys, tosys, priority=1):\n \"\"\"\n A function decorator for defining transformations between coordinate\n systems using a function that yields a matrix.\n\n The decorated function should accept a single argument, the\n coordinate object to be transformed, and should return a 3 x 3\n matrix.\n\n .. 
note::\n If decorating a static method of a class, ``@staticmethod``\n should be added *above* this decorator.\n\n Parameters\n ----------\n fromsys : class\n The coordinate system this function starts from.\n tosys : class\n The coordinate system this function results in.\n priority : number\n The priority of this transform when finding the shortest\n coordinate transform path - large numbers are lower priorities.\n \"\"\"\n def deco(matfunc):\n DynamicMatrixTransform(fromsys, tosys, matfunc, priority, register=True)\n return matfunc\n return deco\n\n\ndef coordinate_alias(name, coordcls=None):\n \"\"\"\n Gives a short name to this coordinate system, allowing other coordinate\n objects to convert to this one using attribute-style access.\n\n Parameters\n ----------\n name : str\n The short alias to use for this coordinate class. Should be a\n valid python identifier.\n coordcls : class or None\n Either the coordinate class to register or None to use this as a\n decorator.\n\n Examples\n --------\n For use with a class already defined, do::\n\n coordinate_alias('fancycoords', MyFancyCoordinateClass)\n\n To use as a decorator, do::\n\n @coordinate_alias('fancycoords')\n class MyFancyCoordinateClass(SphericalCoordinatesBase):\n ...\n\n \"\"\"\n if coordcls is None:\n def deco(cls):\n master_transform_graph.add_coord_name(name, cls)\n return cls\n return deco\n else:\n master_transform_graph.add_coord_name(name, coordcls)\n","sub_path":"astropy/coordinates/transformations.py","file_name":"transformations.py","file_ext":"py","file_size_in_byte":29910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"35609401","text":"\nclass Argument:\n def __init__(self, name, **kw):\n self.name = name\n self.value = kw.get(\"value\", '')\n self.type = kw.get('type', 'string')\n self.pos = kw.get('pos', -1)\n self.descr = kw.get('descr', '')\n self.encode = kw.get('encode', False)\n self.optional = kw.get('optional', False)\n\nclass Request:\n def __init__(self, name, request_name, display_name, type, *args):\n self.name = name\n self.request_name = request_name\n self.display_name = display_name\n assert type in ['ocip', 'xsi']\n self.type = type\n self.arguments = []\n self.arguments.extend(args)\n\nclass OCIPRequest(Request):\n\n def __init__(self, name, request_name, display_name, *args):\n Request.__init__(self, name, request_name, display_name, 'ocip', *args)\n\nclass XSIRequest(Request):\n\n def __init__(self, name, request_name, display_name, *args):\n Request.__init__(self, name, request_name, display_name, 'xsi', *args)\n\ndef add_ocip_requests(requests, request_names):\n\n # used to specify the requests order\n request_names.extend([\"group_device_modify_config_file\",\n \"group_device_rebuild_config_file\",\n \"group_device_get_custom_tags\",\n \"group_device_add_custom_tag\",\n \"group_device_delete_custom_tag\",\n \"group_access_device_get\",\n \"group_access_device_add\",\n \"group_access_device_delete\",\n \"group_get_assigned_domains\",\n \"user_add\",\n \"user_get\",\n \"user_delete\",\n \"user_get_data\",\n \"user_get_primary_device\",\n \"user_get_network_conferencing_request\",\n \"user_get_assigned_services\",\n\n #\"user_assign_service\",\n #\"user_unassign_service\",\n \"user_get_security_classification\",\n #\"user_set_security_classification\",\n ])\n\n # group_device_get_custom_tags\n requests['group_device_get_custom_tags'] = OCIPRequest(\n 'group_device_get_custom_tags',\n 'GroupAccessDeviceCustomTagGetListRequest',\n 'Group / 
Get custom tags',\n Argument(\"deviceName\", pos=1),\n )\n # group_device_add_custom_tag\n requests['group_device_add_custom_tag'] = OCIPRequest(\n 'group_device_add_custom_tag',\n 'GroupAccessDeviceCustomTagAddRequest',\n 'Group / Add custom tag',\n Argument(\"deviceName\", pos=0),\n Argument(\"tagName\", pos=1),\n Argument(\"tagValue\", pos=2)\n )\n # group_device_delete_custom_tag\n requests['group_device_delete_custom_tag'] = OCIPRequest(\n 'group_device_delete_custom_tag',\n 'GroupAccessDeviceCustomTagDeleteListRequest',\n 'Group / Delete custom tag',\n Argument(\"deviceName\", pos=0),\n Argument(\"tagName\", pos=1)\n )\n # group_device_modify_config_file\n requests['group_device_modify_config_file'] = OCIPRequest(\n 'group_device_modify_config_file',\n 'GroupAccessDeviceFileModifyRequest14sp8',\n 'Config / Modify config file',\n Argument(\"deviceName\", pos=0),\n Argument(\"fileSource\", value='Custom', pos=1, descr=\"'Default' | 'Manual' | 'Custom'\"),\n Argument(\"fileContent\", type=\"text\", pos=2, encode=True),\n Argument(\"fileFormat\", value='config.xml', pos=3),\n Argument(\"extendedCaptureEnabled\", pos=4, value=\"false\", descr=\"true | false\")\n )\n # group_device_rebuild_config_file\n requests['group_device_rebuild_config_file'] = OCIPRequest(\n 'group_device_rebuild_config_file',\n 'GroupCPEConfigRebuildDeviceConfigFileRequest',\n 'Config / Rebuild config file',\n Argument(\"deviceName\", pos=0),\n )\n # access device\n requests['group_access_device_add'] = OCIPRequest(\n 'group_access_device_add',\n 'GroupAccessDeviceAddRequest14',\n 'Group / Add access device',\n Argument(\"deviceName\", pos=0),\n Argument(\"deviceType\", pos=1),\n Argument(\"userName\", pos=2),\n Argument(\"password\", pos=3),\n )\n requests['group_access_device_get'] = OCIPRequest(\n 'group_access_device_get',\n 'GroupAccessDeviceGetRequest18sp1',\n 'Group / Get access device',\n Argument(\"deviceName\", pos=0),\n )\n requests['group_access_device_delete'] = OCIPRequest(\n 'group_access_device_delete',\n 'GroupAccessDeviceDeleteRequest',\n 'Group / Delete access device',\n Argument(\"deviceName\", pos=0),\n )\n requests['group_get_assigned_domains'] = OCIPRequest(\n 'group_get_assigned_domains',\n 'GroupDomainGetAssignedListRequest',\n 'Group / Get assigned domains'\n )\n requests['group_get_available_numbers'] = OCIPRequest(\n 'group_get_available_numbers',\n 'GroupDnGetAvailableListRequest',\n 'Group / Get available numbers'\n )\n requests['user_add'] = OCIPRequest(\n 'user_add',\n 'UserAddRequest17sp4',\n 'User / Add',\n Argument(\"userId\", pos=0),\n Argument(\"lastName\", pos=1),\n Argument(\"firstName\", pos=2),\n Argument(\"password\", pos=3),\n Argument(\"callingLineIdLastName\", pos=4, optional=True, descr=\"optional\", value=None),\n Argument(\"callingLineIdFirstName\", pos=5, optional=True, descr=\"optional\", value=None),\n Argument(\"phoneNumber\", pos=6, optional=True, descr=\"optional\", value=None)\n )\n\n # functions with single user_id argument\n for name in [\"user_get_network_conferencing_request\",\n \"user_get_assigned_services\",\n \"user_get_security_classification\",\n \"user_get\",\n \"user_get_data\",\n \"user_get_primary_device\",\n \"user_delete\",\n ]:\n requests[name] = OCIPRequest(\n name,\n '',\n \"User / %s\" % name[5:].replace('_', ' ').capitalize(),\n Argument(\"userId\", pos=0)\n )\n\ndef add_xsi_requests(requests, request_names):\n\n # used to specify the requests order\n request_names.extend([\"get_dm_config\",\n \"get_device_name_by_type\",\n 
\"get_directory_data\",\n \"get_provisioned_devices\",\n \"get_calls\",\n \"hangup_calls\",\n \"get_call_logs\",\n \"delete_call_logs\",\n \"get_conference_calls\",\n \"hangup_conference_calls\",\n \"get_moh\",\n \"set_moh\",\n \"get_dnd\",\n \"set_dnd\",\n #\"get_call_forwarding\",\n #\"set_call_forwarding\",\n #\"remove_call_forwards\",\n \"get_call_recording_mode\",\n #\"set_call_recording_mode\",\n \"get_remote_office\",\n #\"set_remote_office\",\n #\"get_simultaneous_ring\",\n #\"set_simultaneous_ring\",\n \"remove_simultaneous_ring\",\n \"get_broadworks_anywhere\",\n #\"get_broadworks_anywhere_location\",\n #\"delete_broadworks_anywhere_location\",\n \"remove_broadworks_anywhere\",\n \"get_broadworks_mobility\",\n \"get_anonymous_call_rejection\",\n \"set_anonymous_call_rejection\",\n \"get_call_waiting\",\n \"set_call_waiting\",\n \"get_automatic_callback\",\n \"set_automatic_callback\",\n \"get_block_my_caller_id\",\n \"set_block_my_caller_id\",\n \"set_imp\",\n \"get_pn_registrations\",\n #\"delete_pn_registration\",\n \"delete_pn_registrations\",\n ])\n\n requests['get_device_name_by_type'] = XSIRequest(\n 'get_device_name_by_type',\n 'profile/device',\n 'Get device name by type',\n Argument(\"type\", pos=0),\n )\n requests['get_dm_config'] = XSIRequest(\n 'get_dm_config',\n 'profile/device',\n 'Get config file',\n Argument(\"deviceType\", pos=-1),\n )\n requests['set_dnd'] = XSIRequest(\n 'set_dnd',\n 'profile/device',\n 'Set dnd',\n Argument(\"enabled\", type=\"bool\", pos=-1, value={\"checked\": \"off\"}),\n Argument(\"ringSplash\", type=\"bool\", pos=-1, value={\"checked\": \"off\"})\n )\n # functions without arguments\n for name in [\"set_moh\",\n \"set_anonymous_call_rejection\",\n \"set_call_waiting\",\n \"set_automatic_callback\",\n \"set_block_my_caller_id\",\n \"set_imp\",]:\n requests[name] = XSIRequest(\n name,\n '',\n name.replace('_', ' ').capitalize(),\n Argument(\"enabled\", type=\"bool\", pos=-1, value={\"checked\":\"on\"})\n )\n # functions without arguments\n for name in [\"get_directory_data\",\n \"get_provisioned_devices\",\n \"get_calls\",\n \"get_conference_calls\",\n \"hangup_calls\",\n \"hangup_conference_calls\",\n \"get_call_logs\",\n \"delete_call_logs\",\n \"get_call_recording_mode\",\n \"get_remote_office\",\n \"remove_simultaneous_ring\",\n \"get_broadworks_anywhere\",\n \"remove_broadworks_anywhere\",\n \"get_broadworks_mobility\",\n \"get_anonymous_call_rejection\",\n \"get_call_waiting\",\n \"get_automatic_callback\",\n \"get_block_my_caller_id\",\n \"get_pn_registrations\",\n \"delete_pn_registrations\",\n \"get_moh\",\n \"get_dnd\"]:\n requests[name] = XSIRequest(\n name,\n '',\n name.replace('_', ' ').capitalize())","sub_path":"srvreq/srvreq/model/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":10308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"435949090","text":"##STOPWATCH 3.0\r\n##\r\n##CREATED BY SIDDHARTH KANNAN\r\n##\r\n##WRITTEN ON PYTHON 2.7 AND TKINTER 8.5\r\n##\r\n##OS:WINDOWS XP SP 3\r\n\r\n\r\n## This program is free software. It comes without any warranty, to\r\n## the extent permitted by applicable law. You can redistribute it\r\n## and/or modify it under the terms of the Do What The Fuck You Want\r\n## To Public License, Version 2, as published by Sam Hocevar. 
See\r\n## http://www.wtfpl.net/ for more details.\r\n\r\n\r\nfrom Tkinter import *\r\nfrom time import *\r\nfrom tkFont import *\r\nimport tkMessageBox\r\nimport tkSimpleDialog\r\nimport datetime\r\nnow = datetime.datetime.now()\r\n\r\nFRAME_WIDTH,FRAME_HEIGHT = 600,700 \r\n\r\nclass stopwatch(object):\r\n    def __init__(self):\r\n        self.window = Tk() ##The main window instance\r\n        self.window.title(\"STOPWATCH\")\r\n\r\n        \r\n        self.firstTime = True ##Variable keeps track of whether this is the first time that the application is being run\r\n\r\n        ##Some fonts for use inside\r\n        self.small = Font(family='Helvetica',size=11)\r\n        self.medium = Font(family='Helvetica',size=15)\r\n        self.big = Font(family='Helvetica',size=24)\r\n        self.veryBig = Font(family='Helvetica',size=72)\r\n\r\n        ##Variables taking care of the check buttons\r\n\r\n        self.shown = BooleanVar()\r\n\r\n        self.shown.set(True) ##Pop up shown variable\r\n\r\n        self.timeShown = BooleanVar()\r\n\r\n        self.timeShown.set(True) ##The timing is shown or not\r\n\r\n        self.quitted = False\r\n\r\n        self.initFrame()\r\n\r\n    def createMenu(self,event=None):\r\n\r\n        menubar = Menu(self.window)\r\n\r\n        filemenu = Menu(menubar,tearoff = 0)\r\n\r\n        filemenu.add_command(label='Start',command=self.startrunning,accelerator='S')\r\n        filemenu.add_command(label='Stop',command=self.stoprunning,accelerator='E')\r\n        filemenu.add_command(label='Lap',command=self.endlap,accelerator='L')\r\n        filemenu.add_command(label='Reset',command=self.reset,accelerator='R')\r\n        filemenu.add_command(label='Quit',command=self.quitwin,accelerator='Escape')\r\n\r\n        optionsmenu = Menu(menubar,tearoff=0) \r\n\r\n        optionsmenu.add_checkbutton(label='Show timing after the run is completed',command=self.togglePopUp,variable=self.shown,onvalue = True,offvalue = False)\r\n        optionsmenu.add_checkbutton(label='Show the stopwatch running',variable=self.timeShown,onvalue=True,offvalue=False)\r\n        \r\n        helpmenu = Menu(menubar,tearoff=0)\r\n\r\n        helpmenu.add_command(label='Help',command=self.showHelp)\r\n        helpmenu.add_command(label='About',command=self.showCredits)\r\n\r\n        menubar.add_cascade(label='File',menu=filemenu)\r\n        menubar.add_cascade(label='Options',menu=optionsmenu)\r\n        menubar.add_cascade(label='Help',menu=helpmenu)\r\n\r\n        self.window.config(menu=menubar)\r\n\r\n    def initFrame(self):\r\n\r\n        try:\r\n            self.frame.destroy()\r\n\r\n        except AttributeError:\r\n            pass\r\n        \r\n        self.frame = Frame(self.window,width=FRAME_WIDTH,height=FRAME_HEIGHT) ##The frame instance\r\n        self.frame.pack_propagate(0) ##Making sure that the window does not shrink\r\n        \r\n        self.frame.pack(fill=None)\r\n        self.initVariables()\r\n        self.initBindings()\r\n        self.createMenu()\r\n        self.initUI()\r\n\r\n    def initVariables(self,event=None):\r\n\r\n        ##VARIABLES:\r\n        \r\n        self.start = None\r\n        self.stop = None\r\n        self.timeConsumed = None\r\n        self.laps = []\r\n        self.startOfLap = None\r\n        self.endOfLap = None\r\n        self.counterOfLaps = 1\r\n\r\n    def initBindings(self,event=None):\r\n\r\n        w = self.window\r\n\r\n        w.bind('s',self.startrunning)\r\n        w.bind('e',self.stoprunning)\r\n        w.bind('r',self.reset)\r\n        w.bind('l',self.endlap)\r\n        w.bind('<Escape>',self.quitwin) ##escape key, matching the 'Escape' accelerator in the File menu\r\n\r\n    def initHelp(self):\r\n        f = self.frame\r\n\r\n        info = Message(f,text=\"You can use the buttons below or \\\r\nyou can use the following keyboard shortcuts to work with the stopwatch\\\r\n                     \\n\\nPress \\'S\\' to start running. \\\r\n                     \\nPress \\'E\\' to stop running. \\\r\n                     \\nPress \\'R\\' to reset the stopwatch. \\\r\n                     \\nPress \\'L\\' to end a lap. 
\\\r\n                     \\n\\nPress escape button to quit this stopwatch\\\r\n                     Please note that all the times generated are \\\r\n                     being stored in a file \\'timings.txt\\' from which you can see the timings later.\\n\\\r\n                     \\n\\nYou can see this help again by clicking on the help tab in the menu.\",\\\r\n                     font=self.medium)\r\n        info.pack()                      \r\n        \r\n    def initUI(self):\r\n\r\n        f = self.frame\r\n\r\n        if self.firstTime:\r\n            self.initHelp()\r\n            self.firstTime = False\r\n\r\n        start = Button(f,text='START',command=self.startrunning)\r\n        start.pack(side=\"top\")\r\n\r\n        stop =Button(f,text='STOP',command=self.stoprunning)\r\n        stop.pack(side=\"top\")\r\n\r\n        lap = Button(f,text='LAP',command=self.endlap)\r\n        lap.pack(side='top')\r\n\r\n        reset = Button(f,text=\"RESET\",command = self.reset)\r\n        reset.pack(side=\"top\")\r\n\r\n        close = Button(f,text=\"QUIT\",bg=\"black\",fg = \"red\",command=self.quitwin)\r\n        close.pack(side=\"top\")\r\n\r\n        ##Changing the font to increase the size of the buttons\r\n\r\n        buttons = [start,stop,close,reset,lap]\r\n\r\n        for i in buttons:\r\n            i.config(font=self.medium)  \r\n\r\n    def startrunning(self,event=None):\r\n\r\n        self.reset()\r\n\r\n        self.runTime = Frame(self.frame)\r\n        self.runTime.pack() ##this frame will show the present run time\r\n\r\n        self.start = time()\r\n        self.startOfLap = time()\r\n\r\n        if self.timeShown.get():\r\n\r\n            self.initRunTimeFrame()\r\n        \r\n        r = Frame(self.frame)\r\n        r.pack()\r\n\r\n        start = Label(r,text=\"\\nStarted running\")\r\n        start.pack()\r\n\r\n\r\n    def initRunTimeFrame(self,t = ' '):\r\n        \r\n        self.timing = Label(self.runTime,text=\"\",font=self.veryBig)\r\n\r\n        self.timing.pack(side='top')\r\n\r\n        self.updateRunTimeFrame()\r\n\r\n    def updateRunTimeFrame(self):\r\n\r\n        t = time()\r\n\r\n        now = ' %0.2f' %(t - self.start) \r\n\r\n        self.timing.configure(text=now)\r\n\r\n        self.runTime.after(1,self.updateRunTimeFrame)\r\n        \r\n\r\n    def stoprunning(self,event=None):\r\n        \r\n        r = Frame(self.frame)\r\n        r.pack()\r\n        self.stop = time()\r\n        self.timeConsumed = self.stop - self.start\r\n\r\n        self.runTime.destroy()\r\n\r\n        self.timing = Label(r,text='%0.2f' %(self.timeConsumed),font=self.veryBig)\r\n\r\n        self.timing.pack(side='top')\r\n\r\n        Label(r,text='\\nstopped running').pack()\r\n        end = Label(r,text=\"\\nTime consumed is: %0.2f seconds\" %self.timeConsumed)\r\n        end.pack(side = \"bottom\")\r\n\r\n        if self.shown.get():\r\n            tkMessageBox.showinfo('Summary of this run','The run was completed in %0.2f seconds' %self.timeConsumed)\r\n\r\n        self.writeDataToFile()\r\n        self.initVariables()\r\n\r\n    def togglePopUp(self,event=None):\r\n        if self.shown.get():\r\n            tkMessageBox.showinfo('Message','Pop up after run has been switched on')\r\n        else:\r\n            tkMessageBox.showinfo('Message','Pop up after run has been switched off')\r\n\r\n    def writeDataToFile(self,event=None):\r\n\r\n        now = datetime.datetime.now() ##use the current time, not the time the app was launched\r\n\r\n        inputFile = open('timings.txt','a')\r\n\r\n        for i in range(60):\r\n            inputFile.write('-')\r\n\r\n        dateNow = 'Date:' + str(now.day) + '-' + str(now.month) + '-' \\\r\n                  + str(now.year) \\\r\n                  + ', ' + 'Time:' + str(now.hour) + ':' + str(now.minute) \\\r\n                  + ':' + str(now.second)\r\n\r\n        inputFile.write('\\n\\n' + dateNow + '\\n\\n')\r\n\r\n        for i in range(len(self.laps)):\r\n            inputFile.write('Lap ' + str(i+1) + ': ' + str('%0.2f' %self.laps[i]) + ' seconds\\n')\r\n\r\n        if len(self.laps) == 0:\r\n            inputFile.write('No laps recorded')\r\n\r\n        inputFile.write('\\nSummary of this run:'+str(' %0.2f' %self.timeConsumed) + ' seconds' + '\\n')\r\n\r\n        inputFile.close()\r\n\r\n    def reset(self,event=None):\r\n        self.frame.destroy()\r\n\r\n        
self.initFrame()\r\n\r\n    def endlap(self,event=None):\r\n        self.endOfLap = time()\r\n        timeTakenForOneLap = self.endOfLap - self.startOfLap\r\n\r\n        self.laps.append(timeTakenForOneLap)\r\n\r\n        r = Label(self.frame,text=\"Lap \" + str(self.counterOfLaps) +\" was completed in %0.2f\" %timeTakenForOneLap)\r\n        r.pack()\r\n        self.counterOfLaps += 1\r\n\r\n        self.startOfLap = time()\r\n\r\n        if self.counterOfLaps % 9 == 0:\r\n            self.frame.pack_propagate(1)\r\n\r\n        \r\n\r\n    def showHelp(self,event=None):\r\n\r\n        tkMessageBox.showinfo('Help','Application that emulates a stopwatch with lap timing facility\\\r\n                              \\nCopyright(c) 2013 Siddharth Kannan')\r\n\r\n        self.firstTime = True\r\n\r\n        self.initFrame()\r\n\r\n        \r\n    def quitwin(self,event=None):\r\n        self.window.destroy()\r\n\r\n        self.window2 = Tk()\r\n        self.window2.title('License and Credits')\r\n\r\n\r\n        self.window2.bind('<Escape>',self.exit)\r\n\r\n        self.frame =Frame(self.window2)\r\n        self.frame.pack()\r\n\r\n        self.r = Frame(self.frame)\r\n        self.r.pack()\r\n\r\n        r = self.r\r\n\r\n        self.big = Font(family='Helvetica',size=24)\r\n\r\n        m = Message(r,text=\"Created by Siddharth Kannan\\\r\n                    \\nWritten on Python 2.7 and Tkinter 8.5\\\r\n                    \\nOS: WINDOWS XP SP 3\\\r\n                    \\nThis software is licensed under the WTFPL license.\\\r\n                    \\nSee the copying file for more details.\\\r\n                    \\nPress the quit button below to quit the application\\\r\n                    \",font=self.big)\r\n\r\n        m.pack()\r\n\r\n        b = Button(r,text='QUIT',fg='red',bg='black',command=self.window2.destroy,font=self.big)\r\n        b.pack(side='bottom') \r\n\r\n    def showCredits(self,event=None):\r\n\r\n        self.window1 = Tk()\r\n        self.window1.title('License and Credits')\r\n        \r\n        self.frame =Frame(self.window1)\r\n        self.frame.pack()\r\n\r\n        self.r = Frame(self.frame)\r\n        self.r.pack()\r\n\r\n        r = self.r\r\n\r\n        self.big = Font(family='Helvetica',size=24)\r\n\r\n        m = Message(r,text=\"Created by Siddharth Kannan\\\r\n                    \\nWritten on Python 2.7 and Tkinter 8.5\\\r\n                    \\nOS: WINDOWS XP SP 3\\\r\n                    \\nThis software is licensed under the WTFPL license.\\\r\n                    \\nSee the copying file for more details.\\\r\n                    \",font=self.big)\r\n\r\n        m.pack()\r\n\r\n        b = Button(r,text='OKAY',font=self.big,command=self.window1.destroy)\r\n        b.pack(side='bottom')\r\n\r\n\r\n    def exit(self,event=None):\r\n        \"\"\"the function that will kill all the processes and end the application\"\"\"\r\n\r\n        self.window2.destroy()\r\n\r\n\r\nstopwatch()\r\nmainloop()\r\n","sub_path":"stopwatch_gui_withtimedisplay.py","file_name":"stopwatch_gui_withtimedisplay.py","file_ext":"py","file_size_in_byte":11124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"592218122","text":"from typing import List, Tuple\n\nfrom torch.autograd import Variable\n\nfrom rnng.actions import Action\nfrom rnng.models import RnnGrammar\n\n\ndef greedy_decode(parser: RnnGrammar) -> List[Tuple[Action, Variable]]:\n    result = []\n    while not parser.finished:\n        log_probs = parser()\n        best_logprob, best_action_id = log_probs.data.max(0)\n        best_action_id = best_action_id[0]\n        best_logprob = best_logprob[0]\n        best_action = parser.action_store.get_by_id(best_action_id)\n        result.append((best_action, best_logprob))\n        best_action.execute_on(parser)\n    return result\n","sub_path":"rnng/decoding.py","file_name":"decoding.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"138575013","text":"from PyQt5.QtCore import Qt\n\n\nclass ScriptFrontendService(object):\n    def 
__init__(self, frontend_api):\n \"\"\"\n :type frontend_api: playground.frontend.FrontendApp\n \"\"\"\n\n self._frontend = frontend_api\n\n self._frontend.subscribe_service(\"asset_service\", self._subcribe_asset_browser)\n\n self.filename_editor_map = dict()\n\n def close(self):\n for window_name in self.filename_editor_map.values():\n w = self._frontend.get_window(window_name)\n w.close()\n\n def _window_name(self, asset_type, asset_name):\n return \"script_editor_%s_%s\" % (asset_type, asset_name)\n\n def _subcribe_asset_browser(self, msg):\n msg_type = msg['msg_type']\n\n asset_name = msg['name']\n asset_type = msg['type']\n asset_path = msg['path']\n\n if msg_type == 'dclick' and asset_type != 'level':\n if asset_path in self.filename_editor_map:\n w = self._frontend.get_dock(self.filename_editor_map[asset_path])\n w.raise_()\n return\n\n window_name = self._window_name(asset_type, asset_name)\n title = \"Script editor: %s.%s\" % (asset_name, asset_type)\n\n self._frontend.create_window(window_name, title, menu=\"script_editor_menu\")\n\n self.filename_editor_map[asset_path] = window_name\n\n self._frontend.create_dock(window_name, title, window_name, parent=\"main_window\", parent_area='top')\n dock = self._frontend.get_dock(window_name)\n dock.setProperty(\"filename\", asset_path)\n dock.setAttribute(Qt.WA_DeleteOnClose)\n dock.destroyed.connect(self._editor_destroyed)\n\n if len(self.filename_editor_map) > 1:\n root_w = self._frontend.get_window(\"main_window\")\n d = self._frontend.get_dock(tuple(self.filename_editor_map.values())[0])\n root_w.tabifyDockWidget(d, dock)\n\n self._frontend.create_web_content(window_name, window_name,\n \"script_editor/static/script_editor.html\"\n \"?src/%s.%s\" % (\n asset_name, asset_type)\n )\n\n def _editor_destroyed(self, obj):\n del self.filename_editor_map[obj.property(\"filename\")]\n","sub_path":"playground/src/modules/script_editor/scritp_editor_service.py","file_name":"scritp_editor_service.py","file_ext":"py","file_size_in_byte":2393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"477271477","text":"\"\"\"\nCredit for demo setup: Gusto development team\n\"\"\"\nfrom gusto import *\nfrom firedrake import IcosahedralSphereMesh, SpatialCoordinate, as_vector, \\\n FunctionSpace, parameters\nfrom firedrake.petsc import PETSc\nfrom argparse import ArgumentParser\nfrom hybridization import HybridizedShallowWaterSolver\nfrom math import pi\nimport sys\n\nday = 24.*60.*60.\n\nref_dt = {3: 3000.,\n 4: 1500.,\n 5: 750.,\n 6: 375.,\n 7: 187.5}\n\nPETSc.Log.begin()\nparser = ArgumentParser(description=\"\"\"Williamson test case 2 (vector invariant).\"\"\",\n add_help=False)\n\nparser.add_argument(\"--refinements\",\n default=3,\n type=int,\n choices=[3, 4, 5, 6, 7],\n help=\"Number of icosahedral refinements.\")\n\nparser.add_argument(\"--hybridization\",\n action=\"store_true\",\n help=\"Use a hybridized shallow water solver.\")\n\nparser.add_argument(\"--test\",\n action=\"store_true\",\n help=\"Enable a quick test run.\")\n\nparser.add_argument(\"--profile\",\n action=\"store_true\",\n help=\"Turn on profiling for a 20 time-step run.\")\n\nparser.add_argument(\"--dumpfreq\",\n default=1,\n type=int,\n action=\"store\",\n help=\"Dump frequency of output files.\")\n\nparser.add_argument(\"--help\",\n action=\"store_true\",\n help=\"Show help.\")\n\nargs, _ = parser.parse_known_args()\n\nif args.help:\n help = parser.format_help()\n PETSc.Sys.Print(\"%s\\n\" % help)\n sys.exit(1)\n\nif 
args.profile:\n # Ensures accurate timing of parallel loops\n parameters[\"pyop2_options\"][\"lazy_evaluation\"] = False\n tmax = 20*ref_dt[args.refinements]\n\nif args.test:\n tmax = ref_dt[args.refinements]\n\nif not args.test and not args.profile:\n tmax = 5*day\n\nrefinements = args.refinements\ndt = ref_dt[refinements]\nhybrid = bool(args.hybridization)\nPETSc.Sys.Print(\"\"\"\nProblem parameters:\\n\nTest case: Williamson test case 2 (vector invariant)\\n\nHybridized shallow water solver: %s,\\n\nRefinements: %s,\\n\nProfiling: %s,\\n\nTest run: %s,\\n\nDump frequency: %s.\\n\n\"\"\" % (hybrid, refinements, bool(args.profile),\n bool(args.test), args.dumpfreq))\n\nPETSc.Sys.Print(\"Initializing problem with dt: %s and tmax: %s.\\n\" % (dt,\n tmax))\n\n# setup shallow water parameters\nR = 6371220.\nH = 5960.\n\n# setup input that doesn't change with ref level or dt\nfieldlist = ['u', 'D']\nparameters = ShallowWaterParameters(H=H)\ndiagnostics = Diagnostics(*fieldlist)\n\ndirname = \"sw_W2_ref%s_dt%s\" % (refinements, dt)\nmesh = IcosahedralSphereMesh(radius=R,\n refinement_level=refinements, degree=3)\nx = SpatialCoordinate(mesh)\nglobal_normal = x\nmesh.init_cell_orientations(x)\n\ntimestepping = TimesteppingParameters(dt=dt)\n\nif hybrid:\n dirname = 'hybrid_sw_w2_vi_ref%s_dt%s' % (refinements, dt)\nelse:\n dirname = 'sw_w2_vi_ref%s_dt%s' % (refinements, dt)\n\noutput = OutputParameters(dirname=dirname,\n dumpfreq=args.dumpfreq,\n dumplist_latlon=['D', 'D_error'],\n steady_state_error_fields=['D', 'u'])\n\nstate = State(mesh, horizontal_degree=1,\n family=\"BDM\",\n timestepping=timestepping,\n output=output,\n parameters=parameters,\n diagnostics=diagnostics,\n fieldlist=fieldlist)\n\n# interpolate initial conditions\nu0 = state.fields(\"u\")\nD0 = state.fields(\"D\")\nx = SpatialCoordinate(mesh)\nu_max = 2*pi*R/(12*day) # Maximum amplitude of the zonal wind (m/s)\nuexpr = as_vector([-u_max*x[1]/R, u_max*x[0]/R, 0.0])\nOmega = parameters.Omega\ng = parameters.g\nDexpr = H - ((R * Omega * u_max + u_max*u_max/2.0)*(x[2]*x[2]/(R*R)))/g\n# Coriolis expression\nfexpr = 2*Omega*x[2]/R\nV = FunctionSpace(mesh, \"CG\", 1)\nf = state.fields(\"coriolis\", V)\nf.interpolate(fexpr) # Coriolis frequency (1/s)\n\nu0.project(uexpr)\nD0.interpolate(Dexpr)\nstate.initialise([('u', u0),\n ('D', D0)])\n\nueqn = AdvectionEquation(state, u0.function_space())\nDeqn = AdvectionEquation(state, D0.function_space(),\n equation_form=\"continuity\")\n\nadvected_fields = []\nadvected_fields.append((\"u\", ThetaMethod(state, u0, ueqn)))\nadvected_fields.append((\"D\", SSPRK3(state, D0, Deqn)))\n\n# Set up linear solver\nif hybrid:\n linear_solver = HybridizedShallowWaterSolver(state)\n\nelse:\n parameters = {'ksp_type': 'gmres',\n 'pc_type': 'fieldsplit',\n 'pc_fieldsplit_type': 'schur',\n 'ksp_type': 'gmres',\n 'ksp_max_it': 100,\n 'ksp_gmres_restart': 50,\n 'pc_fieldsplit_schur_fact_type': 'FULL',\n 'pc_fieldsplit_schur_precondition': 'selfp',\n 'fieldsplit_0': {'ksp_type': 'preonly',\n 'pc_type': 'bjacobi',\n 'sub_pc_type': 'ilu'},\n 'fieldsplit_1': {'ksp_type': 'cg',\n 'pc_type': 'gamg',\n 'ksp_rtol': 1e-8,\n 'mg_levels': {'ksp_type': 'chebyshev',\n 'ksp_max_it': 2,\n 'pc_type': 'bjacobi',\n 'sub_pc_type': 'ilu'}}}\n\n # Shallow water solver from Gusto, but with hybridization turned off\n linear_solver = ShallowWaterSolver(state, solver_parameters=parameters,\n overwrite_solver_parameters=True)\n\n# Set up forcing\nsw_forcing = ShallowWaterForcing(state)\n\n# Build time stepper\nstepper = 
CrankNicolson(state, advected_fields, linear_solver,\n                        sw_forcing)\n\nPETSc.Sys.Print(\"Starting simulation...\\n\")\nstepper.run(t=0, tmax=tmax)\n","sub_path":"williamson_tests/sw_williamson2_triangle_vector_invariant.py","file_name":"sw_williamson2_triangle_vector_invariant.py","file_ext":"py","file_size_in_byte":6041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"226325759","text":"#!/usr/bin/python\n# coding:utf-8\n\n##\n# Xiami Music API\n# @Author VenDream\n# @Update 2017-1-11 14:35:45\n##\n\nimport re\nimport json\nimport base64\nimport hashlib\nimport requests\nimport HTMLParser\n\n__all__ = ['XiamiAPI']\n\nclass XiamiAPI():\n    def __init__(self):\n        self.name = '虾米音乐'\n        self.url = {\n            'total': 'http://api.xiami.com/web?app_key=1&page=1&limit=1&r=search/songs',\n            'lyric': 'http://api.xiami.com/web?v=2.0&app_key=1&r=song/detail',\n            'single': 'http://www.xiami.com/app/xiating/search-song2?callback=xiami',\n            'album': '',\n            'info': ''\n        }\n        self.headers = {\n            'Host': 'www.xiami.com',\n            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'\n        }\n        self.total_headers = {\n            'Referer': 'http://m.xiami.com'\n        }\n        self.max_count = 1980\n        self.num_per_page = 15\n        self.xiami_per_page = 24\n        self.timeout = 9\n\n    # Get the lyrics of a song\n    # @param {string} song_id Song ID\n    def get_lyric(self, song_id):\n        url = '%s&id=%s' % (self.url['lyric'], song_id)\n        song_detail = getData(url, self.total_headers, self.timeout)\n        if song_detail is None:\n            return None\n\n        lyric = ''\n\n        if song_detail['data']:\n            lrc_reg = re.compile('<\\d+>')\n            lyric_url = song_detail['data']['song']['lyric']\n            if lyric_url:\n                lyric_trc = getData(lyric_url, {}, self.timeout, 'text')\n                lyric = lrc_reg.sub('', lyric_trc)\n                lyric = lyric.replace('\\n', '\\\\n').replace('\\r', '')\n\n        return lyric\n\n    # Parse the song info\n    # @param {array} songs Song list\n    def parse_songs_info(self, songs):\n        album_cdn = 'http://img.xiami.net/'\n        songs_info = []\n\n        if len(songs) > 0:\n            for s in songs:\n                # Check whether the song has an album name\n                if s['album_name'] == '':\n                    album = u'-'\n                else:\n                    album = u'《%s》' % s['album_name']\n\n                # Pick the high-quality (320K) audio\n                song_url = None\n                song_quality = -1\n                for ss in s['audios']:\n                    # Lossless\n                    # if ss['audioQualityEnum'] == 'LOSSLESS':\n                    #     song_url = ss['filePath']\n                    #     song_quality = 'lossless'\n                    #     break\n                    if ss['audioQualityEnum'] == 'HIGH':\n                        song_url = ss['filePath']\n                        song_quality = 320\n                        break\n                    if ss['audioQualityEnum'] == 'LOW':\n                        song_url = ss['filePath']\n                        song_quality = 128\n\n                # Get the album cover\n                if s['album_logo']:\n                    song_cover = album_cdn + s['album_logo'].replace('_1.jpg', '_4.jpg')\n                else:\n                    song_cover = ''\n\n                song_info = {\n                    'song_id': str(s['song_id']),\n                    'song_name': s['song_name'],\n                    'album_id': s['album_id'],\n                    'album_name': album,\n                    'artist_id': s['artist_id'],\n                    'artist_name': s['artist_name'],\n                    'duration': s['length'] * 1000,\n                    # Info needed for playback (everything except the lyrics)\n                    'player_info': {\n                        'song_id': str(s['song_id']),\n                        'song_quality': song_quality,\n                        'song_url': song_url,\n                        'song_name': s['song_name'],\n                        'song_album': album,\n                        'song_artist': s['artist_name'],\n                        'song_cover': song_cover,\n                        'song_duration': s['length'] * 1000\n                    }\n                }\n\n                songs_info.append(song_info)\n\n        return songs_info\n\n    # Search for single tracks\n    # @param {string} sstr Search keyword\n    # @param {number} page Page number\n    def single(self, sstr, page = 1):\n        # Start index of the search\n        start = (page - 1) * self.num_per_page\n        # Corresponding page of the Xiami API\n        x_page = int(start / self.xiami_per_page) + 1\n        # Compute the offset within that page\n        offset = start % self.xiami_per_page\n        # Compute the number of items actually taken from this page\n        take = 
min(self.xiami_per_page - offset, self.num_per_page)\n\n        # Check whether the next page of the Xiami API needs to be requested,\n        # and compute the number of items to take from that next page\n        if offset + self.num_per_page > self.xiami_per_page:\n            rest = (offset + self.num_per_page) % self.xiami_per_page\n        else:\n            rest = 0\n\n        #---------------------------------------\n        \n        # Get the total number of results\n        total_url = '%s&key=%s' % (self.url['total'], sstr)\n        total_res = getData(total_url, self.total_headers, self.timeout)\n        if total_res and total_res['state'] == 0:\n            songs_count = min(self.max_count, total_res['data']['total'])\n        else:\n            return {\n                'songs_count': 0,\n                'songs_info': []\n            }\n\n        # Get the song data\n        single_url = '%s&key=%s&page=%s' % (self.url['single'], sstr, x_page)\n        single_res = getData(single_url, self.headers, self.timeout)\n        if single_res and single_res['data'] is not None:\n            songs = single_res['data'][offset:offset + take]\n\n            # Fetch the next page of data if needed\n            if rest > 0:\n                n_single_url = '%s&key=%s&page=%s' % (self.url['single'], sstr, x_page + 1)\n                n_single_res = getData(n_single_url, self.headers, self.timeout)\n                if n_single_res['data'] is not None:\n                    songs = songs + n_single_res['data'][0:rest]\n                else:\n                    return {\n                        'songs_count': 0,\n                        'songs_info': []\n                    }\n\n            songs_info = self.parse_songs_info(songs)\n\n            return {\n                'songs_count': songs_count,\n                'songs_info': songs_info\n            }\n        else:\n            return {\n                'songs_count': 0,\n                'songs_info': []\n            }\n\n    def album(self, sstr, page):\n        pass\n\n    # Get the song info\n    # @param {string} song_id Song ID\n    def info(self, song_id):\n        lyric = self.get_lyric(song_id)\n\n        if lyric == '' or lyric:\n            return {\n                'song_lyric': lyric\n            }\n        else:\n            return None\n\n# Issue a GET request\ndef getData(url, headers, timeout, dataType = 'json'):\n    req = requests.get(\n        url = url,\n        headers = headers,\n        timeout = timeout\n    )\n\n    res = req.text\n\n    if dataType == 'json':\n        try:\n            res = req.json()\n        except:\n            res = None\n\n    return res\n\nxiamiAPI = XiamiAPI()\n","sub_path":"apis/xiami.py","file_name":"xiami.py","file_ext":"py","file_size_in_byte":7064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"325718213","text":"# coding: utf8\nimport sys, logging\nfrom rule import Rule\nfrom info import Info\nfrom message_builder import MessageBuilder, BuildConfig\n\nfrom django.core.cache import cache\n\nweixinlogger = logging.getLogger(\"weixin\")\nlogger = logging.getLogger(\"default\")\n\n# Router uses memcache to share data across the whole application\nclass Router(object):\n    __instance = None\n\n    @staticmethod\n    def get_instance():\n        if Router.__instance is None:\n            Router.__instance = Router()\n        return Router.__instance\n\n    def set(self, pattern, handler=None, replies=None): \n        if pattern and handler is None and replies is None:\n            r = pattern\n        else:\n            r = {\n                'name' : pattern,\n                'pattern' : pattern,\n                'handler' : handler,\n                'replies' : replies\n            }\n        if r is not None:\n            r = Rule.convert(r)\n            self.routes.extend(r)\n\n    def get(self, name):\n        if name is not None:\n            return filter(lambda r:r.name==name, self.routes)\n        else:\n            return self.routes\n\n    def data(self, uid, key, value):\n        obj = cache.get(uid, {})\n        if isinstance(key, (str, unicode)):\n            if value is None:\n                del obj[key]\n            else:\n                obj[key] = value\n        elif isinstance(key, dict):\n            obj.update(key) # obj is a plain dict, so update it directly\n        cache.set(uid, obj)\n        return obj\n\n    def wait(self, uid, rule):\n        if rule is not None:\n            rule = Rule.convert(rule)\n        self.wait_rules[uid] = rule\n\n    def rewait(self, uid):\n        self.wait(uid, self.last_wait_rules[uid])\n\n    def dialog(self, path):\n        pass\n\n    def reply(self, wx, data, cb):\n        info = data\n        if not isinstance(data, 
Info):\n            info = Info(wx, data)\n\n        if not self.config.get('keepBlank', False) and info.text:\n            info.text = info.text.strip()\n\n        rule_list = self.routes\n        waiter = self.wait_rules.get(info.user, None)\n\n        if waiter:\n            # waiter rules take precedence; list.extend returns None, so concatenate instead\n            rule_list = list(waiter) + self.routes\n            self.last_wait_rules[info.user] = waiter\n            self.wait_rules[info.user] = None\n\n        for i in range(0, len(rule_list)):\n            rule = rule_list[i]\n            if Rule.is_match(info, rule):\n                weixinlogger.info(\"match %s\" % rule.name)\n                rule.count = i\n                result = Rule.execute(info, rule, cb)\n                if isinstance(result, (str, unicode)):\n                    result = BuildConfig(MessageBuilder.TYPE_RAW_TEXT, None, result)\n                if result:\n                    if rule.replies:\n                        self.wait(info.user, Rule.convert(rule.replies, rule.name))\n                    return cb(None, result)\n\n            else:\n                logger.debug(\"not match %s\" % rule.name)\n\n        return cb('404', BuildConfig(MessageBuilder.TYPE_RAW_TEXT, None, self.get_status('404') + info.text))\n    \n    routes = []\n    wait_rules = {}\n    last_wait_rules = {}\n    data_cache = {}\n    config = {\n        'keepBlank' : True,\n        'statusMsg' : {\n            '204': u'你的消息已经收到,若未即时回复,还望海涵',\n            '403': u'鉴权失败,你的Token不正确',\n            '404': u'听不懂你说的: ',\n            '500': u'服务器临时出了一点问题,您稍后再来好吗'\n        }\n    }\n\n    def get_status(self, code):\n        return self.config.get('statusMsg', {}).get(str(code), None)\n","sub_path":"weixin/router.py","file_name":"router.py","file_ext":"py","file_size_in_byte":3522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"380354427","text":"# Todo: TESTING!!!\n# Todo: Document\n\nimport numpy as np\n\n#Helpers\ndef linear_gradient(y, tx, w):\n    \"returns the mse gradient of the linear prediction\"\n    return -1/len(y)*np.dot(tx.T,y-np.dot(tx,w))\n\ndef linear_loss(y, tx, w):\n    \"computes half the mean squared error of the linear regression\"\n    return 1/2 * np.mean((np.dot(w,tx.T)-y)**2)\n\n\ndef logistic_prediction(tx, w):\n    \"returns the logistic regression prediction based on the features and the \\\n    trained weights\"\n    z = np.dot(tx,w)\n    # Handle numerical problems (overflow and log(0))\n    minimum = -np.log(np.finfo(z.dtype).max)\n    maximum = -np.log(np.finfo(z.dtype).eps)\n    z[z < minimum] = minimum\n    z[z > maximum] = maximum\n    return 1/(1+np.exp(-z))\n\ndef logistic_gradient(y, tx, w):\n    \"computes the gradient of the logistic regression\"\n    pred = logistic_prediction(tx,w)\n    return -1/len(y)*np.dot(tx.T,y-pred)\n    \ndef logistic_loss(y, tx, w):\n    \"computes the loss of the logistic regression\"\n    pred = logistic_prediction(tx,w)\n    cost1 = -y*np.log(pred)\n    cost2 = -(1-y)*np.log(1-pred)\n    return np.mean(cost1+cost2)\n    \n\n\n#Implementation of the regression methods\ndef least_squares_GD(y, tx, initial_w, max_iters, gamma):\n    \"approximates weights of linear regression with gradient descent\"\n    w = initial_w\n    for n_iter in range(max_iters):\n        grad_L = linear_gradient(y,tx,w)\n        w = w - gamma * grad_L\n    loss = linear_loss(y,tx,w)\n    return w, loss\n\n\ndef least_squares_SGD(y, tx, initial_w, max_iters, gamma):\n    \"approximates weights of linear regression with SGD based on one data point \\\n    at a time\"\n    w = initial_w\n    for n_iter in range(max_iters):\n        i = np.random.randint(len(y))\n        grad_L = linear_gradient([y[i]], np.expand_dims(tx[i,:], axis=0),w)\n        w = w - gamma * grad_L\n    loss = linear_loss(y, tx, w)\n    return w, loss\n\n\ndef least_squares(y, tx):\n    \"computes weights of linear regression by solving the linear system\"\n    lhs = tx.T.dot(tx)\n    rhs = tx.T.dot(y)\n    w = np.linalg.solve(lhs,rhs)\n    loss = linear_loss(y,tx,w)\n    return w, loss\n    \n\ndef 
ridge_regression(y, tx, lambda_):\n    \"computes weights of ridge regression by solving the linear system\"\n    N = len(y)\n    lhs = tx.T.dot(tx) + lambda_*2*N*np.eye(tx.shape[1])\n    rhs = tx.T.dot(y)\n    w = np.linalg.solve(lhs,rhs)\n    loss = linear_loss(y,tx,w) + lambda_*np.sum(np.sum(w**2))\n    return w, loss\n\n\ndef logistic_regression(y, tx, initial_w, max_iters, gamma, verbose=False):\n    \"computes weights of logistic regression with SGD\"\n    w = initial_w\n    for n_iter in range(max_iters):\n        if verbose & (n_iter % int(max_iters/20) == 0):\n            print(str(int(n_iter / max_iters * 100)) + \"% Done\")\n\n        i = np.random.randint(len(y))\n        grad_L = logistic_gradient([y[i]], np.expand_dims(tx[i,:], axis=0),w)\n        w = w - gamma * grad_L\n    loss = logistic_loss(y,tx,w)\n    return w, loss\n    \n    \ndef reg_logistic_regression(y, tx, lambda_, initial_w, max_iters, gamma):\n    \"computes weights of regularized (L2) logistic regression with SGD\"\n    w = initial_w\n    for n_iter in range(max_iters):\n        i = np.random.randint(len(y))\n        grad_L = logistic_gradient([y[i]], np.expand_dims(tx[i,:], axis=0),w) + lambda_*w\n        w = w - gamma * grad_L\n    loss = logistic_loss(y,tx,w) + lambda_/2*np.sum(np.sum(w**2))\n    return w, loss\n\n# Additional methods\ndef hinge_loss_gradient(y, tx, w):\n    \"Gradient of the hinge loss function. Accepts either a single datapoint or a number of datapoints and returns the average\"\n    z = np.asarray(1-y*np.dot(tx,w))\n    z[z<0] = 0\n    \n    hinge_grad = np.zeros(tx.shape)\n    nonZero = np.argwhere(z != 0).flatten()\n    if y.ndim == 0:\n        y = np.expand_dims(y, axis=0)\n    if tx.ndim == 1:\n        tx = np.expand_dims(tx, axis=0)\n    if hinge_grad.ndim == 1:\n        hinge_grad = np.expand_dims(hinge_grad, axis=0)\n    hinge_grad[nonZero,:] = -np.dot(y[nonZero],tx[nonZero,:])\n\n    return np.mean(hinge_grad, axis=0)\n\ndef svm_classification(y, tx, lambda_, initial_w, max_iters, gamma):\n    \"Support vector machine classification\"\n    w = initial_w\n    for n_iter in range(max_iters):\n        rand_idx = np.random.randint(0,len(y))\n        reg = lambda_*w\n        reg[0] = 0\n        grad_L = (hinge_loss_gradient(np.asarray(y[rand_idx]), tx[rand_idx,:], w) + reg)\n        w = w - gamma * grad_L\n    loss = 1-y*np.dot(tx,w)\n    loss[np.asarray(loss)<0] = 0\n    return w, np.mean(loss)\n\ndef predict_svm_outcome(tx, w):\n    out = np.dot(tx,w)\n    return np.sign(out)\n    \n","sub_path":"src/methods/implementations.py","file_name":"implementations.py","file_ext":"py","file_size_in_byte":4680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"618988375","text":"inpu=input()[1:-1].split(\"[\")\nrest=[]\nfor i in range(1,len(inpu)):\n    if i==len(inpu)-1:\n        rest.append(list(map(int, inpu[i][0:-1].split(\",\"))))\n    else:\n        rest.append(list(map(int,inpu[i][0:-2].split(\",\"))))\nvege=int(input())\nprice=int(input())\ndis=int(input())\nres=[]\nfor i in rest:\n    if i[2]>=vege and i[3]<=price and i[4]<=dis:\n        res.append(i[0:2])\nres.sort(key=lambda x:x[1])\nres.reverse()\nresult=[]\nfor i in res:\n    result.append(i[0])\n\nprint(result)\n","sub_path":"Code/CodeRecords/2527/60638/281602.py","file_name":"281602.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"81049711","text":"import usaddress\nimport csv\nimport sys\nimport re\nimport collections\n\n###########################################################\n## OhioPrepare.py\n##\n## This script pre-processes the Ohio voter file, using the usaddress module\n## to break the address string into standard parts\n## Also 
uses the residential address if no mailing address is provided\n## \n## Outputs lines that fail the address parser to an error log file\n###########################################################\n\ndef appendParsedFields(outrow, addressValues):\n\t# Construct the output\n\toutrow.update({\n\t'ADDRESS_NUMBER': addressValues['AddressNumber'],\n\t'ADDRESS_NUMBER_PREFIX': addressValues['AddressNumberPrefix'],\n\t'ADDRESS_NUMBER_SUFFIX': addressValues['AddressNumberSuffix'],\n\t'BUILDING_NAME': addressValues['BuildingName'],\n\t'CORNER_OF': addressValues['CornerOf'],\n\t'INTERSECTION_SEPARATOR': addressValues['IntersectionSeparator'],\n\t'LANDMARK_NAME': addressValues['LandmarkName'],\n\t'NOT_ADDRESS': addressValues['NotAddress'],\n\t'OCCUPANCY_TYPE': addressValues['OccupancyType'],\n\t'OCCUPANCY_IDENTIFIER': addressValues['OccupancyIdentifier'],\n\t'STREET_NAME': addressValues['StreetName'],\n\t'STREET_NAME_PRE_DIRECTIONAL': addressValues['StreetNamePreDirectional'],\n\t'STREET_NAME_PRE_MODIFIER': addressValues['StreetNamePreModifier'],\n\t'STREET_NAME_PRE_TYPE': addressValues['StreetNamePreType'],\n\t'STREET_NAME_POST_DIRECTIONAL': addressValues['StreetNamePostDirectional'],\n\t'STREET_NAME_POST_MODIFIER': addressValues['StreetNamePostModifier'],\n\t'STREET_NAME_POST_TYPE': addressValues['StreetNamePostType'],\n\t'SUBADDRESS_IDENTIFIER': addressValues['SubaddressIdentifier'],\n\t'SUBADDRESS_TYPE': addressValues['SubaddressType'],\n\t'USPS_BOX_GROUP_ID': addressValues['USPSBoxGroupID'],\n\t'USPS_BOX_GROUP_TYPE': addressValues['USPSBoxGroupType'],\n\t'USPS_BOX_ID': addressValues['USPSBoxID'],\n\t'USPS_BOX_TYPE': addressValues['USPSBoxType'],\n\t})\n\t\t\t\t\ndef constructAddressString(row):\n\treturn(' '.join([row['RESIDENTIAL_ADDRESS1'], \n\t\t row['RESIDENTIAL_SECONDARY_ADDR'],\"\\n\",\t\n\t\t row['RESIDENTIAL_CITY'],\n\t\t row['RESIDENTIAL_STATE'],\n\t\t row['RESIDENTIAL_ZIP']\n\t\t ]))\n\t\t \n\ndef appendMailingAddress(outrow, row):\n\t# Use residential address if no mailing address provided\n\tif not row['MAILING_ADDRESS1'] or not row['MAILING_CITY'] or not row['MAILING_STATE'] or not row['MAILING_ZIP']:\n\t\toutrow.update({\n\t\t'MAIL_ADDRESS_LINE1':row['RESIDENTIAL_ADDRESS1'],\n\t\t'MAIL_ADDRESS_LINE2':row['RESIDENTIAL_SECONDARY_ADDR'],\n\t\t'MAIL_CITY':row['RESIDENTIAL_CITY'],\n\t\t'MAIL_STATE':row['RESIDENTIAL_STATE'],\n\t\t'MAIL_ZIP_CODE':row['RESIDENTIAL_ZIP'],\n\t\t'MAIL_COUNTRY':'USA'\n\t\t})\n\telse:\n\t\toutrow.update({\n\t\t'MAIL_ADDRESS_LINE1':row['MAILING_ADDRESS1'],\n\t\t'MAIL_ADDRESS_LINE2':row['MAILING_SECONDARY_ADDRESS'],\n\t\t'MAIL_CITY':row['MAILING_CITY'],\n\t\t'MAIL_STATE':row['MAILING_STATE'],\n\t\t'MAIL_ZIP_CODE':row['MAILING_ZIP'],\n\t\t'MAIL_COUNTRY':row['MAILING_COUNTRY']\n\t\t})\n\t\t \n\t\t \nif len(sys.argv) != 2:\n\tprint('Usage: OhioPrepare <input file>')\n\tquit()\n\t\ninputFile = sys.argv[1]\noutputFile = re.sub('\\\\..*$', '_OUT.csv',inputFile, count=1)\t\nerrorFileName = re.sub('\\\\..*$', '_ERR.csv',inputFile, count=1)\t\nwith open(inputFile) as csvfile, \\\n\topen(outputFile, 'w') as outfile, open(errorFileName, 'w') as errorFile:\n\t\treader = csv.DictReader(csvfile, dialect='excel')\n\t\tfieldnames= 
[\n\t\t\t'ADDRESS_NUMBER',\n\t\t\t'ADDRESS_NUMBER_PREFIX',\n\t\t\t'ADDRESS_NUMBER_SUFFIX',\n\t\t\t'BUILDING_NAME',\n\t\t\t'CORNER_OF',\n\t\t\t'INTERSECTION_SEPARATOR',\n\t\t\t'LANDMARK_NAME',\n\t\t\t'NOT_ADDRESS',\n\t\t\t'OCCUPANCY_TYPE',\n\t\t\t'OCCUPANCY_IDENTIFIER',\n\t\t\t'PLACE_NAME',\n\t\t\t'STATE_NAME',\n\t\t\t'STREET_NAME',\n\t\t\t'STREET_NAME_PRE_DIRECTIONAL',\n\t\t\t'STREET_NAME_PRE_MODIFIER',\n\t\t\t'STREET_NAME_PRE_TYPE',\n\t\t\t'STREET_NAME_POST_DIRECTIONAL',\n\t\t\t'STREET_NAME_POST_MODIFIER',\n\t\t\t'STREET_NAME_POST_TYPE',\n\t\t\t'SUBADDRESS_IDENTIFIER',\n\t\t\t'SUBADDRESS_TYPE',\n\t\t\t'USPS_BOX_GROUP_ID',\n\t\t\t'USPS_BOX_GROUP_TYPE',\n\t\t\t'USPS_BOX_ID',\n\t\t\t'USPS_BOX_TYPE',\n\t\t\t'ZIP_CODE',\n\t\t\t'STATE_VOTER_REF',\n\t\t\t'COUNTY_VOTER_REF',\n\t\t\t'TITLE',\n\t\t\t'FIRST_NAME',\n\t\t\t'MIDDLE_NAME',\n\t\t\t'LAST_NAME',\n\t\t\t'NAME_SUFFIX',\n\t\t\t'GENDER',\n\t\t\t'BIRTHDATE',\n\t\t\t'REGISTRATION_DATE',\n\t\t\t'REGISTRATION_STATUS',\n\t\t\t'ABSTENTEE_TYPE',\n\t\t\t'PARTY',\n\t\t\t'EMAIL',\n\t\t\t'PHONE',\n\t\t\t'DO_NOT_CALL_STATUS',\n\t\t\t'LANGUAGE_CHOICE',\n\t\t\t'MAIL_ADDRESS_LINE1',\n\t\t\t'MAIL_ADDRESS_LINE2',\n\t\t\t'MAIL_CITY',\n\t\t\t'MAIL_STATE',\n\t\t\t'MAIL_ZIP_CODE',\n\t\t\t'MAIL_COUNTRY']\n\t\twriter = csv.DictWriter(outfile, fieldnames=fieldnames)\n\t\twriter.writeheader()\n\t\t\n\t\t\n\t\t# Create a file for writing addresses that don't parse\t\t\n\t\terrnames = list(fieldnames)\n\t\terrnames.extend(['PARSED_STRING',\"ORIGINAL_TEXT\"])\n\t\terrWriter = csv.DictWriter(errorFile, fieldnames=errnames)\n\t\terrWriter.writeheader()\t\n\n\t\ti = 0;\n\t\tfor row in reader:\n\t\t\n\t\t\taddr = ' '.join([row['RESIDENTIAL_ADDRESS1'], row['RESIDENTIAL_SECONDARY_ADDR']])\t\t\t\n\t\t\t\t \n\t\t\toutrow = {\n\t\t\t\t\t'STATE_VOTER_REF':row['SOS_VOTERID'],\n\t\t\t\t\t'COUNTY_VOTER_REF':row['COUNTY_ID'],\n\t\t\t\t\t'FIRST_NAME':row['FIRST_NAME'],\n\t\t\t\t\t'MIDDLE_NAME':row['MIDDLE_NAME'],\n\t\t\t\t\t'LAST_NAME':row['LAST_NAME'],\n\t\t\t\t\t'NAME_SUFFIX':row['SUFFIX'],\n\t\t\t\t\t'BIRTHDATE':row['DATE_OF_BIRTH'],\n\t\t\t\t\t'REGISTRATION_DATE':row['REGISTRATION_DATE'],\n\t\t\t\t\t'REGISTRATION_STATUS':row['VOTER_STATUS'],\n\t\t\t\t\t'PARTY':row['PARTY_AFFILIATION'],\n\t\t\t\t\t'PLACE_NAME':row['RESIDENTIAL_CITY'],\n\t\t\t\t\t'STATE_NAME':row['RESIDENTIAL_STATE'],\n\t\t\t\t\t'ZIP_CODE':row['RESIDENTIAL_ZIP']\t\t\t\t\t\t\t\t\t\n\t\t\t}\n\t\t\t\n\t\t\tappendMailingAddress(outrow, row)\n\t\t\t\t\t\n\t\t\ttry:\n\t\t\t\ttagged_address, address_type = usaddress.tag(addr)\t\t\n\n\t\t\t\t# Transfer values to defaultdict so we can get nulls for missing fields\n\t\t\t\taddressValues = collections.defaultdict(str)\n\t\t\t\tfor f in tagged_address:\n\t\t\t\t\taddressValues[f] = tagged_address[f]\n\t\t\t\t\n\t\t\t\tappendParsedFields(outrow, addressValues)\n\t\t\t\twriter.writerow(outrow)\t\t\t\t\n\n\t\t\t### Get's thrown when the US Address parser gets hopelesly confused. 
Write this out to the errorFile\n\t\t\t### for manual training later\n\t\t\texcept usaddress.RepeatedLabelError as e :\n\t\t\t\tprint(\"\\n\\n--->Error\\n\",e.parsed_string, e.original_string)\n\t\t\t\toutrow.update({\n\t\t\t\t\t'PARSED_STRING':e.parsed_string,\n\t\t\t\t\t'ORIGINAL_TEXT':e.original_string\n\t\t\t\t})\n\t\t\t\t\n\t\t\t\terrWriter.writerow(outrow)\t\t\t\t\t\n\t\t\t\t\n\t\t\t# if i > 1000:\n\t\t\t\t# break\n\t\t\t# i += 1\n\t\t\t\n\n","sub_path":"src/main/python/OhioPrepare.py","file_name":"OhioPrepare.py","file_ext":"py","file_size_in_byte":6235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"523906468","text":"from vidispine.utils import create_matrix_params_string\n\n\nclass EntityBase:\n\n entity = ''\n\n def __init__(self, client) -> None:\n self.client = client\n\n def _build_url(\n self,\n endpoint: str = '',\n matrix_params: dict = None\n ) -> str:\n\n if not self.entity:\n raise NotImplementedError('Do not use Base class directly.')\n\n if matrix_params:\n matrix_string = create_matrix_params_string(matrix_params)\n else:\n matrix_string = ''\n\n if endpoint:\n return f'{self.entity}/{endpoint}{matrix_string}'\n else:\n return f'{self.entity}{matrix_string}'\n","sub_path":"vidispine/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"256887148","text":"import re\nimport pandas as pd\nfrom os import path\nfrom common.constants import *\nfrom datetime import datetime\nfrom app.utils import to_snake\nfrom transaction_data_tools.data_setup_utils import update_raw_transactions_file\n\n\nCHASE_SCHEMA = [\n \"Details\",\n \"Posting Date\",\n \"Description\",\n \"Amount\",\n \"Type\",\n \"Balance\",\n \"Check or Slip #\"\n]\n\n\ndef get_filename(downloads) -> str:\n \"\"\"identify chase transaction files\"\"\"\n\n chase_regex = r\"chase7973_activity_\\d+.csv\"\n\n chase_downloads = []\n for d in downloads:\n match = re.match(chase_regex, d.lower())\n if match:\n chase_downloads.append(match.group())\n\n dates = []\n for c in chase_downloads:\n date_string = c.replace(\".csv\", \"\").split(\"_\")[-1]\n dates.append(datetime.strptime(date_string, '%Y%m%d'))\n\n if len(dates) > 0:\n max_date = max(dates)\n max_index = dates.index(max_date)\n\n return chase_downloads[max_index]\n\n\ndef update(downloads) -> str:\n logs = \"\\n\\nUpdating chase records\".upper()\n chase_filename = get_filename(downloads)\n if chase_filename:\n logs += update_raw_transactions_file(\n PATH_TO_CHASE, PATH_TO_DOWNLOADS + chase_filename, CHASE_SCHEMA\n )\n return logs\n else:\n return \"\\n\\nNo chase downloads found\".upper()\n\n\ndef load_trans() -> pd.DataFrame:\n \"\"\"Load chase transactions\"\"\"\n assert path.exists(PATH_TO_CHASE)\n\n df = pd.read_csv(PATH_TO_CHASE)\n df.columns = [to_snake(col) for col in df.columns]\n\n df[\"date\"] = df[\"posting_date\"].apply(lambda x: pd.to_datetime(x).date())\n df[\"original_description\"] = df[\"description\"]\n df[\"amount\"] = df[\"amount\"].apply(abs)\n df[\"transaction_type\"] = df[\"details\"].apply(lambda x: x.lower())\n df[\"account_name\"] = \"Chase College\"\n\n return df[RAW_TRANSACT_SCHEMA + [\"type\"]]\n\n","sub_path":"src/spending-tracker/transaction_data_tools/plugins/chase_plugin.py","file_name":"chase_plugin.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} 
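The OhioPrepare record above leans on two parts of the usaddress API: usaddress.tag(), which returns a pair (OrderedDict of label -> value, address_type), and usaddress.RepeatedLabelError, whose parsed_string/original_string attributes the script logs for later manual training. A minimal, self-contained sketch of that flow follows; the sample address is hypothetical and it assumes the usaddress package is installed.

import collections
import usaddress  # pip install usaddress

def tag_address(addr):
    try:
        # tag() returns (OrderedDict of label -> value, address type string)
        tagged, address_type = usaddress.tag(addr)
        # Copy into a defaultdict(str) so absent labels read back as '',
        # mirroring how OhioPrepare.py fills its output row.
        values = collections.defaultdict(str)
        values.update(tagged)
        return values, address_type
    except usaddress.RepeatedLabelError as e:
        # Raised when the probabilistic parser assigns the same label twice;
        # e.parsed_string / e.original_string support manual review.
        print('Failed to tag:', e.original_string)
        return None, None

values, kind = tag_address('123 N Main St Apt 4 Columbus OH 43004')  # hypothetical address
if values is not None:
    print(kind, values['AddressNumber'], values['StreetName'])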
+{"seq_id":"643640458","text":"\"\"\"\nGiven an array nums containing n distinct numbers in the range [0, n], return the only number in the range that is missing from the array.\nFollow up: Could you implement a solution using only O(1) extra space complexity and O(n) runtime complexity?\n\nExample 1:\n\nInput: nums = [3,0,1]\nOutput: 2\nExplanation: n = 3 since there are 3 numbers, so all numbers are in the range [0,3]. 2 is the missing number in the range since it does not appear in nums.\n\nExample 2:\n\nInput: nums = [0,1]\nOutput: 2\nExplanation: n = 2 since there are 2 numbers, so all numbers are in the range [0,2]. 2 is the missing number in the range since it does not appear in nums.\n\nExample 3:\n\nInput: nums = [9,6,4,2,3,5,7,0,1]\nOutput: 8\nExplanation: n = 9 since there are 9 numbers, so all numbers are in the range [0,9]. 8 is the missing number in the range since it does not appear in nums.\n\nExample 4:\n\nInput: nums = [0]\nOutput: 1\nExplanation: n = 1 since there is 1 number, so all numbers are in the range [0,1]. 1 is the missing number in the range since it does not appear in nums.\n\n\"\"\"\n\n__author__ = 'roeiherz'\n\n\ndef missingNumber(nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n sum = 0\n count = len(nums)\n for n in nums:\n sum += n\n\n total = count * (count + 1) // 2\n print(total - sum)\n\n\nif __name__ == '__main__':\n nums = [9, 6, 4, 2, 3, 5, 7, 0, 1]\n missingNumber(nums)\n","sub_path":"Amazon/MissingNumber.py","file_name":"MissingNumber.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"384145223","text":"import coreir\nimport peak\nfrom hwtypes import BitVector\n\n#Represents a single rewrite rule that can be applied to a flattened CoreIR graph\nclass RewriteRule:\n def __call__(self,c,app):\n raise NotImplementedError\n\n#prim_instr can either be a lambda or an ISABuilder instruction\nclass Peak1to1(RewriteRule):\n def __init__(self,coreir_prim : coreir.module.Module, peak_prim : coreir.module.Module, prim_instr : peak.ISABuilder, io_mapping):\n self.instr_map = {}\n self.coreir_prim = coreir_prim\n if isinstance(prim_instr,peak.ISABuilder):\n self.instr_lambda = lambda _ : prim_instr\n else:\n self.instr_lambda = prim_instr\n #Actually construct the coreir definition\n coreir_def = coreir_prim.new_definition()\n c = coreir_prim.context\n\n peak_inst = coreir_def.add_module_instance(name=\"inst\",module=peak_prim)\n for coreir_port,peak_port in io_mapping.items():\n pio = peak_inst.select(peak_port)\n if coreir_port == \"0\":\n coreir.connect_const(pio,0)\n else:\n cio = coreir_def.interface.select(coreir_port)\n coreir_def.connect(pio,cio)\n self.coredef = coreir_def\n\n #returns whether any change occured\n def __call__(self,app : coreir.module.Module):\n c = app.context\n mdef = app.definition\n assert mdef\n to_inline = [inst for inst in mdef.instances if inst.module==self.coreir_prim ]\n if len(to_inline)==0:\n return False\n self.coreir_prim.definition = self.coredef\n for inst in to_inline:\n instr = self.instr_lambda(inst)\n inst_name = inst.name+\"$inst\"\n coreir.inline_instance(inst)\n inlined_inst = mdef.get_instance(inst_name)\n inlined_inst.add_metadata(\"instr_debug\",f\"\\\"{str(instr)}\\\"\")\n self.instr_map[inst_name] = instr\n return len(to_inline)>0\n\nclass PeakIO(RewriteRule):\n #Interpreting is_input as an input to the fabric which indicates the io_port_name is an output\n def __init__(self, width, is_input, 
io_prim : coreir.module.Module):\n io_port_name = None\n for port_name, port_type in io_prim.type.items():\n if port_type.is_output() and is_input:\n io_port_name = port_name\n elif port_type.is_input() and not is_input:\n io_port_name = port_name\n assert io_port_name is not None\n\n assert io_port_name in dict(io_prim.type.items())\n assert io_prim.type[io_port_name].is_input() == (not is_input)\n assert io_prim.type[io_port_name].is_output() == is_input\n self.io_prim = io_prim\n self.is_input = is_input\n self.io_port_name = io_port_name\n self.width = width\n\n def __call__(self,app : coreir.module.Module):\n c = app.context\n mdef = app.definition\n io = mdef.interface\n modified = False\n for port_name, port_type in app.type.items():\n if port_type.size != self.width:\n continue\n if port_type.is_input() != self.is_input:\n continue\n modified = True\n #This is a valid port\n pt = mdef.add_passthrough(io.select(port_name))\n \n io_inst = mdef.add_module_instance(name=f\"io_{port_name}\",module=self.io_prim)\n mdef.connect(pt.select(\"in\"),io_inst.select(self.io_port_name))\n mdef.disconnect(pt.select(\"in\"),io.select(port_name))\n coreir.inline_instance(pt)\n return modified\n","sub_path":"metamapper/rewrite_rule.py","file_name":"rewrite_rule.py","file_ext":"py","file_size_in_byte":3596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"485511056","text":"\n## import modules\nfrom tm import managerClass as _managerClass\n\n## Get test manager\n_manager = _managerClass.ManagerClass()\n\n## Get manager's methods\nlocals()['StartTestFile'] = _manager.StartTestFile\nlocals()['GetRunningTestFiles'] = _manager.GetRunningTestFiles\nlocals()['WaitTestFile'] = _manager.WaitTestFile\nlocals()['Help'] = _manager.Help\nlocals()['GetLastResult'] = _manager.GetLastResult\n","sub_path":"legacy/TM3.1.0.454/common/OpTest_python26/testManager.py","file_name":"testManager.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"122738749","text":"import argparse\nimport telnetlib\nimport csv\nimport signal\nimport time\n\ndef interrupted(signum, fram):\n\tpass\n\nsignal.signal(signal.SIGALRM, interrupted)\n\nclass Scanner:\n\n\tdef __init__(self, hostname, port, waitTime=5, signalStrength=-20):\n\t\tself.host = hostname\n\t\tself.port = port\n\t\tself.waitTime = waitTime\n\t\tself.signalStrength = signalStrength\n\n\tdef _update(self, msg):\n\t\t\"\"\"\n\t\tupdate the frequency/mode GQRX is listening to\n\t\t\"\"\"\n\t\ttry:\n\t\t\ttn = telnetlib.Telnet(self.host, self.port)\n\t\texcept Exception as e:\n\t\t\tprint(\"Error connecting to \" + self.host + \":\" + str(self.port) + \"\\n\\t\" + str(e))\n\t\t\texit()\n\t\ttn.write(('%s\\n' % msg).encode('ascii'))\n\t\tresponse = tn.read_some().decode('ascii').strip()\n\t\ttn.write('q\\n'.encode('ascii'))\n\t\treturn response\n\n\tdef scan(self):\n\t\t\"\"\"\n\t\tloop over the frequencies in the list,\n\t\tand stop if the frequency is active (signal strength is high enough)\n\t\t\"\"\"\n\t\twhile(1):\n\t\t\tfor freq in self.freqs.keys():\n\t\t\t\tout = [self.freqs[freq]['tag'], freq]\n\t\t\t\tout.append(self._set_freq(freq))\n\t\t\t\tout.append(self._set_mode(self.freqs[freq]['mode']))\n\t\t\t\tout.append(self._set_squelch(self.signalStrength))\n\t\t\t\tprint('\\t'.join([str(x) for x in out]))\n\t\t\t\ttime.sleep(1)\n\t\t\t\tif float(self._get_level()) >= self.signalStrength:\n\t\t\t\t\ttimenow = 
str(time.localtime().tm_hour) + ':' + str(time.localtime().tm_min)\n\t\t\t\t\tprint('SIGNAL!', timenow, freq, self.freqs[freq]['tag'])\n\t\t\t\t\twhile float(self._get_level()) >= self.signalStrength:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tsignal.alarm(self.waitTime)\n\t\t\t\t\t\t\tkey = input()\n\t\t\t\t\t\t\tif key == '':\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tpass\n\n\n\tdef scan_range(self, minfreq, maxfreq, mode, step=500, save = None):\n\t\t\"\"\"\n\t\tScan a range of frequencies\n\n\t\t:param minfreq: lower frequency\n\t\t:param maxfreq: upper frequency\n\t\t:param mode: mode to scan in\n\t\t:param save: (optional) a txt file to save the active frequencies to\n\t\t:return: none\n\n\t\t\"\"\"\n\t\tminfreq = str(float(minfreq) * 1e5)\n\t\tminfreq = int(minfreq.replace('.', ''))\n\n\t\tmaxfreq = str(float(maxfreq) * 1e5)\n\t\tmaxfreq = int(maxfreq.replace('.', ''))\n\n\t\tif save is not None:\n\t\t\tpass\n\n\t\telse:\n\t\t\tfreq = minfreq\n\t\t\twhile(1):\n\t\t\t\tif freq <= maxfreq:\n\n\t\t\t\t\tself._set_freq(freq)\n\t\t\t\t\tself._set_mode(mode)\n\t\t\t\t\tself._set_squelch(self.signalStrength)\n\t\t\t\t\ttime.sleep(0.5)\n\t\t\t\t\tif float(self._get_level()) >= self.signalStrength:\n\t\t\t\t\t\ttimenow = str(time.localtime().tm_hour) + ':' + str(time.localtime().tm_min)\n\t\t\t\t\t\tprint(timenow, freq)\n\t\t\t\t\t\tprint(\"Press enter to continue scanning\")\n\t\t\t\t\t\twhile float(self._get_level()) >= self.signalStrength:\n\t\t\t\t\t\t\tkey = input()\n\t\t\t\t\t\t\tif key == '':\n\t\t\t\t\t\t\t\tfreq = freq + step\n\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\telse:\n\t\t\t\t\t\tfreq = freq + step\n\t\t\t\telse:\n\t\t\t\t\tfreq = minfreq\n\n\n\t\tpass\n\n\tdef load(self, freq_csv, delimiter=','):\n\t\t\"\"\"\n\t\tread the csv file with the frequencies & modes\n\t\tin it into a dict{} where keys are the freq and\n\t\tthe value is a dict with the mode and a tag\n\t\t\"\"\"\n\t\tself.freqs = {}\n\t\twith open(freq_csv, 'r') as csvfile:\n\t\t\treader = csv.reader(csvfile, delimiter=delimiter)\n\t\t\tfor row in reader:\n\t\t\t\tif not row:\n\t\t\t\t\tcontinue\n\t\t\t\tfreq = str(float(row[0])*1e5)\t \t\t\t\t\t# 1e5 isn't good\n\t\t\t\tfreq = int(freq.replace('.', '')) \t\t\t\t\t# converted to hz\n\t\t\t\tprint(row)\n\t\t\t\tif len(row) == 2:\n\t\t\t\t\tself.freqs[freq] = {'mode': row[1], 'tag': None}\n\t\t\t\telif len(row) > 2:\n\t\t\t\t\tself.freqs[freq] = {'mode' : row[1], 'tag': ', '.join(row[2:])}\t\t# add the freq to the dict as a key and the mode as the value\n\n\tdef _set_freq(self, freq):\n\t\treturn self._update(\"F %s\" % freq)\n\n\tdef _set_mode(self, mode):\n\t\treturn self._update(\"M %s\" % mode)\n\n\tdef _set_squelch(self, sql):\n\t\treturn self._update(\"L SQL %s\" % sql)\n\n\tdef _get_level(self):\n\t\treturn self._update(\"l\")\n\n\tdef _get_mode(self):\n\t\treturn self._update('m')\n\ndef parse_args():\n\tap = argparse.ArgumentParser()\n\tap.add_argument('-c', '--csv', help='CSV file to parse', default='freq.csv')\n\tap.add_argument('-d', '--delimiter', help='CSV Delimiter', default=',')\n\tap.add_argument('-i', '--hostname', help='IP or hostname for gqrx', default='127.0.0.1')\n\tap.add_argument('-p', '--port', help='Port for gqrx', default=7356)\n\treturn ap.parse_args()\n\nif __name__ == \"__main__\":\n\targs = parse_args()\n\tscanner = Scanner(\n\t\thostname=args.hostname,\n\t\tport=args.port\n\t)\n\tscanner.load(args.csv, args.delimiter)\n\tif not scanner.freqs:\n\t\traise RuntimeError('No frequencies found in 
{}'.format(args.csv))\n\tscanner.scan()\n","sub_path":"gqrx_scan.py","file_name":"gqrx_scan.py","file_ext":"py","file_size_in_byte":4395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"606142472","text":"import torch\nimport torch.nn as nn\n\n\nclass AlexNet(nn.Module):\n def __init__(self):\n super(AlexNet, self).__init__()\n\n self.feature = nn.Sequential(\n nn.Conv2d(3, 96, 11, 4, 2),\n # output h = (h + 2 * padding - kernel_size) / stride + 1\n nn.ReLU(inplace=True),\n nn.MaxPool2d(3, 2),\n nn.Conv2d(96, 256, 5, 2),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(3, 2),\n nn.Conv2d(256, 384, 3, 2),\n nn.ReLU(inplace=True),\n nn.Conv2d(384, 384, 3, 2),\n nn.ReLU(inplace=True),\n nn.Conv2d(384, 256, 3, 2),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(3, 2)\n )\n # get a output size you wanted.\n # If the input is N * C * H * W. And you parameter is (6, 6)\n # output size is N * C * 6 * 6, to keep different input sizes get the same output sizes.\n self.avg_pool = nn.AdaptiveAvgPool2d((6, 6))\n self.classifier = nn.Sequential(\n nn.Dropout(p=0.5),\n nn.Linear(256 * 6 * 6, 4096),\n nn.ReLU(),\n nn.Dropout(0.5),\n nn.Linear(4096, 4096),\n nn.ReLU(),\n nn.Linear(4096, 10),\n )\n\n def forward(self, x):\n x = self.feature(x)\n x = self.avg_pool(x)\n x = torch.flatten(x, 1)\n x = self.classifier(x)\n return x\n","sub_path":"AlexNet/alexnet.py","file_name":"alexnet.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"278550933","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/simpleweb/admin/plugins/run.py\n# Compiled at: 2007-01-10 11:07:04\nimport sys, simpleweb\n\ndef run(name, args):\n \"\"\"Usage: simpleweb-admin run \n\nStart the simpleweb application in the current directory, \nin the internal development web server\n \"\"\"\n if len(args) > 0:\n simpleweb.utils.msg_err(\"'%s' takes no arguments\" % name)\n sys.exit(0)\n simpleweb.run()","sub_path":"pycfiles/simpleweb-0.7.3-py2.4/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"491867064","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2004-2010 Tiny SPRL ().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see .\n#\n##############################################################################\nimport time\nimport netsvc\n\nfrom openerp.osv import fields, osv\nfrom openerp.tools.translate import _\nimport openerp.addons.decimal_precision as dp\n\nclass mrp_partially_close(osv.osv_memory):\n _name = \"mrp.partially.close\"\n\n def default_get(self, cr, uid, fields, context):\n \"\"\" \n To get default values for the object.\n \"\"\"\n prod_obj = self.pool.get('mrp.production')\n production_id = context and context.get('active_id', False) or False\n res = super(mrp_partially_close, self).default_get(cr, uid, fields, context=context)\n assert production_id, \"Production Id should be specified in context as a Active ID.\"\n prod = prod_obj.browse(cr, uid, production_id, context=context)\n scrapped_qty = 0.0\n for wo in prod.workcenter_lines:\n for mrej in wo.moves_rejection:\n scrapped_qty += mrej.s_rejected_qty or 0.0\n\n already_produced_qty = prod.already_produced_qty\n if 'qty' in fields:\n res.update({'qty': prod.product_qty - (already_produced_qty + scrapped_qty)})\n if 'total_qty' in fields:\n res.update({'total_qty': prod.product_qty})\n if 'already_produced_qty' in fields:\n res.update({'already_produced_qty': already_produced_qty})\n if 'remain_qty' in fields:\n res.update({'remain_qty': prod.product_qty - (already_produced_qty + scrapped_qty)})\n if 'scraped_qty' in fields:\n res.update({'scraped_qty': scrapped_qty})\n return res\n\n _columns = {\n 'qty': fields.float('Produce Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),\n 'total_qty': fields.float('Total Quantity', digits_compute=dp.get_precision('Product Unit of Measure')),\n 'scraped_qty': fields.float('Scrap Quantity', digits_compute=dp.get_precision('Product Unit of Measure')),\n 'already_produced_qty': fields.float('Already Produced Quantity', digits_compute=dp.get_precision('Product Unit of Measure')),\n 'remain_qty': fields.float('Remain Produce Quantity', digits_compute=dp.get_precision('Product Unit of Measure')),\n }\n\n# def onchange_qty(self, cr, uid, ids, scraped_qty, remain_qty, qty, context=None):\n# \"\"\"\n# Process\n# - To update scraped quantity.\n# \"\"\"\n# return {'value':{'scraped_qty': remain_qty - qty}}\n\n def _prepare_order_line_move(self, cr, uid, production, picking_id, scrap_qty, context=None):\n \"\"\"\n -Process\n -create scrap move from stock\n Source Location : Store\n Destination Location: Scrap\n \"\"\"\n location_obj = self.pool.get('stock.location')\n scrap_location_ids = location_obj.search(cr, uid, [('scrap_location', '=', True)], context=context)\n if not scrap_location_ids:\n raise osv.except_osv(_('Scrap Location not found!'), _('Atleast define one location for scrap.'))\n return {\n 'name': production.name,\n 'picking_id': picking_id,\n 'product_id': production.product_id.id,\n 'date': time.strftime('%Y-%m-%d %H:%M:%S'),\n 'date_expected': time.strftime('%Y-%m-%d %H:%M:%S'),\n 'product_qty': scrap_qty,\n 'product_uom': production.product_uom.id,\n 'product_uos_qty': scrap_qty,\n 'product_uos': production.product_uom.id,\n 'location_id': production.location_dest_id.id,\n 'location_dest_id': scrap_location_ids[0],\n 'tracking_id': False,\n 'state': 'draft',\n 'company_id': production.company_id.id,\n 'price_unit': production.product_id.standard_price or 0.0\n }\n\n def _prepare_order_picking(self, cr, uid, production, scrap_qty, context=None):\n \"\"\"\n -Process\n -create Picking for scrap move\n \"\"\"\n pick_name = 
self.pool.get('ir.sequence').get(cr, uid, 'stock.picking')\n        return {\n            'name': pick_name,\n            'origin': production.name,\n            'date': time.strftime('%Y-%m-%d %H:%M:%S'),\n            'type': 'internal',\n            'state': 'draft',\n            'move_type': 'one',\n            'note': 'Scrap Order:-' + production.product_id.name + ':' + str(scrap_qty) + ':' + production.product_uom.name,\n            'invoice_state': 'none',\n            'company_id': production.company_id.id,\n        }\n\n    def do_produce(self, cr, uid, ids, context=None):\n        \"\"\"\n        Process\n        -Pass remain production qty to action_produce method with consume_produce mode.\n        -generate scrap order\n        -attached scrap order to production order\n        -attached scrap quantity to production order\n        \"\"\"\n        prod_obj = self.pool.get('mrp.production')\n        pick_obj = self.pool.get('stock.picking')\n        move_obj = self.pool.get('stock.move')\n        wf_service = netsvc.LocalService(\"workflow\")\n        production_id = context.get('active_id', False)\n        assert production_id, \"Production Id should be specified in context as a Active ID.\"\n        wizard = self.browse(cr, uid, ids[0], context=context)\n\n        prod = prod_obj.browse(cr, uid, production_id, context=context)\n        remain_qty = prod.product_qty - (prod.already_produced_qty + prod.scraped_qty)\n\n        partially_qty = wizard.qty\n        #cannot be negative\n        if partially_qty < 0.0:\n            raise osv.except_osv(_('Warning!'), _('Provide proper value of partially qty(%s)' % (partially_qty)))\n        if partially_qty > remain_qty:\n            raise osv.except_osv(_('Over Limit Quantity!'), _('Wizard partially quantity(%s) is greater than remaining quantity(%s)' % (partially_qty, remain_qty)))\n\n        self.pool.get('mrp.production').action_produce(cr, uid, production_id, remain_qty, 'consume_produce', context=context)\n\n        scrap_qty = remain_qty - partially_qty\n        #Scrap order will be generated only if there is scrap quantity.\n        if scrap_qty > 0:\n            picking_id = pick_obj.create(cr, uid, self._prepare_order_picking(cr, uid, prod, scrap_qty, context=context), context=context)\n            move_obj.create(cr, uid, self._prepare_order_line_move(cr, uid, prod, picking_id, scrap_qty, context=context), context=context)\n            prod.write({'scrap_order_id':picking_id, 'scraped_qty': scrap_qty}) \n\n\n            #Picking Directly Done\n            wf_service.trg_validate(uid, 'stock.picking', picking_id, 'button_confirm', cr)\n            pick_obj.action_move(cr, uid, [picking_id], context)\n            wf_service.trg_validate(uid, 'stock.picking', picking_id, 'button_done', cr)\n\n        return {}\n\nmrp_partially_close()\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"l10n_in_mrp_subcontract/wizard/mrp_partially_close.py","file_name":"mrp_partially_close.py","file_ext":"py","file_size_in_byte":7758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"360465685","text":"import pickle\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport h5_storage\nimport myplotstyle as ms\n\nplt.close('all')\n\nsnapshot_file = '/sf/data/measurements/2021/05/19/20210519_121718_SARBD02-DSCR050_camera_snapshot.h5'\n\nimage_dict = h5_storage.loadH5Recursive(snapshot_file)\n\nx_axis = image_dict['camera1']['x_axis']\ny_axis = image_dict['camera1']['y_axis']\nimage = image_dict['camera1']['image']\n\nwith open('./bytes.pkl', 'rb') as f:\n    bytes = pickle.load(f)\narr0 = np.frombuffer(bytes, dtype=np.uint16)\narr = arr0.reshape([2160, 2560])\n\n\nms.figure('Background')\nsp = ms.subplot_factory(1,1, False)(1)\n\nsp.imshow(arr, aspect='auto')\n\nms.figure('Saved')\nsp = ms.subplot_factory(1,1, False)(1)\n\nsp.imshow(image, 
aspect='auto')\n\nplt.show()\n\n","sub_path":"053a_plot_bad_bg.py","file_name":"053a_plot_bad_bg.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"245602573","text":"#!/usr/bin/env python\n\"\"\"cdpchaostoolkit builder and installer\"\"\"\n\nimport sys\nimport io\nfrom os.path import abspath, dirname, join, normpath\n\nimport setuptools\n\n\n\nname = 'cdpchaostoolkit'\ndesc = 'CDP Chaos Engineering Toolkit'\n\nclassifiers = [\n\n\n    'Programming Language :: Python :: 3',\n    'Programming Language :: Python :: 3.5',\n    'Programming Language :: Python :: 3.6',\n    'Programming Language :: Python :: 3.7',\n    'Programming Language :: Python :: Implementation',\n    'Programming Language :: Python :: Implementation :: CPython',\n\n]\nauthor = 'vzt-cdp'\nauthor_email = 'suchetha.panduranga@verizonconnect.com'\nlicense = 'Apache Software License 2.0'\npackages = [\n    'cdpchaostoolkit'\n]\n\ninstall_require = [\n    'click>=7.0',\n    'click-plugins>=1.0.4',\n    'logzero>=1.5.0',\n    'chaostoolkit-lib>=1.6.0',\n    'requests>=2.21',\n    'python-json-logger>=0.1.11',\n    'vztcdpchaos-report',\n    'vztcdpchaos-slack'\n\n]\n\nsetup_params = dict(\n    name='cdpchaostoolkit',\n    version='1.4.1',\n    description=\"CDP Chaostoolkit - Modified\",\n\n\n    classifiers=classifiers,\n    author=author,\n    author_email=author_email,\n\n    license=license,\n    packages=packages,\n    entry_points={'console_scripts': ['chaos = cdpchaostoolkit.__main__:cli']},\n    include_package_data=True,\n    install_requires=install_require,\n\n\n    python_requires='>=3.5.*'\n)\n\n\ndef main():\n    \"\"\"Package installation entry point.\"\"\"\n    setuptools.setup(**setup_params)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"pypi_install_script/cdpchaostoolkit-1.4.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"565906069","text":"import pygame, sys\nimport random\nfrom pygame.locals import *\n\npygame.init() # screen initialization\n\nFPS = 30\nfpsClock = pygame.time.Clock()\n\nDISPLAYNOTE = pygame.display.set_mode((350, 600))\npygame.display.set_caption(\"NotePlay\")\n\ngame_playing = False\n\n#MAIN GAME CLASS\nclass PlayGame(object):\n\n    def __init__(self, hero, enemies, over_img, heart_img):\n\n        self.score = 0\n\n        self.hero = hero\n        self.hero_move = [0, 0]\n\n        self.enemies = enemies\n\n        self.over_img = over_img\n        self.heart_img = heart_img\n\n        self.game_over = False\n\n    # function that draws the score\n    def write_score(self, note, x, y):\n        scoreFont = pygame.font.Font('freesansbold.ttf', 15)\n        txt = scoreFont.render(\"SCORE:\" + str(self.score), True, (0, 0, 0))\n        note.blit(txt, (x, y))\n\n    # function that draws the enemy planes\n    def draw_enemies(self, note):\n        for p in self.enemies:\n            note.blit(p.img, p.pos)\n            p.move_down()\n\n            if p.pos[1] > note.get_size()[1]:\n                p.destroyed()\n\n    # hero movement and drawing function\n    def draw_hero(self, note):\n        if self.hero_move[0] > 0: self.hero.move_right()\n        if self.hero_move[0] < 0: self.hero.move_left()\n        if self.hero_move[1] > 0: self.hero.move_down()\n        if self.hero_move[1] < 0: self.hero.move_up()\n        note.blit(self.hero.img, self.hero.pos)\n\n    # missile movement and drawing\n    def draw_missiles(self, note, color, size):\n        self.hero.move_missile()\n        for m in self.hero.missile:\n            pygame.draw.circle(note, color, m, size, 0)\n\n        # remove missiles that went off screen\n        self.hero.del_missile()\n\n    # enemy/missile collision handling\n    def collision_missile(self, gain):\n        for m in self.hero.missile:\n            for e in 
self.enemies:\n                if m[0] > e.pos[0] \\\n                    and m[0] < e.pos[0] + e.size[0] \\\n                    and m[1] > e.pos[1] \\\n                    and m[1] < e.pos[1] + e.size[1]:\n                    # reset the enemy plane\n                    e.destroyed()\n                    # remove the collided missile\n                    m[1] = -10\n                    # increase the score\n                    self.score += gain\n\n    # enemy/hero collision handling\n    def collision_hero(self):\n        for e in self.enemies:\n            if self.hero.pos[0] < e.pos[0] + e.size[0] \\\n                and self.hero.pos[0] + self.hero.size[0] > e.pos[0] \\\n                and self.hero.pos[1] < e.pos[1] + e.size[1] \\\n                and self.hero.pos[1] + self.hero.size[1] > e.pos[1]:\n                # reset the enemy plane\n                e.destroyed()\n                # decrease hearts\n                self.hero.destroyed()\n\n    # draw hearts\n    def draw_heart(self, note):\n        for i in range(self.hero.heart):\n            note.blit(self.heart_img, (10 + i * 25, 10))\n\n    # game over check\n    def check_game_over(self, note):\n        if self.hero.heart < 1:\n            self.game_over = True\n            note.fill((210, 210, 200))\n            note.blit(self.over_img, (2, 200))\n\n\n# plane class\nclass Plane(object):\n\n    def __init__(self, pos, speed, img):\n        self.pos = pos\n        self.speed = speed\n        self.img = img\n        self.size = self.img.get_rect().size\n\n    def move_right(self):\n        self.pos[0] += self.speed\n\n    def move_left(self):\n        self.pos[0] -= self.speed\n\n    def move_up(self):\n        self.pos[1] -= self.speed\n\n    def move_down(self):\n        self.pos[1] += self.speed\n\n    def destroyed(self):\n        self.pos[0] = random.randrange(0, 300)\n        self.pos[1] = 0\n        self.speed = random.randrange(1, 5)\n\n# hero class\nclass Hero(Plane):\n\n\n    def __init__(self, pos, speed, m_speed, heart, img):\n\n        self.pos = pos\n        self.speed = speed\n        self.missile = []\n        self.missile_speed = m_speed\n        self.img = img\n        self.size = self.img.get_rect().size\n        self.heart = heart\n\n    def create_missile(self):\n        self.missile.append([int(self.pos[0] + self.size[0]/2), int(self.pos[1])])\n\n    def move_missile(self):\n        for m in self.missile:\n            m[1] -= self.missile_speed\n\n    def del_missile(self):\n        temp = []\n        for m in self.missile:\n            if m[1] > 0:\n                temp.append(m)\n        self.missile = temp\n\n    def destroyed(self):\n        self.heart -= 1\n\n\n\n# game MAIN PART\nwhile True:\n\n    if not game_playing:\n        ###############\n        # GAME START! 
#\n        ###############\n        # create the hero\n        hero = Hero([150, 500], 5, 5, 5, pygame.image.load(\"hero.png\"))\n        # create the enemy planes\n        enemies = []\n        enemies.append(Plane([150, 0], 1, pygame.image.load(\"plane.png\")))\n        enemies.append(Plane([100, 0], 2, pygame.image.load(\"plane.png\")))\n        enemies.append(Plane([200, 0], 5, pygame.image.load(\"plane.png\")))\n\n        game = PlayGame(hero, enemies, pygame.image.load(\"gameover.png\"), pygame.image.load(\"heart.png\"))\n        game_playing = True\n\n    # fill the background color\n    DISPLAYNOTE.fill((210, 210, 200))\n\n    # draw the enemy planes\n    game.draw_enemies(DISPLAYNOTE)\n\n    # move and draw the hero\n    game.draw_hero(DISPLAYNOTE)\n\n    # move and draw the missiles\n    game.draw_missiles(DISPLAYNOTE, (0, 0, 0), 5)\n\n    # enemy/missile collision handling\n    game.collision_missile(5)\n\n    # enemy/hero collision handling\n    game.collision_hero()\n\n    # draw hearts\n    game.draw_heart(DISPLAYNOTE)\n\n    # GAME OVER\n    game.check_game_over(DISPLAYNOTE)\n\n\n    # draw the score\n    game.write_score(DISPLAYNOTE, 250, 15)\n\n    # handle controls\n    for event in pygame.event.get():\n        if event.type == QUIT:\n            pygame.quit()\n            sys.exit()\n        if event.type == KEYDOWN:\n            if event.key == K_LEFT:\n                game.hero_move[0] = -1\n            elif event.key == K_RIGHT:\n                game.hero_move[0] = 1\n            elif event.key == K_UP:\n                game.hero_move[1] = -1\n            elif event.key == K_DOWN:\n                game.hero_move[1] = 1\n            elif event.key == K_SPACE:\n                if game.game_over:\n                    game_playing = False\n                else:\n                    game.hero.create_missile()\n\n        elif event.type == KEYUP:\n            if event.key == K_LEFT:\n                game.hero_move[0] = 0\n            elif event.key == K_RIGHT:\n                game.hero_move[0] = 0\n            elif event.key == K_UP:\n                game.hero_move[1] = 0\n            elif event.key == K_DOWN:\n                game.hero_move[1] = 0\n\n    # update the screen\n    pygame.display.update()\n    fpsClock.tick(FPS)\n","sub_path":"main_oop.py","file_name":"main_oop.py","file_ext":"py","file_size_in_byte":6794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"566964156","text":"\ndef missing_number(x):\n    if x == 0:\n        return 2\n    n = []\n    for i in range(1, 99999999):\n        if len(str(i**2)) <= x:\n            continue\n        number = int(str(i**2)[:-x])\n        if not len(n):\n            n.append(number)\n        elif n[-1] == number:\n            continue\n        else:\n            if number - n[-1] == 0 or number - n[-1] == 1:\n                n.append(number)\n            else:\n                return n[-1] + 1\n    return None\n\n\nif __name__ == '__main__':\n    for i in range(13):\n        print(missing_number(i))\n\n","sub_path":"missing_numbers/missing_numbers_kata.py","file_name":"missing_numbers_kata.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"248763419","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.15-x86_64/egg/HelloBikeLibrary2/request.py\n# Compiled at: 2020-03-13 04:56:27\n# Size of source mod 2**32: 4155 bytes\n__version__ = '1.0'\nfrom robot.api import logger\nfrom requests import sessions\nfrom HelloBikeLibrary2.data_conversion import soa_loads\nimport json\n\nclass Request(object):\n\n    def request_client(self, url='https://fox-backend.hellobike.cn/gct/soarequest', method='post', **kwargs):\n        \"\"\"\n        Supports RPC requests and SOAP requests.\n        Supports encrypted HTTP requests; pass the parameter encode=True.\n        The return value is:\n        status code, response content of the request\n\n        Example:\n        |$(content) |request client | http://10.111.30.72:8099/api/accountbalance\n        \"\"\"\n        logger.info(url)\n        logger.info(kwargs['data'])\n        with sessions.Session() as (session):\n            if 'encode' in kwargs:\n                if kwargs['encode']:\n                    body = dict(header={}, body=(kwargs['data']))\n                    rep = 
session.request(url='https://fox-backend.hellobike.cn/fox/encode', method='post', json=body)\n                    header = rep.json()['data']['header']\n                    body = rep.json()['data']['encode']\n                    print(body)\n                    rep = session.request(url=url, method=method, json=body, headers=header)\n                    body = dict(header=dict(Chaos='true'), response=(rep.text))\n                    rep = session.request(url='https://fox-backend.hellobike.cn/fox/decode', method='post', json=body)\n                    return rep\n                elif 'data' in kwargs:\n                    if 'iface' in kwargs['data']:\n                        data_struct = kwargs.pop('data')\n                        if 'request' in data_struct:\n                            for key, values in data_struct['request'].items():\n                                if isinstance(values, (list, dict)):\n                                    values = json.dumps(values)\n                                    data_struct['request'][key] = values\n\n                            data_struct['request'] = json.dumps(data_struct['request'])\n                        rep = session.request(url=url, method=method, json=data_struct)\n                        return (rep.status_code, soa_loads(rep.text))\n                    if 'headers' in kwargs:\n                        rep = session.request(url=url, method=method, json=(kwargs['data']), headers=kwargs['headers'])\n                    else:\n                        rep = session.request(url=url, method=method, json=(kwargs['data']))\n                    return (rep.status_code, soa_loads(rep.text))\n\n\nif __name__ == '__main__':\n    data = {'env':'fat', \n     'iface':'com.hellobike.ride.api.iface.RideIface', \n     'method':'startRide', \n     'addr':'10.111.14.20:50010', \n     'request':{'arg0': {'startLat':31.1249201, \n                'orderGuid':15838439485081200101051, \n                'bikeNo':'2500500899', \n                'startChannel':4, \n                'startTime':1583843948913, \n                'userGuid':'c8f71e7c8bc049a8988cec062408a570', \n                'posType':0, \n                'startLng':121.3602946}}}\n    headers = {'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36'}\n    request = Request()\n    url = 'https://fat-bike.hellobike.com/api'\n    print(request.request_client(data=data))","sub_path":"pycfiles/HelloBikeLibrary2-1.0.0-py3.7/request.cpython-37.py","file_name":"request.cpython-37.py","file_ext":"py","file_size_in_byte":3587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"451473743","text":"import os\nimport random\nimport string\nfrom flask import *\nimport sqlite3\n\napp = Flask(__name__)\nIDLength = 10\nhomeDir = \"/opt/simcraft.me\"\nJOB_STATE_QUEUED = 0\n\nstorage = {}\n\n@app.route('/favicon.ico') #icon\ndef favicon():\n\treturn send_from_directory(os.path.join(app.root_path, 'static'),\n\t\t\t\t\t\t 'favicon.ico',mimetype='image/vnd.microsoft.icon')\n@app.route('/') # index.html serve\ndef index():\n\treturn render_template('index.html')\n\n@app.route('/failed') #failed.html serve\ndef page_not_found2():\n\treturn render_template('failed.html')\n\n@app.errorhandler(404) #404 error handler, serve custom 404 page\ndef page_not_found(e):\n\treturn render_template('404.html'), 404\n\t\n@app.errorhandler(401) #401 error handler, serve custom 401 page\ndef not_auth(e):\n\treturn render_template('401.html'), 401\n\n@app.route('/api/1/sim', methods = ['POST']) #API handler for SIM\ndef postJSONHandler():\n\tif(request.is_json):\n\t\treportID = 
generateID()\n\t\tresult = validateJSON(request.get_json())\n\t\taddJob(reportID, result[0],result[1],result[2],result[3],result[4],result[5],result[6], result[7])\n\t\treturn json.dumps({'ReportID': reportID})\n\telse:\n\t\treturn '404 No JSON'\n\ndef get_job_status(report_id): #query database for current status\n\tcur = db.cursor()\n\tprint(report_id)\n\tcur.execute(\"SELECT status FROM simc_jobs WHERE reportID=? LIMIT 1;\", (report_id,))\n\tstatus = cur.fetchone()\n\treturn status\n\n@app.route('/api/1/status', methods = ['POST']) #API Handler for STATUS (calls get_job_status(report_id))\ndef statusCheck():\n\tif(request.is_json):\n\t\tcontent = request.get_json()\n\t\tprint(content)\n\t\treportID = content['reportID']\n\t\tstatus = get_job_status(reportID)\n\t\tif status == 2:\n\t\t\treturn json.dumps({'Status':status,'URL':'http://simcraft.me/report/%s' % reportID})\n\t\treturn json.dumps({'Status':status})\n\n@app.route('/api/1/xml',methods = ['POST']) #API Handler for XML (looks for reportID.xml in directory, returns as datastream)\ndef fetchXML():\n\tif(request.is_json):\n\t\tcontent = request.get_json()\n\t\tif content['reportID']:\n\t\t\treturn send_from_directory('reports',content['reportID']+'.xml')\n\t\n@app.route('/report/') #Serve reportID if found, otherwise 404\ndef report(reportID):\n\treturn send_from_directory('reports', reportID+'.html')\n\n@app.route('/simple') # serve simple.html webpage\ndef simpleSim():\n\treturn render_template('simple.html')\n\n@app.route('/advanced') # serve advanced.html webpage\ndef advancedSim():\n\treturn render_template('advanced.html')\n\n@app.route('/sim', methods = ['POST']) #old method of posting data to SIM (doesnt work, is not called, including for evidence of design modifications :)\ndef sim():\n\tif request.method == 'POST':\n\t\treportID = generateID()\n\t\tresult = validateJSON(request.form)\n\t\taddJob(reportID, result[0],result[1],result[2],result[3],result[4],result[5],result[6],result[7])\n\t\treturn json.dumps({'ReportID': reportID})\n\t\t#return redirect(url_for('report', reportID=reportID))\n\telse:\n\t\treturn redirect(url_for('index'))\n\ndef validateJSON(content): # validate JSON and sanitize input\n\tregion = \"us\"\n\trealm = \"\"\n\tcharacter = \"\"\n\titerations = 4000\n\tfightLen = 300\n\tenemyCount = 1\n\tfightStyle = 0\n\tsimString = \"\"\n\n\ttry:\n\t\tregion = content['region']\n\texcept:\n\t\tpass\n\ttry:\n\t\trealm = content['realm']\n\texcept:\n\t\tpass\n\ttry:\n\t\tcharacter = content['character']\n\texcept:\n\t\tpass\n\ttry:\n\t\titerations = int(content['iterations'])\n\texcept:\n\t\tpass\n\ttry:\n\t\tfightLen = int(content['fightLen'])\n\texcept:\n\t\tpass\n\ttry:\n\t\tenemyCount = int(content['enemyCount'])\n\texcept:\n\t\tpass\n\ttry:\n\t\tfightStyle = int(content['fightStyle'])\n\texcept:\n\t\tpass\n\ttry:\n\t\tsimString = content['simString']\n\texcept:\n\t\tpass\n\n\tresult = [region, realm, character, iterations, fightLen, enemyCount, fightStyle, simString]\n\tprint(\"RESULTS:\",result)\n\treturn result\n\ndef generateID(): #generate a unique reportID, return it\n\treturn ''.join(random.choice(string.ascii_lowercase + string.ascii_uppercase + string.digits) for _ in range(20))\n\ndef init_db(): # init database if table doesnt exist\n\tcur = db.cursor()\n\tcur.execute('''\n\t\tCREATE TABLE IF NOT EXISTS simc_jobs \n\t\t(id INTEGER PRIMARY KEY AUTOINCREMENT,\n\t\treportID TEXT,\n\t\tstatus INTEGER DEFAULT 0,\n\t\tcharRegion TEXT,\n\t\tcharRealm TEXT,\n\t\tcharName TEXT,\n\t\tnumIterations 
INTEGER,\n\t\tfightLen INTEGER,\n\t\tenemyCount INTEGER,\n\t\tfightStyle INTEGER,\n\t\tsimString TEXT,\n\t\tCONSTRAINT reportID_unique UNIQUE (reportID));\n\t''')\n\tdb.commit()\n\ndef addJob(report_id, char_region, char_realm, char_name, num_iterations, fight_len, enemy_count, fight_style, sim_string):\n\tcur = db.cursor() #add Job to database when called\n\tcur.execute('''\n\t\tINSERT INTO simc_jobs \n\t\t(reportID,\n\t\tstatus,\n\t\tcharRegion,\n\t\tcharRealm,\n\t\tcharName,\n\t\tnumIterations,\n\t\tfightLen,\n\t\tenemyCount,\n\t\tfightStyle,\n\t\tsimString)\n\t\tVALUES \n\t\t(?,?,?,?,?,?,?,?,?,?);\n\t\t''',\n\t\t(report_id,\n\t\tJOB_STATE_QUEUED,\n\t\tchar_region, \n\t\tchar_realm, \n\t\tchar_name, \n\t\tnum_iterations, \n\t\tfight_len, \n\t\tenemy_count, \n\t\tfight_style,\n\t\tsim_string)\n\t)\n\tdb.commit()\n\n#command line params but inline, such as app.run()\ndb_filename = \"simcraftmeDB.db\"\ndb = sqlite3.connect(db_filename, check_same_thread=False)\ninit_db()\napp.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024\napp.run(host=\"0.0.0.0\", port=int(\"80\"), debug=False)\n\n","sub_path":"serverStuff/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"24388358","text":"import sublime\nimport sublime_plugin\nimport sys\nimport os\n\nif sublime.version().startswith('3'):\n from .core import *\nelse:\n from core import *\n\n\n# ----------------------------------------------------------\n# Setting\n# ----------------------------------------------------------\ndef get_package_settings():\n setting_name = 'Default.sublime-settings'\n settings = sublime.load_settings(setting_name)\n return settings\n\n\ndef get_settings_param(view, param_name, default=None):\n settings = get_package_settings()\n project_settings = view.settings()\n return project_settings.get(param_name, settings.get(param_name, default))\n\n\n# ----------------------------------------------------------\n# Command\n# ----------------------------------------------------------\nclass LuaFormatCommand(sublime_plugin.TextCommand):\n def run(self, edit):\n # check whether the lua files\n suffix_setting = self.view.settings().get('syntax')\n file_suffix = suffix_setting.split('.')[0]\n if file_suffix[-3:].lower() != 'lua': return\n\n # get content of replacement\n region = sublime.Region(0, self.view.size())\n content = self.view.substr(region)\n\n # get cursor position before the replacement\n selection = self.view.sel()[0].b\n row, col = self.view.rowcol(selection)\n\n # load package settings\n settings = get_package_settings()\n tab_size = settings.get('tab_size', 4)\n separator_exclude = settings.get('separator_exclude', True)\n operator_exclude = settings.get('operator_exclude', True)\n bracket_exclude = settings.get('bracket_exclude', False)\n\n # replace the content after format\n\n formattedContent = lua_format(\n content,\n tab_size=tab_size,\n separator_exclude=separator_exclude,\n operator_exclude=operator_exclude,\n bracket_exclude=bracket_exclude)\n\n # jackysong: remove last \\n so the source file won't grow...\n self.view.replace(edit, region, formattedContent[0:-1])\n\n # deal cursor position\n selection = self.view.full_line(self.view.text_point(row - 1, 0)).b\n cursor_pos = sublime.Region(selection, selection)\n regions = self.view.sel()\n regions.clear()\n regions.add(cursor_pos)\n sublime.set_timeout_async(lambda: self.view.show_at_center(cursor_pos), 0)\n\n\nclass 
LuaFormatOnPreSave(sublime_plugin.EventListener):\n def on_pre_save(self, view):\n settings = get_package_settings()\n if settings.get('auto_format_on_save', False):\n view.run_command(\"lua_format\")\n","sub_path":"IDEs/sublime/shared-pkgs/Packages/LuaFormat/LuaFormat.py","file_name":"LuaFormat.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"233676487","text":"# -*- coding: utf-8 -*-\n\nimport datetime\n\nfrom django.template.defaultfilters import floatformat, truncatewords\nfrom django_rq import job\n\nfrom tunga.settings import TUNGA_URL, SLACK_ATTACHMENT_COLOR_TUNGA, SLACK_ATTACHMENT_COLOR_GREEN, \\\n SLACK_ATTACHMENT_COLOR_BLUE, SLACK_ATTACHMENT_COLOR_NEUTRAL, SLACK_ATTACHMENT_COLOR_RED, \\\n SLACK_STAFF_UPDATES_CHANNEL, SLACK_STAFF_INCOMING_WEBHOOK, SLACK_DEVELOPER_UPDATES_CHANNEL, \\\n SLACK_DEVELOPER_INCOMING_WEBHOOK, SLACK_PMS_UPDATES_CHANNEL, SLACK_STAFF_LEADS_CHANNEL, \\\n SLACK_STAFF_PROJECT_EXECUTION_CHANNEL, SLACK_STAFF_PAYMENTS_CHANNEL, SLACK_STAFF_MISSED_UPDATES_CHANNEL\nfrom tunga_tasks import slugs\nfrom tunga_tasks.models import Task, Participation, Application, ProgressEvent, ProgressReport, TaskInvoice\nfrom tunga_tasks.utils import get_task_integration\nfrom tunga_utils import slack_utils\nfrom tunga_utils.constants import TASK_SCOPE_TASK, TASK_SOURCE_NEW_USER, VISIBILITY_DEVELOPER, STATUS_ACCEPTED, \\\n APP_INTEGRATION_PROVIDER_SLACK, LEGACY_PROGRESS_EVENT_TYPE_PM, LEGACY_PROGRESS_EVENT_TYPE_CLIENT, PAYMENT_METHOD_BANK, \\\n LEGACY_PROGRESS_EVENT_TYPE_MILESTONE_INTERNAL, LEGACY_PROGRESS_EVENT_TYPE_CLIENT_MID_SPRINT\nfrom tunga_utils.helpers import clean_instance, convert_to_text\nfrom tunga_utils.slack_utils import get_user_im_id\n\n\ndef create_task_slack_msg(task, summary='', channel='#general', show_schedule=True, show_contacts=False,\n is_admin=False):\n task_url = '{}/work/{}/'.format(TUNGA_URL, task.id)\n\n attachments = [\n {\n slack_utils.KEY_TITLE: task.summary,\n slack_utils.KEY_TITLE_LINK: task_url,\n slack_utils.KEY_TEXT: task.excerpt or task.summary,\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_TUNGA\n }\n ]\n extra_details = ''\n if task.type:\n extra_details += '*Type*: {}\\n'.format(task.get_type_display())\n if task.skills:\n extra_details += '*Skills*: {}\\n'.format(task.skills_list)\n if task.deadline:\n extra_details += '*Deadline*: {}\\n'.format(task.deadline.strftime(\"%d %b, %Y\"))\n if task.fee:\n amount = task.is_developer_ready and task.pay_dev or task.pay\n extra_details += '*Fee*: EUR {}\\n'.format(floatformat(amount, arg=-2))\n if show_schedule and task.schedule_call_start:\n extra_details += '*Available*: \\nDate: {}\\nTime: {} {} UTC\\n'.format(\n task.schedule_call_start.strftime(\"%d %b, %Y\"),\n task.schedule_call_start.strftime(\"%I:%M%p\"),\n task.schedule_call_end and ' - {}'.format(task.schedule_call_end.strftime(\"%I:%M%p\")) or ''\n )\n if show_contacts:\n extra_details += '*Email*: {}\\n'.format(task.user.email)\n if task.skype_id:\n extra_details += '*Skype ID or Call URL*: {}\\n'.format(task.skype_id)\n if extra_details:\n attachments.append({\n slack_utils.KEY_TEXT: extra_details,\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_GREEN\n })\n if task.deliverables:\n attachments.append({\n slack_utils.KEY_TITLE: 'Deliverables',\n slack_utils.KEY_TEXT: task.deliverables,\n slack_utils.KEY_MRKDWN_IN: 
[slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_BLUE\n })\n if task.stack_description:\n attachments.append({\n slack_utils.KEY_TITLE: 'Tech Stack',\n slack_utils.KEY_TEXT: task.stack_description,\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_NEUTRAL\n })\n if is_admin:\n developers = task.active_participants\n if developers:\n attachments.append({\n slack_utils.KEY_TITLE: 'Developer{}'.format(len(developers) > 1 and 's' or ''),\n slack_utils.KEY_TEXT: '\\n\\n'.join(\n [\n '*Name:* <{}|{}>\\n'\n '*Email:* {}'.format(\n '{}/people/{}'.format(TUNGA_URL, user.username),\n user.display_name.encode('utf-8'),\n user.email)\n\n for user in developers\n ]\n ),\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_RED\n })\n if not summary:\n summary = \"New {} created by {} | <{}|View on Tunga>\".format(\n task.scope == TASK_SCOPE_TASK and 'task' or 'project',\n task.user.display_name.encode('utf-8'), task_url)\n\n return {\n slack_utils.KEY_TEXT: summary,\n slack_utils.KEY_CHANNEL: channel,\n slack_utils.KEY_ATTACHMENTS: attachments\n }\n\n\ndef create_task_stakeholders_attachment_slack(task, show_title=True):\n task_url = '{}/work/{}'.format(TUNGA_URL, task.id)\n owner = task.owner or task.user\n body_text = \"*Project Owner:*\\n\" \\\n \" {} {}\".format(owner.display_name.encode('utf-8'), owner.email)\n\n if task.pm:\n body_text += \"\\n*Project Manager:*\\n\" \\\n \"{} {} {}\".format(\n task.pm.display_name.encode('utf-8'),\n task.pm.email,\n task.pm.profile and task.pm.profile.phone_number and task.pm.profile.phone_number or ''\n )\n\n developers = task.active_participants\n if developers:\n body_text += \"\\n*Developer(s):*\\n\"\n body_text += '\\n'.join(\n '{}. 
{} {} {}'.format(\n idx + 1,\n dev.display_name.encode('utf-8'),\n dev.email,\n dev.profile and dev.profile.phone_number and dev.profile.phone_number or ''\n ) for idx, dev in enumerate(developers)\n )\n attachment = {\n slack_utils.KEY_TEXT: body_text,\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_BLUE\n }\n if show_title:\n attachment[slack_utils.KEY_TITLE] = task.summary\n attachment[slack_utils.KEY_TITLE_LINK] = task_url\n return attachment\n\n\n@job\ndef notify_new_task_admin_slack(instance, new_user=False, completed=False, call_scheduled=False):\n instance = clean_instance(instance, Task)\n task_url = '{}/work/{}/'.format(TUNGA_URL, instance.id)\n\n completed_phrase = ''\n if call_scheduled:\n completed_phrase = 'availability window shared'\n elif completed:\n completed_phrase = 'details completed'\n\n summary = \"{} {} {} by {}{} | <{}|View on Tunga>\".format(\n (completed or call_scheduled) and 'New wizard' or 'New',\n instance.scope == TASK_SCOPE_TASK and 'task' or 'project',\n completed_phrase or 'created',\n instance.user.display_name.encode('utf-8'), new_user and ' (New user)' or '',\n task_url\n )\n slack_msg = create_task_slack_msg(instance, summary=summary, channel=SLACK_STAFF_LEADS_CHANNEL, show_contacts=True,\n is_admin=True)\n slack_utils.send_incoming_webhook(SLACK_STAFF_INCOMING_WEBHOOK, slack_msg)\n\n\n@job\ndef remind_no_task_applications_slack(instance, admin=True):\n instance = clean_instance(instance, Task)\n\n if not instance.is_task:\n return\n task_url = '{}/work/{}/'.format(TUNGA_URL, instance.id)\n new_user = instance.source == TASK_SOURCE_NEW_USER\n\n summary = \"Reminder: No applications yet for {} {} | <{}|View on Tunga>\".format(\n instance.scope == TASK_SCOPE_TASK and 'task' or 'project',\n new_user and admin and ' (New user)' or '',\n task_url\n )\n slack_msg = create_task_slack_msg(\n instance, summary=summary,\n channel=admin and SLACK_STAFF_LEADS_CHANNEL or SLACK_DEVELOPER_UPDATES_CHANNEL,\n show_contacts=admin\n )\n slack_utils.send_incoming_webhook(\n admin and SLACK_STAFF_INCOMING_WEBHOOK or SLACK_DEVELOPER_INCOMING_WEBHOOK,\n slack_msg\n )\n\n\n@job\ndef notify_review_task_admin_slack(instance):\n instance = clean_instance(instance, Task)\n task_url = '{}/work/{}/'.format(TUNGA_URL, instance.id)\n new_user = instance.source == TASK_SOURCE_NEW_USER\n\n summary = \"Reminder: Review {} {} | <{}|View on Tunga>\\nCreated: {}\".format(\n instance.scope == TASK_SCOPE_TASK and 'task' or 'project',\n new_user and ' (New user)' or '',\n task_url,\n instance.created_at.strftime(\"%d %b, %Y\"),\n instance.approved_at and 'Approved: {}'.format(instance.approved_at.strftime(\"%d %b, %Y\")) or '',\n )\n slack_msg = create_task_slack_msg(\n instance, summary=summary,\n channel=SLACK_STAFF_LEADS_CHANNEL,\n show_contacts=True\n )\n slack_utils.send_incoming_webhook(SLACK_STAFF_INCOMING_WEBHOOK, slack_msg)\n\n\n@job\ndef notify_new_task_community_slack(instance):\n instance = clean_instance(instance, Task)\n\n # Notify Devs or PMs via Slack\n if (not instance.is_developer_ready) or (instance.approved and instance.visibility == VISIBILITY_DEVELOPER):\n slack_msg = create_task_slack_msg(\n instance,\n channel=instance.is_developer_ready and SLACK_DEVELOPER_UPDATES_CHANNEL or SLACK_PMS_UPDATES_CHANNEL\n )\n slack_utils.send_incoming_webhook(SLACK_DEVELOPER_INCOMING_WEBHOOK, slack_msg)\n\n\n@job\ndef notify_task_invitation_response_slack(instance):\n instance = clean_instance(instance, Participation)\n\n if not 
slack_utils.is_task_notification_enabled(instance.task, slugs.EVENT_APPLICATION):\n return\n\n task_url = '%s/work/%s/' % (TUNGA_URL, instance.task_id)\n slack_msg = \"Task invitation %s by %s %s\\n\\n<%s|View on Tunga>\" % (\n instance.status == STATUS_ACCEPTED and 'accepted' or 'rejected', instance.user.short_name,\n instance.status == STATUS_ACCEPTED and ':smiley: :fireworks:' or ':unamused:',\n task_url\n )\n slack_utils.send_integration_message(instance.task, message=slack_msg)\n\n\n@job\ndef notify_new_task_application_slack(instance, admin=True):\n instance = clean_instance(instance, Application)\n\n if not slack_utils.is_task_notification_enabled(instance.task, slugs.EVENT_APPLICATION):\n return\n\n application_url = '%s/work/%s/applications/' % (TUNGA_URL, instance.task_id)\n slack_msg = \"New application from %s\" % instance.user.short_name\n attachments = [\n {\n slack_utils.KEY_TITLE: instance.task.summary,\n slack_utils.KEY_TITLE_LINK: application_url,\n slack_utils.KEY_TEXT: '%s%s%s%s\\n\\n<%s|View on Tunga>' %\n (truncatewords(convert_to_text(instance.pitch), 100),\n instance.hours_needed and '\\n*Workload:* {} hrs'.format(instance.hours_needed) or '',\n instance.deliver_at and '\\n*Delivery Date:* {}'.format(\n instance.deliver_at.strftime(\"%d %b, %Y\")\n ) or '',\n instance.remarks and '\\n*Remarks:* {}'.format(\n truncatewords(convert_to_text(instance.remarks), 100)\n ) or '',\n application_url),\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_TUNGA\n }\n ]\n if admin:\n slack_utils.send_incoming_webhook(\n SLACK_STAFF_INCOMING_WEBHOOK,\n {\n slack_utils.KEY_TEXT: slack_msg,\n slack_utils.KEY_ATTACHMENTS: attachments,\n slack_utils.KEY_CHANNEL: SLACK_STAFF_LEADS_CHANNEL\n }\n )\n else:\n slack_utils.send_integration_message(instance.task, message=slack_msg, attachments=attachments)\n\n\n@job\ndef notify_task_application_response_slack(instance, admin=True):\n instance = clean_instance(instance, Application)\n\n application_url = '%s/work/%s/applications/' % (TUNGA_URL, instance.task_id)\n task_url = '%s/work/%s/' % (TUNGA_URL, instance.task.id)\n slack_msg = \"Task Application {} | <{}|View on Tunga>\".format(\n instance.status == STATUS_ACCEPTED and 'accepted' or 'rejected',\n task_url\n )\n\n attachments = [\n {\n slack_utils.KEY_TITLE: instance.task.summary,\n slack_utils.KEY_TITLE_LINK: application_url,\n slack_utils.KEY_TEXT: '%s%s%s%s\\n\\n<%s|View on Tunga>' %\n (truncatewords(convert_to_text(instance.pitch), 100),\n instance.hours_needed and '\\n*Workload:* {} hrs'.format(instance.hours_needed) or '',\n instance.deliver_at and '\\n*Delivery Date:* {}'.format(\n instance.deliver_at.strftime(\"%d %b, %Y\")\n ) or '',\n instance.remarks and '\\n*Remarks:* {}'.format(\n truncatewords(convert_to_text(instance.remarks), 100)\n ) or '',\n application_url),\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_TUNGA\n }\n ]\n if admin:\n slack_utils.send_incoming_webhook(\n SLACK_STAFF_INCOMING_WEBHOOK,\n {\n slack_utils.KEY_TEXT: slack_msg,\n slack_utils.KEY_ATTACHMENTS: attachments,\n slack_utils.KEY_CHANNEL: SLACK_STAFF_LEADS_CHANNEL\n }\n )\n else:\n slack_utils.send_integration_message(instance.task, message=slack_msg, attachments=attachments)\n\n\n@job\ndef remind_progress_event_slack(instance):\n instance = clean_instance(instance, ProgressEvent)\n\n task_integration = get_task_integration(instance.task, APP_INTEGRATION_PROVIDER_SLACK)\n if not task_integration:\n 
return\n\n is_pm_report = instance.type in [LEGACY_PROGRESS_EVENT_TYPE_PM, LEGACY_PROGRESS_EVENT_TYPE_MILESTONE_INTERNAL]\n is_client_report = instance.type in [LEGACY_PROGRESS_EVENT_TYPE_CLIENT, LEGACY_PROGRESS_EVENT_TYPE_CLIENT_MID_SPRINT]\n is_pm_or_client_report = is_pm_report or is_client_report\n is_dev_report = not is_pm_or_client_report\n\n bot_access_token = task_integration.bot_access_token\n if not bot_access_token:\n if is_pm_report or is_dev_report:\n pass\n # TODO: set bot token to Tunga developers slack team token\n return\n\n if is_pm_report and not instance.task.is_project:\n return\n\n pm = instance.task.pm\n if not pm and instance.task.user.is_project_manager:\n pm = instance.task.user\n\n if is_pm_report and not pm:\n return\n\n owner = instance.task.owner\n if not owner:\n owner = instance.task.user\n\n if is_client_report and not owner:\n return\n\n slack_msg = \"{} for \\\"{}\\\" | <{}|{} on Tunga>\".format(\n is_client_report and \"Weekly Survey\" or \"Upcoming {} Update\".format(\n instance.task.is_task and 'Task' or 'Project'\n ),\n instance.task.summary,\n '{}/work/{}/event/{}/'.format(TUNGA_URL, instance.task.id, instance.id),\n is_client_report and \"Take the survey\" or \"Give the update\"\n )\n\n to_emails = []\n if is_pm_report:\n to_emails = [pm.email]\n\n elif is_client_report:\n to_emails = [owner.email]\n if owner.email != instance.task.user.email:\n to_emails.append(instance.task.user.email)\n\n else:\n participants = instance.task.participation_set.filter(status=STATUS_ACCEPTED)\n if participants:\n for participant in participants:\n to_emails.append(participant.user.email)\n\n if to_emails:\n for email in to_emails:\n im_id = get_user_im_id(email, bot_access_token)\n if im_id:\n slack_utils.send_slack_message(bot_access_token, im_id, message=slack_msg)\n\n\ndef create_progress_report_slack_message(instance, updated=False, to_client=False):\n is_pm_report = instance.event.type in [LEGACY_PROGRESS_EVENT_TYPE_PM, LEGACY_PROGRESS_EVENT_TYPE_MILESTONE_INTERNAL]\n is_client_report = instance.event.type in [LEGACY_PROGRESS_EVENT_TYPE_CLIENT, LEGACY_PROGRESS_EVENT_TYPE_CLIENT_MID_SPRINT]\n is_pm_or_client_report = is_pm_report or is_client_report\n is_dev_report = not is_pm_or_client_report\n\n report_url = '%s/work/%s/event/%s/' % (TUNGA_URL, instance.event.task_id, instance.event_id)\n slack_msg = \"{} {} a {} | {}\".format(\n instance.user.display_name.encode('utf-8'),\n updated and 'updated' or 'submitted',\n is_client_report and \"Weekly Survey\" or \"Progress Report\",\n '<{}|View on Tunga>'.format(report_url)\n )\n\n slack_text_suffix = ''\n if not is_client_report:\n slack_text_suffix += '*Status:* {}\\n*Percentage completed:* {}{}'.format(\n instance.get_status_display(), instance.percentage, '%')\n if not to_client:\n if instance.last_deadline_met is not None:\n slack_text_suffix += '\\n*Was the last deadline met?:* {}'.format(\n instance.last_deadline_met and 'Yes' or 'No'\n )\n if instance.next_deadline:\n slack_text_suffix += '\\n*Next deadline:* {}'.format(instance.next_deadline.strftime(\"%d %b, %Y\"))\n if is_client_report:\n if instance.deliverable_satisfaction is not None:\n slack_text_suffix += '\\n*Are you satisfied with the deliverables?:* {}'.format(\n instance.deliverable_satisfaction and 'Yes' or 'No'\n )\n if is_dev_report:\n if instance.stuck_reason:\n slack_text_suffix += '\\n*Reason for being stuck:*\\n {}'.format(\n convert_to_text(instance.get_stuck_reason_display())\n )\n attachments = [\n {\n slack_utils.KEY_TITLE: 
instance.event.task.summary,\n slack_utils.KEY_TITLE_LINK: report_url,\n slack_utils.KEY_TEXT: slack_text_suffix,\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_BLUE\n }\n ]\n\n if not to_client:\n if instance.deadline_miss_communicated is not None:\n attachments.append({\n slack_utils.KEY_TITLE: '{} promptly about not making the deadline?'.format(\n is_client_report and 'Did the project manager/developer(s) inform you' or 'Did you inform the client'),\n slack_utils.KEY_TEXT: '{}'.format(instance.deadline_miss_communicated and 'Yes' or 'No'),\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_RED\n })\n\n if instance.deadline_report:\n attachments.append({\n slack_utils.KEY_TITLE: 'Report about the last deadline:',\n slack_utils.KEY_TEXT: convert_to_text(instance.deadline_report),\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_RED\n })\n\n if is_client_report:\n if instance.rate_deliverables:\n attachments.append({\n slack_utils.KEY_TITLE: 'How would you rate the deliverables on a scale from 1 to 5?',\n slack_utils.KEY_TEXT: '{}/5'.format(instance.rate_deliverables),\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_BLUE\n })\n if instance.pm_communication:\n attachments.append({\n slack_utils.KEY_TITLE: 'Is the communication between you and the project manager/developer(s) going well?',\n slack_utils.KEY_TEXT: '{}'.format(instance.pm_communication and 'Yes' or 'No'),\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_GREEN\n })\n else:\n # Status\n if instance.stuck_details:\n attachments.append({\n slack_utils.KEY_TITLE: 'Explain further why you are stuck/what should be done:',\n slack_utils.KEY_TEXT: convert_to_text(instance.stuck_details),\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_GREEN\n })\n\n if instance.started_at and not to_client:\n attachments.append({\n slack_utils.KEY_TITLE: 'When did you start this sprint/task/project?',\n slack_utils.KEY_TEXT: instance.started_at.strftime(\"%d %b, %Y\"),\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_BLUE\n })\n\n # Last\n if instance.accomplished:\n attachments.append({\n slack_utils.KEY_TITLE: 'What has been accomplished since last update?',\n slack_utils.KEY_TEXT: convert_to_text(instance.accomplished),\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_GREEN\n })\n if instance.rate_deliverables and not to_client:\n attachments.append({\n slack_utils.KEY_TITLE: 'Rate Deliverables:',\n slack_utils.KEY_TEXT: '{}/5'.format(instance.rate_deliverables),\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_RED\n })\n\n # Current\n if instance.todo:\n attachments.append({\n slack_utils.KEY_TITLE: is_dev_report and 'What do you intend to achieve/complete today?' 
or 'What are the next steps?',\n slack_utils.KEY_TEXT: convert_to_text(instance.todo),\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_GREEN\n })\n\n if not to_client:\n # Next\n if instance.next_deadline:\n attachments.append({\n slack_utils.KEY_TITLE: 'When is the next deadline?',\n slack_utils.KEY_TEXT: instance.next_deadline.strftime(\"%d %b, %Y\"),\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_RED\n })\n\n # Keep information about failures to meet deadlines internal\n if instance.next_deadline_meet is not None:\n attachments.append({\n slack_utils.KEY_TITLE: 'Do you anticipate to meet this deadline?',\n slack_utils.KEY_TEXT: '{}'.format(instance.next_deadline_meet and 'Yes' or 'No'),\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_GREEN\n })\n if instance.next_deadline_fail_reason:\n attachments.append({\n slack_utils.KEY_TITLE: 'Why will you not be able to make the next deadline?',\n slack_utils.KEY_TEXT: convert_to_text(instance.next_deadline_fail_reason),\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_RED\n })\n if instance.obstacles:\n attachments.append({\n slack_utils.KEY_TITLE: 'What obstacles are impeding your progress?',\n slack_utils.KEY_TEXT: convert_to_text(instance.obstacles),\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_RED\n })\n if instance.obstacles_prevention:\n attachments.append({\n slack_utils.KEY_TITLE: 'What could have been done to prevent this from happening?',\n slack_utils.KEY_TEXT: convert_to_text(instance.obstacles_prevention),\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_GREEN\n })\n\n if is_pm_report:\n if instance.team_appraisal:\n attachments.append({\n slack_utils.KEY_TITLE: 'Team appraisal:',\n slack_utils.KEY_TEXT: convert_to_text(instance.team_appraisal),\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_NEUTRAL\n })\n\n if instance.remarks:\n attachments.append({\n slack_utils.KEY_TITLE: 'Other remarks or questions',\n slack_utils.KEY_TEXT: convert_to_text(instance.remarks),\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_NEUTRAL\n })\n\n return slack_msg, attachments\n\n\n@job\ndef notify_new_progress_report_slack(instance, updated=False):\n instance = clean_instance(instance, ProgressReport)\n\n is_pm_report = instance.event.type in [LEGACY_PROGRESS_EVENT_TYPE_PM, LEGACY_PROGRESS_EVENT_TYPE_MILESTONE_INTERNAL]\n is_client_report = instance.event.type in [LEGACY_PROGRESS_EVENT_TYPE_CLIENT, LEGACY_PROGRESS_EVENT_TYPE_CLIENT_MID_SPRINT]\n is_pm_or_client_report = is_pm_report or is_client_report\n is_dev_report = not is_pm_or_client_report\n\n # if not (slack_utils.is_task_notification_enabled(instance.event.task, slugs.EVENT_PROGRESS)):\n # return\n\n # All reports go to Tunga #updates Slack\n slack_msg, attachments = create_progress_report_slack_message(instance, updated=updated)\n slack_utils.send_incoming_webhook(SLACK_STAFF_INCOMING_WEBHOOK, {\n slack_utils.KEY_TEXT: slack_msg,\n slack_utils.KEY_CHANNEL: SLACK_STAFF_UPDATES_CHANNEL,\n slack_utils.KEY_ATTACHMENTS: attachments\n })\n\n if is_dev_report:\n # Re-create report for clients\n slack_msg, attachments = create_progress_report_slack_message(instance, 
updated=updated, to_client=True)\n slack_utils.send_integration_message(instance.event.task, message=slack_msg, attachments=attachments)\n\n\n@job\ndef notify_missed_progress_event_slack(instance):\n instance = clean_instance(instance, ProgressEvent)\n\n is_client_report = instance.type in [LEGACY_PROGRESS_EVENT_TYPE_CLIENT, LEGACY_PROGRESS_EVENT_TYPE_CLIENT_MID_SPRINT]\n\n if instance.task.archived or instance.status != \"missed\" or not instance.last_reminder_at:\n return\n\n participants = instance.participants\n if not participants or instance.task.closed:\n # No one to report or task is now closed\n return\n\n target_user = None\n if participants and len(participants) == 1:\n target_user = participants[0]\n\n task_url = '{}/work/{}'.format(TUNGA_URL, instance.task.id)\n slack_msg = \"`Alert (!):` {} {} for \\\"{}\\\" | <{}|View on Tunga>\".format(\n target_user and '{} missed a'.format(target_user.short_name) or 'Missed',\n is_client_report and 'weekly survey' or 'progress report',\n instance.task.summary,\n task_url\n )\n\n attachments = [\n {\n slack_utils.KEY_TITLE: instance.task.summary,\n slack_utils.KEY_TITLE_LINK: task_url,\n slack_utils.KEY_TEXT: '\\n\\n'.join(\n [\n '*Due Date:* {}\\n\\n'\n '*Name:* {}\\n'\n '*Email:* {}{}'.format(\n instance.due_at.strftime(\"%d %b, %Y\"),\n user.display_name.encode('utf-8'),\n user.email,\n user.profile and user.profile.phone_number and '\\n*Phone Number:* {}'.format(\n user.profile.phone_number) or ''\n ) for user in participants\n ]\n ),\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_TUNGA\n }\n ]\n\n slack_utils.send_incoming_webhook(\n SLACK_STAFF_INCOMING_WEBHOOK,\n {\n slack_utils.KEY_TEXT: slack_msg,\n slack_utils.KEY_ATTACHMENTS: attachments,\n slack_utils.KEY_CHANNEL: SLACK_STAFF_MISSED_UPDATES_CHANNEL\n }\n )\n\n # Save notification time\n instance.missed_notification_at = datetime.datetime.now()\n instance.save()\n\n\n@job\ndef notify_progress_report_deadline_missed_slack_admin(instance):\n instance = clean_instance(instance, ProgressReport)\n\n task_url = '{}/work/{}'.format(TUNGA_URL, instance.event.task.id)\n slack_msg = \"`Alert (!):` Follow up on missed deadline for \\\"{}\\\" | <{}|View on Tunga>\".format(\n instance.event.task.summary,\n task_url\n )\n\n attachments = [\n {\n slack_utils.KEY_TITLE: instance.event.task.summary,\n slack_utils.KEY_TITLE_LINK: task_url,\n slack_utils.KEY_TEXT: 'A deadline has been missed on the \"{}\" {}\\n'\n '*Was the client informed beforehand?:* {}\\n'\n 'Please contact the stakeholders.'.format(\n instance.event.task.summary,\n instance.event.task.is_task and 'task' or 'project',\n instance.deadline_miss_communicated and 'Yes' or 'No'\n ),\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_TUNGA\n },\n create_task_stakeholders_attachment_slack(instance.event.task, show_title=False)\n ]\n\n slack_utils.send_incoming_webhook(\n SLACK_STAFF_INCOMING_WEBHOOK,\n {\n slack_utils.KEY_TEXT: slack_msg,\n slack_utils.KEY_ATTACHMENTS: attachments,\n slack_utils.KEY_CHANNEL: SLACK_STAFF_UPDATES_CHANNEL\n }\n )\n\n\n@job\ndef notify_progress_report_behind_schedule_by_algo_slack_admin(instance):\n instance = clean_instance(instance, ProgressReport)\n\n task_url = '{}/work/{}'.format(TUNGA_URL, instance.event.task.id)\n slack_msg = \"`Alert (!):` \\\"{}\\\" {} is running behind schedule | <{}|View on Tunga>\".format(\n instance.event.task.summary,\n instance.event.task.is_task and 'task' or 
'project',\n task_url\n )\n\n attachments = [\n {\n slack_utils.KEY_TITLE: instance.event.task.summary,\n slack_utils.KEY_TITLE_LINK: task_url,\n slack_utils.KEY_TEXT: 'Please contact the PM and devs.',\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_TUNGA\n },\n create_task_stakeholders_attachment_slack(instance.event.task, show_title=False)\n ]\n\n slack_utils.send_incoming_webhook(\n SLACK_STAFF_INCOMING_WEBHOOK,\n {\n slack_utils.KEY_TEXT: slack_msg,\n slack_utils.KEY_ATTACHMENTS: attachments,\n slack_utils.KEY_CHANNEL: SLACK_STAFF_UPDATES_CHANNEL\n }\n )\n\n\n@job\ndef notify_progress_report_client_not_satisfied_slack_admin(instance):\n instance = clean_instance(instance, ProgressReport)\n\n task_url = '{}/work/{}/event/{}'.format(TUNGA_URL, instance.event.task.id, instance.event.id)\n slack_msg = \"`Alert (!):` Client dissatisfied | <{}|View on Tunga>\".format(task_url)\n\n attachments = [\n {\n slack_utils.KEY_TITLE: instance.event.task.summary,\n slack_utils.KEY_TITLE_LINK: task_url,\n slack_utils.KEY_TEXT: 'The project owner of \\\"{}\\\" {} is unsatisfied with the deliverable.\\n '\n 'Please contact all stakeholders.'.format(\n instance.event.task.summary,\n instance.event.task.is_task and 'task' or 'project'\n ),\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_TUNGA\n },\n create_task_stakeholders_attachment_slack(instance.event.task, show_title=False)\n ]\n\n slack_utils.send_incoming_webhook(\n SLACK_STAFF_INCOMING_WEBHOOK,\n {\n slack_utils.KEY_TEXT: slack_msg,\n slack_utils.KEY_ATTACHMENTS: attachments,\n slack_utils.KEY_CHANNEL: SLACK_STAFF_UPDATES_CHANNEL\n }\n )\n\n\n@job\ndef notify_progress_report_stuck_slack_admin(instance):\n instance = clean_instance(instance, ProgressReport)\n\n task_url = '{}/work/{}/event/{}'.format(TUNGA_URL, instance.event.task.id, instance.event.id)\n slack_msg = \"`Alert (!):` The status for the \\\"{}\\\" {} has been classified as stuck | <{}|View on Tunga>\".format(\n instance.event.task.summary,\n instance.event.task.is_task and 'task' or 'project',\n task_url\n )\n\n attachments = [\n {\n slack_utils.KEY_TITLE: instance.event.task.summary,\n slack_utils.KEY_TITLE_LINK: task_url,\n slack_utils.KEY_TEXT: 'Please contact all stakeholders.',\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_TUNGA\n },\n create_task_stakeholders_attachment_slack(instance.event.task, show_title=False)\n ]\n\n slack_utils.send_incoming_webhook(\n SLACK_STAFF_INCOMING_WEBHOOK,\n {\n slack_utils.KEY_TEXT: slack_msg,\n slack_utils.KEY_ATTACHMENTS: attachments,\n slack_utils.KEY_CHANNEL: SLACK_STAFF_UPDATES_CHANNEL\n }\n )\n\n\n@job\ndef notify_progress_report_wont_meet_deadline_slack_admin(instance):\n instance = clean_instance(instance, ProgressReport)\n\n task_url = '{}/work/{}/event/{}'.format(TUNGA_URL, instance.event.task.id, instance.event.id)\n slack_msg = \"`Alert (!):` {} doesn't expect to meet the deadline | <{}|View on Tunga>\".format(\n instance.event.type in [LEGACY_PROGRESS_EVENT_TYPE_PM, LEGACY_PROGRESS_EVENT_TYPE_MILESTONE_INTERNAL] and 'PM' or 'Developer',\n task_url\n )\n\n attachments = [\n {\n slack_utils.KEY_TITLE: instance.event.task.summary,\n slack_utils.KEY_TITLE_LINK: task_url,\n slack_utils.KEY_TEXT: 'The {} on the \\\"{}\\\" {} has indicated that they might not meet the coming deadline.\\n'\n 'Please contact all stakeholders.'.format(\n instance.event.type in 
[LEGACY_PROGRESS_EVENT_TYPE_PM,\n LEGACY_PROGRESS_EVENT_TYPE_MILESTONE_INTERNAL] and 'PM' or 'Developer',\n instance.event.task.summary,\n instance.event.task.is_task and 'task' or 'project'\n ),\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_TUNGA\n },\n create_task_stakeholders_attachment_slack(instance.event.task, show_title=False)\n ]\n\n slack_utils.send_incoming_webhook(\n SLACK_STAFF_INCOMING_WEBHOOK,\n {\n slack_utils.KEY_TEXT: slack_msg,\n slack_utils.KEY_ATTACHMENTS: attachments,\n slack_utils.KEY_CHANNEL: SLACK_STAFF_UPDATES_CHANNEL\n }\n )\n\n\n@job\ndef send_survey_summary_report_slack(event, client_report, pm_report, dev_report):\n event = clean_instance(event, ProgressEvent)\n client_report = clean_instance(client_report, ProgressReport)\n pm_report = clean_instance(pm_report, ProgressReport)\n dev_report = clean_instance(dev_report, ProgressReport)\n\n attachments = list()\n if not client_report:\n attachments.append({\n slack_utils.KEY_TEXT: '`Client survey was not filled`',\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_RED,\n })\n\n if event.task.pm and not pm_report:\n attachments.append({\n slack_utils.KEY_TEXT: '`PM Report was not filled`',\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_RED,\n })\n\n if event.task.active_participants and not dev_report:\n attachments.append({\n slack_utils.KEY_TEXT: '`No Developer report was filled`',\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_RED,\n })\n if client_report or pm_report or dev_report:\n summary_report = list()\n if client_report:\n summary_report.append(dict(\n title='Was the last deadline met?:',\n client=client_report and (client_report.last_deadline_met and 'Yes' or 'No') or None,\n pm=pm_report and (pm_report.last_deadline_met and 'Yes' or 'No') or None,\n dev=dev_report and (dev_report.last_deadline_met and 'Yes' or 'No') or None,\n color=client_report.last_deadline_met and SLACK_ATTACHMENT_COLOR_GREEN or SLACK_ATTACHMENT_COLOR_RED\n ))\n\n if not client_report.last_deadline_met:\n summary_report.append(dict(\n title='Was the client informed about missing the deadline?:',\n client=(client_report.deadline_miss_communicated and 'Yes' or 'No') or None,\n pm=pm_report and (pm_report.deadline_miss_communicated and 'Yes' or 'No') or None,\n dev=dev_report and (dev_report.deadline_miss_communicated and 'Yes' or 'No') or None,\n color=client_report.deadline_miss_communicated and SLACK_ATTACHMENT_COLOR_GREEN or SLACK_ATTACHMENT_COLOR_RED\n ))\n\n if client_report.deliverable_satisfaction is not None:\n summary_report.append(dict(\n title='Are you satisfied with the deliverable?:',\n client=(client_report.deliverable_satisfaction and 'Yes' or 'No') or None,\n pm=None,\n dev=None,\n color=client_report.deliverable_satisfaction and SLACK_ATTACHMENT_COLOR_GREEN or SLACK_ATTACHMENT_COLOR_RED\n ))\n\n if client_report.rate_deliverables is not None:\n summary_report.append(dict(\n title='Deliverable rating:',\n client=client_report.rate_deliverables or None,\n pm=pm_report and pm_report.rate_deliverables or None,\n dev=dev_report and dev_report.rate_deliverables or None,\n color=(client_report.rate_deliverables > 3 and SLACK_ATTACHMENT_COLOR_GREEN) or (\n client_report.rate_deliverables < 3 and SLACK_ATTACHMENT_COLOR_RED or SLACK_ATTACHMENT_COLOR_NEUTRAL)\n ))\n\n if pm_report or dev_report:\n 
summary_report.append(dict(\n title='Status:',\n client=None,\n pm=pm_report and pm_report.get_status_display() or None,\n dev=dev_report and dev_report.get_status_display() or None,\n color=SLACK_ATTACHMENT_COLOR_RED\n ))\n\n if (pm_report and pm_report.stuck_reason) or (dev_report and dev_report.stuck_reason):\n summary_report.append(dict(\n title='Stuck reason:',\n client=None,\n pm=pm_report and pm_report.get_stuck_reason_display() or None,\n dev=dev_report and dev_report.get_stuck_reason_display() or None,\n color=SLACK_ATTACHMENT_COLOR_BLUE\n ))\n\n for item in summary_report:\n client = item.get('client', None)\n pm = item.get('pm', None)\n dev = item.get('dev', None)\n attachments.append({\n slack_utils.KEY_TITLE: item['title'],\n slack_utils.KEY_TEXT: '{} {} {}'.format(\n client and 'Client: {}'.format(client) or '',\n pm and '{}PM: {}{}'.format(\n client_report and '*|* ' or '', pm, dev_report and ' *|*' or ''\n ) or '{}'.format(dev_report and '*|*' or ''),\n dev and 'Dev: {}'.format(dev) or ''),\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: item.get('color', SLACK_ATTACHMENT_COLOR_NEUTRAL)\n })\n else:\n attachments.append({\n slack_utils.KEY_TEXT: '`Insufficient data for creating a summary report`',\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_RED,\n })\n\n attachments.append({\n slack_utils.KEY_TITLE: 'Reports:',\n slack_utils.KEY_TEXT: '{}{}{}'.format(\n client_report and '<{}|Client Survey>'.format(\n '{}/work/{}/event/{}'.format(TUNGA_URL, event.task.id, client_report.event.id)) or '',\n pm_report and '{}<{}|PM Report>{}'.format(client_report and '\\n' or '',\n '{}/work/{}/event/{}'.format(TUNGA_URL, event.task.id,\n pm_report.event.id),\n dev_report and '\\n' or '') or '{}'.format(\n dev_report and '\\n' or ''),\n dev_report and '<{}|Developer Report>'.format(\n '{}/work/{}/event/{}'.format(TUNGA_URL, event.task.id, dev_report.event.id)) or '',\n ),\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_BLUE,\n })\n\n owner = event.task.owner or event.task.user\n\n slack_utils.send_incoming_webhook(\n SLACK_STAFF_INCOMING_WEBHOOK,\n {\n slack_utils.KEY_TEXT: \"*Summary Report:* <{}|{}>\\nProject Owner: <{}|{}>{}\".format(\n '{}/work/{}'.format(TUNGA_URL, event.task.id), event.task.summary,\n '{}/people/{}'.format(TUNGA_URL, owner.username), owner.display_name.encode('utf-8'),\n event.task.pm and '\\nPM: <{}|{}>'.format('{}/people/{}'.format(\n TUNGA_URL, event.task.pm.username), event.task.pm.display_name.encode('utf-8')\n ) or ''\n ),\n slack_utils.KEY_CHANNEL: SLACK_STAFF_PROJECT_EXECUTION_CHANNEL,\n slack_utils.KEY_ATTACHMENTS: attachments\n }\n )\n\n\n@job\ndef notify_new_task_invoice_admin_slack(instance):\n instance = clean_instance(instance, TaskInvoice)\n\n task_url = '{}/work/{}/'.format(TUNGA_URL, instance.task.id)\n owner = instance.task.owner or instance.task.user\n client_url = '{}/people/{}/'.format(TUNGA_URL, owner.username)\n invoice_url = '{}/api/task/{}/download/invoice/?format=pdf'.format(TUNGA_URL, instance.task.id)\n slack_msg = '{} generated an invoice'.format(\n instance.user.display_name.encode('utf-8')\n )\n\n attachments = [\n {\n slack_utils.KEY_TITLE: instance.task.summary,\n slack_utils.KEY_TITLE_LINK: task_url,\n slack_utils.KEY_TEXT: 'Client: <{}|{}>\\nFee: {}\\nPayment Method: {}\\n<{}|Download invoice>'.format(\n client_url,\n owner.display_name.encode('utf-8'),\n instance.display_fee().encode('utf-8'),\n 
instance.get_payment_method_display(),\n invoice_url\n ),\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_BLUE\n },\n ]\n if not instance.task.payment_approved:\n task_approval_url = '{}edit/payment-approval/'.format(task_url)\n if instance.payment_method == PAYMENT_METHOD_BANK:\n attachments.append({\n slack_utils.KEY_TITLE: 'Review and approve payment.',\n slack_utils.KEY_TITLE_LINK: task_approval_url,\n slack_utils.KEY_TEXT: \"Payment will be completed via bank transfer.\\n \"\n \"However, developer payments won't be distributed until the payment\"\n \" is reviewed and approved.\",\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_GREEN\n })\n else:\n attachments.append({\n slack_utils.KEY_TITLE: 'Review and approve payment.',\n slack_utils.KEY_TITLE_LINK: task_approval_url,\n slack_utils.KEY_TEXT: \"The client won't be able to pay until the payment is approved.\",\n slack_utils.KEY_MRKDWN_IN: [slack_utils.KEY_TEXT],\n slack_utils.KEY_COLOR: SLACK_ATTACHMENT_COLOR_RED\n })\n\n slack_utils.send_incoming_webhook(\n SLACK_STAFF_INCOMING_WEBHOOK,\n {\n slack_utils.KEY_TEXT: slack_msg,\n slack_utils.KEY_ATTACHMENTS: attachments,\n slack_utils.KEY_CHANNEL: SLACK_STAFF_PAYMENTS_CHANNEL\n }\n )\n","sub_path":"tunga_tasks/notifications/slack.py","file_name":"slack.py","file_ext":"py","file_size_in_byte":45204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"204916202","text":"import subprocess\nimport time\n\n\ndef main():\n\n with open('/root/app/hamster-backend/utils/edu_ip/result_2.txt') as f:\n for i in f.readlines():\n\n ip = i.strip('\\n')\n info = subprocess.check_output(\"/usr/local/nfdump/bin/nfdump -r /root/docker-settings/data/nfdump/23456/nfcapd.201904202100 'dst ip {}' | awk -F'[ ,]' '/Summary:/ {{print $16;}}'\".format(ip), shell=True)\n\n with open('netflow_bps.txt', 'a+') as bps_f:\n bps_f.write(ip + ' ' + info.decode())\n\n\nif __name__ == '__main__':\n old_time = time.time()\n main()\n print(time.time() - old_time)\n","sub_path":"utils/edu_ip/ip_netflow_fetch.py","file_name":"ip_netflow_fetch.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"22771750","text":"#!/usr/bin/env python2\n\nMQTT_BROKER = 'localhost'\nMQTT_USER = ''\nMQTT_PASSWORD = ''\n\nTIMEZONE = 'Europe/Berlin'\n\nDB_CONNECTION_STRING = 'dbname=silizium user=silizium'\n\nMQTT_TOPICS = {\n\t'/esp/temp/0' : 'float'\n}\n\n\nTEXT_ROOMTEMP = {\n\t'type': 'text-widget',\n\t'topics' : {'/esp/temp/0': 'temperature'},\n\t'label' : 'Room Temperature'\n}\n\nGAGE_ROOMTEMP = {\n\t'type': 'gage-widget',\n\t'topics' : {'/esp/temp/0': 'temperature'},\n\t'label' : 'Another\\nTemperature',\n\t'min' : -10.0,\n\t'max' : 40.0\n}\n\nGRAPH_ROOMTEMP = {\n\t'type': 'graph-widget',\n\t'topics' : {'/esp/temp/0': ''},\n\t'label' : 'Another\\nTemperature',\n\t'secondsBack' : 24 * 60 * 60\n}\n\nWIDGETS = [\n\t[TEXT_ROOMTEMP, GAGE_ROOMTEMP],\n\t[GRAPH_ROOMTEMP]\n]\n","sub_path":"config_example.py","file_name":"config_example.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"88074997","text":"\"\"\"\nThis code is free to use, copy, distribute, and modify.\nIf you use this code or any modification of this code, we request that you reference both this code https://zenodo.org/record/438675 and the 
paper https://arxiv.org/abs/1703.09721.\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport label_lines as ll\n\n# Load data\nfname = \"data/Likelihood.txt\"\ndata = np.genfromtxt(fname).transpose()\n\n# Process data into -2log(L/L0)\nmax_index = np.nanargmax(data[1])\nmax_log_likelihood = data[1][max_index]\nfor i in xrange(len(data[1])):\n\tdata[1][i] = -2 * (data[1][i] - max_log_likelihood)\n\n# Plot figure\nplt.plot(data[0], data[1])\n\nv = list(plt.axis())\nv = [0, 1, 0, 25]\nplt.axis(v)\n\nCL95 = 3.84 # 95% CL for 1 dof (f_gal)\n\nplt.plot([v[0], v[1]], [CL95, CL95], \"k:\", label = r\"$95\\%{\\rm\\ CL}$\")\n\nll.labelLines([plt.gca().get_lines()[-1]], [plt.gca().get_lines()], xvals = [0.8])\n\nplt.xlabel(r\"$f_{\\rm gal}$\")\nplt.ylabel(r\"$-2\\log\\frac{\\mathcal L(f_{\\rm gal})}{\\mathcal L(\\hat f_{\\rm gal})}$\")\n\nplt.savefig(\"fig/Likelihood.eps\")\n\nv = [0, 0.25, 0, 2]\nv = [0, 0.2, 0, 2]\nv = [0, 0.5, 0, 2]\nplt.axis(v)\nplt.savefig(\"fig/Likelihood_zoom.eps\")\n\n","sub_path":"py/Likelihood.py","file_name":"Likelihood.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"85915346","text":"# -*- encoding: utf-8 -*-\n# -*- coding: utf-8 -*-\n\nimport numpy\nimport json\nimport sys, os\nimport networkx\n\n\nW = json.load(open(sys.argv[1]))\n\nF = []\nwith file(sys.argv[2]) as opened:\n for line in opened:\n F.append(json.loads(line))\n\nK = len(W)\nTOP_K = 30\n\ngraph = networkx.Graph()\ngraph.add_edges_from(json.load(open('out.edges.true')))\n\nN = tuple(graph.nodes())\n\n\nr = {}\nfor u in N:\n for v in N:\n if u >= v:\n continue\n r[(u, v)] = sum([ F[u][k] * F[v][k] * W[k] for k in range(K) ])\n\nprint(len(r))\nrank = sorted(r.items(), key = lambda x:x[1], reverse = True)\n\ntopk_rank = rank[:TOP_K]\nprint(topk_rank)\nfor top in topk_rank:\n if top[0] in graph.edges():\n print(top[0])\n\n","sub_path":"data/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"135333012","text":"from math import log\n\nf = open('p099_base_exp.txt', 'r')\n\nmaxTotal = 0\nanswer = 0\nlineNumber = 1\n\nfor line in f:\n base, exp = [int(x) for x in line.split(',')]\n lineTotal = exp * log(base)\n if lineTotal > maxTotal:\n maxTotal = lineTotal\n answer = lineNumber\n lineNumber += 1","sub_path":"099.py","file_name":"099.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"93624060","text":"import json\n\nimport numpy as np\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nlogfmt = ticker.LogFormatterExponent(base=10.0, labelOnlyBase=True)\n\n\nimport matplotlib as mpl\nmpl.rcParams['lines.linewidth'] = 4.0\nparams = {\n 'axes.labelsize': 'x-large',\n 'axes.titlesize':'x-large',\n 'xtick.labelsize':'x-large',\n 'ytick.labelsize':'x-large',\n 'lines.markersize' : 7,\n 'font.size' : 20}\nmpl.rcParams.update(params)\n\n\n__color_rotation__ = ['red', 'gold','coral','darkturquoise','royalblue','darkblue','m','hotpink','lightpink','dimgrey']\n__marker_rotation__ = ['o', '^', 's', 'H', '+', '.', 'D', 'x', 'o', 'H', 's']\n__linestyle_rotation__ = ['--', ':', '-', '-.']\n\n\ndef plot_error(folder):\n # Load parameters\n with open(folder + '/log.txt') as f:\n run_for = json.load(f)\n error_cv = np.load(folder + 
'f_error_CV.npy')\n error_tangent = np.load(folder + 'tangent_error.npy')\n # Perform cross validation\n error_final = np.zeros(error_cv[:,:,:,:,:,0,:].shape)\n ind_cv = np.argmin(error_cv, axis = 5)\n shape = error_cv.shape\n aux_iter = [(i1,i2,i3,i4,i5,i6) for i1 in range(shape[0]) for i2 in range(shape[1]) for i3 in range(shape[2]) for i4 in range(shape[3]) for i5 in range(shape[4]) for i6 in range(shape[6])]\n for index in aux_iter:\n error_final[index] = error_tangent[index[0], index[1], index[2], index[3], index[4], ind_cv[index], index[5]]\n mean_error_final = np.mean(error_final, axis = 5)\n std_error_final = np.std(error_final, axis = 5)\n plt.figure(figsize = (12,8))\n for i, D in enumerate(run_for['D']):\n for j, sigma_f in enumerate(run_for['sigma_f']):\n if i < len(run_for['D']) - 1 and j > 0:\n continue\n plt.errorbar(run_for['N'][1:], mean_error_final[1:,i,0,j,0], std_error_final[1:,i,0,j,0],\n color = __color_rotation__[j],\n linestyle = __linestyle_rotation__[i],\n label = r'$D = {:d},\\ \\sigma_\\varepsilon = {:.0E}$'.format(D, sigma_f))\n plt.xscale('log')\n plt.yscale('log')\n ylim_min = np.floor(np.log10(np.min(mean_error_final[1:])))\n ylim_max = np.ceil(np.log10(np.max(mean_error_final[1:])))\n plt.ylim([10 ** ylim_min,10 ** ylim_max])\n plt.legend(ncol = 2, prop={'size': 15})\n plt.xlabel(r'$N$')\n plt.ylabel(r'$RMSE(a_j - \\hat a_j)$')\n plt.tight_layout()\n plt.savefig(folder + 'tangent_error.pdf', format = 'pdf', )\n","sub_path":"evaluation/tangent_error.py","file_name":"tangent_error.py","file_ext":"py","file_size_in_byte":2527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"282323675","text":"from gi.repository import Gtk \nimport sys, os, getpass\nfrom os.path import expanduser\nfrom time import localtime, strftime\n\ndef error_message(message):\n \n # log to terminal window\n# print (message) # Debug entry\n \n # create an error message dialog and display modally to the user\n dialog = Gtk.MessageDialog(None,\n Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,\n Gtk.MessageType.ERROR, Gtk.ButtonsType.OK, message)\n \n dialog.run()\n dialog.destroy()\n \ndef info_message(message):\n \n # log to terminal window\n# print (message) # Debug entry\n \n # create an info message dialog and display modally to the user\n dialog = Gtk.MessageDialog(None,\n Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,\n Gtk.MessageType.INFO, Gtk.ButtonsType.OK, message)\n \n dialog.run()\n dialog.destroy()\n\ndef get_username():\n username = getpass.getuser()\n if username: return username\n else: return None\n\ndef get_userdir(): \n home = expanduser(\"~\")\n if home: return home\n else: return None\n \ndef get_time():\n time = strftime(\"%Y-%m-%d %H:%M:%S\", localtime())\n if time: return time\n else: return None\n \ndef get_date():\n date = time = strftime(\"%Y-%m-%d\", localtime())\n if date: return date\n else: return None\n\ndef get_logfile():\n ''' Get path for the log file; it should probably be stored outside of the user dir.\n \n But for now we do it like this, 
so we have no problems with write access.'''\n home = get_userdir()\n logdir = home + \"/.support\"\n os.makedirs(logdir, exist_ok=True) # Create logdir if it does not exist.\n username = get_username()\n time = get_time()\n log_file = logdir + \"/\" + username + \"_\" + time + \".log\"\n if log_file: return log_file\n else: return None\n \n\n","sub_path":"dialog.py","file_name":"dialog.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"386345183","text":"import os\nimport glob\nimport collections\n\nimport livereload\nfrom flask import Flask, Config, Blueprint, render_template, request\nfrom flask_assets import Environment, Bundle\nfrom webassets.filter import get_filter\n\nfrom . import utils\nfrom utils import abs_path\n\n\ndefault_config = {\n 'EA_NPM_FOLDER': 'node_modules',\n 'EA_SCSS_FOLDER': 'scss',\n 'EA_JS_SRC_FOLDER': 'js_src',\n 'EA_TEMPLATES_FOLDER': 'templates',\n #: Path prefix for static and template folder\n #: This is very useful to place multiple apps within one folder\n #: For most of my projects, I will need one app for the user-facing side and one for the admin side\n 'EA_PREFIX': '',\n #: Custom Jinja filters\n 'EA_JINJA_FILTERS': {\n 'breaks': 'add_br',\n 'copyright': 'copyright_year',\n 'currency': 'format_currency',\n 'date': 'format_date',\n 'datetime': 'format_datetime',\n 'http': 'add_http',\n 'https': 'add_https',\n 'leading_zero': 'leading_zero',\n 'paragraphs': 'add_p',\n 'no_breaks': 'remove_linebreaks',\n 'slug': 'gen_slug'\n },\n #: Custom Jinja global functions\n 'EA_JINJA_FUNCTIONS': {'next_year': 'next_year',\n 'random_string': 'random_string',\n 'gen_years': 'relative_years',\n },\n #: Add some parameters and functions to jinja context because\n #: the functions behave differently with each request\n 'EA_JINJA_CONTEXT': {'highlight': 'highlight_link'},\n\n #: Project folder structure\n 'EA_FOLDER_STRUCTURE': ['%(prefix)sstatic/', '%(prefix)sstatic/%(scss_folder)s/styles.scss',\n '%(prefix)sstatic/', '%(prefix)sstatic/%(js_src_folder)s/Main.js',\n '%(prefix)sstatic/images/', '%(prefix)sstatic/fonts/', '%(prefix)s%(templates_folder)s/'],\n 'EA_JS_LIBS': ['jquery/dist/jquery.min.js'],\n 'EA_SCSS_LIBS': ['bootstrap/scss'],\n\n 'EA_JS_ASSETS': [('scripts.js', '*.js')],\n 'EA_CSS_ASSETS': ['styles.scss'],\n\n 'EA_FILTER_JSMIN': False,\n 'EA_FILTER_AUTOPREFIXER': False,\n 'EA_FILTER_BABEL': False,\n 'EA_BABEL_PRESETS': '/usr/local/lib/node_modules/babel-preset-es2015',\n\n #: Watch files for livereload\n 'EA_LIVERELOAD_WATCH_FILES': ['%(prefix)sstatic/%(scss_folder)s/*.scss',\n '%(prefix)sstatic/%(js_src_folder)s/*.js',\n '%(prefix)s%(templates_folder)s/*.html']\n}\n\n\nclass EnhancedApp(object):\n \"\"\"\n Enhanced Flask App with additional jinja filters/functions, webassets integration and livereload integration\n \"\"\"\n\n def __init__(self, app_name='enhanced-flask-app', config_file=None, **flask_kwargs):\n config = Config('.', Flask.default_config)\n if config_file:\n config.from_object(config_file)\n\n for k, v in default_config.items():\n config.setdefault(k, v)\n\n self.config = config.get_namespace('EA_')\n\n self.create_folder_structure()\n\n flask_kwargs.setdefault('static_folder', self.config['prefix'] + 'static')\n flask_kwargs.setdefault('static_url_path', '/static')\n flask_kwargs.setdefault('template_folder', self.config['prefix'] + 'templates')\n\n self.app = Flask(app_name, **flask_kwargs)\n self.app.config = config\n\n # Create webassets\n 
self.assets_env = Environment(self.app)\n self.assets_env.url_expire = True\n self.assets_env.url = '/static'\n self.assets_env\n\n # Initialize additional jinja stuff\n self.enhance_jinja(self.app.jinja_env)\n\n # Add a blueprint to hook up default HTML template\n bp = Blueprint('enhanced-flask-app-bp', __name__, template_folder=utils.abs_path('templates', __file__))\n self.app.register_blueprint(bp)\n\n # Flask assets related\n self.scss_path = abs_path(self._to_static_path(self.config['scss_folder']))\n self.js_src_path = abs_path(self._to_static_path(self.config['js_src_folder']))\n self.npm_path = abs_path(self.config['npm_folder'])\n self.js_filters = []\n self.css_filters = []\n self.depends_scss = []\n\n # Keep track of js, css bundles added in sequence\n # esp important for js because of the dependencies\n self.js_asset_names = []\n self.css_asset_names = []\n\n self.enhance_assets()\n\n def create_folder_structure(self):\n for f in self.config['folder_structure']:\n _path = f % self.config\n if os.path.exists(_path):\n break\n self._create_path(_path)\n\n def enhance_jinja(self, env):\n # Add custom tags/blocks\n env.add_extension('jinja2_ext_required.RequiredVariablesExtension')\n\n # Add additional jinja filters\n for k, v in self.config['jinja_filters'].items():\n env.filters[k] = getattr(utils, v)\n\n for k, v in self.config['jinja_functions'].items():\n env.globals[k] = getattr(utils, v)\n\n #: Initialize jinja context\n @self.app.context_processor\n def gen_jinja_context():\n obj = {}\n for _k, _v in self.config['jinja_context'].items():\n obj[_k] = getattr(utils, _v)\n return obj\n\n def enhance_assets(self):\n \"\"\"\n Add js, css/scss assets to the environment\n :param env: webassets environment\n :return:\n \"\"\"\n if self.config['filter_jsmin']:\n self.js_filters = ['jsmin']\n\n if self.config['filter_babel']:\n self.js_filters.append(get_filter('babel', presets=self.config['babel_presets']))\n\n include_scss = [self.scss_path]\n self.depends_scss = [os.path.join(self.scss_path, '*.scss')]\n for f in self.config['scss_libs']:\n include_scss.append(os.path.join(self.npm_path, f))\n self.depends_scss.append(os.path.join(self.npm_path, f, '*.scss'))\n sass_compiler = get_filter('libsass', includes=include_scss)\n\n self.css_filters = [sass_compiler]\n\n if self.config['filter_autoprefixer']:\n self.css_filters.append(\n get_filter('autoprefixer', autoprefixer='autoprefixer-cli', browsers='last 2 version'))\n\n #: Project specific libs added in project config\n libs = [os.path.join(self.npm_path, f) for f in self.config['js_libs']]\n if libs:\n self.add_js_asset('libs.js', libs)\n\n #: JS assets\n for asset in self.config['js_assets']:\n self.add_js_asset(asset[0], asset[1])\n\n #: CSS assets\n for asset in self.config['css_assets']:\n self.add_css_asset(asset)\n\n def add_js_asset(self, output_file, input_files):\n if isinstance(input_files, basestring):\n scripts = glob.glob(os.path.join(self.js_src_path, input_files))\n sorted(scripts, reverse=True)\n else:\n scripts = input_files\n b = Bundle(scripts, output=self._to_static_path(output_file), filters=self.js_filters)\n self.assets_env.register(output_file, b)\n self.js_asset_names.append(output_file)\n\n def add_css_asset(self, base_file):\n input_file = base_file + '.scss'\n output_file = base_file + '.css'\n b = Bundle(os.path.join(self.scss_path, input_file),\n filters=self.css_filters, depends=self.depends_scss,\n output=self._to_static_path(output_file))\n self.assets_env.register(output_file, b)\n 
self.css_asset_names.append(output_file)\n\n def run_livereload(self, port=8080):\n \"\"\"\n Create a live reload server\n :param port: port for the livereload server to listen on\n :return:\n \"\"\"\n self.app.debug = True\n self.app.jinja_env.globals['livereload'] = True\n self.app.jinja_env.auto_reload = True\n server = livereload.Server(self.app.wsgi_app)\n for f in self.config['livereload_watch_files']:\n server.watch(abs_path(f % self.config))\n server.serve(port=port, host='0.0.0.0')\n\n def add_error_handlers(self):\n @self.app.errorhandler(410)\n def content_gone(e):\n return render_template('410.html', error=e), 410\n\n @self.app.errorhandler(403)\n def access_denied(e):\n return render_template('403.html', error=e), 403\n\n @self.app.errorhandler(404)\n def content_not_found(e):\n if request.path == '/favicon.ico':\n return 'Not found', 404\n return render_template('404.html', error=e), 404\n\n def _create_path(self, path):\n if os.path.exists(path):\n return\n parent = os.path.abspath(os.path.join(path, os.pardir))\n while not os.path.exists(parent):\n self._create_path(parent)\n # Check if the path is a file\n if path.rfind('.') > path.rfind(os.path.sep):\n open(path, 'w').close()\n else:\n os.mkdir(path)\n\n def _to_static_path(self, *filenames):\n return os.path.join(abs_path(self.config['prefix'] + 'static'), *filenames)\n","sub_path":"flask_with_glasses/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":9142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"154425796","text":"# coding=utf-8\n# encoding: utf-8\nimport shutil\nimport os\nfrom ftplib import FTP\nimport tarfile\n\n\ndef logupload(logname):\n ftp = FTP()\n timeout = 30\n port = 21\n ftp.connect('192.168.1.100', port, timeout)\n ftp.login('administrator', 'yakai888')\n print(ftp.getwelcome())\n\n # ftp.mkd(\"TestLog/log\") # create a new folder\n ftp.cwd(\"TestLog/BatteryTestBoard\") # change to the working directory\n\n localpath = \"C:/Users/Administrator/Desktop/batterytest-new/\"\n localfile = localpath + logname + '.csv'\n f = open(localfile, 'rb')\n\n ftp.storbinary('STOR %s' % os.path.basename(localfile), f)\n\n\n","sub_path":"BatteryTest/logupload.py","file_name":"logupload.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"280378501","text":"# With a very bad RNN model we obtained 97% accuracy in just 5 epochs which is simply amazing\n\nfrom keras.utils import normalize\nfrom keras.models import Sequential\nfrom keras.layers import Dense , Dropout , CuDNNLSTM \nimport numpy as np\nfrom keras.datasets import mnist\nfrom keras.optimizers import adam\nfrom keras.utils import np_utils\n\n# loading the data\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n# get an idea of shape \nprint(\"Getting an idea of shape\")\nprint(x_train.shape)\nprint(y_train.shape)\nprint(x_test.shape)\nprint(y_test.shape)\n\n# data before normalizing\n#print(x_train[5])\n\n# normalizing the data i.e. 
making the pixel values between 0 and 1 as compared to 255 and 0\n# data has to be normalized for both training and test set\n# or x_train = x_train/255 same for test\n# VERY IMPORTANT SOMETIMES THERE ARE CHANCES WHEN THERE MAY NOT BE ANY LEARNING WITHOUT NORMALIZATION\nx_train = normalize(x_train , axis = 1)\nx_test = normalize(x_test , axis = 1)\n\n# data after normalizing\n#print(x_train[5])\n\nn_classes = 10\nprint(\"Shape before one-hot encoding: \", y_train.shape)\ny_train = np_utils.to_categorical(y_train, n_classes)\ny_test = np_utils.to_categorical(y_test, n_classes)\nprint(\"Shape after one-hot encoding: \", y_train.shape)\n\nmodel = Sequential()\n# input to LSTM cannot be only 784\nmodel.add(CuDNNLSTM(128 , input_shape = (28,28) , return_sequences= True))\nmodel.add(Dropout(0.2))\n\nmodel.add(CuDNNLSTM(128))\nmodel.add(Dropout(0.2))\n\nmodel.add(Dense(32 , activation='relu'))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(10 , activation='softmax'))\n#model.summary()\n\noptimizer = adam(decay = 0.00005)\n# metrics refers to the thing we want to calculate\nmodel.compile(optimizer = optimizer , loss = 'categorical_crossentropy' , metrics = ['accuracy'])\n\n# Training the model\nmodel.fit(x_train , y_train , epochs=5 , validation_split=0.2 , batch_size=200)\n\ntest_loss , test_accuracy = model.evaluate(x_test , y_test)\nprint(test_loss , (test_accuracy)*100)\n\n# prediction\n# For prediction you need to alter the shape a bit because x_test[i].shape for a single example is (28, 28)\n# It has to be of shape (1,28,28)\n#print(x_test[0].shape)\n\ntest_no = 5\ndef Predictor(X):\n X = np.reshape(X , (1,28,28))\n prediction = model.predict(X)\n print(np.argmax(prediction[0]))\n print(\" To validate it\")\n print(y_test[test_no])\n \nPredictor(x_test[test_no])\n\n","sub_path":"Keras Codes/MNIST_RNN.py","file_name":"MNIST_RNN.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"530211522","text":"from functools import cmp_to_key\n\n\nclass Solution:\n def largestNumber(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: str\n \"\"\"\n nums = [str(num) for num in nums]\n\n def cmp(a, b):\n return int(b + a) - int(a + b)\n\n nums.sort(key=cmp_to_key(cmp))\n res = ''.join(nums).lstrip('0')\n return '0' if not res else res\n\n\nif __name__ == '__main__':\n ls = [3, 30, 34, 5, 9]\n so = Solution()\n print(so.largestNumber(ls))\n","sub_path":"179_LargestNumber.py","file_name":"179_LargestNumber.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"356817209","text":"from helpers.increment import inc\nfrom helpers.decrement import dec\nimport time\nimport board\nfrom digitalio import DigitalInOut, Direction, Pull\nfrom analogio import AnalogIn\n\n\nprint(\"This is the Digital Foosball Revolution\")\n\nled = DigitalInOut(board.D13)\nled.direction = Direction.OUTPUT\n\n# Home goal sensor\nhome = DigitalInOut(board.D2)\nhome.direction = Direction.INPUT\nhome.pull = Pull.UP\n\n# Away goal sensor\naway = DigitalInOut(board.D3)\naway.direction = Direction.INPUT\naway.pull = Pull.UP\n\nreset_button = DigitalInOut(board.D8)\nreset_button.direction = Direction.INPUT\nreset_button.pull = Pull.UP\n\nmode_button = DigitalInOut(board.D9)\nmode_button.direction = Direction.INPUT\nmode_button.pull = Pull.UP\n\nedit_button = DigitalInOut(board.D10)\nedit_button.direction = Direction.INPUT\nedit_button.pull = Pull.UP\n\n# 
Increment button\nincrement = DigitalInOut(board.D6)\nincrement.direction = Direction.INPUT\nincrement.pull = Pull.UP\n\n# Decrement button\ndecrement = DigitalInOut(board.D7)\ndecrement.direction = Direction.INPUT\ndecrement.pull = Pull.UP\n\nhome_score = 0\naway_score = 0\n\ngame_mode = 0\n\nhigh_score = 10\n\nedit = False\n\nedit_mode = 0\nteam_edit = 0\n\nstart_time = 0\ntime_edit = 5\nhalf_time = int(60 * time_edit)\n\nstart_stop_home = DigitalInOut(board.D11)\nstart_stop_home.direction = Direction.INPUT\nstart_stop_home.pull = Pull.UP\n\nstart_stop_away = DigitalInOut(board.D12)\nstart_stop_away.direction = Direction.INPUT\nstart_stop_away.pull = Pull.UP\n\nhome_led = DigitalInOut(board.D4)\nhome_led.direction = Direction.OUTPUT\n\naway_led = DigitalInOut(board.D5)\naway_led.direction = Direction.OUTPUT\n\n\nhome_ratio = 1\naway_ratio = 1\nratios_on = False\n\n\"\"\"\nGame Modes:\n 0 = 1st to 10 goals\n 1 = 1st to high_score\n 2 = Timed halves\n 3 = Timed halves w/ handicaps\n\nButton Pinouts:\n D2 = Home Goal\n D3 = Away Goal\n D4 = Home LED\n D5 = Away LED\n D6 = Increment\n D7 = Decrement\n D8 = Reset\n D9 = Mode\n D10 = Edit\n D11 = Start/Stop Home\n D12 = Start/Stop Away\n\nDisplay Pinouts:\n 27 = CLK\n 32 = R1\n 33 = G1\n 34 = B1\n 35 = R2\n 36 = G2\n 37 = B2\n\"\"\"\n\nwhile True:\n\n while not reset_button.value:\n time.sleep(0.1)\n if reset_button.value:\n away_score = 0\n home_score = 0\n half_time = half_time\n print(\"Home Score: \", home_score)\n print(\"Away Score: \", away_score)\n break\n\n while not edit_button.value:\n time.sleep(0.1)\n if edit_button.value:\n edit = not edit\n print(edit)\n break\n\n if edit:\n led.value = True\n while not mode_button.value:\n time.sleep(0.1)\n if mode_button.value:\n if edit_mode == 6:\n edit_mode = 0\n else:\n edit_mode = inc(edit_mode)\n\n if edit_mode == 1: # edits home score\n while not increment.value:\n time.sleep(0.1)\n if increment.value:\n home_score = inc(home_score)\n print(\"Home Score: \", home_score)\n print(\"Away Score: \", away_score)\n break\n while not decrement.value:\n time.sleep(0.1)\n if decrement.value:\n if home_score == 0:\n break\n else:\n home_score = dec(home_score)\n print(\"Home Score: \", home_score)\n print(\"Away Score: \", away_score)\n break\n elif edit_mode == 2: # edits away score\n while not increment.value:\n time.sleep(0.1)\n if increment.value:\n away_score = inc(away_score)\n print(\"Home Score: \", home_score)\n print(\"Away Score: \", away_score)\n break\n while not decrement.value:\n time.sleep(0.1)\n if decrement.value:\n if away_score == 0:\n break\n else:\n away_score = dec(away_score)\n print(\"Home Score: \", home_score)\n print(\"Away Score: \", away_score)\n break\n elif edit_mode == 3: # edits high_score\n while not increment.value:\n time.sleep(0.1)\n if increment.value:\n high_score = inc(high_score)\n break\n while not decrement.value:\n time.sleep(0.1)\n if decrement.value:\n high_score = dec(high_score)\n break\n elif edit_mode == 4: # edits half time\n while not increment.value:\n time.sleep(0.1)\n if increment.value:\n half_time = inc(half_time, 15)\n break\n while not decrement.value:\n time.sleep(0.1)\n if decrement.value:\n if half_time >= 15:\n half_time = dec(half_time, 15)\n break\n else:\n break\n elif edit_mode == 5: # edits home ratio\n while not increment.value:\n time.sleep(0.1)\n if increment.value:\n home_ratio = inc(home_ratio)\n break\n while not decrement.value:\n time.sleep(0.1)\n if decrement.value:\n if home_ratio == 0:\n break\n else:\n home_ratio = 
dec(home_ratio)\n break\n elif edit_mode == 6:\n while not increment.value:\n time.sleep(0.1)\n if increment.value:\n away_ratio = inc(away_ratio)\n break\n while not decrement.value:\n time.sleep(0.1)\n if decrement.value:\n if away_ratio == 0:\n break\n else:\n away_ratio = dec(away_ratio)\n break\n\n if game_mode == 0:\n if not home.value:\n home_score = inc(home_score)\n print(\"Home Score: \", home_score)\n print('Away Score: ', away_score)\n elif not away.value:\n away_score = inc(away_score)\n print(\"Home Score: \", home_score)\n print('Away Score: ', away_score)\n\n if home_score == 10 or away_score == 10:\n winner = \"Home Team WINS\" if home_score == 10 else \"Away Team WINS\"\n print(winner)\n print(\"Home Score: \", home_score)\n print('Away Score: ', away_score)\n elif game_mode == 1:\n if not home.value:\n home_score = inc(home_score)\n print(\"Home Score: \", home_score)\n print('Away Score: ', away_score)\n elif not away.value:\n away_score = inc(away_score)\n print(\"Home Score: \", home_score)\n print('Away Score: ', away_score)\n\n if home_score == high_score or away_score == high_score:\n winner = \"Home Team WINS\" if home_score == high_score else \"Away Team WINS\" # noqa\n print(winner)\n print(\"Home Score: \", home_score)\n print('Away Score: ', away_score)\n elif game_mode == 2:\n current_time = int(time.mktime(time.localtime()))\n if start_stop_away.value and start_stop_home.value:\n home_led.value = True\n away_led.value = True\n time.sleep(0.5)\n away_led.value = False\n home_led.value = False\n time.sleep(0.5)\n if not start_stop_home.value and not start_stop_away.value:\n while not start_stop_home.value or not start_stop_away.value:\n home_led.value = True\n away_led.value = True\n home_led.value = False\n away_led.value = False\n time.sleep(1)\n start_time = int(time.mktime(time.localtime()))\n if start_time:\n away_led.value = False\n home_led.value = False\n seconds = []\n end_time = start_time + half_time\n pause = False\n home_goal = False\n away_goal = False\n home_ready = False\n away_ready = False\n while True:\n current_time = int(time.mktime(time.localtime()))\n count_down = end_time - current_time\n if count_down not in seconds and count_down > 0:\n seconds.append(end_time - current_time)\n mins, secs = divmod(seconds[-1], 60)\n timer = '{:02d}:{:02d}'.format(mins, secs)\n print(timer)\n\n if not start_stop_home.value or not start_stop_away.value:\n while not start_stop_home.value or not start_stop_away.value: # noqa\n pause = True\n home_led.value = True\n away_led.value = True\n current_time = int(time.mktime(time.localtime()))\n end_time = current_time + seconds[-1]\n time.sleep(1)\n\n while pause:\n current_time = int(time.mktime(time.localtime()))\n if home_ready and away_ready:\n pause = False\n home_ready = False\n away_ready = False\n time.sleep(1)\n end_time = current_time + seconds[-1]\n break\n if not start_stop_home.value:\n home_led.value = False\n home_ready = True\n if not start_stop_away.value:\n away_led.value = False\n away_ready = True\n\n if not home.value:\n home_score += 1\n home_goal = True\n away_led.value = True\n if not home.value:\n while not home.value:\n current_time = int(time.mktime(time.localtime()))\n end_time = current_time + seconds[-1]\n print(\"Home Score: \", home_score)\n print(\"Away Score: \", away_score)\n elif not away.value:\n away_score += 1\n away_goal = True\n home_led.value = True\n if not away.value:\n while not away.value:\n current_time = int(time.mktime(time.localtime()))\n end_time = 
current_time + seconds[-1]\n print(\"Home Score: \", home_score)\n print(\"Away Score: \", away_score)\n time.sleep(1)\n else:\n led.value = False\n\n while home_goal:\n if not start_stop_away.value:\n while not start_stop_away.value:\n home_goal = False\n current_time = int(time.mktime(time.localtime()))\n end_time = current_time + seconds[-1]\n away_led.value = False\n time.sleep(1)\n break\n\n while away_goal:\n if not start_stop_home.value:\n while not start_stop_home.value:\n away_goal = False\n current_time = int(time.mktime(time.localtime()))\n end_time = current_time + seconds[-1]\n home_led.value = False\n time.sleep(1)\n break\n\n if int(time.mktime(time.localtime())) == end_time:\n print('GAME OVER')\n start_time = 0\n home_score = 0\n away_score = 0\n seconds = []\n break\n","sub_path":"sd_files/.Trashes/501/foosball.py","file_name":"foosball.py","file_ext":"py","file_size_in_byte":12284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"27135750","text":"from django.shortcuts import render, redirect, get_object_or_404, loader\nfrom django.urls import reverse_lazy, reverse\nfrom django.contrib import messages\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.contrib.auth.views import LoginView, LogoutView\nfrom django.views.generic.edit import CreateView\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.core.paginator import Paginator\nfrom django.views.generic import View\nfrom django.views.generic.base import TemplateView\nfrom django.db.models import Sum, Max, Min\nfrom django.db.models import Q\n\n\nfrom HR.settings import *\nfrom .models import *\nfrom .forms import *\nfrom datetime import datetime\nfrom datetime import date\nfrom datetime import timedelta\nimport time\n\ndef register(request):\n\t\"\"\"Реги��трация нового пользователя\"\"\"\n\tif request.method == 'POST':\n\t\tform = UserRegisterForm(request.POST)\n\t\temail = request.POST.get('email').lower()\n\t\tpassword1 = request.POST.get('password1')\n\t\tpassword2 = request.POST.get('password2')\n\t\tif AdvUser.objects.filter(email=request.POST.get('email')).exists()\tor \\\n\t\t\tAdvUser.objects.filter(email__iexact=request.POST.get('email')):\n\t\t\tmessages.error(request, message='Пользователь с таким адресом' + \n\t\t\t\t\t\t\t\t\t\t\t' электронной почты уже существует')\n\t\t\tform = UserRegisterForm()\n\t\t\treturn render(request, 'registration/registration_form.html', \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t{'form': form})\n\t\tif AdvUser.objects.filter(username=request.POST.get('username')).exists():\n\t\t\tmessages.error(request, message='Пользователь с таким именем уже существует')\n\t\t\tform = UserRegisterForm()\n\t\t\treturn render(request, 'registration/registration_form.html',\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t {'form': form})\n\t\tif password1 != password2:\n\t\t\tmessages.error(request, message='Пароли не совпадают')\n\t\t\tform = UserRegisterForm()\n\t\t\treturn render(request, 'registration/registration_form.html',\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t {'form': form})\n\t\tif len(password1) < 8 or len(password2) < 8:\n\t\t\tmessages.error(request, message='слишком короткий пароль минимум 8 символов')\n\t\t\tform = UserRegisterForm()\n\t\t\treturn render(request, 'registration/registration_form.html',\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t {'form': form})\t\n\t\telif form.is_valid():\n\t\t\temail = form.cleaned_data['email'].lower()\n\t\t\tform.save()\n\t\t\treturn 
HttpResponseRedirect(reverse_lazy('hr_test:index'))\n\telse:\n\t\tform = UserRegisterForm()\n\t\tcontext = {'form': form}\n\t\treturn render(request, 'registration/registration_form.html', {'form': form})\n\n\ndef index(request):\n\t\"\"\"Start page\"\"\"\n\tpoll = Poll.objects.filter(date_relise__date__lte=datetime.now())\n\tresult = ResultsAll.objects.all()\n\tcount = {}\n\tfor i in poll:\n\t\tcount[i.title] = 0\n\t\tfor a in result:\n\t\t\tif a.poll_total == i.title:\n\t\t\t\tcount[i.title] += 1\n\tcontext = {\n\t\t'count': count,\n\t\t'poll': poll,\n\t}\n\treturn render(request, 'index.html', context)\n\n\ndef poll_all(request):\n\tresult_all = ResultsAll.objects.filter(id_user=request.user.id)\n\tresult_list = [x for x in result_all]\n\tpoll = Poll.objects.filter(date_relise__date__lte=datetime.now()).exclude(title__in=result_list)\n\tcontext = {\n\t\t'result_list': result_list,\n\t\t'poll': poll,\n\t}\n\treturn render(request, 'poll/poll_view.html', context)\n\n\n\ndef answer(request, pk, question_id):\n\t\"\"\"Display a question\"\"\"\n\ttemplate = loader.get_template('poll/answer.html')\n\tpoll = Poll.objects.get(id=pk)\n\tquestion = get_object_or_404(Question, id=question_id)\n\tquestions = Question.objects.filter(poll=poll)\n\tquestion_image = QuestionImage.objects.filter(question_image=question)\n\tif poll.is_timer:\n\t\ti = []\n\t\tfor q in questions:\n\t\t\ti.append(q.timer)\n\t\tsum_time = sum(i)\n\t\tminutes = int()\n\t\tsecond = int()\n\t\tif questions.count() > 1:\n\t\t\ta = sum_time * 0.75\n\t\t\tminutes = round(a) // 60\n\t\t\tsecond = round(a) % 60\n\n\t\telse:\n\t\t\tminutes = round(sum_time) // 60\n\t\t\tsecond = round(sum_time) % 60\n\t\tcontext = {\n\t\t\t'minutes': round(minutes),\n\t\t\t'second': round(second),\n\t\t\t'poll': poll,\n\t\t\t'question': question,\n\t\t\t'question_image': question_image\n\t\t}\n\t\t\n\t\treturn HttpResponse(template.render(context, request))\n\n\telse: \n\t\tcontext = {\n\t\t\t'question': question,\n\t\t\t'poll': poll,\n\t\t\t'question_image': question_image,\n\t\t}\n\n\t\treturn HttpResponse(template.render(context, request))\n\treturn HttpResponse(template.render(context, request))\n\ndef points(request, poll_id, question_id):\n\t\"\"\"Logic for showing the next question and\n\t\tsaving the answer data for a single question\"\"\"\n\tresult = Result.objects.filter(id_user=request.user.id)\n\tquestion = get_object_or_404(Question, id=question_id)\n\tanswers = Answer.objects.filter(question=question.id)\n\tpoll = Poll.objects.get(id=poll_id)\n\tquestions = Question.objects.filter(polls_one=poll)\n\n\tlist_result = []\n\tfor i in result:\n\t\tlist_result.append(i.question_total)\n\tif question.title in list_result:\n\t\ta = messages.error(request, message='You have already answered this question!' +\n\t\t' The test was not passed; contact the administrator')\n\t\treturn render(request, 'index.html', {'a': a})\n\tquestion_list_id = []\n\tfor i in questions:\n\t\tquestion_list_id.append(i.id)\n\tif request.method == 'POST' and question.view == 'r':\n\t\tif request.POST.get('answer') and question_id in question_list_id:\n\t\t\tResult.objects.create(total = request.POST['answer'],\n\t\t\t\t\t\t\t\tname_user = request.user,\n\t\t\t\t\t\t\t\tid_user = request.user.id,\n\t\t\t\t\t\t\t\tquestion_total = question.title,\n\t\t\t\t\t\t\t\tpoll_total = poll.title,\n\t\t\t\t\t\t\t\tanswer_total = request.POST.get('answer'), \n\t\t\t)\n\n\t\t\tquestion_id = question_id\n\t\t\t\n\t\t\twhile True:\n\t\t\t\tquestion_id += 1\n\t\t\t\tif question_id in question_list_id:\n\t\t\t\t\tprint('no ok')\n\t\t\t\t\treturn HttpResponseRedirect(reverse('hr_test:answer',\n\t\t\t\t\t\t\t\t\t\t\t args=(poll.id, question_id,)))\n\t\t\t\telif question_id > question_list_id[-1]:\n\t\t\t\t\tprint('ok')\n\t\t\t\t\treturn HttpResponseRedirect(reverse('hr_test:save', \n\t\t\t\t\t\t\t\t\targs=(poll.id,)))\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\t\telse:\n\t\t\tmessages.error(request, message='Please select an answer option')\n\t\t\treturn HttpResponseRedirect(reverse('hr_test:answer', \n\t\t\t\t\t\t\t\targs=(poll.id, question_id,)))\n\telif request.method == 'POST' and question.view == 'c':\n\t\tif request.POST.get('answer') and question_id in question_list_id:\n\t\t\tanswer_server = request.POST.getlist('answer')\n\t\t\tquestion_check = []\n\t\t\tfor i in answer_server:\n\t\t\t\tquestion_check.append(int(i))\n\t\t\tsum_question = sum(question_check)\n\t\t\tResult.objects.create(total = sum_question,\n\t\t\t\t\t\t\t\tname_user = request.user,\n\t\t\t\t\t\t\t\tid_user = request.user.id,\n\t\t\t\t\t\t\t\tquestion_total = question.title,\n\t\t\t\t\t\t\t\tpoll_total = poll.title,\n\t\t\t\t\t\t\t\tanswer_total = request.POST.getlist('answer')\n\t\t\t\t\t\t\t\t)\n\t\t\tquestion_id = question_id\n\t\t\twhile True:\n\t\t\t\tquestion_id += 1\n\t\t\t\tif question_id in question_list_id:\n\t\t\t\t\treturn HttpResponseRedirect(reverse('hr_test:answer',\n\t\t\t\t\t\t\t\t\t\t\t args=(poll.id, question_id,)))\n\t\t\t\telif question_id > question_list_id[-1]:\n\t\t\t\t\treturn HttpResponseRedirect(reverse('hr_test:save', \n\t\t\t\t\t\t\t\t\t\targs=(poll.id,)))\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\t\telse:\n\t\t\tmessages.error(request, message='Please select an answer option')\n\t\t\treturn HttpResponseRedirect(reverse('hr_test:answer', \n\t\t\t\t\t\t\t\targs=(poll.id, question_id,)))\n\telse:\n\n\t\treturn HttpResponseRedirect(reverse('hr_test:answer', \n\t\t\t\t\t\t\t\targs=(poll.id, question_id,)))\n\treturn HttpResponseRedirect(reverse('hr_test:answer', \n\t\t\t\t\t\t\t\targs=(poll.id, question_id,)))\n\ndef save(request, poll_id):\n\t\"\"\"Save the results and calculate the score for the whole poll\"\"\"\n\ttemplate = loader.get_template('result/result.html')\n\tpolls = Poll.objects.filter(id=poll_id) \n\tpoll = Poll.objects.get(id=poll_id)\n\tquestion = Question.objects.filter(poll__in=polls)\n\tquestion_count = question.count()\n\tresult = Result.objects.only('request.user.id').order_by('-id')[:question_count]\n\ttotal_sum = result.aggregate(Sum('total'))['total__sum']\n\tif request.method == 'GET':\n\t\tResultsAll.objects.create(name_user = request.user,\n\t\t\t\t\t\t\t\tid_user=request.user.id,\n\t\t\t\t\t\t\t\tpoll_total=poll.title,\n\t\t\t\t\t\t\t\ttotal = total_sum\n\t\t\t\t\t\t\t\t)\n\t\ta = HttpResponseRedirect(reverse('hr_test:result', 
args=(poll.id,)))\n\treturn a\n\ndef result(request, poll_id):\n\t\"\"\"Contains the logic for calculating the result and displaying it\n\tto the user\"\"\"\n\ttemplate = loader.get_template('result/result.html')\n\tpolls = Poll.objects.filter(id=poll_id)\n\tpoll = Poll.objects.get(id=poll_id)\n\tresults = ResultsAll.objects.filter(poll_total=poll.title)\n\tusers = AdvUser.objects.all()\n\tres_user = ResultsAll.objects.filter(poll_total=poll.title).count()\n\tquestions = Question.objects.filter(poll__in=polls).count()\n\tuser = request.user\n\tresult_user = ResultsAll.objects.filter(poll_total=poll.title).get(id_user=user.id)\n\tresult_procent = []\n\tfor r in results:\n\t\tresult_procent.append(r.total)\n\trepeat = 0\n\twhile True:\n\t\tif result_user.total in result_procent:\n\t\t\tresult_procent.remove(result_user.total)\n\t\t\trepeat += 1\n\t\telif result_user not in result_procent:\n\t\t\tresult_procent.append(result_user.total)\n\t\t\trepeat -= 1\n\t\t\tbreak\n\tresult_procent = sorted(result_procent)\n\tres = result_procent.index(result_user.total) + 1\n\tresult_procent_user = 100 - (res * 100 // len(result_procent))\n\tpoll_question = Question.objects.filter(poll__in=polls)\n\tpoll_answer = Answer.objects.filter(question__in=poll_question)\n\tdata = {\n\t\t'res_user': res_user,\n\t\t'user': user,\n\t\t'result_user': result_user,\n\t\t'repeat': repeat,\n\t\t'result_procent': result_procent,\n\t\t'result_procent_user': result_procent_user,\n\t\t'poll': poll,\n\t\t'users': users,\n\t}\n\n\treturn HttpResponse(template.render(data, request))\n\nclass ResultAdmin(View):\n\tdef get(self, request):\n\t\tsearch_query = request.GET.get('search', '')\n\t\tresult_admin = AdvUser.objects.filter(is_staff=False, is_manager=False)\n\t\tif search_query:\n\t\t\tresult_admin = AdvUser.objects.filter(is_staff=False, is_manager=False).only('username').filter(username__icontains=search_query)\n\t\tresult = ResultsAll.objects.all()\n\t\tcontext = {\n\t\t\t'result': result,\n\t\t\t'result_admin': result_admin,\n\t\t}\n\t\treturn render(request, 'result/result_admin.html', context)\n\n\nclass ResultAll(View):\n\tdef get(self, request):\n\t\tsearch_query = request.GET.get('search', '')\n\t\tshort_code = ResultsAll.objects.filter(id_user=request.user.id)\n\t\tif search_query:\n\t\t\tresult_user = short_code.only('poll_total').filter(poll_total__icontains=search_query)\n\t\telse:\n\t\t\tresult_user = short_code.only('poll_total')[:10]\n\t\tresult_list = []\n\t\tfor i in result_user:\n\t\t\tresult_list.append(i.poll_total)\n\t\tresult = Result.objects.filter(poll_total__in=result_list).filter(id_user=request.user.id)\n\t\tcontext = {\n\t\t\t'result': result,\n\t\t\t'result_user': result_user,\n\t\t\t'result_list': result_list,\n\t\t}\n\t\treturn render(request, 'result/result_user.html', context)\n\n\nclass HrLoginView(LoginView):\n\t\"\"\"Log in a registered user\"\"\"\n\ttemplate_name = 'registration/login.html'\n\n\nclass HrLogoutView(LogoutView):\n\t\"\"\"Log out the user\"\"\"\n\ttemplate_name = 'registration/logout.html'\n\tsuccess_url = reverse_lazy('hr_test:login')\n\n","sub_path":"HR/hr_test/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"214979663","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n 
dependencies = [\n ('contenttypes', '__first__'),\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Collection',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('slug', models.SlugField()),\n ('title', models.CharField(max_length=250)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='CollectionFollow',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('is_owner', models.BooleanField(default=False)),\n ('is_main', models.BooleanField(default=False)),\n ('collection', models.ForeignKey(to='users.Collection')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='CollectionItem',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('object_id', models.PositiveIntegerField()),\n ('collection', models.ForeignKey(to='users.Collection')),\n ('content_type', models.ForeignKey(to='contenttypes.ContentType')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='DepartmentFollow',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='LocationFollow',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='TalksUser',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='collectionfollow',\n name='user',\n field=models.ForeignKey(to='users.TalksUser'),\n preserve_default=True,\n ),\n migrations.AlterUniqueTogether(\n name='collectionfollow',\n unique_together=set([(b'user', b'collection')]),\n ),\n ]\n","sub_path":"talks/users/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":3101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"391863029","text":"import pandas as pd\nimport numpy as np\n\n\ndef get_avg_vector(words, model):\n \"\"\"Converts a list of words to a vector.\n\n Args:\n words: input text in list of words format\n model: model\n\n Returns:\n mean_vector: vector representation of text\n \"\"\"\n vector = [model.wv[word] for word in words if word in model.wv.vocab]\n if len(vector) == 0:\n mean_vector = np.zeros(300)\n return mean_vector\n mean_vector = np.mean(vector, axis=0)\n return mean_vector\n\n\ndef postprocess_vectors(data):\n \"\"\"Prepares vectors dataset to inference format.\n\n Args:\n data: vectors dataset\n\n Returns:\n data_sent_grouped: processed vectors dataset\n \"\"\"\n # group variable\n grouped_by = ['text_sentences', 'text']\n grouped_columns = ['doc_class', 'page_path', 'coordinate', 'text_sentences', 'text_vectors']\n\n # explode sentences column\n data_sent = data.explode('sentences').reset_index()\n print('There are {} rows in sentence DataFrame.'.format(data_sent.shape[0]))\n data_sent[['text_sentences', 'text_vectors']] = pd.DataFrame(data_sent['sentences'].to_list())\n\n # remove zero vectors\n data_sent = 
data_sent[data_sent['text_vectors'].apply(np.sum) != 0]\n print('There are {} rows in sentence DataFrame after removing zero vectors.'.format(data_sent.shape[0]))\n\n # convert to lower case text columns\n data_sent['text_sentences'] = data_sent['text_sentences'].str.lower().str.strip()\n data_sent['text'] = data_sent['text'].str.lower().str.strip()\n\n # group data based on sentence text and chunk text\n data_sent_grouped = data_sent.groupby(grouped_by)[grouped_columns].agg(lambda x: list(x)).reset_index()\n data_sent_grouped['text_vectors'] = data_sent_grouped['text_vectors'].apply(lambda x: x[0])\n data_sent_grouped['page_class_coordinate'] = data_sent_grouped.apply(lambda x:\n list(zip(x['page_path'], x['doc_class'],\n x['coordinate'])), axis=1)\n # drop unused columns\n data_sent_grouped.drop(['page_path', 'doc_class', 'coordinate'], inplace=True, axis=1)\n print('There are {} rows in final DataFrame.'.format(data_sent_grouped.shape[0]))\n\n return data_sent_grouped\n","sub_path":"other/vectors.py","file_name":"vectors.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"14910898","text":"from styx_msgs.msg import TrafficLight\nimport tensorflow as tf\nimport cv2\nimport rospy\nimport datetime\nimport numpy as np\nimport os\nimport time\n\nclass TLClassifier(object):\n def __init__(self, config):\n self.image_size = (config['camera_info']['image_height'], config['camera_info']['image_width'])\n self.threshold=config['classifier']['threshold']\n self.total_time=0\n self.num_detections=0\n self.load_model(config['classifier']['model_path'])\n self.class_map = {\n 1: TrafficLight.GREEN,\n 2: TrafficLight.YELLOW,\n 3: TrafficLight.RED\n }\n self.warmup_model()\n\n def load_model(self, model_path):\n base_folder = os.path.dirname(os.path.realpath(__file__))\n model_path = os.path.join(base_folder, model_path)\n \n rospy.loginfo('Loading model: %s', model_path)\n\n graph = tf.Graph()\n \n with graph.as_default():\n graph_def = tf.GraphDef()\n with tf.gfile.GFile(model_path, 'rb') as fid:\n serialized_graph = fid.read()\n graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(graph_def, name='')\n\n self.sess = tf.Session(graph = graph)\n\n self.image_tensor = graph.get_tensor_by_name('image_tensor:0')\n self.boxes_tensor = graph.get_tensor_by_name('detection_boxes:0')\n self.scores_tensor = graph.get_tensor_by_name('detection_scores:0')\n self.classes_tensor = graph.get_tensor_by_name('detection_classes:0')\n self.detections_tensor = graph.get_tensor_by_name('num_detections:0')\n\n def warmup_model(self):\n image = np.zeros((self.image_size[0], self.image_size[1], 3), dtype=np.uint8)\n _, elapsed_time = self.detect_light(image)\n rospy.loginfo('Tensorflow warmup completed (Time elapsed: %.3f ms)', elapsed_time)\n\n def get_detected_class(self, detection_scores, detection_classes):\n if detection_scores[0] >= self.threshold:\n return self.class_map.get(detection_classes[0], TrafficLight.UNKNOWN)\n return TrafficLight.UNKNOWN\n\n def detect_light(self, image):\n input_image = np.expand_dims(image, axis=0)\n\n s_time = time.time()\n ops = [self.detections_tensor, self.boxes_tensor, self.scores_tensor, self.classes_tensor]\n _, _, detection_scores, detection_classes = self.sess.run(ops, feed_dict = { self.image_tensor : input_image })\n e_time = time.time() - s_time\n \n detection_scores = detection_scores[0]\n detection_classes = detection_classes[0].astype(np.uint8)\n\n return 
self.get_detected_class(detection_scores, detection_classes), e_time * 1000.0\n\n def get_classification(self, image_rgb):\n \"\"\"Determines the color of the traffic light in the image\n\n Args:\n image_rgb (cv::Mat): image (RGB) containing the traffic light\n\n Returns:\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n\n \"\"\"\n detected_light, elapsed_time = self.detect_light(image_rgb)\n self.total_time += elapsed_time\n self.num_detections += 1\n\n if self.num_detections % 50 == 0:\n rospy.logdebug('Detections: %s, Avg Detection Time: %.3f ms', self.num_detections, self.total_time / self.num_detections)\n\n return detected_light\n","sub_path":"ros/src/tl_detector/light_classification/tl_classifier.py","file_name":"tl_classifier.py","file_ext":"py","file_size_in_byte":3381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"299573761","text":"# -*- coding:utf-8 -*- \n\nimport tensorflow as tf\n\na = tf.constant([1.0, 2.0], name='a')\nb = tf.constant([2.0, 3.0], name='b')\nresult = tf.add(a, b, name='add')\n'''\n# Create a session; if no Graph is specified, a session on the default graph is returned\nsess = tf.Session()\n# sess.run(...)  executes an operation in the graph\n# close the session\nsess.close()\n'''\nsess = tf.Session()\nwith sess.as_default():\n # use tf.Tensor.eval to compute the value of a tensor\n print(result.eval())\nsess.close()\n'''\nThe following also works:\nprint(sess.run(result))\nprint(result.eval(session=sess))\n'''\n# Build an interactive session; after construction, all operations automatically register it as the default session\nsess = tf.InteractiveSession()\nprint(result.eval())\nsess.close()\n\n# Any session can be configured through the ConfigProto protocol buffer\n# https://blog.csdn.net/dcrmg/article/details/79091941\nconfig = tf.ConfigProto(allow_soft_placement=True,\n log_device_placement=True)\nsess1 = tf.InteractiveSession(config=config)\nsess2 = tf.Session(config=config)","sub_path":"TensorFlow/TensorFlow_Framework_Insights/sess.py","file_name":"sess.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"608430970","text":"\"\"\"\nTypes of Variables in Class:\n1.Class(Static) Variable\n2.Instance Variable\n\"\"\"\n\n\n# Example 1:\nclass Car:\n wheel = 4 # Class(Static) Variable\n\n def __init__(self):\n self.company = \"BMW\" # Instance Variable\n self.price = \"10,00,000\" # Instance Variable\n\n\nmy_car = Car()\nmy_frnd_car = Car()\nmy_frnd_car.company = \"Farrari\"\nmy_frnd_car.price = \"20,00,000\"\nmy_car.extra_info = \"Made in America\"\nmy_frnd_car.extra_info = \"Made in India\"\nprint(my_car.company, my_car.price, my_car.wheel, my_car.extra_info)\nprint(my_frnd_car.company, my_frnd_car.price, my_frnd_car.wheel, my_frnd_car.extra_info)\n# **********************************************************************************************************************\n\"\"\"\nTypes of methods\n1.Instance Method- \n 1.Accessor Method : Used when we only need to access the value\n 2.Mutator Method: Used when we need to set or change the value\n2.Class Method\n3.Static Method\n\"\"\"\n\n\n# Example 2: Instance Method\nclass value:\n def __init__(self, p):\n self.p = p\n\n def get(self): # Instance Method - Accessor Method\n return self.p\n\n def set(self, set_to): # Instance Method - Mutator Method\n self.p = set_to\n\n\nfirst = value(1)\nsecond = value(2)\nprint(first.p)\nprint(second.p)\nfirst.set(11)\nsecond.set(22)\nprint(first.p)\nprint(second.p)\n\n\n# **********************************************************************************************************************\n# Example 3: Class method\nclass Employee:\n dress_code = 
\"white shirt and black pant\" # Class or Static Variable\n\n def __init__(self):\n self.employee_id = 0000\n self.employee_address = \"0000\"\n\n @classmethod\n def get_dress_code(cls): # Class method - working with class variable\n return cls.dress_code\n\n @staticmethod\n def random_operation():\n print(\"Welcome to this company\")\n\n\nemploy1 = Employee()\nemploy1.employee_id = 23456\nemploy1.employee_address = \"Delhi\"\nprint(employ1.employee_id)\nprint(employ1.employee_address)\nprint(Employee.get_dress_code()) # Calling the class method\nEmployee.random_operation() # Calling the static method\n","sub_path":"code/14.OOPs_2.py","file_name":"14.OOPs_2.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"153710723","text":"# -*- coding: utf-8 -*-\nfrom .models import *\nfrom django import forms\n\nCHOICES_TIME = (\n (1, 'Ближа��шее время'),\n (2, 'Через час'),\n (3, 'После 14:00'), \n (4, 'После 17:00'),\n (5, 'После 19:00'),\n (6, 'Завтра до 12:00'), \n (7, 'Завтра в любое время'), \n\n )\n\nclass QuestionTV(forms.ModelForm):\n class Meta:\n model = QuestionAboutTV\n exclude = ['']\n\n\nclass OrderingServices(forms.ModelForm):\n time= forms.MultipleChoiceField(required=True,widget=forms.CheckboxSelectMultiple(attrs={'class':'bootstrap'}),choices=CHOICES_TIME, ),\n class Meta:\n model = OrderingServices\n exclude = ['action']\n\n\n widgets = {\n # Укажем для поля favorite_colors нужный нам виджет\n 'name': forms.TextInput(attrs={'class':'fb_form form-control cl_bl','placeholder':u'Ваше Имя', 'required':'required'}),\n 'phone': forms.TextInput(attrs={'class':'fb_form form-control cl_bl','placeholder':u'Контактыный номер тел.'}),\n 'additional_message': forms.Textarea(attrs={'class':'fb_form form-control cl_bl' , 'placeholder':u'Дополнительное сообщение если необходимо'}),\n \n }\n","sub_path":"tv_mas/repair_tv/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"308327177","text":"from django.shortcuts import render\nfrom .models import VTServices\nfrom .models import VTAbout\nfrom .models import VTPortfolioMob,VTPortfolioIot,VTPortfolioWeb\n\n\ncname=\"Vaishnavi Technology\"\ncaddress=\"216,Settelment Free Colony,Solapur, Maharashtra-413001 INDIA \"\ncemail=\"vaishnavi.technology@outlook.com\"\ncmobil=\"+91-8007915552\"\ncgmap=\"https://www.google.com/maps/embed?pb=!1m18!1m12!1m3!1d1900.9729079187462!2d75.88880177168748!3d17.652725147121007!2m3!1f0!2f0!3f0!3m2!1i1024!2i768!4f13.1!3m3!1m2!1s0x0%3A0x0!2zMTfCsDM5JzA5LjgiTiA3NcKwNTMnMjMuMCJF!5e0!3m2!1sen!2sin!4v1584424575512!5m2!1sen!2sin\"\ncabout=\"is a leading software development company providing software consultancy and solutions to Industries and educational Institutes.We are one stop shop for all kinds of customized softwares and automation required.\"\ncabout1=\"providing complete suite of software solutions and services that meet their evolving needs and growing business ,the most sophisticated and yet user-friendly software to use.\"\n\ndef home(request):\n \n vtservs=VTServices.objects.all()\n \n vtabouts=VTAbout.objects.all()\n \n vtprotfoliMob=VTPortfolioMob.objects.all()\n \n vtprotfoliIot=VTPortfolioIot.objects.all()\n \n vtportfoliWeb=VTPortfolioWeb.objects.all()\n \n return 
render(request,'index.html',{'vtservs':vtservs,'cemail':cemail,'cname':cname,'cmobil':cmobil,'caddress':caddress,'cgmap':cgmap,'cabout':cabout,'cabout1':cabout1,'vtabouts':vtabouts,'vtprotfoliMob':vtprotfoliMob,'vtprotfoliIot':vtprotfoliIot,'vtportfoliWeb':vtportfoliWeb })\n\n# Create your views here.\ndef mesgBlock(request):\n n=request.POST['name']\n em=request.POST['email']\n msg=request.POST['message']\n sub=request.POST['subject']\n print('sucess')\n print(n)\n print(em)\n return render(request,'index.html')","sub_path":"vtwebsite/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"554014390","text":"import polib\nimport glob\nimport sys\nimport os.path\nimport json\n\nclass Reader():\n def read(self, directory, prefix):\n voiceDict = {}\n exp = directory + '/**/*.pot'\n\n for file in glob.glob(exp):\n print('reading %s' % file)\n entries = polib.pofile(file)\n\n if entries != None:\n print('found %d entries in %s' % (len(entries), file))\n voiceEntries = filter(lambda entry: entry.comment == 'VOICE', entries)\n \n for voiceEntry in voiceEntries:\n voiceDict[prefix + voiceEntry.msgctxt + '.ogg'] = voiceEntry.msgstr\n\n return voiceDict\n\n def main(self, directory, prefix):\n entries = self.read(directory, prefix)\n file = os.path.join(directory, '%sentries.json' % prefix)\n\n with open(file, 'w') as stream:\n json.dump(entries, stream)\n\nif __name__ == '__main__' and len(sys.argv) == 3:\n Reader().main(sys.argv[1], sys.argv[2])","sub_path":"poreader.py","file_name":"poreader.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"560078160","text":"import xml.etree.ElementTree as ET\nimport glob\nimport cPickle as pickle\n#from nltk.tokenize import word_tokenize\n\nclass Alligned:\n def __init__(self, id, original, compressed):\n self.id = id\n\n # corpus is already whitespace delimited\n self.o_words = original.split()\n c_words = compressed.split()\n self.labels = list()\n\n c_pos = 0\n for o_pos in range(len(self.o_words)):\n if c_pos == len(c_words):\n self.labels.append('O')\n elif self.o_words[o_pos] == c_words[c_pos]:\n c_pos += 1\n if o_pos == 0 or self.labels[-1] == 'O':\n self.labels.append('B')\n else:\n self.labels.append('I')\n else:\n self.labels.append('O')\n\n \"\"\"\n print(\" \".join(self.o_words))\n print(\" \".join(self.c_words))\n print(\" \".join(self.labels))\n print(\"\")\n \"\"\"\n\n self.features = list(dict() for x in self.o_words)\n self.posfeatures = list(dict() for x in self.o_words)\n self.stemmed = list(None for x in self.o_words)\n\n self.triples = []\n self.entities = []\n self.entityScores = {}\n self.facts = []\n self.phrases = []\n\n self.tree = None\n self.dependencies = None\n\n self.treefeatures = list(dict() for x in self.o_words)\n self.dependfeatures = list(dict() for x in self.o_words)\n\n def update(self, otherAligned):\n for i in range(len(self.o_words)):\n if hasattr(otherAligned, 'features'):\n self.features[i].update(otherAligned.features[i])\n if hasattr(otherAligned, 'posfeatures'):\n self.posfeatures[i].update(otherAligned.posfeatures[i])\n if hasattr(otherAligned, 'stemmed'):\n self.stemmed[i] = otherAligned.stemmed[i]\n\n if hasattr(otherAligned, 'treefeatures'):\n self.treefeatures[i].update(otherAligned.treefeatures[i])\n if hasattr(otherAligned, 'dependfeatures'):\n 
self.dependfeatures[i].update(otherAligned.dependfeatures[i])\n\n if hasattr(otherAligned, 'triples') and len(otherAligned.triples):\n self.triples = otherAligned.triples\n if hasattr(otherAligned, 'entities') and len(otherAligned.entities):\n self.entities = otherAligned.entities\n if hasattr(otherAligned, 'entityScores'):\n self.entityScores.update(otherAligned.entityScores)\n if hasattr(otherAligned, 'facts') and len(otherAligned.facts):\n self.facts = otherAligned.facts\n if hasattr(otherAligned, 'phrases') and len(otherAligned.phrases):\n self.phrases = otherAligned.phrases\n\n if hasattr(otherAligned, 'tree') and otherAligned.tree:\n self.tree = otherAligned.tree\n if hasattr(otherAligned, 'dependencies') and otherAligned.dependencies:\n self.dependencies = otherAligned.dependencies\n\n\n def __str__(self):\n uniValue = (\" \".join(self.o_words)).encode(\"utf-8\")\n return str(uniValue)\n\n def compressed(self):\n return \" \".join(self.o_words[i] for i in range(len(self.labels)) if self.labels[i] != 'O')\n\n def __len__(self):\n return len(self.o_words)\n\nif __name__ == \"__main__\":\n\n c_corpus_dir = \"/home/thcrzy1/compression_corpus/*.fixed\"\n\n originals = dict()\n c_corpus = dict()\n\n for f in glob.glob(c_corpus_dir):\n print(f)\n tree = ET.parse(f)\n root = tree.getroot()\n for text in root:\n for s in text:\n id = s.attrib['id']\n sentence = s.text\n if s.tag == 'original':\n originals[id] = sentence\n elif s.tag == 'compressed':\n c_corpus[id] = Alligned(id, originals.pop(id), sentence)\n\n if len(originals):\n print(originals)\n originals.clear()\n\n c_corpus = list(c_corpus.values())\n print(str(len(c_corpus))+\" sentences in compression corpus\")\n\n compressionCorpusCache = \"/home/thcrzy1/proj/cache/compressionCorpusCache/\"\n #compressionCorpusCache = \"../cache/compressionCorpusCache/\"\n\n cashefile = open(compressionCorpusCache+\"c_Sentences\", 'w')\n pickle.dump(c_corpus, cashefile)","sub_path":"src/realize/compressionCacheGen.py","file_name":"compressionCacheGen.py","file_ext":"py","file_size_in_byte":4320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"367853849","text":"import boto3\nimport botocore\nfrom botocore.exceptions import ClientError\nfrom boto3.s3.transfer import TransferConfig\nfrom io import StringIO, BytesIO\nimport os\nimport json\nfrom larry import utils\nfrom larry import sts\nimport uuid\nimport urllib.request\nimport urllib.parse\nfrom zipfile import ZipFile\nfrom collections import Mapping\n\n# Local S3 resource object\nresource = None\n# A local instance of the boto3 session to use\n__session = boto3.session.Session()\n\n\ndef set_session(aws_access_key_id=None,\n aws_secret_access_key=None,\n aws__session_token=None,\n region_name=None,\n profile_name=None,\n boto_session=None):\n \"\"\"\n Sets the boto3 session for this module to use a specified configuration state.\n :param aws_access_key_id: AWS access key ID\n :param aws_secret_access_key: AWS secret access key\n :param aws__session_token: AWS temporary session token\n :param region_name: Default region when creating new connections\n :param profile_name: The name of a profile to use\n :param boto_session: An existing session to use\n :return: None\n \"\"\"\n global __session, resource\n __session = boto_session if boto_session is not None else boto3.session.Session(**utils.copy_non_null_keys(locals()))\n sts.set_session(boto_session=__session)\n resource = __session.resource('s3')\n\n\ndef delete_object(bucket=None, key=None, 
uri=None, s3_resource=None):\n \"\"\"\n Deletes the object defined by the bucket/key pair (or uri).\n :param bucket: The S3 bucket\n :param key: The key of the object\n :param uri: An s3:// path containing the bucket and key of the object\n :param s3_resource: Boto3 resource to use if you don't wish to use the default resource\n :return: Dict containing the boto3 response\n \"\"\"\n s3_resource = s3_resource if s3_resource else resource\n if uri:\n (bucket, key) = decompose_uri(uri)\n return s3_resource.Bucket(bucket).Object(key=key).delete()\n\n\ndef get_object(bucket=None, key=None, uri=None, s3_resource=None):\n \"\"\"\n Performs a 'get' of the object defined by the bucket/key pair (or uri).\n :param bucket: The S3 bucket for object to retrieve\n :param key: The key of the object to be retrieved from the bucket\n :param uri: An s3:// path containing the bucket and key of the object\n :param s3_resource: Boto3 resource to use if you don't wish to use the default resource\n :return: Dict containing the Body of the object and associated attributes\n \"\"\"\n s3_resource = s3_resource if s3_resource else resource\n if uri:\n (bucket, key) = decompose_uri(uri)\n return s3_resource.Bucket(bucket).Object(key=key).get()\n\n\ndef get_object_size(bucket=None, key=None, uri=None, s3_resource=None):\n \"\"\"\n Returns the content_length of an S3 object.\n :param bucket: The S3 bucket for object to retrieve\n :param key: The key of the object to be retrieved from the bucket\n :param uri: An s3:// path containing the bucket and key of the object\n :param s3_resource: Boto3 resource to use if you don't wish to use the default resource\n :return: Size in bytes\n \"\"\"\n s3_resource = s3_resource if s3_resource else resource\n if uri:\n (bucket, key) = decompose_uri(uri)\n return s3_resource.Bucket(bucket).Object(key=key).content_length\n\n\ndef read_object(bucket=None, key=None, uri=None, amt=None, s3_resource=None):\n \"\"\"\n Performs a 'get' of the object defined by the bucket/key pair (or uri)\n and then performs a 'read' of the Body of that object.\n :param bucket: The S3 bucket for object to retrieve\n :param key: The key of the object to be retrieved from the bucket\n :param uri: An s3:// path containing the bucket and key of the object\n :param amt: The max amount of bytes to read from the object. 
All data is read if omitted.\n :param s3_resource: Boto3 resource to use if you don't wish to use the default resource\n :return: The bytes contained in the object\n \"\"\"\n s3_resource = s3_resource if s3_resource else resource\n return get_object(bucket, key, uri, s3_resource=s3_resource)['Body'].read(amt)\n\n\ndef read_dict(bucket=None, key=None, uri=None, encoding='utf-8', s3_resource=None):\n \"\"\"\n Reads in the s3 object defined by the bucket/key pair (or uri) and\n loads the json contents into a dict.\n :param bucket: The S3 bucket for object to retrieve\n :param key: The key of the object to be retrieved from the bucket\n :param uri: An s3:// path containing the bucket and key of the object\n :param encoding: The charset to use when decoding the object bytes, utf-8 by default\n :param s3_resource: Boto3 resource to use if you don't wish to use the default resource\n :return: A dict representation of the json contained in the object\n \"\"\"\n s3_resource = s3_resource if s3_resource else resource\n return json.loads(read_object(bucket, key, uri, s3_resource=s3_resource).decode(encoding),\n object_hook=utils.JSONDecoder)\n\n\ndef read_str(bucket=None, key=None, uri=None, encoding='utf-8', s3_resource=None):\n \"\"\"\n Reads in the s3 object defined by the bucket/key pair (or uri) and\n decodes it to text.\n :param bucket: The S3 bucket for object to retrieve\n :param key: The key of the object to be retrieved from the bucket\n :param uri: An s3:// path containing the bucket and key of the object\n :param encoding: The charset to use when decoding the object bytes, utf-8 by default\n :param s3_resource: Boto3 resource to use if you don't wish to use the default resource\n :return: The contents of the object as a string\n \"\"\"\n s3_resource = s3_resource if s3_resource else resource\n return read_object(bucket, key, uri, s3_resource=s3_resource).decode(encoding)\n\n\ndef read_list_of_dict(bucket=None, key=None, uri=None, encoding='utf-8', newline='\\n', s3_resource=None):\n \"\"\"\n Reads in the s3 object defined by the bucket/key pair (or uri) and\n loads the JSON Lines data into a list of dict objects.\n :param bucket: The S3 bucket for object to retrieve\n :param key: The key of the object to be retrieved from the bucket\n :param uri: An s3:// path containing the bucket and key of the object\n :param encoding: The charset to use when decoding the object bytes, utf-8 by default\n :param newline: The line separator to use when reading in the object, \\n by default\n :param s3_resource: Boto3 resource to use if you don't wish to use the default resource\n :return: The contents of the object as a list of dict objects\n \"\"\"\n s3_resource = s3_resource if s3_resource else resource\n obj = read_object(bucket, key, uri, s3_resource=s3_resource)\n lines = obj.decode(encoding).split(newline)\n records = []\n for line in lines:\n if len(line) > 0:\n record = json.loads(line, object_hook=utils.JSONDecoder)\n records.append(record)\n return records\n\n\ndef read_list_of_str(bucket=None, key=None, uri=None, encoding='utf-8', newline='\\n', s3_resource=None):\n \"\"\"\n Reads in the s3 object defined by the bucket/key pair (or uri) and\n loads the JSON Lines data into a list of dict objects.\n :param bucket: The S3 bucket for object to retrieve\n :param key: The key of the object to be retrieved from the bucket\n :param uri: An s3:// path containing the bucket and key of the object\n :param encoding: The charset to use when decoding the object bytes, utf-8 by default\n :param newline: The 
line separator to use when reading in the object, \\n by default\n :param s3_resource: Boto3 resource to use if you don't wish to use the default resource\n :return: The contents of the object as a list of dict objects\n \"\"\"\n s3_resource = s3_resource if s3_resource else resource\n obj = read_object(bucket, key, uri, s3_resource=s3_resource)\n lines = obj.decode(encoding).split(newline)\n records = []\n for line in lines:\n if len(line) > 0:\n records.append(line)\n return records\n\n\ndef read_pillow_image(bucket=None, key=None, uri=None, s3_resource=None):\n \"\"\"\n Reads in the s3 object defined by the bucket/key pair (or uri) and\n loads it into a Pillow image object\n :param bucket: The S3 bucket for object to retrieve\n :param key: The key of the object to be retrieved from the bucket\n :param uri: An s3:// path containing the bucket and key of the object\n :param s3_resource: Boto3 resource to use if you don't wish to use the default resource\n :return: The contents of the object as a Pillow image object\n \"\"\"\n s3_resource = s3_resource if s3_resource else resource\n try:\n from PIL import Image\n return Image.open(BytesIO(read_object(bucket, key, uri, s3_resource=s3_resource)))\n except ImportError as e:\n # Simply raise the ImportError to let the user know this requires Pillow to function\n raise e\n\n\ndef write(body, bucket=None, key=None, uri=None, acl=None, content_type=None, s3_resource=None):\n \"\"\"\n Write an object to the bucket/key pair (or uri).\n :param body: Data to write\n :param bucket: The S3 bucket for object to retrieve\n :param key: The key of the object to be retrieved from the bucket\n :param uri: An s3:// path containing the bucket and key of the object\n :param acl: The canned ACL to apply to the object\n :param content_type: A standard MIME type describing the format of the object data\n :param s3_resource: Boto3 resource to use if you don't wish to use the default resource\n :return: The URI of the object written to S3\n \"\"\"\n s3_resource = s3_resource if s3_resource else resource\n if uri:\n (bucket, key) = decompose_uri(uri)\n obj = s3_resource.Bucket(bucket).Object(key=key)\n if acl and content_type:\n obj.put(Body=body, ACL=acl, ContentType=content_type)\n elif acl:\n obj.put(Body=body, ACL=acl)\n elif content_type:\n obj.put(Body=body, ContentType=content_type)\n else:\n obj.put(Body=body)\n return compose_uri(bucket, key)\n\n\ndef write_temp_object(value, prefix, acl=None, s3_resource=None, bucket_identifier=None, region=None,\n bucket=None):\n \"\"\"\n Write an object to a temp bucket with a unique UUID.\n :param value: Object to write to S3\n :param prefix: Prefix to attach ahead of the UUID as the key\n :param acl: The canned ACL to apply to the object\n :param s3_resource: Boto3 resource to use if you don't wish to use the default resource\n :param bucket_identifier: The identifier to attach to the temp bucket that will be used for writing to s3, typically\n the account id (from STS) for the account being used\n :param region: The s3 region to store the data in\n :param bucket: The bucket to use instead of creating/using a temp bucket\n :return: The URI of the object written to S3\n \"\"\"\n s3_resource = s3_resource if s3_resource else resource\n if bucket is None:\n bucket = get_temp_bucket(region=region, bucket_identifier=bucket_identifier, s3_resource=s3_resource)\n key = prefix + str(uuid.uuid4())\n return write_object(value, bucket=bucket, key=key, acl=acl, s3_resource=s3_resource)\n\n\ndef write_object(value, bucket=None, 
key=None,\n uri=None,\n acl=None,\n newline='\\n',\n content_type=None,\n s3_resource=None):\n \"\"\"\n Write an object to the bucket/key pair (or uri), converting the python\n object to an appropriate format to write to file.\n :param value: Object to write to S3\n :param bucket: The S3 bucket for object to retrieve\n :param key: The key of the object to be retrieved from the bucket\n :param uri: An s3:// path containing the bucket and key of the object\n :param acl: The canned ACL to apply to the object\n :param newline: Character(s) to use as a newline for list objects\n :param content_type: Content type to apply to the file, if not present a suggested type will be applied\n :param s3_resource: Boto3 resource to use if you don't wish to use the default resource\n :return: The URI of the object written to S3\n \"\"\"\n s3_resource = s3_resource if s3_resource else resource\n extension_types = {\n 'css': 'text/css',\n 'html': 'text/html',\n 'xhtml': 'text/html',\n 'htm': 'text/html',\n 'xml': 'text/xml',\n 'csv': 'text/csv',\n 'txt': 'text/plain',\n 'png': 'image/png',\n 'jpeg': 'image/jpeg',\n 'jpg': 'image/jpeg',\n 'gif': 'image/gif',\n 'jsonl': 'application/x-jsonlines',\n 'json': 'application/json',\n 'js': 'application/javascript',\n 'zip': 'application/zip',\n 'pdf': 'application/pdf',\n 'sql': 'application/sql'\n }\n if uri:\n (bucket, key) = decompose_uri(uri)\n extension = key.split('.')[-1]\n # JSON\n if isinstance(value, Mapping):\n if content_type is None:\n content_type = 'application/json'\n return write(json.dumps(value, cls=utils.JSONEncoder), bucket, key, uri,\n acl, content_type=content_type, s3_resource=s3_resource)\n # Text\n elif isinstance(value, str):\n if content_type is None:\n content_type = extension_types.get(extension, 'text/plain')\n return write(value, bucket, key, uri, acl, content_type=content_type, s3_resource=s3_resource)\n elif isinstance(value, list):\n if content_type is None:\n content_type = extension_types.get(extension, 'text/plain')\n buff = StringIO()\n for row in value:\n if isinstance(row, Mapping):\n buff.write(json.dumps(row, cls=utils.JSONEncoder) + newline)\n else:\n buff.write(str(row) + newline)\n return write(buff.getvalue(), bucket, key, uri, acl, content_type=content_type, s3_resource=s3_resource)\n elif value is None:\n return write('', bucket, key, uri, acl, s3_resource=s3_resource, content_type=content_type)\n else:\n # try to write it as an image\n try:\n buff = BytesIO()\n fmt = 'PNG' if value.format is None else value.format\n value.save(buff, fmt)\n buff.seek(0)\n if content_type is None:\n content_type = extension_types.get(extension, extension_types.get(fmt.lower(), 'text/plain'))\n return write(buff, bucket, key, uri, content_type=content_type, s3_resource=s3_resource)\n except Exception:\n return write(value, bucket, key, uri, acl, content_type=content_type, s3_resource=s3_resource)\n\n\ndef write_pillow_image(image, image_format, bucket=None, key=None, uri=None, s3_resource=None):\n \"\"\"\n Write an image to the bucket/key pair (or uri).\n :param image: The image to write to S3\n :param image_format: The format of the image (png, jpeg, etc)\n :param bucket: The S3 bucket for the object\n :param key: The key of the object\n :param uri: An s3:// path containing the bucket and key of the object\n :param s3_resource: Boto3 resource to use if you don't wish to use the default resource\n :return: The URI of the object written to S3\n \"\"\"\n s3_resource = s3_resource if s3_resource else resource\n buff = BytesIO()\n 
image.save(buff, image_format)\n buff.seek(0)\n return write(buff, bucket, key, uri, s3_resource=s3_resource)\n\n\ndef write_as_csv(rows, bucket=None, key=None, uri=None, acl=None, delimiter=',', columns=None, headers=None,\n s3_resource=None):\n \"\"\"\n Write an object to the bucket/key pair (or uri), converting the python\n object to an appropriate format to write to file.\n :param rows: List of data to write, rows can be of type list, dict or str\n :param bucket: The S3 bucket for object to retrieve\n :param key: The key of the object to be retrieved from the bucket\n :param uri: An s3:// path containing the bucket and key of the object\n :param acl: The canned ACL to apply to the object\n :param delimiter: Column delimiter to use, ',' by default\n :param columns: The columns to write out from the source rows, dict keys or list indexes\n :param headers: Headers to add to the output\n :param s3_resource: Boto3 resource to use if you don't wish to use the default resource\n :return: The URI of the object written to S3\n \"\"\"\n s3_resource = s3_resource if s3_resource else resource\n\n def _array_to_string(_row, _delimiter, _indices=None):\n if _indices is None:\n _indices = range(len(_row))\n _line = ''\n for x in _indices:\n _line = str(row[x]) if x == 0 else _line + _delimiter + str(row[x])\n return _line\n\n buff = StringIO()\n\n # empty\n if rows is None or len(rows) == 0:\n if headers:\n buff.write(_array_to_string(headers, delimiter) + \"\\n\")\n buff.write('')\n\n # list\n elif isinstance(rows[0], list):\n indices = columns if columns else None\n if headers:\n buff.write(_array_to_string(headers, delimiter) + \"\\n\")\n for row in rows:\n buff.write(_array_to_string(row, delimiter, indices) + \"\\n\")\n\n # dict\n elif isinstance(rows[0], Mapping):\n keys = columns if columns else rows[0].keys()\n buff.write(_array_to_string(headers if headers else keys, delimiter) + \"\\n\")\n\n for row in rows:\n line = ''\n for i, k in enumerate(keys):\n value = '' if row.get(k) is None else str(row.get(k))\n line = value if i == 0 else line + delimiter + value\n buff.write(line + \"\\n\")\n\n # string\n elif isinstance(rows[0], str):\n buff.writelines(rows)\n else:\n raise Exception('Invalid input')\n return write(buff.getvalue(), bucket, key, uri, acl, s3_resource=s3_resource)\n\n\ndef rename_object(old_bucket_name, old_key, new_bucket_name, new_key, s3_resource=None):\n \"\"\"\n Renames an object in S3.\n :param old_bucket_name: Source bucket\n :param old_key: Source key\n :param new_bucket_name: Target bucket\n :param new_key: Target key\n :param s3_resource: Boto3 resource to use if you don't wish to use the default resource\n :return: None\n \"\"\"\n s3_resource = s3_resource if s3_resource else resource\n copy_source = {\n 'Bucket': old_bucket_name,\n 'Key': old_key\n }\n s3_resource.meta.client.copy(copy_source, new_bucket_name, new_key)\n s3_resource.meta.client.delete_object(Bucket=old_bucket_name, Key=old_key)\n\n\ndef object_exists(bucket=None, key=None, uri=None, s3_resource=None):\n \"\"\"\n Checks to see if an object with the given bucket/key (or uri) exists.\n :param bucket: The S3 bucket for the object\n :param key: The key of the object\n :param uri: An s3:// path containing the bucket and key of the object\n :param s3_resource: Boto3 resource to use if you don't wish to use the default resource\n :return: True if the key exists, if not, False\n \"\"\"\n s3_resource = s3_resource if s3_resource else resource\n if uri:\n (bucket, key) = decompose_uri(uri)\n try:\n 
s3_resource.Object(bucket, key).load()\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"404\":\n return False\n else:\n raise e\n return True\n\n\ndef _find_largest_common_prefix(values):\n \"\"\"\n Searches through a list of values to find the longest possible common prefix amongst them. Useful for optimizing\n more costly searches. Supports lists of strings or tuples. If tuples are used, the first value is assumed to be\n the value to search on.\n :param values: List of values (strings or tuples containing a string in the first position)\n :return: String prefix common to all values\n \"\"\"\n if isinstance(values[0], tuple):\n prefix, *_ = values[0]\n else:\n prefix = values[0]\n\n for value in values:\n key = value[0] if isinstance(value, tuple) else value\n while key[:len(prefix)] != prefix and len(prefix) > 0:\n prefix = prefix[:-1]\n return prefix\n\n\ndef find_keys_not_present(bucket, keys=None, uris=None, s3_resource=None):\n \"\"\"\n Searches an S3 bucket for a list of keys and returns any that cannot be found.\n :param bucket: The S3 bucket to search\n :param keys: A list of keys to search for (strings or tuples containing a string in the first position)\n :param uris: A list of S3 URIs to search for (strings or tuples containing a string in the first position)\n :param s3_resource: Boto3 resource to use if you don't wish to use the default resource\n :return: A list of keys that were not found (strings or tuples based on the input values)\n \"\"\"\n s3_resource = s3_resource if s3_resource else resource\n\n # If URIs are passed, convert to a list of keys to use for the search\n if uris:\n keys = []\n for value in uris:\n if isinstance(value, tuple):\n uri, *z = value\n b, key = decompose_uri(uri)\n keys.append(tuple([key]) + tuple(z))\n else:\n b, key = decompose_uri(value)\n keys.append(key)\n\n # Find the longest common prefix to use as the search term\n prefix = _find_largest_common_prefix(keys)\n\n # Get a list of all keys in the bucket that match the prefix\n bucket_obj = s3_resource.Bucket(bucket)\n all_keys = []\n for obj in bucket_obj.objects.filter(Prefix=prefix):\n all_keys.append(obj.key)\n\n # Search for any keys that can't be found\n not_found = []\n for value in keys:\n key = value[0] if isinstance(value, tuple) else value\n if key not in all_keys:\n not_found.append(value)\n return not_found\n\n\ndef decompose_uri(uri):\n \"\"\"\n Decompose an S3 URI into a bucket and key\n :param uri: S3 URI\n :return: Tuple containing a bucket and key\n \"\"\"\n bucket_name = get_bucket_name(uri)\n return bucket_name, get_bucket_key(bucket_name, uri)\n\n\ndef get_bucket_name(uri):\n \"\"\"\n Retrieve the bucket portion from an S3 URI\n :param uri: S3 URI\n :return: Bucket name\n \"\"\"\n return uri.split('/')[2]\n\n\ndef get_bucket_key(bucket_name, uri):\n \"\"\"\n Retrieves the key portion of an S3 URI\n :param bucket_name: S3 bucket name\n :param uri: S3 URI\n :return: Key value\n \"\"\"\n pos = uri.find(bucket_name) + len(bucket_name) + 1\n return uri[pos:]\n\n\ndef compose_uri(bucket, key):\n \"\"\"\n Compose a bucket and key into an S3 URI\n :param bucket: Bucket name\n :param key: Object key\n :return: S3 URI string\n \"\"\"\n return \"s3://{}/{}\".format(bucket, key)\n\n\ndef list_objects(bucket=None, prefix=None, uri=None, include_empty_keys=False, s3_resource=None):\n \"\"\"\n Returns a list of the object keys in the provided bucket that begin with the provided prefix.\n :param bucket: The S3 bucket to query\n :param prefix: The key 
prefix to use in searching the bucket\n :param uri: An s3:// path containing the bucket and prefix\n :param include_empty_keys: True if you want to include keys associated with objects of size=0\n :param s3_resource: Boto3 resource to use if you don't wish to use the default resource\n :return: A generator of object keys\n \"\"\"\n s3_resource = s3_resource if s3_resource else resource\n if uri:\n (bucket, prefix) = decompose_uri(uri)\n paginator = s3_resource.meta.client.get_paginator('list_objects')\n operation_parameters = {'Bucket': bucket, 'Prefix': prefix}\n page_iterator = paginator.paginate(**operation_parameters)\n for page in page_iterator:\n for obj in page.get('Contents', []):\n if obj['Size'] > 0 or include_empty_keys:\n yield obj['Key']\n\n\ndef fetch(url, bucket=None, key=None, uri=None, s3_resource=None):\n \"\"\"\n Retrieves the data defined by a URL to an S3 location.\n :param url: URL to retrieve\n :param bucket: The S3 bucket for the object\n :param key: The key of the object\n :param uri: An s3:// path containing the bucket and key of the object\n :param s3_resource: Boto3 resource to use if you don't wish to use the default resource\n :return: The URI of the object written to S3\n \"\"\"\n s3_resource = s3_resource if s3_resource else resource\n if uri:\n (bucket, key) = decompose_uri(uri)\n try:\n with urllib.request.urlopen(url) as response:\n return write_object(response.read(), bucket=bucket, key=key, s3_resource=s3_resource)\n except Exception as e:\n print('Failed to retrieve {} due to {}'.format(url, e))\n\n\ndef download(directory, bucket=None, key=None, uri=None, use_threads=True, s3_resource=None):\n \"\"\"\n Downloads the an S3 object to a directory on the local file system.\n :param directory: The directory to download the object to\n :param bucket: The S3 bucket for object to retrieve\n :param key: The key of the object to be retrieved from the bucket\n :param uri: An s3:// path containing the bucket and key of the object\n :param use_threads: Enables the use_threads transfer config\n :param s3_resource: Boto3 resource to use if you don't wish to use the default resource\n :return: Path of the local file\n \"\"\"\n s3_resource = s3_resource if s3_resource else resource\n if uri:\n (bucket, key) = decompose_uri(uri)\n config = TransferConfig(use_threads=use_threads)\n s3_object_local = os.path.join(directory, key.split('/')[-1])\n try:\n s3_resource.Bucket(bucket).download_file(key, s3_object_local, Config=config)\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"404\":\n print(\"The object does not exist.\")\n else:\n raise\n return s3_object_local\n\n\ndef upload(file_name, bucket=None, key=None, uri=None, s3_resource=None):\n s3_resource = s3_resource if s3_resource else resource\n if uri:\n (bucket, key) = decompose_uri(uri)\n s3_resource.Bucket(bucket).upload_file(file_name, key)\n return compose_uri(bucket, key)\n\n\ndef download_to_temp(bucket=None, key=None, uri=None, s3_resource=None):\n \"\"\"\n Downloads the an S3 object to a temp directory on the local file system.\n :param bucket: The S3 bucket for object to retrieve\n :param key: The key of the object to be retrieved from the bucket\n :param uri: An s3:// path containing the bucket and key of the object\n :param s3_resource: Boto3 resource to use if you don't wish to use the default resource\n :return: Path of the local file\n \"\"\"\n s3_resource = s3_resource if s3_resource else resource\n if uri:\n (bucket, key) = decompose_uri(uri)\n temp_dir = 
_create_temp_dir()\n file = os.path.join(temp_dir, key.split('/')[-1])\n if not os.path.isfile(file):\n print('starting download')\n download(temp_dir, bucket, key, use_threads=True, s3_resource=s3_resource)\n print('download complete')\n return file\n\n\ndef _create_temp_dir():\n \"\"\"\n Creates a temp directory in the current path.\n :return: The path of the temp directory\n \"\"\"\n _temp_dir = os.getcwd() + \"/temp\"\n if not os.path.isdir(_temp_dir):\n os.makedirs(_temp_dir)\n return _temp_dir\n\n\ndef make_public(bucket=None, key=None, uri=None, s3_resource=None):\n \"\"\"\n Makes the object defined by the bucket/key pair (or uri) public.\n :param bucket: The S3 bucket for object\n :param key: The key of the object\n :param uri: An s3:// path containing the bucket and key of the object\n :param s3_resource: Boto3 resource to use if you don't wish to use the default resource\n :return: The URL of the object\n \"\"\"\n s3_resource = s3_resource if s3_resource else resource\n if uri:\n (bucket, key) = decompose_uri(uri)\n s3_resource.meta.client.put_object_acl(Bucket=bucket, Key=key, ACL='public-read')\n return get_public_url(bucket=bucket, key=key)\n\n\ndef get_public_url(bucket=None, key=None, uri=None):\n \"\"\"\n Returns the public URL of an S3 object (assuming it's public).\n :param bucket: The S3 bucket for object\n :param key: The key of the object\n :param uri: An s3:// path containing the bucket and key of the object\n :return: The URL of the object\n \"\"\"\n if uri:\n (bucket, key) = decompose_uri(uri)\n return 'https://{}.s3.amazonaws.com/{}'.format(bucket, urllib.parse.quote(key))\n\n\ndef create_bucket(bucket, acl='private', region=__session.region_name, s3_resource=None):\n s3_resource = s3_resource if s3_resource else resource\n bucket_obj = s3_resource.Bucket(bucket)\n bucket_obj.load()\n if bucket_obj.creation_date is None:\n bucket_obj.create(ACL=acl, CreateBucketConfiguration={'LocationConstraint': region})\n bucket_obj.wait_until_exists()\n return bucket_obj\n\n\ndef delete_bucket(bucket, s3_resource=None):\n s3_resource = s3_resource if s3_resource else resource\n bucket_obj = s3_resource.Bucket(bucket)\n bucket_obj.delete()\n\n\ndef get_temp_bucket(region=None, s3_resource=None, bucket_identifier=None):\n \"\"\"\n Create a bucket that will be used as temp storage for larry commands.\n The bucket will be created in the region associated with the current session\n using a name based on the current session account id and region.\n :param region: Region to locate the temp bucket\n :param s3_resource: Boto3 resource to use if you don't wish to use the default resource\n :param bucket_identifier: The bucket identifier to use as a unique identifier for the bucket, defaults to the\n account id associated with the session\n :return: The name of the created bucket\n \"\"\"\n if region is None:\n region = __session.region_name\n if bucket_identifier is None:\n bucket_identifier = sts.account_id()\n bucket = '{}-larry-{}'.format(bucket_identifier, region)\n create_bucket(bucket, region=region, s3_resource=s3_resource)\n return bucket\n\n\n# TODO: add filter parameter\n# TODO: rationalize the list params\ndef download_to_zip(file, bucket, prefix=None, prefixes=None):\n if prefix:\n prefixes = [prefix]\n with ZipFile(file, 'w') as zfile:\n for prefix in prefixes:\n for key in list_objects(bucket, prefix):\n zfile.writestr(urllib.parse.quote(key), data=read_object(bucket, key))\n\n\ndef file_name_portion(uri):\n file = decompose_uri(uri)[1].split('/')[-1]\n return 
file[:file.rfind('.')]\n","sub_path":"larry/s3.py","file_name":"s3.py","file_ext":"py","file_size_in_byte":30374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"75477360","text":"import argparse\nimport logging\nimport os\nimport multiprocessing\n\nfrom http_server.server import ThreadedServer\n\n\ndef parse_sys_args():\n parser = argparse.ArgumentParser(description=\"Simple http server\")\n parser.add_argument(\"-i\", \"--host\", dest=\"host\", default=\"0.0.0.0\")\n parser.add_argument(\"-p\", \"--port\", dest=\"port\", type=int, default=8080)\n parser.add_argument(\"-w\", \"--workers\", dest=\"workers\", type=int, default=10)\n parser.add_argument(\"-r\", \"--root\", dest=\"document_root\", default=\"\")\n parser.add_argument(\"-l\", \"--log\", dest=\"logging_file\", default=None)\n return parser.parse_args()\n\n\ndef setup_logging(path_to_save=None):\n logging.basicConfig(format=\"[%(asctime)s] %(levelname).1s %(message)s\",\n datefmt=\"%Y.%m.%d %H:%M:%S\",\n filename=os.path.join(path_to_save),\n filemode='a',\n level=logging.INFO)\n return logging.getLogger(__name__)\n\n\ndef main(args):\n logger = setup_logging(args.logging_file)\n logger.info(\"Started with args: %s\", args)\n\n processes = []\n try:\n for i in xrange(args.workers):\n server = ThreadedServer(args.host, args.port, os.path.realpath(args.document_root), logger)\n process = multiprocessing.Process(target=server.serve_forever)\n processes.append(process)\n process.start()\n logger.info(\"Server running on the process: {}, host: {}, port: {}\".format(process.pid, args.host, args.port))\n for process in processes:\n process.join()\n except KeyboardInterrupt:\n for process in processes:\n if process:\n pid = process.pid\n logger.info(\"Trying to shutting down process {}\".format(pid))\n process.terminate()\n logger.info(\"Process {} terminated\".format(pid))\n\n\nif __name__ == \"__main__\":\n main(parse_sys_args())\n","sub_path":"homework4/httpd.py","file_name":"httpd.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"79414008","text":"# Utility functions\r\n\r\n# Imports\r\nimport smdt\r\nimport pandas as pd\r\nimport os\r\npd.set_option.use_inf_as_na = True\r\n\r\n\r\ndef load_UspInhibition():\r\n \"\"\"\r\n Import the USP Inhibiton dataset\r\n Parameters:\r\n None\r\n Returns:\r\n usp_inhibiton_dataset: pandas.DataFrame\r\n DataFrame containing descriptors and target data.\r\n \"\"\"\r\n data_path = os.path.join(smdt.__path__[0], 'examples')\r\n data_path = os.path.join(data_path, 'USP-Inhibition.csv')\r\n data = pd.read_csv(data_path)\r\n if 'Unnamed: 0' in data.columns:\r\n data.drop(['Unnamed: 0'], axis=1, inplace=True)\r\n return data\r\n\r\n\r\ndef load_MeltingPoint():\r\n \"\"\"\r\n Import the Melting Points dataset\r\n Parameters:\r\n None\r\n Returns:\r\n melting_point_dataset: pandas.DataFrame\r\n DataFrame containing descriptors and target data.\r\n \"\"\"\r\n print('References: Karthikeyan, M.; Glen, R.C.; Bender, A. General melting point prediction based on a diverse compound dataset and artificial neural networks. J. Chem. Inf. 
Model.; 2005; 45(3); 581-590')\r\n data_path = os.path.join(smdt.__path__[0], 'examples')\r\n data_path = os.path.join(data_path, 'MeltingPoint.csv')\r\n data = pd.read_csv(data_path)\r\n if 'Unnamed: 0' in data.columns:\r\n data.drop(['Unnamed: 0'], axis=1, inplace=True)\r\n return data\r\n\r\n\r\ndef load_LiBloodBarrier():\r\n \"\"\"\r\n Import the Li Blood-Brain-Barrier Penetration dataset\r\n Parameters:\r\n None\r\n Returns:\r\n data: pandas.DataFrame\r\n DataFrame containing SMILES and target data.\r\n \"\"\"\r\n print('Reference: \\nHu Li, Chun Wei Yap, Choong Yong Ung, Ying Xue, Zhi Wei Cao and Yu Zong Chen, J. Chem. Inf. Model. 2005')\r\n data_path = os.path.join(smdt.__path__[0], 'examples')\r\n data_path = os.path.join(data_path, 'Li Blood-Brain-Barrier Penetration Set.csv')\r\n data = pd.read_csv(data_path)\r\n if 'Unnamed: 0' in data.columns:\r\n data.drop(['Unnamed: 0'], axis=1, inplace=True)\r\n return data\r\n\r\n\r\n\r\ndef load_Sigma2ReceptorLigands():\r\n \"\"\"\r\n Import the Sigma-2 Receptor Selective Ligands dataset\r\n Parameters:\r\n None\r\n Returns:\r\n data: pandas.DataFrame\r\n DataFrame containing SMILES and target data.\r\n \"\"\"\r\n print('Reference: \\nG. Nastasi, C. Miceli, V. Pittala, M.N. Modica, O. Prezzavento, G. Romeo, A. Rescifina, A. Marrazzo, E. Amata'\r\n 'S2RSLDB: a comprehensive manually curated, internet-accessible database of the sigma-2 receptor selective ligands'\r\n 'J. Cheminform., 9 (2017), p. 3')\r\n data_path = os.path.join(smdt.__path__[0], 'examples')\r\n data_path = os.path.join(data_path, 'Sigma-2 Receptor Selective Ligands.csv')\r\n data = pd.read_csv(data_path)\r\n if 'Unnamed: 0' in data.columns:\r\n data.drop(['Unnamed: 0'], axis=1, inplace=True)\r\n return data\r\n","sub_path":"smdt/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"371812738","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jul 03 12:49:48 2015\r\n\r\nThis file is a simple script to calculate word counts and sequential medians on the distribution of tweets. \r\n\r\nSeveral considerations for scalability in this script: \r\n1. multiprocessing is used to word-count in parallel\r\n a. The multiprocessing is batched according to chunk_size (in MB) because pool.map waits on the completion of the rows. Not batching assumes the entire tweet data fits in memory\r\n2. A parameter (min_count) is defined to prune low-occurance words, in the case that all unique words are too large and the distribution is long-tailed.\r\n3. Calculating the median unique word count is done on the histogram of tweet length. Because the word count is bounded by the short tweet length, and the histogram is discrete, we can recover the exact median from only a small number of counts.\r\n a. I provide an alternative (more straightforward) median calculation from the histogram written for debugging purposes. It expands the full distribution of word counts and takes the nanmedian directly. This is much less scalable in the numer of tweets\r\n4. All counts use 64bit integer (maxint: 9223372036854775807)\r\n5. The medians file output is done in an online fashion, there is no vector of medians growing linearly in the length of the file. Assuming the growth of the dictionary also slows with more data, everything operates online.\r\n\r\nI haven't handling the python parameter passing via the shell. 
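Parameters are instead set as module-level constants in the #parameters block below. 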
This was intended as an executable script via the python console.\r\n\r\n\r\n@author: ivan\r\n\"\"\"\r\n\r\n\r\n#imports\r\nfrom multiprocessing import Pool\r\nimport numpy as np\r\n\r\n#file paths\r\ninput_file = '../tweet_input/tweets.txt'\r\nout_file_medians = '../tweet_output/ft2.txt'\r\nout_file_wordcounts = '../tweet_output/ft1.txt'\r\n\r\n#parameters\r\nworkers = 8 # workers \r\nchunk_size=64*(pow(10,6)) # the size of data to read per parallel batch\r\nmin_count = 1 # the minimum \r\nmax_words = 71+1 # the bounded size of unique words, assuming spaces count as characters, and tweets are maximum 140\r\n#max_words = 3000 # tested on larger data\r\n\r\n# the file reading handler to be used by multiprocessing, executing on a line of data from the text file\r\n# @input line, a text string\r\ndef twitter_map_handler(line):\r\n ret_dict = {}\r\n word_list = line.strip('\\n').split(\" \")\r\n for w in word_list:\r\n if w in ret_dict:\r\n ret_dict[w] += 1\r\n else:\r\n ret_dict[w] = 1\r\n return ret_dict\r\n\r\n# merges a list of dictionaries of the form {'a': int}, count of word 'a', will sum keys across the list\r\n# @input dict_list, a list of dictionaries of the form {'a': int}, count of word 'a'\r\n# @input dict_ret, a dictionary to aggregate into\r\n# @input min_count, an integer threshold to prune low-frequency words after this list has been processed\r\ndef dictionary_merge_sum(dict_list, dict_ret = {}, min_count=1):\r\n \r\n for dict_iter in dict_list: #for each dictionary\r\n for key,value in dict_iter.iteritems(): #for each key, value\r\n if not key in dict_ret:\r\n dict_ret[key] = value\r\n else:\r\n dict_ret[key] += value\r\n if min_count > 1: # apply threshold\r\n remove_keys=[] # build remove key list because we're iterating over the dict below, can't edit\r\n for key,value in dict_ret.iteritems():\r\n if value < min_count:\r\n remove_keys.append(key)\r\n for key in remove_keys: #remove keys\r\n dict_ret.pop(key, None) \r\n return dict_ret\r\n\r\n# efficient median calculation on a histogram\r\n# this assumes a discrete binning, [0, 1, 2, ... N]. In this case, the median is exact.\r\n# @input histogram, a numpy array containing counts where histogram[i] = int is the number of items with count i.\r\n\r\ndef median_histogram(histogram):\r\n l = np.float(np.sum(histogram)) #get number of items (length of distribution)\r\n if l == 0: #special case\r\n return float(0)\r\n \r\n c_sum = np.cumsum(histogram) #build cumulative sum \r\n ratio = np.divide(c_sum, l) # effectively a CDF \r\n idx_end = np.where(ratio > .5)[0][0] #find the first index exceeding the median\r\n if l % 2 == 0: #if distribution length is even\r\n \r\n if idx_end == 0 or c_sum[idx_end] - l/2 < c_sum[idx_end] - c_sum[idx_end-1]: # if the midpointl fita within the bucket\r\n return float(idx_end) #then no need to average, the adjacent values are the same\r\n else:\r\n idx_start = np.where((ratio <= .5) & (histogram != 0))[0][-1] #else find the previous non-zero bucket that didn't exceed ratio. 
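This bucket holds the lower of the two middle values.\r\n            # Averaging the two middle buckets then yields the exact median for an even-length distribution.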
\r\n return (float(idx_end)+float(idx_start))/2 #average these\r\n else:\r\n return float(idx_end) #if odd, we're safe to return this bucket \r\n\r\n# an inefficient median calculation on a histogram (expands the full distribution), not used below\r\n#@input histogram, a numpy array containing counts where histogram[i] = int is the number of items with count i.\r\ndef median_expanded(histogram):\r\n \r\n r = np.zeros(1)+ np.NaN \r\n for i, v in enumerate(histogram): #for all buckets\r\n if v > 0: #if bucket has items\r\n r = np.concatenate((r, np.multiply(np.ones(v), i))) #expand and concatenate to r\r\n ret = np.nanmedian(r) #note: ignore the initial NaN\r\n if np.isnan(ret):\r\n return 0.0\r\n else:\r\n return ret\r\n \r\n \r\nif __name__ == \"__main__\":\r\n \r\n wordcount_hist = np.zeros(max_words, dtype=np.int32) #array to hold the wordcount distribution over tweets\r\n \r\n pool = Pool(workers) \r\n \r\n #files \r\n f_input = open(input_file, 'rb') \r\n f_medians = open(out_file_medians, 'wb')\r\n f_wordcount = open(out_file_wordcounts, 'wb')\r\n \r\n dict_merged = {} \r\n while not f_input.closed:\r\n rows = f_input.readlines(chunk_size)\r\n if not rows:\r\n f_input.close()\r\n else:\r\n dicts_return = pool.map(twitter_map_handler, rows) #pool.map accepts a handler and waits on completion of the computation of rows. this is why I've chunked\r\n for dict_iter in dicts_return:\r\n wordcount_hist[len(dict_iter)] += 1 #dictionary has the unique word count!\r\n f_medians.write('{0:g}'.format(median_histogram(wordcount_hist)) + '\\n') #write medians online \r\n dict_merged = dictionary_merge_sum(dicts_return, dict_merged, min_count)\r\n \r\n \r\n for key, value in dict_merged.iteritems(): #write word counts\r\n f_wordcount.write(str(key) + '\\t' + str(value) + '\\n')\r\n\r\n f_medians.close()\r\n f_wordcount.close()\r\n \r\n\r\n#some sanity check tests\r\n\r\n#histogram=np.zeros(71)\r\n#histogram[11]=1\r\n#print median_expanded(histogram) == median_histogram(histogram)\r\n#histogram[14]=1\r\n#print median_expanded(histogram) == median_histogram(histogram)\r\n#histogram[17]=1\r\n#print median_expanded(histogram) == median_histogram(histogram)\r\n#\r\n#for i in range(100):\r\n# histogram=np.random.randint(0, 1, np.random.randint(2, 5, 1))\r\n# print median_expanded(histogram) == median_histogram(histogram)\r\n#\r\n#\r\n#\r\n\r\n\r\n","sub_path":"src/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":7177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"102197467","text":"# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import (nested_scopes, generators, division, absolute_import, with_statement,\n print_function, unicode_literals)\n\nimport os\nfrom textwrap import dedent\n\nfrom pants.backend.core.targets.dependencies import Dependencies\nfrom pants.backend.core.tasks.listtargets import ListTargets\nfrom pants.base.target import Target\nfrom pants.backend.jvm.targets.artifact import Artifact\nfrom pants.backend.jvm.targets.java_library import JavaLibrary\nfrom pants.backend.jvm.targets.repository import Repository\nfrom pants_test.tasks.test_base import ConsoleTaskTest\n\nclass BaseListTargetsTest(ConsoleTaskTest):\n @classmethod\n def task_type(cls):\n return ListTargets\n\n\nclass ListTargetsTestEmpty(BaseListTargetsTest):\n def test_list_all_empty(self):\n self.assertEqual('', self.execute_task())\n 
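# a custom separator should make no difference on an empty listing\n    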
self.assertEqual('', self.execute_task(args=['--test-sep=###']))\n self.assertEqual([], self.execute_console_task())\n\n\nclass ListTargetsTest(BaseListTargetsTest):\n @property\n def alias_groups(self):\n return {\n 'target_aliases': {\n 'dependencies': Dependencies,\n 'java_library': JavaLibrary,\n 'repo': Repository,\n },\n 'exposed_objects': {\n 'pants': lambda x: x,\n 'artifact': Artifact,\n },\n }\n\n def setUp(self):\n super(ListTargetsTest, self).setUp()\n\n # Setup a BUILD tree for various list tests\n\n repo_target = dedent('''\n repo(\n name='public',\n url='http://maven.twttr.com',\n push_db='/tmp/publish.properties'\n )\n ''').strip()\n self.add_to_build_file('repos', repo_target)\n\n class Lib(object):\n def __init__(self, name, provides=False):\n self.name = name\n self.provides = dedent('''\n artifact(\n org='com.twitter',\n name='%s',\n repo=pants('repos:public')\n )\n ''' % name).strip() if provides else 'None'\n\n def create_library(path, *libs):\n libs = libs or [Lib(os.path.basename(os.path.dirname(self.build_path(path))))]\n for lib in libs:\n target = \"java_library(name='%s', provides=%s, sources=[])\\n\" % (lib.name, lib.provides)\n self.add_to_build_file(path, target)\n\n create_library('a')\n create_library('a/b', Lib('b', provides=True))\n create_library('a/b/c', Lib('c'), Lib('c2', provides=True), Lib('c3'))\n create_library('a/b/d')\n create_library('a/b/e', Lib('e1'))\n self.add_to_build_file('f', dedent('''\n dependencies(\n name='alias',\n dependencies=[\n pants('a/b/c/BUILD:c3'),\n pants('a/b/d/BUILD:d')\n ]\n ).with_description(\"\"\"\n Exercises alias resolution.\n Further description.\n \"\"\")\n '''))\n\n def test_list_path(self):\n self.assert_console_output('a/b/BUILD:b', targets=[self.target('a/b')])\n\n def test_list_siblings(self):\n self.assert_console_output('a/b/BUILD:b', targets=self.targets('a/b:'))\n self.assert_console_output('a/b/c/BUILD:c', 'a/b/c/BUILD:c2', 'a/b/c/BUILD:c3',\n targets=self.targets('a/b/c/:'))\n\n def test_list_descendants(self):\n self.assert_console_output('a/b/c/BUILD:c', 'a/b/c/BUILD:c2', 'a/b/c/BUILD:c3',\n targets=self.targets('a/b/c/::'))\n\n self.assert_console_output(\n 'a/b/BUILD:b',\n 'a/b/c/BUILD:c',\n 'a/b/c/BUILD:c2',\n 'a/b/c/BUILD:c3',\n 'a/b/d/BUILD:d',\n 'a/b/e/BUILD:e1',\n targets=self.targets('a/b::'))\n\n def test_list_all(self):\n self.assert_entries('\\n',\n 'repos/BUILD:public',\n 'a/BUILD:a',\n 'a/b/BUILD:b',\n 'a/b/c/BUILD:c',\n 'a/b/c/BUILD:c2',\n 'a/b/c/BUILD:c3',\n 'a/b/d/BUILD:d',\n 'a/b/e/BUILD:e1',\n 'f/BUILD:alias')\n\n self.assert_entries(', ',\n 'repos/BUILD:public',\n 'a/BUILD:a',\n 'a/b/BUILD:b',\n 'a/b/c/BUILD:c',\n 'a/b/c/BUILD:c2',\n 'a/b/c/BUILD:c3',\n 'a/b/d/BUILD:d',\n 'a/b/e/BUILD:e1',\n 'f/BUILD:alias',\n args=['--test-sep=, '])\n\n self.assert_console_output(\n 'repos/BUILD:public',\n 'a/BUILD:a',\n 'a/b/BUILD:b',\n 'a/b/c/BUILD:c',\n 'a/b/c/BUILD:c2',\n 'a/b/c/BUILD:c3',\n 'a/b/d/BUILD:d',\n 'a/b/e/BUILD:e1',\n 'f/BUILD:alias')\n\n def test_list_provides(self):\n self.assert_console_output(\n 'a/b/BUILD:b com.twitter#b',\n 'a/b/c/BUILD:c2 com.twitter#c2',\n args=['--test-provides'])\n\n def test_list_provides_customcols(self):\n self.assert_console_output(\n '/tmp/publish.properties a/b/BUILD:b http://maven.twttr.com public com.twitter#b',\n '/tmp/publish.properties a/b/c/BUILD:c2 http://maven.twttr.com public com.twitter#c2',\n args=[\n '--test-provides',\n '--test-provides-columns=repo_db,address,repo_url,repo_name,artifact_id'\n ])\n\n def test_list_dedups(self):\n targets 
= []\n targets.extend(self.targets('a/b/d/::'))\n targets.extend(self.target('f:alias').dependencies)\n self.assertEquals(3, len(targets), \"Expected a duplicate of a/b/d/BUILD:d\")\n self.assert_console_output(\n 'a/b/c/BUILD:c3',\n 'a/b/d/BUILD:d',\n targets=targets\n )\n\n def test_list_documented(self):\n self.assert_console_output(\n # Confirm empty listing\n args=['--test-documented'],\n targets=[self.target('a/b')]\n )\n\n self.assert_console_output(\n dedent('''\n f/BUILD:alias\n Exercises alias resolution.\n Further description.\n ''').strip(),\n args=['--test-documented']\n )\n","sub_path":"tests/python/pants_test/tasks/test_listtargets.py","file_name":"test_listtargets.py","file_ext":"py","file_size_in_byte":5797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"185621982","text":"import random\r\n\r\nnum = int(input('Digite um número entre 0 e 5:'))\r\n\r\nsnum = random.randint(0, 5)\r\nprint('Número sorteado: {} '.format(snum))\r\n\r\nif num == snum:\r\n msg = 'Parabéns você acertou!'\r\nelse:\r\n msg = 'Você errou!'\r\n\r\nprint('{}'.format(msg))\r\n","sub_path":"PythonExercicios/ex028-descobreNum.py","file_name":"ex028-descobreNum.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"352886931","text":"import logging\nimport pytest\n\nfrom ocs_ci.framework.testlib import (\n ManageTest,\n tier1,\n skipif_ocs_version,\n kms_config_required,\n)\nfrom ocs_ci.helpers.helpers import (\n create_unique_resource_name,\n create_pods,\n)\nfrom ocs_ci.ocs import constants, defaults\nfrom ocs_ci.ocs.exceptions import (\n CommandFailed,\n KMSResourceCleaneupError,\n ResourceNotFoundError,\n)\nfrom ocs_ci.utility import kms\nfrom ocs_ci.ocs.ocp import OCP\n\nlog = logging.getLogger(__name__)\n\n\n@pytest.mark.parametrize(\n argnames=[\"kv_version\"],\n argvalues=[\n pytest.param(\"v1\", marks=pytest.mark.polarion_id(\"OCS-2585\")),\n pytest.param(\"v2\", marks=pytest.mark.polarion_id(\"OCS-2592\")),\n ],\n)\n@skipif_ocs_version(\"<4.7\")\n@kms_config_required\nclass TestRbdPvEncryption(ManageTest):\n \"\"\"\n Test to verify RBD PV encryption\n\n \"\"\"\n\n @pytest.fixture(autouse=True)\n def setup(self, kv_version, request):\n \"\"\"\n Setup csi-kms-connection-details configmap\n\n \"\"\"\n # Initialize Vault\n self.vault = kms.Vault()\n self.vault.gather_init_vault_conf()\n self.vault.update_vault_env_vars()\n\n # Check if cert secrets already exist, if not create cert resources\n ocp_obj = OCP(kind=\"secret\", namespace=constants.OPENSHIFT_STORAGE_NAMESPACE)\n try:\n ocp_obj.get_resource(resource_name=\"ocs-kms-ca-secret\", column=\"NAME\")\n except CommandFailed as cfe:\n if \"not found\" not in str(cfe):\n raise\n else:\n self.vault.create_ocs_vault_cert_resources()\n\n # Create vault namespace, backend path and policy in vault\n self.vault_resource_name = create_unique_resource_name(\"test\", \"vault\")\n self.vault.vault_create_namespace(namespace=self.vault_resource_name)\n self.vault.vault_create_backend_path(\n backend_path=self.vault_resource_name, kv_version=kv_version\n )\n self.vault.vault_create_policy(policy_name=self.vault_resource_name)\n\n ocp_obj = OCP(kind=\"configmap\", namespace=constants.OPENSHIFT_STORAGE_NAMESPACE)\n\n # If csi-kms-connection-details exists, edit the configmap to add new vault config\n try:\n ocp_obj.get_resource(\n resource_name=\"csi-kms-connection-details\", column=\"NAME\"\n )\n self.new_kmsid = 
self.vault_resource_name\n vdict = defaults.VAULT_CSI_CONNECTION_CONF\n for key in vdict.keys():\n old_key = key\n vdict[self.new_kmsid] = vdict.pop(old_key)\n vdict[self.new_kmsid][\"VAULT_BACKEND_PATH\"] = self.vault_resource_name\n vdict[self.new_kmsid][\"VAULT_NAMESPACE\"] = self.vault_resource_name\n\n # Workaround for BZ-1997624\n if kv_version == \"v1\":\n vdict[self.new_kmsid][\"VAULT_BACKEND\"] = \"kv\"\n else:\n vdict[self.new_kmsid][\"VAULT_BACKEND\"] = \"kv-v2\"\n\n kms.update_csi_kms_vault_connection_details(vdict)\n\n except CommandFailed as cfe:\n if \"not found\" not in str(cfe):\n raise\n else:\n self.new_kmsid = \"1-vault\"\n self.vault.create_vault_csi_kms_connection_details(\n kv_version=kv_version\n )\n\n def finalizer():\n # Remove the vault config from csi-kms-connection-details configMap\n if len(kms.get_encryption_kmsid()) > 1:\n kms.remove_kmsid(self.new_kmsid)\n\n # Delete the resources in vault\n self.vault.remove_vault_backend_path()\n self.vault.remove_vault_policy()\n self.vault.remove_vault_namespace()\n\n request.addfinalizer(finalizer)\n\n @tier1\n def test_rbd_pv_encryption(\n self,\n project_factory,\n storageclass_factory,\n multi_pvc_factory,\n pod_factory,\n kv_version,\n ):\n \"\"\"\n Test to verify creation and deletion of encrypted RBD PVC\n\n \"\"\"\n # Create a project\n proj_obj = project_factory()\n\n # Create an encryption enabled storageclass for RBD\n sc_obj = storageclass_factory(\n interface=constants.CEPHBLOCKPOOL,\n encrypted=True,\n encryption_kms_id=self.new_kmsid,\n )\n\n # Create ceph-csi-kms-token in the tenant namespace\n self.vault.vault_path_token = self.vault.generate_vault_token()\n self.vault.create_vault_csi_kms_token(namespace=proj_obj.namespace)\n\n # Create RBD PVCs with volume mode Block\n pvc_size = 5\n pvc_objs = multi_pvc_factory(\n interface=constants.CEPHBLOCKPOOL,\n project=proj_obj,\n storageclass=sc_obj,\n size=pvc_size,\n access_modes=[\n f\"{constants.ACCESS_MODE_RWX}-Block\",\n f\"{constants.ACCESS_MODE_RWO}-Block\",\n ],\n status=constants.STATUS_BOUND,\n num_of_pvc=3,\n wait_each=False,\n )\n\n # Create pods\n pod_objs = create_pods(\n pvc_objs,\n pod_factory,\n constants.CEPHBLOCKPOOL,\n pods_for_rwx=1,\n status=constants.STATUS_RUNNING,\n )\n\n # Verify if the key is created in Vault\n vol_handles = []\n for pvc_obj in pvc_objs:\n pv_obj = pvc_obj.backed_pv_obj\n vol_handle = pv_obj.get().get(\"spec\").get(\"csi\").get(\"volumeHandle\")\n vol_handles.append(vol_handle)\n\n # Check if encryption key is created in Vault\n if kms.is_key_present_in_path(\n key=vol_handle, path=self.vault.vault_backend_path\n ):\n log.info(f\"Vault: Found key for {pvc_obj.name}\")\n else:\n raise ResourceNotFoundError(f\"Vault: Key not found for {pvc_obj.name}\")\n\n # Verify whether encrypted device is present inside the pod and run IO\n for vol_handle, pod_obj in zip(vol_handles, pod_objs):\n if pod_obj.exec_sh_cmd_on_pod(\n command=f\"lsblk | grep {vol_handle} | grep crypt\"\n ):\n log.info(f\"Encrypted device found in {pod_obj.name}\")\n else:\n log.error(f\"Encrypted device not found in {pod_obj.name}\")\n\n pod_obj.run_io(\n storage_type=\"block\",\n size=f\"{pvc_size - 1}G\",\n io_direction=\"write\",\n runtime=60,\n )\n log.info(\"IO started on all pods\")\n\n # Wait for IO completion\n for pod_obj in pod_objs:\n pod_obj.get_fio_results()\n log.info(\"IO completed on all pods\")\n\n # Delete the pod\n for pod_obj in pod_objs:\n pod_obj.delete()\n pod_obj.ocp.wait_for_delete(resource_name=pod_obj.name)\n\n # Delete the 
PVC\n for pvc_obj in pvc_objs:\n pv_obj = pvc_obj.backed_pv_obj\n pvc_obj.delete()\n pv_obj.ocp.wait_for_delete(resource_name=pv_obj.name)\n\n # Verify whether the key is deleted in Vault. Skip check for kv-v2 due to BZ#1979244\n if kv_version == \"v1\":\n for vol_handle in vol_handles:\n if not kms.is_key_present_in_path(\n key=vol_handle, path=self.vault.vault_backend_path\n ):\n log.info(f\"Vault: Key deleted for {vol_handle}\")\n else:\n raise KMSResourceCleaneupError(\n f\"Vault: Key deletion failed for {vol_handle}\"\n )\n","sub_path":"tests/manage/pv_services/test_rbd_pv_encryption.py","file_name":"test_rbd_pv_encryption.py","file_ext":"py","file_size_in_byte":7602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"182030301","text":"\"\"\"\nThis is experiment 5\n\nbert as a service, concat, scikit mlp\n\n\"\"\"\n\nimport numpy as np\nimport sklearn.neural_network as nn\nfrom scipy.stats.stats import pearsonr\n\nfrom utils.resourceManager import getEmbeddedResource\n\nprint(\"Getting data...\")\ndata = getEmbeddedResource(\"exp5\", \"BertAsService\", \"de\", \"train\", subname=\"uncased\", model_dir=\"./bert/uncased_L-12_H-768_A-12\")\nval_data = getEmbeddedResource(\"exp5\", \"BertAsService\", \"de\", \"dev\", subname=\"uncased\", model_dir=\"./bert/uncased_L-12_H-768_A-12\")\nprint(\"Tokenized data\")\n\nes,cs,y =[],[],[]\nfor e, c, y_ in data:\n es.append(e)\n cs.append(c)\n y.append(y_)\nx = np.concatenate((es, cs), axis=1)\ny = np.asarray(y)\n\nes,cs,val_y =[],[],[]\nfor e, c, y_ in val_data:\n es.append(e)\n cs.append(c)\n val_y.append(y_)\nval_x = np.concatenate((es, cs), axis=1)\nval_y = np.asarray(val_y)\n\n\nprint(\"Train!\")\nmodel = nn.MLPRegressor(max_iter=4, hidden_layer_sizes=(600,600), verbose=True)\n\nmodel.fit(x, y)\nmy_y = model.predict(val_x)\n\nprint(\"PEARSON:\", pearsonr(val_y, my_y))\nprint(\"MSE\", np.mean(np.power(my_y-val_y, 2)))\nprint(\"MAE\", np.mean(np.abs(my_y-val_y)))","sub_path":"experiments/exp5.001.py","file_name":"exp5.001.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"570139842","text":"import sympy as sy\nsy.init_printing(use_unicode=False)\n\n#reference: https://gist.github.com/rougier/ebe734dcc6f4ff450abf\ndef binomial(n, k):\n \"\"\"\n A fast way to calculate binomial coefficients by Andrew Dalke.\n See http://stackoverflow.com/questions/3025162/statistics-combinations-in-python\n \"\"\"\n if 0 <= k <= n:\n ntok = 1\n ktok = 1\n for t in range(1, min(k, n - k) + 1):\n ntok *= n\n ktok *= t\n n -= 1\n return ntok // ktok\n else:\n return 0\n\n\ndef calc_formula_with(func,n):\n res=0\n for i in range(n+1):\n res+=func(n,i)*pow(a,i)*pow(b,n-i)\n return res\n\n\na=sy.symbols('a')\nb=sy.symbols('b')\nx=sy.symbols('x')\n\ndef main():\n n=8\n res=calc_formula_with(sy.binomial,n)\n print(res)\n print(res.subs({a:4,b:-x}))\n calc_formula_with(binomial,n)\n print(res)\n print(res.subs({a:4,b:-x}))\n\nif __name__ == '__main__':\n main()","sub_path":"sympyExercise/binom.py","file_name":"binom.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"583676367","text":"#!/usr/bin/env python\n\"\"\"\nExamples:\n\t%s \n\t\n\t%s \n\t\n\nDescription:\n\t2011-9-29\n\"\"\"\n\nimport sys, os, math\n__doc__ = __doc__%(sys.argv[0], sys.argv[0])\n\n\nbit_number = math.log(sys.maxint)/math.log(2)\nif bit_number>40:\t 
#64bit\n\tsys.path.insert(0, os.path.expanduser('~/lib64/python'))\n\tsys.path.insert(0, os.path.join(os.path.expanduser('~/script64')))\nelse: #32bit\n\tsys.path.insert(0, os.path.expanduser('~/lib/python'))\n\tsys.path.insert(0, os.path.join(os.path.expanduser('~/script')))\n\nimport matplotlib; matplotlib.use(\"Agg\")\t#to disable pop-up requirement\n\nimport csv\nfrom pymodule import ProcessOptions, getListOutOfStr, PassingData, getColName2IndexFromHeader, figureOutDelimiter\nfrom pymodule import yh_matplotlib\nimport pylab, random\nfrom pymodule.plot.AbstractPlot import AbstractPlot\n\n\nclass PlotTrioInconsistencyOverFrequency(AbstractPlot):\n\t__doc__ = __doc__\n\toption_default_dict = AbstractPlot.option_default_dict.copy()\n\toption_default_dict[('xColumnHeader', 1, )][0] = 'frequency'\n\toption_default_dict[('xColumnPlotLabel', 0, )][0] = 'frequency'\n\toption_default_dict[('whichColumnPlotLabel', 0, )][0] = 'inconsistent rate'\n\t\"\"\"\n\toption_default_dict = {('outputFname', 1, ): [None, 'o', 1, 'output file for the figure.'],\\\n\t\t\t\t\t\t('minNoOfTotal', 1, int): [100, 'i', 1, 'minimum no of total variants (denominator of inconsistent rate)'],\\\n\t\t\t\t\t\t('title', 0, ): [None, 't', 1, 'title for the figure.'],\\\n\t\t\t\t\t\t('figureDPI', 1, int): [200, 'f', 1, 'dpi for the output figures (png)'],\\\n\t\t\t\t\t\t('formatString', 1, ): ['-', '', 1, 'formatString passed to matplotlib plot'],\\\n\t\t\t\t\t\t('ylim_type', 1, int): [1, 'y', 1, 'y-axis limit type, 1: 0 to max. 2: min to max'],\\\n\t\t\t\t\t\t('samplingRate', 1, float): [0.001, 's', 1, 'how often you include the data'],\\\n\t\t\t\t\t\t('debug', 0, int):[0, 'b', 0, 'toggle debug mode'],\\\n\t\t\t\t\t\t('report', 0, int):[0, 'r', 0, 'toggle report, more verbose stdout/stderr.']\n\t\t\t\t\t\t}\n\t\"\"\"\n\n\tdef __init__(self, inputFnameLs=None, **keywords):\n\t\t\"\"\"\n\t\t2011-7-11\n\t\t\"\"\"\n\t\tfrom pymodule import ProcessOptions\n\t\tself.ad = ProcessOptions.process_function_arguments(keywords, self.option_default_dict, error_doc=self.__doc__, \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tclass_to_have_attr=self)\n\t\tself.inputFnameLs = inputFnameLs\n\t\n\t@classmethod\n\tdef trioInconsistentRateFileWalker(cls, inputFname, processFunc=None, minNoOfTotal=100, run_type=1):\n\t\t\"\"\"\n\t\t2012.10.25 only skip except during file opening, not file reading\n\n\t\t2011-9-30\n\t\t\"\"\"\n\t\ttry:\n\t\t\treader = csv.reader(open(inputFname), delimiter=figureOutDelimiter(inputFname))\n\t\t\theader = reader.next()\n\t\t\tcol_name2index = getColName2IndexFromHeader(header, skipEmptyColumn=True)\n\t\texcept:\n\t\t\tsys.stderr.write('Except type: %s\\n'%repr(sys.exc_info()))\n\t\t\timport traceback\n\t\t\ttraceback.print_exc()\n\t\t\treturn\n\t\tinconsistent_rate_index = col_name2index.get(\"inconsistency\")\n\t\tif run_type==1:\n\t\t\tindex_of_x_data = col_name2index.get(\"stopFrequency\")\n\t\telif run_type==2:\n\t\t\tindex_of_x_data = col_name2index.get(\"stop\")\n\t\telse:\n\t\t\tsys.stderr.write(\"Unsupported run_type %s in trioInconsistentRateFileWalker().\\n\"%(run_type))\n\t\t\tsys.exit(3)\n\t\tindex_of_no_of_total = col_name2index.get(\"no_of_total\")\n\t\tinconsistent_rate_ls = []\n\t\tx_ls = []\n\t\tfor row in reader:\n\t\t\tif self.samplingRate<1 and self.samplingRate>=0:\n\t\t\t\tr = random.random()\n\t\t\t\tif r>self.samplingRate:\n\t\t\t\t\tcontinue\n\t\t\tno_of_total = int(float(row[index_of_no_of_total]))\n\t\t\tif no_of_total<=minNoOfTotal:\n\t\t\t\tcontinue\n\t\t\tinconsistency = 
float(row[inconsistent_rate_index])\n\t\t\tinconsistent_rate_ls.append(inconsistency)\n\t\t\tx_data = float(row[index_of_x_data])\n\t\t\tx_ls.append(x_data)\n\t\tprocessFunc(x_ls, inconsistent_rate_ls)\n\t\tdel reader\n\t\n\tdef plotXY(self, x_ls, y_ls, ):\n\t\t\"\"\"\n\t\t2011-9-30\n\t\t\"\"\"\n\t\tpylab.plot(x_ls, y_ls, self.formatString)\n\t\t\n\t\n\tdef run(self):\n\t\t\n\t\tif self.debug:\n\t\t\timport pdb\n\t\t\tpdb.set_trace()\n\t\t\n\t\tpylab.clf()\n\t\t\n\t\tfor inputFname in self.inputFnameLs:\n\t\t\tif os.path.isfile(inputFname):\n\t\t\t\tself.trioInconsistentRateFileWalker(inputFname, processFunc=self.plotXY, minNoOfTotal=self.minNoOfTotal,\\\n\t\t\t\t\t\t\t\t\t\t\trun_type=1)\n\t\t\n\t\tif self.title is None:\n\t\t\ttitle = \" %s refs\"%(len(self.inputFnameLs))\n\t\telse:\n\t\t\ttitle = self.title\n\t\t\n\t\tpylab.title(title)\n\t\tself.handleXLabel()\n\t\tself.handleYLabel()\n\t\t\n\t\tpylab.savefig(self.outputFname, dpi=self.figureDPI)\n\t\tsys.stderr.write(\"\\n\")\n\n\nif __name__ == '__main__':\n\tmain_class = PlotTrioInconsistencyOverFrequency\n\tpo = ProcessOptions(sys.argv, main_class.option_default_dict, error_doc=main_class.__doc__)\n\tinstance = main_class(po.arguments, **po.long_option2value)\n\tinstance.run()\n","sub_path":"src/plot/PlotTrioInconsistencyOverFrequency.py","file_name":"PlotTrioInconsistencyOverFrequency.py","file_ext":"py","file_size_in_byte":4541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"38048125","text":"import sys\nfrom dcat.core import *\n\nEXTRAS_MAPPING = {\n 'Source/Citation': DC.source,\n 'Acronym': DCAT.keyword,\n 'Number of economies': DC.extent,\n 'Update frequency': DC.accrualPeriodicity,\n 'Periodicity': DC.accrualPeriodicity,\n 'Update schedule': DC.accrualPolicy,\n 'Next expected date of update': DC.accrualPolicy,\n 'Type': DC.subject,\n 'Granularity': DCAT.granularity,\n 'Last update': DC.modified,\n 'Data notes': RDFS.label,\n 'Coverage': DC.spatial\n}\n\nTYPES = {\n 'EXCEL': 'application/vnd.ms-excel',\n '(Excel)': 'application/vnd.ms-excel',\n '(CSV)': 'text/csv',\n 'Available in the API': 'api/json',\n 'Databank': 'api/html'\n}\n\n#id=ctl00_ContentPlaceHolderDefault_ResourceViewUC_lblResponsibleUserName/a\n#ctl00_ContentPlaceHolderDefault_ResourceViewUC_lblPubliced\n#ctl00_ContentPlaceHolderDefault_ResourceViewUC_lblType\n#ctl00_ContentPlaceHolderDefault_ResourceViewUC_divResourceDescription\n#ctl00_ContentPlaceHolderDefault_ResourceViewUC_ucTags_tagsList_htmlUL\n#ctl00_ContentPlaceHolderDefault_ResourceViewUC_supplemetaryTabContainer\n# dl\n# dt\n# dd\n# id=dlTaxonomynodes, dl\n# dt\n# dd\n# dl\n# dt\n# dd\n#\n# tr class=odd|even|odd-last|even-last\n\n\nclass DigitaliserDkDatasetCrawler(Crawler):\n\n def handle_doc(self, doc, url=None):\n dataset = URIRef(url)\n log.warn(doc)\n #self.graph.add((dataset, RDF.type, DCAT.Dataset))\n #rights = URIRef('http://data.worldbank.org/summary-terms-of-use')\n #self.graph.add((dataset, DC.rights, rights))\n #self.graph.add((rights, RDFS.label, Literal(\"Attribution Terms\")))\n #self.graph.add((dataset, DC.creator, Literal(\"The World Bank\")))\n\n #name = url.rsplit('/', 1)[-1].strip()\n #self.graph.add((dataset, DC.identifier, Literal(name)))\n #title = doc.findtext('//h2[@class=\"page-title \"]').strip()\n #self.graph.add((dataset, DC.title, Literal(title)))\n #description = doc.findtext('//div[@class=\"node-body\"]/p')\n #self.graph.add((dataset, DC.description, Literal(description)))\n\n #time = BNode()\n 
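# (kept for reference: the disabled lines below would build a DC.temporal interval)\n        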
#self.graph.add((time, RDF.type, TIME.Interval))\n #self.graph.add((dataset, DC.temporal, time))\n\n #for extra_rows in doc.findall('//div[@class=\"view-content\"]/div/div'):\n # key = extra_rows.findtext('label').strip()\n # value = Literal(extra_rows.findtext('span').strip())\n # if key == 'Start date':\n # self.graph.add((time, TIME.start, Literal(value)))\n # continue\n # if key == 'End date':\n # self.graph.add((time, TIME.end, Literal(value)))\n # continue\n # pred = EXTRAS_MAPPING.get(key)\n # if pred is None:\n # log.warn(\"Unknwon extra field: %s\" % key)\n # else:\n # self.graph.add((dataset, pred, Literal(value)))\n # self.graph.add((Literal(value), RDFS.comment, Literal(key)))\n\n #for resource in doc.findall('//div[@class=\"views-field-nothing\"]//a'):\n # res = BNode()\n # self.graph.add((res, RDF.type, DCAT.Distribution))\n # self.graph.add((dataset, DCAT.distribution, res))\n # res_label = resource.xpath('string()').strip()\n # for pattern, mime in TYPES.items():\n # if pattern in res_label:\n # self.graph.add((res, DC['format'], Literal(mime)))\n # self.graph.add((res, RDFS.label, Literal(res_label)))\n # self.graph.add((res, DC.title, Literal(res_label)))\n # accessURL = URIRef(resource.get('href').strip())\n # self.graph.add((res, DCAT.accessURL, accessURL))\n\n #for related in doc.findall('//div[@class=\"views-field-field-sidebar-value\"]//a'):\n # rel_label = Literal(related.xpath('string()').strip())\n # accessURL = URIRef(related.get('href').strip())\n # self.graph.add((dataset, DC.relation, accessURL))\n # self.graph.add((accessURL, RDFS.label, rel_label))\n\n #self.write(name)\n","sub_path":"dcat/crawl/digitaliser.py","file_name":"digitaliser.py","file_ext":"py","file_size_in_byte":4128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"297444980","text":"# -*- coding: utf-8 -*-\n\nimport web\n\n# import blog.widgets\nfrom blog import widgets as blog_widgets\nfrom blog.models import Blog, Widget, blog\nfrom theme.models import Theme, ThemeFile\n\nfrom util.template import render\nfrom util import requires_admin\n\nfrom blog.base_front import Processor\nimport simplejson as json\nimport yaml\n\nclass theme(object):\n \"\"\"模板管理\"\"\"\n @requires_admin\n def GET(self):\n use_theme = blog.theme\n themes = Theme.all()\n \n return render('admin/theme.html',themes=themes,use_theme=use_theme)\n\nclass theme_screenshot(object):\n \"\"\"模板缩略图\"\"\"\n @requires_admin\n def GET(self, name):\n import binascii\n \n theme = Theme.get_by_key_name(name)\n screenshot = str(theme.screenshot)\n \n etag = str(binascii.crc32(screenshot))\n \n match = web.ctx.env.get('HTTP_IF_NONE_MATCH')\n if match and match == etag:\n raise web.notmodified()\n \n web.header('ETag', etag)\n web.header('Content-Type', 'image/png')\n return screenshot\n\nclass change_theme(object):\n \"\"\"修改当前使用模板\"\"\"\n @requires_admin\n def GET(self, name):\n from util.template import env\n blog.theme = name\n blog.update()\n \n env.cache.clear()\n raise web.seeother('/admin/theme')\n\nclass install_theme(object):\n \"\"\"安装模板\"\"\"\n @requires_admin\n def GET(self):\n return render('admin/theme_install.html')\n \n @requires_admin\n def POST(self):\n import zipfile\n import datetime\n \n inp = web.input(theme={})\n theme_zip = inp.theme\n fileinfo = zipfile.ZipFile(theme_zip.file)\n config = yaml.load(fileinfo.read('config.yaml'))\n \n theme_name = config.get(\"name\")\n theme = Theme.get_by_key_name(theme_name)\n if not theme:\n theme = 
Theme(key_name=theme_name)\n theme.name = config.get(\"name\")\n theme.author = config.get(\"author\")\n theme.homepage = config.get(\"homepage\")\n theme.description = config.get(\"description\")\n theme.sidebar = config.get(\"sidebar\")\n screenshot = fileinfo.read('screenshot.png')\n theme.screenshot = screenshot\n theme.save()\n \n for i in fileinfo.infolist():\n filename = i.filename\n if filename.endswith('/') or \\\n filename in ['config.yaml', 'screenshot.png']:\n continue\n \n file_size = i.file_size\n date_time = i.date_time\n date_time = datetime.datetime(*date_time)\n theme_file = ThemeFile.all().filter('theme_name =', theme_name).\\\n filter('filename =', filename).get()\n if not theme_file:\n theme_file = ThemeFile()\n theme_file.theme_name = theme_name\n theme_file.filename = filename\n theme_file.filecontent = fileinfo.read(filename)\n if filename.endswith('.html'):\n filetype = 'template'\n else:\n filetype = 'file'\n theme_file.filetype = filetype\n theme_file.modified = date_time\n theme_file.save()\n \n raise web.seeother('/admin/theme')\n\nclass delete_theme(object):\n \"\"\"删除模板\"\"\"\n @requires_admin\n def GET(self, name):\n theme = Theme.get_by_key_name(name)\n theme_files = ThemeFile.all().filter('theme_name =', name)\n for theme_file in theme_files:\n theme_file.delete()\n theme.delete()\n \n raise web.seeother('/admin/theme')\n\nclass init_widget(object):\n \"\"\"初始化默认装饰\"\"\"\n @requires_admin\n def GET(self):\n widgets = Widget.all()\n for widget in widgets:\n widget.delete()\n \n widget_modules = blog_widgets.default_widgets\n for widget_name in widget_modules:\n widget = Widget(key_name=widget_name)\n widget.name = widget_name\n widget.package = 'blog.widgets.%s' % widget_name\n widget.save()\n \n raise web.seeother('/admin/theme/widget')\n\nclass widget(object):\n \"\"\"侧边条小工具\"\"\"\n @requires_admin\n def GET(self):\n widgets = Widget.all()\n theme = blog.theme\n \n processor = Processor()\n theme = Theme.get_by_key_name(theme)\n sidebar_num = theme.sidebar\n \n # return theme_widget.get('1')\n return render('admin/widget.html',\n widgets=widgets,\n sidebar_num=sidebar_num,\n processor=processor\n )\n \n @requires_admin\n def POST(self):\n \"\"\"保存模板装饰布局\"\"\"\n data = web.data()\n blog = Blog.get()\n blog.theme_widget = data\n blog.update()\n \n return json.dumps({'status': 'ok'})\n","sub_path":"theme/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":4951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"59090975","text":"\"\"\"This contains all of the URL mappings used by the Oplog application.\"\"\"\n \n# Django & Other 3rd Party Libraries\nfrom django.urls import include, path\nfrom rest_framework import routers\n\nfrom .views import (\n OplogCreateWithoutProject,\n OplogEntriesImport,\n OplogEntryCreate,\n OplogEntryDelete,\n OplogEntryUpdate,\n OplogEntryViewSet,\n OplogListEntries,\n OplogViewSet,\n index,\n load_projects\n)\n\napp_name = \"ghostwriter.oplog\"\n\nrouter = routers.DefaultRouter()\nrouter.register(\"entries\", OplogEntryViewSet)\nrouter.register(\"oplogs\", OplogViewSet)\n\nurlpatterns = [\n path(\"\", index, name=\"index\"),\n path(\"api/\", include(router.urls)),\n path(\"create/\", OplogCreateWithoutProject.as_view(), name=\"oplog_create\"),\n path(\"load-projects/\", load_projects, name=\"load_projects\"),\n path(\n \"/entries/create\", OplogEntryCreate.as_view(), name=\"oplog_entry_create\"\n ),\n path(\n \"/entries/update\", OplogEntryUpdate.as_view(), 
name=\"oplog_entry_update\"\n ),\n path(\n \"/entries/delete\", OplogEntryDelete.as_view(), name=\"oplog_entry_delete\"\n ),\n path(\"/entries\", OplogListEntries, name=\"oplog_entries\"),\n path(\"import\", OplogEntriesImport, name=\"oplog_import\"),\n]\n","sub_path":"ghostwriter/oplog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"535345024","text":"from django.db import models\nfrom django.utils import timezone\nfrom django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned\nfrom user.models import UserProfile\n# Create your models here.\n\nclass QuizSessionManager(models.Manager):\n\n class Meta:\n abstract = True\n\n def createAttendance(self, created_by, created_on):\n attendance = self.create(created_by = created_by,\n created_on = created_on)\n attendance.init_empty_fields()\n return attendance.id\n\n def create_quiz_session(self, created_by, created_on, source):\n quiz_session = self.create(created_by = created_by,\n created_on = created_on,\n source = source)\n quiz_session.init_empty_fields()\n return quiz_session.id\n\n def has_session_with_id(self, table_class, session_id):\n result = table_class.objects.filter(id__exact = session_id)\n return len(result) > 0\n\n def get_session_by_id(self, table_class, session_id):\n try:\n session = table_class.objects.get(id__exact = session_id)\n return session\n except MultipleObjectsReturned:\n print (\"More than one objects with the same id.\")\n return None\n except ObjectDoesNotExist:\n print (\"Object with this id does not exist.\")\n return None\n\n # def set_session_message_id(self, table_class, session_id, message_id):\n # session = self.get_session_by_id(table_class, session_id)\n\n def set_message_id(self, attendance_id, message_id): \n attendance = self.getAttendanceByID(attendance_id)\n # print ('attendance to be update', attendance)\n if (attendance != None):\n attendance.messageid = message_id\n attendance.save()\n return self \n\n def set_room_id(self, attendance_id, room_id): \n attendance = self.getAttendanceByID(attendance_id)\n # print('attendance to be update: ', attendance)\n if (attendance != None):\n attendance.roomid = room_id\n attendance.save()\n return self \n\n def getAttendanceByID(self, attendanceID):\n try:\n attendance = self.get(id__exact = attendanceID)\n return attendance\n except MultipleObjectsReturned:\n print (\"More than one objects with the same username and chat_url.\")\n return None\n except ObjectDoesNotExist:\n print (\"Object does not exist.\")\n return None \n\n def get_session_by_id(self, session_id):\n try:\n session = self.get(id__exact = session_id)\n return session\n except MultipleObjectsReturned:\n print (\"More than one objects with the same id.\")\n return None\n except ObjectDoesNotExist:\n print (\"Object does not exist.\")\n return None \n\nclass AttendanceManager(QuizSessionManager):\n\n class Meta:\n abstract = True\n\n#base class for any quiz session\nclass QuizSession(models.Model):\n created_by = models.ForeignKey(UserProfile, on_delete = models.SET_NULL, null = True)\n created_on = models.DateTimeField(auto_now_add = True)\n source = models.CharField(max_length = 255, default = '')\n messageid = models.CharField(max_length = 255)\n roomid = models.CharField(max_length = 255)\n\n class Meta:\n abstract = True \n\n def init_empty_fields(self):\n self.messageid = \"\"\n self.roomid = \"\"\n # self.source = \"\"\n self.save()\n return 
self\n\n def set_room_id(self, room_id):\n self.roomid = room_id\n self.save()\n return self\n\n def set_source(self, source):\n self.source = source\n self.save()\n return self\n \n def set_message_id(self, message_id):\n self.messageid = message_id\n self.save()\n return self\n\n def __str__(self):\n return 'id: ' + str(self.id)\\\n + ' created_by: ' + self.created_by.username\\\n + ' source: ' + self.source\\\n + ' messageid: ' + self.messageid\\\n + ' roomid: ' + self.roomid + str(\"\\n\")\n\nclass Attendance(QuizSession):\n\n objects = AttendanceManager() \n\n\nclass StudentSubmissionManager(models.Manager):\n class Meta:\n abstract = True\n\n\n\nclass AttendanceSubmitManager(models.Manager):\n def createAttendanceSubmit(self, attendance, tempProfile):\n\n submitted_by = UserProfile.objects.createUserProfile(tempProfile) \n submitted_by_list = self.student_submitted(submitted_by, attendance)\n if (not submitted_by_list):\n attendanceSubmit = self.create(attendance = attendance, \n submitted_on = timezone.now(), \n submitted_by = submitted_by,\n correct_submission = False)\n attendanceSubmit.save()\n return attendanceSubmit.id\n\n return submitted_by_list[0].id \n \n def createAttSubmit(self, attendance, submitted_on, submitted_by):\n submission = self.create(attendance = attendance, \n submitted_on = submitted_on, \n submitted_by = submitted_by) \n attendanceSubmit.save()\n return submission.id\n\n def verify_submission(self, submitted_by, attendance):\n try:\n submission = self.get(submitted_by__id__exact = submitted_by.id,\n attendance__id__exact = attendance.id)\n submission.correct_submission = True\n submission.save()\n except Exception:\n print (Exception)\n return None\n return self \n\n def student_submitted(self, submitted_by, attendance):\n try :\n submissionList = self.filter(submitted_by__id__exact = submitted_by.id,\n attendance__id__exact = attendance.id)\n if (not submissionList):\n return None\n else:\n return submissionList \n except Exception:\n print (Exception) \n return None \n\n def getSubmissionList(self, attendance):\n try :\n submissionList = self.filter(attendance__id__exact = attendance.id)\n if (not submissionList):\n return None\n else:\n return submissionList \n except Exception:\n print (Exception) \n return None\n\nclass StudentSubmission(models.Model):\n submitted_on = models.DateTimeField(auto_now_add = True) \n submitted_by = models.ForeignKey(UserProfile, on_delete = models.SET_NULL, null = True)\n\n class Meta:\n abstract = True\n \nclass AttendanceSubmit(StudentSubmission):\n attendance = models.ForeignKey(Attendance, on_delete = models.SET_NULL, null = True)\n correct_submission = models.BooleanField()\n objects = AttendanceSubmitManager()\n\n def format_str(self, att_id, username, correct, name, email):\n return 'att_id: ' + str(att_id)\\\n + ' stud_id: ' + str(username)\\\n + ' correct: ' + str(correct)\\\n + ' name: ' + str(name)\\\n + ' email: ' + str(email)\n\n def __str__(self):\n return self.format_str(self.attendance.id, self.submitted_by.id,\n self.correct_submission, self.submitted_by.name, \n self.submitted_by.email)\n\n","sub_path":"attendance_app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"460220834","text":"import math\nimport numpy\nimport json\n\n\ndef von_karman(r=1, l=4, th=0, cone_res=200, rot_res=50, conn=False):\n if conn == True:\n slant = bool(input('Slant: (True / False)\\n'))\n lip = 
float(input('Lip:\\n'))\n conn_rad = float(input('Connector inner radius:\\n'))\n conn_len = float(input('Connector length\\n'))\n if th == 0:\n th = r*0.07\n\n # Create points for outer wall of 2D cone drawing\n outer_wall = []\n for x in [n * (l / cone_res) for n in range(0,cone_res+1)]:\n theta = math.acos(1-(2*x/l))\n outer_wall.append([round(x,3), round(r/math.sqrt(math.pi)*math.sqrt(theta - math.sin(2*theta)/2),6), 0.0])\n # outer_wall.append([r,])Inn\n\n # Create points for inner wall of 2D cone drawing\n inner_wall = []\n for x in [n * (l / cone_res) for n in range(0,cone_res+1)]:\n theta = math.acos(1-(2*x/l))\n if r/math.sqrt(math.pi)*math.sqrt(theta - math.sin(2*theta)/2)-th >= 0 and x <= 0.9375*l:\n inner_wall.append([round(x,3), round(r/math.sqrt(math.pi)*math.sqrt(theta - math.sin(2*theta)/2)-th,6), 0.0])\n inner_wall[0][1] = 0.0\n slant_pt2 = [[0.0625*l + l, round((-1)*((0.0625*l + l)-inner_wall[-1][0]) + inner_wall[-1][1],6), 0]]\n\n verts = outer_wall + inner_wall + slant_pt2\n # print(verts)\n \n # Rotate drawing around x axis by equal steps\n rot_verts = []\n for t in [(2*math.pi/rot_res)*n for n in range(0,rot_res+1)]:\n for v in verts:\n rv = list(numpy.dot([[1, 0, 0],[0, math.cos(t), math.sin(t)],[0, math.sin(t)*(-1), math.cos(t)]], v))\n rot_verts.append([rv[0], round(rv[1],6), round(rv[2],6)])\n\n outjson = json.dumps({'Outer': outer_wall, 'Inner': inner_wall, 'Slant': slant_pt2, 'Rotation': rot_verts})\n # outjson = json.dumps({'Verts': verts})\n \n with open('testfile.json', 'w+') as testfile:\n testfile.writelines(outjson)\n # print(rot_verts)\n\n# def write_obj():\n# filename = input('Filename\\n') + '.obj'\n# vertices = []\n# normals = []\n# faces = []\n# with open(filename, \"w+\") as obj:\n# for v in vertices:\n# obj.write(f'v {v[0]} {v[1]} {v[2]}\\n')\n\n# for vn in normals:\n# obj.write(f'vn {vn[0]} {vn[1]} {vn[2]}\\n')\n\n# for f in faces:\n# obj.write(f'f {f[0]}//{f[0]} {f[0]}//{f[0]} {f[0]}//{f[0]}\\n') \n\n# obj.close()\n\nif __name__ == \"__main__\":\n von_karman()\n # v = list(numpy.dot([[1, 0, 0],[0, math.cos(math.pi/2), math.sin(math.pi/2)],[0, math.sin(math.pi/2)*(-1), math.cos(math.pi/2)]], [4.25, 0.414746, 0]))\n # v[1] = round(v[1],6)\n # print(v)","sub_path":"vkbuilder.py","file_name":"vkbuilder.py","file_ext":"py","file_size_in_byte":2618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"539659032","text":"import torch.nn as nn\nimport torch\nimport torch.nn.functional as F\nfrom config import embedding_freeze, att_concat_hz, device\nimport numpy as np \n\n# class DecoderRNN(nn.Module):\n# def __init__(self, emb_size, hidden_size, vocab_size, num_layers, rnn_type = 'GRU', embedding_weight = None, dropout_rate = 0.1):\n# super(DecoderRNN, self).__init__()\n# self.hidden_size = hidden_size\n# self.num_layers = num_layers\n# self.dropout = nn.Dropout(dropout_rate)\n# if embedding_weight is not None:\n# self.embedding = nn.Embedding.from_pretrained(embedding_weight, freeze = embedding_freeze)\n# else:\n# self.embedding = nn.Embedding(vocab_size,emb_size)\n# self.rnn_type = rnn_type\n# if rnn_type == 'GRU':\n# self.gru = nn.GRU(emb_size, hidden_size, num_layers, batch_first=True, dropout = dropout_rate)\n# elif rnn_type == 'LSTM':\n# self.lstm = nn.LSTM(emb_size, hidden_size, num_layers, batch_first=True, dropout = dropout_rate)\n# else:\n# print('RNN TYPE ERROR')\n# self.out = nn.Linear(hidden_size, vocab_size)\n# self.logsoftmax = nn.LogSoftmax(dim=1)\n\n# def forward(self, tgt_input, hidden, 
true_len = None, encoder_outputs = None, cell = None):\n# output = self.embedding(tgt_input)\n# #print(output.size())\n# if self.rnn_type == 'GRU':\n# output, hidden = self.gru(output, hidden)\n# else:\n# output, (hidden, cell) = self.lstm(output,(hidden, cell))\n# logits = self.out(output.squeeze(1))\n# output = self.logsoftmax(logits)\n# return output, hidden, None, cell\n\n# # def initHidden(self, encoder_hidden):\n# # batch_size = encoder_hidden.size(1)\n# # return encoder_hidden.expand(self.num_layers, batch_size, self.hidden_size).contiguous()\n \n\nclass DecoderAtten(nn.Module):\n # encoder_params = (en_num_layers, en_num_direction, en_hidden_size)\n def __init__(self, vocab_size, vocab_size_pred, emb_size, hidden_size, num_layers, encoder_params, rnn_type='GRU', embedding_weight=None, atten_type='dot_prod', dropout_rate=0.1):\n super(DecoderAtten, self).__init__()\n self.hidden_size = hidden_size\n self.dropout = nn.Dropout(dropout_rate)\n self.num_layers = num_layers\n\n en_output_hz = encoder_params[1]*encoder_params[2]\n if embedding_weight is not None:\n self.embedding = nn.Embedding.from_pretrained(embedding_weight, freeze=False)\n else:\n self.embedding = nn.Embedding(vocab_size, emb_size)\n self.rnn_type = rnn_type\n if rnn_type == 'GRU':\n self.gru = nn.GRU(emb_size, hidden_size, num_layers, batch_first=True)\n elif rnn_type == 'LSTM':\n self.lstm = nn.LSTM(emb_size, hidden_size, num_layers, batch_first=True)\n else:\n print('RNN TYPE ERROR')\n self.atten = AttentionLayer(hidden_size, en_output_hz, atten_type=atten_type)\n \n self.linear = nn.Linear(en_output_hz+hidden_size, hidden_size)\n\n self.copy_mech = CopyMechanism(hidden_size, en_output_hz, vocab_size_pred)\n # self.out = nn.Linear(hidden_size, vocab_size)\n # self.logsoftmax = nn.LogSoftmax(dim=1)\n\n def forward(self, tgt_input, hidden, true_len, encoder_outputs, cell=None):\n output = self.embedding(tgt_input)\n output = self.dropout(output)\n #print(output.size())\n if self.rnn_type == 'GRU':\n output, hidden = self.gru(output, hidden)\n else:\n output, (hidden, cell) = self.lstm(output, (hidden, cell))\n ### add attention\n atten_output, atten_weight = self.atten(output, encoder_outputs, true_len)\n out1 = torch.cat((output, atten_output), -1)\n out2 = self.linear(out1.squeeze(1))\n out2 = F.relu(out2)\n\n # add copy mechanism here \n prob_scores = self.copy_mech(out2, encoder_outputs, true_len)\n\n\n # logits = self.out(out2)\n # output = self.logsoftmax(logits)\n\n return prob_scores, hidden, atten_weight, cell\n \n def initHidden(self, batch_size):\n # batch_size = encoder_hidden.size(1)\n #(en_num_layers*num_direction, bz, en_hidden_size) >> (bz, en_num_layers*num_direction*en_hidden_size)\n # encoder_hidden = encoder_hidden.transpose(0,1).contiguous().view(batch_size, -1)\n # hidden = self.transform_en_hid(encoder_hidden) #(bz, de_num_layers*de_hidden_size)\n # hidden = hidden.view(batch_size, self.num_layers, self.hidden_size).transpose(0,1).contiguous()\n cell = torch.zeros(self.num_layers, batch_size, self.hidden_size, device=device)\n return cell #(de_num_layers, bz, de_hidden_size)\n\nclass AttentionLayer(nn.Module):\n def __init__(self, q_hidden_size, m_hidden_size, atten_type):\n super(AttentionLayer, self).__init__()\n self.q_hidden_size = q_hidden_size\n self.m_hidden_size = m_hidden_size\n self.mode = atten_type\n if atten_type == 'dot_prod':\n if q_hidden_size != m_hidden_size:\n print((q_hidden_size, m_hidden_size), 'query and memory must have the same hidden size; use general way 
automatically')\n self.mode = 'general'\n self.general_linear = nn.Linear(q_hidden_size, m_hidden_size, bias=False)\n else:\n print('dot_prod')\n elif atten_type == 'general':\n print('general')\n self.general_linear = nn.Linear(q_hidden_size, m_hidden_size, bias=False)\n elif atten_type == 'concat':\n print('concat')\n self.content_linear = nn.Linear(q_hidden_size+m_hidden_size, att_concat_hz, bias=True)\n self.score_linear = nn.Linear(att_concat_hz, 1, bias = False)\n else:\n print('mode out of bound')\n\n def forward(self, query, memory_bank, true_len):\n #batch_size, src_len, hidden_size = memory_bank.size()\n #query_len = query.size(1)\n scores = self.atten_score(query, memory_bank)\n \n mask_matrix = sequence_mask(true_len).unsqueeze(1)\n scores.masked_fill_(1-mask_matrix, float('-inf'))\n scores_normalized = F.softmax(scores, dim=-1)\n #scores_normalized = F.softmax(scores.view(batch_size * query_len, seq_len), dim=-1).view(batch_size, query_len, seq_len)\n context = torch.bmm(scores_normalized, memory_bank)\n \n return context, scores_normalized #(bz, query_len, m_hidden_size) (bz, query_len, src_len)\n \n def atten_score(self, query, memory_bank):\n \"\"\"\n query: (batch, tgt_length, q_hidden_size)\n memory_bank: (batch, src_length, m_hidden_size)\n return: (batch, tgt_length, src_length)\n \"\"\"\n batch_size, src_len, m_hidden_size = memory_bank.size()\n query_len = query.size(1)\n if self.mode == 'dot_prod':\n out = torch.bmm(query, memory_bank.transpose(1, 2))\n elif self.mode == 'general':\n temp = self.general_linear(query.view(batch_size * query_len, self.q_hidden_size))\n out = torch.bmm(temp.view(batch_size,query_len,self.m_hidden_size),memory_bank.transpose(1, 2))\n elif self.mode == 'concat':\n query_temp = query.unsqueeze(2).expand(batch_size,query_len,src_len,self.q_hidden_size)\n memory_temp = memory_bank.unsqueeze(1).expand(batch_size,query_len,src_len,self.m_hidden_size)\n content_out = self.content_linear(torch.cat((query_temp,memory_temp),-1).view(batch_size * query_len * src_len, self.m_hidden_size+self.q_hidden_size))\n content_out = torch.tanh(content_out)\n out = self.score_linear(content_out)\n out = out.squeeze(-1).view(batch_size, query_len, src_len)\n else:\n print('mode out of bound')\n return out #(bz, query_len, src_len)\n\ndef sequence_mask(lengths):\n batch_size = lengths.numel()\n max_len = lengths.max()\n return (torch.arange(0, max_len).type_as(lengths).repeat(batch_size,1).lt(lengths.unsqueeze(1)))\n\nclass CopyMechanism(nn.Module):\n def __init__(self, de_logits_hz, en_output_hz, vocab_size_pred):\n super(CopyMechanism, self).__init__()\n self.generate_linear = nn.Linear(de_logits_hz, vocab_size_pred)\n self.copy_linear = nn.Linear(en_output_hz, de_logits_hz)\n self.LogSoftmax = nn.LogSoftmax(dim=-1)\n\n\n def forward(self, logits, encoder_outputs, true_len):\n generation_scores = self.generate_linear(logits) #(bz, de_logits_hz)>>(bz, vocab_size_pred)\n # remove sos and eos\n encoder_out = torch.tanh(self.copy_linear(encoder_outputs)) #(bz, src_sen_len, en_output_hz)>>(bz, src_sen_len, de_logits_hz)\n copy_scores = torch.bmm(encoder_out, logits.unsqueeze(-1)).squeeze(-1) #(bz, src_sen_len)\n # mask copy_scores for padding\n mask_matrix = sequence_mask(true_len)\n copy_scores.masked_fill_(1-mask_matrix, float('-inf'))\n scores = torch.cat((generation_scores, copy_scores), dim=-1)\n log_prob_scores = self.LogSoftmax(scores) #(bz, vocab_size_pred+src_sen_len)\n return 
log_prob_scores\n\n\n\n","sub_path":"Machine_Translation_NLP/Multilayers_Decoder.py","file_name":"Multilayers_Decoder.py","file_ext":"py","file_size_in_byte":9180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"35522391","text":"from src.utils.push_data import PushData\nfrom src.utils.script_op import ScriptOp\nfrom src.vm.vm_state import VMState\n\n\nclass FuncFlowControl(object):\n @staticmethod\n def op_nop(engine):\n return VMState.NONE\n\n @staticmethod\n def op_jmp(engine):\n offset = engine.context.op_reader.read_int16()\n offset = engine.context.get_instruction_pointer() + offset - 3\n if offset < 0 or offset > len(engine.context.code):\n return VMState.FAULT\n f_value = True\n if engine.op_code.value > ScriptOp.OP_JMP.value:\n if PushData.evaluation_stack_count(engine) < 1:\n return VMState.FAULT\n f_value = PushData.pop_bool(engine)\n print(\"pop_bool: \", f_value)\n if engine.op_code == ScriptOp.OP_JMPIFNOT:\n f_value = not f_value\n if f_value:\n engine.context.set_instruction_pointer(offset)\n print(\"fvalue: \", f_value)\n print(\"******get_instruction_pointer: \", engine.context.get_instruction_pointer())\n return VMState.NONE\n\n @staticmethod\n def op_call(engine):\n execution_context = engine.context.clone()\n engine.context.set_instruction_pointer(engine.context.get_instruction_pointer() + 2)\n engine.op_code = ScriptOp.OP_JMP\n engine.push_context(execution_context)\n return FuncFlowControl.op_jmp(engine)\n\n @staticmethod\n def op_ret(engine):\n engine.pop_context()\n return VMState.NONE","sub_path":"src/func/func_flowcontrol.py","file_name":"func_flowcontrol.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"407074516","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\nimport re\n\n\ndef postprocess_line(ln):\n if 'multicolumn{4}' in ln:\n return ln + ' \\\\cmidrule(lr){3-6}\\\\cmidrule(lr){7-10}'\n elif 'multicolumn{2}' in ln:\n return ln + ' \\\\cmidrule(lr){3-4}\\\\cmidrule(lr){5-6}\\\\cmidrule(lr){7-8}\\\\cmidrule(lr){9-10}'\n elif 'begin{tabular}' in ln:\n return '\\\\begin{tabular}{llrrrrrrrr}'\n return ln\n\n\ndef flush_section(section, sect_lines):\n if section != 'bwa':\n for ln in sect_lines:\n print(postprocess_line(ln))\n else:\n ln = sect_lines[0]\n print(' & BWA-MEM ' + ' '.join(ln.split()[3:]))\n\n\ndef go():\n fn = 'peak_throughput.tex_snippet.tmp'\n section = ''\n sect_lines = []\n suppressed_hline = False\n for ln in open(fn):\n ln = ln.rstrip()\n if ln.startswith('Bowtie 2'):\n flush_section(section, sect_lines)\n section = 'bt2'\n sect_lines = []\n elif ln.startswith('Bowtie'):\n flush_section(section, sect_lines)\n section = 'bt'\n sect_lines = []\n elif ln.startswith('BWA-MEM'):\n flush_section(section, sect_lines)\n section = 'bwa'\n sect_lines = []\n elif ln.startswith('HISAT'):\n flush_section(section, sect_lines)\n section = 'ht'\n sect_lines = []\n ln = ln.replace('\\\\phantom{0}', '')\n ln = ln.replace('$', '')\n ln = re.sub(\"\\\\\\\\multicolumn\\{1\\}\\{[lrc]\\}\\{([/0-9.a-zA-Z]+)\\}\", \"\\\\1\", ln, flags=re.DOTALL)\n if not suppressed_hline and 'hline' in ln:\n suppressed_hline = True\n else:\n sect_lines.append(ln)\n flush_section(section, sect_lines)\n\nif __name__ == '__main__':\n 
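# go() expects peak_throughput.tex_snippet.tmp in the current working directory\n    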
go()\n","sub_path":"thread_scaling/scripts/postprocess_peak_throughput.py","file_name":"postprocess_peak_throughput.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"297014902","text":"# Tab = 2\nimport urllib.request as urllib2\nimport sys\n# Define ANSI escape sequences for colored output\nclass bcolors:\n\tHEADER\t\t= '\\033[95m'\n\tOKBLUE\t\t= '\\033[94m'\n\tOKGREEN\t\t= '\\033[92m'\n\tWARNING\t\t= '\\033[93m'\n\tFAIL\t\t\t= '\\033[91m'\n\tENDC\t\t\t= '\\033[0m'\n\tBOLD\t\t\t= '\\033[1m'\n\tUNDERLINE\t= '\\033[4m'\n\ndef checkUrl(username, url):\n\turl=url.format(username)\n\trequest=urllib2.Request(url)\n\treturn parse(request,username)+\"\\n\"\n\n# Process the request and capture the response code\ndef middleware(request, username):\n\ttry:\n\t\tresponse=urllib2.urlopen(request)\n\texcept urllib2.HTTPError as e:\n\t\tif e.code==404:\n\t\t\treturn (e.code, bcolors.OKGREEN, \"Available\")\n\t\telse:\n\t\t\treturn (e.code, bcolors.WARNING, e.reason)\n\telse:\n\t\tif response.getcode() == 200:\n\t\t\treturn (response.getcode(), bcolors.FAIL, \"Already exists\")\n\n# Used for generating colored responses\ndef parse(request, username):\n\tresponse = middleware(request, username)\n\treturn (bcolors.HEADER+\"├─ \"+bcolors.ENDC +response[1]+ request.get_full_url()+\" [\"+str(response[0])+\"] \"+response[2]+bcolors.ENDC)\n\ndef checkUsername(username):\n\t# Read file\n\ttry:\n\t\twith open(\"URL_CHECK.txt\") as f:\n\t\t\tl_url=f.readlines()\n\texcept:\n\t\tsys.exit(\"❌ [URL_CHECK.txt] Not found\")\n\t\t\n\t# Check the urls\n\trtn =\"\"\n\tfor url in l_url:\n\t\t# print(url)\n\t\trtn+=checkUrl(username, url)\n\t\t\n\trtn=sorted(rtn.split(\"\\n\"))\n\trtn=\"\\n\".join(rtn).strip()\n\t\n\treturn rtn\n\ndef showHeader(username):\n\trtn =bcolors.HEADER+\"__________________________________________________________________\\n\"+bcolors.ENDC\n\trtn+=(bcolors.HEADER+\"█ \"+bcolors.ENDC+bcolors.BOLD+\"{}\"+bcolors.ENDC+bcolors.HEADER+\" █ Checking for availability, please wait...\\n\"+bcolors.ENDC).format(username)\n\treturn rtn\n\t\nif __name__ == \"__main__\":\n\t# Ask for a username, instead of an argument\n\tmsg=\"\"\n\tif len(sys.argv[1:])==0:\n\t\tusername=input(\"Username to check: \")\n\t\tprint(\"This may take a while...\")\n\t\t\n\t\tmsg+=showHeader(username)\n\t\tmsg+=checkUsername(username)\n\telse:\n\t\t# Take username as a system argument\n\t\tmsg=\"Checking: \"\n\t\tfor username in sys.argv[1:]:\n\t\t\tmsg+=bcolors.BOLD+username+bcolors.ENDC+\", \"\n\t\tprint(msg[:-2])\n\t\tprint(\"This may take a while...\")\n\t\t\n\t\tmsg=\"\"\n\t\tfor username in sys.argv[1:]:\n\t\t\tmsg+=showHeader(username)\n\t\t\tmsg+=checkUsername(username)\n\t\n\t## Show\n\tprint(msg)\n","sub_path":"username-checker.py","file_name":"username-checker.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"430025755","text":"\"\"\"In mathematics, the Fibonacci numbers, commonly denoted Fn, \r\nform a sequence, called the Fibonacci sequence, \r\nsuch that each number is the sum of the two preceding ones,\r\n starting from 0 and 1. That is, F0 = 0, F1 = 1, and Fn = Fn-1 + Fn-2 
for n > 1.\r\n\"\"\"\r\n\r\ndef fibanocci(num):\r\n if num == 0:\r\n return 0\r\n elif num == 1:\r\n return 1\r\n else:\r\n return fibanocci(num-1)+fibanocci(num-2)\r\n \r\nnum = input(\"enter a number:\")\r\nn = int(num)\r\nsum = 0\r\nfor i in range(1,n+1):\r\n sum = sum + fibanocci(i)\r\n print(fibanocci(i),\" \")\r\nprint(\"sum of fibanocci num is :\", sum)\r\n","sub_path":"fibanocci.py","file_name":"fibanocci.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"648269956","text":"\"\"\"\nMIT License\n\nCopyright (c) 2016 Emotly Contributors\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nimport datetime\nimport json\nfrom emotly import app\nfrom emotly import constants as CONSTANTS\nfrom emotly.models import User, Token\nfrom emotly.utils import *\nfrom flask import Blueprint, request, render_template\nfrom flask import flash, make_response\nfrom mongoengine import DoesNotExist, NotUniqueError, ValidationError\nfrom urllib.parse import urlparse\n\n\n# User Controller\nuser_controller = Blueprint('user_controller', __name__)\n\n\n# Verify user credential and return a JWT Token if\n# the user has access to the system.\n@user_controller.route(CONSTANTS.REST_API_PREFIX + \"/login\", methods=[\"POST\"])\n@require_https\ndef login():\n try:\n # Retrieve json data and user data.\n data = json.loads(request.data.decode('utf-8'))\n data['user_id'] = ''.join(data['user_id'].split())\n\n if '@' in data['user_id']:\n user = User.objects.get(email__iexact=data['user_id'])\n else:\n user = User.objects.get(nickname__iexact=data['user_id'])\n except DoesNotExist:\n # User does not exist.\n return response_handler(CONSTANTS.CODE_USER_UNKNOW)\n except Exception:\n # No data sent by the client or there\n # was an error queryng the database.\n return response_handler(500, CONSTANTS.INTERNAL_SERVER_ERROR)\n if not user.confirmed_email:\n # User email not confirmed yet\n return response_handler(CONSTANTS.CODE_USER_UNCONFIRMED)\n if User.verify_password(user, data['password'].encode('utf-8')):\n try:\n user.update(last_login=datetime.datetime.now())\n except Exception:\n # Error updating the user data.\n return response_handler(500, CONSTANTS.INTERNAL_SERVER_ERROR)\n # Generate and send JWT\n return make_response(generate_jwt_token(user), 200)\n return response_handler(CONSTANTS.CODE_REQUEST_UNAUTHORIZED)\n\n\n@user_controller.route(\"/signup\", methods=[\"GET\", \"POST\"])\ndef signup():\n if not 
should_override_security_restrictions(urlparse(request.url)) and\\\n not request.is_secure:\n flash(CONSTANTS.NOT_HTTPS_REQUEST)\n return render_template(\"page-home.html\")\n if request.method == \"GET\":\n return render_template(\"page-signup.html\")\n try:\n register_user(request)\n flash(CONSTANTS.REGISTRATION_COMPLETED_CHECK_EMAIL)\n except ValidationError:\n flash(CONSTANTS.REGISTRATION_ERROR_INVALID_DATA, 'Error')\n except NotUniqueError:\n flash(CONSTANTS.REGISTRATION_ERROR_USER_EXISTS, 'Error')\n except Exception:\n flash(CONSTANTS.INTERNAL_SERVER_ERROR, 'Error')\n return render_template(\"page-home.html\")\n\n\n# Registration endpoint. It allows the user to create an account.\n# Accepts JSON data containing nickname, email and password.\n# TODO: Add security check (number of call?).\n@user_controller.route(CONSTANTS.REST_API_PREFIX + \"/signup\", methods=[\"POST\"])\n@require_https\ndef signup_api():\n try:\n register_user(request)\n return response_handler(200,\n CONSTANTS.REGISTRATION_COMPLETED_CHECK_EMAIL)\n except ValidationError:\n return response_handler(CONSTANTS.CODE_USER_MISSING_REGISTRATION_DATA)\n except NotUniqueError:\n return response_handler(CONSTANTS.CODE_USER_ACCOUNT_ALREADY_EXISTS)\n except Exception:\n return response_handler(500, CONSTANTS.INTERNAL_SERVER_ERROR)\n\n\n# Use the post params to generate salt, hash password,\n# confirmation token. Save the user and send the email.\n# (The user is saved even if the mail is not sent).\ndef register_user(req):\n if req.headers['content-type'] == 'application/json':\n data = json.loads(req.data.decode('utf-8'))\n req_nickname = data['inputNickname']\n req_pwd = data['inputPassword'].encode('utf-8')\n req_email = data['inputEmail']\n else:\n req_nickname = req.form['inputNickname']\n req_pwd = req.form['inputPassword'].encode('utf-8')\n req_email = req.form['inputEmail']\n\n # Normalize email.\n req_email = req_email.lower()\n salt = get_salt()\n hash_pwd = hash_password(req_pwd, salt)\n user = User(nickname=req_nickname, password=hash_pwd, salt=salt,\n email=req_email)\n\n token_string = generate_confirmation_token(user.email)\n user.confirmation_token = Token(token=token_string)\n user.save()\n send_email_confirmation(user.email, token_string)\n\n\n# This route is used to confirm the user through the\n# confirmation token received by email and send a welcome email\n# with the link to the progressive web app.\n@user_controller.route(\"/confirm_email/\", methods=['GET'])\ndef confirm_email(confirmation_token):\n if not should_override_security_restrictions(urlparse(request.url)) and\\\n not request.is_secure:\n flash(CONSTANTS.NOT_HTTPS_REQUEST)\n return render_template(\"page-home.html\")\n try:\n confirm_registration_email(confirmation_token)\n flash(CONSTANTS.EMAIL_CONFIRMED)\n except DoesNotExist as e:\n flash(CONSTANTS.ERROR_IN_CONFIRMING_EMAIL)\n except Exception:\n flash(CONSTANTS.INTERNAL_SERVER_ERROR, 'Error')\n return render_template(\"page-home.html\")\n\n\n# This endpoint can be used to check the validity of a JWT token\n@user_controller.route(CONSTANTS.REST_API_PREFIX +\n '/is_jwt_valid', methods=['POST'])\n@require_https\ndef is_jwt_valid():\n auth_token = request.headers.get('X-Emotly-Auth-Token')\n try:\n return response_handler(200, verify_jwt_token(auth_token))\n except:\n return response_handler(CONSTANTS.CODE_DATA_INVALUD_REQUEST)\n\n\n# This route is used to resend the confirmation email.\n@user_controller.route(CONSTANTS.REST_API_PREFIX +\n '/resend_email_confirmation', 
methods=['GET'])\n@valid_json\n@require_https\ndef resend_email_confirmation(**kwargs):\n try:\n data = kwargs['data']\n # Get user by email or nickname.\n data['user_id'] = ''.join(data['user_id'].split())\n if '@' in data['user_id']:\n user = User.objects.only('confirmed_email',\n 'confirmation_token', 'email'). \\\n get(email__iexact=data['user_id'])\n else:\n user = User.objects.only('confirmed_email',\n 'confirmation_token', 'email'). \\\n get(nickname__iexact=data['user_id'])\n # Check if user is confirmed, return 400 if it is already confirmed.\n if user.confirmed_email:\n return response_handler(CONSTANTS.CODE_USER_ALREADY_CONFIRMED)\n\n secs = (datetime.datetime.now() -\n user.confirmation_token.created_at).total_seconds()\n minutes = int(secs / 60) % 60\n # Check if user requested token less then MINUTES_SINCE_LAST_EMAIL\n # minutes ago. Return 400 if the token has already been sent.\n if minutes < CONSTANTS.MINUTES_SINCE_LAST_EMAIL:\n return response_handler(CONSTANTS.\n CODE_TOKEN_CONFIRMATION_ALREADY_SENT)\n\n user.update(confirmation_token__created_at=datetime.datetime.now())\n send_email_confirmation(user.email, user.confirmation_token.token)\n # If user does not exist return 404.\n except DoesNotExist:\n return response_handler(CONSTANTS.CODE_USER_UNKNOW)\n except Exception:\n return response_handler(500, CONSTANTS.INTERNAL_SERVER_ERROR)\n return response_handler(200, CONSTANTS.CONFIRMATION_EMAIL_SENT)\n\n\n# Check if the confirmation token is still valid (received in the last 24\n# hours). Activate the user, delete the token, update the update_at user field.\ndef confirm_registration_email(confirmation_token):\n time_range = datetime.datetime.now() - datetime.timedelta(days=1)\n user = User.objects.get(confirmation_token__token=confirmation_token,\n confirmation_token__created_at__gte=time_range)\n User.objects.get(pk=user.id).update(confirmed_email=True,\n unset__confirmation_token=1,\n update_at=datetime.datetime.now())\n send_welcome_email(user.email, CONSTANTS.APP_LINK)\n","sub_path":"emotly/controllers/user_controller.py","file_name":"user_controller.py","file_ext":"py","file_size_in_byte":9266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"115619122","text":"## Script (Python) \"guard_retract_worksheet\"\n##bind container=container\n##bind context=context\n##bind namespace=\n##bind script=script\n##bind subpath=traverse_subpath\n##parameters=type_name=None\n##title=\n##\n\nfrom bika.lims import Retract\n\nwf_tool = context.portal_workflow\n\ncheckPermission = context.portal_membership.checkPermission\nif checkPermission(Retract, context):\n return True\nelse:\n # Allow automatic retract if any analysis is 'sample_received'.\n for analysis in context.getAnalyses():\n review_state = wf_tool.getInfoFor(analysis, 'review_state')\n if review_state == 'sample_received':\n return True\n return False\n","sub_path":"bika/lims/skins/bika/guard_retract_worksheet.py","file_name":"guard_retract_worksheet.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"302430498","text":"\"\"\"\nbyceps.blueprints.admin.terms.views\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n:Copyright: 2006-2021 Jochen Kupperschmidt\n:License: Revised BSD (see `LICENSE` file for details)\n\"\"\"\n\nfrom flask import abort\n\nfrom ....services.terms import consent_service as terms_consent_service\nfrom ....services.terms import document_service as 
terms_document_service\nfrom ....services.terms import version_service as terms_version_service\nfrom ....services.user import service as user_service\nfrom ....util.authorization import register_permission_enum\nfrom ....util.framework.blueprint import create_blueprint\nfrom ....util.framework.templating import templated\nfrom ....util.views import permission_required\n\nfrom .authorization import TermsPermission\n\n\nblueprint = create_blueprint('terms_admin', __name__)\n\n\nregister_permission_enum(TermsPermission)\n\n\n@blueprint.route('/documents/')\n@permission_required(TermsPermission.view)\n@templated\ndef view_document(document_id):\n \"\"\"Show the document's attributes and versions.\"\"\"\n document = terms_document_service.find_document(document_id)\n if document is None:\n abort(404)\n\n versions = terms_version_service.get_versions(document.id)\n\n _add_version_creators(versions)\n\n consent_counts_by_version_id = (\n terms_consent_service.count_consents_for_document_versions(document.id)\n )\n\n for version in versions:\n version.consent_count = consent_counts_by_version_id[version.id]\n\n document = terms_document_service.find_document(document.id)\n\n return {\n 'document': document,\n 'versions': versions,\n }\n\n\ndef _add_version_creators(versions):\n creator_ids = {v.snippet_version.creator_id for v in versions}\n creators = user_service.find_users(creator_ids, include_avatars=True)\n creators_by_id = user_service.index_users_by_id(creators)\n\n for version in versions:\n version.creator = creators_by_id[version.snippet_version.creator_id]\n\n\n@blueprint.route('/versions/')\n@permission_required(TermsPermission.view)\n@templated\ndef view_version(version_id):\n \"\"\"Show the version.\"\"\"\n version = _get_version_or_404(version_id)\n\n return {\n 'version': version,\n }\n\n\n@blueprint.route('/versions//body.html')\n@permission_required(TermsPermission.view)\n@templated\ndef view_version_body_html(version_id):\n \"\"\"Show the version's HTML body.\"\"\"\n version = _get_version_or_404(version_id)\n\n return version.body\n\n\ndef _get_version_or_404(version_id):\n version = terms_version_service.find_version(version_id)\n\n if version is None:\n abort(404)\n\n return version\n","sub_path":"byceps/blueprints/admin/terms/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"459482205","text":"#!/usr/bin/python3\nimport tkinter as tk\nimport tkinter.messagebox\nfrom PIL import Image\nfrom PIL import ImageTk\nimport os\nimport sys\nimport time\nimport numpy \nimport curses\nimport threading # get rid of counters, use timer\nfrom std_msgs.msg import String\n#from global_var.global_param import BROKER_IP\n#from MQTT.mqtt_template import MQTT_OBJ\n# mqtt_obj = MQTT_OBJ(client_id=\"ev_simulator\", broker_ip=BROKER_IP, port=1883, keepalive=10, clean_session=False)\n\n#------- Constant Parameters ---------# \nLOOP_PERIOD = 0.01 # sec\nDOOR_OPENING_TIME = 2 # sec\nDOOR_CLOSING_TIME = 2 # sec\nOPEN_LOCK_TIME = 3 # sec\nNEXT_FLOOR_TIME = 2 # sec\nEV_LOCATION_MAX = 1000 # 0~1000\nDOOR_SENSOR_MAX = 1000 # 0~1000\nLED_SUSTAIN_TIME = 1.5 # sec\n\n#------- tkinter parameter -------# \nEV_PANEL_BTS_SIZE = 3\nEV_PANEL_TOTAL_ROW = 4 # add 1 row for led \nEV_PANEL_TOTAL_COL = 2\n# space between buttons\nEV_PANEL_PADX = 0\nEV_PANEL_PADY = 0\n# space between text and button's broards, will change size of buttons\nEV_PANEL_IPADX = 1\nEV_PANEL_IPADY = 
1\n#\nEV_STATE_CANVAS_PERIOD = 100 # for animation\n#\nBUTTON_HIGHLIGHT_COLOR = \"yellow\"\nBUTTON_COLOR = \"light gray\"\n\nclass ev_panel_bts(object):\n def __init__(self,frame, name, row ,column, command, pic_path):\n # Create a button object on tk\n self.bt = tk.Button(frame, text=name ,command = command , height = EV_PANEL_BTS_SIZE, width = EV_PANEL_BTS_SIZE)\n # where to put this button respect to panel\n self.bt.grid(row = row , column = column, padx = EV_PANEL_PADX , pady = EV_PANEL_PADY,\\\n ipadx = EV_PANEL_IPADX, ipady = EV_PANEL_IPADY)\n # Name of this button\n self.name = name # '1F' , '2F' , 'open', 'close'\n # simulate ev button led, 0 -> led stay low , 1 -> led is on \n self.led = 0\n # simulate ev panel button, 0 -> button not pressed , 1 -> button pressed\n self.switch = 0\n # picture path of this floor \n self.pic_path = pic_path\nclass app():\n def __init__(self):\n #----------- Elevator State ------------# \n # Elevator state : standing_by, opening, opened, closing, moving\n self.state = \"standing_by\"\n # Elevator moving direction : no_assign, up, down\n self.direction = \"no_assign\"\n # Elevator currently at which floor \n self.current_floor = \"1\" \n # Elevator target floor. Which floor does elevator plan to go? \"\" means no target_floor.\n self.target_floor = \"\" \n \n #buffer cmd from elevator server \n self.cmd_list = []\n # For opened timer\n self.timer = None\n # For current floor led self-sustain timer\n self.led_timer = None \n \n # ---------- Counters/Sensor ------------ #\n # Show how much does elevator's door opened\n # 0 ~ DOOR_COUNTER_MAX , 0 means door fully close , DOOR_COUNTER_MAX means door fully open \n self.door_sensor = 0.0\n # Show where is elevator among these floors\n # 0 ~ EV_LOCATION_MAX , 0 means ev at lowest floor , EV_LOCATION_MAX means eleevator at highest floor\n self.ev_location_sensor = 0.0\n\n # counter for canvas_state \n self.animate_counter = 0 \n # counter for current floor led self-sustain timer\n self.led_sustain_count = 0\n\n # ---------- Tk window and frame------------ #\n self.window = tk.Tk()\n self.window.title(\"Elevator Simulator\")# title of window\n # self.window.iconbitmap('pic/icon.jpg') # icon of window\n self.window.iconphoto(False, tk.PhotoImage(file='pic/icon.png'))\n # self.window.geometry('500x800') # dont specify, tk will automatic assign a proper size.\n # window.resizable(0,0) # disable resize window \n\n # Group frame\n self.ev_bts = tk.Frame(self.window)\n self.ev_bts.pack(side = tk.LEFT)\n\n # ---------- elevator_panel_buttonr ------------ # \n '''\n define elevator button \n '''\n self.bt_dic = {\"1\" : ev_panel_bts(self.ev_bts, \"1F\", EV_PANEL_TOTAL_ROW-2 ,0, self.cb_bt_1f, \"pic/1_digit.pgm\"),\\\n \"2\" : ev_panel_bts(self.ev_bts, \"2F\", EV_PANEL_TOTAL_ROW-3 ,0, self.cb_bt_2f, \"pic/2_digit.pgm\"),\\\n \"3\" : ev_panel_bts(self.ev_bts, \"3F\", EV_PANEL_TOTAL_ROW-2 ,1, self.cb_bt_3f, \"pic/3_digit.pgm\"),\\\n \"4\" : ev_panel_bts(self.ev_bts, \"4F\", EV_PANEL_TOTAL_ROW-3 ,1, self.cb_bt_4f, \"pic/4_digit.pgm\"),\\\n \"close\" : ev_panel_bts(self.ev_bts, \"close\", EV_PANEL_TOTAL_ROW-1 ,0, self.cb_bt_close, None),\\\n \"open\" : ev_panel_bts(self.ev_bts, \"open\", EV_PANEL_TOTAL_ROW-1 ,1, self.cb_bt_open , None)}\n #define relationship among these floors, lowest floor is bt_list[0], hightest floor is bt_list[-1]\n self.bt_list = [\"1\" , \"2\" , \"3\" , \"4\"] \n\n ##############################\n ### elevator_led canvas ###\n ##############################\n #------ Floor LED indicator --------# 
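shows the current floor as a digit image (drawn by change_pic_canvas_floor)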
\n self.canvas_floor = tk.Canvas(self.ev_bts,bg = \"white\", height = 50 , width = 50)\n self.canvas_floor.grid(row = 0 , column = 0, padx = EV_PANEL_PADX , pady = EV_PANEL_PADY\\\n , ipadx = EV_PANEL_IPADX, ipady = EV_PANEL_IPADY)\n #------ EV state LED indicator --------# opening, closing , moving up , moving down \n self.canvas_state = tk.Canvas(self.ev_bts,bg = \"white\", height = 50 , width = 50)\n self.canvas_state.grid(row = 0 , column = 1, padx = EV_PANEL_PADX , pady = EV_PANEL_PADY\\\n , ipadx = EV_PANEL_IPADX, ipady = EV_PANEL_IPADY)\n \n ##############################\n ### Draw TK object ###\n ##############################\n ###---------- Text box ----------###\n self.text = tk.Text(self.window,height = 10, width = 20)\n self.text.pack(side = tk.TOP)\n\n ###---------- cartoon canvas ----------###\n # User adjustable parameter\n # frame size \n total_height = 600\n total_width = 300\n # elevator size \n ev_width = 80\n ev_height = 100\n # amr robot size \n amr_height = 30\n amr_width = 60\n #\n gap = 5 # gap width between elevator and elevator well\n canvas_edge = 3 # gap width between elevator well and canvas edge\n floor_thick = 5 # thickness of floor\n # location of amr, change in runtime (animation)\n self.amr_location_x = 0\n self.amr_location_y = 0\n \n ev_well_width = 2*gap + ev_width\n ev_well_height = total_height - canvas_edge # - 2*gap\n ev_well_x = total_width - ev_well_width - canvas_edge\n \n max_ev_movement = ev_well_height - ev_height - 2*gap\n floor_height = max_ev_movement / (len(self.bt_list)-1)\n\n #------ cartoon canvas --------#\n self.canvas_cartoon = tk.Canvas(self.window,bg = \"white\", height = total_height , width = total_width)\n # self.canvas_state.grid(row = 0 , column = 1, padx = EV_PANEL_PADX , pady = EV_PANEL_PADY, ipadx = EV_PANEL_IPADX, ipady = EV_PANEL_IPADY)\n self.canvas_cartoon.pack(side = tk.LEFT)\n \n #---------- Draw elevator well ----------#\n ev_well_an1 = (ev_well_x , canvas_edge)\n ev_well_an2 = (ev_well_x+ev_well_width , ev_well_height)\n self.ev_well = self.canvas_cartoon.create_rectangle(ev_well_an1[0],ev_well_an1[1],ev_well_an2[0],ev_well_an2[1], fill = \"pink\")\n #---------- Draw elevator ----------#\n ev_an2 = (ev_well_an2[0] - gap , ev_well_an2[1] - gap)\n ev_an1 = (ev_an2[0] - ev_width , ev_an2[1] - ev_height)\n self.ev = self.canvas_cartoon.create_rectangle(ev_an1[0] ,ev_an1[1] ,ev_an2[0] ,ev_an2[1] , fill = \"orange\")\n #---------- Draw floors ----------#\n floors_an_list = []\n for i in range(len(self.bt_list)):\n floor_an1 = (0 , ev_an2[1] - i*floor_height)\n floor_an2 = (ev_well_an1[0] , ev_well_an2[1] + floor_thick - i*floor_height)\n floors_an_list.append([floor_an1,floor_an2])\n self.canvas_cartoon.create_rectangle(floor_an1[0] ,floor_an1[1] ,floor_an2[0] ,floor_an2[1] , fill = \"black\")\n \n #---------- Draw elevator door ----------#\n # These doors should tranvel along with elevator \n self.left_door = self.canvas_cartoon.create_rectangle(ev_an1[0],ev_an1[1], ev_an1[0] + ev_width/2 ,ev_an2[1], fill = \"brown\")\n self.right_door = self.canvas_cartoon.create_rectangle(ev_an2[0],ev_an1[1], ev_an1[0] + ev_width/2 ,ev_an2[1], fill = \"brown\")\n self.tmp_ori_1 = ev_an1[0]\n self.tmp_ori_2 = ev_an2[0]\n \n #---------- Draw amr ----------#\n self.amr_location_x = total_width/2 -100\n self.amr_location_y = floors_an_list[0][0][1] - amr_height\n self.canvas_cartoon.create_rectangle(self.amr_location_x,self.amr_location_y,self.amr_location_x+amr_width,self.amr_location_y + amr_height, fill = \"yellow\")\n \n # helper 
variable for cute cartoon \n self.CARTOON_EV_MOVING_VEL = max_ev_movement/((NEXT_FLOOR_TIME/LOOP_PERIOD)*(len(self.bt_list) - 1))\n self.CARTOON_DOOR_OPENING_VEL = (ev_width/2)/(DOOR_OPENING_TIME/LOOP_PERIOD)\n self.CARTOON_DOOR_CLOSING_VEL = (ev_width/2)/(DOOR_CLOSING_TIME/LOOP_PERIOD)\n # helper variable for counters\n self.MOVING_VEL = EV_LOCATION_MAX/((NEXT_FLOOR_TIME/LOOP_PERIOD)*(len(self.bt_list) - 1))\n self.OPENING_VEL = DOOR_SENSOR_MAX/(DOOR_OPENING_TIME/LOOP_PERIOD)\n self.CLOSING_VEL = DOOR_SENSOR_MAX/(DOOR_CLOSING_TIME/LOOP_PERIOD)\n \n ###----------- start main loop -------------###\n self.main() # Recursive call main()\n self.window.mainloop()\n\n def LED_cancel_wrong_floor(self):\n '''\n Toggle Led status to cancel illegal bts led\n Erase current_floor Led, erase led on oppisite direction\n called when elevator arrived target floor and plan()\n '''\n current_idx = self.bt_list.index(self.current_floor) \n if self.direction == \"up\":\n for bt_key in self.bt_list[:current_idx+1]: # Include current \n self.bt_dic[bt_key].led = 0\n\n elif self.direction == \"down\": \n for bt_key in self.bt_list[current_idx:]: # Include current \n self.bt_dic[bt_key].led = 0\n\n elif self.direction == \"no_assign\": \n for bt_key in self.bt_list:\n self.bt_dic[bt_key].led = 0\n\n def plan(self):\n '''\n To get direction and target floor\n '''\n self.target_floor = \"\" # MUST BE \"\" BEFORE PLANNING\n # ----- Generate a List to pick out target floor -------# \n candidate_floor = []\n current_idx = self.bt_list.index(self.current_floor) \n if self.direction == \"up\": \n for bt in self.bt_list[current_idx+1:]: # Don't include current \n candidate_floor.append(bt)\n elif self.direction == \"down\":\n for bt in self.bt_list[:current_idx]: # Don't include current \n candidate_floor.append(bt)\n elif self.direction == \"no_assign\": \n for bt in self.bt_list: # Don't include current \n candidate_floor.append(bt)\n \n # ------ Get a target floor from candidate_floor list -------# \n # Find a target floor at current direction\n if self.direction != 'no_assign':\n for bt in candidate_floor:\n if self.bt_dic[bt].led:\n self.target_floor = bt\n else: # self.direction == \"no_assign\":\n if self.target_floor == \"\": # Can't find any target floor in my direction\n self.direction = \"no_assign\"\n # ------ Try to find target floor in all floors -------# \n for bt in self.bt_list:\n if self.bt_dic[bt].led and self.current_floor != bt: # if led is on and it's not current floor\n ###------ Found a floor to go !! 
-------###\n # update target floor\n self.target_floor = bt\n # update direction\n if self.bt_list.index(self.target_floor) - self.bt_list.index(self.current_floor) > 0: # go up\n self.direction = \"up\"\n else: # go down\n self.direction = \"down\"\n \n def opened_timer_cb(self):\n '''\n Reach OPEN_DOOR_LOCK time, should close the door now.\n '''\n self.timer = None # Reset timer\n self.state = \"closing\"\n\n def main(self): # main loop \n #------- Check cmd from elevator_server ----------# \n if self.cmd_list != []: # New cmd to do \n cmd = str(self.cmd_list[0]).split()\n \n del self.cmd_list[0] # Pop out cmd\n\n # ------- Write Cmd --------#(toggle switch)\n if cmd[0] == 'w': \n if cmd[1] in self.bt_dic and (cmd[2] == '1' or cmd[2] == '0'):\n print (str(cmd))\n self.bt_dic[cmd[1]].switch = int(cmd[2])\n # ------- Read Cmd --------# \n elif cmd[0] == 'r':\n if cmd[1] in self.bt_dic:\n ans = self.bt_dic[cmd[1]].led\n # TODO output ans \n # mqtt_obj.publish(topic = \"/simu_IPC/reply\" , payload = str(ans), qos = 0, retain = True)\n else: # Error cmd \n pass \n \n #----- Turn on led if switch is on -------# \n for bt in self.bt_dic: \n if bt == self.current_floor:\n continue\n if self.bt_dic[bt].switch == 1:\n self.bt_dic[bt].led = 1\n\n #----- Check if any floor button is on -------# \n is_floor_LED = False\n for bt in self.bt_dic:\n if bt != 'open' and bt != 'close' and self.bt_dic[bt].led == 1:\n is_floor_LED = True\n \n ########################\n ### State Machine ###\n ########################\n if self.state == \"standing_by\":\n if self.bt_dic['open'].switch == 1 : # switch to \"opening\" state\n self.state = \"opening\"\n if is_floor_LED:\n self.plan()\n self.LED_cancel_wrong_floor()\n if self.target_floor != \"\":\n self.state = \"moving\"\n else: # No avalible floor to go\n pass \n elif self.state == \"opening\":\n if self.bt_dic['close'].switch == 1 :\n self.state = \"closing\"\n \n # Opening Counting\n if self.door_sensor < DOOR_SENSOR_MAX:# Keep opening door. 
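Raises door_sensor by OPENING_VEL once per LOOP_PERIOD tick.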
\n self.door_sensor += self.OPENING_VEL\n self.cartoon_move_door(-self.CARTOON_DOOR_OPENING_VEL)\n\n elif self.door_sensor >= DOOR_SENSOR_MAX :#if door is fully open, switch to \"opened\" state\n self.door_sensor = DOOR_SENSOR_MAX\n self.timer = threading.Timer(OPEN_LOCK_TIME,self.opened_timer_cb)\n self.timer.start()\n self.state = \"opened\"\n elif self.state == \"opened\":\n if self.bt_dic['open'].switch == 1 : # Reset opening timer\n self.timer.cancel()\n self.timer = threading.Timer(OPEN_LOCK_TIME,self.opened_timer_cb)\n self.timer.start()\n elif self.bt_dic['close'].switch == 1 : # Switch to closing\n self.timer.cancel()\n self.bt_dic['open'].led = 0\n self.state = \"closing\"\n elif self.state == \"closing\":\n if self.bt_dic['open'].switch == 1 : #Reset opening counter\n self.state = \"opening\"\n \n if self.door_sensor > 0: # Keep closing door \n self.door_sensor -= self.CLOSING_VEL\n self.cartoon_move_door(self.CARTOON_DOOR_CLOSING_VEL) # fix: the closing branch previously reused CARTOON_DOOR_OPENING_VEL\n elif self.door_sensor <= 0: # Switch to \"moving state\" or \"standing_by \"\n self.door_sensor = 0 \n # TODO I think it's OK to switch to \"standing_by\" first\n\n if is_floor_LED: # switch to moving\n self.plan() # get target_floor and direction\n if self.target_floor == \"\" : # nasty exception TODO\n self.direction = \"no_assign\"\n self.plan()\n self.LED_cancel_wrong_floor()\n if self.target_floor != \"\" : # \n self.state = \"moving\"\n else : # No available floor to go\n self.direction = \"no_assign\"\n self.state = \"standing_by\"\n \n else: # switch to standing_by\n self.bt_dic['close'].led = 0\n self.direction = \"no_assign\"\n self.state = \"standing_by\"\n elif self.state == \"moving\":\n # keep moving\n if self.direction == \"up\":\n self.ev_location_sensor += self.MOVING_VEL\n self.cartoon_move_ev(-self.CARTOON_EV_MOVING_VEL)\n elif self.direction == \"down\":\n self.ev_location_sensor -= self.MOVING_VEL\n self.cartoon_move_ev(self.CARTOON_EV_MOVING_VEL)\n\n d = EV_LOCATION_MAX/(len(self.bt_list)-1) # divisor: sensor range between adjacent floors\n q = int(self.ev_location_sensor //d)# quotient\n r = self.ev_location_sensor % d # remainder\n if r >= 0 and r <= self.MOVING_VEL*1.1:# ev is passing a floor right now\n self.current_floor = self.bt_list[q]\n \n # Check if target_floor arrived.\n if self.current_floor == self.target_floor: # elevator Arrived !!! 
settlement_LED, and switch to \"opening\" state\n self.LED_cancel_wrong_floor() # Erase dirrent direction and \n self.bt_dic[self.target_floor].led = 0\n self.target_floor = \"\" # DON\"T RESET DIRECTION\n self.state = \"opening\"\n \n \n self.sim_shandi_ev()\n \n #------- Reset switch -------# I trust these switch has been used.\n for bt in self.bt_dic:\n self.bt_dic[bt].switch = 0 \n\n #############################\n ### Show Floor canvas ###\n #############################\n '''\n current_floor show on floor canvas\n '''\n self.window.after(0,self.change_pic_canvas_floor(self.bt_dic[self.current_floor].pic_path))\n \n #############################\n ### Show state canvas ###\n #############################\n if self.state == \"opened\":\n self.window.after(0,self.change_pic_canvas_state(\"pic/open_4.jpeg\"))\n elif self.state == \"standing_by\":\n self.window.after(0,self.change_pic_canvas_state(\"pic/blank.jpeg\"))\n elif self.state == \"opening\": # opening animation\n self.canvas_animate(EV_STATE_CANVAS_PERIOD , [\"pic/open_1.jpeg\",\\\n \"pic/open_2.jpeg\",\\\n \"pic/open_3.jpeg\",\\\n \"pic/open_4.jpeg\"])\n elif self.state == \"closing\":# closing animation\n self.canvas_animate(EV_STATE_CANVAS_PERIOD , [\"pic/close_1.jpeg\",\\\n \"pic/close_2.jpeg\",\\\n \"pic/close_3.jpeg\",\\\n \"pic/close_4.jpeg\"])\n elif self.state == \"moving\":\n if self.direction == \"up\":\n self.canvas_animate(EV_STATE_CANVAS_PERIOD , [\"pic/up_1.jpeg\",\\\n \"pic/up_2.jpeg\",\\\n \"pic/up_3.jpeg\"])\n elif self.direction == \"down\":\n self.canvas_animate(EV_STATE_CANVAS_PERIOD , [\"pic/down_1.jpeg\",\\\n \"pic/down_2.jpeg\",\\\n \"pic/down_3.jpeg\"])\n else:\n pass\n \n ###########################\n ### Show LED Status ###\n ###########################\n '''\n If led = 1 -> button highlight\n If led = 0 -> button don't highlight\n '''\n for bt_key in self.bt_dic:\n bt = self.bt_dic[bt_key]\n if bt.led == 1: # led of button is on \n bt.bt.configure(bg = BUTTON_HIGHLIGHT_COLOR)\n bt.bt.configure(activebackground = bt.bt.cget('background'))\n else: # led of button is off \n bt.bt.configure(bg = BUTTON_COLOR)\n bt.bt.configure(activebackground = bt.bt.cget('background'))\n \n ###########################\n ### Text show state ###\n ###########################\n self.text.delete(1.0,tk.END)\n self.text.insert(\"insert\",\"state :\" + self.state + '\\n')\n self.text.insert(\"insert\",\"direction :\" + self.direction + '\\n')\n self.text.insert(\"insert\",\"current floor :\" + self.current_floor+ '\\n')\n self.text.insert(\"insert\",\"target floor :\" + self.target_floor + '\\n')\n \n #----- Recursive call main() --------# \n self.window.after(int(LOOP_PERIOD*1000),self.main)\n \n def sim_shandi_ev(self):\n #################################\n ### Simulation to shandi EV ###\n #################################\n # Add this block to sim shandi Ev\n # open and close directly indicate ev state \n if self.state == \"opening\" :\n self.bt_dic['open'].led = 1\n self.bt_dic['close'].led = 0\n elif self.state == \"closing\":\n self.bt_dic['open'].led = 0\n self.bt_dic['close'].led = 1\n else:\n self.bt_dic['open'].led = 0\n self.bt_dic['close'].led = 0\n \n # Add this block to sim shangdi ev, current floor cancel led\n bt = self.bt_dic[self.current_floor]\n if bt.switch == 1:\n bt.led = 1\n self.led_sustain_count = 1\n if self.led_sustain_count > 0 and self.led_sustain_count < (LED_SUSTAIN_TIME/LOOP_PERIOD):\n bt.led = 1\n self.led_sustain_count += 1\n elif self.led_sustain_count >= (LED_SUSTAIN_TIME/LOOP_PERIOD):\n 
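# LED sustain window elapsed: turn the current-floor LED off and reset the counter\n            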
bt.led = 0\n self.led_sustain_count = 0\n\n\n def canvas_animate(self,period,pic_list):\n '''\n create an animation on the canvas\n Input : \n period : value , how long it takes to play through the whole animation\n pic_list : list , picture paths that will play in sequence.\n Dependence : \n self.animate_counter \n '''\n num_pic = len(pic_list)\n self.animate_counter += 1\n try: \n pic_path = pic_list[self.animate_counter//(period//num_pic)]\n except: # counter exceeded the period, reset\n self.animate_counter = 0\n else:\n self.window.after(0,self.change_pic_canvas_state(pic_path))\n\n def change_pic_canvas_floor(self,new_pic_path):\n '''\n Change picture inside canvas_floor to new picture\n Input : \n new_pic_path : str , new picture path \n '''\n self.img_floor = ImageTk.PhotoImage(Image.open(new_pic_path).resize((35,50), Image.ANTIALIAS))\n self.canvas_floor.create_image (27,30,anchor = 'center', image = self.img_floor) # where to put pic on canvas\n\n def change_pic_canvas_state(self,new_pic_path):\n '''\n Change picture inside canvas_state to new picture\n Input : \n new_pic_path : str , new picture path \n '''\n self.img_arrow = ImageTk.PhotoImage(Image.open(new_pic_path).resize((50,50), Image.ANTIALIAS))\n self.canvas_state.create_image (27,30,anchor = 'center', image = self.img_arrow) # where to put pic on canvas\n \n def cartoon_move_ev(self,increment):\n self.canvas_cartoon.move(self.ev, 0, increment)\n self.canvas_cartoon.move(self.left_door , 0, increment)\n self.canvas_cartoon.move(self.right_door, 0, increment)\n \n def cartoon_move_door(self,increment):\n '''\n Opening door or Closing door by one increment \n Input :\n increment : positive -> close door \n negative -> open door\n '''\n # Increment left door \n an1_x, an1_y, an2_x, an2_y = self.canvas_cartoon.coords(self.left_door)\n an2_x += increment\n self.canvas_cartoon.coords(self.left_door, an1_x, an1_y, an2_x, an2_y)\n # Increment right door \n an1_x, an1_y, an2_x, an2_y = self.canvas_cartoon.coords(self.right_door)\n an1_x -= increment\n self.canvas_cartoon.coords(self.right_door, an1_x, an1_y, an2_x, an2_y)\n\n \n ######################################\n ### buttons callback function ###\n ######################################\n def cb_bt_1f(self):\n self.bt_dic['1'].switch = 1\n def cb_bt_2f(self):\n self.bt_dic['2'].switch = 1\n def cb_bt_3f(self):\n self.bt_dic['3'].switch = 1\n def cb_bt_4f(self):\n self.bt_dic['4'].switch = 1\n def cb_bt_open(self):\n self.bt_dic['open'].switch = 1\n def cb_bt_close(self):\n self.bt_dic['close'].switch = 1\napp()","sub_path":"tkinter/elevator_simulator.py","file_name":"elevator_simulator.py","file_ext":"py","file_size_in_byte":25940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"208252724","text":"from adept.exp.base.exp_module import ExpModule\nfrom adept.utils import listd_to_dlist\nfrom collections import namedtuple\nimport random\nfrom operator import itemgetter\nimport numpy as np\n\n\nclass AdeptMarioReplay(ExpModule):\n\n    args = {\n        \"exp_size\": 15625,\n        \"exp_min_size\": 200,\n        \"rollout_len\": 32,\n        \"exp_update_rate\": 1,\n    }\n\n    def __init__(self, spec_builder, size, min_size, rollout_len, update_rate):\n        super(AdeptMarioReplay, self).__init__()\n        assert isinstance(size, int) # fix: the original asserted type(size == int), which is always truthy\n        assert isinstance(rollout_len, int)\n\n        print(\"Using: Adept Mario Replay\")\n\n        self.spec = spec_builder(rollout_len)\n        self.obs_keys = spec_builder.obs_keys\n        self.exp_keys = spec_builder.exp_keys\n        self.key_types = spec_builder.key_types\n        self.keys = 
self.exp_keys\n\n self._storage = []\n self._full = False\n self._maxsize = size\n self._update_rate = update_rate\n self._minsize = min_size\n self._next_idx = 0\n self._keys = [\"observations\", \"rewards\", \"terminals\"] + self.keys\n\n self.rollout_len = rollout_len\n self.device = \"cpu\"\n self.target_device = self.device\n\n @classmethod\n def from_args(cls, args, spec_builder):\n return cls(\n spec_builder,\n args.exp_size,\n args.exp_min_size,\n args.rollout_len,\n args.exp_update_rate,\n )\n\n def __len__(self):\n if not self._full:\n return len(self._storage)\n else:\n return self._maxsize\n\n def write_actor(self, experience):\n # convert to cpu\n exp_storage_dev = self._exp_to_dev(experience, self.device)\n # write forward occurs before write env so append here\n if not self._full and self._next_idx >= len(self._storage):\n self._storage.append(exp_storage_dev)\n else:\n self._storage[self._next_idx] = exp_storage_dev\n\n def _exp_to_dev(self, experience, device):\n # TODO this should be a generic function somewhere?\n exp = {}\n for k, v in experience.items():\n if isinstance(v, dict):\n on_d = {d_key: d_v.to(device) for d_key, d_v in v.items()}\n # tensor\n else:\n on_d = v.to(device)\n exp[k] = on_d\n return exp\n\n def write_env(self, obs, rewards, terminals, infos):\n # forward already written, add env info then increment\n dict_at_ind = self._storage[self._next_idx]\n self._next_idx = int((self._next_idx + 1) % self._maxsize)\n # when index wraps exp is full\n if self._next_idx == 0:\n self._full = True\n dict_at_ind[\"observations\"] = {k: v.cpu() for k, v in obs.items()}\n dict_at_ind[\"rewards\"] = rewards.cpu()\n dict_at_ind[\"terminals\"] = terminals.cpu()\n\n def read(self):\n exp_list, last_obs, is_weights = self._sample()\n exp_dev_list = [\n self._exp_to_dev(e, self.target_device) for e in exp_list\n ]\n # will be list of dicts, convert to dict of lists\n dict_of_list = listd_to_dlist(exp_dev_list)\n # get next obs\n dict_of_list[\"next_observation\"] = last_obs\n # importance sampling weights\n dict_of_list[\"importance_sample_weights\"] = is_weights\n\n # return named tuple\n return namedtuple(\n self.__class__.__name__,\n [\"importance_sample_weights\", \"next_observation\"] + self._keys,\n )(**dict_of_list)\n\n def _sample(self):\n # TODO support burn_in\n # if full indexes may wrap\n if self._full:\n # wrap index starting from current index to full size\n min_ind = self._next_idx\n max_ind = min_ind + (self._maxsize - (self.rollout_len + 1))\n index = random.randint(min_ind, max_ind)\n # range is exclusive of end so last_index == end_index\n end_index = index + self.rollout_len\n last_index = int((end_index) % self._maxsize)\n indexes = (np.arange(index, end_index) % self._maxsize).astype(int)\n else:\n # sample an index and get the next sequential samples of len rollout_len\n index = random.randint(\n 0, len(self._storage) - (self.rollout_len + 1)\n )\n end_index = index + self.rollout_len\n indexes = list(range(index, end_index))\n # range is exclusive of end so last index == end_index\n last_index = end_index\n weights = np.ones(self.rollout_len)\n return (\n itemgetter(*indexes)(self._storage),\n self._storage[last_index][\"observations\"],\n weights,\n )\n\n def to(self, device):\n self.target_device = device\n\n def is_ready(self):\n # plus 2 to include next observations\n if len(self) > self._minsize and len(self) > self.rollout_len + 2:\n return self._next_idx % self._update_rate == 0\n return False\n\n def clear(self):\n 
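# no-op: samples stay in self._storage until the ring buffer overwrites them\n        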
pass\n","sub_path":"mariorl/modules/adept_mario_replay.py","file_name":"adept_mario_replay.py","file_ext":"py","file_size_in_byte":5061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"614315950","text":"def display_inventory(inventory):\n total = 0\n print(\"Inventory:\")\n for k in inventory:\n print (str(inventory[k]) + \" \" + k)\n total += inventory[k]\n\n print(\"Total number of items: \" + str(total))\n\ndef add_to_inventory(inventory, added_items):\n for item in added_items:\n inventory[item] = (inventory.get(item, 0)) + 1\n return inventory\n\nif __name__ == \"__main__\":\n inventory = {'gold coin': 42, 'rope': 1}\n dragon_loot=['gold coin', 'dagger', 'gold coin', 'gold coin', 'ruby']\n inventory = add_to_inventory(inventory, dragon_loot)\n display_inventory(inventory)\n\n ","sub_path":"Chapter 5/fantasy_inventory.py","file_name":"fantasy_inventory.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"308139353","text":"import os\nimport numpy as np\nimport argparse\nimport time\n\nimport torch\n\nfrom utils.net_utils import adjust_learning_rate, save_checkpoint, clip_gradient, calc_grad_norm\nfrom utils.box_utils import sample_proposals\nfrom model.cam_det import CamDet\nfrom datasets.tdet_dataset import TDETDataset\nfrom matplotlib import pyplot as plt\nimport torch.nn.functional as F\nimport math\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Train')\n parser.add_argument('--net', default='CAM_DET', type=str)\n parser.add_argument('--start_iter', help='starting iteration', default=1, type=int)\n parser.add_argument('--max_iter', help='number of iterations', default=70000, type=int)\n parser.add_argument('--disp_interval', help='number of iterations to display loss', default=1000, type=int)\n parser.add_argument('--save_interval', dest='save_interval', help='number of iterations to save', default=10000, type=int)\n parser.add_argument('--save_dir', help='directory to save models', default=\"../repo/tdet\")\n parser.add_argument('--data_dir', help='directory to load data', default='../data', type=str)\n\n parser.add_argument('--bs', help='training batch size', default=10, type=int)\n\n parser.add_argument('--lr', help='starting learning rate', default=0.001, type=float)\n parser.add_argument('--s', dest='session', help='training session', default=-1, type=int)\n parser.add_argument('--seed', help='random sed', default=1, type=int)\n parser.add_argument('--target_only', action='store_true')\n\n parser.add_argument('--hidden_dim', help='hidden layer dim for attention', default=128, type=int)\n\n\n # resume trained model\n parser.add_argument('--r', dest='resume', help='resume checkpoint or not', action='store_true')\n parser.add_argument('--checksession', dest='checksession', help='checksession to load model', default=0, type=int)\n parser.add_argument('--checkiter', dest='checkiter', help='checkiter to load model', default=0, type=int)\n\n args = parser.parse_args()\n return args\n\n\ndef draw_box(boxes, col=None):\n for j, (xmin, ymin, xmax, ymax) in enumerate(boxes):\n if col is None:\n c = np.random.rand(3)\n else:\n c = col\n plt.hlines(ymin, xmin, xmax, colors=c, lw=2)\n plt.hlines(ymax, xmin, xmax, colors=c, lw=2)\n plt.vlines(xmin, ymin, ymax, colors=c, lw=2)\n plt.vlines(xmax, ymin, ymax, colors=c, lw=2)\n\n\ndef train():\n args = parse_args()\n print('Called with args:')\n print(args)\n assert 
args.bs % 2 == 0\n\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed(args.seed)\n device = torch.device('cuda')\n else:\n device = torch.device('cpu')\n print(device)\n output_dir = args.save_dir\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n target_only = args.target_only\n source_train_dataset = TDETDataset(['coco60_train2014', 'coco60_val2014'], args.data_dir, 'eb', num_classes=60)\n target_train_dataset = TDETDataset(['voc07_trainval'], args.data_dir, 'eb', num_classes=20)\n\n lr = args.lr\n\n if args.net == 'CAM_DET':\n model = CamDet(os.path.join(args.data_dir, 'pretrained_model/vgg16_caffe.pth') if not args.resume else None, 20 if target_only else 80, args.hidden_dim)\n else:\n raise Exception('network is not defined')\n\n optimizer = model.get_optimizer(args.lr)\n\n if args.resume:\n load_name = os.path.join(output_dir, '{}_{}_{}.pth'.format(args.net, args.checksession, args.checkiter))\n print(\"loading checkpoint %s\" % (load_name))\n checkpoint = torch.load(load_name)\n assert args.net == checkpoint['net']\n args.start_iter = checkpoint['iterations'] + 1\n model.load_state_dict(checkpoint['model'])\n print(\"loaded checkpoint %s\" % (load_name))\n del checkpoint\n\n log_file_name = os.path.join(output_dir, 'log_{}_{}.txt'.format(args.net, args.session))\n if args.resume:\n log_file = open(log_file_name, 'a')\n else:\n log_file = open(log_file_name, 'w')\n log_file.write(str(args))\n log_file.write('\\n')\n\n model.to(device)\n model.train()\n source_loss_sum = 0\n target_loss_sum = 0\n total_loss_sum = 0\n start = time.time()\n source_rand_perm = None\n target_rand_perm = None\n for step in range(args.start_iter, args.max_iter + 1):\n if source_rand_perm is None or step % len(source_train_dataset) == 1:\n source_rand_perm = np.random.permutation(len(source_train_dataset))\n if target_rand_perm is None or step % len(target_train_dataset) == 1:\n target_rand_perm = np.random.permutation(len(target_train_dataset))\n\n source_index = source_rand_perm[step % len(source_train_dataset)]\n target_index = target_rand_perm[step % len(target_train_dataset)]\n\n optimizer.zero_grad()\n if not target_only:\n source_batch = source_train_dataset.get_data(source_index, h_flip=np.random.rand() > 0.5, target_im_size=np.random.choice([480, 576, 688, 864, 1200]))\n\n source_im_data = source_batch['im_data'].unsqueeze(0).to(device)\n source_gt_labels = source_batch['gt_labels'] + 20\n source_pos_cls = [i for i in range(80) if i in source_gt_labels]\n source_pos_cls = torch.tensor(np.random.choice(source_pos_cls, min(args.bs, len(source_pos_cls)), replace=False), dtype=torch.long, device=device)\n\n source_loss, _, _ = model(source_im_data, source_pos_cls)\n source_loss_sum += source_loss.item()\n\n target_batch = target_train_dataset.get_data(target_index, h_flip=np.random.rand() > 0.5,\n target_im_size=np.random.choice([480, 576, 688, 864, 1200]))\n\n target_im_data = target_batch['im_data'].unsqueeze(0).to(device)\n target_gt_labels = target_batch['gt_labels']\n target_pos_cls = [i for i in range(80) if i in target_gt_labels]\n target_pos_cls = torch.tensor(np.random.choice(target_pos_cls, min(args.bs, len(target_pos_cls)), replace=False), dtype=torch.long, device=device)\n\n target_loss, _, _, _ = model(target_im_data, target_pos_cls)\n target_loss_sum += target_loss.item()\n if args.target_only:\n total_loss = target_loss\n else:\n total_loss = (source_loss + target_loss) * 0.5\n total_loss.backward()\n 
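# accumulate the scalar loss for the periodic log message below\n        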
total_loss_sum += total_loss.item()\n clip_gradient(model, 10.0)\n optimizer.step()\n\n if step % args.disp_interval == 0:\n end = time.time()\n total_loss_sum /= args.disp_interval\n source_loss_sum /= args.disp_interval\n target_loss_sum /= args.disp_interval\n log_message = \"[%s][session %d][iter %4d] loss: %.8f, src_loss: %.8f, tar_loss: %.8f, lr: %.2e, time: %.1f\" % \\\n (args.net, args.session, step, total_loss_sum, source_loss_sum, target_loss_sum, lr, end - start)\n print(log_message)\n log_file.write(log_message + '\\n')\n log_file.flush()\n total_loss_sum = 0\n source_loss_sum = 0\n target_loss_sum = 0\n start = time.time()\n\n if step in (args.max_iter * 4 // 7, args.max_iter * 6 // 7):\n adjust_learning_rate(optimizer, 0.1)\n lr *= 0.1\n\n if step % args.save_interval == 0 or step == args.max_iter:\n save_name = os.path.join(output_dir, '{}_{}_{}.pth'.format(args.net, args.session, step))\n checkpoint = dict()\n checkpoint['net'] = args.net\n checkpoint['session'] = args.session\n checkpoint['iterations'] = step\n checkpoint['model'] = model.state_dict()\n\n save_checkpoint(checkpoint, save_name)\n print('save model: {}'.format(save_name))\n\n log_file.close()\n\n\nif __name__ == '__main__':\n train()","sub_path":"train_camdet.py","file_name":"train_camdet.py","file_ext":"py","file_size_in_byte":7974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"577461316","text":"import pandas as pd\nfrom utils.features.base_features import get_cross_team_ground_key\n\n\ndef get_last_x_h2h_in_ground_feature(match_results, x):\n print(f\"Generating feature : Head-to-Head on a specific Ground for last {x} rolling\")\n last_x_h2h_in_ground = {}\n sub_frame = match_results[['game', 'home_team', 'away_team', 'f_ground_id', 'result']].copy()\n\n sub_frame = sub_frame.sort_values(by=\"game\")\n sub_frame[f'f_last_{x}_h2h_in_ground'] = 0.0\n sub_frame['comp_key'] = sub_frame.apply(lambda df: get_cross_team_ground_key(df['home_team'], df['away_team'],\n df['f_ground_id']), axis=1)\n\n last_x_h2h_in_ground = {x: 0 for x in list(sub_frame['comp_key'].unique())}\n\n sub_frame_list = []\n\n for key in last_x_h2h_in_ground:\n sub_frame_copy = sub_frame[sub_frame['comp_key'] == key].copy()\n\n key_part_1 = key.split(\"::\")[0]\n\n # flip score if home and away are flipped\n sub_frame_copy.loc[sub_frame_copy['home_team'].str.lower() != key_part_1, 'result'] = -sub_frame_copy['result']\n\n sub_frame_copy[f'f_last_{x}_h2h_in_ground'] = sub_frame_copy.iloc[:, 4].rolling(window=x).sum()\n sub_frame_list.append(sub_frame_copy)\n\n last_x_h2h_in_ground[key] = sub_frame_copy.iloc[len(sub_frame_copy) - 1][\n f'f_last_{x}_h2h_in_ground']\n\n concat_frame = pd.DataFrame(columns=list(sub_frame.columns))\n for item in sub_frame_list:\n concat_frame = concat_frame.append(item, ignore_index=True)\n\n concat_frame = concat_frame.sort_values(by=\"game\")\n concat_frame[f'f_last_{x}_h2h_in_ground'] = concat_frame[f'f_last_{x}_h2h_in_ground'].fillna(0.0)\n\n concat_frame = concat_frame[['game', f'f_last_{x}_h2h_in_ground']]\n\n encounter_matrix_fr_object = {\n 'comp_key': [],\n f'f_last_{x}_h2h_in_ground': []\n }\n\n for k, v in last_x_h2h_in_ground.items():\n encounter_matrix_fr_object['comp_key'].append(k)\n encounter_matrix_fr_object[f'f_last_{x}_h2h_in_ground'].append(v)\n\n last_x_h2h_gr_fr = pd.DataFrame(encounter_matrix_fr_object)\n\n return concat_frame, 
last_x_h2h_gr_fr\n","sub_path":"utils/features/last_x_h2h_in_ground_feature.py","file_name":"last_x_h2h_in_ground_feature.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"52213602","text":"#!/usr/bin/python\nimport RPi.GPIO as GPIO\nimport fileinput\nimport time\nimport os\n\nPIN_NUMBER = 14\n\nALPHABET = {\n\t\"A\" : \".-\",\n\t\"B\" : \"-...\",\n\t\"C\" : \"-.-.\",\n\t\"D\" : \"-..\",\n\t\"E\" : \".\",\n\t\"F\" : \"..-.\",\n\t\"G\" : \"--.\",\n\t\"H\" : \"....\",\n\t\"I\" : \"..\",\n\t\"J\" : \".---\",\n\t\"K\" : \"-.-\",\n\t\"L\" : \".-..\",\n\t\"M\" : \"--\",\n\t\"N\" : \"-.\",\n\t\"O\" : \"---\",\n\t\"P\" : \".--.\",\n\t\"Q\" : \"--.-\",\n\t\"R\" : \".-.\",\n\t\"S\" : \"...\",\n\t\"T\" : \"-\",\n\t\"U\" : \"..-\",\n\t\"V\" : \"...-\",\n\t\"W\" : \".--\",\n\t\"X\" : \"-..-\",\n\t\"Y\" : \"-.--\",\n\t\"Z\" : \"--..\",\n\t\" \" : \"/\"\n}\n\nDOT_TIME = 0.2\nDASH_TIME = 0.8\n\nSIGN_SPACE = 0.5\nLETTER_SPACE = 0.8\nWORD_SPACE = 1.2\n\ndef turn_diode_on_for(interval):\n\tGPIO.output(PIN_NUMBER,GPIO.HIGH)\n\tos.system('play --no-show-progress --null --channels 1 synth {0} sine 2500'.format(interval))\n\tGPIO.output(PIN_NUMBER, GPIO.LOW)\n\ndef message_start():\n\tfor i in range(0,10):\n\t\tturn_diode_on_for(0.1)\n\t\ttime.sleep(0.1)\n\ndef play_in_morse(msg):\n \n for letter in msg:\n for sign in ALPHABET[letter]:\n if sign == \".\":\n turn_diode_on_for(DOT_TIME)\n if sign == \"-\":\n turn_diode_on_for(DASH_TIME)\n time.sleep(SIGN_SPACE)\n time.sleep(SIGN_SPACE * 2) # Letter space is 3x SIGN_SPACE\n if letter == \" \":\n time.sleep(SIGN_SPACE * 2) # Word space is 5x SIGN_SPACE\n\ndef main():\n\tGPIO.setmode(GPIO.BCM)\n\tGPIO.setwarnings(False)\n\tGPIO.setup(PIN_NUMBER, GPIO.OUT)\n\t\n\tmessages = []\n\tfor line in fileinput.input():\n\t\tmessages.append(line.rstrip())\n\n\tmsg = \" \".join(messages)\n\n\twhile True:\n\t\tmessage_start()\n\t\ttime.sleep(1)\n\t\tplay_in_morse(msg.upper())\n\nmain()\n","sub_path":"morse_pi/morse_pi.py","file_name":"morse_pi.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"300082500","text":"######################################################\r\n# Multi-Layer Perceptron Classifier for MNIST dataset\r\n# Mark Harvey\r\n# Dec 2018\r\n######################################################\r\nimport tensorflow as tf\r\nimport os\r\nimport sys\r\nimport shutil\r\n\r\n\r\n# hyper-parameters\r\nlearning_rate = 0.001\r\nbatch_size = 50\r\nsteps = int(60000 / batch_size)\r\n\r\n\r\n#####################################################\r\n# Set up directories\r\n#####################################################\r\n\r\n# Returns the directory the current script (or interpreter) is running in\r\ndef get_script_directory():\r\n path = os.path.realpath(sys.argv[0])\r\n if os.path.isdir(path):\r\n return path\r\n else:\r\n return os.path.dirname(path)\r\n\r\n\r\n# create a directory for the MNIST dataset if it doesn't already exist\r\nSCRIPT_DIR = get_script_directory()\r\nMNIST_DIR = os.path.join(SCRIPT_DIR, 'mnist_dir')\r\nif not (os.path.exists(MNIST_DIR)):\r\n os.makedirs(MNIST_DIR)\r\n print(\"Directory \" , MNIST_DIR , \"created \")\r\n\r\n# create a directory for the TensorBoard data if it doesn't already exist\r\n# delete it and recreate if it already exists\r\nTB_LOG_DIR = os.path.join(SCRIPT_DIR, 'tb_log')\r\n\r\nif (os.path.exists(TB_LOG_DIR)):\r\n 
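# wipe logs from any previous run so TensorBoard shows only this one\r\n    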
shutil.rmtree(TB_LOG_DIR)\r\nos.makedirs(TB_LOG_DIR)\r\nprint(\"Directory \" , TB_LOG_DIR , \"created \") \r\n\r\n\r\n\r\n#####################################################\r\n# Dataset preparation\r\n#####################################################\r\nfrom tensorflow.examples.tutorials.mnist import input_data\r\nmnist = input_data.read_data_sets(MNIST_DIR, one_hot=True)\r\n\r\n\r\n\r\n#####################################################\r\n# Create the Graph\r\n# Define placeholders, the network, optimizer, \r\n# loss & accuracy nodes\r\n#####################################################\r\n\r\n# define placeholders for the input data & labels\r\nx = tf.placeholder(tf.float32, shape=[None, 784])\r\ny = tf.placeholder(tf.float32, [None, 10])\r\n\r\n\r\n# MLP definition\r\ninput_layer = tf.layers.dense(inputs=x, units=784, activation=tf.nn.relu)\r\nhidden_layer1 = tf.layers.dense(inputs=input_layer, units=196, activation=tf.nn.relu)\r\nhidden_layer2 = tf.layers.dense(inputs=hidden_layer1, units=10, activation=None)\r\nprediction = tf.nn.softmax(hidden_layer2)\r\n\r\n\r\n# Define a cross entropy loss function\r\nloss = tf.reduce_mean(tf.losses.softmax_cross_entropy(logits=hidden_layer2, onehot_labels=y))\r\n\r\n# Define the optimizer function\r\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)\r\n\r\n# Calculate accuracy\r\ncorrect_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))\r\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\r\n\r\n# Initialize the variables\r\ninit = tf.initializers.global_variables()\r\n\r\n# TensorBoard data collection\r\ntf.summary.scalar('cross_entropy loss', loss)\r\ntf.summary.scalar('accuracy', accuracy)\r\n\r\n\r\n#####################################################\r\n# Create & run the Session\r\n#####################################################\r\n\r\n# Launch the graph\r\nwith tf.Session() as sess:\r\n\r\n sess.run(init)\r\n\r\n # TensorBoard writer\r\n writer = tf.summary.FileWriter(TB_LOG_DIR, sess.graph)\r\n tb_summary = tf.summary.merge_all()\r\n\r\n # Training cycle\r\n for step in range(steps):\r\n\r\n # fetch a batch\r\n batch = mnist.train.next_batch(batch_size)\r\n\r\n # display training accuracy every 100 batches\r\n if step % 100 == 0:\r\n train_accuracy = sess.run(accuracy, feed_dict={x: batch[0], y: batch[1]})\r\n print (\"Train Step:\", step, ' Training Accuracy: ', train_accuracy)\r\n\r\n # executing the optimizer is the actual training\r\n _, s = sess.run([optimizer,tb_summary], feed_dict={x: batch[0], y: batch[1]})\r\n writer.add_summary(s, step)\r\n\r\n\r\n print(\"Training Finished!\")\r\n\r\n # Evaluation\r\n print (\"Final Accuracy with test set:\", sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels}))\r\n\r\n\r\nprint (\"FINISHED! 
Run tensorboard with: tensorboard --host localhost --port 6006 --logdir=./tb_log\")\r\n\r\n","sub_path":"mnist_mlp.py","file_name":"mnist_mlp.py","file_ext":"py","file_size_in_byte":4072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"478937819","text":"import parse\n\n(names, weights, costs) = parse.get_data()\nweight = 0\nprice = 0\nitems = []\nwhile weight < 200:\n    item = max(costs, key=costs.get)\n    items.append(item)\n    weight += weights[item]\n    price += costs[item]\n    last_price = costs[item]\n    costs[item] = 0  # zero the cost so the same item is never picked again\n\n# the final pick pushed the weight over the 200 limit, so back it out of the totals\nprice -= last_price\nweight -= weights[items[-1]]\nprint(\"price: \" + str(price) + \", weight: \" + str(weight))\n","sub_path":"greedy.py","file_name":"greedy.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"629901598","text":"s1 = input()\ns2 = \"world\"\ns3 = s1 + \" \" + s2\ns4 = str(10)\n\nprint(s3, s3[1], 'Cat' 'bus', sep=\"\\t\")\n\n# Single quotes. The most commonly seen way to write a string.\nmy_str = 'double quotes \"can\" go inside'\n# Double quotes.\nmy_str = \"single quotes 'can' go inside\"\n# Three single quotes. Convenient for text spanning several lines.\nmy_str = '''In three single\n quotes'''\n# Triple double quotes. The conventional style for docstrings.\nmy_str = \"\"\"Three double quotes\"\"\"\n","sub_path":"L1/code/str.py","file_name":"str.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"553003456","text":"# This script will perform object detection using \"mynet\"\n# Images are specified by user input in the console\n# Expects that py-faster-rcnn is set up\n\n# Setup some python paths\nimport sys, os\n\ndef add_path(path):\n    if path not in sys.path:\n        sys.path.insert(0, os.path.join(os.environ['FRCN_ROOT'], path))\n\n# Add caffe to PYTHONPATH\nadd_path('caffe-fast-rcnn/python')\n\n# Add lib to PYTHONPATH\nadd_path('lib')\n\n# Include fast_rcnn and misc libraries\nfrom fast_rcnn.config import cfg\nfrom fast_rcnn.test import im_detect\nfrom fast_rcnn.nms_wrapper import nms\nfrom utils.timer import Timer\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.io as sio\nimport caffe, os, sys, cv2\nimport argparse\n\n#\n# Parse input arguments\n#\ndef parse_args():\n    parser = argparse.ArgumentParser(description='Detects objects in an image using a net trained on the udacity dataset')\n    parser.add_argument('--gpu_id', dest='gpu_id',\n                        help='GPU device id to use [0]',\n                        default=0, type=int)\n    parser.add_argument('--use_cpu', dest='use_cpu',\n                        help='Set to 1 to use the CPU [0]',\n                        default=0, type=int)\n    parser.add_argument('--images', dest='images_file',\n                        help='Text file containing the images to process. If not provided, the user can specify images in the console',\n                        default=None, type=str)\n    parser.add_argument('--output', dest='output_dir',\n                        help='Results are saved to this directory. 
If not provided, plots are shown on-screen',\n default=None, type=str)\n parser.add_argument('--output_suffix', dest='output_suffix',\n help='File suffix appended to the output',\n default=\"_detect\", type=str)\n args = parser.parse_args()\n return args\n\n\nif __name__ == '__main__':\n args = parse_args()\n\n print('Called with args:')\n print(args)\n\n # Create a list of classes and a map of trained nets\n CLASSES = ('__background__', 'car', 'truck', 'pedestrian', 'trafficLight', 'biker')\n\n # Enable RPN (Region Proposal Net) and set cpu or gpu mode\n cfg.TEST.HAS_RPN = True # Use RPN for proposals\n if args.use_cpu:\n caffe.set_mode_cpu()\n else:\n caffe.set_mode_gpu()\n caffe.set_device(args.gpu_id)\n cfg.GPU_ID = 0\n\n # Set models directory\n cfg.MODELS_DIR = os.path.join(os.environ['FRCN_ROOT'], \"models\")\n\n # Load the trained net into Caffe\n prototxt = os.path.join(cfg.MODELS_DIR, 'mynet', 'faster_rcnn_alt_opt', 'faster_rcnn_test.pt')\n caffemodel = os.path.join(cfg.DATA_DIR, 'faster_rcnn_models', 'mynet_faster_rcnn_final.caffemodel')\n\n if not os.path.isfile(caffemodel):\n raise IOError((\"%s not found\") % caffemodel)\n if not os.path.isfile(prototxt):\n raise IOError((\"%s not found\") % prototxt)\n\n print('Loading network %s' % caffemodel) \n net = caffe.Net(prototxt, caffemodel, caffe.TEST)\n print('Network loaded')\n\n # Warmup on a dummy image\n im = 128 * np.ones((300, 500, 3), dtype=np.uint8)\n for i in range(2):\n _, _= im_detect(net, im)\n\n # If an image list is provided, process it\n image_list = None\n if args.images_file:\n \timage_list = []\n \twith open(args.images_file) as f:\n \t\tfor line in f.readlines():\n \t\t\timage_list.append(line.strip())\n\n # If no image list was provided, loop forever and ask for user input for every loop\n # If provided, process all input images\n while not args.images_file or image_list:\n if not image_list:\n # Load input image\n image_file = input(\"Image path: \")\n if image_file == \"quit\": break\n else:\n image_file = os.path.join(os.path.dirname(args.images_file), image_list.pop())\n\n # Check if file exists\n if not os.path.isfile(image_file):\n # Try to load from data/MyData/data/Images\n tmp = os.path.join(cfg.DATA_DIR, 'MyData', 'data', 'Images', image_file)\n if not os.path.isfile(tmp):\n print(\"Image file %s not found\" % image_file)\n continue\n else:\n image_file = tmp\n\n print(\"Detecting objects in image %s\" % image_file)\n\n # Load image\n im = cv2.imread(image_file)\n\n # Perform object detection (im_detect)\n timer = Timer()\n timer.tic()\n scores, boxes = im_detect(net, im)\n timer.toc()\n print(('Detection took {:.3f}s for {:d} object proposals').format(timer.total_time, boxes.shape[0]))\n\n ### Draw detected bounding boxes ###\n def vis_detections(im, class_name, dets, ax, thresh=0.5):\n inds = np.where(dets[:, -1] >= thresh)[0] # inds = the list of the indices of\n if len(inds) == 0: # objects whose score are above tresh\n return\n \n # For every object we detected\n for i in inds:\n bbox = dets[i, :4]\n score = dets[i, -1]\n\n # Draw bounding box\n ax.add_patch(\n plt.Rectangle((bbox[0], bbox[1]),\n bbox[2] - bbox[0],\n bbox[3] - bbox[1], fill=False,\n edgecolor='red', linewidth=3.5)\n )\n \n # Draw class name and score\n ax.text(bbox[0], bbox[1] - 2,\n '{:s} {:.3f}'.format(class_name, score),\n bbox=dict(facecolor='blue', alpha=0.5),\n fontsize=14, color='white')\n \n\n\n # Plot the detection results\n #\n # Convert image from BGR to RGB\n imrgb = im[:, :, (2, 1, 0)]\n \n # Plot image in figure\n 
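# (one figure per input image; the window title carries the file path so results are easy to tell apart)\n    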
fig, ax = plt.subplots(figsize=(12, 12))\n fig.canvas.set_window_title(image_file) \n ax.imshow(imrgb, aspect='equal')\n\n # Visualize detections for each class\n CONF_THRESH = 0.8\n NMS_THRESH = 0.3\n ax.set_title(('detection threshold >= %.1f\\ndetection time = %.3f (using %s)') % (CONF_THRESH, timer.total_time, \"CPU\" if args.use_cpu else \"GPU\"), fontsize=14)\n for cls_ind, cls in enumerate(CLASSES[1:]):\n cls_ind += 1 # because we skipped background\n cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)] # Grabs all the (300) rectangles for this class\n cls_scores = scores[:, cls_ind]\n dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis]) # Create a list dets[n] = [x0, y0, x1, y1, score]\n ).astype(np.float32) # Basically, append the score to the rectangle coordinates\n keep = nms(dets, NMS_THRESH) # Non-maximum supression\n dets = dets[keep, :] # Remove non-maxima rectangles from dets\n vis_detections(im, cls, dets, ax, thresh=CONF_THRESH) # Visualize detections\n\n # Draw and show figure\n plt.axis('off')\n plt.tight_layout()\n plt.draw()\n if args.output_dir:\n \tplt.savefig(os.path.join(args.output_dir, os.path.splitext(os.path.basename(image_file))[0] + args.output_suffix + \".png\"))\n else:\n \tplt.show()\n\n # DEBUG: For each class, print it's max score\n for cls_ind, cls in enumerate(CLASSES[1:]):\n cls_ind += 1 # because we skipped background\n print(cls, scores[:, cls_ind].max())\n","sub_path":"object_detection_mynet.py","file_name":"object_detection_mynet.py","file_ext":"py","file_size_in_byte":7543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"323910681","text":"#!/usr/bin/python\r\n\r\nimport networkx\r\nimport obonet\r\nimport csv\r\nimport os.path\r\n\r\ndef return_latest_ontology():\r\n '''\r\n This function imports the latest updated version of the ChEBI ontology, and returns the version number and ontology.\r\n '''\r\n url = 'ftp://ftp.ebi.ac.uk/pub/databases/chebi/ontology/chebi.obo'\r\n # file = open('files/chebi_180.obo', encoding = 'utf8')\r\n graph = obonet.read_obo(url)\r\n # file.close()\r\n\r\n # Mapping from term ID to name\r\n id_to_name = {id_: data.get('name') for id_, data in graph.nodes(data=True)}\r\n version = graph.graph['data-version']\r\n return version, graph, id_to_name\r\n\r\ndef return_current_version():\r\n '''\r\n This function opens the ChEBI files with id's and names, and returns the version number used to update this file.\r\n '''\r\n file = open('files/ontology_version.txt', 'r')\r\n version = file.read()\r\n return version\r\n\r\ndef return_archived_ontology(version):\r\n '''\r\n This function returns an archived ontology based on the version number.\r\n '''\r\n url = 'ftp://ftp.ebi.ac.uk/pub/databases/chebi/archive/rel' + version + '/ontology/chebi.obo'\r\n graph = obonet.read_obo(url)\r\n return graph\r\n\r\ndef show_updates(graph_new, graph_old):\r\n '''\r\n This function compares two ontologies and returnes the difference in nodes and edges (chemicals and relations).\r\n '''\r\n difference_nodes = len(graph_new) - len(graph_old)\r\n difference_edges = graph_new.number_of_edges() - graph_old.number_of_edges()\r\n message = 'Newly updated ChEBI ontology contains %d new chemicals and %d new relations' % (difference_nodes, difference_edges)\r\n return message\r\n\r\ndef get_mass(node, graph):\r\n '''\r\n This function retrieves the mass of a molecule from the ontology.\r\n '''\r\n mass = \"-\"\r\n try:\r\n for value in graph.node[node]['property_value']:\r\n if 'mass' in value 
and 'monoisotopicmass' not in value:\r\n mass = value.split('\\\"')[1]\r\n except:\r\n pass\r\n return mass\r\n\r\ndef get_smiles(node, graph):\r\n '''\r\n This function retrieves Smiles from the ontology.\r\n '''\r\n smile = ''\r\n try:\r\n for value in graph.node[node]['property_value']:\r\n if 'smile' in value:\r\n smile = value.split('\\\"')[1]\r\n except:\r\n pass\r\n return smile\r\n\r\ndef get_relations(nodes, graph, has_role):\r\n '''\r\n This function recieves a list of ids for which parents with 'is a' and 'has role' relationships types need to be returned.\r\n It returns all ChEBI IDs of those parents in a dictionary with the child ChEBI ID as key.\r\n '''\r\n parent_to_key = dict()\r\n\r\n if has_role:\r\n for node in nodes:\r\n for child, parent, key in graph.out_edges(node, keys=True):\r\n if key == 'is_a' or key == 'has_role':\r\n try:\r\n parent_to_key[parent]\r\n except:\r\n parent_to_key[parent] = key\r\n else:\r\n for node in nodes:\r\n for child, parent, key in graph.out_edges(node, keys=True):\r\n if key == 'is_a':\r\n try:\r\n parent_to_key[parent]\r\n except:\r\n parent_to_key[parent] = key\r\n\r\n return parent_to_key\r\n\r\ndef get_superterms(id, graph, has_role):\r\n '''\r\n This function recieves an id of which all superterms of a certain relationships type needs to be returned.\r\n The function searches for 'is a' relationships (and 'has role' if has_role = True) until all possible relationships with other ChEBI IDs are found.\r\n It returns a list of these ChEBI IDs.\r\n '''\r\n list_relations = []\r\n nodes = [id]\r\n end = False\r\n\r\n while end == False:\r\n # get the 'is a' and 'has role' (if has_role == True) relationships for the list of ids\r\n parent_to_key = get_relations(nodes, graph, has_role)\r\n\r\n #if there are no 'is a' (or 'has role') relationships, end the search\r\n if len(parent_to_key) == 0:\r\n end = True\r\n else:\r\n # clear the list for a new search for relationships\r\n nodes = []\r\n\r\n for parent in parent_to_key.keys():\r\n # add the parents to the list for a new search for relationships\r\n nodes.append(parent)\r\n\r\n # add parents to list of relationships\r\n new_id = parent.split(\":\")[1]\r\n list_relations.append(new_id)\r\n\r\n return list_relations\r\n\r\ndef update_version_number(number):\r\n '''\r\n This function updates the ontology version text file with the version number of the ontology by which the files have been updated.\r\n '''\r\n file = open('files/ontology_version.txt', 'w')\r\n file.write(number)\r\n return\r\n\r\ndef read_file(file):\r\n '''\r\n This function reads a file and returns a dictionary of the CHEBI ID's.\r\n If the file does not exists, the file is made and an empty dictionary is returned.\r\n '''\r\n id_to_info = dict()\r\n\r\n if os.path.exists(file):\r\n f = open(file, 'r')\r\n lines = f.readlines()\r\n\r\n for line in lines:\r\n line_to_list = line.split('\\t')\r\n id = line_to_list[0]\r\n info = line_to_list[1].strip()\r\n id_to_info[id] = info\r\n else:\r\n f = open(file, 'w') # make file\r\n\r\n return id_to_info\r\n\r\ndef update_smile(file, graph):\r\n '''\r\n This function writes old and new id's with their smile to a .tsv file, and the new smiles will be written to a seperate text file.\r\n The new id's will be written to the file after the old id's, so that the order is similar to the new smiles text file.\r\n '''\r\n\r\n id_to_smile = read_file(file)\r\n new_smiles = dict()\r\n for key in graph.nodes():\r\n id = key.split(\":\")[1]\r\n try:\r\n id_to_smile[id]\r\n except:\r\n 
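# a KeyError here means this ChEBI ID is not in the existing file yet, so record its SMILES as a new entry\r\n            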
new_smiles[id] = get_smiles(key, graph)\r\n\r\n with open(file, 'w', newline='', encoding=\"utf-8\") as tsvfile: # first write old smiles to smiles file, and the new smiles to the smiles file\r\n writer = csv.writer(tsvfile, delimiter = '\\t')\r\n for id in id_to_smile.keys():\r\n smile = id_to_smile[id]\r\n if smile != '': # make sure no empty smiles are in the file\r\n writer.writerow([id, smile])\r\n for id in new_smiles.keys():\r\n smile = new_smiles[id]\r\n if smile != '': # make sure no empty smiles are in the file\r\n writer.writerow([id, smile])\r\n tsvfile.close()\r\n\r\n f = open('files/new_smiles.txt', 'w') # then add the new smiles to a text file (in the same order)\r\n for id in new_smiles.keys():\r\n smile = new_smiles[id]\r\n if smile != '': # no empty smiles in the file\r\n f.write(smile+'\\n')\r\n f.close()\r\n\r\ndef update_file(file, graph, id_to_name):\r\n '''\r\n This function recieves the file path, the corresponding file content in a dictionary, and the latest ontology.\r\n The keys in the latest ontology are CHEBI IDs. Every CHEBI ID is tested in the dictionary to determine if its present in the file.\r\n If it's not present, the CHEBI ID and its information (smile, name, or superterms) is added to the file.\r\n '''\r\n with open(file, 'w', newline='', encoding=\"utf-8\") as tsvfile:\r\n writer = csv.writer(tsvfile, delimiter = '\\t')\r\n for key in graph.nodes():\r\n id = key.split(\":\")[1]\r\n if file == 'files/ChEBI2Names.tsv':\r\n info = id_to_name[key]\r\n elif file == 'files/ChEBI2Superterms.tsv':\r\n info = get_superterms(key, graph, has_role=False)\r\n elif file == 'files/ChEBI2Superterms_roles.tsv':\r\n info = get_superterms(key, graph, has_role=True)\r\n elif file == 'files/ChEBI2Mass.tsv':\r\n info = get_mass(key, graph)\r\n writer.writerow([id, info])\r\n\r\ndef main():\r\n files = ['files/ChEBI2Names.tsv','files/ChEBI2Smiles.tsv', 'files/ChEBI2Superterms.tsv', 'files/ChEBI2Superterms_roles.tsv', 'files/ChEBI2Mass.tsv']\r\n current_version = return_current_version()\r\n latest_version, graph, id_to_name = return_latest_ontology() # graph = ontology\r\n\r\n if current_version == latest_version:\r\n print('files are up-to-date')\r\n else:\r\n print('files need updating')\r\n graph_old = return_archived_ontology(current_version)\r\n updates = show_updates(graph, graph_old)\r\n print(updates)\r\n\r\n for file in files:\r\n if file == 'files/ChEBI2Smiles.tsv':\r\n update_smile(file, graph)\r\n else:\r\n update_file(file, graph, id_to_name)\r\n print('%s updated' % file)\r\n\r\n update_version_number(latest_version)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"update_chebis.py","file_name":"update_chebis.py","file_ext":"py","file_size_in_byte":8837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"193266035","text":"class Kernel(object):\n def apply(self, **kwargs):\n raise NotImplementedError(\"Not implemented\")\n\n\nclass QuadraticKernel(Kernel):\n def apply(self, X, Y, x):\n ret = np.dot(X.T, x)\n ret += 1\n ret = np.power(ret, 2)\n\n d, T = x.shape\n ret = np.multiply(ret, np.repeat(Y[:, np.newaxis], repeats=T, axis=1))\n ret = np.sum(ret, axis=0)\n return ret\n\nclass RbfKernel(Kernel):\n def __init__(self, sigma):\n self.sigma = sigma\n super(RbfKernel, self).__init__()\n\n def apply(self, X, Y, x):\n d, T = x.shape\n _, N = X.shape\n ret = np.zeros((N, T))\n for i in xrange(T):\n point = x[:, i]\n cur = X - np.repeat(point[:, np.newaxis], repeats=N, axis=1)\n cur = 
np.linalg.norm(cur, axis=0)\n cur = -np.power(cur, 2)\n cur = np.exp(cur/(2*self.sigma**2))\n cur = np.multiply(cur, Y)\n ret[:, i] = cur\n\n ret = np.sum(ret, axis=0)\n return ret\n","sub_path":"data_files/basics.py","file_name":"basics.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"313846406","text":"# compute cache power\n\ndef compute_cache_power(num_cycle,\n l1_d_num_read,\n l1_d_num_read_hit,\n l1_d_num_read_miss,\n l1_d_num_write,\n l1_d_num_write_hit,\n l1_d_num_write_miss,\n l1_d_num_update,\n l1_d_num_insert,\n l1_i_num_read,\n l1_i_num_read_hit,\n l1_i_num_read_miss,\n l1_i_num_write,\n l1_i_num_write_hit,\n l1_i_num_write_miss,\n l1_i_num_update,\n l1_i_num_insert,\n l2_num_read,\n l2_num_read_hit,\n l2_num_read_miss,\n l2_num_write,\n l2_num_write_hit,\n l2_num_write_miss,\n l2_num_update,\n l2_num_insert,\n l3_num_read,\n l3_num_read_hit,\n l3_num_read_miss,\n l3_num_write,\n l3_num_write_hit,\n l3_num_write_miss,\n l3_num_update,\n l3_num_insert,\n num_refresh,\n access_mode,\n l1_d_leakage_power,\n l1_d_read_energy,\n l1_d_write_energy,\n l1_i_leakage_power,\n l1_i_read_energy,\n l1_i_write_energy,\n l2_leakage_power,\n l2_read_energy,\n l2_write_energy,\n l3_leakage_power,\n l3_read_energy,\n l3_write_energy,\n l3_tag_energy,\n l3_refresh_energy,\n l3_overhead_power,\n proc_freq,\n num_core,\n num_bank):\n fout1 = open('l1_d_power.dat', 'w') \n fout2 = open('l1_i_power.dat', 'w') \n fout3 = open('l2_power.dat', 'w') \n fout4 = open('l3_power.dat', 'w') \n fout5 = open('l1_d_energy.dat', 'w') \n fout6 = open('l1_i_energy.dat', 'w')\n fout7 = open('l2_energy.dat', 'w') \n fout8 = open('l3_energy.dat', 'w') \n\n time = 0\n if proc_freq == '1GHz': time = num_cycle * 1E-9 # (cycle time = 1ns)\n elif proc_freq == '2GHz': time = num_cycle * 5E-10\n elif proc_freq == '3GHz': time = num_cycle * 3E-10\n elif proc_freq == '4GHz': time = num_cycle * 2.5E-10\n\n # calculate energy\n l1_d_read_E = (l1_d_num_read*l1_d_read_energy) * 1E-6 # mJ\n l1_d_write_E = (l1_d_num_write*l1_d_write_energy + l1_d_num_insert*l1_d_write_energy) * 1E-6 # mJ\n l1_d_dynamic_E = l1_d_read_E + l1_d_write_E # mJ\n l1_d_leakage_E = time * l1_d_leakage_power * num_core # mJ\n\n l1_i_read_E = (l1_i_num_read*l1_i_read_energy) * 1E-6 # mJ\n l1_i_write_E = (l1_i_num_write*l1_i_write_energy + l1_i_num_insert*l1_i_write_energy) * 1E-6 # mJ\n l1_i_dynamic_E = l1_i_read_E + l1_i_write_E # mJ\n l1_i_leakage_E = time * l1_i_leakage_power * num_core # mJ\n\n l2_read_E = (l2_num_read*l2_read_energy) * 1E-6 # mJ\n l2_write_E = (l2_num_write*l2_write_energy + l2_num_insert*l2_write_energy) * 1E-6 # mJ\n l2_dynamic_E = l2_read_E + l2_write_E # mJ\n l2_leakage_E = time * l2_leakage_power * num_core # mJ\n\n # sequential access \n if (access_mode == \"seq\"):\n l3_read_E = l3_num_read_hit*l3_read_energy*1E-6\n l3_read_E += l3_num_write_hit*l3_read_energy*1E-6 # write hit = loading from L3\n l3_read_E += (l3_num_read_miss+l3_num_write_miss)*l3_tag_energy*1E-6 # mJ\n\n l3_write_E = l3_num_update*l3_write_energy*1E-6 # writeback from upper caches\n l3_write_E += l3_num_insert*l3_write_energy*1E-6 # insert from MEM\n\n # parallel access (FIXME)\n elif (access_mode == \"par\"):\n l3_read_E = (l3_num_read*l3_read_energy) * 1E-6 # mJ\n l3_write_E = (l3_num_write*l3_write_energy + l3_num_update*l3_write_energy) * 1E-6 # mJ\n\n l3_dynamic_E = l3_read_E + l3_write_E # mJ\n l3_leakage_E = time * l3_leakage_power * num_bank # mJ\n 
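# the L3 model additionally charges refresh energy per refresh operation plus a constant overhead power\r\n    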
l3_refresh_E = num_refresh * l3_refresh_energy * 1E-6 # mJ\r\n    l3_overhead_E = time * l3_overhead_power * 1E-6 # mJ\r\n\r\n    total_l1_d_E = l1_d_dynamic_E + l1_d_leakage_E\r\n    total_l1_i_E = l1_i_dynamic_E + l1_i_leakage_E\r\n    total_l2_E = l2_dynamic_E + l2_leakage_E\r\n    total_l3_E = l3_dynamic_E + l3_leakage_E + l3_refresh_E + l3_overhead_E\r\n\r\n    # calculate power\r\n    l1_d_dynamic_P = l1_d_dynamic_E / time # mW\r\n    l1_d_leakage_P = l1_d_leakage_E / time # mW\r\n\r\n    l1_i_dynamic_P = l1_i_dynamic_E / time # mW\r\n    l1_i_leakage_P = l1_i_leakage_E / time # mW\r\n\r\n    l2_dynamic_P = l2_dynamic_E / time # mW\r\n    l2_leakage_P = l2_leakage_E / time # mW\r\n\r\n    l3_dynamic_P = l3_dynamic_E / time # mW\r\n    l3_leakage_P = l3_leakage_E / time # mW\r\n    l3_refresh_P = l3_refresh_E / time # mW\r\n    l3_overhead_P = l3_overhead_E / time # mW\r\n\r\n    total_l1_d_P = l1_d_dynamic_P + l1_d_leakage_P\r\n    total_l1_i_P = l1_i_dynamic_P + l1_i_leakage_P\r\n    total_l2_P = l2_dynamic_P + l2_leakage_P\r\n    total_l3_P = l3_dynamic_P + l3_leakage_P + l3_refresh_P + l3_overhead_P\r\n    \r\n    print >>fout1, \"%f %f %f\" %(total_l1_d_P, l1_d_dynamic_P, l1_d_leakage_P)\r\n    print >>fout2, \"%f %f %f\" %(total_l1_i_P, l1_i_dynamic_P, l1_i_leakage_P)\r\n    print >>fout3, \"%f %f %f\" %(total_l2_P, l2_dynamic_P, l2_leakage_P)\r\n    print >>fout4, \"%f %f %f %f %f\" %(total_l3_P, l3_dynamic_P, l3_leakage_P, l3_refresh_P, l3_overhead_P)\r\n    print >>fout5, \"%f %f %f\" %(total_l1_d_E, l1_d_dynamic_E, l1_d_leakage_E)\r\n    print >>fout6, \"%f %f %f\" %(total_l1_i_E, l1_i_dynamic_E, l1_i_leakage_E)\r\n    print >>fout7, \"%f %f %f\" %(total_l2_E, l2_dynamic_E, l2_leakage_E)\r\n    print >>fout8, \"%f %f %f %f %f\" %(total_l3_E, l3_dynamic_E, l3_leakage_E, l3_refresh_E, l3_overhead_E)\r\n\r\n    fout1.close()\r\n    fout2.close()\r\n    fout3.close()\r\n    fout4.close()\r\n    fout5.close()\r\n    fout6.close()\r\n    fout7.close()\r\n    fout8.close()\r\n","sub_path":"my_util/cache_power_module/compute_cache_power.py","file_name":"compute_cache_power.py","file_ext":"py","file_size_in_byte":6246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"67849549","text":"# -*- coding:utf-8 -*-\n# @Desc : \n# @Author : Administrator\n# @Date : 2019-07-02 15:09\n\n# Dictionaries (dict):\n# 1. A dict is a mutable container that can store data of any type\n# 2. Each item in a dict is indexed by a 'key', unlike sequences, which are indexed by position\n# 3. The items in a dict have no ordering; dict storage is unordered\n# 4. Data in a dict is stored as key-value mapping pairs\n# 5. Keys in a dict must be unique, and only immutable types may be used as keys.\n\n# Dict literal syntax: wrap the pairs in {}, separate each key from its value with a colon (:), and separate the pairs with commas\n\n# Create an empty dict: d = {}\n# Create a non-empty dict:\n# d = {'name': 'tarena', 'age': 15}\n# d = {1:'壹', 2:'贰'}\n\n# The dict constructor: dict()\n# dict() creates an empty dict, equivalent to {}\n# dict(iterable) initializes a dict from an iterable object\n# dict(**kwargs) builds a dict from keyword arguments\n# Examples:\n# d = dict()\n# d = dict([('name', 'tarena'), ('age',15)])\n# d = dict(name='tarena', age=15)\n\n# Key indexing: the [] operator retrieves the 'value' stored under a 'key' in the dict\n# Syntax: dict[key] ---> fetches the data element\nd = dict(name='tarena', age=15)\nprint(d['age']) # 15\n\n# Adding/modifying dict entries: dict[key] = expression\n# Note: if the key already exists its value is replaced; if it does not, a new key-value pair is added\n# Example:\nd = {}\nd['name'] = 'tarena' # creates a new key-value pair\nd['age'] = 15 # creates a key-value pair\nd['age'] = 16 # modifies the existing pair\n\n# The del statement removes an entry from a dict\n# Syntax: del dict[key]\n# Example:\nd = {'name': 'china', 'pos': 'asia'}\ndel d['pos']\nprint(d)\ndel d['name']\nprint(d) # {}\n\n\n# The in / not in operators:\n# in tests whether a 'key' exists in the dict, returning True if it does and False otherwise\n# not in returns the opposite of in\n# Examples:\n# d = {'a': 1, 'b': 2}\n# 'a' in d # True\n# 1 in d # False\n# 100 not in d # True\n# 2 not in d # True\n\n# Iterating over a dict: a dict is iterable, but iteration only visits its keys\nd = {'name': 'tarena', (2002, 1, 1): 'birthday'}\nfor x in d:\n    print(x)\n\n## Common dict methods:\n# D.clear()\t empties the dict\n# D.pop(key)\t removes the key and returns its value\n# D.copy()\t returns a copy of D, one level deep (shallow copy)\n# D.update(D2)\t merges dict D2 into D; when a key exists in both, D2's value becomes the new value\n# D.get(key, default)\treturns the value for key, or default if the key is missing\n# D.keys()\t returns an iterable dict_keys set-like object\n# D.values()\t returns an iterable dict_values object\n# D.items()\t returns an iterable dict_items object\n\ndi = {\n    \"name\":\"zhangsan\",\n    \"age\":20,\n}\n\nfor k in di.keys():\n    print(k)\n\nfor v in di.values():\n    print(v)\n\nfor k,v in di.items():\n    print(k,v)\n\n","sub_path":"[03]Python-基础知识部分/07Python-字典dict操作.py","file_name":"07Python-字典dict操作.py","file_ext":"py","file_size_in_byte":2890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"29414385","text":"import csv\nfrom collections import namedtuple\nimport re\n\nwith open('data_files/stocks.csv') as f:\n    f_csv = csv.reader(f)\n    headers = next(f_csv)\n    for row in f_csv:\n        # process row\n        print(row)\nprint()\n\n# since the tuples in row are indexed with number, namedtuple can be useful to make it clearer\nwith open('data_files/stocks.csv') as f:\n    f_csv = csv.reader(f)\n    headings = next(f_csv)\n    Row = namedtuple('Row', headings)\n    for r in f_csv:\n        row = Row(*r)\n        # process row\n        print(row)\nprint()\n\n# can also read rows as a series of dictionaries\nwith open('data_files/stocks.csv') as f:\n    f_csv = csv.DictReader(f)\n    for row in f_csv:\n        # process row\n        print(row)\nprint()\n\n# DictWriter also exists that writes headers, one row at a time, or a sequence of rows\n\n# delimiter can be set in order to read tab delimited data (or anything else)\nwith open('data_files/stocks.tsv') as f:\n    f_tsv = csv.reader(f, delimiter='\\t')\n    for row in f_tsv:\n        print(row)\nprint()\n\n# if headers include invalid python identifier characters (like spaces or hyphens) they may have to be scrubbed by\n# substituting underscores\nwith open('data_files/bad_stocks.csv') as f:\n    f_csv = csv.reader(f)\n    headings = [re.sub('[^a-zA-Z_]', '_', h) for h in next(f_csv)]\n    Row = namedtuple('Row', headings)\n    for r in f_csv:\n        row = Row(*r)\n        # process row\n        print(row)\nprint()\n\n# note: csv will parse everything as a string, if other types are desired they must be explicitly converted\ncol_types = [str, float, str, str, float, int]\nwith open('data_files/stocks.csv') as f:\n    f_csv = csv.reader(f)\n    headers = next(f_csv)\n    for row in f_csv:\n        row = tuple(convert(value) for convert, value in zip(col_types, 
row))\n        print(row)\nprint()\n\n# can also convert selected fields of a dictionary\nfield_types = [('Price', float),\n               ('Change', float),\n               ('Volume', int)]\nwith open('data_files/stocks.csv') as f:\n    for row in csv.DictReader(f):\n        row.update((key, conversion(row[key])) for key, conversion in field_types)\n        print(row)\n","sub_path":"ch6_data_encoding/csv_data.py","file_name":"csv_data.py","file_ext":"py","file_size_in_byte":2145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"341099696","text":"import os\nimport cv2\nimport numpy as np\nimport pickle\nfrom PIL import Image\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nimg_dir = os.path.join(BASE_DIR, \"img\")\n\nface_cascade = cv2.CascadeClassifier('weights/haarcascade_frontalface_default.xml')\nrecognizer = cv2.face.createLBPHFaceRecognizer()\n\ncurrent_id = 0\nlabel_id = {}\ny_labels = []\nx_train = []\n\n\nfor root, dirs, files in os.walk(img_dir):\n    for f in files:\n        # Load images\n        if f.endswith(\"png\") or f.endswith(\"jpg\") or f.endswith(\"jpeg\"):\n            path = os.path.join(root, f)\n            label = os.path.basename(os.path.dirname(path)).replace(\" \", \"-\").lower()\n            # Check for name in label, create if not found.\n            if label not in label_id:\n                label_id[label] = current_id\n                current_id += 1\n            id_ = label_id[label]\n            # Open, resize, antialias, and convert image to np.array\n            pil_img = Image.open(path).convert(\"L\")\n            size = (250, 250)\n            final_image = pil_img.resize(size, Image.ANTIALIAS)\n            img_array = np.array(final_image, \"uint8\")\n            # Detect faces in images\n            faces = face_cascade.detectMultiScale(img_array, 1.3, 5)\n            # Find the region of interest\n            for (x,y,w,h) in faces:\n                roi = img_array[y:y+h, x:x+w]\n                # Append the result with the id\n                x_train.append(roi)\n                y_labels.append(id_)\n\n# Saving labels\nwith open(\"labels.pkl\", 'wb') as f:\n    pickle.dump(label_id, f)\n\n# Train and save recognizer to be used in face_recognition.py\nrecognizer.train(x_train, np.array(y_labels))\nrecognizer.save(\"trainer.yml\")","sub_path":"face_train.py","file_name":"face_train.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"469933024","text":"\"\"\"\nConcatenate the exofop search results stored in the data/exofop folder, as created\nby ExoFOP searches using the TIC IDs resulting from the get_tic_ids script. See\nthe file docstring in get_tic_ids.py for more information on how the ExoFOP search\nresults are queried.\n\"\"\"\nimport glob\nimport argparse\n\nimport pandas as pd\n\n# Command line arguments. For most uses, the defaults will be fine.\nparser = argparse.ArgumentParser(description='Merge multiple exofop search results into a single .csv file.')\nparser.add_argument('fname_base', type=str, default=None, help='File save path for output. Numbers will be appended to \\\nkeep the total number of lines less than 1000, which is the max length of a file that the ExoFOP page can read in at one time.')\nparser.add_argument('--exofop_folder', type=str, default='data/exofop/', help='Folder with ExoFOP search results.')\nparser.add_argument('--save_fname', type=str, default=None, help='Name of file to save output to. 
By default it matches the str in fname_base')\n\ndef get_files_like(fname_base, exofop_folder):\n '''\n Like get_newest_csv in X_ranking.py, but match the format of fname_base.\n '''\n fname_like = exofop_folder + fname_base + '*'\n list_of_files_with_base = glob.glob(fname_like)\n return list_of_files_with_base\n\ndef concat_exofop_search_results(fname_base, exofop_folder):\n \"\"\"\n Concatenate the exofop search results stored in the data/exofop folder, as created\n by ExoFOP searches using the TIC IDs resulting from the get_tic_ids script. See\n the file docstring in get_tic_ids.py for more information on how to exofop search\n results are queried.\n \"\"\"\n\n files = get_files_like(fname_base, exofop_folder)\n print(f\"Concatenating {len(files)} files into a single .csv\")\n return pd.concat([pd.read_csv(f, comment='#') for f in files], sort=False)\n\nif __name__ == \"__main__\":\n\n args = parser.parse_args()\n fname_base = args.fname_base\n exofop_folder = args.exofop_folder\n\n save_fname = args.save_fname\n if save_fname is None:\n save_fname = exofop_folder + args.fname_base + '.csv'\n\n fname_base += '_' # This prevents us from concatenating a newly saved output .csv file to itself.\n\n exofop_df = concat_exofop_search_results(fname_base, exofop_folder)\n exofop_df.to_csv(save_fname, index=False)\n\n print(f\"The concatenated ExoFOP .csv file was saved to {save_fname}.\")\n","sub_path":"concat_exofop_search_results.py","file_name":"concat_exofop_search_results.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"317537334","text":"import argparse\nimport collections\nimport glob\nimport multiprocessing\nimport os\nimport traceback\n\nimport numpy as np\nfrom pydub import AudioSegment\nfrom tqdm import tqdm\nfrom webrtcvad import Vad\n\n\ndef get_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('src_path', metavar='DIR', help='root directory containing mp3 files to index')\n parser.add_argument('dst_path', metavar='DIR', help='output directory')\n parser.add_argument('--src-ext', default='mp3', type=str, metavar='EXT', help='extension to look for')\n parser.add_argument('--dst-ext', default='flac', type=str, metavar='EXT', help='extension to convert to')\n return parser\n\n\nPreprocessResult = collections.namedtuple('PreprocessResult', [\n 'src_duration',\n 'dst_durations'\n])\n\n\n\"\"\" source: https://github.com/wiseman/py-webrtcvad/blob/master/example.py \"\"\"\ndef frame_generator(frame_duration_ms, audio, sample_rate):\n \"\"\"Generates audio frames from PCM audio data.\n Takes the desired frame duration in milliseconds, the PCM data, and\n the sample rate.\n Yields Frames of the requested duration.\n \"\"\"\n n = int(sample_rate * (frame_duration_ms / 1000.0) * 2)\n offset = 0\n while offset + n < len(audio):\n yield audio[offset:offset + n]\n offset += n\n\n\n\"\"\" source: https://github.com/wiseman/py-webrtcvad/blob/master/example.py \"\"\"\ndef vad_collector(sample_rate, frame_duration_ms, padding_duration_ms, aggressiveness, audio):\n \"\"\"Filters out non-voiced audio frames.\n Given a webrtcvad.Vad and a source of audio frames, yields only\n the voiced audio.\n Uses a padded, sliding window algorithm over the audio frames.\n When more than 90% of the frames in the window are voiced (as\n reported by the VAD), the collector triggers and begins yielding\n audio frames. 
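The 0.9 ratios used below act as hysteresis, so brief blips of speech or silence do not flip the trigger state. 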
Then the collector waits until 90% of the frames in\n the window are unvoiced to detrigger.\n The window is padded at the front and back to provide a small\n amount of silence or the beginnings/endings of speech around the\n voiced frames.\n Arguments:\n sample_rate - The audio sample rate, in Hz.\n frame_duration_ms - The frame duration in milliseconds.\n padding_duration_ms - The amount to pad the window, in milliseconds.\n vad - An instance of webrtcvad.Vad.\n frames - a source of audio frames (sequence or generator).\n Returns: A generator that yields PCM audio data.\n \"\"\"\n vad = Vad(aggressiveness)\n num_padding_frames = int(padding_duration_ms / frame_duration_ms)\n # We use a deque for our sliding window/ring buffer.\n ring_buffer = collections.deque(maxlen=num_padding_frames)\n # We have two states: TRIGGERED and NOTTRIGGERED. We start in the\n # NOTTRIGGERED state.\n triggered = False\n\n voiced_frames = []\n for frame in frame_generator(30, audio, sample_rate):\n is_speech = vad.is_speech(frame, sample_rate)\n\n if not triggered:\n ring_buffer.append((frame, is_speech))\n num_voiced = len([f for f, speech in ring_buffer if speech])\n # If we're NOTTRIGGERED and more than 90% of the frames in\n # the ring buffer are voiced frames, then enter the\n # TRIGGERED state.\n if num_voiced > 0.9 * ring_buffer.maxlen:\n triggered = True\n # We want to yield all the audio we see from now until\n # we are NOTTRIGGERED, but we have to start with the\n # audio that's already in the ring buffer.\n for f, s in ring_buffer:\n voiced_frames.append(f)\n ring_buffer.clear()\n else:\n # We're in the TRIGGERED state, so collect the audio data\n # and add it to the ring buffer.\n voiced_frames.append(frame)\n ring_buffer.append((frame, is_speech))\n num_unvoiced = len([f for f, speech in ring_buffer if not speech])\n # If more than 90% of the frames in the ring buffer are\n # unvoiced, then enter NOTTRIGGERED and yield whatever\n # audio we've collected.\n if num_unvoiced > 0.9 * ring_buffer.maxlen:\n triggered = False\n yield b''.join([f for f in voiced_frames])\n ring_buffer.clear()\n voiced_frames = []\n # If we have any leftover voiced audio when we run out of input,\n # yield it.\n if voiced_frames:\n yield b''.join([f for f in voiced_frames])\n\n\ndef split_audio(src_path, dst_path):\n assert os.path.isfile(src_path)\n file_ext = os.path.splitext(src_path)[-1]\n audio_segment = AudioSegment.from_file(src_path, format=file_ext.replace('.', ''))\n if audio_segment.frame_rate != 16000:\n audio_segment = audio_segment.set_frame_rate(16000)\n if audio_segment.sample_width != 2:\n audio_segment = audio_segment.set_sample_width(2)\n if audio_segment.channels != 1:\n audio_segment = audio_segment.set_channels(1)\n chunk_durations = []\n for i, raw_chunk in enumerate(vad_collector(sample_rate=audio_segment.frame_rate, frame_duration_ms=30,\n padding_duration_ms=300, aggressiveness=3, audio=audio_segment.raw_data)):\n # assure even chunk length (must be a multiple of (sample_width*channels), which is 2)\n if len(raw_chunk) & 1 != 0:\n raw_chunk = raw_chunk[:-1]\n chunk = AudioSegment(raw_chunk, frame_rate=audio_segment.frame_rate, sample_width=2, channels=1)\n chunk.export(f'{os.path.splitext(dst_path)[0]}.{i}.flac', format='flac')\n chunk_durations.append(float(chunk.duration_seconds))\n return PreprocessResult(src_duration=float(audio_segment.duration_seconds), dst_durations=chunk_durations)\n\n\ndef convert_audio(src_path, dst_dir):\n src_dirname = os.path.basename(os.path.dirname(src_path))\n 
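# e.g. <root>/speaker1/clip.mp3 -> src_dirname 'speaker1' (illustrative path), reused below to mirror the source layout\n    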
src_basename_no_ext = os.path.basename(os.path.splitext(src_path)[0])\n # keep sub-folder structure to prevent filename conflicts:\n dst_path = os.path.join(dst_dir, src_dirname, f'{src_basename_no_ext}.flac')\n os.makedirs(os.path.join(dst_dir, src_dirname), exist_ok=True)\n if not os.path.exists(dst_path):\n try:\n return split_audio(src_path, dst_path)\n except Exception as e:\n traceback.print_exc()\n print(f\"Failed to convert {src_path} to {dst_path}: {e}\")\n return None\n\n\ndef main(args):\n dir_path = os.path.realpath(args.src_path)\n dst_dir = os.path.realpath(args.dst_path)\n search_path = os.path.join(dir_path, '**/*.' + args.src_ext)\n src_paths = glob.glob(search_path, recursive=True)\n\n os.makedirs(dst_dir, exist_ok=True)\n\n progress_bar = tqdm(total=len(src_paths))\n\n src_durations, dst_durations = [], []\n\n def on_result(preprocess_result: PreprocessResult):\n if preprocess_result is not None:\n src_durations.append(preprocess_result.src_duration)\n dst_durations.extend(preprocess_result.dst_durations)\n progress_bar.update()\n\n with multiprocessing.Pool(processes=os.cpu_count()) as pool:\n results = []\n for src_path in src_paths:\n src_path = os.path.realpath(src_path)\n results.append(pool.apply_async(convert_audio, args=(src_path, dst_dir), callback=on_result))\n for result in results:\n result.wait()\n\n print(\"Raw audio stats:\")\n print(\"Mean length: \", np.mean(src_durations), \"(Std: \", np.std(src_durations), \")\")\n print(\"Median length: \", np.median(src_durations))\n print(\"Min length: \", np.min(src_durations))\n print(\"Max length: \", np.max(src_durations))\n\n print(\"Processed audio stats:\")\n print(\"Mean length: \", np.mean(dst_durations), \"Std:\", np.std(dst_durations))\n print(\"Median length: \", np.median(dst_durations))\n print(\"Min length: \", np.min(dst_durations))\n print(\"Max length: \", np.max(dst_durations))\n\n\nif __name__ == '__main__':\n parser = get_parser()\n args = parser.parse_args()\n main(args)\n","sub_path":"examples/wav2vec/preprocess_audios.py","file_name":"preprocess_audios.py","file_ext":"py","file_size_in_byte":7996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"280809388","text":"import datetime\nimport logging\nfrom typing import Any, Dict, List, Tuple\n\nimport flask_restless\nimport gunicorn.app.base\nfrom dbcat import Catalog\nfrom dbcat.catalog import CatColumn\nfrom dbcat.catalog.db import DbScanner\nfrom dbcat.catalog.models import (\n CatSchema,\n CatSource,\n CatTable,\n ColumnLineage,\n Job,\n JobExecution,\n JobExecutionStatus,\n)\nfrom flask import Flask\nfrom flask_restful import Api, Resource, reqparse\n\nfrom data_lineage.parser import extract_lineage, parse, visit_dml_query\n\n\nclass Kedro(Resource):\n def __init__(self, catalog: Catalog):\n self._catalog = catalog\n self._parser = reqparse.RequestParser()\n self._parser.add_argument(\n \"job_ids\", action=\"append\", help=\"List of job ids for a sub graph\"\n )\n\n def get(self):\n nodes = []\n edges = []\n\n args = self._parser.parse_args()\n column_edges = self._catalog.get_column_lineages(args[\"job_ids\"])\n for edge in column_edges:\n nodes.append(self._column_info(edge.source))\n nodes.append(self._column_info(edge.target))\n nodes.append(self._job_info(edge.job_execution.job))\n edges.append(\n {\n \"source\": \"column:{}\".format(edge.source_id),\n \"target\": \"task:{}\".format(edge.job_execution.job_id),\n }\n )\n edges.append(\n {\n \"source\": 
\"task:{}\".format(edge.job_execution.job_id),\n \"target\": \"column:{}\".format(edge.target_id),\n }\n )\n\n return {\"nodes\": nodes, \"edges\": edges}\n\n @staticmethod\n def _column_info(node: CatColumn):\n return {\n \"id\": \"column:{}\".format(node.id),\n \"name\": \".\".join(node.fqdn),\n \"type\": \"data\",\n }\n\n @staticmethod\n def _job_info(node: Job):\n return {\"id\": \"task:{}\".format(node.id), \"name\": node.name, \"type\": \"task\"}\n\n\nclass Scanner(Resource):\n def __init__(self, catalog: Catalog):\n self._catalog = catalog\n self._parser = reqparse.RequestParser()\n self._parser.add_argument(\"id\", required=True, help=\"ID of the resource\")\n\n def post(self):\n args = self._parser.parse_args()\n logging.debug(\"Args for scanning: {}\".format(args))\n source = self._catalog.get_source_by_id(int(args[\"id\"]))\n DbScanner(self._catalog, source).scan()\n return \"Scanned {}\".format(source.fqdn), 200\n\n\nclass Parser(Resource):\n def __init__(self, catalog: Catalog):\n self._catalog = catalog\n self._parser = reqparse.RequestParser()\n self._parser.add_argument(\"query\", required=True, help=\"Query to parse\")\n self._parser.add_argument(\"name\", help=\"Name of the ETL job\")\n\n def post(self):\n args = self._parser.parse_args()\n logging.debug(\"Parse query: {}\".format(args[\"query\"]))\n parsed = parse(args[\"query\"], args[\"name\"])\n\n chosen_visitor = visit_dml_query(self._catalog, parsed)\n\n if chosen_visitor is not None:\n job_execution = extract_lineage(self._catalog, chosen_visitor, parsed)\n\n return (\n {\n \"data\": {\n \"id\": job_execution.id,\n \"type\": \"job_executions\",\n \"attributes\": {\n \"job_id\": job_execution.job_id,\n \"started_at\": job_execution.started_at.strftime(\n \"%Y-%m-%d %H:%M:%S\"\n ),\n \"ended_at\": job_execution.ended_at.strftime(\n \"%Y-%m-%d %H:%M:%S\"\n ),\n \"status\": job_execution.status.name,\n },\n }\n },\n 200,\n )\n\n return {\"data\": {\"error\": \"Query is not a DML Query\"}}, 400\n\n\nclass Server(gunicorn.app.base.BaseApplication):\n def __init__(self, app, options=None):\n self.options = options or {}\n self.application = app\n super().__init__()\n\n def load_config(self):\n config = {\n key: value\n for key, value in self.options.items()\n if key in self.cfg.settings and value is not None\n }\n for key, value in config.items():\n self.cfg.set(key.lower(), value)\n\n def load(self):\n return self.application\n\n\ndef job_execution_serializer(instance: JobExecution, only: List[str]):\n return {\n \"id\": instance.id,\n \"type\": \"job_executions\",\n \"attributes\": {\n \"job_id\": instance.job_id,\n \"started_at\": instance.started_at.strftime(\"%Y-%m-%d %H:%M:%S\"),\n \"ended_at\": instance.ended_at.strftime(\"%Y-%m-%d %H:%M:%S\"),\n \"status\": instance.status.name,\n },\n }\n\n\ndef job_execution_deserializer(data: Dict[\"str\", Any]):\n attributes = data[\"data\"][\"attributes\"]\n logging.debug(attributes)\n job_execution = JobExecution()\n job_execution.job_id = int(attributes[\"job_id\"])\n job_execution.started_at = datetime.datetime.strptime(\n attributes[\"started_at\"], \"%Y-%m-%d %H:%M:%S\"\n )\n job_execution.ended_at = datetime.datetime.strptime(\n attributes[\"ended_at\"], \"%Y-%m-%d %H:%M:%S\"\n )\n job_execution.status = (\n JobExecutionStatus.SUCCESS\n if attributes[\"status\"] == \"SUCCESS\"\n else JobExecutionStatus.SUCCESS\n )\n\n logging.debug(job_execution)\n logging.debug(job_execution.status == JobExecutionStatus.SUCCESS)\n return job_execution\n\n\ndef create_server(\n 
catalog_options: Dict[str, str], options: Dict[str, str], is_production=True\n) -> Tuple[Any, Catalog]:\n logging.debug(catalog_options)\n catalog = Catalog(**catalog_options)\n\n app = Flask(__name__)\n\n # Create CRUD APIs\n methods = [\"DELETE\", \"GET\", \"PATCH\", \"POST\"]\n url_prefix = \"/api/v1/catalog\"\n api_manager = flask_restless.APIManager(app, catalog.scoped_session)\n api_manager.create_api(\n CatSource,\n methods=methods,\n url_prefix=url_prefix,\n additional_attributes=[\"fqdn\"],\n )\n api_manager.create_api(\n CatSchema,\n methods=methods,\n url_prefix=url_prefix,\n additional_attributes=[\"fqdn\"],\n )\n api_manager.create_api(\n CatTable,\n methods=methods,\n url_prefix=url_prefix,\n additional_attributes=[\"fqdn\"],\n )\n api_manager.create_api(\n CatColumn,\n methods=methods,\n url_prefix=url_prefix,\n additional_attributes=[\"fqdn\"],\n )\n api_manager.create_api(Job, methods=methods, url_prefix=url_prefix)\n api_manager.create_api(\n JobExecution,\n methods=methods,\n url_prefix=url_prefix,\n serializer=job_execution_serializer,\n deserializer=job_execution_deserializer,\n )\n api_manager.create_api(\n ColumnLineage,\n methods=methods,\n url_prefix=url_prefix,\n collection_name=\"column_lineage\",\n )\n\n restful_manager = Api(app)\n restful_manager.add_resource(\n Kedro, \"/api/main\", resource_class_kwargs={\"catalog\": catalog}\n )\n restful_manager.add_resource(\n Scanner,\n \"{}/scanner\".format(url_prefix),\n resource_class_kwargs={\"catalog\": catalog},\n )\n restful_manager.add_resource(\n Parser, \"/api/v1/parser\", resource_class_kwargs={\"catalog\": catalog}\n )\n\n for rule in app.url_map.iter_rules():\n rule_methods = \",\".join(rule.methods)\n logging.debug(\"{:50s} {:20s} {}\".format(rule.endpoint, rule_methods, rule))\n\n if is_production:\n return Server(app=app, options=options), catalog\n else:\n return app, catalog\n","sub_path":"data_lineage/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":7821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"565188953","text":"import elasticsearch_dsl\nimport matplotlib.pyplot as plt\n\nimport elasticsearch_client as es_client\nfrom common_task_functions import get_elastic_object, get_active_users_filter, index, save_path\n\n\ndef has_city(vk_elastic_db: es_client.VkDataDatabaseClient, is_need_print=False, is_need_plot=True,\n is_need_active=False, days_delta=20):\n aggs_name = \"has_city\"\n title = \"has city\"\n if is_need_active:\n title += \" active\"\n es = get_elastic_object(vk_elastic_db)\n s = elasticsearch_dsl.Search(using=es, index=index)\n if is_need_active:\n s = get_active_users_filter(es, index, s, days_delta=days_delta)\n s = s.filter()\n size = 10000\n missing_str = \"missing\"\n a = elasticsearch_dsl.A('terms', field=\"city.title.keyword\", missing=missing_str, size=size)\n s.aggs.bucket(aggs_name, a)\n response = s.execute()\n\n data = {\n \"has city\": 0,\n \"missing city\": 0\n }\n for hit in response.aggregations[aggs_name].buckets:\n if hit.key == missing_str:\n data[\"missing city\"] += hit.doc_count\n else:\n data[\"has city\"] += hit.doc_count\n x_axis = [key for key in data]\n y_axis = [data[key] for key in data]\n\n if is_need_print:\n print(title)\n for i in range(len(x_axis)):\n print(f\"{i + 1}\\t{x_axis[i]} {y_axis[i]}\")\n\n if is_need_plot:\n sizes = [elem / sum(y_axis) for elem in y_axis]\n fig, ax = plt.subplots(1, 1)\n ax.set_title(title)\n ax.pie(sizes, labels=x_axis, autopct='%1.1f%%', 
startangle=90)\n # plt.show()\n fig.savefig(f\"{save_path}/{title.replace(' ', '_')}.png\", dpi=300, format='png', bbox_inches='tight')\n plt.close(fig)\n","sub_path":"libs/task_has_city.py","file_name":"task_has_city.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"415414791","text":"import cv2\nimport pytesseract\nfrom pytesseract import Output\nimport os\nimport json\nimport numpy as np\nimport csv\nimport psycopg2\nimport matplotlib.pyplot as plt\n\nfinalJson = []\n\ndef convertToPng():\n path = os.getcwd()\n if('images' not in path):\n os.chdir(path)\n for file in os.listdir():\n if file.endswith(\".jpg\"):\n file_path = f\"{path}/{file}\"\n file_name = os.path.basename(file_path)\n file_name = file_name.replace(\".jpg\", \"\")\n png =cv2.imread(file_path, 1)\n cv2.imwrite(file_name+\".png\", png)\n\n# get grayscale image\ndef get_grayscale(image):\n return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n# noise removal\ndef remove_noise(image):\n return cv2.medianBlur(image,5)\n \n#thresholding\ndef thresholding(image):\n return cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]\n\n\n#opening - erosion followed by dilation\ndef opening_image(image):\n kernel = np.ones((5,5),np.uint8)\n return cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel)\n\n#canny edge detection\ndef canny_image(image):\n return cv2.Canny(image, 100, 200)\n\ndef ocr(file_path):\n image = cv2.imread(file_path)\n #Optimization techniques | Did not work in this case\n # gray = get_grayscale(image)\n # thresh = thresholding(gray)\n # opening = opening_image(thresh)\n # canny = canny_image(gray)\n custom_config = r'--oem 3 --psm 6'\n text = pytesseract.image_to_string(image,config=custom_config)\n # print(text)\n length = len(text.splitlines())\n data = text.splitlines()\n \n #Remove Burial Permit\n if data[0].find('Burial') >= 0:\n data.pop(1)\n #Remove first column\n if data[0].find('DAINTY') == -1:\n data.pop(0)\n #Removing\n if data[0].find('_') >= 0:\n data.pop(0)\n #Remove empty list item\n removeEmptyItem(data)\n #Get name\n name = getName(data[0])\n\n #Iterating and identifying\n index = 0\n for i in data:\n i = i.lower()\n if i.find('oath of') >= 0 or i.find('oate of') >= 0 or i.find('date of') >= 0 or i.find('dati of') >= 0:\n data.pop(index)\n if checkForRelation(i):\n relation = checkForRelation(i)\n if getAddress(i):\n address = getAddress(i)\n if getPincode(i):\n pincode = getPincode(i)\n index = index + 1\n if(getState(i)):\n state = getState(i)\n\n #Remove empty rows\n data = removeEmpty(data)\n\n #form json\n # print(name+\" : \"+relation+\" : \"+address+\" : \"+pincode)\n jsonItem = {\"NAME\":name,\"RELATION\":relation,\"ADDRESS\":address,\"STATE\":state,\"PINCODE\":pincode}\n finalJson.append(jsonItem)\n\ndef removeEmptyItem(data):\n index = 0\n for item in data:\n if len(item) == 0:\n data.pop(index)\n index = index + 1\n\ndef getName(col):\n if col.find('NABA') >= 0:\n return 'DAILEY, JAE NABA'\n elif col.find('SARA') >= 0:\n return 'DAILEY, SARA'\n elif col.find('KATHELEEN') >= 0:\n return 'DALAS ,KATHELEEN'\n elif col.find('KATHERINE') >= 0:\n return 'DAINTY, KATHERINE'\n elif col.find('N/A') >= 0:\n return col.replace(\"N/A\", \"\")\n elif col.find('NA') >= 0:\n return col.replace(\"NA\", \"\")\n else:\n return col\n\ndef checkForRelation(rel):\n if rel.find('sister') >= 0:\n return 'Sister'\n elif rel.find('son') >= 0:\n return 'Son'\n elif rel.find('daughter') >= 0:\n return 
'Daughter'\n elif rel.find('mom') >= 0:\n return 'Mom'\n elif rel.find('uncle') >= 0:\n return 'Uncle'\n elif rel.find('father') >= 0:\n return 'Father'\n elif rel.find('wife') >= 0:\n return 'Wife'\n elif rel.find('granddaughter') >= 0:\n return 'GrandDaughter'\n elif rel.find('choni') >= 0:\n return 'Parents'\n\ndef getAddress(col):\n col = col.upper()\n if col.find('IL_60637') >= 0:\n return '6253 S. MICHIGAN AVE. IL 60637'\n elif col.find('CHICAGO') >= 0 or col.find('CHGO') >= 0 or col.find('SPRINGS') >= 0 or col.find('HARVEY') >= 0:\n col = col.replace('\\u00b0','')\n return col\n elif col.find('PRAIRIE') >= 0:\n return '371 Fourth St,Prairie du Sac, Wis 53578'\n elif col.find('N.Y') >= 0:\n return col\n elif col.find('VALPARISO') >= 0:\n return col\n \ndef getState(col):\n col = col.upper()\n if col.find('IL_60637') >= 0:\n return 'Illinois'\n elif col.find('CHICAGO') >= 0 or col.find('CHGO') >= 0 or col.find('SPRINGS') >= 0 or col.find('HARVEY') >= 0:\n col = col.replace('\\u00b0','')\n return 'Illinois'\n elif col.find('PRAIRIE') >= 0:\n return 'Wisconsin'\n elif col.find('N.Y') >= 0:\n return 'New York'\n elif col.find('VALPARISO') >= 0:\n return 'Indiana'\n\ndef getPincode(col):\n col = col.upper()\n if col.find('IL_60637') >= 0:\n return '60637'\n elif col.find('CHICAGO') >= 0 or col.find('CHGO') >= 0 or col.find('SPRINGS') >= 0 or col.find('HARVEY') >= 0:\n return processPin(col)\n elif col.find('PRAIRIE') >= 0:\n return '53578'\n elif col.find('N.Y') >= 0:\n return processPin(col)\n elif col.find('VALPARISO') >= 0:\n return processPin(col)\n\ndef processPin(col):\n splitText = col.split(' ')\n for item in splitText:\n if item.isnumeric() and len(item) == 5 and int(item) > int(11264):\n return item\n \n\ndef removeEmpty(data):\n index = 0\n for i in data:\n if len(data[index]) == 0:\n data.pop(index)\n index = index + 1\n return data\n\ndef convertToCSV(filename):\n with open(filename) as json_file:\n data = json.load(json_file)\n csvData = data\n # Open a file for writing\n data_file = open('final_ocr_data.csv', 'w')\n \n # create the csv writer object\n csv_writer = csv.writer(data_file)\n \n # Counter variable used for writing\n # headers to the CSV file\n count = 0\n for item in csvData:\n if count == 0:\n # Writing headers of CSV file\n header = item.keys()\n csv_writer.writerow(header)\n count += 1\n # Writing data of CSV file\n csv_writer.writerow(item.values())\n data_file.close()\n\ndef getZoneDate(file_path):\n image = cv2.imread(file_path)\n x = 700\n y = 120\n w = 120\n h = 50\n rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n results = pytesseract.image_to_data(rgb, output_type= Output.DICT)\n # loop over each of the individual text localizations\n for i in range(0, len(results[\"text\"])):\n # extract the bounding box coordinates of the text region from\n # the current result\n x = results[\"left\"][i]\n y = results[\"top\"][i]\n w = results[\"width\"][i]\n h = results[\"height\"][i]\n \n # extract the OCR text itself along with the confidence of the\n # text localization\n text = results[\"text\"][i]\n conf = int(results[\"conf\"][i])\n # filter out weak confidence text localizations\n if conf > 60:\n # display the confidence and text to our terminal\n print(\"Confidence: {}\".format(conf))\n print(\"Text: {}\".format(text))\n print(\"\")\n # strip out non-ASCII text so we can draw the text on the image\n # using OpenCV, then draw a bounding box around the text along\n # with the text itself\n text = \"\".join([c if ord(c) < 128 else \"\" for c in text]).strip()\n 
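# (BGR colors: (0, 255, 0) draws a green box and (0, 0, 255) red label text 10 px above its top-left corner)\n            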
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)\n cv2.putText(image, text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX,\n 1.2, (0, 0, 255), 3)\n # show the output image\n plt.figure()\n plt.imshow(image) \n plt.show()\n\n\ndef readAll():\n print(\"-- Processing Images --\")\n path = os.getcwd()\n if(path.find('images') < 0):\n os.chdir(path+'/images')\n index = 0\n for file in os.listdir():\n if file.endswith(\".png\"):\n file_path = f\"{path}/{file}\"\n ocr(file_path)\n # print(str(finalJson))\n json_file_name = 'final_ocr_data.json'\n with open(json_file_name, 'w') as outfile:\n json.dump(finalJson, outfile)\n print(\"-- Output saved to system as .json file --\")\n print(\"-- Inserting data into Postgres table --\")\n insertData(finalJson)\n\n convertToCSV(json_file_name)\n print(\"-- Output saved--\")\n\ndef readZone():\n print(\"-- Processing Images --\")\n path = os.getcwd()\n if(path.find('images') < 0):\n os.chdir(path+'/images')\n index = 0\n for file in os.listdir():\n if file.endswith(\".png\"):\n file_path = f\"{path}/{file}\"\n getZoneDate(file_path)\n\ntableName = 'PERSON'\ncolumns = ['NAME','RELATION','ADDRESS','STATE','PINCODE']\n\ndef createDB():\n con = psycopg2.connect(dbname='ocr',user='yoar', host='localhost' ,password='565656')\n cur = con.cursor()\n try:\n cur.execute(\"CREATE TABLE \"+tableName+\"(id serial PRIMARY KEY, NAME varchar, RELATION varchar, ADDRESS varchar, STATE varchar, PINCODE integer)\")\n except psycopg2.Error:\n cur.close()\n con.close()\n return\n finally:\n con.commit()\n cur.close()\n con.close()\n \n\ndef insertData(jsondata):\n con = psycopg2.connect(dbname='ocr',user='yoar', host='localhost' ,password='565656')\n cur = con.cursor()\n for item in jsondata:\n my_data = [item[field] for field in columns]\n for i, v in enumerate(my_data):\n if isinstance(v, dict):\n my_data[i] = json.dumps(v)\n insert_query = \"INSERT INTO \"+tableName+\" (NAME,RELATION,ADDRESS,STATE,PINCODE) VALUES (%s, %s, %s, %s,%s)\"\n cur.execute(insert_query, tuple(my_data))\n con.commit()\n cur.close()\n con.close()\n\n \n\n#Run only the first time if images are in jpg\n# convertToPng()\n\ncreateDB()\n#Read image and store as CSV\nreadAll()\n\n# Experimenting with zonal read\n# readZone()","sub_path":"PyTesseract_tutorial.py","file_name":"PyTesseract_tutorial.py","file_ext":"py","file_size_in_byte":9927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"219145806","text":"from selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\n# from selenium.webdriver.support.ui import Select\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as ec\r\nfrom pprint import pprint\r\nfrom pymongo import MongoClient\r\nclient = MongoClient('mongodb://127.0.0.1:27017')\r\ndb = client['mail_letters']\r\nldb = db.mail_letters\r\n\r\ndriver = webdriver.Chrome()\r\n\r\ndriver.get('https://mail.ru/')\r\nassert \"Mail.ru: почта\" in driver.title\r\n\r\nelem = driver.find_element_by_id(\"mailbox:login\")\r\nelem.send_keys('study.ai_172')\r\nelem = driver.find_element_by_id(\"mailbox:password\")\r\nelem.send_keys('Password172')\r\nelem.send_keys(Keys.RETURN)\r\n\r\nelem = WebDriverWait(driver, 10).until(ec.presence_of_element_located((By.CLASS_NAME, \"dataset-letters\")))\r\n\r\nassert \"Входящие - Почта Mail.ru\" in driver.title\r\n\r\n# elem = driver.find_element_by_class_name('llc')\r\n# attr_value 
= elem.get_attribute(\"href\")\r\n# print(attr_value)\r\nelems = driver.find_elements_by_class_name('llc')\r\nletters = []\r\nfor letr in elems:\r\n ltr_url = letr.get_attribute(\"href\")\r\n# letters\r\n letr_from = letr.find_element_by_class_name('ll-crpt').get_attribute(\"title\")\r\n letr_tema = letr.find_element_by_class_name('llc__subject').text\r\n letr_date = letr.find_element_by_class_name('llc__item_date').get_attribute(\"title\")\r\n# letr_tema = letr_from. #.get_attribute(\"title\")\r\n\r\n letters.append({\r\n 'from': letr_from,\r\n 'tema': letr_tema,\r\n 'date': letr_date,\r\n 'text': ltr_url\r\n })\r\n\r\npprint(letters)\r\n\r\nfor letr in letters:\r\n driver.get(letr['text'])\r\n elem = WebDriverWait(driver, 10).until(ec.presence_of_element_located((By.CLASS_NAME, \"letter-body\")))\r\n letr_text = driver.find_element_by_class_name('letter-body').text.strip()\r\n letr['text'] = letr_text\r\n\r\nprint('=======================================')\r\nprint('=======================================')\r\nprint('=======================================')\r\npprint(letters)\r\n\r\nldb.insert_many(letters) # загрузка в базу\r\n\r\ndriver.quit()","sub_path":"mail_ru.py","file_name":"mail_ru.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"572674806","text":"# - *- coding: utf- 8 - *-\n__author__ = 'abhinandan'\nimport sys\nimport subprocess\nimport threading\nimport csv\nflag=0\ndef scheduler():\n global flag\n print(\"Scheduler Started\")\n with open('source.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n next(readCSV)\n for row in readCSV:\n jarpath = str(sys.argv[1])\n retailer=row[1].replace(\" \", \"\").replace(\"'\", \"\").replace(\".\", \"\").replace(\"’\", \"\")\n retailer_lower=retailer.lower()\n esflag=row[12].strip()\n tgt_table_name=row[9].strip()\n\n #threading.Timer(300, scheduler).start()\n if(esflag.strip() and esflag==\"1\"):\n\n print(\"ES Load Started for Retailer \"+retailer_lower)\n #print(\"Producer Started\")\n subprocess.call(['java', '-jar', '-Xmx1024m', jarpath, 'autoespush', tgt_table_name, retailer_lower])\n\n\n# start calling f now and every 2 mins thereafter\nscheduler()\n","sub_path":"sharesale/DataEngineering/Scripts/MysqlNewESScheduler.py","file_name":"MysqlNewESScheduler.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"230284890","text":"##########################################################################\n# This code is a copyright of c^2. Do not #\n# ## re-use, re-distribute, or re-create # \n# ##### any segment of this code without #\n# ## ## permission from the owners of the code. 
#\n# ## #\n# ## To contact the owners of this project, #\n# ###### ######## please contact: #\n# ##### ## - fkeller20@ssis.edu.vn #\n# #### - sunkim19@ssis.edu.vn #\n# #### - hsekine21@ssis.edu.vn #\n# #### - htran19@ssis.edu.vn #\n# #### # #\n# ##### ## #\n# ###### #\n# #\n# #\n# PREREQUISITES: googleapiclient, oauth2client: #\n# pip install --upgrade google-api-python-client oauth2client #\n##########################################################################\n\n# Google API\nfrom __future__ import print_function\nfrom googleapiclient.discovery import build\nfrom httplib2 import Http\nfrom oauth2client import file, client, tools\nfrom googleapiclient import discovery\n\n# Python\nfrom datetime import datetime\nfrom time import sleep\nimport os\nimport ConfigParser\n\n# RFID Scanner\nimport RPi.GPIO as GPIO\nimport MFRC522\nimport signal\n\nclass Spreadsheet(object):\n '''\n Scanner object that can read, write, and clear given spreadsheet.\n '''\n \n def __init__(self, scope, spreadsheet_id, column_name,\n column_scan_value, column_required_action, \n number_of_queries, column_queries, header_length):\n \n # Spreadsheet data\n self.scope = scope\n self.spreadsheet_id = spreadsheet_id\n self.credentials = self.generateCredentials()\n self.service = discovery.build('sheets', 'v4', credentials=self.credentials)\n \n # Column data\n self.column_name = column_name\n self.column_scan_value = column_scan_value\n self.column_required_action = column_required_action\n \n # Query data\n self.number_of_queries = number_of_queries\n self.column_queries = column_queries\n \n # Header length\n self.header_length = header_length\n \n \n def read(self, _range):\n '''\n Returns a list of values, given a range of cells to read.\n '''\n \n result = self.service.spreadsheets().values().get(spreadsheetId=self.spreadsheet_id, range=_range).execute()\n\n return result.get('values', [])\n\n \n def write(self, _range, values_to_write):\n '''\n Writes a cell range with values.\n '''\n \n value_input_option = 'RAW'\n value_range_body = {\n \"range\": _range,\n \"values\": [values_to_write]\n }\n\n request = self.service.spreadsheets().values().update(spreadsheetId=self.spreadsheet_id, range=_range, valueInputOption=value_input_option, body=value_range_body)\n response = request.execute()\n \n \n def clear(self, _range):\n '''\n Clears a given cell range.\n '''\n \n batch_clear_values_request_body = {\n 'ranges': [_range],\n }\n \n request = self.service.spreadsheets().values().batchClear(spreadsheetId=self.spreadsheet_id, body=batch_clear_values_request_body)\n response = request.execute()\n \n def generateCredentials(self):\n '''\n Returns credentials, as to be used un the Spreadsheet object.\n '''\n store = file.Storage('token.json')\n credentials = store.get()\n\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets('credentials.json', self.scope)\n credentials = tools.run_flow(flow, store)\n\n return credentials\n\n\ndef readConfig(file_name):\n '''\n Reads a config file.\n '''\n \n # Open and read file.\n config = ConfigParser.ConfigParser()\n config.read(file_name)\n \n # Get variables\n scope = config.get('main', 'scope')\n spreadsheet_id = config.get('main', 'spreadsheet_id')\n\n column_name = config.get('main', 'column_name')\n column_scan_value = config.get('main', 'column_scan_value')\n column_required_action = config.get('main', 'column_required_action')\n \n number_of_queries =config.get('main', 'number_of_queries')\n column_queries = config.get('main', 
'column_queries')\n    queries = config.get('main', 'queries')\n    \n    header_length = config.get('main', 'header_length')\n    \n    # Return list of variables.\n    return [scope, spreadsheet_id, column_name, column_scan_value, column_required_action, number_of_queries, column_queries, queries, header_length]\n\n\ndef clearTerminal():\n    '''\n    Clears the terminal window.\n    '''\n    \n    os.system('cls' if os.name == 'nt' else 'clear')\n\n\ndef getUID():\n    '''\n    Returns the UID of the scanned card.\n    '''\n\n    # Hook the SIGINT\n    # signal.signal(signal.SIGINT, end_read)\n    \n    # Create an object of the class MFRC522\n    MIFAREReader = MFRC522.MFRC522()\n    \n    # This loop keeps checking for chips. If one is near it will get the UID and authenticate\n    while True:\n        \n        # Scan for cards \n        (status,TagType) = MIFAREReader.MFRC522_Request(MIFAREReader.PICC_REQIDL)\n        \n        # Get the UID of the card\n        (status,uid) = MIFAREReader.MFRC522_Anticoll()\n        \n        if status == MIFAREReader.MI_OK:\n            GPIO.cleanup()\n            return ''.join([str(element) for element in uid])\n        \n        \n\ndef main():\n    '''\n    Main function. Runs code forever until quit.\n    '''\n    \n    # Welcome message\n    clearTerminal()\n    print(\"Welcome to this sign in system.\")\n    print(\"Loading spreadsheet...\")\n    \n    # Initialize\n    config_values = readConfig('config.ini')\n    \n    # Get questions\n    number_of_queries = config_values[5]\n    column_queries = config_values[6]\n    queries = str(config_values[7]).split('//')\n    \n    spreadsheet = Spreadsheet(\n        scope = config_values[0],\n        spreadsheet_id = config_values[1],\n        \n        column_name = config_values[2],\n        column_scan_value = config_values[3],\n        column_required_action = config_values[4],\n        \n        number_of_queries = number_of_queries,\n        column_queries = column_queries,\n        \n        \n        header_length = config_values[8]\n    )\n    \n    # Run main code\n    while True:\n        \n        current_sheet = spreadsheet.read('Configuration!B1')[0][0]\n        \n        clearTerminal()\n        \n        # Get card uid\n        print(\"Please scan ID card.\")\n        rfid_uid = getUID()\n        clearTerminal()\n        \n        print(\"Scanning ID card...\")\n        \n        all_rfid_uid = spreadsheet.read(current_sheet + '!' + spreadsheet.column_scan_value + str(int(spreadsheet.header_length) + 1) + ':' + spreadsheet.column_scan_value)\n        all_rfid_uid = [str(all_rfid_uid[i][0]) for i in range(len(all_rfid_uid))]\n        \n        if rfid_uid in all_rfid_uid:\n            uid_row = all_rfid_uid.index(rfid_uid) + int(spreadsheet.header_length) + 1\n            \n            required_action = spreadsheet.read(current_sheet + '!' + spreadsheet.column_required_action + str(uid_row))\n            \n            if required_action == []:\n                # Ask queries\n                \n                responses = []\n                \n                for i in range(len(queries)):\n                    responses.append(raw_input(queries[i]))\n                \n                # ONLY FOR STUDY HALL SIGN IN. NOT PART OF NORMAL CODE.\n                # If nothing is entered, default to library.\n                if responses[0] == '':\n                    responses[0] = 'Library'\n                \n                responses.append('YES')\n                responses.append(datetime.now().strftime('%Y-%m-%d %H:%M'))\n                \n                # Report results NOTE: THIS IS NOT GOOD. ONLY SPECIFIC TO STUDY HALL SIGN IN.\n                spreadsheet.write(str(current_sheet) + '!' + str(spreadsheet.column_queries) + str(uid_row), responses)\n                \n                print(\"You are signed in.\")\n            else:\n                print(\"Required Action: \" + required_action[0][0])\n                \n                try:\n                    confirmed = input(\"Press any button to continue.\")\n                except:\n                    print(\"You are NOT signed in. 
Please see Mr.Le.\")\n sleep(1)\n \n else:\n print('Unregistered ID card.')\n\n# --MAIN-- #\n\nif __name__ == '__main__':\n main()\n ","sub_path":"user_code.py","file_name":"user_code.py","file_ext":"py","file_size_in_byte":9181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"462201894","text":"# from testInput import input\nfrom collections import defaultdict\nclass Graph:\n def __init__(self,V):\n self.graph =defaultdict(list)\n self.V = V\n def add(self,u,v):\n self.graph[u].append(v)\n def dfs(self,s,visited,c,maxx):\n visited[s]=True\n c+=1\n maxx[0] = max(maxx[0],c)\n for x in self.graph[s]:\n if visited[x]==False:\n self.dfs(x,visited,c,maxx)\n visited[s]=False\n def dfs1(self,s,visited,dp):\n if dp[s]!=-1:\n return dp[s]\n visited[s]=True\n ans = 0\n for x in self.graph[s]:\n if visited[x]==False:\n c= self.dfs1(x,visited,dp)\n ans = max(ans,c)\n visited[s]=False\n ans +=1\n dp[s]=ans\n return ans\n\n def find(self):\n visited =[False]*self.V\n dp = [-1]*self.V\n ans = 0\n for x in self.graph[0]:\n if visited[x]==False:\n maxx = [0]\n self.dfs1(x,visited,dp)\n\n return max(dp)\n\nr,n = map(int,input().split())\narray =[[0,1,1]]\nfor i in range(n):\n array.append(list(map(int,input().split())))\narray.sort()\nG = Graph(n+1)\nfor i in range(len(array)):\n for j in range(i+1,len(array)):\n t1,x1,y1=array[i]\n t2,x2,y2=array[j]\n #possible move\n if (t1+abs(x1-x2)+abs(y1-y2))<=t2:\n G.add(i,j)\nif 0 not in G.graph:\n print(0)\nelse:\n ans = G.find()\n print(ans)\n","sub_path":"codeforce/global_round_11/Q4.py","file_name":"Q4.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"403409881","text":"import requests\nimport json\n\nfrom peewee import DoesNotExist\n\nfrom models import (BookToRent, BookTradeWant, BookTradeHave, WishList, BookRentingRequest,\n BookRenting)\n\n\nclass DuplicateEntry(Exception):\n pass\n\n\nclass SelfBook(Exception):\n pass\n\n\ndef allowed_file(filename, ALLOWED_EXTENSIONS):\n return '.' 
in filename and \\\n           filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\n\ndef create_book_rent(**kwargs):\n    \"\"\"Create a book for rent.\"\"\"\n    BookToRent.create(\n        name=kwargs['name'],\n        author=kwargs['author'],\n        description=kwargs['description'],\n        isbn=kwargs['isbn'],\n        condition=kwargs['condition'],\n        condition_comment=kwargs['condition_comment'],\n        marks=kwargs['marks'],\n        username=kwargs['username'],\n        available='available',\n        # image_path=kwargs['img_path']\n    )\n\n\ndef create_request_book_rent(book_id, username):\n    \"\"\"This function requests a book to be rented.\"\"\"\n    BookRentingRequest.create(\n        book=book_id,\n        renter=BookToRent.get(BookToRent.id == book_id).username.username,\n        rentee=username,\n    )\n\n\ndef delete_request_book_rent(request_id, rentee):\n    \"\"\"This function deletes a request to rent.\"\"\"\n    BookRentingRequest.get(\n        (BookRentingRequest.id == request_id) &\n        (BookRentingRequest.rentee == rentee)\n    ).delete_instance()\n\n\ndef accept_request_to_rent(request_id):\n    \"\"\"This function accepts a request to rent.\"\"\"\n    request = get_renting_request_by_id(request_id)\n\n    BookRenting.create(\n        book=request.book.id,\n        renter=request.renter.username,\n        rentee=request.rentee.username\n    )\n    BookToRent.update(\n        available=\"rented\"\n    ).where(BookToRent.id == request.book.id).execute()\n    request.delete_instance()\n\n\ndef get_currently_renting(username):\n    \"\"\"This function gets the books that the passed user is\n    currently renting.\n    \"\"\"\n    return BookRenting.select().where(BookRenting.rentee == username)\n\n\ndef get_currently_renting_out(username):\n    \"\"\"This function gets the books that the passed user is\n    currently renting out.\n    \"\"\"\n    return BookRenting.select().where(BookRenting.renter == username)\n\n\ndef get_renting_request_by_id(request_id):\n    \"\"\"This function gets a renting request by request_id\"\"\"\n    return BookRentingRequest.get(BookRentingRequest.id == request_id)\n\n\ndef get_user_renting_incoming_requests(username):\n    \"\"\"This function gets all the requests received by a specific user\"\"\"\n    return BookRentingRequest.select().where(\n        BookRentingRequest.renter == username\n    )\n\n\ndef get_user_renting_outgoing_request(username):\n    \"\"\"This function gets all the requests sent by a specific user.\"\"\"\n    return BookRentingRequest.select().where(\n        BookRentingRequest.rentee == username\n    )\n\n\ndef delete_book_rent(book_id):\n    \"\"\"This function deletes a book for rent.\"\"\"\n    BookToRent.get(BookToRent.id == book_id).delete_instance()\n\n\ndef create_book_trade(**kwargs):\n    \"\"\"Create a book to trade.\"\"\"\n    BookTradeWant.create(\n        name=kwargs['want_name'],\n        isbn=kwargs['want_isbn'],\n        user=kwargs['user'],\n    )\n    BookTradeHave.create(\n        name=kwargs['have_name'],\n        isbn=kwargs['have_isbn'],\n        user=kwargs['user']\n    )\n\n\ndef load_book_info(isbn):\n    \"\"\"Get an ISBN and return a dictionary with book information.\"\"\"\n    data = requests.get(\"https://www.googleapis.com/books/v1/volumes?q={}\".format(isbn)).json()\n    if data['totalItems']:\n        try:\n            description = data['items'][0]['volumeInfo']['description']\n        except KeyError:\n            description = \"No description available.\"\n        book = {\n            'title': data['items'][0]['volumeInfo']['title'],\n            'authors': ', '.join(data['items'][0]['volumeInfo']['authors']),\n            'description': description,\n            'isbn': isbn,\n        }\n        return book\n    return None\n\n\ndef get_book_rent(book_pk):\n    return BookToRent.get(BookToRent.id == book_pk)\n\n\ndef add_to_wishlist(book_pk, username):\n    \"\"\"This function adds a book to the wishlist if it is not a duplicate.\"\"\"\n    try:\n        wishlist = WishList.get((WishList.username == username) & (WishList.book == book_pk))\n    except DoesNotExist:\n        book = BookToRent.get(BookToRent.id == book_pk)\n        if not book.username.username == username:\n            WishList.create(\n                book=book_pk,\n                username=username,\n            )\n        else:\n            raise SelfBook\n    else:\n        raise DuplicateEntry\n","sub_path":"book/book.py","file_name":"book.py","file_ext":"py","file_size_in_byte":4569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"45948375","text":"#coding:utf-8\nfrom simple_orm_mysql import *\n\nclass User(Model):\n    name = CharField()\n    addr = CharField()\n\nif __name__ == \"__main__\":\n    user = User()\n    user.addr = \"beijing of china\"\n    user.name = 'fengyun'\n    user.save()\n    user.addr = \"shanghai of china\"\n    user.save()\n    user.get(name='xiaorui.cc',addr=\"beijing\")\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"647568366","text":"\"\"\" A few helper/utility functions.\n\"\"\"\nimport string\nfrom typing import List\n\n# List of the lower case letters in alphabetical order\nalphabet = list(string.ascii_lowercase)\nalphabet = alphabet[:8]\n\n\ndef conv_pos_str_to_int(position: str) -> List[int]:\n    \"\"\"Converts position from string (i.e. 'a2') to list of ints (i.e. [1, 2]).\n    'a' maps to 1, and 'h' maps to 8. Everything in between maps accordingly.\n    Args:\n        position (str): Position to be converted.\n\n    Returns:\n        numeric_position (List[int]): The converted position.\n\n    \"\"\"\n    if position[0].lower() in alphabet:\n\n        return [alphabet.index(position[0].lower())+1, int(position[1])]\n    else:\n        raise ValueError('Invalid position letter.')\n\n\ndef conv_pos_int_to_str(numeric_position: List[int]) -> str:\n    \"\"\"The inverse of conv_pos_str_to_int(), converting\n    a list of ints (i.e. [1, 2]) to a string (i.e. 'a2').\n\n    Args:\n        numeric_position (List[int]): Position to be converted.\n\n    Returns:\n        position (str): The converted position.\n\n    \"\"\"\n\n    try:\n        if numeric_position[0] < 1:\n            pass\n        else:\n            output = f'{alphabet[numeric_position[0]-1]}{numeric_position[1]}'\n            return output\n    except IndexError:\n        pass\n\n\ndef position_difference(current_position: str,\n                        future_position: str) -> List[int]:\n    \"\"\"\n\n    Args:\n        current_position (str): The current position of the piece in str format.\n        future_position (str): The future position of the piece in str format\n\n    Returns: A list of ints of length two corresponding to the change in\n    position along each dimension\n\n    \"\"\"\n    current_position_numeric = conv_pos_str_to_int(current_position)\n    future_position_numeric = conv_pos_str_to_int(future_position)\n\n    return [future_position_numeric[i] - current_position_numeric[i]\n            for i in range(2)]\n\n\ndef is_valid_board_position(position):\n    return position in [[i, j] for i in range(1, 9) for j in range(1, 9)]\n\n\ndef straight_direction(position, direction, color, board):\n    \"\"\" List the possible moves a piece can take in the given direction\n    before hitting the edge of the board or hitting another piece. 
If\n the piece stumbles upon an enemy piece that square is still a valid move;\n otherwise it is not.\n\n Args:\n position (List[int]): The current position of the piece\n as [x, y] where 0 List[int]:\n \"\"\"\n Makes sure positions are valid and converts positions to x and y\n coordinates on the board.\n Args:\n origin_position (Union[List[int], str]): [x, y] where 0 \"\n if len(argv) < 4:\n print(argv[0], args)\n return -1\n\n L = float(argv[1])\n Emax = float(argv[2])\n g = float(argv[3])\n\n plt.figure(1)\n params = {'legend.fontsize': 8}\n plt.rcParams.update(params)\n\n db = database.Database()\n exactQuery = {\"ren\":\"raw\", \"k\":-1, \"occmax\":occmax}\n approxQuery = {\"g\":g, \"Emax\":Emax, \"L\":L}\n eigv = db.getObjList(\"eigv\", exactQuery=exactQuery, approxQuery=approxQuery)[0]\n\n basis = Basis.fromScratch(m=1, L=L, Emax=Emax, k=-1, occmax=occmax)\n # Select only 3 particles basis states\n indexList = [i for i in range(len(basis)) if basis[i].occ==3]\n basis3p = [basis[i] for i in indexList]\n\n # Select only coefficients of 3 particle basis states\n wf = [eigv[0][i] for i in indexList]\n\n # Renormalize wave function\n wf = array([normalizeWF(c,v) for c,v in zip(wf, basis3p)])\n\n # Construct variables in the form [k1,k2,f(k1,k2,k3)]\n data = []\n for c, v in zip(wf, basis3p):\n # List of momenta of wavenumbers in the state\n wavenumbers = list(itertools.chain(*[[wn]*v[wn] for wn in v.wnList()]))\n\n # Take all possible inequivalent pairs of wave numbers, including symmetrization\n s = set(itertools.combinations(wavenumbers,2))\n s |= set((b,a) for a,b in s)\n s |= set((-a,-b) for a,b in s)\n\n for a,b in s:\n data.append(array([a*2*pi/L,b*2*pi/L,c]))\n\n data = array(data)\n scipy.savetxt(\"data/wf3p_g={0:g}_L={1:g}_E={2:g}_nmax={3:d}.csv\"\n .format(g,L,Emax,occmax),\n data.reshape(1,data.size),delimiter=\",\")\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","sub_path":"3partWF.py","file_name":"3partWF.py","file_ext":"py","file_size_in_byte":2545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"551629268","text":"# print(1+1) #2\n# def add(num1 , num2):\n# print(\"คุณได้ทำการบวกเลข 2 ตัว คือ {} และ {}\".format(num1,num2))\n# result = num1+num2\n# print(\"ผลลัพธ์ค���อ {}\".format(result))\n# return result\n\n# ผลลัพธ์ = add(num1=5,num2=10)\n# print(ผลลัพธ์)\n\nfrom fuzzywuzzy import process\ndef match_fuzzy(text,text_list,score):\n Ratio = process.extractOne(text,text_list)\n print(Ratio)\n if Ratio[1] > score:\n return Ratio[0]\n else :\n return False\n \n# str2Match = \"apple inc\"\n# strOptions = [\"Apple Inc.\",\"apple park\",\"apple incorporated\",\"iphone\"]\n\n# print(match(text=str2Match,text_list=strOptions,score=50))\n","sub_path":"basic_python/match_fuzzy.py","file_name":"match_fuzzy.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"444593890","text":"from dmutils.forms import render_template_with_csrf\nfrom flask import render_template, request, flash, jsonify, url_for, redirect\nfrom flask_login import login_required, current_user\nfrom react.render import render_component\n\nfrom .. import main\nfrom ... 
import data_api_client\nfrom ..auth import role_required\nfrom dmapiclient.errors import HTTPError\n\n\nNEW_LINE = '\\n'\nAREA_OF_EXPERTISE_LIST = [\n 'Agile delivery and Governance',\n 'Change and Transformation',\n 'Content and Publishing',\n 'Cyber security',\n 'Data science',\n 'Digital sourcing and ICT procurement',\n 'Emerging technologies',\n 'ICT risk management and audit activities',\n 'ICT systems integration',\n 'Marketing, Communications and Engagement',\n 'Platforms integration',\n 'Service Integration and Management',\n 'Software engineering and Development',\n 'Strategy and Policy',\n 'Support and Operations',\n 'Training, Learning and Development',\n 'User research and Design'\n]\n\n\n@main.route('/buyers//edit', methods=['GET'])\n@login_required\n@role_required('admin')\ndef brief_edit_json(brief_id):\n brief = data_api_client.get_brief(brief_id).get('briefs')\n\n rendered_component = render_component(\n 'bundles/ApplicationsAdmin/BriefAdminWidget.js',\n {\n 'brief': brief,\n 'meta': {\n 'url_brief_update': url_for('.update_brief_data', brief_id=brief_id),\n }\n }\n )\n\n return render_template(\n '_react.html',\n component=rendered_component\n )\n\n\n@main.route('/buyers//update', methods=['POST'])\n@login_required\n@role_required('admin')\ndef update_brief_data(brief_id):\n json_payload = request.get_json(force=True)\n result = (data_api_client\n .req\n .briefs(brief_id)\n .json()\n .admin()\n .put({\"brief\": json_payload}))\n\n return jsonify(result)\n\n\n@main.route('/buyers', methods=['GET'])\n@login_required\n@role_required('admin')\ndef find_buyer_by_brief_id():\n brief_id = request.args.get('brief_id')\n teams = []\n\n try:\n brief = data_api_client.get_brief(brief_id).get('briefs')\n teams = data_api_client.req.admin().buyers(brief_id).teams().get()\n\n except: # noqa\n flash('no_brief', 'error')\n return render_template(\n \"view_buyers.html\",\n users=list(),\n brief_id=brief_id,\n brief=None\n ), 404\n\n users = brief.get('users')\n title = brief.get('title')\n\n return render_template_with_csrf(\n \"view_buyers.html\",\n users=users,\n title=title,\n brief_id=brief_id,\n brief=brief,\n seller_email_list=convert_array_to_string(brief.get('sellerEmailList', [])),\n seller_email=brief.get('sellerEmail', ''),\n area_of_expertise_list=AREA_OF_EXPERTISE_LIST,\n area_of_expertise_selected=brief.get('areaOfExpertise', ''),\n teams_exists=len(teams) > 0,\n teams=teams\n )\n\n\n@main.route('/brief/', methods=['POST'])\n@login_required\n@role_required('admin')\ndef update_brief(brief_id):\n try:\n if request.form.get('add_user'):\n brief = data_api_client.req.briefs(brief_id).users(request.form['add_user'].strip()) \\\n .put({'update_details': {'updated_by': current_user.email_address}}).get('briefs')\n elif request.form.get('remove_user'):\n brief = data_api_client.req.briefs(brief_id).users(request.form['remove_user'].strip()) \\\n .delete({'update_details': {'updated_by': current_user.email_address}}).get('briefs')\n elif request.form.get('questions_closed_at'):\n brief = data_api_client.req.briefs(brief_id).admin() \\\n .post({'briefs': {'clarification_questions_closed_at': request.form['questions_closed_at'],\n 'applications_closed_at': request.form['closed_at']\n },\n 'update_details': {'updated_by': current_user.email_address}}).get('briefs')\n elif request.form.get('add_seller_to_brief'):\n brief = (\n data_api_client\n .req\n .briefs(brief_id)\n .suppliers(\n request.form['add_seller_to_brief'].strip()\n )\n .put({\n 'update_details': {\n 'updated_by': 
current_user.email_address\n }\n })\n .get('briefs')\n )\n elif request.form.get('remove_seller_from_brief'):\n brief = (\n data_api_client\n .req\n .briefs(brief_id)\n .suppliers(\n request.form['remove_seller_from_brief'].strip()\n )\n .delete({\n 'update_details': {\n 'updated_by': current_user.email_address\n }\n })\n .get('briefs'))\n elif 'edit_seller_email_list' in request.form:\n edit_seller_email_list = request.form.get('edit_seller_email_list', []).split(NEW_LINE)\n brief = data_api_client.req.briefs(brief_id).admin() \\\n .post({\n 'briefs': {\n 'sellerEmailList': [_.strip() for _ in edit_seller_email_list if _ != '']\n },\n 'update_details': {'updated_by': current_user.email_address}\n }).get('briefs')\n elif 'edit_seller_email' in request.form:\n edit_seller_email = request.form.get('edit_seller_email', '')\n brief = data_api_client.req.briefs(brief_id).admin() \\\n .post({\n 'briefs': {\n 'sellerEmail': edit_seller_email\n },\n 'update_details': {'updated_by': current_user.email_address}\n }).get('briefs')\n elif 'edit_area_of_expertise' in request.form:\n brief = data_api_client.req.briefs(brief_id).admin() \\\n .post({'briefs': {'areaOfExpertise': request.form['edit_area_of_expertise']\n },\n 'update_details': {'updated_by': current_user.email_address}}).get('briefs')\n else:\n brief = data_api_client.get_brief(brief_id).get('briefs')\n\n except HTTPError as e:\n flash(e.message, 'error')\n brief = data_api_client.get_brief(brief_id).get('briefs')\n users = brief.get('users')\n title = brief.get('title')\n return render_template_with_csrf(\n \"view_buyers.html\",\n users=users,\n title=title,\n brief_id=brief_id,\n brief=brief\n )\n\n flash('brief_updated', 'info')\n users = brief.get('users')\n title = brief.get('title')\n return render_template_with_csrf(\n \"view_buyers.html\",\n users=users,\n title=title,\n brief_id=brief_id,\n brief=brief,\n seller_email_list=convert_array_to_string(brief.get('sellerEmailList', [])),\n seller_email=brief.get('sellerEmail', ''),\n area_of_expertise_list=AREA_OF_EXPERTISE_LIST,\n area_of_expertise_selected=brief.get('areaOfExpertise', '')\n )\n\n\n@main.route('/brief//withdraw', methods=['POST'])\n@login_required\n@role_required('admin')\ndef withdraw_brief(brief_id):\n try:\n brief = data_api_client.req.briefs(brief_id).withdraw().post({\n 'update_details': {'updated_by': current_user.email_address}\n }).get('briefs')\n except HTTPError as e:\n flash(e.message, 'error')\n brief = data_api_client.get_brief(brief_id).get('briefs')\n\n return render_template_with_csrf(\n 'view_buyers.html',\n users=brief.get('users'),\n title=brief.get('title'),\n brief_id=brief_id,\n brief=brief\n )\n\n flash('brief_withdrawn', 'info')\n return render_template_with_csrf(\n 'view_buyers.html',\n users=brief.get('users'),\n title=brief.get('title'),\n brief_id=brief_id,\n brief=brief,\n seller_email_list=convert_array_to_string(brief.get('sellerEmailList', [])),\n seller_email=brief.get('sellerEmail', ''),\n area_of_expertise_list=AREA_OF_EXPERTISE_LIST,\n area_of_expertise_selected=brief.get('areaOfExpertise', '')\n )\n\n\ndef convert_array_to_string(array):\n return NEW_LINE.join(array)\n\n\n@main.route('/brief//seller-feedback-email', methods=['POST'])\n@login_required\n@role_required('admin')\ndef seller_feedback_email(brief_id):\n (\n data_api_client\n .req\n .briefs(brief_id)\n .send_feedback_email()\n .post({\n 'update_details': {'updated_by': current_user.email_address}\n })\n )\n\n flash('seller_feedback_email', 'info')\n return 
redirect(url_for('.find_buyer_by_brief_id', brief_id=brief_id))\n\n\n@main.route('/team', methods=['GET'])\n@login_required\n@role_required('admin')\ndef find_team_by_team_id():\n team_id = request.args.get('team_id')\n\n try:\n if team_id:\n team_info = data_api_client.req.admin().team(team_id).get()\n team = team_info.get('team')\n briefs = team_info.get('briefs')\n\n team_leads = team.get('teamLeads')\n team_leads_email_name = []\n\n for i in team_leads:\n team_leads_email_name.append(team_leads.get(i))\n\n team_members = team.get('teamMembers')\n team_members_email_name = []\n\n if team_members is None:\n team_members_flag = False\n\n else:\n\n for i in team_members:\n team_members_email_name.append(team_members.get(i))\n\n team_members_flag = True\n\n return render_template_with_csrf(\n \"view_team.html\",\n team_id=team_id,\n team=team,\n briefs=briefs,\n team_leads=team_leads_email_name,\n team_members=team_members_email_name,\n team_members_flag=team_members_flag\n )\n\n except HTTPError as e: # noqa\n flash('no_team_id', 'error')\n\n teams = data_api_client.req.admin().team().get()\n return render_template(\n 'view_teams.html',\n teams=teams.get('teams'),\n team_id=team_id\n )\n\n\n@main.route('/', methods=['GET'])\n@login_required\n@role_required('admin')\ndef find_brief_by_team_id():\n team_id = request.args.get('team_id')\n\n team_info = data_api_client.req.admin().team(team_id).get()\n team = team_info.get('team')\n briefs = team_info.get('briefs')\n\n return render_template_with_csrf(\n \"view_team.html\",\n team_id=team_id,\n team=team\n )\n","sub_path":"app/main/views/buyers.py","file_name":"buyers.py","file_ext":"py","file_size_in_byte":10908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"163623363","text":"'''\nA module which implements various attention mechanisms\n'''\nimport math\nimport torch\nimport time\nimport numpy as np\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom models.attention import MultiHeadedAttention\nimport pdb\nfrom utils import same_tensor\n\n\nclass ProbeNewAttention(nn.Module):\n ''' Implement a hard-coded attention module '''\n ATTN_TYPES = ['normal', 'uniform', 'whole', 'no', 'learned']\n ATTN_POSITIONS = ['center', 'left', 'right', 'first', 'last', 'middle']\n\n def __init__(self, attn_config, embed_dim, num_heads=1):\n ''' Initialize the attention module '''\n super(ProbeNewAttention, self).__init__()\n\n # ensure valid inputs\n assert embed_dim % num_heads == 0, \\\n f'num_heads={num_heads} should evenly divide embed_dim={embed_dim}'\n\n # store off the scale and input params\n self.embed_dim = embed_dim\n self.num_heads = num_heads\n self.projection_dim = embed_dim // num_heads\n self.scale = self.projection_dim ** -0.5\n self.attn_type = attn_config['attn_type']\n self.attn_position = attn_config['attn_position']\n self.attn_param = attn_config['attn_param']\n self.attn_displacement = attn_config['attn_displacement']\n self.num_layers = attn_config['num_layers']\n self.word_count_ratio = attn_config['word_count_ratio'] if 'word_count_ratio' in attn_config else 1\n self.attn_concat = attn_config['attn_concat'] if 'attn_concat' in attn_config else 0\n if self.attn_concat in [1, 2]:\n self.attn_concat_weights = nn.Parameter(torch.Tensor(embed_dim, 2 * embed_dim))\n elif self.attn_concat == 3:\n self.attn_concat_weights = nn.Parameter(torch.Tensor(embed_dim, 3 * embed_dim))\n else:\n self.attn_concat_weights = None\n self.which_attn = attn_config['which_attn']\n 
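# attn_score: when set, each head's attended output is re-scored against a down-projected query\n        # and the heads are combined with the resulting softmax weights (see forward() below).\n        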
self.attn_score = attn_config['attn_score']\n if self.attn_score:\n self.attn_score_project_in_weights = nn.Parameter(torch.Tensor(self.projection_dim, embed_dim))\n self.attn_score_project_out_weights = nn.Parameter(torch.Tensor(embed_dim, self.projection_dim))\n\n # Combine projections for multiple heads into a single linear layer for efficiency\n self.attn_linear_transform = attn_config['attn_weights']\n self.input_weights = None\n if self.attn_linear_transform:\n if 'learned' in self.attn_type or 'learned' == self.attn_type:\n if self.attn_linear_transform == 1:\n self.input_weights = nn.Parameter(torch.Tensor(3 * embed_dim, embed_dim))\n elif self.attn_linear_transform == 2:\n self.input_weights = nn.Parameter(torch.Tensor(2 * embed_dim, embed_dim))\n else:\n self.input_weights = nn.Parameter(torch.Tensor(embed_dim, embed_dim))\n self.output_projection = nn.Linear(embed_dim, embed_dim, bias=False)\n self.reset_parameters()\n\n self.attn_weights = {}\n\n def reset_parameters(self):\n ''' Reset parameters using xavier initialization '''\n # Initialize using Xavier\n gain = nn.init.calculate_gain('linear')\n if self.input_weights is not None:\n nn.init.xavier_uniform_(self.input_weights, gain)\n nn.init.xavier_uniform_(self.output_projection.weight, gain)\n if self.attn_concat_weights is not None:\n nn.init.xavier_uniform_(self.attn_concat_weights, gain)\n if self.attn_score:\n nn.init.xavier_uniform_(self.attn_score_project_in_weights, gain)\n nn.init.xavier_uniform_(self.attn_score_project_out_weights, gain)\n\n def project(self, inputs, index=0, chunks=1):\n ''' Produce a linear projection using the weights '''\n batch_size = inputs.shape[0]\n start = index * self.embed_dim\n end = start + chunks * self.embed_dim\n projections = F.linear(inputs, self.input_weights[start:end]).chunk(chunks, dim=-1)\n\n output_projections = []\n for projection in projections:\n # transform projection to (BH x T x E)\n output_projections.append(\n projection.view(\n batch_size,\n -1,\n self.num_heads,\n self.projection_dim\n ).transpose(2, 1).contiguous().view(\n batch_size * self.num_heads,\n -1,\n self.projection_dim\n )\n )\n\n return output_projections\n\n def project_learned(self, inputs, learned_idx):\n batch_size = int(inputs.shape[0] / self.num_heads)\n return inputs.view(batch_size,\n self.num_heads,\n -1,\n self.projection_dim)[:, learned_idx].contiguous()\\\n .view(batch_size * len(learned_idx),\n -1,\n self.projection_dim)\n\n def attention(self, values, keys, queries, key_mask=None, mask=None, layer_i=0, decoder_position=-1):\n ''' Scaled dot product attention with optional masks '''\n\n # print(\"values\", values.shape)\n # print(\"keys\", keys.shape)\n # print(\"queries\", queries.shape)\n # print(\"attn_type\", self.attn_type)\n # print(\"attn_position\", self.attn_position)\n # print(\"input weights\", self.input_weights)\n # print(\"decoder_position\", decoder_position)\n # print(\"target_lens\", target_lens)\n queries_shape = queries.shape\n values_shape = values.shape\n # print(\"queries_shape\", queries_shape)\n # print(\"values_shape\", values_shape)\n # print(\"self.word_count_ratio\", self.word_count_ratio)\n\n # By this point the values, keys, and queries all have B * H as their first dimension\n batch_size = queries_shape[0] // self.num_heads\n\n attn_configs = []\n\n attn_configs_names = ['attn_type', 'attn_position', 'attn_param', 'attn_displacement']\n\n for i, attn_config_i in enumerate([self.attn_type, self.attn_position, self.attn_param, self.attn_displacement]):\n if 
type(attn_config_i) is list:\n #pdb.set_trace()\n if len(attn_config_i) == 1:\n attn_configs.append(attn_config_i[0])\n elif len(attn_config_i) == self.num_heads:\n if len(set(attn_config_i)) == 1:\n attn_configs.append(attn_config_i[0])\n else:\n attn_configs.append(attn_config_i)\n elif len(attn_config_i) == self.num_layers:\n attn_configs.append(attn_config_i[layer_i])\n elif len(attn_config_i) == self.num_heads * self.num_layers:\n if len(set(attn_config_i[layer_i * self.num_heads:(layer_i + 1) * self.num_heads])) == 1:\n attn_configs.append(attn_config_i[layer_i * self.num_heads])\n else:\n attn_configs.append(attn_config_i[layer_i * self.num_heads:(layer_i + 1) * self.num_heads])\n else:\n raise Exception(\"The number of {} is {}, but it has to be either number of heads {}, \"\n \"number of layers {}, or the product of them {}.\".format(attn_configs_names[i],\n len(attn_config_i),\n self.num_heads,\n self.num_layers,\n self.num_heads * self.num_layers))\n else:\n attn_configs.append(attn_config_i)\n attn_type, attn_position, attn_param, attn_displacement = attn_configs\n\n if attn_type == 'learned':\n logits = self.scale * torch.bmm(queries, keys.transpose(2, 1))\n if mask is not None:\n logits += mask\n\n if key_mask is not None:\n logits_shape = logits.shape\n batch_size = logits_shape[0] // self.num_heads\n logits = logits.view(batch_size, self.num_heads, logits_shape[1], logits_shape[2])\n logits.masked_fill_(key_mask[:, None, None], float('-inf'))\n logits = logits.view(logits_shape)\n\n attn_weights = F.softmax(logits, dim=-1)\n\n attended = torch.bmm(attn_weights, values)\n\n batch_size = queries_shape[0] // self.num_heads\n\n return attended.view(\n batch_size,\n self.num_heads,\n -1,\n self.projection_dim\n ).transpose(2, 1).contiguous().view(\n batch_size,\n -1,\n self.num_heads * self.projection_dim\n ), attn_weights\n\n elif 'learned' in attn_type:\n learned_idx = np.where(np.array(attn_type) == 'learned')[0]\n len_learned_idex = len(learned_idx)\n queries_ = self.project_learned(queries, learned_idx)\n keys_ = self.project_learned(keys, learned_idx)\n values_ = self.project_learned(values, learned_idx)\n\n logits_ = self.scale * torch.bmm(queries_, keys_.transpose(2, 1))\n logits_shape_ = logits_.shape\n if mask is not None:\n logits_ += mask\n\n if key_mask is not None:\n batch_size = logits_shape_[0] // len_learned_idex\n logits_ = logits_.view(batch_size, len_learned_idex, logits_shape_[1], logits_shape_[2])\n logits_.masked_fill_(key_mask[:, None, None], float('-inf'))\n logits_ = logits_.view(logits_shape_)\n logits_ = F.softmax(logits_, dim=-1).view(batch_size,\n len(learned_idx),\n logits_shape_[-2],\n logits_shape_[-1])\n\n learned_count = 0\n\n if 'last' in attn_position:\n if key_mask is not None:\n key_mask_shape = key_mask.shape\n last_indices = torch.tensor([key_mask_shape[1] - a[::-1].index(0)\n for a in key_mask.cpu().numpy().tolist()], dtype=torch.float32).view(-1, 1)\n else:\n last_indices = torch.tensor([values_shape[1]] * queries_shape[0], dtype=torch.float32).view(-1, 1)\n\n if type(attn_type) is not list and type(attn_position) is not list:\n if attn_type == 'whole':\n logits = torch.full((queries_shape[1], values_shape[1]), 1 / values_shape[1]).to(dtype=torch.float32)\n else:\n if attn_type not in self.attn_weights:\n self.attn_weights[attn_type] = {}\n # If the attention weight matrix is not stored, need to create new.\n # At inference time, always create new for decoder attentions.\n # If attention position is last or middle, always recalculate 
because the stored is wrong.\n if (attn_position not in self.attn_weights[attn_type]\n or (queries_shape[1] > self.attn_weights[attn_type][attn_position].shape[0]\n or values_shape[1] > self.attn_weights[attn_type][attn_position].shape[1])) \\\n or decoder_position != -1 \\\n or attn_position in ['last', 'middle']:\n\n indices_v = torch.arange(values_shape[1]).view(1, -1).to(dtype=torch.float32)\n\n if attn_position != 'last':\n indices_q = torch.arange(queries_shape[1]).view(-1, 1).to(dtype=torch.float32)\n\n if decoder_position > -1:\n indices_q[:] = decoder_position\n\n indices_q = indices_q * self.word_count_ratio\n\n if attn_position == 'left':\n indices_q = indices_q - attn_displacement\n elif attn_position == 'right':\n indices_q = indices_q + attn_displacement\n elif attn_position == 'first':\n indices_q[:] = 0\n elif attn_position == 'middle':\n indices_q[:] = (indices_v.size()[1] + 1) / 2 - 1\n\n distance_diff = indices_v - indices_q\n\n distance_diff = distance_diff.expand(values_shape[0], distance_diff.shape[0], distance_diff.shape[1])\n\n # If the attention is looking at the last indices, need to take masks into consideration\n else:\n indices_q = last_indices\n distance_diff = (indices_v - indices_q).unsqueeze(1).unsqueeze(2)\n distance_diff = distance_diff.expand(batch_size, self.num_heads, queries_shape[1], values_shape[1]).contiguous()\n distance_diff = distance_diff.view(values_shape[0], queries_shape[1], values_shape[1])\n\n if attn_type == 'normal':\n std = attn_param\n\n logits = (1 / (std * math.sqrt(2 * math.pi)) * torch.exp(- 1 / 2 * (distance_diff / std) ** 2))\n else:\n distance_diff = torch.abs(distance_diff)\n distance_diff[distance_diff <= attn_param] = 0\n distance_diff[distance_diff > attn_param] = 1\n logits = 1 - distance_diff\n logits = logits / torch.sum(logits, dim=-1, keepdim=True)\n\n self.attn_weights[attn_type][attn_position] = logits[0]\n else:\n logits = self.attn_weights[attn_type][attn_position][:queries_shape[1], :values_shape[1]]\n logits = logits.expand(values_shape[0], logits.shape[0], logits.shape[1])\n\n attn_weights = logits.type_as(values)\n\n # If one of the attention parameters is list (different in different heads), then make all of them lists\n else:\n attn_config = []\n for attn_config_i in [attn_type, attn_position, attn_param, attn_displacement]:\n if type(attn_config_i) is not list:\n attn_config.append([attn_config_i] * self.num_heads)\n else:\n attn_config.append(attn_config_i)\n\n attn_type, attn_position, attn_param, attn_displacement = attn_config\n\n logits_list = []\n\n for i in range(self.num_heads):\n if attn_type[i] == 'whole':\n logits = torch.full((queries_shape[1], values_shape[1]), 1 / values_shape[1]).to(\n dtype=torch.float32)\\\n .unsqueeze(0)\\\n .expand(int(values_shape[0] / self.num_heads),\n queries_shape[1],\n values_shape[1]).type_as(values)\n elif attn_type[i] == 'learned':\n logits = logits_[:, learned_count]\n learned_count += 1\n else:\n if attn_type[i] not in self.attn_weights:\n self.attn_weights[attn_type[i]] = {}\n\n # If the attention weight matrix is not stored, need to create new.\n # At inference time, always create new for decoder attentions.\n # If attention position is last or middle, always recalculate because the stored is wrong.\n if (attn_position[i] not in self.attn_weights[attn_type[i]]\n or (queries_shape[1] > self.attn_weights[attn_type[i]][attn_position[i]].shape[0]\n or values_shape[1] > self.attn_weights[attn_type[i]][attn_position[i]].shape[1])) \\\n or decoder_position != -1 \\\n 
or attn_position[i] in ['last', 'middle']:\n\n indices_v = torch.arange(values_shape[1]).view(1, -1).to(dtype=torch.float32)\n\n if attn_position[i] != 'last':\n indices_q = torch.arange(queries_shape[1]).view(-1, 1).to(dtype=torch.float32)\n\n if decoder_position > -1:\n indices_q[:] = decoder_position\n\n indices_q = indices_q * self.word_count_ratio\n\n if attn_position[i] == 'left':\n indices_q = indices_q - attn_displacement[i]\n elif attn_position[i] == 'right':\n indices_q = indices_q + attn_displacement[i]\n elif attn_position[i] == 'first':\n indices_q[:] = 0\n elif attn_position[i] == 'middle':\n indices_q[:] = (indices_v.size()[1] + 1) / 2 - 1\n\n distance_diff = indices_v - indices_q\n\n distance_diff = distance_diff.expand(batch_size,\n distance_diff.shape[0],\n distance_diff.shape[1])\n\n # If the attention is looking at the last indices, need to take masks into consideration\n else:\n indices_q = last_indices\n distance_diff = (indices_v - indices_q).unsqueeze(1)\n distance_diff = distance_diff.expand(batch_size, queries_shape[1],\n values_shape[1]).contiguous()\n\n if attn_type[i] == 'normal':\n std = attn_param[i]\n\n logits = (1 / (std * math.sqrt(2 * math.pi)) * torch.exp(- 1 / 2 * (distance_diff / std) ** 2))\n else:\n distance_diff = torch.abs(distance_diff)\n distance_diff[distance_diff <= attn_param[i]] = 0\n distance_diff[distance_diff > attn_param[i]] = 1\n logits = 1 - distance_diff\n logits = logits / torch.sum(logits, dim=-1, keepdim=True)\n # logits = F.softmax(logits, dim=-1)\n self.attn_weights[attn_type[i]][attn_position[i]] = logits[0]\n else:\n logits = self.attn_weights[attn_type[i]][attn_position[i]][:queries_shape[1], :values_shape[1]]\n logits = logits.expand(int(values_shape[0] / self.num_heads), logits.shape[0], logits.shape[1])\n logits = logits.type_as(values)\n logits_list.append(logits)\n attn_weights = torch.stack(logits_list, dim=1)\n attn_weights = attn_weights.view(values_shape[0],\n attn_weights.shape[2],\n attn_weights.shape[3])\n if mask is not None:\n new_mask = mask.clone()\n new_mask[new_mask == 0] = 1\n new_mask[new_mask == float('-inf')] = 0\n attn_weights = attn_weights.clone() * new_mask\n if key_mask is not None:\n attn_weights_shape = attn_weights.shape\n batch_size = attn_weights_shape[0] // self.num_heads\n attn_weights = attn_weights.view(batch_size, self.num_heads, attn_weights_shape[1], attn_weights_shape[2])\n attn_weights.masked_fill_(key_mask[:, None, None], float(0))\n attn_weights = attn_weights.view(attn_weights_shape)\n attended = torch.bmm(attn_weights,\n values)\n\n # torch.set_printoptions(profile='full')\n # print(\"values\", values)\n # print(\"values shape\", values.shape)\n # torch.set_printoptions(profile=\"full\")\n # print(\"attn_weights\", attn_weights)\n # print(\"attn_weights shape\", attn_weights.shape)\n # print(\"attended\", attended)\n # print(\"attended shape\", attended.shape)\n\n return attended.view(\n batch_size,\n self.num_heads,\n -1,\n self.projection_dim\n ).transpose(2, 1).contiguous().view(\n batch_size,\n -1,\n self.num_heads * self.projection_dim\n ), attn_weights\n\n def forward(self, values, keys, queries, # pylint:disable=arguments-differ\n key_mask=None, attention_mask=None, num_queries=0, layer_i=0, decoder_position=-1, input_lens=None,\n original_targets=None, word_embedding=None):\n ''' Forward pass of the attention '''\n batch_size = values.shape[0]\n\n if 'learned' in self.attn_type or 'learned' == self.attn_type:\n if self.attn_linear_transform == 1:\n if same_tensor(values, 
keys, queries):\n values, keys, queries = self.project(values, chunks=3)\n elif same_tensor(values, keys):\n values, keys = self.project(values, chunks=2)\n queries, = self.project(queries, 2)\n else:\n values, = self.project(values, 0)\n keys, = self.project(keys, 1)\n queries, = self.project(queries, 2)\n elif self.attn_linear_transform == 2:\n if same_tensor(keys, queries):\n keys, queries = self.project(queries, chunks=2)\n else:\n keys, = self.project(keys, 0)\n queries, = self.project(queries, 1)\n values = values.view(batch_size,\n -1,\n self.num_heads,\n self.projection_dim\n ).transpose(2, 1).contiguous().view(\n batch_size * self.num_heads,\n -1,\n self.projection_dim\n )\n else:\n inputs = []\n for inp in [values, keys, queries]:\n inputs.append(inp.view(batch_size,\n -1,\n self.num_heads,\n self.projection_dim\n ).transpose(2, 1).contiguous().view(\n batch_size * self.num_heads,\n -1,\n self.projection_dim\n ))\n values, keys, queries = inputs\n else:\n if self.attn_linear_transform:\n values = F.linear(values, self.input_weights)\n values = values.view(\n batch_size,\n -1,\n self.num_heads,\n self.projection_dim\n ).transpose(2, 1).contiguous().view(\n batch_size * self.num_heads,\n -1,\n self.projection_dim\n )\n\n queries = queries.view(\n batch_size,\n -1,\n self.num_heads,\n self.projection_dim\n ).transpose(2, 1).contiguous().view(\n batch_size * self.num_heads,\n -1,\n self.projection_dim\n )\n # pylint:enable=unbalanced-tuple-unpacking\n\n if num_queries:\n queries = queries[:, -num_queries:]\n\n attended, attn_weights = self.attention(values, keys, queries, key_mask, attention_mask, layer_i, decoder_position)\n\n queries = queries.view(\n batch_size,\n self.num_heads,\n -1,\n self.projection_dim\n ).transpose(2, 1).contiguous().view(\n batch_size,\n -1,\n self.num_heads * self.projection_dim\n )\n\n if self.attn_score:\n projected_queries = F.linear(queries, self.attn_score_project_in_weights).view(-1,\n 1,\n self.projection_dim)\n attended_shape = attended.shape\n attended = attended.view(-1,\n self.num_heads,\n self.projection_dim)\n scores = torch.bmm(projected_queries, attended.transpose(1, 2)).softmax(dim=-1)\n attended = F.linear(torch.bmm(scores, attended).squeeze(1),\n self.attn_score_project_out_weights).view(attended_shape)\n\n if 'learned' not in self.attn_type and 'learned' != self.attn_type and self.attn_concat_weights is not None:\n if self.attn_concat == 1:\n attended = F.linear(torch.cat((attended, queries), dim=-1), self.attn_concat_weights)\n elif self.attn_concat == 2:\n attended = F.linear(torch.cat((attended, word_embedding), dim=-1), self.attn_concat_weights)\n else:\n attended = F.linear(torch.cat((attended, queries, word_embedding), dim=-1), self.attn_concat_weights)\n\n # print(\"new attended\", attended.shape)\n\n return self.output_projection(attended), attn_weights\n","sub_path":"models/probe_new_attention.py","file_name":"probe_new_attention.py","file_ext":"py","file_size_in_byte":25831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"613400215","text":"# importing required libraries\n# 'pyttsx3' is a text-to-speech conversion library\nimport pyttsx3\n# 'os' is python in-build library\nimport os\n\nvoice = pyttsx3.init()\nVoices = voice.getProperty('voices')\n# Setting up the voice speed rate\nvoice.setProperty('rate', 170) \n# Setting up the volume of the voice\nvoice.setProperty('volume', 0.8)\n# For having Female voice\n#voice.setProperty('voice', Voices[1].id)\n\n# Introduction\nx 
= \"Welcome to the menu driven program!\\nI am here to direct you to the application you want to open.\"\nvoice.say(x)\nprint(x)\nvoice.runAndWait()\n \nwhile True:\n    # Taking input from the user\n    p = input(\"\\nPlease enter the application you want to open: \")\n    \n    # For 'Chrome' application\n    if ('run' in p.lower() or 'open' in p.lower() or 'execute' in p.lower()) and (\"chrome\" in p.lower()):\n        pyttsx3.speak(\"OK. Opening Chrome.\")\n        os.system(\"Chrome\")\n        \n    # For 'Paint' application\n    elif ('run' in p.lower() or 'open' in p.lower() or 'execute' in p.lower()) and (\"paint\" in p.lower()):\n        pyttsx3.speak(\"OK. Opening Paint.\")\n        os.system(\"mspaint\")\n        \n    # For 'Windows media player' application\n    elif ('run' in p.lower() or 'open' in p.lower() or 'execute' in p.lower()) and (\"windows media player\" in p.lower() \n                                                                                    or \"wmplayer\" in p.lower() or \"media player\" in p.lower()):\n        pyttsx3.speak(\"OK. Opening windows media player.\")\n        os.system(\"Wmplayer\")\n        \n    # For 'Notepad' application\n    elif ('run' in p.lower() or 'open' in p.lower() or 'execute' in p.lower()) and (\"notepad\" in p.lower()):\n        pyttsx3.speak(\"OK. Opening Notepad.\")\n        os.system(\"notepad\")\n        \n    # For 'Calculator' application\n    elif ('run' in p.lower() or 'open' in p.lower() or 'execute' in p.lower()) and (\"calculator\" in p.lower()):\n        pyttsx3.speak(\"OK. Opening Calculator.\")\n        os.system(\"calc\")\n        \n    # Exit/Quitting of application condition\n    elif ('exit' in p.lower() or 'close' in p.lower() or 'quit' in p.lower()):\n        pyttsx3.speak(\"OK! Thank you for using the application! See you soon!\")\n        break\n    \n    else:\n        pyttsx3.speak(\"Please enter the valid input!\")\n        print(\"Please enter the valid input!\")\n","sub_path":"menu_driven_program.py","file_name":"menu_driven_program.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
of mispredictions\r\n for i in range (n):\r\n predicted_class = 2 if np.dot(w, features[i].transpose()) > 0 else 1\r\n difference = classes[i] - predicted_class\r\n if difference != 0:\r\n w += learning_rate * difference * features[i]\r\n n_mispredicted += 1\r\n if n_mispredicted == 0:\r\n break\r\n return w\r\n \r\ndef test (test_data, w):\r\n n = len (test_data) # Size of testing dataset\r\n # Get features and classes\r\n features = test_data[:, :-1]\r\n classes = np.array(test_data[:,-1])\r\n # Make way for bias\r\n features = np.insert(features, 0, 1, axis = 1)\r\n predicted_classes = np.ravel(np.dot(w, features.transpose()))\r\n predicted_classes = np.where(predicted_classes > 0, 2, 1)\r\n difference = classes - predicted_classes\r\n n_correctly_predicted = (difference == 0).sum() # No. of correct predictions\r\n return n_correctly_predicted\r\n \r\ndef plotPLA (w, train_data, test_data):\r\n # Clear previous figure, if any\r\n plt.clf()\r\n x = np.arange (6)\r\n y = (- w.item(0,1) * x - w.item(0,0)) / w.item(0,2)\r\n # Plot the decision boundary\r\n plt.plot(x, y, color = \"green\", linewidth = 1)\r\n # Sort the training dataset and get the index where Class 2 starts\r\n train_data, b = get_break (train_data)\r\n plt.scatter (np.array(train_data[:b ,0]), np.array(train_data[:b ,1]), marker = \",\", color = \"blue\", s = 1, label = 'Training Data (Class 1)')\r\n plt.scatter (np.array(train_data[b: ,0]), np.array(train_data[b: ,1]), marker = \",\", color = \"red\", s = 1, label = 'Training Data (Class 2)')\r\n # Sort the testing dataset and get the index where Class 2 starts\r\n test_data, b = get_break (test_data)\r\n plt.scatter (np.array(test_data[:b ,0]), np.array(test_data[:b ,1]), marker = \"o\", color = \"blue\", s = 2, label = 'Testing Data (Class 1)')\r\n plt.scatter (np.array(test_data[b: ,0]), np.array(test_data[b: ,1]), marker = \"o\", color = \"red\", s = 2, label = 'Testing Data (Class 2)')\r\n plt.xlabel ('A')\r\n plt.ylabel ('B')\r\n plt.legend (loc = 'lower right')\r\n plt.title (\"Perceptron Learning Algorithm\")\r\n plt.savefig ('output_plot.png', dpi=2000)\r\n \r\ndef run_algorithm (highest_accuracy):\r\n # Read the csv file\r\n dataset = pd.read_csv('PLA Data.csv').to_numpy()\r\n # Split the data set\r\n train_data, test_data = split (dataset, 0.8)\r\n # Train the data\r\n w = train (train_data)\r\n # Test the data\r\n n_correctly_predicted = test (test_data, w)\r\n accuracy = n_correctly_predicted / len(test_data) * 100\r\n if accuracy > highest_accuracy:\r\n # Plot the data and update hhighest_accuracy\r\n plotPLA (w, train_data, test_data)\r\n highest_accuracy = accuracy\r\n # Display current runtime results\r\n print (\"No. of correct predictions (Total no. of predictions) = \" + str(n_correctly_predicted) + \" | (\" + str(len(test_data)) + \")\") \r\n print (\"Accuracy = \" + str(accuracy) + \"%\")\r\n return highest_accuracy\r\n\r\ndef main():\r\n times = 5\r\n highest_accuracy = 0\r\n print (\"Running the algorithm \" + str(times) + \" times...\")\r\n print (\"-------------------------------------------------------------------\")\r\n for t in range (times):\r\n print (\"For runtime no. 
\" + str(t + 1) + \" ------------------------------------------------\")\r\n highest_accuracy = run_algorithm (highest_accuracy)\r\n print (\"-------------------------------------------------------------------\")\r\n \r\nmain()","sub_path":"PLA/pla.py","file_name":"pla.py","file_ext":"py","file_size_in_byte":4579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"324043020","text":"#!/usr/bin/env python\n#pylint: skip-file\n\"\"\"\nCopyright 2016 Cisco Systems\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nclass VlanDTO(object):\n\n\n\n def __init__(self):\n \"\"\"\n Attributes:\n swaggerTypes (dict): The key is attribute name and the value is attribute type.\n attributeMap (dict): The key is attribute name and the value is json key in definition.\n \"\"\"\n self.swaggerTypes = {\n\n 'mask': 'int',\n\n\n 'prefix': 'str',\n\n\n 'vlanType': 'str',\n\n\n 'vlanNumber': 'int',\n\n\n 'interfaceName': 'str',\n\n\n 'numberOfIPs': 'int',\n\n\n 'ipAddress': 'str',\n\n\n 'networkAddress': 'str'\n\n }\n\n self.attributeMap = {\n\n 'mask': 'mask',\n\n 'prefix': 'prefix',\n\n 'vlanType': 'vlanType',\n\n 'vlanNumber': 'vlanNumber',\n\n 'interfaceName': 'interfaceName',\n\n 'numberOfIPs': 'numberOfIPs',\n\n 'ipAddress': 'ipAddress',\n\n 'networkAddress': 'networkAddress'\n\n }\n\n\n\n self.mask = None # int\n\n\n self.prefix = None # str\n\n\n self.vlanType = None # str\n\n\n self.vlanNumber = None # int\n\n\n self.interfaceName = None # str\n\n\n self.numberOfIPs = None # int\n\n\n self.ipAddress = None # str\n\n\n self.networkAddress = None # str\n\n","sub_path":"apis/nb/clients/inventory_manager_client/models/VlanDTO.py","file_name":"VlanDTO.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"469795394","text":"\nimport subprocess\n\nglobal ctr\nctr=0\ndef gen(xsize, zsize, cascade,opts):\n global ctr\n if cascade == 0:\n xs = [xsize/2]\n zs = [zsize/2]\n elif cascade == 1:\n xs = [(xsize+20)/2]\n zs = [(zsize+20)/2]\n elif cascade == 2:\n xs = [xsize/2]\n zs = [zsize/3, (2*zsize)/3]\n dirn = \"a\" +str(ctr)\n ctr+=1\n subprocess.call(\"mkdir \" + dirn, shell = True)\n fn = dirn + \"/a.idv\"\n with open(fn,\"w\") as fp:\n con = \"\"\"\ncompiler_flags: ['-O3', '-Kfast,parallel', '-Kocl', '-Klib', '-Koptmsg=2', '-Karray_private', '-Kinstance=8', '-Kdynamic_iteration', '-Kloop_fission', '-Kloop_part_parallel', '-Kloop_part_simd', '-Keval', '-Kreduction','-Kopenmp', '-Ksimd=2']\ncpp_sourcecode_url: /home/nushio/hub/formura/examples/3d-mhd-main-prof.cpp\nfmr_sourcecode_url: /home/nushio/hub/formura/examples/3d-mhd.fmr\nformura_version: ed1b6070f07bd74c0c676e3743887d48ecd7c5f3\nnumerical_config:\n initial_walls:\n x: {xs}\n y: {xs}\n z: {zs}\n intra_node_shape: [{x},{x},{z}]\n monitor_interval: 20\n mpi_grid_shape: [2,2,2]\n temporal_blocking_interval: 1\n option_strings: {opts}\n\"\"\".format(xs=xs,zs=zs, x=xsize,z=zsize,opts=opts)\n fp.write(con)\n\noptss = 
[]\noptss.append(['stick-all-comp', 'omp','omp-collapse'])\noptss.append(['stick-single-comp', 'omp','omp-collapse'])\noptss.append(['stick-all-comp'])\noptss.append(['stick-single-comp'])\n\nfor xsize in [64,128,256]:\n for zsize in [64,128,256]:\n if xsize > zsize:\n continue\n for cascade in [0,1,2]:\n for opts in optss:\n gen(xsize,zsize,cascade,opts)\n","sub_path":"individuals/mass-benchmark/gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"143699645","text":"from collections import OrderedDict\nfrom importlib import import_module\nimport os\n\nclass CmdDict(OrderedDict):\n\t\n\tdef __init__(self, *args):\n\t\t\n\t\tself.classes = {} # {name: class, ...}\n\t\t\n\t\tOrderedDict.__init__(self)\n\t\t\n\t\tself._load_classes()\n\t\n\tdef _load_classes(self):\n\t\t# helper function to load classes from a folder\n\t\t# return {name: class, ...}\n\n\t\tself.classes = {}\n\t\tfolder = self.__class__.__name__.lower()\n\t\tpath_classes = os.path.join(os.path.dirname(__file__), folder)\n\t\tfor file in os.listdir(path_classes):\n\t\t\tif file.startswith(\"_\") or (not file.endswith(\".py\")) or os.path.isdir(os.path.join(path_classes, file)):\n\t\t\t\tcontinue\n\t\t\tfile = file.split(\".\")[0]\n\t\t\tself.classes[file] = getattr(import_module(\"deposit.commander.%s.%s\" % (folder, file)), file)\n\n","sub_path":"deposit/commander/CmdDict.py","file_name":"CmdDict.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"62440969","text":"import requests\nfrom urllib.parse import quote_plus\nfrom django.shortcuts import render\nfrom bs4 import BeautifulSoup\nfrom . 
import models\n\n\nBASE_CRAIGSLIST_URL = 'https://www.daraz.com.bd/catalog/?q={}'\n\n\n# Create your views here.\ndef home(request):\n return render(request, 'base.html')\n\ndef new_search(request):\n search = request.POST.get('search')\n models.Search.objects.create(search=search)\n final_url = BASE_CRAIGSLIST_URL.format(quote_plus(search))\n print(final_url)\n response = requests.get(final_url)\n data = response.text\n soup = BeautifulSoup(data, features='html.parser')\n\n post_titles = soup.find_all('div', {'class': 'c5TXIP'})\n print(post_titles)\n #print(data)\n stuff_for_frontend = {\n 'search': search,\n }\n return render(request, 'myapp/new_search.html', stuff_for_frontend)\n","sub_path":"myapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"254788283","text":"class Solution:\n def numberOfLines(self, widths: List[int], S: str) -> List[int]:\n a = 'abcdefghijklmnopqrstuvwxyz'\n d = {}\n for i in range(len(a)):\n d[a[i]] = widths[i]\n currentWidth = 0\n out_count = 0\n for s in S:\n nextWidth = d[s]\n if currentWidth + nextWidth>100:\n currentWidth = nextWidth\n out_count += 1\n else:\n currentWidth += nextWidth\n return [out_count+1,currentWidth]\n ","sub_path":"LeetCode/python3/806_numberOfLines.py","file_name":"806_numberOfLines.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"264120580","text":"import csv\nimport cv2\nimport numpy as np\n\n# Load training datasets\nlines=[]\n# I record small datasets in simulator and combine them in a big one, see detail in report\ndatasets=['general_center_driving','curve_driving','recovery_driving']\nfor dataset in datasets:\n print('Start loading training dataset: %s' % dataset)\n with open('../driving_data/'+dataset+'/driving_log.csv','r') as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n line[0]= '../driving_data/'+dataset+'/IMG/'+line[0].split('\\\\')[-1]\n line[1]= '../driving_data/'+dataset+'/IMG/'+line[1].split('\\\\')[-1]\n line[2]= '../driving_data/'+dataset+'/IMG/'+line[2].split('\\\\')[-1]\n lines.append(line)\n\nimages=[]\nangles=[]\nfor line in lines:\n #print(line)\n angle_correction = [0.0, 0.3, -0.3] #Augment the data by using left/right camera and flip the image.\n for i in range(3):\n img=cv2.imread(line[i])\n angle=float(line[3]) + angle_correction[i]\n images.append(img)\n angles.append(angle)\n img_flipped = np.fliplr(img)\n angle_flipped = -angle\n images.append(img_flipped)\n angles.append(angle_flipped) \n\nimages = np.array(images)\nangles = np.array(angles)\n\nX_train = images\ny_train = angles\n\n# Load validation datasets\nlines=[]\ndatasets=['general_center_driving']\nfor dataset in datasets:\n print('Start loading validation dataset: %s' % dataset)\n with open('../driving_data/'+dataset+'/driving_log.csv','r') as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n line[0]= '../driving_data/'+dataset+'/IMG/'+line[0].split('\\\\')[-1]\n line[1]= '../driving_data/'+dataset+'/IMG/'+line[1].split('\\\\')[-1]\n line[2]= '../driving_data/'+dataset+'/IMG/'+line[2].split('\\\\')[-1]\n lines.append(line)\n\nimages=[]\nangles=[]\nfor line in lines:\n #print(line)\n angle_correction = [0.0, 0.3, -0.3] #Augment the data by using left/right camera and flip the image.\n for i in range(1):\n img=cv2.imread(line[i])\n angle=float(line[3]) + angle_correction[i]\n 
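# note: the validation pass reads only the center camera (range(1)) but still appends a horizontally flipped copy of each frame\n 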
images.append(img)\n angles.append(angle)\n img_flipped = np.fliplr(img)\n angle_flipped = -angle\n images.append(img_flipped)\n angles.append(angle_flipped) \n\nimages = np.array(images)\nangles = np.array(angles)\n\nX_val = images\ny_val = angles\n\n# Build the model\nfrom keras.models import Sequential\nfrom keras.layers import *\nfrom keras.callbacks import ModelCheckpoint\n\nmodel=Sequential()\nmodel.add(Lambda(lambda x:(x/255) - 0.5, input_shape=(160,320,3)))\nmodel.add(Cropping2D(cropping=((70,20), (0,0))))\nmodel.add(Conv2D(24,(5,5),strides=(2, 2),activation='relu'))\nmodel.add(Conv2D(36,(5,5),strides=(2, 2),activation='relu'))\nmodel.add(Conv2D(48,(5,5),strides=(2, 2),activation='relu'))\nmodel.add(Conv2D(64,(3,3),activation='relu'))\nmodel.add(Conv2D(64,(3,3),activation='relu'))\nmodel.add(Flatten())\n#model.add(Dropout(0.1))\nmodel.add(Dense(100,activation='relu'))\nmodel.add(Dense(50))\nmodel.add(Dense(10))\nmodel.add(Dense(1))\n\n\n# Define optimizer and train the model\nmodel.compile(loss='mse', optimizer='adam')\n\ncheckpoint = ModelCheckpoint('./model1/{epoch:02d}_{val_loss:.4f}.h5', verbose=True, monitor='val_loss',save_best_only=False, mode='auto') \nmodel.fit(X_train, y_train, validation_data=(X_val, y_val), shuffle=True, epochs=5, callbacks=[checkpoint])\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"354568468","text":"from flask import Flask\nfrom flask import render_template # template rendering\n\napp=Flask(__name__)\n@app.route('/') # home page route, a decorator\ndef news():\n the_news = {\n 'Dayou':'Full-stack Engineer',\n 'Zhilong':'Brave Worker',\n 'Aileen':'Web Architect',\n 'Wenteng':'Project Manager',\n 'John':'Team Client',\n }\n context={\n 'title':'Team 007',\n 'the_news': the_news,\n }\n return render_template('index.html',context=context)# read index.html and hand it to the browser\n\nif __name__=='__main__':\n app.run(host='0.0.0.0',debug=True,port=80)# 127.0.0.1 is the loopback address: the host talks to itself\n","sub_path":"Zhilong/myapp.py","file_name":"myapp.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"92210774","text":"#runs Google Vision on a batch of images and prints their output to individual text files. 
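Each image is sent as a single images().annotate request with the TEXT_DETECTION feature.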
\n#output .txts go in folder in the parent directory\n#written by James Gisele, james.may.gisele@gmail.com\n\nimport base64\nimport os\nimport io\nfrom google.cloud import vision\nfrom google.cloud.vision import types\nfrom googleapiclient.discovery import build\nAPIKEY = 'AIzaSyCcgY4MwelgM_TGBsyELyHxkDQ30GNnWAM'\n\ndef VisionTranscription(original_directory, final_directory):\n\tos.chdir(original_directory)\n\tif not os.path.isdir(final_directory):\n\t\tos.makedirs(final_directory)\n\tfor i in os.listdir(original_directory):\t#iterate through pngs in original directory\n\t\tfilename = os.path.splitext(i)[0]\n\t\tif os.path.splitext(i)[1] != '.png':\n\t\t\tcontinue\t#skip files that are not PNGs\n\t\tif filename[-5:] == \"00000\": #skipping the non-text images\n\t\t\tcontinue\n\t\telse:\n\t\t\tprint(i)\n\t\t\ttextname=os.path.splitext(i)[0]\n\t\t\timage = open(i, 'rb')\n\t\t\timage_content = base64.b64encode(image.read())\n\t\t\tvservice = build('vision', 'v1', developerKey=APIKEY)\n\t\t\tlanguage = 'eng'\n\t\t\trequest = vservice.images().annotate(body={\n\t\t\t\t\t'requests': [{\n\t\t\t\t\t\t\t\t'image': {\n\t\t\t\t\t\t\t\t\t\t\t'content': image_content.decode('UTF-8')\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t'imageContext': {\n\t\t\t\t\t\t\t\t\t\t\t'languageHints': [language]},\n\t\t\t\t\t\t\t\t\t\t\t'features': [{\n\t\t\t\t\t\t\t\t'type': 'TEXT_DETECTION'\n\t\t\t\t\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t\t\t\t\t}]\n\t\t\t\t\t\t\t})\n\t\t\tresponses = request.execute(num_retries=3)\n\t\t\toutput_text = responses['responses'][0]['textAnnotations'][0]['description']\n\t\t\tos.chdir(final_directory) #writing to file\n\t\t\tname_of_file = \"%s.txt\" % textname \n\t\t\toutput_file = open(name_of_file,'w')\n\t\t\ttry:\n\t\t\t\toutput_file.write(output_text)\n\t\t\t\toutput_file.close()\n\t\t\texcept UnicodeEncodeError: \n\t\t\t\toutput_file.write(\"ERROR\")\n\t\t\t\toutput_file.close()\n\t\t\tos.chdir(original_directory)\n\n\nog_directory = input(\"original directory?:\")\nfolder_name = os.path.basename(og_directory) + \"_txts\"\nfin_directory = os.path.dirname(og_directory) + \"\\%s\" % folder_name\nVisionTranscription(og_directory, fin_directory)\n\n","sub_path":"project1/assets/scripts/visiontranscription.py","file_name":"visiontranscription.py","file_ext":"py","file_size_in_byte":1971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"457377362","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\nEd Mountjoy (June 2018)\n\nFormats the output of bedtools into final format for loading\n'''\n\nimport argparse\nimport pandas as pd\nimport sys\n\n\ndef main():\n\n # Parse args\n args = parse_args()\n\n # Load data\n data = pd.read_csv(args.inf, sep='\\t', header=0)\n\n # Format data types\n data.chromosome = data.chromosome.astype(str)\n data.position = data.position.astype(int)\n data.Allele1 = data.Allele1.str.upper()\n data.Allele2 = data.Allele2.str.upper()\n\n # Convert log(P) to p-value\n data['pval'] = data['log(P)'].rpow(10)\n\n # Select required columns\n data = data.rename(columns={\n 'chromosome': 'chrom',\n 'position': 'pos',\n 'Allele1': 'effect_allele',\n 'Allele2': 'other_allele',\n 'Effect': 'beta',\n 'StdErr': 'se',\n 'pval': 'pval'})\n data = data.loc[:, ['chrom','pos', 'other_allele', 'effect_allele',\n 'beta', 'se', 'pval']]\n data = data.sort_values(['chrom', 'pos'])\n\n data.to_csv(args.outf, sep='\\t', index=None, compression='gzip')\n\n return 0\n\ndef parse_args():\n \"\"\" Load command line args \"\"\"\n parser = argparse.ArgumentParser()\n 
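# both --inf and --outf are required file paths; main() above writes the output table gzip-compressed\n 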
parser.add_argument('--inf', metavar=\"\", help=('Input file'), type=str, required=True)\n parser.add_argument('--outf', metavar=\"\", help=(\"Output file\"), type=str, required=True)\n args = parser.parse_args()\n return args\n\nif __name__ == '__main__':\n\n main()\n","sub_path":"scripts/sun2018_format_full_data.py","file_name":"sun2018_format_full_data.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"53853396","text":"def bairro_mais_custoso(dicio):\n listav=[]\n listab=[]\n for l,m in dicio.items():\n dicio[l]=sum(m[6:12])\n listav.append(sum(m[6:12]))\n listab.append(l)\n a=max(listav)\n z=listav.index(a)\n \n \n return listab[z]","sub_path":"backup/user_116/ch167_2020_06_10_17_37_47_565135.py","file_name":"ch167_2020_06_10_17_37_47_565135.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"431687092","text":"from sklearn.metrics.classification import accuracy_score\r\nfrom sklearn.multiclass import OneVsRestClassifier\r\nfrom sklearn.svm import LinearSVC\r\n\r\nimport numpy as np\r\nimport time\r\nimport keras\r\n\r\nimport sys\r\nsys.path.insert(0, '../utils')\r\nsys.path.insert(0, '../hyperNets')\r\n\r\nimport custom_functions as func\r\nfrom latent_hyper_net import LatentHyperNet\r\n\r\n\r\nif __name__ == '__main__':\r\n np.random.seed(12227)\r\n\r\n debug = True\r\n layers = [45, 52, 55]\r\n n_comp = 8\r\n dm_method = 'pls'\r\n\r\n X_train, y_train, X_test, y_test = func.cifar_vgg_data(debug)\r\n\r\n\r\n cnn_model = func.load_model(architecture_file='../architectures/cifar10VGG',\r\n weights_file='../weights/cifar10VGG++')\r\n\r\n id_layer = ''\r\n for i in range(0, len(cnn_model.layers)):\r\n id_layer += '['+str(i)+' '+ str(type(cnn_model.get_layer(index=i))).split('.')[-1].replace('>', '') + '] '\r\n print(id_layer)\r\n print('Layers{}'.format(layers)) if dm_method=='' else print('Layers{} Number of Components[{}] Method[{}]'.format(layers, n_comp, dm_method))\r\n\r\n if dm_method != '':\r\n hyper_net = LatentHyperNet(n_comp=n_comp, model=cnn_model, layers=layers, dm_method=dm_method)\r\n else:\r\n hyper_net = LatentHyperNet(model=cnn_model, layers=layers)\r\n\r\n if hyper_net.dm_method is not None:\r\n hyper_net.fit(X_train, y_train)\r\n X_train = hyper_net.transform(X_train)\r\n X_test = hyper_net.transform(X_test)\r\n else:\r\n X_train = hyper_net.get_features(X_train)\r\n X_test = hyper_net.get_features(X_test)\r\n\r\n model = LinearSVC(random_state=0)\r\n model = OneVsRestClassifier(model).fit(X_train, y_train)\r\n model.fit(X_train, y_train)\r\n\r\n tmp = model.predict(X_test)\r\n tmp = np.argmax(tmp, axis=1)\r\n\r\n acc = accuracy_score(y_test, tmp)\r\n print('Accuracy of [{:.4f}]'.format(acc))","sub_path":"CIFAR-10/main_LatentHyperNet.py","file_name":"main_LatentHyperNet.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"649995580","text":"import collections, re\nimport sys\nsys.path.append(\"..\") # => '../' goes to the parent directory so that nltk and openpyxl can be accessed\nfrom nltk.corpus import wordnet as wn\nfrom nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.tokenize import word_tokenize\nfrom nltk.wsd import lesk\nimport nltk\nimport openpyxl\n\nworkbook = openpyxl.load_workbook('sample_questions.xlsx')\nworksheets = 
workbook.sheetnames\ncells = workbook[worksheets[0]] # select first sheet\nquestions = []\nfor cells in cells.iter_rows(): #loop each row\n\tquestions.append(cells[1].value.strip())\n\t# print(cells[1].value.strip())\n\nlemmatizer = WordNetLemmatizer()\noutputs = list()\n\ndef wsd(src):\n q = nltk.pos_tag(set(word_tokenize(src)) - set(nltk.corpus.stopwords.words('english')))\n output = list()\n for x in q:\n \tif x[1].startswith('N') or x[1].startswith('P'):\n \t\toutput.append(lemmatizer.lemmatize(x[0], wn.NOUN))\n \telif x[1].startswith('J'):\n \t\toutput.append(lemmatizer.lemmatize(x[0], wn.ADJ))\n \telif x[1].startswith('V'):\n \t\toutput.append(lemmatizer.lemmatize(x[0], wn.VERB))\n \telif x[1].startswith('R'):\n \t\toutput.append(lemmatizer.lemmatize(x[0], wn.ADV))\n \telse:\n \t\tcontinue\n q = list()\n print(src)\n for index, word in enumerate(output):\n try:\n q.append(lesk(src, word))\n print(f\"{output[index]} => {q[index].definition()}\")\n except AttributeError:\n print(index)\n outputs.append(q)\n\nfor question in questions:\n\twsd(question)\nprint(outputs)","sub_path":"similarity-assessment/wsd.py","file_name":"wsd.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"477915788","text":"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\"\"\"\nDETR Transformer class.\n\nCopy-paste from torch.nn.Transformer with modifications:\n * positional encodings are passed in MHattention\n * extra LN at the end of encoder is removed\n * decoder returns a stack of activations from all decoding layers\n\"\"\"\nimport copy\nfrom typing import Optional, List\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn, Tensor\n\nimport os\nimport pdb\n\n\nclass CrossAttentionLayer(nn.Module):\n def __init__(self, \n in_channels, \n factor=2):\n super(CrossAttentionLayer, self).__init__()\n self.in_channels = in_channels\n self.latent_channels = in_channels // factor\n\n self.f_k, self.f_q, self.f_d, self.f_u = self._make_self_attention(in_channels, in_channels//factor)\n self.g_k, self.g_q, self.g_d, self.g_u = self._make_self_attention(in_channels, in_channels//factor)\n self.h_k, self.h_q, self.h_d, self.h_u = self._make_self_attention(in_channels, in_channels//factor)\n self.t_k, self.t_q, self.t_d, self.t_u = self._make_self_attention(in_channels, in_channels//factor)\n\n def _make_self_attention(self, in_channels, latent_channels):\n key_transform = nn.Sequential(\n nn.Conv2d(in_channels, latent_channels,\n kernel_size=1, stride=1, padding=0),\n nn.SyncBatchNorm(latent_channels),\n nn.ReLU(inplace=True)\n )\n query_transform = nn.Sequential(\n nn.Conv2d(in_channels, latent_channels,\n kernel_size=1, stride=1, padding=0),\n nn.SyncBatchNorm(latent_channels),\n nn.ReLU(inplace=True)\n )\n down_transform = nn.Conv2d(in_channels, latent_channels,\n kernel_size=1, stride=1, padding=0)\n up_transform = nn.Conv2d(latent_channels, in_channels,\n kernel_size=1, stride=1, padding=0)\n\n return key_transform, query_transform, down_transform, up_transform\n\n def forward(self, x_f, x_g, x_h, x_t):\n\n batch_size, channel, h, w = x_f.size()\n\n v_f = self.f_d(x_f).view(batch_size, h, w, self.latent_channels)\n v_g = self.g_d(x_g).view(batch_size, h, w, self.latent_channels)\n v_h = self.h_d(x_h).view(batch_size, h, w, self.latent_channels)\n v_t = self.t_d(x_t).view(batch_size, h, w, self.latent_channels)\n\n v = torch.cat((v_f[:,:,:,:,None], v_g[:,:,:,:,None], 
v_h[:,:,:,:,None], v_t[:,:,:,:,None]), 4)\n v = v.permute(0, 1, 2, 4, 3)\n\n q_f = self.f_q(x_f).view(batch_size, h, w, self.latent_channels)\n q_g = self.g_q(x_g).view(batch_size, h, w, self.latent_channels)\n q_h = self.h_q(x_h).view(batch_size, h, w, self.latent_channels)\n q_t = self.t_q(x_t).view(batch_size, h, w, self.latent_channels)\n q = torch.cat((q_f[:,:,:,:,None], q_g[:,:,:,:,None], q_h[:,:,:,:,None], q_t[:,:,:,:,None]), 4)\n q = q.permute(0, 1, 2, 4, 3)\n\n k_f = self.f_k(x_f).view(batch_size, h, w, self.latent_channels)\n k_g = self.g_k(x_g).view(batch_size, h, w, self.latent_channels)\n k_h = self.h_k(x_h).view(batch_size, h, w, self.latent_channels)\n k_t = self.t_k(x_t).view(batch_size, h, w, self.latent_channels)\n k = torch.cat((k_f[:,:,:,:,None], k_g[:,:,:,:,None], k_h[:,:,:,:,None], k_t[:,:,:,:,None]), 4)\n\n sim_map = torch.matmul(q, k)\n sim_map = (self.latent_channels**-.5) * sim_map\n sim_map = F.softmax(sim_map, dim=-1)\n\n context = torch.matmul(sim_map, v)\n context = context.permute(0, 1, 2, 4, 3).contiguous()\n\n context_f = context[:,:,:,:,0]\n context_g = context[:,:,:,:,1]\n context_h = context[:,:,:,:,2]\n context_t = context[:,:,:,:,3]\n\n out_f = x_f + self.f_u(context_f.permute(0, 3, 1, 2))\n out_g = x_g + self.g_u(context_g.permute(0, 3, 1, 2))\n out_h = x_h + self.h_u(context_h.permute(0, 3, 1, 2))\n out_t = x_t + self.t_u(context_t.permute(0, 3, 1, 2))\n\n return out_f, out_g, out_h, out_t\n\n\nclass CrossFusionLayer(nn.Module):\n def __init__(self, \n num_branches=4,\n num_inchannels=[18, 36, 72, 256]):\n super(CrossFusionLayer, self).__init__()\n self.num_branches = 4\n self.num_inchannels = [18, 36, 72, 256]\n self.fuse_layers = self._make_fuse_layers()\n self.relu = nn.ReLU(inplace=True)\n\n def _make_fuse_layers(self):\n fuse_layers = []\n for i in range(self.num_branches):\n fuse_layer = []\n for j in range(self.num_branches):\n if j > i:\n fuse_layer.append(nn.Sequential(\n nn.Conv2d(self.num_inchannels[j],\n self.num_inchannels[i],\n 1,\n 1,\n 0,\n bias=False),\n nn.SyncBatchNorm(self.num_inchannels[i])))\n elif j == i:\n fuse_layer.append(None)\n else:\n conv3x3s = []\n for k in range(i-j):\n if k == i - j - 1:\n num_outchannels_conv3x3 = self.num_inchannels[i]\n conv3x3s.append(nn.Sequential(\n nn.Conv2d(self.num_inchannels[j],\n num_outchannels_conv3x3,\n 3, 2, 1, bias=False),\n nn.SyncBatchNorm(num_outchannels_conv3x3)))\n else:\n num_outchannels_conv3x3 = self.num_inchannels[j]\n conv3x3s.append(nn.Sequential(\n nn.Conv2d(self.num_inchannels[j],\n num_outchannels_conv3x3,\n 3, 2, 1, bias=False),\n nn.SyncBatchNorm(num_outchannels_conv3x3),\n nn.ReLU(inplace=True)))\n fuse_layer.append(nn.Sequential(*conv3x3s))\n fuse_layers.append(nn.ModuleList(fuse_layer))\n\n return nn.ModuleList(fuse_layers)\n\n def forward(self, x):\n x_fuse = []\n\n for i in range(len(self.fuse_layers)):\n y = x[0] if i == 0 else self.fuse_layers[i][0](x[0])\n for j in range(1, self.num_branches):\n if i == j:\n y = y + x[j]\n elif j > i:\n width_output = x[i].shape[-1]\n height_output = x[i].shape[-2]\n y = y + F.interpolate(\n self.fuse_layers[i][j](x[j]),\n size=[height_output, width_output],\n mode='bilinear',\n align_corners=True\n )\n else:\n y = y + self.fuse_layers[i][j](x[j])\n x_fuse.append(self.relu(y))\n\n return x_fuse\n\n\nclass Transformer(nn.Module):\n\n def __init__(self, d_model=512, nhead=8, num_encoder_layers=6,\n num_decoder_layers=6, dim_feedforward=2048, dropout=0.1,\n activation=\"relu\", normalize_before=False,\n 
return_intermediate_dec=False):\n super().__init__()\n\n encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward,\n dropout, activation, normalize_before)\n encoder_norm = nn.LayerNorm(d_model) if normalize_before else None\n self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)\n\n decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward,\n dropout, activation, normalize_before)\n decoder_norm = nn.LayerNorm(d_model)\n self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm,\n return_intermediate=return_intermediate_dec)\n\n self._reset_parameters()\n self.d_model = d_model\n self.nhead = nhead\n\n # self.stride_4x_proj = nn.Sequential(\n # nn.Conv2d(18, 64, kernel_size=1),\n # nn.LayerNorm(64), # nn.GroupNorm(32, 256) or nn.LayerNorm(256) or nn.SyncBatchNorm(256)\n # nn.ReLU(inplace=True)\n # )\n # self.stride_8x_proj = nn.Sequential(\n # nn.Conv2d(36, 64, kernel_size=1),\n # nn.SyncBatchNorm(64),\n # nn.ReLU(inplace=True)\n # )\n # self.stride_16x_proj = nn.Sequential(\n # nn.Conv2d(72, 64, kernel_size=1),\n # nn.SyncBatchNorm(64),\n # nn.ReLU(inplace=True)\n # )\n # self.stride_32x_proj = nn.Sequential(\n # nn.Conv2d(144, 64, kernel_size=1),\n # nn.SyncBatchNorm(64),\n # nn.ReLU(inplace=True)\n # )\n\n def _reset_parameters(self):\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n\n def forward(self, src, mask, query_embed, pos_embed, src_list, mask_list, pos_embed_list):\n\n # encoder\n\n ''' CrossAttentionLayer usage '''\n _, _, h_4x, w_4x = src_list[0].size()\n # feat1 = self.stride_4x_proj(src_list[0])\n # feat2 = F.interpolate(self.stride_8x_proj(src_list[1]), size=(h_4x, w_4x), mode=\"bilinear\", align_corners=True)\n # feat3 = F.interpolate(self.stride_16x_proj(src_list[2]), size=(h_4x, w_4x), mode=\"bilinear\", align_corners=True)\n\n bs, c, h, w = src.shape\n src = src.flatten(2).permute(2, 0, 1)\n pos_embed = pos_embed.flatten(2).permute(2, 0, 1)\n query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)\n mask = mask.flatten(1)\n tgt = torch.zeros_like(query_embed)\n\n memory_4x = self.encoder(src, src_shape=[bs, c, h, w], \\\n feat_list=[src_list[0], src_list[1], src_list[2]], \\\n src_key_padding_mask=mask,\n pos=pos_embed)\n mask_4x = mask_list[0]\n mask_4x = mask_4x.flatten(1)\n pos_embed_4x = pos_embed_list[0].flatten(2).permute(2, 0, 1)\n\n # decoder\n hs = self.decoder(tgt, memory_4x, memory_key_padding_mask=mask_4x,\n pos=pos_embed_4x, query_pos=query_embed)\n return hs.transpose(1, 2), memory_4x.permute(1, 2, 0).view(bs, c, h_4x, w_4x)\n\n\n\nclass TransformerEncoder(nn.Module):\n\n def __init__(self, encoder_layer, num_layers, norm=None):\n super().__init__()\n self.layers = _get_clones(encoder_layer, num_layers)\n self.num_layers = num_layers\n self.norm = norm\n\n self.fuse_output_proj = nn.Sequential(\n nn.Conv2d(382, 256, kernel_size=1),\n nn.SyncBatchNorm(256),\n nn.ReLU(inplace=True)\n )\n\n # self.cross_atten = CrossAttentionLayer(256)\n self.cross_fusion = CrossFusionLayer(num_branches=4, num_inchannels=[18, 36, 72, 256])\n\n def forward(self, src, src_shape, feat_list,\n mask: Optional[Tensor] = None,\n src_key_padding_mask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None\n ):\n bs, c, h, w = src_shape\n _, _, h_4x, w_4x = feat_list[0].shape\n\n output = src\n output_4x = feat_list[0]\n output_8x = feat_list[1]\n output_16x = feat_list[2]\n\n for layer in self.layers:\n output = layer(output, src_mask=mask,\n 
src_key_padding_mask=src_key_padding_mask, pos=pos)\n ''' CrossAttentionLayer usage '''\n # output_32x = F.interpolate(output.permute(1, 2, 0).view(bs, c, h, w), size=(h_4x, w_4x), mode=\"bilinear\", align_corners=True)\n # output_4x, output_8x, output_16x, output_32x = self.cross_atten(output_4x, output_8x, output_16x, output_32x)\n # output = F.interpolate(output_32x, size=(h, w), mode=\"bilinear\", align_corners=True)\n\n ''' CrossFusionLayer usage '''\n output_32x = output.permute(1, 2, 0).view(bs, c, h, w)\n output_4x, output_8x, output_16x, output_32x = self.cross_fusion([output_4x, output_8x, output_16x, output_32x])\n output = output_32x.flatten(2).permute(2, 0, 1)\n\n output_32x = F.interpolate(output_32x, size=(h_4x, w_4x), mode=\"bilinear\", align_corners=True)\n output_16x = F.interpolate(output_16x, size=(h_4x, w_4x), mode=\"bilinear\", align_corners=True)\n output_8x = F.interpolate(output_8x, size=(h_4x, w_4x), mode=\"bilinear\", align_corners=True)\n output_4x = F.interpolate(output_4x, size=(h_4x, w_4x), mode=\"bilinear\", align_corners=True)\n\n output = torch.cat([output_4x, output_8x, output_16x, output_32x], 1)\n output = self.fuse_output_proj(output)\n output = output.flatten(2).permute(2, 0, 1)\n\n if self.norm is not None:\n output = self.norm(output)\n\n return output\n\n\nclass TransformerDecoder(nn.Module):\n\n def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):\n super().__init__()\n self.layers = _get_clones(decoder_layer, num_layers)\n self.num_layers = num_layers\n self.norm = norm\n self.return_intermediate = return_intermediate\n\n def forward(self, tgt, memory,\n tgt_mask: Optional[Tensor] = None,\n memory_mask: Optional[Tensor] = None,\n tgt_key_padding_mask: Optional[Tensor] = None,\n memory_key_padding_mask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None,\n query_pos: Optional[Tensor] = None):\n output = tgt\n\n intermediate = []\n\n for layer in self.layers:\n output = layer(output, memory, tgt_mask=tgt_mask,\n memory_mask=memory_mask,\n tgt_key_padding_mask=tgt_key_padding_mask,\n memory_key_padding_mask=memory_key_padding_mask,\n pos=pos, query_pos=query_pos)\n if self.return_intermediate:\n intermediate.append(self.norm(output))\n\n if self.norm is not None:\n output = self.norm(output)\n if self.return_intermediate:\n intermediate.pop()\n intermediate.append(output)\n\n if self.return_intermediate:\n return torch.stack(intermediate)\n\n return output.unsqueeze(0)\n\n\nclass TransformerEncoderLayer(nn.Module):\n\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,\n activation=\"relu\", normalize_before=False):\n super().__init__()\n self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)\n # Implementation of Feedforward model\n self.linear1 = nn.Linear(d_model, dim_feedforward)\n self.dropout = nn.Dropout(dropout)\n self.linear2 = nn.Linear(dim_feedforward, d_model)\n\n self.norm1 = nn.LayerNorm(d_model)\n self.norm2 = nn.LayerNorm(d_model)\n self.dropout1 = nn.Dropout(dropout)\n self.dropout2 = nn.Dropout(dropout)\n\n self.activation = _get_activation_fn(activation)\n self.normalize_before = normalize_before\n\n def with_pos_embed(self, tensor, pos: Optional[Tensor]):\n return tensor if pos is None else tensor + pos\n\n def forward_post(self,\n src,\n src_mask: Optional[Tensor] = None,\n src_key_padding_mask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None):\n q = k = self.with_pos_embed(src, pos)\n src2 = self.self_attn(q, k, value=src, attn_mask=src_mask,\n 
key_padding_mask=src_key_padding_mask)[0]\n src = src + self.dropout1(src2)\n src = self.norm1(src)\n src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))\n src = src + self.dropout2(src2)\n src = self.norm2(src)\n return src\n\n def forward_pre(self, src,\n src_mask: Optional[Tensor] = None,\n src_key_padding_mask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None):\n src2 = self.norm1(src)\n q = k = self.with_pos_embed(src2, pos)\n src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask,\n key_padding_mask=src_key_padding_mask)[0]\n src = src + self.dropout1(src2)\n src2 = self.norm2(src)\n src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))\n src = src + self.dropout2(src2)\n return src\n\n def forward(self, src,\n src_mask: Optional[Tensor] = None,\n src_key_padding_mask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None):\n if self.normalize_before:\n return self.forward_pre(src, src_mask, src_key_padding_mask, pos)\n return self.forward_post(src, src_mask, src_key_padding_mask, pos)\n\n\nclass TransformerDecoderLayer(nn.Module):\n\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,\n activation=\"relu\", normalize_before=False):\n super().__init__()\n self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)\n self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)\n # Implementation of Feedforward model\n self.linear1 = nn.Linear(d_model, dim_feedforward)\n self.dropout = nn.Dropout(dropout)\n self.linear2 = nn.Linear(dim_feedforward, d_model)\n\n self.norm1 = nn.LayerNorm(d_model)\n self.norm2 = nn.LayerNorm(d_model)\n self.norm3 = nn.LayerNorm(d_model)\n self.dropout1 = nn.Dropout(dropout)\n self.dropout2 = nn.Dropout(dropout)\n self.dropout3 = nn.Dropout(dropout)\n\n self.activation = _get_activation_fn(activation)\n self.normalize_before = normalize_before\n\n def with_pos_embed(self, tensor, pos: Optional[Tensor]):\n return tensor if pos is None else tensor + pos\n\n def forward_post(self, tgt, memory,\n tgt_mask: Optional[Tensor] = None,\n memory_mask: Optional[Tensor] = None,\n tgt_key_padding_mask: Optional[Tensor] = None,\n memory_key_padding_mask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None,\n query_pos: Optional[Tensor] = None):\n q = k = self.with_pos_embed(tgt, query_pos)\n tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,\n key_padding_mask=tgt_key_padding_mask)[0]\n tgt = tgt + self.dropout1(tgt2)\n tgt = self.norm1(tgt)\n tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),\n key=self.with_pos_embed(memory, pos),\n value=memory, attn_mask=memory_mask,\n key_padding_mask=memory_key_padding_mask)[0]\n tgt = tgt + self.dropout2(tgt2)\n tgt = self.norm2(tgt)\n tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))\n tgt = tgt + self.dropout3(tgt2)\n tgt = self.norm3(tgt)\n return tgt\n\n def forward_pre(self, tgt, memory,\n tgt_mask: Optional[Tensor] = None,\n memory_mask: Optional[Tensor] = None,\n tgt_key_padding_mask: Optional[Tensor] = None,\n memory_key_padding_mask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None,\n query_pos: Optional[Tensor] = None):\n tgt2 = self.norm1(tgt)\n q = k = self.with_pos_embed(tgt2, query_pos)\n tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,\n key_padding_mask=tgt_key_padding_mask)[0]\n tgt = tgt + self.dropout1(tgt2)\n tgt2 = self.norm2(tgt)\n tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),\n key=self.with_pos_embed(memory, pos),\n 
value=memory, attn_mask=memory_mask,\n key_padding_mask=memory_key_padding_mask)[0]\n tgt = tgt + self.dropout2(tgt2)\n tgt2 = self.norm3(tgt)\n tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))\n tgt = tgt + self.dropout3(tgt2)\n return tgt\n\n def forward(self, tgt, memory,\n tgt_mask: Optional[Tensor] = None,\n memory_mask: Optional[Tensor] = None,\n tgt_key_padding_mask: Optional[Tensor] = None,\n memory_key_padding_mask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None,\n query_pos: Optional[Tensor] = None):\n if self.normalize_before:\n return self.forward_pre(tgt, memory, tgt_mask, memory_mask,\n tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)\n return self.forward_post(tgt, memory, tgt_mask, memory_mask,\n tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)\n\n\ndef _get_clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for i in range(N)])\n\n\ndef build_cross_transformer(args):\n return Transformer(\n d_model=args.hidden_dim,\n dropout=args.dropout,\n nhead=args.nheads,\n dim_feedforward=args.dim_feedforward,\n num_encoder_layers=args.enc_layers,\n num_decoder_layers=args.dec_layers,\n normalize_before=args.pre_norm,\n return_intermediate_dec=True,\n )\n\n\ndef _get_activation_fn(activation):\n \"\"\"Return an activation function given a string\"\"\"\n if activation == \"relu\":\n return F.relu\n if activation == \"gelu\":\n return F.gelu\n if activation == \"glu\":\n return F.glu\n raise RuntimeError(F\"activation should be relu/gelu, not {activation}.\")\n","sub_path":"hrnet/cross_transformer.py","file_name":"cross_transformer.py","file_ext":"py","file_size_in_byte":22217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"530162754","text":"import csv\n\n#initializing an empty list\ndata = []\n#opening up the test data and appending each row to the list \nwith open(\"acled-nig-2011.csv\") as file:\n for row in csv.reader(file):\n data.append(row)\n\n#defining the function; immutable objects used as default arguments \ndef select(data = None, variable = None):\n #adding function help information\n '''This function takes a dataset.csv and checks if a specific variable is in the dataset and returns the corresponding variable inputs\n\n\n Arguments:\n data: read in dataset\n variable: a specific variable observed within said dataset\n\n Returns:\n Returns the variable's data inputs in the dataset\n\n Raises:\n Displays error if variable is not found within the dataset\n '''\n #testing if the variable is within the list, data[0] contains all the variable names\n if variable not in data[0]:\n #displays error if condition is true\n print(f\"{variable} is not a variable in the data. Please choose another variable.\")\n else:\n #finding the specific index of the variable \n index=data[0].index(variable)\n #creating a list that will hold the values of the specific variable\n vdata = []\n #searching the dataset for everything beyond data[0] as that is the list that contains the variable names\n for i in data[1:len(data)]:\n #taking data at specified index and appending it to the list we want\n vdata.append(i[index])\n #printing the list with commas \n print(*vdata, sep = \", \") \n \n \n \n#Data tests \nselect(data, \"ADMIN1\")\nselect(data, \"var123\")\n\n\n#Neha T comments: \n#looks good to me. Can't think of any suggestion that would improve the code substantially. 
\n#can enable user input of var name using input() fn\n#Another suggestion would be to write var.upper() instead of just var. I forgot to do that","sub_path":"cad162_dawson.py","file_name":"cad162_dawson.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"157663085","text":"import pandas as pd\nfrom gensim.models import word2vec\n\nfor num in range(1,5):\n modelname = 'review_model_score{}.model'.format(num)\n model = word2vec.Word2Vec.load(modelname)\n\n print(len(model.wv.vocab)) # 15189\n print(model.wv.vector_size) # 200\n topn_len = len(model.wv.vocab)\n vocabs = model.wv.vocab\n keys = list(vocabs.keys())\n print(type(keys))\n\n column_dict = dict()\n for x in keys:\n column_dict[x] = list()\n index_list = list()\n\n for key in keys:\n row = model.wv.most_similar(positive=[key],topn=topn_len )\n for x,y in row:\n column_dict[x].append(y)\n column_dict[key].append(1)\n print('finish step1')\n similar_frame = pd.DataFrame(column_dict,index=keys)\n similar_frame.to_csv('review_similar_matrix_score{}.csv'.format(num))\n print('finish step2')","sub_path":"review_data/make_similar_matrix.py","file_name":"make_similar_matrix.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"384186839","text":"from typing import List, Dict, Any\n\nimport numpy as np\nimport torch\n\n\nclass CropCenter(object):\n \"\"\"Crops center of image.\n :args:\n - sample - image from dataset.\n :returns:\n - sample - cropped center of image.\"\"\"\n\n def __init__(self, size=128, elem_name: str = \"image\") -> None:\n self.size = size\n self.elem_name = elem_name\n\n def __call__(self, sample: Dict) -> Dict:\n img = sample[self.elem_name]\n h, w, _ = img.shape\n margin_h = (h - self.size) // 2\n margin_w = (w - self.size) // 2\n sample[self.elem_name] = img[margin_h : margin_h + self.size, margin_w : margin_w + self.size]\n sample[\"crop_margin_x\"] = margin_w\n sample[\"crop_margin_y\"] = margin_h\n\n if \"landmarks\" in sample:\n landmarks = sample[\"landmarks\"].reshape(-1, 2)\n landmarks -= torch.tensor((margin_w, margin_h), dtype=landmarks.dtype)[None, :]\n sample[\"landmarks\"] = landmarks.reshape(-1)\n\n return sample\n","sub_path":"src/transforms/crop.py","file_name":"crop.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"219633903","text":"import discord\r\nimport asyncio\r\nimport aiohttp\r\nfrom discord import embeds\r\nfrom discord.message import Message\r\nfrom redbot.core import commands, Config, checks\r\nimport copy\r\n\r\n\r\nsleep_time = 900\r\nfame_emoji = \"<:fame:757940151845519411>\"\r\ndonations_emoji = \"<:donations:844657488389472338>\"\r\n\r\n\r\nclass FameLeaderboard(commands.Cog):\r\n \"\"\"Auto-updating leaderboard of clans\"\"\"\r\n\r\n def __init__(self, bot):\r\n self.bot = bot\r\n self.tags = self.bot.get_cog('ClashRoyaleTools').tags\r\n self.update_embed_task = bot.loop.create_task(self.update_embed())\r\n self.config = Config.get_conf(self, identifier=2345341233)\r\n default_settings = {\"main\": {\"server_id\": None,\r\n \"use\": False,\r\n \"channel_id\": None},\r\n \"clan_servers\": {\r\n \"Dragons Eight\": {\r\n \"server_id\": None,\r\n \"use\": False,\r\n \"channel_id\": None,\r\n \"tag\": \"29YPJYY\",\r\n \"nickname\": \"D8\",\r\n },\r\n \"LeGeND Legion!\": {\r\n \"server_id\": None,\r\n 
\"use\": False,\r\n \"channel_id\": None,\r\n \"tag\": \"VJQ0GJ0\",\r\n \"nickname\": \"Legion\",\r\n \r\n },\r\n \"Dragons Eight 2\": {\r\n \"server_id\": None,\r\n \"use\": False,\r\n \"channel_id\": None,\r\n \"tag\": \"Y8G9C09\",\r\n \"nickname\": \"D82\"\r\n },\r\n \"LeGeND Squad!\": {\r\n \"server_id\": None,\r\n \"use\": False,\r\n \"channel_id\": None,\r\n \"tag\": \"80CC8\",\r\n \"nickname\": \"Squad\"\r\n },\r\n \"LeGeND Prime!\": {\r\n \"server_id\": None,\r\n \"use\": False,\r\n \"channel_id\": None,\r\n \"tag\": \"Q0JJ2GG2\",\r\n \"nickname\": \"Prime\"\r\n },\r\n \"LeGeND Empire!\": {\r\n \"server_id\": None,\r\n \"use\": False,\r\n \"channel_id\": None,\r\n \"tag\": \"9P2PQULQ\",\r\n \"nickname\": \"Empire\"\r\n },\r\n \"LeGeND Dynasty!\": {\r\n \"server_id\": None,\r\n \"use\": False,\r\n \"channel_id\": None,\r\n \"tag\": \"99R2PQVR\",\r\n \"nickname\": \"Dynasty\"\r\n },\r\n \"LeGeND eSports!\": {\r\n \"server_id\": None,\r\n \"use\": False,\r\n \"channel_id\": None,\r\n \"tag\": \"P9GG9QQY\",\r\n \"nickname\": \"eSports\"\r\n },\r\n \"White Plague\": {\r\n \"server_id\": None,\r\n \"use\": False,\r\n \"channel_id\": None,\r\n \"tag\": \"J0CQ9R9\",\r\n \"nickname\": \"Plague\"\r\n },\r\n \"Dragons Eight 3\": {\r\n \"server_id\": None,\r\n \"use\": False,\r\n \"channel_id\": None,\r\n \"tag\": \"PRCRJYCR\",\r\n \"nickname\": \"D83\"\r\n },\r\n \"LeGeND Phantom!\": {\r\n \"server_id\": None,\r\n \"use\": False,\r\n \"channel_id\": None,\r\n \"tag\": \"2CJ88808\",\r\n \"nickname\": \"Phantom\"\r\n },\r\n \"LeGeND Pride!\": {\r\n \"server_id\": None,\r\n \"use\": False,\r\n \"channel_id\": None,\r\n \"tag\": \"YLULCRQJ\",\r\n \"nickname\": \"Pride\"\r\n }\r\n }}\r\n self.config.register_global(**default_settings)\r\n\r\n async def update_embed(self):\r\n try:\r\n await asyncio.sleep(10) # Start-up Time\r\n while True:\r\n main = await self.config.main()\r\n clans = await self.config.clan_servers()\r\n main_embed, clan_embeds = await self.get_data_fame()\r\n if main['use'] == True:\r\n main_guild_id = main['server_id']\r\n main_channel_id = main[\"channel_id\"]\r\n main_guild = self.bot.get_guild(main_guild_id)\r\n main_channel = main_guild.get_channel(main_channel_id)\r\n if main.get('last_message_id') == None:\r\n message = await main_channel.send(embed=main_embed)\r\n async with self.config.main() as data:\r\n data['last_message_id'] = message.id\r\n else:\r\n last_mes_id = main['last_message_id']\r\n try:\r\n message = await main_channel.fetch_message(last_mes_id)\r\n await message.delete()\r\n except Exception as e:\r\n async with self.config.main() as data:\r\n data['last_message_id'] = None\r\n print(e)\r\n message = await main_channel.send(embed=main_embed)\r\n async with self.config.main() as data:\r\n data['last_message_id'] = message.id\r\n if clan_embeds != None:\r\n for clan in clans:\r\n x = clans[clan]\r\n if clans[clan]['use'] == True:\r\n clan_guild = self.bot.get_guild(\r\n clans[clan]['server_id'])\r\n clan_channel = clan_guild.get_channel(\r\n clans[clan]['channel_id'])\r\n if x.get('last_message_id') == None:\r\n # some edge case scenario\r\n if clan_embeds.get(clans[clan]['tag']) == None:\r\n pass\r\n else:\r\n clan_emb = clan_embeds[clans[clan]['tag']]\r\n message = await clan_channel.send(embed=clan_emb)\r\n async with self.config.clan_servers() as data:\r\n data[clan]['last_message_id'] = message.id\r\n else:\r\n # some edge case scenario\r\n if clan_embeds.get(clans[clan]['tag']) == None:\r\n pass\r\n else:\r\n last_mes_id = 
clans[clan]['last_message_id']\r\n try:\r\n message = await clan_channel.fetch_message(last_mes_id)\r\n await message.delete()\r\n except Exception as e:\r\n print(e)\r\n clan_emb = clan_embeds[clans[clan]['tag']]\r\n message = await clan_channel.send(embed=clan_emb)\r\n async with self.config.clan_servers() as data:\r\n data[clan]['last_message_id'] = message.id\r\n empire_data = clans['LeGeND Empire!']\r\n if empire_data.get('use') == True:\r\n try:\r\n to_send = await self.empire_losers()\r\n except Exception as e:\r\n print(e)\r\n emp = self.bot.get_guild(\r\n clans['LeGeND Empire!']['server_id'])\r\n channel_to_send = emp.get_channel(\r\n empire_data.get('channel_id'))\r\n if empire_data.get('last_reverse') == None:\r\n try:\r\n message = await channel_to_send.send(embed=to_send)\r\n except Exception as e:\r\n print(e)\r\n async with self.config.clan_servers() as clan:\r\n clan['LeGeND Empire!']['last_reverse'] = message.id\r\n else:\r\n try:\r\n message = await channel_to_send.fetch_message(empire_data.get('last_reverse'))\r\n await message.delete()\r\n except Exception as e:\r\n print(e)\r\n message = await channel_to_send.send(embed=to_send)\r\n async with self.config.clan_servers() as clan:\r\n clan['LeGeND Empire!']['last_reverse'] = message.id\r\n # Run Every X seconds\r\n await asyncio.sleep(sleep_time)\r\n except asyncio.CancelledError:\r\n pass\r\n\r\n async def crtoken(self):\r\n # Clash Royale API\r\n token = await self.bot.get_shared_api_tokens(\"clashroyalestatic\")\r\n if token.get('token') is None:\r\n print(\r\n \"CR Token is not SET. Use !set api clashroyale token,YOUR_TOKEN to set it\")\r\n raise RuntimeError\r\n self.headers = {'authorization': 'Bearer {}'.format(token['token'])}\r\n\r\n def cog_unload(self):\r\n self.update_embed_task.cancel()\r\n\r\n async def ldb_to_emb(self, ldb, base_embed, clan_spec: bool = False):\r\n # This all looks weird but it's embed formatting\r\n pos = 25\r\n if clan_spec == True:\r\n pos = 4\r\n podium = ['🥇', '🥈', '🥉']\r\n for i, memb in enumerate(ldb):\r\n if(i > pos):\r\n break\r\n title = str(i+1)\r\n if(i < 3):\r\n title = podium[i]\r\n\r\n title += ' - ' + str(memb['fame']) + fame_emoji\r\n\r\n # Find discord user\r\n value = ''\r\n try:\r\n users = self.tags.getUser(memb['tag'].strip('#'))\r\n for user in users:\r\n value += f'<@{user[0]}> - '\r\n except Exception as e:\r\n print(e)\r\n\r\n # Get Clan\r\n clan = ''\r\n uurl = 'https://proxy.royaleapi.dev/v1/players/%23{}/'.format(\r\n memb['tag'].strip('#'))\r\n async with aiohttp.ClientSession() as client:\r\n async with client.get(uurl, headers=self.headers) as resp:\r\n if(resp.status != 200):\r\n return discord.Embed(title='Clash Royale API Error', description='Clash Royale API is offline... 
data cannot be retrieved :(')\r\n data = await resp.json()\r\n try:\r\n clan = f\"| {data['clan']['name']}\"\r\n except Exception:\r\n clan = ''\r\n\r\n value += f\"{memb['name']} ({memb['tag']}) {clan}\"\r\n base_embed.add_field(name=title, value=value, inline=False)\r\n\r\n return base_embed\r\n\r\n async def get_data_fame(self) -> discord.Embed:\r\n embed = discord.Embed(title=\"Legend Clash Royale Fame Leaderboard\",\r\n description='These are the top fame contributors from Legend Clans in the current river race!', color=0x80ff00)\r\n embed.set_thumbnail(\r\n url=\"https://static.wikia.nocookie.net/clashroyale/images/9/9f/War_Shield.png/revision/latest?cb=20180425130200\")\r\n embed.set_footer(text=\"Bot by: Legend Dev Team\",\r\n icon_url=\"https://cdn.discordapp.com/emojis/709796075581735012.gif?v=1\")\r\n\r\n members = [] # Runs in O(n log n) where n is the amount of members\r\n clan_mem_dict = dict()\r\n legend_clans = await self.config.clan_servers()\r\n for clan_data in legend_clans:\r\n if legend_clans[clan_data]['tag'] == \"9PJYVVL2\":\r\n continue \r\n url = 'https://proxy.royaleapi.dev/v1/clans/%23{}/currentriverrace'.format(\r\n legend_clans[clan_data]['tag'])\r\n async with aiohttp.ClientSession() as client:\r\n async with client.get(url, headers=self.headers) as resp:\r\n if(resp.status != 200):\r\n return discord.Embed(title=f'Clash Royale API Error({legend_clans[clan_data][\"tag\"]})', description='Clash Royale API is offline... data cannot be retrieved :('), None\r\n data = await resp.json()\r\n members.extend(data['clan']['participants'])\r\n if legend_clans[clan_data]['use'] == True:\r\n clan_mems = data['clan']['participants']\r\n sorted_clan_mems = sorted(\r\n clan_mems, key=lambda x: x['fame'], reverse=True)\r\n clan_mem_dict[legend_clans[clan_data]\r\n ['tag']] = sorted_clan_mems\r\n\r\n # sorts in descending order\r\n ldb = sorted(members, key=lambda member: -member['fame'])\r\n main_emb = await self.ldb_to_emb(ldb=ldb, base_embed=embed)\r\n if len(clan_mem_dict) == 0:\r\n return main_emb, None\r\n else:\r\n embed_dict = dict()\r\n for tag in clan_mem_dict:\r\n for clan in legend_clans:\r\n if legend_clans[clan]['tag'] == tag:\r\n embed = discord.Embed(title=f\"{clan.strip('!')} Fame Leaderboard\",\r\n description=f'These are the top fame contributors from {clan} in the current river race, they are even better than SK Morten.', color=0x80ff00)\r\n embed.set_thumbnail(\r\n url=\"https://static.wikia.nocookie.net/clashroyale/images/9/9f/War_Shield.png/revision/latest?cb=20180425130200\")\r\n embed.set_footer(text=\"Bot by: Legend Dev Team\",\r\n icon_url=\"https://cdn.discordapp.com/emojis/709796075581735012.gif?v=1\")\r\n em = await self.ldb_to_emb(ldb=clan_mem_dict[tag], base_embed=embed, clan_spec=True)\r\n embed_dict[tag] = em\r\n return main_emb, embed_dict\r\n\r\n async def check_membership(self, riverrace_data):\r\n url = 'https://proxy.royaleapi.dev/v1/clans/%239P2PQULQ/members'\r\n async with aiohttp.ClientSession() as client:\r\n async with client.get(url=url, headers=self.headers) as resp:\r\n data = await resp.json()\r\n member_list = data['items']\r\n found_members = []\r\n for member in riverrace_data:\r\n tag = member['tag']\r\n for clan_mem in member_list:\r\n if tag == clan_mem['tag']:\r\n found_members.append(member)\r\n break\r\n final = sorted(found_members, key=lambda x: x['fame'])\r\n return final\r\n\r\n def embed_for_bottom(self, rectified_data, base_embed):\r\n for i, memb in enumerate(rectified_data):\r\n if(i > 4):\r\n break\r\n title = 
str(50-i)\r\n title += ' - ' + str(memb['fame']) + fame_emoji\r\n value = ''\r\n try:\r\n users = self.tags.getUser(memb['tag'].strip('#'))\r\n for user in users:\r\n value += f'<@{user[0]}> - '\r\n except Exception as e:\r\n print(e)\r\n value += f\"{memb['name']} ({memb['tag']})\"\r\n base_embed.add_field(name=title, value=value, inline=False)\r\n return base_embed\r\n\r\n async def empire_losers(self):\r\n url = 'https://proxy.royaleapi.dev/v1/clans/%239P2PQULQ/currentriverrace'\r\n async with aiohttp.ClientSession() as client:\r\n async with client.get(url=url, headers=self.headers) as resp:\r\n if(resp.status != 200):\r\n return discord.Embed(title='Clash Royale API Error', description='Clash Royale API is offline... data cannot be retrieved :(')\r\n data = await resp.json()\r\n participants = data['clan']['participants']\r\n members_in_clan = await self.check_membership(participants)\r\n embed = discord.Embed(title=f\"Legend Empire Lowest Fame Contributors\",\r\n description=f'These are the lowest fame contributors from LeGeND Empire! in the current river race, they are even worse than Sai Namrath LMAO.', color=discord.Color.red())\r\n embed.set_thumbnail(\r\n 
url=\"https://cdn.royaleapi.com/static/img/badge/legendary-1/Fugi_03.png?t=494e7fc1c\")\r\n embed.set_footer(text=\"Bot by: Threat Level Dev Team\")\r\n\r\n members = [] # Runs in O(n log n) where n is the amount of members\r\n\r\n for clantag in tl_clans:\r\n url = 'https://proxy.royaleapi.dev/v1/clans/%23{}/'.format(clantag)\r\n async with aiohttp.ClientSession() as client:\r\n async with client.get(url, headers=self.headers) as resp:\r\n if(resp.status != 200):\r\n return discord.Embed(title='Clash Royale API Error', description='Clash Royale API is offline... data cannot be retreived :(')\r\n data = await resp.json()\r\n members.extend(data['memberList'])\r\n # sorts in descending order\r\n ldb = sorted(members, key=lambda member: -member['donations'])\r\n\r\n # This all looks weird but it's embed formatting\r\n for i, memb in enumerate(ldb):\r\n if(i > 9):\r\n break\r\n title = str(i+1)\r\n if(i < 3):\r\n title = podium[i]\r\n\r\n title += ' - ' + str(memb['donations']) + donations_emoji\r\n\r\n # Find discord user\r\n value = ''\r\n try:\r\n users = self.tags.getUser(memb['tag'].strip('#'))\r\n for user in users:\r\n value += f'<@{user[0]}> - '\r\n except Exception as e:\r\n print(e)\r\n\r\n # Get Clan\r\n clan = ''\r\n uurl = 'https://proxy.royaleapi.dev/v1/players/%23{}/'.format(\r\n memb['tag'].strip('#'))\r\n async with aiohttp.ClientSession() as client:\r\n async with client.get(uurl, headers=self.headers) as resp:\r\n if(resp.status != 200):\r\n return discord.Embed(title='Clash Royale API Error', description='Clash Royale API is offline... data cannot be retreived :(')\r\n data = await resp.json()\r\n clan = data['clan']['name']\r\n\r\n value += f\"{memb['name']} ({memb['tag']}) | {clan} \"\r\n embed.add_field(name=title, value=value, inline=False)\r\n\r\n return embed\r\n\r\n @commands.command()\r\n async def topdonations(self, ctx):\r\n Get Top 10 Donators this week\r\n async with ctx.typing():\r\n embed = await self.get_data_donations()\r\n await ctx.send(embed=embed)\"\"\"\r\n","sub_path":"fameldb/fameldb.py","file_name":"fameldb.py","file_ext":"py","file_size_in_byte":21931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"362166509","text":"import requests\nimport time\nfrom bs4 import BeautifulSoup\nimport json\nimport logging\nlogger = logging.getLogger(__name__)\nimport pprint\npp = pprint.PrettyPrinter(indent=2)\n\n\nclass Google:\n def __init__(self):\n self.session = requests.Session()\n ua = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'\n self.headers = {'User-Agent': ua, 'Referer':'https://www.google.com'}\n self.search_url = \"https://www.google.com/search?q={}&source=lnms&tbm=isch&safe=active\" # SafeSearch : on\n\n def get_result(self, keyword, socks5_port=''):\n try:\n time.sleep(1) #Sleep 1 seconds to avoid banning\n if socks5_port:\n proxy = 'socks5://127.0.0.1:{}'.format(socks5_port)\n _request = self.session.get(self.search_url.format(keyword), headers=self.headers, proxies = {'http': proxy,'https': proxy}, timeout=5)\n else:\n _request = self.session.get(self.search_url.format(keyword), headers=self.headers, timeout=5)\n if _request.status_code==200:\n logger.info('Scraping from result, keyword : {}'.format(keyword))\n soup = BeautifulSoup(_request.text, 'lxml')\n img_metadata_raw = [x.get_text() for x in soup.find_all('div', class_='rg_meta notranslate')]\n if len(img_metadata_raw) < 5:\n logger.warning('Result doesn\\'t seem right, I\\'m 
quitting this whole batch')\n logger.warning('__________________\\n{}\\n{}\\n____________________'.format(soup.title, img_metadata_raw))\n return None\n else:\n entry = {}\n entry['keyword'] = keyword\n entry['keyword_title'] = keyword.title()\n # entry['snippet'] = normal_search\n entry['metadata'] = [json.loads(x) for x in img_metadata_raw]\n logger.info('_____________________\\nGoogling done, Keyword: {}, Total: {}\\n_____________________'.format(keyword, len(img_metadata_raw)))\n return entry\n else:\n logger.warning('Did not scrape {}, result = {}'.format(keyword, _request))\n return None\n except requests.exceptions.ConnectionError:\n logger.warning('ConnectionError when scraping {}'.format(keyword))\n return None\n\n\n\n\n# c = GoogleRequest().request_list(['go girl', 'bulletproof'], socks5_port='9001')\n# print (c)","sub_path":"engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"327457229","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- \n\nimport sys\nfrom difflib import SequenceMatcher\nfrom datetime import datetime\nimport yaml\nimport banan\nimport hollidays\nimport pandas as pd\nfrom matplotlib import style\nimport sqlite3\nimport os\nfrom docopt import docopt\nimport numpy as np\nfrom datetime import date\n\nclass Account():\n List = {\n 90237888701 : \"Fredrik lön\",\n 90237888728 : \"Hus och räkning\",\n 90247206239 : \"Månadspeng\",\n 90237889813 : \"Aktielikvid\",\n 90239908416 : \"Används ej\",\n 90251606718 : \"Används ej\",\n 90238924027 : \"Freja spar\",\n 90238924035 : \"Gustaf spar\",\n }\n\n def __init__(self, number):\n try:\n self._number = int(number)\n except:\n self._number = 0\n\n try:\n self.name = Account.List[self._number]\n except:\n self.name = ''\n\nclass DataSet():\n def __init__(self):\n self.storage = []\n\n def getAll(self):\n return self.storage\n\n def _add(self, row):\n self.storage.append(row)\n\n def fillWithData(self, inData, samples, indices):\n dataSet = DataSet()\n\n for i,j in enumerate(indices):\n waytopay = samples.getTypeOfPayment()[j][0]\n account = inData.getAcount()\n fixedOrVariableCost = samples.getFixOrVaraibleCost()[j][0]\n maincategory = samples.getCategories(0)[j][0]\n category_1 = samples.getCategories(1)[j][0]\n category_2 = samples.getCategories(2)[j][0]\n category_3 = samples.getCategories(3)[j][0]\n message = inData.getMessages()[i]\n date = inData.getDate()[i] \n amount = inData.getAmount()[i]\n\n row = Row(date,waytopay,account,fixedOrVariableCost,maincategory,category_1,category_2,category_3, message ,amount)\n\n self._add(row)\n\nclass Row():\n def __init__(self, date, waytopay, account, fixedOrVariableCost, maincategory, category_1, category_2, category_3, message, amount):\n\n ignore = [ 'Överf ISK',\n '832796936540829',\n '832796936540654'\n ]\n\n if (amount < 0):\n inout = \"Utgifter\"\n else:\n inout = \"Inkomster\" \n\n if (waytopay == 'Överföring' ):\n if (message in ignore) :\n #print(\"ignore\")\n pass\n else:\n if (amount < 0):\n maincategory = 'ÖverföringUt'\n else:\n maincategory = 'ÖverföringIn'\n\n\n\n self.date = date\n\n\n self.inout = inout\n self.waytopay = waytopay\n self.account = account\n self.fixedOrVariableCost = fixedOrVariableCost\n self.maincategory = maincategory\n self.category_1 = category_1\n self.category_2 = category_2\n self.category_3 = category_3\n self.message = message\n self.amount = amount\n\n\n def __str__(self):\n return self.__repr__()\n \n def 
__repr__(self):\n return \"[{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}]\\n\".format(\n self.date, self.inout, self.waytopay, self.account, self.fixedOrVariableCost, self.maincategory,\n self.category_1, self.category_2, self.category_3, self.message, self.amount)\n\n def toDict(self):\n \n temp = {'Datum' : self.date,\n 'In/ut' : self.inout, \n 'Betalsätt' : self.waytopay,\n 'Konto' : self.account,\n 'Typ' : self.fixedOrVariableCost,\n 'Huvudkategori' : self.maincategory,\n 'Kategori 1' : self.category_1,\n 'Kategori 2' : self.category_2,\n 'Kategori 3' : self.category_3,\n 'Message' : self.message,\n 'Summa' : self.amount,\n }\n\n # Row defines no 'months' attribute by default; guard against AttributeError\n temp.update(getattr(self, 'months', {}))\n\n return temp\n\nclass InData():\n def __init__(self, filename):\n tempdf = pd.read_csv(filename,sep=';',header = 0,index_col=False, nrows = 1)\n acountnumber = int(tempdf['Kontonummer'][0])\n self._acount = Account(acountnumber).name\n \n self._df = pd.read_csv(filename,sep=';',header = 3,index_col=False,skip_blank_lines = False)\n\n # Write back via .loc; assigning to the row copy from iterrows() does not modify the DataFrame\n for index, row in self._df.iterrows(): \n if(pd.isnull(row['Meddelande'])):\n self._df.loc[index, 'Meddelande'] = row['Transaktionstyp']\n\n self._df.loc[index, 'Belopp'] = round(float(row['Belopp'].replace(',','.').replace(' ','')))\n\n\n def getMessages(self):\n return (self._df['Meddelande'].values)\n\n def getDate(self):\n \n #return self._df['Transaktionsdatum']\n return self._df['Bokföringsdatum']\n\n def getAmount(self):\n return self._df['Belopp']\n\n def getAcount(self):\n return self._acount\n\nclass Samples():\n def __init__(self, filename): \n self.df = pd.read_csv(filename, sep=';')\n \n self.samples_unique = self.df[['value', 'TypeofPayment','fixedOrVariableCost', 'Category','Category_1','Category_2','Category_3']].drop_duplicates()\n \n def getValues(self):\n return self.samples_unique['value'].values\n\n def getTypeOfPayment(self):\n return self.samples_unique['TypeofPayment'].values\n\n def getFixOrVaraibleCost(self):\n return self.samples_unique['fixedOrVariableCost'].values\n\n def getCategories(self, index = 0):\n if (index == 1):\n return self.samples_unique['Category_1'].values\n elif (index == 2):\n return self.samples_unique['Category_2'].values\n elif (index == 3):\n return self.samples_unique['Category_3'].values \n else:\n return self.samples_unique['Category'].values\n\n\n\nclass Convert():\n months = ['Januari', 'Februari', 'Mars', 'April', 'Maj', 'Juni', 'Juli', 'Augusti', 'September', 'Oktober', 'November', 'December']\n def __init__(self):\n pass\n\n\n def _toPayDateMonth(self,d):\n d = datetime.strptime(d , '%Y-%m-%d').date()\n p = hollidays.PayDates(d.year)\n return p.getPayMonth(d)\n\n def _toWorkMonth(self,date):\n\n l =[\n [( 1, 1), ( 1, 22), 0 ],\n [( 1, 23), ( 2, 23), 1 ], \n [( 2, 24), ( 3, 24), 2 ],\n [( 3, 24), ( 4, 24), 3 ],\n [( 4, 23), ( 5, 21), 4 ],\n [( 5, 22), ( 6, 23), 5 ],\n [( 6, 24), ( 7, 22), 6 ],\n [( 7, 23), ( 8, 23), 7 ],\n [( 8, 24), ( 9, 23), 8 ],\n [( 9, 24), (10, 21), 9 ],\n [(10, 22), (11, 23), 10 ],\n [(11, 24), (12, 21), 11 ],\n [(12, 22), (12, 31), 0 ]\n ]\n\n\n d = datetime.strptime(date , '%Y-%m-%d').date()\n d = (d.month , d.day)\n\n month = ''\n for key in l:\n start = key[0]\n end = key[1]\n if start <= d <= end:\n month = key[2]\n break\n\n return Convert.months[month]\n\n\n def _toMonth(self, date):\n\n monthInteger = datetime.strptime(date , '%Y-%m-%d').month -1 \n\n return Convert.months[monthInteger]\n \n def tosDict(self, dataSet):\n temp = {}\n i = 0\n for row in dataSet.getAll():\n \n rowTemp = {\n 'In/ut' : row.inout, \n 'Betalsätt' : row.waytopay,\n 'Konto' : 
row.account,\n 'Typ' : row.fixedOrVariableCost,\n 'Huvudkategori' : row.maincategory,\n 'Kategori 1' : row.category_1,\n 'Kategori 2' : row.category_2,\n 'Kategori 3' : row.category_3,\n 'Message' : row.message,\n Convert.months[0] : 0, \n Convert.months[1] : 0, \n Convert.months[2] : 0, \n Convert.months[3] : 0, \n Convert.months[4] : 0, \n Convert.months[5] : 0, \n Convert.months[6] : 0, \n Convert.months[7] : 0, \n Convert.months[8] : 0, \n Convert.months[9] : 0, \n Convert.months[10] : 0, \n Convert.months[11] : 0,\n 'Date' : row.date\n }\n\n \n\n \n \n month = self._toPayDateMonth(row.date)\n\n rowTemp[month] = row.amount\n\n temp[i] = rowTemp\n\n i += 1\n\n return temp\n\n def toCompressDict(self, dataSet):\n temp = {}\n \n for row in dataSet.getAll():\n rowTemp = {\n 'In/ut' : row.inout, \n 'Betalsätt' : row.waytopay,\n 'Konto' : row.account,\n 'Typ' : row.fixedOrVariableCost,\n 'Huvudkategori' : row.maincategory,\n 'Kategori 1' : row.category_1,\n 'Kategori 2' : row.category_2,\n 'Kategori 3' : row.category_3,\n 'Message' : row.message,\n Convert.months[0] : 0, \n Convert.months[1] : 0, \n Convert.months[2] : 0, \n Convert.months[3] : 0, \n Convert.months[4] : 0, \n Convert.months[5] : 0, \n Convert.months[6] : 0, \n Convert.months[7] : 0, \n Convert.months[8] : 0, \n Convert.months[9] : 0, \n Convert.months[10] : 0, \n Convert.months[11] : 0\n }\n month = self._toWorkMonth(row.date)\n\n rowTemp[month] = row.amount\n\n if row.message in temp:\n temp[row.message][month] += row.amount\n else:\n temp[row.message] = rowTemp\n\n return temp\n\n\n def toDict(self, dataSet):\n temp = {}\n i = 0\n for row in dataSet.getAll():\n rowTemp = {\n 'Datum' : row.date,\n 'In/ut' : row.inout, \n 'Betalsätt' : row.waytopay,\n 'Konto' : row.account,\n 'Typ' : row.fixedOrVariableCost,\n 'Huvudkategori' : row.maincategory,\n 'Kategori 1' : row.category_1,\n 'Kategori 2' : row.category_2,\n 'Kategori 3' : row.category_3,\n 'Message' : row.message,\n \n 'Summa' : row.amount\n } \n temp[i] = rowTemp\n i += 1\n\n return temp\n# Database \n\nDEFAULT_PATH = os.path.join(os.path.dirname(__file__), 'database.sqlite3')\n\ndef db_connect(db_path=DEFAULT_PATH):\n con = sqlite3.connect(db_path)\n return con\n\ndef addRow(cur, row):\n ind = \"\"\"INSERT INTO transactions (ix, Transactiondate, in_out, Ways_to_pay, Account, Type, Maincategory,Category_1, Category_2, Category_3, Message, Amount) \n VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\"\"\"\n \n for i in range(10): \n data = (i, row.date, row.inout, row.waytopay, row.account, row.fixedOrVariableCost, row.maincategory, row.category_1, row.category_2, row.category_3, row.message, row.amount)\n try:\n cur.execute(ind, data)\n break\n except sqlite3.Error as er:\n print(data)\n print(\"########################\")\n\ndef create_table(db_cursor) -> bool:\n table_create = \"\"\"\n CREATE TABLE transactions (\n ix INTEGER,\n Transactiondate NUMERIC,\n in_out TEXT,\n Ways_to_pay TEXT,\n Account INTEGER,\n Type TEXT,\n Maincategory TEXT,\n Category_1 TEXT,\n Category_2 TEXT,\n Category_3 TEXT,\n Message TEXT,\n Amount INTEGER, \n CONSTRAINT PK_Person PRIMARY KEY (ix, Transactiondate, Message, Amount)); \"\"\"\n\n try:\n db_cursor.execute(table_create)\n return True\n except sqlite3.Error as e:\n print(e)\n return False \n\n\ndef createFile(outfile, dataSet, compact = False):\n if (compact):\n theDict = Convert().toCompressDict(dataSet) \n else:\n theDict = Convert().tosDict(dataSet)\n\n df = pd.DataFrame.from_dict(theDict,orient='index').reset_index(drop=True)\n df = 
df.replace(0, '') \n df.to_csv(outfile, index=False, sep=';')\n\n#\"Constants\"\nINDATA = 'in'\nSAMPLES = 'samples'\nOUT = 'out'\nCOMPACT = 'compact'\nRESULT = 'result'\n\n\ndef interpretUserParams( params): \n helptext = \"\"\" Something inspiring\n Usage: \n Reader.py [--in=FILE] [--samples=FILE] [-o] [-c] [--result=FILE]\n Reader.py -h | --help\n Reader.py --version\n\nOptions:\n -i --in=FILE File to convert [default: rakning_2020.csv].\n -s --samples=FILE Name of the file to use as samples [default: samples.csv].\n -o --out Return output.\n -c --Compact Make output data compact.\n -r --result=FILE Output file name, [default: result.csv].\n -h --help Show this screen.\n --version Show version.\n\"\"\"\n args = docopt(helptext, version='1.0.0')\n\n params = {}\n\n params[INDATA] = args['--in']\n params[SAMPLES] = args['--samples']\n params[OUT] = args['--out']\n params[COMPACT] = args['--Compact']\n params[RESULT ] = args['--result']\n\n return params\n\n# =======================\n# MAIN PROGRAM\n# =======================\nif __name__ == \"__main__\":\n\n exit(0) # NOTE: early exit; the pipeline below is currently disabled\n # Get the input parameters\n params = interpretUserParams(sys.argv)\n\n # Read the input file\n inData = InData(params[INDATA])\n messages = inData.getMessages()\n\n # Read the sample files\n samples = Samples(params[SAMPLES])\n values = samples.getValues()\n\n (distances, indices) = banan.findpartner(values, messages)\n\n # Create a dataset\n dataSet = DataSet()\n dataSet.fillWithData(inData, samples, indices)\n\n # Write to file\n if (params[OUT]):\n createFile(params[RESULT], dataSet, params[COMPACT])\n\n #sys.stdout.flush()\n\n\n\n con = db_connect()\n\n cur = con.cursor() \n\n create_table(cur)\n\n con.commit()\n\n # matches = []\n\n # for i,j in enumerate(indices):\n # temp = [distances[i][0], values[j],categories[j], messages[i]]\n # matches.append(temp)\n\n # matches = pd.DataFrame(matches, columns=['Match confidence (lower is better)','Value','Category', 'From data'])\n\n # print(matches)\n\n #allData[1].printa()\n\n \n\n","sub_path":"Reader.py","file_name":"Reader.py","file_ext":"py","file_size_in_byte":15399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"374408642","text":"import os, discord, json, functions as smash\n\nclient = discord.Client()\n\n@client.event\nasync def on_ready():\n print(\"Logged in as {0.user}\".format(client))\n\n\n@client.event\nasync def on_message(message):\n\n if message.author == client.user:\n return\n\n # CHECK WHICH EVENTS EXIST IN A TOURNAMENT\n if message.content.startswith('!check https://smash.gg/tournament/'):\n tournamentName = message.content.replace(\"!check https://smash.gg/tournament/\", \"\").split(\"/\")[0]\n data = smash.get_tournament_info(1, 2, \"tournament/\" + tournamentName)\n json_data = json.loads(data.text)\n \n message_text = \"\"\"{}: {} total events\n \"\"\".format(json_data[\"data\"][\"tournament\"][\"name\"], len(json_data[\"data\"][\"tournament\"][\"events\"]))\n\n for event in json_data[\"data\"][\"tournament\"][\"events\"]:\n message_text = message_text + \"\"\"- {} ({})\n \"\"\".format(event[\"name\"], event[\"id\"])\n\n await message.channel.send(message_text)\n\n # GET THE RESULTS OF AN EVENT\n if message.content.startswith('!add '):\n event_id = message.content.replace(\"!add \", \"\")\n data = smash.get_results_info(1, 2, event_id)\n json_data = json.loads(data.text)\n await 
message.channel.send(data.text)\n\nclient.run(os.environ['DISCORD_TOKEN'])","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"558547911","text":"import heapq\n\n\nclass Solution:\n '''\n @param {int[]} nums an integer array\n @param {int} k an integer\n @return {int[]} the top k largest numbers in array\n '''\n\n def topk(self, nums, k):\n # Write your code here\n pq = [-num for num in nums]\n heapq.heapify(pq)\n\n topk = []\n for i in range(k):\n topk.append(-heapq.heappop(pq))\n\n return topk\n\n\n","sub_path":"lintcode/数据结构/544-top-k-largest-numbers.py","file_name":"544-top-k-largest-numbers.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"642063048","text":"#\n# CS1010X --- Programming Methodology\n#\n# Contest 15.1 Template\n#\n# Note that written answers are commented out to allow us to run your\n# code easily while grading your problem set.\n\nfrom hungry_games_classes import *\nfrom contest_simulation import *\nimport random\n\n\nclass Player(Tribute):\n def __init__(self, name, health):\n super().__init__(name, health)\n self.last_direction = \"\"\n \n def next_action(self):\n vicinity = self.objects_around()\n ## Surroundings\n living_vicinity = list(filter(lambda x: isinstance(x, LivingThing), vicinity))\n tribute_vicinity = list(filter(lambda x: isinstance(x, Tribute), living_vicinity))\n wild_vicinity = list(filter(lambda x: isinstance(x, WildAnimal), living_vicinity))\n thing_vicinity = list(filter(lambda x: isinstance(x, Thing), vicinity))\n melee_vicinity = list(filter(lambda x: type(x) == Weapon, thing_vicinity))\n ranged_vicinity = list(filter(lambda x: type(x) == RangedWeapon, thing_vicinity))\n ammo_vicinity = list(filter(lambda x: type(x) == Ammo, thing_vicinity))\n medicine_vicinity = list(filter(lambda x: type(x) == Medicine, thing_vicinity))\n exits = self.get_exits()\n if self.last_direction:\n if self.last_direction==\"NORTH\":\n exits.remove(\"SOUTH\")\n elif self.last_direction==\"SOUTH\":\n exits.remove(\"NORTH\")\n elif self.last_direction==\"EAST\":\n exits.remove(\"WEST\")\n elif self.last_direction==\"WEST\":\n exits.remove(\"EAST\")\n\n ## Self\n weapons = list(self.get_weapons())\n melee_weapons = list(filter(lambda x: type(x) == Weapon, weapons))\n ranged_weapons = list(filter(lambda x: type(x) == RangedWeapon, weapons))\n ammo = list(filter(lambda x: isinstance(x, Ammo), self.get_inventory()))\n food = list(self.get_food())\n hunger = self.get_hunger()\n health = self.get_health()\n medicine = list(self.get_medicine())\n\n if health<20 and medicine and not tribute_vicinity and not wild_vicinity:\n medicine.sort(key = lambda x: x.get_medicine_value(), reverse = True)\n if medicine[0].get_medicine_value() >0:\n return (\"EAT\", medicine[0])\n\n elif health<20 and not tribute_vicinity and not wild_vicinity and medicine_vicinity:\n medicine_vicinity.sort(key = lambda x: x.get_medicine_value(), reverse = True)\n if medicine_vicinity[0].get_medicine_value() >0:\n return (\"TAKE\", medicine_vicinity[0])\n \n if hunger >= 70 and not tribute_vicinity and food and melee_weapons:\n food.sort(key = lambda x: x.get_food_value(), reverse = True)\n if food[0].get_food_value() > 0:\n return (\"EAT\", food[0])\n\n\n ## Next is the priority to get melee weapons. 
Ranged weapons tend to be unreliable due to\n ## the need for ammo, which often only lasts 1 shot. Though powerful, it's very unreliable.\n \n if not melee_weapons: ## If I have no melee weapons\n if melee_vicinity: ## and I see one, my priority is to pick it up\n melee_vicinity.sort(key = lambda x: x.min_damage(), reverse = True)\n return (\"TAKE\", melee_vicinity[0])\n else: ## Else if there's nothing for me to pick up\n if tribute_vicinity: ## If there's a tribute around\n if ranged_weapons: ## I can consider using my ranged weapon, but only if certain conditions are met\n ranged_weapons.sort(key = lambda x: x.min_damage(), reverse = True) ## Let's run through my weapons from strongest to weakest\n for r_weapon in ranged_weapons:\n if r_weapon.shots_left() >0: ## If there's a loaded ranged weapon \n tribute_vicinity.sort(key = lambda x: x.get_health()) ## ranking my tributes from lowest to highest health\n for tribute in tribute_vicinity:\n if tribute.get_health() <= r_weapon.max_damage(): ## if the tribute's health is lower than my lowest dmg\n return (\"ATTACK\", tribute, r_weapon) ## Kill him\n ## return (\"ATTACK\", tribute_vicinity[0], r_weapon) ## Uncomment to allow aiming for lowest health tribute\n\n ## If the condition to hit him has not been met (aka 1HKO)\n should_run = False\n for tribute in tribute_vicinity: ## Run away if the tribute poses a threat\n tribute_weapon = tribute.get_weapons()\n for weapon in tribute_weapon:\n if type(weapon) == Weapon: ## if he has a melee weapon\n should_run = True\n break\n elif type(weapon) == RangedWeapon: ## or a loaded ranged weapon\n if weapon.shots_left() > 0:\n should_run = True\n break\n if should_run: ## run away\n if exits:\n index = random.randint(0, len(exits)-1)\n direction = exits[index]\n self.last_direction = direction\n return (\"GO\", direction)\n if ammo: ## if I'm safe and I have ammo to load\n for r_weapon in ranged_weapons:\n for ammunition in ammo:\n if ammunition.weapon_type() == r_weapon.get_name():\n return (\"LOAD\", r_weapon, ammunition) ## load the appropriate weapon\n if ammo_vicinity: ## if there's ammo nearby to pick up and I'm safe\n for r_weapon in ranged_weapons:\n for ammunition in ammo_vicinity:\n if ammunition.weapon_type() == r_weapon.get_name():\n return (\"TAKE\", ammunition) ## pick up ammo that I can use\n \n if exits: ## If there's nothing to do that can help me fight back, just continue moving\n index = random.randint(0, len(exits)-1)\n direction = exits[index]\n self.last_direction = direction\n return (\"GO\", direction)\n\n else: ## If no melee and no ranged weapon\n should_run = False\n for tribute in tribute_vicinity: ## once again check if I'm in danger\n tribute_weapon = tribute.get_weapons()\n for weapon in tribute_weapon:\n if type(weapon) == Weapon:\n should_run = True\n break\n elif type(weapon) == RangedWeapon:\n if weapon.shots_left() > 0:\n should_run = True\n break\n if should_run: ## Run away if the tribute poses a threat\n if exits:\n index = random.randint(0, len(exits)-1)\n direction = exits[index]\n self.last_direction = direction\n return (\"GO\", direction)\n\n if ranged_vicinity: ## If there's a ranged weapon nearby, just pick it up.\n ranged_vicinity.sort(key = lambda x: x.min_damage(), reverse = True)\n return (\"TAKE\", ranged_vicinity[0])\n\n if exits: ## If I have absolutely nothing, time to move on\n index = random.randint(0, len(exits)-1)\n direction = exits[index]\n self.last_direction = direction\n return (\"GO\", direction)\n\n else: ## if no melee weapon to pick 
up and no tribute\n if ranged_weapons:\n ranged_weapons.sort(key = lambda x: x.min_damage(), reverse = True)\n \n ## Actually there is no need to sort, as my code will only ever pick up one ranged\n ## weapon for each player/tribute. But this is just in case.\n \n if ammo: ## if I have ammunition\n for r_weapon in ranged_weapons:\n for ammunition in ammo:\n if ammunition.weapon_type() == r_weapon.get_name():\n return (\"LOAD\", r_weapon, ammunition) ## load the weapon\n if ammo_vicinity: ## if there's ammo nearby\n for r_weapon in ranged_weapons:\n for ammunition in ammo_vicinity:\n if ammunition.weapon_type() == r_weapon.get_name():\n return (\"TAKE\", ammunition) ## pick up ammo that I can use\n if exits: ## If I have absolutely nothing, time to move on\n index = random.randint(0, len(exits)-1)\n direction = exits[index]\n self.last_direction = direction\n return (\"GO\", direction)\n else:\n if ranged_vicinity: ## If there's a ranged weapon nearby, just pick it up.\n ranged_vicinity.sort(key = lambda x: x.min_damage(), reverse = True)\n return (\"TAKE\", ranged_vicinity[0])\n else:\n if exits: ## If I have absolutely nothing, time to move on\n index = random.randint(0, len(exits)-1)\n direction = exits[index]\n self.last_direction = direction\n return (\"GO\", direction)\n\n\n else: ## if I have a melee weapon\n if melee_vicinity: ## but there's a better melee weapon!\n melee_vicinity.sort(key = lambda x: x.min_damage(), reverse = True)\n melee_weapons.sort(key = lambda x: x.min_damage(), reverse = True)\n if melee_vicinity[0].min_damage() > melee_weapons[0].min_damage():\n return (\"TAKE\", melee_vicinity[0]) ### pick it upppp\n if living_vicinity: ## if there's a living creature nearby\n melee_weapons.sort(key = lambda x: x.min_damage(), reverse = True)\n if tribute_vicinity: ## If there's a tribute nearby\n tribute_vicinity.sort(key = lambda x: x.get_health())\n if len(tribute_vicinity)==1: ## If I'm only facing off against one person\n if tribute_vicinity[0].get_health() <= melee_weapons[0].min_damage(): ## 1HKO with melee if possible\n return (\"ATTACK\", tribute_vicinity[0], melee_weapons[0]) \n elif ranged_weapons: ## else I try to 1HKO with ranged\n ranged_weapons.sort(key = lambda x: x.min_damage(), reverse = True)\n for r_weapon in ranged_weapons:\n if r_weapon.shots_left()>0:\n for tribute in tribute_vicinity:\n if tribute.get_health()<=r_weapon.min_damage(): ## if the tribute has lower health than my ranged dmg\n return (\"ATTACK\", tribute, r_weapon) ## kill\n else:\n for tribute in tribute_vicinity:\n if max(list(map(lambda x: x.max_damage(), list(tribute.get_weapons())))) > health:\n if exits: ## if the guy has a weapon that can potentially one-shot me, run away\n index = random.randint(0, len(exits)-1)\n direction = exits[index]\n self.last_direction = direction\n return (\"GO\", direction)\n \n else: ## If there's more than 1 enemy\n for tribute in tribute_vicinity:\n if max(list(map(lambda x: x.max_damage(), list(tribute.get_weapons())))) > health:\n if exits: ## if the guy has a weapon that can potentially one-shot me, run away\n index = random.randint(0, len(exits)-1)\n direction = exits[index]\n self.last_direction = direction\n return (\"GO\", direction)\n for tribute in tribute_vicinity: ## If I am relatively safe, let me try to balance things out\n if tribute.get_health()>health: ## if my health is lower than the other person's health\n for r_weapon in ranged_weapons:\n if r_weapon.shots_left()>0:\n return (\"ATTACK\", tribute, r_weapon) ## Use the high dmg 
ranged weapon as a game changer\n\n for tribute in tribute_vicinity:## Else if cannot 1HKO and no need for any game changer, I will prioritise the tributes accordingly\n weps = list(tribute.get_weapons())\n for wep in weps:\n if type(wep) == RangedWeapon:\n if wep.shots_left() >0:\n return (\"ATTACK\", tribute, melee_weapons[0]) ## Attack those possessing ranged_weapons first\n elif type(wep) == Weapon:\n return (\"ATTACK\", tribute, melee_weapons[0]) ## then attack those with melee weapons\n return (\"ATTACK\", tribute_vicinity[0], melee_weapons[0]) ## if no danger just hit the lowest health fella\n\n if wild_vicinity:\n wild_vicinity.sort(key = lambda x: x.get_damage(), reverse= True)\n return (\"ATTACK\", wild_vicinity[0], melee_weapons[0]) ## hit any wild animals\n\n return (\"ATTACK\", living_vicinity[0], melee_weapons[0]) ## hit any remaining animals, likely no danger\n\n if ranged_weapons: ## If I have a ranged weapon and there's no living creature\n ranged_weapons.sort(key = lambda x: x.min_damage(), reverse = True)\n \n if ammo: ## if I have ammunition\n for r_weapon in ranged_weapons:\n for ammunition in ammo:\n if ammunition.weapon_type() == r_weapon.get_name():\n return (\"LOAD\", r_weapon, ammunition) ## load the appropriate weapon\n if ammo_vicinity: ## if there's ammo nearby\n for r_weapon in ranged_weapons:\n for ammunition in ammo_vicinity:\n if ammunition.weapon_type() == r_weapon.get_name():\n return (\"TAKE\", ammunition) ## pick up ammo that I can use\n elif not ranged_weapons and ranged_vicinity: ## else if I have no ranged weapon but can pick up one\n ranged_vicinity.sort(key = lambda x: x.min_damage(), reverse = True)\n return (\"TAKE\", ranged_vicinity[0]) ## pick up the best\n\n if thing_vicinity: ## if there are non-living things nearby\n food_vicinity = list(filter(lambda x: isinstance(x, Food), thing_vicinity))\n if food_vicinity: ## go for the food\n food_vicinity.sort(key = lambda x: x.get_food_value(), reverse = True)\n if food_vicinity[0].get_food_value() >=10:\n return (\"TAKE\", food_vicinity[0]) ## pickup the good food\n elif len(food)<3: ## pickup to stockpile at least 3 food items\n return (\"TAKE\", food_vicinity[0])\n if exits: ## if nothing else continue searching\n index = random.randint(0, len(exits)-1)\n direction = exits[index]\n self.last_direction = direction\n return (\"GO\", direction)\n \n return None ## should not occur by right, but this is a catch-all\n\n\n#######################################\n# Testing Code\n#######################################\n\n# We only execute code inside the if statement if this file is\n# not being imported into another file\nif __name__ == '__main__':\n def qualifer_map(size, wrap):\n game_config = GameConfig()\n game_config.set_item_count(Weapon, 10)\n game_config.set_item_count(RangedWeapon, 10)\n game_config.set_item_count(Food, 10)\n game_config.set_item_count(Medicine, 10)\n game_config.set_item_count(Animal, 10)\n game_config.steps = 1000\n\n def spawn_wild_animals(game):\n for i in range(3):\n animal = DefaultItemFactory.create(WildAnimal)\n game.add_object(animal[0])\n GAME_LOGGER.add_event(\"SPAWNED\", animal[0])\n game_config.add_periodic_event(20, spawn_wild_animals, \"Spawn Wild Animals\")\n\n return (GameMap(size, wrap=wrap), game_config)\n\n # Create 6 AI Clones\n tributes = []\n for i in range(6):\n # An AI is represented by a tuple, with the Class as the first element,\n # and the name of the AI as the second\n ai = (Player, \"AI\" + str(i))\n tributes.append(ai)\n\n # Qualifier Rounds\n # 
Uncomment to run more rounds, or modify the rounds list\n # to include more rounds into the simulation\n # (Note: More rounds = longer simulation!)\n rounds = [qualifer_map(4, False),\n #qualifer_map(4, False),\n #qualifer_map(4, False),\n qualifer_map(4, True),\n #qualifer_map(4, True),\n #qualifer_map(4, True),\n ]\n\n\n\n match = Match(tributes, rounds)\n print(\"Simulating matches... might take a while\")\n\n # Simulate without the graphics\n # match.text_simulate_all()\n\n # Simulate a specific round with the graphics\n # Due to limitation in the graphics framework,\n # can only simulate one round at a time\n # Round id starts from 0\n match.gui_simulate_round(0)\n","sub_path":"python/hunger-games-ai/zhu-hanming.py","file_name":"zhu-hanming.py","file_ext":"py","file_size_in_byte":19154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"467616533","text":"# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Model for classifier.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import absolute_import\n\nimport time\nimport logging\nimport numpy as np\n\nfrom scipy.stats import pearsonr, spearmanr\nfrom six.moves import xrange\nimport paddle.fluid as fluid\n\nfrom model.ernie import ErnieModel\n\nlog = logging.getLogger(__name__)\n\ndef create_model(args,\n pyreader_name,\n ernie_config,\n batch_size=16,\n is_prediction=False,\n task_name=\"\",\n fleet_handle=None):\n print (\"DEBUG:\\tclassify\")\n pyreader = fluid.layers.py_reader(\n capacity=50,\n shapes=[[batch_size, args.q_max_seq_len, 1], [batch_size, args.q_max_seq_len, 1],\n [batch_size, args.q_max_seq_len, 1], [batch_size, args.q_max_seq_len, 1],\n [batch_size, args.q_max_seq_len, 1],\n [batch_size, args.p_max_seq_len, 1], [batch_size, args.p_max_seq_len, 1],\n [batch_size, args.p_max_seq_len, 1], [batch_size, args.p_max_seq_len, 1],\n [batch_size, args.p_max_seq_len, 1],\n [batch_size, args.p_max_seq_len, 1], [batch_size, args.p_max_seq_len, 1],\n [batch_size, args.p_max_seq_len, 1], [batch_size, args.p_max_seq_len, 1],\n [batch_size, args.p_max_seq_len, 1],\n [batch_size, 1], [batch_size, 1]],\n dtypes=['int64', 'int64', 'int64', 'int64', 'float32',\n 'int64', 'int64', 'int64', 'int64', 'float32',\n 'int64', 'int64', 'int64', 'int64', 'float32',\n 'int64', 'int64'],\n lod_levels=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n name=task_name + \"_\" + pyreader_name,\n use_double_buffer=True)\n\n (src_ids_q, sent_ids_q, pos_ids_q, task_ids_q, input_mask_q,\n src_ids_p_pos, sent_ids_p_pos, pos_ids_p_pos, task_ids_p_pos, input_mask_p_pos,\n src_ids_p_neg, sent_ids_p_neg, pos_ids_p_neg, task_ids_p_neg, input_mask_p_neg,\n labels, qids) = fluid.layers.read_file(pyreader)\n\n ernie_q = ErnieModel(\n src_ids=src_ids_q,\n position_ids=pos_ids_q,\n sentence_ids=sent_ids_q,\n 
task_ids=task_ids_q,\n input_mask=input_mask_q,\n config=ernie_config,\n model_name='titlepara_')\n ## pos para\n ernie_pos = ErnieModel(\n src_ids=src_ids_p_pos,\n position_ids=pos_ids_p_pos,\n sentence_ids=sent_ids_p_pos,\n task_ids=task_ids_p_pos,\n input_mask=input_mask_p_pos,\n config=ernie_config,\n model_name='titlepara_')\n ## neg para\n ernie_neg = ErnieModel(\n src_ids=src_ids_p_neg,\n position_ids=pos_ids_p_neg,\n sentence_ids=sent_ids_p_neg,\n task_ids=task_ids_p_neg,\n input_mask=input_mask_p_neg,\n config=ernie_config,\n model_name='titlepara_')\n\n q_cls_feats = ernie_q.get_cls_output()\n pos_cls_feats = ernie_pos.get_cls_output()\n neg_cls_feats = ernie_neg.get_cls_output()\n #src_ids_p_pos = fluid.layers.Print(src_ids_p_pos, message='pos: ')\n #pos_cls_feats = fluid.layers.Print(pos_cls_feats, message='pos: ')\n\n p_cls_feats = fluid.layers.concat([pos_cls_feats, neg_cls_feats], axis=0)\n if args.is_pretrain:\n qp_cls_feats = fluid.layers.concat([q_cls_feats, neg_cls_feats], axis=0)\n\n if is_prediction:\n p_cls_feats = fluid.layers.slice(p_cls_feats, axes=[0], starts=[0], ends=[batch_size])\n multi = fluid.layers.elementwise_mul(q_cls_feats, p_cls_feats)\n probs = fluid.layers.reduce_sum(multi, dim=-1)\n\n graph_vars = {\n \"probs\": probs,\n \"qids\": qids,\n \"q_rep\": q_cls_feats,\n \"p_rep\": p_cls_feats\n }\n return pyreader, graph_vars\n\n if args.use_cross_batch and fleet_handle is not None:\n print(\"worker num is: {}\".format(fleet_handle.worker_num()))\n all_p_cls_feats = fluid.layers.collective._c_allgather(\n p_cls_feats, fleet_handle.worker_num(), use_calc_stream=True)\n if args.is_pretrain:\n all_qp_cls_feats = fluid.layers.collective._c_allgather(\n qp_cls_feats, fleet_handle.worker_num(), use_calc_stream=True)\n \n\n #multiply\n logits = fluid.layers.matmul(q_cls_feats, all_p_cls_feats, transpose_x=False, transpose_y=True)\n if args.is_pretrain:\n logits_qp = fluid.layers.matmul(pos_cls_feats, all_qp_cls_feats, transpose_x=False, transpose_y=True)\n worker_id = fleet_handle.worker_index()\n\n else:\n logits = fluid.layers.matmul(q_cls_feats, p_cls_feats, transpose_x=False, transpose_y=True)\n if args.is_pretrain:\n logits_qp = fluid.layers.matmul(pos_cls_feats, qp_cls_feats, transpose_x=False, transpose_y=True)\n worker_id = 0\n\n probs = logits\n all_labels = np.array(range(batch_size * worker_id * 2, batch_size * (worker_id * 2 + 1)), dtype='int64')\n matrix_labels = fluid.layers.assign(all_labels)\n matrix_labels = fluid.layers.unsqueeze(matrix_labels, axes=1)\n matrix_labels.stop_gradient=True\n# fluid.layers.Print(matrix_labels, message='matrix_labels')\n\n #print('DEBUG:\\tstart loss')\n ce_loss = fluid.layers.softmax_with_cross_entropy(\n logits=logits, label=matrix_labels)\n \n if args.is_pretrain:\n alpha = 0.1\n probs_qp = logits_qp\n p_centric_loss = fluid.layers.softmax_with_cross_entropy(\n logits=logits_qp, label=matrix_labels)\n loss = (1-alpha) * fluid.layers.mean(x=ce_loss) + alpha * fluid.layers.mean(x=p_centric_loss)\n else:\n loss = fluid.layers.mean(x=ce_loss)\n #print('DEBUG:\\tloss done')\n\n num_seqs = fluid.layers.create_tensor(dtype='int64')\n accuracy = fluid.layers.accuracy(\n input=probs, label=matrix_labels)\n\n graph_vars = {\n \"loss\": loss,\n \"probs\": probs,\n \"accuracy\": accuracy,\n \"labels\": labels,\n \"num_seqs\": num_seqs,\n \"qids\": qids,\n \"q_rep\": q_cls_feats,\n \"p_rep\": p_cls_feats\n }\n\n cp = []\n cp.extend(ernie_q.checkpoints)\n cp.extend(ernie_pos.checkpoints)\n cp.extend(ernie_neg.checkpoints)\n 
return pyreader, graph_vars, cp\n\n\ndef evaluate_mrr(preds):\n last_qid = None\n total_mrr = 0.0\n qnum = 0.0\n rank = 0.0\n correct = False\n for qid, score, label in preds:\n if qid != last_qid:\n rank = 0.0\n qnum += 1\n correct = False\n last_qid = qid\n\n rank += 1\n if not correct and label != 0:\n total_mrr += 1.0 / rank\n correct = True\n\n return total_mrr / qnum\n\n\ndef evaluate_map(preds):\n def singe_map(st, en):\n total_p = 0.0\n correct_num = 0.0\n for index in xrange(st, en):\n if int(preds[index][2]) != 0:\n correct_num += 1\n total_p += correct_num / (index - st + 1)\n if int(correct_num) == 0:\n return 0.0\n return total_p / correct_num\n\n last_qid = None\n total_map = 0.0\n qnum = 0.0\n st = 0\n for i in xrange(len(preds)):\n qid = preds[i][0]\n if qid != last_qid:\n qnum += 1\n if last_qid != None:\n total_map += singe_map(st, i)\n st = i\n last_qid = qid\n\n total_map += singe_map(st, len(preds))\n return total_map / qnum\n\n\ndef evaluate(exe,\n test_program,\n test_pyreader,\n graph_vars,\n eval_phase,\n use_multi_gpu_test=False,\n metric='simple_accuracy'):\n train_fetch_list = [\n graph_vars[\"loss\"].name, graph_vars[\"accuracy\"].name,\n graph_vars[\"num_seqs\"].name\n ]\n\n if eval_phase == \"train\":\n if \"learning_rate\" in graph_vars:\n train_fetch_list.append(graph_vars[\"learning_rate\"].name)\n outputs = exe.run(fetch_list=train_fetch_list, program=test_program)\n ret = {\"loss\": np.mean(outputs[0]), \"accuracy\": np.mean(outputs[1])}\n if \"learning_rate\" in graph_vars:\n ret[\"learning_rate\"] = float(outputs[3][0])\n return ret\n\n test_pyreader.start()\n total_cost, total_acc, total_num_seqs, total_label_pos_num, total_pred_pos_num, total_correct_num = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0\n qids, labels, scores, preds = [], [], [], []\n time_begin = time.time()\n\n fetch_list = [\n graph_vars[\"loss\"].name, graph_vars[\"accuracy\"].name,\n graph_vars[\"probs\"].name, graph_vars[\"labels\"].name,\n graph_vars[\"num_seqs\"].name, graph_vars[\"qids\"].name,\n graph_vars[\"q_rep\"].name, graph_vars[\"p_rep\"].name\n ]\n #emb_file = open('emb_qp', 'w')\n while True:\n try:\n if use_multi_gpu_test:\n np_loss, np_acc, np_probs, np_labels, np_num_seqs, np_qids, q_rep, p_rep = exe.run(\n fetch_list=fetch_list)\n else:\n np_loss, np_acc, np_probs, np_labels, np_num_seqs, np_qids, q_rep, p_rep = exe.run(\n program=test_program, fetch_list=fetch_list)\n total_cost += np.sum(np_loss * np_num_seqs)\n total_acc += np.sum(np_acc * np_num_seqs)\n total_num_seqs += np.sum(np_num_seqs)\n labels.extend(np_labels.reshape((-1)).tolist())\n if np_qids is None:\n np_qids = np.array([])\n qids.extend(np_qids.reshape(-1).tolist())\n batch_scores = np.diag(np_probs).reshape(-1).tolist()\n scores.extend(batch_scores)\n #for item in list(zip(q_rep, p_rep, batch_scores)):\n # _left = ' '.join([str(each) for each in item[0]])\n # _right = ' '.join([str(each) for each in item[1]])\n # emb_file.write(_left + '\\t' + _right + '\\t' + str(item[2]) + '\\n')\n #scores.extend(np_probs[:, 1].reshape(-1).tolist())\n #np_preds = np.argmax(np_probs, axis=1).astype(np.float32)\n #preds.extend(np_preds)\n #total_label_pos_num += np.sum(np_labels)\n #total_pred_pos_num += np.sum(np_preds)\n #total_correct_num += np.sum(np.dot(np_preds, np_labels))\n except fluid.core.EOFException:\n test_pyreader.reset()\n break\n #for score in np_preds:\n # print (score)\n #print ('---------------------')\n #time_end = time.time()\n #cost = total_cost / total_num_seqs\n #elapsed_time = time_end - time_begin\n 
#emb_file.close()\n return None # NOTE: short-circuits here; the metric reporting below is currently unreachable\n evaluate_info = \"\"\n if metric == 'acc_and_f1':\n ret = acc_and_f1(preds, labels)\n evaluate_info = \"[%s evaluation] ave loss: %f, ave_acc: %f, f1: %f, data_num: %d, elapsed time: %f s\" \\\n % (eval_phase, cost, ret['acc'], ret['f1'], total_num_seqs, elapsed_time)\n elif metric == 'matthews_corrcoef':\n ret = matthews_corrcoef(preds, labels)\n evaluate_info = \"[%s evaluation] ave loss: %f, matthews_corrcoef: %f, data_num: %d, elapsed time: %f s\" \\\n % (eval_phase, cost, ret, total_num_seqs, elapsed_time)\n elif metric == 'pearson_and_spearman':\n ret = pearson_and_spearman(scores, labels)\n evaluate_info = \"[%s evaluation] ave loss: %f, pearson:%f, spearman:%f, corr:%f, data_num: %d, elapsed time: %f s\" \\\n % (eval_phase, cost, ret['pearson'], ret['spearman'], ret['corr'], total_num_seqs, elapsed_time)\n elif metric == 'simple_accuracy':\n ret = simple_accuracy(preds, labels)\n evaluate_info = \"[%s evaluation] ave loss: %f, acc:%f, data_num: %d, elapsed time: %f s\" \\\n % (eval_phase, cost, ret, total_num_seqs, elapsed_time)\n elif metric == \"acc_and_f1_and_mrr\":\n ret_a = acc_and_f1(preds, labels)\n preds = sorted(\n zip(qids, scores, labels), key=lambda elem: (elem[0], -elem[1]))\n ret_b = evaluate_mrr(preds)\n evaluate_info = \"[%s evaluation] ave loss: %f, acc: %f, f1: %f, mrr: %f, data_num: %d, elapsed time: %f s\" \\\n % (eval_phase, cost, ret_a['acc'], ret_a['f1'], ret_b, total_num_seqs, elapsed_time)\n else:\n raise ValueError('unsupported metric {}'.format(metric))\n return evaluate_info\n\n\ndef matthews_corrcoef(preds, labels):\n preds = np.array(preds)\n labels = np.array(labels)\n tp = np.sum((labels == 1) & (preds == 1))\n tn = np.sum((labels == 0) & (preds == 0))\n fp = np.sum((labels == 0) & (preds == 1))\n fn = np.sum((labels == 1) & (preds == 0))\n\n mcc = ((tp * tn) - (fp * fn)) / np.sqrt(\n (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))\n return mcc\n\n\ndef f1_score(preds, labels):\n preds = np.array(preds)\n labels = np.array(labels)\n\n tp = np.sum((labels == 1) & (preds == 1))\n tn = np.sum((labels == 0) & (preds == 0))\n fp = np.sum((labels == 0) & (preds == 1))\n fn = np.sum((labels == 1) & (preds == 0))\n p = tp / (tp + fp)\n r = tp / (tp + fn)\n f1 = (2 * p * r) / (p + r + 1e-8)\n return f1\n\n\ndef pearson_and_spearman(preds, labels):\n preds = np.array(preds)\n labels = np.array(labels)\n\n pearson_corr = pearsonr(preds, labels)[0]\n spearman_corr = spearmanr(preds, labels)[0]\n return {\n \"pearson\": pearson_corr,\n \"spearman\": spearman_corr,\n \"corr\": (pearson_corr + spearman_corr) / 2,\n }\n\n\ndef acc_and_f1(preds, labels):\n preds = np.array(preds)\n labels = np.array(labels)\n\n acc = simple_accuracy(preds, labels)\n f1 = f1_score(preds, labels)\n return {\n \"acc\": acc,\n \"f1\": f1,\n \"acc_and_f1\": (acc + f1) / 2,\n }\n\n\ndef simple_accuracy(preds, labels):\n preds = np.array(preds)\n labels = np.array(labels)\n return (preds == labels).mean()\n\n\ndef predict(exe,\n test_program,\n test_pyreader,\n graph_vars,\n dev_count=1):\n test_pyreader.start()\n qids, scores, probs = [], [], []\n preds = []\n\n fetch_list = [graph_vars[\"probs\"].name, graph_vars[\"qids\"].name, \\\n graph_vars[\"q_rep\"].name, graph_vars[\"p_rep\"].name,]\n\n emb_file = open('emb_qp', 'w')\n while True:\n try:\n if dev_count == 1:\n np_probs, np_qids, q_rep, p_rep = exe.run(program=test_program,\n fetch_list=fetch_list)\n else:\n np_probs, np_qids, q_rep, p_rep = exe.run(fetch_list=fetch_list)\n\n if np_qids is None:\n 
np_qids = np.array([])\n qids.extend(np_qids.reshape(-1).tolist())\n batch_scores = np_probs.reshape(-1).tolist()\n for item in list(zip(q_rep, p_rep, batch_scores)):\n _left = ' '.join([str(each) for each in item[0]])\n _right = ' '.join([str(each) for each in item[1]])\n #emb_file.write(_left + '\\t' + _right + '\\t' + str(item[2]) + '\\n')\n #emb_file.write(_right + '\\n')\n emb_file.write(str(item[2]) + '\\n')\n #for score in batch_scores:\n # print (score)\n #print ('--------')\n #if is_classify:\n # np_preds = np.argmax(np_probs, axis=1).astype(np.float32)\n # preds.extend(np_preds)\n #elif is_regression:\n # preds.extend(np_probs.reshape(-1))\n\n probs.extend(batch_scores)\n\n except fluid.core.EOFException:\n test_pyreader.reset()\n break\n emb_file.close()\n #probs = np.concatenate(probs, axis=0).reshape([len(preds), -1])\n\n return qids, preds, probs\n","sub_path":"NLP/ACL2021-PAIR/model/src/finetune/dual_encoder.py","file_name":"dual_encoder.py","file_ext":"py","file_size_in_byte":16226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"420635092","text":"import pygame\nfrom copy import deepcopy\nimport sys\nfrom utility import *\nfrom socketIO_client import SocketIO, LoggingNamespace\n\nGREEN = (153, 204, 0)\nYELLOW = (255, 191, 0)\nSELVER = (140, 140, 140)\nRED = (255, 0 , 0)\nSNAKE_WIDTH = 12\nSNAKE_HEIGHT = 12\nSCREEN_WIDTH = 800\nSCREEN_HEIGHT = 600\n\npygame.init()\nsocketIO = SocketIO('localhost', 8080, LoggingNamespace)\n\n\n\n\ngameDisplay = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\npygame.display.set_caption('Hartha')\n\nplayer1_start_point = (10, 10)\nplayer2_start_point = (780, 580)\nclock = pygame.time.Clock()\n\nplayer1 = Snake(GREEN, 0, SNAKE_WIDTH, SNAKE_HEIGHT, player1_start_point, 1)\nplayer2 = Snake(YELLOW, 0, SNAKE_WIDTH, SNAKE_HEIGHT, player2_start_point, 2)\nfood = Food(SCREEN_WIDTH, SCREEN_HEIGHT, RED)\n\ngameExit = False\n\nwhile not gameExit:\n speed = 10\n\n for event in pygame.event.get() :\n if event.type == pygame.QUIT :\n gameExit = True\n\n elif event.type == pygame.KEYDOWN :\n\n if event.key == pygame.K_RIGHT :\n if player1.direc != 2 :\n player1.direc = 1\n speed = 12\n\n elif event.key == pygame.K_LEFT :\n if player1.direc != 1 :\n player1.direc = 2\n speed = 12\n\n elif event.key == pygame.K_UP :\n if player1.direc != 4 :\n player1.direc = 3\n speed = 12\n\n elif event.key == pygame.K_DOWN :\n if player1.direc != 3 :\n player1.direc = 4\n speed = 12\n\n\n gameDisplay.fill(SELVER)\n\n\n if not player1.move_head(speed, SCREEN_WIDTH, SCREEN_HEIGHT) :\n player1.color = RED\n else :\n player1.color = GREEN\n\n if player1.can_eat(food) :\n player1.eat()\n food.calc_new_pos()\n socketIO.emit('eatFood', {'x' : 3})\n\n\n for rect in player1.body :\n pygame.draw.rect(gameDisplay, player1.color, [rect.x, rect.y, SNAKE_WIDTH, SNAKE_HEIGHT])\n\n pygame.draw.rect(gameDisplay, food.color, [food.rect.x, food.rect.y, food.rect.width, food.rect.height])\n pygame.display.update()\n clock.tick(15)\n\npygame.quit()\nsys.exit()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"539435622","text":"import time\n\n\ndef main():\n with open(\"My Diary.txt\", \"a\") as a:\n day = input(\"What do you want to write?\\n\")\n a.write(\"{0}\\n{1}\\n\\n\".format(time.ctime(), day))\n while True:\n ask = input(\"Do you have anything to add on?[y/N]\\n\")\n ask = ask.lower()\n if 
ask == \"y\":\n main()\n elif ask == \"n\":\n print(\"Ok then, bye!\")\n exit()\n else:\n print(\"I don't understand you. Can you please try again?\")\n\n\nmain()\n","sub_path":"diary-writer.py","file_name":"diary-writer.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"126362721","text":"import csv\nimport cv2\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom keras.models import Sequential, Model\nfrom keras.layers import Lambda, Cropping2D\nfrom keras.layers.core import Dense, Activation, Flatten, Dropout\nfrom keras.layers.convolutional import Conv2D\nfrom keras.optimizers import Adam\nfrom sklearn.utils import shuffle\nfrom sklearn.model_selection import train_test_split\n\n### This file is designed for the second track ###\n\ndef read_csv():\n ### This function reads image paths from csv file ###\n\n samples = []\n with open('./data_2/driving_log.csv') as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n samples.append(line)\n\n return samples\n\ndef split_samples(samples):\n ### This function splits the samples into training and validation samples ###\n\n # Splitting samples - 80% as training data and 20% as validation data\n train_samples, validation_samples = train_test_split(samples, test_size=0.2)\n return train_samples, validation_samples\n\ndef read_batch_samples(batch_samples):\n ### This function reads the center images and the steering angle batch by batch and flip them ###\n\n center_images = []\n angles = []\n\n for batch_sample in batch_samples:\n for i in range(3):\n current_path = './data_2/IMG/' + batch_sample[i].split('/')[-1]\n center_image = cv2.cvtColor(cv2.imread(current_path), cv2.COLOR_BGR2RGB)\n center_images.append(center_image)\n\n correction = 0.15\n angle = float(batch_sample[3])\n\n angles.append(angle)\n angles.append(angle + correction)\n angles.append(angle - correction)\n\n return center_images, angles\n\ndef generator(samples, batch_size=32):\n ### This function generate training or validation data with helf of python generator ###\n\n num_samples = len(samples)\n while 1:\n shuffle(samples)\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n\n center_images, angles = read_batch_samples(batch_samples)\n\n X_train = np.array(center_images)\n y_train = np.array(angles)\n\n yield shuffle(X_train, y_train)\n\ndef cnn():\n ### The function preprocess the data and creates the NVIDIA cnn model ###\n\n # Preprocess data\n model = Sequential()\n # Normalize the data\n model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160,320,3)))\n # Crop the data\n model.add(Cropping2D(cropping=((70,25), (0,0))))\n # NVIDIA CNN model\n model.add(Conv2D(24,(5,5),strides=(2,2),activation=\"relu\"))\n model.add(Conv2D(36,(5,5),strides=(2,2),activation=\"relu\"))\n model.add(Conv2D(48,(5,5),strides=(2,2),activation=\"relu\"))\n model.add(Conv2D(64,(3,3),activation=\"relu\"))\n model.add(Conv2D(64,(3,3),activation=\"relu\"))\n model.add(Flatten())\n model.add(Dense(100))\n model.add(Dropout(0.2))\n model.add(Dense(50))\n model.add(Dropout(0.2))\n model.add(Dense(10))\n model.add(Dropout(0.2))\n model.add(Dense(1))\n\n return model\n\nbatch_size = 64\n\nsamples = read_csv()\n\nX_train, X_valid = split_samples(samples)\n\ntrain_generator = generator(X_train, batch_size=batch_size)\n\nvalidation_generator = generator(X_valid, batch_size=batch_size)\n\nmodel = 
cnn()\n\nmodel.compile(optimizer='adam', loss='mse')\n\nmodel.summary()\n\nhistory_object = model.fit_generator(train_generator, steps_per_epoch=\n math.ceil(len(X_train)/batch_size), validation_data=validation_generator,\n validation_steps=math.ceil(len(X_valid)/batch_size), epochs=5, verbose=1)\n\nmodel.save('model_2.h5')\n\nprint(history_object.history.keys())\n\nplt.plot(history_object.history['loss'])\nplt.plot(history_object.history['val_loss'])\nplt.title('model mean squared error loss')\nplt.ylabel('mean squared error loss')\nplt.xlabel('epoch')\nplt.legend(['training set', 'validation set'], loc='upper right')\nplt.show()\n","sub_path":"model_2.py","file_name":"model_2.py","file_ext":"py","file_size_in_byte":3963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"70476507","text":"import os\r\n\r\ndef tree():\r\n dic = {'r': [], 'd': []}\r\n\r\n for root, dirs, files in os.walk('.'):\r\n for dr in dirs:\r\n if 'r' in dr: \r\n dic['r'].append(dr)\r\n for fl in files:\r\n if 'd' in fl:\r\n dic['d'].append(fl)\r\n if 'r' in root:\r\n print(fl)\r\n \r\n return len(dic['r']), len(dic['d'])\r\n\r\ndef main():\r\n return tree()\r\n\r\nif __name__ == \"__main__\":\r\n print(main())\r\n","sub_path":"trees/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"572307137","text":"import argparse\nimport numpy as np\nimport pandas as pd\nimport torch\n\nimport data_managers\nfrom data_managers import PairedMetricCIFAR100Coarse\nfrom pathlib import Path\nfrom torch import nn\nfrom torch.utils.data import DataLoader, random_split\n\nfrom cifar100coarse.cifar100coarse import CIFAR100Coarse\n\n\n# Identify the device to use for all processes\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n\ndef conv_chain(in_channel, out_channel) -> list[nn.Module]:\n return [\n nn.Conv2d(in_channel, out_channel, (3, 3), (1, 1)),\n nn.BatchNorm2d(out_channel),\n nn.ReLU(inplace=True)\n ]\n\n\ndef pooled_conv_chain(in_channel, out_channel, dropout=0.25) -> list[nn.Module]:\n ret_list = conv_chain(in_channel, out_channel)\n ret_list.append(nn.MaxPool2d((2, 2), (2, 2)))\n ret_list.append(nn.Dropout(dropout))\n return ret_list\n\n\nclass SimpleCNN(nn.Module):\n def __init__(self, input_channels: int, no_classes: int):\n super(SimpleCNN, self).__init__()\n\n self.block1 = nn.Sequential(\n *conv_chain(input_channels, 32)\n )\n\n self.block2 = nn.Sequential(\n *pooled_conv_chain(32, 32, 0.25)\n )\n\n self.block3 = nn.Sequential(\n *conv_chain(32, 64)\n )\n\n self.block4 = nn.Sequential(\n *pooled_conv_chain(64, 64, 0.25)\n )\n\n self.block5 = nn.Sequential(\n nn.Flatten(),\n nn.Linear(1600, 512),\n nn.BatchNorm1d(512),\n nn.ReLU(inplace=True),\n nn.Dropout(0.5)\n )\n\n self.interp_block = nn.Linear(512, no_classes)\n\n self.out_block = nn.Softmax(dim=1)\n\n def forward(self, inputs):\n x = self.block1(inputs)\n x = self.block2(x)\n x = self.block3(x)\n x = self.block4(x)\n x = self.block5(x)\n x = self.interp_block(x)\n out = self.out_block(x)\n return out\n\n def run_train_cycle(self, dataloader, loss_fn, optim, report_rate=50):\n\n losses = []\n\n for i, (X, y) in enumerate(dataloader):\n # Load the data into the device for processing\n X, y = X.to(device), y.to(device)\n\n # Prediction loss calc\n pred = self(X)\n loss = loss_fn(pred, y)\n losses.append(loss.cpu().detach().numpy())\n\n # Backpropagation to update the model\n 
optim.zero_grad()\n loss.backward()\n optim.step()\n\n # Report loss every report_rate batches\n if i % report_rate == 0:\n print(f'Loss: {loss.item()} for batch {i}')\n\n return np.array(losses)\n\n def run_test_cycle(self, dataloader, loss_fn):\n # Set this to evaluate mode\n self.eval()\n # Accumulate loss and accuracy across the dataset\n test_loss, correct = 0, 0\n with torch.no_grad():\n for X, y in dataloader:\n X, y = X.to(device), y.to(device)\n pred = self(X)\n test_loss += loss_fn(pred, y).item()\n correct += (pred.argmax(1) == y).type(torch.float).sum().item()\n # Evaluate the average loss and accuracy across the dataset\n size = len(dataloader.dataset)\n test_loss /= size\n correct /= size\n # Report it to the console\n print(f\"Test Error: \\n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \\n\")\n # Return a dictionary containing the results (for use in plotting etc.), list wrapped for dataframe usage\n return {\n \"loss\": [test_loss],\n \"accuracy\": [correct]\n }\n\n\nclass PairedDenseCNN(nn.Module):\n def __init__(self, image_channels: int, dense_inputs: int, no_classes: int):\n super(PairedDenseCNN, self).__init__()\n\n # Initialize the CNN branch of the network\n self.cnn_block1 = nn.Sequential(\n *conv_chain(image_channels, 32)\n )\n\n self.cnn_block2 = nn.Sequential(\n *pooled_conv_chain(32, 32, 0.25)\n )\n\n self.cnn_block3 = nn.Sequential(\n *conv_chain(32, 64)\n )\n\n self.cnn_block4 = nn.Sequential(\n *pooled_conv_chain(64, 64, 0.25)\n )\n\n self.cnn_block5 = nn.Sequential(\n nn.Flatten(),\n nn.Linear(1600, 512),\n nn.BatchNorm1d(512),\n nn.ReLU(inplace=True),\n nn.Dropout(0.5)\n )\n\n # Initialize the dense branch of the network\n self.dense_branch = nn.Sequential(\n nn.Linear(dense_inputs, dense_inputs),\n nn.Linear(dense_inputs, dense_inputs),\n nn.Linear(dense_inputs, dense_inputs)\n )\n\n # The \"interpretation\" block, which attempts to make sense of the two branches' combined outputs\n self.interp_block = nn.Linear(512 + dense_inputs, no_classes)\n\n # The final output block, being a softmax evaluation categorization\n self.out_block = nn.Softmax(dim=1)\n\n def forward(self, cnn_input, deep_input):\n # Chain the cnn blocks first\n x1 = self.cnn_block1(cnn_input)\n x1 = self.cnn_block2(x1)\n x1 = self.cnn_block3(x1)\n x1 = self.cnn_block4(x1)\n x1 = self.cnn_block5(x1)\n\n # Chain the deep blocks next\n x2 = self.dense_branch(deep_input)\n\n # Concatenate the results, and feed it into interp block\n x = torch.cat((x1, x2), dim=1)\n x = self.interp_block(x)\n\n # Finally, calculate the predicted categories\n cnn_out = self.out_block(x)\n return cnn_out\n\n def run_train_cycle(self, dataloader, loss_fn, optim, report_rate=50):\n losses = []\n\n for i, ((X1, X2), y) in enumerate(dataloader):\n # Load the data into the device for processing\n X1, X2, y = X1.to(device), X2.to(device), y.to(device)\n\n # Prediction loss calc\n pred = self(cnn_input=X1, deep_input=X2)\n loss = loss_fn(pred, y)\n losses.append(loss.cpu().detach().numpy())\n\n # Backpropagation to update the model\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n # Report loss every report_rate batches\n if i % report_rate == 0:\n print(f'Loss: {loss.item()} for batch {i}')\n\n return losses\n\n def run_test_cycle(self, dataloader, loss_fn):\n # Set this to evaluate mode\n self.eval()\n # Accumulate loss and accuracy across the dataset\n test_loss, correct = 0, 0\n with torch.no_grad():\n for (X1, X2), y in dataloader:\n X1, X2, y = X1.to(device), X2.to(device), y.to(device)\n pred = 
self(cnn_input=X1, deep_input=X2)\n test_loss += loss_fn(pred, y).item()\n correct += (pred.argmax(1) == y).type(torch.float).sum().item()\n # Evaluate the average loss and accuracy across the dataset\n size = len(dataloader.dataset)\n test_loss /= size\n correct /= size\n # Report it to the console\n print(f\"Test Error: \\n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \\n\")\n # Return a dictionary containing the results (for use in plotting etc.)\n return {\n \"loss\": [test_loss],\n \"accuracy\": [correct]\n }\n\n\nif __name__ == '__main__':\n # Parse the command line arguments\n parser = argparse.ArgumentParser(description=\"Run our static networks and record their progress\")\n\n parser.add_argument('-o', '--output', help='The destination in which to place the output files')\n parser.add_argument('-e', '--epochs', help='The number of epochs that should be run on each model per cycle',\n type=int)\n parser.add_argument('-b', '--batch', help='The size of the batches to submit to the network during training',\n type=int)\n\n args = parser.parse_args()\n\n output_path = args.output\n\n epochs = args.epochs\n batch_size = args.batch\n\n del args\n del parser\n\n # Initialize our dataframe dump files, resetting them if they already exist\n tmp_train_df = pd.DataFrame(columns=[\"model\", \"cycle\", \"epoch\", \"losses\"])\n train_output_file = Path(output_path, \"training_progress.tsv\")\n tmp_train_df.to_csv(train_output_file, mode='w', sep='\\t', header=True)\n del tmp_train_df\n\n tmp_test_df = pd.DataFrame(columns=[\"model\", \"cycle\", \"epoch\", \"loss\", \"accuracy\"])\n test_output_file = Path(output_path, \"testing_progress.tsv\")\n tmp_test_df.to_csv(test_output_file, mode='w', sep='\\t', header=True)\n del tmp_test_df\n\n # Prepare our data for the simple CNN\n training_data = CIFAR100Coarse(\n root=\"data\",\n train=True,\n download=True,\n transform=data_managers.train_transform\n )\n\n testing_data = CIFAR100Coarse(\n root=\"data\",\n train=False,\n download=True,\n transform=data_managers.test_transform\n )\n\n # Lock the random seed here to make the batches consistent between tests with different models\n torch.manual_seed(36246)\n # Split the training data into sets of 5000 entries\n training_sets = random_split(training_data, [5000] * 10)\n # Split the testing data into sets of 1000 entries\n testing_sets = random_split(testing_data, [1000] * 10)\n # Note: torch.initial_seed() only reads back the current seed; it does not reseed the RNG\n torch.initial_seed()\n\n # Run the simple CNN for 10 cycles\n for cycle in range(10):\n base_data = {\n \"model\": [\"SimpleCNN\"],\n \"cycle\": [cycle]\n }\n\n train_dataloader = DataLoader(training_sets[cycle], batch_size=batch_size, shuffle=True)\n testing_dataloader = DataLoader(testing_sets[cycle], batch_size=batch_size, shuffle=True)\n\n # Initialize our model\n model = SimpleCNN(3, 20).to(device)\n\n # Initialize our loss function and optimizer\n loss_fn = nn.CrossEntropyLoss()\n optim = torch.optim.Adam(model.parameters(), lr=0.001, betas=(0.99, 0.999), weight_decay=0.001)\n\n # Run the train/test cycles\n for e in range(epochs):\n base_data['epoch'] = [e]\n\n print(f\"Epoch {e + 1} (Simple CNN)\\n----------------------------------------------------\")\n losses = model.run_train_cycle(train_dataloader, loss_fn, optim)\n training_df = pd.DataFrame({**base_data, \"losses\": [losses]})\n training_df.to_csv(train_output_file, mode='a', header=False, sep='\\t')\n del training_df\n\n test_results = model.run_test_cycle(testing_dataloader, loss_fn)\n 
testing_df = pd.DataFrame({**base_data, **test_results})\n testing_df.to_csv(test_output_file, mode='a', header=False, sep='\\t')\n del testing_df\n print(f\"Finished cycle {cycle}!\")\n\n # Prepare our data for the paired CNN/Dense network\n training_data = data_managers.build_dataset(True)\n testing_data = data_managers.build_dataset(False)\n\n # Split the training data into sets of 5000 entries\n training_sets = random_split(training_data, [5000] * 10)\n # Split the testing data into sets of 1000 entries\n testing_sets = random_split(testing_data, [1000] * 10)\n\n # Run the paired CNN+Dense network for 10 cycles\n for cycle in range(10):\n base_data = {\n \"model\": [\"PairedDenseCNN\"],\n \"cycle\": [cycle]\n }\n\n # Shuffle here as well, mirroring the simple CNN loop above\n train_dataloader = DataLoader(training_sets[cycle], batch_size=batch_size, shuffle=True)\n testing_dataloader = DataLoader(testing_sets[cycle], batch_size=batch_size, shuffle=True)\n\n # Initialize our model\n model = PairedDenseCNN(3, 3, 20).to(device)\n\n # Initialize our loss function and optimizer\n loss_fn = nn.CrossEntropyLoss()\n optim = torch.optim.Adam(model.parameters(), lr=0.001, betas=(0.99, 0.999))\n\n # Run the train/test cycles\n for e in range(epochs):\n base_data['epoch'] = [e]\n\n print(f\"Epoch {e + 1} (Paired CNN/Dense)\\n----------------------------------------------------\")\n losses = model.run_train_cycle(train_dataloader, loss_fn, optim)\n training_df = pd.DataFrame({**base_data, \"losses\": [losses]})\n training_df.to_csv(train_output_file, mode='a', header=False, sep='\\t')\n del training_df\n\n test_results = model.run_test_cycle(testing_dataloader, loss_fn)\n testing_df = pd.DataFrame({**base_data, **test_results})\n testing_df.to_csv(test_output_file, mode='a', header=False, sep='\\t')\n del testing_df\n print(f\"Finished cycle {cycle}!\")\n","sub_path":"backup/convnet_batched.py","file_name":"convnet_batched.py","file_ext":"py","file_size_in_byte":12573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"464193958","text":"from django.conf.urls import url\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom . 
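
For reference, the progress files written above can be read back with matching options — a minimal sketch (file names as used in the script; the first CSV column is the unnamed pandas index):

```python
import pandas as pd

# The dumps were written with to_csv(..., sep='\t') and a default index column,
# so read them back as tab-separated with the first column as the index
train_df = pd.read_csv('training_progress.tsv', sep='\t', index_col=0)
test_df = pd.read_csv('testing_progress.tsv', sep='\t', index_col=0)

# e.g. best accuracy reached by each model across all cycles/epochs
print(test_df.groupby('model')['accuracy'].max())
```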
import views\n\nurlpatterns = [\n #url(r'^$', views.generic, name='generic.html'),\n url(r'^$', views.index, name='index.html'),\n url(r'^search/$', views.search, name='search.html'),\n url(r'^about/$', views.about, name='about.html'),\n url(r'^maintenance/$', views.maintenance, name='maintenance.html'),\n # NOTE: the capture-group name was lost in extraction; 'algae_id' is a reconstruction\n url(r'^algae/(?P<algae_id>\w+)/$', views.algaeinfo, name='algaeinfo.html'),\n]\n","sub_path":"niva_cca/algae_collection/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"215318525","text":"import pytest\nfrom django.conf import settings\nfrom django.http import Http404\n\nfrom grandchallenge.workstations.models import Session\nfrom grandchallenge.workstations.utils import (\n    get_or_create_active_session,\n    get_workstation_image_or_404,\n)\nfrom tests.factories import (\n    UserFactory,\n    WorkstationFactory,\n    WorkstationImageFactory,\n)\n\n\n@pytest.mark.django_db\ndef test_get_or_create_active_session():\n    user = UserFactory()\n    wsi = WorkstationImageFactory()\n\n    assert Session.objects.all().count() == 0\n\n    s = get_or_create_active_session(\n        user=user, workstation_image=wsi, region=\"eu-central-1\"\n    )\n\n    assert s.workstation_image == wsi\n    assert s.creator == user\n    assert Session.objects.all().count() == 1\n\n    # Same workstation image and user\n    s_1 = get_or_create_active_session(\n        user=user, workstation_image=wsi, region=\"eu-central-1\"\n    )\n    assert s == s_1\n\n    # Different workstation image, same user\n    wsi_1 = WorkstationImageFactory()\n    s_2 = get_or_create_active_session(\n        user=user, workstation_image=wsi_1, region=\"eu-central-1\"\n    )\n\n    assert s_2.workstation_image == wsi_1\n    assert s_2.creator == user\n    assert Session.objects.all().count() == 2\n    assert s_1 != s_2\n\n    # Same workstation image, different user\n    user_1 = UserFactory()\n    s_3 = get_or_create_active_session(\n        user=user_1, workstation_image=wsi, region=\"eu-central-1\"\n    )\n    assert s_3.workstation_image == wsi\n    assert s_3.creator == user_1\n    assert Session.objects.all().count() == 3\n\n    # Stop the original session, original workstation image and user\n    s.status = s.STOPPED\n    s.save()\n\n    s_4 = get_or_create_active_session(\n        user=user, workstation_image=wsi, region=\"eu-central-1\"\n    )\n    assert s_4.workstation_image == wsi\n    assert s_4.creator == user\n    assert Session.objects.all().count() == 4\n\n\n@pytest.mark.django_db\ndef test_get_workstation_image_or_404():\n    # No default workstation\n    with pytest.raises(Http404):\n        get_workstation_image_or_404()\n\n    default_wsi = WorkstationImageFactory(\n        workstation__title=settings.DEFAULT_WORKSTATION_SLUG, ready=True\n    )\n    wsi = WorkstationImageFactory(ready=True)\n\n    found_wsi = get_workstation_image_or_404()\n    assert found_wsi == default_wsi\n\n    found_wsi = get_workstation_image_or_404(slug=wsi.workstation.slug)\n    assert found_wsi == wsi\n\n    found_wsi = get_workstation_image_or_404(pk=wsi.pk)\n    assert found_wsi == wsi\n\n    # No images for workstation\n    with pytest.raises(Http404):\n        get_workstation_image_or_404(slug=WorkstationFactory().slug)\n\n    # Incorrect pk\n    with pytest.raises(Http404):\n        get_workstation_image_or_404(pk=WorkstationFactory().pk)\n","sub_path":"app/tests/workstations_tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":2758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"648159454","text":"from flask_restplus import Namespace, Resource, 
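
The named capture group in the last route is passed to the view as a keyword argument, so the view parameter must match the group name — a sketch of what `views.algaeinfo` would have to look like under the reconstructed group name (the body is hypothetical):

```python
from django.shortcuts import render

def algaeinfo(request, algae_id):
    # 'algae_id' must match the (?P<algae_id>...) group name in urls.py
    return render(request, 'algaeinfo.html', {'algae_id': algae_id})
```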
reqparse\n\nfrom models import *\n\napi = Namespace('Book List', description='BookLists related operations')\n\nparser = reqparse.RequestParser()\n\n# TODO: the \"owner_id\" parameter should be removed after logged in user info is saved\nparser.add_argument('owner_id', help='id of the user who created the list')\nparser.add_argument('list_name', help='name of the list')\nparser.add_argument('books', action='append', help='books (represented as book_id) to be included in the list')\n\n\n@api.route('/')\nclass Lists(Resource):\n\n @api.doc('get_lists')\n @api.doc(responses={\n 200: 'Success',\n 400: 'Validation Error'\n })\n @api.doc(params={'owner_id': 'user_id of the owner'})\n @api.doc(params={'list_name': 'name of the list'})\n def get(self):\n '''get all lists given constraints'''\n args = parser.parse_args()\n owner_id = args['owner_id']\n list_name = args['list_name']\n\n queries = []\n\n if owner_id is not None:\n queries.append(List.OwnerId==owner_id)\n \n if list_name is not None:\n queries.append(List.ListName==list_name)\n\n book_list = db.session.query(List, ListToBooks, Book)\\\n .join(ListToBooks, List.ListId==ListToBooks.ListId)\\\n .join(Book, Book.BookId==ListToBooks.BookId)\\\n .filter(*queries).all()\n\n retList = []\n\n for book in book_list:\n fields = {}\n\n for field in Serializer.serialize_list(book):\n for k, v in field.items():\n if (k not in fields):\n fields[k] = v\n\n fields['Created'] = str(fields['Created'])\n \n retList.append(fields)\n\n # Group rows by ListId, collecting the books of each list\n # (renamed from 'dict', which shadowed the builtin)\n lists_by_id = {}\n\n for tmpList in retList:\n print(tmpList)\n if tmpList['ListId'] not in lists_by_id:\n tmpList['BookId'] = [tmpList['BookId']]\n lists_by_id[tmpList['ListId']] = tmpList\n else:\n lists_by_id[tmpList['ListId']]['BookId'].append(tmpList['BookId'])\n \n return list(lists_by_id.values()), 200\n\n @api.doc('create_list')\n @api.doc(responses={\n 201: 'Created',\n 400: 'Validation Error'\n })\n @api.expect(parser)\n def post(self):\n '''create a list'''\n args = parser.parse_args()\n owner_id = args['owner_id']\n list_name = args['list_name']\n books = args['books']\n new_list = List(OwnerId=owner_id, ListName=list_name)\n \n db.session.add(new_list)\n db.session.commit()\n\n # must run after the list is created, otherwise list_id is null\n for book_id in books:\n new_ListToBooks = ListToBooks(ListId=new_list.ListId, BookId=book_id)\n db.session.add(new_ListToBooks)\n\n db.session.commit()\n\n return \"Success!\", 201\n\n\n@api.route('/<list_id>')\n@api.param('list_id', 'The list identifier')\n@api.response(404, 'List not found')\nclass ListOfID(Resource):\n @api.doc(responses={\n 200: 'Success',\n })\n @api.doc('get_list')\n def get(self, list_id):\n '''Fetch a list given its identifier'''\n List.query.get_or_404(list_id)\n queries = []\n queries.append(List.ListId==list_id)\n \n book_list = db.session.query(List, ListToBooks, Book)\\\n .join(ListToBooks, List.ListId==ListToBooks.ListId)\\\n .join(Book, Book.BookId==ListToBooks.BookId)\\\n .filter(*queries).all()\n\n fields = {}\n fields['books'] = []\n\n # query returns list of books, need to parse the fields of each book\n for book in book_list:\n for field in Serializer.serialize_list(book):\n for k, v in field.items():\n if (k == 'BookId' and v not in fields['books']):\n fields['books'].append(v)\n elif (k not in fields):\n fields[k] = v\n \n # Serialize the dateTime type \n fields['Created'] = str(fields['Created'])\n\n return fields, 200\n\n @api.doc(responses={\n 200: 'Success',\n })\n @api.expect(parser)\n def put(self, list_id):\n '''update a list given its identifier'''\n book_list = 
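
The `/<list_id>` converter in the route above is what feeds the `list_id` argument of the `ListOfID` methods; a minimal standalone sketch of the same Flask routing rule (names are illustrative):

```python
from flask import Flask

app = Flask(__name__)

@app.route('/lists/<list_id>')
def get_list(list_id):
    # The <list_id> path segment arrives as a string keyword argument
    return f'list {list_id}'
```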
List.query.get_or_404(list_id)\n args = parser.parse_args()\n owner_id = args['owner_id']\n list_name = args['list_name']\n\n if owner_id is not None:\n book_list.OwnerId = owner_id\n if list_name is not None:\n book_list.ListName = list_name\n\n # for genres or authors or lists, use the map classes\n db.session.commit()\n\n return {\"update list\": \"success\"}, 200\n\n @api.doc(responses={\n 204: 'Deleted',\n })\n def delete(self, list_id):\n '''delete a list given its identifier'''\n List.query.get_or_404(list_id)\n ListToBooks.query.filter_by(ListId=list_id).delete()\n List.query.filter_by(ListId=list_id).delete()\n db.session.commit()\n return 'Success', 204","sub_path":"apis/booklist.py","file_name":"booklist.py","file_ext":"py","file_size_in_byte":5061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"124293504","text":"# -*- coding:utf-8 -*-\n\nimport operator\n\nfile_path = '/Users/zhijian/Desktop/聚合/'\nfile_name_list = ('1536232759000', '1536249600000', '1536336000000', '1536422400000', '1536508800000', '1536595200000')\ntarget_file_name = 'hotWords.txt'\nwordDict = {}\n\nfor file_name in file_name_list:\n    file_words = open('%s%s' % (file_path, file_name), 'r')\n    words = file_words.readlines()\n    for word in words:\n        word = word.strip().split(\",\")\n        if wordDict.get(word[0]) is None:\n            wordDict[word[0]] = int(word[1])\n        else:\n            wordDict[word[0]] = wordDict[word[0]] + int(word[1])\nwordList = sorted(wordDict.items(), key=operator.itemgetter(1), reverse=True)\nwords = []\nfor key, value in wordList:\n    print('%s:%d' % (key, value))\n    words.append('%s:%d\\n' % (key, value))\n# open(file_path + target_file_name, 'w+').writelines(words)\n","sub_path":"py_project/字符串/reportWord.py","file_name":"reportWord.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"529172897","text":"# -*- coding: utf-8 -*-\n\"\"\"\nBase class for implementing the variants of the relevance and synonymy detector\nthat use gradient boosting and sparse shingle matrices.\n\"\"\"\n\nfrom scipy.sparse import lil_matrix\nimport logging\n\n\nclass GB_BaseDetector(object):\n def __init__(self):\n super(GB_BaseDetector, self).__init__()\n self.logger = logging.getLogger('GB_BaseDetector')\n\n # The parameters for building the feature matrix from sentence pairs must be\n # loaded in the load method from the model config saved by the trainer.\n self.xgb_relevancy_shingle2id = None\n self.xgb_relevancy_shingle_len = None\n self.xgb_relevancy_nb_features = None\n self.xgb_relevancy_lemmatize = None\n\n # For XGBoost the element type of the input matrix may be bool,\n # for LightGBM it must be 'float32'\n self.x_matrix_type = '<>'\n\n def init_model_params(self, model_config):\n self.xgb_relevancy_shingle2id = model_config['shingle2id']\n self.xgb_relevancy_shingle_len = model_config['shingle_len']\n self.xgb_relevancy_nb_features = model_config['nb_features']\n self.xgb_relevancy_lemmatize = model_config['lemmatize']\n\n def normalize_qline(self, phrase):\n return phrase.replace(u'?', u' ').replace(u'!', u' ').strip()\n\n def calc_relevancy1(self, premise, question, text_utils, predictor_func):\n \"\"\"Return a confidence score that the two given phrases are relevant to each other\"\"\"\n X_data = lil_matrix((1, self.xgb_relevancy_nb_features), dtype=self.x_matrix_type)\n\n if self.xgb_relevancy_lemmatize:\n premise_words = text_utils.lemmatize(self.normalize_qline(premise))\n question_words 
= text_utils.lemmatize(self.normalize_qline(question))\n else:\n premise_words = text_utils.tokenize(self.normalize_qline(premise))\n question_words = text_utils.tokenize(self.normalize_qline(question))\n\n premise_wx = text_utils.words2str(premise_words)\n question_wx = text_utils.words2str(question_words)\n\n premise_shingles = set(text_utils.ngrams(premise_wx, self.xgb_relevancy_shingle_len))\n question_shingles = set(text_utils.ngrams(question_wx, self.xgb_relevancy_shingle_len))\n\n self.xgb_relevancy_vectorize_sample_x(X_data, 0, premise_shingles, question_shingles,\n self.xgb_relevancy_shingle2id)\n\n y_probe = predictor_func(X_data)\n return y_probe[0]\n\n def get_most_relevant(self, probe_phrase, phrases, text_utils, predictor_func, nb_results=1):\n \"\"\"\n Search for the most relevant premise(s) or the closest synonym using one of the\n gradient boosting models (XGBoost, LightGBM).\n\n :param probe_phrase - unicode question string\n :param phrases - list of candidate premises from the knowledge base, or of reference commands for the synonym detector\n :param text_utils - TextUtils instance with the tokenization, lemmatization etc. code\n :param predictor_func - function that takes X_data with the vectorized phrase pairs and returns the model output\n :param nb_results - number of results to return; by default the single most relevant record is returned\n\n :return if nb_results=1, a tuple with two fields ('text of the best premise', relevance_score);\n otherwise a tuple with two fields - the list of premises sorted by decreasing relevance\n and the list of the corresponding relevance scores.\n \"\"\"\n\n # BEGIN DEBUG\n #phrases = []\n #phrases.append((u'меня зовут кеша', None, None))\n # END DEBUG\n\n nb_answers = len(phrases)\n X_data = lil_matrix((nb_answers, self.xgb_relevancy_nb_features), dtype=self.x_matrix_type)\n\n # Prepare the single question up front\n if self.xgb_relevancy_lemmatize:\n question_words = text_utils.lemmatize(self.normalize_qline(probe_phrase))\n else:\n question_words = text_utils.tokenize(self.normalize_qline(probe_phrase))\n\n question_wx = text_utils.words2str(question_words)\n question_shingles = set(text_utils.ngrams(question_wx, self.xgb_relevancy_shingle_len))\n\n # Vectorize all premises from the current fact base into a single tensor\n # so it can be run through the classifier in one pass.\n for ipremise, (premise, premise_person, phrase_code) in enumerate(phrases):\n if premise is None or len(premise) == 0:\n raise ValueError()\n\n if self.xgb_relevancy_lemmatize:\n premise_words = text_utils.lemmatize(self.normalize_qline(premise))\n else:\n premise_words = text_utils.tokenize(self.normalize_qline(premise))\n\n premise_wx = text_utils.words2str(premise_words)\n premise_shingles = set(text_utils.ngrams(premise_wx, self.xgb_relevancy_shingle_len))\n\n self.xgb_relevancy_vectorize_sample_x(X_data, ipremise, premise_shingles, question_shingles,\n self.xgb_relevancy_shingle2id)\n\n y_probe = predictor_func(X_data)\n\n reslist = []\n for ipremise, (premise, premise_person, phrase_code) in enumerate(phrases):\n sim = y_probe[ipremise]\n reslist.append((premise, sim))\n\n # Sort the results in decreasing order of relevance.\n reslist = sorted(reslist, key=lambda z: -z[1])\n\n if nb_results == 1:\n # Return the single record with the highest relevance.\n best_premise = reslist[0][0]\n best_rel = reslist[0][1]\n return best_premise, 
best_rel\n else:\n # Return the requested number of the most relevant records.\n n = min(nb_results, nb_answers)\n best_premises = [reslist[i][0] for i in range(n)]\n best_rels = [reslist[i][1] for i in range(n)]\n return best_premises, best_rels\n\n def unknown_shingle(self, shingle):\n # self.logger.error(u'Shingle \"{}\" is unknown'.format(shingle))\n pass\n\n def xgb_relevancy_vectorize_sample_x(self, X_data, idata,\n premise_shingles, question_shingles,\n shingle2id):\n # For internal use - vectorization of the premise and the question.\n ps = set(premise_shingles)\n qs = set(question_shingles)\n common_shingles = ps & qs\n notmatched_ps = ps - qs\n notmatched_qs = qs - ps\n\n nb_shingles = len(shingle2id)\n\n icol = 0\n for shingle in common_shingles:\n if shingle not in shingle2id:\n self.unknown_shingle(shingle)\n else:\n X_data[idata, icol + shingle2id[shingle]] = True\n\n icol += nb_shingles\n for shingle in notmatched_ps:\n if shingle not in shingle2id:\n self.unknown_shingle(shingle)\n else:\n X_data[idata, icol + shingle2id[shingle]] = True\n\n icol += nb_shingles\n for shingle in notmatched_qs:\n if shingle not in shingle2id:\n self.unknown_shingle(shingle)\n else:\n X_data[idata, icol + shingle2id[shingle]] = True\n","sub_path":"ruchatbot/bot/gb_base_detector.py","file_name":"gb_base_detector.py","file_ext":"py","file_size_in_byte":8747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"264739472","text":"# -*- coding: utf-8 -*-\n\nfrom .. import common\nfrom ..builder import buildShlib\n\nfrom ..sucrose import window\n\nMODULE_NAME = 'window'\nTARGET = common.FG4CPP + '-' + MODULE_NAME\n\ndef build( _context ):\n    sources = {\n        common.SOURCE_DIR : {\n            common.SUCROSE4CPP : {\n                MODULE_NAME : [\n                    'eventhandlers.cpp',\n                ],\n            },\n        },\n    }\n\n    useModules = {\n        window.TARGET,\n    }\n\n    buildShlib(\n        _context,\n        TARGET,\n        sources = sources,\n        useModules = useModules,\n    )\n","sub_path":"wscripts/sucrose4cpp/window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"282848814","text":"'''\r\nDisplays the edit menu for a channel\r\nLast update: 15/01/19\r\n'''\r\n# DEPENDENCIES\r\nimport discord\r\nimport asyncio\r\n\r\nfrom discord.utils import get\r\n\r\n # CUSTOM\r\nfrom conf.lang import*\r\n\r\nfrom conf.settings import*\r\nfrom conf.readability import*\r\n\r\nfrom cogs.fonctions.sub_functions.channel.editChannelMenu import Channel_Edit\r\n\r\nasync def Channel_EditMenu(client, ctx):\r\n    user = ctx.message.author\r\n    continuer = True\r\n    convient = False\r\n\r\n    while(continuer and convient == False):\r\n        # Loop while the user has not given up and\r\n        # no suitable channel has been found (i.e. one that exists)\r\n        askChannel = askName_EditChannel\r\n        channelNameEmbed = Simple_Embed('', 0, fieldName_EditChannel, askChannel)\r\n        modifyEmbed = await ctx.send(embed=channelNameEmbed)\r\n\r\n        # Check that the message comes from the user who asked to create the channel\r\n        def check(message):\r\n            return message.author == user\r\n        \r\n        waitChannelName = await client.wait_for('message', timeout=120, check=check)\r\n\r\n        # Check whether the channel exists\r\n        try:\r\n            getChannel = get(ctx.guild.channels, name=waitChannelName.content)\r\n            print(getChannel.name)\r\n        except AttributeError:\r\n            await ctx.send(unexist_EditChannel.format(user.id), delete_after=5)\r\n            await modifyEmbed.delete()\r\n        else:\r\n            confirmEmbed = 
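
The vectorizer above lays out three one-hot blocks per sample — shingles common to both phrases, premise-only shingles, and question-only shingles — giving 3 * nb_shingles columns in total. A toy illustration of the set arithmetic (toy shingle vocabulary, not the trained one):

```python
from scipy.sparse import lil_matrix

shingle2id = {'abc': 0, 'bcd': 1, 'cde': 2}   # toy vocabulary
nb = len(shingle2id)

ps = {'abc', 'bcd'}          # premise shingles
qs = {'bcd', 'cde'}          # question shingles

X = lil_matrix((1, 3 * nb), dtype='float32')
for sh in ps & qs:           # block 0: shared shingles
    X[0, shingle2id[sh]] = 1
for sh in ps - qs:           # block 1: premise-only shingles
    X[0, nb + shingle2id[sh]] = 1
for sh in qs - ps:           # block 2: question-only shingles
    X[0, 2 * nb + shingle2id[sh]] = 1

print(X.toarray())  # [[0. 1. 0. 1. 0. 0. 0. 0. 1.]]
```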
Simple_Embed('', 0, fieldName_EditChannel, confirm_EditChannel.format(waitChannelName.content))\r\n            await modifyEmbed.delete()\r\n            confirm = await ctx.send(embed=confirmEmbed)\r\n\r\n            # Fetch the emojis\r\n            button_valid = client.get_emoji(531893961107963917)\r\n            button_wrong = client.get_emoji(531938941318856714)\r\n\r\n            # Add the reactions\r\n            await confirm.add_reaction(emoji=button_valid)\r\n            await confirm.add_reaction(emoji=button_wrong)\r\n            await confirm.add_reaction(emoji='➖')\r\n            await confirm.add_reaction(emoji='⛔')\r\n\r\n            # Define the check for the vote \r\n            def reac_check(reaction, voter):\r\n                '''\r\n                Collects the reactions and the voters and checks their identity\r\n\r\n                Return: true - if the voter is the author of the message\r\n                '''\r\n                return voter == user and (str(reaction.emoji) == '<:valid:531893961107963917>' or \r\n                    str(reaction.emoji) == '<:wrong:531938941318856714>' or str(reaction.emoji) == '⛔')\r\n            \r\n            reaction, voter = await client.wait_for('reaction_add', timeout=120, check=reac_check)\r\n\r\n            if(str(reaction.emoji) == '<:valid:531893961107963917>'):\r\n                convient = True\r\n                channelName = waitChannelName.content\r\n                # Open the edit menu\r\n                await confirm.delete()\r\n                await Channel_Edit(client, channelName, ctx)\r\n            if(str(reaction.emoji) == '<:wrong:531938941318856714>'):\r\n                # Send back a message\r\n                pass\r\n            if(str(reaction.emoji) == '⛔'):\r\n                # Delete everything and give up\r\n                await confirm.delete()\r\n                continuer = False","sub_path":"cogs/fonctions/sub_functions/channel/ChoiceEditChann.py","file_name":"ChoiceEditChann.py","file_ext":"py","file_size_in_byte":3308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"232652337","text":"import folium as fl\nimport pandas as pd\n\n# Initialization - data import and initial map creation\nmy_map = fl.Map(location=[65.089959, 134.153830], zoom_start=4, tiles='Mapbox Bright')\nvol_df = pd.read_csv('Data/volcanoes_rus_clear.csv')\n\n# World active volcanoes list import and data prep to same format\n# NB: All records with errors were dropped\nvol_df_a = pd.read_csv('Data/volcanoes_world_active.csv')\nvol_df_a = vol_df_a.drop(['Country', 'Type'], 1)\nvol_df_a.columns = ['name', 'lat', 'lon', 'height']\nvol_df_a['lat'] = pd.to_numeric(vol_df_a['lat'], errors='coerce')\nvol_df_a['lon'] = pd.to_numeric(vol_df_a['lon'], errors='coerce')\nvol_df_a = vol_df_a.dropna()\n\n# Replaced by iterrows iteration over the DataFrame\n# lat = list(vol_df['lat'])\n# lon = list(vol_df['lon'])\n# name = list(vol_df['name'])\n# elev = list(vol_df['height'])\n\n# NOTE: the HTML tags in this popup template were stripped during extraction; the\n# anchor tag below is a reconstruction so that all three format arguments are consumed\nhtml = \"\"\"<a href=\"https://www.google.com/search?q=%s\" target=\"_blank\">%s</a>
<br>\nHeight: %s m\n\"\"\"\n\n\ndef color_selector(el):\n\n    \"\"\" Select color of the marker depending on height\"\"\"\n\n    if el < 1000:\n        return 'green'\n    elif el < 2500:\n        return 'orange'\n    else:\n        return 'red'\n\n\n# Forming group of markers\nfgv = fl.FeatureGroup(name='Volcanoes')\nfga = fl.FeatureGroup(name='Active')\n\nfor i, row in vol_df.iterrows():\n\n    iframe = fl.IFrame(html=html % (row['name'], row['name'], row['height']), width=150, height=80)\n    fgv.add_child(fl.CircleMarker(location=[row['lat'], row['lon']], popup=fl.Popup(iframe),\n                                  color=color_selector(row['height']), radius=6, weight=1,\n                                  fill=True, fill_opacity=0.8))\n\n\nfor i, row in vol_df_a.iterrows():\n\n    iframe = fl.IFrame(html=html % (row['name'], row['name'], row['height']), width=150, height=80)\n    fga.add_child(fl.Marker(location=[row['lat'], row['lon']], popup=fl.Popup(iframe),\n                            icon=fl.Icon(color=color_selector(row['height']))))\n\n\n\nfgv.add_child(fl.GeoJson(data=open('Data/world.json', 'r', encoding='utf-8-sig').read(),\n                         style_function=lambda x: {'fillColor':'green' if x['properties']['POP2005'] < 50000000 else\n                         'orange' if x['properties']['POP2005'] < 150000000 else 'red'}))\n\n# Adding groups to the map and saving the map\n\nmy_map.add_child(fgv)\nmy_map.add_child(fga)\nmy_map.save('Results/Map5.html')\n","sub_path":"app2_my_3.py","file_name":"app2_my_3.py","file_ext":"py","file_size_in_byte":2398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"402264128","text":"import copy\nimport logging\n\nimport synapse.exc as s_exc\nimport synapse.common as s_common\nimport synapse.tests.utils as s_t_utils\nfrom synapse.tests.utils import alist\n\nlogger = logging.getLogger(__name__)\n\nclass InetModelTest(s_t_utils.SynTest):\n\n    async def test_model_inet_basics(self):\n        async with self.getTestCore() as core:\n            self.len(1, await core.nodes('[ inet:web:hashtag=\"#hehe\" ]'))\n            with self.raises(s_exc.BadTypeValu):\n                await core.nodes('[ inet:web:hashtag=\"foo\" ]')\n            with self.raises(s_exc.BadTypeValu):\n                await core.nodes('[ inet:web:hashtag=\"#foo bar\" ]')\n\n            nodes = await core.nodes('''\n                [ inet:web:instance=(foo,)\n                    :url=https://app.slack.com/client/T2XK1223Y\n                    :id=T2XK1223Y\n                    :name=\"vertex synapse\"\n                    :created=20220202\n                    :creator=synapsechat.slack.com/visi\n                    :owner={[ ou:org=* :name=vertex ]}\n                    :owner:fqdn=vertex.link\n                    :owner:name=vertex\n                    :operator={[ ou:org=* :name=slack ]}\n                    :operator:fqdn=slack.com\n                    :operator:name=slack\n                ]''')\n            self.len(1, nodes)\n            node = nodes[0]\n            self.eq(node.get('url'), 'https://app.slack.com/client/T2XK1223Y')\n            self.eq(node.get('id'), 'T2XK1223Y')\n            self.eq(node.get('name'), 'vertex synapse')\n            self.eq(node.get('created'), 1643760000000)\n            self.eq(node.get('creator'), ('synapsechat.slack.com', 'visi'))\n            self.nn(node.get('owner'))\n            self.eq(node.get('owner:fqdn'), 'vertex.link')\n            self.eq(node.get('owner:name'), 'vertex')\n            self.nn(node.get('operator'))\n            self.eq(node.get('operator:fqdn'), 'slack.com')\n            self.eq(node.get('operator:name'), 'slack')\n\n            nodes = await core.nodes('''\n                [ inet:web:channel=(bar,)\n                    :url=https://app.slack.com/client/T2XK1223Y/C2XHHNDS7\n                    :id=C2XHHNDS7\n                    :name=general\n                    :instance={ inet:web:instance:url=https://app.slack.com/client/T2XK1223Y }\n                    :created=20220202\n                    :creator=synapsechat.slack.com/visi\n                    :topic=\"Synapse Discussion - Feel free to invite others!\"\n                ]''')\n            self.len(1, nodes)\n            node = nodes[0]\n            self.eq(node.get('url'), 'https://app.slack.com/client/T2XK1223Y/C2XHHNDS7')\n            
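
Given the reconstructed three-placeholder template in the map script above, the popup HTML comes from old-style % formatting — a quick check with made-up values:

```python
html = """<a href="https://www.google.com/search?q=%s" target="_blank">%s</a><br>
Height: %s m
"""
print(html % ('Etna', 'Etna', 3357))
# -> a search link labelled 'Etna' followed by 'Height: 3357 m'
```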
self.eq(node.get('id'), 'C2XHHNDS7')\n self.eq(node.get('name'), 'general')\n self.eq(node.get('topic'), 'Synapse Discussion - Feel free to invite others!')\n self.eq(node.get('created'), 1643760000000)\n self.eq(node.get('creator'), ('synapsechat.slack.com', 'visi'))\n self.nn(node.get('instance'))\n\n opts = {'vars': {'mesg': (('synapsechat.slack.com', 'visi'), ('synapsechat.slack.com', 'whippit'), 1643760000000)}}\n self.len(1, await core.nodes('[ inet:web:mesg=$mesg :instance=(foo,) ] -> inet:web:instance +:name=\"vertex synapse\"', opts=opts))\n self.len(1, await core.nodes('[ inet:web:post=* :channel=(bar,) ] -> inet:web:channel +:name=general -> inet:web:instance'))\n\n async def test_inet_jarm(self):\n\n async with self.getTestCore() as core:\n nodes = await core.nodes('[ inet:ssl:jarmsample=(1.2.3.4:443, 07d14d16d21d21d07c42d41d00041d24a458a375eef0c576d23a7bab9a9fb1) ]')\n self.len(1, nodes)\n self.eq('tcp://1.2.3.4:443', nodes[0].get('server'))\n self.eq('07d14d16d21d21d07c42d41d00041d24a458a375eef0c576d23a7bab9a9fb1', nodes[0].get('jarmhash'))\n self.eq(('tcp://1.2.3.4:443', '07d14d16d21d21d07c42d41d00041d24a458a375eef0c576d23a7bab9a9fb1'), nodes[0].ndef[1])\n\n nodes = await core.nodes('inet:ssl:jarmhash=07d14d16d21d21d07c42d41d00041d24a458a375eef0c576d23a7bab9a9fb1')\n self.len(1, nodes)\n self.eq('07d14d16d21d21d07c42d41d00041d24a458a375eef0c576d23a7bab9a9fb1', nodes[0].ndef[1])\n self.eq('07d14d16d21d21d07c42d41d00041d', nodes[0].get('ciphers'))\n self.eq('24a458a375eef0c576d23a7bab9a9fb1', nodes[0].get('extensions'))\n\n async def test_ipv4_lift_range(self):\n\n async with self.getTestCore() as core:\n\n async with await core.snap() as snap:\n\n await snap.addNode('inet:ipv4', '1.2.3.0')\n await snap.addNode('inet:ipv4', '1.2.3.1')\n await snap.addNode('inet:ipv4', '1.2.3.2')\n await snap.addNode('inet:ipv4', '1.2.3.3')\n await snap.addNode('inet:ipv4', '1.2.3.4')\n\n self.len(3, await core.nodes('inet:ipv4=1.2.3.1-1.2.3.3'))\n self.len(3, await core.nodes('[inet:ipv4=1.2.3.1-1.2.3.3]'))\n self.len(3, await core.nodes('inet:ipv4 +inet:ipv4=1.2.3.1-1.2.3.3'))\n self.len(3, await core.nodes('inet:ipv4*range=(1.2.3.1, 1.2.3.3)'))\n\n async def test_ipv4_filt_cidr(self):\n\n async with self.getTestCore() as core:\n\n self.len(5, await core.nodes('[ inet:ipv4=1.2.3.0/30 inet:ipv4=5.5.5.5 ]'))\n self.len(4, await core.nodes('inet:ipv4 +inet:ipv4=1.2.3.0/30'))\n self.len(1, await core.nodes('inet:ipv4 -inet:ipv4=1.2.3.0/30'))\n\n self.len(256, await core.nodes('[ inet:ipv4=192.168.1.0/24]'))\n self.len(256, await core.nodes('[ inet:ipv4=192.168.2.0/24]'))\n self.len(256, await core.nodes('inet:ipv4=192.168.1.0/24'))\n\n # Seed some nodes for bounds checking\n pnodes = [(('inet:ipv4', f'10.2.1.{d}'), {}) for d in range(1, 33)]\n nodes = await alist(core.addNodes(pnodes))\n\n nodes = await core.nodes('inet:ipv4=10.2.1.4/32')\n self.len(1, nodes)\n self.len(1, await core.nodes('inet:ipv4 +inet:ipv4=10.2.1.4/32'))\n\n nodes = await core.nodes('inet:ipv4=10.2.1.4/31')\n self.len(2, nodes)\n self.len(2, await core.nodes('inet:ipv4 +inet:ipv4=10.2.1.4/31'))\n\n # 10.2.1.1/30 is 10.2.1.0 -> 10.2.1.3 but we don't have 10.2.1.0 in the core\n nodes = await core.nodes('inet:ipv4=10.2.1.1/30')\n self.len(3, nodes)\n\n # 10.2.1.2/30 is 10.2.1.0 -> 10.2.1.3 but we don't have 10.2.1.0 in the core\n nodes = await core.nodes('inet:ipv4=10.2.1.2/30')\n self.len(3, nodes)\n\n # 10.2.1.1/29 is 10.2.1.0 -> 10.2.1.7 but we don't have 10.2.1.0 in the core\n nodes = await 
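
The jarm test above relies on the fixed JARM layout that the assertions spell out: a 62-character hash whose first 30 characters are the cipher/version fuzzy hash and whose last 32 are a truncated SHA256 of the TLS extensions — easy to slice directly:

```python
jarm = '07d14d16d21d21d07c42d41d00041d24a458a375eef0c576d23a7bab9a9fb1'

assert len(jarm) == 62
ciphers = jarm[:30]      # '07d14d16d21d21d07c42d41d00041d'
extensions = jarm[30:]   # '24a458a375eef0c576d23a7bab9a9fb1'
print(ciphers, extensions)
```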
core.nodes('inet:ipv4=10.2.1.1/29')\n self.len(7, nodes)\n\n # 10.2.1.8/29 is 10.2.1.8 -> 10.2.1.15\n nodes = await core.nodes('inet:ipv4=10.2.1.8/29')\n self.len(8, nodes)\n\n # 10.2.1.1/28 is 10.2.1.0 -> 10.2.1.15 but we don't have 10.2.1.0 in the core\n nodes = await core.nodes('inet:ipv4=10.2.1.1/28')\n self.len(15, nodes)\n\n async def test_addr(self):\n formname = 'inet:addr'\n async with self.getTestCore() as core:\n t = core.model.type(formname)\n\n # Proto defaults to tcp\n self.eq(t.norm('1.2.3.4'), ('tcp://1.2.3.4', {'subs': {'ipv4': 16909060, 'proto': 'tcp'}}))\n self.eq(t.norm('1.2.3.4:80'),\n ('tcp://1.2.3.4:80', {'subs': {'port': 80, 'ipv4': 16909060, 'proto': 'tcp'}}))\n self.raises(s_exc.BadTypeValu, t.norm, 'https://192.168.1.1:80') # bad proto\n\n # IPv4\n self.eq(t.norm('tcp://1.2.3.4'), ('tcp://1.2.3.4', {'subs': {'ipv4': 16909060, 'proto': 'tcp'}}))\n self.eq(t.norm('udp://1.2.3.4:80'),\n ('udp://1.2.3.4:80', {'subs': {'port': 80, 'ipv4': 16909060, 'proto': 'udp'}}))\n self.eq(t.norm('tcp://1[.]2.3[.]4'), ('tcp://1.2.3.4', {'subs': {'ipv4': 16909060, 'proto': 'tcp'}}))\n self.raises(s_exc.BadTypeValu, t.norm, 'tcp://1.2.3.4:-1')\n self.raises(s_exc.BadTypeValu, t.norm, 'tcp://1.2.3.4:66000')\n\n # IPv6\n self.eq(t.norm('icmp://::1'), ('icmp://::1', {'subs': {'ipv6': '::1', 'proto': 'icmp'}}))\n self.eq(t.norm('tcp://[::1]:2'), ('tcp://[::1]:2', {'subs': {'ipv6': '::1', 'port': 2, 'proto': 'tcp'}}))\n self.eq(t.norm('tcp://[::fFfF:0102:0304]:2'),\n ('tcp://[::ffff:1.2.3.4]:2', {'subs': {'ipv6': '::ffff:1.2.3.4',\n 'ipv4': 0x01020304,\n 'port': 2,\n 'proto': 'tcp',\n }}))\n self.raises(s_exc.BadTypeValu, t.norm, 'tcp://[::1') # bad ipv6 w/ port\n\n # Host\n hstr = 'ffa3e574aa219e553e1b2fc1ccd0180f'\n self.eq(t.norm('host://vertex.link'), (f'host://{hstr}', {'subs': {'host': hstr, 'proto': 'host'}}))\n self.eq(t.norm('host://vertex.link:1337'),\n (f'host://{hstr}:1337', {'subs': {'host': hstr, 'port': 1337, 'proto': 'host'}}))\n self.raises(s_exc.BadTypeValu, t.norm, 'vertex.link') # must use host proto\n\n async def test_asn(self):\n formname = 'inet:asn'\n async with self.getTestCore() as core:\n async with await core.snap() as snap:\n\n valu = '123'\n expected_ndef = (formname, 123)\n input_props = {\n 'name': 'COOL',\n 'owner': 32 * 'a'\n }\n expected_props = {\n 'name': 'cool',\n 'owner': 32 * 'a',\n }\n node = await snap.addNode(formname, valu, props=input_props)\n self.checkNode(node, (expected_ndef, expected_props))\n\n valu = '456'\n expected_ndef = (formname, 456)\n expected_props = {}\n node = await snap.addNode(formname, valu)\n self.checkNode(node, (expected_ndef, expected_props))\n\n async def test_asnet4(self):\n formname = 'inet:asnet4'\n async with self.getTestCore() as core:\n valu = ('54959', ('1.2.3.4', '5.6.7.8'))\n expected_ndef = (formname, (54959, (16909060, 84281096)))\n expected_props = {\n 'net4:min': 16909060,\n 'net4': (16909060, 84281096),\n 'net4:max': 84281096,\n 'asn': 54959,\n }\n expected_nodes = (\n ('inet:ipv4', 16909060),\n ('inet:ipv4', 84281096),\n )\n async with await core.snap() as snap:\n\n node = await snap.addNode(formname, valu)\n self.checkNode(node, (expected_ndef, expected_props))\n await self.checkNodes(core, expected_nodes)\n\n async def test_asnet6(self):\n async with self.getTestCore() as core:\n nodes = await core.nodes('[ inet:asnet6=(99, (ff::00, ff::0100)) ]')\n self.len(1, nodes)\n self.eq(('inet:asnet6', (99, ('ff::', 'ff::100'))), nodes[0].ndef)\n self.eq(99, nodes[0].get('asn'))\n self.eq(('ff::', 'ff::100'), 
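
The expected counts in these CIDR lifts can be cross-checked with the standard library, since a lift like 10.2.1.1/29 normalizes to the enclosing network — a sketch:

```python
import ipaddress

# strict=False lets a host address stand in for its enclosing network
net = ipaddress.ip_network('10.2.1.1/29', strict=False)
print(net)                                          # 10.2.1.0/29
print(net.network_address, net.broadcast_address)   # 10.2.1.0 10.2.1.7

# The test seeds 10.2.1.1-10.2.1.32, so only 7 of these 8 addresses exist
print(net.num_addresses)                            # 8
```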
nodes[0].get('net6'))\n self.eq('ff::', nodes[0].get('net6:min'))\n self.eq('ff::100', nodes[0].get('net6:max'))\n expected_nodes = (\n ('inet:ipv6', 'ff::'),\n ('inet:ipv6', 'ff::100'),\n )\n await self.checkNodes(core, expected_nodes)\n\n async def test_cidr4(self):\n formname = 'inet:cidr4'\n async with self.getTestCore() as core:\n\n # Type Tests ======================================================\n t = core.model.type(formname)\n\n valu = '0/24'\n expected = ('0.0.0.0/24', {'subs': {\n 'broadcast': 255,\n 'network': 0,\n 'mask': 24,\n }})\n self.eq(t.norm(valu), expected)\n\n valu = '192.168.1.101/24'\n expected = ('192.168.1.0/24', {'subs': {\n 'broadcast': 3232236031, # 192.168.1.255\n 'network': 3232235776, # 192.168.1.0\n 'mask': 24,\n }})\n self.eq(t.norm(valu), expected)\n\n valu = '123.123.0.5/30'\n expected = ('123.123.0.4/30', {'subs': {\n 'broadcast': 2071658503, # 123.123.0.7\n 'network': 2071658500, # 123.123.0.4\n 'mask': 30,\n }})\n self.eq(t.norm(valu), expected)\n\n self.raises(s_exc.BadTypeValu, t.norm, '10.0.0.1/-1')\n self.raises(s_exc.BadTypeValu, t.norm, '10.0.0.1/33')\n self.raises(s_exc.BadTypeValu, t.norm, '10.0.0.1/foo')\n self.raises(s_exc.BadTypeValu, t.norm, '10.0.0.1')\n\n # Form Tests ======================================================\n valu = '192[.]168.1.123/24'\n expected_ndef = (formname, '192.168.1.0/24') # ndef is network/mask, not ip/mask\n\n async with await core.snap() as snap:\n node = await snap.addNode(formname, valu)\n self.eq(node.ndef, expected_ndef)\n self.eq(node.get('network'), 3232235776) # 192.168.1.0\n self.eq(node.get('broadcast'), 3232236031) # 192.168.1.255\n self.eq(node.get('mask'), 24)\n\n async def test_cidr6(self):\n formname = 'inet:cidr6'\n async with self.getTestCore() as core:\n\n # Type Tests ======================================================\n t = core.model.type(formname)\n\n valu = '::/0'\n expected = ('::/0', {'subs': {\n 'broadcast': 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff',\n 'network': '::',\n 'mask': 0,\n }})\n self.eq(t.norm(valu), expected)\n\n valu = '2001:db8::/59'\n expected = ('2001:db8::/59', {'subs': {\n 'broadcast': '2001:db8:0:1f:ffff:ffff:ffff:ffff',\n 'network': '2001:db8::',\n 'mask': 59,\n }})\n self.eq(t.norm(valu), expected)\n\n self.raises(s_exc.BadTypeValu, t.norm, '10.0.0.1/-1')\n\n async def test_client(self):\n formname = 'inet:client'\n data = (\n ('tcp://127.0.0.1:12345', 'tcp://127.0.0.1:12345', {\n 'ipv4': 2130706433,\n 'port': 12345,\n 'proto': 'tcp',\n }),\n ('tcp://127.0.0.1', 'tcp://127.0.0.1', {\n 'ipv4': 2130706433,\n 'proto': 'tcp',\n }),\n ('tcp://[::1]:12345', 'tcp://[::1]:12345', {\n 'ipv6': '::1',\n 'port': 12345,\n 'proto': 'tcp',\n }),\n ('host://vertex.link:12345', 'host://ffa3e574aa219e553e1b2fc1ccd0180f:12345', {\n 'host': 'ffa3e574aa219e553e1b2fc1ccd0180f',\n 'port': 12345,\n 'proto': 'host',\n }),\n )\n\n async with self.getTestCore() as core:\n async with await core.snap() as snap:\n for valu, expected_valu, expected_props in data:\n node = await snap.addNode(formname, valu)\n self.checkNode(node, ((formname, expected_valu), expected_props))\n\n async def test_download(self):\n formname = 'inet:download'\n input_props = {\n 'time': 0,\n 'file': 64 * 'b',\n 'fqdn': 'vertex.link',\n 'client': 'tcp://127.0.0.1:45654',\n 'server': 'tcp://1.2.3.4:80'\n }\n expected_props = {\n 'time': 0,\n 'file': 'sha256:' + 64 * 'b',\n 'fqdn': 'vertex.link',\n 'client': 'tcp://127.0.0.1:45654',\n 'client:ipv4': 2130706433,\n 'client:port': 45654,\n 'client:proto': 'tcp',\n 
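
The cidr6 subs asserted in this section can likewise be reproduced with `ipaddress`, whose network/broadcast computation matches the expected values:

```python
import ipaddress

net = ipaddress.ip_network('2001:db8::/59')
print(net.network_address)    # 2001:db8::
print(net.broadcast_address)  # 2001:db8:0:1f:ffff:ffff:ffff:ffff
```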
'server': 'tcp://1.2.3.4:80',\n            'server:ipv4': 16909060,\n            'server:port': 80,\n            'server:proto': 'tcp',\n        }\n        async with self.getTestCore() as core:\n            async with await core.snap() as snap:\n                node = await snap.addNode(formname, 32 * 'a', props=input_props)\n                self.checkNode(node, ((formname, 32 * 'a'), expected_props))\n\n    async def test_email(self):\n        formname = 'inet:email'\n        async with self.getTestCore() as core:\n\n            # Type Tests ======================================================\n            t = core.model.type(formname)\n\n            email = 'UnitTest@Vertex.link'\n            expected = ('unittest@vertex.link', {'subs': {'fqdn': 'vertex.link', 'user': 'unittest'}})\n            self.eq(t.norm(email), expected)\n\n            valu = t.norm('bob\\udcfesmith@woot.com')[0]\n\n            with self.raises(s_exc.BadTypeValu) as cm:\n                t.norm('hehe')\n            self.isin('Email address expected in <user>@<fqdn> format', cm.exception.get('mesg'))\n\n            with self.raises(s_exc.BadTypeValu) as cm:\n                t.norm('hehe@1.2.3.4')\n            self.isin('FQDN Got an IP address instead', cm.exception.get('mesg'))\n\n            # Form Tests ======================================================\n            valu = 'UnitTest@Vertex.link'\n            expected_ndef = (formname, valu.lower())\n            expected_props = {\n                'fqdn': 'vertex.link',\n                'user': 'unittest',\n            }\n            async with await core.snap() as snap:\n                node = await snap.addNode(formname, valu)\n                self.checkNode(node, (expected_ndef, expected_props))\n\n    async def test_flow(self):\n        formname = 'inet:flow'\n        srccert = s_common.guid()\n        dstcert = s_common.guid()\n        input_props = {\n            'time': 0,\n            'duration': 1,\n            'from': 32 * 'b',\n            'src': 'tcp://127.0.0.1:45654',\n            'src:host': 32 * 'b',\n            'src:proc': 32 * 'c',\n            'src:exe': 64 * 'd',\n            'src:txcount': 30,\n            'src:txbytes': 1,\n            'src:handshake': 'Hello There',\n            'dst': 'tcp://1.2.3.4:80',\n            'dst:host': 32 * 'e',\n            'dst:proc': 32 * 'f',\n            'dst:exe': 64 * '0',\n            'dst:txcount': 33,\n            'dst:txbytes': 2,\n            'tot:txcount': 63,\n            'tot:txbytes': 3,\n            'dst:handshake': 'OHai!',\n            'src:softnames': ('HeHe', 'haha'),\n            'dst:softnames': ('FooBar', 'bazfaz'),\n            'src:cpes': ('cpe:2.3:a:zzz:yyy:*:*:*:*:*:*:*:*', 'cpe:2.3:a:aaa:bbb:*:*:*:*:*:*:*:*'),\n            'dst:cpes': ('cpe:2.3:a:zzz:yyy:*:*:*:*:*:*:*:*', 'cpe:2.3:a:aaa:bbb:*:*:*:*:*:*:*:*'),\n            'ip:proto': 6,\n            'ip:tcp:flags': 0x20,\n            'sandbox:file': 'e' * 64,\n            'src:ssh:key': srccert,\n            'dst:ssh:key': dstcert,\n            'src:ssl:cert': srccert,\n            'dst:ssl:cert': dstcert,\n            'src:rdp:hostname': 'SYNCODER',\n            'src:rdp:keyboard:layout': 'AZERTY',\n            'raw': (10, 20),\n        }\n        expected_props = {\n            'time': 0,\n            'duration': 1,\n            'from': 32 * 'b',\n            'src': 'tcp://127.0.0.1:45654',\n            'src:port': 45654,\n            'src:proto': 'tcp',\n            'src:ipv4': 2130706433,\n            'src:host': 32 * 'b',\n            'src:proc': 32 * 'c',\n            'src:exe': 'sha256:' + 64 * 'd',\n            'src:txcount': 30,\n            'src:txbytes': 1,\n            'src:handshake': 'Hello There',\n            'dst': 'tcp://1.2.3.4:80',\n            'dst:port': 80,\n            'dst:proto': 'tcp',\n            'dst:ipv4': 16909060,\n            'dst:host': 32 * 'e',\n            'dst:proc': 32 * 'f',\n            'dst:exe': 'sha256:' + 64 * '0',\n            'dst:txcount': 33,\n            'dst:txbytes': 2,\n            'tot:txcount': 63,\n            'tot:txbytes': 3,\n            'dst:handshake': 'OHai!',\n            'src:softnames': ('haha', 'hehe'),\n            'dst:softnames': ('bazfaz', 'foobar'),\n            'src:cpes': ('cpe:2.3:a:aaa:bbb:*:*:*:*:*:*:*:*', 'cpe:2.3:a:zzz:yyy:*:*:*:*:*:*:*:*'),\n            'dst:cpes': ('cpe:2.3:a:aaa:bbb:*:*:*:*:*:*:*:*', 'cpe:2.3:a:zzz:yyy:*:*:*:*:*:*:*:*'),\n            'ip:proto': 6,\n            'ip:tcp:flags': 0x20,\n            'sandbox:file': 'sha256:' + 64 * 'e',\n            'src:ssh:key': srccert,\n            'dst:ssh:key': dstcert,\n            'src:ssl:cert': srccert,\n            'dst:ssl:cert': dstcert,\n            
'src:rdp:hostname': 'syncoder',\n 'src:rdp:keyboard:layout': 'azerty',\n 'raw': (10, 20),\n }\n expected_ndef = (formname, 32 * 'a')\n async with self.getTestCore() as core:\n async with await core.snap() as snap:\n node = await snap.addNode(formname, 32 * 'a', props=input_props)\n self.checkNode(node, (expected_ndef, expected_props))\n\n self.len(2, await core.nodes('inet:flow -> crypto:x509:cert'))\n self.len(1, await core.nodes('inet:flow :src:ssh:key -> crypto:key'))\n self.len(1, await core.nodes('inet:flow :dst:ssh:key -> crypto:key'))\n\n async def test_fqdn(self):\n formname = 'inet:fqdn'\n async with self.getTestCore() as core:\n\n # Type Tests ======================================================\n t = core.model.type(formname)\n\n fqdn = 'example.Vertex.link'\n expected = ('example.vertex.link', {'subs': {'host': 'example', 'domain': 'vertex.link'}})\n self.eq(t.norm(fqdn), expected)\n self.raises(s_exc.BadTypeValu, t.norm, '!@#$%')\n\n # defanging works\n self.eq(t.norm('example[.]vertex(.)link'), expected)\n\n # Demonstrate Valid IDNA\n fqdn = 'tèst.èxamplè.link'\n ex_fqdn = 'xn--tst-6la.xn--xampl-3raf.link'\n expected = (ex_fqdn, {'subs': {'domain': 'xn--xampl-3raf.link', 'host': 'xn--tst-6la'}})\n self.eq(t.norm(fqdn), expected)\n self.eq(t.repr(ex_fqdn), fqdn) # Calling repr on IDNA encoded domain should result in the unicode\n\n # Use IDNA2008 if possible\n fqdn = \"faß.de\"\n ex_fqdn = 'xn--fa-hia.de'\n expected = (ex_fqdn, {'subs': {'domain': 'de', 'host': 'xn--fa-hia'}})\n self.eq(t.norm(fqdn), expected)\n self.eq(t.repr(ex_fqdn), fqdn)\n\n # Emojis are valid IDNA2003\n fqdn = '👁👄👁.fm'\n ex_fqdn = 'xn--mp8hai.fm'\n expected = (ex_fqdn, {'subs': {'domain': 'fm', 'host': 'xn--mp8hai'}})\n self.eq(t.norm(fqdn), expected)\n self.eq(t.repr(ex_fqdn), fqdn)\n\n # Variant forms get normalized\n varfqdn = '👁️👄👁️.fm'\n self.eq(t.norm(varfqdn), expected)\n self.ne(varfqdn, fqdn)\n\n # Unicode full stops are okay but get normalized\n fqdn = 'foo(.)bar[。]baz。lol'\n ex_fqdn = 'foo.bar.baz.lol'\n expected = (ex_fqdn, {'subs': {'domain': 'bar.baz.lol', 'host': 'foo'}})\n self.eq(t.norm(fqdn), expected)\n\n # Ellipsis shouldn't make it through\n self.raises(s_exc.BadTypeValu, t.norm, 'vertex…link')\n\n # Demonstrate Invalid IDNA\n fqdn = 'xn--lskfjaslkdfjaslfj.link'\n expected = (fqdn, {'subs': {'host': fqdn.split('.')[0], 'domain': 'link'}})\n self.eq(t.norm(fqdn), expected)\n self.eq(fqdn, t.repr(fqdn)) # UnicodeError raised and caught and fallback to norm\n\n fqdn = 'xn--cc.bartmp.l.google.com'\n expected = (fqdn, {'subs': {'host': fqdn.split('.')[0], 'domain': 'bartmp.l.google.com'}})\n self.eq(t.norm(fqdn), expected)\n self.eq(fqdn, t.repr(fqdn))\n\n self.raises(s_exc.BadTypeValu, t.norm, 'www.google\\udcfesites.com')\n\n # IP addresses are NOT valid FQDNs\n self.raises(s_exc.BadTypeValu, t.norm, '1.2.3.4')\n\n # Form Tests ======================================================\n valu = 'api.vertex.link'\n expected_ndef = (formname, valu)\n\n # Demonstrate cascading formation\n async with await core.snap() as snap:\n node = await snap.addNode(formname, valu)\n self.eq(node.ndef, expected_ndef)\n self.eq(node.get('domain'), 'vertex.link')\n self.eq(node.get('host'), 'api')\n # self.eq(node.get('issuffix'), 0)\n # self.eq(node.get('iszone'), 0)\n self.eq(node.get('zone'), 'vertex.link')\n\n async with await core.snap() as snap:\n nvalu = 'vertex.link'\n expected_ndef = (formname, nvalu)\n node = await snap.getNodeByNdef((formname, nvalu))\n self.eq(node.ndef, expected_ndef)\n 
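
The fqdn tests above distinguish IDNA2008 from the older IDNA2003 codec built into Python; the third-party `idna` package (assumed installed) implements the 2008 rules the test expects:

```python
import idna  # IDNA2008 implementation (pip install idna)

# IDNA2008 keeps the eszett distinct, matching the test expectation
print(idna.encode('faß.de'))    # b'xn--fa-hia.de'

# Python's built-in codec is IDNA2003, which case-folds ß to ss
print('faß.de'.encode('idna'))  # b'fass.de'
```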
self.eq(node.get('domain'), 'link')\n self.eq(node.get('host'), 'vertex')\n self.eq(node.get('issuffix'), 0)\n self.eq(node.get('iszone'), 1)\n self.eq(node.get('zone'), 'vertex.link')\n\n async with await core.snap() as snap:\n nvalu = 'link'\n expected_ndef = (formname, nvalu)\n node = await snap.getNodeByNdef((formname, nvalu))\n self.eq(node.ndef, expected_ndef)\n self.eq(node.get('host'), 'link')\n self.eq(node.get('issuffix'), 1)\n self.eq(node.get('iszone'), 0)\n\n # Demonstrate wildcard\n async with await core.snap() as snap:\n self.len(3, await snap.nodes('inet:fqdn=\"*\"'))\n self.len(3, await snap.nodes('inet:fqdn=\"*link\"'))\n self.len(2, await snap.nodes('inet:fqdn=\"*.link\"'))\n self.len(1, await snap.nodes('inet:fqdn=\"*.vertex.link\"'))\n with self.raises(s_exc.BadLiftValu):\n await snap.nodes('inet:fqdn=api.*.link')\n\n q = 'inet:fqdn=\"*.link\" +inet:fqdn=\"*vertex.link\"'\n nodes = await core.nodes(q)\n self.len(2, nodes)\n self.eq({'vertex.link', 'api.vertex.link'}, {n.ndef[1] for n in nodes})\n\n q = 'inet:fqdn~=api'\n nodes = await core.nodes(q)\n self.len(1, nodes)\n self.eq({'api.vertex.link'}, {n.ndef[1] for n in nodes})\n\n # Cannot filter on a empty string\n q = 'inet:fqdn=\"*.link\" +inet:fqdn=\"\"'\n nodes = await core.nodes(q)\n self.len(0, nodes)\n\n async def test_fqdn_suffix(self):\n # Demonstrate FQDN suffix/zone behavior\n\n formname = 'inet:fqdn'\n\n def iszone(node):\n self.true(node.get('iszone') == 1 and node.get('issuffix') == 0)\n\n def issuffix(node):\n self.true(node.get('issuffix') == 1 and node.get('iszone') == 0)\n\n def isboth(node):\n self.true(node.get('iszone') == 1 and node.get('issuffix') == 1)\n\n def isneither(node):\n self.true(node.get('iszone') == 0 and node.get('issuffix') == 0)\n\n async with self.getTestCore() as core:\n async with await core.snap() as snap:\n\n # Create some nodes and demonstrate zone/suffix behavior\n # Only FQDNs of the lowest level should be suffix\n # Only FQDNs whose domains are suffixes should be zones\n n0 = await snap.addNode(formname, 'abc.vertex.link')\n n1 = await snap.addNode(formname, 'def.vertex.link')\n n2 = await snap.addNode(formname, 'g.def.vertex.link')\n # form again to show g. 
should not make this a zone\n n1 = await snap.addNode(formname, 'def.vertex.link')\n n3 = await snap.getNodeByNdef((formname, 'vertex.link'))\n n4 = await snap.getNodeByNdef((formname, 'link'))\n isneither(n0)\n isneither(n1)\n isneither(n2)\n iszone(n3) # vertex.link should be a zone\n issuffix(n4) # link should be a suffix\n\n # Make one of the FQDNs a suffix and make sure its children become zones\n n3 = await snap.addNode(formname, 'vertex.link', props={'issuffix': True})\n isboth(n3) # vertex.link should now be both because we made it a suffix\n n0 = await snap.getNodeByNdef((formname, 'abc.vertex.link'))\n n1 = await snap.getNodeByNdef((formname, 'def.vertex.link'))\n n2 = await snap.getNodeByNdef((formname, 'g.def.vertex.link'))\n iszone(n0) # now a zone because vertex.link is a suffix\n iszone(n1) # now a zone because vertex.link is a suffix\n isneither(n2) # still neither as parent is not a suffix\n\n # Remove the FQDN's suffix status and make sure its children lose zone status\n n3 = await snap.addNode(formname, 'vertex.link', props={'issuffix': False})\n iszone(n3) # vertex.link should now be a zone because we removed its suffix status\n n0 = await snap.getNodeByNdef((formname, 'abc.vertex.link'))\n n1 = await snap.getNodeByNdef((formname, 'def.vertex.link'))\n n2 = await snap.getNodeByNdef((formname, 'g.def.vertex.link'))\n n4 = await snap.getNodeByNdef((formname, 'link'))\n isneither(n0) # loses zone status\n isneither(n1) # loses zone status\n isneither(n2) # stays the same\n issuffix(n4) # stays the same\n\n async def test_group(self):\n formname = 'inet:group'\n valu = 'cool Group '\n expected_props = {}\n expected_ndef = (formname, valu)\n async with self.getTestCore() as core:\n async with await core.snap() as snap:\n node = await snap.addNode(formname, valu)\n self.checkNode(node, (expected_ndef, expected_props))\n\n async def test_http_cookie(self):\n\n async with self.getTestCore() as core:\n async with await core.snap() as snap:\n node = await snap.addNode('inet:http:cookie', 'HeHe=HaHa')\n self.eq(node.ndef[1], 'HeHe=HaHa')\n self.eq(node.get('name'), 'HeHe')\n self.eq(node.get('value'), 'HaHa')\n\n nodes = await core.nodes('''\n [ inet:http:request=* :cookies={[ inet:http:cookie=\"foo=bar; baz=faz;\" ]} ]\n ''')\n self.eq(nodes[0].get('cookies'), ('baz=faz', 'foo=bar'))\n\n nodes = await core.nodes('''\n [ inet:http:session=* :cookies={[ inet:http:cookie=\"foo=bar; baz=faz;\" ]} ]\n ''')\n self.eq(nodes[0].get('cookies'), ('baz=faz', 'foo=bar'))\n\n nodes = await core.nodes('[ inet:http:cookie=(lol, lul) ]')\n self.len(2, nodes)\n\n async def test_http_request_header(self):\n formname = 'inet:http:request:header'\n valu = ('Cool', 'Cooler')\n expected_props = {\n 'name': 'cool',\n 'value': 'Cooler'\n }\n expected_ndef = (formname, ('cool', 'Cooler'))\n async with self.getTestCore() as core:\n async with await core.snap() as snap:\n node = await snap.addNode(formname, valu)\n self.checkNode(node, (expected_ndef, expected_props))\n\n async def test_http_response_header(self):\n\n formname = 'inet:http:response:header'\n\n valu = ('Cool', 'Cooler')\n expected_props = {\n 'name': 'cool',\n 'value': 'Cooler'\n }\n expected_ndef = (formname, ('cool', 'Cooler'))\n async with self.getTestCore() as core:\n async with await core.snap() as snap:\n node = await snap.addNode(formname, valu)\n self.checkNode(node, (expected_ndef, expected_props))\n\n async def test_http_param(self):\n formname = 'inet:http:param'\n valu = ('Cool', 'Cooler')\n expected_props = {\n 'name': 
'cool',\n 'value': 'Cooler'\n }\n expected_ndef = (formname, ('Cool', 'Cooler'))\n async with self.getTestCore() as core:\n async with await core.snap() as snap:\n node = await snap.addNode(formname, valu)\n self.checkNode(node, (expected_ndef, expected_props))\n\n async def test_http_request(self):\n formname = 'inet:http:request'\n\n sess = s_common.guid()\n client = s_common.guid()\n server = s_common.guid()\n\n input_props = {\n 'time': '2015',\n 'flow': 32 * 'f',\n 'method': 'gEt',\n 'path': '/woot/hehe/',\n 'query': 'hoho=1&qaz=bar',\n 'client': '1.2.3.4',\n 'server': '5.5.5.5:443',\n 'body': 64 * 'b',\n 'headers': (('foo', 'bar'),),\n 'response:code': 200,\n 'response:reason': 'OK',\n 'response:headers': (('baz', 'faz'),),\n 'response:body': 64 * 'b',\n 'client:host': client,\n 'server:host': server,\n 'session': sess,\n 'sandbox:file': 64 * 'c'\n }\n expected_props = {\n 'time': 1420070400000,\n 'flow': 32 * 'f',\n 'method': 'gEt',\n 'path': '/woot/hehe/',\n 'query': 'hoho=1&qaz=bar',\n 'body': 'sha256:' + 64 * 'b',\n\n 'client:ipv4': 0x01020304,\n\n 'server:port': 443,\n 'server:ipv4': 0x05050505,\n\n 'client:host': client,\n 'server:host': server,\n\n 'headers': (('foo', 'bar'),),\n\n 'response:code': 200,\n 'response:reason': 'OK',\n 'response:headers': (('baz', 'faz'),),\n 'response:body': 'sha256:' + 64 * 'b',\n 'session': sess,\n 'sandbox:file': 'sha256:' + 64 * 'c'\n }\n expected_ndef = (formname, 32 * 'a')\n async with self.getTestCore() as core:\n async with await core.snap() as snap:\n node = await snap.addNode(formname, 32 * 'a', props=input_props)\n self.checkNode(node, (expected_ndef, expected_props))\n\n self.len(1, await core.nodes('inet:http:request -> inet:http:request:header'))\n self.len(1, await core.nodes('inet:http:request -> inet:http:response:header'))\n\n nodes = await core.nodes('inet:http:request -> inet:http:session [ :contact=* ]')\n self.len(1, nodes)\n self.nn(nodes[0].get('contact'))\n\n async def test_iface(self):\n formname = 'inet:iface'\n valu = 32 * 'a'\n netw = s_common.guid()\n input_props = {\n 'host': 32 * 'c',\n 'network': netw,\n 'type': 'Cool',\n 'mac': 'ff:00:ff:00:ff:00',\n 'ipv4': '1.2.3.4',\n 'ipv6': 'ff::00',\n 'phone': 12345678910,\n 'wifi:ssid': 'hehe haha',\n 'wifi:bssid': '00:ff:00:ff:00:ff',\n 'mob:imei': 123456789012347,\n 'mob:imsi': 12345678901234,\n }\n expected_props = {\n 'host': 32 * 'c',\n 'network': netw,\n 'type': 'cool',\n 'mac': 'ff:00:ff:00:ff:00',\n 'ipv4': 16909060,\n 'ipv6': 'ff::',\n 'phone': '12345678910',\n 'wifi:ssid': 'hehe haha',\n 'wifi:bssid': '00:ff:00:ff:00:ff',\n 'mob:imei': 123456789012347,\n 'mob:imsi': 12345678901234,\n }\n expected_ndef = (formname, valu)\n async with self.getTestCore() as core:\n async with await core.snap() as snap:\n node = await snap.addNode(formname, valu, props=input_props)\n self.checkNode(node, (expected_ndef, expected_props))\n\n async def test_ipv4(self):\n formname = 'inet:ipv4'\n async with self.getTestCore() as core:\n\n # Type Tests ======================================================\n t = core.model.type(formname)\n ip_int = 16909060\n ip_str = '1.2.3.4'\n ip_str_enfanged = '1[.]2[.]3[.]4'\n ip_str_enfanged2 = '1(.)2(.)3(.)4'\n ip_str_unicode = '1\\u200b.\\u200b2\\u200b.\\u200b3\\u200b.\\u200b4'\n\n info = {'subs': {'type': 'unicast'}}\n self.eq(t.norm(ip_int), (ip_int, info))\n self.eq(t.norm(ip_str), (ip_int, info))\n self.eq(t.norm(ip_str_enfanged), (ip_int, info))\n self.eq(t.norm(ip_str_enfanged2), (ip_int, info))\n self.eq(t.norm(ip_str_unicode), (ip_int, 
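
The integer forms used throughout the ipv4 tests are just the big-endian byte value of the dotted quad — checkable with the standard library:

```python
import ipaddress

print(int(ipaddress.IPv4Address('1.2.3.4')))       # 16909060
print(ipaddress.IPv4Address(16909060))             # 1.2.3.4
print(int(ipaddress.IPv4Address('169.254.1.1')))   # 2851995905 (link-local)
```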
info))\n self.eq(t.repr(ip_int), ip_str)\n\n # Link local test\n ip_str = '169.254.1.1'\n norm, info = t.norm(ip_str)\n self.eq(2851995905, norm)\n self.eq(info.get('subs').get('type'), 'linklocal')\n\n # Don't allow invalid values\n with self.raises(s_exc.BadTypeValu):\n t.norm(0x00000000 - 1)\n\n with self.raises(s_exc.BadTypeValu):\n t.norm(0xFFFFFFFF + 1)\n\n with self.raises(s_exc.BadTypeValu):\n t.norm('foo-bar.com')\n with self.raises(s_exc.BadTypeValu):\n t.norm('bar.com')\n\n # Form Tests ======================================================\n place = s_common.guid()\n input_props = {\n 'asn': 3,\n 'loc': 'uS',\n 'dns:rev': 'vertex.link',\n 'latlong': '-50.12345, 150.56789',\n 'place': place,\n }\n expected_props = {\n 'asn': 3,\n 'loc': 'us',\n 'type': 'unicast',\n 'dns:rev': 'vertex.link',\n 'latlong': (-50.12345, 150.56789),\n 'place': place,\n }\n valu_str = '1.2.3.4'\n valu_int = 16909060\n expected_ndef = (formname, valu_int)\n\n async with await core.snap() as snap:\n node = await snap.addNode(formname, valu_str, props=input_props)\n self.checkNode(node, (expected_ndef, expected_props))\n\n # > / < lifts and filters\n self.len(4, await core.nodes('[inet:ipv4=0 inet:ipv4=1 inet:ipv4=2 inet:ipv4=3]'))\n # Lifts\n self.len(0, await core.nodes('inet:ipv4<0'))\n self.len(1, await core.nodes('inet:ipv4<=0'))\n self.len(1, await core.nodes('inet:ipv4<1'))\n self.len(3, await core.nodes('inet:ipv4<=2'))\n self.len(2, await core.nodes('inet:ipv4>2'))\n self.len(3, await core.nodes('inet:ipv4>=2'))\n self.len(0, await core.nodes('inet:ipv4>=255.0.0.1'))\n with self.raises(s_exc.BadTypeValu):\n self.len(5, await core.nodes('inet:ipv4>=$foo', {'vars': {'foo': 0xFFFFFFFF + 1}}))\n # Filters\n self.len(0, await core.nodes('.created +inet:ipv4<0'))\n self.len(1, await core.nodes('.created +inet:ipv4<1'))\n self.len(3, await core.nodes('.created +inet:ipv4<=2'))\n self.len(2, await core.nodes('.created +inet:ipv4>2'))\n self.len(3, await core.nodes('.created +inet:ipv4>=2'))\n self.len(0, await core.nodes('.created +inet:ipv4>=255.0.0.1'))\n\n with self.raises(s_exc.BadTypeValu):\n await core.nodes('[inet:ipv4=foo]')\n\n with self.raises(s_exc.BadTypeValu):\n await core.nodes('[inet:ipv4=foo-bar.com]')\n\n with self.raises(s_exc.BadTypeValu):\n await core.nodes('[inet:ipv4=foo-bar-duck.com]')\n\n with self.raises(s_exc.BadTypeValu):\n await core.nodes('[test:str=\"foo\"] [inet:ipv4=$node.value()]')\n\n with self.raises(s_exc.BadTypeValu):\n await core.nodes('[test:str=\"foo-bar.com\"] [inet:ipv4=$node.value()]')\n\n self.len(0, await core.nodes('[inet:ipv4?=foo]'))\n self.len(0, await core.nodes('[inet:ipv4?=foo-bar.com]'))\n\n self.len(0, await core.nodes('[test:str=\"foo\"] [inet:ipv4?=$node.value()] -test:str'))\n self.len(0, await core.nodes('[test:str=\"foo-bar.com\"] [inet:ipv4?=$node.value()] -test:str'))\n\n async def test_ipv6(self):\n formname = 'inet:ipv6'\n async with self.getTestCore() as core:\n\n # Type Tests ======================================================\n t = core.model.type(formname)\n\n info = {'subs': {'type': 'loopback'}}\n self.eq(t.norm('::1'), ('::1', info))\n self.eq(t.norm('0:0:0:0:0:0:0:1'), ('::1', info))\n\n info = {'subs': {'type': 'private'}}\n self.eq(t.norm('2001:0db8:0000:0000:0000:ff00:0042:8329'), ('2001:db8::ff00:42:8329', info))\n self.eq(t.norm('2001:0db8:0000:0000:0000:ff00:0042\\u200b:8329'), ('2001:db8::ff00:42:8329', info))\n self.raises(s_exc.BadTypeValu, t.norm, 'newp')\n\n # Specific examples given in RFC5952\n 
self.eq(t.norm('2001:db8:0:0:1:0:0:1')[0], '2001:db8::1:0:0:1')\n self.eq(t.norm('2001:0db8:0:0:1:0:0:1')[0], '2001:db8::1:0:0:1')\n self.eq(t.norm('2001:db8::1:0:0:1')[0], '2001:db8::1:0:0:1')\n self.eq(t.norm('2001:db8::0:1:0:0:1')[0], '2001:db8::1:0:0:1')\n self.eq(t.norm('2001:0db8::1:0:0:1')[0], '2001:db8::1:0:0:1')\n self.eq(t.norm('2001:db8:0:0:1::1')[0], '2001:db8::1:0:0:1')\n self.eq(t.norm('2001:DB8:0:0:1::1')[0], '2001:db8::1:0:0:1')\n self.eq(t.norm('2001:DB8:0:0:1:0000:0000:1')[0], '2001:db8::1:0:0:1')\n self.raises(s_exc.BadTypeValu, t.norm, '::1::')\n self.eq(t.norm('2001:0db8::0001')[0], '2001:db8::1')\n self.eq(t.norm('2001:db8:0:0:0:0:2:1')[0], '2001:db8::2:1')\n self.eq(t.norm('2001:db8:0:1:1:1:1:1')[0], '2001:db8:0:1:1:1:1:1')\n self.eq(t.norm('2001:0:0:1:0:0:0:1')[0], '2001:0:0:1::1')\n self.eq(t.norm('2001:db8:0:0:1:0:0:1')[0], '2001:db8::1:0:0:1')\n self.eq(t.norm('::ffff:1.2.3.4')[0], '::ffff:1.2.3.4')\n self.eq(t.norm('2001:db8::0:1')[0], '2001:db8::1')\n self.eq(t.norm('2001:db8:0:0:0:0:2:1')[0], '2001:db8::2:1')\n self.eq(t.norm('2001:db8::')[0], '2001:db8::')\n\n self.eq(t.norm(0)[0], '::')\n self.eq(t.norm(1)[0], '::1')\n self.eq(t.norm(2**128 - 1)[0], 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff')\n\n # Link local test\n ip_str = 'fe80::1'\n norm, info = t.norm(ip_str)\n self.eq('fe80::1', norm)\n self.eq(info.get('subs').get('type'), 'linklocal')\n\n # Form Tests ======================================================\n async with await core.snap() as snap:\n\n place = s_common.guid()\n\n valu_str = '::fFfF:1.2.3.4'\n input_props = {\n 'loc': 'cool',\n 'latlong': '0,2',\n 'dns:rev': 'vertex.link',\n 'place': place,\n }\n expected_props = {\n 'ipv4': 16909060,\n 'loc': 'cool',\n 'latlong': (0.0, 2.0),\n 'dns:rev': 'vertex.link',\n 'place': place,\n }\n expected_ndef = (formname, valu_str.lower())\n node = await snap.addNode(formname, valu_str, props=input_props)\n self.checkNode(node, (expected_ndef, expected_props))\n\n valu_str = '::1'\n expected_props = {\n }\n expected_ndef = (formname, valu_str)\n node = await snap.addNode(formname, valu_str)\n self.checkNode(node, (expected_ndef, expected_props))\n\n self.len(1, await core.nodes('inet:ipv6=0::1'))\n self.len(1, await core.nodes('inet:ipv6*range=(0::1, 0::1)'))\n\n with self.raises(s_exc.BadTypeValu):\n await core.nodes('[inet:ipv6=foo]')\n\n with self.raises(s_exc.BadTypeValu):\n await core.nodes('[inet:ipv6=foo-bar.com]')\n\n with self.raises(s_exc.BadTypeValu):\n await core.nodes('[inet:ipv6=foo-bar-duck.com]')\n\n with self.raises(s_exc.BadTypeValu):\n await core.nodes('[test:str=\"foo\"] [inet:ipv6=$node.value()]')\n\n with self.raises(s_exc.BadTypeValu):\n await core.nodes('[test:str=\"foo-bar.com\"] [inet:ipv6=$node.value()]')\n\n self.len(0, await core.nodes('[inet:ipv6?=foo]'))\n self.len(0, await core.nodes('[inet:ipv6?=foo-bar.com]'))\n\n self.len(0, await core.nodes('[test:str=\"foo\"] [inet:ipv6?=$node.value()] -test:str'))\n self.len(0, await core.nodes('[test:str=\"foo-bar.com\"] [inet:ipv6?=$node.value()] -test:str'))\n\n await core.nodes('[ inet:ipv6=2a00:: inet:ipv6=2a00::1 ]')\n\n self.len(1, await core.nodes('inet:ipv6>2a00::'))\n self.len(2, await core.nodes('inet:ipv6>=2a00::'))\n self.len(2, await core.nodes('inet:ipv6<2a00::'))\n self.len(3, await core.nodes('inet:ipv6<=2a00::'))\n\n self.len(1, await core.nodes('inet:ipv6 +inet:ipv6>2a00::'))\n self.len(2, await core.nodes('inet:ipv6 +inet:ipv6>=2a00::'))\n self.len(2, await core.nodes('inet:ipv6 +inet:ipv6<2a00::'))\n self.len(3, 
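
Several of the RFC 5952 cases above can be reproduced with `ipaddress`, whose `compressed` form follows the same canonicalization rules (lowercase hex, longest zero run collapsed):

```python
import ipaddress

print(ipaddress.IPv6Address('2001:0db8:0:0:1:0:0:1').compressed)  # 2001:db8::1:0:0:1
print(ipaddress.IPv6Address('2001:db8:0:0:0:0:2:1').compressed)   # 2001:db8::2:1
print(ipaddress.IPv6Address('2001:0:0:1:0:0:0:1').compressed)     # 2001:0:0:1::1
```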
await core.nodes('inet:ipv6 +inet:ipv6<=2a00::'))\n\n async def test_ipv6_lift_range(self):\n\n async with self.getTestCore() as core:\n\n async with await core.snap() as snap:\n\n await snap.addNode('inet:ipv6', '0::f000')\n await snap.addNode('inet:ipv6', '0::f001')\n await snap.addNode('inet:ipv6', '0::f002')\n await snap.addNode('inet:ipv6', '0::f003')\n await snap.addNode('inet:ipv6', '0::f004')\n\n self.len(3, await core.nodes('inet:ipv6=0::f001-0::f003'))\n self.len(3, await core.nodes('[inet:ipv6=0::f001-0::f003]'))\n self.len(3, await core.nodes('inet:ipv6 +inet:ipv6=0::f001-0::f003'))\n self.len(3, await core.nodes('inet:ipv6*range=(0::f001, 0::f003)'))\n\n async def test_ipv6_filt_cidr(self):\n\n async with self.getTestCore() as core:\n\n self.len(5, await core.nodes('[ inet:ipv6=0::f000/126 inet:ipv6=0::ffff:a2c4 ]'))\n self.len(4, await core.nodes('inet:ipv6 +inet:ipv6=0::f000/126'))\n self.len(1, await core.nodes('inet:ipv6 -inet:ipv6=0::f000/126'))\n\n self.len(256, await core.nodes('[ inet:ipv6=0::ffff:192.168.1.0/120]'))\n self.len(256, await core.nodes('[ inet:ipv6=0::ffff:192.168.2.0/120]'))\n self.len(256, await core.nodes('inet:ipv6=0::ffff:192.168.1.0/120'))\n\n # Seed some nodes for bounds checking\n pnodes = [(('inet:ipv6', f'0::10.2.1.{d}'), {}) for d in range(1, 33)]\n nodes = await alist(core.addNodes(pnodes))\n\n nodes = await core.nodes('inet:ipv6=0::10.2.1.4/128')\n self.len(1, nodes)\n self.len(1, await core.nodes('inet:ipv6 +inet:ipv6=0::10.2.1.4/128'))\n self.len(1, await core.nodes('inet:ipv6 +inet:ipv6=0::10.2.1.4'))\n\n nodes = await core.nodes('inet:ipv6=0::10.2.1.4/127')\n self.len(2, nodes)\n self.len(2, await core.nodes('inet:ipv6 +inet:ipv6=0::10.2.1.4/127'))\n\n # 0::10.2.1.0 -> 0::10.2.1.3 but we don't have 0::10.2.1.0 in the core\n nodes = await core.nodes('inet:ipv6=0::10.2.1.1/126')\n self.len(3, nodes)\n\n nodes = await core.nodes('inet:ipv6=0::10.2.1.2/126')\n self.len(3, nodes)\n\n # 0::10.2.1.0 -> 0::10.2.1.7 but we don't have 0::10.2.1.0 in the core\n nodes = await core.nodes('inet:ipv6=0::10.2.1.0/125')\n self.len(7, nodes)\n\n # 0::10.2.1.8 -> 0::10.2.1.15\n nodes = await core.nodes('inet:ipv6=0::10.2.1.8/125')\n self.len(8, nodes)\n\n # 0::10.2.1.0 -> 0::10.2.1.15 but we don't have 0::10.2.1.0 in the core\n nodes = await core.nodes('inet:ipv6=0::10.2.1.1/124')\n self.len(15, nodes)\n\n async def test_mac(self):\n formname = 'inet:mac'\n async with self.getTestCore() as core:\n\n # Type Tests ======================================================\n t = core.model.type(formname)\n\n self.eq(t.norm('00:00:00:00:00:00'), ('00:00:00:00:00:00', {}))\n self.eq(t.norm('FF:ff:FF:ff:FF:ff'), ('ff:ff:ff:ff:ff:ff', {}))\n self.raises(s_exc.BadTypeValu, t.norm, ' FF:ff:FF:ff:FF:ff ')\n self.raises(s_exc.BadTypeValu, t.norm, 'GG:ff:FF:ff:FF:ff')\n\n # Form Tests ======================================================\n async with await core.snap() as snap:\n valu = '00:00:00:00:00:00'\n expected_ndef = (formname, valu)\n\n node = await snap.addNode(formname, valu)\n self.eq(node.ndef, expected_ndef)\n self.none(node.get('vendor'))\n\n node = await snap.addNode(formname, valu, props={'vendor': 'Cool'})\n self.eq(node.ndef, expected_ndef)\n self.eq(node.get('vendor'), 'Cool')\n\n async def test_net4(self):\n tname = 'inet:net4'\n async with self.getTestCore() as core:\n # Type Tests ======================================================\n t = core.model.type(tname)\n\n valu = ('1.2.3.4', '5.6.7.8')\n expected = ((16909060, 84281096), {'subs': {'min': 
16909060, 'max': 84281096}})\n            self.eq(t.norm(valu), expected)\n\n            valu = '1.2.3.4-5.6.7.8'\n            self.eq(t.norm(valu), expected)\n\n            valu = '1.2.3.0/24'\n            expected = ((0x01020300, 0x010203ff), {'subs': {'min': 0x01020300, 'max': 0x010203ff}})\n            self.eq(t.norm(valu), expected)\n\n            valu = '5.6.7.8-1.2.3.4'\n            self.raises(s_exc.BadTypeValu, t.norm, valu)\n\n            valu = ('1.2.3.4', '5.6.7.8', '7.8.9.10')\n            self.raises(s_exc.BadTypeValu, t.norm, valu)\n\n    async def test_net6(self):\n        tname = 'inet:net6'\n        async with self.getTestCore() as core:\n            # Type Tests ======================================================\n            t = core.model.type(tname)\n\n            valu = ('0:0:0:0:0:0:0:0', '::Ff')\n            expected = (('::', '::ff'), {'subs': {'min': '::', 'max': '::ff'}})\n            self.eq(t.norm(valu), expected)\n\n            valu = '0:0:0:0:0:0:0:0-::Ff'\n            self.eq(t.norm(valu), expected)\n\n            # Test case in which ipaddress ordering is not alphabetical\n            valu = ('3300:100::', '3300:100:1::ffff')\n            expected = (('3300:100::', '3300:100:1::ffff'), {'subs': {'min': '3300:100::', 'max': '3300:100:1::ffff'}})\n            self.eq(t.norm(valu), expected)\n\n            valu = '2001:db8::/101'\n\n            expected = (('2001:db8::', '2001:db8::7ff:ffff'),\n                        {'subs': {'min': '2001:db8::', 'max': '2001:db8::7ff:ffff'}})\n            self.eq(t.norm(valu), expected)\n\n            valu = ('fe00::', 'fd00::')\n            self.raises(s_exc.BadTypeValu, t.norm, valu)\n\n            valu = ('fd00::', 'fe00::', 'ff00::')\n            self.raises(s_exc.BadTypeValu, t.norm, valu)\n\n    async def test_passwd(self):\n        async with self.getTestCore() as core:\n            async with await core.snap() as snap:\n\n                node = await snap.addNode('inet:passwd', '2Cool4u')\n                self.eq(node.ndef[1], '2Cool4u')\n                self.eq('91112d75297841c12ca655baafc05104', node.get('md5'))\n                self.eq('2984ab44774294be9f7a369bbd73b52021bf0bb4', node.get('sha1'))\n                self.eq('62c7174a99ff0afd4c828fc779d2572abc2438415e3ca9769033d4a36479b14f', node.get('sha256'))\n\n    async def test_port(self):\n        tname = 'inet:port'\n        async with self.getTestCore() as core:\n\n            # Type Tests ======================================================\n            t = core.model.type(tname)\n            self.raises(s_exc.BadTypeValu, t.norm, -1)\n            self.eq(t.norm(0), (0, {}))\n            self.eq(t.norm(1), (1, {}))\n            self.eq(t.norm('2'), (2, {}))\n            self.eq(t.norm('0xF'), (15, {}))\n            self.eq(t.norm(65535), (65535, {}))\n            self.raises(s_exc.BadTypeValu, t.norm, 65536)\n\n    async def test_rfc2822_addr(self):\n        formname = 'inet:rfc2822:addr'\n        async with self.getTestCore() as core:\n\n            # Type Tests ======================================================\n            t = core.model.type(formname)\n\n            self.eq(t.norm('FooBar'), ('foobar', {'subs': {}}))\n            self.eq(t.norm('visi@vertex.link'), ('visi@vertex.link', {'subs': {'email': 'visi@vertex.link'}}))\n            self.eq(t.norm('foo bar<visi@vertex.link>'), ('foo bar <visi@vertex.link>', {'subs': {'email': 'visi@vertex.link', 'name': 'foo bar'}}))\n            self.eq(t.norm('foo bar <visi@vertex.link>'), ('foo bar <visi@vertex.link>', {'subs': {'email': 'visi@vertex.link', 'name': 'foo bar'}}))\n            self.eq(t.norm('\"foo bar \" <visi@vertex.link>'), ('foo bar <visi@vertex.link>', {'subs': {'email': 'visi@vertex.link', 'name': 'foo bar'}}))\n            self.eq(t.norm('<visi@vertex.link>'), ('visi@vertex.link', {'subs': {'email': 'visi@vertex.link'}}))\n\n            valu = t.norm('bob\\udcfesmith@woot.com')[0]\n\n            # Form Tests ======================================================\n            valu = '\"UnitTest\" <UnitTest@Vertex.link>'\n            expected_ndef = (formname, 'unittest <unittest@vertex.link>')\n            async with await core.snap() as snap:\n                node = await snap.addNode(formname, valu)\n                self.eq(node.ndef, expected_ndef)\n                self.eq(node.get('email'), 'unittest@vertex.link')\n                self.eq(node.get('name'), 'unittest')\n\n                await snap.addNode(formname, 
'\"UnitTest1')\n await snap.addNode(formname, '\"UnitTest12')\n\n self.len(3, await snap.nodes('inet:rfc2822:addr^=unittest'))\n self.len(2, await snap.nodes('inet:rfc2822:addr^=unittest1'))\n self.len(1, await snap.nodes('inet:rfc2822:addr^=unittest12'))\n\n async def test_server(self):\n formname = 'inet:server'\n data = (\n ('tcp://127.0.0.1:12345', 'tcp://127.0.0.1:12345', {\n 'ipv4': 2130706433,\n 'port': 12345,\n 'proto': 'tcp',\n }),\n ('tcp://127.0.0.1', 'tcp://127.0.0.1', {\n 'ipv4': 2130706433,\n 'proto': 'tcp',\n }),\n ('tcp://[::1]:12345', 'tcp://[::1]:12345', {\n 'ipv6': '::1',\n 'port': 12345,\n 'proto': 'tcp',\n }),\n ('host://vertex.link:12345', 'host://ffa3e574aa219e553e1b2fc1ccd0180f:12345', {\n 'host': 'ffa3e574aa219e553e1b2fc1ccd0180f',\n 'port': 12345,\n 'proto': 'host',\n }),\n )\n\n async with self.getTestCore() as core:\n async with await core.snap() as snap:\n for valu, expected_valu, expected_props in data:\n node = await snap.addNode(formname, valu)\n self.checkNode(node, ((formname, expected_valu), expected_props))\n\n async def test_servfile(self):\n formname = 'inet:servfile'\n valu = ('tcp://127.0.0.1:4040', 'sha256:' + 64 * 'f')\n input_props = {\n 'server:host': 32 * 'a'\n }\n expected_props = {\n 'server': 'tcp://127.0.0.1:4040',\n 'server:host': 32 * 'a',\n 'server:port': 4040,\n 'server:proto': 'tcp',\n 'server:ipv4': 2130706433,\n 'file': 'sha256:' + 64 * 'f'\n }\n expected_ndef = (formname, tuple(item.lower() for item in valu))\n async with self.getTestCore() as core:\n async with await core.snap() as snap:\n node = await snap.addNode(formname, valu, props=input_props)\n self.checkNode(node, (expected_ndef, expected_props))\n\n async def test_ssl_cert(self):\n\n async with self.getTestCore() as core:\n\n async with await core.snap() as snap:\n\n node = await snap.addNode('inet:ssl:cert', ('tcp://1.2.3.4:443', 'guid:abcdabcdabcdabcdabcdabcdabcdabcd'))\n\n self.eq(node.get('file'), 'guid:abcdabcdabcdabcdabcdabcdabcdabcd')\n self.eq(node.get('server'), 'tcp://1.2.3.4:443')\n\n self.eq(node.get('server:port'), 443)\n self.eq(node.get('server:ipv4'), 0x01020304)\n\n async def test_url(self):\n formname = 'inet:url'\n async with self.getTestCore() as core:\n\n # Type Tests ======================================================\n t = core.model.type(formname)\n self.raises(s_exc.BadTypeValu, t.norm, 'http:///wat')\n self.raises(s_exc.BadTypeValu, t.norm, 'wat') # No Protocol\n\n self.raises(s_exc.BadTypeValu, t.norm, 'www.google\\udcfesites.com/hehe.asp')\n valu = t.norm('http://www.googlesites.com/hehe\\udcfestuff.asp')\n url = 'http://www.googlesites.com/hehe\\udcfestuff.asp'\n expected = (url, {'subs': {\n 'proto': 'http',\n 'path': '/hehe\\udcfestuff.asp',\n 'port': 80,\n 'params': '',\n 'fqdn': 'www.googlesites.com',\n 'base': url\n }})\n self.eq(valu, expected)\n\n url = 'https://dummyimage.com/600x400/000/fff.png&text=cat@bam.com'\n valu = t.norm(url)\n expected = (url, {'subs': {\n 'base': url,\n 'proto': 'https',\n 'path': '/600x400/000/fff.png&text=cat@bam.com',\n 'port': 443,\n 'params': '',\n 'fqdn': 'dummyimage.com'\n }})\n self.eq(valu, expected)\n\n url = 'http://0.0.0.0/index.html?foo=bar'\n valu = t.norm(url)\n expected = (url, {'subs': {\n 'proto': 'http',\n 'path': '/index.html',\n 'params': '?foo=bar',\n 'ipv4': 0,\n 'port': 80,\n 'base': 'http://0.0.0.0/index.html'\n }})\n self.eq(valu, expected)\n\n # Form Tests ======================================================\n async with await core.snap() as snap:\n valu = 
'https://vertexmc:hunter2@vertex.link:1337/coolthings?a=1'\n expected_ndef = (formname, valu)\n node = await snap.addNode(formname, valu)\n self.eq(node.ndef, expected_ndef)\n self.eq(node.get('fqdn'), 'vertex.link')\n self.eq(node.get('passwd'), 'hunter2')\n self.eq(node.get('path'), '/coolthings')\n self.eq(node.get('port'), 1337)\n self.eq(node.get('proto'), 'https')\n self.eq(node.get('user'), 'vertexmc')\n self.eq(node.get('base'), 'https://vertexmc:hunter2@vertex.link:1337/coolthings')\n self.eq(node.get('params'), '?a=1')\n\n valu = 'https://vertex.link?a=1'\n expected_ndef = (formname, valu)\n node = await snap.addNode(formname, valu)\n self.eq(node.ndef, expected_ndef)\n self.eq(node.get('fqdn'), 'vertex.link')\n self.eq(node.get('path'), '')\n\n # equality comparator behavior\n valu = 'https://vertex.link?a=1'\n q = f'inet:url +inet:url=\"{valu}\"'\n nodes = await core.nodes(q)\n self.len(1, nodes)\n\n q = 'inet:url +inet:url=\"\"'\n nodes = await core.nodes(q)\n self.len(0, nodes)\n\n async def test_url_file(self):\n\n async with self.getTestCore() as core:\n\n t = core.model.type('inet:url')\n\n self.raises(s_exc.BadTypeValu, t.norm, 'file:////')\n self.raises(s_exc.BadTypeValu, t.norm, 'file://///')\n self.raises(s_exc.BadTypeValu, t.norm, 'file://')\n self.raises(s_exc.BadTypeValu, t.norm, 'file:')\n\n url = 'file:///'\n expected = (url, {'subs': {\n 'base': url,\n 'path': '/',\n 'proto': 'file',\n 'params': '',\n }})\n self.eq(t.norm(url), expected)\n\n url = 'file:///home/foo/Documents/html/index.html'\n expected = (url, {'subs': {\n 'base': url,\n 'path': '/home/foo/Documents/html/index.html',\n 'proto': 'file',\n 'params': '',\n }})\n self.eq(t.norm(url), expected)\n\n url = 'file:///c:/path/to/my/file.jpg'\n expected = (url, {'subs': {\n 'base': url,\n 'path': 'c:/path/to/my/file.jpg',\n 'params': '',\n 'proto': 'file'\n }})\n self.eq(t.norm(url), expected)\n\n url = 'file://localhost/c:/Users/BarUser/stuff/moar/stuff.txt'\n expected = (url, {'subs': {\n 'proto': 'file',\n 'path': 'c:/Users/BarUser/stuff/moar/stuff.txt',\n 'params': '',\n 'fqdn': 'localhost',\n 'base': url,\n }})\n self.eq(t.norm(url), expected)\n\n url = 'file:///c:/Users/BarUser/stuff/moar/stuff.txt'\n expected = (url, {'subs': {\n 'proto': 'file',\n 'path': 'c:/Users/BarUser/stuff/moar/stuff.txt',\n 'params': '',\n 'base': url,\n }})\n self.eq(t.norm(url), expected)\n\n url = 'file://localhost/home/visi/synapse/README.rst'\n expected = (url, {'subs': {\n 'proto': 'file',\n 'path': '/home/visi/synapse/README.rst',\n 'params': '',\n 'fqdn': 'localhost',\n 'base': url,\n }})\n self.eq(t.norm(url), expected)\n\n url = 'file:/C:/invisig0th/code/synapse/README.rst'\n expected = ('file:///C:/invisig0th/code/synapse/README.rst', {'subs': {\n 'proto': 'file',\n 'path': 'C:/invisig0th/code/synapse/README.rst',\n 'params': '',\n 'base': 'file:///C:/invisig0th/code/synapse/README.rst'\n }})\n self.eq(t.norm(url), expected)\n\n url = 'file://somehost/path/to/foo.txt'\n expected = (url, {'subs': {\n 'proto': 'file',\n 'params': '',\n 'path': '/path/to/foo.txt',\n 'fqdn': 'somehost',\n 'base': url\n }})\n self.eq(t.norm(url), expected)\n\n url = 'file:/c:/foo/bar/baz/single/slash.txt'\n expected = ('file:///c:/foo/bar/baz/single/slash.txt', {'subs': {\n 'proto': 'file',\n 'params': '',\n 'path': 'c:/foo/bar/baz/single/slash.txt',\n 'base': 'file:///c:/foo/bar/baz/single/slash.txt',\n }})\n self.eq(t.norm(url), expected)\n\n url = 'file:c:/foo/bar/baz/txt'\n expected = ('file:///c:/foo/bar/baz/txt', {'subs': {\n 
'proto': 'file',\n 'params': '',\n 'path': 'c:/foo/bar/baz/txt',\n 'base': 'file:///c:/foo/bar/baz/txt',\n }})\n self.eq(t.norm(url), expected)\n\n url = 'file:/home/visi/synapse/synapse/lib/'\n expected = ('file:///home/visi/synapse/synapse/lib/', {'subs': {\n 'proto': 'file',\n 'params': '',\n 'path': '/home/visi/synapse/synapse/lib/',\n 'base': 'file:///home/visi/synapse/synapse/lib/',\n }})\n self.eq(t.norm(url), expected)\n\n url = 'file://foo.vertex.link/home/bar/baz/biz.html'\n expected = (url, {'subs': {\n 'proto': 'file',\n 'path': '/home/bar/baz/biz.html',\n 'params': '',\n 'fqdn': 'foo.vertex.link',\n 'base': 'file://foo.vertex.link/home/bar/baz/biz.html',\n }})\n self.eq(t.norm(url), expected)\n\n url = 'file://visi@vertex.link@somehost.vertex.link/c:/invisig0th/code/synapse/'\n expected = (url, {'subs': {\n 'proto': 'file',\n 'fqdn': 'somehost.vertex.link',\n 'base': 'file://visi@vertex.link@somehost.vertex.link/c:/invisig0th/code/synapse/',\n 'path': 'c:/invisig0th/code/synapse/',\n 'user': 'visi@vertex.link',\n 'params': '',\n }})\n self.eq(t.norm(url), expected)\n\n url = 'file://foo@bar.com:neato@password@7.7.7.7/c:/invisig0th/code/synapse/'\n expected = (url, {'subs': {\n 'proto': 'file',\n 'ipv4': 117901063,\n 'base': 'file://foo@bar.com:neato@password@7.7.7.7/c:/invisig0th/code/synapse/',\n 'path': 'c:/invisig0th/code/synapse/',\n 'user': 'foo@bar.com',\n 'passwd': 'neato@password',\n 'params': '',\n }})\n self.eq(t.norm(url), expected)\n\n # not allowed by the rfc\n self.raises(s_exc.BadTypeValu, t.norm, 'file:foo@bar.com:password@1.162.27.3:12345/c:/invisig0th/code/synapse/')\n\n # Also an invalid URL, but doesn't cleanly fall out, because well, it could be a valid filename\n url = 'file:/foo@bar.com:password@1.162.27.3:12345/c:/invisig0th/code/synapse/'\n expected = ('file:///foo@bar.com:password@1.162.27.3:12345/c:/invisig0th/code/synapse/', {'subs': {\n 'proto': 'file',\n 'path': '/foo@bar.com:password@1.162.27.3:12345/c:/invisig0th/code/synapse/',\n 'params': '',\n 'base': 'file:///foo@bar.com:password@1.162.27.3:12345/c:/invisig0th/code/synapse/',\n }})\n self.eq(t.norm(url), expected)\n\n # https://datatracker.ietf.org/doc/html/rfc8089#appendix-E.2\n url = 'file://visi@vertex.link:password@somehost.vertex.link:9876/c:/invisig0th/code/synapse/'\n expected = (url, {'subs': {\n 'proto': 'file',\n 'path': 'c:/invisig0th/code/synapse/',\n 'user': 'visi@vertex.link',\n 'passwd': 'password',\n 'fqdn': 'somehost.vertex.link',\n 'params': '',\n 'port': 9876,\n 'base': url,\n }})\n self.eq(t.norm(url), expected)\n\n # https://datatracker.ietf.org/doc/html/rfc8089#appendix-E.2.2\n url = 'FILE:c|/synapse/synapse/lib/stormtypes.py'\n expected = ('file:///c|/synapse/synapse/lib/stormtypes.py', {'subs': {\n 'path': 'c|/synapse/synapse/lib/stormtypes.py',\n 'proto': 'file',\n 'params': '',\n 'base': 'file:///c|/synapse/synapse/lib/stormtypes.py',\n }})\n self.eq(t.norm(url), expected)\n\n # https://datatracker.ietf.org/doc/html/rfc8089#appendix-E.3.2\n url = 'file:////host.vertex.link/SharedDir/Unc/FilePath'\n expected = ('file:////host.vertex.link/SharedDir/Unc/FilePath', {'subs': {\n 'proto': 'file',\n 'params': '',\n 'path': '/SharedDir/Unc/FilePath',\n 'fqdn': 'host.vertex.link',\n 'base': 'file:////host.vertex.link/SharedDir/Unc/FilePath',\n }})\n self.eq(t.norm(url), expected)\n\n # Firefox's non-standard representation that appears every so often\n # supported because the RFC supports it\n url = 'file://///host.vertex.link/SharedDir/Firefox/Unc/File/Path'\n expected = 
('file:////host.vertex.link/SharedDir/Firefox/Unc/File/Path', {'subs': {\n 'proto': 'file',\n 'params': '',\n 'base': 'file:////host.vertex.link/SharedDir/Firefox/Unc/File/Path',\n 'path': '/SharedDir/Firefox/Unc/File/Path',\n 'fqdn': 'host.vertex.link',\n }})\n self.eq(t.norm(url), expected)\n\n async def test_url_fqdn(self):\n\n async with self.getTestCore() as core:\n\n t = core.model.type('inet:url')\n\n host = 'Vertex.Link'\n norm_host = core.model.type('inet:fqdn').norm(host)[0]\n repr_host = core.model.type('inet:fqdn').repr(norm_host)\n\n self.eq(norm_host, 'vertex.link')\n self.eq(repr_host, 'vertex.link')\n\n await self._test_types_url_behavior(t, 'fqdn', host, norm_host, repr_host)\n\n async def test_url_ipv4(self):\n async with self.getTestCore() as core:\n t = core.model.type('inet:url')\n\n host = '192[.]168.1[.]1'\n norm_host = core.model.type('inet:ipv4').norm(host)[0]\n repr_host = core.model.type('inet:ipv4').repr(norm_host)\n self.eq(norm_host, 3232235777)\n self.eq(repr_host, '192.168.1.1')\n\n await self._test_types_url_behavior(t, 'ipv4', host, norm_host, repr_host)\n\n async def test_url_ipv6(self):\n async with self.getTestCore() as core:\n t = core.model.type('inet:url')\n\n host = '::1'\n norm_host = core.model.type('inet:ipv6').norm(host)[0]\n repr_host = core.model.type('inet:ipv6').repr(norm_host)\n self.eq(norm_host, '::1')\n self.eq(repr_host, '::1')\n\n await self._test_types_url_behavior(t, 'ipv6', host, norm_host, repr_host)\n\n # IPv6 Port Special Cases\n weird = t.norm('http://::1:81/hehe')\n self.eq(weird[1]['subs']['ipv6'], '::1:81')\n self.eq(weird[1]['subs']['port'], 80)\n\n self.raises(s_exc.BadTypeValu, t.norm, 'http://0:0:0:0:0:0:0:0:81/')\n\n async def _test_types_url_behavior(self, t, htype, host, norm_host, repr_host):\n\n # Handle IPv6 Port Brackets\n host_port = host\n repr_host_port = repr_host\n\n if htype == 'ipv6':\n host_port = f'[{host}]'\n repr_host_port = f'[{repr_host}]'\n\n # URL with auth and port.\n url = f'https://user:password@{host_port}:1234/a/b/c/'\n expected = (f'https://user:password@{repr_host_port}:1234/a/b/c/', {'subs': {\n 'proto': 'https', 'path': '/a/b/c/', 'user': 'user', 'passwd': 'password', htype: norm_host, 'port': 1234,\n 'base': f'https://user:password@{repr_host_port}:1234/a/b/c/',\n 'params': ''\n }})\n self.eq(t.norm(url), expected)\n\n # Userinfo user with @ in it\n url = f'lando://visi@vertex.link@{host_port}:40000/auth/gateway'\n expected = (f'lando://visi@vertex.link@{repr_host_port}:40000/auth/gateway', {'subs': {\n 'proto': 'lando', 'path': '/auth/gateway',\n 'user': 'visi@vertex.link',\n 'base': f'lando://visi@vertex.link@{repr_host_port}:40000/auth/gateway',\n 'port': 40000,\n 'params': '',\n htype: norm_host,\n }})\n self.eq(t.norm(url), expected)\n\n # Userinfo password with @\n url = f'balthazar://root:foo@@@bar@{host_port}:1234/'\n expected = (f'balthazar://root:foo@@@bar@{repr_host_port}:1234/', {'subs': {\n 'proto': 'balthazar', 'path': '/',\n 'user': 'root', 'passwd': 'foo@@@bar',\n 'base': f'balthazar://root:foo@@@bar@{repr_host_port}:1234/',\n 'port': 1234,\n 'params': '',\n htype: norm_host,\n }})\n self.eq(t.norm(url), expected)\n\n # rfc3986 compliant Userinfo with @ properly encoded\n url = f'calrissian://visi%40vertex.link:surround%40@{host_port}:44343'\n expected = (f'calrissian://visi%40vertex.link:surround%40@{repr_host_port}:44343', {'subs': {\n 'proto': 'calrissian', 'path': '',\n 'user': 'visi@vertex.link', 'passwd': 'surround@',\n 'base': 
f'calrissian://visi%40vertex.link:surround%40@{repr_host_port}:44343',\n 'port': 44343,\n 'params': '',\n htype: norm_host,\n }})\n self.eq(t.norm(url), expected)\n\n # unencoded query params are handled nicely\n url = f'https://visi@vertex.link:neato@burrito@{host}/?q=@foobarbaz'\n expected = (f'https://visi@vertex.link:neato@burrito@{repr_host}/?q=@foobarbaz', {'subs': {\n 'proto': 'https', 'path': '/',\n 'user': 'visi@vertex.link', 'passwd': 'neato@burrito',\n 'base': f'https://visi@vertex.link:neato@burrito@{repr_host}/',\n 'port': 443,\n 'params': '?q=@foobarbaz',\n htype: norm_host,\n }})\n self.eq(t.norm(url), expected)\n\n # URL with no port, but default port valu.\n # Port should be in subs, but not normed URL.\n url = f'https://user:password@{host}/a/b/c/?foo=bar&baz=faz'\n expected = (f'https://user:password@{repr_host}/a/b/c/?foo=bar&baz=faz', {'subs': {\n 'proto': 'https', 'path': '/a/b/c/', 'user': 'user', 'passwd': 'password', htype: norm_host, 'port': 443,\n 'base': f'https://user:password@{repr_host}/a/b/c/',\n 'params': '?foo=bar&baz=faz',\n }})\n self.eq(t.norm(url), expected)\n\n # URL with no port and no default port valu.\n # Port should not be in subs or normed URL.\n url = f'arbitrary://user:password@{host}/a/b/c/'\n expected = (f'arbitrary://user:password@{repr_host}/a/b/c/', {'subs': {\n 'proto': 'arbitrary', 'path': '/a/b/c/', 'user': 'user', 'passwd': 'password', htype: norm_host,\n 'base': f'arbitrary://user:password@{repr_host}/a/b/c/',\n 'params': '',\n }})\n self.eq(t.norm(url), expected)\n\n # URL with user but no password.\n # User should still be in URL and subs.\n url = f'https://user@{host_port}:1234/a/b/c/'\n expected = (f'https://user@{repr_host_port}:1234/a/b/c/', {'subs': {\n 'proto': 'https', 'path': '/a/b/c/', 'user': 'user', htype: norm_host, 'port': 1234,\n 'base': f'https://user@{repr_host_port}:1234/a/b/c/',\n 'params': '',\n }})\n self.eq(t.norm(url), expected)\n\n # URL with no user/password.\n # User/Password should not be in URL or subs.\n url = f'https://{host_port}:1234/a/b/c/'\n expected = (f'https://{repr_host_port}:1234/a/b/c/', {'subs': {\n 'proto': 'https', 'path': '/a/b/c/', htype: norm_host, 'port': 1234,\n 'base': f'https://{repr_host_port}:1234/a/b/c/',\n 'params': '',\n }})\n self.eq(t.norm(url), expected)\n\n # URL with no path.\n url = f'https://{host_port}:1234'\n expected = (f'https://{repr_host_port}:1234', {'subs': {\n 'proto': 'https', 'path': '', htype: norm_host, 'port': 1234,\n 'base': f'https://{repr_host_port}:1234',\n 'params': '',\n }})\n self.eq(t.norm(url), expected)\n\n # URL with no path or port or default port.\n url = f'a://{host}'\n expected = (f'a://{repr_host}', {'subs': {\n 'proto': 'a', 'path': '', htype: norm_host,\n 'base': f'a://{repr_host}',\n 'params': '',\n }})\n self.eq(t.norm(url), expected)\n\n async def test_urlfile(self):\n formname = 'inet:urlfile'\n valu = ('https://vertex.link/a_cool_program.exe', 64 * 'f')\n expected_props = {\n 'url': 'https://vertex.link/a_cool_program.exe',\n 'file': 'sha256:' + 64 * 'f',\n }\n expected_ndef = (formname, (valu[0], 'sha256:' + valu[1]))\n async with self.getTestCore() as core:\n async with await core.snap() as snap:\n node = await snap.addNode(formname, valu)\n self.checkNode(node, (expected_ndef, expected_props))\n url = await core.nodes('inet:url')\n self.len(1, url)\n url = url[0]\n self.eq(443, url.props['port'])\n self.eq('', url.props['params'])\n self.eq('vertex.link', url.props['fqdn'])\n self.eq('https', url.props['proto'])\n 
self.eq('https://vertex.link/a_cool_program.exe', url.props['base'])\n\n async def test_url_mirror(self):\n url0 = 'http://vertex.link'\n url1 = 'http://vtx.lk'\n opts = {'vars': {'url0': url0, 'url1': url1}}\n async with self.getTestCore() as core:\n\n nodes = await core.nodes('[ inet:url:mirror=($url0, $url1) ]', opts=opts)\n\n self.len(1, nodes)\n self.eq(nodes[0].ndef, ('inet:url:mirror', (url0, url1)))\n self.eq(nodes[0].get('at'), 'http://vtx.lk')\n self.eq(nodes[0].get('of'), 'http://vertex.link')\n\n with self.raises(s_exc.ReadOnlyProp):\n nodes = await core.nodes('inet:url:mirror=($url0, $url1) [ :at=http://newp.com ]', opts=opts)\n\n with self.raises(s_exc.ReadOnlyProp):\n nodes = await core.nodes('inet:url:mirror=($url0, $url1) [ :of=http://newp.com ]', opts=opts)\n\n async def test_urlredir(self):\n formname = 'inet:urlredir'\n valu = ('https://vertex.link/idk', 'https://cool.vertex.newp:443/something_else')\n expected_props = {\n 'src': 'https://vertex.link/idk',\n 'src:fqdn': 'vertex.link',\n 'dst': 'https://cool.vertex.newp:443/something_else',\n 'dst:fqdn': 'cool.vertex.newp',\n }\n expected_ndef = (formname, valu)\n expected_nodes = (\n ('inet:fqdn', 'vertex.link'),\n ('inet:fqdn', 'cool.vertex.newp'),\n )\n async with self.getTestCore() as core:\n async with await core.snap() as snap:\n node = await snap.addNode(formname, valu)\n self.checkNode(node, (expected_ndef, expected_props))\n await self.checkNodes(core, expected_nodes)\n\n async def test_user(self):\n formname = 'inet:user'\n valu = 'cool User '\n expected_props = {}\n expected_ndef = (formname, 'cool user ')\n async with self.getTestCore() as core:\n async with await core.snap() as snap:\n node = await snap.addNode(formname, valu)\n self.checkNode(node, (expected_ndef, expected_props))\n\n async def test_web_acct(self):\n async with self.getTestCore() as core:\n formname = 'inet:web:acct'\n\n # Type Tests\n t = core.model.type(formname)\n\n self.raises(s_exc.BadTypeValu, t.norm, 'vertex.link,person1')\n enorm = ('vertex.link', 'person1')\n edata = {'subs': {'user': 'person1',\n 'site': 'vertex.link',\n 'site:host': 'vertex',\n 'site:domain': 'link', },\n 'adds': (\n ('inet:fqdn', 'vertex.link', {'subs': {'domain': 'link', 'host': 'vertex'}}),\n ('inet:user', 'person1', {}),\n )}\n self.eq(t.norm(('VerTex.linK', 'PerSon1')), (enorm, edata))\n\n # Form Tests\n place = s_common.guid()\n valu = ('blogs.Vertex.link', 'Brutus')\n input_props = {\n 'avatar': 'sha256:' + 64 * 'a',\n 'banner': 'sha256:' + 64 * 'b',\n 'dob': -64836547200000,\n 'email': 'brutus@vertex.link',\n 'linked:accts': (('twitter.com', 'brutus'), ('linkedin.com', 'brutester'), ('linkedin.com', 'brutester')),\n 'latlong': '0,0',\n 'place': place,\n 'loc': 'sol',\n 'name': 'ካሳር',\n 'aliases': ('foo', 'bar', 'bar'),\n 'name:en': 'brutus',\n 'occupation': 'jurist',\n 'passwd': 'hunter2',\n 'phone': '555-555-5555',\n 'realname': 'Брут',\n 'realname:en': 'brutus',\n 'signup': 3,\n 'signup:client': '0.0.0.4',\n 'signup:client:ipv6': '::1',\n 'tagline': 'Taglines are not tags',\n 'url': 'https://blogs.vertex.link/',\n 'webpage': 'https://blogs.vertex.link/brutus',\n 'recovery:email': 'recovery@vertex.link',\n }\n\n expected_ndef = (formname, ('blogs.vertex.link', 'brutus'))\n expected_props = copy.copy(input_props)\n expected_props.update({\n 'site': valu[0].lower(),\n 'user': valu[1].lower(),\n 'latlong': (0.0, 0.0),\n 'aliases': ('bar', 'foo'),\n 'linked:accts': (('linkedin.com', 'brutester'), ('twitter.com', 'brutus')),\n 'place': place,\n 'phone': 
'5555555555',\n 'realname': 'брут',\n 'signup:client': 'tcp://0.0.0.4',\n 'signup:client:ipv4': 4,\n 'signup:client:ipv6': '::1',\n 'recovery:email': 'recovery@vertex.link',\n })\n\n async with await core.snap() as snap:\n node = await snap.addNode(formname, valu, props=input_props)\n self.eq(node.ndef, expected_ndef)\n self.checkNode(node, (expected_ndef, expected_props))\n\n self.len(2, await core.nodes('inet:web:acct=(blogs.vertex.link, brutus) :linked:accts -> inet:web:acct'))\n\n async def test_web_action(self):\n formname = 'inet:web:action'\n valu = 32 * 'a'\n place = s_common.guid()\n input_props = {\n 'act': 'Did a Thing',\n 'acct': ('vertex.link', 'vertexmc'),\n 'time': 0,\n 'client': '0.0.0.0',\n 'loc': 'ru',\n 'latlong': '30,30',\n 'place': place,\n }\n expected_props = {\n 'act': 'did a thing',\n 'acct': ('vertex.link', 'vertexmc'),\n 'acct:site': 'vertex.link',\n 'acct:user': 'vertexmc',\n 'time': 0,\n 'client': 'tcp://0.0.0.0',\n 'client:ipv4': 0,\n 'loc': 'ru',\n 'latlong': (30.0, 30.0),\n 'place': place,\n }\n expected_ndef = (formname, valu)\n async with self.getTestCore() as core:\n async with await core.snap() as snap:\n node = await snap.addNode(formname, valu, props=input_props)\n self.checkNode(node, (expected_ndef, expected_props))\n\n nodes = await core.nodes('inet:fqdn')\n self.len(2, nodes)\n\n expected_nodes = (\n ('inet:ipv4', 0x08070605),\n ('inet:ipv6', '::ffff:8.7.6.5'),\n ('inet:fqdn', 'newp.com'),\n ('inet:user', 'hehe'),\n )\n nodes = await core.nodes('[inet:web:action=(test,) :acct:user=hehe :acct:site=newp.com :client=\"tcp://::ffff:8.7.6.5\"]')\n self.len(1, nodes)\n await self.checkNodes(core, expected_nodes)\n\n async def test_web_chprofile(self):\n formname = 'inet:web:chprofile'\n valu = 32 * 'a'\n input_props = {\n 'acct': ('vertex.link', 'vertexmc'),\n 'client': '0.0.0.3',\n 'time': 0,\n 'pv': ('inet:web:acct:site', 'Example.com')\n }\n expected_props = {\n 'acct': ('vertex.link', 'vertexmc'),\n 'acct:site': 'vertex.link',\n 'acct:user': 'vertexmc',\n 'client': 'tcp://0.0.0.3',\n 'client:ipv4': 3,\n 'time': 0,\n 'pv': ('inet:web:acct:site', 'example.com'),\n 'pv:prop': 'inet:web:acct:site',\n }\n expected_ndef = (formname, valu)\n async with self.getTestCore() as core:\n async with await core.snap() as snap:\n node = await snap.addNode(formname, valu, props=input_props)\n self.checkNode(node, (expected_ndef, expected_props))\n\n expected_nodes = (\n ('inet:ipv4', 0x08070605),\n ('inet:ipv6', '::ffff:8.7.6.5'),\n ('inet:user', 'hehe'),\n )\n self.len(1, await core.nodes(\n '[inet:web:chprofile=(test,) :acct:user=hehe :acct:site=newp.com :client=\"tcp://::ffff:8.7.6.5\"]'))\n await self.checkNodes(core, expected_nodes)\n\n async def test_web_file(self):\n formname = 'inet:web:file'\n valu = (('vertex.link', 'vertexmc'), 64 * 'f')\n input_props = {\n 'name': 'Cool',\n 'posted': 0,\n 'client': '::1'\n }\n expected_props = {\n 'acct': ('vertex.link', 'vertexmc'),\n 'acct:site': 'vertex.link',\n 'acct:user': 'vertexmc',\n 'file': 'sha256:' + 64 * 'f',\n 'name': 'cool',\n 'posted': 0,\n 'client': 'tcp://::1',\n 'client:ipv6': '::1'\n }\n expected_nodes = (\n ('inet:ipv6', '::1'),\n ('inet:fqdn', 'vertex.link'),\n ('file:bytes', 'sha256:' + 64 * 'f'),\n ('inet:user', 'vertexmc'),\n )\n expected_ndef = (formname, (valu[0], 'sha256:' + valu[1]))\n async with self.getTestCore() as core:\n async with await core.snap() as snap:\n node = await snap.addNode(formname, valu, props=input_props)\n self.checkNode(node, (expected_ndef, expected_props))\n await 
self.checkNodes(core, expected_nodes)\n\n async def test_web_follows(self):\n formname = 'inet:web:follows'\n valu = (('vertex.link', 'vertexmc'), ('example.com', 'aUser'))\n input_props = {}\n expected_props = {\n 'follower': ('vertex.link', 'vertexmc'),\n 'followee': ('example.com', 'auser'),\n }\n expected_ndef = (formname, (('vertex.link', 'vertexmc'), ('example.com', 'auser')))\n async with self.getTestCore() as core:\n async with await core.snap() as snap:\n node = await snap.addNode(formname, valu, props=input_props)\n self.checkNode(node, (expected_ndef, expected_props))\n\n async def test_web_group(self):\n formname = 'inet:web:group'\n valu = ('vertex.link', 'CoolGroup')\n place = s_common.guid()\n input_props = {\n 'name': 'The coolest group',\n 'aliases': ('foo', 'bar', 'bar'),\n 'name:en': 'The coolest group (in english)',\n 'url': 'https://vertex.link/CoolGroup',\n 'avatar': 64 * 'f',\n 'desc': 'a Really cool group',\n 'webpage': 'https://vertex.link/CoolGroup/page',\n 'loc': 'the internet',\n 'latlong': '0,0',\n 'place': place,\n 'signup': 0,\n 'signup:client': '0.0.0.0',\n }\n expected_props = {\n 'site': valu[0],\n 'id': valu[1],\n 'name': 'The coolest group',\n 'aliases': ('bar', 'foo'),\n 'name:en': 'The coolest group (in english)',\n 'url': 'https://vertex.link/CoolGroup',\n 'avatar': 'sha256:' + 64 * 'f',\n 'desc': 'a Really cool group',\n 'webpage': 'https://vertex.link/CoolGroup/page',\n 'loc': 'the internet',\n 'latlong': (0.0, 0.0),\n 'place': place,\n 'signup': 0,\n 'signup:client': 'tcp://0.0.0.0',\n 'signup:client:ipv4': 0\n }\n expected_ndef = (formname, valu)\n async with self.getTestCore() as core:\n async with await core.snap() as snap:\n node = await snap.addNode(formname, valu, props=input_props)\n self.checkNode(node, (expected_ndef, expected_props))\n\n async def test_web_logon(self):\n formname = 'inet:web:logon'\n valu = 32 * 'a'\n place = s_common.guid()\n input_props = {\n 'acct': ('vertex.link', 'vertexmc'),\n 'time': 0,\n 'client': '::',\n 'logout': 1,\n 'loc': 'ru',\n 'latlong': '30,30',\n 'place': place,\n }\n expected_props = {\n 'acct': ('vertex.link', 'vertexmc'),\n 'acct:site': 'vertex.link',\n 'acct:user': 'vertexmc',\n 'time': 0,\n 'client': 'tcp://::',\n 'client:ipv6': '::',\n 'logout': 1,\n 'loc': 'ru',\n 'latlong': (30.0, 30.0),\n 'place': place,\n }\n expected_ndef = (formname, valu)\n async with self.getTestCore() as core:\n async with await core.snap() as snap:\n node = await snap.addNode(formname, valu, props=input_props)\n self.checkNode(node, (expected_ndef, expected_props))\n\n async def test_web_memb(self):\n formname = 'inet:web:memb'\n valu = (('VERTEX.link', 'visi'), ('vertex.LINK', 'kenshoto'))\n input_props = {'joined': 2554848000000, 'title': 'Cool'}\n expected_props = {\n 'joined': 2554848000000,\n 'title': 'cool',\n 'acct': ('vertex.link', 'visi'),\n 'group': ('vertex.link', 'kenshoto'),\n }\n expected_ndef = (formname, (('vertex.link', 'visi'), ('vertex.link', 'kenshoto')))\n async with self.getTestCore() as core:\n async with await core.snap() as snap:\n node = await snap.addNode(formname, valu, props=input_props)\n self.checkNode(node, (expected_ndef, expected_props))\n\n async def test_web_member(self):\n\n async with self.getTestCore() as core:\n msgs = await core.stormlist('''\n [ inet:web:member=*\n :acct=twitter.com/invisig0th\n :channel=*\n :group=twitter.com/nerds\n :added=2022\n :removed=2023\n ]\n ''')\n nodes = [m[1] for m in msgs if m[0] == 'node']\n self.len(1, nodes)\n node = nodes[0]\n 
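# node here is a packed node: an (ndef, info) tuple with props nested under info\n            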
self.nn(node[1]['props']['channel'])\n self.eq(1640995200000, node[1]['props']['added'])\n self.eq(1672531200000, node[1]['props']['removed'])\n self.eq(('twitter.com', 'nerds'), node[1]['props']['group'])\n self.eq(('twitter.com', 'invisig0th'), node[1]['props']['acct'])\n\n async def test_web_mesg(self):\n formname = 'inet:web:mesg'\n valu = (('VERTEX.link', 'visi'), ('vertex.LINK', 'vertexmc'), 0)\n input_props = {\n 'url': 'https://vertex.link/messages/0',\n 'client': 'tcp://1.2.3.4',\n 'text': 'a cool Message',\n 'deleted': True,\n 'file': 'sha256:' + 64 * 'F'\n }\n expected_props = {\n 'to': ('vertex.link', 'vertexmc'),\n 'from': ('vertex.link', 'visi'),\n 'time': 0,\n 'url': 'https://vertex.link/messages/0',\n 'client': 'tcp://1.2.3.4',\n 'client:ipv4': 0x01020304,\n 'deleted': True,\n 'text': 'a cool Message',\n 'file': 'sha256:' + 64 * 'f'\n }\n expected_ndef = (formname, (('vertex.link', 'visi'), ('vertex.link', 'vertexmc'), 0))\n\n valu2 = (('vertex.link', 'visi'), ('vertex.link', 'epiphyte'), 0)\n inputs2 = {'client': '::1'}\n expected2 = {\n 'client': 'tcp://::1',\n 'client:ipv6': '::1',\n }\n\n async with self.getTestCore() as core:\n\n async with await core.snap() as snap:\n\n node = await snap.addNode(formname, valu, props=input_props)\n self.checkNode(node, (expected_ndef, expected_props))\n\n node = await snap.addNode('inet:web:mesg', valu2, props=inputs2)\n self.checkNode(node, (('inet:web:mesg', valu2), expected2))\n\n async def test_web_post(self):\n formname = 'inet:web:post'\n valu = 32 * 'a'\n plac = s_common.guid()\n input_props = {\n 'acct': ('vertex.link', 'vertexmc'),\n 'text': 'my cooL POST',\n 'time': 0,\n 'deleted': True,\n 'url': 'https://vertex.link/mypost',\n 'client': 'tcp://1.2.3.4',\n 'file': 64 * 'f',\n 'replyto': 32 * 'b',\n 'repost': 32 * 'c',\n\n 'hashtags': '#foo,#bar,#foo',\n 'mentions:users': 'vertex.link/visi,vertex.link/whippit',\n 'mentions:groups': 'vertex.link/ninjas',\n\n 'loc': 'ru',\n 'place': plac,\n 'latlong': (20, 30),\n }\n expected_props = {\n 'acct': ('vertex.link', 'vertexmc'),\n 'acct:site': 'vertex.link',\n 'acct:user': 'vertexmc',\n 'client': 'tcp://1.2.3.4',\n 'client:ipv4': 0x01020304,\n 'text': 'my cooL POST',\n 'time': 0,\n 'deleted': True,\n 'url': 'https://vertex.link/mypost',\n 'file': 'sha256:' + 64 * 'f',\n 'replyto': 32 * 'b',\n 'repost': 32 * 'c',\n\n 'hashtags': ('#bar', '#foo'),\n 'mentions:users': (('vertex.link', 'visi'), ('vertex.link', 'whippit')),\n 'mentions:groups': (('vertex.link', 'ninjas'),),\n\n 'loc': 'ru',\n 'place': plac,\n 'latlong': (20, 30),\n }\n\n node2 = s_common.guid()\n inputs2 = {'client': '::1'}\n expected2 = {\n 'client': 'tcp://::1',\n 'client:ipv6': '::1',\n }\n\n expected_nodes = (\n ('inet:fqdn', 'vertex.link'),\n ('inet:group', 'ninjas'),\n )\n\n expected_ndef = (formname, valu)\n async with self.getTestCore() as core:\n async with await core.snap() as snap:\n node = await snap.addNode(formname, valu, props=input_props)\n self.checkNode(node, (expected_ndef, expected_props))\n\n node = await snap.addNode('inet:web:post', node2, props=inputs2)\n self.checkNode(node, (('inet:web:post', node2), expected2))\n self.len(2, await core.nodes('inet:web:post -> inet:web:hashtag'))\n\n await self.checkNodes(core, expected_nodes)\n\n nodes = await core.nodes('[ inet:web:post:link=* :post={inet:web:post | limit 1} :url=https://vtx.lk :text=Vertex ]')\n self.len(1, nodes)\n self.nn(nodes[0].get('post'))\n self.eq('https://vtx.lk', nodes[0].get('url'))\n self.eq('Vertex', nodes[0].get('text'))\n\n 
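# inet:whois:contact's primary value is ((fqdn, asof), contact type); the\n    # @2015 asof below normalizes to 1420070400000 (2015-01-01, epoch millis)\n    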
async def test_whois_contact(self):\n formname = 'inet:whois:contact'\n valu = (('vertex.link', '@2015'), 'regiStrar')\n input_props = {\n 'id': 'ID',\n 'name': 'NAME',\n 'email': 'unittest@vertex.link',\n 'orgname': 'unittest org',\n 'address': '1234 Not Real Road',\n 'city': 'Faketown',\n 'state': 'Stateland',\n 'country': 'US',\n 'phone': '555-555-5555',\n 'fax': '555-555-5556',\n 'url': 'https://vertex.link/contact',\n 'whois:fqdn': 'vertex.link'\n }\n expected_props = {\n 'rec': ('vertex.link', 1420070400000),\n 'rec:asof': 1420070400000,\n 'rec:fqdn': 'vertex.link',\n 'type': 'registrar',\n 'id': 'id',\n 'name': 'name',\n 'email': 'unittest@vertex.link',\n 'orgname': 'unittest org',\n 'address': '1234 not real road',\n 'city': 'faketown',\n 'state': 'stateland',\n 'country': 'us',\n 'phone': '5555555555',\n 'fax': '5555555556',\n 'url': 'https://vertex.link/contact',\n 'whois:fqdn': 'vertex.link'\n }\n expected_ndef = (formname, (('vertex.link', 1420070400000), 'registrar'))\n expected_nodes = (\n ('inet:fqdn', 'vertex.link'),\n )\n async with self.getTestCore() as core:\n async with await core.snap() as snap:\n node = await snap.addNode(formname, valu, props=input_props)\n self.checkNode(node, (expected_ndef, expected_props))\n await self.checkNodes(core, expected_nodes)\n\n async def test_whois_rar(self):\n formname = 'inet:whois:rar'\n valu = 'cool Registrar '\n expected_props = {}\n expected_ndef = (formname, 'cool registrar ')\n async with self.getTestCore() as core:\n async with await core.snap() as snap:\n node = await snap.addNode(formname, valu)\n self.checkNode(node, (expected_ndef, expected_props))\n\n async def test_whois_rec(self):\n formname = 'inet:whois:rec'\n valu = ('woot.com', '@20501217')\n input_props = {\n 'text': 'YELLING AT pennywise@vertex.link LOUDLY',\n 'registrar': ' cool REGISTRAR ',\n 'registrant': ' cool REGISTRANT ',\n }\n expected_props = {\n 'fqdn': 'woot.com',\n 'asof': 2554848000000,\n 'text': 'yelling at pennywise@vertex.link loudly',\n 'registrar': ' cool registrar ',\n 'registrant': ' cool registrant ',\n }\n expected_ndef = (formname, ('woot.com', 2554848000000))\n async with self.getTestCore() as core:\n async with await core.snap() as snap:\n node = await snap.addNode(formname, valu, props=input_props)\n self.checkNode(node, (expected_ndef, expected_props))\n nodes = await core.nodes('inet:whois:email')\n self.len(1, nodes)\n self.eq(nodes[0].ndef, ('inet:whois:email', ('woot.com', 'pennywise@vertex.link')))\n\n q = '''\n [inet:whois:rec=(wellsfargo.com, 2019/11/24 03:30:07.000)\n :created=\"1993/02/19 05:00:00.000\"]\n +inet:whois:rec:created < 2017/01/01\n '''\n self.len(1, await core.nodes(q))\n\n async def test_whois_recns(self):\n formname = 'inet:whois:recns'\n valu = ('ns1.woot.com', ('woot.com', '@20501217'))\n expected_props = {\n 'ns': 'ns1.woot.com',\n 'rec': ('woot.com', 2554848000000),\n 'rec:fqdn': 'woot.com',\n 'rec:asof': 2554848000000,\n }\n expected_ndef = (formname, ('ns1.woot.com', ('woot.com', 2554848000000)))\n async with self.getTestCore() as core:\n async with await core.snap() as snap:\n node = await snap.addNode(formname, valu)\n self.checkNode(node, (expected_ndef, expected_props))\n\n async def test_whois_reg(self):\n formname = 'inet:whois:reg'\n valu = 'cool Registrant '\n expected_props = {}\n expected_ndef = (formname, 'cool registrant ')\n async with self.getTestCore() as core:\n async with await core.snap() as snap:\n node = await snap.addNode(formname, valu)\n self.checkNode(node, (expected_ndef, 
expected_props))\n\n async def test_whois_ipquery(self):\n rec = s_common.guid()\n query_ipv4 = s_common.guid()\n props_ipv4 = {\n 'time': 2554869000000,\n 'fqdn': 'arin.whois.net',\n 'ipv4': 167772160,\n 'success': True,\n 'rec': rec,\n }\n query_ipv6 = s_common.guid()\n props_ipv6 = {\n 'time': 2554869000000,\n 'url': 'http://myrdap/rdap/?query=3300%3A100%3A1%3A%3Affff',\n 'ipv6': '3300:100:1::ffff',\n 'success': False,\n }\n\n async with self.getTestCore() as core:\n async with await core.snap() as snap:\n node = await snap.addNode('inet:whois:ipquery', query_ipv4, props=props_ipv4)\n self.checkNode(node, (('inet:whois:ipquery', query_ipv4), props_ipv4))\n\n node = await snap.addNode('inet:whois:ipquery', query_ipv6, props=props_ipv6)\n self.checkNode(node, (('inet:whois:ipquery', query_ipv6), props_ipv6))\n\n async def test_whois_iprec(self):\n contact = s_common.guid()\n addlcontact = s_common.guid()\n\n rec_ipv4 = s_common.guid()\n props_ipv4 = {\n 'net4': '10.0.0.0/28',\n 'asof': 2554869000000,\n 'created': 2554858000000,\n 'updated': 2554858000000,\n 'text': 'this is a bunch of \\nrecord text 123123',\n 'desc': 'these are some notes\\n about record 123123',\n 'asn': 12345,\n 'id': 'NET-10-0-0-0-1',\n 'name': 'vtx',\n 'parentid': 'NET-10-0-0-0-0',\n 'registrant': contact,\n 'contacts': (addlcontact, ),\n 'country': 'US',\n 'status': 'validated',\n 'type': 'direct allocation',\n 'links': ('http://rdap.com/foo', 'http://rdap.net/bar'),\n }\n expected_ipv4 = copy.deepcopy(props_ipv4)\n expected_ipv4.update({\n 'net4': (167772160, 167772175),\n 'net4:min': 167772160,\n 'net4:max': 167772175,\n 'country': 'us',\n })\n\n rec_ipv6 = s_common.guid()\n props_ipv6 = {\n 'net6': '2001:db8::/101',\n 'asof': 2554869000000,\n 'created': 2554858000000,\n 'updated': 2554858000000,\n 'text': 'this is a bunch of \\nrecord text 123123',\n 'asn': 12345,\n 'id': 'NET-10-0-0-0-0',\n 'name': 'EU-VTX-1',\n 'registrant': contact,\n 'country': 'tp',\n 'status': 'renew prohibited',\n 'type': 'allocated-BY-rir',\n }\n expected_ipv6 = copy.deepcopy(props_ipv6)\n expected_ipv6.update({\n 'net6': ('2001:db8::', '2001:db8::7ff:ffff'),\n 'net6:min': '2001:db8::',\n 'net6:max': '2001:db8::7ff:ffff',\n 'type': 'allocated-by-rir',\n })\n\n async with self.getTestCore() as core:\n async with await core.snap() as snap:\n node = await snap.addNode('inet:whois:iprec', rec_ipv4, props=props_ipv4)\n self.checkNode(node, (('inet:whois:iprec', rec_ipv4), expected_ipv4))\n\n node = await snap.addNode('inet:whois:iprec', rec_ipv6, props=props_ipv6)\n self.checkNode(node, (('inet:whois:iprec', rec_ipv6), expected_ipv6))\n\n # check regid pivot\n scmd = f'inet:whois:iprec={rec_ipv4} :parentid -> inet:whois:iprec:id'\n nodes = await core.nodes(scmd)\n self.len(1, nodes)\n self.eq(nodes[0].ndef, ('inet:whois:iprec', rec_ipv6))\n\n # bad country code\n guid = s_common.guid()\n props = {'country': 'u9'}\n await self.asyncraises(s_exc.BadTypeValu, snap.addNode('inet:whois:iprec', guid, props=props))\n\n async def test_whois_ipcontact(self):\n pscontact = s_common.guid()\n contact = s_common.guid()\n subcontact = s_common.guid()\n props = {\n 'contact': pscontact,\n 'asof': 2554869000000,\n 'created': 2554858000000,\n 'updated': 2554858000000,\n 'role': 'registrant',\n 'roles': ('abuse', 'administrative', 'technical'),\n 'asn': 123456,\n 'id': 'SPM-3',\n 'links': ('http://myrdap.com/SPM3',),\n 'status': 'active',\n 'contacts': (subcontact,),\n }\n\n async with self.getTestCore() as core:\n async with await core.snap() as snap:\n node = 
await snap.addNode('inet:whois:ipcontact', contact, props=props)\n self.checkNode(node, (('inet:whois:ipcontact', contact), props))\n\n # check regid pivot\n iprec_guid = s_common.guid()\n await snap.addNode(f'inet:whois:iprec', iprec_guid, props={'id': props['id']})\n scmd = f'inet:whois:ipcontact={contact} :id -> inet:whois:iprec:id'\n nodes = await core.nodes(scmd)\n self.len(1, nodes)\n self.eq(nodes[0].ndef, ('inet:whois:iprec', iprec_guid))\n\n async def test_wifi_ap(self):\n\n place = s_common.guid()\n\n formname = 'inet:wifi:ap'\n valu = ('The Best SSID2 ', '00:11:22:33:44:55')\n props = {\n 'accuracy': '10km',\n 'latlong': (20, 30),\n 'place': place,\n 'channel': 99,\n 'encryption': 'wpa2',\n }\n expected_props = {\n 'ssid': valu[0],\n 'bssid': valu[1],\n 'latlong': (20.0, 30.0),\n 'accuracy': 10000000,\n 'place': place,\n 'channel': 99,\n 'encryption': 'wpa2',\n }\n expected_ndef = (formname, valu)\n async with self.getTestCore() as core:\n async with await core.snap() as snap:\n node = await snap.addNode(formname, valu, props=props)\n self.checkNode(node, (expected_ndef, expected_props))\n\n async def test_wifi_ssid(self):\n formname = 'inet:wifi:ssid'\n valu = 'The Best SSID '\n expected_props = {}\n expected_ndef = (formname, valu)\n async with self.getTestCore() as core:\n async with await core.snap() as snap:\n node = await snap.addNode(formname, valu)\n self.checkNode(node, (expected_ndef, expected_props))\n\n async def test_banner(self):\n\n async with self.getTestCore() as core:\n\n async with await core.snap() as snap:\n\n node = await snap.addNode('inet:banner', ('tcp://1.2.3.4:443', 'Hi There'))\n\n self.eq('Hi There', node.get('text'))\n\n self.eq(443, node.get('server:port'))\n self.eq(0x01020304, node.get('server:ipv4'))\n\n expected_nodes = (\n ('it:dev:str', 'Hi There'),\n ('inet:ipv4', 0x01020304),\n )\n await self.checkNodes(core, expected_nodes)\n\n node = await core.nodes('[inet:banner=(\"tcp://::ffff:8.7.6.5\", sup)]')\n self.len(1, node)\n expected_nodes = (\n ('it:dev:str', 'sup'),\n ('inet:ipv4', 0x08070605),\n ('inet:ipv6', '::ffff:8.7.6.5'),\n )\n await self.checkNodes(core, expected_nodes)\n\n async def test_search_query(self):\n async with self.getTestCore() as core:\n async with await core.snap() as snap:\n host = s_common.guid()\n props = {\n 'time': 200,\n 'text': 'hi there',\n 'engine': 'roofroof',\n 'host': host,\n 'acct': 'vertex.link/visi',\n }\n iden = s_common.guid()\n node = await snap.addNode('inet:search:query', iden, props=props)\n self.eq(node.get('time'), 200)\n self.eq(node.get('text'), 'hi there')\n self.eq(node.get('engine'), 'roofroof')\n self.eq(node.get('host'), host)\n self.eq(node.get('acct'), ('vertex.link', 'visi'))\n props = {\n 'query': iden,\n 'url': 'http://hehehaha.com/',\n 'rank': 0,\n 'text': 'woot woot woot',\n 'title': 'this is a title',\n }\n residen = s_common.guid()\n node = await snap.addNode('inet:search:result', residen, props=props)\n self.eq(node.get('url'), 'http://hehehaha.com/')\n self.eq(node.get('rank'), 0)\n self.eq(node.get('text'), 'woot woot woot')\n self.eq(node.get('title'), 'this is a title')\n self.eq(node.get('query'), iden)\n\n async def test_model_inet_email_message(self):\n\n async with self.getTestCore() as core:\n q = '''\n [\n inet:email:message=\"*\"\n :to=woot@woot.com\n :from=visi@vertex.link\n :replyto=root@root.com\n :subject=\"hi there\"\n :date=2015\n :body=\"there are mad sploitz here!\"\n :headers=(('to', 'Visi Stark '),)\n :bytes=\"*\"\n ]\n\n {[( inet:email:message:link=($node, 
https://www.vertex.link) :text=Vertex )]}\n {[( inet:email:message:attachment=($node, \"*\") :name=sploit.exe )]}\n '''\n nodes = await core.nodes(q)\n self.len(1, nodes)\n\n self.len(1, await core.nodes('inet:email:message:to=woot@woot.com'))\n self.len(1, await core.nodes('inet:email:message:date=2015'))\n self.len(1, await core.nodes('inet:email:message:body=\"there are mad sploitz here!\"'))\n self.len(1, await core.nodes('inet:email:message:subject=\"hi there\"'))\n self.len(1, await core.nodes('inet:email:message:replyto=root@root.com'))\n\n self.len(1, await core.nodes('inet:email:message:from=visi@vertex.link -> inet:email:header +:name=to +:value=\"Visi Stark \"'))\n self.len(1, await core.nodes('inet:email:message:from=visi@vertex.link -> inet:email:message:link +:text=Vertex -> inet:url'))\n self.len(1, await core.nodes('inet:email:message:from=visi@vertex.link -> inet:email:message:attachment +:name=sploit.exe -> file:bytes'))\n self.len(1, await core.nodes('inet:email:message:from=visi@vertex.link -> file:bytes'))\n\n async def test_model_inet_tunnel(self):\n async with self.getTestCore() as core:\n nodes = await core.nodes('''\n [ inet:tunnel=*\n :ingress=1.2.3.4:443\n :egress=5.5.5.5\n :type=vpn\n :anon=$lib.true\n :operator = {[ ps:contact=* :email=visi@vertex.link ]}\n ]''')\n self.len(1, nodes)\n\n self.eq(True, nodes[0].get('anon'))\n self.eq('vpn.', nodes[0].get('type'))\n self.eq('tcp://5.5.5.5', nodes[0].get('egress'))\n self.eq('tcp://1.2.3.4:443', nodes[0].get('ingress'))\n\n self.len(1, await core.nodes('inet:tunnel -> ps:contact +:email=visi@vertex.link'))\n\n async def test_model_inet_proto(self):\n\n async with self.getTestCore() as core:\n nodes = await core.nodes('[ inet:proto=https :port=443 ]')\n self.len(1, nodes)\n self.eq(('inet:proto', 'https'), nodes[0].ndef)\n self.eq(443, nodes[0].get('port'))\n\n async def test_model_inet_web_attachment(self):\n\n async with self.getTestCore() as core:\n nodes = await core.nodes('''\n [ inet:web:attachment=*\n :acct=twitter.com/invisig0th\n :client=tcp://1.2.3.4\n :file=*\n :name=beacon.exe\n :time=20230202\n :post=*\n :mesg=(twitter.com/invisig0th, twitter.com/vtxproject, 20230202)\n ]''')\n self.len(1, nodes)\n self.eq(1675296000000, nodes[0].get('time'))\n self.eq('beacon.exe', nodes[0].get('name'))\n self.eq('tcp://1.2.3.4', nodes[0].get('client'))\n self.eq(0x01020304, nodes[0].get('client:ipv4'))\n\n self.nn(nodes[0].get('post'))\n self.nn(nodes[0].get('mesg'))\n self.nn(nodes[0].get('file'))\n\n self.len(1, await core.nodes('inet:web:attachment :file -> file:bytes'))\n self.len(1, await core.nodes('inet:web:attachment :post -> inet:web:post'))\n self.len(1, await core.nodes('inet:web:attachment :mesg -> inet:web:mesg'))\n\n async def test_model_inet_egress(self):\n\n async with self.getTestCore() as core:\n\n nodes = await core.nodes('''\n [ inet:egress=*\n :host = *\n :client=1.2.3.4\n :client:ipv6=\"::1\"\n ]\n ''')\n\n self.len(1, nodes)\n self.nn(nodes[0].get('host'))\n self.eq(nodes[0].get('client'), 'tcp://1.2.3.4')\n self.eq(nodes[0].get('client:ipv4'), 0x01020304)\n self.eq(nodes[0].get('client:ipv6'), '::1')\n","sub_path":"synapse/tests/test_model_inet.py","file_name":"test_model_inet.py","file_ext":"py","file_size_in_byte":112935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"542301509","text":"from flask_restx import Api, Namespace, fields\nfrom common import settings\nfrom common.result_model import root_api, job_ad\n\n# Common result models 
for both Jobsearch and Historical ads\n\njob_ad_searchresult = root_api.inherit('JobAdSearchResult', job_ad, {\n 'relevance': fields.Float(),\n})\n\nstat_item = root_api.model('StatDetail', {\n 'term': fields.String(),\n 'concept_id': fields.String(),\n 'code': fields.String(),\n 'count': fields.Integer()\n})\n\nsearch_stats = root_api.model('Stats', {\n 'type': fields.String(),\n 'values': fields.List(fields.Nested(stat_item, skip_none=True))\n})\n\nfreetext_concepts = root_api.model('FreetextConcepts', {\n 'skill': fields.List(fields.String()),\n 'occupation': fields.List(fields.String()),\n 'location': fields.List(fields.String()),\n 'skill_must': fields.List(fields.String()),\n 'occupation_must': fields.List(fields.String()),\n 'location_must': fields.List(fields.String()),\n 'skill_must_not': fields.List(fields.String()),\n 'occupation_must_not': fields.List(fields.String()),\n 'location_must_not': fields.List(fields.String()),\n})\n\nnumber_of_hits = root_api.model('NumberOfHits', {\n 'value': fields.Integer()\n})\n\nopen_results = root_api.model('SearchResults', {\n 'total': fields.Nested(number_of_hits),\n 'positions': fields.Integer(),\n 'query_time_in_millis': fields.Integer(),\n 'result_time_in_millis': fields.Integer(),\n 'stats': fields.List(fields.Nested(search_stats, skip_none=True)),\n 'freetext_concepts': fields.Nested(freetext_concepts, skip_none=True),\n 'hits': fields.List(fields.Nested(job_ad_searchresult), attribute='hits', skip_none=True)\n})\n\n# Jobsearch\n\nsearch_api = Api(version=settings.API_VERSION, title='Search job ads',\n description='An API for searching and retrieving job ads and for finding '\n 'concepts in the Jobtech Taxonomy.',\n default_label=\"An API for searching and retrieving job ads.\")\n\nns_platsannons = Namespace('Open AF-job ads',\n description='Search and retrieve Arbetsförmedlingens (AF) '\n 'job ads. Used for online operations.')\n\nns_valuestore = Namespace('Jobtech Taxonomy',\n description=settings.TAX_DESCRIPTION)\n\nsearch_api.add_namespace(ns_platsannons, '/')\nsearch_api.add_namespace(ns_valuestore, '/taxonomy')\n\nfor name, definition in root_api.models.items():\n ns_platsannons.add_model(name, definition)\n\ntypeahead_item = ns_platsannons.model('TypeaheadItem', {\n 'value': fields.String(),\n 'found_phrase': fields.String(),\n 'type': fields.String(),\n 'occurrences': fields.Integer()\n})\n\ntypeahead_results = ns_platsannons.model('TypeaheadResults', {\n 'result_time_in_millis': fields.Integer(),\n 'time_in_millis': fields.Integer(),\n 'typeahead': fields.List(fields.Nested(typeahead_item))\n})\n\n# Historical ads\n\nhistorical_api = Api(version=settings.API_VERSION, title='Historical job ads',\n description='An API for searching and retrieving historical job ads.',\n default_label=\"An API for searching and retrieving historical job ads.\")\n\nns_historical = Namespace('Historical ads',\n description='Search and retrieve historical job ads from '\n 'Arbetsförmedlingen since 2006. Used for online operations.')\n\nhistorical_api.add_namespace(ns_historical, '/')\n\nfor name, definition in root_api.models.items():\n ns_historical.add_model(name, definition)\n","sub_path":"sokannonser/rest/model/apis.py","file_name":"apis.py","file_ext":"py","file_size_in_byte":3580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"385553807","text":"#Solution in python for adding large values. It can be handled with longs in\n#this case. 
ar is a list created by mapping type long to a list of strings\n#created by calling .split() on the input\n\ndef aVeryBigSum(n, ar):\n    result = 0\n    for x in range(len(ar)):\n        result += ar[x]\n    return result\n\nn = int(raw_input().strip())\nar = map(long, raw_input().strip().split(' '))\nresult = aVeryBigSum(n, ar)\nprint(result)\n","sub_path":"algorithms/warmup/aVeryBigSum.py","file_name":"aVeryBigSum.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"252667105","text":"from django.shortcuts import render\nfrom .forms import PostForm\n# Create your views here.\n\ndef home(request):\n    template_name = 'home.html'\n    return render(request, template_name)\n\ndef post(request):\n    form = PostForm(request.POST)\n    if request.method == 'POST':\n        if form.is_valid():\n            form.save()\n    else:\n        form = PostForm()\n    template_name = 'wall/post_wall.html'\n    return render(request, template_name, {'form': form})","sub_path":"wall/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"52099483","text":"import torch\r\nimport os\r\nimport pickle\r\nimport numpy as np\r\ndef get_dataset(root_dir, name, normalize=True):\r\n    # only the 'gas' and 'air_quality' datasets ship a validation split\r\n    val_data = None\r\n    if name == 'basketball':\r\n        test_data = torch.Tensor(pickle.load(open(os.path.join(root_dir, 'basketball_eval.p'), 'rb'))).transpose(0, 1)[:, :-1, :]\r\n        train_data = torch.Tensor(pickle.load(open(os.path.join(root_dir, 'basketball_train.p'), 'rb'))).transpose(0, 1)[:, :-1, :]\r\n    elif name == 'billiard':\r\n        test_data = torch.Tensor(pickle.load(open(os.path.join(root_dir, 'billiard_eval.p'), 'rb'), encoding='latin1'))[:, :, :]\r\n        train_data = torch.Tensor(pickle.load(open(os.path.join(root_dir, 'billiard_train.p'), 'rb'), encoding='latin1'))[:, :, :]\r\n    elif name == 'traffic':\r\n        test_data = torch.Tensor(np.load(os.path.join(root_dir, 'pems', 'pems_test.npy')))\r\n        train_data = torch.Tensor(np.load(os.path.join(root_dir, 'pems', 'pems_train.npy')))\r\n    elif name == 'mujoco':\r\n        test_data = torch.Tensor(np.load(os.path.join(root_dir, 'mujoco_test.npy')))\r\n        train_data = torch.Tensor(np.load(os.path.join(root_dir, 'mujoco_train.npy')))\r\n    elif name == 'nfl':\r\n        train_data = torch.Tensor(np.load(os.path.join(root_dir, 'nfl_train.npy')))\r\n        test_data = torch.Tensor(np.load(os.path.join(root_dir, 'nfl_test.npy')))\r\n    elif name == 'gas':\r\n        train_data = torch.Tensor(np.load(os.path.join(root_dir, 'gas_train.npy')))\r\n        test_data = torch.Tensor(np.load(os.path.join(root_dir, 'gas_test.npy')))\r\n        val_data = torch.Tensor(np.load(os.path.join(root_dir, 'gas_val.npy')))\r\n    elif name == 'air_quality':\r\n        train_data = torch.Tensor(np.load(os.path.join(root_dir, 'air_quality_train.npy')))\r\n        test_data = torch.Tensor(np.load(os.path.join(root_dir, 'air_quality_test.npy')))\r\n        val_data = torch.Tensor(np.load(os.path.join(root_dir, 'air_quality_val.npy')))\r\n    else:\r\n        raise ValueError('no such task: ' + name)\r\n    if normalize:\r\n        # min-max scale each sequence to [0, 1], then shift to [-1, 1]\r\n        test_data -= torch.min(test_data, dim=1, keepdim=True)[0]\r\n        test_data /= torch.max(test_data, dim=1, keepdim=True)[0]\r\n        test_data = 2.0 * (test_data - 0.5)\r\n        train_data -= torch.min(train_data, dim=1, keepdim=True)[0]\r\n        train_data /= torch.max(train_data, dim=1, keepdim=True)[0]\r\n        train_data = 2.0 * (train_data - 0.5)\r\n        if val_data is not None:\r\n            # keep the validation split on the same scale as train/test\r\n            val_data -= torch.min(val_data, dim=1, keepdim=True)[0]\r\n            val_data /= torch.max(val_data, dim=1, keepdim=True)[0]\r\n            val_data = 2.0 * (val_data - 0.5)\r\n    print(test_data.shape, 
train_data.shape)\r\n return train_data, val_data, test_data","sub_path":"codes_partially_observed_dimension/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"303848321","text":"from Assn2.Constant import *\nimport pygame\n\nclass MovingObstacle:\n def __init__(self, direction ,coords):\n self.direction = direction\n self.dirNum = self.getDirNum()\n self.coords = coords\n self.moveCount = 0\n\n\n\n def getCoord(self):\n return self.coords\n\n #moves the bullet\n def moveSelf(self):\n self.coords = self.getNext()\n\n #draws the bullet\n def drawSelf(self):\n x = self.coords['x'] * CELLSIZE\n y = self.coords['y'] * CELLSIZE\n xcenter = self.coords['x'] * CELLSIZE + math.floor(CELLSIZE / 2)\n ycenter = self.coords['y'] * CELLSIZE + math.floor(CELLSIZE / 2)\n pygame.draw.circle(DISPLAYSURF, PINK, (xcenter, ycenter), RADIUS)\n\n def getNext(self, rotDir = 0):\n direction = DIRECTIONS[(self.dirNum + rotDir) % 4]\n # move the worm by adding a segment in the direction it is moving\n if direction == UP:\n newHead = {'x': self.coords['x'], 'y': self.coords['y'] - 1}\n elif direction == DOWN:\n newHead = {'x': self.coords['x'], 'y': self.coords['y'] + 1}\n elif direction == LEFT:\n newHead = {'x': self.coords['x'] - 1, 'y': self.coords['y']}\n elif direction == RIGHT:\n newHead = {'x': self.coords['x'] + 1, 'y': self.coords['y']}\n return newHead\n\n def rotate(self,rotDir = 0):\n if rotDir == 0:\n rotDir = self.rotDir\n self.setDir((self.dirNum + rotDir) % 4) # shift direction\n\n def hit(self, coord):\n return self.coords == coord\n\n def getDirNum(self):\n counter = 0\n found = False\n for dir in DIRECTIONS:\n if(not found):\n found = self.direction == dir\n if(not found):\n counter = counter + 1\n return counter\n\n def setDir(self, num):\n self.direction = DIRECTIONS[num]\n self.dirNum = self.getDirNum()","sub_path":"Assn2/MovingObstacle.py","file_name":"MovingObstacle.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"485349329","text":"import sqlite3\n\n#Get personal data from the user aand insert it into a tuple\nFirst_name = input(\"Enter your first name: \")\nLast_name = input(\"Enter your last name: \")\nAge = input(\"Enter your age: \")\npersonal_data = (First_name, Last_name, Age)\n\n#Execute insert statement for supplied personal data\nwith sqlite3.connect(\"place_holder.db\") as conn:\n c = conn.cursor()\n c.execute(\"DROP TABLE IF EXISTS people\")\n c.execute(\"CREATE TABLE people(First_name TEXT, Last_name TEXT, Age INT)\")\n c.execute(\"INSERT INTO people VALUES(?, ?, ?)\", personal_data)\n\n\n","sub_path":"using_place_holders.py","file_name":"using_place_holders.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"245732207","text":"# coding=utf-8\n########################################\nfrom email.header import Header\nfrom email.mime.text import MIMEText\n\nimport smtplib\n\nimport shutil\nimport os\nimport sys\nimport subprocess\nimport datetime\nimport re\nimport io\n\nZY_APP_list = ['Camera2','Camera2_GO','CountrySelect','FMRadio','FMRadio_GO','Launcher3','MobileManager','MobileManagerTest','NewSalesTracker','Recorder','Recorder_GO','SalesTracker','SuperPower']\nZY_No_Standard_Project_List=['Camera2_GO_int','FMRadio_GO_int','Recorder_GO_int']\n# 
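subprocess helper: returns (stdout, returncode) when need_result is True,\n# otherwise it streams the child's output line by line; used throughout to\n# 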
execute command\ndef do_command(cmd, need_result=True):\n print('Run Command: ', cmd)\n out = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n if need_result:\n result = out.communicate()[0].decode().strip()\n out.wait()\n print(result)\n return result, out.returncode\n else:\n while out.poll() is None:\n s = out.stdout.readline()\n if s != b'':\n if \"ant\" in cmd:\n print(s.decode('cp936').strip())\n else:\n print(s.decode('utf-8').strip())\n return out.returncode\n\n\n# output the differences between this tag and the previous tag\ndef diff_between_tag(current_baseline):\n print(\"获取代码\\n\")\n release_dir = apk_release_dir_server + \"/\" + branch\n if os.path.exists(release_dir):\n print(\"代码目录已经存在,直接更新代码!!\\n\")\n os.chdir(release_dir)\n os.system(\"git reset --hard\")\n os.system(\"git clean -xdf\")\n os.system(\"git remote update\")\n os.system(\"git pull\")\n elif (is_zy_app):\n os.makedirs(release_dir)\n os.chdir(release_dir)\n os.system(\"git clone ssh://apkserver@10.250.115.12:29418/ZY/\" + project_name + \" . -b \" + branch)\n else:\n os.makedirs(release_dir)\n os.chdir(release_dir)\n if project_name == \"TsFileManager\":\n os.system(\"git clone ssh://apkserver@10.250.115.12:29418/APK/FileManager . -b \" + branch)\n elif project_name == \"SetupWizard_oem\":\n os.system(\"git clone ssh://apkserver@10.250.115.12:29418/APK/SetupWizard . -b \" + branch)\n elif project_name == \"launcher\":\n os.system(\"git clone ssh://apkserver@10.250.115.12:29418/APK/Launcher . -b \" + branch)\n else:\n os.system(\"git clone ssh://apkserver@10.250.115.12:29418/APK/\" + project_name + \" . -b \" + branch)\n print(\"##获取上一个版本的tag\\n\")\n baseline_list = os.listdir(apk_release_path)\n baseline_list.reverse()\n if not os.path.exists(\"{0}/{1}\".format(apk_release_path, current_baseline)):\n os.makedirs(\"{0}/{1}\".format(apk_release_path, current_baseline))\n if len(baseline_list) == 0:\n print(\"第一个版本,无需比较tag\\n\")\n return 1\n else:\n previous_baseline = baseline_list[0]\n diff_content = os.popen('git log --pretty=oneline '+previous_baseline+'...'+current_baseline).read()\n with io.open(\"{0}/{1}/diff.txt\".format(apk_release_path, current_baseline), 'w', encoding='utf-8') as f:\n f.write(diff_content.decode('utf-8').strip())\n return 0\n\n\ndef get_full_log():\n #获取时间\n time = datetime.datetime.now().strftime(\"%Y.%m.%d_%H.%M.%S\")\n if project_name == \"TsFileManager\":\n full_log_workspace = r\"/var/jenkins_home/proj/full_log_workspace/{0}/FileManager/{1}\".format(time, branch)\n elif project_name == \"SetupWizard_oem\":\n full_log_workspace = r\"/var/jenkins_home/proj/full_log_workspace/{0}/SetupWizard/{1}\".format(time, branch)\n else:\n full_log_workspace = r\"/var/jenkins_home/proj/full_log_workspace/{0}/{1}/{2}\".format(time, project_name, branch)\n print(\"##创建获取完整日志目录{0}\".format(full_log_workspace))\n if not os.path.exists(full_log_workspace):\n os.makedirs(full_log_workspace)\n os.chdir(full_log_workspace)\n if (is_zy_app) :\n os.system(\"git clone ssh://apkserver@10.250.115.12:29418/ZY/\" + project_name + \" . -b \" + branch)\n else:\n if project_name == \"TsFileManager\":\n os.system(\"git clone ssh://apkserver@10.250.115.12:29418/APK/FileManager . -b \" + branch)\n elif project_name == \"SetupWizard_oem\":\n os.system(\"git clone ssh://apkserver@10.250.115.12:29418/APK/SetupWizard . -b \" + branch)\n elif project_name == \"launcher\":\n os.system(\"git clone ssh://apkserver@10.250.115.12:29418/APK/Launcher . 
-b \" + branch)\n else:\n os.system(\"git clone ssh://apkserver@10.250.115.12:29418/APK/\" + project_name + \" . -b \" + branch)\n os.system (\"git checkout -b \"+time+\" \"+baseline)\n log_content=os.popen('git log --oneline').read()\n #log_content = do_command(r\"git log --oneline\")[0]\n with io.open(\"{0}/{1}/Apk_List_full.txt\".format(apk_release_path, baseline), 'a', encoding='utf-8') as f:\n f.write(log_content.decode('utf-8').strip())\n\n\n\ndef copy_to_server(out_dir, project_dir):\n apk_store_path = out_dir + \"/\" + baseline\n #找出项目目录中的所有apk\n find_apk_cmd = \"find \"+ project_dir+\" -name *.apk\"\n apk_path = do_command(find_apk_cmd)\n apk_path_list = apk_path[0].split()\n # 检查输出目录是否存在,如不存在则创建\n if not os.path.exists(apk_store_path):\n os.makedirs(apk_store_path)\n # 拷贝Android.mk文件\n shutil.copy(project_dir+\"/Android.mk\",apk_store_path)\n # 拷贝测试报告文件\n shutil.copy(project_dir+\"/APK-Test-Report.xls\",apk_store_path)\n # 拷贝项目中编译出来的所有apk到输出目录\n for apk in apk_path_list:\n dir_apk = os.path.dirname(apk)\n shutil.copy(os.path.join(dir_apk, os.path.basename(apk)), apk_store_path)\n\n if project_name == 'Camera':\n src = project_dir + r\"/Camera_Publish/build/outputs/mapping\"\n if os.path.exists(src):\n shutil.copytree(src, apk_store_path + \"/mapping\")\n\n if project_name == \"Gallery2\" or project_name == \"SystemUI\":\n src = project_dir + r\"/build/outputs/mapping\"\n if os.path.exists(src):\n shutil.copytree(src, apk_store_path + \"/mapping\")\n print(\"\\n发布目录为:{0}\\n\".format(apk_store_path).replace(\"/mnt\", \"\\\\\\\\10.250.115.51\").replace(\"/\",\"\\\\\"))\n\n\n\n\ndef output_info(info):\n with io.open(\"{0}/{1}/apk_info.txt\".format(apk_release_path, baseline), 'w', encoding='utf-8') as f:\n f.write(info.decode('utf-8').strip())\n\ndef copy_readme(file):\n if os.path.exists(file):\n print(\"## Readme 文件存在,进行拷贝Readme!\")\n shutil.copy(file,apk_release_path+\"/\"+baseline)\n if os.path.exists(file):\n os.remove(file)\n else:\n print (\"## Readme 文件不存在,不拷贝readme文件!\")\n\n# python ${script_tools}/ApkRelease/apksource_release.py ${BRANCH} ${BASELINE} ${JOB_NAME}\nif __name__ == '__main__':\n branch = sys.argv[1] #分支\n baseline = sys.argv[2] #基线\n job_name = sys.argv[3] #任务\n if branch in ZY_No_Standard_Project_List:\n project_name = sys.argv[1].replace('_int','').replace('_dev','').strip()\n else :\n project_name = sys.argv[1].split('_')[0].strip()\n if project_name == \"SetupWizard\":\n project_name = \"SetupWizard_oem\"\n if project_name == \"FileManager\":\n project_name = \"TsFileManager\"\n if project_name == \"Launcher\":\n project_name = \"launcher\"\n is_zy_app = False\n if project_name in ZY_APP_list:\n is_zy_app = True\n\n Readmefile = r\"/var/jenkins_home/proj/test_report/{0}/APK-Verify-Report.xls\".format(job_name)\n apk_release_dir_server = \"/var/jenkins_home/proj/apk_release\"\n int_ip = \"/mnt\"\n rel_ip = \"/mnt\"\n apk_int_base_path = \"{0}/APK_Test_Version/{1}/int/{2}\".format(int_ip, project_name, branch)\n apk_int_path = \"{0}/APK_Test_Version/{1}/int/{2}/{3}\".format(int_ip, project_name, branch,baseline)\n\n test_report = apk_int_path+\"/APK-Test-Report.xls\"\n apk_release_path = \"{0}/APK_Release_Version/06-apksource/{1}\".format(rel_ip, project_name)\n\n content = '''模块名称:{0}\n基线:{1}\n分支:{2}\n来源地:{3}\n发布至:{4}\n'''.format(project_name , baseline, branch,apk_int_path.replace(\"/mnt\", \"\\\\\\\\10.250.115.52\").replace(\"/\",\"\\\\\"),(apk_release_path+\"/\"+baseline).replace(\"/mnt\", \"\\\\\\\\10.250.115.51\").replace(\"/\",\"\\\\\"))\n\n print 
(\"\\n##############################\\n\")\n print ('##分支: '+str(branch)) \n print ('##基线: '+str(baseline)) \n print ('##工程: '+str(project_name)) \n print ('##任务名称: '+str(job_name)) \n print (\"##APK目标路径: {0}\".format(apk_int_path).replace(\"/mnt\", \"\\\\\\\\10.250.115.52\").replace(\"/\",\"\\\\\"))\n print (\"##APK测试报告: {0}\".format(test_report).replace(\"/mnt\", \"\\\\\\\\10.250.115.52\").replace(\"/\",\"\\\\\"))\n print (\"##APK发布路径: {0}\".format(apk_release_path+\"/\"+baseline).replace(\"/mnt\", \"\\\\\\\\10.250.115.51\").replace(\"/\",\"\\\\\"))\n print (\"##############################\\n\")\n\n if not os.path.exists(test_report):\n print(\"##该基线未进行过提测,无法发布,请先进行提测(提测在编译时,选择Need_Test=Yes)!!!\")\n exit(5)\n\n print(\"##开始将{0}/{1}/{2}发布到归档服务器(51)\".format(project_name,branch,baseline))\n\n if not os.path.exists(apk_release_path):\n print(\"##尚未创建发布目录{0},开始创建...\\n\".format(apk_release_path))\n os.makedirs(apk_release_path)\n\n print(\"##输出与上一个release版本的差异...\\n\")\n\n if baseline not in os.listdir(apk_int_base_path):\n print (\"##基线列表:\"+str(os.listdir(apk_int_base_path)))\n print(\"##基线:{0}在目录{1}下不存在\\n\".format(baseline, apk_int_base_path))\n sys.exit(1)\n\n #获取前后两个版本的的不同之处\n diff_between_tag(baseline)\n\n #获取完整log\n get_full_log()\n\n print(\"##将apk拷贝到51服务器上\\n\")\n copy_to_server(apk_release_path,apk_int_path)\n\n print(\"##输出该apk信息到文档中\\n\")\n output_info(content)\n\n copy_readme(Readmefile)\n\n apk_dir = apk_release_path + \"/\" + baseline\n #find_apks_cmd = \"for /r \" + apk_dir + \" %i in (*.apk) do @echo %i\"\n find_apks_cmd = \"find \"+ apk_dir+\" -name *.apk\"\n apks_path = do_command(find_apks_cmd)\n if not apks_path[0]:\n print('##APK没有拷贝成功,发布失败,请检查相关参数!!!\\n')\n shutil.rmtree(apk_release_path + \"/\" + baseline)\n sys.exit(1)\n print(\"##APK发布成功!!!\\n\")\n\n print(\"##发布地址为:{0}\".format(apk_release_path + \"/\" + baseline).replace(\"/mnt\", \"\\\\\\\\10.250.115.51\").replace(\"/\",\"\\\\\"))\n print(\"*****************************************************************************************\\n\\n\\n\\n\")","sub_path":"script/ApkRelease/apksource_release.py","file_name":"apksource_release.py","file_ext":"py","file_size_in_byte":10724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"503592417","text":"# -*- coding:utf-8 -*-\n\nwhat_he_does = ' playing';\nhis_instrument = ' guitar';\nhis_name = 'Robert Johnson';\n\nartist_intro = his_name + what_he_does + his_instrument;\nprint(artist_intro);\n# 这是什么\n\nnum = 1;\nstring = '1';\nnum2 = int(string);\nprint(num + num2);\n\nwords = 'word' * 3;\nprint(words)\nurl = 'http://ww1.site.cn/14d2e8ejw1exjogbxdxhj20ci0kuwex.jpg'\nfile_name = url[-10:]\nprint((file_name))\n\nphone_number = '1386-666-8888'\nhiding_number = phone_number.replace(phone_number[:9], '*' * 9)\nprint(hiding_number)\n\nprint('{} a word she can get what she {} for.'.format('with', 'came'))\nprint('{preposition} a word she can get what she {verb} for.'.format(preposition = 'with', verb = 'came'))\nprint('{0} a word she can get what she {1} for.'.format('with', 'came'))\n\n# city = input('write down the name of city: ')\n# print('http://api.weather.com?city={}'.format(city))\n\ndef fahrenheit_converter(C):\n fahrenheit = C * 9 / 5 + 32\n return str(fahrenheit) + '^F'\n# print(str(fahrenheit) + '^F')\n\nC2F = fahrenheit_converter(35)\nprint(C2F)\n\ndef trapezoid_area(base_up, base_down, height):\n return (base_up + base_down) * height * 1/2\n\nprint(trapezoid_area(1, 2, 3))\nprint(trapezoid_area(height=1, 
base_up=2, base_down=3))\n\nfile = open('./text.txt', 'w')\nfile.write('Hello World')\n\ndef text_create(name, msg):\n relative_path = './'\n full_path = relative_path + name + '.txt'\n file = open(full_path, 'w')\n file.write(msg)\n file.close()\n print('Done')\n\n# text_create('hello', 'hello world')\n\ndef text_filter(word, cen_word='lame', ch_word='Awesome'):\n return word.replace(cen_word, ch_word)\n\n# print(text_filter('Python is lame'))\n\ndef censored_text_create(name, msg):\n clean_msg = text_filter(msg)\n text_create(name, clean_msg)\n\ncensored_text_create('try', 'lame! lame! lame!')\n","sub_path":"chapter3/t1.py","file_name":"t1.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"307983922","text":"import cv2\nimport numpy as np\nimport math\nimport time\n#from nms import non_max_suppression_fast\n#import navigation/nav_interface\n\n\nvideo = cv2.VideoCapture(\"SpaWarsGate3.mov\")\n#video = cv2.VideoCapture(\"gateC.mp4\")\n#video = cv2.VideoCapture(\"gateB.mp4\")\n#video = cv2.VideoCapture(0)\n\n# Downscale the image to a reasonable size to reduce compute\nscale = 1\n\n\n# Minimize false detects by eliminating contours less than a percentage of the image\narea_threshold = 0.01\ncroppedPixels = 150\n\nret, orig_frame = video.read()\nwidth = orig_frame.shape[0]\nheight = orig_frame.shape[1] - croppedPixels\ndim = (int(scale*height), int(scale*width))\n\nwhile (True):\n\n\tret, orig_frame = video.read()\n\tif not ret:\n\t\tbreak\n\n\t#ropped = orig_frame[croppedPixels:, :]\n\t#cv2.imshow(\"cropped\", cropped)\n \n\torig_frame = cv2.resize(orig_frame, dim, interpolation = cv2.INTER_AREA)\n\tframe = cv2.GaussianBlur(orig_frame, (5, 5), 0)\n\thsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\t\n\tmask0 = cv2.inRange(hsv,(0, 10, 10), (25, 255, 255) )\n\tmask1 = cv2.inRange(hsv,(160, 10, 10), (179, 255, 255) )\n\t# join masks\n\tmask = mask0 + mask1\n\t\n\n\tret, thresh = cv2.threshold(mask, 127, 255,0)\n \t#Erosions and dilations\n \t#erosions are apploed to reduce the size of foreground objects\n\tkernel = np.ones((3,3),np.uint8)\n\teroded = cv2.erode(thresh, kernel, iterations=0)\t\n\tdilated = cv2.dilate(eroded, kernel, iterations=3) \n\t#cv2.imshow(\"dilated\", dilated)\n\t#cv2.waitKey(0)\n\t#cv2.imshow(\"Edged\", edged)\n \n\tdst = cv2.equalizeHist(dilated)\n \n\tcv2.imshow(\"equilized\", dst)\n\n\tcnts,hierarchy = cv2.findContours(dilated,cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\t#cv2.drawContours(orig_frame, cnts, -1, (0, 255, 0), 3)\n\tcnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:60]\n \n \n\n\tboundingBoxes = np.empty((0, 4), float)\n\tif len(cnts) > 0:\n\n\t\tM = cv2.moments(cnts[0])\n\t\tfor c in cnts:\n\t\t\trect = cv2.minAreaRect(c)\n\t\t\t#print(\"rect: {}\".format(rect))\n\n\t\t\t# the order of the box points: bottom left, top left, top right,\n\t\t\t# bottom right\n\t\t\tbox = cv2.boxPoints(rect)\n\t\t\tbox = np.int0(box)\n\n\t\t\t#print(\"bounding box: {}\".format(box))\n\t\t\tcv2.drawContours(orig_frame, [box], 0, (0, 0, 255), 2)\n\t\t\t#x,y,w,h = cv2.boundingRect(c)\n\n\t\t\t#boundingBoxes = np.append(boundingBoxes, np.array([[x,y,x+w,y+h]]), axis = 0)\n\t\t\t#cv2.rectangle(orig_frame,(x,y), (x+w, y+h), (255,0,0), 2)\n\t\t\tcv2.imshow(\"bounding rectangle\",orig_frame)\n\t\t\tcv2.waitKey(0)\n\n\t\t\t#print(str(x/width) + \" \" + str(y/height) + \" \" + str((x+w)/width) + \" \" + str((y+h)/height))\n\n\t\t\tbox0 = 
(box[0])\n\t\t\t#print(box0[0]/width)\n\t\t\t#print(box0[1]/height)\n\n\t\t\tbox1 = (box[1])\n\t\t\t#print(box1[0]/width)\n\t\t\t#print(box0[1]/height)\n\n\t\t\tbox2 = (box[2])\n\t\t\t#print(box2[0]/width)\n\t\t\t#print(box2[1]/height)\n\n\t\t\tbox3 = (box[3])\n\t\t\t#print(box3[0]/width)\n\t\t\t#print(box3[1]/height)\n\n\t\t\tboxSizeThreshold = 10\n\n\t\t\tif (box1[1]-box0[1])*(box3[0]-box0[0] > boxSizeThreshold ):\n\t\t\t#calculating middle point of box\n\t\t\t\tboxX = ((box1[0]+box2[0])/2)\n\t\t\t\tboxY = ((box2[1]+box3[1])/2)\n\n\t\t\t\tprint(\"X\", boxX, \"Y\", boxY)\n\t \n\t\t\telse:\n\t\t\t\tboxX = -1\n\t\t\t\tboxY = -1\n\t\t\t\tprint(\"no gate found\")\n\n\t\t#passing the valuesc\n\t\t#nav_i = nav_interface.Nav_send_intf(\"192.168.1.22\", 5005, 'cv_nav_log', logging.INFO)\n\t\t#nav_i.send_cv_data(0, boxX, boxY, 0, 0, 0, 0, 0, 0, 0)\n\n\t\ttime.sleep(.01)\n\n\t\t#cv_gate_data_json = {}\n\t\t#cv_gate_data_json['x'] = boxX\n\t\t#cv_gate_data_json['y'] = boxY\n\n\t# perform non-maximum suppression on the bounding boxes\n\t#pick = non_max_suppression_fast(boundingBoxes, 0.1)\n\t#print (\"[x] after applying non-maximum, %d bounding boxes\" % (len(pick)))\n\n\t# loop over the picked bounding boxes and draw them\n\t#for (startX, startY, endX, endY) in pick:\n\t#\tcv2.rectangle(frame, (startX, startY), (endX, endY), (255, 255, 255), 2)\n\n\t# display the images\n\t#cv2.imshow(\"Original\", orig)\n\t#cv2.imshow(\"After NMS\", frame)\n\t#cv2.waitKey(0)\n\n\tkey = cv2.waitKey(1)\n\tif key == 27:\n\t\tbreak\n\nvideo.release()\ncv2.destroyAllWindows()\n\n","sub_path":"finalgate2.py","file_name":"finalgate2.py","file_ext":"py","file_size_in_byte":3911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"373502718","text":"#!/usr/bin/env python\n# coding=utf-8\n\"\"\"Extensions for Xarray for analysing weather data.\"\"\"\nimport functools\nimport importlib as ilib\nimport os\nimport sys\nimport xarray as xr\nimport logging\n\n_DA_ENV = \"IMKTK_DATAARRAY\"\n_DS_ENV = \"IMKTK_DATASET\"\n_LOG_LEVEL = \"IMKTK_LOGLEVEL\"\n\n\ndef _get_log_level_via_env():\n loglevel = os.environ.get(_LOG_LEVEL, \"ERROR\")\n numeric_level = getattr(logging, loglevel.upper(), None)\n if not isinstance(numeric_level, int):\n raise ValueError(f\"Invalid log level: {loglevel}\")\n return numeric_level\n\n\nlogging.basicConfig(format=\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\", level=_get_log_level_via_env())\nlogger = logging.getLogger(__name__)\n\n\n@xr.register_dataarray_accessor(\"imktk\")\nclass _NyxDA(object):\n def __init__(self, data):\n self._data = data\n\n def _add_methods(self, folder):\n return _add_folder(folder, mode=\"da\")\n\n def _load_env(self):\n return _load_from_env(mode=\"da\")\n\n def __repr__(self):\n add_methods = [x for x in dir(self) if not x.startswith(\"_\")]\n return f\"IMKTK.Dataarray(scripts: {add_methods})\"\n\n\n@xr.register_dataset_accessor(\"imktk\")\nclass _NyxDS(object):\n def __init__(self, data):\n self._data = data\n\n def _add_methods(self, folder):\n return _add_folder(folder, mode=\"ds\")\n\n def _load_env(self):\n return _load_from_env(mode=\"ds\")\n\n def __repr__(self):\n add_methods = [x for x in dir(self) if not x.startswith(\"_\")]\n return f\"IMKTK.Dataset(scripts: {add_methods})\"\n\n\ndef _patch(func, funcname, mode):\n classobject = _NyxDA if mode == \"da\" else _NyxDS\n\n @functools.wraps(func)\n def method(accessor, *args, **kwargs):\n return func(accessor._data, *args, **kwargs)\n\n setattr(classobject, 
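funcname, method)\n    # attach the wrapped function as a method on the accessor class, so that\n    # da.imktk.<funcname>(...) receives the DataArray/Dataset as first argument:\n    # setattr(classobject, 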
funcname, method)\n return func\n\n\ndef xtend_dataarray(da_folder=None):\n return _add_folder(da_folder, \"da\")\n\n\ndef xtend_dataset(ds_folder=None):\n return _add_folder(ds_folder, \"ds\")\n\n\ndef _add_folder(folder, mode=None):\n assert isinstance(mode, str) and mode.lower() in [\"ds\", \"da\"], \"Can not understand mode {}\".format(mode)\n assert os.path.isdir(folder), '\"{}\" is not a folder'.format(folder)\n mode = mode.lower()\n folder = os.path.realpath(folder)\n\n pythonfiles = [x[:-3] for x in os.listdir(folder) if x.endswith(\".py\") and not x.startswith(\"_\")]\n if not pythonfiles:\n logger.warning(\"Folder %s is empty. No methods added.\", folder)\n return None\n sys.path.insert(0, folder)\n for method in pythonfiles:\n try:\n lib = ilib.import_module(method, package=\"imktk\")\n _patch(getattr(lib, \"main\"), method, mode)\n logger.info(\"Method: %s added in mode: %s\", method, mode)\n except (SystemError, ImportError) as err:\n logger.error(\"Method: %s not loaded. Because: %s\", method, err)\n except AttributeError as err:\n logger.error(\"Method: %s has no 'main' function (err: %s).\", method, err)\n return True\n\n\ndef _load_from_env(mode=None):\n if mode is None:\n _ = [_load_from_env(x) for x in [\"da\", \"ds\"]]\n return None\n envvar = _DA_ENV if mode == \"da\" else _DS_ENV\n env = os.environ.get(envvar, False)\n if not env:\n logger.warning(\"ENV %s not set. No methods added.\", env)\n return None\n folders = env.split(\":\")\n for folder in folders:\n logger.info(\"Adding folder %s as %s (ENV: %s)\", folder, mode, env)\n _add_folder(folder, mode)\n","sub_path":"imktk/toolkit.py","file_name":"toolkit.py","file_ext":"py","file_size_in_byte":3486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"480395851","text":"#将指令性能测试工具生成的结果'HsmPerfTestResult.txt' \\\n#转换为csv格式, 可直接用excel打开\n\ndef chdir_to_curfiledir():\n import os,sys\n def cur_file_dir():\n path = sys.path[0]\n if os.path.isdir(path):\n return path\n elif os.path.isfile(path):\n return os.path.dirname(path)\n os.chdir( cur_file_dir() )\n\nchdir_to_curfiledir()\n\nimport re \nimport csv \nclass gen_csv_from_txt: \n headers = ['线程数','循环数','效率(笔/秒)','吞吐量(Mbps)','成功率','请求报文']\n \n pattern_block = re.compile(r'运行统计结果((.|\\n)*?)运行统计结束')\n pattern_threadnum = re.compile(r'线程数.*')\n pattern_loopnum = re.compile(r'单线程循环次数.*')\n pattern_reqmsg = re.compile(r'请求报文.*')\n pattern_rate = re.compile(r'成功率.*')\n pattern_eff = re.compile(r'效率.*') #笔/秒\n pattern_throughput = re.compile(r'吞吐量.*') #Mbps\n\n def __init__(self, sourceName='HsmPerfTestResult.txt'):\n self.sour = sourceName\n self.dest = self.sour.replace('.txt','.csv')\n\n #输入文件名 '''运行统计结果 运行统计结束''' 为界限返回文本块 迭代器\n def get_block(self,filename):\n with open(filename,'rt',encoding='gbk') as f:\n str = f.read() \n yield from self.pattern_block.finditer(str)\n \n #获取一个块中的有效数据,组成列表\n def gen_listblock(self,iter):\n list = []\n block = iter.group()\n\n line = self.pattern_threadnum.findall(block)\n threadnum = re.split(r'[:\\s]\\s*', str(line[0])) \n list.append(threadnum[1])\n\n line = self.pattern_loopnum.findall(block)\n loopnum = re.split(r'[:\\s]\\s*', str(line[0])) \n list.append(loopnum[1])\n\n line = self.pattern_eff.findall(block)\n eff = re.split(r'[:\\s]\\s*', str(line[0]))\n list.append(eff[1])\n\n line = self.pattern_throughput.findall(block)\n throughput = re.split(r'[:\\s]\\s*', str(line[0]))\n list.append(throughput[1])\n\n line = self.pattern_rate.findall(block)\n rate = re.split(r'[:\\s]\\s*', 
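str(line[0]))\n        # split on the first colon/whitespace run, so index 1 holds the value\n        # after the label, mirroring the extractions above:\n        # rate = re.split(r'[:\\s]\\s*', 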
str(line[0]))\n list.append(rate[1])\n\n line = self.pattern_reqmsg.findall(block)\n reqmsg = re.split(r'[:\\s]\\s*', str(line[0]))\n list.append(reqmsg[1])\n\n return list\n\n def gen_csv(self): \n iter = self.get_block(self.sour)\n with open(self.dest ,'w') as f:\n f_csv = csv.writer(f)\n f_csv.writerow(self.headers)\n for x in iter:\n row = self.gen_listblock(x) \n f_csv.writerow(row)\n\n\nif __name__ == '__main__':\n gen = gen_csv_from_txt('HsmPerfTestResult.txt')\n gen.gen_csv()\n\n\n","sub_path":"src/6/reading_and_writing_csv_data/my_example.py","file_name":"my_example.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"603915698","text":"'''\n1. После запуска предлагает пользователю ввести текст, содержащий любые слова,\nслоги, числа или их комбинации, разделенные пробелом.\n2. Считывает строку с текстом, и разбивает его на элементы списка, считая\nпробел символом разделителя.\n3. Печатает этот же список элементов (через пробел), однако с удаленными\nдубликатами.\nПример:\n-> asdfdsf324 ?3 efref4r4 23r(*&^*& efref4r4 a a bb ?3\nasdfdsf324 ?3 efref4r4 23r(*&^*& a bb\n\n'''\n\ndef main():\n word = str(input('Please type any combination of words: '))\n a = word.split()\n y = []\n for i in a:\n if i not in y:\n y.append(i)\n n_w = ' '.join(y)\n print(n_w)\n\n\n\nmain()\n\n\n","sub_path":"hw1.py","file_name":"hw1.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"440138075","text":"from __future__ import unicode_literals\n\nfrom wagtail.wagtailcore.models import Page\nfrom wagtail.wagtailcore.fields import RichTextField\nfrom wagtail.wagtailadmin.edit_handlers import FieldPanel\n\n\nclass HomePage(Page):\n body = RichTextField(blank=True)\n\n template = 'wagtail/pages/home_page.html'\n\n class Meta:\n verbose_name = \"Home Page\"\n verbose_name_plural = \"Home Pages\"\n\n content_panels = [\n FieldPanel('title', classname=\"full title\"),\n FieldPanel('body',),\n ]\n\nHomePage.promote_panels = Page.promote_panels\n","sub_path":"series_2/p_03/myproject/server/apps/wagtail/pages/models/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"384951956","text":"import threading\nimport queue\n\n\nclass Test(threading.Thread):\n\n def __init__(self):\n super(Test, self).__init__()\n\n def run(self):\n a = q.get()\n while True:\n a += 1\n q.task_done()\n\nif __name__ == '__main__':\n q = queue.Queue()\n for i in range(100):\n q.put(i)\n for i in range(4):\n test = Test()\n test.setDaemon(True)\n test.start()\n print('start', i)\n print(threading.active_count())\n q.join()\n","sub_path":"Parallel/thread.py","file_name":"thread.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"497849839","text":"# #Usando lo aprendido hasta ahora:\n# Hacer un programa con acceso a base de datos\n# que sirva como una agenda telefónica.\n# Debe tener un menu que permita:\n# 1 – agregar nuevo contacto\n# 2 – Listar todos los contactos\n# 3 – Buscar contactos por nombre o numero de teléfono\n# 4 – Actualizar un contacto\n# 5 – Eliminar un contacto\n# 6 – Salir\n# Al agregar un nuevo contacto, debe validar que el contacto no exista previamente. 
Si existe debe dar un\n# mensaje notificando al usuario.\n# Al terminar de agregar un contacto debe preguntar si desea agregar otro\n\nimport sqlite3\nconn = sqlite3.connect(\"miagendatelefonica.db\")\ncursor= conn.cursor()\nconn.close()\n\nclass Database:\n def __init__(self):\n self.connection = sqlite3.connect(\n host= \"localhost\",\n user= \"LisbethPinales\",\n password= \"2020\",\n db=\"miagendatelefonica\"\n\n )\n\n self.cursos = self.conexion.cursor()\n\n\n def add_contact (self,id,contacto,Telefono,Direccion):\n\n sql = \"INSERT INTO miagendatelefonica (ID,contacto,telefono) VALUES ('{}','{}','{}','{}','{}')\".format(ID,contacto,telefono)\n\n try:\n self.cursos.execute(sql)\n self.connection.commit()\n self.cursos.close()\n self.connection.close()\n print(\">>> Contacto registrado exitosamente\")\n\n except Exception:\n print(\">>> Contacto invalido\")\n\n raise\n\n\n\n def listar_contact(self):\n sql = \"SELECT ID,contacto,telefono FROM miagendatelefonica\"\n\n try:\n self.cursos.execute(sql)\n miagendatelefonica = self.cursos.fetchall()\n\n for mostrar in miagendatelefonica:\n print(\"ID:\", mostrar[0])\n print(\"Contacto:\", mostrar[1])\n print('Telefono:', mostrar[2])\n print('----------\\n')\n\n\n except Exception:\n raise\n\n\n\n def search_user(self,contacto):\n\n sql= \"SELECT ID, contacto, telefono FROM miagendatelefonica WHERE contacto = '{}' \".format(contacto)\n\n try:\n\n self.cursos.execute(sql)\n miagendatelefonica = self.cursos.fetchone()\n\n print(\"ID:\", miagendatelefonica[0])\n print(\"Contacto:\", miagendatelefonica[1])\n print(\"Telefono:\", miagendatelefonica[2])\n self.cursos.close()\n self.connection.close()\n\n\n\n except Exception:\n print(\">>> Este contacto no existe en su agenda telefonica\")\n raise\n\n\n\n\n def update_contact (self,contacto,Telefono):\n sql = \"UPDATE miagendatelefonica SET telefono = '{}' WHERE contacto = '{}'\".format(contacto, Telefono)\n try:\n self.cursos.execute(sql)\n self.connection.commit()\n self.cursos.close()\n self.connection.close()\n\n except Exception:\n print(\">>> Intente mas tarde\")\n raise\n\n\n\n def remove_contact(self,contacto):\n sql = \" REMOVE FROM miagendatelefonica WHERE contacto = {}\".format(contacto)\n\n try:\n self.cursos.execute(sql)\n self.connection.commit()\n self.cursos.close()\n self.connection.close()\n print(\">>> Contacto eliminado\")\n\n except Exception:\n print(\">>> Favor ingresar un contacto valido\")\n raise\n\n\ndef clear():\n if os.name == \"posix\":\n os.system(\"cls\")\n elif os.name == (\"ce\",\"nt\",\"dos\"):\n\n\n os.system(\"clear\")\n\n\ndef menu():\n opcion = 0\n while opcion != 6:\n print(\"\\t>>>Bienvenido a su agenda telefonica\\n\")\n print(\"1. Agregar nuevo contacto\")\n print(\"2. Listar todos los contactos\")\n print(\"3. Buscar contacto agregado\")\n print(\"4. Actualizar contactos agregados\")\n print(\"5. Eliminar contacto agregado\")\n print(\"6. 
Salir del menú\")\n opcion = int(input(\"Seleccione una opcion >>> \"))\n clear()\n \n print(\"\\n\")\n\n#funcionalidad a las funciones\n \n database = Database()\n if opcion == 1:\n agregar = \"no\"\n while agregar != \"no\":\n\n id= str(input(\"Ingrese el ID del contacto >>> \"))\n contacto = str(input(\"Digite el nombre del contacto >>> \"))\n telefono = str(input(\"Indique el número telefonico\"))\n database.add_contact (ID,contacto,telefono)\n print(\"\\tContacto Agregado\\n\")\n agregar = input(\"¿Gustaría agregar un nuevo contacto?\")\n \n\n\n\n elif opcion == 2:\n database.listar_contact()\n print(\"\\n\")\n\n\n elif opcion == 3:\n contacto = str(input(\" Inserte el contacto que desea buscar >>> \"))\n print(\"\\n\")\n database.search_user(contacto)\n\n\n\n elif opcion == 4:\n clear()\n contacto = str(input(\" Inserte el contacto que desea buscar >>> \"))\n database.search_user(contacto)\n clear()\n print(\"\\n\")\n\n\n telefono = input('Telefono: ')\n\n database.update_contact(telefono , contacto)\n\n\n elif opcion == 5:\n\n id = str(input(\" Inserte el contacto que desea eliminar >>> \"))\n database.remove_contact(contacto)\n print(\"\\n\")\n\n\n elif opcion == 6:\n print(\">>> Gracias por utilizar los servicios de su agenda telefonica móvil \")\n\n\n\nmenu()","sub_path":"practicaV.py","file_name":"practicaV.py","file_ext":"py","file_size_in_byte":5549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"618064443","text":"from copy import deepcopy\nfrom collections import deque\ndy = [1,-1,0,0]\ndx = [0,0,1,-1]\ngy = -1\ngx = -1\ndef bfs(B,y,x,n):\n global gy, gx, time\n BD = [['V']*W for _ in range(W)]\n BD[y][x] = 0\n Q = deque([[y,x]])\n time = 0\n while len(Q) != 0:\n R = Q.popleft()\n ty, tx = R[0], R[1]\n if B[ty][tx] == str(n):\n time = BD[ty][tx]\n gy, gx = ty, tx\n break\n for i in range(4):\n uy = ty + dy[i]\n ux = tx + dx[i]\n if uy != -1 and uy != H and ux != -1 and ux != W:\n if B[uy][ux] != 'X' and BD[uy][ux] == 'V':\n BD[uy][ux] = BD[ty][tx]+1\n Q.append([uy,ux])\n return gy, gx, time\n\nH, W, N = map( int, input().split())\nB = [ list(input()) for _ in range(H)]\nans = 0\nsy = -1\nfor i in range(H):\n for j in range(W):\n if B[i][j] == 'S':\n sy, sx = i, j\n break\n if sy != -1:\n break\nans = 0\nfor i in range(1,N+1):\n gy, gx, time = bfs(B,sy,sx,i)\n sy, sx = gy, gx\n ans += time\nprint(ans)\n \n","sub_path":"JOI/10/E.py","file_name":"E.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"413456970","text":"\"\"\"\"\n11. Interview Questions\n\nQuestion (1) Write a function with single loop to iterate through 2 lists of equal length. \nx = [10, 20, 30] and y = [100, 90, 80] and output should print [(100, 10), (90, 20), (80, 30)]\n\nQuestion (2) Write a function to find the spike in the set of number x = [50, 60, 55, 45, 40, 90, 100, 110, 5, 6, 7, 6]. \nHence expect answer to be [60, 110, 7].\n\nQuestion (3) Pharmacies and patients. 
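Sketch a small Django service in which a patient books an 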
Appointment with Pharmacies.\npython -m venv penv\npip install django ...\npython startproject pharmacy\n....\n\n-->settings.py\nPostgreSQL\n\nTerminal\npython manage.py startapp appointment\n\nappointment\n--> models.py\nclass Pharmacy(models.Model):\n name = models.CharField(max_length=521, blank=True, null=True)\n\nclass Patient(models.Model):\n name = models.CharField(max_length=521, blank=True, null=True)\n pharmacy = models.ForeignKey(Pharmacy, on_delete=models.SET_NULL, null=True)\n\nclass Appointment(models.Model):\n start_time = models.DateTimeField(blank=True, null=True)\n end_time = models.DateTimeField(blank=True, null=True)\n patient = models.ForeignKey(Patient, ond_)\n pharmacy = models.ForeignKey(Pharmacy)\n\n\n-->serializers.py\nclass AppointmentSerializer(serializers.ModelSErializer):\n class Meta:\n model = Appointment\n fields = '__all__'\n\n-->views.py\nclass BookAppointmentView(generics.GenericsAPIView):\n serializer_class = serializers.AppointmentSerializer\n permissions = [permissions.AllowAny]\n def post(self, request):\n request.data \n from django.utils import timezone\n appointment = Appointment.object.create(start_time, end_time, patient, pharmacy)\n return self.serializer_class(appointment).data\n\n-->urls.py\npath ('/bookappointment', views.BookAppointmentView.as_view(), name=\"post-appointment\")\n\n\n-->tests.py\nclass TestAppoint(TestCase)\n\n def test_appointment(self):\n Appointment.object.create(start_time, end_time, patient, pharmacy)\n\n\"\"\"\n\n\ndef interview():\n \n # question 1\n a = [10, 20, 30]\n b = [100, 90, 80]\n c = []\n a = [(b[i], a[i]) for i in range(len(a))]\n for i in range(len(a)):\n c.append((b[i], a[i]))\n\n # question 2\n x = [60, 50, 55, 45, 40, 90, 100, 110, 5, 6, 6, 7]\n y = []\n for i in range(1, len(x)-1):\n if x[i] >= x[i-1] and x[i] >= x[i+1]:\n y.append(x[i])\n if x[0] >= y[0]:\n y[0] = x[0]\n if x[-1] >= y[-1]:\n y[-1] = x[-1]\n return y\n\n\nif __name__ == \"__main__\":\n print(interview())\n\n","sub_path":"interviews/interview_072321.py","file_name":"interview_072321.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"145507599","text":"from django.contrib.auth import get_user_model, login\nfrom django.shortcuts import redirect, render\n\nfrom ..forms import SignupForm\n\nUser = get_user_model()\n\n__all__ = (\n 'signup',\n)\n\n\ndef signup(request):\n if request.method == 'POST':\n form = SignupForm(request.POST, request.FILES)\n if form.is_valid():\n user = form.signup()\n login(request, user)\n return redirect('index')\n else:\n form = SignupForm()\n\n context = {\n 'form': form,\n }\n return render(request, 'members/signup.html', context)\n\n\n","sub_path":"app/members/views/signup.py","file_name":"signup.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"265982236","text":"from collections import OrderedDict\r\nimport json\r\nimport numpy as np\r\n\r\n\r\n###################### DATA DUMP ################################################\r\nclass Dump:\r\n 'Common base class for all dumps'\r\n\r\n def __init__(self,scenario,uid):\r\n self.id = 1\r\n self.scenario= scenario\r\n self.uid = uid \r\n\r\n ####### last user's position\r\n def userLastPosition(self,list_of_static_nodes):\r\n x = []\r\n y = []\r\n z=[]\r\n l = []\r\n ids = []\r\n zois = []\r\n for i in self.scenario.usr_list:\r\n x.append(i.x_list[-1])\r\n 
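# capture the matching y coordinate, model count, and zone values per user\r\n            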
y.append(i.y_list[-1])\r\n z.append(len(i.model_list))\r\n l.append(i.zones.values())\r\n\r\n # print(\"User id: \", self.scenario.usr_list[i].id, \"position x: \", self.scenario.usr_list[i].x_list[-1] , \r\n # \"position y: \", self.scenario.usr_list[i].y_list[-1], \"zones: \",self.scenario.usr_list[i].zones.values())\r\n\r\n\r\n file = open(str(self.uid) +'/userLastPosition.txt', \"w\")\r\n file.write(json.dumps(x))\r\n file.write(json.dumps(\"&\"))\r\n file.write(json.dumps(y))\r\n file.write(json.dumps(\"&\"))\r\n file.write(json.dumps(z))\r\n file.write(json.dumps(\"&\"))\r\n file.write(json.dumps(l))\r\n for h in self.scenario.zois_list:\r\n file.write(json.dumps(\"&\"))\r\n file.write(json.dumps(h.x))\r\n file.write(json.dumps(\"&\"))\r\n file.write(json.dumps(h.y))\r\n file.write(json.dumps(\"&\"))\r\n file.write(json.dumps(self.scenario.radius_of_interest))\r\n file.write(json.dumps(\"&\"))\r\n file.write(json.dumps(self.scenario.radius_of_replication))\r\n file.write(json.dumps(\"&\"))\r\n file.write(json.dumps(self.scenario.radius_of_replication))\r\n\r\n for i in self.scenario.usr_list:\r\n ids.append(i.id)\r\n\r\n for i in self.scenario.zois_list:\r\n zois.append(i.id)\r\n \r\n file.write(json.dumps(\"&\"))\r\n file.write(json.dumps(list(list_of_static_nodes)))\r\n file.write(json.dumps(\"&\"))\r\n file.write(json.dumps(ids))\r\n file.write(json.dumps(\"&\"))\r\n file.write(json.dumps(zois))\r\n file.close()\r\n\r\n ####### Lists for statistics \r\n\r\n def statisticsList(self,slots, zoi_users, zoi, rep_users, rep, per_users, per,attempts):\r\n np.savetxt(str(self.uid)+'/dump.txt', np.column_stack((slots, zoi_users, zoi, rep_users, rep, per_users, per,attempts)),\r\n fmt=\"%i %i %i %i %i %i %i %i\")\r\n\r\n ####### Connection duration list\r\n\r\n def connectionDurationAndMore(self,contacts_per_slot_per_user_dynamic, contacts_per_slot_per_user_static,exiting_nodes):\r\n with open(str(self.uid)+'/contacts-per-slot-per-user-dynamic.json', 'w') as fp:\r\n json.dump(contacts_per_slot_per_user_dynamic, fp)\r\n\r\n with open(str(self.uid)+'/contacts-per-slot-per-user-static.json', 'w') as fp1:\r\n json.dump(contacts_per_slot_per_user_static, fp1)\r\n\r\n with open(str(self.uid)+'/connection-duration-list.json', 'w') as fp2:\r\n json.dump(self.scenario.connection_duration_list, fp2)\r\n\r\n with open(str(self.uid)+'/exiting-nodes.json', 'w') as fp3:\r\n json.dump(self.scenario.exiting_nodes, fp3)\r\n\r\n\r\n ####### Availability per model per slot\r\n\r\n def availabilityPerModel(self,a_per_model):\r\n with open(str(self.uid)+'/availability-per-model.json', 'w') as fp:\r\n json.dump(a_per_model, fp)\r\n\r\n ####### Availability per observation per slot\r\n\r\n def availabilityPerObservation(self,a_per_obs):\r\n with open(str(self.uid)+'/availability-per-observation.json', 'w') as fp:\r\n json.dump(a_per_obs, fp)\r\n\r\n ####### Availability final point per simulation\r\n\r\n def availabilityPerSimulation(self,printa):\r\n f = open(str(self.uid)+'/availability_points.txt',\"w\")\r\n f.write(str(printa))\r\n f.close()\r\n\r\n ####### list of availabilities per slot per simulation\r\n\r\n def listOfAveragesPerSlot(self,availabilities_list_per_slot):\r\n outfile = open(str(self.uid)+'/availability_per_slot_per_sim.txt', 'w')\r\n for result in availabilities_list_per_slot:\r\n outfile.writelines(str(result))\r\n outfile.write('\\n')\r\n outfile.close()\r\n\r\n ########### number of connections that started but didn't finish. 
With the same number of slots as hand shake + 1 slot to check \r\n # that they don't have anything to exchange\r\n def con0exchange(self):\r\n f = open(str(self.uid)+'/counters.txt',\"w\")\r\n f.write(str(self.scenario.count_0_exchange_conn)+\"\\n\")\r\n f.write(str(self.scenario.count_non_useful)+\"\\n\")\r\n f.write(str(self.scenario.count_useful)+\"\\n\")\r\n f.close()\r\n \r\n\r\n ####### Number of users in the ZOI per slot\r\n\r\n def nodesZoiPerSlot(self,nodes_in_zoi):\r\n with open(str(self.uid)+'/nodes-in-zoi.json', 'w') as fp:\r\n json.dump(nodes_in_zoi, fp)\r\n\r\n\r\n ####### Path followed by every node\r\n\r\n def nodesPath(self):\r\n outfile = open(str(self.uid)+'/nodes-path.txt', 'w')\r\n for n in self.scenario.usr_list:\r\n outfile.writelines(str(n.id)+\"\\n\")\r\n outfile.writelines(str(n.x_list)+\"\\n\")\r\n outfile.writelines(str(n.y_list)+\"\\n\")\r\n outfile.close()\r\n\r\n\r\n ####### Contributions in models\r\n\r\n def modelContributions(self,models_contributions):\r\n with open(str(self.uid)+'/model-contributions.json', 'w') as fp:\r\n json.dump(models_contributions, fp)\r\n\r\n ####### Freshness in models\r\n\r\n def modelFreshness(self,models_freshness):\r\n with open(str(self.uid)+'/model-freshness.json', 'w') as fp:\r\n json.dump(models_freshness, fp)\r\n\r\n ####### Nodes future zois\r\n\r\n def nodesFuture(self,nodes_future):\r\n with open(str(self.uid)+'/nodes-future.json', 'w') as fp:\r\n json.dump(nodes_future, fp)\r\n\r\n ####### merging mean rate\r\n\r\n def mergingMeanRate(self,merging_mean_rate):\r\n with open(str(self.uid)+'/merging-mean-rate.json', 'w') as fp:\r\n json.dump(merging_mean_rate, fp)\r\n\r\n ####### observations mean rate\r\n\r\n def observationsMeanRate(self,observations_mean_rate):\r\n with open(str(self.uid)+'/observations-mean-rate.json', 'w') as fp:\r\n json.dump(observations_mean_rate, fp)\r\n\r\n ","sub_path":"Dump.py","file_name":"Dump.py","file_ext":"py","file_size_in_byte":6424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"523510165","text":"#!/usr/bin/env python3\n\nimport argparse\nimport os\nimport subprocess\n\nfrom garage.experiment import to_local_command\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--snapshot_dir',\n type=str,\n default=None,\n help='Directory of the pickle file to resume experiment from.')\n parser.add_argument(\n '--resume_epoch',\n type=str,\n default=None,\n help='Index of epoch to restore from. '\n 'Can be \"first\", \"last\" or a number. 
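'\n        '(e.g. --resume_epoch 42). 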
'\n 'Not applicable when snapshot_mode=\"last\"')\n\n args = parser.parse_args()\n params = dict()\n params['resume_from_dir'] = args.snapshot_dir\n if args.resume_epoch is not None:\n params['resume_epoch'] = args.resume_epoch\n command = to_local_command(\n params, script='garage.experiment.experiment_wrapper')\n print(command)\n try:\n subprocess.call(command, shell=True, env=os.environ)\n except Exception as e:\n print(e)\n if isinstance(e, KeyboardInterrupt):\n raise\n","sub_path":"examples/resume_training.py","file_name":"resume_training.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"303115956","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# ============================================================================\n# Dive - Media content finder tool\n# Content File Builder script\n# Copyright (C) 2018 by Ralf Kilian\n# Distributed under the MIT License (https://opensource.org/licenses/MIT)\n#\n# Website: http://www.urbanware.org\n# GitHub: https://github.com/urbanware-org/dive\n# ============================================================================\n\nimport os\nimport sys\n\ndef main():\n from core import builder\n from core import clap\n from core import common\n\n try:\n p = clap.Parser()\n except Exception as e:\n print(\"%s: error: %s\" % (os.path.basename(sys.argv[0]), e))\n sys.exit(1)\n\n p.set_description(\"Create a content file from a directory or media.\")\n p.set_epilog(\"Further information and usage examples can be found \" \\\n \"inside the documentation file for this script.\")\n\n # Define required arguments\n p.add_avalue(\"-d\", \"--destination-directory\", \"destination directory \" \\\n \"(where to create the content file in)\", \"dir_destination\",\n None, True)\n p.add_avalue(\"-f\", \"--content-file\", \"name of the content file to create\",\n \"content_file\", None, True)\n p.add_avalue(\"-s\", \"--source-directory\", \"source directory (from which \" \\\n \"to gather the contents)\", \"dir_source\", None, True)\n\n # Define optional arguments\n p.add_switch(\"-c\", \"--case-sensitive\", \"do not ignore the case of the \" \\\n \"given exclude pattern\", \"case\", False, False)\n p.add_avalue(\"-e\", \"--exclude\", \"pattern to exclude certain files or \" \\\n \"directories from the content file (case-insensitive, \" \\\n \"multiple patterns separated via semicolon)\",\n \"pattern_exclude\", None, False)\n p.add_switch(\"-h\", \"--help\", \"print this help message and exit\", None,\n True, False)\n p.add_switch(\"-i\", \"--ignore-read-errors\", \"ignore read errors while \" \\\n \"gathering content\", \"ignore_read_errors\", True, False)\n p.add_avalue(None, \"--include-ace\", \"include the content from ACE \" \\\n \"archive files (requires 'unace' binary)\", \"bin_unace\",\n None, False)\n p.add_avalue(None, \"--include-rar\", \"include the content from RAR \" \\\n \"archive files (requires 'unrar' binary)\", \"bin_unrar\",\n None, False)\n p.add_switch(None, \"--include-tar\", \"include the content from TAR \" \\\n \"archive files (also supports Bzip2 and Gzip compressed \" \\\n \"TAR archives)\", \"include_tar\", True, False)\n p.add_switch(None, \"--include-zip\", \"include the content from ZIP \" \\\n \"archive files\", \"include_zip\", True, False)\n p.add_switch(None, \"--regex\", \"use regex syntax for the search term \" \\\n \"instead of just asterisk wildcards and semicolon \" \\\n \"separators (for details see the section 
\\\"Regular \" \\\n \"expression operations\\\" inside the Python documentation)\",\n \"regex\", True, False)\n p.add_avalue(\"-r\", \"--replace-source-directory\", \"replace the source \" \\\n \"directory path with a user-defined string inside the \" \\\n \"content file\", \"replace_string\", None, False)\n p.add_switch(None, \"--version\", \"print the version number and exit\", None,\n True, False)\n\n if len(sys.argv) == 1:\n p.error(\"At least one required argument is missing.\")\n elif (\"-h\" in sys.argv) or (\"--help\" in sys.argv):\n p.print_help()\n sys.exit(0)\n elif \"--version\" in sys.argv:\n print(builder.get_version())\n sys.exit(0)\n\n args = p.parse_args()\n try:\n if args.ignore_read_errors == None:\n args.ignore_read_errors = False\n\n print()\n print(\"Building the content file. Please wait.\")\n\n stats = builder.build_content_file(args.dir_destination,\n args.content_file, args.dir_source,\n args.ignore_read_errors,\n args.pattern_exclude, args.case,\n args.regex, args.bin_unace,\n args.bin_unrar, args.include_tar,\n args.include_zip,\n args.replace_string)\n\n just = common.get_max_digits([str(stats[\"lines_archive\"]),\n str(stats[\"lines_excluded\"]),\n str(stats[\"lines_ignored\"]),\n str(stats[\"lines_total\"]),\n str(stats[\"lines_written\"])])\n\n print(\"Build process completed.\")\n print()\n print(\"Total gathered lines of content: %s\" % \\\n str(stats[\"lines_total\"]).rjust(just, \" \"))\n print(\"Total lines read from archives: %s\" % \\\n str(stats[\"lines_archive\"]).rjust(just, \" \"))\n print(\"Total lines excluded from file: %s\" % \\\n str(stats[\"lines_excluded\"]).rjust(just, \" \"))\n print(\"Total amount of read errors: %s\" % \\\n str(stats[\"lines_ignored\"]).rjust(just, \" \"))\n print()\n print(\"Total lines written into file: %s\" % \\\n str(stats[\"lines_written\"]).rjust(just, \" \"))\n print()\n except Exception as e:\n p.error(e)\n\nif __name__ == \"__main__\":\n main()\n\n# EOF\n\n","sub_path":"python3/dive-builder.py","file_name":"dive-builder.py","file_ext":"py","file_size_in_byte":5646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"497253734","text":"#Prompt the user to enter a file name\nfn = input(\"Enter the file name:\")\n#The file is opened using the open() function in the read mode\nwith open(fn ,'r') as y:\n#A for loop is used to read each line in the file\n for x in y:\n#Each line in the file is converted to upper case using the upper() function\n z = x.upper()\n print(z)\n#To open the file\n#x = open(fn)\n#for y in x:\n #z = y.upper()\n #print(z)\n#To close the file\n#x.close()\n\n#Copy the content from the existing sample.txt file and paste into the new copy.txt file\nwith open(\"sample.txt\") as r:\n with open(\"copy.txt\", \"w+\") as r1:\n for x in r:\n r1.write(x)\n\n#Print the contents of copy.txt file\nprint(\"Printing the contents of the copy.txt file…\")\nwith open(\"copy.txt\") as r:\n for x in r:\n print(x)\n","sub_path":"PrintFile2.py","file_name":"PrintFile2.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"443564521","text":"import sys\nsys.path.append(\"/usr/lib/freecad-daily/lib/\")\n\nimport FreeCAD\nfrom FreeCAD import Base\nimport Part\nimport DraftVecUtils\nimport math as m\nimport argparse\n\nparser = argparse.ArgumentParser(description='Supply the parameter values for impeller geometry')\nparser.add_argument('delr', type=float, 
default=0.1)\nparser.add_argument('theta', type=float, default=m.pi/20)\nargs = parser.parse_args()\n\ndelr = args.delr\nthetaVol = -args.theta\n\nrOutlet = 0.2\n\nrInletV = 0.3+0.00003\n\nr1 = rInletV+delr\nr2 = r1+(2*rOutlet)\n\ndef Volute(r1,r2):\n delta = (m.log(r2/r1))/(2*m.pi)\n ExtLength = 6*0.3\n # thetaVol = -m.pi/10\n\n Points = []\n\n # tmp = 0.0\n # i = 0\n # while tmp < r2:\n # t = (i*2*m.pi)*0.01\n for i in xrange(100):\n t = (i*2*m.pi)*0.01\n P = Base.Vector(r1*m.exp(delta*t)*m.cos(t),-r1*m.exp(delta*t)*m.sin(t),0)\n Points.append(DraftVecUtils.rotate(P,thetaVol,Base.Vector(0,0,1)))\n\n C=Part.BSplineCurve()\n C.interpolate(Points)\n\n P1 = Points[-1].sub(Base.Vector(0,ExtLength,0))\n P2 = P1.sub(Base.Vector(2*rOutlet,0,0))\n P3 = Points[0].add(Base.Vector(0.0001,0,0))\n # P4 = P3.add(Base.Vector(-0.00005,0.00005,0))\n\n L1 = Part.LineSegment(Points[-1],P1)\n L2 = Part.LineSegment(P1,P2)\n L3 = Part.LineSegment(P3,Points[0])\n # L3 = Part.Arc(P3,P4,Points[0])\n L4 = Part.LineSegment(P2,P3)\n\n if C.intersect(L4):\n f = open('error.log','w')\n f.write('The volute geometry at tongue was intersecting and so the script terminated. Please try new parameters.')\n f.close()\n quit()\n else:\n S = Part.__sortEdges__([C.toShape(),L1.toShape(),L2.toShape(),L3.toShape(),L4.toShape()])\n W = Part.Wire(S)\n F = Part.Face(W)\n return F\n\nS = Part.makeCircle(rInletV)\n\nW1 = Part.Wire(S)\n\nF1 = Part.Face(W1)\nF2 = Volute(r1,r2)\n\nF = F2.cut(F1)\n\nF.exportBrep(\"Volute.brep\")\n","sub_path":"Pump/Volute.py","file_name":"Volute.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"558123280","text":"import pygame\nimport random\n\nBLACK = (0,0,0)\nWHITE = (255,255,255)\nBLUE = (50,50,255)\nYELLOW = (255,255,0)\nRED = (255,0,0)\nspeed_invader = 1\nplayer_speed = 0\nbullet_speed = 5\nbullet_invader_speed = -5\nspeed_invader_x = 2\nglobal invader_direction\ninvader_direction = 1\ntimer = 0\ntimer2 = 0\nHP = 5\nenemy = 20\n\nclass Block(pygame.sprite.Sprite):\n def __init__(self):\n super().__init__()\n \n self.image = pygame.image.load(\"Invader.png\")\n ##self.image.fill(RED)\n self.image.set_colorkey()\n self.rect = self.image.get_rect()\n self.rect.x = random.randrange(200, 800, 20)\n self.rect.y = random.randrange(0, 200, 20)\n self.trigger = False\n def update(self, speed_invader, bullet_invader_list, timer, speed_invader_x):\n global invader_direction\n save = invader_direction\n if self.rect.x > 980:\n invader_direction = -1\n for i in block_list:\n i.rect.y += 30\n elif self.rect.x < 0:\n invader_direction = 1\n for i in block_list:\n i.rect.y += 30\n if timer%2 == 0:\n self.rect.x += 3*invader_direction\n if timer%100 == 0:\n bullet = Bullet(self.rect.x+10, self.rect.y)\n bullet_invader_list.add(bullet)\n \n\nclass Player(pygame.sprite.Sprite):\n def __init__(self):\n super().__init__()\n self.image = pygame.image.load(\"Player.png\")\n self.image.set_colorkey()\n self.rect = self.image.get_rect()\n self.speed = 0\n self.rect.x = 480\n self.rect.y = 570\n def update(self, player_speed):\n self.rect.x += player_speed\n\nclass Bullet(pygame.sprite.Sprite):\n def __init__(self, x_pos, y_pos):\n super().__init__()\n self.image = pygame.Surface([6, 6])\n self.image.fill(WHITE)\n self.rect = self.image.get_rect()\n self.rect.x = x_pos\n self.rect.y = y_pos\n def update(self, bullet_speed):\n self.rect.y -= bullet_speed\n \npygame.init()\nclock = pygame.time.Clock()\nsize = (1000,600)\nscreen = 
pygame.display.set_mode(size)\npygame.display.set_caption(\"My First Flipbook\")\ngame_over = False\n\n\nblock_list = pygame.sprite.Group()\nfor i in range(20):\n block = Block()\n \n block_list.add(block)\n\nplayer = Player()\nplayer_list = pygame.sprite.Group()\nplayer_list.add(player)\n\nbullet_list = pygame.sprite.Group()\nbullet_invader_list = pygame.sprite.Group()\n\npygame.font.init()\nmyfont = pygame.font.SysFont('Comic Sans MS', 30)\n\nwhile not game_over:\n # -- User input and controls\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n game_over = True\n #End If\n #Next event\n \n keys = pygame.key.get_pressed()\n if keys[pygame.K_LEFT] and player.rect.x > 0:\n player_speed = -5\n if keys[pygame.K_SPACE]:\n if timer > 3:\n bullet = Bullet(player.rect.x+10, 564)\n bullet_list.add(bullet)\n timer = 0\n elif keys[pygame.K_RIGHT] and player.rect.x < 975:\n player_speed = 5\n if keys[pygame.K_SPACE]:\n if timer > 3:\n bullet = Bullet(player.rect.x+10, 564)\n bullet_list.add(bullet)\n timer = 0\n elif keys[pygame.K_SPACE]:\n if timer > 3:\n bullet = Bullet(player.rect.x+10, 564)\n bullet_list.add(bullet)\n timer = 0\n else:\n player_speed = 0\n \n screen.fill (BLACK)\n \n bullet_hit_group = pygame.sprite.groupcollide(bullet_list, block_list, True, True)\n\n invader_bullet_hit_group = pygame.sprite.groupcollide(bullet_invader_list, player_list, True, False)\n \n invader_player_hit_group = pygame.sprite.groupcollide(player_list, block_list, True, True)\n \n for i in invader_player_hit_group:\n game_over = True\n \n for f in invader_bullet_hit_group:\n HP -= 1\n \n for f in bullet_hit_group:\n enemy -=1\n if enemy == 0:\n game_over = True\n \n for i in block_list:\n if i.rect.y > 580:\n game_over = True\n \n if HP == 0:\n game_over = True\n \n block_list.draw(screen)\n player_list.draw(screen)\n bullet_list.draw(screen)\n bullet_invader_list.draw(screen)\n block_list.update(speed_invader, bullet_invader_list, timer2, speed_invader_x)\n player_list.update(player_speed)\n bullet_list.update(bullet_speed)\n bullet_invader_list.update(bullet_invader_speed)\n \n font = pygame.font.Font(\"freesansbold.ttf\", 20)\n text = font.render(\"Health Points: \" + str(HP), True, WHITE)\n screen.blit(text, (800,30))\n \n pygame.display.flip()\n \n timer+=1\n timer2+=1\n clock.tick(60)\n\nscreen.fill (BLACK)\nif enemy == 0:\n font = pygame.font.Font(\"freesansbold.ttf\", 50)\n text = font.render(\"You have won!\", True, WHITE)\n screen.blit(text, (350,250))\nelse:\n font = pygame.font.Font(\"freesansbold.ttf\", 50)\n text = font.render(\"Game Over\", True, WHITE)\n screen.blit(text, (350,250))\npygame.display.flip()\n\n##pygame.quit()\n","sub_path":"Space_Invaders.py","file_name":"Space_Invaders.py","file_ext":"py","file_size_in_byte":5132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"311349269","text":"from django.conf.urls import url\nfrom views import *\n\nurlpatterns = [\n url(r'^$', index, name='index'),\n url(r'^authorize$', authorize, name='authorize'),\n url(r'^login$', login, name='login'),\n url(r'^success$', success, name='success'),\n url(r'^logout$', logout, name='logout')\n]\n","sub_path":"Django/integration_project/apps/login/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"127123871","text":"import pytest\nfrom selenium import webdriver\n\n\n@pytest.fixture\ndef driver(request):\n wd = 
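webdriver.Chrome()  # assumes a chromedriver binary on PATH (any local\n    # WebDriver would do); the quit finalizer below runs after each test using 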
webdriver.Chrome()\n request.addfinalizer(wd.quit)\n return wd\n\n\ndef test_8_check_stikers(driver):\n driver.get(\"http://localhost:8080/litecart/en/\")\n item = 0\n while item < len(driver.find_elements_by_css_selector(\"li.product\")):\n goods = driver.find_elements_by_css_selector(\"li.product\")[item]\n label_count = len(goods.find_elements_by_css_selector(\"div.sticker\"))\n assert label_count == 1\n item += 1\n","sub_path":"test_8_check_stikers.py","file_name":"test_8_check_stikers.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"619881648","text":"# -*- coding: utf-8 -*-\n\nimport json\nimport mysql.connector\nimport os\nimport re\nimport uuid\nfrom ebooklib import epub\n\noutput_dir = os.getcwdu() + '/build'\n\noid = '100100010007'\n\nsections = ('','首页','小喇叭','识字通','资料城','推荐网址','作者简介','课文学习','拓展资源','习作园地','背景知识','音频材料','教学资料','','阅读方法指导','文本探究','拓展阅读','拓展写作','主题阅读','口语、写作指导','写作园地','综合学习','活动设计','抛砖引玉','','Index','Word','Dialogue','Reading','Saying','Song','Rhythm','Game','Practice','Website','','Share','Listening','Drill','Exam','Grammar','Writing','Other','','','','','知识统筹','问题情境','实践演练','拓展阅读','园丁资料','探究活动','资源推荐','基础知识')\n\npattern_style = re.compile(r'\\sstyle\\s*=\"[^\"]*\"', re.I)\npattern_font = re.compile(r']*>', re.I)\n\ndef normalize_content(content):\n if content:\n content = pattern_style.sub('', content)\n content = pattern_font.sub('', content)\n return content\n\ndef generate_epub(parent_dir, id, sectionId, title, content):\n content = normalize_content(content)\n \n book = epub.EpubBook()\n book.FOLDER_NAME = 'OEBPS'\n\n # add metadata\n book.set_identifier(str(uuid.uuid4()))\n book.set_title(title)\n book.set_language('zh')\n\n book.add_author('lcell')\n\n # defube style\n #style = '''p { text-indent: 2em; }'''\n style = ''\n \n default_css = epub.EpubItem(uid=\"style_default\", file_name=\"style/default.css\", media_type=\"text/css\", content=style)\n book.add_item(default_css)\n\n c1 = epub.EpubHtml(title=title, file_name='1.xhtml', lang='zh')\n c1.content = content\n c1.add_item(default_css)\n\n # add chapters to the book\n book.add_item(c1)\n\n # create table of contents\n book.toc = [ epub.Link('1.xhtml', title, 'content') ]\n\n # add navigation files\n book.add_item(epub.EpubNcx())\n #book.add_item(epub.EpubNav())\n\n # create spine\n book.spine = [ c1 ]\n\n # create epub file\n dir = output_dir + parent_dir + '/' + unicode(sections[int(sectionId)], 'utf8')\n if not os.path.exists(dir):\n os.makedirs(dir)\n file_path = dir + '/' + str(id) + '-' + title + '.epub'\n #file_path = title + '.epub'\n epub.write_epub(file_path, book, {})\n\ndef read_outlines(cnx, parent_id):\n cursor = cnx.cursor()\n cursor.execute(\"SELECT * FROM outline where PARENTID = '\" + parent_id + \"'\")\n result = cursor.fetchall()\n cursor.close()\n return result\n\ndef read_contents(cursor, outline_id, dir):\n cursor.execute(\"select ro.id,rd.sectionId,rd.title,rc.content from resourceoutline ro left join resourcedetail rd on ro.detailID = rd.id left join resourcecontent rc on rd.contentID = rc.id where ro.outlineID='\" + outline_id + \"'\")\n\n row = cursor.fetchone()\n while row is not None:\n generate_epub(dir, row[0], row[1], row[2], row[3])\n row = cursor.fetchone()\n #break\n\ndef main():\n with open('config.json') as json_data:\n cfg = json.load(json_data)\n \n cnx = mysql.connector.connect(user = cfg['mysql']['user'],\n password = cfg['mysql']['password'],\n host = 
cfg['mysql']['host'],\n port = cfg['mysql']['port'],\n database = cfg['mysql']['database'])\n\n try:\n outlines = read_outlines(cnx, oid)\n cursor = cnx.cursor()\n for ol in outlines:\n read_contents(cursor, ol[0], ol[3])\n #break\n finally:\n cnx.close()\n\nif __name__ == \"__main__\": main()\n\n","sub_path":"convert-simple-text.py","file_name":"convert-simple-text.py","file_ext":"py","file_size_in_byte":3784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"93203829","text":"import queue\nimport threading\n\n__author__ = 'yyc'\n\nimport mysql.connector, mysql.connector.pooling, mysql.connector.errors\nimport time\nfrom queue import Queue\n\nconn = [None]\n\ndb_config = {'user': 'root', 'password': 'dianping', 'host': '58.198.176.201', 'database': 'res'}\n\n#\n# def connect(**other):\n# for i in db_config:\n# other[i] = db_config[i]\n# return mysql.connector.connect(**other)\n#\n#\n# def update_conn(con=conn):\n# if conn[0]:\n# conn[0].commit()\n# conn[0].close()\n# try:\n# con[0] = mysql.connector.connect(**db_config)\n# except (mysql.connector.errors.ProgrammingError, mysql.connector.errors.InterfaceError) as e:\n# update_conn(conn)\n#\n#\n# update_conn()\n#\n#\n# def get_cursor(conn, depth=1):\n# try:\n# return conn[0].cursor()\n# except:\n# time.sleep(1)\n# if depth > 1:\n# print(depth)\n# return get_cursor(conn, depth + 1)\n#\n#\n# def update_shop_brief(shop_id, name, type, product_grade, decoration_grade, service_grade, ave, region, addr, phone):\n# # 用于search函数中的更新\n# cx = get_cursor(conn)\n# sql = \"SELECT shop_id FROM shop WHERE shop_id='%s'\" % (shop_id, )\n# cx.execute(sql)\n# if not cx.fetchall():\n# sql = \"INSERT INTO shop\" \\\n# \" (shop_id, name, type, product_score , decoration_grade, service_grade, ave_score, ave, locality_region, street_address, update_time)\" \\\n# \" VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\n#\n# cx.execute(\n# sql,\n# (shop_id, type, name, product_grade, decoration_grade, service_grade,\n# (product_grade + decoration_grade + service_grade) / 3, ave, region, addr, '20000000')\n# )\n# conn[0].commit()\n# pass\n# cx.close()\n# pass\n#\n#\n# def get_tree_by_root(id):\n# cx = get_cursor(conn)\n# sql = \"SELECT * FROM types WHERE parent=%s\"\n# cx.execute(sql, (id, ))\n# result = [list(i) for i in cx.fetchall()]\n# for i in result:\n# i.append(get_tree_by_root(i[0]))\n# return result\n#\n#\n# def update_life(data):\n# cx = get_cursor(conn)\n# sql = 'UPDATE shop SET name2=%s, count_5=%s, count_4=%s, count_3=%s, count_2=%s, count_1=%s, count=%s, ' \\\n# 'ave_score=%s, introduction=%s, time=%s WHERE shop_id=%s'\n# cx.execute(sql, data)\n# conn[0].commit()\n# cx.close()\n# pass\n#\n#\n# def upgrade_type(types):\n# if not len(types) == 3:\n# return ''\n# cx = get_cursor(conn)\n# sql = \"SELECT shop_count FROM types WHERE type='%s'\" % (types[0], )\n# # print(sql)\n# cx.execute(sql)\n# f = cx.fetchone()\n# # print(f)\n# if f:\n# if int(f[0]) == int(types[1]):\n# sql = \"UPDATE types SET shop_count='%s', updated='%s' WHERE type='%s'\" % (types[1], 1, types[0])\n# else:\n# sql = \"UPDATE types SET shop_count='%s', updated='%s' WHERE type='%s'\" % (types[1], 0, types[0])\n# else:\n# sql = \"INSERT INTO types (type, shop_count, parent, updated) VALUES ('%s', '%s', '%s', 1)\" % types\n# cx.execute(sql)\n# conn[0].commit()\n# cx.close()\n# pass\n#\n#\n# def update_type(type, name):\n# cx = get_cursor(conn)\n# sql = 'UPDATE types SET name = %s WHERE type = %s'\n# cx.execute(sql, (name, type))\n# 
conn[0].commit()\n#\n#\n# def update_type_by_id(id, type):\n# cx = get_cursor(conn)\n# sql = 'SELECT type FROM shop WHERE shop_id = %s'\n# cx.execute(sql, (id, ))\n# try:\n# update_type(cx.fetchone()[0], type)\n# except:\n# return\n#\n#\n# def all_main():\n# cx = get_cursor(conn)\n# sql = 'SELECT shop_id FROM shop;'\n# cx.execute(sql)\n# return cx.fetchall()\n#\n#\n# def ex(sql, d=()):\n# cx = get_cursor(conn)\n# try:\n# cx.execute(sql, d)\n# except mysql.connector.errors.IntegrityError:\n# # print('IntegrityError')\n# return\n# except Exception as e:\n# print(e)\n# for i in d:\n# print(type(i), i)\n# try:\n# return cx.fetchall()\n# except Exception as e:\n# # print(e)\n# conn[0].commit()\n# return\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n\n\ndef execute_insert(_sql, _params=None):\n \"\"\"\n 测试用\n 实际使用时应使用线程池插入\n 用于同步插入\n \"\"\"\n if not _params:\n _params = ()\n _con = mysql.connector.connect(**db_config)\n _cus = _con.cursor()\n _cus.execute(_sql, _params)\n _con.commit()\n _cus.close()\n _con.disconnect()\n\n\ndef execute_mamy_insert(_sql, _params=None):\n \"\"\"\n 测试用\n 实际使用时应使用线程池插入\n 用于同步插入\n \"\"\"\n _con = mysql.connector.connect(**db_config)\n _cus = _con.cursor()\n for _i in _params:\n _cus.execute(_sql, _i)\n _con.commit()\n _cus.close()\n _con.disconnect()\n\n\ndef execute_inquire(_sql, _params=None):\n \"\"\"\n 用于同步查询,获得结果\n \"\"\"\n if not _params:\n _params = ()\n _con = mysql.connector.connect(**db_config)\n _cus = _con.cursor()\n _cus.execute(_sql, _params)\n _result = _cus.fetchall()\n _cus.close()\n _con.disconnect()\n return _result\n\n\ndef task_main(pooled_conn, _sql, prams=None):\n \"\"\"\n 用于执行insert等不需要返回值的sql,异步\n \"\"\"\n try:\n cx = pooled_conn.cursor()\n if not prams:\n prams = ()\n # print(_sql, prams)\n cx.execute(_sql, prams)\n pooled_conn.commit()\n cx.close()\n except mysql.connector.errors.Error as e:\n print(\"sql error:\", type(e).__name__, e, _sql, prams)\n # raise e\n pass\n finally:\n pooled_conn.close()\n\n\nclass DBPool:\n \"\"\"\n 也许已经完成了\n 不知道有没有bug\n \"\"\"\n\n def __init__(self, size=10):\n self.conn_pool = mysql.connector.pooling.MySQLConnectionPool(pool_name=\"db_pool\", pool_size=size, **db_config)\n self.tasks_queue = Queue() # [(sql, (param1, ...)), ...]\n\n def get_db_queue(self):\n return self.tasks_queue\n\n def _main(self):\n while True:\n try:\n _conn = self.conn_pool.get_connection()\n except mysql.connector.errors.PoolError as e:\n # print(e)\n time.sleep(1)\n continue\n try:\n params = self.tasks_queue.get()\n except queue.Empty:\n _conn.close()\n time.sleep(1)\n continue\n try:\n # thread = threading.Thread(target=task_main, args=((_conn, ) + params))\n # thread.start()\n task_main(_conn, *params)\n except TypeError as e:\n print(e, params)\n\n def start(self):\n t = threading.Thread(target=self._main)\n t.start()\n\n\nif __name__ == '__main__':\n i = 1\n while i < 8355858:\n execute_mamy_insert('INSERT INTO Deals (deal_id, update_time) VALUES (%s, 0001-01-01)',\n [(i + base, ) for base in range(10000)])\n i += 10000\n","sub_path":"dianping/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":7172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"41930098","text":"import json\nfrom ..._lib.Helper import Helper\nfrom bson.objectid import ObjectId\n\nclass SoccerMatch:\n def __init__(self):\n self.Id = None\n self.Date = None\n self.HomeTeam_Id = None\n self.HomeTeam = None\n 
self.AwayTeam_Id = None\n self.AwayTeam = None\n self.Location = None\n\n self.addedStatus = 'new'\n self.addedOddsHome = 1.0\n self.addedOddsDraw = 1.0\n self.addedOddsAway = 1.0\n\n #self.League = None\n #self.Round = None\n #self.HomeGoals = None\n #self.AwayGoals = None\n #self.Time = None\n #self.HomeTeamYellowCardDetails\n #self.AwayTeamYellowCardDetails\n #self.HomeTeamRedCardDetails\n #self.AwayTeamRedCardDetails\n\n def getObjectDict(self):\n return {\n 'matchId' : self.Id,\n 'hTeamId' : self.HomeTeam_Id,\n 'hTeamName' : self.HomeTeam,\n 'aTeamId' : self.AwayTeam_Id,\n 'aTeamName' : self.AwayTeam,\n 'stadium' : self.Location,\n 'startDate' : Helper.cetToUtcSpecial(self.Date),\n 'status' : self.addedStatus,\n 'oddsHome' : self.addedOddsHome,\n 'oddsDraw' : self.addedOddsDraw,\n 'oddsAway' : self.addedOddsAway,\n\n # dynamically added in the outer processor function\n 'competitionPrototype' : ObjectId(self.addedCompetitionPrototypeId),\n 'hTeam' : ObjectId(self.addedHTeamId),\n 'aTeam' : ObjectId(self.addedATeamId)\n }\n\n def __str__(self):\n return json.dumps(self.getObjectDict())\n","sub_path":"src/soccer/Document/SoccerMatch.py","file_name":"SoccerMatch.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"599132272","text":"# coding: utf-8\n\n\"\"\"\n Chat API SDK\n\n The SDK allows you to receive and send messages through your WhatsApp account. [Sign up now](https://app.chat-api.com/) The Chat API is based on the WhatsApp WEB protocol and excludes the ban both when using libraries from mgp25 and the like. Despite this, your account can be banned by anti-spam system WhatsApp after several clicking the \\\"block\\\" button. # noqa: E501\n\n The version of the OpenAPI document: 1.0.0\n Contact: sale@chat-api.com\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\n\nclass GroupParticipantAction(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n 'group_id': 'str',\n 'participant_chat_id': 'str',\n 'participant_phone': 'int'\n }\n\n attribute_map = {\n 'group_id': 'groupId',\n 'participant_chat_id': 'participantChatId',\n 'participant_phone': 'participantPhone'\n }\n\n def __init__(self, group_id=None, participant_chat_id=None, participant_phone=None): # noqa: E501\n \"\"\"GroupParticipantAction - a model defined in OpenAPI\"\"\" # noqa: E501\n\n self._group_id = None\n self._participant_chat_id = None\n self._participant_phone = None\n self.discriminator = None\n\n self.group_id = group_id\n self.participant_chat_id = participant_chat_id\n if participant_phone is not None:\n self.participant_phone = participant_phone\n\n @property\n def group_id(self):\n \"\"\"Gets the group_id of this GroupParticipantAction. # noqa: E501\n\n Chat ID from the chat list. Examples: 19680561234-1479621234@g.us for the group. # noqa: E501\n\n :return: The group_id of this GroupParticipantAction. # noqa: E501\n :rtype: str\n \"\"\"\n return self._group_id\n\n @group_id.setter\n def group_id(self, group_id):\n \"\"\"Sets the group_id of this GroupParticipantAction.\n\n Chat ID from the chat list. 
Examples: 19680561234-1479621234@g.us for the group. # noqa: E501\n\n :param group_id: The group_id of this GroupParticipantAction. # noqa: E501\n :type: str\n \"\"\"\n if group_id is None:\n raise ValueError(\"Invalid value for `group_id`, must not be `None`\") # noqa: E501\n\n self._group_id = group_id\n\n @property\n def participant_chat_id(self):\n \"\"\"Gets the participant_chat_id of this GroupParticipantAction. # noqa: E501\n\n **Required if participantPhone is not set** Chat ID from the message list. Examples: 17633123456@c.us. Used instead of the participantPhone parameter. # noqa: E501\n\n :return: The participant_chat_id of this GroupParticipantAction. # noqa: E501\n :rtype: str\n \"\"\"\n return self._participant_chat_id\n\n @participant_chat_id.setter\n def participant_chat_id(self, participant_chat_id):\n \"\"\"Sets the participant_chat_id of this GroupParticipantAction.\n\n **Required if participantPhone is not set** Chat ID from the message list. Examples: 17633123456@c.us. Used instead of the participantPhone parameter. # noqa: E501\n\n :param participant_chat_id: The participant_chat_id of this GroupParticipantAction. # noqa: E501\n :type: str\n \"\"\"\n if participant_chat_id is None:\n raise ValueError(\"Invalid value for `participant_chat_id`, must not be `None`\") # noqa: E501\n\n self._participant_chat_id = participant_chat_id\n\n @property\n def participant_phone(self):\n \"\"\"Gets the participant_phone of this GroupParticipantAction. # noqa: E501\n\n **Required if participantChatId is not set** A phone number starting with the country code. You do not need to add your number. USA example: 17472822486. # noqa: E501\n\n :return: The participant_phone of this GroupParticipantAction. # noqa: E501\n :rtype: int\n \"\"\"\n return self._participant_phone\n\n @participant_phone.setter\n def participant_phone(self, participant_phone):\n \"\"\"Sets the participant_phone of this GroupParticipantAction.\n\n **Required if participantChatId is not set** A phone number starting with the country code. You do not need to add your number. USA example: 17472822486. # noqa: E501\n\n :param participant_phone: The participant_phone of this GroupParticipantAction. 
# noqa: E501\n :type: int\n \"\"\"\n\n self._participant_phone = participant_phone\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, GroupParticipantAction):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","sub_path":"openapi_client/models/group_participant_action.py","file_name":"group_participant_action.py","file_ext":"py","file_size_in_byte":6237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"574614579","text":"import json\n\nimport requests\n\nfrom modules import readbase as rb, argbase as arg\n\n# define global variables\n\nbaseadvertiser = {\n \"advertisers\": [\n\n ]\n}\nbasecreative = {\n \"materials\": [\n\n ]\n}\nerrorcodes = {\n 1001: \"Authentication error (dsp-token error)\",\n 1002: \"Missing required parameter error\",\n 1003: \"Illegal parameters\",\n 1004: \"File format error\",\n 1005: \"File size error\",\n 1006: \"The file size is incorrect\",\n 1007: \"File get error\",\n 2001: \"Upload failed\",\n 2002: \"Data does not exist\",\n 2003: \"Database error\"\n}\n\ntrackingentry = {\n \"type\": \"\",\n \"id\": \"\",\n \"status\": 0,\n \"raw\": {}\n}\n\nbaseheader = {'content-type': 'application/json', 'authorization': ''}\n\n# options as globals\nusagemsg = \"This program checks on the status of the approval for the advertiser\"\nmsg = arg.MSG()\n\n\ndef main():\n \"\"\"main processing loop\"\"\"\n do = arg.MyArgs(usagemsg)\n do.processargs()\n if arg.Flags.test:\n msg.TEST(\"Running in test mode.\")\n baseurl = arg.Flags.configsettings['testurl']\n else:\n baseurl = arg.Flags.configsettings['serverurl']\n msg.DEBUG(do)\n tracking_init = rb.ReadJson(arg.Flags.configsettings['root'], arg.Flags.configsettings['data'],\n arg.Flags.configsettings['tracking'])\n tracking_init.readinput()\n tracking_out = rb.WriteJson(arg.Flags.configsettings['root'], arg.Flags.configsettings['data'],\n arg.Flags.configsettings['tracking'])\n baseheader['authorization'] = arg.Flags.configsettings['dsptoken']\n for el in tracking_init.data:\n if el['type'] == 'advertiser':\n status_code, rj = queryadvertiser(baseurl, el['id'])\n if status_code == 200:\n if rj['code'] == 0:\n el['status'] = rj['result'][0]['status']\n if el['status'] == 1:\n print(\"Review Pending for {}\".format(el['id']))\n elif el['status'] == 3:\n print(\"Advertiser Rejected {} with Reason: [{}]\".format(el['id'],\n rj['result'][0]['rejectReason']))\n elif el['status'] == 4:\n print(\"Advertiser Approved! 
{}\".format(el['id']))\n else:\n msg.VERBOSE(\"Advertiser: Unknown status code or no response\")\n else:\n checkresp = \"Advertiser Check Failed for advId: {}: \\n\\tMessage: {}\\n\\tError Code: {} [{}]\" \\\n \"\\n\\tError Message: {}\\n\\tDescription: {}\"\n print(checkresp.format(el['id'], rj['msg'], rj['result'][0]['code'], errorcodes[rj['result'][0]['code']], rj['result'][0]['msg'],\n rj['result'][0]['desc']))\n else:\n msg.ERROR(\"HTTP Response {}\".format(status_code))\n tracking_out.data = tracking_init.data\n tracking_out.writeoutput()\n\n\ndef queryadvertiser(u: str, a):\n action_u_r_l = u + \"/v1/advertiser/query?advId=\" + str(a)\n msg.DEBUG(\"GET: {}\".format(action_u_r_l))\n try:\n r = requests.get(action_u_r_l)\n msg.DEBUG(\"{}\\n\\t{}\".format(r.status_code, r.content.decode('utf-8')))\n except requests.exceptions.Timeout:\n # Maybe set up for a retry, or continue in a retry loop\n msg.ERROR(\"Connection timeout Error\")\n except requests.exceptions.RequestException as e:\n msg.ERROR(e)\n if r.status_code == 200:\n if arg.Flags.test:\n msg.TEST(\"full json is \\n\\t{}\".format(json.loads(r.content.decode('utf-8'))))\n msg.TEST(\"\\n\\tstatus: {}\\n\\theaders: {}\\n\\turl: {}\\n\\treason: {}\".format(r.status_code, r.headers,r.url, r.reason))\n return r.status_code, json.loads(r.content.decode('utf-8'))\n else:\n msg.ERROR(\"HTTP Response {}\\n{}\".format(r.status_code, r.content.decode('utf-8')))\n return None\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"deprecated/advcheck.py","file_name":"advcheck.py","file_ext":"py","file_size_in_byte":4027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"247287095","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import StandardScaler\nimport sklearn.linear_model as lm\nfrom sklearn import cross_validation\nimport matplotlib.pyplot as plts\nfrom sklearn.linear_model import Ridge\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import Lasso\n\n\n\n\n\n\nurl = 'http://statweb.stanford.edu/~tibs/ElemStatLearn/datasets/prostate.data'\ndf = pd.read_csv(url, sep='\\t', header=0)\ndf = df.drop('Unnamed: 0', axis=1)\nistrain_str = df['train']\nistrain = np.asarray([True if s == 'T' else False for s in istrain_str])\nistest = np.logical_not(istrain)\ndf = df.drop('train', axis=1)\nscaler = StandardScaler()\ndf_scaled = pd.DataFrame(scaler.fit_transform(df), columns=df.columns)\ndf_scaled['lpsa'] = df['lpsa']\nX = df_scaled.ix[:,:-1]\nN = X.shape[0]\nX.insert(X.shape[1], 'intercept', np.ones(N))\ny = df_scaled['lpsa']\nnames_regressors = [\"Lcavol\", \"Lweight\", \"Age\", \"Lbph\", \"Svi\", \"Lcp\", \"Gleason\", \"Pgg45\"]\n\n\n\n\nXtrain = X[istrain]\nytrain = y[istrain]\n\nXtest = X[np.logical_not(istrain)]\nytest = y[np.logical_not(istrain)]\nalphas_ = np.logspace(2,-2,base=10)\ncoefs = []\nmodel = Lasso(fit_intercept=False)\nmse_test = []\nmse_train = []\nfor a in alphas_:\n model.set_params(alpha=a)\n model.fit(Xtrain, ytrain)\n yhat_train = model.predict(Xtrain)\n yhat_test = model.predict(Xtest)\n mse_train.append(np.mean(np.power(yhat_train - ytrain, 2)))\n mse_test.append(np.mean(np.power(yhat_test - ytest, 2)))\nax = plt.gca()\nax.plot(alphas_,mse_train,label='train error lasso')\nax.plot(alphas_,mse_test,label='test error 
lasso')\nplt.legend(loc=2)\nax.set_xscale('log')\nax.set_xlim(ax.get_xlim()[::-1])\nplt.show()\n","sub_path":"Código/parte_3_d.py","file_name":"parte_3_d.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"312381992","text":"# Receiver Code\nimport gps\nimport time\n# Import Xbee Python Library, Install From: https://xbplib.readthedocs.io/en/latest/getting_started_with_xbee_python_library.html\nfrom digi.xbee.devices import XBeeDevice, RemoteXBeeDevice, XBee64BitAddress\n#\n#\n# Instantiate receiver Xbee device object\n# Replace COM1 with XBee Device Port, usually starts with /dev/tty\nxBeeLocation = \"/dev/serial0\"\nreceiver = XBeeDevice(xBeeLocation, 9600)\nreceiver.open()\n\n# Instantiate a remote XBee device object.\nremote_device = RemoteXBeeDevice(receiver, XBee64BitAddress.from_hex_string(\"0013A20041C7BFD1\"))\n\n\n# Take data and parse down to different variable for each attribute.\nwhile(True):\n data_variable = receiver.read_data(remote_device)\n if(data_variable is None):\n print('No Data Found')\n else:\n dataString = data_variable.data.decode(\"utf-8\")\n attributes = dataString.split(\",\")\n longitude = float(attributes[0])\n latitude = float(attributes[1])\n altitude = float(attributes[2])\n velocity = float(attributes[3])\n time.sleep(3)\n\nreceiver.close()\n\n# Datalogging can be done here","sub_path":"xBee_receiver.py","file_name":"xBee_receiver.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"307653339","text":"n=10240099\nb1=(n & 0xff000000)\nb2=(n & 0xff0000)\nb3=(n & 0xff00)\nb4=n & 0xff\nprint(b1,b2,b3,b4)\nb1=(n & 0xff000000) >> 24\nb2=(n & 0xff0000) >> 16\nb3=(n & 0xff00) >>8\nb4=n & 0xff\nprint(b1,b2,b3,b4)\nbs=bytes([b1,b2,b3,b4])\nprint(bs)\nprint()\n# Python提供了一个struct模块来解决bytes和其他二进制数据类型的转换\nimport struct\nprint(struct.pack('>I',10240099))\n# pack的第一个参数是处理指令,'>I'的意思是:\n# >表示字节顺序是big-endian,也就是网络序,I表示4字节无符号整数\nprint(struct.unpack('>IH', b'\\xf0\\xf0\\xf0\\xf0\\x80\\x80'))\n# 根据>IH的说明,后面的bytes依次变为I:4字节无符号整数和\n# H:2字节无符号整数\ns = b'\\x42\\x4d\\x38\\x8c\\x0a\\x00\\x00\\x00\\x00\\x00\\x36\\x00\\x00\\x00\\x28\\x00\\x00\\x00\\x80\\x02\\x00\\x00\\x68\\x01\\x00\\x00\\x01\\x00\\x18\\x00'\nprint(s)\n# BMP格式采用小端方式存储数据,文件头的结构按顺序如下:\n# 两个字节:'BM'表示Windows位图,'BA'表示OS/2位图;\n# 一个4字节整数:表示位图大小;\n# 一个4字节整数:保留位,始终为0;\n# 一个4字节整���:实际图像的偏移量;\n# 一个4字节整数:Header的字节数;\n# 一个4字节整数:图像宽度;\n# 一个4字节整数:图像高度;\n# 一个2字节整数:始终为1;\n# 一个2字节整数:颜色数。\n# 所以,组合起来用unpack读取:\nprint(struct.unpack(' n_s:\n if self.verbose:\n print(\"at_once is greater than n_s - setting to n_s. 
You \\\nshould consider uploading data as a single tensor.\")\n fiducial_at_once = n_s\n else:\n fiducial_at_once = value\n if value > n_d * n_params * 2:\n if self.verbose:\n print(\"at_once is greater than n_d * n_params * 2 - setting to\\\n n_d * n_params * 2.\")\n derivative_at_once = n_d * n_params * 2\n elif value > n_d:\n if self.verbose:\n print(\"at_once is greater than n_d - setting at_once to n_d \\\nfor derivatives\")\n derivative_at_once = n_d\n else:\n derivative_at_once = value\n return fiducial_at_once, derivative_at_once\n\n def check_input(self, value):\n value = self.type_checking(value, tuple(), \"input_shape\")\n for i, val in enumerate(value):\n _ = self.type_checking(val, 1,\n \"index \" + str(i) + \" of input_shape\")\n if self.verbose:\n print(\"input shape will be \" + str(value))\n return value\n\n def isnotebook(self, tqdm_notebook):\n tqdm_notebook = self.type_checking(tqdm_notebook, True,\n \"tqdm_notebook\")\n if not tqdm_notebook:\n return False\n else:\n try:\n shell = get_ipython().__class__.__name__\n if shell == 'ZMQInteractiveShell':\n return True # Jupyter notebook or qtconsole\n elif shell == 'TerminalInteractiveShell':\n return False # Terminal running IPython\n else:\n return False # Other type (?)\n except NameError:\n return False\n","sub_path":"IMNN/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"386912460","text":"\"\"\"\nThis file is only Used to build the .so files with \"make build\".\n\nThis setup.py is NOT used to install the Bottlechest package. The Bottlechest\nsetup.py file is bottlechest/setup.py\n\"\"\"\n\nimport os\nimport os.path\nfrom distutils.core import setup\nfrom distutils.extension import Extension\nimport numpy as np\n\n# Is the default numpy int 32 or 64 bits?\nif np.int_ == np.int32:\n bits = '32'\nelif np.int_ == np.int64:\n bits = '64'\nelse:\n raise ValueError(\"Your OS does not appear to be 32 or 64 bits.\")\n\n\ncfiles = [ a[:-2] for a in os.listdir(\"bottlechest/src/func/%sbit/\" % bits) \\\n if a.endswith(\".c\") ]\n\nextensions = [ Extension(cf,\n sources=[\"bottlechest/src/func/%sbit/%s.c\" % (bits, cf)],\n include_dirs=[np.get_include()]) for cf in cfiles ]\n\nsetup(\n name = 'func',\n ext_package= \"bottlechest\",\n ext_modules = extensions\n)\n\n","sub_path":"bottlechest/src/func/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"279966452","text":"\nfrom pathlib import Path\nimport sys\nfrom argparse import ArgumentParser\nimport random\nimport logging\nfrom importlib import util\n\nimport numpy as np\nimport pandas as pd\n\nimport torch\n\nfrom ignite.engines import Events, Engine\nfrom ignite.handlers import Timer\n\n# Load common module\nsys.path.insert(0, Path(__file__).absolute().parent.parent.as_posix())\nfrom common import setup_logger, save_conf\nfrom common.figures import create_fig_target_distribution_per_batch, \\\n create_fig_targets_distribution, create_fig_samples_min_avg_max_per_batch, \\\n create_fig_samples_param_per_batch\n\n\ndef create_dataflow_checker():\n\n def _update(engine, batch):\n return batch\n\n return Engine(_update)\n\n\ndef load_config(config_filepath):\n assert Path(config_filepath).exists(), \"Configuration file '{}' is not found\".format(config_filepath)\n # Load custom module\n spec = 
util.spec_from_file_location(\"config\", config_filepath)\n custom_module = util.module_from_spec(spec)\n spec.loader.exec_module(custom_module)\n config = custom_module.__dict__\n assert \"DATA_LOADER\" in config, \"DATA_LOADER parameter is not found in configuration file\"\n assert \"OUTPUT_PATH\" in config, \"OUTPUT_PATH is not found in the configuration file\"\n assert \"N_EPOCHS\" not in config, \"Number of epochs N_EPOCHS should not be specified in the configuration file\"\n config[\"N_EPOCHS\"] = 1\n assert \"N_CLASSES\" in config, \"Number of classes N_CLASSES should be specified in the configuration file\"\n return config\n\n\ndef run(config_file):\n print(\"--- Check dataflow --- \")\n\n print(\"Load config file ... \")\n config = load_config(config_file)\n\n seed = config.get(\"SEED\", 2018)\n random.seed(seed)\n torch.manual_seed(seed)\n\n output = Path(config[\"OUTPUT_PATH\"])\n debug = config.get(\"DEBUG\", False)\n\n from datetime import datetime\n now = datetime.now()\n log_dir = output / (\"check_dataflow_{}\".format(now.strftime(\"%Y%m%d_%H%M\")))\n if not log_dir.exists():\n log_dir.mkdir(parents=True)\n\n log_level = logging.INFO\n if debug:\n log_level = logging.DEBUG\n print(\"Activated debug mode\")\n\n logger = logging.getLogger(\"Check dataflow\")\n setup_logger(logger, (log_dir / \"check.log\").as_posix(), log_level)\n\n save_conf(config_file, log_dir, logger)\n\n cuda = torch.cuda.is_available()\n if cuda:\n logger.debug(\"CUDA is enabled\")\n from torch.backends import cudnn\n cudnn.benchmark = True\n\n logger.debug(\"Setup data loader\")\n data_loader = config[\"DATA_LOADER\"]\n\n logger.debug(\"Setup ignite dataflow checker\")\n dataflow_checker = create_dataflow_checker()\n\n logger.debug(\"Setup handlers\")\n # Setup timer to measure training time\n timer = Timer(average=True)\n timer.attach(dataflow_checker,\n start=Events.EPOCH_STARTED,\n pause=Events.ITERATION_COMPLETED,\n resume=Events.ITERATION_STARTED)\n\n n_classes = config[\"N_CLASSES\"]\n n_batches = len(data_loader)\n\n n_channels = 3\n y_counts_per_batch = np.zeros((n_batches, n_classes), dtype=np.int)\n x_mins_per_batch = np.zeros((n_batches, n_channels), dtype=np.float)\n x_maxs_per_batch = np.zeros((n_batches, n_channels), dtype=np.float)\n x_avgs_per_batch = np.zeros((n_batches, n_channels), dtype=np.float)\n x_shapes_per_batch = np.empty((n_batches, 1), dtype=np.object)\n x_dtypes_per_batch = np.empty((n_batches, 1), dtype=np.object)\n\n def log_dataflow_iteration(engine, y_counts_per_batch):\n x, y = engine.state.output\n curr_iter = engine.state.iteration - 1\n y_counts_per_batch[curr_iter, :] = np.bincount(y.numpy(), minlength=n_classes)\n for i in range(n_channels):\n x_mins_per_batch[curr_iter, i] = x[:, i, :, :].min()\n x_maxs_per_batch[curr_iter, i] = x[:, i, :, :].max()\n x_avgs_per_batch[curr_iter, i] = torch.mean(x[:, i, :, :])\n x_shapes_per_batch[curr_iter, 0] = str(list(x.shape[1:]))\n x_dtypes_per_batch[curr_iter, 0] = type(x).__name__\n\n if curr_iter % 100 == 0:\n logger.debug(\"Iteration[{}/{}]\".format(curr_iter, len(data_loader)))\n\n dataflow_checker.add_event_handler(Events.ITERATION_COMPLETED, log_dataflow_iteration, y_counts_per_batch)\n\n def log_dataflow_epoch(engine):\n logger.info(\"One epoch dataflow time (seconds): {}\".format(timer.value()))\n\n dataflow_checker.add_event_handler(Events.EPOCH_COMPLETED, log_dataflow_epoch)\n\n n_epochs = config[\"N_EPOCHS\"]\n logger.debug(\"Start dataflow checking: {} epochs\".format(n_epochs))\n try:\n 
dataflow_checker.run(data_loader, max_epochs=n_epochs)\n except KeyboardInterrupt:\n logger.info(\"Catched KeyboardInterrupt -> exit\")\n exit(0)\n except Exception as e: # noqa\n logger.exception(\"\")\n if debug:\n try:\n # open an ipython shell if possible\n import IPython\n IPython.embed() # noqa\n except ImportError:\n print(\"Failed to start IPython console\")\n raise e\n\n logger.debug(\"Dataflow check is ended\")\n\n logger.debug(\"Create and write y_counts_per_batch.csv\")\n cols = [\"class_{}\".format(i) for i in range(n_classes)]\n y_counts_df = pd.DataFrame(y_counts_per_batch, columns=cols)\n y_counts_df.to_csv((log_dir / \"y_counts_per_batch.csv\").as_posix(), index=False)\n\n # Save figure of total target distributions\n logger.debug(\"Save figure of target distributions per batch\")\n fig = create_fig_target_distribution_per_batch(y_counts_df=y_counts_df, n_classes_per_fig=20)\n fig.savefig((log_dir / \"target_distribution_per_batch.png\").as_posix())\n\n logger.debug(\"Save figure of total targets distributions\")\n fig = create_fig_targets_distribution(y_counts_df, n_classes_per_fig=20)\n fig.savefig((log_dir / \"targets_distribution.png\").as_posix())\n del y_counts_df\n del y_counts_per_batch\n\n logger.debug(\"Create and write x_stats_df.csv\")\n min_cols = [\"b{}_min\".format(i) for i in range(n_channels)]\n avg_cols = [\"b{}_avg\".format(i) for i in range(n_channels)]\n max_cols = [\"b{}_max\".format(i) for i in range(n_channels)]\n cols = min_cols + avg_cols + max_cols + [\"shape\", \"dtype\"]\n x_stats_df = pd.DataFrame(columns=cols, index=np.arange(n_batches), dtype=np.float)\n x_stats_df[min_cols] = x_mins_per_batch\n x_stats_df[avg_cols] = x_avgs_per_batch\n x_stats_df[max_cols] = x_maxs_per_batch\n x_stats_df[\"shape\"] = x_shapes_per_batch\n x_stats_df[\"dtype\"] = x_dtypes_per_batch\n x_stats_df.to_csv((log_dir / \"x_stats_df.csv\").as_posix(), index=False)\n\n # Save figure with sample mins, avgs, maxs\n logger.debug(\"Save figure with sample mins, avgs, maxs\")\n fig = create_fig_samples_min_avg_max_per_batch(x_stats_df, min_cols, avg_cols, max_cols)\n fig.savefig((log_dir / \"samples_min_avg_max_per_batch.png\").as_posix())\n\n logger.debug(\"Save figure with sample shapes\")\n fig = create_fig_samples_param_per_batch(x_stats_df, \"shape\")\n fig.savefig((log_dir / \"samples_shape_per_batch.png\").as_posix())\n\n logger.debug(\"Save figure with sample dtypes\")\n fig = create_fig_samples_param_per_batch(x_stats_df, \"dtype\")\n fig.savefig((log_dir / \"samples_dtype_per_batch.png\").as_posix())\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser(\"Script to create statistics of the dataflow\")\n parser.add_argument(\"config_file\", type=str, help=\"Configuration file. 
See examples in configs/\")\n args = parser.parse_args()\n run(args.config_file)\n","sub_path":"classification/imaterialist_challenge_furniture_2018/utils/check_dataflow.py","file_name":"check_dataflow.py","file_ext":"py","file_size_in_byte":7563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"100809983","text":"# coding:utf-8\nfrom common.window_effect import WindowEffect\nfrom PyQt5.QtCore import QEasingCurve, QPropertyAnimation, QRect, Qt\n\nfrom .navigation_widget import NavigationWidget\n\n\nclass NavigationMenu(NavigationWidget):\n \"\"\" 导航菜单 \"\"\"\n\n def __init__(self, parent=None):\n super().__init__(parent)\n # 是否削减设置按钮底部空白标志位\n self.__isShowBottomSpacing = False\n self.__ani = QPropertyAnimation(self, b\"geometry\")\n # 创建窗口效果\n self.windowEffect = WindowEffect()\n self.__initWidget()\n\n def __initWidget(self):\n \"\"\" 初始化小部件 \"\"\"\n self.resize(60, 800)\n self.setWindowFlags(Qt.NoDropShadowWindowHint | Qt.Popup)\n self.windowEffect.setAcrylicEffect(self.winId(), \"F2F2F299\", False)\n # 强制刷新按钮文字\n self.myMusicButton.setText(self.tr('My music'))\n self.historyButton.setText(self.tr(\"Recent plays\"))\n self.playingButton.setText(self.tr('Now playing'))\n self.playlistButton.setText(self.tr('Playlists'))\n self.settingButton.setText(self.tr('Settings'))\n # 信号连接到槽\n self.switchToPlaylistInterfaceSig.connect(self.aniHide)\n self.myMusicButton.clicked.connect(self.aniHide)\n self.settingButton.clicked.connect(self.aniHide)\n\n def resizeEvent(self, e):\n \"\"\" 调整小部件尺寸 \"\"\"\n super().resizeEvent(e)\n self.scrollArea.resize(self.width(), self.height() - 232)\n self.settingButton.move(\n 0, self.height() - 62 - 10 - self.__isShowBottomSpacing * 115)\n self.searchLineEdit.resize(\n self.width() - 30, self.searchLineEdit.height())\n\n def aniShow(self):\n \"\"\" 动画显示 \"\"\"\n super().show()\n self.activateWindow()\n self.searchLineEdit.show()\n self.__ani.setStartValue(QRect(self.x(), self.y(), 60, self.height()))\n self.__ani.setEndValue(QRect(self.x(), self.y(), 400, self.height()))\n self.__ani.setEasingCurve(QEasingCurve.InOutQuad)\n self.__ani.setDuration(85)\n self.__ani.start()\n\n def aniHide(self):\n \"\"\" 动画隐藏 \"\"\"\n self.__ani.setStartValue(QRect(self.x(), self.y(), 400, self.height()))\n self.__ani.setEndValue(QRect(self.x(), self.y(), 60, self.height()))\n self.__ani.finished.connect(self.__hideAniFinishedSlot)\n self.__ani.setDuration(85)\n self.searchLineEdit.hide()\n self.__ani.start()\n\n def __hideAniFinishedSlot(self):\n \"\"\" 隐藏窗体的动画结束 \"\"\"\n super().hide()\n self.resize(60, self.height())\n self.__ani.disconnect()\n\n def setBottomSpacingVisible(self, isBottomSpacingVisible: bool):\n \"\"\" 是否削减设置按钮底部空白 \"\"\"\n self.__isShowBottomSpacing = isBottomSpacingVisible\n\n def _onSearchButtonClicked(self):\n \"\"\" 搜索按钮点击槽函数 \"\"\"\n text = self.searchLineEdit.text()\n if text:\n self.aniHide()\n self.currentButton.setSelected(False)\n self.searchSig.emit(text)\n\n @property\n def isShowBottomSpacing(self):\n return self.__isShowBottomSpacing\n","sub_path":"app/View/navigation_interface/navigation_menu.py","file_name":"navigation_menu.py","file_ext":"py","file_size_in_byte":3209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"327300640","text":"#!/usr/bin/env python\n\nimport gtk\n\nwindow = gtk.Window ()\nbox = gtk.VButtonBox ()\n\nfor k in range (10):\n button = gtk.Button ('button %d' % k)\n if k % 2 == 0:\n button.props.relief = 
gtk.RELIEF_NONE\n\n box.add (button)\n\nwindow.add (box)\nwindow.show_all ()\n\ngtk.main ()\n","sub_path":"bin/pyminer/testnew.py","file_name":"testnew.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"416349229","text":"'''\ntransform_stl is used to fit the orientation and position of the STL in the \nglobal frame of the position sensor.\nThe file history_guide_along_model2.p has the guide information \nbeing run over the actual model. \n\nJake Sganga\n8/5/16\n\nrename, edit and move to control_robot 8/25/2016\n\nto look at plots, use common_plots.py\n\nexpects to be run from data_analysis folder's common_plots.py\n'''\nimport sys, time\nimport matplotlib.pyplot as plt\n# import seaborn as sns # importing this here makes changing plot settings in jupyter hard\nimport numpy as np\nimport pickle\n\nfrom data_analysis.load_history import history_data\nfrom functions.computer_specific_paths import computer_paths\ncomputer_paths = computer_paths()\n\ndef get_transformed_lung():\n data = history_data(file = 'history_guide_fit_sparse.p', folder = computer_paths.lung_folder)\n with open(computer_paths.lung_folder + 'zunu_airtree.p', \"rb\" ) as input_file:\n lung = pickle.load(input_file)\n\n x_offset = 40\n y_offset = 52\n z_offset = -230\n\n offset = np.array([x_offset, y_offset, z_offset])\n R_lung = np.array([[0, 0, 1],\n [-1, 0, 0],\n [0, -1, 0]])\n\n lung_transformed = np.array([R_lung.dot(l) + offset for l in lung])\n return lung_transformed, data.x_guide\n\n","sub_path":"catheter_simulation/lung_fit/transform_lung.py","file_name":"transform_lung.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"556601305","text":"from selenium import webdriver\n\ndriver = webdriver.Chrome()\ndriver.get(\"https://rahulshettyacademy.com/AutomationPractice/\")\n\ncheckboxes = driver.find_elements_by_xpath(\"//input[@type='checkbox']\")\nprint(checkboxes)\n\nfor checkbox in checkboxes:\n checkbox.click()\n assert checkbox.is_selected()\n\n# if you want to select one specific checkbox, this is a clean way to do so:\n\n# for checkbox in checkboxes:\n# if checkbox.get_attribute(\"value\") == \"option1\":\n# checkbox.click()\n# assert checkbox.is_selected()\n\n# or, using list:\n# checkboxes[1].click()\n","sub_path":"checkbox.py","file_name":"checkbox.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"608150939","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 7 23:45:43 2020\n\n@author: Guy\n\"\"\"\n\nimport cv2\nimport numpy as np\nimport math\nimport pyautogui\nimport os\n\npyautogui.FAILSAFE = False\n\n#check screen size\nscreenWidth,screenHeight = pyautogui.size();\n\ncap = cv2.VideoCapture(0)\nwhile(cap.isOpened()):\n path, dirs, files = next(os.walk(\"./dataset/four/\"))\n file_count = len(files)\n \n # read image\n ret, img = cap.read()\n \n # get hand data from the rectangle sub window on the screen\n cv2.rectangle(img, (200,70), (550,400), (0,255,0),0)\n crop_img = img[70:400, 200:550]\n\n # convert to grayscale\n grey = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)\n\n # applying gaussian blur\n value = (35, 35)\n blurred = cv2.GaussianBlur(grey, value, 0)\n\n # thresholdin: Otsu's Binarization method\n _, thresh1 = cv2.threshold(blurred, 127, 255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)\n\n # show 
thresholded image\n cv2.imshow('Thresholded', thresh1)\n\n # check OpenCV version to avoid unpacking error\n (version, _, _) = cv2.__version__.split('.')\n\n if version == '3':\n image, contours, hierarchy = cv2.findContours(thresh1.copy(), \\\n cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n elif version == '2':\n contours, hierarchy = cv2.findContours(thresh1.copy(),cv2.RETR_TREE, \\\n cv2.CHAIN_APPROX_NONE)\n\n # show appropriate images in windows\n cv2.imshow('Gesture', img)\n \n\n k = cv2.waitKey(10)\n if k == 27:\n break\n elif k%256 == 32:\n # SPACE pressed\n img_name = \"dataset/four/fourfinger{}.jpeg\".format(file_count)\n cv2.imwrite(img_name, thresh1)\n print(\"{} written!\".format(img_name))\n print(file_count)\n ","sub_path":"capture.py","file_name":"capture.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"69155267","text":"# Helper Cleaning Functions\n\nimport pandas as pd\nimport numpy as np\nimport re\nimport datetime\nimport datetime as dt\nimport xlrd\nimport pickle\nfrom sklearn import preprocessing\n\ndef weight_dur_age_clean(df,dur_na=-999999,age_na=-99.,weight_perc_cutoff=0.2):\n \"\"\"\n This function adds duration and age columns and naively\n cleans the weight_change_since_admit.\n duration (type int, expressed in days) is difference of discharge_date and\n admission_date if discharge is true, otherwise duration is time since admission_date.\n age (type int, expressed in years) is deduced from birth date\n\n If weight_change_since_admit/ weight < 0.2:\n weight_change_since_admit (float, expressed in pounds) is divided by 10\n otherwise it is unchanged.\n Mutating function\n Author: Aungshuman\n \"\"\"\n ## duration\n today = pd.to_datetime('today')\n df.loc[df['discharge']==True,'duration'] = (pd.to_datetime(df.loc[df['discharge']==True, \\\n 'discharge_date']) - pd.to_datetime(df.loc[df['discharge']==True,'enrollment_date']))\n df.loc[df['discharge']==False,'duration'] = (today - \\\n pd.to_datetime(df.loc[df['discharge']==False,'enrollment_date']))\n df.duration = df.duration.fillna(dur_na)\n try:\n df['duration'] = (df['duration'] / np.timedelta64(1, 'D')).astype(int)\n df.loc[(df['duration']<1)&(df['duration']!=dur_na), 'duration']=dur_na\n except:\n print(df['duration'])\n ##age\n df['age'] = (today - pd.to_datetime(df['date_of_birth'])).apply(lambda x: \\\n float(x.days)/365).fillna(age_na).astype(int)\n df.loc[(df.age<1)&(df.age!=age_na), 'age'] = age_na\n ## weight_change_since_admit\n df['weight_change_since_admit'] = np.where(abs(df['weight_change_since_admit']/ \\\n df['weight']) < weight_perc_cutoff, df['weight_change_since_admit'], df['weight_change_since_admit']/10)\n\ndef find_duration(discharge, enroll_date, discharge_date):\n \"\"\"\n duration (type float, expressed in days) is difference of discharge_date and\n admission_date if discharge is true, otherwise duration is time since admission_date.\n Non mutating Function\n Author: Aungshuman\n Use like: df['duration']=df.apply(lambda row: find_duration(row['discharge'],\n row['enrollment_date'],row['discharge_date']),axis=1)\n \"\"\"\n today = datetime.datetime.today()\n if discharge : #True\n x = (discharge_date - enroll_date).days\n else:\n x = (today - enroll_date).days\n return x if x > 0. 
else np.nan\n\ndef find_age(row, threshold = [20., 120.]):\n \"\"\"\n age (type float, expressed in years) is deduced from birth date\n Non mutating Function\n Author: Aungshuman\n Use as df['age'] = df['date_of_birth'].apply(find_age)\n \"\"\"\n today = datetime.datetime.today()\n try:\n x = round((today - row).days/365)\n except ValueError:\n x = np.nan\n\n return x if (x > threshold[0] and x < threshold[1]) else np.nan\n\ndef clean_weight_change(weight, weight_change, threshold=0.25):\n \"\"\"\n If abs(weight_change)/ weight > 0.2:\n weight_change (float, expressed in pounds) is recursively divided by 10 until abs(weight_change)/ weight < 0.2\n Non Mutating Function\n Author: Aungshuman\n Use like df['weight_change_since_admit'] = df.apply(lambda row: clean_weight_change(row['weight'],row['weight_change_since_admit']),axis=1)\n \"\"\"\n if abs(weight_change)/weight < threshold:\n return weight_change\n else:\n #while abs(weight_change)/weight > threshold:\n # weight_change /= 10\n #return weight_change\n return np.nan\n\ndef get_frac_weight_change(weight, weight_change, threshold=0.25):\n \"\"\"\n Similar to clean_weight_change, but returns the fractional weight change (can be positive or negative)\n If abs(weight_change)/ weight > 0.2:\n weight_change (float, expressed in pounds) is recursively divided by 10 until abs(weight_change)/ weight < 0.2\n Non Mutating Function\n Author: Aungshuman\n Use like df['weight_change_fraction'] = df.apply(lambda row: get_pct_weight_change(row['weight'],row['weight_change_since_admit']),axis=1)\n \"\"\"\n if abs(weight_change)/weight < threshold:\n return weight_change/weight\n else:\n #while abs(weight_change)/weight > threshold:\n # weight_change /= 10\n #return weight_change/weight\n return np.nan\n\ndef clean_labs(x):\n \"\"\"\n Author: Aungshuman\n Use like df['bun'] = df['bun'].apply(clean_labs)\n \"\"\"\n if x == 0.:\n return np.nan\n else:\n return x\n\ndef get_standardized_columns(df, standardize_cols= ['ef', 'weight',\n 'this_weight_change_frac', 'weight_change_since_admit_frac', 'bnp',\n 'this_bnp_change', 'bun', 'cr', 'potasium',\n 'this_cr_change', 'resting_hr', 'systolic', 'diastolic',\n 'duration', 'age']):\n \"\"\"\n It takes a df, and a list of columns to standardize; returns standardized columns (Warning: HARD CODED)\n Non mutating function\n Author: Aungshuman\n Use like df_with_std = get_standardized_columns(df_before_std)\n \"\"\"\n all_cols = df.columns\n non_cont_cols = [x for x in all_cols if x not in standardize_cols]\n df_numeric_continuous = df[standardize_cols]\n\n #x = df_numeric_continuous.values\n #std_scaler = preprocessing.StandardScaler()\n #x_scaled = std_scaler.fit_transform(x)\n x_scaled = preprocessing.StandardScaler().fit_transform(df_numeric_continuous.values)\n\n numeric_cont_columns = df_numeric_continuous.columns #Saving column information\n df_numeric_continuous = pd.DataFrame(x_scaled)\n df_numeric_continuous.columns = numeric_cont_columns\n df_numeric_continuous.index = df.index #Using index information from df. 
#This is IMPORTANT for concatenation later\n\n df_withstd = pd.concat([df[non_cont_cols],df_numeric_continuous], axis =1)\n return df_withstd\n\ndef get_log_transformed_columns(x, cols = [ 'weight', 'bnp', 'bun', 'cr', 'duration']):\n \"\"\"\n Non mutating function\n Author: Aungshuman\n Use like df_log = df.apply(get_log_transformed_columns)\n \"\"\"\n return np.log1p(x) if x.name in cols else x # compare the column name, not the literal string 'x'\n\ndef clean_gender(x):\n \"\"\"\n Cleans Gender, waiting on Shani for imputing gender based on Patient First Name\n use with apply(lambda)\n \"\"\"\n if x ==\"Male\":\n return 1\n if x==\"Female\":\n return 0\n else:\n return x\n\ndef impute_acute_chronic(x,duration):\n \"\"\"\n Returns 1 or 0 for Acute/Chronic, calculates based on duration if empty.\n use as df.apply(lambda row: impute_acute_chronic(row['acute_or_chronic'],row['duration']),axis=1)\n \"\"\"\n if x==\"Acute\":\n x=1\n elif x ==\"Chronic\":\n x=0\n\n if (np.isnan(x)) & (np.isnan(duration)==False):\n if duration >=30:\n return 0\n elif duration <30:\n return 1\n else:\n return x\n\ndef med_aicd_clean(df, var, impute):\n \"\"\" Mutating Function\n Use as: med_aicd_clean(df,'ace', 0) for all medicines\n \"\"\"\n #lowercase all values\n df[var]=df[var].str.lower()\n\n #fill missing w/impute value\n print('num missing', df[var].isna().sum())\n df[var]=df[var].fillna(impute)\n print('value counts before zero and one assignment:', df[var].value_counts())\n\n #set all values that indicate absence of value to zero\n none_values=list(set(df.loc[df[var].str.contains('none', na=False)][var].tolist()))\n no_values=list(set(df.loc[df[var].apply(lambda x: search_for_nos(x)) & ~df[var].str.contains('if no relief', na=False)][var].tolist()))\n allergy_values=list(set(df.loc[df[var].str.contains('allergic', na=False)][var].tolist()))\n zero_values=none_values+allergy_values+no_values\n print('zero values:', zero_values)\n df.loc[df[var].isin(zero_values),var]=0\n df.loc[df[var].isin(['0']), var]=0\n df.loc[df[var].isin(['acute']), var]=0\n\n #set all other values to 1\n allowed_vals=[0, impute]\n print(\"Values set to 1.0: \\n\", list(set(df.loc[~df[var].isin(allowed_vals), var].tolist())))\n df.loc[~df[var].isin(allowed_vals), var] = 1\n\n df[var]=df[var].astype(float)\n\n print(df[var].value_counts())\n\n # return df\n\ndef search_for_nos(x):\n \"\"\" Searches for 'no' in the input variable\n Use as df.loc[df[var].apply(lambda x: search_for_nos(x))]\n \"\"\"\n try:\n return re.search(r'\\bno\\b',x.lower())!= None\n except:\n return False\n\ndef remove_cardiac_unrelated(df):\n \"\"\" Remove rows that are not cardiac related\n Mutating function\n \"\"\"\n ind_cardiac=df.loc[df['cardiac_related']==False].index\n if len(ind_cardiac)!=0:\n # print and remove them\n for i in ind_cardiac:\n print(\"Removing Cardiac Unrelated Row: \"+str(i)+\"\\n\")\n try:\n print(df.iloc[i][['enrollId','patient_link','Enrollment_Date','status','name','cardiac_related']])\n except:\n print(df.iloc[i][['enrollId','patient_link','cardiac_related']])\n print('-'*50)\n # now remove them\n df.drop(ind_cardiac,axis=0,inplace=True)\n # reset the index before moving on\n df=df.reset_index()\n print('\\n \\n Dropped '+str(len(ind_cardiac))+' rows from the dataset')\n print('New size of dataset: '+str(df.shape))\n # return df\n\ndef determine_outcome(status,discharge,discharge_date,outcome_dict={\n 'Good':['To Home','Assissted Living Facility','Assisted Living Facility','No Reason Given'], # CAN WE ASSUME THIS??? 
that In Nursing Facility\n 'Bad':['Hospital','Death'],\n 'Test':['In Nursing Facility','Skilled Nursing Facility (SNF)',\n 'Not approriate for program, removed','Not approriate for program, removed']}):\n \"\"\"\n use as: df['outcome']=df.apply(lambda row: determine_outcome(row['status'],row['discharge'],row['discharge_date']),axis=1)\n Takes a dictionary of statuses and divides into Postive, Negative and unknown outcomes (Test set)\n \"\"\"\n if status in outcome_dict['Good']:\n return 1\n elif status in outcome_dict['Bad']:\n return 0\n elif status in outcome_dict['Test']:\n return None\n # discharged but outcome unknown\n elif discharge==True:\n print(\"Setting outcome to 2 for patients that have been discharged but we don't have a status on them\")\n return 2\n elif discharge==False:\n return None\n\ndef train_test_split_sg(df):\n \"\"\"\n returns two datasets, train and test\n \"\"\"\n train_ind=df.loc[df.outcome.isnull()!=True].index\n train_df=df.iloc[train_ind]\n train_df=train_df.reset_index().drop('index',axis=1)\n test_ind=df.loc[df.outcome.isnull()].index\n test_df=df.iloc[test_ind].reset_index().drop('index',axis=1)\n return train_df, test_df\n\ndef ef_deep_clean(x):\n \"\"\" helper function to clean_EF_rows\n extracts any digits from string\n recursively calls itself if there are too many digits\n \"\"\"\n # remove EF from a previous record\n if re.search('previous',x):\n ind,__=re.search('previous',x).span()\n return ef_deep_clean(x[:ind])\n if re.search('(/)',x):\n ind,__=re.search('(/)',x).span()\n return ef_deep_clean(x[:ind-1])\n else: # Creates a list of digits\n tmp_dig=re.findall('\\\\b\\\\d+\\\\b', x)\n if len(tmp_dig)>2:\n print(\"Couldn't extract EF so set to na_val\")\n print(x)\n return clean_EF_rows('pending')\n if len(tmp_dig)==2:\n return (float(tmp_dig[0])+float(tmp_dig[1]))/200.0\n if len(tmp_dig)==1:\n return clean_EF_rows(tmp_dig[0])\n # if there are really no digits, return na_val which corresponds to 'pending'\n if len(tmp_dig)==0:\n return clean_EF_rows('pending')\n\ndef clean_EF_rows(x,na_val=0.49,norm_val=0.55,list_strings=['pending','ordered','done','no data','new admission']):\n \"\"\" For use with a .apply(lambda) to the EF column\n ie. 
df['ef'].apply(lambda x: clean_EF_rows(x))\n Does not change NaN values, only messy string/percentages\n \"\"\"\n #best case scenario: already a decimal or percentage with no sign\n x=str(x).replace('<','')\n x=str(x).replace('>','')\n try:\n if float(x)<0.10:\n print('EF less than 0 set to None')\n return None\n elif float(x)<1:\n return float(x)\n elif float(x)>100:\n return np.nan\n elif float(x)>10:\n return float(x)/100\n except:\n # For the percentages like 55%:\n x=str(x).replace('%','')\n # for percentage ranges like 50-55%\n try:\n st,en=re.search('-',x).span()\n # take the average\n return (float(x[:st])+float(x[en:]))/200.0\n except:\n if x.lower() in list_strings:\n return na_val\n elif re.search('normal',x.lower()):\n return norm_val\n else: # deep clean extracts digits from string text\n return ef_deep_clean(x)\n\ndef hand_dates(x):\n \"\"\"\n takes rows that were accidentally loaded into Excel datetime, which\n is coded by a serial number, so you can code it back to that serial number\n use as: df['resting_hr']=df.resting_hr.apply(lambda x: hand_dates(x))\n \"\"\"\n try:\n return float(x)\n except:\n try:\n date_pd=pd.to_datetime(x)\n return(excel_date(date_pd))\n except:\n print(\"Cannot parse heart rate: \\n\")\n print(x)\n\ndef excel_date(date1):\n \"\"\"\n helper function to hand_dates\n takes rows that were accidentally loaded into Excel datetime, which\n is coded by a serial number, so you can code it back to that serial number\n \"\"\"\n temp = dt.datetime(1899, 12, 30) # Note, not 31st Dec but 30th!\n delta = date1 - temp\n return float(delta.days) + (float(delta.seconds) / 86400)\n\n\ndef clean_diastolic_columns(di_sys,bp,col_type,other_press):\n \"\"\" Imputes diastolic or systolic from the BP columns\n col_type distinguishes between di or sys\n other_press is used to impute an estimated di or sys in the other exists\n Use like: df.apply(lambda row: clean_diastolic_columns(row['Diastolic'],\n row['resting_BP'],col_type='di'),axis=1)\n I chose 2.00 as ratio between systolic and diastolic based on result of:\n np.mean(df.systolic.dropna()/df.diastolic.dropna())\n \"\"\"\n try:\n if np.isnan(di_sys):\n sys_tmp,di_tmp=re.findall('\\\\b\\\\d+\\\\b', bp)\n if col_type=='di':\n print(\"Imputing {},{} from Blood Pressure Column\".format(sys_tmp,di_tmp))\n return float(di_tmp)\n elif col_type=='sys':\n print(\"Imputing {},{} from Blood Pressure Column\".format(sys_tmp,di_tmp))\n return float(sys_tmp)\n else:\n print(\"Error: please correct input variable col_type to be either 'di' or 'sys'\")\n else:\n return di_sys\n except:\n if np.isnan(other_press)==False:\n if col_type=='di':\n print(\"Imputing {},{} from other column\".format(other_press,str(float(other_press)*2.0)))\n return float(other_press)*2.0\n elif col_type=='sys':\n print(\"Imputing {},{} from other column\".format(other_press,str(float(other_press)/2.0)))\n return float(other_press)/2.0\n\ndef choose_most_recent(df,date_col):\n ''' Choose the lab/test result from list of results with least missing values,\n then with most recent date.\n\n Keyword Arguments\n =================\n df -- Pandas DataFrame to choose rows from\n date_col -- Date column in the dataframe to inspect\n\n Returns\n =======\n Pandas DataFrame with a single row for each unique enrollId, should have least\n missing values and, of those, most recent date\n '''\n new_df = pd.DataFrame(columns=df.columns)\n for pat in df.enrollId.unique():\n pat_df = df.loc[df.enrollId==pat]\n # Sum up the missing values in each row and filter the dataframe by 
pat_df = pat_df[pat_df.isna().sum(axis=1)==pat_df.isna().sum(axis=1).min()]\n rows = pat_df.shape[0]\n # Find max dates and return first if more than one exists\n if rows > 1:\n tmp_df = pat_df.loc[pat_df[date_col]==pat_df[date_col].max()].head(1)\n # If only one row, store that row\n elif rows == 1:\n tmp_df = pat_df\n # If for some reason there are no rows, print a message\n else:\n print('Could not find a least missing/most recent row for enrollId: {}'.format(pat))\n continue\n new_df = pd.concat([new_df, tmp_df], axis=0)\n return new_df\n\ndef lower_errors(x):\n try:\n return x.lower()\n except:\n return \"\"\n\ndef find_unique_diag(df_diag_column):\n \"\"\"\n Within text Diagnosis Columns, returns a list of the Unique Diagnoses,\n removing the combinations of diagnoses\n Use as: uniq_diag=find_unique_diag(df.Diagnosis_1)\n and use this output within the dummify diagnoses function\n \"\"\"\n all_diag=df_diag_column.apply(lambda x: lower_errors(x)).unique()\n unique_diag=[]\n for diag in all_diag:\n if len(diag)==0:\n continue\n else:\n unique_diag.append(diag.split(' , '))\n flat_list = [item for sublist in unique_diag for item in sublist]\n unique_diag=pd.Series(flat_list).unique()\n return unique_diag\n\ndef dummify_diagnoses(df,unique_diag,diagnosis_col='diagnosis_1'):\n \"\"\"\n Takes Diagnoses and dummifies them for patients. If a patient has multiple\n diagnoses, will put a 1 in all relevant Diagnoses.\n The kth column is NA, no diagnosis. Maybe we will impute with the mode?\n Use as: dummy_df_diag=dummify_diagnoses(df,uniq_diag)\n\n \"\"\"\n # list.append returns None, so build the header by concatenation instead\n header=unique_diag.tolist()+['enrollId']\n dummy_diag=pd.DataFrame(columns=header)\n\n for row in range(df.shape[0]):\n pat_diag=lower_errors(df.iloc[row][diagnosis_col]).split(' , ')\n # print(pat_diag)\n dict_dummy_diag=dict(zip(unique_diag,np.zeros(len(unique_diag))))\n # dict_dummy_diag['patient_link']=df.iloc[row]['patient_link']\n #pd.DataFrame(np.zeros(len(unique_diag)).reshape(-1),columns=unique_diag)\n for diag in pat_diag:\n if diag in unique_diag:\n dict_dummy_diag[diag]=1\n else:\n continue\n tmp_dummy_diag=pd.DataFrame(dict_dummy_diag, index=[row])\n tmp_dummy_diag['enrollId']=df.iloc[row]['enrollId']\n dummy_diag = pd.concat([dummy_diag,tmp_dummy_diag], axis=0)\n\n return dummy_diag\n\ndef remove_paren(x):\n \"\"\" removes everything from the opening parenthesis onwards (plus the character just before it, usually a space) \"\"\"\n if re.search('\\(',x):\n end,_=re.search('\\(',x).span()\n return x[:end-1]\n else:\n return x\n\ndef impute_from_special_status(status_row,special_row):\n \"\"\" If status is empty and special status is Death, put Death into status\n use like: df.apply(lambda row: impute_from_special_status(row['status'],row['special_status']),axis=1)\n \"\"\"\n try:\n if np.isnan(status_row):\n if special_row=='Death':\n print('Added to status from special status')\n return 'Death'\n else:\n return status_row\n except:\n return status_row\n\ndef remove_invalid_rows(df):\n \"\"\" Takes the dataframe and removes specific instances where we have found\n invalid rows - when there is a row like: 1 2 3 ....\n or a test patient created by multitechvisions\n Check patient name for TEST, or for John Doe and Sally Test\n Should drop row \"create_user\", afterwards\n Mutating function\n \"\"\"\n # find the index of invalid rows\n ind_inv=df.loc[df['enrollId'].apply(lambda x: True if len(str(x))<5 else False)].index\n ind_inv=ind_inv.append(df.loc[df['name'].apply(lambda x: search_for_test(x,'test'))].index)\n 
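# the name-based searches may re-flag rows already caught above; pd.Index.append\n # keeps duplicates, which df.drop below tolerates\n 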
ind_inv=ind_inv.append(df.loc[df['name'].apply(lambda x: search_for_test(x,'john doe'))].index)\n if len(ind_inv)!=0:\n # print and remove them\n for i in ind_inv:\n print(\"removing invalid row: \"+str(i)+\"\\n\")\n try:\n print(df.iloc[i][['patient_link','enrollId','Enrollment_Date','name','create_user']])\n except:\n print(df.iloc[i]['enrollId'])\n print('-'*50)\n # now remove them\n df.drop(ind_inv,axis=0,inplace=True)\n # reset the index in place before moving on (a plain df=df.reset_index()\n # would only rebind the local name and not mutate the caller's frame)\n df.reset_index(inplace=True)\n\n # Could remove this section since now we caught them with the 'test' search. But just in case\n # find observations from Test\n ind_test=df.loc[df['create_user']=='multitechvisions@gmail.com'].index#[['patient_link','Enrollment_Date','name','create_user']]\n if len(ind_test)!=0:\n print(\"removing multitechvisions test rows: \\n\")\n try:\n print(df.iloc[ind_test][['enrollId','patient_link','Enrollment_Date','name','create_user']])\n except:\n print(df.iloc[ind_test]['enrollId'])\n print('-'*50)\n df.drop(ind_test,axis=0,inplace=True)\n print('\\n \\n Dropped '+str(len(ind_inv)+len(ind_test))+' rows from the dataset')\n print('New size of dataset: '+str(df.shape))\n # return df\n\n# maybe there's an errors coerce function\ndef search_for_test(x,search_word):\n \"\"\" Handles errors and searches for search_word in the input variable\n Use as df.loc[df['name'].apply(lambda x: search_for_test(x,'test'))]\n \"\"\"\n try:\n return re.search(search_word,x.lower()) is not None\n except:\n return False\n\ndef datetime_fixer(date_list):\n \"\"\"\n Converts a list (or Pandas Series) to datetime objects\n\n Keyword Arguments\n =================\n date_list -- A list or Pandas Series containing date-like elements\n\n Returns\n =======\n List of dates all with datetime data type\n \"\"\"\n # Checks if object is a Pandas Series and converts it to a list if true\n if isinstance(date_list, pd.core.series.Series):\n date_list = list(date_list)\n\n nats_added = 0\n\n for i in range(len(date_list)):\n # If the date is not a datetime\n if not isinstance(date_list[i], datetime.datetime):\n # If this date is an int\n if isinstance(date_list[i], int):\n if date_list[i] > 1000:\n # Convert Excel style date to datetime\n date_list[i] = datetime.datetime(*xlrd.xldate_as_tuple(date_list[i], 0))\n else:\n date_list[i] = np.datetime64('NaT')\n # If this date is a string\n elif isinstance(date_list[i], str):\n # Try to convert to datetime using this format\n try:\n date_list[i] = datetime.datetime.strptime(date_list[i], '%m/%d/%Y')\n # If error, try to_datetime method\n except:\n try:\n date_list[i] = pd.to_datetime(date_list[i])\n # If error, set as NaT\n except:\n date_list[i] = np.datetime64('NaT')\n # If any other case, set as NaT\n else:\n date_list[i] = np.datetime64('NaT')\n # If date was stored as NaT, increase counter (NaT != NaT, so an\n # equality check would never match; use pd.isna instead)\n if pd.isna(date_list[i]):\n nats_added += 1\n\n print('{} NaT added to list'.format(nats_added))\n return date_list\n\ndef read_pkl(pkl_path):\n \"\"\"Reads a pickle from file path and returns the object\n\n Keyword Arguments\n =================\n pkl_path -- Path to pickle file\n\n Returns\n =======\n Object stored in pickle file\n \"\"\"\n with open(pkl_path, 'rb') as f:\n return pickle.load(f)\n\ndef write_pkl(my_obj, output_path):\n \"\"\"Writes an object to a pickle\n WARNING: OVERWRITES FILE\n\n Keyword Arguments\n =================\n my_obj -- Object to be written to a pickle\n output_path -- Designated file name to be saved as\n WILL OVERWRITE FILE\n\n Returns\n =======\n Prints that the file was saved\n \"\"\"\n\n 
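# mode 'wb' truncates any existing file on open, hence the overwrite warning above\n 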
with open(output_path, 'wb') as f:\n pickle.dump(my_obj, f)\n print('Object saved to path \"{}\"'.format(output_path))\n\ndef drop_date_cols(df):\n \"\"\"Drops date columns (except Date of Birth) from a dataframe\n\n Keyword Arguments\n =================\n df -- Pandas DataFrame with date columns to drop\n\n Returns\n =======\n Pandas DataFrame with no date columns (except Date of Birth)\n \"\"\"\n datecols = []\n\n for col in df.columns:\n if df[col].dtype == 'datetime64[ns]' and col != 'date_of_birth':\n datecols.append(col)\n if len(datecols) > 0:\n return df.drop(columns=datecols)\n else:\n print('No date columns were found to drop, make sure date columns contain type \"datetime64[ns]\"')\n return df\n","sub_path":"Clean_Fun.py","file_name":"Clean_Fun.py","file_ext":"py","file_size_in_byte":24777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"144055862","text":"source(findFile(\"scripts\", \"dawn_global_startup.py\"))\r\nsource(findFile(\"scripts\", \"dawn_global_plot_tests.py\"))\r\n\r\ndef main():\r\n #Start using clean workspace\r\n startOrAttachToDAWN()\r\n \r\n # Open data browsing perspective \r\n openPerspective(\"Data Browsing (default)\")\r\n \r\n #expand data tree and open metal mix\r\n expand(waitForObjectItem(\":Project Explorer_Tree\", \"data\"))\r\n expand(waitForObjectItem(\":Project Explorer_Tree\", \"examples\"))\r\n children = object.children(waitForObjectItem(\":Project Explorer_Tree\", \"examples\"))\r\n \r\n for child in children:\r\n if \"metalmix.mca\" in child.text:\r\n doubleClick(child, 5, 5, 0, Button.Button1)\r\n break # stop once the target file has been opened\r\n \r\n mouseClick(waitForObject(\":Plot data as separate plots_ToolItem\"), 18, 11, 0, Button.Button1)\r\n \r\n for i in range(16):\r\n mouseClick(waitForObjectItem(\":Data_Table\", str(i) + \"/0\"), 9, 7, 0, Button.Button1)\r\n \r\n snooze(1)\r\n \r\n mouseClick(waitForObject(\":XY plotting tools_ToolItem_2\"), 31, 7, 0, Button.Button1)\r\n activateItem(waitForObjectItem(\":Pop Up Menu\", \"Peak Fitting\"))\r\n \r\n c = waitForObject(\":Plot_Composite_2\")\r\n b = c.bounds\r\n\r\n test.log(\"Image at (%d, %d) is %d x %d\" % (b.x,b.y, b.width, b.height))\r\n mouseDrag(c, b.x+b.width/3, b.y+b.height/3, int(b.width/1.7),b.height/3, 0, Button.Button1)\r\n snooze(5)\r\n \r\n tab = waitForObject(\":Peak Fitting_Table\")\r\n test.verify(tab.getItemCount()==1,\"Expected: 1 Actual: \" + str(tab.getItemCount()))\r\n \r\n test.verify(waitForObjectItem(\":Peak Fitting_Table\", \"0/1\").text == \"Peak 1\",\"peak 1 present\")\r\n \r\n mouseClick(waitForObject(\":Number peaks to fit_ToolItem\"), 29, 10, 0, Button.Button1)\r\n activateItem(waitForObjectItem(\":Pop Up Menu\", \"Fit 4 Peaks\"))\r\n snooze(5)\r\n test.verify(tab.getItemCount()==4,\"Expected: 4 Actual: \" + str(tab.getItemCount()))\r\n \r\n for i in range(4):\r\n txt = waitForObjectItem(\":Peak Fitting_Table\", str(i) + \"/1\").text\r\n test.verify(txt == \"Peak \" + str(i+1),\"peak present\")\r\n \r\n mouseClick(waitForObject(\":Choose trace for fit._ToolItem\"), 29, 10, 0, Button.Button1)\r\n activateItem(waitForObjectItem(\":Pop Up Menu\", \"Select all\"))\r\n \r\n snooze(30)\r\n \r\n test.verify(tab.getItemCount()==64,\"64 peaks in table (4 peaks for each of the 16 traces)\")\r\n \r\n for i in range(16):\r\n txt = waitForObjectItem(\":Peak Fitting_Table\", str(i) + \"/1\").text\r\n test.verify(txt == \"Peak \" + str(i+1),\"peak present\")\r\n \r\n 
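# presumably closes DAWN, or just detaches if startOrAttachToDAWN attached to a running instance\r\n 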
closeOrDetachFromDAWN()","sub_path":"org.dawnsci.squishtests/suite_tools1d_peakfitting/tst_peak_multiple_traces/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"544451292","text":"# -*- coding:utf-8 -*- #\n__author__ = 'wen'\n__date__ = '2019/6/20 17:44'\n\n\nfrom .models import UserMessage,UserAsk,UserCourse,UserFavorite,CourseComments\nimport xadmin\n\n\n\nclass UserAskAdmin(object):\n\n # list_display is a tuple or list naming the fields to show in the admin list page\n list_display = ('name', 'mobile','course_name', 'add_time') #\n search_fields = ('name', 'mobile','course_name',) # search by any of these fields\n list_filter = ('name', 'mobile','course_name', 'add_time') # filters\n\nclass CourseCommentsAdmin(object):\n\n list_display = ('user', 'course','comments', 'add_time') #\n search_fields = ('user', 'course','comments', ) # search by any of these fields\n list_filter = ('user', 'course','comments', 'add_time') # filters\n\n\nclass UserFavoriteAdmin(object):\n list_display = ('user', 'fav_id','fav_type', 'add_time') #\n search_fields = ('user', 'fav_id','fav_type', ) # search by any of these fields\n list_filter = ('user', 'fav_id','fav_type', 'add_time') # filters\n\n\nclass UserMessageAdmin(object):\n list_display = ('user', 'message','has_read', 'add_time') #\n search_fields = ('user', 'message','has_read', ) # search by any of these fields\n list_filter = ('user', 'message','has_read', 'add_time') # filters\n\n\nclass UserCourseAdmin(object):\n\n list_display = ('user', 'course', 'add_time') #\n search_fields = ('user', 'course') # search by any of these fields\n list_filter = ('user', 'course', 'add_time') # filters\n\n\nxadmin.site.register(UserAsk,UserAskAdmin)\nxadmin.site.register(CourseComments,CourseCommentsAdmin)\nxadmin.site.register(UserFavorite,UserFavoriteAdmin)\nxadmin.site.register(UserMessage,UserMessageAdmin)\nxadmin.site.register(UserCourse,UserCourseAdmin)\n","sub_path":"apps/operation/adminx.py","file_name":"adminx.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"27939424","text":"def addition(a,b):\n c=a+b\n print(c)\ndef prime(n):\n fact=0\n for i in range(1,n+1):\n if(n%i==0):\n fact=fact+1\n if(fact==2):\n print(\"prime\")\n else:\n print(\"not\") \n ","sub_path":"01-10-2019(mod&packages)/packages/Operations.py","file_name":"Operations.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"447129593","text":"# This is an example of recognizing a phone number without using regular expressions.\n# American-style, a phone number is in the form of 123-456-7890.\n\ndef is_phone_number(text):\n if len(text) != 12:\n return False\n for i in range(0, 3):\n if not text[i].isdecimal():\n return False\n if text[3] != '-':\n return False\n for i in range(4, 7):\n if not text[i].isdecimal():\n return False\n if text[7] != '-':\n return False\n for i in range(8, 12):\n if not text[i].isdecimal():\n return False\n return True\n\n\nmessage = 'Call me at 415-555-1011 tomorrow. 
415-555-9999 is my office number.'\nfor i in range(len(message)):\n chunk = message[i:i+12]\n print(chunk)\n if is_phone_number(chunk):\n print('Phone number found : ' + chunk)\nprint('Done')\n","sub_path":"Studies/exercises/regular expressions/is_phone_number.py","file_name":"is_phone_number.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"324635651","text":"from .myqt import QT\nimport pyqtgraph as pg\n\nimport numpy as np\n\nfrom .base import WidgetBase\nfrom .peelercontroller import spike_visible_modes\nfrom .tools import ParamDialog\n\n\nclass SpikeModel(QT.QAbstractItemModel):\n def __init__(self, parent =None, controller=None):\n QT.QAbstractItemModel.__init__(self,parent)\n self.controller = controller\n self.refresh_colors()\n \n def columnCount(self , parentIndex):\n return 6\n \n def rowCount(self, parentIndex):\n #~ if not parentIndex.isValid() and self.cc.peak_label is not None:\n if not parentIndex.isValid():\n self.visible_ind, = np.nonzero(self.controller.spikes['visible'])\n return self.visible_ind.size\n \n else :\n return 0\n \n def index(self, row, column, parentIndex):\n if not parentIndex.isValid():\n if column==0:\n childItem = row\n return self.createIndex(row, column, None)\n else:\n return QT.QModelIndex()\n \n def parent(self, index):\n return QT.QModelIndex()\n \n def data(self, index, role):\n if not index.isValid():\n return None\n \n if role not in (QT.Qt.DisplayRole, QT.Qt.DecorationRole):\n return\n \n col = index.column()\n row = index.row()\n \n #~ t_start = 0.\n \n abs_ind = self.visible_ind[row]\n spike = self.controller.spikes[abs_ind]\n \n spike_time = (spike['index']+ spike['jitter'])/self.controller.dataio.sample_rate \n \n if role ==QT.Qt.DisplayRole :\n if col == 0:\n return '{}'.format(abs_ind)\n elif col == 1:\n return '{}'.format(spike['segment'])\n elif col == 2:\n return '{}'.format(spike['index'])\n elif col == 3:\n return '{:.2f}'.format(spike['jitter'])\n elif col == 4:\n return '{:.4f}'.format(spike_time)\n elif col == 5:\n return '{}'.format(spike['label'])\n else:\n return None\n elif role == QT.Qt.DecorationRole :\n if col != 0: return None\n if spike['label'] in self.icons:\n return self.icons[spike['label']]\n else:\n return None\n else :\n return None\n \n def flags(self, index):\n if not index.isValid():\n return QT.Qt.NoItemFlags\n return QT.Qt.ItemIsEnabled | QT.Qt.ItemIsSelectable #| Qt.ItemIsDragEnabled\n\n def headerData(self, section, orientation, role):\n if orientation == QT.Qt.Horizontal and role == QT.Qt.DisplayRole:\n return ['num', 'seg_num', 'index', 'jitter', 'time', 'cluster_label'][section]\n return\n \n def refresh_colors(self):\n self.icons = { }\n for k, color in self.controller.qcolors.items():\n pix = QT.QPixmap(10,10 )\n pix.fill(color)\n self.icons[k] = QT.QIcon(pix)\n #~ self.icons[-1] = QIcon(':/user-trash.png')\n #~ self.layoutChanged.emit()\n self.refresh()\n \n def refresh(self):\n self.layoutChanged.emit()\n\n\nclass SpikeList(WidgetBase):\n def __init__(self,controller=None, parent=None):\n WidgetBase.__init__(self, parent=parent, controller=controller)\n self.controller = controller\n \n self.layout = QT.QVBoxLayout()\n self.setLayout(self.layout)\n \n self.layout.addWidget(QT.QLabel('All spikes') )\n \n self.combo = QT.QComboBox()\n self.layout.addWidget(self.combo)\n self.combo.addItems(spike_visible_modes)\n self.combo.currentTextChanged.connect(self.change_visible_mode)\n \n self.tree = 
QT.QTreeView(minimumWidth = 100, uniformRowHeights = True,\n selectionMode= QT.QAbstractItemView.ExtendedSelection, selectionBehavior = QT.QTreeView.SelectRows,\n contextMenuPolicy = QT.Qt.CustomContextMenu,)\n \n self.layout.addWidget(self.tree)\n self.tree.customContextMenuRequested.connect(self.open_context_menu)\n \n self.model = SpikeModel(controller=self.controller)\n self.tree.setModel(self.model)\n self.tree.selectionModel().selectionChanged.connect(self.on_tree_selection)\n\n for i in range(self.model.columnCount(None)):\n self.tree.resizeColumnToContents(i)\n self.tree.setColumnWidth(0,80)\n \n def refresh(self):\n self.model.refresh_colors()\n \n def on_tree_selection(self):\n self.controller.spikes['selected'][:] = False\n for index in self.tree.selectedIndexes():\n if index.column() == 0:\n ind = self.model.visible_ind[index.row()]\n self.controller.spikes['selected'][ind] = True\n self.spike_selection_changed.emit()\n \n def on_spike_selection_changed(self):\n self.tree.selectionModel().selectionChanged.disconnect(self.on_tree_selection)\n \n row_selected, = np.nonzero(self.controller.spikes['selected'][self.model.visible_ind])\n \n if row_selected.size>100:# otherwise this is very slow, so keep only the first few\n row_selected = row_selected[:10]\n \n # change selection\n self.tree.selectionModel().clearSelection()\n flags = QT.QItemSelectionModel.Select #| QItemSelectionModel.Rows\n itemsSelection = QT.QItemSelection()\n for r in row_selected:\n for c in range(2):\n index = self.tree.model().index(r,c,QT.QModelIndex())\n ir = QT.QItemSelectionRange( index )\n itemsSelection.append(ir)\n self.tree.selectionModel().select(itemsSelection , flags)\n\n # set selection visible\n if len(row_selected)>=1:\n index = self.tree.model().index(row_selected[0],0,QT.QModelIndex())\n self.tree.scrollTo(index)\n\n self.tree.selectionModel().selectionChanged.connect(self.on_tree_selection) \n\n def change_visible_mode(self, mode):\n self.controller.change_spike_visible_mode(mode)\n self.cluster_visibility_changed.emit()\n self.model.refresh()\n\n def open_context_menu(self):\n pass\n #~ menu = QT.QMenu()\n #~ act = menu.addAction('Move selection to trash')\n #~ act.triggered.connect(self.move_selection_to_trash)\n #~ menu.exec_(self.cursor().pos())\n \n def move_selection_to_trash(self):\n #TODO\n pass\n #~ self.cc.peak_label[self.cc.peak_selection] = -1\n #~ self.cc.on_new_cluster()\n #~ self.cc.refresh_colors(reset = False)\n #~ self.refresh()\n #~ self.spike_label_changed.emit()\n\n\nclass ClusterSpikeList(WidgetBase):\n \n def __init__(self, controller=None, parent=None):\n WidgetBase.__init__(self, parent=parent, controller=controller)\n \n self.layout = QT.QVBoxLayout()\n self.setLayout(self.layout)\n\n self.table = QT.QTableWidget()\n self.layout.addWidget(self.table)\n self.table.itemChanged.connect(self.on_item_changed)\n \n self.refresh()\n\n def refresh(self):\n #~ self.cc._check_plot_attributes()\n \n self.table.itemChanged.disconnect(self.on_item_changed)\n \n self.table.clear()\n labels = ['label', 'show/hide', 'nb_peaks']\n self.table.setColumnCount(len(labels))\n self.table.setHorizontalHeaderLabels(labels)\n #~ self.table.setMinimumWidth(100)\n #~ self.table.setColumnWidth(0,60)\n self.table.setContextMenuPolicy(QT.Qt.CustomContextMenu)\n self.table.customContextMenuRequested.connect(self.open_context_menu)\n self.table.setSelectionMode(QT.QAbstractItemView.ExtendedSelection)\n self.table.setSelectionBehavior(QT.QAbstractItemView.SelectRows)\n \n 
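# one row per cluster label: name with color icon, show/hide checkbox, peak count\n 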
self.table.setRowCount(self.controller.cluster_labels.size)\n \n for i, k in enumerate(self.controller.cluster_labels):\n color = self.controller.qcolors.get(k, QT.QColor( 'white'))\n pix = QT.QPixmap(10,10)\n pix.fill(color)\n icon = QT.QIcon(pix)\n \n name = '{}'.format(k)\n item = QT.QTableWidgetItem(name)\n item.setFlags(QT.Qt.ItemIsEnabled|QT.Qt.ItemIsSelectable)\n self.table.setItem(i,0, item)\n item.setIcon(icon)\n \n item = QT.QTableWidgetItem('')\n item.setFlags(QT.Qt.ItemIsEnabled|QT.Qt.ItemIsSelectable|QT.Qt.ItemIsUserCheckable)\n \n item.setCheckState({ False: QT.Qt.Unchecked, True : QT.Qt.Checked}[self.controller.cluster_visible[k]])\n self.table.setItem(i,1, item)\n\n item = QT.QTableWidgetItem('{}'.format(self.controller.cluster_count[k]))\n item.setFlags(QT.Qt.ItemIsEnabled|QT.Qt.ItemIsSelectable)\n self.table.setItem(i,2, item)\n \n for i in range(3):\n self.table.resizeColumnToContents(i)\n self.table.itemChanged.connect(self.on_item_changed) \n\n def on_item_changed(self, item):\n if item.column() != 1: return\n sel = {QT.Qt.Unchecked : False, QT.Qt.Checked : True}[item.checkState()]\n k = self.controller.cluster_labels[item.row()]\n self.controller.cluster_visible[k] = bool(item.checkState())\n self.cluster_visibility_changed.emit()\n \n def selected_cluster(self):\n selected = []\n for index in self.table.selectedIndexes():\n if index.column() !=0: continue\n selected.append(self.controller.cluster_labels[index.row()])\n return selected\n \n def _selected_spikes(self):\n selection = np.zeros(self.controller.spike_label.shape[0], dtype = bool)\n for k in self.selected_cluster():\n selection |= self.controller.spike_label == k\n return selection\n \n def open_context_menu(self):\n n = len(self.selected_cluster())\n menu = QT.QMenu()\n\n if n>=0: \n act = menu.addAction('Reset colors')\n act.triggered.connect(self.reset_colors)\n act = menu.addAction('Show all')\n act.triggered.connect(self.show_all)\n act = menu.addAction('Hide all')\n act.triggered.connect(self.hide_all)\n #~ act = menu.addAction('Order cluster by power')\n #~ act.triggered.connect(self.order_clusters)\n \n if n>=1:\n #~ act = menu.addAction('Move selection to trash')\n #~ act.triggered.connect(self.move_selection_to_trash)\n #~ act = menu.addAction('Merge selection')\n #~ act.triggered.connect(self.merge_selection)\n act = menu.addAction('Select')\n act.triggered.connect(self.select_peaks_of_clusters)\n \n self.menu = menu\n menu.popup(self.cursor().pos())\n #~ menu.exec_(self.cursor().pos())\n \n def reset_colors(self):\n self.controller.refresh_colors(reset = True)\n self.refresh()\n self.colors_changed.emit()\n \n def show_all(self):\n for k in self.controller.cluster_visible:\n self.controller.cluster_visible[k] = True\n self.refresh()\n self.cluster_visibility_changed.emit()\n \n def hide_all(self):\n for k in self.controller.cluster_visible:\n self.controller.cluster_visible[k] = False\n self.refresh()\n self.cluster_visibility_changed.emit()\n \n\n def move_selection_to_trash(self):\n pass\n #~ for k in self.selected_cluster():\n #~ mask = self.controller.spike_label == k\n #~ self.controller.change_spike_label(mask, -1)\n #~ self.refresh()\n #~ self.spike_label_changed.emit()\n \n def merge_selection(self):\n pass\n #~ label_to_merge = self.selected_cluster()\n #~ self.controller.merge_cluster(label_to_merge)\n #~ self.refresh()\n #~ self.spike_label_changed.emit()\n \n def select_peaks_of_clusters(self):\n pass\n #TODO\n #~ self.controller.spike_selection[:] = self._selected_spikes()\n #~ 
self.refresh()\n #~ self.spike_selection_changed.emit()\n\n\n\n","sub_path":"tridesclous/gui/spikelists.py","file_name":"spikelists.py","file_ext":"py","file_size_in_byte":12256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"166937459","text":"\"Tests the consistency of the formulas used in fuzzification\"\nimport unittest\nimport numpy as np\nfrom fuzzifier import *\nfrom trafficCongestion import *\n\n\nclass Fuzzifier_tests(unittest.TestCase):\n\n def test_subset(self):\n \"tests if fuzzifier yields correct subset, test with true values\"\n # initialize instance of the sets\n t = TrafficCongestion()\n for i in np.arange(0, 1.1, 0.1):\n # expects a value of type dictionary( name:[list]) whose key value\n # is of type list\n subset_f = Fuzzifier(i, t.getDensitySet()).getDetSubset()\n for key_s in subset_f.keys():\n for key_t in t.getDensitySet().keys():\n if key_s == key_t:\n self.assertTrue(\n i >= t.getDensitySet()[key_t][0]\n and i <= t.getDensitySet()[key_t][2]\n )\n\n def test_fuzzy_value(self):\n \"\"\"test if the fuzzy value conforms to trapezoid-shaped\n function or triangular-shaped\"\"\"\n t = TrafficCongestion()\n for x in np.arange(0, 1.1, 0.1):\n # expects a value of type dictionary( name:[list]) whose key value\n # is of type list\n temp_f = Fuzzifier(x, t.getDensitySet())\n subset_f = temp_f.getDetSubset()\n # expects a value of type dictionary\n fuzzy_f = temp_f.getFuzzy()\n for key_f in subset_f.keys():\n for key_ff in fuzzy_f.keys():\n if key_f == key_ff:\n # 1 for trapezoid function, triangular otherwise\n if subset_f[key_f][3] == 1:\n if subset_f[key_f][4] == 0:\n a = -0.4\n b = -0.2\n c = subset_f[key_f][1]\n d = subset_f[key_f][2]\n else:\n a = subset_f[key_f][0]\n b = subset_f[key_f][1]\n c = 1.2\n d = 1.4\n left_slope = 1 / (b - a)\n right_slope = 1 / (c - d)\n if x <= a or x >= d:\n self.assertEqual(fuzzy_f[key_f], 0)\n if x >= b and x <= c:\n self.assertEqual(fuzzy_f[key_f], 1)\n if x >= a and x <= b:\n self.assertEqual(\n fuzzy_f[key_f], left_slope * (x - a))\n if x >= c and x <= d:\n self.assertEqual(\n fuzzy_f[key_f], right_slope * (x - d))\n else:\n a = subset_f[key_f][0]\n b = subset_f[key_f][1]\n c = subset_f[key_f][2]\n if x <= a:\n self.assertEqual(fuzzy_f[key_f], 0)\n if x >= a and x <= b:\n self.assertEqual(\n fuzzy_f[key_f], (x - a) / (b - a))\n if x >= b and x <= c:\n self.assertEqual(\n fuzzy_f[key_f], (c - x) / (c - b))\n if x >= c:\n self.assertEqual(fuzzy_f[key_f], 0)\n\n def test_isEqualSubsets(self):\n t = TrafficCongestion()\n test_values = [0.1, 0.2, 0.4, 0.6, 0.8, 0.9]\n expected_subsets = [['low'], ['low', 'moderate'],\n ['low', 'moderate', 'high'],\n ['moderate', 'high', 'very high'],\n ['high', 'very high'],\n ['very high']\n ]\n\n for index in range(len(test_values)):\n subset_f = Fuzzifier(\n test_values[index],\n t.getDensitySet()).getDetSubset()\n subset_f_key = list(subset_f.keys())\n for element in subset_f_key:\n self.assertIn(element, expected_subsets[index])\n\n def test_isEqualFuzzy(self):\n t = TrafficCongestion()\n test_values = [0.1, 0.2, 0.4, 0.6, 0.8, 0.9]\n expected_fuzzy = [\n {'low': 1},\n {'moderate': 0.0, 'low': 1},\n {'low': 0.0, 'moderate': 1.0, 'high': 0.0},\n {'high': 1.0, 'moderate': 0.0, 'very high': 0.0},\n {'high': 0.0, 'very high': 1.0},\n {'very high': 1}\n ]\n for index in range(len(test_values)):\n fuzzy_value = Fuzzifier(\n test_values[index],\n t.getDensitySet()).getFuzzy()\n self.assertEqual(expected_fuzzy[index], fuzzy_value)\nif __name__ == 
'__main__':\n unittest.main()\n","sub_path":"fuzz/fuzzifier_test.py","file_name":"fuzzifier_test.py","file_ext":"py","file_size_in_byte":5017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"20336324","text":"#!/usr/bin/env python3\n\nimport shutil\nimport tempfile\nimport unittest\nfrom collections import Counter\nfrom multiprocessing import Pool\nfrom os import path\nfrom unittest.mock import Mock, patch\n\nfrom pytorch_translate.research.test import morphology_test_utils as morph_utils\nfrom pytorch_translate.research.unsupervised_morphology import (\n bilingual_bpe,\n bpe,\n char_ibm_model1,\n)\n\n\ntxt_content = [\"123 124 234 345\", \"112 122 123 345\", \"123456789\", \"123456 456789\"]\n\n\nclass TestBPE(unittest.TestCase):\n def test_vocab_init(self):\n bpe_model = bpe.BPE()\n\n with patch(\"builtins.open\") as mock_open:\n mock_open.return_value.__enter__ = mock_open\n mock_open.return_value.__iter__ = Mock(return_value=iter(txt_content))\n bpe_model._init_vocab(txt_path=\"no_exist_file.txt\")\n\n vocab_items = Counter()\n for (vocab_entry, freq) in bpe_model.current_train_data:\n for item in vocab_entry:\n vocab_items[item] += freq\n\n assert vocab_items[bpe_model.eow_symbol] == 11\n assert vocab_items[\"3\"] == 7\n assert len(vocab_items) == 10\n assert \"12\" not in vocab_items\n assert \"123\" not in vocab_items\n\n assert len(bpe_model.merge_candidate_indices) == 17\n assert bpe_model.merge_candidate_indices[(\"2\", \"3\")] == {0, 2, 6, 7}\n\n assert len(bpe_model.merge_candidate_freq) == 17\n assert bpe_model.merge_candidate_freq[(\"2\", \"3\")] == 5\n\n def test_best_candidate(self):\n bpe_model = bpe.BPE()\n\n with patch(\"builtins.open\") as mock_open:\n mock_open.return_value.__enter__ = mock_open\n mock_open.return_value.__iter__ = Mock(return_value=iter(txt_content))\n bpe_model._init_vocab(txt_path=\"no_exist_file.txt\")\n assert bpe_model.get_best_candidate() == (\"1\", \"2\")\n\n def test_bpe_merge(self):\n bpe_model = bpe.BPE()\n\n with patch(\"builtins.open\") as mock_open:\n mock_open.return_value.__enter__ = mock_open\n mock_open.return_value.__iter__ = Mock(return_value=iter(txt_content))\n bpe_model._init_vocab(txt_path=\"no_exist_file.txt\")\n\n # Trying merging a candidate that does not exist.\n bpe_model.merge_candidate_into_vocab(merge_candidate=(\"3\", \"1\"))\n assert len(bpe_model.vocab) == 10\n\n # Trying merging a candidate that exists.\n bpe_model.merge_candidate_into_vocab(merge_candidate=(\"2\", \"3\"))\n assert len(bpe_model.vocab) == 11\n\n # Trying merging a candidate that exists. 
Entry \"3\" should remove\n # from vocab.\n bpe_model.merge_candidate_into_vocab(merge_candidate=(\"3\", \"4\"))\n assert len(bpe_model.vocab) == 11\n\n # Trying merging a candidate that does not exist.\n bpe_model.merge_candidate_into_vocab(\n merge_candidate=(\"3\", bpe_model.eow_symbol)\n )\n assert len(bpe_model.vocab) == 11\n\n def test_build_vocab(self):\n bpe_model = bpe.BPE()\n\n with patch(\"builtins.open\") as mock_open:\n mock_open.return_value.__enter__ = mock_open\n mock_open.return_value.__iter__ = Mock(return_value=iter(txt_content))\n\n # Trying to build a vocab more than the possible size\n vocab_size = bpe_model.build_vocab(\n txt_path=\"no_exist_file.txt\", vocab_size=20\n )\n # Asserting that we go back to the original size (number of word types.)\n assert vocab_size == 9\n assert bpe_model.max_bpe_len == 9 + len(bpe_model.eow_symbol)\n\n with patch(\"builtins.open\") as mock_open:\n mock_open.return_value.__enter__ = mock_open\n mock_open.return_value.__iter__ = Mock(return_value=iter(txt_content))\n # Trying to build a vocab with an acceptable size.\n vocab_size = bpe_model.build_vocab(\n txt_path=\"no_exist_file.txt\", vocab_size=12\n )\n # asserting that the size is as expected.\n assert vocab_size == len(bpe_model.vocab) == 12\n assert bpe_model.max_bpe_len == 2\n\n def test_segment_word(self):\n bpe_model = bpe.BPE()\n\n with patch(\"builtins.open\") as mock_open:\n mock_open.return_value.__enter__ = mock_open\n mock_open.return_value.__iter__ = Mock(return_value=iter(txt_content))\n\n bpe_model.build_vocab(txt_path=\"no_exist_file.txt\", vocab_size=12)\n assert bpe_model.segment_word(\"1234\") == [\"12\", \"34\", bpe_model.eow_symbol]\n\n # Giving unknown character sequence\n assert bpe_model.segment_word(\"12634\") == [\n \"12\",\n \"6\",\n \"34\",\n bpe_model.eow_symbol,\n ]\n\n def test_segment_file(self):\n bpe_model = bpe.BPE()\n\n tmp_dir = tempfile.mkdtemp()\n input_file, output_file = (\n path.join(tmp_dir, \"test.in\"),\n path.join(tmp_dir, \"test1.out\"),\n )\n\n with open(input_file, \"w\", encoding=\"utf-8\") as writer:\n writer.write(\"\\n\".join(txt_content))\n bpe_model.build_vocab(txt_path=input_file, vocab_size=12)\n\n output = []\n for line in txt_content:\n cur_line_output = []\n for word in line.strip().split():\n cur_line_output.append(\" \".join(bpe_model.segment_word(word)))\n output.append(\" \".join(cur_line_output))\n output.append(\"\\n\")\n expected_output = \"\".join(output).strip()\n\n bpe_model.segment_txt(input_path=input_file, output_path=output_file)\n model_output = open(output_file, \"r\", encoding=\"utf-8\").read().strip()\n assert expected_output == model_output\n\n shutil.rmtree(tmp_dir)\n\n def test_bilingual_bpe_init(self):\n \"\"\"\n This looks more like an integration test because each subpeace is tested\n in different places.\n \"\"\"\n bpe_model = bilingual_bpe.BilingualBPE()\n tmp_dir, f1, f2 = morph_utils.get_two_different_tmp_files()\n dst2src_ibm_model = char_ibm_model1.Word2CharIBMModel1()\n dst2src_ibm_model.learn_ibm_parameters(src_path=f2, dst_path=f1, num_iters=3)\n ibm_path = path.join(tmp_dir, \"ibm\")\n dst2src_ibm_model.save(file_path=ibm_path)\n\n bpe_model._init_params(\n ibm_model_path=ibm_path, src_txt_path=f1, dst_txt_path=f2\n )\n assert len(bpe_model.bpe_probs_from_alignment) == 80\n assert bpe_model.eow_symbol in bpe_model.bpe_probs_from_alignment\n\n shutil.rmtree(tmp_dir)\n\n def test_calc_word_probs(self):\n with patch(\"builtins.open\") as mock_open:\n bpe_model = bilingual_bpe.BilingualBPE()\n 
mock_open.return_value.__enter__ = mock_open\n mock_open.return_value.__iter__ = Mock(return_value=iter(txt_content))\n v = bpe_model._calc_word_probs(txt_path=\"no_exist_file.txt\")\n assert len(v) == 9\n assert v[\"123\"] == 2 / 11\n assert v[\"123456789\"] == 1 / 11\n\n def test_best_candidate_bilingual(self):\n bpe_model = bilingual_bpe.BilingualBPE()\n tmp_dir, f1, f2 = morph_utils.get_two_different_tmp_files()\n\n dst2src_ibm_model = char_ibm_model1.Word2CharIBMModel1()\n dst2src_ibm_model.learn_ibm_parameters(src_path=f2, dst_path=f1, num_iters=3)\n ibm_path = path.join(tmp_dir, \"ibm\")\n dst2src_ibm_model.save(file_path=ibm_path)\n\n bpe_model._init_params(\n ibm_model_path=ibm_path, src_txt_path=f1, dst_txt_path=f2\n )\n\n b1 = bpe_model.get_best_candidate()\n c1 = bpe_model.get_best_candidate()\n # For the best step, it is the same as monolingual.\n assert b1 == c1\n\n shutil.rmtree(tmp_dir)\n\n def test_build_bilingual_vocab(self):\n bpe_model = bilingual_bpe.BilingualBPE()\n tmp_dir, f1, f2 = morph_utils.get_two_different_tmp_files()\n\n dst2src_ibm_model = char_ibm_model1.Word2CharIBMModel1()\n dst2src_ibm_model.learn_ibm_parameters(src_path=f2, dst_path=f1, num_iters=3)\n ibm_path = path.join(tmp_dir, \"ibm\")\n dst2src_ibm_model.save(file_path=ibm_path)\n\n vocab_size = bpe_model.build_vocab(\n ibm_model_path=ibm_path, src_txt_path=f1, dst_txt_path=f2, vocab_size=12\n )\n assert vocab_size == len(bpe_model.vocab) == 12\n shutil.rmtree(tmp_dir)\n\n def test_save_load(self):\n bpe_model = bilingual_bpe.BilingualBPE()\n tmp_dir, f1, f2 = morph_utils.get_two_different_tmp_files()\n\n dst2src_ibm_model = char_ibm_model1.Word2CharIBMModel1()\n dst2src_ibm_model.learn_ibm_parameters(src_path=f2, dst_path=f1, num_iters=3)\n ibm_path = path.join(tmp_dir, \"ibm\")\n dst2src_ibm_model.save(file_path=ibm_path)\n\n vocab_size = bpe_model.build_vocab(\n ibm_model_path=ibm_path, src_txt_path=f1, dst_txt_path=f2, vocab_size=12\n )\n assert vocab_size == len(bpe_model.vocab) == 12\n\n bpe_model.save(file_path=tmp_dir + \"/vocab.txt\")\n\n loaded_model = bilingual_bpe.BilingualBPE()\n loaded_model.load(file_path=tmp_dir + \"/vocab.txt\")\n\n assert loaded_model.vocab == bpe_model.vocab\n assert bpe_model.segment_word(\"1234\") == loaded_model.segment_word(\"1234\")\n\n shutil.rmtree(tmp_dir)\n","sub_path":"pytorch_translate/research/test/test_bpe.py","file_name":"test_bpe.py","file_ext":"py","file_size_in_byte":9458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"122132048","text":"from objc._objc import NULL\n\nfrom PygameEnvironment import TwentyFortyEightEnvironment\nfrom game import Game, Direction\nfrom decimal import Decimal\nfrom ai import search\nfrom abAI import alphaBetaSearch\nfrom expectimax import expectiSearch\n\nenvironment = TwentyFortyEightEnvironment()\ngameInstance = Game()\nenvironment.StartEpisode()\n\n\ndef gameRunner(board):\n topCell = 0\n results = [0,0]\n while (board.game.is_available_cells() == True or board.game.is_merges_available() == True):\n for x in range(board.game.size):\n for y in range(board.game.size):\n cell = {'x': x, 'y': y}\n cell_value = board.game.get(cell)\n if cell_value > topCell:\n topCell = cell_value\n\n #direction = alphaBetaSearch(board,4,-100, 1000000, 1)\n\n direction = expectiSearch(board, 3,2)\n #direction = search(board, 3, 1)\n #\n board.game.move(direction[0])\n print(board.game.state)\n print(board.game.score)\n\n # if board.game.is_available_cells() == True:\n # 
board.game.addRandom()\n results = [(board.game.score), topCell]\n return results\n\ndef multiRunner():\n topScores = []\n topTiles = []\n\n for i in range(7):\n environment = TwentyFortyEightEnvironment()\n results = gameRunner(environment)\n topScores.append(results[0])\n topTiles.append(results[1])\n print(topScores)\n print(topTiles)\n import matplotlib.pyplot as plt\n plt.plot([1,2,3,4,5,6,7],topScores)\n\n plt.title(\"Top Scores\")\n plt.show()\n\n\nmultiRunner()","sub_path":"gameAgent2.py","file_name":"gameAgent2.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"277614900","text":"import cv2\n\n\nclass FaceRecognizer(object):\n\n def __init__(self):\n self.recognizer = cv2.face.LBPHFaceRecognizer_create()\n self.recognizer.read('trainer/trainer.yml')\n self.cascadePath = \"ada_codeclub/cascades/haarcascade_frontalface_default.xml\"\n self.faceCascade = cv2.CascadeClassifier(self.cascadePath)\n self.font = cv2.FONT_HERSHEY_SIMPLEX\n\n self.id = 0\n self.confidence = -1\n self.names = ['None', 'Peter', 'Bjørn', 'Bård'] # Names of people you want to identify (TRAINED FROM DATASET)\n\n # Initialize and start realtime video capture\n self.cam = cv2.VideoCapture(0)\n self.cam.set(3, 640) # set video widht\n self.cam.set(4, 480) # set video height\n\n # Define min window size to be recognized as a face\n self.minW = 0.1 * self.cam.get(3)\n self.minH = 0.1 * self.cam.get(4)\n\n def run_recognizer(self):\n while True:\n ret, img = self.cam.read()\n # img = cv2.flip(img, -1) # Flip vertically\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n faces = self.faceCascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5,\n minSize=(int(self.minW), int(self.minH)), )\n\n # should use the faces recognized in faces to draw rectangles and IDs on the image returned to screen.\n\n cv2.imshow('camera', img)\n\n k = cv2.waitKey(10) & 0xff # Press 'ESC' for exiting video\n if k == 27:\n self.cleanup()\n break\n\n def cleanup(self):\n self.cam.release()\n cv2.destroyAllWindows()\n","sub_path":"ada_codeclub/face_recognition/face_recognizer.py","file_name":"face_recognizer.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"448775355","text":"import time\nimport gym\nimport numpy as np\nimport tflearn\nfrom tflearn.layers.core import input_data, dropout, fully_connected\nfrom tflearn.layers.estimator import regression\nfrom statistics import mean, median\nfrom collections import Counter\n\n\ndef play_game(env, goal_steps, display, model):\n score = 0\n game_memory = []\n prev_obs = []\n env.reset()\n for _ in range(goal_steps):\n if display:\n env.render()\n if (len(prev_obs) == 0) or (model is None):\n action = env.action_space.sample()\n else:\n action = np.argmax(model.predict(prev_obs.reshape(-1, len(prev_obs), 1))[0])\n new_obs, reward, done, info = env.step(action)\n if len(prev_obs) > 0:\n game_memory.append([prev_obs, action])\n prev_obs = new_obs\n score += reward\n if done:\n break\n return score, game_memory\n\n\ndef get_pop(env, action_count, pop_size, goal_steps, min_threshold, model):\n scores = []\n training_data = []\n accepted_scores = []\n while len(accepted_scores) < pop_size:\n score, game_memory = play_game(env, goal_steps, False, model)\n if score > min_threshold:\n accepted_scores.append(score)\n for data in game_memory:\n output = np.zeros(action_count)\n output[data[1]] = 1\n 
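# pair the observation with its one-hot encoded action as one supervised training example\n 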
training_data.append([data[0], output])\n scores.append(score)\n return training_data, accepted_scores, scores\n\n\ndef neural_network_model(input_size, action_count, LR=1e-3):\n network = input_data(shape=[None, input_size, 1], name='input')\n network = fully_connected(network, 128, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 256, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 512, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 256, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 128, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, action_count, activation='softmax') #output layers\n network = regression(network, optimizer='adam', learning_rate=LR, loss='categorical_crossentropy', name='targets')\n model = tflearn.DNN(network, tensorboard_dir='log')\n return model\n\n\ndef train_model(training_data, action_count, max_steps, model=False):\n X = np.array([i[0] for i in training_data]).reshape(-1, len(training_data[0][0]), 1)\n y = [i[1] for i in training_data]\n if not model:\n model = neural_network_model(input_size = len(X[0]), action_count=action_count)\n #n_epoch should be determined dynamically\n model.fit({'input':X}, {'targets':y}, n_epoch=5, snapshot_step=max_steps, show_metric=True, run_id='openaistuff')\n return model\n\n\ndef test_model(env, model, max_steps):\n test_scores = []\n for i in range(100):\n score, mem = play_game(env, max_steps, i < 5, model)\n if (i < 5):\n print(\"Test {}: {}\".format(i+1, score))\n test_scores.append(score)\n print(\"Average test score: {}\".format(mean(test_scores)))\n print(\"Scores: {}\".format(Counter(test_scores)))\n\n\ndef play(game_name, max_steps, score_req):\n env = gym.make(game_name)\n env._max_episode_steps = max_steps\n action_count = env.action_space.n\n pop_size = 40\n\n training_data, accepted, train_scores = get_pop(env, action_count, pop_size, max_steps, score_req, None)\n print(\"Average training score: {}\".format(mean(train_scores)))\n print(\"Average accepted mean: {}\".format(mean(accepted)))\n print(\"Accepted count: {}\".format(Counter(accepted)))\n\n model = train_model(training_data, action_count, max_steps)\n\n raw_input(\"Press enter to test model...\")\n test_model(env, model, max_steps)\n\n\ndef demo(game_name, steps, accepted, disp_count):\n raw_input(\"Press enter to demo...\")\n env = gym.make(game_name)\n env._max_episode_steps = steps\n action_count = env.action_space.n\n count = 0\n score_total = 0\n print(\"\\nDemo-ing {}\\n---------\\nrandom moves\\ndisplay first {} of 10 games\".format(game_name, disp_count))\n for i in range(10):\n score, mem = play_game(env, steps, i < disp_count, None)\n# print(\"Score: {}\".format(score))\n if (i < disp_count):\n print(\"score: {}\".format(score))\n time.sleep(0.5)\n score_total += score\n if score > accepted:\n count += 1\n print(\"Wins out of 10 attempts: {}\".format(count))\n print(\"Avg random moves score: {}\".format(score_total / 10))\n\n\ndef main():\n# play('CartPole-v0', 500, 130)\n# play('MountainCar-v0', 1000, -950)\n# play('Acrobot-v1', 1500, -1200)\n demo('MountainCar-v0', 1500, -700, 5)\n\n\nif __name__ == \"__main__\":\n 
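# runs the demo configured in main(); swap in one of the commented play() calls above to train instead\n 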
main()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"generalization/general_player.py","file_name":"general_player.py","file_ext":"py","file_size_in_byte":4710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"405825611","text":"#!/usr/bin/env python3\n\n# Using https://github.com/Uberi/speech_recognition\n\nimport sys\nimport os\nimport speech_recognition as sr\n\npath = os.path.dirname(os.path.abspath(__file__))\n\n\ndef run():\n r = sr.Recognizer()\n\n try:\n file_path = os.path.join(path, 'audio_files/harvard.wav')\n harvard = sr.AudioFile(file_path)\n\n with harvard as source:\n # r.adjust_for_ambient_noise(source, duration=0.5)\n # adjust_for_ambient_noise method reads the first\n # second of the file stream and calibrates the\n # recognizer to the noise level of the audio.\n # Hence, that portion of the stream is consumed\n # before you call record() to capture the data,\n # so adjust the time-frame.\n #\n # If the signal has too much ambient noise, SciPy can be used to apply filters.\n\n audio1 = r.record(source, duration=4)\n audio2 = r.record(source, duration=4)\n\n transcribed = r.recognize_google(audio1, show_all=True)\n print(transcribed)\n\n transcribed = r.recognize_google(audio2, show_all=True)\n print(transcribed)\n except TypeError as err:\n print(err)\n\n\nif __name__ == '__main__':\n run()\n","sub_path":"python/audio_to_text.py","file_name":"audio_to_text.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"550998960","text":"class Solution:\n # @param A, a list of integers\n # @return a boolean\n def canJump(self, A):\n m = 0 # farthest position that can jump to\n for i in xrange(0, len(A)):\n if i <= m:\n m = max(m, A[i]+i)\n if m >= len(A)-1:\n return True\n return False\n","sub_path":"Leetcode 2014/1218 Jump Game.py","file_name":"1218 Jump Game.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"160926799","text":"\"\"\"\nSynchronisation primitives and helper functions.\n\"\"\"\nimport asyncio\nimport collections.abc\nimport concurrent.futures\nimport enum\nimport threading\nimport time\nfrom typing import Any, AsyncIterator, Callable, Collection, Generic, \\\n Iterable, Iterator, Optional, Set, TypeVar, Union\n\nfrom kopf.utilities import aiotasks\n\nFlag = Union[aiotasks.Future, asyncio.Event, concurrent.futures.Future, threading.Event]\n\n\nasync def wait_flag(\n flag: Optional[Flag],\n) -> Any:\n \"\"\"\n Wait for a flag to be raised.\n\n Non-asyncio primitives are generally not our worry,\n but we support them for convenience.\n \"\"\"\n if flag is None:\n pass\n elif isinstance(flag, asyncio.Future):\n return await flag\n elif isinstance(flag, asyncio.Event):\n return await flag.wait()\n elif isinstance(flag, concurrent.futures.Future):\n loop = asyncio.get_running_loop()\n return await loop.run_in_executor(None, flag.result)\n elif isinstance(flag, threading.Event):\n loop = asyncio.get_running_loop()\n return await loop.run_in_executor(None, flag.wait)\n else:\n raise TypeError(f\"Unsupported type of a flag: {flag!r}\")\n\n\nasync def raise_flag(\n flag: Optional[Flag],\n) -> None:\n \"\"\"\n Raise a flag.\n\n Non-asyncio primitives are generally not our worry,\n but we support them for convenience.\n \"\"\"\n if flag is None:\n pass\n elif isinstance(flag, asyncio.Future):\n flag.set_result(None)\n elif 
isinstance(flag, asyncio.Event):\n flag.set()\n elif isinstance(flag, concurrent.futures.Future):\n flag.set_result(None)\n elif isinstance(flag, threading.Event):\n flag.set()\n else:\n raise TypeError(f\"Unsupported type of a flag: {flag!r}\")\n\n\ndef check_flag(\n flag: Optional[Flag],\n) -> Optional[bool]:\n \"\"\"\n Check if a flag is raised.\n \"\"\"\n if flag is None:\n return None\n elif isinstance(flag, asyncio.Future):\n return flag.done()\n elif isinstance(flag, asyncio.Event):\n return flag.is_set()\n elif isinstance(flag, concurrent.futures.Future):\n return flag.done()\n elif isinstance(flag, threading.Event):\n return flag.is_set()\n else:\n raise TypeError(f\"Unsupported type of a flag: {flag!r}\")\n\n\nasync def condition_chain(\n source: asyncio.Condition,\n target: asyncio.Condition,\n) -> None:\n \"\"\"\n A condition chain is a \"clean\" hack to attach one condition to another.\n\n It is a \"clean\" (not \"dirty\") hack to wake up the webhook configuration\n managers when either the resources are revised (as seen in the insights),\n or a new client config is yielded from the webhook server.\n \"\"\"\n async with source:\n while True:\n await source.wait()\n async with target:\n target.notify_all()\n\n\n_T = TypeVar('_T')\n\n\nclass Container(Generic[_T]):\n\n def __init__(self) -> None:\n super().__init__()\n self.changed = asyncio.Condition()\n self._values: Collection[_T] = [] # 0..1 item\n\n def get_nowait(self) -> _T: # used mostly in testing\n try:\n return next(iter(self._values))\n except StopIteration:\n raise LookupError(\"No value is stored in the container.\") from None\n\n async def set(self, value: _T) -> None:\n async with self.changed:\n self._values = [value]\n self.changed.notify_all()\n\n async def wait(self) -> _T:\n async with self.changed:\n await self.changed.wait_for(lambda: self._values)\n try:\n return next(iter(self._values))\n except StopIteration: # impossible because of the condition's predicate\n raise LookupError(\"No value is stored in the container.\") from None\n\n async def reset(self) -> None:\n async with self.changed:\n self._values = []\n self.changed.notify_all()\n\n async def as_changed(self) -> AsyncIterator[_T]:\n async with self.changed:\n while True:\n try:\n yield next(iter(self._values))\n except StopIteration:\n pass\n await self.changed.wait()\n\n\n# Mind the value: it can be bool-evaluatable but non-bool -- always convert it.\nclass Toggle:\n \"\"\"\n An synchronisation primitive that can be awaited both until set or cleared.\n\n For one-directional toggles, `asyncio.Event` is sufficient.\n But these events cannot be awaited until cleared.\n\n The bi-directional toggles are needed in some places in the code, such as\n in the population/depletion of a `Vault`, or as in the operator's pause.\n\n The optional name is used only for hinting in reprs. 
It can be used when\n there are many toggles, and they need to be distinguished somehow.\n \"\"\"\n\n def __init__(\n self,\n __state: bool = False,\n *,\n name: Optional[str] = None,\n condition: Optional[asyncio.Condition] = None,\n ) -> None:\n super().__init__()\n self._condition = condition if condition is not None else asyncio.Condition()\n self._state: bool = bool(__state)\n self._name = name\n\n def __repr__(self) -> str:\n clsname = self.__class__.__name__\n toggled = 'on' if self._state else 'off'\n if self._name is None:\n return f'<{clsname}: {toggled}>'\n else:\n return f'<{clsname}: {self._name}: {toggled}>'\n\n def __bool__(self) -> bool:\n raise NotImplementedError # to protect against accidental misuse\n\n def is_on(self) -> bool:\n return self._state\n\n def is_off(self) -> bool:\n return not self._state\n\n async def turn_to(self, __state: bool) -> None:\n \"\"\" Turn the toggle on/off, and wake up the tasks waiting for that. \"\"\"\n async with self._condition:\n self._state = bool(__state)\n self._condition.notify_all()\n\n async def wait_for(self, __state: bool) -> None:\n \"\"\" Wait until the toggle is turned on/off as expected (if not yet). \"\"\"\n async with self._condition:\n await self._condition.wait_for(lambda: self._state == bool(__state))\n\n @property\n def name(self) -> Optional[str]:\n return self._name\n\n\nclass ToggleSet(Collection[Toggle]):\n \"\"\"\n A read-only checker for multiple toggles.\n\n The toggle-checker does not have its own state to be turned on/off.\n\n The positional argument is a function, usually :func:`any` or :func:`all`,\n which takes an iterable of all individual toggles' states (on/off),\n and calculates the overall state of the toggle set.\n\n With :func:`any`, the set is \"on\" when at least one child toggle is \"on\"\n (and it has at least one child), and it is \"off\" when all children toggles\n are \"off\" (or if it has no children toggles at all).\n\n With :func:`all`, the set is \"on\" when all of its children toggles are \"on\"\n (or it has no children at all), and it is \"off\" when at least one child\n toggle is \"off\" (and there is at least one toggle).\n\n The multi-toggle sets are used mostly for operator pausing,\n e.g. in peering and in index pre-population. 
For a practical example,\n in peering, every individual peering identified by name and namespace has\n its own individual toggle to manage, but the whole set of toggles of all\n names & namespaces is used for pausing the operator as one single toggle.\n In index pre-population, the toggles are used on the operator's startup\n to temporarily delay the actual resource handling until all index-handlers\n of all involved resources and resource kinds are processed and stored.\n\n Note: the set can only contain toggles that were produced by the set;\n externally produced toggles cannot be added, since they do not share\n the same condition object, which is used for synchronisation/notifications.\n \"\"\"\n\n def __init__(self, fn: Callable[[Iterable[bool]], bool]) -> None:\n super().__init__()\n self._condition = asyncio.Condition()\n self._toggles: Set[Toggle] = set()\n self._fn = fn\n\n def __repr__(self) -> str:\n return repr(self._toggles)\n\n def __len__(self) -> int:\n return len(self._toggles)\n\n def __iter__(self) -> Iterator[Toggle]:\n return iter(self._toggles)\n\n def __contains__(self, toggle: object) -> bool:\n return toggle in self._toggles\n\n def __bool__(self) -> bool:\n raise NotImplementedError # to protect against accidental misuse\n\n def is_on(self) -> bool:\n return self._fn(toggle.is_on() for toggle in self._toggles)\n\n def is_off(self) -> bool:\n return not self.is_on()\n\n async def wait_for(self, __state: bool) -> None:\n async with self._condition:\n await self._condition.wait_for(lambda: self.is_on() == bool(__state))\n\n async def make_toggle(\n self,\n __val: bool = False,\n *,\n name: Optional[str] = None,\n ) -> Toggle:\n toggle = Toggle(__val, name=name, condition=self._condition)\n async with self._condition:\n self._toggles.add(toggle)\n self._condition.notify_all()\n return toggle\n\n async def drop_toggle(self, toggle: Toggle) -> None:\n async with self._condition:\n self._toggles.discard(toggle)\n self._condition.notify_all()\n\n async def drop_toggles(self, toggles: Iterable[Toggle]) -> None:\n async with self._condition:\n self._toggles.difference_update(toggles)\n self._condition.notify_all()\n\n\nclass DaemonStoppingReason(enum.Flag):\n \"\"\"\n A reason or reasons of daemon being terminated.\n\n Daemons are signalled to exit usually for two reasons: the operator itself\n is exiting or restarting, so all daemons of all resources must stop;\n or the individual resource was deleted, but the operator continues running.\n\n No matter the reason, the daemons must exit, so one and only one stop-flag\n is used. 
Some daemons can check the reason of exiting if it is important.\n\n There can be multiple reasons combined (in rare cases, all of them).\n \"\"\"\n NONE = 0\n DONE = enum.auto() # whatever the reason and the status, the asyncio task has exited.\n FILTERS_MISMATCH = enum.auto() # the resource does not match the filters anymore.\n RESOURCE_DELETED = enum.auto() # the resource was deleted, the asyncio task is still awaited.\n OPERATOR_PAUSING = enum.auto() # the operator is pausing, the asyncio task is still awaited.\n OPERATOR_EXITING = enum.auto() # the operator is exiting, the asyncio task is still awaited.\n DAEMON_SIGNALLED = enum.auto() # the stopper flag was set, the asyncio task is still awaited.\n DAEMON_CANCELLED = enum.auto() # the asyncio task was cancelled, the thread can be running.\n DAEMON_ABANDONED = enum.auto() # we gave up on the asyncio task, the thread can be running.\n\n\nclass DaemonStopper:\n \"\"\"\n A boolean flag indicating that the daemon should stop and exit.\n\n Every daemon gets a ``stopper`` kwarg, which is an event-like object.\n The stopper is raised in two cases:\n\n * The corresponding k8s object is deleted, so the daemon should stop.\n * The whole operator is stopping, so all the daemons should stop too.\n\n The stopper flag is a graceful way of a daemon termination.\n If the daemons do not react to their stoppers, and continue running,\n their tasks are cancelled by raising a `asyncio.CancelledError`.\n\n .. warning::\n In case of synchronous handlers, which are executed in the threads,\n this can lead to the OS resource leakage:\n there is no way to kill a thread in Python, so it will continue running\n forever or until failed (e.g. on an API call for an absent resource).\n The orphan threads will block the operator's process from exiting,\n thus affecting the speed of restarts.\n \"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n self.when: Optional[float] = None\n self.reason = DaemonStoppingReason.NONE\n self.sync_checker = SyncDaemonStopperChecker(self)\n self.async_checker = AsyncDaemonStopperChecker(self)\n self.sync_event = threading.Event()\n self.async_event = asyncio.Event()\n\n def __repr__(self) -> str:\n return f'<{self.__class__.__name__}: {self.is_set()}, reason={self.reason}>'\n\n def is_set(self, reason: Optional[DaemonStoppingReason] = None) -> bool:\n \"\"\"\n Check if the daemon stopper is set: at all or for specific reason.\n \"\"\"\n return ((reason is None or reason in self.reason) and self.sync_event.is_set())\n\n def set(self, *, reason: DaemonStoppingReason) -> None:\n self.when = self.when if self.when is not None else time.monotonic()\n self.reason |= reason\n self.sync_event.set()\n self.async_event.set() # it is thread-safe: always called in operator's event loop.\n\n\nclass DaemonStopperChecker:\n\n \"\"\"\n A minimalistic read-only checker for the daemons from the user side.\n\n This object is fed into the :kwarg:`stopped` kwarg for the handlers.\n\n The actual stopper is hidden from the users, and is an internal class.\n The users should not be able to trigger the stopping activities or\n check the reasons of stopping (or know about them at all).\n\n Usage::\n\n @kopf.daemon('kopfexamples')\n def handler(stopped, **kwargs):\n while not stopped:\n ...\n stopped.wait(60)\n \"\"\"\n\n def __init__(self, stopper: DaemonStopper) -> None:\n super().__init__()\n self._stopper = stopper\n\n def __repr__(self) -> str:\n return repr(self._stopper)\n\n def __bool__(self) -> bool:\n return self._stopper.is_set()\n\n def 
is_set(self) -> bool:\n return self._stopper.is_set()\n\n @property\n def reason(self) -> DaemonStoppingReason:\n return self._stopper.reason\n\n\nclass SyncDaemonStopperChecker(DaemonStopperChecker):\n def wait(self, timeout: Optional[float] = None) -> bool:\n self._stopper.sync_event.wait(timeout=timeout)\n return bool(self)\n\n\nclass AsyncDaemonStopperChecker(DaemonStopperChecker):\n async def wait(self, timeout: Optional[float] = None) -> bool:\n try:\n await asyncio.wait_for(self._stopper.async_event.wait(), timeout=timeout)\n except asyncio.TimeoutError:\n pass\n return bool(self)\n\n\n# Having this union allows both sync & async checkers in the same protocol,\n# while not restricting the use of `wait()` as if the base class would be used.\nSyncAsyncDaemonStopperChecker = Union[SyncDaemonStopperChecker, AsyncDaemonStopperChecker]\n\n\nasync def sleep_or_wait(\n delays: Union[None, float, Collection[Union[None, float]]],\n wakeup: Optional[Union[asyncio.Event, DaemonStopper]] = None,\n) -> Optional[float]:\n \"\"\"\n Measure the sleep time: either until the timeout, or until the event is set.\n\n Returns the number of seconds left to sleep, or ``None`` if the sleep was\n not interrupted and reached its specified delay (an equivalent of ``0``).\n In theory, the result can be ``0`` if the sleep was interrupted precisely\n the last moment before timing out; this is unlikely to happen though.\n \"\"\"\n passed_delays = delays if isinstance(delays, collections.abc.Collection) else [delays]\n actual_delays = [delay for delay in passed_delays if delay is not None]\n minimal_delay = min(actual_delays) if actual_delays else 0\n\n # Do not go for the real low-level system sleep if there is no need to sleep.\n if minimal_delay <= 0:\n return None\n\n awakening_event = (\n wakeup.async_event if isinstance(wakeup, DaemonStopper) else\n wakeup if wakeup is not None else\n asyncio.Event())\n\n loop = asyncio.get_running_loop()\n try:\n start_time = loop.time()\n await asyncio.wait_for(awakening_event.wait(), timeout=minimal_delay)\n except asyncio.TimeoutError:\n return None # interruptable sleep is over: uninterrupted.\n else:\n end_time = loop.time()\n duration = end_time - start_time\n return max(0, minimal_delay - duration)\n","sub_path":"kopf/structs/primitives.py","file_name":"primitives.py","file_ext":"py","file_size_in_byte":16269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"513634466","text":"records = []\n\ntypechs = []\n\nlengths = []\n\nfields = []\n\n\ndef load(filename):\n\n global records\n\n global typechs\n\n global lengths\n\n global fields\n\n\n try:\n\n with open(filename, 'r') as file:\n\n fields = file.readline().rstrip().split(':')\n\n lengths = file.readline().rstrip().split(':')\n\n typechs = file.readline().rstrip().split(':')\n\n\n for line in file:\n\n line = line.rstrip()\n\n if line:\n\n record = line.split(':')\n\n records.append(record)\n\n except FileNotFoundError:\n\n return -1\n\n\ndef save(filename):\n\n global records\n\n global typechs\n\n global lengths\n\n global fields\n\n\n data = ''\n\n\n for f in fields: data += f + ':'\n\n data = data[:-1] + '\\n'\n\n for l in lengths: data += l + ':'\n\n data = data[:-1] + '\\n'\n\n for tc in typechs: data += tc + ':'\n\n data = data[:-1] + '\\n'\n\n\n for record in records:\n\n for field in record: data += field + ':'\n\n data = data[:-1] + '\\n'\n\n\n with open(filename, 'w') as file:\n\n file.write(data)\n\n\ndef search(id):\n\n global records\n\n\n i = 
0\n\n for record in records:\n\n if int(record[0]) == int(id): return i \n\n i += 1\n\n\n return -1 \n\n \ndef add(record):\n\n global records\n\n\n i = search(int(record[0]))\n\n if not i == -1: return -1\n\n\n records.append(record)\n\n\n return search(int(record[0])) \n\n\ndef remove(id):\n\n global records\n\n\n i = search(id)\n\n if i == -1: return -1\n\n\n update = records[:i]\n\n update += records[i+1:]\n\n\n records = update\n\n\n return i \n\n","sub_path":"dbmgr.py","file_name":"dbmgr.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"266933889","text":"import math\nimport random\n\n__author__ = 'Sam Broderick'\n\n\"\"\"\nDesigned to test get_ordered_adoption_center_list and\nget_adopters_for_advertisement functions for ps7 from MIT 6.00.1x\n\"\"\"\n\n\nclass AdoptionCenter:\n \"\"\"\n The AdoptionCenter class stores the important information that a\n client would need to know about, such as the different numbers of\n species stored, the location, and the name. It also has a method to adopt a pet.\n \"\"\"\n\n def __init__(self, name, species_types, location):\n self.name = name\n self.species_types = species_types\n self.location = (float(location[0]), float(location[1]))\n\n def get_number_of_species(self, animal):\n try:\n return self.species_types[animal]\n except KeyError:\n return 0\n\n def get_location(self):\n return self.location\n\n def get_species_count(self):\n return self.species_types.copy()\n\n def get_name(self):\n return self.name\n\n def adopt_pet(self, species):\n try:\n if self.species_types[species] > 0:\n self.species_types[species] -= 1\n if self.species_types[species] == 0:\n self.species_types.pop(species)\n except KeyError:\n pass\n\n\nclass Adopter:\n \"\"\"\n Adopters represent people interested in adopting a species.\n They have a desired species type that they want, and their score is\n simply the number of species that the shelter has of that species.\n \"\"\"\n\n def __init__(self, name, desired_species):\n self.name = name\n self.desired_species = desired_species\n\n def get_name(self):\n return self.name\n\n def get_desired_species(self):\n return self.desired_species\n\n def get_score(self, adoption_center):\n adoption_center_species = adoption_center.get_species_count()\n try:\n num_desired = float(adoption_center_species[self.desired_species])\n except KeyError:\n num_desired = 0\n return 1 * num_desired\n\n\nclass FlexibleAdopter(Adopter):\n \"\"\"\n A FlexibleAdopter still has one type of species that they desire,\n but they are also alright with considering other types of species.\n considered_species is a list containing the other species the adopter will consider\n Their score should be 1x their desired species + .3x all of their desired species\n \"\"\"\n\n def __init__(self, name, desired_species, considered_species):\n Adopter.__init__(self, name, desired_species)\n assert len(considered_species) > 0\n self.considered_species = considered_species\n\n def get_score(self, adoption_center):\n adoption_center_species = adoption_center.get_species_count()\n adopter_score = 0\n try:\n adopter_score = float(adoption_center_species[self.desired_species])\n except KeyError:\n pass\n num_other = 0\n for species in self.considered_species:\n try:\n num_other += float(adoption_center_species[species])\n except KeyError:\n pass\n return adopter_score + 0.3 * num_other\n\n\nclass FearfulAdopter(Adopter):\n \"\"\"\n A FearfulAdopter is afraid of a 
particular species of animal.\n    If the adoption center has one or more of those animals in it, they will\n    be a bit more reluctant to go there due to the presence of the feared species.\n    Their score should be 1x number of desired species - .3x the number of feared species\n    \"\"\"\n\n    def __init__(self, name, desired_species, feared_species):\n        Adopter.__init__(self, name, desired_species)\n        assert type(feared_species) == str\n        self.feared_species = feared_species\n\n    def get_score(self, adoption_center):\n        adoption_center_species = adoption_center.get_species_count()\n        adopter_score = 0\n        try:\n            adopter_score = float(adoption_center_species[self.desired_species])\n        except KeyError:\n            pass\n        num_other = 0  # Initialize for the KeyError case\n        try:  # Feared species is not necessarily there\n            num_other = float(adoption_center_species[self.feared_species])\n        except KeyError:\n            pass\n        score = adopter_score - 0.3 * num_other\n        if score < 0:\n            score = 0.0\n        return score\n\n\nclass AllergicAdopter(Adopter):\n    \"\"\"\n    An AllergicAdopter is extremely allergic to one or more species and cannot\n    even be around them a little bit! If the adoption center contains one or more of\n    these animals, they will not go there.\n    Score should be 0 if the center contains any of the animals, or 1x number of desired animals if not\n    \"\"\"\n\n    def __init__(self, name, desired_species, allergic_species):\n        Adopter.__init__(self, name, desired_species)\n        self.allergic_species = allergic_species\n\n    def get_score(self, adoption_center):\n        adoption_center_species = adoption_center.get_species_count()\n        adopter_score = 0.0\n        allergic_species_present = False\n        for species in self.allergic_species:\n            try:\n                if adoption_center_species[species] > 0:\n                    allergic_species_present = True\n            except KeyError:\n                pass\n        try:\n            adopter_score = float(adoption_center_species[self.desired_species])\n        except KeyError:\n            pass\n        if allergic_species_present:\n            return 0.0\n        else:\n            return adopter_score\n\n\n
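# Illustrative check (added example, not part of the original assignment):\n# an AllergicAdopter scores 0 whenever any allergen is present, e.g.\n# AllergicAdopter(\"Al\", \"Cat\", [\"Dog\"]).get_score(\n#     AdoptionCenter(\"P\", {\"Cat\": 5, \"Dog\": 1}, (0, 0))) evaluates to 0.0.\n\n\n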
class MedicatedAllergicAdopter(AllergicAdopter):\n    \"\"\"\n    A MedicatedAllergicAdopter is extremely allergic to a particular species.\n    However! They have a medicine of varying effectiveness, which will be\n    given in a dictionary\n    To calculate the score for a specific adoption center, we want to find\n    what is the most allergy-inducing species that the adoption center has\n    for the particular MedicatedAllergicAdopter.\n    To do this, first examine what species the AdoptionCenter has that the\n    MedicatedAllergicAdopter is allergic to, then compare them to the\n    medicine_effectiveness dictionary.\n    Take the lowest medicine_effectiveness found for these species, and\n    multiply that value by the Adopter's calculate score method.\n    \"\"\"\n\n    def __init__(self, name, desired_species, allergic_species,\n                 medicine_effectiveness):\n        AllergicAdopter.__init__(self, name, desired_species, allergic_species)\n        assert type(medicine_effectiveness) == dict\n        self.medicine_effectiveness = medicine_effectiveness\n\n    def get_score(self, adoption_center):\n        adoption_center_species = adoption_center.get_species_count()\n        adopter_score = 0.0\n        problem_species = []\n\n        for species in self.allergic_species:\n            try:\n                if adoption_center_species[species] > 0:\n                    problem_species.append(self.\n                                           medicine_effectiveness[species])\n            except KeyError:\n                pass\n        try:\n            adopter_score = float(adoption_center_species[self.\n                                  desired_species])\n        except KeyError:\n            pass\n        if len(problem_species) > 0:\n            return adopter_score * min(problem_species)\n        else:\n            return adopter_score\n\n\nclass SluggishAdopter(Adopter):\n    \"\"\"\n    A SluggishAdopter really dislikes travelling. The further away the\n    AdoptionCenter is linearly, the less likely they will want to visit it.\n    Since we are not sure of the specific mood the SluggishAdopter will be in on a\n    given day, we will assign their score with a random modifier depending on\n    distance as a guess.\n    Score should be\n    If distance < 1 return 1 x number of desired species\n    elif distance < 3 return random between (.7, .9) times number of desired species\n    elif distance < 5 return random between (.5, .7) times number of desired species\n    else return random between (.1, .5) times number of desired species\n    \"\"\"\n\n    def __init__(self, name, desired_species, location):\n        Adopter.__init__(self, name, desired_species)\n        assert type(location) == tuple\n        self.location = location\n\n    def get_linear_distance(self, to_location):\n        delta_x_sq = (to_location[0] - self.location[0]) ** 2  # square of the x dist.\n        delta_y_sq = (to_location[1] - self.location[1]) ** 2  # square of the y dist.\n        return math.sqrt(delta_x_sq + delta_y_sq)\n\n    def get_score(self, adoption_center):\n        adoption_center_species = adoption_center.get_species_count()\n        adoption_center_distance = self.get_linear_distance(adoption_center.get_location())\n        try:\n            num_desired = float(adoption_center_species[self.desired_species])\n        except KeyError:\n            num_desired = 0.0\n        if adoption_center_distance < 1:\n            return 1 * num_desired\n        elif 1 <= adoption_center_distance < 3:\n            return random.uniform(0.7, 0.9) * num_desired\n        elif 3 <= adoption_center_distance < 5:\n            return random.uniform(0.5, 0.7) * num_desired\n        elif adoption_center_distance >= 5:\n            return random.uniform(0.1, 0.5) * num_desired\n\n\ndef get_ordered_adoption_center_list(adopter, list_of_adoption_centers):\n    \"\"\"\n    The function returns the list of adoption centers sorted so that the\n    Adopter's scores for them run from highest score to lowest score.\n    :param adopter: looking to adopt a pet\n    :param list_of_adoption_centers: list of centers with pets to adopt\n    \"\"\"\n    local_copy = list_of_adoption_centers[:]  # copy to prevent changing original\n    local_copy.sort(key=lambda x: (-adopter.get_score(x), x.get_name()))\n\n    # returns just the sorted list of names without scores\n    return local_copy\n\n\ndef get_adopters_for_advertisement(adoption_center, list_of_adopters, n):\n    \"\"\"\n    The function returns a list of the top n scoring Adopters from\n    list_of_adopters (in numerical order of score)\n    :param adoption_center: adoption center placing an ad\n    :param list_of_adopters: list of potential pet adopters\n    :param n: maximum number of ads\n    \"\"\"\n\n    local_copy = list_of_adopters[:]  # copy to prevent changing original\n    local_copy.sort(key=lambda x: (-x.get_score(adoption_center), x.get_name()))\n\n    if len(list_of_adopters) > n:\n        return local_copy[:n]\n    else:\n        return local_copy\n\n\nadopter = MedicatedAllergicAdopter(\"One\", \"Cat\", ['Dog', 'Horse'],\n                                   {\"Dog\": .5, \"Horse\": 0.2})\nadopter2 = Adopter(\"Two\", \"Cat\")\nadopter3 = FlexibleAdopter(\"Three\", \"Horse\", [\"Lizard\", \"Cat\"])\nadopter4 = FearfulAdopter(\"Four\", \"Cat\", \"Dog\")\nadopter5 = SluggishAdopter(\"Five\", \"Cat\", (1, 2))\nadopter6 = AllergicAdopter(\"Six\", \"Cat\", [\"Dog\"])\n\nac = AdoptionCenter(\"Place1\", {\"Mouse\": 12, \"Dog\": 2}, (1, 1))\nac2 = AdoptionCenter(\"Place2\", {\"Cat\": 12, \"Lizard\": 2}, (3, 5))\nac3 = AdoptionCenter(\"Place3\", {\"Horse\": 25, \"Dog\": 9}, (-2, 10))\n\nadopters = [adopter, adopter2, adopter3, adopter4, adopter5, adopter6]\n\n# how to test get_adopters_for_advertisement\ntest1 = get_adopters_for_advertisement(ac, [adopter, adopter2, adopter3,\n                                            adopter4, adopter5, adopter6], 10)\n# you can print the name and score of each item in the list returned\nprint('======= Test 1 =======')\nfor a in test1:\n    print(a.get_name())\nprint('=======')\n\nfor a in adopters:\n    print('Adopter {0}: score: {1}'.format(a.get_name(), a.get_score(ac)))\n\nprint('======= Test 2 =======')\ntest2 = 
get_adopters_for_advertisement(ac2, [adopter, adopter2, adopter3,\n                                             adopter4, adopter5, adopter6], 10)\nfor a in test2:\n    print(a.get_name())\nprint('=======')\n\nfor a in adopters:\n    print('Adopter {0}: score: {1}'.format(a.name, a.get_score(ac2)))\n\nprint('======= Test 3 =======')\ntest3 = get_adopters_for_advertisement(ac3, [adopter, adopter2, adopter3,\n                                             adopter4, adopter5, adopter6], 10)\nfor a in test3:\n    print(a.get_name())\nprint('=======')\n\nfor a in adopters:\n    print('Adopter {0}: score: {1}'.format(a.name, a.get_score(ac3)))\n\nprint()\nadopter4 = FearfulAdopter(\"Four\", \"Cat\", \"Dog\")\nadopter5 = SluggishAdopter(\"Five\", \"Cat\", (1, 2))\nadopter6 = AllergicAdopter(\"Six\", \"Lizard\", [\"Cat\"])\n\nac = AdoptionCenter(\"Place1\", {\"Cat\": 12, \"Dog\": 2}, (1, 1))\nac2 = AdoptionCenter(\"Place2\", {\"Cat\": 12, \"Lizard\": 2}, (3, 5))\nac3 = AdoptionCenter(\"Place3\", {\"Cat\": 40, \"Dog\": 4}, (-2, 10))\nac4 = AdoptionCenter(\"Place4\", {\"Cat\": 33, \"Horse\": 5}, (-3, 0))\nac5 = AdoptionCenter(\"Place5\", {\"Cat\": 45, \"Lizard\": 2}, (8, -2))\nac6 = AdoptionCenter(\"Place6\", {\"Cat\": 23, \"Dog\": 7, \"Horse\": 5}, (-10, 10))\n\n# how to test get_ordered_adoption_center_list\ntest_acs = list()\ntest_acs.append(get_ordered_adoption_center_list(adopter4, [ac, ac2, ac3, ac4, ac5, ac6]))\ntest_acs.append(get_ordered_adoption_center_list(adopter5, [ac, ac2, ac3, ac4, ac5, ac6]))\ntest_acs.append(get_ordered_adoption_center_list(adopter6, [ac, ac2, ac3, ac4, ac5, ac6]))\n# you can print the name and score of each item in the list returned\nprint('======= Test get_ordered_adoption_center_list =======')\nprint()\n\nfor a in test_acs:\n    print('=======')\n    for c in a:\n        print(c.get_name())\n\nprint('=======')\nfor c in test_acs[1]:\n    print('{0}: distance: {1} normal: {2} score: {3}'.\n          format(c.get_name(), adopter5.get_linear_distance(c.get_location()),\n                 c.get_number_of_species(adopter5.desired_species),\n                 adopter5.get_score(c)))\n","sub_path":"ps7/ps7_match_center_adopter_test.py","file_name":"ps7_match_center_adopter_test.py","file_ext":"py","file_size_in_byte":13656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"631685166","text":"import arcpy\nfrom arcpy.sa import *\nimport json\nimport os\n\n\ndef getImperv(inShpPath, inlandUsePath, indexField, weights):\n    # Process Path\n    if not os.path.exists('Process'):\n        os.makedirs('Process')\n    processPath = \"./Process/\"\n    # Process Data Path\n    outShpTempPath = processPath + \"subcaTemp.shp\"\n    outLandTempPath = processPath + \"landTemp.shp\"\n    outLandRasterPath = processPath + \"landUseRaster\"\n    outTablePath = processPath + \"impervTable.dbf\"\n    # Output Path\n    if not os.path.exists('Result'):\n        os.makedirs('Result')\n    # Output Data Path\n    outShpPath = \"./Result/subcatchments.shp\"\n    # Execute CopyFeatures\n    arcpy.CopyFeatures_management(inShpPath, outShpTempPath)\n    arcpy.CopyFeatures_management(inlandUsePath, outLandTempPath)\n    # Check out the ArcGIS Spatial Analyst extension license\n    arcpy.CheckOutExtension(\"Spatial\")\n    # Execute AddField\n    arcpy.AddField_management(outLandTempPath, \"Weight\", \"DOUBLE\", \"\", \"\", \"\", \"weight\", \"NULLABLE\", \"NON_REQUIRED\", \"\")\n    # Calculate Weight\n    codeBlock = \"\"\"\ndef getWeight(type, weights):\n    return weights[type]\"\"\"\n    expression = \"getWeight( !category! 
,\" + json.dumps(weights) + \")\"\n arcpy.CalculateField_management(outLandTempPath, \"Weight\", expression, \"PYTHON\", codeBlock)\n # Execute FeatureToRaster\n arcpy.FeatureToRaster_conversion(outLandTempPath, \"Weight\", outLandRasterPath, 5)\n # Execute ZonalStatisticsAsTable\n outImperv = ZonalStatisticsAsTable(outShpTempPath, indexField, outLandRasterPath, outTablePath, \"NODATA\", \"MEAN\")\n # Execute AddField\n arcpy.AddField_management(outShpTempPath, \"Imperv\", \"DOUBLE\", \"\", \"\", \"\", \"imperv\", \"NULLABLE\", \"NON_REQUIRED\", \"\")\n # Create a feature layer\n layerName = \"subcaShp\"\n arcpy.MakeFeatureLayer_management(outShpTempPath, layerName)\n # Join the feature layer to a table\n arcpy.AddJoin_management(layerName, indexField, outImperv, indexField)\n # Calculate Slope\n arcpy.CalculateField_management(layerName, \"Imperv\", \"!impervTable.MEAN!\", \"PYTHON\")\n # Remove the join\n arcpy.RemoveJoin_management(layerName, \"impervTable\")\n # Copy the layer to a new permanent feature class\n arcpy.CopyFeatures_management(layerName, outShpPath)\n return outShpPath\n\n\nif __name__ == '__main__':\n weights = {\"A\": 0.9, \"B\": 0.6, \"C\": 0.4, \"D\": 0.15}\n getImperv(\"./Data/subcatchments.shp\", \"./Data/landUse.shp\", \"sOrder\", weights)\n","sub_path":"modelDeploy/Subcatchments_Imperv/Subcatchments_Imperv.py","file_name":"Subcatchments_Imperv.py","file_ext":"py","file_size_in_byte":2433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"537257019","text":"# pylint: disable=C0103,C0111\n\nimport json\nimport unittest\nimport mock\n\nimport bumblebee.input\nfrom bumblebee.input import I3BarInput\nfrom bumblebee.modules.disk import Module\nfrom tests.util import MockEngine, MockConfig, assertPopen, assertStateContains, MockEpoll\n\nclass MockVFS(object):\n def __init__(self, perc):\n self.f_blocks = 1024*1024\n self.f_frsize = 1\n self.f_bavail = self.f_blocks - self.f_blocks*(perc/100.0)\n\nclass TestDiskModule(unittest.TestCase):\n def setUp(self):\n self.engine = MockEngine()\n self.engine.input = I3BarInput()\n self.engine.input.need_event = True\n self.config = MockConfig()\n self.config.set(\"disk.path\", \"somepath\")\n self.module = Module(engine=self.engine, config={\"config\": self.config})\n\n @mock.patch(\"select.epoll\")\n @mock.patch(\"subprocess.Popen\")\n @mock.patch(\"sys.stdin\")\n def test_leftclick(self, mock_input, mock_output, mock_select):\n mock_input.readline.return_value = json.dumps({\n \"name\": self.module.id,\n \"button\": bumblebee.input.LEFT_MOUSE,\n \"instance\": None\n })\n mock_select.return_value = MockEpoll()\n self.engine.input.start()\n self.engine.input.stop()\n mock_input.readline.assert_any_call()\n assertPopen(mock_output, \"nautilus {}\".format(self.module.parameter(\"path\")))\n\n @mock.patch(\"os.statvfs\")\n def test_warning(self, mock_stat):\n self.config.set(\"disk.critical\", \"80\")\n self.config.set(\"disk.warning\", \"70\")\n mock_stat.return_value = MockVFS(75.0)\n assertStateContains(self, self.module, \"warning\")\n\n @mock.patch(\"os.statvfs\")\n def test_critical(self, mock_stat):\n self.config.set(\"disk.critical\", \"80\")\n self.config.set(\"disk.warning\", \"70\")\n mock_stat.return_value = MockVFS(85.0)\n assertStateContains(self, self.module, \"critical\")\n\n# vim: tabstop=8 expandtab shiftwidth=4 
softtabstop=4\n","sub_path":".config/bumblebee-status/tests/modules/test_disk.py","file_name":"test_disk.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"434134221","text":"import numpy as np\n\nimport gym\n\n\nclass GymTask():\n\n    def __init__(self, task_name='MountainCarContinuous-v0'):\n        self.env = gym.make(task_name)\n\n        self.action_repeat = 3\n\n        self.state_size = self.action_repeat * (\n            self.env.observation_space.shape[0]\n        )\n        self.action_low = self.env.action_space.low[0]\n        self.action_high = self.env.action_space.high[0]\n        self.action_size = self.env.action_space.shape[0]\n\n    def reset(self):\n        \"\"\"Reset the sim to start a new episode.\"\"\"\n        state = self.env.reset()\n        state = np.concatenate([state] * self.action_repeat)\n        return state\n\n    def step(self, action):\n        reward = 0\n        state_all = []\n        for _ in range(self.action_repeat):\n            # update the sim pose and velocities\n            next_state, step_reward, done, _ = self.env.step(action)\n            state_all.append(next_state)\n            reward += step_reward\n\n        next_state = np.concatenate(state_all)\n        return next_state, reward, done\n","sub_path":"gym_task.py","file_name":"gym_task.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"254472258","text":"Category = input(\"What category of the budget would you like to access? \\n\")\nAllowed_Category = ['Food','Clothing','Entertainment']\nif (Category in Allowed_Category):\n    print ('Welcome to the %s Category' % Category)\n\n\nOptions = input(\"What do you want to do? \\n\")\nAllowed_Options = ['Deposit','Withdraw','Compute Balance', 'Transfer between categories']\nif (Options in Allowed_Options):\n    print ('Welcome to the %s Section' % Options)\n\nStarting_balance = [0,0,0]\n\n
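# Note (added): Starting_balance is never used below, and Budget's methods are\n# defined conditionally, so only the method matching the Option chosen above\n# exists on the class at runtime.\n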
class Budget:\n    if (Options == 'Deposit'):\n        def deposit_money(self):\n            deposit_question = int(input(\"How much would you like to deposit in your\" + self.deposit + \" ? \\n\" ))\n            print (\"%d$ deposited in\" % deposit_question + self.deposit)\n    if (Options == 'Withdraw'):\n        def withdraw_money(self):\n            withdraw_question = int(input(\"How much would you like to withdraw from your\" + self.withdraw + \" ? \\n\" ))\n            print(\"%d$ withdrawn from\" % withdraw_question + self.withdraw)\n    if (Options == 'Compute Balance'):\n        def Balance_money(self):\n            balance_question = input(\"What Category would you like to know the balance ? \\n\")\n            print(balance_question)\nFood = Budget()\nFood.deposit = \" Food Budget\"\nFood.withdraw = \" Food Budget\"\n\nClothing = Budget()\nClothing.deposit = \" Clothing Budget\"\nClothing.withdraw = \" Clothing Budget\"\n\nEntertainment = Budget()\nEntertainment.deposit = \" Entertainment Budget\"\nEntertainment.withdraw = \" Entertainment Budget\"\n\nif (Category == 'Food' and Options == 'Deposit'):\n    Food.deposit_money()\n    \nif (Category == 'Food' and Options == 'Withdraw'):\n    Food.withdraw_money()\n\nif (Category == 'Clothing' and Options == 'Deposit'):\n    Clothing.deposit_money()\n\nif (Category == 'Clothing' and Options == 'Withdraw'):\n    Clothing.withdraw_money()\n\nif (Category == 'Entertainment' and Options == 'Deposit'):\n    Entertainment.deposit_money()\n\nif (Category == 'Entertainment' and Options == 'Withdraw'):\n    Entertainment.withdraw_money() \n","sub_path":"Classes.py","file_name":"Classes.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"351197338","text":"from .layers import MaskConv2d, ActQuant\nimport torch.nn as nn\nfrom .teeNet1 import VGGBlock\n\n\nclass TEE_VGG18Structure(nn.Module):\n    def __init__(self, cfg, module_type, num_classes=20, convs=[nn.Conv2d]*5, quants=[ActQuant]*5):\n        super(TEE_VGG18Structure, self).__init__()\n        self.num_classes = int(num_classes)\n\n        self.layer1 = self.make_layers(VGGBlock, module_type, cfg[0], convs[0], quants[0])\n        self.layer2 = self.make_layers(VGGBlock, module_type, cfg[1], convs[1], quants[1])\n        self.layer3 = self.make_layers(VGGBlock, module_type, cfg[2], convs[2], quants[2])\n        self.layer4 = self.make_layers(VGGBlock, module_type, cfg[3], convs[3], quants[3])\n        self.layer5 = self.make_layers(VGGBlock, module_type, cfg[4], convs[4], quants[4])\n\n        self.avgpool = nn.AvgPool2d(7, stride=1)\n        #self.linear = nn.Linear(512, num_classes)\n        self.classifier = nn.Sequential(\n            nn.Linear(512, self.num_classes),\n        )\n\n\n    def make_layers(self, block, module_type, cfg, conv, quant):\n        layers = []\n        in_channels = cfg[0]\n        for layer in cfg[1:]:\n            layers.append(block([in_channels, layer], module_type, 1, conv, quant))\n            in_channels = layer\n        layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n        return nn.Sequential(*layers)\n\n    def forward(self, x):\n        x *= 255\n        x /= 8\n        x -= 0.499\n        x = x.round()\n        out = self.layer1(x)\n        out = self.layer2(out)\n        out = self.layer3(out)\n        out = self.layer4(out)\n        out = self.layer5(out)\n        out = self.avgpool(out)\n        out = out.view(out.size(0), -1)\n        #out = self.linear(out)\n        out = self.classifier(out)\n        return out\n\ncfg = [[3, 64, 64], [64, 64, 64, 64, 64], [64, 128, 128, 128, 128], [128, 256, 256, 256, 256], [256, 512, 512, 512, 512]]\n\ndef TEE_VGG18Wrapper(cfg, module_type, num_classes=40, mask_bits=[1]*5, act_bits=[5]*5):\n    assert len(mask_bits) == 5, 'needs masks for 5 major layers'\n    assert len(act_bits) == 5, 'needs activations for 5 major layers'\n\n    convs = []\n    quants = []\n\n    for mask, act in zip(mask_bits, act_bits):\n        if mask == 0:\n            conv = nn.Conv2d\n        else:\n            conv = lambda in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, mask_bit=mask: \\\n                MaskConv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias,\n                           mask_bit)\n        convs.append(conv)\n        if act == 0:\n            quant = nn.Sequential\n        else:\n            quant = lambda: ActQuant(act_bit=act)\n        quants.append(quant)\n\n    return TEE_VGG18Structure(cfg, module_type, num_classes=num_classes, convs=convs, 
quants=quants)\n\n\ndef TEE_VGG18(module_type, num_classes=20, mask_bits = [1] * 5, act_bits = [5] * 5):\n return TEE_VGG18Wrapper(cfg, module_type, num_classes, mask_bits, act_bits)\n\n\n","sub_path":"train/pytorch/models/teeNet2.py","file_name":"teeNet2.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"165479308","text":"# Date Update: 04/26/2023\n# Purpose: To extract regulatory record information and populate dataframe for WaDE\n\n\n# Needed Libraries\n############################################################################\nimport os\nimport numpy as np\nimport pandas as pd\nimport re\nfrom datetime import date\n\n\n# Custom Libraries\n############################################################################\nimport sys\n# columns\nsys.path.append(\"C:/Users/rjame/Documents/WSWC Documents/MappingStatesDataToWaDE2.0/5_CustomFunctions/MappingFunctions\")\nimport GetColumnsFile\n\n# Test WaDE Data for any Errors\nsys.path.append(\"C:/Users/rjame/Documents/WSWC Documents/MappingStatesDataToWaDE2.0/5_CustomFunctions/ErrorCheckCode\")\nimport ErrorCheckCodeFunctionsFile\n\n\n# Create File Function\n############################################################################\ndef CreateRegulatoryOverlaysInputFunction(workingDirString, varST, varUUIDType, mainInputFile):\n # Inputs\n ############################################################################\n print(\"Reading input csv...\")\n workingDir = workingDirString\n os.chdir(workingDir)\n fileInput = \"RawinputData/\" + mainInputFile\n df = pd.read_csv(fileInput, compression='zip')\n\n # WaDE columns\n RegulatoryOverlaysColumnsList = GetColumnsFile.GetRegulatoryOverlaysColumnsFunction()\n\n\n # Custom Functions\n ############################################################################\n # For creating UUID\n def assignUUID(Val):\n Val = str(Val)\n Val = re.sub(\"[$@&.;,/\\)(-]\", \"\", Val).strip().replace(\" \", \"\")\n Val = varST + varUUIDType + \"_RO\" + Val\n return Val\n\n\n # Creating output dataframe (outdf)\n ############################################################################\n print(\"Populating dataframe outdf...\")\n outdf = pd.DataFrame(columns=RegulatoryOverlaysColumnsList, index=df.index) # The output dataframe\n\n print(\"OversightAgency\")\n outdf['OversightAgency'] = df['in_OversightAgency']\n\n print(\"RegulatoryDescription\")\n outdf['RegulatoryDescription'] = df['in_RegulatoryDescription']\n\n print(\"RegulatoryName\")\n outdf['RegulatoryName'] = df['in_RegulatoryName']\n\n print(\"RegulatoryOverlayNativeID\")\n outdf['RegulatoryOverlayNativeID'] = df['in_RegulatoryOverlayNativeID'].astype(str)\n\n print(\"RegulatoryStatusCV\")\n outdf['RegulatoryStatusCV'] = df['in_RegulatoryStatusCV']\n\n print(\"RegulatoryStatute\")\n outdf['RegulatoryStatute'] = df['in_RegulatoryStatute']\n\n print(\"RegulatoryStatuteLink\")\n outdf['RegulatoryStatuteLink'] = df['in_RegulatoryStatuteLink']\n\n print(\"StatutoryEffectiveDate\")\n outdf['StatutoryEffectiveDate'] = df['in_StatutoryEffectiveDate']\n\n print(\"StatutoryEndDate\")\n outdf['StatutoryEndDate'] = df['in_StatutoryEndDate']\n\n print(\"RegulatoryOverlayTypeCV\")\n outdf['RegulatoryOverlayTypeCV'] = df['in_RegulatoryOverlayTypeCV']\n\n print(\"WaterSourceTypeCV\")\n outdf['WaterSourceTypeCV'] = df['in_WaterSourceTypeCV']\n\n print(\"Adding Data Assessment UUID\")\n outdf['WaDEUUID'] = df['WaDEUUID']\n\n print(\"Resetting Index\")\n 
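# pandas note (added): reset_index() returns a new DataFrame rather than\n    # resetting in place, so the call below has no lasting effect unless its\n    # result is assigned back to outdf.\n    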
outdf.reset_index()\n\n print(\"Joining outdf duplicates based on key fields...\")\n outdf = outdf.replace(np.nan, \"\") # Replaces NaN values with blank.\n groupbyList = ['RegulatoryName', 'RegulatoryOverlayNativeID', 'RegulatoryStatusCV', 'RegulatoryOverlayTypeCV', 'WaterSourceTypeCV']\n outdf = outdf.groupby(groupbyList).agg(lambda x: ','.join([str(elem) for elem in (list(set(x))) if elem!=''])).replace(np.nan, \"\").reset_index()\n outdf = outdf[RegulatoryOverlaysColumnsList] # reorder the dataframe's columns based on ColumnsList\n\n\n # Solving WaDE 2.0 Upload Issues\n # ############################################################################\n print(\"Solving WaDE 2.0 upload issues\") # List all temp fixes required to upload data to WaDE here.\n\n # None at the moment\n\n\n # Error Checking Each Field\n ############################################################################\n print(\"Error checking each field. Purging bad inputs.\")\n dfpurge = pd.DataFrame(columns=RegulatoryOverlaysColumnsList) # Purge DataFrame to hold removed elements\n dfpurge['ReasonRemoved'] = \"\"\n dfpurge['IncompleteField'] = \"\"\n outdf, dfpurge = ErrorCheckCodeFunctionsFile.RegulatoryOverlaysTestErrorFunctions(outdf, dfpurge)\n print(f'Length of outdf DataFrame: ', len(outdf))\n print(f'Length of dfpurge DataFrame: ', len(dfpurge))\n\n\n # Assign UUID value\n ############################################################################\n print(\"Assign RegulatoryOverlayUUID\") # has to be one of the last.\n outdf = outdf.reset_index(drop=True)\n outdf['RegulatoryOverlayUUID'] = outdf.apply(lambda row: assignUUID(row['RegulatoryOverlayNativeID']), axis=1) # assign based on native ID\n outdf['RegulatoryOverlayUUID'] = np.where(outdf['RegulatoryOverlayUUID'].duplicated(keep=False),\n outdf['RegulatoryOverlayUUID'].astype(str).str.cat(outdf.groupby('RegulatoryOverlayUUID').cumcount().add(1).astype(str), sep='_'),\n outdf['RegulatoryOverlayUUID'])\n\n # Error check RegulatoryOverlayUUID\n outdf, dfpurge = ErrorCheckCodeFunctionsFile.RegulatoryOverlayUUID_RE_Check(outdf, dfpurge)\n\n\n # Export to new csv\n ############################################################################\n print(\"Exporting dataframe...\")\n\n # The working output DataFrame for WaDE 2.0 input.\n outdf.to_csv('ProcessedInputData/regulatoryoverlays.csv', index=False)\n\n # Report purged values.\n if(len(dfpurge.index) > 0): print(f'...', len(dfpurge), ' records removed.')\n dfpurge.insert(0, 'ReasonRemoved', dfpurge.pop('ReasonRemoved'))\n dfpurge.to_csv('ProcessedInputData/regulatoryoverlays_missing.csv', index=False)\n\n print(\"Done\")\n","sub_path":"5_CustomFunctions/MappingFunctions/CreateRegulatoryOverlaysFile.py","file_name":"CreateRegulatoryOverlaysFile.py","file_ext":"py","file_size_in_byte":5910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"89652364","text":"\"\"\"\nMichael S. 
Emanuel\nSat Oct 22 15:22:19 2016\n\nNumber spiral diagonals\nProblem 28\nStarting with the number 1 and moving to the right in a clockwise direction\na 5 by 5 spiral is formed as follows:\n\n21 22 23 24 25\n20 7 8 9 10\n19 6 1 2 11\n18 5 4 3 12\n17 16 15 14 13\n\nIt can be verified that the sum of the numbers on the diagonals is 101.\n\nWhat is the sum of the numbers on the diagonals in a 1001 by 1001 spiral\nformed in the same way?\n\"\"\"\n\nimport numpy as np\nfrom typing import Dict\n\n\ndef spiralMat(N: int, matrixTable: Dict[int, np.ndarray]) -> np.ndarray:\n # Returns a spiral matrix of size n on each side\n # nxn numpy matrix\n # First attempt to find M on matrixTable\n M = matrixTable.get(N)\n if isinstance(M, np.ndarray):\n return M\n # If we reach this stage, M hasn't been generated yet\n # Now we build it up recursively\n M = np.zeros((N, N), dtype=np.int)\n # Base case: n = 1\n if N == 1:\n M[0][0] = 1\n matrixTable[N] = M\n return M\n elif N % 2 == 0:\n # if n is even, tile on the right, bottom left corner, and bottom\n # The top left is filled in with the recursive call w/ N-1\n topLeft = spiralMat(N-1, matrixTable)\n # Add topLeft to matrixTable if not already present\n if (N-1) not in matrixTable.keys():\n matrixTable[N-1] = topLeft\n M[0:N-1, 0:N-1] = topLeft\n # the last entry filled in on the top left\n lastEntry = (N-1)*(N-1)\n # Fill in the right side: column is j = N-1, rows moving down\n # (i increasing)\n i = 0\n j = N-1\n for n in range(lastEntry+1, lastEntry+N+1):\n M[i][j] = n\n i += 1\n # Fill in the bottom row, starting one to the LEFT of bottom right\n # Columns moving left (j decreasing)\n i = N-1\n j = N-2 # bottom right at (N-1, N-1) already filled in!\n for n in range(lastEntry+N+1, lastEntry+2*N):\n M[i][j] = n\n j -= 1\n matrixTable[N] = M\n return M\n else:\n # if n is odd, tile on the left, then the top\n # The bottom right is filled in with the recursive call w/ N-1\n bottomRight = spiralMat(N-1, matrixTable)\n # Add bottomRight to matrixTable if not already present\n if (N-1) not in matrixTable.keys():\n matrixTable[N-1] = bottomRight\n M[1:N, 1:N] = bottomRight\n # the last entry filled in on the top left\n lastEntry = (N-1)*(N-1)\n # Fill in the left side: column is j = 0\n # Rows move up from the bottom (i decreasing)\n i = N-1\n j = 0\n for n in range(lastEntry+1, lastEntry+N+1):\n M[i][j] = n\n i -= 1\n # Fill in the top row\n # Row is fixed at i = 0\n # Columns start at j=1 and increase b/c (0,0) already filled in\n i = 0\n j = 1\n for n in range(lastEntry+N+1, lastEntry+2*N):\n M[i][j] = n\n j += 1\n matrixTable[N] = M\n return M\n\n\ndef main() -> int:\n # Size of square matrix\n N: int = 1001\n matrixTable: Dict[int, np.ndarray] = {}\n\n # Build up matrix table in steps\n n: int\n for n in range(1, N+1):\n spiralMat(n, matrixTable)\n\n # Generate spiral matrix of size N\n M: np.ndarray = spiralMat(N, matrixTable)\n\n # Compute the diagonal sum\n diagSum: int = 0\n i: int\n for i in range(N):\n diagSum += (M[i][i] + M[N-i-1][i])\n # This counts the central (1) square twice, so subtract it out\n diagSum -= 1\n print(f'Diagonal sum is {diagSum}')\n return diagSum\n\n\ndef test() -> None:\n matrixTable: Dict[int, np.ndarray] = {}\n M1 = spiralMat(1, matrixTable)\n M2 = spiralMat(2, matrixTable)\n M3 = spiralMat(3, matrixTable)\n M4 = spiralMat(4, matrixTable)\n M5 = spiralMat(5, matrixTable)\n print(M1)\n print(M2)\n print(M3)\n print(M4)\n print(M5)\n\n\nif __name__ == \"__main__\":\n # execute only if run as a script\n 
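# Cross-check (added, not in the original): for odd N the diagonal sum\n    # has the closed form (4*N**3 + 3*N**2 + 8*N - 9) // 6, which gives 101\n    # for N=5 and 669171001 for N=1001, so main() can be verified against it.\n    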
main()\n","sub_path":"Prob028_NumberSpiralDiagonals.py","file_name":"Prob028_NumberSpiralDiagonals.py","file_ext":"py","file_size_in_byte":3987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"434513916","text":"# Under MIT license, see LICENSE.txt\nimport numpy as np\nfrom RULEngine.Util.Pose import Pose\nfrom .Action import Action\n# from ...Util.types import AICommand\nfrom RULEngine.Util.Position import Position\nfrom RULEngine.Util.constant import PLAYER_PER_TEAM\nfrom ai.Util.ai_command import AICommand, AICommandType, RotateAroundCommand\n\n\nclass RotateAround(Action):\n    \"\"\"\n\n    \"\"\"\n    def __init__(self, p_game_state, p_player_id, target, rayon):\n        \"\"\"\n        :param p_game_state: The current state of the game.\n        :param p_player_id: Identifier of the player that is moving\n        :param target: Pose of the center of rotation\n        :param rayon: Distance between the center of the robot and the center of rotation\n        \"\"\"\n        Action.__init__(self, p_game_state)\n        assert(isinstance(p_player_id, int))\n        assert PLAYER_PER_TEAM >= p_player_id >= 0\n        self.player_id = p_player_id\n        self.target = target\n        self.game_state = p_game_state\n        self.rayon = rayon\n\n    def generate_destination(self):\n        player = self.game_state.game.friends.players[self.player_id].pose.position.conv_2_np()\n        target = self.target.position.conv_2_np()\n        player_to_target_orientation = np.arctan2(target[1] - player[1], target[0] - player[0])\n        target_orientation = self.target.orientation\n        delta_theta = player_to_target_orientation - target_orientation\n        delta_theta = min(abs(delta_theta), np.pi/6) * np.sign(delta_theta)\n        rotation_matrix = np.array([[np.cos(delta_theta), np.sin(delta_theta)], [-np.sin(delta_theta), np.cos(delta_theta)]])\n        player_to_ball_rot = np.dot(rotation_matrix, player - target)\n        translation = player_to_ball_rot / np.linalg.norm(player_to_ball_rot) * self.rayon\n        destination = translation + target\n        orientation = target_orientation\n        return Pose(Position.from_np(destination), orientation)\n\n    def exec(self):\n        \"\"\"\n        Executes the movement\n        \"\"\"\n        destination = self.generate_destination()\n        return AICommand(self.player_id, AICommandType.MOVE, **{\"pose_goal\": destination})","sub_path":"ai/STA/Action/rotate_around.py","file_name":"rotate_around.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"253710232","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 21 10:33:16 2017\n\n@author: ravi\n\"\"\"\n\n\n# coding: utf-8\n\n#get_ipython().magic(u'matplotlib inline')\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.io as scio\n#import scipy.io.wavfile as scwav\n#import cPickle\nimport speechpy.feature as spf\n\nfrom scipy.signal import butter, filtfilt\nfrom scipy import signal\nfrom glob import glob\nfrom audiolabel import LabelManager\nfrom termcolor import colored\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.preprocessing import StandardScaler\nfrom mermelstein import __butter_bandpass__, __make_full_length_hull__\n\nfrom helper_functions import __get_vowel_indices__, __get_file_num_annotation__, \\\n__get_misclassified_phones__, __get_syllable_boundaries_nearest_minima__, \\\n__plot_energy__, __smooth__, __split_word__, __plot_spec__, __compute_slope__, __compute_decay_rate__, \\\n__get_num_phones__, __get_missclass_dict__, __get_consonant_indices__, 
__comparison_plot__, \\\n__get_cluster_elbow__, __compute_vuv_feature__, __get_smoothed_loudness_profile__, \\\n__get_syllable_boundaries_lowest_minima__\n\n#from smoothing_gp import __smoothing_by_GP__\n#from gpr_fitting import __get_gpr_smoothed_pts__\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\n#import sys\n#import time\n#import scipy.io as scio\n#import itertools as itools\n#import scikits.talkbox as stalk\n\n#from python_speech_features import mfcc\n#from sklearn import svm\n#from sklearn.ensemble import GradientBoostingClassifier\n#from scipy.spatial import ConvexHull\n#from mpl_toolkits.mplot3d import Axes3D\n#from sklearn.decomposition import PCA\n\n#module_path = os.path.abspath(os.path.join('/home/ravi/Desktop/Test_aligner/'))\n#if module_path not in sys.path:\n# sys.path.append(module_path)\n \n\n#plt.rcParams['figure.figsize'] = [12.0, 6.0]\nplt.rcParams.update({'font.size': 10})\n\n\nCONSONANT_SET = ['B', 'CH', 'D', 'DH', 'F', 'G', \\\n 'HH', 'JH', 'K', 'L', 'M', 'N', \\\n 'NG', 'P', 'R', 'S', 'SH', 'T', \\\n 'TH', 'V', 'W', 'Y', 'Z', 'ZH']\n\nVOWEL_SET = ['AA0', 'AA1', 'AA2', 'AE0', 'AE1', \\\n 'AE2', 'AH0', 'AH1', 'AH2', 'AO0', \\\n 'AO1', 'AO2', 'AW0', 'AW1', 'AW2', \\\n 'AY0', 'AY1', 'AY2', 'EH0', 'EH1', \\\n 'EH2', 'ER0', 'ER1', 'ER2', 'EY0', \\\n 'EY1', 'EY2', 'IH0', 'IH1', 'IH2', \\\n 'IY0', 'IY1', 'IY2', 'OW0', 'OW1', \\\n 'OW2', 'OY0', 'OY1', 'OY2', 'UH0', \\\n 'UH1', 'UH2', 'UW0', 'UW1', 'UW2']\n\nFEATURES = ['peak intensity', '#maximas', 'time diff (peaks)', \\\n 'energy (100-200)', 'energy (200-400)', 'energy (400-600)', \\\n 'energy (0-600)']\n\nINFINITY = 10000\nWINDOW_SIZE = 20\nOVERLAP = 10\nSAMPLING_RATE = 16000.0\nEPSILON = 80\nDEFAULT_SMOOTHING_WINDOW = 101\nLPF_CUT = 15\nTOLERANCE = 0.03\n\n#session_dict = {}\n#session_dict['peak_flips'] = 0\n#session_dict['cls_balance'] = []\n#session_dict['flip_dist'] = []\n#session_dict['peak_density'] = []\n#session_dict['vowel_miss'] = []\n#elbow_storage = []\n\n\n\ndef __feature_extraction__(intensity_cord, audio_signal, index, extrema_list, cv_hull=None):\n max_indices = extrema_list[0]\n \n two_way_list = [x for x in range(max([0, \\\n index - EPSILON*int(SAMPLING_RATE)/1000]), \\\n min([len(intensity_cord)-1, \\\n index + EPSILON*int(SAMPLING_RATE)/1000]))]\n\n inter_max_list = set(list(max_indices)) & set(two_way_list)\n number_of_maximas = len(inter_max_list) - 1\n\n peak_intensity = intensity_cord[index, 1]\n intersec_list = np.asarray(list(inter_max_list - set([index])))\n decay_rate = __compute_decay_rate__(intensity_cord, index)\n \n \"\"\" Taking a 24ms window centered at the peak. 
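At 16 kHz this is 0.024 * 16000 = 384 samples; shorter edge segments are zero-padded to full length before the Hann window is applied. 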
\"\"\"\n window_segment = list(audio_signal[max([0, int(index-0.012*SAMPLING_RATE)]) : \\\n min([len(audio_signal), int(index+0.012*SAMPLING_RATE)])])\n window_segment.extend([0.0]*max([0, int(0.024*SAMPLING_RATE - len(window_segment))]))\n hann_window = np.hanning(len(window_segment))\n window_segment = hann_window*window_segment\n window_energy = float(sum(np.asarray(window_segment)**2)) + 0.0001\n \"\"\" Finish \"\"\"\n\n \"\"\" VUV feature \"\"\"\n vuv_feature = __compute_vuv_feature__(window_segment, SAMPLING_RATE, 512)\n \n \"\"\" Energy in 100-200 Hz band \"\"\"\n b, a = __butter_bandpass__(100, 200, 16000, 3)\n band_signal = filtfilt(b, a, np.asarray(window_segment))\n be_1 = sum(band_signal**2)/window_energy\n \"\"\" Finish \"\"\"\n\n \"\"\" Energy in 200-400 Hz band \"\"\"\n b, a = __butter_bandpass__(200, 400, 16000, 3)\n band_signal = filtfilt(b, a, np.asarray(window_segment))\n be_2 = sum(band_signal**2)/window_energy\n \"\"\" Finish \"\"\"\n\n \"\"\" Energy in 400-600 Hz band \"\"\"\n b, a = __butter_bandpass__(400, 600, 16000, 3)\n band_signal = filtfilt(b, a, np.asarray(window_segment))\n be_3 = sum(band_signal**2)/window_energy\n \"\"\" Finish \"\"\"\n\n \"\"\" Energy in 0-600 Hz band \"\"\"\n b, a = __butter_bandpass__(0.001, 600, 16000, 3)\n band_signal = filtfilt(b, a, np.asarray(window_segment))\n be_4 = sum(band_signal**2)/window_energy\n \"\"\" Finish \"\"\"\n\n filter_bank_energy = spf.mfe(window_segment, SAMPLING_RATE, \\\n frame_length=0.012, frame_stride=0.012, \\\n num_filters=20, fft_length=512, \\\n low_frequency=0, high_frequency=None)\n filter_bank_energy = tuple(list(filter_bank_energy[0][0]))\n \n# time_diff = 0.2 #(2*EPSILON)/float(SAMPLING_RATE)\n \n if len(intersec_list) > 0:\n sort_m_indices = list(intersec_list[np.argsort(intensity_cord[intersec_list, 1])])\n sort_m_indices.reverse()\n \n# time_diff = abs(index - sort_m_indices[0])/(float(SAMPLING_RATE))\n \n# final_features = (peak_intensity, number_of_maximas, \\\n# intensity_cord[sort_m_indices[0], 1], \\\n# cv_hull[index, 1] - intensity_cord[index, 1], \\\n# time_diff, onset, coda)\n final_features = (peak_intensity, number_of_maximas, decay_rate)\n\n else:\n# final_features = (peak_intensity, number_of_maximas, \\\n# intensity_cord[index, 1], \\\n# cv_hull[index, 1] - intensity_cord[index, 1], \\\n# time_diff, onset, coda)\n final_features = (peak_intensity, number_of_maximas, decay_rate)\n\n return final_features + (be_1, be_2, be_3, be_4) + filter_bank_energy + vuv_feature\n\n\"\"\" Unused features for classification algorithm \"\"\"\n# inter_min_list = set(list(min_indices)) & set(two_way_list)\n# number_of_minimas = len(inter_min_list)\n# min_indices = extrema_list[1]\n# onset, coda = __compute_slope__(intensity_cord, extrema_list, index)\n# decay_rate = __compute_decay_rate__(intensity_cord, index)\n\n# \"\"\" Energy in 200-600 Hz band \"\"\"\n# b, a = __butter_bandpass__(200, 600, 16000, 3)\n# band_signal = filtfilt(b, a, np.asarray(window_segment))\n# be_5 = sum(band_signal**2)/window_energy\n\"\"\" Finish \"\"\"\n\n\ndef __get_peak_vc_features__(vowel_indices, vc_interval, \\\n words_boundary, wordtier, \\\n intensity_cord, audio_signal, fuzzy=False):\n \n vowel_intervals = vc_interval[0]\n conson_intervals = vc_interval[1]\n captured_vowels_flag = [0]*len(vowel_intervals)\n captured_conson_flag = [0]*len(conson_intervals)\n peak_vowel_tuple = []\n v_peak_feats = []\n c_peak_feats = []\n local_maximas = signal.argrelextrema(intensity_cord[:, 1], np.greater)[0]\n local_minimas = 
signal.argrelextrema(intensity_cord[:, 1], np.less)[0]\n    \n    local_minima_values = intensity_cord[local_minimas, 1]\n#    local_maxima_values = intensity_cord[local_maximas, 1]\n    \n    extrema_list = [local_maximas, local_minimas]\n    \n#    elbow_storage.append(__get_cluster_elbow__(local_maximas))\n    \n    \n    for i in local_maximas:\n        temp_min_left = np.asarray(local_minimas)[np.where(np.asarray(local_minimas) < i)[0]]\n        temp_min_rite = np.asarray(local_minimas)[np.where(np.asarray(local_minimas) > i)[0]]\n        \n        epsilon_array = np.intersect1d(np.asarray([x for x in \\\n                                range(int(i-TOLERANCE*SAMPLING_RATE), \\\n                                int(i+TOLERANCE*SAMPLING_RATE))]), \\\n                                np.asarray(vowel_indices))\n\n        if i in vowel_indices:\n            v_peak_feats.append(__feature_extraction__(intensity_cord, audio_signal, \\\n                                        i, extrema_list))\n            \n            if len(temp_min_left) > 0:\n                left_min = temp_min_left[-1]\n            else:\n                left_min = 0\n            \n            if len(temp_min_rite) > 0:\n                right_min = temp_min_rite[0]\n            else:\n                right_min = i\n\n            peak_vowel_tuple.append([i, 1, left_min, right_min])\n            \n            for interval_counter in range(len(vowel_intervals)):\n                sub_interval = vowel_intervals[interval_counter]\n                if i in [x for x in range(max([0, sub_interval[0]-0]), \\\n                         min([sub_interval[1]+0, len(audio_signal)]))]:\n                    captured_vowels_flag[interval_counter] += 1\n        \n        elif fuzzy and (len(epsilon_array) > 0):\n            v_peak_feats.append(__feature_extraction__(intensity_cord, audio_signal, \\\n                                        i, extrema_list))\n            \n#            session_dict['peak_flips'] += 1\n#            session_dict['flip_dist'].append(np.min(np.abs(epsilon_array - i))/SAMPLING_RATE)\n            \n            if len(temp_min_left) > 0:\n                left_min = temp_min_left[-1]\n            else:\n                left_min = 0\n            \n            if len(temp_min_rite) > 0:\n                right_min = temp_min_rite[0]\n            else:\n                right_min = i\n\n            peak_vowel_tuple.append([i, 1, left_min, right_min])\n            \n            for interval_counter in range(len(vowel_intervals)):\n                sub_interval = vowel_intervals[interval_counter]\n                if i in [x for x in range(max([0, sub_interval[0]-0]), \\\n                         min([sub_interval[1]+0, len(audio_signal)]))]:\n                    captured_vowels_flag[interval_counter] += 1\n\n        else:\n            c_peak_feats.append(__feature_extraction__(intensity_cord, audio_signal, \\\n                                        i, extrema_list))\n            \n            if len(temp_min_left) > 0:\n                left_min = temp_min_left[-1]\n            else:\n                left_min = 0\n            \n            if len(temp_min_rite) > 0:\n                right_min = temp_min_rite[0]\n            else:\n                right_min = i\n\n            peak_vowel_tuple.append([i, 0, left_min, right_min])\n            \n            for interval_counter in range(len(conson_intervals)):\n                sub_interval = conson_intervals[interval_counter]\n                if i in [x for x in range(max([0, sub_interval[0]-0]), \\\n                         min([sub_interval[1]+0, len(audio_signal)]))]:\n                    captured_conson_flag[interval_counter] += 1\n\n    cv_flag = [captured_vowels_flag, captured_conson_flag]\n    return (v_peak_feats, c_peak_feats, peak_vowel_tuple, cv_flag, \\\n            local_minimas, local_minima_values)\n\n\n\ndef __get_train_test_data__(FILE_LIST, smooth_param, fuzzy_bounds=False, GPR=False):\n    peak_vf = []\n    peak_cf = []\n    cap_vow = []\n    cap_con = []\n    peak_vowel_loc = []\n    phoneme_info = []\n    min_loc = []\n    min_val = []\n    iter_counter = 1\n    signals_to_plot = 100000\n    gpr_array = []\n    points_array = []\n\n    for file_iterator in (FILE_LIST):\n        FILE = file_iterator[0:-4]\n        SIGNAL_LOC = FILE + '.wav'\n        TEXTGRID_LOC = FILE + '.TextGrid'\n        print(colored(SIGNAL_LOC, 'yellow'))\n        \n        try:\n            lm = LabelManager(from_file=TEXTGRID_LOC, from_type='praat')\n        except(IOError):\n            continue\n        phonetier = lm.tier('phone')\n        wordtier = lm.tier('word')\n        \n        vowel_indices, vowel_intervals, phonemes = __get_vowel_indices__(phonetier, VOWEL_SET)\n        conso_indices, conso_intervals, phonemes = __get_consonant_indices__(phonetier, 
CONSONANT_SET)\n vc_intervals = [vowel_intervals, conso_intervals]\n words_boundary_indices = [x.t1*SAMPLING_RATE for x in wordtier]\n# phone_boundary_indices = [x.t1*SAMPLING_RATE for x in phonetier]\n \n sp_data, points, gpr = __get_smoothed_loudness_profile__(SIGNAL_LOC, smooth_param, GPR)\n \n# plt.plot(points[:,0], points[:,1]/float(16000), 'b-', linewidth=2.0)\n \n \"\"\" Comparison of vowel peaks when boundaries are flexible \"\"\"\n# __comparison_plot__(vowel_indices, phone_boundary_indices, phonetier, points)\n\n v_f, c_f, pv_l, cv_f,l_min,v_min = __get_peak_vc_features__(vowel_indices, \\\n vc_intervals, words_boundary_indices, wordtier, \\\n points, sp_data, fuzzy_bounds)\n phonemes = __get_file_num_annotation__(phonemes, iter_counter)\n\n peak_vf.extend(v_f)\n peak_cf.extend(c_f)\n peak_vowel_loc.append(pv_l)\n cap_vow.extend(cv_f[0])\n cap_con.extend(cv_f[1])\n phoneme_info.extend(phonemes)\n min_loc.append(l_min)\n min_val.append(v_min)\n gpr_array.append(gpr)\n points_array.append(points)\n \n if iter_counter > signals_to_plot:\n break\n iter_counter += 1\n\n peak_vf = np.asarray(peak_vf)\n peak_cf = np.asarray(peak_cf)\n \n \"\"\" Number of peaks recorded for each vowel and consonant occurrence \"\"\"\n cap_vow_con = [cap_vow, cap_con]\n \n vowel_miss_rate = np.sum(np.array(cap_vow)==0)/float(len(cap_vow))\n print(colored('vowels missed : ' + str(vowel_miss_rate), 'red'))\n# session_dict['vowel_miss'].append(vowel_miss_rate)\n return (peak_vf, peak_cf, peak_vowel_loc, min_loc, min_val, cap_vow_con, \\\n phoneme_info, vowel_miss_rate, gpr_array, points_array)\n\n\n\ndef __unfold_data__(dataset, cols):\n if dataset=='TIMIT':\n data = scio.loadmat('/home/ravi/Downloads/Lib-TIMIT-numpy-data/TIMIT-test-data-mfe-vuv.mat')\n elif dataset=='LibriSpeech':\n data = scio.loadmat('/home/ravi/Downloads/Lib-TIMIT-numpy-data/LibriSpeech-test-data-mfe-vuv.mat')\n \n test_vf = data['test_vf'][:, :cols]\n test_cf = data['test_cf'][:, :cols]\n min_loc = data['min_loc']\n min_val = data['min_val']\n peak_loc = data['peak_loc']\n \n min_val_array = []\n min_loc_array = []\n peak_loc_array = []\n \n for i in range(min_loc.shape[1]):\n min_val_array.append(min_val[0,i][0].tolist())\n min_loc_array.append(min_loc[0,i][0].tolist())\n peak_loc_array.append(peak_loc[0,i].tolist())\n \n return test_vf, test_cf, peak_loc_array, min_loc_array, min_val_array\n\n\ndef __get_RF_model__(FILE_LIST, model, scaler, time_constr, min_delta, dataset, cols, fuzzy_bounds=False):\n#if __name__ == '__main__':\n smooth_param = 200 if dataset=='TIMIT' else 400\n fuzzy_bounds = False\n bool_smooth = True\n\n \"\"\" Model Training code \"\"\"\n#-----------------------------------------Get the training files----------------------------------------------------\n# train_files_loc = '/home/ravi/Downloads/TIMIT/TRAIN/agg-train'\n# FILE_LIST = sorted(glob(os.path.join(train_files_loc, '*.wav')))\n# (peak_vf, peak_cf, _, _, _, _, _, gpr_array) = __get_train_test_data__(FILE_LIST, \\\n# smooth_param, \\\n# fuzzy_bounds, GPR=bool_smooth)\n# print(colored('Training Set prepared', 'green'))\n \n# gpr = __get_train_test_data__(train_files_loc, smooth_param, True)\n# return gpr\n\n#-----------------------------------------Plots for comparison------------------------------------------------------\n# np.random.shuffle(FILE_LIST)\n## \n# for smoothness in smooth_param:\n# elbow_storage = []\n# print colored('Fuzzy boundary: '+str(fuzzy_bounds)+', smoothness: '+str(smoothness) \\\n# +', GPR: '+str(bool_smooth), 'red')\n# (peak_vf, 
peak_cf, _, _, _, _, _, _) = __get_train_test_data__(FILE_LIST, \\\n# smoothness, \\\n# fuzzy_bounds, GPR=bool_smooth)\n# session_dict['peak_density'].append(np.mean(elbow_storage))\n# session_dict['cls_balance'].append(len(peak_cf)/float(len(peak_vf)))\n# session_dict['flip_dist'].append(np.mean(session_dict['flip_dist'][-1]))\n\n#------------------------------------------Training the Random Forest model-----------------------------------------\n# num_trees = 30\n# rfc = RandomForestClassifier(n_estimators=num_trees, max_depth=9)\n# data_Y = np.append(np.ones((len(peak_vf), 1)), -1*np.ones((len(peak_cf), 1)), 0)\n# data_X = np.append(peak_vf, peak_cf, 0)\n# data_XY = np.append(data_X, data_Y, 1)\n# np.random.shuffle(data_XY)\n# train_X = data_XY[:,:-1]\n# train_Y = data_XY[:,-1]\n# rfc.fit(train_X, train_Y)\n#\n# with open('/home/ravi/Documents/Random_Forests/full_TIMIT_random_forest_model_1200_' \\\n# + str(smooth_param) +'.pkl', 'wb') as f:\n# cPickle.dump(rfc, f)\n# del f\n#\n# print colored('Model Trained', 'magenta')\n\n\n#------------------------------------------Training the Multi-layer Perceptron--------------------------------------\n# data_Y = np.append(np.ones((len(peak_vf), 1)), np.zeros((len(peak_cf), 1)), 0)\n# data_X = np.append(peak_vf, peak_cf, 0)\n# data_XY = np.append(data_X, data_Y, 1)\n# \n# np.random.shuffle(data_XY)\n# \n# train_X = data_XY[:,:-1]\n# train_Y = data_XY[:,-1]\n# \n# scaler = StandardScaler() \n# scaler.fit(train_X) \n# scaled_train_X = scaler.transform(train_X) \n# mlp = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(80,60,2), random_state=1)\n# mlp.fit(scaled_train_X, train_Y)\n# print(colored('Model Trained', 'magenta'))\n\n#------------------------------------------Load an existing model---------------------------------------------------\n# if 'rfc' not in globals():\n# try:\n# with open('/home/ravi/Documents/Random_Forests/Librispeech_random_forest_model_1200_300.pkl', 'rb') as f:\n# rfc = cPickle.load(f)\n# del f\n# print colored('Model Loaded', 'magenta')\n# except IOError:\n# print 'Random forest model cannot be found'\n# exit(0)\n\n#------------------------------------------Preparing Test data------------------------------------------------------\n# test_files_loc = '/home/ravi/Downloads/TIMIT/TEST/agg-test'\n# FILE_LIST = sorted(glob(os.path.join(test_files_loc, '*.wav')))\n#\n# (test_vf, test_cf, peak_loc, min_loc, capt_ph_test, phone_info_test, \\\n# vmr, _) = __get_train_test_data__(FILE_LIST, \\\n# smooth_param, \\\n# fuzzy_bounds, GPR=bool_smooth)\n# scaled_test_vf = scaler.transform(test_vf)\n# scaled_test_cf = scaler.transform(test_cf)\n\n#------------------------------------------Performance Metrics ---------------------------------------------------- \n# detect = np.sum(rfc.predict(test_vf)==1)/float(len(test_vf))\n# fa = np.sum(rfc.predict(test_cf)==1)/float(len(test_cf))\n# accuracy = detect*(len(test_vf)/float(len(test_vf) + len(test_cf))) \\\n# + (1-fa)*(len(test_cf)/float(len(test_vf) + len(test_cf)))\n# print('vowel peak detection rate and false alarm :' + colored(detect, 'green') + ' ' + colored(fa, 'red'))\n# print('Overall acc: '+str(accuracy))\n# \n# detect = np.sum(mlp.predict(scaled_test_vf)==1)/float(len(scaled_test_vf))\n# fa = np.sum(mlp.predict(scaled_test_cf)==1)/float(len(scaled_test_cf))\n# accuracy = detect*(len(test_vf)/float(len(test_vf) + len(test_cf))) \\\n# + (1-fa)*(len(test_cf)/float(len(test_vf) + len(test_cf)))\n# print('vowel peak detection rate and false alarm :' + colored(detect, 
'green') + ' ' + colored(fa, 'red'))\n# print('Overall acc: '+str(accuracy))\n\n#-------For seeing variation of consonant and vowel peaks and their detection rate----------------------------------\n# return syl_segments2, capt_ph_train, capt_ph_test, rfc, (detect, fa)#, (vmr_train, vmr_test)\n# return syl_segments, capt_ph_test, peak_loc, prediction\n \n#------------------------------------For finding the misclassified phonemes-----------------------------------------\n# return phone_info_test, prediction, peak_loc\n\n\n\n\n\n \"\"\" Syllable segmentation code (For Testing) \"\"\"\n#-----------------------------For Testing syllable segmentation performance-----------------------------------------\n# (test_vf, test_cf, peak_loc, min_loc, min_val, capt_ph_test, phone_info_test, \\\n# vmr, _, pts_array) = __get_train_test_data__(FILE_LIST, \\\n# smooth_param, fuzzy_bounds, GPR=bool_smooth)\n \n# scio.savemat('/home/ravi/Desktop/LibriSpeech-validation-200-data-mfe-vuv.mat', { \\\n# 'test_vf': np.asarray(test_vf), \\\n# 'test_cf': np.asarray(test_cf), \\\n# 'peak_loc': np.asarray(peak_loc), \\\n# 'min_loc': np.asarray(min_loc), \\\n# 'min_val': np.asarray(min_val), \\\n# 'capt_ph_test': np.asarray(capt_ph_test), \\\n# 'phone_info_test': np.asarray(phone_info_test), \\\n# 'vmr': np.asarray(vmr) \\\n# })\n test_vf, test_cf, peak_loc, min_loc, min_val = __unfold_data__(dataset, cols)\n\n#------------------------------------------Prediction on the test data----------------------------------------------\n prediction, syl_segments = __get_syllable_boundaries_lowest_minima__(test_vf[:,:cols], \\\n test_cf[:,:cols], peak_loc, min_loc, min_val, model, \\\n scaler, time_constr, min_delta)\n# detect = np.sum(model.predict(test_vf[:,:cols])==1)/float(len(test_vf))\n# fa = np.sum(model.predict(test_cf[:,:cols])==1)/float(len(test_cf))\n# print('True Detection: ' + str(detect))\n# print('False Alarm: ' + str(fa))\n return syl_segments, peak_loc, prediction\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#if __name__ == '__main__':\n# ph, pr, pl = __get_RF_SVM_perf__(501, 50)\n# miss_phones = __get_misclassified_phones__(np.asarray(ph), pl, pr)\n# missclass_dict = __get_missclass_dict__(ph, pl, miss_phones, VOWEL_SET, CONSONANT_SET)\n# plt.figure(), plt.bar(range(len(missclass_dict)), missclass_dict.values(), align=\"center\")\n# plt.xticks(range(len(missclass_dict)), [__split_word__(i) for i in missclass_dict.keys()], rotation=90)\n# f = '/home/ravi/Downloads/TIMIT/TEST/Agg_Test2/'\n# __plot_energy__(f, 501, [np.asarray(miss_phones)], 1, 1200, False, [False, 0])\n# del ph, pr, pl, i\n\n\n\n\n\n#if __name__ == '__main__':\n## vowels_missed = []\n# det = []\n# fa = []\n# avg_peaks_per_vowel = []\n# clf_objects = []\n# gpr_objects = []\n# smooth_windows = [5, 21, 51, 101, 151, 201, 251, 321, 401]\n# gpr_samples = [700] #[200, 300, 500, 700, 1000]\n# vowels_captured_test = []\n# vowels_captured_train = []\n# conson_captured_test = []\n# conson_captured_train = []\n# for i in gpr_samples:\n# print colored('smoothing window: ' + str(i), 'green')\n# gpr_objects = __get_RF_SVM_perf__(i, 50)\n# ss, cap_ph_train, cap_ph_test, clf_obj, det_fa = __get_RF_SVM_perf__(i, 50)\n# avg_peaks_per_vowel.append(np.mean(np.asarray(cap_ph_train[0])))\n# clf_objects.append(clf_obj)\n# vowels_captured_train.append(cap_ph_train[0])\n# vowels_captured_test.append(cap_ph_test[0])\n# conson_captured_train.append(cap_ph_train[1])\n# conson_captured_test.append(cap_ph_test[1])\n# det.append(det_fa[0])\n# fa.append(det_fa[1])\n# print 
'\\n'\n# vowels_missed.append(vmr)\n# scio.savemat('/home/ravi/Desktop/vmr_2000.mat', {'vowel_miss_rate': np.asarray(vowels_missed)})\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n######_______________Plotting__________________________________________________\n\n\"\"\"\nfeat_indices = [i for i in range(peak_vf.shape[1])]\n\nidxv = np.random.randint(0, len(peak_vf), (1000,1))\nidxc = np.random.randint(0, len(peak_cf), (1000,1))\n\nfor feats in range(len(FEATURES)):\n plt.figure()plt.rcParams.update({'font.size': 25})\n ax = plt.subplot(211)\n plt.plot(peak_vf[idxv.squeeze(), feats], 'g.')\n plt.title(FEATURES[feats] + ' (Rigid Boundary Scheme)')\n plt.subplot(212, sharex=ax, sharey=ax)\n plt.plot(peak_cf[idxc.squeeze(), feats], 'r.')\n plt.savefig('/home/ravi/Desktop/Rigid_bounds_'+FEATURES[feats]+'.png')\n\n\npair_comb = itools.combinations(feat_indices, 2)\niterator = 1\nwhile (True):\n try:\n pc = pair_comb.next()\n except StopIteration:\n print colored('Done', 'green')\n break\n plt.figure()\n ax = plt.subplot(211)\n plt.plot(peak_vf[:, pc[0]], peak_vf[:, pc[1]], 'g.')\n plt.title(FEATURES[pc[1]] + ' vs ' + FEATURES[pc[0]])\n plt.subplot(212, sharex=ax, sharey=ax)\n plt.plot(peak_cf[:, pc[0]], peak_cf[:, pc[1]], 'r.')\n plt.savefig('/home/ravi/Desktop/Penn_'+str(iterator)+'.png')\n iterator += 1\n\ntrip_comb = itools.combinations(f_indices, 3)\niterator = 1\nwhile (True):\n try:\n tc = trip_comb.next()\n except StopIteration:\n print 'Finished'\n break\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n Axes3D.scatter(ax, peak_cf[:, tc[0]], peak_cf[:, tc[1]], peak_cf[:, tc[2]], depthshade=False, c='red')\n Axes3D.scatter(ax, peak_vf[:, tc[0]], peak_vf[:, tc[1]], peak_vf[:, tc[2]], depthshade=False, c='green')\n ax.set_title(FEATURES[tc[0]] + ' vs ' + FEATURES[tc[1]] + ' vs ' + FEATURES[tc[2]])\n\"\"\"\n\n\"\"\" Annotate the points on a scatter plot \nvm = session_dict['vowel_miss']\npd = session_dict['peak_density']\nfig, ax = plt.subplots()\nax.scatter(vm, pd, s=50)\nax.grid()\nfor i, txt in enumerate(smooth_param):\n ax.annotate(txt, (vm[i],pd[i]))\n\n\"\"\"\n\n##-----------------------------Previous----------------------------------------\n\n#if __name__ == '__main__':\n# start_time = time.time()\n# FILE_LIST = glob(os.path.join('/home/ravi/Desktop/Test_aligner/Data/syllable_boundary_test/', '*.wav'))\n# peak_vf = []\n# peak_cf = []\n# iter_counter = 1\n# prime_utterance = 10000\n# global SMOOTHING_WINDOW\n# SMOOTHING_WINDOW = smooth_param\n# \n# for iterator in sorted(FILE_LIST):\n# print colored(str(iterator), 'green')\n# FILE = iterator[0:-4]\n# SIGNAL_LOC = FILE + '.wav'\n# TEXTGRID_LOC = FILE + '.TextGrid'\n# \n# lm = LabelManager(from_file=TEXTGRID_LOC, from_type='praat')\n# phonetier = lm.tier('phone')\n# wordtier = lm.tier('word')\n# vowel_indices = __get_vowel_indices__(phonetier)\n# words_boundary_indices = [x.t1*SAMPLING_RATE for x in wordtier]\n#\n# data = scwav.read(SIGNAL_LOC)\n# sp_data = np.asarray(data[1], np.float64)\n## sp_data = (sp_data - np.mean(sp_data))\n# sp_data = 2*(sp_data - np.min(sp_data))/float(np.max(sp_data) \\\n# - np.min(sp_data)) - 1\n# \n# b, a = __butter_bandpass__(500, 4000, 16000, 3)\n## b, a = butter(1, 650/(SAMPLING_RATE/2.0), btype='low')\n# \n# y = filtfilt(b, a, sp_data.squeeze())\n# y = np.reshape(y, [len(y), 1])\n# \n## ste = __get_short_term_energy2__(y.squeeze(), 320, 160, True)\n## ste = smooth(ste)\n#\n# sq_signal = y**2\n# b_lpf, a_lpf = butter(1, 
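The disabled plotting block above drives itertools.combinations by hand with pair_comb.next() inside while True; that .next() method is Python 2 only. If the block is ever revived, a plain for loop is the idiomatic Python 3 form (sketch; the FEATURES list and output directory are the script's own globals):

import itertools as itools
import matplotlib.pyplot as plt

def plot_feature_pairs(peak_vf, peak_cf, feature_names, out_dir='/tmp'):
    feat_indices = range(peak_vf.shape[1])
    # The for loop consumes the generator and stops on its own, so no
    # while True / StopIteration scaffolding is needed.
    for k, (i, j) in enumerate(itools.combinations(feat_indices, 2), start=1):
        plt.figure()
        ax = plt.subplot(211)
        plt.plot(peak_vf[:, i], peak_vf[:, j], 'g.')
        plt.title(feature_names[j] + ' vs ' + feature_names[i])
        plt.subplot(212, sharex=ax, sharey=ax)
        plt.plot(peak_cf[:, i], peak_cf[:, j], 'r.')
        plt.savefig('%s/pair_%d.png' % (out_dir, k))
        plt.close()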
LPF_CUT/(SAMPLING_RATE/2.0), btype='low')\n# y_sq_lp = filtfilt(b_lpf, a_lpf, np.log10(sq_signal.squeeze()))\n#\n# y_sq_lp = smooth(y_sq_lp)\n# y_sq_lp = y_sq_lp/np.max(y_sq_lp)\n#\n# y_sq_lp = np.reshape(y_sq_lp, [len(y_sq_lp), 1])\n# new_col = np.asarray([x for x in range(len(y_sq_lp))])\n# new_col = np.reshape(new_col, [len(new_col), 1])\n# points = np.append(new_col, y_sq_lp, 1)\n# \n## y_test = np.reshape(ste, [len(ste), 1])\n## new_col = np.asarray([x for x in range(len(y_test))])\n## new_col = np.reshape(new_col, [len(new_col), 1])\n## points = np.append(new_col, y_test, 1)\n# \n# full_hull = __make_full_length_hull__(points)\n# \n# if iter_counter == prime_utterance:\n# v_f, c_f = __get_peak_vc_features__(vowel_indices, \\\n# words_boundary_indices, wordtier, \\\n# points, full_hull, True)\n# \n# for phone in phonetier:\n# print colored(str((phone.t1, phone.t2, str(phone.text))), 'blue')\n## plt.figure(), plt.plot(ste)\n#\n# else:\n# v_f, c_f = __get_peak_vc_features__(vowel_indices, \\\n# words_boundary_indices, wordtier, \\\n# points, full_hull, False)\n# \n# peak_vf.extend(v_f)\n# peak_cf.extend(c_f)\n# \n# if iter_counter > prime_utterance:\n# break\n# iter_counter += 1\n#\n# #plt.figure(), plt.plot(points[:, 0], points[:, 1], 'r-')\n# #plt.plot(full_hull[:, 0], full_hull[:, 1], 'g-')\n# \n# peak_vf = np.asarray(peak_vf)\n# peak_cf = np.asarray(peak_cf)\n\n#for i in range(7):\n# peak_vf[:,i] = -1 + 2*(peak_vf[:,i])/float(np.max(peak_vf[:,i]) - np.min(peak_vf[:,i])) #- np.mean(peak_vf[:,i]))/float(np.var(peak_vf[:,i]))\n# peak_cf[:,i] = -1 + 2*(peak_cf[:,i])/float(np.max(peak_cf[:,i]) - np.min(peak_cf[:,i])) #- np.mean(peak_cf[:,i]))/float(np.var(peak_cf[:,i]))","sub_path":"peak_detection.py","file_name":"peak_detection.py","file_ext":"py","file_size_in_byte":33724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"646273197","text":"import os\r\n\r\nos.environ['CUDA_VISIBLE_DEVICES'] = '0' # '3,2' #'3,2,1,0'\r\n\r\nfrom data_util import *\r\n\r\n# from unet_5_scale_more_aug.model import SaltNet as Net\r\n# from resnet34.model_resnet34_bn import SaltNet as Net\r\nfrom seresnet50.model_se_resnext50_bn import SeResNeXt50Unet as Net\r\n# from resnet_aug0.model import SaltNet as Net\r\nSIZE = 101\r\nPAD_1 = 13\r\nPAD_2 = 14\r\nY0, Y1, X0, X1 = PAD_1, PAD_1 + SIZE, PAD_1, PAD_1 + SIZE,\r\n\r\n## global setting ############################################################\r\n\r\n\r\nout_dir = \\\r\n 'E:\\\\DHWorkStation\\\\Project\\\\tgs_pytorch\\\\output\\\\seresnext50_bn\\\\fold1\\\\sallow\\\\'\r\n\r\ninitial_checkpoint = \\\r\n 'E:\\\\DHWorkStation\\\\Project\\\\tgs_pytorch\\\\output\\\\seresnext50_bn\\\\swa_test\\\\checkpoint\\\\swa_00065600_loss_0.224_model.pth'\r\n# out_dir + '/resnet34/checkpoint/00018000_model.pth'\r\n# '/root/share/project/kaggle/tgs/results/simple-004-d/checkpoint/00032000_model.pth'\r\n# '/root/share/project/kaggle/tgs/results/simple-004-b/checkpoint/00016000_model.pth'\r\n# '/root/share/project/kaggle/tgs/results/simple-002-02-xx/checkpoint/00014000_model.pth'\r\n\r\n# split, mode = 'valid_400_1_origin', 'valid'\r\n\r\n\r\nsplit, mode = 'test_18000', 'test'\r\n\r\n# #augment = 'flip'\r\n# augment = 'null'\r\n# #augment = 'intensity'\r\n# #augment = 'intensity-flip'\r\n#\r\n\r\n\r\ndef augment_flip(image, mask, index):\r\n cache = Struct(image=image.copy(), mask=mask.copy())\r\n\r\n if mask == []:\r\n image = do_horizontal_flip(image)\r\n # image = do_center_pad_to_factor(image, factor=32)\r\n # image = 
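The commented-out main block above builds the loudness profile by band-passing the waveform, squaring it, taking log10, low-pass filtering, and normalising. A compact, runnable restatement of that chain (filter orders and cutoffs are illustrative stand-ins for the script's LPF_CUT and SAMPLING_RATE):

import numpy as np
from scipy.signal import butter, filtfilt

def log_energy_envelope(x, fs=16000, band=(500, 4000), lpf_cut=10.0):
    # 1) band-pass to the speech band, zero-phase
    b, a = butter(3, [band[0] / (fs / 2.0), band[1] / (fs / 2.0)], btype='band')
    y = filtfilt(b, a, np.asarray(x, np.float64))
    # 2) squared signal -> log energy (epsilon guards log10(0))
    log_sq = np.log10(y ** 2 + 1e-12)
    # 3) low-pass the log energy to get a smooth envelope
    b_lpf, a_lpf = butter(1, lpf_cut / (fs / 2.0), btype='low')
    env = filtfilt(b_lpf, a_lpf, log_sq)
    return env / np.max(np.abs(env))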
cv2.resize(image, dsize=(SIZE, SIZE))\r\n image = do_center_pad(image, PAD_1, PAD_2)\r\n else:\r\n image, mask = do_horizontal_flip2(image, mask)\r\n # image, mask = do_resize2(image, mask, SIZE, SIZE)\r\n image, mask = do_center_pad2(image, mask, PAD_1, PAD_2)\r\n # image, mask = do_center_pad_to_factor2(image, mask, factor=32)\r\n\r\n return image, mask, index, cache\r\n\r\n\r\ndef unaugment_flip(prob):\r\n # dy0, dy1, dx0, dx1 = compute_center_pad(IMAGE_HEIGHT, IMAGE_WIDTH, factor=32)\r\n # prob = prob[:, dy0:dy0 + IMAGE_HEIGHT, dx0:dx0 + IMAGE_WIDTH]\r\n res = []\r\n for p in prob:\r\n p = p[Y0:Y1, X0:X1]\r\n p = p[:, ::-1]\r\n # p = cv2.resize(p, (101, 101))\r\n res.append(p)\r\n res = np.array(res)\r\n # prob = prob[:, Y0:Y1, X0:X1]\r\n # prob = prob[:,:, ::-1]\r\n return res\r\n\r\n\r\n# ---------------------\r\n# augment == 'null' :\r\ndef augment_null(image, mask, index):\r\n cache = Struct(image=image.copy(), mask=mask.copy())\r\n\r\n if mask == []:\r\n # image = cv2.resize(image, dsize=(SIZE, SIZE))\r\n image = do_center_pad(image, PAD_1, PAD_2)\r\n # image = do_center_pad_to_factor(image, factor=32)\r\n else:\r\n # image, mask = do_resize2(image, mask, SIZE, SIZE)\r\n image, mask = do_center_pad2(image, mask, PAD_1, PAD_2)\r\n # image, mask = do_center_pad_to_factor2(image, mask, factor=32)\r\n\r\n return image, mask, index, cache\r\n\r\n\r\ndef unaugment_null(prob):\r\n res = []\r\n for p in prob:\r\n p = p[Y0:Y1, X0:X1]\r\n p = cv2.resize(p, (101, 101))\r\n res.append(p)\r\n res = np.array(res)\r\n # dy0, dy1, dx0, dx1 = compute_center_pad(IMAGE_HEIGHT, IMAGE_WIDTH, factor=32)\r\n # prob = prob[:, dy0:dy0 + IMAGE_HEIGHT, dx0:dx0 + IMAGE_WIDTH]\r\n return res\r\n\r\n\r\ndef run_predict(augment):\r\n if augment == 'null':\r\n test_augment = augment_null\r\n test_unaugment = unaugment_null\r\n if augment == 'flip':\r\n test_augment = augment_flip\r\n test_unaugment = unaugment_flip\r\n # ....................................................\r\n\r\n\r\n ## setup -----------------\r\n os.makedirs(out_dir + '/test/' + split, exist_ok=True)\r\n os.makedirs(out_dir + '/backup', exist_ok=True)\r\n # backup_project_as_zip(PROJECT_PATH, out_dir +'/backup/code.test.%s.zip'%IDENTIFIER)\r\n\r\n log = Logger()\r\n log.open(out_dir + '/log.submit.txt', mode='a')\r\n log.write('\\n--- [START %s] %s\\n\\n' % (IDENTIFIER, '-' * 64))\r\n log.write('\\tSEED = %u\\n' % SEED)\r\n log.write('\\tPROJECT_PATH = %s\\n' % PROJECT_PATH)\r\n log.write('\\tout_dir = %s\\n' % out_dir)\r\n log.write('\\n')\r\n\r\n ## dataset ----------------------------------------\r\n log.write('** dataset setting **\\n')\r\n batch_size = 32\r\n\r\n test_dataset = TsgDataset(split, test_augment, mode)\r\n test_loader = DataLoader(\r\n test_dataset,\r\n sampler=SequentialSampler(test_dataset),\r\n batch_size=batch_size,\r\n drop_last=False,\r\n num_workers=0,\r\n pin_memory=True,\r\n collate_fn=null_collate)\r\n\r\n assert (len(test_dataset) >= batch_size)\r\n log.write('batch_size = %d\\n' % (batch_size))\r\n log.write('\\n')\r\n\r\n ## net ----------------------------------------\r\n log.write('** net setting **\\n')\r\n net = Net().cuda()\r\n\r\n if initial_checkpoint is not None:\r\n log.write('\\tinitial_checkpoint = %s\\n' % initial_checkpoint)\r\n net.load_state_dict(torch.load(initial_checkpoint, map_location=lambda storage, loc: storage))\r\n\r\n log.write('%s\\n\\n' % (type(net)))\r\n log.write('\\n')\r\n\r\n ####### start here ##########################\r\n all_prob = []\r\n all_num = 0\r\n all_loss = np.zeros(2, 
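For reference, the flip/unflip machinery above implements standard test-time augmentation: predict on the image and on its mirror, undo the mirror on the second prediction, and average. Stripped of the padding bookkeeping it reduces to this sketch (predict_fn is a stand-in for the network forward pass):

import numpy as np

def predict_with_flip_tta(predict_fn, image):
    p_null = predict_fn(image)                      # plain pass
    p_flip = predict_fn(image[:, ::-1])[:, ::-1]    # flip in, flip back out
    return 0.5 * (p_null + p_flip)                  # what 'aug2' averages later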
np.float32)\r\n\r\n net.set_mode('test')\r\n for input, truth, index, cache in test_loader:\r\n # print(input.shape)\r\n #\r\n print('\\r', all_num, end='', flush=True)\r\n batch_size = len(index)\r\n all_num += batch_size\r\n\r\n input = input.cuda()\r\n with torch.no_grad():\r\n logit = net(input)\r\n prob = F.sigmoid(logit)\r\n\r\n if 0: ##for debug\r\n truth = truth.cuda()\r\n loss = net.criterion(logit, truth)\r\n dice = net.metric(logit, truth)\r\n all_loss += batch_size * np.array((loss.item(), dice.item(),))\r\n\r\n ##-----------------------------\r\n prob = prob.squeeze().data.cpu().numpy()\r\n prob = test_unaugment(prob)\r\n all_prob.append(prob)\r\n\r\n if 0: ##for debug\r\n\r\n os.makedirs(out_dir + '/test/%s/%s' % (split, augment), exist_ok=True)\r\n\r\n for b in range(batch_size):\r\n name = test_dataset.ids[index[b]]\r\n predict = prob[b]\r\n image = cache[b].image * 255\r\n truth = cache[b].mask\r\n image = np.dstack([image, image, image])\r\n\r\n overlay0 = draw_mask_overlay(predict, image, color=[0, 0, 255])\r\n overlay0 = draw_mask_to_contour_overlay(predict, overlay0, 2, color=[0, 0, 255])\r\n\r\n if truth == []:\r\n overlay1 = np.zeros((101, 101, 3), np.float32)\r\n else:\r\n overlay1 = draw_mask_overlay(truth, image, color=[255, 0, 0])\r\n\r\n overlay = np.hstack([image, overlay0, overlay1])\r\n cv2.imwrite(out_dir + '/test/%s/%s/%s.png' % (split, augment, name), overlay * 255)\r\n\r\n # image_show_norm('overlay',overlay,1,2)\r\n image_show('overlay', overlay, 2)\r\n cv2.waitKey(0)\r\n\r\n print('\\r', all_num, end='\\n', flush=True)\r\n all_prob = np.concatenate(all_prob)\r\n # for thres in xrange(0.15,0.85,0.05):\r\n all_prob = (all_prob * 255).astype(np.uint8)\r\n np.save(out_dir + '/test/%s-%s.prob.uint8.npy' % (split, augment), all_prob)\r\n print(all_prob.shape)\r\n\r\n print('')\r\n assert (all_num == len(test_loader.sampler))\r\n all_loss = all_loss / all_num\r\n print(all_loss)\r\n log.write('\\n')\r\n\r\n\r\ndef run_submit(augment,thres):\r\n print('running submit')\r\n if augment in ['null', 'flip']:\r\n augmentation = [\r\n 1, out_dir + '/test/%s-%s.prob.uint8.npy' % (split, augment),\r\n ]\r\n csv_file = out_dir + '/test/%s-%s.csv' % (split, augment)\r\n\r\n if augment == 'aug2':\r\n augmentation = [\r\n 1, out_dir + '/test/%s-%s.prob.uint8.npy' % (split, 'null'),\r\n 1, out_dir + '/test/%s-%s.prob.uint8.npy' % (split, 'flip'),\r\n ]\r\n csv_file = out_dir + '/test/%s-%s.csv' % (split, augment)\r\n\r\n ##---------------------------------------\r\n\r\n # augments, csv_file = ['null','flip'], '/submit1_simple-valid0-300-aug.csv.gz'\r\n # augments, csv_file = ['flip'], '/submit1_simple-xxx-flip.csv.gz'\r\n # augments, csv_file = ['null'], '/submit1_simple-xxx-null.csv.gz'\r\n\r\n ##---------------------------------------\r\n\r\n # save\r\n log_file = csv_file + '.log'\r\n write_list_to_file(augmentation, log_file)\r\n\r\n augmentation = np.array(augmentation, dtype=object).reshape(-1, 2)\r\n num_augments = len(augmentation)\r\n w, augment_file = augmentation[0]\r\n all_prob = w * np.load(augment_file).astype(np.float32) / 255\r\n all_w = w\r\n for i in range(1, num_augments):\r\n w, augment_file = augmentation[i]\r\n prob = w * np.load(augment_file).astype(np.float32) / 255\r\n all_prob += prob\r\n all_w += w\r\n all_prob /= all_w\r\n all_prob = all_prob > thres\r\n print(all_prob.shape)\r\n\r\n # ----------------------------\r\n\r\n split_file = 'E:\\\\DHWorkStation\\\\Project\\\\tgs_pytorch\\\\data/split/' + split\r\n lines = 
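Note the storage trick at the end of run_predict: probabilities are saved as uint8 (prob * 255) to keep the per-split .npy files small, and run_submit later divides by 255 to rebuild floats. The quantisation error is bounded by 1/255, which is negligible next to a roughly 0.5 decision threshold:

import numpy as np

probs = np.random.rand(4, 101, 101).astype(np.float32)
stored = (probs * 255).astype(np.uint8)        # what gets written to disk
restored = stored.astype(np.float32) / 255     # what run_submit reloads
assert np.abs(probs - restored).max() < 1 / 255.0 + 1e-6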
read_list_from_file(split_file)\r\n\r\n id = []\r\n rle_mask = []\r\n for n, line in enumerate(lines):\r\n folder, name = line.split('/')\r\n id.append(name)\r\n\r\n if (all_prob[n].sum() <= 0):\r\n encoding = ''\r\n else:\r\n encoding = run_length_encode(all_prob[n])\r\n assert (encoding != [])\r\n\r\n rle_mask.append(encoding)\r\n\r\n df = pd.DataFrame({'id': id, 'rle_mask': rle_mask}).astype(str)\r\n df.to_csv(csv_file, index=False, columns=['id', 'rle_mask'], encoding='utf-8')\r\n print('submit done')\r\n\r\n # csv_file = out_dir + '/submit1_iter20k-1.csv'\r\n # df.to_csv(csv_file, index=False, columns=['id', 'rle_mask'])\r\n\r\n ############################################################################################\r\n\r\n\r\ndef run_local_leaderboard(augment):\r\n # -----------------------------------------------------------------------\r\n submit_file = out_dir + '/test/%s-%s.csv' % (split, augment)\r\n dump_dir = out_dir + '/test/%s-%s-dump' % (split, augment)\r\n os.makedirs(dump_dir, exist_ok=True)\r\n\r\n log = Logger()\r\n log.open(out_dir + '/test/log.submit.txt', mode='a')\r\n\r\n split_file = 'E:\\\\DHWorkStation\\\\Project\\\\tgs_pytorch\\\\data/split/' + split\r\n lines = read_list_from_file(split_file)\r\n ids = [line.split('/')[-1] for line in lines]\r\n sorted(ids)\r\n\r\n df_submit = pd.read_csv(submit_file).set_index('id')\r\n df_submit = df_submit.fillna('')\r\n\r\n df_truth = pd.read_csv('E:\\\\DHWorkStation\\\\Project\\\\tgs_pytorch\\\\data/train.csv').set_index('id')\r\n df_truth = df_truth.loc[ids]\r\n df_truth = df_truth.fillna('')\r\n\r\n N = len(df_truth)\r\n predict = np.zeros((N, 101, 101), np.bool)\r\n truth = np.zeros((N, 101, 101), np.bool)\r\n\r\n for n in range(N):\r\n id = ids[n]\r\n p = df_submit.loc[id].rle_mask\r\n t = df_truth.loc[id].rle_mask\r\n p = run_length_decode(p, H=101, W=101, fill_value=1).astype(np.bool)\r\n t = run_length_decode(t, H=101, W=101, fill_value=1).astype(np.bool)\r\n\r\n predict[n] = p\r\n truth[n] = t\r\n\r\n # if 0:\r\n # image_p = predict[n].astype(np.uint8)*255\r\n # image_t = truth[n] .astype(np.uint8)*255\r\n # image_show('image_p', image_p,2)\r\n # image_show('image_t', image_t,2)\r\n # cv2.waitKey(0)\r\n\r\n ##--------------\r\n ### Threshold Optimizer\r\n\r\n precision, result, threshold = do_kaggle_metric(predict, truth, threshold=0.5)\r\n precision_mean = precision.mean()\r\n\r\n tp, fp, fn, tn_empty, fp_empty = result.transpose(1, 2, 0).sum(2)\r\n all = tp + fp + fn + tn_empty + fp_empty\r\n p = (tp + tn_empty) / (tp + tn_empty + fp + fp_empty + fn)\r\n\r\n log.write('\\n')\r\n log.write(' | | | empty | \\n')\r\n log.write('th | prec | tp fp fn | tn fp | \\n')\r\n log.write('-------------------------------------------------------------------------------------------\\n')\r\n for i, t in enumerate(threshold):\r\n log.write(\r\n '%0.2f | %0.2f | %3d / %0.2f %3d / %0.2f %3d / %0.2f | %3d / %0.2f %3d / %0.2f | %5d\\n' % (\r\n t, p[i],\r\n tp[i], tp[i] / all[i],\r\n fp[i], fp[i] / all[i],\r\n fn[i], fn[i] / all[i],\r\n tn_empty[i], tn_empty[i] / all[i],\r\n fp_empty[i], fp_empty[i] / all[i],\r\n all[i])\r\n )\r\n\r\n log.write('\\n')\r\n log.write('num images : %d\\n' % N)\r\n log.write('LB score : %0.5f\\n' % (precision_mean))\r\n\r\n # --------------------------------------\r\n predict = predict.reshape(N, -1)\r\n truth = truth.reshape(N, -1)\r\n p = predict > 0.5\r\n t = truth > 0.5\r\n intersection = t & p\r\n union = t | p\r\n # iou = intersection.sum(1)/(union.sum(1)+EPS)\r\n log.write('iou : %0.5f\\n' % 
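run_length_encode comes from the project's data_util import and is not defined in this file. For orientation, a common Kaggle-style implementation for this competition (column-major order, 1-indexed start/length pairs) looks like this sketch:

import numpy as np

def run_length_encode(mask):
    pixels = mask.flatten(order='F').astype(np.uint8)   # column-major, as TGS expects
    padded = np.concatenate([[0], pixels, [0]])
    runs = np.where(padded[1:] != padded[:-1])[0] + 1   # 1-indexed run boundaries
    runs[1::2] -= runs[::2]                             # (start, end) -> (start, length)
    return ' '.join(str(r) for r in runs)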
(intersection.sum() / (union.sum() + EPS)))\r\n\r\n return\r\n # exit(0)\r\n ## show --------------------------\r\n\r\n predicts = predict.reshape(-1, 101, 101).astype(np.float32)\r\n truths = truth.reshape(-1, 101, 101).astype(np.float32)\r\n for m, name in enumerate(ids):\r\n print('%s' % name)\r\n print(' | | | empty | ')\r\n print('th | prec | tp fp fn | tn fp | ')\r\n print('------------------------------------------------')\r\n for i, t in enumerate(threshold):\r\n tp, fp, fn, fp_empty, tn_empty = result[m, :, i]\r\n p = (tp + tn_empty) / (tp + tn_empty + fp + fp_empty + fn)\r\n print('%0.2f | %0.2f | %d %d %d | %d %d ' % (\r\n t, p, tp, fp, fn, fp_empty, tn_empty))\r\n print(precision[m])\r\n print('')\r\n # ----\r\n image_file = '/root/share/project/kaggle/tgs/data/train/images/' + name + '.png'\r\n image = cv2.imread(image_file, cv2.IMREAD_COLOR)\r\n # mask = mask>0\r\n\r\n predict = predicts[m]\r\n truth = truths[m]\r\n\r\n # print(predict.sum())\r\n\r\n overlay0 = draw_mask_overlay(predict, image, color=[0, 0, 255])\r\n overlay0 = draw_mask_to_contour_overlay(predict, overlay0, 1, color=[0, 0, 255])\r\n overlay1 = draw_mask_overlay(truth, image, color=[0, 255, 0])\r\n overlay1 = draw_mask_to_contour_overlay(truth, overlay1, 1, color=[0, 255, 0])\r\n overlay2 = draw_mask_overlay(predict, None, color=[0, 0, 255])\r\n overlay2 = draw_mask_overlay(truth, overlay2, color=[0, 255, 0])\r\n\r\n draw_shadow_text(image, '%0.2f' % precision[m], (3, 15), 0.5, [255, 255, 255], 1)\r\n\r\n overlay = np.hstack([image, overlay0, overlay1, overlay2])\r\n cv2.imwrite(dump_dir + '/%s.png' % name, overlay)\r\n image_show('overlay', overlay, 2)\r\n cv2.waitKey(1)\r\n\r\n\r\nif mode == 'valid':\r\n for a in ['null', 'flip']:\r\n print('a=', a)\r\n run_predict(a)\r\n\r\n\r\n #run_submit('aug2')\r\n\r\n for t in np.arange(0.3,0.6,0.01):\r\n print(t)\r\n for a in [ 'aug2']:\r\n print('a=', a)\r\n run_submit(a,t)\r\n run_local_leaderboard(a)\r\n\r\nif mode == 'test':\r\n run_predict('null')\r\n run_predict('flip')\r\n run_submit('aug2',0.51)\r\n # run_local_leaderboard()\r\n\r\nprint('\\nsucess!')\r\n","sub_path":"projects/TGS_salt/DingHan/data/DingHan/seresnet50/submit_seresnext5_bn_128.py","file_name":"submit_seresnext5_bn_128.py","file_ext":"py","file_size_in_byte":15525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"457555384","text":"#!/usr/bin/env python\nimport sys\nimport rospy\nfrom geometry_msgs.msg import Point\nfrom geometry_msgs.msg import PointStamped\nfrom keypoint_3d_matching_msgs.msg import *\nfrom visualization_msgs.msg import Marker\nfrom scipy.spatial import distance\n\n\ndef main():\n\trospy.init_node('yaml_read')\n\tpub = rospy.Publisher(\"raw_points_online\", Keypoint3d_list, queue_size=10)\n\tfile = open(sys.argv[1], 'r')\n\tfl = file.readlines()\n\ttimes = []\n\trospy.sleep(2)\n\t\n\tpubRaw = rospy.Publisher(\"vis_raw\", Marker, queue_size=100)\n\tmarkerRaw = Marker()\n\tmarkerRaw.header.frame_id = \"base_link\"\n\tmarkerRaw.header.stamp = rospy.Time.now()\n\tmarkerRaw.action = markerRaw.ADD\n\tmarkerRaw.type = markerRaw.LINE_STRIP\n\tmarkerRaw.pose.position.x = 0\n\tmarkerRaw.pose.position.y = 0\n\tmarkerRaw.pose.position.z = 0\n\tmarkerRaw.pose.orientation.x = 0\n\tmarkerRaw.pose.orientation.y = 0\n\tmarkerRaw.pose.orientation.z = 0\n\tmarkerRaw.pose.orientation.w = 1\n\tmarkerRaw.scale.x = 0.01\n\tmarkerRaw.color.a = 1.0\n\tmarkerRaw.color.r = 1.0\n\tmarkerRaw.color.g = 0.0\n\tmarkerRaw.color.b = 
0.0\n\tmarkerRaw.lifetime = rospy.Duration(100)\n\tj=0\n\tfor i in range(len(fl)):\n\t\tpoint = Keypoint3d_list()\n\t\tif \"RW\" in fl[i]:\n\t\t\tkeypoint = Keypoint3d()\n\t\t\tkeypoint.name = \"RWrist\"\n\t\t\tkeypoint.points.header.stamp = rospy.Time(float(fl[i+5][16:-1]+'.'+fl[i+6][17:].replace(' ', '0')))\n\t\t\tkeypoint.points.point.x = float(fl[i+9][11:])\n\t\t\tkeypoint.points.point.y = float(fl[i+10][11:])\n\t\t\tkeypoint.points.point.z = float(fl[i+11][11:])\n\t\t\ttry:\n\t\t\t\tif times[-1] > keypoint.points.header.stamp.to_sec() + 1 and keypoint.points.header.stamp.to_sec() != 0:\n\t\t\t\t\trospy.logwarn(\"End of points\")\n\t\t\t\t\tbreak\n\t\t\texcept Exception as e:\n\t\t\t\tprint (e)\n\t\t\tpoint.keypoints.append(keypoint)\n\t\t\tpoint_marker = Point()\n\t\t\tpoint_marker.x = keypoint.points.point.x\n\t\t\tpoint_marker.y = keypoint.points.point.y\n\t\t\tpoint_marker.z = keypoint.points.point.z\n\t\t\tmarkerRaw.points.append(point_marker)\n\t\t\tpubRaw.publish(markerRaw)\n\t\t\tpub.publish(point)\n\t\t\tj += 1\n\t\t\trospy.loginfo(\"Published keypoint %d\" %j)\n\t\t\trospy.sleep(0.047)\n\t\t\t# try:\n\t\t\t# \trospy.sleep(keypoint.points.header.stamp.to_sec()-times[-1])\n\t\t\t# \trospy.loginfo('Slept for ' + str(keypoint.points.header.stamp.to_sec() - times[-1]))\n\t\t\t# except Exception as ex:\n\t\t\t# \trospy.logwarn(ex)\n\t\t\t# \trospy.sleep(10)\n\t\t\ttimes.append(keypoint.points.header.stamp.to_sec())\n\n\trospy.loginfo(\"Published all keypoints\")\n\nmain()\n","sub_path":"scripts/yaml_read.py","file_name":"yaml_read.py","file_ext":"py","file_size_in_byte":2389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"194365541","text":"import requests\r\nimport pprint\r\nimport tablib\r\nimport os\r\nimport sys\r\nimport colorlog\r\nimport time\r\nimport logging\r\nimport re\r\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\r\n\r\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\r\n\r\nclass APIPayview:\r\n def __init__(self, ssdomain, login, mdp):\r\n self.ssdomain = ssdomain\r\n self.login = login\r\n self.mdp = mdp\r\n\r\n self._cookies = None\r\n self.proxies = None #{\"http\":\"http://85.115.60.150:80\" ,\"https\":\"http://85.115.60.150:80\" }\r\n \r\n # LOGIN AS\r\n self._ssdomainAs = None\r\n self._cookiesAs = None\r\n\r\n # General\r\n self._orgaUUID = None\r\n self.URI = f'https://{self.getSsdomain()}.payview.fr/api'\r\n self.HEADERS = {\r\n 'Connection':\"keep-alive\",\r\n 'accept': 'application/json',\r\n #'content-type': 'application/json;charset=UTF-8',\r\n 'referer':f'https://{self.getSsdomain()}.payview.fr/',\r\n 'Host': f'{self.getSsdomain()}.payview.fr'\r\n }\r\n\r\n def getCookie(self):\r\n if self._cookiesAs:\r\n return self._cookiesAs\r\n return self._cookies\r\n\r\n def getSsdomain(self):\r\n if self._ssdomainAs:\r\n return self._ssdomainAs\r\n return self.ssdomain\r\n\r\n def callAPI(self, method,uri, jsonData=None,query=None):\r\n return requests.request(method,url=self.URI+uri, headers=self.HEADERS,json=jsonData,params=query, verify=False,proxies=self.proxies,cookies=self.getCookie(),timeout=40 )\r\n\r\n def Login(self):\r\n res = self.callAPI(\"POST\", '/login', jsonData={'email':self.login,'password':self.mdp})\r\n #res = requests.post(self.URI+'/login',headers=self.HEADERS,json={'email':self.login,'password':self.mdp},verify=False,proxies=None )\r\n self._cookies = res.cookies\r\n\r\n def LoginAs(self,token):\r\n res = self.callAPI(\"POST\",\"/login\", 
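The class above stores the cookies returned by /login in self._cookies and threads them by hand into every requests.request call, alongside verify=False and the proxy settings. An alternative design (not what this file does) is requests.Session, which persists cookies and headers automatically; the hostname and credentials below are placeholders:

import requests

# Sketch only: the real class juggles two cookie jars so it can "login as"
# another organisation; a Session covers the simple single-login case.
session = requests.Session()
session.headers.update({'accept': 'application/json'})
session.verify = False          # mirrors the verify=False in callAPI
resp = session.post('https://example.payview.fr/api/login',
                    json={'email': 'user@example.com', 'password': 'secret'})
# later calls reuse whatever cookies the login response set:
who = session.get('https://example.payview.fr/api/session')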
jsonData={'token': token})\r\n self._cookiesAs = res.cookies\r\n \r\n def LogoutAs(self):\r\n self._cookiesAs = self._ssdomainAs = None\r\n\r\n def getSession(self):\r\n res = self.callAPI(\"GET\", '/session' ).json()\r\n self._orgaUUID = res['organization']\r\n\r\n def getTerminals(self, pageSize=50, offset=0,SNSearched=None,fournisseur=None ):\r\n if not self._orgaUUID:\r\n self.getSession()\r\n params = {'provider':self._orgaUUID,'limit':pageSize,'offset':offset }\r\n if SNSearched:\r\n params['posTerminal.serialNumber']= SNSearched\r\n if fournisseur:\r\n params['directProvider.legalName'] = fournisseur\r\n \r\n res = self.callAPI(\"GET\", '/posTerminalSubscriptions',query=params ).json()\r\n\r\n return res\r\n\r\n def getSims(self,pageSize=20, offset=0,iccid=None,nomOrga=None ):\r\n if not self._orgaUUID:\r\n self.getSession()\r\n params = {'provider':self._orgaUUID,'limit':pageSize,'offset':offset }\r\n\r\n if iccid:\r\n params['sim.iccid'] = iccid\r\n if nomOrga:\r\n params['client.legalName'] = nomOrga\r\n\r\n res = self.callAPI(\"GET\", '/simSubscriptions',query=params ).json()\r\n\r\n return res\r\n\r\n # {\"items\": [\r\n\t\t# {\r\n\t\t# \t\"uuid\": \"27d1168e-ebe6-4724-8896-be78cc3ad331\",\r\n\t\t# \t\"createdAt\": \"2020-09-24T10:17:28.838164+00:00\",\r\n\t\t# \t\"updatedAt\": \"2020-11-10T10:20:41.948719+00:00\",\r\n\t\t# \t\"provider\": \"4ac85a69-8e6e-4f8f-adbb-4c62aba60dd6\",\r\n\t\t# \t\"client\": {\r\n\t\t# \t\t\"uuid\": \"860e553d-6328-4b04-8d0c-ccf3d764cf18\",\r\n\t\t# \t\t\"provider\": \"4ac85a69-8e6e-4f8f-adbb-4c62aba60dd6\",\r\n\t\t# \t\t\"legalName\": \"AVT\"\r\n\t\t# \t},\r\n\t\t# \t\"sim\": {\r\n\t\t# \t\t\"uuid\": \"a3b6e308-80da-4cff-9218-231b80cd1872\",\r\n\t\t# \t\t\"createdAt\": \"2020-09-18T17:01:18.391435+00:00\",\r\n\t\t# \t\t\"updatedAt\": \"2020-11-10T10:20:41.948719+00:00\",\r\n\t\t# \t\t\"label\": \" \",\r\n\t\t# \t\t\"iccid\": \"89332401000015992403\",\r\n\t\t# \t\t\"status\": \"PENDING_ACTIVATION\",\r\n\t\t# \t\t\"tariffData\": null,\r\n\t\t# \t\t\"tariffDataUnit\": null,\r\n\t\t# \t\t\"activationDate\": null,\r\n\t\t# \t\t\"endOfEngagementDate\": null,\r\n\t\t# \t\t\"lastCommunicationDate\": null,\r\n\t\t# \t\t\"firstCommunicationDate\": null,\r\n\t\t# \t\t\"posTerminal\": null,\r\n\t\t# \t\t\"dataUsage\": null\r\n\t\t# \t}\r\n\t\t# }],\"totalResults\": 48891}\r\n\r\n def resilierSim(self, uuidList ):\r\n if not isinstance(uuidList,list):\r\n uuidList = [uuidList]\r\n\r\n res = self.callAPI(\"POST\",\"/sims/terminate\", jsonData={'uuids':uuidList }).json()\r\n return res #{}\r\n\r\n def getOrganizations(self,pageSize=20, offset=0, nomOrgaSearched =None, orgaUuid=None,typeOrga=\"WHITE_LABEL\" ):\r\n params = {'limit':pageSize,'offset':offset,'type':typeOrga }\r\n\r\n if nomOrgaSearched:\r\n params['legalName'] = nomOrgaSearched\r\n\r\n if orgaUuid == None:\r\n if not self._orgaUUID:\r\n self.getSession()\r\n orgaUuid = self._orgaUUID\r\n \r\n logging.info(f\"[PayView] Lecture organisations de {orgaUuid} offset={offset}\")\r\n res = self.callAPI(\"GET\", f'/organizations/{orgaUuid}/clients',query=params ).json()\r\n return res\r\n #{'items':[{businessName: null,createdAt: \"2020-06-15T15:52:04.132001+00:00\",legalName: \"GRAND FRAIS (FUJITSU)\",registrationNumber: \"38780672200022\",type: \"WHITE_LABEL\"\r\n #updatedAt: \"2020-11-12T13:12:39.847317+00:00\",uuid: \"cf9c18bb-aadb-4629-b258-0b5e54850138\"}],\r\n #totalResults: 1}\r\n\r\n def getAllClients(self, orgaUuid=None, grossisteList=['HM TELECOM','AVT','SATIN','IPSF','BRED BANQUE POPULAIRE','LM 
CONTROL'], parentGrossite=None):\r\n pageSize =20\r\n offset = 0\r\n tabRes = []\r\n\r\n more = True\r\n\r\n while more:\r\n res = self.getOrganizations(pageSize=pageSize, offset =offset,orgaUuid=orgaUuid)['items']\r\n\r\n for r in res:\r\n legalName = r['legalName'] \r\n if grossisteList and legalName in grossisteList:\r\n tabRes += self.getAllClients( r['uuid'] , grossisteList=None,parentGrossite=legalName )\r\n else:\r\n tabRes.append( {'legalName': legalName, 'uuid':r['uuid'], 'grossiste':parentGrossite} )\r\n\r\n more = len(res) > 0\r\n offset += pageSize\r\n\r\n return tabRes\r\n\r\n def getUsers(self,pageSize=20, offset=0,organizationId = None, emailSearched=None):\r\n if not self._orgaUUID:\r\n self.getSession()\r\n\r\n params = {}\r\n if emailSearched:\r\n params['email'] = emailSearched\r\n \r\n if organizationId:\r\n params['organization'] = organizationId\r\n else:\r\n params['organization'] = self._orgaUUID\r\n\r\n params['limit'] = pageSize\r\n params['offset'] = offset\r\n\r\n res = self.callAPI(\"GET\", f'/users?email={emailSearched}&organization={organizationId}&limit={pageSize}&offset={offset}')\r\n\r\n return res.json()\r\n\r\n def creerCompteAcces(self, email, labelCompte, contratsUidList):\r\n params = {'provider':self._orgaUUID, 'email':email, 'label':labelCompte, 'contractProfiles':[{'contract': uid} for uid in contratsUidList]}\r\n res = self.callAPI(\"POST\",\"/contractsAccesses\",jsonData=params ).json()\r\n assert res['success']\r\n\r\n def getComptesAcces(self,pageSize=50, offset=0,emailRecherche=None):\r\n if not self._orgaUUID:\r\n self.getSession()\r\n\r\n params = { 'limit':pageSize,'offset':offset }\r\n if emailRecherche:\r\n params['email'] = emailRecherche\r\n res = self.callAPI(\"GET\", f'/organizations/{self._orgaUUID}/contractsAccesses',query=params ).json()\r\n #res = requests.get(self.URI+f'/organizations/{self._orgaUUID}/contractsAccesses', params=params, headers=self.HEADERS, cookies=self._cookies).json()\r\n if not res['items']:\r\n return None\r\n else:\r\n return res['items']\r\n\r\n #pprint.pprint(res.json())\r\n #{'items': [{'createdAt': '2020-10-26T07:58:30.325258+00:00',\r\n # 'email': 'thierry.cade@ingenico.com',\r\n # 'label': 'Test Portail COM TCA',\r\n # 'provider': 'fb10e478-7981-4cd7-8fb0-d980e7412dc7',\r\n # 'status': 'ACTIVE',\r\n # 'updatedAt': '2020-10-26T07:58:30.325258+00:00',\r\n # 'uuid': '55146a48-b180-459a-bd26-a4bb6b65a80e'}..],'totalResults': 6}\r\n\r\n def detailsCompteDacces(self, compteUUID,pageSize=50, offset=0):\r\n params = { 'limit':pageSize,'offset':offset }\r\n res = self.callAPI(\"GET\", f'/contractsAccesses/{compteUUID}',query=params ).json()\r\n\r\n return {'contracts': res.get(\"contractProfiles\",None), 'users': res['users'],\"uuid\":res[\"uuid\"]}\r\n # {\"uuid\": \"55146a48-b180-459a-bd26-a4bb6b65a80e\",\r\n # \"createdAt\": \"2020-10-26T07:58:30.325258+00:00\", \r\n # \"updatedAt\": \"2020-10-26T07:58:30.325258+00:00\", \r\n # \"label\": \"Test Portail COM TCA\", \r\n # \"provider\": \"fb10e478-7981-4cd7-8fb0-d980e7412dc7\", \r\n # \"status\": \"ACTIVE\", \"contractProfiles\": [\r\n # {\"uuid\": \"c9ca5b91-7c40-4130-82b4-18b8fb924ee1\", \"createdAt\": \"2020-10-26T07:58:30.334008+00:00\", \"updatedAt\": \"2020-10-26T07:58:30.334008+00:00\", \"application\": \"CBEMV\", \"bankCode\": \"30003\", \"rank\": null, \"contract\": \"3619346\", \"merchantLabel\": \"COM TCA\"}, \r\n # {\"uuid\": \"268c528a-7fb6-46fd-99c4-141d17d2d235\", \"createdAt\": \"2020-10-26T07:58:30.338473+00:00\", \"updatedAt\": 
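getAllClients above pages through /organizations/<uuid>/clients by bumping offset in steps of pageSize until a page comes back empty, recursing into organisations whose name appears in grossisteList. The paging half of that generalises to a small generator (sketch, reusing the class's own getOrganizations method):

def iter_clients(api, orga_uuid, page_size=20):
    """Yield every client of one organisation, one API page at a time."""
    offset = 0
    while True:
        page = api.getOrganizations(pageSize=page_size, offset=offset,
                                    orgaUuid=orga_uuid)['items']
        if not page:          # an empty page means no more results
            return
        for item in page:
            yield item
        offset += page_size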
\"2020-10-26T07:58:30.338473+00:00\", \"application\": \"CBCLESS\", \"bankCode\": \"26550\", \"rank\": null, \"contract\": \"1999281\", \"merchantLabel\": \"COM 1\"}, \r\n # {\"uuid\": \"4c845fba-810c-426d-8299-2d8c2badf57f\", \"createdAt\": \"2020-10-26T07:58:30.342159+00:00\", \"updatedAt\": \"2020-10-26T07:58:30.342159+00:00\", \"application\": \"CBCLESS\", \"bankCode\": \"30004\", \"rank\": null, \"contract\": \"4338295\", \"merchantLabel\": \"COM 2\"}, \r\n # {\"uuid\": \"11772efa-28e1-401b-b080-dd61e7d75a06\", \"createdAt\": \"2020-10-26T07:58:30.352955+00:00\", \"updatedAt\": \"2020-10-26T07:58:30.352955+00:00\", \"application\": \"CBEMV\", \"bankCode\": \"30004\", \"rank\": null, \"contract\": \"4338292\", \"merchantLabel\": \"COM 3\"}, \r\n # {\"uuid\": \"d3cc4d4e-9556-418d-adc8-c25c16403fb4\", \"createdAt\": \"2020-10-26T07:58:30.356316+00:00\", \"updatedAt\": \"2020-10-26T07:58:30.356316+00:00\", \"application\": \"CBEMV\", \"bankCode\": \"30004\", \"rank\": null, \"contract\": \"4338295\", \"merchantLabel\": \"COM 4\"}, \r\n # {\"uuid\": \"700596ae-3562-4c4c-9115-dc964bacfeff\", \"createdAt\": \"2020-10-26T07:58:30.360142+00:00\", \"updatedAt\": \"2020-10-26T07:58:30.360142+00:00\", \"application\": \"CBCLESS\", \"bankCode\": \"30001\", \"rank\": null, \"contract\": \"2330301\", \"merchantLabel\": \"COM 5\"}, \r\n # {\"uuid\": \"dd886d02-2b8c-4499-8eba-e9494646c0d5\", \"createdAt\": \"2020-10-26T07:58:30.374195+00:00\", \"updatedAt\": \"2020-10-26T07:58:30.374195+00:00\", \"application\": \"CONECS\", \"bankCode\": \"10000\", \"rank\": null, \"contract\": \"1207847\", \"merchantLabel\": null}, \r\n # {\"uuid\": \"9e72f3a8-beb0-4e90-8d15-cf34b7383960\", \"createdAt\": \"2020-10-26T07:58:30.380033+00:00\", \"updatedAt\": \"2020-10-26T07:58:30.380033+00:00\", \"application\": \"CBCLESS\", \"bankCode\": \"11899\", \"rank\": null, \"contract\": \"4278622\", \"merchantLabel\": null}, \r\n # {\"uuid\": \"576da3f2-fda2-4b82-b918-8dab0e84ded1\", \"createdAt\": \"2020-10-26T07:58:30.392022+00:00\", \"updatedAt\": \"2020-10-26T07:58:30.392022+00:00\", \"application\": \"CBCLESS\", \"bankCode\": \"11899\", \"rank\": null, \"contract\": \"4278155\", \"merchantLabel\": null}, \r\n # {\"uuid\": \"9fbae29c-c1b7-4f9b-9206-f53f2cacbddb\", \"createdAt\": \"2020-10-26T07:58:30.395331+00:00\", \"updatedAt\": \"2020-10-26T07:58:30.395331+00:00\", \"application\": \"CBEMV\", \"bankCode\": \"11899\", \"rank\": null, \"contract\": \"4278155\", \"merchantLabel\": null}, \r\n # {\"uuid\": \"31004942-506a-4313-b51f-0a4ca98035e3\", \"createdAt\": \"2020-10-26T07:58:30.397852+00:00\", \"updatedAt\": \"2020-10-26T07:58:30.397852+00:00\", \"application\": \"CBEMV\", \"bankCode\": \"30004\", \"rank\": null, \"contract\": \"4358988\", \"merchantLabel\": null}, \r\n # {\"uuid\": \"aaf82ceb-44a2-4a1e-96ca-cc7ab037d1cc\", \"createdAt\": \"2020-10-26T07:58:30.412461+00:00\", \"updatedAt\": \"2020-10-26T07:58:30.412461+00:00\", \"application\": \"CBEMV\", \"bankCode\": \"30004\", \"rank\": null, \"contract\": \"4290394\", \"merchantLabel\": null}, \r\n # {\"uuid\": \"74161e3e-6628-4452-8cd6-2eeb916c71b7\", \"createdAt\": \"2020-10-26T07:58:30.417437+00:00\", \"updatedAt\": \"2020-10-26T07:58:30.417437+00:00\", \"application\": \"CONECS\", \"bankCode\": \"10000\", \"rank\": null, \"contract\": \"1207925\", \"merchantLabel\": null}, {\"uuid\": \"05216f24-b58d-434b-8f1b-406030fe1713\", \"createdAt\": \"2020-10-26T07:58:30.420862+00:00\", \"updatedAt\": \"2020-10-26T07:58:30.420862+00:00\", \"application\": \"CBCLESS\", 
\"bankCode\": \"30003\", \"rank\": null, \"contract\": \"3619346\", \"merchantLabel\": null},\r\n # {\"uuid\": \"6b1afa41-568e-4cb4-91a8-443b5c0e4652\", \"createdAt\": \"2020-10-26T07:58:30.433624+00:00\", \"updatedAt\": \"2020-10-26T07:58:30.433624+00:00\", \"application\": \"CBEMV\", \"bankCode\": \"11899\", \"rank\": null, \"contract\": \"4278622\", \"merchantLabel\": null}], \r\n # \"users\": [{\"uuid\": \"cec9a8d1-4162-4bb5-a5c7-859340babe63\", \"email\": \"thierry.cade@ingenico.com\", \"createdAt\": \"2020-10-26T07:58:28.204615+00:00\", \"updatedAt\": \"2020-10-26T08:00:23.438885+00:00\", \"organization\": \"fb10e478-7981-4cd7-8fb0-d980e7412dc7\", \"phoneNumber\": null, \"isAdmin\": true}]}\r\n \r\n def modifierLibelleContratCom(self,compteUUID,contratUUID,newLabel):\r\n res = self.callAPI(\"PUT\",f'/contractsAccesses/{compteUUID}/contracts/{contratUUID}',jsonData={'merchantLabel': newLabel} ).json()\r\n #res = requests.put(self.URI+f'/contractsAccesses/{compteUUID}/contracts/{contratUUID}',json={'merchantLabel': newLabel}, headers=self.HEADERS, cookies=self._cookies)\r\n #{\"success\": true}\r\n assert res['success']\r\n \r\n #Attention rank à spécifier sur 3 caractères \"001\"\r\n def ajouteUnContratAunCompteAccess(self,compteUUID, listContractRankLabel):\r\n res = self.callAPI(\"POST\",f'/contractsAccesses/{compteUUID}/contractProfiles',jsonData=listContractRankLabel).json()\r\n # res = requests.post(self.URI+f'/contractsAccesses/{compteUUID}/contractProfiles',json=listContractRankLabel, headers=self.HEADERS, cookies=self._cookies).json()\r\n assert res['success']\r\n #[{\"contract\":\"43673d25-3699-49a3-a989-0fef2c500453\",\"rank\":null,\"merchantLabel\":\"testSQ\"}]\r\n\r\n def supprimerUnContratDunCompteDAccess(self,compteUUID,contractID):\r\n res = self.callAPI(\"DELETE\", f'/contractsAccesses/{compteUUID}/contracts/{contractID}').json()\r\n #res = requests.delete(self.URI+f'/contractsAccesses/{compteUUID}/contracts/{contractID}', headers=self.HEADERS, cookies=self._cookies).json()\r\n assert res['success']\r\n\r\n def supprimerTousLesContratsDunCompteDaccess(self,compteUUID):\r\n details = self.detailsCompteDacces(compteUUID)\r\n if details['contracts']:\r\n for c in details['contracts']:\r\n self.supprimerUnContratDunCompteDAccess(compteUUID=details[\"uuid\"],contractID=c[\"uuid\"] )\r\n\r\n def connectAs(self,userId):\r\n res = self.callAPI(\"POST\",f\"/users/{userId}/loginAs\")\r\n URL = res.headers['x-location'] #https://grandfrais.payview.fr/#/login?flt=48cc3955-0acc-4f60-ac13-020f635ebcd9\r\n\r\n m = re.match(r'^https://(?P.*).payview.fr/#/login\\?flt=(?P.*)$', URL) #le ? est un caractère special doit etre échappé: . ^ $ * + ? 
{ } [ ] \\ | ( )\r\n assert m,f\"Pas de matching sur URL {URL}\"\r\n self._ssdomainAs = m.group('ssdomain')\r\n \r\n self.LoginAs(m.group('token'))\r\n\r\n#https://test.payview.fr/api/contractsAccesses/35f06d48-03a6-43d1-8235-5da9b4dd0008/contracts/11701f76-7289-48c7-8e6e-d11f46accb10\r\n def contratsDisponibles(self,pageSize=50, offset=0,numContrat=None,bankCode=None,rank=None,application=None, partSN = None):\r\n params = { 'limit':pageSize,'offset':offset,'provider':self._orgaUUID }\r\n if numContrat:\r\n params['number']=numContrat\r\n if bankCode:\r\n params['bankCode'] = bankCode\r\n if rank:\r\n params['rank'] = rank\r\n if application:\r\n params['application']=application\r\n if partSN:\r\n params['posTerminal.serialNumber'] = partSN\r\n res = self.callAPI('GET','/merchantContractsFromProvider', query= params ).json()\r\n # res = requests.get(self.URI+'/merchantContractsFromProvider',params = params, headers=self.HEADERS, cookies=self._cookies)\r\n # res = res.json()\r\n\r\n if not res['items']:\r\n return None\r\n else:\r\n return res['items']\r\n\r\n# RES: {\r\n# \t\"items\": [\r\n# \t\t{\r\n# \t\t\t\"uuid\": \"b8bfbba4-f3de-43c2-92b3-3fdebc69538f\",\r\n# \t\t\t\"application\": null,\r\n# \t\t\t\"number\": \"6262079\",\r\n# \t\t\t\"rank\": \"002\",\r\n# \t\t\t\"label\": \"-\",\r\n# \t\t\t\"bankCode\": \"30066\",\r\n# \t\t\t\"x25Address\": \"196358779\",\r\n# \t\t\t\"itp\": \"193551310711\",\r\n# \t\t\t\"cbVersion\": null,\r\n# \t\t\t\"registrationNumber\": \"55201420101303\",\r\n# \t\t\t\"legalAndBusinessName\": \"SELECTA\",\r\n# \t\t\t\"lastSourceIp\": \"212.243.142.172\",\r\n# \t\t\t\"lastIccid\": null,\r\n# \t\t\t\"lastSslVersion\": \"TLSv1.2\",\r\n# \t\t\t\"lastConnectionType\": \"ETHERNET\",\r\n# \t\t\t\"lastRemoteCollectionDate\": \"2020-10-27T15:59:34.182000+00:00\",\r\n# \t\t\t\"lastConnectionDate\": \"2020-10-27T15:59:34.182000+00:00\",\r\n# \t\t\t\"createdAt\": \"2020-10-27T16:00:05.324357+00:00\",\r\n# \t\t\t\"updatedAt\": \"2020-10-27T16:00:05.324357+00:00\",\r\n# \t\t\t\"posTerminal\": {\r\n# \t\t\t\t\"uuid\": \"966d975e-4e42-4875-aae4-ac93df1dcb2f\",\r\n# \t\t\t\t\"serialNumber\": \"SE15728903\",\r\n# \t\t\t\t\"manufacturer\": \"INGENICO\"\r\n# \t\t\t},\r\n# \t\t\t\"posTerminalSubscription\": \"498eb9c0-8b54-497c-9e34-6d84588a2791\"\r\n# \t\t}\t\r\n# \t],\r\n# \t\"totalResults\": 35\r\n# }\r\n\r\n# GET /merchantContractsFromDirectProvider?provider=fb10e478-7981-4cd7-8fb0-d980e7412dc7&limit=20&offset=0\r\n# Donne les contrats disponibles sur ce client\r\ndef exportListeVersExcel(filePath, liste, titre=\"export\"):\r\n if not liste:\r\n logging.error(f\"Liste vide pour export {titre}\")\r\n return\r\n\r\n tabCli = tablib.Dataset(title=titre, headers= liste[0].keys() )\r\n\r\n for l in liste:\r\n tabCli.append( l.values() )\r\n\r\n with open( filePath, mode='wb') as f: #PermissionError si déjà ouvert\r\n f.write(tabCli.export('xlsx'))\r\n\r\ndef setup_logging():\r\n logger = logging.getLogger()\r\n logger.setLevel(logging.DEBUG)\r\n\r\n #Console Logger\r\n consoleHandler = logging.StreamHandler()\r\n consoleFormatter = colorlog.ColoredFormatter(\r\n \"%(log_color) s%(message)s\",\r\n datefmt=None,\r\n reset=True,\r\n log_colors={\r\n 'DEBUG': 'cyan',\r\n 'INFO': 'green',\r\n 'WARNING': 'yellow',\r\n 'ERROR': 'red',\r\n 'CRITICAL': 'red',\r\n }\r\n )\r\n consoleHandler.setFormatter(consoleFormatter)\r\n consoleHandler.setLevel(logging.INFO)\r\n \r\n if (logger.hasHandlers()):\r\n logger.handlers.clear()\r\n\r\n logger.addHandler(consoleHandler)\r\n #File Logger\r\n 
dossierTests = os.path.abspath( os.path.dirname( __file__))\r\n dossierLogs = os.path.join(dossierTests, 'logs')\r\n\r\n if not os.path.isdir(dossierLogs):\r\n os.makedirs(dossierLogs)\r\n\r\n logFilePath= os.path.join( dossierLogs, f'{time.strftime(\"%Y%m%d_%Hh%M\")}_tests.log')\r\n\r\n fileHandler =logging.FileHandler(filename=logFilePath, mode='a', encoding=\"utf-8\", delay=False)\r\n fileHandler.setFormatter( logging.Formatter('%(levelname)s :: %(message)s') )\r\n fileHandler.setLevel(logging.DEBUG)\r\n logger.addHandler(fileHandler)\r\n return f\"Logs sous {logFilePath}\"\r\n","sub_path":"APIPayview.py","file_name":"APIPayview.py","file_ext":"py","file_size_in_byte":19580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"282245170","text":"from __future__ import annotations\n\nimport datetime\nfrom typing import TYPE_CHECKING, Optional\n\nfrom .asset import Asset\nfrom .channel import Messageable\nfrom .embed import Embed\n\nif TYPE_CHECKING:\n from .state import State\n from .types import Message as MessagePayload\n from .user import User\n\n\n__all__ = (\"Message\",)\n\nclass Message:\n \"\"\"Represents a message\n \n Attributes\n -----------\n id: :class:`str`\n The id of the message\n content: :class:`str`\n The content of the message, this will not include system message's content\n attachments: list[:class:`Asset`]\n The attachments of the message\n embeds: list[:class:`Embed`]\n The embeds of the message\n channel: :class:`Messageable`\n The channel the message was sent in\n server: :class:`Server`\n The server the message was sent in\n author: Union[:class:`Member`, :class:`User`]\n The author of the message, will be :class:`User` in DMs\n edited_at: Optional[:class:`datetime.datetime`]\n The time at which the message was edited, will be None if the message has not been edited\n mentions: List[:class:int]\n The user IDs which were mentioned in the message\n \"\"\"\n __slots__ = (\"state\", \"id\", \"content\", \"attachments\", \"embeds\", \"channel\", \"server\", \"author\", \"edited_at\", \"mentions\")\n \n def __init__(self, data: MessagePayload, state: State):\n self.state = state\n self.mentions: list[User] = [self.state.get_user(mention) for mention in data[\"mentions\"]]\n self.id = data[\"_id\"]\n self.content = data[\"content\"]\n self.attachments = [Asset(attachment, state) for attachment in data.get(\"attachments\", [])]\n self.embeds = [Embed.from_dict(embed) for embed in data.get(\"embeds\", [])]\n\n channel = state.get_channel(data[\"channel\"])\n assert isinstance(channel, Messageable)\n self.channel = channel\n\n self.server = self.channel and self.channel.server\n \n if self.server:\n author = state.get_member(self.server.id, data[\"author\"])\n else:\n author = state.get_user(data[\"author\"])\n\n assert author\n self.author = author\n\n self.edited_at: Optional[datetime.datetime] = None\n\n def _update(self, *, content: Optional[str] = None, edited_at: Optional[str] = None) -> Message:\n if content:\n self.content = content\n\n if edited_at:\n self.edited_at = datetime.datetime.strptime(edited_at, \"%Y-%m-%dT%H:%M:%S.%f%z\")\n # strptime is used here instead of fromisoformat because of its inability to parse `Z` (Zulu or UTC time) in the RFCC 3339 format provided by API\n\n return self\n\n async def edit(self, *, content: str) -> None:\n \"\"\"Edits the message. 
The bot can only edit its own message\n Parameters\n -----------\n content: :class:`str`\n The new content of the message\n \"\"\"\n await self.state.http.edit_message(self.channel.id, self.id, content)\n\n async def delete(self) -> None:\n \"\"\"Deletes the message. The bot can only delete its own messages and messages it has permission to delete \"\"\"\n await self.state.http.delete_message(self.channel.id, self.id)\n","sub_path":"revolt/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":3262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"14914498","text":"# -*- coding: utf-8 -*-\r\n\r\n'''\r\n\r\nCreated on 2020-5-6\r\n\r\n@author: suning\r\n\r\n'''\r\n\r\nfrom suning.api.abstract import AbstractApi\r\n\r\n\r\n\r\nclass SignaturewhiteDeleteRequest(AbstractApi):\r\n\r\n '''\r\n\r\n '''\r\n\r\n def __init__(self):\r\n\r\n AbstractApi.__init__(self)\r\n\r\n self.accessKeyId = None\r\n self.accessSign = None\r\n self.mobile = None\r\n self.signature = None\r\n self.timeStamp = None\r\n \r\n self.setParamRule({\r\n \t'accessKeyId':{'allow_empty':False},\r\n \t'accessSign':{'allow_empty':False},\r\n \t'mobile':{'allow_empty':False},\r\n \t'signature':{'allow_empty':False},\r\n \t'timeStamp':{'allow_empty':False}\r\n \t})\r\n\r\n def getApiBizName(self):\r\n\r\n return 'deleteSignaturewhite'\r\n\r\n def getApiMethod(self):\r\n\r\n return 'suning.custom.signaturewhite.delete'\r\n\r\n\r\n\r\n","sub_path":"suning/api/custom/SignaturewhiteDeleteRequest.py","file_name":"SignaturewhiteDeleteRequest.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"401611203","text":"# implementation of card game - Memory\n\nimport simplegui\nimport random\n\n# helper function to initialize globals\ndef new_game():\n global num, exposed, state, counter\n state = 0\n counter = 0\n numbers = range(8)\n numbers.extend(numbers)\n num = list(numbers)\n random.shuffle(num)\n exposed = []\n for n in num:\n exposed.append(False)\n #print exposed\n #print num\n\n# define event handlers\ndef mouseclick(pos):\n # add game state logic here\n global state, first, second, counter\n if state == 0:\n first = pos[0] // 50\n if exposed[first] is False:\n exposed[first] = True\n state = 1\n counter += 1\n label.set_text(\"Turns = \" + str(counter))\n elif state == 1:\n second = pos[0] // 50\n if exposed[second] is False:\n exposed[second] = True\n state = 2\n elif num[first] == num[second]:\n state = 0\n first = pos[0] // 50\n if exposed[first] is False:\n exposed[first] = True\n state = 1\n counter += 1\n label.set_text(\"Turns = \" + str(counter))\n elif num[first] != num[second]:\n exposed[first] = False\n exposed[second] = False\n state = 0\n first = pos[0] // 50\n if exposed[first] is False:\n exposed[first] = True\n state = 1\n counter += 1\n label.set_text(\"Turns = \" + str(counter))\n\n# cards are logically 50x100 pixels in size\ndef draw(canvas):\n global num, exposed\n card_pos = 25\n for num_index in range(len(num)):\n num_pos = (50 * num_index) + 10\n canvas.draw_text(str(num[num_index]), [num_pos, 70], 60, \"White\")\n\n for el in exposed:\n if el is False:\n canvas.draw_line([card_pos, 10], [card_pos, 90], 45, \"Green\")\n card_pos += 50\n else:\n card_pos += 50\n\n# create frame and add a button and labels\nframe = simplegui.create_frame(\"Memory\", 800, 100)\nframe.add_button(\"Reset\", new_game)\nlabel = frame.add_label(\"Turns = 0\")\n\n# 
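On the strptime comment in Message._update above: before Python 3.11, datetime.fromisoformat rejected the trailing Z (Zulu/UTC) that RFC 3339 allows, which is why that class falls back to strptime. A common alternative keeps fromisoformat by rewriting the suffix (sketch; assumes at most six fractional-second digits, as pre-3.11 fromisoformat also requires):

import datetime

def parse_rfc3339(ts: str) -> datetime.datetime:
    if ts.endswith('Z'):                 # only touch a trailing Zulu marker
        ts = ts[:-1] + '+00:00'
    return datetime.datetime.fromisoformat(ts)

print(parse_rfc3339('2021-06-01T12:30:45.123456Z'))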
register event handlers\nframe.set_mouseclick_handler(mouseclick)\nframe.set_draw_handler(draw)\n\n# get things rolling\nnew_game()\nframe.start()\n","sub_path":"coursera python basics/Rice Universty/memory.py","file_name":"memory.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"462949617","text":"import numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torch.nn as nn\ntorch.set_default_dtype(torch.float64)\nclass DeepNN(nn.Module):\n def __init__(self,depth,nbasis,nstates):\n super(DeepNN, self).__init__()\n forward_list = []\n self.nlayers = depth\n self.nstates = nstates\n self.nbasis = nbasis\n '''\n set input output dimension of the layers\n here use a simple rule where we double each layer\n and then go down to the desired latent state at the end\n '''\n dim = np.zeros(depth+2,dtype='int')\n dim[0] = 2\n for i in range(1,depth):\n dim[i] = dim[i-1]*2\n\n dim[-2] = nbasis\n dim[-1] = 1\n input_dim = dim[0:-1]\n output_dim = dim[1::]\n\n for i in range(0,self.nlayers):\n if (i == self.nlayers - 1):\n forward_list.append(nn.Linear(input_dim[i], output_dim[i]*nstates))\n else:\n forward_list.append(nn.Linear(input_dim[i], output_dim[i]))\n\n forward_list.append(nn.Linear(input_dim[-1], output_dim[-1],bias=False))\n self.forward_list = nn.ModuleList(forward_list)\n self.activation = F.elu\n\n def createBasis(self,x):\n for i in range(0,self.nlayers):\n x = self.activation(self.forward_list[i](x))\n shp = np.shape(x)\n x = torch.reshape(x,(shp[0],self.nstates,self.nbasis))\n return x\n\n def forward(self,x):\n for i in range(0,self.nlayers):\n x = self.activation(self.forward_list[i](x))\n shp = np.shape(x)\n x = torch.reshape(x,(shp[0],self.nstates,self.nbasis))\n x = self.forward_list[-1](x)\n return x\n\n","sub_path":"code/q1de/continuous/deep_nn_multistate.py","file_name":"deep_nn_multistate.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"94431814","text":"import base64\nimport datetime\nimport re\n\nfrom django.conf import settings\nfrom django.db import models\nfrom django.db.models.signals import post_save, pre_delete\n\nfrom utils.model_tools import queryset_iterator\nfrom odk_logger.models import Instance\nfrom common_tags import START_TIME, START, END_TIME, END, ID, UUID, ATTACHMENTS\n\n# this is Mongo Collection where we will store the parsed submissions\nxform_instances = settings.MONGO_DB.instances\n\n\nclass ParseError(Exception):\n pass\n\n\ndef datetime_from_str(text):\n # Assumes text looks like 2011-01-01T09:50:06.966\n if text is None:\n return None\n date_time_str = text.split(\".\")[0]\n return datetime.datetime.strptime(\n date_time_str, '%Y-%m-%dT%H:%M:%S'\n )\n\n\nclass ParsedInstance(models.Model):\n instance = models.OneToOneField(Instance, related_name=\"parsed_instance\")\n start_time = models.DateTimeField(null=True)\n end_time = models.DateTimeField(null=True)\n # todo: decide if decimal field is better than float field.\n lat = models.FloatField(null=True)\n lng = models.FloatField(null=True)\n\n class Meta:\n app_label = \"odk_viewer\"\n\n def update_mongo(self):\n d = self.to_dict_for_mongo()\n xform_instances.save(d)\n\n def to_dict_for_mongo(self):\n d = self.to_dict()\n for key, value in d.items():\n if self._is_invalid_for_mongo(key):\n del d[key]\n d[self._encode_for_mongo(key)] = value\n return d\n\n def _encode_for_mongo(self, key):\n 
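A quick shape check for the DeepNN above: with depth=3, nbasis=8, nstates=2, the widths double from 2 to 4 to 8, the last hidden layer emits nbasis*nstates features that forward() reshapes into one basis set per state, and the final bias-free linear layer collapses each basis to a scalar:

import torch

net = DeepNN(depth=3, nbasis=8, nstates=2)
x = torch.randn(4, 2)            # float64, per set_default_dtype above
basis = net.createBasis(x)       # torch.Size([4, 2, 8]): a basis per state
out = net(x)                     # torch.Size([4, 2, 1])
print(basis.shape, out.shape)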
return reduce(lambda s, c: re.sub(c[0], base64.b64encode(c[1]), s),\n [(r'^\\$', '$'), (r'\\.', '.')], key)\n\n def _is_invalid_for_mongo(self, key):\n return (key.startswith('$') or key.count('.') > 0)\n\n def to_dict(self):\n if not hasattr(self, \"_dict_cache\"):\n self._dict_cache = self.instance.get_dict()\n self._dict_cache.update(\n {\n UUID: self.instance.uuid,\n ID: self.instance.id,\n ATTACHMENTS: [a.media_file.name for a in\\\n self.instance.attachments.all()],\n u\"_status\": self.instance.status,\n }\n )\n return self._dict_cache\n\n @classmethod\n def dicts(cls, xform):\n qs = cls.objects.filter(instance__xform=xform)\n for parsed_instance in queryset_iterator(qs):\n yield parsed_instance.to_dict()\n\n def _set_start_time(self):\n doc = self.to_dict()\n if START_TIME in doc:\n date_time_str = doc[START_TIME]\n self.start_time = datetime_from_str(date_time_str)\n elif START in doc:\n date_time_str = doc[START]\n self.start_time = datetime_from_str(date_time_str)\n else:\n self.start_time = None\n\n def _set_end_time(self):\n doc = self.to_dict()\n if END_TIME in doc:\n date_time_str = doc[END_TIME]\n self.end_time = datetime_from_str(date_time_str)\n elif END in doc:\n date_time_str = doc[END]\n self.end_time = datetime_from_str(date_time_str)\n else:\n self.end_time = None\n\n def get_data_dictionary(self):\n # todo: import here is a hack to get around a circular import\n from odk_viewer.models import DataDictionary\n return DataDictionary.objects.get(\n user=self.instance.xform.user,\n id_string=self.instance.xform.id_string\n )\n\n data_dictionary = property(get_data_dictionary)\n\n # TODO: figure out how much of this code should be here versus\n # data_dictionary.py.\n def _get_geopoint(self):\n doc = self.to_dict()\n xpath = self.data_dictionary.xpath_of_first_geopoint()\n text = doc.get(xpath, u'')\n return dict(zip(\n [u'latitude', u'longitude', u'altitude', u'accuracy'],\n text.split()\n ))\n\n def _set_geopoint(self):\n g = self._get_geopoint()\n self.lat = g.get(u'latitude')\n self.lng = g.get(u'longitude')\n\n def save(self, *args, **kwargs):\n self._set_start_time()\n self._set_end_time()\n self._set_geopoint()\n super(ParsedInstance, self).save(*args, **kwargs)\n # insert into Mongo\n self.update_mongo()\n\n\ndef _remove_from_mongo(sender, **kwargs):\n instance_id = kwargs.get('instance').instance.id\n xform_instances.remove(instance_id)\n\npre_delete.connect(_remove_from_mongo, sender=ParsedInstance)\n\n\ndef _parse_instance(sender, **kwargs):\n # When an instance is saved, first delete the parsed_instance\n # associated with it.\n instance = kwargs[\"instance\"]\n if instance.xform is not None:\n pi, created = ParsedInstance.objects.get_or_create(instance=instance)\n\npost_save.connect(_parse_instance, sender=Instance)\n","sub_path":"odk_viewer/models/parsed_instance.py","file_name":"parsed_instance.py","file_ext":"py","file_size_in_byte":4884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"31779716","text":"from rsa import gen_rsa_key, encrypt_rsa\n\nkey = gen_rsa_key()\npublic_key = (key['n'], key['e'])\nprivate_key = (key['n'], key['d'])\n\nfin = open(\"encoded.txt\", 'w')\nfin.write(\"public key: %s, %s\\n\" % (public_key[0], public_key[1]))\nfin.write(\"n: %s\\n\" % private_key[0])\nfin.write(\"d: %s\\n\" % private_key[1])\n\nfin2 = open(\"message.txt\")\nmsg = \"\"\nfor line in fin2:\n msg += line\nfin2.close()\n \nplain_ints = []\nfor c in str(msg):\n plain_ints.append(ord(c))\n\nencrypted_ints = 
[]\nfor i in plain_ints:\n    encrypted_ints.append(encrypt_rsa(public_key, i))\n    \nfor element in encrypted_ints:\n    fin.write(\"%s\\n\" % element)\n\nfin.close()","sub_path":"encrypt_message/enc.py","file_name":"enc.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"475418915","text":"import click\nimport glob\nfrom gpt2_model import *\nfrom data_pipeline import input_fn\nimport tensorflow as tf\nimport os\n\n_ROOT = os.path.abspath(os.path.dirname(__file__))\nLOG_DIR = _ROOT + \"/log\"\n\n\n@click.command()\n@click.option('--model-dir', type=str, default=\"./model\", show_default=True, help=\"Directory to load model\")\n@click.option('--data-dir', type=str, default=\"./data\", show_default=True, help=\"training data directory\")\n@click.option('--batch-size', type=int, default=16, show_default=True, help=\"batch size\")\n@click.option('--epochs', type=int, default=5, show_default=True, help=\"num of epochs to train on data\")\n@click.option('--learning-rate', type=float, default=0.001, show_default=True, help=\"learning rate\")\n@click.option('--distributed', type=bool, default=False, show_default=True, help=\"distributed training\")\n@click.option('--mxp', type=bool, default=False, show_default=True, help=\"enable mixed precision training\")\ndef train(model_dir, data_dir, batch_size=16, learning_rate=0.001, distributed=False, mxp=False, epochs=5):\n    data_dir = os.path.abspath(data_dir)\n    model_dir = os.path.abspath(model_dir)\n    tf_records = glob.glob(data_dir + \"/tf_records/*.tfrecord\")\n    dataset = input_fn(tf_records, batch_size=batch_size, epoch=epochs)\n    if distributed:\n        mirrored_strategy = tf.distribute.MirroredStrategy(devices=[\"/gpu:0\", \"/gpu:1\"])\n        dataset = mirrored_strategy.experimental_distribute_dataset(dataset)\n        with mirrored_strategy.scope():\n            model = Gpt2.create_from_params(model_dir)\n            model.create_optimizer(learning_rate=learning_rate, mixed_precission=mxp)\n            model.create_checkpoint_manager(model_dir)\n            model.create_summary_writer(LOG_DIR)\n\n        model.mirrored_strategy = mirrored_strategy\n    else:\n        model = Gpt2.create_from_params(model_dir)\n        model.create_optimizer(learning_rate=learning_rate, mixed_precission=mxp)\n        model.create_checkpoint_manager(model_dir)\n        model.create_summary_writer(LOG_DIR)\n    print(\"Training Model...............\")\n    model.print_params()\n    model.fit(dataset)\n    print(\"Training Done................\")\n\n\nif __name__ == \"__main__\":\n    train()\n","sub_path":"train_gpt2.py","file_name":"train_gpt2.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"24716209","text":"# -*- coding: UTF-8 -*-\nfrom http import cookiejar\nfrom urllib import parse\nimport urllib.request\nimport json\nimport sys\npostUrl = \"http://202.207.247.60/Hander/LoginAjax.ashx\"\nrankUrl = \"http://202.207.247.60/Hander/Cj/CjAjax.ashx?rnd%20=%200.26650203890332436\"\nheaders = {\n    'User-Agent': 'Mozilla/5.0(Windows NT 10.0; WOW64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 Core/1.53.3427.400 QQBrowser/9.6.12513.400'\n}\n# Create a cookie jar object\ncookie = cookiejar.CookieJar()\n# Build a cookie processor around the jar\nhandler = urllib.request.HTTPCookieProcessor(cookie)\n# Build an opener object that carries the cookie jar\nopener = urllib.request.build_opener(handler)\n\n\ndef login(username, pwd):\n    data1 = {\n        'u': username,\n        'p': pwd,\n        'r': 'on'\n    }\n    data1 = parse.urlencode(data1).encode('utf-8')\n    # Convert to URL-encoded bytes\n\n    # 
Build a request object\n    req = urllib.request.Request(postUrl, data1)\n    # Request the server and get the response object; the session cookie is already attached via the opener\n    resp = opener.open(req)\n    result = json.loads(resp.read().decode('utf-8'))\n    if result['Code'] == 0:\n        newresult = {'Code': 0, 'Msg': \"Wrong username or password!\"}\n        print(\"Wrong username or password!\")\n    elif result['Code'] == 1:\n        # newresult = {'Code': 0, 'Msg': \"Login successful\"}\n        data2 = {\n            'limit': '40',\n            'offset': '0',\n            'order': 'asc',\n            'sort': 'jqzypm,xh',\n            'do': 'xsgrcj',\n            'xh': username\n        }\n\n        data2 = parse.urlencode(data2).encode('utf-8')\n        req2 = urllib.request.Request(rankUrl, data2)\n        resp2 = opener.open(req2)\n        dict1 = json.loads(resp2.read().decode('utf-8'))[0]\n\n        claNum = \"/\" + dict1['bjrs']\n        majorNum = \"/\" + dict1['zyrs']\n        mainClaNum = \"/\" + dict1['dlrs']\n\n        dict1['gpabjpm'] = dict1['gpabjpm'] + claNum\n        dict1['gpazypm'] = dict1['gpazypm'] + majorNum\n        dict1['gpadlpm'] = dict1['gpadlpm'] + mainClaNum\n        dict1['pjcjbjpm'] = dict1['pjcjbjpm'] + claNum\n        dict1['pjcjzypm'] = dict1['pjcjzypm'] + majorNum\n        dict1['jqbjpm'] = dict1['jqbjpm'] + claNum\n        dict1['jqzypm'] = dict1['jqzypm'] + majorNum\n        return dict1\n","sub_path":"django/MyTest/py/gpa.py","file_name":"gpa.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"23239688","text":"from collections import OrderedDict\nfrom inspect import isclass\n\n\n__all__ = [\"NoDefault\",\n           \"FreeValue\", \"BooleanValue\", \"EnumerationValue\",\n           \"NumberValue\", \"StringValue\",\n           \"HasEnvironment\",\n           \"Experiment\", \"EnvExperiment\", \"is_experiment\"]\n\n\nclass NoDefault:\n    \"\"\"Represents the absence of a default value.\"\"\"\n    pass\n\n\nclass DefaultMissing(Exception):\n    \"\"\"Raised by the ``default`` method of argument processors when no default\n    value is available.\"\"\"\n    pass\n\n\nclass _SimpleArgProcessor:\n    def __init__(self, default=NoDefault):\n        if default is not NoDefault:\n            self.default_value = default\n\n    def default(self):\n        if not hasattr(self, \"default_value\"):\n            raise DefaultMissing\n        return self.default_value\n\n    def process(self, x):\n        return x\n\n    def describe(self):\n        d = {\"ty\": self.__class__.__name__}\n        if hasattr(self, \"default_value\"):\n            d[\"default\"] = self.default_value\n        return d\n\n\nclass FreeValue(_SimpleArgProcessor):\n    \"\"\"An argument that can be an arbitrary Python value.\"\"\"\n    pass\n\n\nclass BooleanValue(_SimpleArgProcessor):\n    \"\"\"A boolean argument.\"\"\"\n    pass\n\n\nclass EnumerationValue(_SimpleArgProcessor):\n    \"\"\"An argument that can take a string value among a predefined set of\n    values.\n\n    :param choices: A list of strings representing the possible values of the\n        argument.\n    \"\"\"\n    def __init__(self, choices, default=NoDefault):\n        _SimpleArgProcessor.__init__(self, default)\n        assert default is NoDefault or default in choices\n        self.choices = choices\n\n    def describe(self):\n        d = _SimpleArgProcessor.describe(self)\n        d[\"choices\"] = self.choices\n        return d\n\n\nclass NumberValue(_SimpleArgProcessor):\n    \"\"\"An argument that can take a numerical value (typically floating point).\n\n    :param unit: A string representing the unit of the value, for user\n        interface (UI) purposes.\n    :param step: The step with which the value should be modified by up/down\n        buttons in a UI.\n    :param min: The minimum value of the argument.\n    :param max: The maximum value of the argument.\n    :param ndecimals: The number of decimals a UI should use.\n    \"\"\"\n    def __init__(self, default=NoDefault, unit=\"\", step=1.0,\n                 min=None, 
max=None, ndecimals=2):\n _SimpleArgProcessor.__init__(self, default)\n self.unit = unit\n self.step = step\n self.min = min\n self.max = max\n self.ndecimals = ndecimals\n\n def describe(self):\n d = _SimpleArgProcessor.describe(self)\n d[\"unit\"] = self.unit\n d[\"step\"] = self.step\n d[\"min\"] = self.min\n d[\"max\"] = self.max\n d[\"ndecimals\"] = self.ndecimals\n return d\n\n\nclass StringValue(_SimpleArgProcessor):\n \"\"\"A string argument.\"\"\"\n pass\n\n\nclass HasEnvironment:\n \"\"\"Provides methods to manage the environment of an experiment (devices,\n parameters, results, arguments).\"\"\"\n def __init__(self, dmgr=None, pdb=None, rdb=None, *, parent=None,\n param_override=dict(), default_arg_none=False, **kwargs):\n self.requested_args = OrderedDict()\n\n self.__dmgr = dmgr\n self.__pdb = pdb\n self.__rdb = rdb\n self.__parent = parent\n self.__param_override = param_override\n self.__default_arg_none = default_arg_none\n\n self.__kwargs = kwargs\n self.__in_build = True\n self.build()\n self.__in_build = False\n for key in self.__kwargs.keys():\n if key not in self.requested_args:\n raise TypeError(\"Got unexpected argument: \" + key)\n del self.__kwargs\n\n def build(self):\n \"\"\"Must be implemented by the user to request arguments.\n\n Other initialization steps such as requesting devices and parameters\n or initializing real-time results may also be performed here.\n\n When the repository is scanned, any requested devices and parameters\n are set to ``None``.\"\"\"\n raise NotImplementedError\n\n def dbs(self):\n \"\"\"Returns the device manager, the parameter database and the result\n database, in this order.\n\n This is the same order that the constructor takes them, allowing\n sub-objects to be created with this idiom to pass the environment\n around: ::\n\n sub_object = SomeLibrary(*self.dbs())\n \"\"\"\n return self.__dmgr, self.__pdb, self.__rdb\n\n def get_argument(self, key, processor=None, group=None):\n \"\"\"Retrieves and returns the value of an argument.\n\n :param key: Name of the argument.\n :param processor: A description of how to process the argument, such\n as instances of ``BooleanValue`` and ``NumberValue``.\n :param group: An optional string that defines what group the argument\n belongs to, for user interface purposes.\n \"\"\"\n if not self.__in_build:\n raise TypeError(\"get_argument() should only \"\n \"be called from build()\")\n if self.__parent is not None and key not in self.__kwargs:\n return self.__parent.get_argument(key, processor, group)\n if processor is None:\n processor = FreeValue()\n self.requested_args[key] = processor, group\n try:\n argval = self.__kwargs[key]\n except KeyError:\n try:\n return processor.default()\n except DefaultMissing:\n if self.__default_arg_none:\n return None\n else:\n raise\n return processor.process(argval)\n\n def attr_argument(self, key, processor=None, group=None):\n \"\"\"Sets an argument as attribute. The names of the argument and of the\n attribute are the same.\"\"\"\n setattr(self, key, self.get_argument(key, processor, group))\n\n def get_device(self, key):\n \"\"\"Creates and returns a device driver.\"\"\"\n if self.__parent is not None:\n return self.__parent.get_device(key)\n if self.__dmgr is None:\n raise ValueError(\"Device manager not present\")\n return self.__dmgr.get(key)\n\n def attr_device(self, key):\n \"\"\"Sets a device driver as attribute. 
The names of the device driver\n and of the attribute are the same.\"\"\"\n setattr(self, key, self.get_device(key))\n\n def get_parameter(self, key, default=NoDefault):\n \"\"\"Retrieves and returns a parameter.\"\"\"\n if self.__parent is not None and key not in self.__param_override:\n return self.__parent.get_parameter(key, default)\n if self.__pdb is None:\n raise ValueError(\"Parameter database not present\")\n if key in self.__param_override:\n return self.__param_override[key]\n try:\n return self.__pdb.get(key)\n except KeyError:\n if default is not NoDefault:\n return default\n else:\n raise\n\n def attr_parameter(self, key, default=NoDefault):\n \"\"\"Sets a parameter as attribute. The names of the argument and of the\n parameter are the same.\"\"\"\n setattr(self, key, self.get_parameter(key, default))\n\n def set_parameter(self, key, value):\n \"\"\"Writes the value of a parameter into the parameter database.\"\"\"\n if self.__parent is not None:\n self.__parent.set_parameter(key, value)\n if self.__pdb is None:\n raise ValueError(\"Parameter database not present\")\n self.__pdb.set(key, value)\n\n def set_result(self, key, value, realtime=False, store=True):\n \"\"\"Writes the value of a result.\n\n :param realtime: Marks the result as real-time, making it immediately\n available to clients such as the user interface. Returns a\n ``Notifier`` instance that can be used to modify mutable results\n (such as lists) and synchronize the modifications with the clients.\n :param store: Defines if the result should be stored permanently,\n e.g. in HDF5 output. Default is to store.\n \"\"\"\n if self.__parent is not None:\n self.__parent.set_result(key, value, realtime, store)\n if self.__rdb is None:\n raise ValueError(\"Result database not present\")\n if realtime:\n if key in self.__rdb.nrt:\n raise ValueError(\"Result is already non-realtime\")\n self.__rdb.rt[key] = value\n notifier = self.__rdb.rt[key]\n notifier.kernel_attr_init = False\n self.__rdb.set_store(key, store)\n return notifier\n else:\n if key in self.__rdb.rt.read:\n raise ValueError(\"Result is already realtime\")\n self.__rdb.nrt[key] = value\n self.__rdb.set_store(key, store)\n\n def get_result(self, key):\n \"\"\"Retrieves the value of a result.\n\n There is no difference between real-time and non-real-time results\n (this function does not return ``Notifier`` instances).\n \"\"\"\n if self.__parent is not None:\n return self.__parent.get_result(key)\n if self.__rdb is None:\n raise ValueError(\"Result database not present\")\n return self.__rdb.get(key)\n\n\nclass Experiment:\n \"\"\"Base class for experiments.\n\n Deriving from this class enables automatic experiment discovery in\n Python modules.\n \"\"\"\n def prepare(self):\n \"\"\"Entry point for pre-computing data necessary for running the\n experiment.\n\n Doing such computations outside of ``run`` enables more efficient\n scheduling of multiple experiments that need to access the shared\n hardware during part of their execution.\n\n This method must not interact with the hardware.\n \"\"\"\n pass\n\n def run(self):\n \"\"\"The main entry point of the experiment.\n\n This method must be overloaded by the user to implement the main\n control flow of the experiment.\n\n This method may interact with the hardware.\n\n The experiment may call the scheduler's ``pause`` method while in\n ``run``.\n \"\"\"\n raise NotImplementedError\n\n def analyze(self):\n \"\"\"Entry point for analyzing the results of the experiment.\n\n This method may be overloaded by the 
user to implement the analysis\n phase of the experiment, for example fitting curves.\n\n Splitting this phase from ``run`` enables tweaking the analysis\n algorithm on pre-existing data, and CPU-bound analyses to be run\n overlapped with the next experiment in a pipelined manner.\n\n This method must not interact with the hardware.\n \"\"\"\n pass\n\n\nclass EnvExperiment(Experiment, HasEnvironment):\n \"\"\"Base class for experiments that use the ``HasEnvironment`` environment\n manager.\n\n Most experiment should derive from this class.\"\"\"\n pass\n\n\ndef is_experiment(o):\n \"\"\"Checks if a Python object is an instantiable user experiment.\"\"\"\n return (isclass(o)\n and issubclass(o, Experiment)\n and o is not Experiment\n and o is not EnvExperiment)\n","sub_path":"artiq/language/environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":11223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"396370202","text":"from flask import Blueprint, render_template, request, flash, redirect, url_for, Response\n# from uniback.db_interfaces.physical_location import get_physical_locations, delete_location, set_location_info, set_location_type, get_location_status, get_location_types, get_location_type\n# from uniback.db_interfaces.physical_location import get_location_info as get_location_info, add_location as add_loc, add_location_type as add_loc_type, delete_location_type as del_location_type\nimport uniback.db_interfaces.physical_location as physical_location_interface\nimport uniback.db_interfaces.repository_list as repository_interface\nfrom uniback.models.general import PhysicalLocation, PhysicalLocationType, Repository\nimport json\nfrom .forms import EditLocationForm, AddLocationForm, AddRepositoryForm, get_add_repository_form, AddLocationTypeForm, EditLocationTypeForm\nfrom uniback.tools import plugin_tools\nfrom uniback.misc import job_queue\nfrom uniback.tools.job_tools import JobObject\nimport uniback.tools.job_callbacks as job_callbacks\nfrom time import sleep\nfrom threading import Thread\n\nrepositories = Blueprint('repositories', '__name__')\n\n\n@repositories.route(f'/{repositories.name}')\n@repositories.route(f'/{repositories.name}/repository_list')\ndef repository_list():\n page = request.args.get('page', 1, type=int)\n repositories = Repository.query.paginate(page=page, per_page=50)\n return render_template('repositories/repository_list.html', repositories=repositories)\n\n\n@repositories.route(f'/{repositories.name}/repository_list/_get_repository_info')\ndef get_repository_info():\n info_dict = {}\n repository_id = request.args.get('id', 0, type=int)\n info_dict = repository_interface.get_info(repository_id)\n return render_template('sidebar/repository_list.html', info_dict=info_dict)\n\n\n@repositories.route(f'/{repositories.name}/physical_locations')\ndef physical_locations():\n page = request.args.get('page', 1, type=int)\n locations = PhysicalLocation.query.paginate(page=page, per_page=50)\n # for location in locations:\n # location['status'] = get_location_status(location['id'])\n return render_template('repositories/physical_locations.html', locations=locations)\n\n\n@repositories.route(f'/{repositories.name}/physical_locations/_delete', methods=['POST'])\ndef delete_locations():\n location_list = request.get_json().get('item_ids')\n for location_id in location_list:\n physical_location_interface.delete_location(location_id)\n # logger.debug(group_id)\n flash(\"Successfully removed 
items\", category=\"success\")\n    return json.dumps({'success': True}), 200, {'ContentType': 'application/json'}\n\n\n@repositories.route(f'/{repositories.name}/physical_locations/_get_location_info')\ndef get_location_info():\n    info_dict = {}\n    location_id = request.args.get('id', 0, type=int)\n    info_dict['name'] = physical_location_interface.get_location_info(location_id)['name']\n    return render_template('sidebar/physical_locations.html', info_dict=info_dict)\n\n\n@repositories.route(f'/{repositories.name}/physical_locations/_edit/', methods=['GET', 'POST'])\ndef edit_location(location_id):\n    form = EditLocationForm()\n    form.type.choices = [(item['id'], item['name']) for item in physical_location_interface.get_location_types()]\n    if form.validate_on_submit():\n        new_info = {}\n        new_info['name'] = form.name.data\n        new_info['address'] = form.address.data\n        new_info['type'] = form.type.data\n        new_info['concurrent_jobs'] = form.concurrent_jobs.data\n        physical_location_interface.set_location_info(location_id, new_info)\n        flash(\"Location has been updated\", 'success')\n        return redirect(url_for('repositories.physical_locations'))\n    elif request.method == 'GET':\n        current_info = physical_location_interface.get_location_info(location_id)\n        form.location_id.data = current_info.get('id')\n        form.name.data = current_info.get('name')\n        form.address.data = current_info.get('address')\n        form.type.data = current_info.get('type')\n        form.concurrent_jobs.data = current_info.get('concurrent_jobs')\n    return render_template(\"repositories/physical_locations_edit.html\", form=form)\n\n\n@repositories.route(f'/{repositories.name}/physical_locations/_add', methods=['GET', 'POST'])\ndef add_location():\n    form = AddLocationForm()\n    form.type.choices = [(item['id'], item['name']) for item in physical_location_interface.get_location_types()]\n    if form.validate_on_submit():\n        new_info = {}\n        new_info['name'] = form.name.data\n        new_info['address'] = form.address.data\n        new_info['type'] = form.type.data\n        new_info['concurrent_jobs'] = form.concurrent_jobs.data\n        # physical_location_interface.add_loc(new_info)\n        try:\n            physical_location_interface.init_physical_location(form.address.data)\n        except Exception:\n            flash(\"Failed to add a physical location\", category='error')\n        else:\n            flash(\"Location has been added\", category='success')\n        return redirect(url_for('repositories.physical_locations'))\n    elif request.method == 'GET':\n        form.concurrent_jobs.data = 1\n    # we can use the same template as it's just going to be the same fields\n    # as the fields in the edit form\n    return render_template(\"repositories/physical_locations_edit.html\", form=form)\n\n\n@repositories.route(f'/{repositories.name}/physical_locations/_get_location_status')\ndef get_loc_status():\n    location_id = request.args.get('id', 0, type=int)\n    status = physical_location_interface.get_location_status(location_id)\n    return json.dumps({'data': status, 'name': 'status', 'id': location_id})\n\n\n@repositories.route(f'/{repositories.name}/repository_list/_add/', methods=['GET', 'POST'])\ndef add_repository(engine):\n    form = AddRepositoryForm()\n    repository_class = plugin_tools.get_engine_class(engine, 'Repository')\n    # form.set_field_list(repository_class.fields_request())\n    form = get_add_repository_form(repository_class.fields_request())\n    form.location.choices = [(item['id'], item['name']) for item in physical_location_interface.get_physical_locations(get_status=True) if (item['status'] == 'Online')]\n    if form.validate_on_submit():\n        new_info = {}\n        for item in 
form:\n if item.id != 'csrf_token' and item.id != 'submit':\n new_info[item.id] = item.data\n job_class = plugin_tools.get_engine_class(engine, 'Repository')\n new_object = job_class(address=physical_location_interface.get_location_info(form.location.data).get('address'), field_dict=new_info)\n job_object = JobObject(name=\"Repository create\", process=new_object, engine=engine)\n job_object.success_callback = job_callbacks.repository_add_to_db\n job_queue.add(job=job_object)\n flash(\"Repository job added to queue\", category='success')\n return redirect(url_for('repositories.repository_list'))\n return render_template(\"repositories/repository_list_add.html\", form=form)\n\n\n@repositories.route(f'/{repositories.name}/repository_list/_add', methods=['GET', 'POST'])\ndef get_engine():\n if request.method == 'POST':\n engine = request.form['engine-select']\n return redirect(url_for('repositories.add_repository', engine=engine))\n available_engines = plugin_tools.get_list_of_engines()\n return render_template(\"repositories/repository_list_add.html\", available_engines=available_engines)\n\n\n@repositories.route(f'/{repositories.name}/repository_list/_delete', methods=['GET', 'POST'])\ndef delete_repositories():\n item_ids = request.get_json().get('item_ids')\n try:\n repository_interface.delete_repositories(item_ids)\n except Exception as e:\n flash(f\"Items not removed: {e}\", category=\"danger\")\n return json.dumps({'success': True}), 200, {'ContentType': 'application/json'}\n # logger.debug(group_id)\n flash(\"Successfully removed items\", category=\"success\")\n return json.dumps({'success': True}), 200, {'ContentType': 'application/json'}\n\n\n@repositories.route(f'/{repositories.name}/physical_location_types/_add', methods=['GET', 'POST'])\ndef add_location_type():\n form = AddLocationTypeForm()\n if form.validate_on_submit():\n new_info = {}\n new_info['name'] = form.name.data\n new_info['subtype'] = form.subtype.data\n new_info['description'] = form.description.data\n physical_location_interface.add_loc_type(new_info)\n flash(\"Location has been added\", category='success')\n return redirect(url_for('repositories.physical_location_types'))\n # we can use the same template as it's just going to be the same fields\n # as the fields in the edit form\n return render_template(\"repositories/physical_location_types_edit.html\", form=form)\n\n\n@repositories.route(f'/{repositories.name}/physical_location_types')\ndef physical_location_types():\n page = request.args.get('page', 1, type=int)\n items = PhysicalLocationType.query.order_by(PhysicalLocationType.name.desc()).paginate(page=page, per_page=50)\n # we can use the same template as it's just going to be the same fields\n # as the fields in the edit form\n return render_template(\"repositories/physical_location_types.html\", items=items)\n\n\n@repositories.route(f'/{repositories.name}/physical_location_types/_edit/', methods=['GET', 'POST'])\ndef edit_location_type(type_id):\n form = EditLocationTypeForm()\n if form.validate_on_submit():\n new_info = {}\n new_info['name'] = form.name.data\n new_info['subtype'] = form.subtype.data\n new_info['description'] = form.description.data\n physical_location_interface.set_location_type(type_id, new_info)\n flash(\"Location type has been updated\", category='success')\n return redirect(url_for('repositories.physical_location_types'))\n else:\n type = physical_location_interface.get_location_type(type_id)\n if type:\n form.name.data = type['name']\n form.subtype.data = type['subtype']\n 
form.description.data = type['description']\n # we can use the same template as it's just going to be the same fields\n # as the fields in the edit form\n return render_template(\"repositories/physical_location_types_edit.html\", form=form)\n\n\n@repositories.route(f'/{repositories.name}/physical_location_types/_get_type_info')\ndef get_type_info():\n info_dict = {}\n location_id = request.args.get('id', 0, type=int)\n info_dict['description'] = physical_location_interface.get_location_type(location_id)['description']\n return render_template('sidebar/physical_location_types.html', info_dict=info_dict)\n\n\n@repositories.route(f'/{repositories.name}/physical_location_types/_delete', methods=['POST'])\ndef delete_location_types():\n type_list = request.get_json().get('item_ids')\n for type in type_list:\n physical_location_interface.del_location_type(type)\n # logger.debug(group_id)\n flash(\"Successfully removed items\", category=\"success\")\n return json.dumps({'success': True}), 200, {'ContentType': 'application/json'}","sub_path":"uniback/blueprints/repositories/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":11173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"206254647","text":"#!/usr/bin/env python3\nimport os\nimport random\n\n# with open('flag', 'rb') as data:\n# flag = data.read()\n # assert(flag.startswith(b'AIS3{'))\n\ndef extend(key, L):\n kL = len(key)\n return key * (L // kL) + key[:L % kL]\n\ndef xor(X, Y):\n return bytes([x ^ y for x, y in zip(X, Y)])\n\n# key = os.urandom(random.randint(8, 12))\n# plain = flag + key\n# key = extend(key, len(plain))\n# cipher = xor(plain, key)\n\n# with open('flag-encrypted', 'wb') as data:\n# data.write(cipher)\n\ndata = open('Crypto2-flag-encrypted', 'rb').read()\nprint(len(data))\n\n\nkey = xor(b'AIS3{', data)\n# print(key)\n# for i in range(8, 13):\n# \tprint(xor(data[-i:], key))\n\nfor i in range(5):\n\ttemp = xor(data[-10:], key)\n\tkey += bytes([temp[-1]])\n\nkey_length = 10\nflag_length = 151\n# AIS3{xxxxx}1234567890\n# 12345xxxxx12345678901\n\nkey = extend(key, len(data))\n\nprint(xor(data, key))\n","sub_path":"Crypto2-XOR.py","file_name":"Crypto2-XOR.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"433390245","text":"import json\nimport os\nimport time\n\n\nDATA_DIR = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \n 'somur_gen_data')\n\nMD_PATH = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \n 'somur_report.md')\nif os.path.exists(MD_PATH):\n os.remove(MD_PATH)\n\n\ndef json_file_gen():\n for filename in os.listdir(DATA_DIR):\n if filename.endswith('.json'):\n yield os.path.join(DATA_DIR, filename)\n\n\ndef extract_mata_info(json_dict, data, project_id):\n if not json_dict:\n return\n \n project = json_dict.get('project')\n h1_title = '# {}/{}--[{}]\\n'.format(\n project['name'], project['english_name'], project_id\n )\n result = ''\n if project.get('result'):\n result += '- {}\\n'.format(project['result'])\n if project.get('risk'):\n result += '- 终生患病风险为 {:.3f}%,是人群平均的 {:.3f} 倍。\\n'.format(\n project['risk']['rate'],\n project['risk']['times']\n )\n if not result:\n result = '- 未检出风险突变.\\n'\n data.append(h1_title)\n data.append(result)\n\n\ndef extract_intro_info(json_dict, data):\n if not json_dict:\n return\n\n data.append('## 结果解读\\n')\n dist = json_dict.get('gene_type_distribution')\n if dist:\n total = 
sum(int(item.get('member_count')) for item in dist)\n\n md_table = '| 分类 | 人数 | 占比 |\\n| --- | --- | --- |\\n'\n for item in dist:\n md_table += '| {} | {} | {:.3f}% |\\n'.format(\n item.get('level_title'),\n item.get('member_count'),\n int(item.get('member_count')) / total * 100,\n )\n data.append(md_table)\n\n for article in json_dict.get('article_List'):\n data.append('### {}\\n'.format(article['article_title']))\n article_contents = article.get('article_content')\n if isinstance(article_contents, dict):\n article_contents = sorted(\n article_contents.values(),\n key=lambda v: v['order']\n ) \n for content in article_contents:\n text = content['data'].replace('
', '\\n\\n') + '\\n'\n if content['type'] == 'sub_title':\n text = '#### ' + text\n data.append(text)\n\n if json_dict.get('science_article_list'):\n data.append('### 相关阅读\\n')\n ref_reading = ''.join(\n '- [{}]({}): {}\\n'.format(\n sc_article['title'], \n sc_article['url'],\n sc_article['abstract'],\n ) for sc_article in json_dict.get('science_article_list')\n )\n data.append(ref_reading)\n\n\ndef extract_basis_info(json_dict, data):\n if not json_dict:\n return\n \n data.append('## 科学依据\\n')\n for chrom in json_dict.get('snp_list'):\n data.append('### {}\\n'.format(chrom['chrom_name']))\n for gene in chrom['gene']:\n data.append('#### 基因【{}】\\n'.format(gene['gene_name']))\n gene_desc = (gene.get('gene_desc') or 'No gene_desc.') + '\\n'\n data.append(gene_desc)\n text = ''\n for snp in gene['snp']:\n text += '- **位点【{}--{}】**: {}。\\n'.format(\n snp['snp_name'], snp['snp_genotype'],\n snp['snp_desc']\n )\n data.append(text)\n \n data.append('### 参考文献\\n')\n text = ''\n for bib in json_dict.get('bib_list'):\n text += '- [{}]({})\\n'.format(bib['full_name'], bib['url'])\n data.append(text or '- None')\n\n\ndef main():\n for filename in json_file_gen():\n project_id = os.path.basename(filename).split('_')[0]\n with open(filename, mode='r', encoding='utf_8') as jf, \\\n open(MD_PATH, mode='a', encoding='utf_8') as mf:\n json_dict = json.load(jf)\n data = list()\n extract_mata_info(json_dict.get('meta'), data, project_id)\n extract_intro_info(json_dict.get('intro'), data)\n extract_basis_info(json_dict.get('basis'), data)\n\n mf.write('\\n'.join(data) + '\\n')\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"spider/somur_report_md.py","file_name":"somur_report_md.py","file_ext":"py","file_size_in_byte":4172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"266599447","text":"from flask import Flask\nfrom flask import request\nfrom flask import json\nfrom numpy import log\nfrom pickle import load\nfrom numbers import Number\nimport boto3\n\nBUCKET_NAME = 'edycoco'\nFOLDER_IN_S3 = 'ML/'\nMODEL_FILE_NAME = 'forest.sav'\nSCALER_FILE_NAME = 'scaler.sav'\nTEMP_FOLDER = '/tmp/'\n\napp = Flask(__name__)\n\n## request handler\n@app.route('/', methods=['POST'])\ndef index():\n ## handle invalid requests\n try:\n payload = json.loads(request.get_data().decode('utf-8'))\n if not validate_payload(payload):\n return json.dumps(errormsg()), 400\n except:\n return json.dumps(errormsg()), 400\n\n ## predict based on input params\n prediction = predict(payload)\n return json.dumps({\"nsp\": str(prediction[0])})\n\ndef validate_payload(payload):\n expecting_keys = [\"lb\",\"astv\",\"ds\",\"dp\",\"width\",\"max\",\"nmax\",\"median\",\"tendency\",\"ac\",\"fm\",\"uc\",\"mstv\",\"altv\",\"mltv\",\"dl\",\"nzeros\",\"variance\"]\n for key in expecting_keys:\n if key not in payload:\n return False\n elif not isinstance(payload[key], Number):\n return False\n return True\n\ndef errormsg():\n return {\"error\":\"payload not in expected format\",\"example\":\"{\\\"lb\\\":120,\\\"astv\\\":73,\\\"ds\\\":0,\\\"dp\\\":0,\\\"width\\\":64,\\\"max\\\":126,\\\"nmax\\\":2,\\\"median\\\":121,\\\"tendency\\\":1,\\\"ac\\\":0,\\\"fm\\\":0,\\\"uc\\\":0,\\\"mstv\\\":0.5,\\\"altv\\\":43,\\\"mltv\\\":2.4,\\\"dl\\\":0,\\\"nzeros\\\":0,\\\"variance\\\":73}\"}\n\n## model stored in s3, retrieved by boto3\ndef load_model(keyname):\n s3_client = boto3.client('s3')\n s3_client.download_file('edycoco', FOLDER_IN_S3+keyname, TEMP_FOLDER+keyname)\n return 
load(open(TEMP_FOLDER+keyname, 'rb'))\n\ndef predict(data):\n final_data = clean(data)\n return load_model(MODEL_FILE_NAME).predict(final_data)\n\ndef clean(data):\n ## log transform\n data = logTrans(data)\n ## scale\n dataCleaned = scale(data)\n return dataCleaned\n\n## same log transformation used at data cleaning before model creation\ndef logTrans(data):\n cols = ['ac','fm','uc','mstv','altv','mltv','dl','nzeros','variance']\n for col in cols:\n data[col] = log(data[col] + 0.01)\n return data\n\ndef scale(data):\n array = []\n array.append(data['lb'])\n array.append(data['astv'])\n array.append(data['ds'])\n array.append(data['dp'])\n array.append(data['width'])\n array.append(data['max'])\n array.append(data['nmax'])\n array.append(data['median'])\n array.append(data['tendency'])\n array.append(data['ac'])\n array.append(data['fm'])\n array.append(data['uc'])\n array.append(data['mstv'])\n array.append(data['altv'])\n array.append(data['mltv'])\n array.append(data['dl'])\n array.append(data['nzeros'])\n array.append(data['variance'])\n array2d = [array]\n\n return load_model(SCALER_FILE_NAME).transform(array2d)\n","sub_path":"Capstone/predictions.py","file_name":"predictions.py","file_ext":"py","file_size_in_byte":2794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"368732111","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 23 15:56:14 2020\n\n@author: Ryan Johnsen rdjsen@gmail.com\n\"\"\"\n\n#wrapper for resolution tracker\n\nimport pandas as pd\nfrom os import path\nfrom os import getcwd\nfrom telegram.ext import Updater, CommandHandler, MessageHandler, Filters\nimport sqlite3\n\n\n#importing my module\nfrom telegram_commands import (help_command,\n start_tracking,\n track,\n add_reso,\n start_updating,\n delete_reso)\n\nfrom update_loop import input_loop\n\nFILE_TEMPLATE = ['user_id', 'num', 'resolution', 'type', 'target', 'current', 'cur_percent', \n 'pace', 'pace_percent']\n\nactions = ['adding', 'updating', 'deleting']\n\nconn = sqlite3.connect('data.db')\n\n#file location\n\ndef get_token():\n \"\"\"Read the Token from token.txt\"\"\"\n \n token = open(path.join(getcwd(), 'token.txt')).read()\n return token\n\n# # # ~ ~ ~ Other Functions ~ ~ ~ # # #\ndef setup_db():\n \"\"\"Check if db exists or create it if not\"\"\"\n #check if db exists\n #if not create it\n c = conn.cursor()\n \n #checking for user_status table\n c.execute('''SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name='user_status' ''')\n \n if c.fetchone()[0] == 0:\n #table doesn't exist\n \n c.execute(\"\"\"\n CREATE TABLE user_status\n (chat_id, status, num)\n \"\"\")\n conn.commit()\n \n #checking for user resos tables\n c.execute('''SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name='resolutions' ''')\n \n if c.fetchone()[0] == 0:\n #table doesn't exist\n df = pd.DataFrame(columns=FILE_TEMPLATE)\n df.to_sql('resolutions', conn, index=False)\n \n# # # ~ ~ ~ Main ~ ~ ~ # # #\ndef main():\n \"\"\"Main Code execution\"\"\"\n \n #setting up database\n setup_db()\n #conn.close()\n \n bot_token = get_token()\n \n #setting up updater and dispatcher\n updater = Updater(token = bot_token, use_context = True)\n dispatcher = updater.dispatcher\n \n # # # ~ ~ ~ Handlers ~ ~ ~ # # #\n dispatcher.add_handler(CommandHandler('start', start_tracking)) \n dispatcher.add_handler(CommandHandler('add', add_reso)) \n dispatcher.add_handler(MessageHandler(Filters.text & (~Filters.command), input_loop))\n 
dispatcher.add_handler(CommandHandler('track', track))\n dispatcher.add_handler(CommandHandler('update', start_updating))\n dispatcher.add_handler(CommandHandler('help', help_command))\n dispatcher.add_handler(CommandHandler('delete', delete_reso))\n \n #start polling\n updater.start_polling()\n updater.idle()\n \nif __name__ == '__main__':\n main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"27785001","text":"c.JupyterHub.confirm_no_ssl = True\nc.JupyterHub.allow_root = True\n\nc.JupyterHub.hub_ip = '127.0.0.1'\n\n## Authenticator\nfrom oauthenticator.cilogon import CILogonOAuthenticator\nfrom jupyterhub.auth import LocalAuthenticator\nfrom oauthenticator.cilogon import *\nc.CILogonOAuthenticator.username_claim = 'email'\n\nclass LocalCILogonOAuthenticator(LocalAuthenticator, CILogonOAuthenticator):\n \"\"\"A version that mixes in local system user creation\"\"\"\n def normalize_username(self, username): \n username = username.replace('@', '').lower()\n return username.replace('.', '')\n \nc.JupyterHub.authenticator_class = LocalCILogonOAuthenticator\nc.LocalCILogonOAuthenticator.create_system_users = True\nc.LocalCILogonOAuthenticator.add_user_cmd = ['adduser']\nc.Authenticator.admin_users = {'dmishinucsdedu', 'jjgrahamucsdedu'}\nc.JupyterHub.admin_access = True\n\n## Spawner\nc.Spawner.cmd = ['jupyter-labhub']\nc.Spawner.default_url = '/lab'\nc.JupyterHub.spawner_class = 'jupyterhub.spawner.LocalProcessSpawner'\n","sub_path":"dhub/jupyterhub_config.py","file_name":"jupyterhub_config.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"6041251","text":"# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. 
It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Authors: dr-prodigy (c) 2017-2023\n# ryanss (c) 2014-2017\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nfrom datetime import date\nfrom datetime import timedelta as td\nfrom typing import Optional\n\nfrom holidays.groups import ChristianHolidays, IslamicHolidays, InternationalHolidays\nfrom holidays.holiday_base import HolidayBase\n\n\nclass Spain(HolidayBase, ChristianHolidays, IslamicHolidays, InternationalHolidays):\n \"\"\"\n References:\n - https://administracion.gob.es/pag_Home/atencionCiudadana/calendarios.html\n \"\"\"\n\n country = \"ES\"\n subdivisions = (\n \"AN\",\n \"AR\",\n \"AS\",\n \"CB\",\n \"CE\",\n \"CL\",\n \"CM\",\n \"CN\",\n \"CT\",\n \"EX\",\n \"GA\",\n \"IB\",\n \"MC\",\n \"MD\",\n \"ML\",\n \"NC\",\n \"PV\",\n \"RI\",\n \"VC\",\n )\n\n def __init__(self, *args, **kwargs):\n ChristianHolidays.__init__(self)\n InternationalHolidays.__init__(self)\n IslamicHolidays.__init__(self)\n super().__init__(*args, **kwargs)\n\n def _add_holiday(self, name: str, dt: date) -> Optional[date]:\n if dt.year != self._year:\n return None\n\n if self.observed and self._is_sunday(dt):\n dt += td(days=+1)\n name = self.tr(\"%s (Trasladado)\") % self.tr(name)\n\n return super()._add_holiday(self.tr(name), dt)\n\n def _populate(self, year):\n super()._populate(year)\n\n if year != 2023:\n self._add_new_years_day(\"Año nuevo\")\n\n self._add_epiphany_day(\"Epifanía del Señor\")\n\n if year >= 2023:\n self._add_holy_thursday(\"Jueves Santo\")\n\n self._add_good_friday(\"Viernes Santo\")\n\n if year != 2022:\n self._add_labor_day(\"Día del Trabajador\")\n\n self._add_assumption_of_mary_day(\"Asunción de la Virgen\")\n\n self._add_holiday_oct_12(\"Día de la Hispanidad\")\n\n self._add_all_saints_day(\"Todos los Santos\")\n\n self._add_holiday_dec_6(\"Día de la Constitución Española\")\n\n self._add_immaculate_conception_day(\"La Inmaculada Concepción\")\n\n if year != 2022:\n self._add_christmas_day(\"Navidad\")\n\n def _add_subdiv_an_holidays(self):\n if self._year == 2023:\n self._add_new_years_day(\"Año nuevo\")\n self._add_holiday_feb_28(\"Día de Andalucia\")\n if self._year <= 2022:\n self._add_holy_thursday(\"Jueves Santo\")\n if self._year == 2022:\n self._add_labor_day(\"Día del Trabajador\")\n self._add_christmas_day(\"Navidad\")\n\n def _add_subdiv_ar_holidays(self):\n if self._year == 2023:\n self._add_new_years_day(\"Año nuevo\")\n if self._year <= 2014:\n self._add_saint_josephs_day(\"San José\")\n if self._year <= 2022:\n self._add_holy_thursday(\"Jueves Santo\")\n self._add_saint_georges_day(\"Día de San Jorge\")\n if self._year == 2022:\n self._add_labor_day(\"Día del Trabajador\")\n self._add_christmas_day(\"Navidad\")\n\n def _add_subdiv_as_holidays(self):\n if self._year == 2023:\n self._add_new_years_day(\"Año nuevo\")\n if self._year <= 2022:\n self._add_holy_thursday(\"Jueves Santo\")\n self._add_holiday_sep_8(\"Día de Asturias\")\n if self._year == 2022:\n self._add_labor_day(\"Día del Trabajador\")\n self._add_christmas_day(\"Navidad\")\n\n def _add_subdiv_cb_holidays(self):\n if self._year <= 2022:\n self._add_holy_thursday(\"Jueves Santo\")\n self._add_holiday_jul_28(\"Día de las Instituciones de Cantabria\")\n self._add_holiday_sep_15(\"Día de la Bien Aparecida\")\n if self._year == 2022:\n self._add_christmas_day(\"Navidad\")\n\n def _add_subdiv_ce_holidays(self):\n if self._year <= 2022:\n 
self._add_holy_thursday(\"Jueves Santo\")\n self._add_holiday_aug_5(\"Nuestra Señora de África\")\n self._add_holiday_sep_2(\"Día de la Ciudad Autónoma de Ceuta\")\n if self._year == 2022:\n self._add_eid_al_adha_day(\"Eid al-Adha\")\n elif self._year == 2023:\n self._add_eid_al_adha_day_two(\"Eid al-Adha\")\n\n def _add_subdiv_cl_holidays(self):\n if self._year == 2023:\n self._add_new_years_day(\"Año nuevo\")\n self._add_saint_james_day(\"Día de Santiago Apóstol\")\n if self._year <= 2014:\n self._add_saint_josephs_day(\"San José\")\n if self._year <= 2022:\n self._add_holy_thursday(\"Jueves Santo\")\n self._add_holiday_apr_23(\"Día de Castilla y Leon\")\n if self._year == 2022:\n self._add_labor_day(\"Día del Trabajador\")\n self._add_christmas_day(\"Navidad\")\n\n def _add_subdiv_cm_holidays(self):\n if self._year <= 2015 or 2020 <= self._year <= 2021:\n self._add_saint_josephs_day(\"San José\")\n if self._year <= 2022:\n self._add_holy_thursday(\"Jueves Santo\")\n if self._year <= 2021:\n self._add_easter_monday(\"Lunes de Pascua\")\n if self._year >= 2022:\n self._add_corpus_christi_day(\"Corpus Christi\")\n self._add_holiday_may_31(\"Día de Castilla La Mancha\")\n if self._year == 2022:\n self._add_christmas_day(\"Navidad\")\n\n def _add_subdiv_cn_holidays(self):\n if self._year <= 2022:\n self._add_holy_thursday(\"Jueves Santo\")\n self._add_holiday_may_30(\"Día de Canarias\")\n if self._year == 2022:\n self._add_christmas_day(\"Navidad\")\n\n def _add_subdiv_ct_holidays(self):\n self._add_easter_monday(\"Lunes de Pascua\")\n if self._year == 2022:\n self._add_holiday_jun_6(\"Día de la Pascua Granada\")\n self._add_saint_johns_day(\"San Juan\")\n self._add_holiday_sep_11(\"Día Nacional de Catalunya\")\n if self._year == 2022:\n self._add_christmas_day(\"Navidad\")\n self._add_christmas_day_two(\"San Esteban\")\n\n def _add_subdiv_ex_holidays(self):\n if self._year <= 2014:\n self._add_saint_josephs_day(\"San José\")\n if self._year <= 2022:\n self._add_holy_thursday(\"Jueves Santo\")\n self._add_holiday_sep_8(\"Día de Extremadura\")\n if self._year == 2023:\n self._add_carnival_tuesday(\"Carnaval\")\n if self._year == 2022:\n self._add_labor_day(\"Día del Trabajador\")\n self._add_christmas_day(\"Navidad\")\n\n def _add_subdiv_ga_holidays(self):\n if self._year <= 2014 or 2018 <= self._year <= 2021:\n self._add_saint_josephs_day(\"San José\")\n if self._year <= 2022:\n self._add_holy_thursday(\"Jueves Santo\")\n if self._year >= 2022:\n self._add_holiday_may_17(\"Día de las letras Gallegas\")\n if self._year != 2023:\n self._add_saint_johns_day(\"San Juan\")\n self._add_holiday_jul_25(\"Día Nacional de Galicia\")\n\n def _add_subdiv_ib_holidays(self):\n self._add_holiday_mar_1(\"Día de las Islas Baleares\")\n if self._year <= 2022:\n self._add_holy_thursday(\"Jueves Santo\")\n self._add_easter_monday(\"Lunes de Pascua\")\n if self._year == 2022:\n self._add_christmas_day(\"Navidad\")\n if self._year <= 2020:\n self._add_christmas_day_two(\"San Esteban\")\n\n def _add_subdiv_mc_holidays(self):\n if self._year == 2023:\n self._add_new_years_day(\"Año nuevo\")\n if self._year <= 2021 and self._year != 2017:\n self._add_saint_josephs_day(\"San José\")\n if self._year <= 2022:\n self._add_holy_thursday(\"Jueves Santo\")\n if self._year == 2022:\n self._add_labor_day(\"Día del Trabajador\")\n self._add_holiday_jun_9(\"Día de la Región de Murcia\")\n if self._year == 2022:\n self._add_christmas_day(\"Navidad\")\n\n def _add_subdiv_md_holidays(self):\n if self._year <= 2015 or self._year 
== 2023:\n self._add_saint_josephs_day(\"San José\")\n if self._year <= 2022:\n self._add_holy_thursday(\"Jueves Santo\")\n self._add_holiday_may_2(\"Día de Comunidad de Madrid\")\n if self._year == 2022:\n self._add_saint_james_day(\"Día de Santiago Apóstol\")\n self._add_christmas_day(\"Navidad\")\n\n def _add_subdiv_ml_holidays(self):\n if self._year <= 2016:\n self._add_saint_josephs_day(\"San José\")\n if self._year <= 2022:\n self._add_holy_thursday(\"Jueves Santo\")\n self._add_holiday_sep_8(\"Vírgen de la victoria\")\n self._add_holiday_sep_17(\"Día de Melilla\")\n if self._year == 2022:\n self._add_eid_al_fitr_day_two(\"Eid al-Fitr\")\n self._add_eid_al_adha_day_three(\"Eid al-Adha\")\n self._add_christmas_day(\"Navidad\")\n elif self._year == 2023:\n self._add_eid_al_fitr_day(\"Eid al-Fitr\")\n self._add_eid_al_adha_day_two(\"Eid al-Adha\")\n\n def _add_subdiv_nc_holidays(self):\n if self._year <= 2015 or 2018 <= self._year <= 2021:\n self._add_saint_josephs_day(\"San José\")\n if self._year <= 2022:\n self._add_holy_thursday(\"Jueves Santo\")\n self._add_easter_monday(\"Lunes de Pascua\")\n if self._year >= 2022:\n self._add_saint_james_day(\"Día de Santiago Apóstol\")\n if self._year == 2022:\n self._add_christmas_day(\"Navidad\")\n\n def _add_subdiv_pv_holidays(self):\n if self._year <= 2021:\n self._add_saint_josephs_day(\"San José\")\n if self._year <= 2022:\n self._add_holy_thursday(\"Jueves Santo\")\n self._add_easter_monday(\"Lunes de Pascua\")\n if self._year >= 2022:\n self._add_saint_james_day(\"Día de Santiago Apóstol\")\n if self._year <= 2022:\n self._add_holiday_sep_6(\"Día de Elcano\")\n if 2011 <= self._year <= 2013:\n self._add_holiday_oct_25(\"Día del País Vasco\")\n\n def _add_subdiv_ri_holidays(self):\n if self._year <= 2022:\n self._add_holy_thursday(\"Jueves Santo\")\n if self._year >= 2022:\n self._add_easter_monday(\"Lunes de Pascua\")\n self._add_holiday_jun_9(\"Día de La Rioja\")\n if self._year == 2022:\n self._add_christmas_day(\"Navidad\")\n\n def _add_subdiv_vc_holidays(self):\n if self._year <= 2022 and self._year != 2017:\n self._add_saint_josephs_day(\"San José\")\n if self._year == 2022:\n self._add_holy_thursday(\"Jueves Santo\")\n self._add_easter_monday(\"Lunes de Pascua\")\n self._add_saint_johns_day(\"San Juan\")\n if self._year <= 2021:\n self._add_holiday_oct_9(\"Día de la Comunidad Valenciana\")\n\n\nclass ES(Spain):\n pass\n\n\nclass ESP(Spain):\n pass\n","sub_path":"holidays/countries/spain.py","file_name":"spain.py","file_ext":"py","file_size_in_byte":11175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"625065616","text":"import sqlite3\nimport csv\n\ncon = sqlite3.connect('db.sqlite3')\ncursor = con.cursor()\n\nwith open('DRI.csv', 'r') as f:\n b = csv.reader(f)\n print(b)\n header = next(b)\n sql = 'insert into myApp_DRI (age_id,male_protain,male_vitA,male_fe,female_protain,female_vitA,female_fe) VALUES(?,?,?,?,?,?,?)'\n for t in b:\n data = t\n cursor.execute(sql, data)\n\ncon.commit()\ncon.close()\n","sub_path":"import_dri.py","file_name":"import_dri.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"96184294","text":"SCREEN = '800x650'\nFORMATED_OPTIONS = \"[{}] \"\nMENU = [\"move\", \"select item\", \"exit\"]\nINFO = {\"location\": \"forest\", \"health\": 100, \"energy\": 100,\n \"items\": {}, \"selected_item\": None}\nSITES = {\n \"city\": {\n \"options\": {\"house\", 
\"market\", \"smith\"},\n },\n \"forest\": {\n \"options\": {\n \"Pick wood\": {\"energy\": -10, \"wood\": 1},\n \"Plant a tree\": {\"energy\": -20, \"wood\": -1, \"new_tree\": 3},\n \"Pick apples\": {\"energy\": -5, \"apples\": 1},\n },\n \"probabilities\": {\"tree\": 0.1, \"apples\": 0.2},\n \"wood\": 2, \"apples\": 10,\n },\n \"mine\": {},\n}","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"208890975","text":"#!/usr/bin/python3\n# Script that starts a flask application in a port 5000\nfrom flask import Flask\nfrom flask import render_template\nfrom models import storage\nfrom models.state import State\n\napp = Flask(__name__)\napp.url_map.strict_slashes = False\n\n\n@app.teardown_appcontext\ndef tear_down(exception):\n \"\"\"Calls Storage close on appcontext\"\"\"\n storage.close()\n\n\n@app.route(\"/states_list\")\ndef state_list():\n my_states = []\n for key, value in storage.all(State).items():\n my_states.append(value)\n return render_template('7-states_list.html', my_states=my_states)\n\n\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\", port=5000)\n","sub_path":"web_flask/7-states_list.py","file_name":"7-states_list.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"624464954","text":"\"\"\"\nThis file demonstrates two different styles of tests (one doctest and one\nunittest). These will both pass when you run \"manage.py test\".\n\nReplace these with more appropriate tests for your application.\n\"\"\"\n\nimport datetime\nfrom datetime import timedelta\nimport json\n\nimport vobject\n\nfrom django.test import TestCase\nfrom django.utils import translation\nfrom django.core.urlresolvers import reverse\n\nfrom models import Event\n\n\nclass SimpleTest(TestCase):\n def setUp(self):\n self.now = datetime.datetime.now()\n\n self.ev1 = Event.objects.create(when=self.now, what=\"ev1\")\n self.ev2 = Event.objects.create(\n when=self.now + timedelta(days=1),\n when_over=self.now + timedelta(days=1, hours=2),\n when_over_guessed=False,\n what=\"future=%s\" % ''.join(str(x % 10) for x in xrange(300)))\n self.ev3 = Event.objects.create(\n when=self.now + timedelta(days=2),\n what=\"ev3\")\n\n def testFutureEvents(self):\n \"\"\"\n Tests Event.is_future property and\n Event.objects.get_upcoming\n \"\"\"\n self.assertTrue(self.ev2.is_future)\n upcoming = Event.objects.get_upcoming()\n self.assertEquals(upcoming[0].what, self.ev2.what)\n self.assertEqual(upcoming.count(), 2)\n self.ev3.delete()\n self.assertEqual(upcoming.count(), 1)\n self.ev3 = Event.objects.create(\n when=self.now + timedelta(days=1), what=\"ev3\")\n\n def testIcalenderSummaryLength(self):\n \"\"\"\n Tests that the icalendar view uses summary_length\n correctly.\n \"\"\"\n summary_length = 123\n res = self.client.get(reverse('event-icalendar',\n kwargs={'summary_length': summary_length}))\n self.assertEqual(res.status_code, 200)\n vcal = vobject.base.readOne(res.content)\n for vevent in vcal.components():\n if vevent.name != 'VEVENT':\n continue\n if vevent.summary.value.startswith(\"future\"):\n self.assertEqual(len(vevent.summary.value), summary_length)\n\n def testIcalenderGuessedEndWarning(self):\n \"\"\"\n test the guessed end warning.\n \"\"\"\n translation.activate('en')\n res = self.client.get(reverse('event-icalendar'))\n self.assertEqual(res.status_code, 200)\n vcal = 
vobject.base.readOne(res.content)\n for vevent in vcal.components():\n if vevent.summary.value.startswith(\"future\"):\n self.assertEqual(vevent.description.value, self.ev2.what)\n elif vevent.summary.value == \"ev3\":\n self.assertEqual(\n vevent.description.value,\n 'ev3\\n\\noknesset warnings:\\nno end date data - '\n 'guessed it to be 2 hours after start')\n translation.deactivate()\n\n def testAPIv2FutureEventsConsistency(self):\n \"\"\"\n Test that APIv2 and APIv1 fetch the same future events.\n \"\"\"\n res_v1 = self.client.get('/api/event/')\n self.assertEqual(res_v1.status_code, 200)\n res_v2 = self.client.get('/api/v2/event/', format='json')\n self.assertEqual(res_v2.status_code, 200)\n ids_v1 = set(x['what'] for x in json.loads(res_v1.content))\n ids_v2 = set(x['what'] for x in json.loads(res_v2.content))\n self.assertEqual(ids_v1, ids_v2)\n\n def testAPIv2Identity(self):\n \"\"\"\n Test that APIv2 and APIv1 return the same data for each event.\n \"\"\"\n for event_id in [self.ev1.id, self.ev2.id, self.ev3.id]:\n res_v1 = self.client.get('/api/event/%d/' % event_id)\n self.assertEqual(res_v1.status_code, 200)\n res_v2 = self.client.get('/api/v2/event/%d/' % event_id,\n format='json')\n self.assertEqual(res_v2.status_code, 200)\n event_v1 = json.loads(res_v1.content)\n event_v2 = json.loads(res_v2.content)\n self.assertEqual(event_v1['what'], event_v2['what'])\n # APIv2 return a more \"accurate\" result, so I need to trunk it\n self.assertEqual(event_v1['when'], event_v2['when'][:-3])\n self.assertEqual(event_v1['where'], event_v2['where'])\n\n def tearDown(self):\n self.ev1.delete()\n self.ev2.delete()\n self.ev3.delete()\n\n\nclass ViewTest(TestCase):\n def setUp(self):\n now = datetime.datetime.now()\n self.ev1 = Event.objects.create(when=now, what=\"ev1\")\n self.ev2 = Event.objects.create(\n when=now + timedelta(days=1, seconds=2 * 3600 + 34 * 60),\n when_over=now + timedelta(days=1, hours=2),\n when_over_guessed=False,\n what=\"future=%s\" % ''.join(str(x % 10) for x in xrange(300)))\n self.ev3 = Event.objects.create(\n when=now + timedelta(days=1), what=\"ev3\")\n\n def testDetailView(self):\n res = self.client.get(self.ev2.get_absolute_url())\n self.assertEqual(res.status_code, 200)\n self.assertEqual(res.context['in_days'], 1)\n self.assertEqual(res.context['in_hours'], 2)\n self.assertEqual(res.context['in_minutes'], 33)\n\n def tearDown(self):\n self.ev1.delete()\n self.ev2.delete()\n self.ev3.delete()\n","sub_path":"events/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":5293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"338796852","text":"n = int(input())\n\nfor i in range(n):\n texto = input().upper()\n count = 0\n alfabeto = list(\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\")\n\n for l in alfabeto:\n if l in texto:\n count += 1\n\n if count == 26:\n print(\"frase completa\")\n elif count >= 13:\n print(\"frase quase completa\")\n else:\n print(\"frase mal elaborada\")\n","sub_path":"1551.py","file_name":"1551.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"210208602","text":"#version 115 has old code/functions for download sequences app \n\n#import cchrysostomou_proxytest_query_functions_class as query #for attempting to connect to proxy\nimport immunogrep_makeappfunctions as makeapp\nimport appsoma_api\nfrom immunogrep_filesystem_tools import PreviewCompressedFilesGenerator\n\n##@APP title:\"Download 
Sequences from IGREP Database\", tags:\"immunogrep_query\"\n\nexp_ui = makeapp.DBDownloader() # CreateDownloadApp()\nwhile True:\n\tresponse = appsoma_api.communicate_await()\n\tsomething_happened = exp_ui.Process_Events(response)\n\tif something_happened and exp_ui.download_finished:\n\t\t[downloaded_files,zipped_files,filetype] = exp_ui.Get_Data()\n\t\tbreak\t\t\t\t\t\n\n\nif downloaded_files:\n\tfilesummary ={'files': [\n\t\t\t{'location':each_file['filename'],\n\t\t\t 'filetype':filetype\n\t\t\t}\n\t\t\tfor each_file in downloaded_files\n\t\t]\t\n\t}\t\n\t\n\tif zipped_files:\t\t\t\t\n\t\tfilesummary['files'] = [{'location':f, 'caption':'Compressed files','read_fxn': PreviewCompressedFilesGenerator} for f in zipped_files] + filesummary['files']\n\t\t\t\t\n\tvar = makeapp.ShowResultsSummary(filesummary)\t\t\t\t\t\n\t\n\t#while True:\t\t\n\t#\tresponse = appsoma_api.communicate_await()\n\t#\tvar.Process_Events(response)\t\nelse:\n\tappsoma_api.html_append(\"No sequences were found with that experiment\")\n","sub_path":"cchrysostomou_downloadsequencesapp.py","file_name":"cchrysostomou_downloadsequencesapp.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"255571096","text":"\"\"\"Selectors for Home Assistant.\"\"\"\nfrom typing import Any, Callable, Dict, cast\n\nimport voluptuous as vol\n\nfrom homeassistant.util import decorator\n\nSELECTORS = decorator.Registry()\n\n\ndef validate_selector(config: Any) -> Dict:\n \"\"\"Validate a selector.\"\"\"\n if not isinstance(config, dict):\n raise vol.Invalid(\"Expected a dictionary\")\n\n if len(config) != 1:\n raise vol.Invalid(f\"Only one type can be specified. Found {', '.join(config)}\")\n\n selector_type = list(config)[0]\n\n selector_class = SELECTORS.get(selector_type)\n\n if selector_class is None:\n raise vol.Invalid(f\"Unknown selector type {selector_type} found\")\n\n return {\n selector_type: cast(Dict, selector_class.CONFIG_SCHEMA(config[selector_type]))\n }\n\n\nclass Selector:\n \"\"\"Base class for selectors.\"\"\"\n\n CONFIG_SCHEMA: Callable\n\n\n@SELECTORS.register(\"entity\")\nclass EntitySelector(Selector):\n \"\"\"Selector of a single entity.\"\"\"\n\n CONFIG_SCHEMA = vol.Schema(\n {\n # Integration that provided the entity\n vol.Optional(\"integration\"): str,\n # Domain the entity belongs to\n vol.Optional(\"domain\"): str,\n }\n )\n\n\n@SELECTORS.register(\"device\")\nclass DeviceSelector(Selector):\n \"\"\"Selector of a single device.\"\"\"\n\n CONFIG_SCHEMA = vol.Schema(\n {\n # Integration linked to it with a config entry\n vol.Optional(\"integration\"): str,\n # Manufacturer of device\n vol.Optional(\"manufacturer\"): str,\n # Model of device\n vol.Optional(\"model\"): str,\n }\n )\n","sub_path":"homeassistant/helpers/selector.py","file_name":"selector.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"431622050","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('demo', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='character',\n name='last_refresh',\n field=models.DateTimeField(default=datetime.datetime(2014, 12, 3, 13, 33, 26, 784859)),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='character',\n 
name='last_refresh_request',\n field=models.DateTimeField(default=datetime.datetime(2014, 12, 3, 13, 33, 34, 417067)),\n preserve_default=False,\n ),\n ]\n","sub_path":"demo/migrations/0002_auto_20141203_1333.py","file_name":"0002_auto_20141203_1333.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"539971537","text":"import logging\n\nimport sys\nfrom twisted.internet import reactor\nfrom twisted.internet.defer import Deferred\nfrom twisted.trial import unittest\n\nfrom .PappServerLoader import pappServerLoader\n\nlogger = logging.getLogger(__name__)\n\nPAPP_NOOP = \"papp_noop\"\n\n\nclass PappServerLoaderTest(unittest.TestCase):\n def testLoadAll(self):\n pappServerLoader.loadAllPapps()\n\n logger.info(pappServerLoader.listPapps())\n\n for papp in list(pappServerLoader._loadedPapps.values()):\n logger.info(\"configUrl = %s\", papp.configUrl())\n\n d = Deferred()\n reactor.callLater(5.0, d.callback, True)\n return d\n\n def testUnregister(self):\n loadedModuleBefore = set(sys.modules)\n\n pappServerLoader.loadPapp(PAPP_NOOP)\n self.assertTrue(PAPP_NOOP in sys.modules)\n\n pappServerLoader.unloadPapp(PAPP_NOOP)\n\n loadedModuleNow = set(sys.modules) - loadedModuleBefore\n\n # Ensure that none of the modules contain the papp_name\n for modName in loadedModuleNow:\n self.assertFalse(PAPP_NOOP in modName)\n\n def testReRegister(self):\n pappServerLoader.loadPapp(PAPP_NOOP)\n pappServerLoader.loadPapp(PAPP_NOOP)\n\n for papp in list(pappServerLoader._loadedPapps.values()):\n logger.info(\"configUrl = %s\", papp.configUrl())\n\n d = Deferred()\n reactor.callLater(5.0, d.callback, True)\n return d\n","sub_path":"peek_server/papp/PappServerLoaderTest.py","file_name":"PappServerLoaderTest.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"515476337","text":"\n\nfrom xai.brain.wordbase.nouns._mandrake import _MANDRAKE\n\n#class header\nclass _MANDRAKES(_MANDRAKE, ):\n\tdef __init__(self,): \n\t\t_MANDRAKE.__init__(self)\n\t\tself.name = \"MANDRAKES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"mandrake\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_mandrakes.py","file_name":"_mandrakes.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"477860262","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nimport matplotlib.cm as cm\n\nx = np.arange(0, 31, 1)\ny = np.arange(0, 31, 1)\nz = []\narrow_scale = 0.5\n\nf = open('output.txt', 'r')\nfor line in f:\n z.append(map(float, line.split()))\n\nplt.imshow(z, cmap='seismic')\nplt.colorbar()\nplt.xlim(1,29)\nplt.ylim(1,29)\ndx,dy = np.gradient(z)\nplt.xticks([]) # Hide the axis ticks\nplt.yticks([]) \nplt.title(\"Ohmic Current Flow\")\n# plt.quiver(-dy*arrow_scale, -dx*arrow_scale, scale = 1)\n\nseed_points = np.array([[13,14,15,16,17], [29,29,29,29,29]])\nplt.streamplot(x,y,-dy,-dx, color='k', start_points=seed_points.T)\nplt.savefig('solution.png', dpi=600, bbox_inches='tight')\n#plt.show()\nplt.clf()\n\niter, err= np.loadtxt(\"error.txt\", delimiter=\"\\t\", unpack=True)\nplt.plot(iter, err)\nplt.xlabel(\"Iteration Number\")\nplt.ylabel(\"Maximum 
Error\")\nplt.savefig(\"errors.png\")\n#plt.show()\nplt.clf()\n\n","sub_path":"laplaceSolver/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"649595210","text":"print(\"----------------------\")\na = 9\nb = \"rotem\"\nc = \"rotem\"\nprint(id(a))\nprint(type(a))\nprint(a)\nprint(id(b))\nprint(type(b))\nprint(b)\nprint(id(c))\nprint(type(c))\nprint(c)\n# == | != | <= | >= | += | -=\n# list = ['rotem', 'rozmarin']\n# strings = \"rotem\"\n# len()\n# str()\n\na = [\"hi\"]\nb = [\"hibobo\"]\na is b\nhi = 4\nhi = float(hi)\n\nprint(\"-IF---------------------\")\nage = 45\nageday = age * 365\nprint(ageday)\nif not ageday > 120000:\n print(\"you still yuang\")\nelif age == 45:\n print(\"you are 45\")\nelse:\n print(\"you old :)\")\n\nprint('-WHILE LOOP-----------')\nstart = 10\nwhile start:\n if start == 20:\n print('ok 20')\n break\n else:\n print(start)\n start -= -1\n\nprint('-FOR LOOP-----------')\nhel = ['hi', 'ewy', 'ere', 'dsach']\nprint(hel)\nfor hihi in hel:\n print(hihi+' we')\n\nfor letter in 'abcdef':\n print(letter.upper())\n\nfor nm in [1, 2, 3, 4, 5, 6, 7]:\n if nm == 6:\n print(\"ok 6\")\n else:\n print(nm)\n\nmyl = ['rotem', 'roni', 'alon', 'QUIT', 'guy']\nfor names in myl:\n print(names)\n\nprint('-IMPUT--------')\n# rotem = input(\"what is your name? \")\n# printF('you are gorjess {} olwayes!'.format(rotem))\n\nprint('-FANCTION-------------------')\ndef rotem_first_fanc(name):\n if name.lower() == 'rotem':\n print(\"yes :) WON\")\n else:\n print('No it is not {} ! '.format(name))\nrotem_first_fanc('rotem') # call the Function\n# rotem_first_fanc(input('ges the name?'))\n\ndef even(num):\n if num == 1:\n return True\n else:\n return False\n#\ndef printer(count):\n print('hi ' * count)\nprinter(4)\n#\ndef product(con, pon):\n return (con * pon)\nproduct(3, 5)\n#\ndef add(li, hei):\n li = float(li)\n hei = float(hei)\n return (li + hei)\nadd(2, 5)\n#\ndef loopt(items):\n for item in items:\n if item == \"STOP\":\n break\n else:\n print(item)\nloopt('wee' 'ewew' 'weewe' 'STOP' 'ROTEM')\n\ndef even_odd(nm):\n print(nm)\n if nm % 2:\n print('The NM is EVEN')\n return False\n else:\n print('The NM is ODD')\n return True\neven_odd(3)\n#\nimport sys\nqwes = input('do you whant to start th move? ')\nif qwes.lower() != \"n\":\n print('Enjoy the Show!')\nelse:\n sys.exit()\n\nprint('==========================================================')\nprint('-My Shopping-------------------')\nprint('==========================================================')\ndef help_mane():\n print(\"\"\"Enetr 'DONE' fo finish.\nEnter 'HELP' for this help.\nEnter 'SHOW' to see your list\n\"\"\")\n\ndef show_list():\n print('Here is your list:')\n for R2 in shooping_list:\n print(R2)\n\ndef add_to_list():\n shooping_list.append(new_item)\n\nshooping_list = []\nhelp_mane()\n\nwhile True: #to make ~ loop\n new_item = input('> ')\n if new_item == 'DONE':\n print('Added {} items'.format(len(shooping_list)))\n print(show_list())\n break\n elif new_item == 'SHOW':\n show_list()\n continue\n elif new_item == 'HELP':\n help_mane()\n continue\n add_to_list()\n\nprint('==========================================================')\nprint('-Random Game-------------------')\nprint('==========================================================')\nimport random\nsecret_nm = random.randint(1, 10)\n\nwhile True:\n user_nm = int(input(\"guess a nm 1 to 10: \"))\n if user_nm == secret_nm:\n print(' ---> yes!!! 
it was {}'.format(secret_nm))\n break\n elif user_nm <= secret_nm:\n print(\"the nm us BIG then {}\".format(user_nm))\n continue\n elif user_nm >= secret_nm:\n print(\"the nm us SMALL then {}\".format(user_nm))\n continue\n\nprint('==========================================================')\nimport random\nstart = 5\ndef even_odd(num):\n return not num % 2\nwhile start != 0:\n num = random.randint(1, 99)\n if even_odd(num) == 0:\n print('{} is even'.format(num))\n else:\n print('{} it odd'.format(num))\n start -= 1\n\n# -------------------------------\n# insert \\ append \\ extend\nbest = [1, 2, 'rotem']\nprint(best)\nbest.append('rozmarin')\nprint(best)\nbest.extend(['keren', 'hayesod'])\nprint(best)\nbest += [22]\nprint(best)\nbest.insert(2, \"8899\")\nprint(best)\ndel best[2]\n\n# -------------------------------\n# Delete \\ Remove Option\nname = ['a', 'b', 'z', 'c', 'd']\nprint(name)\ndel name[2]\nprint(name)\nmy_list = [1, 2, 3, 4, 1]\nmy_list.remove(1)\nprint(my_list)\nmy_list.remove(1)\nprint(my_list)\n\n# ------------------------------------\n# Delete letter from a word\ndef disemvowel(word):\n wordtolist = list(word)\n wedontwhantit = ['a', 'e', 'i', 'o', 'u']\n for letter in wedontwhantit:\n while True:\n try:\n wordtolist.remove(letter)\n except ValueError:\n break\n print(word)\n print(wordtolist)\ndisemvowel('rozmarin')\n\n# ------------------------------------\n# pop\nmessy_list = [\"a\", 2, 3, 1, 'False', [1, 2, 3]]\nind3 = messy_list.pop(3)\nmessy_list.insert(0, ind3)\nmessy_list.remove('a')\ndel messy_list[4]\ndel messy_list[3]\nprint(messy_list)\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"TreePY.py","file_name":"TreePY.py","file_ext":"py","file_size_in_byte":5034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"308230537","text":"import xlrd\nimport pandas as pd\nimport numpy as np\nimport os\nimport csv\n\n\ndef index(arr):\n for i in arr:\n if ('Source_AMag_C' in i) == True:\n ind = arr.index(i) + 1\n return ind\n else:\n continue\n\n\ndef trns_tableTocsv(f2table, f1table, BPRP, filter, res, excess, distance): #original f1 was V f2 was B\n workbookf1 = xlrd.open_workbook(f1table, on_demand=True)\n worksheetf1 = workbookf1.sheet_by_index(0)\n\n arrayofnamesf1 = worksheetf1.col_values(1)\n arrayofvaluesf1 = worksheetf1.col_values(2)\n f1_array = np.array(arrayofvaluesf1[index(arrayofnamesf1):])\n\n NaN_index = list(np.where(f1_array == \"NaN\")[0])\n NaN_index = list(np.where(f1_array == \"Infinity\")[0]) + NaN_index\n #############################################################\n workbookf2 = xlrd.open_workbook(f2table, on_demand=True)\n worksheetf2 = workbookf2.sheet_by_index(0)\n\n arrayofnamesf2 = worksheetf2.col_values(1)\n arrayofvaluesf2 = worksheetf2.col_values(2)\n f2_array = np.array(arrayofvaluesf2[index(arrayofnamesf2):])\n\n NaN_index = list(np.where(f2_array == \"NaN\")[0]) + NaN_index\n NaN_index = list(set(list(np.where(f2_array == \"Infinity\")[0]) + NaN_index))\n #############################################################\n f2_array = np.delete(f2_array, NaN_index)\n f2_array = np.array(list(map(float, f2_array)))\n f2_extremal = list(np.where(f2_array >= res)[0])\n\n f1_array = np.delete(f1_array, NaN_index)\n f1_array = np.array(list(map(float, f1_array)))\n f1_extremal = list(np.where(f1_array >= res)[0])\n\n all_extremal = list(set(f2_extremal + f1_extremal))\n f2_array = np.delete(f2_array, all_extremal)\n f1_array = np.delete(f1_array, all_extremal)\n\n df = pd.read_csv(BPRP) # open the 
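CSV of BP/RP photometry (the phot_bp/rp/g_mean_mag columns used below) exported from the 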
topcat VO document\n\n\n ##############################################################\n def AbsoluteMag(mag_array,distance):\n modulus = 5 - 5*(np.log10(distance))\n return np.add(mag_array,modulus)\n\n f2_array = AbsoluteMag(f2_array,distance)\n f1_array = AbsoluteMag(f1_array,distance)\n df2f1_array = f2_array - f1_array #filter 1 and 2 difference\n bp = AbsoluteMag(np.array(df[\"phot_bp_mean_mag\"]),distance)\n rp = AbsoluteMag(np.array(df[\"phot_rp_mean_mag\"]),distance)\n g = AbsoluteMag(np.array(df[\"phot_g_mean_mag\"]),distance)\n dbprp = bp-rp # get Gbp - Grp\n\n if filter == \"BV\": # f2 is \"B\" | f1 is \"V\"\n def df2f1TodGf1(df1f2):\n dgv = []\n for i in df1f2:\n dgv.append(-0.02907 - 0.02385 * (i) - 0.2297 *\n pow(i, 2) - 0.001768 * pow(i, 3))\n return dgv\n\n def dBPRpTodGf1(dbprp):\n dgv = []\n c1 = -0.0176\n c2 = -0.00686\n c3 = -0.1732\n for i in dbprp:\n dgv.append(c1 + c2 * i + c3 * pow(i, 2))\n return dgv\n\n def Extinction_correction_diff(AbsMag,excess):\n return np.subtract(AbsMag,excess)\n\n def Extinction_correction_mag(AbsMag,excess):\n Rv = 3.1\n Av = Rv*excess\n return np.subtract(AbsMag,Av)\n\n df2f1_array = Extinction_correction_diff(df2f1_array,excess)\n f1_array = Extinction_correction_mag(f1_array,excess)\n\n dGV_f2f1 = df2f1TodGf1(df2f1_array)\n dGV_BPRP = dBPRpTodGf1(dbprp)\n\n topcat_df = pd.read_csv(BPRP)\n topcat_df[\"Mg-Mv\"] = dGV_BPRP\n topcat_df[\"Mg\"] = g\n topcat_df.to_csv(BPRP)\n\n elif filter == \"BR\": # f2 is \"B\" | f1 is \"R\"\n def df2f1TodGf1(df1f2):\n dgv = []\n for i in df1f2:\n dgv.append(-0.0128 + 0.3064 * (i) - 0.0520 *\n pow(i, 2) - 0.0139 * pow(i, 3))\n return dgv\n\n def dBPRpTodGf1(dbprp):\n dgv = []\n c1 = -0.003226\n c2 = 0.3833\n c3 = -0.1345\n for i in dbprp:\n dgv.append(c1 + c2 * i + c3 * pow(i, 2))\n return dgv\n\n def Extinction_correction_diff(AbsMag,excess):\n excess = 1.78*excess\n return np.subtract(AbsMag,excess)\n\n def Extinction_correction_mag(AbsMag,excess):\n Ar = 2.32*excess\n return np.subtract(AbsMag,Ar)\n\n df2f1_array = Extinction_correction_diff(df2f1_array,excess)\n f1_array = Extinction_correction_mag(f1_array,excess)\n\n dGV_f2f1 = df2f1TodGf1(df2f1_array)\n dGV_BPRP = dBPRpTodGf1(dbprp)\n\n topcat_df = pd.read_csv(BPRP)\n topcat_df[\"Mg-Mr\"] = dGV_BPRP\n topcat_df[\"Mg\"] = g\n topcat_df.to_csv(BPRP)\n\n ##############################################################\n f2_array = list(f2_array)\n f1_array = list(f1_array)\n df2f1_array = list(df2f1_array)\n data = list(zip(f2_array, f1_array, df2f1_array, dGV_f2f1))\n ##############################################################\n dir = os.getcwd()\n with open(dir + \"\\\\GAIA Data\\\\PhotometryTable_minmag{}_excess{}_distance{}_filter{}.csv\".format(res,excess,distance,filter), \"w\") as csvfile:\n filewriter = csv.writer(csvfile, delimiter=',',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n if filter == \"BV\":\n labels = [\"Mb\", \"Mv\", \"Mb-Mv\", \"Mg-Mv\"]\n elif filter == \"BR\":\n labels = [\"Mb\", \"Mr\", \"Mb-Mr\", \"Mg-Mr\"]\n # write the header once, then every data row (the old counter-based\n # loop wrote the header in place of the first row, silently dropping it)\n filewriter.writerow(labels)\n for row in data:\n filewriter.writerow(row)\n","sub_path":"Conversion_and_Parsing.py","file_name":"Conversion_and_Parsing.py","file_ext":"py","file_size_in_byte":5616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"31312308","text":"import unittest\nfrom typing import Tuple, Any\n\nfrom .Graph import BinaryTree, BinaryNode\n\n\nclass BinaryTreeE5(BinaryTree):\n def 
is_search_tree(self):\n is_search, _, _ = self._is_search_tree(self.root)\n return is_search\n\n def _is_search_tree(self, node: BinaryNode) -> Tuple[bool, Any, Any]:\n left = node.left_child\n right = node.right_child\n if left is None and right is None:\n return True, node.data, node.data\n\n if left is None:\n is_right_search, right_min, right_max = self._is_search_tree(right)\n if is_right_search is False or right_min <= node.data:\n return False, None, None\n return True, node.data, right_max\n\n if right is None:\n is_left_search, left_min, left_max = self._is_search_tree(left)\n if is_left_search is False or node.data < left_max:\n return False, None, None\n return True, left_min, node.data\n\n is_left_search, left_min, left_max = self._is_search_tree(left)\n if is_left_search is False or node.data < left_max:\n return False, None, None\n\n is_right_search, right_min, right_max = self._is_search_tree(right)\n if is_right_search is False or right_min <= node.data:\n return False, None, None\n\n return True, left_min, right_max\n\n\nclass Test(unittest.TestCase):\n def test_it(self):\n node1 = BinaryNode(1)\n node2 = BinaryNode(2)\n node3 = BinaryNode(3)\n node4 = BinaryNode(4)\n node5 = BinaryNode(5)\n node6 = BinaryNode(6)\n node7 = BinaryNode(7)\n node4.left_child = node2\n node2.left_child = node1\n node2.right_child = node3\n node4.right_child = node6\n node6.left_child = node5\n node6.right_child = node7\n\n # node4 is the root of the constructed tree\n assert BinaryTreeE5(node4).is_search_tree() is True\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"chapter4/e5.py","file_name":"e5.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"284010658","text":"import tensorflow as tf\r\nimport numpy as np\r\nimport os\r\nimport time\r\nimport datetime\r\nimport DataPreprocessing\r\nfrom RNN import RNN\r\nfrom tensorflow.contrib import learn\r\n\r\n# Parameters\r\n# ==================================================\r\n\r\n# Model Hyperparameters\r\n\r\ntf.flags.DEFINE_integer(\"embedding_dim\", 250, \"Dimensionality of character embedding (Default: 250)\")\r\ntf.flags.DEFINE_integer(\"hidden_size\", 250, \"Dimensionality of the RNN hidden state (Default: 250)\")\r\ntf.flags.DEFINE_float(\"dropout_keep_prob\", 0.3, \"Dropout keep probability (Default: 0.3)\")\r\ntf.flags.DEFINE_float(\"l2_reg_lambda\", 0.5, \"L2 regularization lambda (Default: 0.5)\")\r\n\r\n# Training parameters\r\n\r\ntf.flags.DEFINE_integer(\"batch_size\", 64, \"Batch Size (Default: 64)\")\r\ntf.flags.DEFINE_integer(\"num_epochs\", 100, \"Number of training epochs (Default: 100)\")\r\ntf.flags.DEFINE_integer(\"display_every\", 10, \"Number of iterations to display training info.\")\r\ntf.flags.DEFINE_integer(\"evaluate_every\", 100, \"Evaluate model on dev set after this many steps\")\r\ntf.flags.DEFINE_integer(\"checkpoint_every\", 100, \"Save model after this many steps\")\r\ntf.flags.DEFINE_integer(\"num_checkpoints\", 5, \"Number of checkpoints to store\")\r\ntf.flags.DEFINE_float(\"learning_rate\", 1e-3, \"Which learning rate to start with. 
(Default: 1e-3)\")\r\n\r\n# Misc Parameters\r\n\r\ntf.flags.DEFINE_boolean(\"allow_soft_placement\", True, \"Allow device soft device placement\")\r\ntf.flags.DEFINE_boolean(\"log_device_placement\", False, \"Log placement of ops on devices\")\r\ntf.flags.DEFINE_boolean(\"pretrain_enable\", True, \"Add word2vec pretrain vector\")\r\ntf.flags.DEFINE_boolean(\"cell_type\", True, \"Add word2vec pretrain vector\")\r\n\r\nFLAGS = tf.flags.FLAGS\r\n\r\ndef train(x_train, y_train, x_dev, y_dev, embedding, vocab_processor):\r\n\r\n # Training\r\n # ==================================================\r\n\r\n with tf.Graph().as_default():\r\n\r\n session_conf = tf.ConfigProto(\r\n allow_soft_placement=FLAGS.allow_soft_placement,\r\n log_device_placement=FLAGS.log_device_placement)\r\n\r\n sess = tf.Session(config=session_conf)\r\n\r\n with sess.as_default():\r\n\r\n rnn = RNN(\r\n sequence_length=x_train.shape[1],\r\n num_classes=y_train.shape[1],\r\n vocab_size=len(vocab_processor.vocabulary_),\r\n embedding_size=FLAGS.embedding_dim,\r\n cell_type=FLAGS.cell_type,\r\n hidden_size=FLAGS.hidden_size,\r\n l2_reg_lambda=FLAGS.l2_reg_lambda,\r\n pretrain_enable=FLAGS.pretrain_enable\r\n )\r\n\r\n # Define Training procedure\r\n\r\n global_step = tf.Variable(0, name=\"global_step\", trainable=False)\r\n optimizer = tf.train.AdamOptimizer(1e-3)\r\n grads_and_vars = optimizer.compute_gradients(rnn.loss)\r\n train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)\r\n\r\n # Keep track of gradient values and sparsity (optional)\r\n\r\n grad_summaries = []\r\n for g, v in grads_and_vars:\r\n if g is not None:\r\n grad_hist_summary = tf.summary.histogram(\"{}/grad/hist\".format(v.name), g)\r\n sparsity_summary = tf.summary.scalar(\"{}/grad/sparsity\".format(v.name), tf.nn.zero_fraction(g))\r\n grad_summaries.append(grad_hist_summary)\r\n grad_summaries.append(sparsity_summary)\r\n grad_summaries_merged = tf.summary.merge(grad_summaries)\r\n\r\n # Output directory for models and summaries\r\n\r\n timestamp = str(int(time.time()))\r\n out_dir = os.path.abspath(os.path.join(os.path.curdir, \"runs\", timestamp))\r\n print(\"Writing to {}\\n\".format(out_dir))\r\n\r\n # Summaries for loss and accuracy\r\n\r\n loss_summary = tf.summary.scalar(\"loss\", rnn.loss)\r\n acc_summary = tf.summary.scalar(\"accuracy\", rnn.accuracy)\r\n\r\n # Train Summaries\r\n\r\n train_summary_op = tf.summary.merge([loss_summary, acc_summary, grad_summaries_merged])\r\n train_summary_dir = os.path.join(out_dir, \"summaries\", \"train\")\r\n train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)\r\n\r\n # Dev summaries\r\n\r\n dev_summary_op = tf.summary.merge([loss_summary, acc_summary])\r\n dev_summary_dir = os.path.join(out_dir, \"summaries\", \"dev\")\r\n dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, sess.graph)\r\n\r\n # Checkpoint directory. 
Tensorflow assumes this directory already exists so we need to create it\r\n\r\n checkpoint_dir = os.path.abspath(os.path.join(out_dir, \"checkpoints\"))\r\n checkpoint_prefix = os.path.join(checkpoint_dir, \"model\")\r\n if not os.path.exists(checkpoint_dir):\r\n os.makedirs(checkpoint_dir)\r\n saver = tf.train.Saver(tf.global_variables(), max_to_keep=FLAGS.num_checkpoints)\r\n\r\n # Write vocabulary\r\n\r\n vocab_processor.save(os.path.join(out_dir, \"vocab\"))\r\n\r\n # Initialize all variables\r\n\r\n sess.run(tf.global_variables_initializer())\r\n\r\n # Single Training Step\r\n\r\n def train_step(x_batch, y_batch, embedding):\r\n\r\n feed_dict = {\r\n rnn.input_x: x_batch,\r\n rnn.input_y: y_batch,\r\n rnn.dropout_keep_prob: FLAGS.dropout_keep_prob,\r\n rnn.embedding_placeholder: embedding\r\n }\r\n\r\n _, _, step, summaries, loss, accuracy, prediction, la_out = sess.run(\r\n [rnn.embedding_init, train_op, global_step, train_summary_op,\r\n rnn.loss, rnn.accuracy, rnn.predictions, rnn.last_out],\r\n feed_dict)\r\n\r\n time_str = datetime.datetime.now().isoformat()\r\n print(\"{}: step {}, loss {:g}, acc {:g}\".format(time_str, step, loss, accuracy))\r\n print(\"Total Expected Spam :{}/{}\".format(sum(prediction), len(prediction)))\r\n # print(\"Expected Score :{}\".format(score))\r\n # print(\"Embedding Matrix :{}\".format(p_emb))\r\n print(\"RNN :{}\".format(la_out.shape))\r\n train_summary_writer.add_summary(summaries, step)\r\n\r\n # Model Evaluation\r\n\r\n def dev_step(x_batch, y_batch, embedding, writer=None):\r\n\r\n feed_dict = {\r\n rnn.input_x: x_batch,\r\n rnn.input_y: y_batch,\r\n rnn.dropout_keep_prob: 1.0,\r\n rnn.embedding_placeholder: embedding\r\n }\r\n step, summaries, loss, accuracy = sess.run(\r\n [global_step, dev_summary_op, rnn.loss, rnn.accuracy],\r\n feed_dict)\r\n time_str = datetime.datetime.now().isoformat()\r\n print(\"{}: step {}, loss {:g}, acc {:g}\".format(time_str, step, loss, accuracy))\r\n if writer:\r\n writer.add_summary(summaries, step)\r\n\r\n # Generate batches\r\n\r\n batches = DataPreprocessing.BatchIterator(\r\n list(zip(x_train, y_train)), FLAGS.batch_size, FLAGS.num_epochs)\r\n\r\n # Training loop. 
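Runs until the batch iterator is exhausted or 20000 global steps are reached (see the break below). 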
For each batch...\r\n\r\n for batch in batches:\r\n x_batch, y_batch = zip(*batch)\r\n train_step(x_batch, y_batch, embedding)\r\n current_step = tf.train.global_step(sess, global_step)\r\n if current_step % FLAGS.evaluate_every == 0:\r\n print(\"\\nEvaluation:\")\r\n dev_step(x_dev, y_dev, embedding, writer=dev_summary_writer)\r\n print(\"\")\r\n if current_step % FLAGS.checkpoint_every == 0:\r\n path = saver.save(sess, checkpoint_prefix, global_step=current_step)\r\n print(\"Saved model checkpoint to {}\\n\".format(path))\r\n\r\n if current_step == 20000:\r\n break;\r\n\r\n sess.close();\r\n\r\ndef main(argv=None):\r\n\r\n x_raw, y_raw, embedding, vocab_processor = DataPreprocessing.Preprocessor \\\r\n (\"./data/TrainCorpus.txt\", \"./data/TrainLabel.txt\", \"./word2vec.model\")\r\n x_train, y_train, x_dev, y_dev = DataPreprocessing.DataSplit(x_raw, y_raw, 0.01)\r\n\r\n start_time = time.time()\r\n\r\n train(x_train, y_train, x_dev, y_dev, embedding, vocab_processor)\r\n\r\n end_time = time.time()\r\n time_dif = end_time - start_time\r\n print(\"Total Running Time:\" + str(time_dif))\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n tf.app.run()\r\n\r\n\r\n\r\n","sub_path":"TextCNN-tf/TrainRNN.py","file_name":"TrainRNN.py","file_ext":"py","file_size_in_byte":8619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"524771981","text":"import socket\nimport os\n\ns1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\ns2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\nprint(\"created\")\n\ns1.bind(('',8888))\ns2.bind(('',8080))\n\nprint(\"binded\")\n\nwhile(1):\n\n\td = s1.recvfrom(4096)\n\taddr = d[1]\n\targ = d[0].decode().split()[1]\n\n\tcmd = d[0].decode().split()[0]\n\n\tif cmd == \"rwd\" :\n\t\tpath = arg\n\t\tif os.path.isdir(path):\n\t\t\tos.chdir(path)\n\t\t\tprint(\"path changed to\" + str(os.getcwd()))\n\t\telse :\n\t\t\tprint(\"Directory not found\")\n\n\tif cmd == \"send\" :\n\t\tfname = arg\n\t\tprint(fname)\n\t\ts1.sendto(\"OK\".encode(),addr)\n\t\tfl = \"\"\n\t\tf = open(fname,\"wb\")\n\t\tfl = s2.recvfrom(4096)\n\t\tf.write(fl[0])\n\t\tf.close()\n\n\tif cmd == \"store\" :\n\t\tif os.path.isfile(arg):\n\t\t\ts1.sendto(\"OK\".encode(), addr)\n\t\t\tf = open(arg,\"rb\")\n\t\t\tdat = f.read()\n\t\t\tadds = s2.recvfrom(4096)[1]\n\t\t\t# print(adds)\n\t\t\ts2.sendto(dat, adds)\n\t\telse :\n\t\t\ts1.sendto(\"nf\".encode(), addr)","sub_path":"msv.py","file_name":"msv.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"369898842","text":"import os\nimport errno\n\ndef mkdir_p(directory):\n '''\n Creates a directory and all it's parents if necessary, ignoring\n \"already exists\" errors\n\n Similar to `mkdir -p`\n '''\n try:\n os.makedirs(directory)\n except OSError as error:\n if error.errno != errno.EEXIST:\n raise error\n","sub_path":"filebrowser_cloudfiles/filesystem.py","file_name":"filesystem.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"418903110","text":"import argparse\nimport os\nos.chdir('../')\nimport sys\npath = os.getcwd()\nprint(path)\nsys.path.append(path)\nfrom util.read_data import ReadInitialData\nimport os\nfrom util.symmetry_sample import Symmetry\nfrom util.trainloader import TrainLoader\nimport torch\nimport pandas as pd\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score\nfrom 
model.MoE_model import ConcatModel\nimport gensim\n\n\nclass Train:\n def __init__(self, dimension, lr, decay, patience):\n self.dimension = dimension\n self.lr = lr\n self.decay = decay\n self.patience = patience\n\n def getLoss(self, model, wordsvector, label):\n \"\"\"\n :param wordsvector: torch.tensor\n :param label: torch.tensor\n :return:\n \"\"\"\n word1_vector, word2_vector, tag = wordsvector.split(self.dimension, 1)\n if torch.cuda.is_available():\n word1_vector, word2_vector, label, tag = word1_vector.cuda().float(), word2_vector.cuda().float(), label.cuda().float(), tag.cuda().float()\n pred_vector = model(word1_vector, word2_vector, tag)\n else:\n pred_vector = model(word1_vector.float(), word2_vector.float(), tag.float())\n\n return model.loss(pred_vector, label).cpu()\n\n def train(self, model, train_loader, train_wordsvector, train_label, validation_wordsvector,\n validation_label, pos):\n gpus = [0]\n cuda_gpu = torch.cuda.is_available()\n if cuda_gpu:\n model = torch.nn.DataParallel(model, device_ids=gpus).cuda()\n optimizer = torch.optim.Adam(model.parameters(), lr=self.lr)\n train_loss = []\n validation_loss = []\n epoch_train_F1 = []\n epoch_validation_F1 = []\n\n max_validation_f1_score = 0\n last_model = model\n the_last_validation_f1score = 0\n trigger_times = 0\n\n Iterations = 100\n for epoch in range(Iterations):\n for step, (batch_x, batch_label) in enumerate(train_loader):\n batch_word1, batch_word2, batch_tag = batch_x.split(self.dimension, 1)\n if cuda_gpu:\n batch_word1, batch_word2, batch_label, batch_tag = batch_word1.cuda(), batch_word2.cuda(), batch_label.cuda().float(), batch_tag.cuda()\n pred_vector = model(batch_word1.float(), batch_word2.float(), batch_tag.float())\n else:\n pred_vector = model(batch_word1.float(), batch_word2.float(), batch_tag.float())\n\n if isinstance(model, torch.nn.DataParallel):\n model = model.module\n\n loss = model.loss(pred_vector, batch_label)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if cuda_gpu:\n torch.cuda.empty_cache()\n if epoch % 15 == 0 and epoch != 0:\n lr = self.lr * (self.decay ** (epoch // 15))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n train_quota = self.test(model, train_wordsvector, train_label)\n epoch_train_F1.append(train_quota[-1])\n epoch_train_loss = self.getLoss(model, train_wordsvector, train_label).item()\n print('epoch {}/{} {} train loss is {}, F1-score is {}'.format(epoch + 1, Iterations, pos, epoch_train_loss, train_quota[-1]))\n train_loss.append(epoch_train_loss)\n\n validation_quota = self.test(model, validation_wordsvector, validation_label)\n epoch_validation_F1.append(validation_quota[-1])\n epoch_validation_loss = self.getLoss(model, validation_wordsvector, validation_label).item()\n print('epoch {}/{} {} validation loss is {}, F1-score is {}'.format(epoch + 1, Iterations, pos,\n epoch_validation_loss,\n validation_quota[-1]))\n validation_loss.append(epoch_validation_loss)\n\n if validation_quota[-1] > max_validation_f1_score:\n max_validation_f1_score = validation_quota[-1]\n import copy\n last_model = copy.deepcopy(model)\n\n if validation_quota[-1] <= the_last_validation_f1score:\n trigger_times += 1\n print('trigger times: {}'.format(trigger_times))\n\n if trigger_times >= self.patience:\n print('model early stopping.')\n return last_model\n else:\n print('trigger times: 0')\n trigger_times = 0\n the_last_validation_f1score = validation_quota[-1]\n\n return last_model\n\n def test(self, model, wordsvector, label):\n word1_vector, 
word2_vector, tag = wordsvector.split(self.dimension, 1)\n if torch.cuda.is_available():\n word1_vector, word2_vector, tag = word1_vector.cuda().float(), word2_vector.cuda().float(), tag.cuda().float()\n pred_vector = model(word1_vector, word2_vector, tag)\n\n pred_vector = torch.sigmoid(pred_vector)\n predict_label = torch.where(pred_vector > 0.5, torch.ones_like(pred_vector), torch.zeros_like(pred_vector))\n predict_label = predict_label.cpu()\n else:\n pred_vector = model(word1_vector.float(), word2_vector.float(), tag.float())\n pred_vector = torch.sigmoid(pred_vector)\n predict_label = torch.where(pred_vector > 0.5, torch.ones_like(pred_vector), torch.zeros_like(pred_vector))\n\n accuracy = accuracy_score(label, predict_label)\n precision = precision_score(label, predict_label)\n recall = recall_score(label, predict_label)\n f1score = f1_score(label, predict_label)\n\n return [accuracy, precision, recall, f1score]\n\n\ndef main():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--pos', type=str, default='noun')\n parser.add_argument('--embed', type=str, default='fasttext')\n parser.add_argument('--expert_size', type=int, default=256)\n parser.add_argument('--embed_size', type=int, default=300)\n parser.add_argument('--projection_size', type=int, default=4)\n parser.add_argument('--layer_hidden_size', type=int, default=20)\n parser.add_argument('--batch_size', type=int, default=64)\n parser.add_argument('--lr', type=float, default=0.01)\n parser.add_argument('--decay', type=float, default=0.5)\n parser.add_argument('--patience', type=int, default=5)\n parser.add_argument('--drop_out', type=float, default=0)\n args = parser.parse_args()\n\n path = './dataset_tag/'\n\n if args.pos == 'adj':\n train_file = path + 'tag-adjective-pairs.train'\n validation_file = path + 'tag-adjective-pairs.val'\n test_file = path + 'tag-adjective-pairs.test'\n elif args.pos == 'verb':\n train_file = path + 'tag-verb-pairs.train'\n validation_file = path + 'tag-verb-pairs.val'\n test_file = path + 'tag-verb-pairs.test'\n else:\n train_file = path + 'tag-noun-pairs.train'\n validation_file = path + 'tag-noun-pairs.val'\n test_file = path + 'tag-noun-pairs.test'\n\n print(train_file)\n print(os.getcwd())\n if args.embed == 'fasttext':\n data, vocab = ReadInitialData.load_vectors('./embedding/wiki-news-300d-1M-simple.vec')\n elif args.embed == 'word2vec':\n data, vocab = ReadInitialData.load_vectors('./embedding/GoogleNews-vectors-negative300-simple.vec')\n elif args.embed == 'glove':\n data, vocab = ReadInitialData.load_vectors('./embedding/glove.42B.300d.simple.txt')\n elif args.embed == 'dLCE':\n # tips: the embed size of dLCE is 100\n modelfile = './embedding/wiki_en_dLCE_100d_minFreq_100_simple.bin'\n data, vocab = ReadInitialData.load_vectors(modelfile)\n else:\n print('embed type ERROR')\n raise ValueError\n\n train_wordpairs = ReadInitialData.readtagfile(train_file)\n validation_wordpairs = ReadInitialData.readtagfile(validation_file)\n test_wordpairs = ReadInitialData.readtagfile(test_file)\n\n train_sym_wordpairs = Symmetry.getsymmetrysamples(train_wordpairs, label=1, target=1)\n train_sym_wordpairs = Symmetry.getsymmetrysamples(train_sym_wordpairs, label=0, target=0)\n\n trainLoader = TrainLoader(dimension=args.embed_size, batch_size=args.batch_size, tag=True)\n\n train_loader = trainLoader.getTrainLoader(train_sym_wordpairs, data)\n\n train_wordsvector, train_label = trainLoader.getWordsvectorAndLabel(train_wordpairs, data)\n 
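# the validation and test splits are vectorized with the same loader and embedding data as the training pairs\n 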
validation_wordsvector, validation_label = trainLoader.getWordsvectorAndLabel(validation_wordpairs, data)\n test_wordsvector, test_label = trainLoader.getWordsvectorAndLabel(test_wordpairs, data)\n\n Times = 10\n\n result = pd.DataFrame(columns=['accuracy', 'precision', 'recall', 'F1-score'])\n\n train_model = Train(dimension=args.embed_size, lr=args.lr, decay=args.decay, patience=args.patience)\n\n for time in range(Times):\n model = ConcatModel(expert_size=args.expert_size, embedding_size=args.embed_size, projection_size=args.projection_size, layer_hidden_size=args.layer_hidden_size, drop_out=args.drop_out)\n model = train_model.train(model, train_loader, train_wordsvector, train_label, validation_wordsvector, validation_label, pos=args.pos)\n result.loc[time] = train_model.test(model, test_wordsvector, test_label)\n print(result.loc[time])\n result.loc[Times] = result.mean()\n print(result)\n result_file = './result_' + args.pos + '.csv'\n result.to_csv(result_file)\n\n\nif __name__ == '__main__':\n main()","sub_path":"model/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":9729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"374239382","text":"\"\"\"Contains methods to assign classical and diameter-modified Strahler orders to\nvessels. Can group segments into elements.\nDifficulties that arise:\n * selfloops attached to endpoints --> endpoints not identified as such.\n * selfloops consisting of multiple points are not easily identified as such, \n if they contain vertices of order > 2.\nThese difficulties are solved on the basis of a flow computation. True \nendpoints are set to high pressure, the root is set to low pressure. Edges with\n(almost) no-flow are assigned order zero.\n\"\"\"\n\n\nfrom __future__ import division, print_function, with_statement\nimport numpy as np\nimport pylab\nimport vgm\n\n\n\n\ndef upstream_edges(G, edge):\n \"\"\"Returns the indices of those edges that connect the common vertex with\n vertices of higher pressure.\n INPUT: G: VascularGraph.\n edge: Index of the starting edge.\n OUTPUT: Edge-indices of the upstream edges.\n \"\"\"\n upstreamEdges = []\n referencePressure = G.es[edge]['pressure']\n vTuple = G.es[edge].tuple\n vertex = sorted(zip(G.vs[vTuple]['pressure'], vTuple))[1][1]\n for e in G.adjacent(vertex):\n if e != edge:\n if G.es[e]['pressure'] > referencePressure:\n upstreamEdges.append(e)\n return upstreamEdges \n\n\n\ndef downstream_edges(G, edge):\n \"\"\"Returns the indices of those edges that connect the common vertex with\n vertices of lower pressure.\n INPUT: G: VascularGraph.\n edge: Index of the starting edge.\n OUTPUT: Edge-indices of the downstream edges.\n \"\"\"\n downstreamEdges = []\n referencePressure = G.es[edge]['pressure']\n vTuple = G.es[edge].tuple\n vertex = sorted(zip(G.vs[vTuple]['pressure'], vTuple))[0][1]\n for e in G.adjacent(vertex): \n if e != edge:\n if G.es[e]['pressure'] <= referencePressure:\n downstreamEdges.append(e)\n return downstreamEdges \n\n\ndef classical_strahler_order(G):\n \"\"\"Assigns order numbers to the edges of a tree-structured VascularGraph.\n The order is determined using the Strahler method, as described in the work\n of Strahler 'Quantitative analysis of watershed geomorphology' (Trans Am \n Geophys Union, 1957).\n INPUT: G: VascularGraph of tree-structure.\n OUTPUT: None, G is modified in-place.\n \"\"\"\n # Remove order property, if it already exists:\n if 'order' in G.es.attribute_names():\n 
print('WARNING: removing pre-existing order!')\n G.es['order'] = [None for e in G.es]\n\n # Find root and upstream endpoints on the basis of topology (the endpoint \n # with the biggest diameter edge is the root vertex, all other endpoints are\n # upstream endpoints):\n endpoints = G.get_endpoints()\n if 'attachmentVertex' in G.attributes():\n rootVertex = G['attachmentVertex']\n else:\n maxDiameter = 0.0\n for ep in endpoints:\n if G.es(G.adjacent(ep))['diameter'][0] > maxDiameter:\n maxDiameter = G.es(G.adjacent(ep))['diameter'][0]\n rootVertex = ep\n print(rootVertex)\n if rootVertex in endpoints: \n endpoints.remove(rootVertex) \n\n # Solve for pressure to create an upstream / downstream ordering:\n G.vs[endpoints]['pBC'] = [2.0 for e in endpoints]\n G.vs[rootVertex]['pBC'] = 1.0\n if 'conductance' not in G.es.attribute_names():\n G.es['conductance'] = [1.0 for e in G.es]\n LS = vgm.LinearSystem(G)\n LS.solve_direct() \n\n # Order edges by pressure, establishing a direction of flow: \n G.es['pressure'] = [(G.vs[e.source]['pressure'] + \n G.vs[e.target]['pressure']) / 2.0 for e in G.es]\n peList = sorted(zip(G.es['pressure'], xrange(G.ecount())), reverse=True)\n \n # Move downstream, assigning orders as we go along:\n for pressure, edge in peList:\n # If no upstream edges exist, the edge is either an inflow edge or \n # its flow value must be close to zero (e.g. in the case of an endpoint\n # or a loop). Assign order zero.\n upstreamEdges = upstream_edges(G, edge)\n if np.allclose(G.es[edge]['flow'], 0.0) or len(upstreamEdges) == 0:\n G.es[edge]['order'] = 0\n # If there is only one upstream edge, its order is retained.\n # If the upstream orders differ, the maximum order is chosen.\n # If the upstream orders are equal, the new order is the upstream\n # order + 1:\n else:\n orders = G.es[upstreamEdges]['order']\n if min(orders) == max(orders) and len(orders) > 1:\n G.es[edge]['order'] = orders[0] + 1\n else:\n G.es[edge]['order'] = max(orders)\n\n\n\ndef modified_strahler_order(G, fraction=1.0, limit=True, \n maxIterations=10, fstep=0.01):\n \"\"\"Assigns order numbers to the edges of a tree-structured VascularGraph.\n The order is determined using the diameter-modified Strahler method, as\n described in the work of Kassab et al. 'Morphometry of pig coronary arterial\n trees' (Am J Physiol, 1993).\n This function assigns orders based primarily on the tree topology, e.g.\n ending vessels are assigned order zero, even if their diameter is well\n outside the mean+std of the zero-order bin. Also, an order decrease in\n downstream direction is not allowed, even if the diameter of the vessel\n would suggest this.\n INPUT: G: VascularGraph of tree-structure.\n fraction: The fraction of edge-orders that need to remain constant\n from one iteration to the next in order to stop.\n limit: Should the order increase going downstream be limited to one?\n (Boolean.)\n maxIterations: The maximum number of iterations before the order\n assignment is aborted and the fraction of required \n unchanged orders is lowered.\n fstep: The amount by which fraction is to be lowered each time the\n iteration count reaches maxIterations. \n OUTPUT: Returns a bash-like exit code, i.e. 0 for a successful run, and 1 \n if the provided fraction had to be reduced to succeed. 
\n The edges of G are given a new property 'order'.\n \"\"\"\n\n # Begin by assigning the classical Strahler order:\n classical_strahler_order(G)\n\n \n # Iteratively assign modified Strahler orders until convergence:\n diameters = np.array(G.es['diameter']) \n edgePressures = [(G.vs[e.source]['pressure'] + \n G.vs[e.target]['pressure']) / 2.0 for e in G.es]\n peList = sorted(zip(edgePressures, xrange(G.ecount())), reverse=True) \n\n ec = 0\n counter = 0\n while True:\n if counter == maxIterations:\n fraction -= fstep\n ec = 1\n print(fraction)\n counter = 0\n else:\n counter += 1\n oldOrder = G.es['order']\n maxOrder = max(oldOrder)\n mean = []\n std = []\n for order in range(maxOrder+1):\n mean.append(np.mean(G.es(order_eq=order)['diameter']))\n std.append(np.std(G.es(order_eq=order)['diameter']))\n bounds = {}\n for order in range(maxOrder):\n bounds[order] = (mean[order] + std[order] + \n mean[order+1] - std[order+1]) / 2.0\n bounds[maxOrder] = 1e100 \n\n \n # Move downstream, assigning orders as we go along:\n for pressure, edge in peList:\n # If no upstream edges exist, the edge is either an inflow edge or \n # its flow value must be close to zero (e.g. in the case of an endpoint\n # or a loop). Assign order zero.\n upstreamEdges = upstream_edges(G, edge)\n if np.allclose(G.es[edge]['flow'], 0.0) or len(upstreamEdges) == 0:\n G.es[edge]['order'] = 0\n # If the diameter surpasses the bound of the current maximum\n # upstream order, the new order is increased accordingly.\n # Else, the new order is the maximum upstream order:\n else:\n order = max(G.es[upstreamEdges]['order'])\n while diameters[edge] > bounds[order]:\n order = order + 1\n if limit:\n break\n G.es[edge]['order'] = order \n \n # End iteration if a given fraction of orders remains constant:\n if sum([1 for z in zip(G.es['order'], oldOrder) if z[0] == z[1]]) >= \\\n fraction * len(oldOrder):\n return ec\n\n\n\ndef modified_strahler_order_v2(G, startingOrder=1, enforceMonotony=True):\n \"\"\"Assigns order numbers to the edges of a tree-structured VascularGraph.\n The order is determined using the diameter-modified Strahler method, as\n described in the work of Kassab et al. 'Morphometry of pig coronary arterial\n trees' (Am J Physiol, 1993).\n This function reassigns orders based primarily on the mean and std of the \n order bins. The enforce-monotony correction, which takes the topological \n ordering into account is added as a final reassignment step. 
Without this \n correction, the diameter-ranges of the different orders (> startingOrder) \n are non-overlapping.\n INPUT: G: VascularGraph of tree-structure.\n startingOrder: The lowest order that should be included in the\n sorting process.\n enforceMonotony: Whether or not to enforce that orders will never\n decrease going downstream.\n OUTPUT: None, the edges of G are given a new property 'order'\n \"\"\"\n\n # Begin by assigning the classical Strahler order:\n classical_strahler_order(G)\n\n # Iteratively assign modified Strahler orders until convergence:\n diameters = np.array(G.es['diameter'])\n preserve = G.es(order_lt=startingOrder).indices\n\n while True:\n oldOrder = G.es['order']\n maxOrder = max(oldOrder)\n\n mean = []\n std = []\n for order in range(maxOrder+1):\n mean.append(np.mean(G.es(order_eq=order)['diameter']))\n std.append(np.std(G.es(order_eq=order)['diameter']))\n\n bounds = {}\n for order in range(1, maxOrder):\n bounds[order] = ([(mean[order-1] + std[order-1] + \n mean[order] - std[order]) / 2.0, \n (mean[order] + std[order] + \n mean[order+1] - std[order+1]) / 2.0])\n bounds[0] = [0.0, bounds[1][0]]\n bounds[maxOrder] = [bounds[maxOrder-1][1], 1e100]\n \n for order in range(startingOrder, maxOrder+1):\n eIndices = np.nonzero((diameters >= bounds[order][0]) * \n (diameters < bounds[order][1]))[0].tolist()\n eIndices = [e for e in eIndices if e not in preserve]\n G.es[eIndices]['order'] = [order for e in eIndices]\n\n if G.es['order'] == oldOrder:\n break\n \n # Ensure that orders do not decrease in downstream direction:\n if enforceMonotony:\n while True:\n oldOrder = G.es['order']\n for edge in G.es:\n edge['order'] = max(pylab.flatten([edge['order'],\n G.es[upstream_edges(G, edge.index)]['order']]))\n if G.es['order'] == oldOrder:\n break\n\n\n\n\nclass Permutation: \n \"\"\"Implements all possible permutations of a given array.\n \"\"\"\n def __init__(self, alist): \n \"\"\"Initializes the Permutation generator.\n INPUT: alist: List or array to be permuted.\n OUTPUT: None\n \"\"\"\n self._data = alist[:] \n self._current = [] \n def __iter__(self): \n return self.next() \n def next(self): \n for elem in self._data: \n if elem not in self._current: \n self._current.append(elem) \n if len(self._current) == len(self._data): \n yield self._current[:] \n else: \n for v in self.next(): \n yield v \n self._current.pop() \n\n\n\n\ndef pair_edges(G, vertex):\n \"\"\"Pairs edges incident to a VascularGraph vertex depending on Strahler \n order, flow direction and diameter. Attempts to pair each upstream edge\n with a downstream edge of the same order and closely matching diameter.\n INPUT: G: VascularGraph with assigned Strahler order and pressure defined\n at the vertices.\n vertex: The index of the vertex at which edges are to be paired.\n OUTPUT: List of edge pairs. Unpaired edges are not returned. \n \"\"\"\n # Create empty pair list, which will be returned as such, if no pairings \n # are made:\n pairs = []\n\n # Determine upstream and downstream edges incident to vertex:\n us = []\n ds = []\n pressure = G.vs[vertex]['pressure']\n \n # Abort unphysiological cases:\n if len(G.neighbors(vertex)) > 5:\n return pairs\n \n for nb, aj in zip(G.neighbors(vertex), G.adjacent(vertex)):\n if G.vs[nb]['pressure'] > pressure:\n us.append(aj)\n else:\n ds.append(aj)\n\n # Loop over all orders present at the junction. 
Connections are only \n # possible between edges of the same order:\n orders = G.es[us]['order']\n orders.extend(G.es[ds]['order'])\n orders = np.unique(orders).tolist()\n for order in orders:\n uso = [e for e in us if G.es[e]['order'] == order]\n dso = [e for e in ds if G.es[e]['order'] == order]\n\n # Continue if either up- or downstream edges of the current order do \n # not exist:\n if min(len(uso), len(dso)) == 0:\n continue\n\n counter = -1\n while len(uso) < len(dso):\n uso.append(counter)\n counter = counter - 1\n while len(dso) < len(uso):\n dso.append(counter)\n counter = counter - 1\n \n # Make a list of all upstream edge permutations:\n usoperms = list(Permutation(uso))\n \n # Determine the minimal sum of diameter differences to choose the best \n # pairing of upstream and downstream edges:\n mindiff = 1e100\n for i, usoperm in enumerate(usoperms):\n tmpdiff = 0\n for u, d in zip(usoperm, dso):\n if min([u, d]) < 0:\n continue\n tmpdiff = tmpdiff + abs(G.es[u]['diameter'] - G.es[d]['diameter'])\n if tmpdiff < mindiff:\n mindiff = tmpdiff\n bestusoperm = usoperm[:]\n \n for z in zip(bestusoperm, dso): \n if min(z) < 0:\n continue\n else:\n pairs.append(z)\n \n return pairs \n\n\n\ndef assign_elements(G):\n \"\"\"Traverses the vertices of a VascularGraph whose edges have been given\n Strahler orders. Edges incident to each vertex are paired according to \n order and diameter values, as well as flow direction. Consecutive edges \n (segments) of the same order with suitable (closely matching) diameters \n form a common element.\n The result is a new VascularGraph that reflects this ordering scheme and \n can thus be used for statistical analysis.\n INPUT: G: VascularGraph with Strahler-ordered edges.\n OUTPUT: Gel: VascularGraph of connected elements.\n \"\"\"\n pairs = []\n for v in xrange(G.vcount()):\n pairs.extend(pair_edges(G, v))\n \n Ge = vgm.VascularGraph(G.ecount())\n Ge.add_edges(pairs)\n \n co = Ge.components()\n\n Gel = vgm.VascularGraph(len(co))\n Gel.vs['edges'] = [c for c in co]\n Gel.vs['order'] = [G.es[c[0]]['order'] for c in co]\n Gel.vs['vertices'] = [G.get_edge_vertices(e) for e in Gel.vs['edges']]\n\n edges = []\n connectingVertices = []\n for v1 in Gel.vs:\n for v2 in Gel.vs(xrange(v1.index + 1, Gel.vcount())):\n for vertex in v1['vertices']:\n if vertex in v2['vertices']:\n edges.append((v1.index, v2.index))\n connectingVertices.append(vertex)\n break\n Gel.add_edges(edges)\n Gel.es['cVertex'] = connectingVertices\n\n return Gel\n\n\n\n","sub_path":"code/strahler.py","file_name":"strahler.py","file_ext":"py","file_size_in_byte":16248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"59133741","text":"# Copyright 2017-2023 Lawrence Livermore National Security, LLC and other\n# Hatchet Project Developers. 
See the top-level LICENSE file for details.\n#\n# SPDX-License-Identifier: MIT\n\nimport copy\nimport sys\nimport traceback\n\nfrom collections import defaultdict\n\nimport pandas as pd\nimport numpy as np\nimport multiprocess as mp\nimport json\n\nfrom .node import Node\nfrom .graph import Graph\nfrom .frame import Frame\nfrom .query import (\n is_hatchet_query,\n ObjectQuery,\n parse_string_dialect,\n QueryEngine,\n AbstractQuery,\n)\nfrom .external.console import ConsoleRenderer\nfrom .util.dot import trees_to_dot\nfrom .util.deprecated import deprecated_params\n\ntry:\n from .cython_modules.libs import graphframe_modules as _gfm_cy\nexcept ImportError:\n print(\"-\" * 80)\n print(\n \"\"\"Error: Shared object (.so) not found for cython module.\\n\\tPlease run install.sh from the hatchet root directory to build modules.\"\"\"\n )\n print(\"-\" * 80)\n traceback.print_exc()\n raise\n\n\ndef parallel_apply(filter_function, dataframe, queue):\n \"\"\"A function called in parallel, which does a pandas apply on part of a\n dataframe and returns the results via multiprocessing queue function.\"\"\"\n filtered_rows = dataframe.apply(filter_function, axis=1)\n filtered_df = dataframe[filtered_rows]\n queue.put(filtered_df)\n\n\nclass GraphFrame:\n \"\"\"An input dataset is read into an object of this type, which includes a graph\n and a dataframe.\n \"\"\"\n\n def __init__(\n self,\n graph,\n dataframe,\n exc_metrics=None,\n inc_metrics=None,\n default_metric=\"time\",\n metadata={},\n ):\n \"\"\"Create a new GraphFrame from a graph and a dataframe.\n\n Likely, you do not want to use this function.\n\n See ``from_hpctoolkit``, ``from_caliper``, ``from_gprof_dot``, and\n other reader methods for easier ways to create a ``GraphFrame``.\n\n Arguments:\n graph (Graph): Graph of nodes in this GraphFrame.\n dataframe (DataFrame): Pandas DataFrame indexed by Nodes\n from the graph, and potentially other indexes.\n exc_metrics: list of names of exclusive metrics in the dataframe.\n inc_metrics: list of names of inclusive metrics in the dataframe.\n \"\"\"\n if graph is None:\n raise ValueError(\"GraphFrame() requires a Graph\")\n if dataframe is None:\n raise ValueError(\"GraphFrame() requires a DataFrame\")\n\n if \"node\" not in list(dataframe.index.names):\n raise ValueError(\n \"DataFrames passed to GraphFrame() must have an index called 'node'.\"\n )\n\n self.graph = graph\n self.dataframe = dataframe\n self.exc_metrics = [] if exc_metrics is None else exc_metrics\n self.inc_metrics = [] if inc_metrics is None else inc_metrics\n self.default_metric = default_metric\n self.metadata = metadata\n self.query_engine = QueryEngine()\n\n @staticmethod\n def from_hpctoolkit(dirname):\n \"\"\"Read an HPCToolkit database directory into a new GraphFrame.\n\n Arguments:\n dirname (str): parent directory of an HPCToolkit\n experiment.xml file\n\n Returns:\n (GraphFrame): new GraphFrame containing HPCToolkit profile data\n \"\"\"\n # import this lazily to avoid circular dependencies\n from .readers.hpctoolkit_reader import HPCToolkitReader\n\n return HPCToolkitReader(dirname).read()\n\n @staticmethod\n def from_caliper(filename_or_stream, query=None):\n \"\"\"Read in a Caliper .cali or .json file.\n\n Args:\n filename_or_stream (str or file-like): name of a Caliper output\n file in `.cali` or JSON-split format, or an open file object\n to read one\n query (str): cali-query in CalQL format\n \"\"\"\n # import this lazily to avoid circular dependencies\n from .readers.caliper_reader import CaliperReader\n\n 
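# CaliperReader.read() parses the input and returns a new GraphFrame\n 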
return CaliperReader(filename_or_stream, query).read()\n\n @staticmethod\n def from_caliperreader(\n filename_or_caliperreader, native=False, string_attributes=[]\n ):\n \"\"\"Read in a native Caliper `cali` file using Caliper's python reader.\n\n Args:\n filename_or_caliperreader (str or CaliperReader): name of a Caliper\n output file in `.cali` format, or a CaliperReader object\n native (bool): use native or user-readable metric names (default)\n string_attributes (str or list, optional): Adds existing string\n attributes from within the caliper file to the dataframe\n \"\"\"\n # import this lazily to avoid circular dependencies\n from .readers.caliper_native_reader import CaliperNativeReader\n\n return CaliperNativeReader(\n filename_or_caliperreader, native, string_attributes\n ).read()\n\n @staticmethod\n def from_spotdb(db_key, list_of_ids=None):\n \"\"\"Read multiple graph frames from a SpotDB instance\n\n Args:\n db_key (str or SpotDB object): locator for SpotDB instance\n This can be a SpotDB object directly, or a locator for a spot\n database, which is a string with either:\n\n * A directory for .cali files,\n * A .sqlite file name\n * A SQL database URL (e.g., \"mysql://hostname/db\")\n\n list_of_ids: The list of run IDs to read from the database.\n If this is None, returns all runs.\n\n Returns:\n A list of graphframes, one for each requested run that was found\n \"\"\"\n\n from .readers.spotdb_reader import SpotDBReader\n\n return SpotDBReader(db_key, list_of_ids).read()\n\n @staticmethod\n def from_gprof_dot(filename):\n \"\"\"Read in a DOT file generated by gprof2dot.\"\"\"\n # import this lazily to avoid circular dependencies\n from .readers.gprof_dot_reader import GprofDotReader\n\n return GprofDotReader(filename).read()\n\n @staticmethod\n def from_cprofile(filename):\n \"\"\"Read in a pstats/prof file generated using python's cProfile.\"\"\"\n # import this lazily to avoid circular dependencies\n from .readers.cprofile_reader import CProfileReader\n\n return CProfileReader(filename).read()\n\n @staticmethod\n def from_pyinstrument(filename):\n \"\"\"Read in a JSON file generated using Pyinstrument.\"\"\"\n # import this lazily to avoid circular dependencies\n from .readers.pyinstrument_reader import PyinstrumentReader\n\n return PyinstrumentReader(filename).read()\n\n @staticmethod\n def from_tau(dirname):\n \"\"\"Read in a profile generated using TAU.\"\"\"\n # import this lazily to avoid circular dependencies\n from .readers.tau_reader import TAUReader\n\n return TAUReader(dirname).read()\n\n @staticmethod\n def from_timemory(input=None, select=None, **_kwargs):\n \"\"\"Read in timemory data.\n\n Links:\n https://github.com/NERSC/timemory\n https://timemory.readthedocs.io\n\n Arguments:\n input (str or file-stream or dict or None):\n Valid argument types are:\n\n 1. Filename for a timemory JSON tree file\n 2. Open file stream to one of these files\n 3. Dictionary from timemory JSON tree\n\n\n Currently, timemory supports two JSON layouts: flat and tree.\n The former is a 1D-array representation of the hierarchy which\n represents the hierarchy via indentation schemes in the labels\n and is not compatible with hatchet. The latter is a hierarchical\n representation of the data and is the required JSON layout when\n using hatchet. Timemory JSON tree files typically have the\n extension \".tree.json\".\n\n If input is None, this assumes that timemory has been recording\n data within the application that is using hatchet. 
In this\n situation, this method will attempt to import the data directly\n from timemory.\n\n At the time of this writing, the direct data import will:\n\n 1. Stop any currently collecting components\n 2. Aggregate child thread data of the calling thread\n 3. Clear all data on the child threads\n 4. Aggregate the data from any MPI and/or UPC++ ranks.\n\n\n Thus, if MPI or UPC++ is used, every rank must call this routine.\n The zeroth rank will have the aggregation and all the other\n non-zero ranks will only have the rank-specific data.\n\n Whether or not the per-thread and per-rank data itself is\n combined is controlled by the `collapse_threads` and\n `collapse_processes` attributes in the `timemory.settings`\n submodule.\n\n In the C++ API, it is possible for only #1 to be applied and data\n can be obtained for an individual thread and/or rank without\n aggregation. This is not currently available to Python, however,\n it can be made available upon request via a GitHub Issue.\n\n select (list of str):\n A list of strings which match the component enumeration names, e.g. [\"cpu_clock\"].\n\n per_thread (boolean):\n Ensures that when applying filters to the graphframe, frames with\n identical name/file/line/etc. info but from different threads are\n not combined\n\n per_rank (boolean):\n Ensures that when applying filters to the graphframe, frames with\n identical name/file/line/etc. info but from different ranks are\n not combined\n\n \"\"\"\n from .readers.timemory_reader import TimemoryReader\n\n if input is not None:\n try:\n return TimemoryReader(input, select, **_kwargs).read()\n except IOError:\n pass\n else:\n try:\n import timemory\n\n TimemoryReader(timemory.get(hierarchy=True), select, **_kwargs).read()\n except ImportError:\n print(\n \"Error! timemory could not be imported. Provide filename, file stream, or dict.\"\n )\n raise\n\n @staticmethod\n def from_literal(graph_dict):\n \"\"\"Create a GraphFrame from a list of dictionaries.\"\"\"\n # import this lazily to avoid circular dependencies\n from .readers.literal_reader import LiteralReader\n\n return LiteralReader(graph_dict).read()\n\n @staticmethod\n def from_lists(*lists):\n \"\"\"Make a simple GraphFrame from lists.\n\n This creates a Graph from lists (see ``Graph.from_lists()``) and uses\n it as the index for a new GraphFrame. 
Every node in the new graph has\n exclusive time of 1 and inclusive time is computed automatically.\n\n \"\"\"\n graph = Graph.from_lists(*lists)\n graph.enumerate_traverse()\n\n df = pd.DataFrame({\"node\": list(graph.traverse())})\n df[\"time\"] = [1.0] * len(graph)\n df[\"name\"] = [n.frame[\"name\"] for n in graph.traverse()]\n df.set_index([\"node\"], inplace=True)\n df.sort_index(inplace=True)\n\n gf = GraphFrame(graph, df, [\"time\"], [])\n gf.update_inclusive_columns()\n return gf\n\n @staticmethod\n def from_json(json_spec, **kwargs):\n from .readers.json_reader import JsonReader\n\n return JsonReader(json_spec).read(**kwargs)\n\n @staticmethod\n def from_hdf(filename, **kwargs):\n # import this lazily to avoid circular dependencies\n from .readers.hdf5_reader import HDF5Reader\n\n return HDF5Reader(filename).read(**kwargs)\n\n def to_hdf(self, filename, key=\"hatchet_graphframe\", **kwargs):\n # import this lazily to avoid circular dependencies\n from .writers.hdf5_writer import HDF5Writer\n\n HDF5Writer(filename).write(self, key=key, **kwargs)\n\n def copy(self):\n \"\"\"Return a partially shallow copy of the graphframe.\n\n This copies the DataFrame object, but the data is comprised of references. The Graph is shared between self and the new GraphFrame.\n\n Arguments:\n self (GraphFrame): Object to make a copy of.\n\n Returns:\n other (GraphFrame): Copy of self\n graph (graph): Reference to self's graph\n dataframe (DataFrame): Pandas \"non-deep\" copy of dataframe\n exc_metrics (list): Copy of self's exc_metrics\n inc_metrics (list): Copy of self's inc_metrics\n default_metric (str): N/A\n metadata (dict): Copy of self's metadata\n \"\"\"\n return GraphFrame(\n self.graph,\n self.dataframe.copy(deep=False),\n copy.copy(self.exc_metrics),\n copy.copy(self.inc_metrics),\n self.default_metric,\n copy.copy(self.metadata),\n )\n\n def deepcopy(self):\n \"\"\"Return a deep copy of the graphframe.\n\n Arguments:\n self (GraphFrame): Object to make a copy of.\n\n Returns:\n other (GraphFrame): Copy of self\n graph (graph): Deep copy of self's graph\n dataframe (DataFrame): Pandas \"deep\" copy with node objects updated to match graph from \"node_clone\"\n exc_metrics (list): Copy of self's exc_metrics\n inc_metrics (list): Copy of self's inc_metrics\n default_metric (str): N/A\n metadata (dict): Copy of self's metadata\n \"\"\"\n node_clone = {}\n graph_copy = self.graph.copy(node_clone)\n dataframe_copy = self.dataframe.copy()\n\n index_names = dataframe_copy.index.names\n dataframe_copy.reset_index(inplace=True)\n\n dataframe_copy[\"node\"] = dataframe_copy[\"node\"].apply(lambda x: node_clone[x])\n\n dataframe_copy.set_index(index_names, inplace=True)\n\n return GraphFrame(\n graph_copy,\n dataframe_copy,\n copy.deepcopy(self.exc_metrics),\n copy.deepcopy(self.inc_metrics),\n self.default_metric,\n copy.deepcopy(self.metadata),\n )\n\n def drop_index_levels(self, function=np.mean):\n \"\"\"Drop all index levels but `node`.\"\"\"\n index_names = list(self.dataframe.index.names)\n index_names.remove(\"node\")\n\n # create dict that stores aggregation function for each column\n agg_dict = {}\n for col in self.dataframe.columns.tolist():\n if col in self.exc_metrics + self.inc_metrics:\n agg_dict[col] = function\n else:\n agg_dict[col] = lambda x: x.iloc[0]\n\n # perform a groupby to merge nodes that just differ in index columns\n self.dataframe.reset_index(level=\"node\", inplace=True)\n agg_df = self.dataframe.groupby(\"node\").agg(agg_dict)\n\n self.dataframe = agg_df\n\n def 
filter(\n        self,\n        filter_obj,\n        squash=True,\n        update_inc_cols=True,\n        num_procs=mp.cpu_count(),\n        rec_limit=1000,\n        multi_index_mode=\"off\",\n    ):\n        \"\"\"Filter the dataframe using a user-supplied function.\n\n        Note: Operates in parallel on user-supplied lambda functions.\n\n        Arguments:\n            filter_obj (callable, list, or QueryMatcher): the filter to apply to the GraphFrame.\n            squash (boolean, optional): if True, automatically call squash for the user.\n            update_inc_cols (boolean, optional): if True, update inclusive columns when performing squash.\n            num_procs (int, optional): number of processes to use when applying a\n                callable filter in parallel (default: all logical CPUs).\n            rec_limit (int, optional): set Python recursion limit; increase if running into\n                recursion depth errors (default: 1000).\n            multi_index_mode (str, optional): passed through to the query classes to\n                control matching on multi-indexed dataframes (default: \"off\").\n        \"\"\"\n        sys.setrecursionlimit(rec_limit)\n\n        dataframe_copy = self.dataframe.copy()\n\n        index_names = self.dataframe.index.names\n        dataframe_copy.reset_index(inplace=True)\n\n        filtered_df = None\n\n        if callable(filter_obj):\n            # applying pandas filter using the callable function\n            if num_procs > 1:\n                # perform filter in parallel (default)\n                queue = mp.Queue()\n                processes = []\n                returned_frames = []\n                subframes = np.array_split(dataframe_copy, num_procs)\n\n                # Manually create a number of processes equal to the number of\n                # logical cpus available\n                for pid in range(num_procs):\n                    process = mp.Process(\n                        target=parallel_apply,\n                        args=(filter_obj, subframes[pid], queue),\n                    )\n                    process.start()\n                    processes.append(process)\n\n                # Stores filtered subframes in a list: 'returned_frames', for\n                # pandas concatenation. This intermediary list is used because\n                # pandas concat is faster when called only once on a list of\n                # dataframes, than when called multiple times appending onto a\n                # frame of increasing size.\n                for pid in range(num_procs):\n                    returned_frames.append(queue.get())\n\n                for proc in processes:\n                    proc.join()\n\n                filtered_df = pd.concat(returned_frames)\n\n            else:\n                # perform filter sequentially if num_procs == 1\n                filtered_rows = dataframe_copy.apply(filter_obj, axis=1)\n                filtered_df = dataframe_copy[filtered_rows]\n\n        elif isinstance(filter_obj, (list, str)) or is_hatchet_query(filter_obj):\n            # use a callpath query to apply the filter\n            query = filter_obj\n            # If a raw Object-dialect query is provided (not already passed to ObjectQuery),\n            # create a new ObjectQuery object.\n            if isinstance(filter_obj, list):\n                query = ObjectQuery(filter_obj, multi_index_mode)\n            # If a raw String-dialect query is provided (not already passed to StringQuery),\n            # create a new StringQuery object.\n            elif isinstance(filter_obj, str):\n                query = parse_string_dialect(filter_obj, multi_index_mode)\n            # If an old-style query is provided, extract the underlying new-style query.\n            elif issubclass(type(filter_obj), AbstractQuery):\n                query = filter_obj._get_new_query()\n            query_matches = self.query_engine.apply(query, self.graph, self.dataframe)\n            # match_set = list(set().union(*query_matches))\n            # filtered_df = dataframe_copy.loc[dataframe_copy[\"node\"].isin(match_set)]\n            filtered_df = dataframe_copy.loc[dataframe_copy[\"node\"].isin(query_matches)]\n        else:\n            raise InvalidFilter(\n                \"The argument passed to filter must be a callable, a query path list, or a QueryMatcher object.\"\n            )\n\n        if filtered_df.shape[0] == 0:\n            raise EmptyFilter(\n                \"The provided filter would have produced an empty GraphFrame.\"\n            )\n\n        filtered_df.set_index(index_names, inplace=True)\n\n        filtered_gf = GraphFrame(self.graph, filtered_df)\n        filtered_gf.exc_metrics = self.exc_metrics\n        filtered_gf.inc_metrics = self.inc_metrics\n        filtered_gf.default_metric = self.default_metric\n        
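# Illustrative calls (assuming a hypothetical \"time\" metric column), one per\n        # branch handled above:\n        #   gf.filter(lambda row: row[\"time\"] > 10.0)   # callable filter\n        #   gf.filter([{\"name\": \"main\"}, \"*\"])          # Object-dialect query path list\n        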
filtered_gf.metadata = self.metadata\n\n if squash:\n return filtered_gf.squash(update_inc_cols)\n return filtered_gf\n\n def squash(self, update_inc_cols=True):\n \"\"\"Rewrite the Graph to include only nodes present in the DataFrame's rows.\n\n This can be used to simplify the Graph, or to normalize Graph\n indexes between two GraphFrames.\n\n Arguments:\n update_inc_cols (boolean, optional): if True, update inclusive columns.\n \"\"\"\n index_names = self.dataframe.index.names\n self.dataframe.reset_index(inplace=True)\n\n # create new nodes for each unique node in the old dataframe\n old_to_new = {n: n.copy() for n in set(self.dataframe[\"node\"])}\n for i in old_to_new:\n old_to_new[i]._hatchet_nid = i._hatchet_nid\n\n # Maintain sets of connections to make for each old node.\n # Start with old -> new mapping and update as we traverse subgraphs.\n connections = defaultdict(lambda: set())\n connections.update({k: {v} for k, v in old_to_new.items()})\n\n new_roots = [] # list of new roots\n\n # connect new nodes to children according to transitive\n # relationships in the old graph.\n def rewire(node, new_parent, visited):\n # make all transitive connections for the node we're visiting\n for n in connections[node]:\n if new_parent:\n # there is a parent in the new graph; connect it\n if n not in new_parent.children:\n new_parent.add_child(n)\n n.add_parent(new_parent)\n\n elif n not in new_roots:\n # this is a new root\n new_roots.append(n)\n\n new_node = old_to_new.get(node)\n transitive = set()\n if node not in visited:\n visited.add(node)\n for child in node.children:\n transitive |= rewire(child, new_node or new_parent, visited)\n\n if new_node:\n # since new_node exists in the squashed graph, we only\n # need to connect new_node\n return {new_node}\n else:\n # connect parents to the first transitively reachable\n # new_nodes of nodes we're removing with this squash\n connections[node] |= transitive\n return connections[node]\n\n # run rewire for each root and make a new graph\n visited = set()\n for root in self.graph.roots:\n rewire(root, None, visited)\n graph = Graph(new_roots)\n graph.enumerate_traverse()\n\n # reindex new dataframe with new nodes\n df = self.dataframe.copy()\n df[\"node\"] = df[\"node\"].apply(lambda x: old_to_new[x])\n\n # at this point, the graph is potentially invalid, as some nodes\n # may have children with identical frames.\n merges = graph.normalize()\n df[\"node\"] = df[\"node\"].apply(lambda n: merges.get(n, n))\n\n self.dataframe.set_index(index_names, inplace=True)\n df.set_index(index_names, inplace=True)\n # create dict that stores aggregation function for each column\n agg_dict = {}\n for col in df.columns.tolist():\n if col in self.exc_metrics + self.inc_metrics:\n # use min_count=1 (default is 0) here, so sum of an all-NA\n # series is NaN, not 0\n # when min_count=1, sum([NaN, NaN)] = NaN\n # when min_count=0, sum([NaN, NaN)] = 0\n agg_dict[col] = lambda x: x.sum(min_count=1)\n else:\n agg_dict[col] = lambda x: x.iloc[0]\n\n # perform a groupby to merge nodes with the same callpath\n agg_df = df.groupby(index_names).agg(agg_dict)\n agg_df.sort_index(inplace=True)\n\n # put it all together\n new_gf = GraphFrame(\n graph,\n agg_df,\n self.exc_metrics,\n self.inc_metrics,\n self.default_metric,\n self.metadata,\n )\n if update_inc_cols:\n new_gf.update_inclusive_columns()\n return new_gf\n\n def _init_sum_columns(self, columns, out_columns):\n \"\"\"Helper function for subtree_sum and subgraph_sum.\"\"\"\n if out_columns is None:\n out_columns = 
columns\n        else:\n            # init out columns with input columns in case they are not there.\n            for col, out in zip(columns, out_columns):\n                self.dataframe[out] = self.dataframe[col]\n\n        if len(columns) != len(out_columns):\n            raise ValueError(\"columns and out_columns must be the same length!\")\n\n        return out_columns\n\n    def subtree_sum(\n        self, columns, out_columns=None, function=lambda x: x.sum(min_count=1)\n    ):\n        \"\"\"Compute sum of elements in subtrees. Valid only for trees.\n\n        For each row in the graph, ``out_columns`` will contain the\n        element-wise sum of all values in ``columns`` for that row's node\n        and all of its descendants.\n\n        This algorithm will multiply count nodes with in-degree higher\n        than one -- i.e., it is only correct for trees. Prefer using\n        ``subgraph_sum`` (which calls ``subtree_sum`` if it can), unless\n        you have a good reason not to.\n\n        Arguments:\n            columns (list of str): names of columns to sum (default: all columns)\n            out_columns (list of str): names of columns to store results\n                (default: in place)\n            function (callable): associative operator used to sum\n                elements, sum of an all-NA series is NaN (default: sum(min_count=1))\n        \"\"\"\n        out_columns = self._init_sum_columns(columns, out_columns)\n\n        # sum over the output columns\n        for node in self.graph.traverse(order=\"post\"):\n            if node.children:\n                # TODO: need a better way of aggregating inclusive metrics when\n                # TODO: there is a multi-index\n                try:\n                    is_multi_index = isinstance(\n                        self.dataframe.index, pd.core.index.MultiIndex\n                    )\n                except AttributeError:\n                    is_multi_index = isinstance(self.dataframe.index, pd.MultiIndex)\n\n                if is_multi_index:\n                    for rank_thread in self.dataframe.loc[\n                        (node), out_columns\n                    ].index.unique():\n                        # rank_thread is either rank or a tuple of (rank, thread).\n                        # We check if rank_thread is a tuple and if it is, we\n                        # create a tuple of (node, rank, thread). If not, we create\n                        # a tuple of (node, rank).\n                        if isinstance(rank_thread, tuple):\n                            df_index1 = (node,) + rank_thread\n                            df_index2 = ([node] + node.children,) + rank_thread\n                        else:\n                            df_index1 = (node, rank_thread)\n                            df_index2 = ([node] + node.children, rank_thread)\n\n                        for col in out_columns:\n                            self.dataframe.loc[df_index1, col] = function(\n                                self.dataframe.loc[df_index2, col]\n                            )\n                else:\n                    for col in out_columns:\n                        self.dataframe.loc[node, col] = function(\n                            self.dataframe.loc[[node] + node.children, col]\n                        )\n\n    def subgraph_sum(\n        self, columns, out_columns=None, function=lambda x: x.sum(min_count=1)\n    ):\n        \"\"\"Compute sum of elements in subgraphs.\n\n        For each row in the graph, ``out_columns`` will contain the\n        element-wise sum of all values in ``columns`` for that row's node\n        and all of its descendants.\n\n        This algorithm is worst-case quadratic in the size of the graph,\n        so we try to call ``subtree_sum`` if we can. 
In general, there\n is not a particularly efficient algorithm known for subgraph\n sums, so this does about as well as we know how.\n\n Arguments:\n columns (list of str): names of columns to sum (default: all columns)\n out_columns (list of str): names of columns to store results\n (default: in place)\n function (callable): associative operator used to sum\n elements, sum of an all-NA series is NaN (default: sum(min_count=1))\n \"\"\"\n if self.graph.is_tree():\n self.subtree_sum(columns, out_columns, function)\n return\n\n out_columns = self._init_sum_columns(columns, out_columns)\n for node in self.graph.traverse():\n subgraph_nodes = list(node.traverse())\n # TODO: need a better way of aggregating inclusive metrics when\n # TODO: there is a multi-index\n try:\n is_multi_index = isinstance(\n self.dataframe.index, pd.core.index.MultiIndex\n )\n except AttributeError:\n is_multi_index = isinstance(self.dataframe.index, pd.MultiIndex)\n\n if is_multi_index:\n for rank_thread in self.dataframe.loc[\n (node), out_columns\n ].index.unique():\n # rank_thread is either rank or a tuple of (rank, thread).\n # We check if rank_thread is a tuple and if it is, we\n # create a tuple of (node, rank, thread). If not, we create\n # a tuple of (node, rank).\n if isinstance(rank_thread, tuple):\n df_index1 = (node,) + rank_thread\n df_index2 = (subgraph_nodes,) + rank_thread\n else:\n df_index1 = (node, rank_thread)\n df_index2 = (subgraph_nodes, rank_thread)\n\n for col in out_columns:\n self.dataframe.loc[df_index1, col] = [\n function(self.dataframe.loc[df_index2, col])\n ]\n else:\n # TODO: if you take the list constructor away from the\n # TODO: assignment below, this assignment gives NaNs. Why?\n self.dataframe.loc[(node), out_columns] = list(\n function(self.dataframe.loc[(subgraph_nodes), columns])\n )\n\n def generate_exclusive_columns(self, inc_metrics=None):\n \"\"\"Generates exclusive metrics from available inclusive metrics.\n Arguments:\n inc_metrics (str, list, optional): Instead of generating the exclusive time for each inclusive metric, it is possible to specify those metrics manually. Defaults to None.\n\n Currently, this function determines which metrics to generate by looking for one of two things:\n\n 1. An inclusive metric ending in \"(inc)\" that does not have an exclusive metric with the same name (minus \"(inc)\")\n 2. An inclusive metric not ending in \"(inc)\"\n\n The metrics that are generated will have one of two name formats:\n\n 1. If the corresponding inclusive metric's name ends in \"(inc)\", the exclusive metric will have the same\n name, minus \"(inc)\"\n 2. If the corresponding inclusive metric's name does not end in \"(inc)\", the exclusive metric will have the same\n name as the inclusive metric, followed by a \"(exc)\" suffix\n \"\"\"\n # TODO Change how exclusive-inclusive pairs are determined when inc_metrics and exc_metrics are changed\n # Iterate over inclusive metrics and collect tuples of (new exclusive metrics name, inclusive metric name)\n generation_pairs = []\n for inc in self.inc_metrics:\n if inc_metrics and inc not in inc_metrics:\n continue\n\n # If the metric isn't numeric, it is really categorical. 
This means the inclusive/exclusive thing doesn't really apply.\n if not pd.api.types.is_numeric_dtype(self.dataframe[inc]):\n continue\n # Assume that metrics ending in \"(inc)\" are generated\n if inc.endswith(\"(inc)\"):\n possible_exc = inc[: -len(\"(inc)\")].strip()\n # If a metric with the same name as the inclusive metrics minus the \"(inc)\" does not exist in exc_metrics,\n # assume that there is not a corresponding exclusive metric. So, add this new exclusive metric to the generation list.\n if possible_exc not in self.exc_metrics:\n generation_pairs.append((possible_exc, inc))\n # If there is an inclusive metric without the \"(inc)\" suffix,\n # assume that there is no corresponding exclusive metric. So, add this new exclusive metrics (with the \"(exc)\"\n # suffix) to the generation list.\n else:\n generation_pairs.append((inc + \" (exc)\", inc))\n # Consider each new exclusive metric and its corresponding inclusive metric\n for exc, inc in generation_pairs:\n # Process of obtaining inclusive data for a node differs if the DataFrame has an Index vs a MultiIndex\n if isinstance(self.dataframe.index, pd.MultiIndex):\n new_data = {}\n # Traverse every node in the Graph\n for node in self.graph.traverse():\n # Consider each unique portion of the MultiIndex corresponding to the current node\n for non_node_idx in self.dataframe.loc[(node)].index.unique():\n # If there's only 1 index level besides \"node\", add it to a 1-element list to ensure consistent typing\n if not isinstance(non_node_idx, tuple) and not isinstance(\n non_node_idx, list\n ):\n non_node_idx = [non_node_idx]\n # Build the full index\n # TODO: Replace the full_idx assignment with the following when 2.7 support\n # is dropped:\n # full_idx = (node, *non_node_idx)\n full_idx = tuple([node]) + tuple(non_node_idx)\n # Iterate over the children of the current node and add up\n # their values for the inclusive metric\n inc_sum = 0\n for child in node.children:\n # TODO: See note about full_idx above\n child_idx = tuple([child]) + tuple(non_node_idx)\n inc_sum += np.nan_to_num(self.dataframe.loc[child_idx, inc])\n # Subtract the current node's inclusive metric from the previously calculated sum to\n # get the exclusive metric value for the node\n new_data[full_idx] = self.dataframe.loc[full_idx, inc] - inc_sum\n # Add the exclusive metric as a new column in the DataFrame\n self.dataframe = self.dataframe.assign(\n **{exc: pd.Series(data=new_data)}\n )\n else:\n # Create a basic Node-metric dict for the new exclusive metric\n new_data = {n: -1 for n in self.dataframe.index.values}\n # Traverse the graph\n for node in self.graph.traverse():\n # Sum up the inclusive metric values of the current node's children\n inc_sum = 0\n for child in node.children:\n inc_sum += np.nan_to_num(self.dataframe.loc[child, inc])\n # Subtract the current node's inclusive metric from the previously calculated sum to\n # get the exclusive metric value for the node\n new_data[node] = self.dataframe.loc[node, inc] - inc_sum\n # Add the exclusive metric as a new column in the DataFrame\n self.dataframe = self.dataframe.assign(\n **{exc: pd.Series(data=new_data)}\n )\n # Add the newly created metrics to self.exc_metrics\n self.exc_metrics.extend([metric_tuple[0] for metric_tuple in generation_pairs])\n self.exc_metrics = list(set(self.exc_metrics))\n\n def update_inclusive_columns(self):\n \"\"\"Update inclusive columns (typically after operations that rewire the\n graph.\n \"\"\"\n # we should update inc metric only if exc metric exist\n if not 
self.exc_metrics:\n            return\n\n        # TODO When Python 2.7 support is dropped, change this line to the more idiomatic:\n        # old_inc_metrics = self.inc_metrics.copy()\n        old_inc_metrics = list(self.inc_metrics)\n        # TODO Change this logic when inc_metrics and exc_metrics are changed\n        new_inc_metrics = []\n        for exc in self.exc_metrics:\n            if isinstance(exc, tuple):\n                if exc[-1].endswith(\"(exc)\"):\n                    temp = list(exc)\n                    temp[-1] = temp[-1][: -len(\"(exc)\")].strip()\n                    new_inc_metrics.append(tuple(temp))\n                else:\n                    temp = list(exc)\n                    temp[-1] = \"%s (inc)\" % temp[-1]\n                    new_inc_metrics.append(tuple(temp))\n            else:\n                if exc.endswith(\"(exc)\"):\n                    new_inc_metrics.append(exc[: -len(\"(exc)\")].strip())\n                else:\n                    new_inc_metrics.append(\"%s (inc)\" % exc)\n        self.inc_metrics = new_inc_metrics\n\n        self.subgraph_sum(self.exc_metrics, self.inc_metrics)\n        self.inc_metrics = list(set(self.inc_metrics + old_inc_metrics))\n\n    def show_metric_columns(self):\n        \"\"\"Returns a list of dataframe column labels.\"\"\"\n        return list(self.exc_metrics + self.inc_metrics)\n\n    def unify(self, other):\n        \"\"\"Returns a unified graphframe.\n\n        Ensure self and other have the same graph and same node IDs. This may\n        change the node IDs in the dataframe.\n\n        Update the graphs in the graphframe if they differ.\n        \"\"\"\n        if self.graph is other.graph:\n            return\n\n        node_map = {}\n        union_graph = self.graph.union(other.graph, node_map)\n\n        self_index_names = self.dataframe.index.names\n        other_index_names = other.dataframe.index.names\n\n        self.dataframe.reset_index(inplace=True)\n        other.dataframe.reset_index(inplace=True)\n\n        self.dataframe[\"node\"] = self.dataframe[\"node\"].apply(lambda x: node_map[id(x)])\n        other.dataframe[\"node\"] = other.dataframe[\"node\"].apply(\n            lambda x: node_map[id(x)]\n        )\n\n        # add missing rows to copy of self's dataframe in preparation for\n        # operation\n        self._insert_missing_rows(other)\n\n        self.dataframe.set_index(self_index_names, inplace=True, drop=True)\n        other.dataframe.set_index(other_index_names, inplace=True, drop=True)\n\n        self.graph = union_graph\n        other.graph = union_graph\n\n    @deprecated_params(\n        metric=\"metric_column\",\n        name=\"name_column\",\n        expand_names=\"expand_name\",\n        context=\"context_column\",\n        invert_colors=\"invert_colormap\",\n    )\n    def tree(\n        self,\n        metric_column=None,\n        annotation_column=None,\n        precision=3,\n        name_column=\"name\",\n        expand_name=False,\n        context_column=\"file\",\n        rank=0,\n        thread=0,\n        depth=10000,\n        highlight_name=False,\n        colormap=\"RdYlGn\",\n        invert_colormap=False,\n        colormap_annotations=None,\n        render_header=True,\n        min_value=None,\n        max_value=None,\n    ):\n        \"\"\"Visualize the Hatchet graphframe as a tree.\n\n        Arguments:\n            metric_column (str, list, optional): Columns to use the metrics from. Defaults to None.\n            annotation_column (str, optional): Column to use as an annotation. Defaults to None.\n            precision (int, optional): Precision of shown numbers. Defaults to 3.\n            name_column (str, optional): Column of the node name. Defaults to \"name\".\n            expand_name (bool, optional): Limits the length of the node name. Defaults to False.\n            context_column (str, optional): Shows the file this function was called in (available with HPCToolkit). Defaults to \"file\".\n            rank (int, optional): Specifies the rank to take the data from. Defaults to 0.\n            thread (int, optional): Specifies the thread to take the data from. Defaults to 0.\n            depth (int, optional): Sets the maximum depth of the tree. 
Defaults to 10000.\n highlight_name (bool, optional): Highlights the names of the nodes. Defaults to False.\n colormap (str, optional): Specifies a colormap to use. Defaults to \"RdYlGn\".\n invert_colormap (bool, optional): Reverts the chosen colormap. Defaults to False.\n colormap_annotations (str, list, dict, optional): Either provide the name of a colormap, a list of colors to use or a dictionary which maps the used annotations to a color. Defaults to None.\n render_header (bool, optional): Shows the Preamble. Defaults to True.\n min_value (int, optional): Overwrites the min value for the coloring legend. Defaults to None.\n max_value (int, optional): Overwrites the max value for the coloring legend. Defaults to None.\n\n Returns:\n str: String representation of the tree, ready to print\n \"\"\"\n color = sys.stdout.isatty()\n shell = None\n if metric_column is None:\n metric_column = self.default_metric\n\n if color is False:\n try:\n import IPython\n\n shell = IPython.get_ipython().__class__.__name__\n except ImportError:\n pass\n # Test if running in a Jupyter notebook or qtconsole\n if shell == \"ZMQInteractiveShell\":\n color = True\n\n if sys.version_info.major == 2:\n unicode = False\n elif sys.version_info.major == 3:\n unicode = True\n\n return ConsoleRenderer(unicode=unicode, color=color).render(\n self.graph.roots,\n self.dataframe,\n metric_column=metric_column,\n annotation_column=annotation_column,\n precision=precision,\n name_column=name_column,\n expand_name=expand_name,\n context_column=context_column,\n rank=rank,\n thread=thread,\n depth=depth,\n highlight_name=highlight_name,\n colormap=colormap,\n invert_colormap=invert_colormap,\n colormap_annotations=colormap_annotations,\n render_header=render_header,\n min_value=min_value,\n max_value=max_value,\n )\n\n def to_dot(self, metric=None, name=\"name\", rank=0, thread=0, threshold=0.0):\n \"\"\"Write the graph in the graphviz dot format:\n https://www.graphviz.org/doc/info/lang.html\n \"\"\"\n if metric is None:\n metric = self.default_metric\n return trees_to_dot(\n self.graph.roots, self.dataframe, metric, name, rank, thread, threshold\n )\n\n def to_flamegraph(self, metric=None, name=\"name\", rank=0, thread=0, threshold=0.0):\n \"\"\"Write the graph in the folded stack output required by FlameGraph\n http://www.brendangregg.com/flamegraphs.html\n \"\"\"\n folded_stack = \"\"\n if metric is None:\n metric = self.default_metric\n\n for root in self.graph.roots:\n for hnode in root.traverse():\n callpath = hnode.path()\n for i in range(0, len(callpath) - 1):\n if (\n \"rank\" in self.dataframe.index.names\n and \"thread\" in self.dataframe.index.names\n ):\n df_index = (callpath[i], rank, thread)\n elif \"rank\" in self.dataframe.index.names:\n df_index = (callpath[i], rank)\n elif \"thread\" in self.dataframe.index.names:\n df_index = (callpath[i], thread)\n else:\n df_index = callpath[i]\n folded_stack = (\n folded_stack + str(self.dataframe.loc[df_index, \"name\"]) + \"; \"\n )\n\n if (\n \"rank\" in self.dataframe.index.names\n and \"thread\" in self.dataframe.index.names\n ):\n df_index = (callpath[-1], rank, thread)\n elif \"rank\" in self.dataframe.index.names:\n df_index = (callpath[-1], rank)\n elif \"thread\" in self.dataframe.index.names:\n df_index = (callpath[-1], thread)\n else:\n df_index = callpath[-1]\n folded_stack = (\n folded_stack + str(self.dataframe.loc[df_index, \"name\"]) + \" \"\n )\n\n # set dataframe index based on if rank and thread are part of the index\n if (\n \"rank\" in 
self.dataframe.index.names\n and \"thread\" in self.dataframe.index.names\n ):\n df_index = (hnode, rank, thread)\n elif \"rank\" in self.dataframe.index.names:\n df_index = (hnode, rank)\n elif \"thread\" in self.dataframe.index.names:\n df_index = (hnode, thread)\n else:\n df_index = hnode\n\n folded_stack = (\n folded_stack\n + str(round(self.dataframe.loc[df_index, metric]))\n + \"\\n\"\n )\n\n return folded_stack\n\n def to_literal(self, name=\"name\", rank=0, thread=0, cat_columns=[]):\n \"\"\"Format this graph as a list of dictionaries for Roundtrip\n visualizations.\n \"\"\"\n graph_literal = []\n visited = []\n\n def _get_df_index(hnode):\n if (\n \"rank\" in self.dataframe.index.names\n and \"thread\" in self.dataframe.index.names\n ):\n df_index = (hnode, rank, thread)\n elif \"rank\" in self.dataframe.index.names:\n df_index = (hnode, rank)\n elif \"thread\" in self.dataframe.index.names:\n df_index = (hnode, thread)\n else:\n df_index = hnode\n\n return df_index\n\n def metrics_to_dict(df_index):\n metrics_dict = {}\n for m in sorted(self.inc_metrics + self.exc_metrics):\n node_metric_val = self.dataframe.loc[df_index, m]\n if isinstance(node_metric_val, pd.Series):\n node_metric_val = node_metric_val[0]\n if np.isinf(node_metric_val) or np.isneginf(node_metric_val):\n node_metric_val = 0.0\n if pd.isna(node_metric_val):\n node_metric_val = 0.0\n metrics_dict[m] = node_metric_val\n\n return metrics_dict\n\n def attributes_to_dict(df_index):\n valid_columns = [\n col for col in cat_columns if col in self.dataframe.columns\n ]\n\n attributes_dict = {}\n for m in sorted(valid_columns):\n node_attr_val = self.dataframe.loc[df_index, m]\n if isinstance(node_attr_val, pd.Series):\n node_attr_val = node_attr_val[0]\n attributes_dict[m] = node_attr_val\n\n return attributes_dict\n\n def add_nodes(hnode):\n df_index = _get_df_index(hnode)\n\n node_dict = {}\n\n node_name = self.dataframe.loc[df_index, name]\n\n if isinstance(node_name, pd.Series):\n self.dataframe.loc[df_index]\n node_name = node_name[0]\n\n node_dict[\"name\"] = node_name\n node_dict[\"frame\"] = hnode.frame.attrs\n node_dict[\"metrics\"] = metrics_to_dict(df_index)\n # node_dict[\"metrics\"][\"_hatchet_nid\"] = int(self.dataframe[\"nid\"][df_index])\n node_dict[\"metrics\"][\"_hatchet_nid\"] = int(hnode._hatchet_nid)\n node_dict[\"attributes\"] = attributes_to_dict(df_index)\n\n if hnode.children and hnode not in visited:\n visited.append(hnode)\n node_dict[\"children\"] = []\n\n for child in sorted(hnode.children, key=lambda n: n.frame):\n node_dict[\"children\"].append(add_nodes(child))\n\n return node_dict\n\n for root in sorted(self.graph.roots, key=lambda n: n.frame):\n graph_literal.append(add_nodes(root))\n\n return graph_literal\n\n def to_dict(self):\n hatchet_dict = {}\n\n \"\"\"\n Nodes: {hatchet_nid: {node data, children:[by-id]}}\n \"\"\"\n graphs = []\n for root in self.graph.roots:\n formatted_graph_dict = {}\n for n in root.traverse():\n formatted_graph_dict[n._hatchet_nid] = {\n \"data\": n.frame.attrs,\n \"children\": [c._hatchet_nid for c in n.children],\n }\n graphs.append(formatted_graph_dict)\n\n hatchet_dict[\"graph\"] = graphs\n\n hatchet_dict[\"dataframe_indices\"] = list(self.dataframe.index.names)\n ef = self.dataframe.reset_index()\n ef[\"node\"] = ef[\"node\"].apply(lambda n: n._hatchet_nid)\n hatchet_dict[\"dataframe\"] = ef.replace({np.nan: None}).to_dict(\"records\")\n\n hatchet_dict[\"inclusive_metrics\"] = self.inc_metrics\n hatchet_dict[\"exclusive_metrics\"] = self.exc_metrics\n\n 
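# Sketch of the resulting layout (keys assigned in this method):\n        #   {\"graph\": [...], \"dataframe_indices\": [...], \"dataframe\": [{...}, ...],\n        #    \"inclusive_metrics\": [...], \"exclusive_metrics\": [...]}\n        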
return hatchet_dict\n\n    def to_json(self):\n        return json.dumps(self.to_dict())\n\n    def _operator(self, other, op):\n        \"\"\"Generic function to apply operator to two dataframes and store\n        result in self.\n\n        Arguments:\n            self (graphframe): self's graphframe\n            other (graphframe): other's graphframe\n            op (operator): pandas arithmetic operator\n\n        Return:\n            (GraphFrame): self's graphframe modified\n        \"\"\"\n        # unioned set of self and other exclusive and inclusive metrics\n        all_metrics = list(\n            set().union(\n                self.exc_metrics, self.inc_metrics, other.exc_metrics, other.inc_metrics\n            )\n        )\n\n        self.dataframe.update(op(other.dataframe[all_metrics]))\n\n        return self\n\n    def _insert_missing_rows(self, other):\n        \"\"\"Helper function to add rows that exist in other, but not in self.\n\n        This returns a graphframe with a modified dataframe. The new rows will\n        contain NaN for the metric columns.\n\n        Return:\n            (GraphFrame): self's modified graphframe\n        \"\"\"\n        all_metrics = list(\n            set().union(\n                self.exc_metrics, self.inc_metrics, other.exc_metrics, other.inc_metrics\n            )\n        )\n\n        # make two 2D numpy arrays, each with two columns:\n        # 1) the hashed value of a node and 2) a numerical index\n        # Many operations are stacked here to reduce the need for storing\n        # large intermediary datasets\n        self_hsh_ndx = np.vstack(\n            (\n                np.array(\n                    [x.__hash__() for x in self.dataframe[\"node\"]], dtype=np.uint64\n                ),\n                self.dataframe.index.values.astype(np.uint64),\n            )\n        ).T\n        other_hsh_ndx = np.vstack(\n            (\n                np.array(\n                    [x.__hash__() for x in other.dataframe[\"node\"]], dtype=np.uint64\n                ),\n                other.dataframe.index.values.astype(np.uint64),\n            )\n        ).T\n\n        # sort our 2D arrays by hashed node value so a binary search can be used\n        # in the cython function fast_not_isin\n        self_hsh_ndx_sorted = self_hsh_ndx[self_hsh_ndx[:, 0].argsort()]\n        other_hsh_ndx_sorted = other_hsh_ndx[other_hsh_ndx[:, 0].argsort()]\n\n        # get nodes that exist in other, but not in self; their metric columns\n        # are set to NaN further below\n        other_not_in_self = other.dataframe[\n            _gfm_cy.fast_not_isin(\n                other_hsh_ndx_sorted,\n                self_hsh_ndx_sorted,\n                other_hsh_ndx_sorted.shape[0],\n                self_hsh_ndx_sorted.shape[0],\n            )\n        ]\n        # get nodes that exist in self, but not in other\n        self_not_in_other = self.dataframe[\n            _gfm_cy.fast_not_isin(\n                self_hsh_ndx_sorted,\n                other_hsh_ndx_sorted,\n                self_hsh_ndx_sorted.shape[0],\n                other_hsh_ndx_sorted.shape[0],\n            )\n        ]\n\n        # if there are missing nodes in either self or other, add a new column\n        # called _missing_node\n        if not self_not_in_other.empty:\n            self.dataframe = self.dataframe.assign(\n                _missing_node=np.zeros(len(self.dataframe), dtype=np.short)\n            )\n        if not other_not_in_self.empty:\n            # initialize with 2 to save filling in later\n            other_not_in_self = other_not_in_self.assign(\n                _missing_node=[int(2) for x in range(len(other_not_in_self))]\n            )\n\n        # add a new column to self if other has nodes not in self\n        if self_not_in_other.empty:\n            self.dataframe[\"_missing_node\"] = np.zeros(\n                len(self.dataframe), dtype=np.short\n            )\n\n        # get the lengths of the two missing-row sets for use below\n        onis_len = len(other_not_in_self)\n        snio_len = len(self_not_in_other)\n\n        # case where self is a superset of other\n        if snio_len != 0:\n            self_missing_node = self.dataframe[\"_missing_node\"].values\n            snio_indices = self_not_in_other.index.values\n\n            # This function adds 1 to all nodes in self.dataframe['_missing_node'] which\n            # are in self but not in the other graphframe\n            _gfm_cy.insert_one_for_self_nodes(snio_len, self_missing_node, snio_indices)\n            
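# _missing_node encoding used in this method (inferred from the code):\n            # 0 = node present in both frames, 1 = only in self, 2 = only in other.\n            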
self.dataframe[\"_missing_node\"] = np.array(\n [n for n in self_missing_node], dtype=np.short\n )\n\n # for nodes that only exist in other, set the metric to be nan (since\n # it's a missing node in self)\n # replaces individual metric assignments with np.zeros\n for j in all_metrics:\n other_not_in_self[j] = np.full(onis_len, np.nan)\n\n # append missing rows (nodes that exist in other, but not in self) to self's\n # dataframe\n self.dataframe = pd.concat(\n [self.dataframe, other_not_in_self], axis=0, sort=True\n )\n\n return self\n\n def groupby_aggregate(self, groupby_function, agg_function):\n \"\"\"Groupby-aggregate dataframe and reindex the Graph.\n\n Reindex the graph to match the groupby-aggregated dataframe.\n\n Update the frame attributes to contain those columns in the dataframe index.\n\n Arguments:\n self (graphframe): self's graphframe\n groupby_function: groupby function on dataframe\n agg_function: aggregate function on dataframe\n\n Return:\n (GraphFrame): new graphframe with reindexed graph and groupby-aggregated dataframe\n \"\"\"\n # create new nodes for each unique node in the old dataframe\n # length is equal to number of nodes in original graph\n old_to_new = {}\n\n # list of new roots\n new_roots = []\n\n # dict of (new) super nodes\n # length is equal to length of dataframe index (after groupby-aggregate)\n node_dicts = []\n\n def reindex(node, parent, visited):\n \"\"\"Reindex the graph.\n\n Connect super nodes to children according to relationships from old graph.\n \"\"\"\n # grab the super node corresponding to original node\n super_node = old_to_new.get(node)\n\n if not node.parents and super_node not in new_roots:\n # this is a new root\n new_roots.append(super_node)\n\n # iterate over parents of old node, adding parents to super node\n for parent in node.parents:\n # convert node to super node\n snode = old_to_new.get(parent)\n # move to next node if parent and super node are to be merged\n if snode == super_node:\n continue\n # add node to super node's parents if parent does not exist in super\n # node's parents\n if snode not in super_node.parents:\n super_node.add_parent(snode)\n\n # iterate over children of old node, adding children to super node\n for child in node.children:\n # convert node to super node\n snode = old_to_new.get(child)\n # move to next node if child and super node are to be merged\n if snode == super_node:\n continue\n # add node to super node's children if child does not exist in super\n # node's children\n if snode not in super_node.children:\n super_node.add_child(snode)\n\n if node not in visited:\n visited.add(node)\n for child in node.children:\n reindex(child, super_node, visited)\n\n # groupby-aggregate dataframe based on user-supplied functions\n groupby_obj = self.dataframe.groupby(groupby_function)\n agg_df = groupby_obj.agg(agg_function)\n\n # traverse groupby_obj, determine old node to super node mapping\n nid = 0\n for k, v in groupby_obj.groups.items():\n node_name = k\n node_type = agg_df.index.name\n super_node = Node(Frame({\"name\": node_name, \"type\": node_type}), None, nid)\n n = {\"node\": super_node, \"nid\": nid, \"name\": node_name}\n node_dicts.append(n)\n nid += 1\n\n # if many old nodes map to the same super node\n for i in v:\n old_to_new[i] = super_node\n\n # reindex graph by traversing old graph\n visited = set()\n for root in self.graph.roots:\n reindex(root, None, visited)\n\n # append super nodes to groupby-aggregate dataframe\n df_index = list(agg_df.index.names)\n 
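# Note: resetting agg_df to a RangeIndex lets the axis=1 concat below align\n        # row-by-row with df_nodes, which is built in the same groupby order.\n        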
agg_df.reset_index(inplace=True)\n df_nodes = pd.DataFrame.from_dict(data=node_dicts)\n tmp_df = pd.concat([agg_df, df_nodes], axis=1)\n # add node to dataframe index if it doesn't exist\n if \"node\" not in df_index:\n df_index.append(\"node\")\n # reset index\n tmp_df.set_index(df_index, inplace=True)\n\n # update _hatchet_nid in reindexed graph and groupby-aggregate dataframe\n graph = Graph(new_roots)\n graph.enumerate_traverse()\n\n # put it all together\n new_gf = GraphFrame(\n graph,\n tmp_df,\n self.exc_metrics,\n self.inc_metrics,\n self.default_metric,\n self.metadata,\n )\n new_gf.drop_index_levels()\n return new_gf\n\n def add(self, other):\n \"\"\"Returns the column-wise sum of two graphframes as a new graphframe.\n\n This graphframe is the union of self's and other's graphs, and does not\n modify self or other.\n\n Return:\n (GraphFrame): new graphframe\n \"\"\"\n # create a copy of both graphframes\n self_copy = self.copy()\n other_copy = other.copy()\n\n # unify copies of graphframes\n self_copy.unify(other_copy)\n\n return self_copy._operator(other_copy, self_copy.dataframe.add)\n\n def sub(self, other):\n \"\"\"Returns the column-wise difference of two graphframes as a new\n graphframe.\n\n This graphframe is the union of self's and other's graphs, and does not\n modify self or other.\n\n Return:\n (GraphFrame): new graphframe\n \"\"\"\n # create a copy of both graphframes\n self_copy = self.copy()\n other_copy = other.copy()\n\n # unify copies of graphframes\n self_copy.unify(other_copy)\n\n return self_copy._operator(other_copy, self_copy.dataframe.sub)\n\n def div(self, other):\n \"\"\"Returns the column-wise float division of two graphframes as a new graphframe.\n\n This graphframe is the union of self's and other's graphs, and does not\n modify self or other.\n\n Return:\n (GraphFrame): new graphframe\n \"\"\"\n # create a copy of both graphframes\n self_copy = self.copy()\n other_copy = other.copy()\n\n # unify copies of graphframes\n self_copy.unify(other_copy)\n\n return self_copy._operator(other_copy, self_copy.dataframe.divide)\n\n def mul(self, other):\n \"\"\"Returns the column-wise float multiplication of two graphframes as a new graphframe.\n\n This graphframe is the union of self's and other's graphs, and does not\n modify self or other.\n\n Return:\n (GraphFrame): new graphframe\n \"\"\"\n # create a copy of both graphframes\n self_copy = self.copy()\n other_copy = other.copy()\n\n # unify copies of graphframes\n self_copy.unify(other_copy)\n\n return self_copy._operator(other_copy, self_copy.dataframe.multiply)\n\n def __iadd__(self, other):\n \"\"\"Computes column-wise sum of two graphframes and stores the result in\n self.\n\n Self's graphframe is the union of self's and other's graphs, and the\n node handles from self will be rewritten with this operation. 
This\n operation does not modify other.\n\n Return:\n (GraphFrame): self's graphframe modified\n \"\"\"\n # create a copy of other's graphframe\n other_copy = other.copy()\n\n # unify self graphframe and copy of other graphframe\n self.unify(other_copy)\n\n return self._operator(other_copy, self.dataframe.add)\n\n def __add__(self, other):\n \"\"\"Returns the column-wise sum of two graphframes as a new graphframe.\n\n This graphframe is the union of self's and other's graphs, and does not\n modify self or other.\n\n Return:\n (GraphFrame): new graphframe\n \"\"\"\n return self.add(other)\n\n def __mul__(self, other):\n \"\"\"Returns the column-wise multiplication of two graphframes as a new graphframe.\n\n This graphframe is the union of self's and other's graphs, and does not\n modify self or other.\n\n Return:\n (GraphFrame): new graphframe\n \"\"\"\n return self.mul(other)\n\n def __isub__(self, other):\n \"\"\"Computes column-wise difference of two graphframes and stores the\n result in self.\n\n Self's graphframe is the union of self's and other's graphs, and the\n node handles from self will be rewritten with this operation. This\n operation does not modify other.\n\n Return:\n (GraphFrame): self's graphframe modified\n \"\"\"\n # create a copy of other's graphframe\n other_copy = other.copy()\n\n # unify self graphframe and other graphframe\n self.unify(other_copy)\n\n return self._operator(other_copy, self.dataframe.sub)\n\n def __sub__(self, other):\n \"\"\"Returns the column-wise difference of two graphframes as a new\n graphframe.\n\n This graphframe is the union of self's and other's graphs, and does not\n modify self or other.\n\n Return:\n (GraphFrame): new graphframe\n \"\"\"\n return self.sub(other)\n\n def __idiv__(self, other):\n \"\"\"Computes column-wise float division of two graphframes and stores the\n result in self.\n\n Self's graphframe is the union of self's and other's graphs, and the\n node handles from self will be rewritten with this operation. This\n operation does not modify other.\n\n Return:\n (GraphFrame): self's graphframe modified\n \"\"\"\n # create a copy of other's graphframe\n other_copy = other.copy()\n\n # unify self graphframe and other graphframe\n self.unify(other_copy)\n\n return self._operator(other_copy, self.dataframe.div)\n\n def __truediv__(self, other):\n \"\"\"Returns the column-wise float division of two graphframes as a new\n graphframe.\n\n This graphframe is the union of self's and other's graphs, and does not\n modify self or other.\n\n Return:\n (GraphFrame): new graphframe\n \"\"\"\n return self.div(other)\n\n def __imul__(self, other):\n \"\"\"Computes column-wise float multiplication of two graphframes and stores the\n result in self.\n\n Self's graphframe is the union of self's and other's graphs, and the\n node handles from self will be rewritten with this operation. 
This\n operation does not modify other.\n\n Return:\n (GraphFrame): self's graphframe modified\n \"\"\"\n # create a copy of other's graphframe\n other_copy = other.copy()\n\n # unify self graphframe and other graphframe\n self.unify(other_copy)\n\n return self._operator(other_copy, self.dataframe.mul)\n\n\nclass InvalidFilter(Exception):\n \"\"\"Raised when an invalid argument is passed to the filter function.\"\"\"\n\n\nclass EmptyFilter(Exception):\n \"\"\"Raised when a filter would otherwise return an empty GraphFrame.\"\"\"\n","sub_path":"hatchet/graphframe.py","file_name":"graphframe.py","file_ext":"py","file_size_in_byte":65456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"420092427","text":"#!/usr/bin/python3\n# Project: RadarCAS\n# Author: syx10\n# Time 2020/12/29:8:51\n\nfrom PyQt5 import QtWidgets\nfrom PyQt5.QtGui import QIntValidator\nfrom PyQt5.QtWidgets import QGridLayout, QFormLayout\n\nimport appconfig\nimport value.strings as strs\nfrom configurations.configuration import ConfigurationDialog\nfrom dialogmsgbox import QMessageBoxSample\n\n\nclass RadarConfigurationDialog(ConfigurationDialog):\n \"\"\"\n Radar configuration view\n \"\"\"\n def __init__(self, defaultConfig):\n super(ConfigurationDialog, self).__init__()\n self.defaultConf = defaultConfig\n self.init_ui()\n # self.set_widget_enable()\n\n def init_ui(self):\n self.setWindowTitle(strs.strings.get(\"radarConfig\")[strs.CH])\n self.setGeometry(300, 300, 700, 300)\n self.center()\n\n self.mainGrid = QGridLayout()\n self.configLayout = QFormLayout()\n self.mainGrid.addLayout(self.configLayout, 0, 0)\n self.buttons = QtWidgets.QDialogButtonBox(\n QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel) # 窗口中建立确认和取消按钮\n self.mainGrid.addWidget(self.buttons, 1, 0)\n\n self.sampleNum = QtWidgets.QLabel(strs.strings.get(\"sampleNum\")[appconfig.language])\n self.sampleNum.setObjectName(\"sampleNum\")\n self.sampleNumCombox = QtWidgets.QComboBox(self)\n self.sampleNumCombox.setObjectName(\"sampleNumCombox\")\n currSampleNum = str(int(self.defaultConf.get(\"bytesNum\") / 2))\n self.sampleNumCombox.addItem(currSampleNum)\n sampleNumList = strs.combobox.get(\"sampleNum\").copy()\n sampleNumList.remove(currSampleNum)\n self.sampleNumCombox.addItems(self.translate_combox(self.checkList(sampleNumList)))\n\n self.sampleFreq = QtWidgets.QLabel(strs.strings.get(\"sampleFreq\")[appconfig.language])\n self.sampleFreq.setObjectName(\"sampleFreq\")\n self.sampleFreqCombox = QtWidgets.QComboBox(self)\n self.sampleFreqCombox.setObjectName(\"sampleFreqCombox\")\n currSampleFreq = str(self.defaultConf.get(\"sampleFreq\")) + \"GHz\"\n self.sampleFreqCombox.addItem(currSampleFreq)\n sampleFreqList = strs.combobox.get(\"sampleFreq\").copy()\n sampleFreqList.remove(currSampleFreq)\n self.sampleFreqCombox.addItems(self.translate_combox(self.checkList(sampleFreqList)))\n\n self.pipeNum = QtWidgets.QLabel(strs.strings.get(\"pipeNum\")[appconfig.language])\n self.pipeNum.setObjectName(\"pipeNum\")\n self.pipeNumEdit = QtWidgets.QLineEdit()\n self.pipeNumEdit.setToolTip(\"The size of radar pipes\")\n self.pipeNumEdit.setObjectName(\"pipeNumEdit\")\n self.pipeNumEdit.setText(str(self.defaultConf.get(\"pipeNum\")))\n self.pipeNumEdit.setValidator(QIntValidator(0, 10))\n\n self.startPipeIndex = QtWidgets.QLabel(strs.strings.get(\"startPipeIndex\")[appconfig.language])\n self.startPipeIndex.setObjectName(\"startPipeIndex\")\n self.startPipeIndexEdit = QtWidgets.QLineEdit()\n 
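# Like the other numeric fields, this edit gets a QIntValidator below so\n        # out-of-range keystrokes are rejected as they are typed.\n        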
self.startPipeIndexEdit.setToolTip(\"The start cutoff pipe index\")\n self.startPipeIndexEdit.setObjectName(\"startPipeIndexEdit\")\n self.startPipeIndexEdit.setText(str(self.defaultConf.get(\"startPipeIndex\")))\n self.startPipeIndexEdit.setValidator(QIntValidator(0, 10))\n\n self.patchSize = QtWidgets.QLabel(strs.strings.get(\"patchSize\")[appconfig.language])\n self.patchSize.setObjectName(\"patchSize\")\n self.patchSizeEdit = QtWidgets.QLineEdit()\n self.patchSizeEdit.setToolTip(\"The sum of patch size and first cut row must be less than \"\n + self.sampleNumCombox.currentText())\n self.patchSizeEdit.setObjectName(\"firstCutNumEdit\")\n self.patchSizeEdit.setText(str(self.defaultConf.get(\"patchSize\")))\n self.patchSizeEdit.setEnabled(False)\n\n self.deltaDist = QtWidgets.QLabel(strs.strings.get(\"deltaDist\")[appconfig.language])\n self.deltaDist.setObjectName(\"deltaDist\")\n self.deltaDistEdit = QtWidgets.QLineEdit()\n self.deltaDistEdit.setObjectName(\"deltaDistEdit\")\n self.deltaDistEdit.setText(str(self.defaultConf.get(\"deltaDist\")))\n\n self.firstCutRow = QtWidgets.QLabel(strs.strings.get(\"firstCutRow\")[appconfig.language])\n self.firstCutRow.setObjectName(\"firstCutRow\")\n self.firstCutRowEdit = QtWidgets.QLineEdit()\n self.firstCutRowEdit.setToolTip(\"Must be greater than 4!\")\n self.firstCutRowEdit.setObjectName(\"firstCutRowEdit\")\n self.firstCutRowEdit.setText(str(self.defaultConf.get(\"firstCutRow\")))\n self.firstCutRowEdit.setValidator(QIntValidator(0, 1000))\n\n self.priorMapInterval = QtWidgets.QLabel(strs.strings.get(\"priorMapInterval\")[appconfig.language])\n self.priorMapInterval.setObjectName(\"priorMapInterval\")\n self.priorMapIntervalEdit = QtWidgets.QLineEdit()\n self.priorMapIntervalEdit.setObjectName(\"priorMapIntervalEdit\")\n self.priorMapIntervalEdit.setText(str(self.defaultConf.get(\"priorMapInterval\")))\n self.priorMapIntervalEdit.setValidator(QIntValidator(0, 1000))\n\n self.unregisteredMapInterval = QtWidgets.QLabel(strs.strings.get(\"unregisteredMapInterval\")[appconfig.language])\n self.unregisteredMapInterval.setObjectName(\"unregisteredMapInterval\")\n self.unregisteredMapIntervalEdit = QtWidgets.QLineEdit()\n self.unregisteredMapIntervalEdit.setObjectName(\"unregisteredMapIntervalEdit\")\n self.unregisteredMapIntervalEdit.setText(str(self.defaultConf.get(\"unregisteredMapInterval\")))\n self.unregisteredMapIntervalEdit.setValidator(QIntValidator(0, 10000))\n\n self.appendNum = QtWidgets.QLabel(strs.strings.get(\"appendNum\")[appconfig.language])\n self.appendNumCombox = QtWidgets.QComboBox(self)\n currAppendNum = str(self.defaultConf.get(\"appendNum\"))\n self.appendNumCombox.addItem(currAppendNum)\n appendNumList = strs.combobox.get(\"appendNum\").copy()\n appendNumList.remove(currAppendNum)\n self.appendNumCombox.addItems(appendNumList)\n\n self.collectionMode = QtWidgets.QLabel(strs.strings.get(\"collectionMode\")[appconfig.language])\n self.collectionMode.setObjectName(\"collectionMode\")\n self.collectionModeCombox = QtWidgets.QComboBox(self)\n self.collectionModeCombox.setObjectName(\"collectionModeCombox\")\n curColMode = self.defaultConf.get(\"collectionMode\")\n self.collectionModeCombox.addItem(curColMode)\n colModeList = strs.combobox.get(\"collectionMode\").copy()\n self.collectionModeCombox.addItems(self.translate_combox(self.checkList(colModeList)))\n\n self.configLayout.addRow(strs.strings.get(\"sampleNum\")[appconfig.language], self.sampleNumCombox)\n 
self.configLayout.addRow(strs.strings.get(\"sampleFreq\")[appconfig.language], self.sampleFreqCombox)\n        self.configLayout.addRow(strs.strings.get(\"pipeNum\")[appconfig.language], self.pipeNumEdit)\n        self.configLayout.addRow(strs.strings.get(\"startPipeIndex\")[appconfig.language], self.startPipeIndexEdit)\n        self.configLayout.addRow(strs.strings.get(\"patchSize\")[appconfig.language], self.patchSizeEdit)\n        self.configLayout.addRow(strs.strings.get(\"deltaDist\")[appconfig.language], self.deltaDistEdit)\n        self.configLayout.addRow(strs.strings.get(\"firstCutRow\")[appconfig.language], self.firstCutRowEdit)\n        self.configLayout.addRow(strs.strings.get(\"priorMapInterval\")[appconfig.language], self.priorMapIntervalEdit)\n        self.configLayout.addRow(strs.strings.get(\"unregisteredMapInterval\")[appconfig.language],\n                                 self.unregisteredMapIntervalEdit)\n        self.configLayout.addRow(strs.strings.get(\"appendNum\")[appconfig.language], self.appendNumCombox)\n        self.configLayout.addRow(strs.strings.get(\"collectionMode\")[appconfig.language], self.collectionModeCombox)\n\n        self.buttons.accepted.connect(self.accept)\n        self.buttons.rejected.connect(self.reject)\n\n        self.setLayout(self.mainGrid)\n\n    def get_data(self):\n        patchSize = self.patchSizeEdit.text()\n        deltaDist = self.deltaDistEdit.text()\n        firstCutRow = self.firstCutRowEdit.text()\n        priorMapInterval = self.priorMapIntervalEdit.text()\n        unregisteredInterval = self.unregisteredMapIntervalEdit.text()\n\n        try:\n            float(deltaDist)\n        except:\n            QMessageBoxSample.showDialog(self, \"Delta Dist. Value Error!!\", appconfig.ERROR)\n            self.deltaDistEdit.setText(str(self.defaultConf.get(\"deltaDist\")))\n            return\n\n        if patchSize.isnumeric() and firstCutRow.isnumeric() and priorMapInterval.isnumeric() \\\n                and unregisteredInterval.isnumeric():\n            patchSize = int(self.patchSizeEdit.text())\n            firstCutRow = int(self.firstCutRowEdit.text())\n            if int(self.sampleNumCombox.currentText()) - patchSize - firstCutRow < 0:\n                QMessageBoxSample.showDialog(self,\n                                             \"Patch size + first cut row cannot be greater than sample number!\", appconfig.ERROR)\n                # self.patchSizeEdit.setText(str(self.defaultConf.get(\"patchSize\")))\n                return\n            if firstCutRow < 4:\n                QMessageBoxSample.showDialog(self, \"FirstCutRow should be at least 4!\", appconfig.ERROR)\n                return\n\n        if int(self.startPipeIndexEdit.text()) >= int(self.pipeNumEdit.text()):\n            QMessageBoxSample.showDialog(self, \"Start Pipe Index must be smaller than Pipe Size! 
\", appconfig.ERROR)\n return\n\n radarSettings = {\n \"bytesNum\": int(int(self.sampleNumCombox.currentText()) * 2),\n \"sampleNum\": int(self.sampleNumCombox.currentText()),\n \"sampleFreq\": float(self.sampleFreqCombox.currentText()[0:-3]),\n \"pipeNum\": int(self.pipeNumEdit.text()),\n \"startPipeIndex\": int(self.startPipeIndexEdit.text()),\n \"patchSize\": int(self.patchSizeEdit.text()),\n \"deltaDist\": float(self.deltaDistEdit.text()),\n \"firstCutRow\": int(self.firstCutRowEdit.text()),\n \"priorMapInterval\": int(self.priorMapIntervalEdit.text()),\n \"unregisteredMapInterval\": int(self.unregisteredMapIntervalEdit.text()),\n \"appendNum\": int(self.appendNumCombox.currentText()),\n \"collectionMode\": self.collectionModeCombox.currentText(),\n }\n return radarSettings\n\n def save_config(self):\n pass\n\n def load_config(self):\n pass\n\n\ndef build_instruments(radarConfig, measWheelParams):\n \"\"\"\n To build radar configuration instruction according to current configuration\n\n :param radarConfig: current radar configuration\n :param measWheelParams: current measurement wheel parameters\n :return: the instruction byte which will be sent to radar\n \"\"\"\n import math\n bytesNum = int(math.log(radarConfig.get(\"bytesNum\"))/math.log(2) - 9)\n sampleRate = int(radarConfig.get(\"sampleFreq\") / 5.25)\n if sampleRate == 8:\n sampleRate = 0\n elif sampleRate == 4:\n sampleRate = 1\n elif sampleRate == 1:\n sampleRate = 3\n instruments = appconfig.basic_instruct_config().get(\"bytesNum\")\n instruments.append(bytesNum)\n instruments.append(appconfig.basic_instruct_config().get(\"sampleFreq\")[0])\n instruments.append(sampleRate)\n\n # Multiple pipe radar configuration\n if radarConfig.get(\"pipeNum\") > 1:\n instruments.extend(appconfig.basic_instruct_config().get(\"timeLag\"))\n instruments.extend(appconfig.basic_instruct_config().get(\"gainMode\"))\n instruments.extend(appconfig.basic_instruct_config().get(\"gainValue\"))\n\n # Measurement Wheel\n colMode = radarConfig.get(\"collectionMode\")\n if colMode in strs.strings.get(\"wheelMeas\"):\n instruments.append(appconfig.basic_instruct_config().get(\"wheelMeas\")[0])\n instruments.append(1)\n pulsePerCM = int(measWheelParams[appconfig.PULSE_PER_CM])\n instruments.append(appconfig.basic_instruct_config().get(\"precise\")[0])\n instruments.append(pulsePerCM)\n return instruments\n\n","sub_path":"configurations/radarconfig.py","file_name":"radarconfig.py","file_ext":"py","file_size_in_byte":12090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"502111556","text":"# -*- coding: utf-8 -*-\n# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport imp\nimport os\nimport sys\n\nfrom . import commands\nfrom . 
import plugins\n\n\nclass PluginManager(object):\n    \"\"\"\n    A class to load and manage plugins.\n\n    By default in asv, plugins are searched for in the `asv.plugins`\n    namespace package and in the `asv.commands` package.\n\n    Then, any modules specified in the ``plugins`` entry in the\n    ``asv.conf.json`` file are loaded.\n    \"\"\"\n    def __init__(self):\n        self._plugins = []\n\n    def load_plugins_in_path(self, namespace, path):\n        if not os.path.exists(path):\n            return\n\n        for root, dirs, files in os.walk(path):\n            for filename in files:\n                if (filename.endswith('.py') and filename != '__init__.py' and\n                        not filename.startswith('.')):\n                    filebase = os.path.splitext(filename)[0]\n                    filepath = os.path.join(root, filename)\n                    with open(filepath, 'rb') as fd:\n                        mod = imp.load_module(\n                            '{0}.{1}'.format(namespace, filebase), fd,\n                            filepath, ('.py', 'U', 1))\n\n                    self.init_plugin(mod)\n                    self._plugins.append(mod)\n\n    def import_plugin(self, name):\n        extended = False\n        if name.startswith('.'):\n            extended = True\n            sys.path.insert(0, '.')\n            name = name[1:]\n        try:\n            mod = __import__(name, {}, {}, [], level=0)\n            self.init_plugin(mod)\n            self._plugins.append(mod)\n        finally:\n            if extended:\n                del sys.path[0]\n\n    def init_plugin(self, mod):\n        if hasattr(mod, 'setup'):\n            mod.setup()\n\n    def run_hook(self, hook_name, args, kwargs):\n        for plugin in self._plugins:\n            if hasattr(plugin, hook_name):\n                getattr(plugin, hook_name)(*args, **kwargs)\n\n\nplugin_manager = PluginManager()\nplugin_manager.load_plugins_in_path(\n    'asv.commands',\n    os.path.dirname(commands.__file__))\nplugin_manager.load_plugins_in_path(\n    'asv.plugins', os.path.dirname(plugins.__file__))\n\ncommands.__doc__ = commands._make_docstring()\n","sub_path":"asv/plugin_manager.py","file_name":"plugin_manager.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"560080087","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/Newville/Codes/xraylarch/tests/test_larchexamples_basic.py\n# Compiled at: 2017-05-04 10:58:06\n\"\"\" Tests of Larch Scripts \"\"\"\nimport unittest, time, ast, numpy as np, os\nfrom sys import version_info\nfrom utils import TestCase\nfrom larch import Interpreter\n\nclass TestScripts(TestCase):\n    \"\"\"tests\"\"\"\n\n    def test_basic_interp(self):\n        self.runscript('interp.lar', dirname='../examples/basic/')\n        assert len(self.session.get_errors()) == 0\n        self.isNear('y0[1]', 0.535, places=2)\n        self.isNear('y1[1]', 0.829, places=2)\n        self.isNear('y2[1]', 0.477, places=2)\n\n    def test_basic_smooth(self):\n        self.runscript('smoothing.lar', dirname='../examples/basic/')\n        assert len(self.session.get_errors()) == 0\n        self.isNear('s_loren[5]', 0.207, places=2)\n        self.isNear('s_gauss[5]', 0.027, places=2)\n        self.isNear('s_voigt[5]', 0.256, places=2)\n\n    def test_basic_local_namespaces(self):\n        self.runscript('local_namespaces.lar', dirname='../examples/basic/')\n        assert len(self.session.get_errors()) == 0\n        self.isNear('x', 1000.0, places=4)\n\n    def test_basic_pi(self):\n        self.runscript('pi_archimedes.lar', dirname='../examples/basic/')\n        assert len(self.session.get_errors()) == 0\n        self.isNear('result', 3.1415926535897927, places=8)\n\n    def test_basic_use_params(self):\n        self.runscript('use_params.lar', dirname='../examples/basic/')\n        assert len(self.session.get_errors()) == 0\n        self.isNear('a', 0.76863, places=4)\n\n    def 
test_nested_runfiles(self):\n origdir = os.path.abspath(os.getcwd())\n dirname = os.path.abspath('larch_scripts')\n os.chdir(dirname)\n out, err = self.trytext(\"run('nested_outer.lar')\")\n os.chdir(origdir)\n out = out.split('\\n')\n assert len(out) > 4\n assert 'before nested_inner.lar' in out[0]\n assert 'in nested_inner.lar' in out[1]\n assert 'in nested_deep.lar' in out[2]\n assert 'in nested_inner.lar, after nested_deep' in out[3]\n assert 'in nested_outer.lar, after nested_inner' in out[4]\n self.isNear('deep_x', 5.0, places=2)\n\n def test_runfit(self):\n origdir = os.path.abspath(os.getcwd())\n dirname = os.path.abspath('larch_scripts')\n os.chdir(dirname)\n out, err = self.trytext(\"run('fit_constraint.lar')\")\n os.chdir(origdir)\n self.isTrue('out.nfev > 30')\n self.isTrue('out.nfev < 70')\n self.isNear('params.amp1.value', 6.05, places=2)\n self.isNear('params.amp2.value', 2.02, places=2)\n self.isNear('params.cen1.value', 3.01, places=2)\n self.isNear('params.cen1.stderr', 0.0073, places=2)\n self.isNear('out.chi_square', 8.4, places=2)\n\n\nif __name__ == '__main__':\n for suite in (TestScripts,):\n suite = unittest.TestLoader().loadTestsFromTestCase(suite)\n unittest.TextTestRunner(verbosity=13).run(suite)","sub_path":"pycfiles/xraylarch-0.9.47.tar/test_larchexamples_basic.py","file_name":"test_larchexamples_basic.py","file_ext":"py","file_size_in_byte":3098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"261920415","text":"#encoding=utf-8 \nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\nimport pandas as pd\nimport os\nimport shutil\n\n\n# user-defined function (e-exponential form)\ndef func(x, a, b, c):\n return a*x**2+b*x**1.5+c\n\ndef func2(x):\n return -PlutoCharon.mu()*(PlutoCharon.Rp*x)**2/PlutoCharon.Cp()+PlutoCharon.mu()*PlutoCharon.Rp**2*17**0.5/PlutoCharon.Cp()*x**1.5-PlutoCharon.Cs()/PlutoCharon.Cp()*(10-1)\n\n\ndef fitting(wcvalue=2,evalue=0.5):\n\n # load the data and the x, y scatter coordinates\n data = pd.read_csv(\"output_a_big_data__wc{0}_e{1}_exactly.csv\".format(wcvalue,evalue))\n x = data['orbital semimajor axis_ini']\n y = data['pluto_spin_ini']\n\n # nonlinear least-squares fitting\n guess = (-2.5,1,0)\n popt, pcov = curve_fit(func, x, y)\n \n # popt contains the fitted coefficients\n a = popt[0] \n b = popt[1]\n c = popt[2]\n yvals1 = func(x,a,b,c) # fitted y values\n \n \n # plot\n plot1 = plt.plot(x, y, 's',label=r'$\\alpha_c$={0},e={1}'.format(wcvalue,evalue))\n plot2 = plt.plot(x, yvals1,label='{0}x^2+{1}x^1.5+({2})'.format(round(a,3),round(b,3),round(c,3)))\n plt.ylabel('spin angular velocities of Pluto (mean motion)',fontsize=80)\n plt.xlabel('orbital semimajor axis (Pluto radius)',fontsize=80)\n plt.xticks(fontsize=80)\n plt.yticks(fontsize=80) \n plt.legend(loc=1,fontsize=30,fancybox=True, framealpha=0.1) # put the legend in the upper-right corner\n plt.savefig('new_no_fit')\n \n\n \n \n return\n\nfig=plt.figure(figsize=(54,36))\nfor i in [10,20,30,40]:\n fitting(i,0)","sub_path":"fitting_ini_data_re/fit_overaly.py","file_name":"fit_overaly.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"148816924","text":"import socket\nimport time\nhostname='localhost'\nportnumber=25000\naddress=(hostname,portnumber)\ntcpclient=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\ntcpclient.connect(address)\nwhile(1):\n message=raw_input(\"enter msg\")\n tcpclient.send(message.encode())\n print(\"Message sent to server:\")\n print(message)\n msg=tcpclient.recv(1024).decode()\n localtime1 = time.asctime( 
time.localtime(time.time()))\n print(\"msg received from server is:\",msg)\n print(\"time is:\",localtime1)\ntcpclient.close()\n","sub_path":"code/python code/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"216049064","text":"#!/usr/bin/env python3\n# coding=utf-8\n\n\"\"\"Module with tools for conversion of MLF file to Kaldi data filesystem structure.\nAuthor: Piotr Żelasko @ AGH 2015\nE-mail: pzelasko@agh.edu.pl\n\"\"\"\n\n__all__ = [\"load_mlf_as_kaldi_data\"]\n\nfrom kaldi_data_format import *\n\n\ndef load_mlf_as_kaldi_data(path_to_mlf, wav_root=None):\n \"\"\"Return KaldiData object constructed from input mlf file.\n If wav_root is provided, append it before extended_filename in wav.scp representation.\"\"\"\n\n import agh_corpus_interop as agh_corpus\n from mlf import Mlf\n\n mlf_in = Mlf(path_to_mlf)\n\n rec_id_gen = agh_corpus.RecordingIdGenerator()\n utter_id_gen = agh_corpus.UtteranceIdGenerator()\n\n # create *speaker-id* from mlf filename\n speaker_id = agh_corpus.create_speaker_id(mlf_in.path)\n\n wavscp_entries = []\n kaldi_data_entries = []\n\n # traverse mlf\n for wave, annotations in mlf_in.items():\n # tie wave name to *extended-filename*\n extended_filename = agh_corpus.create_extended_filename(wave)\n if wav_root: extended_filename = os.path.join(wav_root, extended_filename)\n\n for annotation in annotations:\n # create *recording-id* --> (wav.scp entry ready)\n recording_id = rec_id_gen.generate(wave)\n wavscp = KaldiWav(recording_id, extended_filename)\n\n # append to wav.scp\n wavscp_entries.append(wavscp)\n\n # *transcription* = (annotated text)\n transcription = agh_corpus.create_transcription(annotation.word)\n\n # create *utterance-id* from speaker-id + text (hashed text to avoid long ids?)\n # --> (utt2spk entry and text entry ready)\n utterance_id = utter_id_gen.generate(speaker_id, transcription)\n utt2spk = KaldiUtt2Spk(utterance_id, speaker_id)\n text = KaldiText(utterance_id, transcription)\n\n # convert starting and ending mlf times to *segment-begin* and *segment-end* --> (segments entry ready)\n segment_begin = agh_corpus.create_segment_marker(annotation.start_time)\n segment_end = agh_corpus.create_segment_marker(annotation.end_time)\n segment = KaldiSegment(utterance_id, recording_id, segment_begin, segment_end)\n\n # append to other files\n kaldi_data_entries.append(KaldiDataEntry(segment, text, utt2spk))\n\n all_data = KaldiData(entries_waves_tuple=(sorted(kaldi_data_entries), sorted(wavscp_entries)))\n\n return all_data\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser(description=\"\"\"Convert a single MLF file which annotates single speakers\n utterances into a directory,\n containing files: segments, text, utt2spk and wav.scp.\"\"\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n # positional arguments\n parser.add_argument(\"mlf\", help=\"Path to the MLF file.\")\n parser.add_argument(\"destination\", help=\"Path to the directory where 4 output files will be created.\")\n parser.add_argument(\"--wav-root\", default=None, help=\"Path that should be appended before every path to a wave file.\")\n\n args = parser.parse_args()\n\n # fill in data structures in kaldi-data-format.py and get KaldiData\n mlf_data = load_mlf_as_kaldi_data(args.mlf, args.wav_root)\n\n # write proper tree structure to disk\n mlf_data.save(args.destination)\n\n # 
Done\n","sub_path":"s5/local/agh-conv/mlf_to_kaldi_data.py","file_name":"mlf_to_kaldi_data.py","file_ext":"py","file_size_in_byte":3467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"329451996","text":"import sys\n\nclass Token:\n line = 0\n col = 0\n text = ''\n def copy(self):\n ret = Token()\n ret.line = self.line\n ret.col = self.col\n return ret\n\ndef tokenize(s):\n alpha = set(\"abcdefghijklmnopqrstuvwxyz0123456789'\")\n tok = Token()\n ret = []\n tmp = ''\n s += ' '\n for i in range(0, len(s)):\n if(s[i] in alpha):\n tmp += s[i]\n tok.col += 1\n else:\n if(len(tmp) > 0):\n tkk = tok.copy()\n tkk.col -= len(tmp)\n tkk.text = tmp\n ret.append(tkk)\n tmp = ''\n tok.col += 1\n if(s[i] == '\\n'):\n tok.col = 0\n tok.line += 1\n return ret\n\ndct = sys.argv[1]\ntxt = sys.argv[2]\nwith open(dct, 'r') as mf:\n dct = mf.read()\nwith open(txt, 'r') as mf:\n txt = mf.read()\ndct = [i.text for i in tokenize(dct)]\nfor i in tokenize(txt):\n if(i.text not in dct):\n print(str(i.line + 1) + ',\\t' + str(i.col + 1) + '\\t' + i.text)","sub_path":"practice_python/speller.py","file_name":"speller.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"377256532","text":"# ecoding=utf-8\r\n__author__ = \"liyinlong\"\r\n\r\nimport logging\r\nimport os\r\n\r\nclass log():\r\n def log(self,text): \r\n FILE = os.getcwd() \r\n logging.basicConfig(level=logging.INFO, \r\n format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s', \r\n datefmt='%a, %d %b %Y %H:%M:%S', \r\n filename=os.path.join(FILE,'log.txt'), \r\n filemode='w') \r\n \r\n return logging.info(text)","sub_path":"log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"255236103","text":"# Licensed to Elasticsearch under one or more contributor\n# license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright\n# ownership. Elasticsearch licenses this file to you under\n# the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on\n# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,\n# either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\n\nimport random\nimport os\nimport tempfile\nimport shutil\nimport subprocess\nimport time\nimport argparse\nimport logging\nimport sys\nimport re\n\nif sys.version_info[0] > 2:\n print('%s must use python 2.x (for the ES python client)' % sys.argv[0])\n\nfrom datetime import datetime\ntry:\n from elasticsearch import Elasticsearch\n from elasticsearch.exceptions import ConnectionError\n from elasticsearch.exceptions import TransportError\nexcept ImportError as e:\n print('Can\\'t import elasticsearch please install `sudo pip install elasticsearch`')\n sys.exit(1)\n\n# sometimes returns True\ndef rarely():\n return random.randint(0, 10) == 0\n\n# usually returns True\ndef frequently():\n return not rarely()\n\n# asserts the correctness of the given hits given they are sorted asc\ndef assert_sort(hits):\n values = [hit['sort'] for hit in hits['hits']['hits']]\n assert len(values) > 0, 'expected non emtpy result'\n val = min(values)\n for x in values:\n assert x >= val, '%s >= %s' % (x, val)\n val = x\n\n# Indexes the given number of document into the given index\n# and randomly runs refresh, optimize and flush commands\ndef index_documents(es, index_name, type, num_docs):\n logging.info('Indexing %s docs' % num_docs)\n for id in range(0, num_docs):\n es.index(index=index_name, doc_type=type, id=id, body={'string': str(random.randint(0, 100)),\n 'long_sort': random.randint(0, 100),\n 'double_sort' : float(random.randint(0, 100))})\n if rarely():\n es.indices.refresh(index=index_name)\n if rarely():\n es.indices.flush(index=index_name, force=frequently())\n if rarely():\n es.indices.optimize(index=index_name)\n logging.info('Flushing index')\n es.indices.flush(index=index_name)\n\ndef run_basic_asserts(es, index_name, type, num_docs):\n count = es.count(index=index_name)['count']\n assert count == num_docs, 'Expected %r but got %r documents' % (num_docs, count)\n for _ in range(0, num_docs):\n random_doc_id = random.randint(0, num_docs-1)\n doc = es.get(index=index_name, doc_type=type, id=random_doc_id)\n assert doc, 'Expected document for id %s but got %s' % (random_doc_id, doc)\n\n assert_sort(es.search(index=index_name,\n body={\n 'sort': [\n {'double_sort': {'order': 'asc'}}\n ]\n }))\n\n assert_sort(es.search(index=index_name,\n body={\n 'sort': [\n {'long_sort': {'order': 'asc'}}\n ]\n }))\n\n\ndef build_version(version_tuple):\n return '.'.join([str(x) for x in version_tuple])\n\ndef build_tuple(version_string):\n return [int(x) for x in version_string.split('.')]\n\ndef start_node(version, release_dir, data_dir, tcp_port, http_port):\n logging.info('Starting node from %s on port %s/%s' % (release_dir, tcp_port, http_port))\n cmd = [\n os.path.join(release_dir, 'bin/elasticsearch'),\n '-Des.path.data=%s' % data_dir,\n '-Des.path.logs=logs',\n '-Des.cluster.name=bwc_index_' + version, \n '-Des.network.host=localhost', \n '-Des.discovery.zen.ping.multicast.enabled=false',\n '-Des.script.disable_dynamic=true',\n '-Des.transport.tcp.port=%s' % tcp_port,\n '-Des.http.port=%s' % http_port\n ]\n if version.startswith('0.') or version.startswith('1.0.0.Beta') :\n cmd.append('-f') # version before 1.0 start in background automatically\n return subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\ndef create_client(http_port, timeout=30):\n logging.info('Waiting for node to startup')\n for _ in range(0, timeout):\n # TODO: ask Honza if there is a better way to do this?\n 
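Editorial note: create_client in this script is a poll-until-ready loop: attempt the call, treat connection errors as "not up yet", sleep, and give up after the timeout. The same pattern extracted into a standalone helper, as a sketch; wait_until and its parameters are illustrative, not part of this script:

import time

def wait_until(predicate, timeout=30, interval=1.0):
    # Poll predicate() until it returns something truthy or timeout seconds pass.
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            if predicate():
                return True
        except Exception:
            # as in create_client, an error here just means "retry later"
            pass
        time.sleep(interval)
    return False

# usage sketch: wait_until(lambda: client.cluster.health(wait_for_nodes=1), timeout=30)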
try:\n client = Elasticsearch([{'host': '127.0.0.1', 'port': http_port}])\n client.cluster.health(wait_for_nodes=1)\n client.count() # can we actually search or do we get a 503? -- anyway retry\n return client\n except (ConnectionError, TransportError):\n pass\n time.sleep(1)\n assert False, 'Timed out waiting for node for %s seconds' % timeout\n\ndef generate_index(client):\n client.indices.delete(index='test', ignore=404)\n num_shards = random.randint(1, 10)\n num_replicas = random.randint(0, 1)\n logging.info('Create single shard test index')\n\n mappings = {}\n if not version.startswith('2.'):\n # TODO: we need better \"before/onOr/after\" logic in python\n\n # backcompat test for legacy type level analyzer settings, see #8874\n mappings['analyzer_type1'] = {\n 'analyzer': 'standard',\n 'properties': {\n 'string_with_index_analyzer': {\n 'type': 'string',\n 'index_analyzer': 'standard'\n },\n }\n }\n if not version.startswith('0.20') or version == '0.20.6':\n mappings['analyzer_1']['properties']['completion_with_index_analyzer'] = {\n 'type': 'completion',\n 'index_analyzer': 'standard'\n }\n\n mappings['analyzer_type2'] = {\n 'index_analyzer': 'standard',\n 'search_analyzer': 'keyword',\n 'search_quote_analyzer': 'english',\n }\n\n client.indices.create(index='test', body={\n 'settings': {\n 'number_of_shards': 1,\n 'number_of_replicas': 0\n },\n 'mappings': mappings\n })\n health = client.cluster.health(wait_for_status='green', wait_for_relocating_shards=0)\n assert health['timed_out'] == False, 'cluster health timed out %s' % health\n\n num_docs = random.randint(10, 100)\n index_documents(client, 'test', 'doc', num_docs)\n logging.info('Running basic asserts on the data added')\n run_basic_asserts(client, 'test', 'doc', num_docs)\n\ndef snapshot_index(client, cfg):\n # Add bogus persistent settings to make sure they can be restored\n client.cluster.put_settings(body = {\n 'persistent': {\n 'cluster.routing.allocation.exclude.version_attr' : cfg.version\n }\n })\n client.indices.put_template(name = 'template_' + cfg.version.lower(), order = 0, body = {\n \"template\" : \"te*\",\n \"settings\" : {\n \"number_of_shards\" : 1\n },\n \"mappings\" : {\n \"type1\" : {\n \"_source\" : { \"enabled\" : False }\n }\n },\n \"aliases\" : {\n \"alias1\" : {},\n \"alias2\" : {\n \"filter\" : {\n \"term\" : {\"version\" : cfg.version }\n },\n \"routing\" : \"kimchy\"\n },\n \"{index}-alias\" : {}\n }\n });\n client.snapshot.create_repository(repository='test_repo', body={\n 'type': 'fs',\n 'settings': {\n 'location': cfg.repo_dir\n }\n })\n client.snapshot.create(repository='test_repo', snapshot='test_1', wait_for_completion=True)\n client.snapshot.delete_repository(repository='test_repo')\n\ndef compress_index(version, tmp_dir, output_dir):\n compress(tmp_dir, output_dir, 'index-%s.zip' % version, 'data')\n\ndef compress_repo(version, tmp_dir, output_dir):\n compress(tmp_dir, output_dir, 'repo-%s.zip' % version, 'repo')\n\ndef compress(tmp_dir, output_dir, zipfile, directory):\n abs_output_dir = os.path.abspath(output_dir)\n zipfile = os.path.join(abs_output_dir, zipfile)\n if os.path.exists(zipfile):\n os.remove(zipfile)\n logging.info('Compressing index into %s', zipfile)\n olddir = os.getcwd()\n os.chdir(tmp_dir)\n subprocess.check_call('zip -r %s %s' % (zipfile, directory), shell=True)\n os.chdir(olddir)\n\n\ndef parse_config():\n parser = argparse.ArgumentParser(description='Builds an elasticsearch index for backwards compatibility tests')\n parser.add_argument('version', metavar='X.Y.Z',\n 
help='The elasticsearch version to build an index for')\n parser.add_argument('--releases-dir', '-d', default='backwards', metavar='DIR',\n help='The directory containing elasticsearch releases')\n parser.add_argument('--output-dir', '-o', default='src/test/resources/org/elasticsearch/bwcompat',\n help='The directory to write the zipped index into')\n parser.add_argument('--tcp-port', default=9300, type=int,\n help='The port to use as the minimum port for TCP communication')\n parser.add_argument('--http-port', default=9200, type=int,\n help='The port to use as the minimum port for HTTP communication')\n cfg = parser.parse_args()\n\n cfg.release_dir = os.path.join(cfg.releases_dir, 'elasticsearch-%s' % cfg.version)\n if not os.path.exists(cfg.release_dir):\n parser.error('ES version %s does not exist in %s' % (cfg.version, cfg.releases_dir)) \n\n if not os.path.exists(cfg.output_dir):\n parser.error('Output directory does not exist: %s' % cfg.output_dir)\n\n cfg.tmp_dir = tempfile.mkdtemp()\n cfg.data_dir = os.path.join(cfg.tmp_dir, 'data')\n cfg.repo_dir = os.path.join(cfg.tmp_dir, 'repo')\n logging.info('Temp data dir: %s' % cfg.data_dir)\n logging.info('Temp repo dir: %s' % cfg.repo_dir)\n cfg.snapshot_supported = not (cfg.version.startswith('0.') or cfg.version == '1.0.0.Beta1')\n\n return cfg\n\ndef main():\n logging.basicConfig(format='[%(levelname)s] [%(asctime)s] %(message)s', level=logging.INFO,\n datefmt='%Y-%m-%d %I:%M:%S %p')\n logging.getLogger('elasticsearch').setLevel(logging.ERROR)\n logging.getLogger('urllib3').setLevel(logging.WARN)\n cfg = parse_config()\n try:\n node = start_node(cfg.version, cfg.release_dir, cfg.data_dir, cfg.tcp_port, cfg.http_port)\n client = create_client(cfg.http_port)\n generate_index(client)\n if cfg.snapshot_supported:\n snapshot_index(client, cfg)\n finally:\n if 'node' in vars():\n logging.info('Shutting down node with pid %d', node.pid)\n node.terminate()\n time.sleep(1) # some nodes take time to terminate\n compress_index(cfg.version, cfg.tmp_dir, cfg.output_dir)\n if cfg.snapshot_supported:\n compress_repo(cfg.version, cfg.tmp_dir, cfg.output_dir)\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n print('Caught keyboard interrupt, exiting...')\n","sub_path":"dev-tools/create-bwc-index.py","file_name":"create-bwc-index.py","file_ext":"py","file_size_in_byte":10648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"547326247","text":"import numpy as np\r\nimport networkx as nx\r\n\r\nimport database as db\r\nimport globle as g\r\n\r\n\r\n#create topology\r\ndef init_topology():\r\n\tflag = False\r\n\twhile(flag==False):\r\n\t\t#G = nx.gnp_random_graph(g.NN, g.POC, seed=g.SEED)\t\r\n\t\tG = nx.gnp_random_graph(g.NN, g.POC)\t\r\n\t\tflag = nx.is_connected(G) \r\n\tnx.relabel_nodes(G, g.D_IP, False)\r\n\tfor edge in G.edges():\r\n\t\tG.edges[edge]['weight'] = np.random.rand()\r\n\tdb.save({'G':G}) \r\n\tprint('Init topology...done!')\r\n\r\ndef topology(mode_i):\r\n\tG, = db.load(['G'])\r\n\tif g.L_MODE[mode_i][1] == 'Lasso':\r\n\t\tif g.L_MODE[mode_i][2] == 'SingleADMM':\r\n\t\t\tdb.save({'G':G},mode_i)\r\n\t\telif g.L_MODE[mode_i][2] == 'StarADMM':\r\n\t\t\tif g.L_MODE[mode_i][3] == 'random':\r\n\t\t\t\t#star cluster graph: the weights of edges are based on the shortest path of G\r\n\t\t\t\tGstar = nx.star_graph(g.NN-1) \r\n\t\t\t\tGstar = Gstar.to_directed()\r\n\t\t\t\tnx.relabel_nodes(Gstar, g.D_IP, 
False)\r\n\t\t\t\tGstar.remove_edges_from(tuple(Gstar.in_edges([g.L_IP[0]])))\t\r\n\t\t\t\tfor edge in Gstar.edges():\r\n\t\t\t\t\tGstar.edges[edge]['weight'] = nx.shortest_path_length(G, g.L_IP[0], edge[1], weight='weight')\r\n\t\t\t\t#make selfloops \r\n\t\t\t\tGstar.add_edges_from([(g.L_IP[0],g.L_IP[0], {'weight': 0.0})])\r\n\t\t\t\tdb.save({'G':Gstar},mode_i)\r\n\t\telif g.L_MODE[mode_i][2] == 'BridgeADMM':\r\n\t\t\tif g.L_MODE[mode_i][3] == 'complete':\r\n\t\t\t\tGd = G.to_directed()\t\r\n\t\t\t\t#make selfloops \r\n\t\t\t\tl_nodes=list(Gd.nodes())\r\n\t\t\t\td_w=[{'weight':0.0}]*nx.number_of_nodes(Gd)\r\n\t\t\t\tl_loops=zip(l_nodes,l_nodes,d_w)\r\n\t\t\t\tGd.add_edges_from(l_loops)\r\n\t\t\t\tdb.save({'G':Gd},mode_i)\r\n\r\n#get all bridges and workers\t\r\ndef get_bridges_workers(G): \r\n\tl_bridge = []\r\n\tfor edge in G.edges():\r\n\t\tl_bridge.append(edge[0])\t\t\t\t\t\r\n\tl_worker = list(G.nodes())\r\n\t#remove repetition\r\n\tl_bridge = list(set(l_bridge))\r\n\tl_worker = list(set(l_worker))\t\r\n\treturn l_bridge, l_worker\r\n\t\r\n#get own workers of ip\r\ndef get_ow(G, ip): \r\n\tl_tmp = list(G.successors(ip))\r\n\treturn l_tmp, len(l_tmp)\r\n\t\t\t\t\t\r\n#get own bridges of ip\r\ndef get_ob(G, ip): \r\n\tl_tmp = list(G.predecessors(ip))\r\n\treturn l_tmp, len(l_tmp)\t","sub_path":"topology.py","file_name":"topology.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"392533633","text":"\n\nfrom xai.brain.wordbase.nouns._guesstimate import _GUESSTIMATE\n\n#calss header\nclass _GUESSTIMATES(_GUESSTIMATE, ):\n\tdef __init__(self,): \n\t\t_GUESSTIMATE.__init__(self)\n\t\tself.name = \"GUESSTIMATES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"guesstimate\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_guesstimates.py","file_name":"_guesstimates.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"159403701","text":"from gomoku import *\nfrom mcts import *\n\n\ndef gomoku_example() -> GomokuState:\n \"\"\" We create an example game; in this game, the black has taken great\n advantage, and we want to use MCTS to help black to turn advantage\n into victory\n \"\"\"\n initial_state = GomokuState(use_default_heuristics=True, reward_player=0)\n initial_state.go((3, 5)).go((3, 6)).go((4, 6)).go((5, 7)).go((5, 5)) \\\n .go((6, 4)).go((4, 5)).go((6, 5)).go((4, 4)).go((4, 7)).go((6, 6)) \\\n .go((7, 7)).go((2, 5)).go((1, 5)).go((3, 3)).go((2, 2)).go((6, 7)) \\\n .go((3, 4))\n return initial_state\n\n\ndef gomoku_example_solution() -> GomokuState:\n \"\"\" This is suggested, but not unique, strategy for black to win in the\n example\n \"\"\"\n suggested_state = gomoku_example().__copy__()\n suggested_state.go((4, 3)).go((4, 2)).go((5, 3)).go((6, 3)).go((6, 2)) \\\n .go((2, 6)).go((7, 1))\n return suggested_state\n\n\ndef gomoku_example_simulate(select_policy, expand_policy,\n simulate_policy, backpropagate_policy,\n black_heuristics: bool = False,\n white_heuristics: bool = True,\n samples_per_step: int = 1000,\n random_seed: int = 0) -> GomokuState:\n black_state = gomoku_example().__copy__()\n black_state.heuristics = black_heuristics\n black_state.side = 0\n white_state = gomoku_example().__copy__()\n white_state.heuristics = white_heuristics\n white_state.side = 1\n\n black_mcts = MonteCarloSearchTree(black_state, samples=samples_per_step,\n max_tree_depth=15,\n 
tree_select_policy=select_policy,\n tree_expand_policy=expand_policy,\n rollout_policy=simulate_policy,\n backpropagate_method=backpropagate_policy)\n white_mcts = MonteCarloSearchTree(white_state, samples=samples_per_step,\n max_tree_depth=15,\n tree_select_policy=select_policy,\n tree_expand_policy=expand_policy,\n rollout_policy=simulate_policy,\n backpropagate_method=backpropagate_policy)\n while not black_state.is_terminal:\n # Black goes\n black_action = black_mcts.search_for_actions(search_depth=1,\n random_seed=random_seed)[0]\n random_seed += 1\n print(black_action)\n black_mcts.update_root(black_action)\n white_mcts.update_root(black_action)\n black_state.go(black_action.position)\n white_state.go(black_action.position)\n\n if not black_state.is_terminal:\n # White goes\n white_action = white_mcts.search_for_actions(\n search_depth=1, random_seed=random_seed)[0]\n random_seed += 1\n print(white_action)\n black_mcts.update_root(white_action)\n white_mcts.update_root(white_action)\n black_state.go(white_action.position)\n white_state.go(white_action.position)\n\n return black_state\n\n\ndef simulate_with_black_sample_arbitrarily(select_policy, expand_policy,\n simulate_policy,\n backpropagate_policy,\n random_seed: int = 1000,\n num_iters: int = 1000) -> None:\n \"\"\" The black player estimates possible locations with uniform distribution\n anywhere on the board\n \"\"\"\n (gomoku_example_simulate(black_heuristics=False, white_heuristics=True,\n random_seed=random_seed,\n select_policy=select_policy,\n expand_policy=expand_policy,\n simulate_policy=simulate_policy,\n backpropagate_policy=backpropagate_policy,\n samples_per_step=num_iters)\n .visualize())\n\n\ndef simulate_with_black_sample_neighborhood(select_policy, expand_policy,\n simulate_policy,\n backpropagate_policy,\n random_seed: int = 0,\n num_iters: int = 1000) -> None:\n \"\"\" The black player estimates possible locations only if a position's\n neighbor is not totally unoccupied\n \"\"\"\n (gomoku_example_simulate(black_heuristics=True, white_heuristics=True,\n random_seed=random_seed,\n select_policy=select_policy,\n expand_policy=expand_policy,\n simulate_policy=simulate_policy,\n backpropagate_policy=backpropagate_policy,\n samples_per_step=num_iters)\n .visualize())\n\n\nif __name__ == '__main__':\n print(\"===== Start of Simulation 1 =====\")\n simulate_with_black_sample_arbitrarily(select, expand,\n default_rollout_policy,\n backpropagate,\n random_seed=1000,\n num_iters=1000)\n\n print(\"\\n===== Start of Simulation 2 =====\")\n simulate_with_black_sample_neighborhood(select, expand,\n default_rollout_policy,\n backpropagate,\n random_seed=1000,\n num_iters=1000)\n","sub_path":"src/gomoku_example.py","file_name":"gomoku_example.py","file_ext":"py","file_size_in_byte":5900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"453098192","text":"'''\r\n@author: Quyen Doan, https://github.com/qdoan1651/DevMathPython\r\n@file: lcs/extract_items_revision_date/extract_items_revision_date.py\r\n@desc: Extract revision date for a list of items.\r\n@note: This version takes as input the workspace revision JSON which\r\n contains item revision date with the attribute creationDate. 
\r\n'''\r\nimport logging, os\r\nfrom lcs.retrieve_workspace_revision_json import retrieve_ws_revision_json\r\nfrom lcs.extract_reference_wsid import extract_reference_wsid\r\nfrom myutils import utils_files_io\r\n\r\ndef extract_items_revision_date(items_list, wsid, env):\r\n ''' Extract the latest revision date for a list of items (CGID) '''\r\n \r\n ''' 1. Retrieve workspace revision json '''\r\n msg = 'Retrieving workspace revision json...'\r\n logging.info(' ' + msg)\r\n ws_revision_json = retrieve_ws_revision_json.retrieve_ws_revision_json(wsid, env)\r\n \r\n ''' 2. Convert workspace revision json to hash table '''\r\n msg = 'Convertting workspace revision json to hash table...'\r\n print(msg)\r\n logging.info(' ' + msg)\r\n revision_dates = {}\r\n for item in ws_revision_json['revisions']:\r\n cgid = item['coords']['fileId']\r\n revision_date = item['creationDate']\r\n if cgid not in revision_dates.keys():\r\n revision_dates[cgid] = revision_date\r\n else:\r\n print('*** Warning: Item {} appears again...'.format(cgid))\r\n \r\n ''' 3. Retrieving items revision date from table '''\r\n msg = 'Retrieving revision date from hash table...'\r\n print(msg)\r\n logging.info(' ' + msg)\r\n result = []\r\n for item in items_list:\r\n logging.info(' Processing item {}...'.format(item))\r\n if item in revision_dates.keys():\r\n revision_date = revision_dates[item]\r\n result.append(item + ', ' + revision_date)\r\n else:\r\n print('*** Warning: Item {} does not exist in workspace'.format(item))\r\n \r\n ''' Sort the result list by date '''\r\n print('Sorting the revision dates...')\r\n result = sorted(result, key=lambda x: x.split(', ')[1], reverse=True)\r\n \r\n return result\r\n\r\nif __name__ == '__main__':\r\n logfile = 'C:/Workspace/Sandbox/log.txt'\r\n if os.path.isfile(logfile): os.remove(logfile)\r\n logging.basicConfig(filename=logfile, level=logging.INFO)\r\n \r\n print('Reading list of item from file...')\r\n items_list = utils_files_io.read_list_from_file('items_list.txt')\r\n \r\n print('Extracting workspace ID for AlgoEx AT workspace...')\r\n wsid = extract_reference_wsid.extract_reference_wsid('AT', 'prod')\r\n print(wsid)\r\n \r\n print('Extracting revision date for items from AT workspace...')\r\n revision_dates = extract_items_revision_date(items_list, wsid, 'prod')\r\n \r\n print('Writing workspace revision JSON to disk...')\r\n utils_files_io.write_list_to_file(revision_dates, 'items_revision_date.txt')\r\n\r\n ","sub_path":"DevMathPython/lcs/extract_items_revision_dates/extract_items_revision_date.py","file_name":"extract_items_revision_date.py","file_ext":"py","file_size_in_byte":2907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"71099044","text":"from translate import Translator\nfrom time import sleep \n\nidiomas = Translator(from_lang=\"english\", to_lang=\"Portuguese\")\n\nresposta = idiomas.translate(input(\"Entre com a palavra para traduzir do inglês para português: \"))\n\nprint(\"Processando...\")\nsleep(2)\nprint(resposta.upper())","sub_path":"ProjetosPessoais/translator.py","file_name":"translator.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"161749890","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: 
/Users/robertoalotufo/Library/Enthought/Canopy_32bit/User/lib/python2.7/site-packages/ia636/iaisdftsym.py\n# Compiled at: 2014-08-21 22:30:04\nfrom numpy import *\n\ndef iaisdftsym(F):\n if len(F.shape) == 1:\n F = F[newaxis, newaxis, :]\n if len(F.shape) == 2:\n F = F[newaxis, :, :]\n n, m, p = F.shape\n x, y, z = indices((n, m, p))\n Xnovo = mod(-1 * x, n)\n Ynovo = mod(-1 * y, m)\n Znovo = mod(-1 * z, p)\n aux = conjugate(F[(Xnovo, Ynovo, Znovo)])\n return alltrue(abs(F - aux) < 0.001)","sub_path":"pycfiles/ia636-0.11.8.macosx-10.6-i386.tar/iaisdftsym.py","file_name":"iaisdftsym.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"31289656","text":"# Libraries\nimport time\n\n\n# Definitions\nclass Semaforo:\n\tLUCES = [\n\t\t{ \"nombre\": \"________VDE________\", \"color\": \"\\x1b[0;30;42m\", \"demora\": 0 },\n\t\t{ \"nombre\": \"________AMA________\", \"color\": \"\\x1b[3;30;43m\", \"demora\": 0 },\n\t\t{ \"nombre\": \"________RJO________\", \"color\": \"\\x1b[0;30;41m\", \"demora\": 0 },\n\t\t{ \"nombre\": \"________APA________\", \"color\": \"\\x1b[0m\", \"demora\": 0 }\n\t]\n\n\tdef __init__(self, id, tiempoVde, tiempoAma, tiempoRjo, tiempoApa):\n\t\tself.id = id\n\t\tself.__modo = \"DIURNO\"\n\t\tSemaforo.LUCES[0][\"demora\"] = tiempoVde\n\t\tSemaforo.LUCES[1][\"demora\"] = tiempoAma\n\t\tSemaforo.LUCES[2][\"demora\"] = tiempoRjo\n\t\tSemaforo.LUCES[3][\"demora\"] = tiempoApa\n\t\n\tdef __encender(self, luz):\n\t\tprint(Semaforo.LUCES[luz][\"color\"] + Semaforo.LUCES[luz][\"nombre\"] + \"\\x1b[0m\")\n\t\ttime.sleep(Semaforo.LUCES[luz][\"demora\"])\n\n\tdef ciclar(self):\n\t\tif (self.__modo == \"DIURNO\"):\n\t\t\tself.__encender(0)\n\t\t\tself.__encender(1)\n\t\t\tself.__encender(2)\n\t\telse:\n\t\t\tself.__encender(1)\n\t\t\tself.__encender(-1)\n\t\tprint()\n\t\n\tdef setearModo(self, modo):\n\t\tself.__modo = modo\n\n\tdef verModo(self):\n\t\treturn self.__modo\n","sub_path":"semana14/semaforo.py","file_name":"semaforo.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"470003636","text":"\n\nfrom xai.brain.wordbase.nouns._inroad import _INROAD\n\n#class header\nclass _INROADS(_INROAD, ):\n\tdef __init__(self,): \n\t\t_INROAD.__init__(self)\n\t\tself.name = \"INROADS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"inroad\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_inroads.py","file_name":"_inroads.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"194502683","text":"# coding=utf-8\n\n\"\"\"\nYou are given two non-empty linked lists representing two non-negative integers. The digits are stored in reverse order and each of their nodes contains a single digit. 
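Editorial note: storing the digits least-significant first lets the two lists be added like column arithmetic straight from the heads, carrying as you go. For the sample below: 2+5=7 carry 0, then 4+6=10 gives digit 0 carry 1, then 3+4+1=8, i.e. 7 -> 0 -> 8. The same carry loop over plain digit lists, as an illustrative sketch independent of the ListNode solution in this record:

def add_reversed_digits(a, b):
    # a and b are digit lists, least-significant digit first: 342 is [2, 4, 3]
    result, carry, i = [], 0, 0
    while i < len(a) or i < len(b) or carry:
        total = (a[i] if i < len(a) else 0) + (b[i] if i < len(b) else 0) + carry
        carry, digit = divmod(total, 10)
        result.append(digit)
        i += 1
    return result

# add_reversed_digits([2, 4, 3], [5, 6, 4]) == [7, 0, 8]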
Add the two numbers and return it as a linked list.\n\nYou may assume the two numbers do not contain any leading zero, except the number 0 itself.\n\nInput: (2 -> 4 -> 3) + (5 -> 6 -> 4)\nOutput: 7 -> 0 -> 8\n\n\"\"\"\n\n\n# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution(object):\n def addTwoNumbers(self, l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n if not l1 or not l2:\n raise Exception('l1 or l2 is None')\n node = ListNode((l1.val + l2.val) % 10)\n carry_bit = (l1.val + l2.val) / 10\n l1, l2 = l1.next, l2.next\n answer = node\n\n while l1 or l2:\n node1_value = l1.val if l1 else 0\n node2_value = l2.val if l2 else 0\n node.next = ListNode((node1_value + node2_value + carry_bit) % 10)\n carry_bit = (node1_value + node2_value + carry_bit) / 10\n l1 = l1.next if l1 and l1.next else None\n l2 = l2.next if l2 and l2.next else None\n node = node.next\n\n if carry_bit:\n node.next = ListNode(carry_bit)\n\n return answer\n","sub_path":"LeetCode/2. Add Two Numbers.py","file_name":"2. Add Two Numbers.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"568638735","text":"# -*- coding: utf-8 -*-\nSurnameFreqDict = {\nu'файзулина':2,\nu'чернецова':2,\nu'сосницкая':2,\nu'исхакова':3,\nu'топильский':2,\nu'блажко':4,\nu'фаинких':3,\nu'береговская':2,\nu'сачков':4,\nu'елисеева':20,\nu'панеш':2,\nu'мукабенова':2,\nu'абдрашитова':2,\nu'фазлутдинов':2,\nu'тюлькин':5,\nu'парфенчик':2,\nu'курашин':2,\nu'куренкова':4,\nu'краснов':54,\nu'долгов':36,\nu'суворов':42,\nu'алесенко':3,\nu'мосягина':2,\nu'апарин':7,\nu'чупахин':2,\nu'акинина':2,\nu'завадская':3,\nu'прокопович':5,\nu'фильчаков':2,\nu'деньга':2,\nu'таненя':2,\nu'бурьян':3,\nu'пуховская':2,\nu'мухин':30,\nu'скрипка':8,\nu'ленков':2,\nu'мирончук':2,\nu'плужник':3,\nu'дзигоев':2,\nu'зверев':32,\nu'семиколенных':2,\nu'ключников':9,\nu'бурнаков':2,\nu'кулиш':14,\nu'полянина':2,\nu'тулаева':2,\nu'мажидов':4,\nu'ситник':4,\nu'колобков':4,\nu'бобин':2,\nu'варнавский':5,\nu'алипова':3,\nu'лобачева':3,\nu'валетова':2,\nu'важенина':2,\nu'жарков':18,\nu'межевич':3,\nu'ведерников':13,\nu'томина':2,\nu'подоплелов':2,\nu'сапунков':2,\nu'котляр':6,\nu'тырков':2,\nu'жулинский':2,\nu'семикин':3,\nu'яловенко':2,\nu'личман':2,\nu'лобановский':2,\nu'фабричнов':4,\nu'жидко':2,\nu'хворов':3,\nu'мильчакова':2,\nu'скотников':2,\nu'симкин':4,\nu'худилайнен':2,\nu'докучаева':6,\nu'сальков':2,\nu'луканин':2,\nu'онуфриенко':3,\nu'грехов':7,\nu'хорькова':2,\nu'гуменный':2,\nu'леденцов':2,\nu'авагимян':2,\nu'рудов':2,\nu'гаман':3,\nu'лобаков':2,\nu'ромашина':4,\nu'михайлович':2,\nu'вторушин':3,\nu'юршин':2,\nu'антонец':2,\nu'таразанов':3,\nu'березнев':4,\nu'зайцева':64,\nu'рогожникова':3,\nu'каюков':4,\nu'малюта':2,\nu'герман':12,\nu'ческидов':2,\nu'канашов':2,\nu'кудейкин':2,\nu'талалаев':4,\nu'шарыгин':3,\nu'могилевич':2,\nu'малышкина':2,\nu'аралов':2,\nu'алёхин':6,\nu'арефьева':9,\nu'оленева':3,\nu'хитрый':2,\nu'разумовская':4,\nu'афонина':17,\nu'поминов':3,\nu'пушнин':2,\nu'козыро':2,\nu'бурканов':2,\nu'блохина':6,\nu'кудашкин':6,\nu'якубович':5,\nu'кушнирук':2,\nu'кузичев':2,\nu'ярулин':2,\nu'ярощук':5,\nu'изаак':2,\nu'ожерельева':2,\nu'шишлянников':4,\nu'забирова':2,\nu'кузмичев':3,\nu'маринчев':2,\nu'пеший':2,\nu'сенина':4,\nu'шкляр':3,\nu'красовская':7,\nu'ростовцев':11,\nu'ковжун':2,\nu'жиляков':9,\nu'кассина':3,\nu'никишина':3,\nu'тотьмянин':2
,\nu'домшенко':2,\nu'завалишин':5,\nu'гулян':2,\nu'кашицын':2,\nu'топчий':4,\nu'гусейнова':6,\nu'лукьянов':51,\nu'янков':3,\nu'андреенков':3,\nu'брезгин':4,\nu'малова':5,\nu'молотков':3,\nu'федорченко':10,\nu'гриньков':2,\nu'дроботенко':2,\nu'соломенцев':3,\nu'буневич':2,\nu'богомолов':38,\nu'касаткина':11,\nu'яровиков':4,\nu'скрипко':6,\nu'трошина':7,\nu'бамбуров':3,\nu'шустова':6,\nu'меньшакова':2,\nu'безменов':3,\nu'прокуроров':3,\nu'медведева':49,\nu'шилина':3,\nu'мосеева':3,\nu'шкалов':2,\nu'николюкин':2,\nu'савинский':3,\nu'сапон':2,\nu'юдин':64,\nu'бунеев':2,\nu'веретенников':11,\nu'каноков':3,\nu'царькова':6,\nu'сулимова':3,\nu'чернобровкин':2,\nu'поплавский':6,\nu'слотин':2,\nu'езерская':2,\nu'федотова':30,\nu'сыроежко':2,\nu'бегляков':2,\nu'никуленков':2,\nu'гулидова':2,\nu'биянов':2,\nu'ермоленко':19,\nu'кузнеченкова':2,\nu'прохоренко':10,\nu'салфетников':2,\nu'дубник':2,\nu'гагиев':5,\nu'антошина':3,\nu'шолохов':5,\nu'мандриченко':2,\nu'гринчак':2,\nu'банцикин':2,\nu'грабарник':2,\nu'негерев':2,\nu'христов':3,\nu'хомицкий':2,\nu'самонов':2,\nu'баташев':2,\nu'пашкин':4,\nu'минько':4,\nu'эрлих':3,\nu'евланов':4,\nu'балабан':2,\nu'балабай':3,\nu'швецов':28,\nu'малафеев':3,\nu'панасенко':17,\nu'свинтицкая':2,\nu'борисовец':2,\nu'цыбенко':3,\nu'лыткин':4,\nu'кечкин':2,\nu'смыковский':2,\nu'обрубов':2,\nu'гонцов':4,\nu'тужиков':6,\nu'прокопов':10,\nu'красов':3,\nu'тюняев':2,\nu'пологов':2,\nu'корягин':7,\nu'батталов':2,\nu'родин':30,\nu'шмелева':15,\nu'остроумова':2,\nu'луканина':4,\nu'ванюхин':2,\nu'святкина':2,\nu'пшеничников':4,\nu'тимохина':6,\nu'беркус':2,\nu'лотков':2,\nu'шатилова':2,\nu'хуаде':2,\nu'фурманов':2,\nu'пантелеев':21,\nu'гордеев':43,\nu'карсаков':2,\nu'белоненко':3,\nu'дышкантюк':2,\nu'ахметшарипов':2,\nu'строева':2,\nu'седов':32,\nu'коротин':4,\nu'титова':48,\nu'барашкова':2,\nu'милкин':2,\nu'игнатьева':26,\nu'фарутина':2,\nu'волков':166,\nu'ткачёв':4,\nu'малашихин':2,\nu'дугужев':2,\nu'степанов':133,\nu'целикова':2,\nu'гришков':4,\nu'мурадханов':2,\nu'трубников':14,\nu'гвалт':2,\nu'балалаев':2,\nu'вагнер':4,\nu'герасимович':4,\nu'костина':21,\nu'смолов':2,\nu'арзуманян':2,\nu'мишенин':2,\nu'аббасов':3,\nu'лисова':2,\nu'гора':3,\nu'горб':2,\nu'бывшева':2,\nu'горн':3,\nu'марьяш':2,\nu'березовская':5,\nu'бельмач':2,\nu'челпанов':2,\nu'чебунин':3,\nu'шумаев':3,\nu'пинаева':2,\nu'дудакова':2,\nu'кулаго':2,\nu'кулага':2,\nu'томс':2,\nu'федонин':4,\nu'ковригин':3,\nu'хмелев':4,\nu'пупынин':2,\nu'бирюк':3,\nu'буслов':3,\nu'котиков':2,\nu'гайнуллина':2,\nu'гребенник':3,\nu'икрянников':3,\nu'епишина':2,\nu'михалин':2,\nu'плахина':2,\nu'повчун':2,\nu'бабинова':2,\nu'пузин':5,\nu'карачев':3,\nu'кияненко':2,\nu'романычев':2,\nu'борисенко':43,\nu'каленов':4,\nu'суставов':2,\nu'созонов':4,\nu'алхасов':4,\nu'кацура':2,\nu'уколова':3,\nu'люлин':3,\nu'колобова':6,\nu'лисичкина':2,\nu'ефанова':3,\nu'сыроижко':2,\nu'бобкова':9,\nu'сутырин':2,\nu'максюта':2,\nu'семагин':2,\nu'алферова':5,\nu'долматов':15,\nu'шугуров':2,\nu'фейгина':2,\nu'брылев':4,\nu'александрова':42,\nu'гаврикова':8,\nu'сокольников':3,\nu'глеков':2,\nu'лушин':7,\nu'хорошавина':2,\nu'юрин':9,\nu'блинова':11,\nu'гуд':2,\nu'светлова':6,\nu'карнаух':5,\nu'талдыкин':3,\nu'дема':3,\nu'татарников':3,\nu'марущак':3,\nu'бруцкая':2,\nu'кожин':14,\nu'власевский':2,\nu'сайфутдинова':4,\nu'изотова':7,\nu'пугачева':9,\nu'баклан':2,\nu'горяева':5,\nu'лещинский':4,\nu'матвеенко':8,\nu'гирев':2,\nu'чичикин':2,\nu'каликина':2,\nu'толстова':10,\nu'курнаков':2,\nu'аушева':3,\nu'братчиков':7,\nu'ванчугов':4,\nu'шкляев':3,\nu'холстинина'
:2,\nu'дьячков':10,\nu'точилкин':2,\nu'мунаев':4,\nu'шаркова':2,\nu'белошицкий':2,\nu'лопин':3,\nu'русакович':5,\nu'доброхотов':2,\nu'чернышова':26,\nu'компаниец':3,\nu'пеньковская':2,\nu'искужин':2,\nu'сауляк':3,\nu'курасов':3,\nu'клименков':2,\nu'астраханцева':2,\nu'мазуров':9,\nu'бадыков':2,\nu'камальдинов':2,\nu'кузина':14,\nu'малышенко':4,\nu'смальцер':2,\nu'шейко':5,\nu'шиятова':2,\nu'вареник':3,\nu'дербенев':8,\nu'тимофеева':51,\nu'еньшин':2,\nu'корнеев':46,\nu'белявцев':2,\nu'мухамедшин':2,\nu'глушаков':3,\nu'раевский':9,\nu'сайгушев':2,\nu'власенкова':2,\nu'бруно':2,\nu'устюгов':5,\nu'шаньгин':3,\nu'нагимов':2,\nu'еськин':3,\nu'пальцев':4,\nu'фёдоров':15,\nu'хубиев':8,\nu'стрельцов':16,\nu'костров':10,\nu'моргоева':3,\nu'ровнер':2,\nu'марков':71,\nu'жовтоножко':2,\nu'тульская':3,\nu'умалатов':3,\nu'маклашов':2,\nu'туев':2,\nu'шмелёв':2,\nu'лядов':3,\nu'яровенко':7,\nu'дубовиков':3,\nu'антонченко':2,\nu'нарышкин':6,\nu'корчев':2,\nu'свиридовская':2,\nu'гребенкин':7,\nu'мишарина':4,\nu'гункин':2,\nu'рогачева':2,\nu'гурьянов':23,\nu'гугин':2,\nu'молошонок':2,\nu'бавыкин':6,\nu'насырова':3,\nu'татаренкова':3,\nu'баталов':6,\nu'сапарин':2,\nu'масликов':4,\nu'пузиков':8,\nu'анипченко':3,\nu'колмыков':9,\nu'искулов':3,\nu'тоцкая':2,\nu'юлдашев':3,\nu'джгаркава':2,\nu'долгова':8,\nu'орешкин':9,\nu'селецкий':2,\nu'рукавишникова':2,\nu'корольчук':2,\nu'батуев':5,\nu'арсланова':3,\nu'шикалов':3,\nu'силивоненко':2,\nu'полукеев':2,\nu'сорогин':3,\nu'жигадло':2,\nu'смагин':10,\nu'лубнина':3,\nu'сабанова':2,\nu'короткин':2,\nu'короткий':5,\nu'рой':2,\nu'малашин':2,\nu'царенко':6,\nu'сукманова':2,\nu'амалицкая':2,\nu'супранович':2,\nu'фонарев':2,\nu'рощупкина':3,\nu'паскарь':6,\nu'жмурин':2,\nu'межидов':3,\nu'редченко':3,\nu'рыбникова':6,\nu'файзуллина':3,\nu'урбан':3,\nu'яруллин':6,\nu'алленов':3,\nu'недашковский':4,\nu'линевич':3,\nu'кожухова':3,\nu'сечков':2,\nu'скок':2,\nu'колесова':11,\nu'меденцев':2,\nu'черенков':7,\nu'пестов':14,\nu'старкова':9,\nu'говорин':2,\nu'пудовкина':4,\nu'ромашкин':11,\nu'минкина':3,\nu'джураев':2,\nu'лагута':3,\nu'багреева':2,\nu'шмидт':16,\nu'слепова':2,\nu'щендрыгин':2,\nu'шумаков':4,\nu'самойленко':24,\nu'жаворонков':5,\nu'кипень':2,\nu'карандеев':3,\nu'шаипов':4,\nu'аксаков':3,\nu'пода':2,\nu'мелешин':3,\nu'хесин':2,\nu'заславский':4,\nu'галиахметов':2,\nu'чуклина':2,\nu'мужиченко':2,\nu'сарафанова':2,\nu'жиделев':2,\nu'щербинина':7,\nu'воронина':31,\nu'беппаев':2,\nu'пшеничникова':9,\nu'киндеев':3,\nu'галиуллина':2,\nu'кривенко':15,\nu'шипилов':15,\nu'кобышев':2,\nu'коротаева':8,\nu'андриевский':8,\nu'чеботарева':5,\nu'мартысенко':2,\nu'маринова':2,\nu'шумилкин':2,\nu'леонидов':7,\nu'даниленко':23,\nu'стёпкин':2,\nu'хлопова':2,\nu'пустоваров':3,\nu'кленин':2,\nu'пискунова':5,\nu'макогон':3,\nu'татаринов':19,\nu'набоких':2,\nu'багинский':2,\nu'горобец':13,\nu'ефременко':9,\nu'скуридин':2,\nu'лопарева':4,\nu'сироткина':6,\nu'кулакова':17,\nu'шабнов':2,\nu'рязанцев':18,\nu'ковтунов':2,\nu'балакин':10,\nu'смолинский':2,\nu'якушина':3,\nu'юшачков':2,\nu'михайлик':3,\nu'михайлин':5,\nu'кваша':7,\nu'калмыкова':11,\nu'подгузов':4,\nu'бородулина':5,\nu'донской':6,\nu'дадашов':2,\nu'хрипушин':4,\nu'донсков':4,\nu'липилин':2,\nu'кресс':2,\nu'ижбердеева':2,\nu'рабченюк':2,\nu'тычкова':2,\nu'звада':3,\nu'недосекин':3,\nu'васькин':2,\nu'головащенко':2,\nu'нескреба':3,\nu'диканский':2,\nu'багратуни':2,\nu'ковынев':2,\nu'клинков':2,\nu'лисняк':2,\nu'тихов':2,\nu'подгальный':2,\nu'лемешко':8,\nu'ишутин':3,\nu'белоус':18,\nu'выборнов':2,\nu'черепухин':2,\nu'апанасенко':5,\nu'гл
ыбочко':3,\nu'дьячкова':7,\nu'солдатенков':3,\nu'гиниятов':2,\nu'бронников':7,\nu'тупицин':4,\nu'платыгин':2,\nu'оленина':5,\nu'мазаев':4,\nu'высоцкий':15,\nu'мыльникова':6,\nu'ишмуратов':4,\nu'абраменко':9,\nu'аганов':2,\nu'конончик':2,\nu'шелудченко':2,\nu'кремлев':3,\nu'суханов':25,\nu'матвейчева':2,\nu'канунникова':3,\nu'купреев':6,\nu'ельшин':2,\nu'кирако��ян':5,\nu'горланов':4,\nu'колочаров':2,\nu'мякин':2,\nu'шибкова':2,\nu'качесов':3,\nu'башкатов':6,\nu'пряхин':6,\nu'шаронова':3,\nu'пиунова':2,\nu'финенко':2,\nu'мягкова':5,\nu'прийма':3,\nu'сартаков':3,\nu'семенцова':2,\nu'пискорский':2,\nu'суздальцев':5,\nu'беляшкина':2,\nu'ханнанов':4,\nu'гнусин':4,\nu'шафрановская':2,\nu'науменко':23,\nu'козынченко':2,\nu'дружинин':21,\nu'эльканов':6,\nu'лепилин':4,\nu'токовая':2,\nu'волесский':2,\nu'чешков':2,\nu'колташев':2,\nu'ширков':2,\nu'кущев':7,\nu'биктимиров':6,\nu'обернихин':2,\nu'махрова':2,\nu'немов':6,\nu'водолазская':2,\nu'зель':5,\nu'мецлер':5,\nu'алхимов':2,\nu'буйлина':2,\nu'брябрин':2,\nu'абдулкеримов':3,\nu'иваницкий':8,\nu'подлесных':3,\nu'магомадов':9,\nu'рустамханов':2,\nu'катанова':2,\nu'быканов':3,\nu'фурман':6,\nu'овчарова':4,\nu'конти':2,\nu'синегубов':3,\nu'шумайлова':3,\nu'белогорцев':2,\nu'гадалин':2,\nu'марчак':2,\nu'мазитов':3,\nu'шонин':2,\nu'соклаков':2,\nu'прокопенко':32,\nu'байгускаров':2,\nu'чайков':2,\nu'жегалина':4,\nu'коробкова':5,\nu'пхешхов':2,\nu'пчелин':5,\nu'федоткин':3,\nu'хмара':3,\nu'мальнев':2,\nu'буркова':8,\nu'макиев':3,\nu'агапова':4,\nu'спицин':2,\nu'альмухаметов':3,\nu'айдамиров':4,\nu'кочнева':3,\nu'евсюк':2,\nu'куликовская':2,\nu'дерев':2,\nu'александрин':2,\nu'маганов':2,\nu'шевцов':34,\nu'страхов':9,\nu'редкозубов':4,\nu'черткова':3,\nu'ахмадеев':4,\nu'ханафеев':2,\nu'канбеков':2,\nu'потёмкин':3,\nu'стасишин':2,\nu'салимуллин':2,\nu'клочкова':10,\nu'комиссаров':28,\nu'аджигитов':2,\nu'бых':2,\nu'доронин':18,\nu'маклакова':3,\nu'шерстобитова':3,\nu'газиев':7,\nu'чапаева':2,\nu'пасюта':3,\nu'марискин':2,\nu'сокуренко':2,\nu'ракова':2,\nu'карнажицкий':2,\nu'благова':3,\nu'горячук':2,\nu'салахутдинова':3,\nu'дударов':3,\nu'еремчук':2,\nu'буцкая':2,\nu'абарыков':2,\nu'квачев':2,\nu'семернин':2,\nu'микава':2,\nu'грибов':15,\nu'василевич':5,\nu'хамхоев':2,\nu'вахитов':4,\nu'адамчук':2,\nu'гостяев':2,\nu'слипченко':3,\nu'малиновская':13,\nu'трофимчук':4,\nu'горовцов':2,\nu'клименкова':3,\nu'репников':4,\nu'аминев':6,\nu'клепач':2,\nu'панюкова':2,\nu'апыхтин':3,\nu'боев':13,\nu'келлер':3,\nu'ерыгин':2,\nu'письменный':2,\nu'ван':2,\nu'вдовыдченко':2,\nu'дзейтов':3,\nu'сенчук':2,\nu'ложкина':5,\nu'серебренников':8,\nu'ган':2,\nu'гай':4,\nu'ростовский':3,\nu'гайнетдинова':2,\nu'кушко':2,\nu'куницына':3,\nu'тюркин':2,\nu'косинов':10,\nu'блажнов':2,\nu'габов':2,\nu'гагиева':5,\nu'столетов':2,\nu'клевцов':8,\nu'тульчинская':2,\nu'даногуев':2,\nu'долгушин':2,\nu'мартынюк':14,\nu'груздева':9,\nu'рыбалов':2,\nu'шаталин':2,\nu'илясов':4,\nu'буяков':2,\nu'воронков':23,\nu'тетюшев':2,\nu'майко':2,\nu'козюлин':2,\nu'богатова':6,\nu'исманов':2,\nu'тумасов':2,\nu'батаева':2,\nu'рассолов':4,\nu'квасникова':2,\nu'шарков':9,\nu'цоков':2,\nu'бабаева':8,\nu'колоскова':3,\nu'мостепаненко':2,\nu'акперов':2,\nu'слободчиков':4,\nu'евтихов':4,\nu'погодаев':2,\nu'шабалин':19,\nu'фещук':3,\nu'школенко':2,\nu'эпп':2,\nu'момотов':4,\nu'телегина':5,\nu'сироткин':18,\nu'любимов':16,\nu'галицын':6,\nu'брагина':12,\nu'сошников':6,\nu'тюменцев':5,\nu'филонец':2,\nu'лыжин':3,\nu'сачко':3,\nu'сёмкин':2,\nu'боженова':2,\nu'подозеров':2,\nu'бурдюг':2,\nu'федюшин':3,\nu'щапова':2,\nu'сух
их':9,\nu'казахмедов':2,\nu'кадесников':2,\nu'терехов':28,\nu'турова':4,\nu'асадуллин':4,\nu'марданов':5,\nu'кулюкина':2,\nu'ерошин':4,\nu'темная':2,\nu'майоров':35,\nu'колодинская':2,\nu'медведь':5,\nu'складчиков':3,\nu'семеняк':2,\nu'лагутин':11,\nu'тырышк��н':3,\nu'зангиев':2,\nu'авилочкин':2,\nu'зюба':2,\nu'будаков':2,\nu'сырова':2,\nu'зыкин':8,\nu'габисов':2,\nu'хайрулин':3,\nu'вагапов':3,\nu'агаджанов':4,\nu'канатникова':2,\nu'флегонтов':2,\nu'парахина':2,\nu'меренков':4,\nu'зюзюкин':2,\nu'раров':2,\nu'чипизубова':2,\nu'петренко':58,\nu'щапин':2,\nu'базарнов':2,\nu'выдыш':2,\nu'плетень':2,\nu'кудренко':5,\nu'валишина':2,\nu'корябкина':2,\nu'вострикова':6,\nu'белобрагин':2,\nu'крошко':2,\nu'крошка':2,\nu'хозин':2,\nu'балин':4,\nu'исхаков':8,\nu'матрохина':2,\nu'кошкаров':3,\nu'жалнина':2,\nu'шиляева':2,\nu'ватолина':2,\nu'чикин':14,\nu'гонтарь':8,\nu'лошакова':2,\nu'штабной':2,\nu'братухина':2,\nu'зотова':18,\nu'маленко':2,\nu'неплюев':2,\nu'выжанов':2,\nu'наурбиев':2,\nu'шалина':2,\nu'башкирова':3,\nu'садов':5,\nu'хорохордин':2,\nu'цитриков':2,\nu'косянчук':2,\nu'купавых':3,\nu'приймак':8,\nu'кирш':2,\nu'скрябин':9,\nu'пушная':2,\nu'аниканов':2,\nu'волохов':5,\nu'красницкий':3,\nu'буравов':3,\nu'лагода':4,\nu'чубукин':2,\nu'маркеев':3,\nu'старицын':2,\nu'симакова':10,\nu'абакумов':12,\nu'сапов':3,\nu'михеева':23,\nu'чуйко':4,\nu'постникова':7,\nu'левшов':3,\nu'калиничева':4,\nu'аитов':2,\nu'лукьянец':2,\nu'расулов':2,\nu'куприянов':24,\nu'номоконов':2,\nu'леухин':4,\nu'мощенко':3,\nu'жеребцов':16,\nu'спиваков':3,\nu'червонная':2,\nu'святенко':2,\nu'клыков':6,\nu'книга':3,\nu'веретено':2,\nu'терсков':2,\nu'байко':2,\nu'швиденко':2,\nu'митрошин':5,\nu'сединкина':3,\nu'есакова':3,\nu'амхадов':2,\nu'найчук':2,\nu'сухобоков':2,\nu'ананьева':6,\nu'козицкий':3,\nu'шепета':2,\nu'безручко':2,\nu'лушкин':2,\nu'кускова':4,\nu'черданцев':5,\nu'шевкун':3,\nu'ланцов':2,\nu'дугин':5,\nu'тян':2,\nu'шалякина':2,\nu'абаимов':2,\nu'авдеев':52,\nu'спорыхина':2,\nu'мюхкеря':2,\nu'макаркин':7,\nu'дайбов':3,\nu'кушу':3,\nu'рогачев':10,\nu'любиченко':2,\nu'алёшин':8,\nu'ишуткина':2,\nu'журов':3,\nu'урнышев':2,\nu'замараева':3,\nu'голова':4,\nu'пинигин':6,\nu'хуранов':2,\nu'востриков':17,\nu'бойцова':11,\nu'серый':6,\nu'круглик':2,\nu'филимонов':35,\nu'бедрицкий':2,\nu'воротникова':2,\nu'рохмистров':2,\nu'коннова':5,\nu'стороженко':5,\nu'секретарева':2,\nu'фурсов':15,\nu'кустов':16,\nu'дунаев':14,\nu'демченков':2,\nu'коршунова':16,\nu'шишкалов':2,\nu'варосян':3,\nu'клещев':6,\nu'удалых':2,\nu'гайнуллин':3,\nu'безпрозванных':2,\nu'ковпак':4,\nu'финк':4,\nu'шишканов':2,\nu'фомченко':2,\nu'федурин':2,\nu'маевский':5,\nu'котенко':10,\nu'бакалов':3,\nu'новоселецкий':2,\nu'чернышов':38,\nu'красноштанов':2,\nu'дашко':2,\nu'кологривый':2,\nu'борисова':80,\nu'голков':2,\nu'тишечко':2,\nu'гирфанов':2,\nu'меринов':3,\nu'цыплаков':2,\nu'солодянкин':3,\nu'мазурина':2,\nu'габитов':4,\nu'якшин':2,\nu'миляев':9,\nu'раменский':2,\nu'нюдиков':2,\nu'нечепуренко':7,\nu'ломова':4,\nu'каменщиков':2,\nu'меньшов':9,\nu'мещанский':2,\nu'грицкевич':2,\nu'федосеев':28,\nu'мазин':3,\nu'рыкалин':2,\nu'мещалкин':2,\nu'лукянцев':2,\nu'буякова':2,\nu'дерябина':4,\nu'камнев':9,\nu'любина':2,\nu'спирин':23,\nu'бартенев':5,\nu'баштовой':2,\nu'чернятьев':2,\nu'шашлов':2,\nu'вепринцева':2,\nu'приданников':2,\nu'шенделев':2,\nu'подольская':4,\nu'шамин':8,\nu'чалый':5,\nu'кузык':2,\nu'горелова':6,\nu'каракетов':2,\nu'христофоров':3,\nu'оботурова':2,\nu'аушев':5,\nu'тюленев':4,\nu'правдин':3,\nu'жамбалова':2,\nu'лукинов':3,\nu'дорогов':6,\nu'котлоб
ай':2,\nu'фонов':2,\nu'веремчук':2,\nu'ворожцова':2,\nu'булатецкая':2,\nu'кошелев':24,\nu'миняев':3,\nu'боякова':2,\nu'кряквин':2,\nu'даллакян':2,\nu'данченко':11,\nu'х��саев':2,\nu'скурихин':5,\nu'шипулина':2,\nu'рудяк':4,\nu'тишкин':3,\nu'николина':5,\nu'комолова':4,\nu'ковалёв':8,\nu'ризванов':4,\nu'мелещенко':3,\nu'гаммершмидт':3,\nu'ерин':13,\nu'удальцов':6,\nu'мусатов':9,\nu'трусова':8,\nu'некрасова':17,\nu'ивкова':3,\nu'митрофанова':17,\nu'дамдинов':4,\nu'фоменков':4,\nu'кардапольцев':2,\nu'ясинская':5,\nu'просянников':3,\nu'гальямов':2,\nu'гунин':2,\nu'плынов':2,\nu'уманец':3,\nu'темботов':2,\nu'луцик':4,\nu'брынцалов':2,\nu'щетникова':2,\nu'кузовлев':7,\nu'титенок':2,\nu'нациевский':2,\nu'симак':2,\nu'котков':4,\nu'клещин':2,\nu'кучеренко':23,\nu'марчуков':6,\nu'арзамасцев':4,\nu'викулин':5,\nu'емец':3,\nu'тихонович':2,\nu'барбышев':2,\nu'ковшов':4,\nu'герасев':2,\nu'курамшина':3,\nu'ивановский':10,\nu'турчина':2,\nu'ладоха':2,\nu'шувалов':21,\nu'текеев':3,\nu'полежаев':11,\nu'щербатов':2,\nu'лапутин':3,\nu'екименко':4,\nu'пятков':3,\nu'соляник':5,\nu'янченко':11,\nu'солянин':2,\nu'трощенко':2,\nu'аввакумов':3,\nu'туманов':16,\nu'салаева':2,\nu'рылов':2,\nu'лапаев':3,\nu'радионова':6,\nu'саитов':5,\nu'шайдуллин':5,\nu'чугунова':9,\nu'лудкова':2,\nu'колояров':2,\nu'палехин':3,\nu'горюнова':8,\nu'александрычев':2,\nu'кирпичев':7,\nu'блинов':29,\nu'галкин':52,\nu'кичигин':9,\nu'данилова':43,\nu'сбытов':2,\nu'закутько':2,\nu'еремкин':3,\nu'сергунин':2,\nu'сташкевич':5,\nu'панасюк':4,\nu'вотякова':2,\nu'черябкин':2,\nu'потемкина':3,\nu'дмитрюк':2,\nu'масленко':2,\nu'рудин':3,\nu'рудик':2,\nu'демура':2,\nu'малый':4,\nu'боос':3,\nu'бовкуш':2,\nu'новосельцева':4,\nu'волошко':5,\nu'пустовой':7,\nu'болдарев':2,\nu'смотрова':5,\nu'уманская':2,\nu'тарасович':2,\nu'стрельникова':11,\nu'стальнова':3,\nu'малышкин':5,\nu'рыжак':3,\nu'зиннуров':4,\nu'синицин':6,\nu'кузякова':2,\nu'лещинская':2,\nu'силакова':2,\nu'карловская':2,\nu'карнаухова':2,\nu'лещенко':17,\nu'ахматова':4,\nu'савко':3,\nu'филонов':7,\nu'кудлаева':2,\nu'переведенцев':2,\nu'сухорученко':3,\nu'струков':16,\nu'хачатрян':7,\nu'костырин':2,\nu'дерюшев':2,\nu'бяков':3,\nu'хасанова':4,\nu'бурцев':17,\nu'заиченко':5,\nu'голованов':17,\nu'калинина':43,\nu'неустроев':5,\nu'березняк':2,\nu'пометов':2,\nu'филатова':29,\nu'косырев':4,\nu'панчин':2,\nu'карачева':2,\nu'гапеев':3,\nu'гумеров':9,\nu'кардаш':2,\nu'перепелкина':4,\nu'гречишкина':5,\nu'цыренов':4,\nu'рощина':8,\nu'усиков':3,\nu'лякишев':2,\nu'оськин':10,\nu'бабанин':4,\nu'джабраилова':2,\nu'батраев':3,\nu'гуменчук':2,\nu'мажурин':2,\nu'пашкова':14,\nu'аверина':10,\nu'васенков':3,\nu'севастьянов':15,\nu'ухин':5,\nu'лупина':4,\nu'кошкина':6,\nu'поляев':2,\nu'сидорова':45,\nu'зенин':9,\nu'солодчук':2,\nu'нахимов':2,\nu'седельников':5,\nu'крохмаль':4,\nu'боров':2,\nu'баширова':4,\nu'алешкина':2,\nu'крот':3,\nu'золотарев':34,\nu'кутузова':14,\nu'обвинцев':3,\nu'кожухов':5,\nu'трубицын':11,\nu'артамонова':14,\nu'лелеко':2,\nu'шереметьев':7,\nu'кулаковский':2,\nu'маковеев':3,\nu'мерзликина':2,\nu'желонкина':2,\nu'чебыкина':2,\nu'фетищева':3,\nu'шерстюк':3,\nu'евстигнеева':7,\nu'чебыкин':7,\nu'пушкарев':22,\nu'береснев':7,\nu'гаркавенко':3,\nu'титовец':2,\nu'столпец':2,\nu'смага':2,\nu'тулаев':2,\nu'голосной':3,\nu'осипов':82,\nu'алимурадов':2,\nu'корчак':3,\nu'пауков':3,\nu'ромахина':2,\nu'хромова':5,\nu'пархомов':2,\nu'мошков':4,\nu'дудин':15,\nu'зернюков':2,\nu'юданов':3,\nu'слепнев':5,\nu'холодилова':2,\nu'рассохин':7,\nu'калистратова':4,\nu'васечко':2,\nu'голяков':3,\nu'грушкина':2,\n
u'гридчин':6,\nu'закиев':2,\nu'чертков':10,\nu'наволокин':2,\nu'св��нцов':8,\nu'манцевич':2,\nu'чащина':2,\nu'сафаров':5,\nu'редлер':2,\nu'султанмурадов':2,\nu'андрющенко':13,\nu'доровских':3,\nu'кострюков':4,\nu'куренной':2,\nu'курина':3,\nu'мозговой':5,\nu'кореневская':2,\nu'камалетдинов':6,\nu'картавых':4,\nu'степаненко':42,\nu'малаховская':2,\nu'ручьева':2,\nu'рожнова':3,\nu'марин':3,\nu'байшев':3,\nu'литвякова':2,\nu'клёнов':2,\nu'катыхина':2,\nu'маринов':3,\nu'притыкин':3,\nu'заруцкий':3,\nu'трубицин':3,\nu'шляндин':2,\nu'голубовский':3,\nu'сарапулов':3,\nu'сугробов':4,\nu'рыбакова':16,\nu'смирнова':185,\nu'сластилина':2,\nu'сарксян':2,\nu'чалова':2,\nu'гринцевич':2,\nu'чушкин':3,\nu'турушев':2,\nu'шпагина':2,\nu'саяпина':3,\nu'семакин':8,\nu'гагуев':2,\nu'рябиченко':2,\nu'гайнанов':2,\nu'шепитько':2,\nu'батчаев':3,\nu'стромов':3,\nu'раенко':2,\nu'паршина':15,\nu'протасов':14,\nu'третьяков':44,\nu'болдина':3,\nu'суровов':3,\nu'хозяшев':2,\nu'гаус':2,\nu'калмаков':2,\nu'шарыпов':6,\nu'закомалдин':2,\nu'аленин':4,\nu'потапкин':5,\nu'милосердова':8,\nu'яременко':6,\nu'боярских':3,\nu'гоголь':2,\nu'афашагов':2,\nu'косыгин':4,\nu'мирошкина':2,\nu'панфилов':22,\nu'рачук':2,\nu'заблуда':2,\nu'семенов':158,\nu'михайловская':4,\nu'черненкова':3,\nu'ефимчук':2,\nu'заслонкина':2,\nu'чередниченко':16,\nu'тихашин':2,\nu'аникеенко':4,\nu'захватов':2,\nu'юревич':4,\nu'собачкин':2,\nu'барбакадзе':3,\nu'банин':6,\nu'щавинская':2,\nu'михеев':56,\nu'румянцева':15,\nu'капанин':2,\nu'коркина':4,\nu'черников':30,\nu'ковальков':2,\nu'терехова':17,\nu'колыгин':2,\nu'поддубский':4,\nu'суданова':2,\nu'боцвин':2,\nu'нилов':6,\nu'сенчуков':3,\nu'колесов':16,\nu'алыпов':2,\nu'двойникова':4,\nu'хитрин':2,\nu'хитрик':2,\nu'гавриш':7,\nu'бутырин':5,\nu'колтунова':3,\nu'трушков':7,\nu'поповцев':2,\nu'гимранов':2,\nu'черник':4,\nu'черний':2,\nu'трудов':2,\nu'михалкин':2,\nu'солдатов':32,\nu'гальцов':2,\nu'пешикова':2,\nu'глазунов':17,\nu'кем':2,\nu'зебелян':2,\nu'харебов':2,\nu'шнякин':2,\nu'кучер':10,\nu'алексеенко':37,\nu'окатьев':2,\nu'жемалдинова':3,\nu'сержантов':3,\nu'ислентьев':3,\nu'авезов':2,\nu'битюцкий':2,\nu'шатков':2,\nu'китаева':4,\nu'метелкина':4,\nu'абуев':2,\nu'фартушнов':2,\nu'мякишев':5,\nu'соколик':2,\nu'матушкин':6,\nu'чернега':2,\nu'галимов':20,\nu'головачук':2,\nu'ужегов':4,\nu'артемьев':33,\nu'уразов':7,\nu'прокопьев':16,\nu'лисицын':10,\nu'десятков':2,\nu'чемезов':4,\nu'есауленко':3,\nu'старова':2,\nu'беретарь':3,\nu'шарый':2,\nu'петрунин':15,\nu'гром':4,\nu'хочиев':4,\nu'прохорчик':3,\nu'кирин':3,\nu'кирик':2,\nu'толкачева':7,\nu'лапик':2,\nu'юдаков':6,\nu'карнеева':3,\nu'вихарев':8,\nu'кунгурова':4,\nu'войкин':4,\nu'чумаров':2,\nu'сауков':2,\nu'татауров':4,\nu'архипова':21,\nu'яцук':2,\nu'грошева':5,\nu'нагайцев':3,\nu'коломийченко':2,\nu'журавлева':41,\nu'комолов':3,\nu'нагаева':4,\nu'буслова':2,\nu'чечулин':2,\nu'пузырев':3,\nu'мишнев':4,\nu'гречин':5,\nu'мазепа':2,\nu'теренин':2,\nu'кривобок':2,\nu'карташев':3,\nu'гребнева':10,\nu'лаптев':40,\nu'гнездилова':5,\nu'патрушев':7,\nu'кириенко':10,\nu'шатин':2,\nu'голикова':10,\nu'вчерашний':2,\nu'буянтуева':3,\nu'колесник':34,\nu'евглевская':2,\nu'андриянов':8,\nu'безукладников':2,\nu'чудецкий':3,\nu'бояркин':6,\nu'волощенко':3,\nu'титоренко':2,\nu'богодухова':2,\nu'мацулевич':2,\nu'вайсфельд':2,\nu'бухтоярова':2,\nu'темнов':3,\nu'очиров':7,\nu'никитин':131,\nu'брежнева':5,\nu'сиволапов':2,\nu'парадовский':2,\nu'хаюк':2,\nu'баковец':2,\nu'талалай':4,\nu'щаникова':2,\nu'посельский':2,\nu'покидина':2,\nu'чебаненко':3,\nu'авакянц':2,\nu'стафиевск
ий':2,\nu'чвирюк':2,\nu'гаевский':2,\nu'лапенков':2,\nu'галушко':5,\nu'саяхова':2,\nu'ставицкий':2,\nu'луцкий':2,\nu'наумкин':5,\nu'гришутин':2,\nu'косицын':2,\nu'иванин':3,\nu'маслова':25,\nu'лопаева':2,\nu'шашков':9,\nu'казнин':2,\nu'синёв':2,\nu'савочкина':2,\nu'дорошкевич':4,\nu'тормозов':2,\nu'сережкин':2,\nu'сурганов':2,\nu'фитисов':2,\nu'еникеев':5,\nu'бурулев':2,\nu'клушин':2,\nu'бабайцева':2,\nu'гилев':10,\nu'паньков':14,\nu'галикаева':2,\nu'казанцева':22,\nu'панфилова':14,\nu'объедкова':4,\nu'садретдинова':3,\nu'санжапов':2,\nu'семенцов':4,\nu'вихров':4,\nu'новгородцев':2,\nu'канев':4,\nu'кимяев':3,\nu'куприянова':11,\nu'билалов':2,\nu'тюриков':3,\nu'широбоков':3,\nu'клюйкова':2,\nu'мякинин':2,\nu'ваниев':3,\nu'лях':10,\nu'абдрашитов':3,\nu'щитов':3,\nu'суглобова':2,\nu'добрянский':2,\nu'юсубов':2,\nu'бертов':2,\nu'кроневальд':2,\nu'бурляев':3,\nu'алаудинов':3,\nu'калач':2,\nu'яцкин':3,\nu'семенова':85,\nu'галеева':6,\nu'петрусенко':3,\nu'началов':2,\nu'пантела':2,\nu'самусев':2,\nu'миронченко':2,\nu'шерстюков':2,\nu'шепелев':14,\nu'грязнов':14,\nu'макидон':2,\nu'коробейников':11,\nu'шигаев':2,\nu'куимов':3,\nu'чмырь':3,\nu'кадочников':3,\nu'малолеткин':2,\nu'прищепова':2,\nu'сысолятина':2,\nu'шанина':5,\nu'абрамович':3,\nu'чекрыгин':2,\nu'ермошкин':2,\nu'гребенникова':6,\nu'дрожжинов':2,\nu'нефедьев':5,\nu'махмутова':5,\nu'близнец':4,\nu'козориз':2,\nu'патракова':2,\nu'снигирев':2,\nu'тамаев':2,\nu'шумихин':4,\nu'сандаков':2,\nu'красюков':4,\nu'бобошин':2,\nu'албегова':2,\nu'деменкова':3,\nu'андрейков':2,\nu'бледнов':3,\nu'сомова':3,\nu'дымченко':2,\nu'щебуняева':3,\nu'горева':3,\nu'олейник':38,\nu'гулько':3,\nu'окутин':3,\nu'тиханов':2,\nu'картавцев':4,\nu'извеков':5,\nu'залевский':5,\nu'северова':2,\nu'дубов':12,\nu'шакирова':6,\nu'яночкина':3,\nu'крупнов':9,\nu'самигуллин':4,\nu'капельян':2,\nu'шмидко':2,\nu'никитенко':23,\nu'лесков':9,\nu'маметов':3,\nu'панышев':2,\nu'федосеева':14,\nu'капранов':3,\nu'глумов':4,\nu'монастырский':3,\nu'чиликин':2,\nu'хижнякова':2,\nu'милеев':4,\nu'голобоков':2,\nu'житенев':3,\nu'лындин':2,\nu'медведков':6,\nu'столяренко':2,\nu'кузенёк':3,\nu'никишова':3,\nu'сарангов':2,\nu'полторак':3,\nu'горелов':24,\nu'зырянова':10,\nu'голубенко':5,\nu'подосинников':3,\nu'самсонова':13,\nu'ялалов':4,\nu'оганян':4,\nu'кулькова':2,\nu'иванков':14,\nu'лущик':3,\nu'лапковский':2,\nu'изутдинов':2,\nu'кичиков':3,\nu'усик':5,\nu'присяч':2,\nu'пешкова':8,\nu'рыбников':5,\nu'перминова':4,\nu'аксенова':17,\nu'федяева':3,\nu'нагорный':17,\nu'рехтин':2,\nu'кокорин':19,\nu'капустина':11,\nu'игнатьев':35,\nu'паламарчук':11,\nu'селиверстов':12,\nu'сухарь':2,\nu'сапронова':9,\nu'кукин':5,\nu'чекушкин':4,\nu'хазова':3,\nu'ярышев':2,\nu'будников':3,\nu'шаманов':4,\nu'яхъяев':2,\nu'липская':2,\nu'наумов':48,\nu'урванцев':3,\nu'гришанова':5,\nu'османов':11,\nu'потапчук':2,\nu'исупова':5,\nu'сивченко':2,\nu'ермаченков':2,\nu'загуменов':2,\nu'элерт':2,\nu'батов':6,\nu'тихоновский':2,\nu'гаранина':6,\nu'князева':33,\nu'спиренков':2,\nu'скокова':2,\nu'гусельников':3,\nu'борщ':3,\nu'домрачева':2,\nu'галлямов':3,\nu'боташев':5,\nu'тряпичников':2,\nu'можин':2,\nu'каблуков':3,\nu'новокшонов':2,\nu'кащенко':3,\nu'прохиро':2,\nu'ергемлидзе':2,\nu'могильников':3,\nu'зубов':22,\nu'егоров':120,\nu'соколенко':5,\nu'кривобоков':2,\nu'бадма-халгаев':4,\nu'карась':2,\nu'агаева':3,\nu'чистякова':24,\nu'мисяков':2,\nu'московских':2,\nu'бобровников':2,\nu'яхнюк':2,\nu'буртасов':2,\nu'вышегородцев':2,\nu'русецкая':3,\nu'гавришин':2,\nu'дорофеев':37,\nu'кондратова':5,\nu'искандаров':4,\nu'малюгина':
2,\nu'жернов':3,\nu'русинова':2,\nu'гуревич':5,\nu'грымзина':3,\nu'акопянц':2,\nu'строев':4,\nu'евстратенко':2,\nu'гревцев':3,\nu'яралиев':2,\nu'давыдик':3,\nu'тингаев':2,\nu'андреевский':2,\nu'казаноков':3,\nu'чигвинцев':2,\nu'ивенских':2,\nu'рагулин':6,\nu'икаев':3,\nu'дрогин':2,\nu'ижболдин':2,\nu'борукаев':2,\nu'алтухова':5,\nu'силаков':4,\nu'преображенский':3,\nu'триппель':2,\nu'дуюн':2,\nu'лезин':2,\nu'осипчук':6,\nu'еронин':2,\nu'набиев':15,\nu'заякин':2,\nu'серякова':2,\nu'газизов':3,\nu'авраменко':18,\nu'родионова':29,\nu'дидух':2,\nu'климина':2,\nu'лыгина':4,\nu'подгорбунских':5,\nu'рябков':9,\nu'тетеркина':2,\nu'вашкель':2,\nu'кирпиченко':2,\nu'принцев':3,\nu'колбас':4,\nu'евменов':3,\nu'боков':14,\nu'плешивцев':4,\nu'гребенкина':2,\nu'норкина':2,\nu'козачук':3,\nu'лега':2,\nu'инодворский':2,\nu'жигалова':2,\nu'супрун':7,\nu'сотникова':14,\nu'зарицкий':4,\nu'нуржанов':2,\nu'чермит':2,\nu'халиуллина':3,\nu'балова':3,\nu'господаров':2,\nu'сычев':28,\nu'литвинцев':4,\nu'цику':2,\nu'переверзев':10,\nu'федорко':2,\nu'серба':3,\nu'васенин':3,\nu'камбулова':2,\nu'касаева':3,\nu'желнов':5,\nu'могушков':2,\nu'сысо':2,\nu'зимоглядов':2,\nu'жмылев':2,\nu'мещанинова':3,\nu'белопухов':2,\nu'бороздин':3,\nu'крутов':9,\nu'бойченко':12,\nu'стуканов':2,\nu'крутой':2,\nu'гапоненко':10,\nu'базина':2,\nu'белобрагина':2,\nu'товмасян':2,\nu'пиманов':3,\nu'коровкин':4,\nu'кирьяков':2,\nu'скороходова':2,\nu'лимонов':4,\nu'спирина':9,\nu'никандров':4,\nu'какаулина':2,\nu'гизатулин':6,\nu'малхасян':3,\nu'папушев':2,\nu'ильковский':2,\nu'хертек':3,\nu'стариков':21,\nu'вьюшков':3,\nu'пугин':3,\nu'божко':12,\nu'донник':2,\nu'лушина':3,\nu'зориков':2,\nu'мурзаева':3,\nu'матиевич':2,\nu'букаев':2,\nu'малкина':2,\nu'щукина':15,\nu'халатов':2,\nu'самохвалова':5,\nu'махотин':2,\nu'климчук':2,\nu'щеголихин':2,\nu'лаврик':5,\nu'начкебия':2,\nu'ельчанинов':5,\nu'еруков':3,\nu'боревич':2,\nu'коломейцев':5,\nu'пасынкова':2,\nu'марухленко':2,\nu'валеева':3,\nu'кирпичникова':2,\nu'сторожук':2,\nu'тамонов':2,\nu'пенкин':10,\nu'грачева':19,\nu'куляс':3,\nu'елкина':3,\nu'сватиков':2,\nu'шальнова':2,\nu'окатенко':3,\nu'ломоносов':4,\nu'шаган':2,\nu'селифанов':3,\nu'игошкин':2,\nu'малиновский':12,\nu'нелюбов':3,\nu'батдыев':2,\nu'бахметьева':2,\nu'фадеев':35,\nu'силаева':8,\nu'боднар':3,\nu'сыроватко':2,\nu'богайчук':2,\nu'дорожкина':5,\nu'гапонов':5,\nu'гераскина':2,\nu'брянская':2,\nu'финаев':2,\nu'раевская':5,\nu'палюх':2,\nu'простак':2,\nu'баранюк':2,\nu'моторина':4,\nu'абдуллина':7,\nu'корнюхин':2,\nu'светкин':2,\nu'арустамян':3,\nu'балаклеец':2,\nu'аббасова':3,\nu'кутовая':2,\nu'дунюшкина':2,\nu'ляшков':3,\nu'демидович':4,\nu'троицкая':5,\nu'молев':8,\nu'касевич':2,\nu'картамышев':6,\nu'россель':2,\nu'хайлов':3,\nu'литус':2,\nu'генералов':10,\nu'перепелица':18,\nu'старухин':2,\nu'артюхина':2,\nu'чекина':2,\nu'амелин':10,\nu'швабрин':2,\nu'нежинская':2,\nu'климаков':2,\nu'шишелов':2,\nu'подзорова':2,\nu'слухай':3,\nu'могилевский':2,\nu'опрышко':3,\nu'бурцева':6,\nu'батюшкин':2,\nu'горячев':23,\nu'распопов':7,\nu'мещерякова':18,\nu'тычков':3,\nu'романюк':18,\nu'терновая':4,\nu'подзоров':2,\nu'пинаев':7,\nu'комбаров':5,\nu'секач':2,\nu'ангелов':2,\nu'леваков':2,\nu'левцун':2,\nu'воробьёв':7,\nu'девятериков':3,\nu'сульженко':3,\nu'чугай':2,\nu'марушина':2,\nu'рыбкин':13,\nu'великанова':3,\nu'валуйский':2,\nu'осадчев':2,\nu'долудар':2,\nu'мун':2,\nu'гедз':2,\nu'шеметова':2,\nu'вовк':14,\nu'махров':6,\nu'остроушко':3,\nu'коптев':6,\nu'мукуева':2,\nu'китов':2,\nu'митяев':5,\nu'лагутенко':2,\nu'масютенко':2,\nu'шеметов':8,\nu'
левашов':4,\nu'ананов':2,\nu'копыл':3,\nu'синюгин':2,\nu'калуцкий':2,\nu'маршавин':4,\nu'бокач':2,\nu'сметанникова':2,\nu'добрынский':2,\nu'верстак':2,\nu'чернявская':6,\nu'анисенко':2,\nu'мамхегов':2,\nu'ханвердиев':2,\nu'гребенькова':3,\nu'шахрай':4,\nu'разгонов':3,\nu'пензин':5,\nu'пушкарева':13,\nu'выходцева':2,\nu'живайкин':2,\nu'троицкий':7,\nu'рожков':35,\nu'сабанов':2,\nu'филиппенко':4,\nu'ерощенко':3,\nu'жиркова':3,\nu'барчуков':2,\nu'астафуров':5,\nu'уланова':5,\nu'безрук':3,\nu'изюк':2,\nu'дюкарев':4,\nu'срыбный':2,\nu'панкратова':13,\nu'гладченко':5,\nu'тришин':2,\nu'соцков':3,\nu'ульбашев':2,\nu'басенко':3,\nu'рихтер':2,\nu'кудров':3,\nu'вольнов':3,\nu'митькина':2,\nu'ющенко':11,\nu'левшакова':2,\nu'дуванов':4,\nu'писарева':8,\nu'макагонов':2,\nu'башаров':4,\nu'вервекин':2,\nu'говорун':7,\nu'мироненко':24,\nu'низамов':9,\nu'жигарева':3,\nu'постнова':2,\nu'калядина':2,\nu'неёлов':2,\nu'калимуллин':2,\nu'ксенофонтова':4,\nu'пушенко':2,\nu'остапук':2,\nu'аведов':2,\nu'ольховский':7,\nu'перлин':2,\nu'шпилев':2,\nu'бакин':8,\nu'ильинский':4,\nu'муругова':2,\nu'хорева':4,\nu'сарычева':4,\nu'ладыженский':2,\nu'цветков':42,\nu'дернов':2,\nu'ступаков':4,\nu'бутырская':2,\nu'теличко':5,\nu'верещака':3,\nu'долгополова':5,\nu'сатин':2,\nu'мухамадиев':2,\nu'сычевская':3,\nu'зудин':7,\nu'бурунова':2,\nu'харьковский':2,\nu'кривицкий':2,\nu'мочалов':17,\nu'карпухин':7,\nu'саркисова':3,\nu'баринский':2,\nu'дорохина':4,\nu'рождественский':4,\nu'глотов':10,\nu'ерашова':2,\nu'переверзева':7,\nu'леньшин':3,\nu'юшков':7,\nu'бояринцева':2,\nu'синица':2,\nu'дейнека':5,\nu'гиреев':2,\nu'журков':3,\nu'данильченко':12,\nu'титарев':3,\nu'киричек':5,\nu'солонин':4,\nu'бурулько':2,\nu'назарова':44,\nu'дынников':2,\nu'старанник':3,\nu'карузина':2,\nu'борисенкова':2,\nu'романович':7,\nu'дуванова':3,\nu'крашенинникова':2,\nu'мысин':2,\nu'тимакова':3,\nu'ромашкова':2,\nu'пыленок':2,\nu'гартунг':2,\nu'крайник':2,\nu'калабухов':2,\nu'глушенков':4,\nu'бушихин':2,\nu'гайгалас':2,\nu'ильяшенко':10,\nu'шафранская':2,\nu'петроченков':5,\nu'галиуллин':6,\nu'кощеев':4,\nu'габидулин':2,\nu'асяев':2,\nu'булкин':5,\nu'зинина':2,\nu'парфенюк':3,\nu'маер':2,\nu'вернигора':2,\nu'губаль':2,\nu'храповицкий':2,\nu'лошкарев':9,\nu'кофанова':2,\nu'маляр':2,\nu'торбин':2,\nu'кружилин':3,\nu'калиновский':6,\nu'мишанин':2,\nu'ярыгин':3,\nu'котлов':5,\nu'карсанов':2,\nu'плюснин':6,\nu'полонская':2,\nu'нойкин':2,\nu'югов':4,\nu'шеремет':16,\nu'колчанов':6,\nu'невструев':2,\nu'бахвалова':3,\nu'тютрюмов':3,\nu'скоробогатова':6,\nu'столбов':6,\nu'симановский':3,\nu'кемов':2,\nu'гуцалюк':2,\nu'гулина':9,\nu'чурбанова':2,\nu'рукина':2,\nu'слепкань':2,\nu'чурилов':7,\nu'лубский':2,\nu'закревский':3,\nu'чугаев':3,\nu'багиров':2,\nu'полещиков':2,\nu'тамбиев':6,\nu'гуфраев':2,\nu'карамзина':2,\nu'найден':2,\nu'титаев':2,\nu'жадько':2,\nu'ендовицкий':4,\nu'ващенко':17,\nu'босова':2,\nu'французова':2,\nu'бельтюкова':2,\nu'закриев':3,\nu'штепа':3,\nu'борзова':4,\nu'козак':11,\nu'мулина':2,\nu'гостев':16,\nu'лысяков':2,\nu'дылдин':2,\nu'ведищева':2,\nu'филиппов':89,\nu'рублёв':3,\nu'буянова':8,\nu'муссалиев':2,\nu'писцов':2,\nu'дулич':2,\nu'анискин':3,\nu'шилкин':5,\nu'шелякин':3,\nu'лашманова':2,\nu'колодяжный':5,\nu'витер':2,\nu'свирида':4,\nu'чередник':2,\nu'хван':3,\nu'бородин':42,\nu'лазаренко':18,\nu'башкиров':12,\nu'жучков':15,\nu'савинова':8,\nu'антонюк':12,\nu'огай':3,\nu'рудык':2,\nu'козьяков':2,\nu'теодорович':2,\nu'айкашев':2,\nu'салюков':2,\nu'антошкин':3,\nu'мурыгин':2,\nu'шевчуков':2,\nu'лукашевич':11,\nu'улиткин':2,\nu'курленко':2,
\nu'эминова':2,\nu'ломакин':17,\nu'олифиренко':3,\nu'кузовников':2,\nu'марочкин':2,\nu'пенчук':2,\nu'романова':79,\nu'демиров':2,\nu'ендальцев':2,\nu'кондрахина':2,\nu'русаков':24,\nu'володько':3,\nu'токарчук':4,\nu'коровкина':4,\nu'татарчук':2,\nu'мухтаров':3,\nu'новгородов':3,\nu'мавлютов':4,\nu'микуленок':2,\nu'ситников':39,\nu'ламин':2,\nu'ширманов':2,\nu'жигайлов':2,\nu'листов':2,\nu'шамсутдинов':13,\nu'буков':3,\nu'круглова':17,\nu'решетникова':10,\nu'карасев':33,\nu'сакович':7,\nu'зеленская':4,\nu'качалова':3,\nu'апасов':2,\nu'быковская':4,\nu'карабач':2,\nu'адамова':5,\nu'швечихин':2,\nu'керсов':2,\nu'минасян':3,\nu'жулин':6,\nu'пискарева':4,\nu'баширов':5,\nu'жеребцова':8,\nu'косенков':9,\nu'чаплыгин':14,\nu'никульшин':5,\nu'загидуллин':2,\nu'соболь':12,\nu'левадний':2,\nu'чагдуров':4,\nu'антонян':2,\nu'гафиатулин':3,\nu'суслова':11,\nu'плугин':2,\nu'голубович':2,\nu'любушкин':2,\nu'садовой':3,\nu'косулин':2,\nu'фалеев':4,\nu'вафин':4,\nu'обущенко':2,\nu'мамаев':15,\nu'шамаев':9,\nu'чиндяскин':2,\nu'додонов':8,\nu'мержоев':5,\nu'кузьмичев':18,\nu'тимошин':7,\nu'глушкова':10,\nu'чепик':2,\nu'скрынникова':2,\nu'недятько':2,\nu'кисляков':9,\nu'мерзляков':7,\nu'мещанинов':2,\nu'комлев':6,\nu'мелеховец':3,\nu'лазарев':54,\nu'лукичев':7,\nu'ткачев':34,\nu'голубка':2,\nu'корф':2,\nu'игнаткова':2,\nu'ледяев':2,\nu'рябкина':2,\nu'лукоянов':7,\nu'грудев':2,\nu'макеёнок':2,\nu'протченко':2,\nu'холин':8,\nu'бутт':2,\nu'карбовская':2,\nu'паршуков':4,\nu'рубачев':2,\nu'ильичев':13,\nu'шилькрот':2,\nu'пименова':11,\nu'польдяев':2,\nu'галактионов':4,\nu'бубенчикова':2,\nu'онищенко':29,\nu'зубрилов':2,\nu'асташов':3,\nu'ярыгина':5,\nu'липчанский':2,\nu'тыщенко':9,\nu'карякина':2,\nu'фурса':5,\nu'помпа':2,\nu'кудлаев':3,\nu'набоко':3,\nu'пересторонин':3,\nu'штанько':4,\nu'остапченко':2,\nu'цымбаленко':3,\nu'шулаков':3,\nu'воронов':36,\nu'бабиченко':3,\nu'варламова':10,\nu'гришкина':2,\nu'морковкин':4,\nu'шмыгалев':2,\nu'лизунов':6,\nu'денега':2,\nu'каретин':4,\nu'брехов':2,\nu'чарухин':2,\nu'розов':6,\nu'саранцев':3,\nu'дормидонтов':4,\nu'пискарев':3,\nu'игошин':7,\nu'горбачев-мозгунов':2,\nu'харчилава':2,\nu'гадяцкий':2,\nu'болдова':2,\nu'люкманов':2,\nu'габдуллина':3,\nu'сенук':2,\nu'асланов':9,\nu'лесняк':3,\nu'миронюк':3,\nu'рыкова':6,\nu'лаврищев':5,\nu'сошенко':2,\nu'коренков':2,\nu'тютюник':3,\nu'шилков':2,\nu'железнова':3,\nu'мелков':2,\nu'киселюк':2,\nu'дубова':2,\nu'тангиев':2,\nu'саблин':14,\nu'ягияев':2,\nu'сухомлин':2,\nu'кириллов':50,\nu'тронин':5,\nu'харсиев':3,\nu'гатиатулин':2,\nu'кузенков':7,\nu'пронькин':3,\nu'крутиков':11,\nu'мишенев':2,\nu'тышкевич':3,\nu'крамар':3,\nu'гринберг':3,\nu'шахназарян':5,\nu'кайбелева':2,\nu'серик':2,\nu'акопов':2,\nu'улитина':4,\nu'старых':5,\nu'какушкин':2,\nu'рубаев':2,\nu'фёдорова':5,\nu'диканев':2,\nu'алексина':3,\nu'валиева':4,\nu'рублев':9,\nu'згерский':2,\nu'минбаев':2,\nu'беседина':3,\nu'бабенкова':2,\nu'рейф':2,\nu'нигаматуллин':2,\nu'федорцов':3,\nu'бородаев':3,\nu'дзагов':2,\nu'середкин':5,\nu'изотов':16,\nu'радчук':6,\nu'булычева':5,\nu'митина':11,\nu'войнов':9,\nu'мирончик':2,\nu'шевелева':10,\nu'чимидов':2,\nu'сафарова':2,\nu'шматко':2,\nu'сивенкова':2,\nu'назариков':2,\nu'зарайский':2,\nu'паутов':7,\nu'гавловский':2,\nu'кубасова':3,\nu'лысенкова':2,\nu'бурдукова':2,\nu'радыгин':2,\nu'чулков':10,\nu'кирильченко':3,\nu'земляков':7,\nu'волосникова':2,\nu'левкович':3,\nu'москвичева':3,\nu'городова':2,\nu'грузнов':2,\nu'скирда':2,\nu'осетров':10,\nu'пуликовский':2,\nu'миронов':73,\nu'анциферов':7,\nu'лоншаков':4,\nu'гореликова':3,\nu'прядки
н':2,\nu'водовозов':3,\nu'вакуленко':10,\nu'карпеченков':2,\nu'благов':4,\nu'завацкий':2,\nu'макагон':2,\nu'малюков':2,\nu'бевзенко':2,\nu'кобелин':2,\nu'николайчук':6,\nu'шипулин':5,\nu'сухенко':2,\nu'попадько':2,\nu'акуненко':2,\nu'гоготов':2,\nu'первухин':4,\nu'зюзиков':2,\nu'просянникова':2,\nu'артюхин':9,\nu'стерлин':2,\nu'андрюшина':2,\nu'канева':2,\nu'левинталь':2,\nu'безуглов':5,\nu'прихунов':2,\nu'эскиндаров':2,\nu'переходов':2,\nu'фимин':2,\nu'легостаев':7,\nu'дюкова':4,\nu'козюк':2,\nu'невская':2,\nu'раковский':3,\nu'арсенов':5,\nu'лянной':2,\nu'малинский':2,\nu'пирогов':18,\nu'могучева':2,\nu'трипольская':2,\nu'лопатина':6,\nu'суанов':2,\nu'фаррахов':3,\nu'барыбин':4,\nu'максименко':32,\nu'манцаев':2,\nu'стасевич':2,\nu'альберт':3,\nu'зубанов':3,\nu'третьякова':23,\nu'костарева':4,\nu'кленов':2,\nu'волосов':6,\nu'шичкин':2,\nu'омельяненко':2,\nu'микляева':2,\nu'сигалаев':2,\nu'селюк':2,\nu'перевозчиков':8,\nu'живов':2,\nu'шугаев':3,\nu'удовенко':11,\nu'каспирович':2,\nu'калганов':3,\nu'демьянчук':3,\nu'павлиди':2,\nu'куция':2,\nu'грунина':3,\nu'шилов':28,\nu'хасиева':2,\nu'абдулвагапов':2,\nu'лептюхов':2,\nu'семеновых':3,\nu'ящук':4,\nu'моргунова':4,\nu'кирей':2,\nu'костин':40,\nu'костик':2,\nu'пухаева':3,\nu'оганесян':10,\nu'гревцева':3,\nu'украинцева':3,\nu'аксёненко':2,\nu'митрохин':7,\nu'гришко':6,\nu'дзюба':18,\nu'мурашкина':4,\nu'булин':2,\nu'летанин':2,\nu'голубовская':2,\nu'лукьянчиков':6,\nu'пашков':17,\nu'султанов':18,\nu'хлиманков':2,\nu'кащеев':8,\nu'хомченко':3,\nu'костицына':2,\nu'бексултанов':2,\nu'близнюк':9,\nu'зубатов':2,\nu'назимова':3,\nu'филюшкин':2,\nu'пушкина':6,\nu'дюдин':2,\nu'величутин':3,\nu'чикирев':2,\nu'валов':3,\nu'лашин':4,\nu'леденев':5,\nu'шпиньков':2,\nu'казаринов':6,\nu'бородачев':3,\nu'кормилец':2,\nu'стриж':2,\nu'бобырев':2,\nu'блинков':4,\nu'зибзеев':4,\nu'зенина':3,\nu'филимонова':18,\nu'ширшов':4,\nu'батова':3,\nu'милюхин':2,\nu'горбатова':4,\nu'харченко':44,\nu'березин':36,\nu'чернышёва':2,\nu'чекмарев':9,\nu'барышев':17,\nu'школьников':3,\nu'маргиев':4,\nu'магдич':5,\nu'митюков':8,\nu'григорьевский':2,\nu'алтунин':4,\nu'калюжный':17,\nu'аптекарев':2,\nu'заев':3,\nu'преображенская':2,\nu'каклюгин':2,\nu'головкова':2,\nu'сопов':2,\nu'карлович':2,\nu'лалетин':3,\nu'суворова':17,\nu'масенков':2,\nu'кобзаренко':2,\nu'шалыгина':4,\nu'баязитов':4,\nu'ваничев':3,\nu'чеганова':2,\nu'былинкин':3,\nu'волчок':2,\nu'демичев':2,\nu'опарина':4,\nu'сущев':2,\nu'глинский':3,\nu'чикина':2,\nu'роднов':3,\nu'мальцев':60,\nu'головачева':3,\nu'иевлев':2,\nu'чиняков':4,\nu'пчелинцева':4,\nu'лупанов':3,\nu'штыков':2,\nu'сибилев':3,\nu'терещенко':44,\nu'даурбеков':3,\nu'капля':2,\nu'колтунов':8,\nu'софронов':12,\nu'кривенцева':2,\nu'ермак':9,\nu'шпанагель':2,\nu'горожанкин':2,\nu'кушнаренко':5,\nu'косьмина':2,\nu'статкевич':2,\nu'ефремкин':3,\nu'флегонтова':3,\nu'гапанович':2,\nu'куклев':4,\nu'щедров':4,\nu'ченцов':2,\nu'мисютин':2,\nu'примаков':3,\nu'феклистов':2,\nu'колгин':2,\nu'санькова':2,\nu'абубакаров':3,\nu'гальянова':2,\nu'музалевская':2,\nu'азанова':3,\nu'леванова':2,\nu'чайковский':7,\nu'харламова':14,\nu'солодовник':7,\nu'кудин':3,\nu'бабурина':5,\nu'литовка':3,\nu'светлицкая':2,\nu'садовников':15,\nu'виничук':3,\nu'ячменев':2,\nu'гетманов':3,\nu'аветисян':8,\nu'жвакин':2,\nu'куклева':3,\nu'колбасин':3,\nu'квасников':2,\nu'армейский':2,\nu'листровой':2,\nu'диденко':32,\nu'картоев':2,\nu'дащенко':3,\nu'якупов':12,\nu'гняздовский':2,\nu'марнов':2,\nu'манукова':2,\nu'осечкин':3,\nu'волосевич':3,\nu'хорошилов':9,\nu'пашин':4,\nu'гришаева':3,\nu'базык':2
,\nu'санчат':2,\nu'рогожина':8,\nu'кравцова':23,\nu'гимадеева':2,\nu'корчагин':19,\nu'селянин':4,\nu'серебров':3,\nu'пахтусов':3,\nu'вахнин':6,\nu'золотько':2,\nu'артеменко':16,\nu'высочин':5,\nu'медведников':2,\nu'калинюк':2,\nu'ланских':2,\nu'деминов':2,\nu'анцибор':2,\nu'шидловская':4,\nu'манык':2,\nu'тедеев':2,\nu'ровенский':2,\nu'казинов':2,\nu'романенков':7,\nu'борзых':4,\nu'дудина':5,\nu'корепанов':14,\nu'редин':5,\nu'мешалкин':3,\nu'жогов':2,\nu'кислова':3,\nu'корешков':6,\nu'трофимова':36,\nu'вернер':5,\nu'заречнев':3,\nu'полынцев':2,\nu'минаева':13,\nu'пономаренко':40,\nu'ярманов':4,\nu'покидов':2,\nu'лекомцев':4,\nu'чаплина':2,\nu'стулов':3,\nu'кондрашина':3,\nu'никишин':21,\nu'усольцев':12,\nu'возный':3,\nu'хитрова':3,\nu'карягин':2,\nu'линник':11,\nu'пимкин':3,\nu'заваруев':2,\nu'илларионова':6,\nu'туруло':2,\nu'басова':6,\nu'шабунин':10,\nu'носенко':6,\nu'лобашов':2,\nu'цепляев':2,\nu'лызлов':2,\nu'мухамеджанов':2,\nu'попович':9,\nu'туфанов':2,\nu'карагодин':9,\nu'архипенко':3,\nu'кочетыгов':3,\nu'канкулов':2,\nu'якунина':8,\nu'песенко':2,\nu'бесчетнов':2,\nu'шушпанов':6,\nu'кореняк':2,\nu'чередников':2,\nu'гогин':5,\nu'сеструхин':2,\nu'ясиновский':2,\nu'лапин':25,\nu'крушинина':2,\nu'горохов':21,\nu'лебедкин':2,\nu'полилов':2,\nu'давидович':4,\nu'лучко':2,\nu'зезюля':2,\nu'туршатов':2,\nu'селимханов':2,\nu'давиденко':12,\nu'иванкин':2,\nu'руднев':11,\nu'роговский':2,\nu'осьмаков':3,\nu'самородов':2,\nu'садковский':2,\nu'сёмка':2,\nu'кривоножкин':2,\nu'волжина':2,\nu'алеев':3,\nu'ходырев':11,\nu'хилинский':2,\nu'москалюк':2,\nu'люков':3,\nu'маркман':2,\nu'коротков':29,\nu'супрунов':3,\nu'загайнов':7,\nu'волчёнкова':2,\nu'зимин':34,\nu'мельникова':55,\nu'амирова':2,\nu'корнев':26,\nu'ващук':3,\nu'георгиевская':2,\nu'свид':2,\nu'сапунов':8,\nu'мысков':2,\nu'корзин':2,\nu'щепочкин':3,\nu'ведрова':2,\nu'горинов':4,\nu'менчинский':2,\nu'васюкова':5,\nu'филатов':54,\nu'стефанов':7,\nu'зиновина':2,\nu'ишкинин':2,\nu'криволапов':7,\nu'нигматова':2,\nu'кузнечевских':2,\nu'сташко':2,\nu'емцева':2,\nu'воронько':3,\nu'кортунов':3,\nu'черный':18,\nu'антонцев':3,\nu'мирзеханов':2,\nu'белхароев':2,\nu'кострома':2,\nu'моисеенкова':3,\nu'ухов':4,\nu'агаркова':3,\nu'костюшин':3,\nu'пыжов':3,\nu'клименко':63,\nu'литвяк':2,\nu'перетягин':5,\nu'медведевских':2,\nu'лобова':3,\nu'горный':2,\nu'фефилова':2,\nu'горских':2,\nu'жевлаков':2,\nu'норкин':2,\nu'тхаркахов':2,\nu'квасов':7,\nu'каратаева':2,\nu'потапкина':3,\nu'новиченко':3,\nu'медко':2,\nu'саньков':4,\nu'ерина':4,\nu'бедарев':8,\nu'соловьянов':2,\nu'кортиков':2,\nu'погорелов':13,\nu'самохвал':2,\nu'ланшаков':2,\nu'букина':9,\nu'панов':59,\nu'кожемяко':8,\nu'горпенюк':2,\nu'шашмурин':2,\nu'лоренц':4,\nu'макеев':32,\nu'иванилова':3,\nu'мышакин':2,\nu'елканов':3,\nu'стенина':2,\nu'межуева':2,\nu'зеткин':2,\nu'хаустова':6,\nu'селютина':3,\nu'амосова':4,\nu'лахин':5,\nu'левшин':5,\nu'пузенко':2,\nu'колоколов':2,\nu'ивлев':17,\nu'раскатов':2,\nu'резяпов':2,\nu'кислицына':5,\nu'муртазин':9,\nu'садченко':2,\nu'проказин':2,\nu'маслов':62,\nu'иванников':13,\nu'шарыкин':2,\nu'теплякова':5,\nu'ламанова':3,\nu'ферапонтов':2,\nu'скобелев':7,\nu'яцухно':2,\nu'терешок':2,\nu'алай':2,\nu'евенко':2,\nu'хлыстов':3,\nu'анчеев':2,\nu'сойникова':2,\nu'желтов':7,\nu'титаренко':15,\nu'ващилин':2,\nu'зюбин':3,\nu'бекмурзов':4,\nu'идиятуллин':3,\nu'ястребова':6,\nu'мухрыгин':2,\nu'исраилов':3,\nu'захарян':2,\nu'лим':4,\nu'малюшкин':2,\nu'тополенко':3,\nu'павлов':162,\nu'садкин':2,\nu'гладышев':20,\nu'лепешкина':2,\nu'порфирьев':2,\nu'быстрицкий':2,\nu'трунин':7,\nu'ко
тянов':2,\nu'мастерских':3,\nu'ларин':37,\nu'кожокарь':2,\nu'чернышёв':3,\nu'орешко':2,\nu'галушка':2,\nu'мокина':4,\nu'ямалиев':2,\nu'канонков':2,\nu'смышляева':4,\nu'алборова':2,\nu'кашина':8,\nu'сладков':3,\nu'гришина':30,\nu'дукаев':2,\nu'рудовский':2,\nu'починок':3,\nu'косинский':3,\nu'сидорков':2,\nu'золоторенко':2,\nu'рогалев':6,\nu'бурковский':4,\nu'хуснулин':2,\nu'ясаков':3,\nu'чубарова':4,\nu'забоева':2,\nu'загорская':2,\nu'лакомкин':2,\nu'демильханов':2,\nu'саушев':2,\nu'оборотов':2,\nu'жигунов':5,\nu'аракчеев':3,\nu'кореньков':5,\nu'тимашков':2,\nu'бобров':31,\nu'богунов':4,\nu'демкин':5,\nu'булат':4,\nu'булах':5,\nu'каширов':2,\nu'мелешкина':3,\nu'аликин':2,\nu'глушко':4,\nu'лабзина':2,\nu'ворон':3,\nu'травкин':8,\nu'брюханова':2,\nu'привалова':5,\nu'мункуев':2,\nu'куроедов':2,\nu'уваров':23,\nu'загребин':4,\nu'беспятов':3,\nu'байков':6,\nu'старцева':7,\nu'храброва':3,\nu'прокаева':2,\nu'пьянов':5,\nu'яшкин':7,\nu'кружкова':2,\nu'лесин':2,\nu'исаенкова':2,\nu'смольникова':3,\nu'слюсарь':10,\nu'муравлев':3,\nu'алаторцев':2,\nu'берестов':7,\nu'неупокоев':3,\nu'александровский':2,\nu'тубеев':2,\nu'мехов':2,\nu'зуб':4,\nu'беззубцев':2,\nu'кочкин':7,\nu'ломов':7,\nu'парусов':2,\nu'пикин':2,\nu'богатых':3,\nu'микуцкая':2,\nu'матусевич':4,\nu'михалев':20,\nu'хозяинов':3,\nu'грибовский':2,\nu'скакун':3,\nu'шульц':6,\nu'дейнер':2,\nu'плюснина':4,\nu'алаев':2,\nu'палаткин':3,\nu'хорьков':2,\nu'скобелева':2,\nu'тетерина':9,\nu'кучукова':2,\nu'гатаулин':2,\nu'сургучев':3,\nu'ключарева':2,\nu'губарь':5,\nu'гераськина':2,\nu'гладских':3,\nu'ягубов':2,\nu'седунов':3,\nu'слукин':2,\nu'тугушев':9,\nu'полянцев':3,\nu'аристов':10,\nu'хадарцев':2,\nu'боднарчук':3,\nu'миленко':2,\nu'каюров':3,\nu'клопов':3,\nu'гуреев':3,\nu'усатый':2,\nu'ядов':2,\nu'сафонова':25,\nu'фуфаев':4,\nu'кравченко':95,\nu'барынина':2,\nu'дозоров':5,\nu'буланов':19,\nu'буреев':2,\nu'фидельман':2,\nu'родыгин':3,\nu'кулябин':2,\nu'маленкин':2,\nu'еремеева':12,\nu'колмаков':11,\nu'васькина':3,\nu'гуляева':20,\nu'набок':4,\nu'емельченков':2,\nu'галич':9,\nu'яхонтов':3,\nu'турченко':4,\nu'рябиков':4,\nu'уголев':2,\nu'байбородов':2,\nu'мяло':3,\nu'мокрушин':6,\nu'унгурян':2,\nu'плющев':2,\nu'сердюкова':6,\nu'ухватова':2,\nu'синцова':3,\nu'гультяев':3,\nu'харчиков':2,\nu'скачков':12,\nu'петров':229,\nu'желудкова':3,\nu'данилян':2,\nu'корнаков':3,\nu'талипов':2,\nu'приданникова':2,\nu'моор':7,\nu'ляпунова':2,\nu'шаламов':6,\nu'шимченко':3,\nu'сипливый':2,\nu'ласкин':4,\nu'шилкина':3,\nu'корабельникова':3,\nu'колычева':3,\nu'тхагалегов':2,\nu'барсков':6,\nu'здобникова':2,\nu'шарикова':2,\nu'корнилова':19,\nu'баскаков':13,\nu'дмитриев':99,\nu'еськов':4,\nu'койнов':2,\nu'шкурин':3,\nu'тимошина':4,\nu'биджиев':4,\nu'саранчин':2,\nu'коцарь':3,\nu'дашков':6,\nu'рябошапко':2,\nu'меженько':2,\nu'присяжная':2,\nu'руссу':2,\nu'созинов':3,\nu'манухина':2,\nu'ахметзянов':9,\nu'нечай':3,\nu'титенков':2,\nu'сонин':8,\nu'заика':8,\nu'аксёнова':2,\nu'фаргиев':2,\nu'буланков':2,\nu'лысов':7,\nu'вязовой':2,\nu'горобцов':3,\nu'скрипов':2,\nu'шибакова':3,\nu'рогожкин':9,\nu'бам':2,\nu'бай':5,\nu'мацола':2,\nu'силла':2,\nu'кришталь':2,\nu'шнайдер':7,\nu'погребной':3,\nu'чеховский':3,\nu'сурнина':4,\nu'рощенко':4,\nu'фефелова':3,\nu'мурылев':2,\nu'габдрахманов':4,\nu'юзефович':2,\nu'зайнутдинов':3,\nu'дербилов':2,\nu'климентьев':6,\nu'шашлова':4,\nu'касимова':3,\nu'прокопенков':3,\nu'зубко':8,\nu'тулеев':2,\nu'семенчук':5,\nu'дикова':2,\nu'коркмазов':2,\nu'любицкий':2,\nu'жигалин':2,\nu'фищенко':2,\nu'малая':2,\nu'пьянков':15,\nu'шпинев':2,\nu'сенькин':
6,\nu'закурдаев':3,\nu'подовинников':2,\nu'носенков':2,\nu'нагорняк':4,\nu'рябова':11,\nu'хлыбов':5,\nu'литвиненко':36,\nu'добрякова':3,\nu'жильцов':5,\nu'буховец':2,\nu'славницкий':2,\nu'саломатов':4,\nu'иванютин':2,\nu'нижегородов':3,\nu'кузько':2,\nu'пихоцкий':2,\nu'бойцева':2,\nu'крашенинников':7,\nu'фатин':2,\nu'белевич':3,\nu'бодин':5,\nu'кисель':10,\nu'русецкий':2,\nu'куличенко':5,\nu'фоминых':13,\nu'ештокин':2,\nu'кандрашин':2,\nu'ладыгин':5,\nu'зотов':34,\nu'бобина':5,\nu'каримова':4,\nu'сагдеев':3,\nu'алцыбеев':2,\nu'чекменев':2,\nu'векшина':3,\nu'бессарабов':3,\nu'санжиев':2,\nu'любезный':2,\nu'савичева':2,\nu'хизриев':4,\nu'фесюк':2,\nu'карельская':2,\nu'лебединская':2,\nu'мишечкин':2,\nu'федяшов':3,\nu'матюк':2,\nu'гавин':2,\nu'нестерович':3,\nu'суровцева':5,\nu'римский':2,\nu'лукач':2,\nu'лукаш':3,\nu'симонова':20,\nu'хмельницкий':4,\nu'дубина':5,\nu'чирков':28,\nu'храбрый':2,\nu'зубков':33,\nu'мищенко':36,\nu'цибизова':2,\nu'пестерев':4,\nu'пшеницын':5,\nu'бодунова':2,\nu'корягина':4,\nu'гладкова':11,\nu'гребенщиков':6,\nu'нечаева':14,\nu'карданов':5,\nu'панова':21,\nu'тюменев':5,\nu'галайчук':2,\nu'прокудина':4,\nu'струк':4,\nu'киседобрев':2,\nu'сарычев':16,\nu'ховалыг':3,\nu'дьяченко':30,\nu'евтеев':9,\nu'савенко':20,\nu'биличенко':2,\nu'черненко':22,\nu'абилов':3,\nu'гамалеев':2,\nu'котельников':17,\nu'чернецов':5,\nu'северина':4,\nu'юнтер':2,\nu'голева':3,\nu'насонов':6,\nu'алекберов':2,\nu'поташев':3,\nu'ланцев':2,\nu'шульгин':14,\nu'ряжко':2,\nu'вальков':3,\nu'коротаев':12,\nu'зиборова':3,\nu'булавко':2,\nu'платонова':10,\nu'арсентьев':5,\nu'бодунков':2,\nu'магдалюк':2,\nu'худяков':21,\nu'долгая':2,\nu'макарычев':2,\nu'мацкевич':6,\nu'клепко':2,\nu'борисов':112,\nu'светличный':3,\nu'нестеров':45,\nu'севрюк':2,\nu'сабадаш':2,\nu'сафронов':41,\nu'точилин':4,\nu'таймазов':2,\nu'дьяконова':10,\nu'савенкова':8,\nu'аванесов':3,\nu'горлинский':3,\nu'аккузин':2,\nu'кабикова':2,\nu'носков':33,\nu'лысоконь':2,\nu'рассказчиков':2,\nu'гудименко':3,\nu'ащеулова':2,\nu'кучинская':4,\nu'сукочев':3,\nu'лапузин':2,\nu'каримуллин':3,\nu'легкая':3,\nu'кулаева':3,\nu'душкин':3,\nu'чех':3,\nu'жмуров':2,\nu'суродин':3,\nu'шумилина':2,\nu'анучина':2,\nu'логачев':6,\nu'перетятко':2,\nu'маценко':4,\nu'дюбин':2,\nu'людоговский':2,\nu'зеленин':11,\nu'гридасов':6,\nu'янченкова':2,\nu'иванчиков':4,\nu'симанович':2,\nu'беленикин':2,\nu'барахоев':2,\nu'абраменков':3,\nu'злыгостев':3,\nu'клепец':2,\nu'урядов':2,\nu'беличенко':5,\nu'ноздрачев':3,\nu'таскаев':2,\nu'шитов':6,\nu'прищепов':2,\nu'сабиров':12,\nu'холодова':4,\nu'шкаликов':3,\nu'тельнов':8,\nu'алсуфьев':2,\nu'жеглов':3,\nu'куриленко':11,\nu'казаченко':5,\nu'дацюк':3,\nu'первухина':2,\nu'столярова':11,\nu'трошков':2,\nu'саматов':3,\nu'дмитриченко':7,\nu'абрамова':42,\nu'сиротина':3,\nu'семижонов':2,\nu'яшков':6,\nu'лисицкий':4,\nu'клещенок':2,\nu'прищеп':2,\nu'николаенков':3,\nu'пищулов':2,\nu'пригарин':2,\nu'гайдина':2,\nu'скорик':5,\nu'занкин':2,\nu'аюпова':2,\nu'горбач':7,\nu'луконин':5,\nu'емельянова':30,\nu'ерцева':2,\nu'коршиков':5,\nu'болотова':8,\nu'базаркин':2,\nu'поздняков':38,\nu'горбатов':5,\nu'великая':3,\nu'ярославцева':6,\nu'завгороднева':2,\nu'дмитриевская':2,\nu'кутаев':3,\nu'колташов':2,\nu'щербачев':2,\nu'ляликов':2,\nu'зубань':2,\nu'нецветаев':2,\nu'пажитнов':2,\nu'холявко':3,\nu'белобородов':11,\nu'нестечук':2,\nu'василевская':12,\nu'дашук':2,\nu'косоротова':2,\nu'гадеев':3,\nu'капралов':4,\nu'мартыненко':20,\nu'лучинский':2,\nu'джиоев':5,\nu'жигульский':5,\nu'линец':2,\nu'бусыгина':6,\nu'половников':7,\nu'кутелев':2,\nu
'суков':3,\nu'рамазанов':31,\nu'великородный':2,\nu'малыхин':14,\nu'колачевский':2,\nu'смолкин':3,\nu'залуцкая':2,\nu'власюк':8,\nu'шишко':2,\nu'бабкина':7,\nu'бакулин':12,\nu'курин':2,\nu'луговая':4,\nu'шумакова':4,\nu'ларькин':3,\nu'гафуров':4,\nu'шамов':3,\nu'терешина':3,\nu'верхотуров':5,\nu'шадских':2,\nu'жаринов':2,\nu'дубченко':3,\nu'боганова':2,\nu'орешкина':6,\nu'батуркина':2,\nu'луценко':26,\nu'зверькова':2,\nu'полякина':2,\nu'солод':3,\nu'норбоев':3,\nu'сотченко':2,\nu'макеева':15,\nu'анциферова':4,\nu'кускашев':2,\nu'минакова':3,\nu'гесс':3,\nu'цицин':2,\nu'шумовский':2,\nu'барбашов':2,\nu'билык':2,\nu'героева':2,\nu'будин':3,\nu'митасова':2,\nu'ремизова':3,\nu'бернгардт':2,\nu'лубсанов':2,\nu'матыскин':2,\nu'плешаков':8,\nu'лобан':2,\nu'суббота':3,\nu'гилева':7,\nu'валяев':4,\nu'щетинина':6,\nu'конюхова':4,\nu'примачук':2,\nu'акбулатов':2,\nu'макеенкова':2,\nu'битаров':3,\nu'прокопчук':6,\nu'осокина':6,\nu'конаныхин':2,\nu'евстратова':6,\nu'овдиюк':2,\nu'иванец':2,\nu'оноприенко':3,\nu'беркутов':2,\nu'балабаев':7,\nu'девятов':8,\nu'пышкина':3,\nu'куксенко':3,\nu'осмоловский':3,\nu'карпович':6,\nu'телегин':17,\nu'чибирева':2,\nu'лукашенко':4,\nu'анисимова':34,\nu'локшина':2,\nu'вашурин':4,\nu'будаев':14,\nu'уразбаев':2,\nu'боровой':8,\nu'янушкевич':2,\nu'данилушкин':2,\nu'захаркина':2,\nu'бевз':2,\nu'сыщиков':2,\nu'валькова':3,\nu'быценко':3,\nu'катин':2,\nu'шапошникова':9,\nu'сметанина':7,\nu'бадин':4,\nu'акифьев':4,\nu'мосина':12,\nu'мукаилов':3,\nu'дубовицкая':4,\nu'миничев':2,\nu'дербенева':3,\nu'абидов':5,\nu'летуновский':2,\nu'гирич':2,\nu'феофанов':3,\nu'стретович':3,\nu'казьмина':4,\nu'партем':3,\nu'рутковская':2,\nu'маматова':2,\nu'кочерова':2,\nu'безгин':6,\nu'глушков':22,\nu'трахов':2,\nu'шуст':2,\nu'бубякин':2,\nu'гвазава':2,\nu'ваничкин':5,\nu'нарожный':2,\nu'гизатуллина':3,\nu'ходосевич':2,\nu'локтионов':13,\nu'горбов':3,\nu'мисько':2,\nu'васильков':8,\nu'чиж':4,\nu'кравец':18,\nu'тетерев':2,\nu'литвин':13,\nu'щесняк':2,\nu'пешехонов':5,\nu'маркарян':4,\nu'галашев':4,\nu'василица':2,\nu'шимко':4,\nu'спасский':3,\nu'кирсанова':14,\nu'тиунова':4,\nu'желнова':2,\nu'илькевич':2,\nu'ксензов':2,\nu'бутрин':2,\nu'бутрим':3,\nu'дук':3,\nu'прудентова':2,\nu'немчинов':2,\nu'целовальникова':2,\nu'певнев':2,\nu'габрелян':2,\nu'шимин':3,\nu'галашова':2,\nu'лукьянцев':3,\nu'моторный':2,\nu'шестернин':2,\nu'савич':7,\nu'гладнев':4,\nu'монахов':12,\nu'бабина':4,\nu'шеин':12,\nu'берзан':2,\nu'толкунов':4,\nu'рычкова':6,\nu'кошель':6,\nu'хуторной':6,\nu'мелехин':9,\nu'тюнин':4,\nu'федосова':5,\nu'брыксин':2,\nu'шинаков':2,\nu'огородова':2,\nu'харьковская':3,\nu'овсяников':3,\nu'ильясова':3,\nu'дударенко':2,\nu'галахов':3,\nu'дягилев':3,\nu'зуева':17,\nu'куулар':4,\nu'пегушин':2,\nu'астапов':9,\nu'юркевич':7,\nu'оборина':3,\nu'лунга':2,\nu'атабаев':2,\nu'ситак':2,\nu'луць':2,\nu'ипполитова':6,\nu'граков':2,\nu'семченков':3,\nu'красничков':2,\nu'желтова':3,\nu'дюбо':2,\nu'дюба':2,\nu'митусова':2,\nu'радаев':5,\nu'писарев':20,\nu'периков':2,\nu'галдина':3,\nu'кошман':5,\nu'гордик':2,\nu'вяльцева':2,\nu'шлыкова':4,\nu'тищук':2,\nu'климович':7,\nu'шаповал':9,\nu'кучменко':2,\nu'мухамедов':2,\nu'арзютов':2,\nu'широких':5,\nu'сауткин':6,\nu'прибыткова':2,\nu'кагаев':2,\nu'кулиев':8,\nu'пучковский':2,\nu'покидышев':3,\nu'кочин':4,\nu'чижик':3,\nu'кокарева':2,\nu'агапкин':4,\nu'гладкая':3,\nu'крылатых':2,\nu'шинкарюк':2,\nu'фещенко':9,\nu'рябенко':7,\nu'микаэлян':3,\nu'чехов':4,\nu'суркова':8,\nu'когай':3,\nu'овечкин':10,\nu'гаджимагомедов':8,\nu'юткина':2,\nu'шершаков':3,\nu'зинченко':26,\n
u'аскаров':4,\nu'кондрашов':20,\nu'голубкова':5,\nu'манжос':2,\nu'семендяев':2,\nu'княжище':2,\nu'гусак':8,\nu'курсенко':2,\nu'асмус':2,\nu'иванникова':5,\nu'авдышев':2,\nu'шапиев':2,\nu'теселкин':4,\nu'кривошеев':16,\nu'корельский':3,\nu'маневич':3,\nu'баканова':2,\nu'галиаскаров':3,\nu'ильюшин':2,\nu'филиппович':3,\nu'шевельков':2,\nu'сороколетов':8,\nu'лихачева':11,\nu'нуриев':2,\nu'пальников':3,\nu'романько':2,\nu'рябец':4,\nu'шинкарев':5,\nu'акульшин':2,\nu'белова':57,\nu'усков':13,\nu'иванько':2,\nu'лукина':10,\nu'лапа':5,\nu'собакарь':3,\nu'земеров':2,\nu'каменский':9,\nu'мисюрин':2,\nu'шевкунов':3,\nu'махнев':4,\nu'щекин':2,\nu'драчев':4,\nu'кирьякова':2,\nu'бужинаев':2,\nu'гамбург':3,\nu'хлопов':3,\nu'карталыков':2,\nu'старов':3,\nu'вольных':3,\nu'шепеленко':4,\nu'баран':4,\nu'гармаев':3,\nu'юрков':6,\nu'криницин':2,\nu'бузмакова':3,\nu'старовойтов':9,\nu'завьялов':28,\nu'барилов':4,\nu'бачурин':12,\nu'маржохов':2,\nu'мохов':10,\nu'кочнова':4,\nu'сибиряков':5,\nu'абубакиров':4,\nu'бурлачко':2,\nu'бурдаков':8,\nu'павлюковский':2,\nu'славнов':3,\nu'касумов':5,\nu'бреева':3,\nu'капшитарь':2,\nu'александрович':4,\nu'сабирзянова':4,\nu'парусинов':2,\nu'петин':6,\nu'космачёв':2,\nu'гаевой':3,\nu'симонян':4,\nu'зайкин':5,\nu'потеева':2,\nu'гордиенко':37,\nu'куницкий':2,\nu'баженова':11,\nu'зеленцов':8,\nu'земляк':2,\nu'фазлыев':2,\nu'колпакова':8,\nu'терешков':3,\nu'панарин':7,\nu'караулова':2,\nu'сивцев':2,\nu'ганеев':5,\nu'кокшаров':6,\nu'суздалев':2,\nu'володина':8,\nu'нафиков':5,\nu'акулова':4,\nu'шаповаленко':2,\nu'шуманов':2,\nu'куранов':4,\nu'беккер':12,\nu'ермишко':2,\nu'кругляк':4,\nu'трухин':12,\nu'гарьковенко':2,\nu'тагинцев':2,\nu'зеликов':2,\nu'попандопуло':2,\nu'вахмистров':2,\nu'лисовская':5,\nu'гурова':11,\nu'голицын':3,\nu'шичко':2,\nu'повод':3,\nu'тимошкина':2,\nu'лагун':4,\nu'рочева':2,\nu'шишкунов':2,\nu'картышова':2,\nu'кутовой':3,\nu'борзенков':7,\nu'домашев':2,\nu'чувашов':2,\nu'мелькова':3,\nu'карпушина':2,\nu'орлинская':2,\nu'ситдикова':2,\nu'якушин':7,\nu'ратников':12,\nu'логутенок':2,\nu'буданов':8,\nu'саенко':19,\nu'железников':2,\nu'никитов':2,\nu'вавилин':4,\nu'анненкова':2,\nu'мастеров':4,\nu'белобородова':4,\nu'такмаков':3,\nu'ипатова':6,\nu'ившин':5,\nu'волчихин':3,\nu'мардер':2,\nu'новокрещенов':7,\nu'коновал':3,\nu'выдрин':2,\nu'богачев':14,\nu'пруссаков':3,\nu'загуменнов':2,\nu'катенев':2,\nu'лёвин':6,\nu'цыбульский':2,\nu'белехова':3,\nu'луньков':3,\nu'хацкевич':2,\nu'червякова':2,\nu'стефанович':4,\nu'коцюба':4,\nu'одинцов':13,\nu'тумусов':2,\nu'наговицына':2,\nu'гращенков':2,\nu'замковой':2,\nu'дугина':2,\nu'темников':7,\nu'макаев':5,\nu'селихов':4,\nu'курятников':2,\nu'тимина':2,\nu'березкин':6,\nu'казанчева':2,\nu'жилкина':4,\nu'зацепин':15,\nu'гелета':2,\nu'айвазян':4,\nu'бобылева':6,\nu'первова':2,\nu'грищенко':23,\nu'карабанов':5,\nu'терновой':6,\nu'погосян':6,\nu'вологжанин':3,\nu'ярков':2,\nu'андрущак':3,\nu'образцов':7,\nu'балыбин':2,\nu'горячих':2,\nu'верютин':2,\nu'филиппова':33,\nu'шелестов':2,\nu'утешев':2,\nu'чуманов':2,\nu'красовский':8,\nu'яркина':3,\nu'булаткин':3,\nu'шатц':2,\nu'кривец':2,\nu'кареев':2,\nu'симагина':2,\nu'федулов':19,\nu'рубан':13,\nu'скрыльникова':2,\nu'клюев':26,\nu'нарышкина':2,\nu'шалатова':3,\nu'аджиев':6,\nu'варфоломеев':9,\nu'вихрев':2,\nu'кава':2,\nu'куляшов':2,\nu'петрушенко':5,\nu'цыремпилов':2,\nu'юмадилов':4,\nu'климин':4,\nu'ракитов':2,\nu'смоленский':3,\nu'гойгов':2,\nu'рогозина':4,\nu'кабешов':2,\nu'майданов':2,\nu'пимахов':2,\nu'губа':4,\nu'лотов':2,\nu'ревуцкий':2,\nu'потеряева':2,\nu'шулико':3,\nu'
боделан':3,\nu'лаптенко':2,\nu'логвинович':2,\nu'колотилина':2,\nu'козик':2,\nu'арапов':4,\nu'солодкин':2,\nu'присяжный':3,\nu'аносов':10,\nu'распопин':3,\nu'пестряков':3,\nu'борискин':7,\nu'савелова':2,\nu'жолобов':5,\nu'кураков':9,\nu'еремкина':2,\nu'соломатин':12,\nu'иванов':425,\nu'привалов':23,\nu'шкиль':2,\nu'капустин':23,\nu'маскаева':2,\nu'галиев':17,\nu'тупицына':2,\nu'панкеев':2,\nu'козеев':3,\nu'джафаров':3,\nu'часовских':6,\nu'ваганова':4,\nu'бурков':15,\nu'гайдаш':2,\nu'немировченко':2,\nu'карпычев':2,\nu'глаголева':4,\nu'надолинский':2,\nu'сюткина':2,\nu'михайлюков':2,\nu'сагиров':2,\nu'червонный':2,\nu'шульпина':2,\nu'таран':19,\nu'алимпиева':2,\nu'набоков':3,\nu'сурма':2,\nu'усманов':20,\nu'южаков':4,\nu'есипов':6,\nu'смотрина':2,\nu'лазутина':3,\nu'снежко':7,\nu'гур':2,\nu'ганеева':5,\nu'колтаков':4,\nu'теряев':2,\nu'буторова':2,\nu'минниханов':2,\nu'логачева':6,\nu'стуров':3,\nu'микиев':2,\nu'лычкин':2,\nu'сидорина':3,\nu'забелина':6,\nu'бескровный':4,\nu'кучерявый':6,\nu'кисиль':4,\nu'незнамов':3,\nu'матюшев':2,\nu'паняев':2,\nu'чумаченко':9,\nu'азиев':2,\nu'теуважуков':3,\nu'докукин':3,\nu'ивков':2,\nu'туркин':8,\nu'рыжкин':5,\nu'нуров':2,\nu'рябцов':2,\nu'чугаева':2,\nu'назмутдинов':4,\nu'логунова':5,\nu'шимаров':2,\nu'горев':12,\nu'шкуренко':4,\nu'сиротин':10,\nu'буданцев':3,\nu'баландова':2,\nu'чевычелов':2,\nu'левенец':2,\nu'жирнов':13,\nu'щеголь':2,\nu'виниченко':7,\nu'кисиленко':2,\nu'клюкина':2,\nu'суслин':4,\nu'царегородцев':2,\nu'красильникова':12,\nu'железняков':5,\nu'прокошев':3,\nu'буренок':2,\nu'гапич':2,\nu'лукьянчикова':2,\nu'лапенко':2,\nu'горошко':4,\nu'парамоненко':2,\nu'карачун':2,\nu'корчевская':2,\nu'клиншов':2,\nu'шахова':7,\nu'бахмутов':2,\nu'усольцева':3,\nu'абанин':5,\nu'аврамчук':2,\nu'токтаров':2,\nu'эрнст':2,\nu'просвирнина':2,\nu'наседкин':2,\nu'коноплин':3,\nu'монаков':3,\nu'елисеев':46,\nu'речкалов':2,\nu'казакова':50,\nu'ахметшина':2,\nu'фисунов':2,\nu'рягузова':2,\nu'добрынина':9,\nu'смородников':2,\nu'поцяпун':3,\nu'драгунова':2,\nu'ваньков':3,\nu'махарадзе':4,\nu'апполонова':3,\nu'илькин':2,\nu'черешко':2,\nu'штонденко':2,\nu'дрозд':7,\nu'лысенко':50,\nu'громаков':3,\nu'марченкова':4,\nu'капралова':2,\nu'акиев':3,\nu'хлебникова':8,\nu'ханова':3,\nu'богач':3,\nu'марущенко':2,\nu'шелепова':3,\nu'демаков':2,\nu'репина':10,\nu'шорин':5,\nu'левицкая':6,\nu'заббарова':2,\nu'уткина':14,\nu'прокудин':9,\nu'маресьев':2,\nu'крайнов':8,\nu'стовпец':3,\nu'сысоева':13,\nu'каширская':3,\nu'очкасов':2,\nu'курицын':6,\nu'селищева':2,\nu'хорошева':2,\nu'волгин':8,\nu'медков':2,\nu'тарчуков':2,\nu'скороспелов':2,\nu'гриневич':7,\nu'феклистова':3,\nu'хочуев':2,\nu'маратканов':2,\nu'гунько':7,\nu'садовникова':6,\nu'тютюнник':6,\nu'колпышев':2,\nu'кухарев':4,\nu'садчиков':4,\nu'солошенко':2,\nu'яцко':2,\nu'гопанюк':2,\nu'габараев':2,\nu'доронкин':2,\nu'посконин':2,\nu'бурганов':2,\nu'бессараб':2,\nu'кармишин':3,\nu'гузенко':8,\nu'котлярова':6,\nu'трояков':2,\nu'марухин':3,\nu'ручьев':2,\nu'санина':2,\nu'толстой':3,\nu'толстов':15,\nu'хамидуллин':6,\nu'шевякова':4,\nu'сергиенко':29,\nu'войтенко':14,\nu'степанцов':3,\nu'шушарин':2,\nu'мелкумян':2,\nu'позолотин':4,\nu'сандрацкий':2,\nu'дубовицких':2,\nu'недоступов':2,\nu'дудникова':2,\nu'полозков':3,\nu'кроль':2,\nu'шайкин':2,\nu'манджиев':6,\nu'кургузов':5,\nu'задорожная':4,\nu'кириленко':17,\nu'артюшенко':3,\nu'долгий':7,\nu'шляков':3,\nu'любарский':3,\nu'семашко':5,\nu'арефкин':2,\nu'чужаева':2,\nu'алайцев':2,\nu'сазонкина':2,\nu'черкесова':2,\nu'пшеничный':9,\nu'шевчик':3,\nu'яськов':2,\nu'скорин':2,\nu'ба
шинский':2,\nu'платухин':2,\nu'кольчугин':2,\nu'сухотерин':4,\nu'дорош':4,\nu'селеванов':2,\nu'шацкий':4,\nu'зиборов':2,\nu'скогорев':2,\nu'ни':3,\nu'исмагилова':4,\nu'богданова':49,\nu'вячин':3,\nu'крюков':33,\nu'павликов':6,\nu'кулюкин':4,\nu'донцов':7,\nu'чарикова':2,\nu'шабович':2,\nu'кучерова':6,\nu'лушников':15,\nu'копач':2,\nu'фальков':2,\nu'афонин':22,\nu'макаровский':6,\nu'мудреченко':2,\nu'самолькин':2,\nu'дасаев':3,\nu'чекунов':6,\nu'дармограй':3,\nu'шокуров':3,\nu'злобин':24,\nu'кучаев':4,\nu'луканов':2,\nu'крутых':2,\nu'тулупов':5,\nu'боброва':19,\nu'бурдинский':2,\nu'затулин':2,\nu'зимарев':3,\nu'синявский':5,\nu'иволгина':4,\nu'неклюдова':5,\nu'курадовец':2,\nu'курков':7,\nu'мокринский':2,\nu'муляр':3,\nu'леденева':3,\nu'мурашкин':13,\nu'галайко':3,\nu'ротарь':3,\nu'грицук':2,\nu'прокопчик':2,\nu'авдюкова':2,\nu'дудкин':14,\nu'судаков':14,\nu'ибрагимов':40,\nu'савинцев':3,\nu'белянин':5,\nu'байкина':2,\nu'кайгородов':8,\nu'бурылов':2,\nu'задорин':5,\nu'кандакова':2,\nu'лученко':2,\nu'цой':18,\nu'гущина':19,\nu'белимов':2,\nu'юдахин':2,\nu'сытенко':2,\nu'бузина':4,\nu'березовский':9,\nu'фарафонов':5,\nu'туркменова':2,\nu'брантова':3,\nu'левашова':4,\nu'вертелецкий':3,\nu'гайсин':9,\nu'малиночка':2,\nu'бальжинимаева':2,\nu'евтушок':2,\nu'ланикин':2,\nu'бражко':2,\nu'пузаков':7,\nu'каневский':2,\nu'бады':4,\nu'рожковский':2,\nu'прут':2,\nu'прус':2,\nu'трапезникова':3,\nu'рыбин':13,\nu'ашурбеков':3,\nu'гвоздева':8,\nu'бекузаров':2,\nu'аверин':13,\nu'дитковский':2,\nu'вихарева':2,\nu'караев':10,\nu'цыганов':12,\nu'цыганок':2,\nu'смуров':3,\nu'мулл':2,\nu'чернухина':2,\nu'федоровский':4,\nu'окулов':3,\nu'якобсон':3,\nu'скрипник':2,\nu'безбородова':7,\nu'смолкина':2,\nu'заварзин':6,\nu'карслян':2,\nu'спичак':4,\nu'амиров':13,\nu'колков':2,\nu'золин':5,\nu'озернов':2,\nu'косорукова':2,\nu'лихунчан':2,\nu'варова':2,\nu'чистяков':36,\nu'востряков':3,\nu'данилюкова':2,\nu'лисовский':6,\nu'коньков':17,\nu'курилова':4,\nu'самусева':4,\nu'деменко':3,\nu'псарева':2,\nu'цариковский':2,\nu'шевцова':15,\nu'лапшов':5,\nu'жернаков':2,\nu'швед':3,\nu'казанская':3,\nu'панюшкин':2,\nu'классен':3,\nu'молодченко':2,\nu'клепов':3,\nu'фатьянов':4,\nu'пауль':2,\nu'чехова':2,\nu'зирка':2,\nu'мартыненков':2,\nu'кириллова':31,\nu'груздов':2,\nu'��омова':3,\nu'веденеев':4,\nu'карасенко':2,\nu'трушина':7,\nu'чехачева':2,\nu'щемеров':2,\nu'шуба':4,\nu'кулешов':31,\nu'сорока':10,\nu'беззубов':3,\nu'котиев':2,\nu'мамин':4,\nu'кривов':14,\nu'кривой':4,\nu'косарев':29,\nu'кузовков':4,\nu'тимошкин':3,\nu'бубнов':20,\nu'багдасарян':5,\nu'галустов':3,\nu'дураков':2,\nu'переломов':2,\nu'кобзев':17,\nu'мусихин':4,\nu'горлова':5,\nu'плоткина':2,\nu'шапошников':16,\nu'колодкин':4,\nu'бельцев':2,\nu'заставный':2,\nu'сазанов':6,\nu'маленьких':2,\nu'бреев':5,\nu'безносов':3,\nu'антошин':6,\nu'супряга':2,\nu'порохня':2,\nu'озеров':12,\nu'салимов':3,\nu'вострецов':4,\nu'коцур':3,\nu'халдин':2,\nu'насонова':2,\nu'пустобаев':2,\nu'сбоев':2,\nu'ставропольцев':2,\nu'байрамбеков':3,\nu'пустотин':2,\nu'матвеенкова':2,\nu'курышев':10,\nu'райков':5,\nu'дейнега':4,\nu'тетерин':14,\nu'корнейчик':2,\nu'гайнулин':3,\nu'харисов':6,\nu'дитрих':3,\nu'старикова':7,\nu'наконечный':7,\nu'шнитов':2,\nu'мухаметшин':11,\nu'володченко':6,\nu'галстян':4,\nu'деркачев':4,\nu'мирзаев':3,\nu'шинкоренко':5,\nu'панюков':5,\nu'бадаев':3,\nu'киселева':60,\nu'конарев':3,\nu'владимиров':36,\nu'гаврилин':9,\nu'сулима':2,\nu'смирнов':252,\nu'галиакберова':2,\nu'алпатова':2,\nu'холзаков':2,\nu'садовская':6,\nu'кочуков':2,\nu'пивоварчук':2,\nu'передерий':3,\nu'
чудинов':7,\nu'ровнов':3,\nu'елецкий':2,\nu'занько':2,\nu'волченко':2,\nu'иващук':2,\nu'золотовский':2,\nu'чурсинов':3,\nu'женихов':3,\nu'радин':4,\nu'колган':2,\nu'килина':2,\nu'подколзин':7,\nu'жилинский':2,\nu'хуторов':2,\nu'бударин':10,\nu'стуков':2,\nu'пчелкина':2,\nu'бужор':2,\nu'дзгоев':2,\nu'сухарев':23,\nu'доронина':12,\nu'микулик':2,\nu'курмаев':4,\nu'корчуганов':2,\nu'монин':5,\nu'шаршов':3,\nu'воднев':2,\nu'анищенков':4,\nu'аблаев':2,\nu'ахмедова':5,\nu'вашкевич':3,\nu'лошаков':4,\nu'болотина':3,\nu'посунько':3,\nu'гладунов':2,\nu'моисеева':29,\nu'реброва':4,\nu'буханцов':5,\nu'чумичев':2,\nu'демко':2,\nu'девятаев':3,\nu'теплоухова':3,\nu'степин':10,\nu'тумасян':2,\nu'ларгин':2,\nu'беседин':13,\nu'галдин':2,\nu'лобастова':2,\nu'шулепов':6,\nu'сюсюра':2,\nu'горбачев':31,\nu'салаватов':3,\nu'пестрецов':2,\nu'левчук':4,\nu'данилина':7,\nu'кирилин':7,\nu'мордовцева':2,\nu'курта':2,\nu'маштакова':2,\nu'сутормин':4,\nu'кобцев':3,\nu'цыбин':7,\nu'данишевский':2,\nu'поломошнов':2,\nu'зозуля':9,\nu'прохода':2,\nu'панасик':2,\nu'заглядов':3,\nu'краснокутская':3,\nu'терентьева':12,\nu'малюкова':2,\nu'ушков':2,\nu'тычинин':3,\nu'бернацкая':3,\nu'курамшин':3,\nu'горкунов':3,\nu'кайзер':3,\nu'диянов':2,\nu'федулова':4,\nu'казакевич':6,\nu'иванцов':19,\nu'соколовский':15,\nu'горина':7,\nu'луговской':5,\nu'тимощук':4,\nu'неумывакин':2,\nu'кобзенко':2,\nu'михаленко':4,\nu'губаев':4,\nu'бутко':6,\nu'качаев':5,\nu'киргинцев':3,\nu'григорчук':3,\nu'коковин':7,\nu'саверина':2,\nu'дидковский':3,\nu'кручинин':9,\nu'барабанщиков':2,\nu'синяков':10,\nu'ибрагимова':13,\nu'арбатов':2,\nu'мостовой':6,\nu'кашин':14,\nu'шкловский':2,\nu'ашихмин':7,\nu'федюнин':7,\nu'чубрей':2,\nu'юрко':5,\nu'касьянов':17,\nu'молотов':4,\nu'горбанев':5,\nu'душин':5,\nu'арифуллин':2,\nu'анистратенко':4,\nu'михеенко':2,\nu'лобастов':2,\nu'маевская':3,\nu'калякин':6,\nu'кузиленков':2,\nu'дунец':2,\nu'саяпин':3,\nu'соболенко':4,\nu'фоминов':2,\nu'ребров':8,\nu'андреевская':3,\nu'господарик':3,\nu'лобанцев':2,\nu'томин':3,\nu'казиев':2,\nu'кудрякова':2,\nu'куковеров':3,\nu'голубинский':2,\nu'саркисян':11,\nu'воротников':7,\nu'петриков':3,\nu'кураев':5,\nu'рогач':2,\nu'брюханов':8,\nu'коленко':2,\nu'гарманова':2,\nu'идилов':2,\nu'бабиков':4,\nu'кедров':3,\nu'курочкина':18,\nu'астафеева':2,\nu'щебланин':2,\nu'чагин':4,\nu'кокина':2,\nu'подсобляев':2,\nu'ильницкий':4,\nu'нарбиков':2,\nu'парфенов':23,\nu'подшивалов':3,\nu'грибанов':18,\nu'ахунов':3,\nu'бандурин':6,\nu'кругляков':5,\nu'мотасов':2,\nu'кирюхин':4,\nu'щенникова':3,\nu'кобец':9,\nu'фирсиков':2,\nu'анашкин':7,\nu'чурсина':7,\nu'хайретдинов':3,\nu'бубликова':3,\nu'москаленко':26,\nu'васько':3,\nu'епифанцева':3,\nu'бахмуров':2,\nu'чиркина':3,\nu'ионов':8,\nu'павличенко':6,\nu'брызгалин':5,\nu'жуковский':9,\nu'скворцов':41,\nu'лопота':2,\nu'сысуев':5,\nu'пимонова':2,\nu'тулебаев':2,\nu'вознюк':6,\nu'савоськина':3,\nu'еркин':2,\nu'синютина':2,\nu'дворник':2,\nu'двойных':3,\nu'пашинский':3,\nu'салангин':2,\nu'нехорошев':3,\nu'зеленина':5,\nu'поляков':95,\nu'пинегин':2,\nu'семёнова':6,\nu'гукасян':4,\nu'мережко':3,\nu'гасаналиев':3,\nu'тхостов':2,\nu'чечина':4,\nu'жарикова':4,\nu'ткачева':22,\nu'сальников':25,\nu'кувшинников':2,\nu'шмойлова':2,\nu'бураев':2,\nu'пожидаев':8,\nu'рябикин':2,\nu'рагимов':7,\nu'некипелова':3,\nu'кубанов':2,\nu'горяников':2,\nu'жирякова':2,\nu'константинов':39,\nu'бутько':2,\nu'ивлиев':6,\nu'зазуля':2,\nu'сахно':8,\nu'полушина':3,\nu'солодахина':2,\nu'опарин':8,\nu'шупляк':2,\nu'лесных':8,\nu'нужный':2,\nu'давыдкин':5,\nu'шапран':2,\nu'пепеляева':2,\n
u'лисовенко':3,\nu'краевая':2,\nu'коноваленков':3,\nu'чупрунов':2,\nu'успенская':3,\nu'гандалоев':2,\nu'гордейчук':3,\nu'шишкарев':4,\nu'загоренко':2,\nu'зубрицкий':2,\nu'кадыков':4,\nu'кубрак':2,\nu'корбань':3,\nu'алиев':58,\nu'шемякина':3,\nu'перфильева':5,\nu'бисенов':2,\nu'дергилев':2,\nu'вологин':2,\nu'спиркин':4,\nu'бессавин':2,\nu'решотка':2,\nu'маркелов':19,\nu'долгушева':2,\nu'иващенко':26,\nu'кособоков':2,\nu'никитина':76,\nu'шкельтин':2,\nu'яруллина':2,\nu'шупенко':2,\nu'кабачек':2,\nu'петракова':6,\nu'ручкин':4,\nu'сырникова':2,\nu'ересько':3,\nu'брянский':2,\nu'мовчан':10,\nu'шенцов':3,\nu'батуров':2,\nu'боженко':4,\nu'мирошниченко':30,\nu'четвериков':6,\nu'саломатин':2,\nu'бабий':5,\nu'санжарова':3,\nu'бабин':21,\nu'старцев':17,\nu'баранцев':5,\nu'мокрецов':4,\nu'овсянникова':15,\nu'ревякин':9,\nu'парваткин':2,\nu'магадеев':2,\nu'цыбулько':3,\nu'гарипов':8,\nu'рассадников':2,\nu'радаева':2,\nu'качанов':7,\nu'рожкин':2,\nu'меликян':3,\nu'самодуров':5,\nu'березуцкий':3,\nu'габуев':4,\nu'куделко':2,\nu'иванова':270,\nu'безродная':3,\nu'артамонов':31,\nu'маковеева':2,\nu'глушанков':2,\nu'семенюта':4,\nu'косьянов':3,\nu'джанхотов':2,\nu'багров':8,\nu'боровских':4,\nu'чикунов':4,\nu'акименко':6,\nu'курченко':2,\nu'костоев':6,\nu'котова':39,\nu'пантелеева':10,\nu'смешко':2,\nu'колосова':18,\nu'беленко':7,\nu'огневчук':2,\nu'кубраков':3,\nu'воробцов':2,\nu'кочеткова':15,\nu'пантюшин':2,\nu'семенченко':9,\nu'малашкин':4,\nu'веренич':2,\nu'щавелев':3,\nu'тюрина':17,\nu'яценюк':2,\nu'ронжин':4,\nu'солдунов':2,\nu'аржанов':4,\nu'вьюгина':2,\nu'тиунов':8,\nu'надымов':2,\nu'соловчук':2,\nu'белин':2,\nu'бигеев':2,\nu'белик':10,\nu'вишневский':6,\nu'максимовская':3,\nu'бетин':4,\nu'громова':29,\nu'ашыров':2,\nu'жупиков':2,\nu'неведомский':2,\nu'дупак':2,\nu'мамченко':2,\nu'кряжев':4,\nu'казеннова':2,\nu'батаев':8,\nu'савинкова':2,\nu'сабуров':9,\nu'коцеруба':3,\nu'кочанов':2,\nu'перонко':2,\nu'туляков':2,\nu'дацко':3,\nu'разуваева':5,\nu'крутова':6,\nu'гафурова':2,\nu'напольских':3,\nu'ширнин':2,\nu'снитко':3,\nu'ахматханов':2,\nu'плахотников':2,\nu'акчурин':8,\nu'копытов':5,\nu'ламм':2,\nu'семенник':2,\nu'зюганов':2,\nu'кулинич':6,\nu'сбитнева':4,\nu'яворовская':2,\nu'кораблин':4,\nu'чалая':2,\nu'алипов':2,\nu'мочалин':7,\nu'сабирова':3,\nu'снетков':4,\nu'шатько':2,\nu'щанин':2,\nu'мухамадиева':3,\nu'коневских':2,\nu'савелов':2,\nu'травников':4,\nu'антохина':3,\nu'удот':2,\nu'мошаров':2,\nu'китаев':9,\nu'шляхтин':3,\nu'лейман':3,\nu'стукан':2,\nu'капранова':3,\nu'ямщиков':2,\nu'сивачева':2,\nu'новиков':168,\nu'жиганов':5,\nu'бурыкина':2,\nu'дрожжин':8,\nu'байрамов':6,\nu'ганус':3,\nu'ламанов':2,\nu'пачин':3,\nu'мусевич':2,\nu'тюрин':34,\nu'иевлева':5,\nu'сураев':2,\nu'подъячев':2,\nu'осипенко':20,\nu'кирюшин':7,\nu'королева':59,\nu'богуславский':6,\nu'молоканов':9,\nu'пищулина':3,\nu'масюк':2,\nu'федорков':3,\nu'гладкий':4,\nu'грабовский':7,\nu'тимашев':2,\nu'манвелян':3,\nu'бухов':2,\nu'юсупова':18,\nu'какоткин':2,\nu'сутямова':2,\nu'слободяник':2,\nu'шелков':3,\nu'скорнякова':5,\nu'кондратьев':55,\nu'золотарёв':2,\nu'шибаев':13,\nu'мирошников':16,\nu'поярков':5,\nu'иванюшин':3,\nu'касьян':6,\nu'базанов':7,\nu'рыбак':14,\nu'черевко':6,\nu'гасиев':2,\nu'алёхина':2,\nu'алилуев':3,\nu'галушкин':9,\nu'придиус':2,\nu'концевая':3,\nu'северин':3,\nu'артеев':2,\nu'хабалов':2,\nu'шедько':3,\nu'зимина':12,\nu'спорышева':3,\nu'кирюхина':4,\nu'ямщикова':3,\nu'курлов':2,\nu'долгополов':5,\nu'масляков':2,\nu'самухин':3,\nu'осадчук':6,\nu'артемова':9,\nu'кривоногов':3,\nu'тагунов':2,\nu'милова':6,\nu'але
ксахин':5,\nu'винтовкин':2,\nu'грохотова':2,\nu'киргизов':3,\nu'шишов':14,\nu'панкул':2,\nu'стаховский':4,\nu'ворончихин':5,\nu'хиценко':2,\nu'гришай':2,\nu'разгуляева':3,\nu'мартышкин':3,\nu'холмина':2,\nu'шеляков':2,\nu'витько':3,\nu'демочко':2,\nu'бугаева':8,\nu'полунина':5,\nu'самошкина':2,\nu'чудин':6,\nu'белоногов':7,\nu'ландин':3,\nu'селимов':4,\nu'киблер':2,\nu'микитенко':2,\nu'шмакова':10,\nu'сунцова':3,\nu'якушев':27,\nu'видинеева':2,\nu'галяутдинов':4,\nu'крымский':3,\nu'белохвостикова':2,\nu'филипченко':2,\nu'ковалевская':7,\nu'органов':2,\nu'бодак':4,\nu'мышеловский':2,\nu'суязов':2,\nu'смердов':3,\nu'кичев':2,\nu'соловьева':81,\nu'болтенко':3,\nu'метельский':5,\nu'короленок':2,\nu'малюгин':4,\nu'хаткевич':2,\nu'войтко':3,\nu'черникова':19,\nu'данилин':16,\nu'войцеховский':5,\nu'коржев':4,\nu'парахин':8,\nu'ревин':8,\nu'салимгареева':2,\nu'гатагонова':2,\nu'щвец':2,\nu'кунец':2,\nu'альбеков':3,\nu'арбузов':14,\nu'булатов':22,\nu'лутцев':2,\nu'калмычков':2,\nu'кольцов':17,\nu'диков':2,\nu'хабибуллина':3,\nu'клюшин':2,\nu'пряников':3,\nu'ситько':2,\nu'коробко':9,\nu'мажуга':3,\nu'коробка':7,\nu'авдиенко':2,\nu'ухова':3,\nu'устюгова':3,\nu'степанюк':4,\nu'алпеева':2,\nu'петрачков':2,\nu'степанищев':4,\nu'мурзабаева':2,\nu'костромин':2,\nu'шорохов':8,\nu'белянкин':4,\nu'серафимович':2,\nu'костюкова':5,\nu'ливанова':2,\nu'меньшенин':2,\nu'кордюков':7,\nu'каппушев':2,\nu'ондар':8,\nu'каталова':2,\nu'важнов':2,\nu'куманцов':2,\nu'алескеров':6,\nu'голубятников':2,\nu'бурдыкина':2,\nu'березницкий':2,\nu'добренко':3,\nu'пудовиков':2,\nu'дубовик':13,\nu'викторов':16,\nu'булгак':2,\nu'тебиев':2,\nu'молодкин':3,\nu'ерошко':2,\nu'собянин':5,\nu'плисова':2,\nu'шендриков':3,\nu'матюхин':13,\nu'черемухин':5,\nu'горланова':2,\nu'рафиков':7,\nu'брыксина':2,\nu'гнатюк':8,\nu'музаев':4,\nu'таратута':3,\nu'балаева':3,\nu'колмакова':6,\nu'липкин':3,\nu'девятко':3,\nu'абакшин':2,\nu'епифанова':9,\nu'шестакова':11,\nu'матюшкин':5,\nu'шумков':7,\nu'кислица':2,\nu'радченко':40,\nu'горбачёв':3,\nu'быковский':10,\nu'малашевич':2,\nu'латышков':2,\nu'педан':3,\nu'черешнев':3,\nu'орехов':22,\nu'меркушев':2,\nu'семыкин':5,\nu'стефанков':2,\nu'посашков':2,\nu'третяк':2,\nu'осеева':3,\nu'касимов':11,\nu'лавлинский':2,\nu'шукаев':3,\nu'высоцкая':5,\nu'цепелев':2,\nu'калинов':4,\nu'бушмин':2,\nu'елфимов':8,\nu'потапов':63,\nu'акрамов':2,\nu'бестаев':4,\nu'ксенофонтов':5,\nu'ташкина':2,\nu'шохин':3,\nu'шабуров':4,\nu'ермошкина':2,\nu'налимова':2,\nu'новицкий':9,\nu'ерасов':2,\nu'белозерцев':4,\nu'троеглазов':2,\nu'морина':6,\nu'татарченко':2,\nu'пищулин':6,\nu'тужилин':4,\nu'хаустов':9,\nu'носаль':2,\nu'остроухов':6,\nu'швайковская':2,\nu'соснов':3,\nu'фалалеев':5,\nu'ширинов':3,\nu'полетаев':11,\nu'акованцев':3,\nu'саидов':10,\nu'зарыпов':2,\nu'безродных':2,\nu'левыкин':2,\nu'яхутль':3,\nu'колодкина':2,\nu'кучерявенко':2,\nu'зыков':25,\nu'костиков':7,\nu'перебоев':3,\nu'дурягин':5,\nu'алханов':4,\nu'зеленкова':3,\nu'игнатова':18,\nu'демина':15,\nu'устинов':38,\nu'невзорова':4,\nu'немцов':4,\nu'решетников':20,\nu'гальченко':6,\nu'чуйченко':2,\nu'ильин':88,\nu'молочков':2,\nu'фатхутдинов':2,\nu'засикан':2,\nu'бельчиков':3,\nu'плескачева':2,\nu'трубникова':3,\nu'мурзаев':3,\nu'яскевич':3,\nu'каменев':15,\nu'баушев':2,\nu'довнар':2,\nu'надеждин':10,\nu'мусаева':2,\nu'мариупольский':2,\nu'ворожбит':4,\nu'савельева':28,\nu'минасов':2,\nu'гончаров':92,\nu'семенюк':12,\nu'почтаренко':2,\nu'епанешников':2,\nu'брюхов':2,\nu'волохин':4,\nu'романовская':10,\nu'мотков':2,\nu'курашев':2,\nu'тюмин':2,\nu'изюмов':4,\nu'новоселов'
:22,\nu'чеснокова':14,\nu'богдан':9,\nu'рассказова':4,\nu'насибуллин':4,\nu'скуратов':7,\nu'куць':2,\nu'дементьев':34,\nu'лазовский':5,\nu'шумский':4,\nu'ветрова':10,\nu'крюкова':19,\nu'главатских':2,\nu'гречанников':2,\nu'малов':20,\nu'бахирев':2,\nu'дурнов':4,\nu'запруднов':3,\nu'снурницына':2,\nu'дмитрик':2,\nu'хрипунов':3,\nu'веришко':2,\nu'звонарев':3,\nu'бунин':12,\nu'плаутин':2,\nu'гондаренко':2,\nu'нечушкин':2,\nu'неизвестных':2,\nu'егоршина':2,\nu'рысаев':2,\nu'шимчук':2,\nu'воловикова':3,\nu'бердышев':2,\nu'адушев':3,\nu'быстрых':2,\nu'метельков':2,\nu'бурдов':2,\nu'прудников':19,\nu'артеменков':2,\nu'кочубеев':3,\nu'булавин':5,\nu'адам':2,\nu'тараканов':22,\nu'кушниров':2,\nu'марусова':3,\nu'казанцев':31,\nu'климова':27,\nu'шаманский':2,\nu'снапков':2,\nu'григорьева':66,\nu'вельмакин':2,\nu'белявская':6,\nu'бикбаев':5,\nu'садовенко':3,\nu'говорунов':2,\nu'базаев':3,\nu'берсенева':8,\nu'первушина':4,\nu'логутов':2,\nu'мокров':2,\nu'афонский':2,\nu'красавцев':5,\nu'юркова':8,\nu'руденков':2,\nu'балахнин':3,\nu'абакаров':8,\nu'шиянов':5,\nu'гуменюк':11,\nu'казимиров':6,\nu'ларичкин':3,\nu'порубов':3,\nu'бергер':3,\nu'борзикова':2,\nu'омельченко':23,\nu'трубина':4,\nu'алибеков':4,\nu'старовойтова':6,\nu'белорозов':2,\nu'банных':2,\nu'гринь':9,\nu'верзилов':2,\nu'бодня':2,\nu'кондесюк':2,\nu'трифонова':18,\nu'попенко':5,\nu'стехов':2,\nu'перевощиков':6,\nu'лушникова':4,\nu'тютина':2,\nu'романков':2,\nu'зарубина':6,\nu'стрекалова':2,\nu'ивашина':2,\nu'клевакин':4,\nu'дегтеренко':2,\nu'ратникова':6,\nu'канов':2,\nu'найман':3,\nu'сафонов':43,\nu'колосюк':2,\nu'султыгов':7,\nu'сабитов':4,\nu'коль':2,\nu'кривошеин':2,\nu'куандыков':2,\nu'самусевич':3,\nu'колованов':2,\nu'коломников':3,\nu'спирко':3,\nu'буняк':2,\nu'москалец':4,\nu'софронова':4,\nu'кусаинов':3,\nu'милованов':11,\nu'лашина':2,\nu'шерстобитов':9,\nu'вахненко':2,\nu'мухачев':3,\nu'плесовских':2,\nu'данилейко':2,\nu'овчинников':64,\nu'карабаев':4,\nu'свидерская':2,\nu'туманова':10,\nu'перфильев':10,\nu'галанин':5,\nu'абазов':3,\nu'ковтонюк':2,\nu'монахова':9,\nu'мамонтов':16,\nu'брусенцов':2,\nu'воскресенский':5,\nu'чернигов':2,\nu'санцевич':3,\nu'кондрашев':5,\nu'быренков':2,\nu'магометов':2,\nu'телешев':2,\nu'бачерикова':2,\nu'юшин':3,\nu'булахова':2,\nu'соломина':4,\nu'мигачев':4,\nu'алдошина':2,\nu'савостьянов':3,\nu'жмыхов':2,\nu'арсеньев':6,\nu'дворецкая':5,\nu'марусенко':3,\nu'мельков':3,\nu'кормаков':4,\nu'кухтин':5,\nu'фарков':2,\nu'щербин':5,\nu'пивоварова':7,\nu'волокитина':4,\nu'нурмагомедов':3,\nu'гиносян':2,\nu'никоненко':6,\nu'поцелуева':3,\nu'шибков':3,\nu'жарко':2,\nu'федоренков':2,\nu'мусаев':14,\nu'дубкова':3,\nu'булгаков':25,\nu'кабиров':4,\nu'бадмаев':11,\nu'пермякова':15,\nu'желонкин':4,\nu'мартыновченко':2,\nu'бородкина':2,\nu'чухонцева':2,\nu'пименов':13,\nu'гонтаренко':3,\nu'коротченко':6,\nu'стрелов':2,\nu'дюков':3,\nu'кончаков':2,\nu'зайко':5,\nu'левит':3,\nu'козырев':29,\nu'солопов':5,\nu'елизаров':20,\nu'плакидин':3,\nu'горемыкин':5,\nu'ширшина':2,\nu'цуров':3,\nu'дацык':2,\nu'богословский':3,\nu'лотоцкая':2,\nu'быховец':3,\nu'деев':12,\nu'лупин':4,\nu'полев':5,\nu'залевская':3,\nu'смирных':3,\nu'положенцев':2,\nu'галанина':5,\nu'кудреватых':3,\nu'васильев':222,\nu'поленок':2,\nu'стригунов':2,\nu'прошкин':3,\nu'караваева':5,\nu'лопата':4,\nu'аноприенко':2,\nu'навныка':2,\nu'касымова':2,\nu'гетманская':2,\nu'ревков':3,\nu'букало':2,\nu'петрушин':10,\nu'лучникова':3,\nu'гоменюк':4,\nu'мягков':8,\nu'ключникова':2,\nu'мартынова':27,\nu'лаговский':2,\nu'аладьин':2,\nu'агарков':14,\nu'троянов':4,\nu'синя
гин':3,\nu'артизов':2,\nu'манукян':8,\nu'хаирова':2,\nu'цыбульская':2,\nu'кепин':2,\nu'мясоедов':11,\nu'леоненко':3,\nu'пальчун':2,\nu'бибиков':6,\nu'отт':2,\nu'лукичёв':2,\nu'кулеш':2,\nu'задорожний':4,\nu'прозорова':4,\nu'афанасьева':42,\nu'рябуха':2,\nu'варельджан':2,\nu'жиряков':3,\nu'калёнов':3,\nu'отавин':2,\nu'пупков':4,\nu'вороненко':4,\nu'пушко':3,\nu'борисюк':2,\nu'багнюк':3,\nu'птушкина':2,\nu'песоцкий':5,\nu'майборода':8,\nu'кондратенко':34,\nu'тихомиров':36,\nu'борцов':5,\nu'кемайкин':2,\nu'чульдум':2,\nu'смоляр':2,\nu'чуркин':10,\nu'афоничева':2,\nu'дёмин':9,\nu'квитка':2,\nu'квитко':2,\nu'ткач':24,\nu'кутепов':8,\nu'храмцова':5,\nu'чупина':4,\nu'дукмасов':2,\nu'тестов':3,\nu'теучеж':2,\nu'маркина':11,\nu'шуткуева':2,\nu'толченов':2,\nu'пересыпкин':3,\nu'кудряшева':3,\nu'белавина':2,\nu'силенко':2,\nu'курбан':3,\nu'гетьманенко':2,\nu'болгова':3,\nu'дубынин':2,\nu'щепеткова':2,\nu'герко':2,\nu'салаев':2,\nu'бесланеев':4,\nu'бамбышев':2,\nu'босов':2,\nu'очнев':2,\nu'шатунова':2,\nu'сороко':4,\nu'розова':4,\nu'хохлова':20,\nu'стадников':3,\nu'нуждин':4,\nu'ткалич':3,\nu'казмин':2,\nu'фиц':2,\nu'чепурнов':2,\nu'хандархаев':2,\nu'боровицкий':2,\nu'чепурной':3,\nu'жалнин':4,\nu'юшманов':4,\nu'бородина':21,\nu'стригин':2,\nu'головко':19,\nu'баташова':2,\nu'сангаджиев':3,\nu'кокоулин':3,\nu'мутовкин':2,\nu'бурдин':7,\nu'ламерт':2,\nu'абдулина':4,\nu'коляда':4,\nu'тимашева':3,\nu'пелехов':2,\nu'неваев':2,\nu'мочалова':13,\nu'власкин':2,\nu'свирин':3,\nu'свирид':3,\nu'рытов':4,\nu'крикунов':4,\nu'исрафилов':2,\nu'корочкин':2,\nu'дука':3,\nu'умаханов':5,\nu'дукк':2,\nu'халитов':3,\nu'мануйлов':9,\nu'куцевол':2,\nu'фасахов':2,\nu'зоткин':2,\nu'титов':81,\nu'барканова':4,\nu'паустьян':2,\nu'комурзоев':2,\nu'федина':10,\nu'сапожников':21,\nu'слюсарева':2,\nu'аничкин':3,\nu'серкова':3,\nu'лимаева':2,\nu'кабаев':2,\nu'коростелева':6,\nu'макаревич':10,\nu'мельчаков':2,\nu'блошкин':2,\nu'маяков':3,\nu'ассанов':2,\nu'бизяев':3,\nu'цюцюрупа':2,\nu'макрушин':2,\nu'беляй':2,\nu'беляк':6,\nu'булычёва':2,\nu'бреднев':3,\nu'горелик':6,\nu'слюсаренко':9,\nu'синицкий':3,\nu'калябин':3,\nu'муров':2,\nu'колбина':2,\nu'мишагин':3,\nu'райлян':2,\nu'прибытков':3,\nu'новоженов':3,\nu'букатина':2,\nu'ягодкина':2,\nu'дежкин':2,\nu'судариков':7,\nu'ивановская':2,\nu'мамаева':10,\nu'рычков':11,\nu'шостик':2,\nu'поправко':2,\nu'корнеенков':2,\nu'фатеев':15,\nu'кот':3,\nu'кох':2,\nu'коц':2,\nu'михейкин':3,\nu'васин':31,\nu'бровко':9,\nu'довгопол':4,\nu'белогурова':2,\nu'рябушкин':2,\nu'метелев':2,\nu'корженкова':2,\nu'кожанова':5,\nu'тимофеев':86,\nu'филимошин':2,\nu'кривенцов':3,\nu'надькин':2,\nu'царев':23,\nu'мезин':4,\nu'бердиев':2,\nu'гильфанов':3,\nu'андреев':122,\nu'черезов':5,\nu'лайпанов':6,\nu'беляев':87,\nu'маринин':8,\nu'гайворонская':3,\nu'шумилин':7,\nu'сеничкин':2,\nu'картышков':2,\nu'конечных':2,\nu'чупров':7,\nu'коновалова':34,\nu'данковцев':2,\nu'ломовский':2,\nu'ишков':4,\nu'мулюкин':3,\nu'белашова':2,\nu'турченюк':2,\nu'смольянинов':5,\nu'курбанчиев':2,\nu'лузин':6,\nu'лузик':2,\nu'покотило':2,\nu'калинкин':15,\nu'рыжих':6,\nu'варданян':6,\nu'абалов':2,\nu'муха':7,\nu'ахмадеева':2,\nu'жучкова':4,\nu'мороков':2,\nu'карипов':2,\nu'горовой':5,\nu'джафарова':2,\nu'сляднев':4,\nu'лубов':2,\nu'реут':3,\nu'халилулин':2,\nu'леусенко':2,\nu'буторин':5,\nu'камилов':4,\nu'ануфриева':9,\nu'батурина':4,\nu'фаткуллина':2,\nu'шаталов':25,\nu'бушев':2,\nu'косырева':4,\nu'мелехов':7,\nu'банщиков':4,\nu'бакунин':2,\nu'неретин':3,\nu'мирон':2,\nu'кондрашкин':7,\nu'батраков':13,\nu'липов':6,\nu'головков':7,\nu'сайф
утдинов':4,\nu'калина':7,\nu'алоян':2,\nu'неробеев':2,\nu'тур':5,\nu'красникова':7,\nu'корчевой':2,\nu'жук':31,\nu'засеев':3,\nu'кокоулина':2,\nu'журба':6,\nu'чигринов':2,\nu'скуратова':4,\nu'даутов':8,\nu'олейниченко':3,\nu'карасёв':3,\nu'лагутина':6,\nu'черемисов':5,\nu'метелица':2,\nu'завертайло':2,\nu'зиатдинов':3,\nu'скибина':2,\nu'поляничко':2,\nu'волоцков':2,\nu'левкин':5,\nu'ляховненко':2,\nu'федоров':154,\nu'цечоева':2,\nu'рящиков':2,\nu'босых':3,\nu'мосин':16,\nu'шаль':3,\nu'смотров':4,\nu'румянцев':38,\nu'прыткова':2,\nu'девин':2,\nu'курабеков':2,\nu'киушкин':2,\nu'веригин':3,\nu'гаранин':7,\nu'сахарук':2,\nu'санжеев':2,\nu'землянов':2,\nu'калининская':2,\nu'шильдкрет':2,\nu'мезенцева':9,\nu'чижиков':3,\nu'комарова':46,\nu'богдановский':3,\nu'юров':17,\nu'хаджебиеков':2,\nu'шалумов':2,\nu'конкина':4,\nu'бурлов':3,\nu'сирик':5,\nu'балановская':2,\nu'маргиева':2,\nu'цыбикова':2,\nu'репик':2,\nu'репин':25,\nu'зайнуллина':2,\nu'киров':5,\nu'трунов':12,\nu'бачурина':4,\nu'урюпин':5,\nu'расторгуев':6,\nu'хрюкин':2,\nu'кочнев':9,\nu'бословяк':2,\nu'дробышев':6,\nu'кремлева':2,\nu'земцев':3,\nu'пермитин':2,\nu'корякина':2,\nu'борсук':7,\nu'абдразаков':3,\nu'григоров':7,\nu'бланк':2,\nu'комов':10,\nu'дурных':2,\nu'шепелева':10,\nu'федичкин':3,\nu'глушак':4,\nu'германов':2,\nu'солнышкин':2,\nu'гафиатуллин':2,\nu'корпусов':2,\nu'гусакова':3,\nu'любушкина':3,\nu'мекеко':2,\nu'братушев':2,\nu'днепровский':2,\nu'метла':2,\nu'дворкин':2,\nu'гофман':2,\nu'персиянов':2,\nu'резаева':2,\nu'петер':2,\nu'курманов':2,\nu'советкин':2,\nu'агафонов':35,\nu'буняев':2,\nu'туркина':5,\nu'золотов':16,\nu'кушнарёв':2,\nu'колин':3,\nu'исакова':19,\nu'сало':7,\nu'беленький':3,\nu'ворончихина':2,\nu'эминов':2,\nu'веремей':2,\nu'тугушева':4,\nu'красильников':25,\nu'подгорнов':8,\nu'синько':3,\nu'шагиахметов':2,\nu'магомедов':66,\nu'мухамедзянов':3,\nu'липаев':3,\nu'маковей':2,\nu'яловой':3,\nu'липский':2,\nu'горбачева':19,\nu'самуйлов':2,\nu'корчагина':7,\nu'турбанов':3,\nu'украинцев':5,\nu'гулиева':4,\nu'ягодкин':5,\nu'усатенко':2,\nu'азаров':17,\nu'мартинович':2,\nu'винниченко':4,\nu'маремкулов':3,\nu'выголов':2,\nu'сосновская':3,\nu'ивляков':2,\nu'бурковская':4,\nu'закиров':24,\nu'ильченко':14,\nu'грушко':3,\nu'лукьянчук':2,\nu'ерошкин':3,\nu'французов':4,\nu'быханов':2,\nu'уханов':8,\nu'куренков':8,\nu'панькин':3,\nu'веснин':4,\nu'потравнов':2,\nu'калашников':51,\nu'стуколов':2,\nu'смышляев':7,\nu'суровикин':2,\nu'якутова':2,\nu'беленов':2,\nu'легошин':2,\nu'ермолин':12,\nu'астапенко':7,\nu'остроух':3,\nu'воробьева':59,\nu'левадная':3,\nu'мильто':2,\nu'вольхин':3,\nu'ковалевский':14,\nu'пясецкий':3,\nu'чемерис':4,\nu'евлоев':18,\nu'вялков':2,\nu'кочубаев':2,\nu'базаева':2,\nu'писклова':3,\nu'лысова':3,\nu'ахматов':2,\nu'солоненко':3,\nu'конышев':10,\nu'айталиев':3,\nu'будилов':2,\nu'берендеев':2,\nu'тычинский':2,\nu'перепелицин':2,\nu'скляр':6,\nu'шарипова':5,\nu'салтыков':2,\nu'мехедов':3,\nu'раёва':2,\nu'замиралов':2,\nu'беков':4,\nu'вдовенко':9,\nu'дрягин':3,\nu'левый':3,\nu'дранишников':2,\nu'беляков':52,\nu'ястребов':10,\nu'аблязизов':2,\nu'толпыго':2,\nu'лободюк':2,\nu'сериков':8,\nu'теслюк':2,\nu'скорых':2,\nu'акимова':30,\nu'лень':3,\nu'царикаев':2,\nu'коломеец':9,\nu'битюков':4,\nu'хазиев':5,\nu'ренев':2,\nu'ралко':2,\nu'чумаев':2,\nu'геворкян':4,\nu'хмель':2,\nu'бредихин':3,\nu'шайтан':2,\nu'борель':2,\nu'хренков':3,\nu'знаменщиков':3,\nu'кубаева':2,\nu'маврина':5,\nu'свистунов':16,\nu'илюшкин':2,\nu'крупник':2,\nu'свистун':2,\nu'симоненко':13,\nu'мелешко':10,\nu'быкова':33,\nu'яблоновский':2,\nu'бе
сков':2,\nu'полтавская':2,\nu'пинясов':2,\nu'курепин':3,\nu'шакун':4,\nu'хватков':2,\nu'черкасов':34,\nu'хандожко':2,\nu'невмержицкий':2,\nu'бутузов':7,\nu'тютин':3,\nu'сырица':2,\nu'дорошенко':24,\nu'тупицын':4,\nu'миненко':3,\nu'тетеркин':2,\nu'хамлова':3,\nu'шрейдер':3,\nu'шильников':3,\nu'болгов':4,\nu'кабанова':12,\nu'сушко':9,\nu'волик':5,\nu'махнёва':2,\nu'бокарева':4,\nu'пичужкин':2,\nu'щипцов':2,\nu'марковец':2,\nu'максимец':2,\nu'федосенко':4,\nu'рындин':8,\nu'фаустов':2,\nu'балашенко':3,\nu'буряков':9,\nu'костюченков':2,\nu'храпков':3,\nu'огурцова':2,\nu'косенко':14,\nu'полубоярова':2,\nu'акманов':2,\nu'доржу':3,\nu'ломака':5,\nu'ломако':5,\nu'гайдук':9,\nu'гатальский':3,\nu'сивоха':2,\nu'корышева':3,\nu'цыганков':18,\nu'черкашина':9,\nu'цибикжапов':2,\nu'шаманин':5,\nu'борисенков':3,\nu'шафигуллин':9,\nu'аешин':2,\nu'паршаков':2,\nu'гаджикурбанов':2,\nu'улановский':2,\nu'тамилин':2,\nu'косый':2,\nu'заикина':5,\nu'коган':5,\nu'преловский':2,\nu'заря':2,\nu'трубин':10,\nu'жиляев':7,\nu'андреюк':2,\nu'бабакова':2,\nu'ольховская':4,\nu'шишлов':3,\nu'болдырева':11,\nu'юрлов':2,\nu'любимцев':2,\nu'баранецкий':3,\nu'нурмухаметов':2,\nu'кривовичев':3,\nu'гусельникова':3,\nu'хмелевский':2,\nu'плахота':2,\nu'мешков':19,\nu'шоломон':2,\nu'филинов':4,\nu'наумова':27,\nu'колтакова':2,\nu'ахмадуллин':3,\nu'смородин':3,\nu'гомзяков':2,\nu'темирханов':3,\nu'половой':2,\nu'шагов':3,\nu'зубак':2,\nu'немцева':2,\nu'егорин':2,\nu'сторожева':3,\nu'митяева':3,\nu'бондарюк':2,\nu'захарова':83,\nu'коврижных':5,\nu'будыка':2,\nu'андреенко':2,\nu'шумило':3,\nu'пышный':3,\nu'щелоков':3,\nu'агасиев':2,\nu'гуськов':21,\nu'белкин':9,\nu'повалихин':2,\nu'шульженко':5,\nu'люкшин':2,\nu'овсянников':34,\nu'романовский':17,\nu'зайцев':116,\nu'приступа':2,\nu'реутова':2,\nu'цуриков':2,\nu'попенков':2,\nu'цвигун':3,\nu'шматков':2,\nu'кирилина':2,\nu'саушкин':4,\nu'дутов':5,\nu'чукарин':3,\nu'смоян':2,\nu'удалова':5,\nu'карелин':13,\nu'полянин':3,\nu'курячий':2,\nu'жмакин':2,\nu'синицкая':3,\nu'шрам':2,\nu'шашкин':6,\nu'струнина':2,\nu'нуянзин':3,\nu'сапронов':9,\nu'россолов':2,\nu'снегура':2,\nu'кривошея':2,\nu'булыга':3,\nu'могирев':2,\nu'хоботов':2,\nu'шишова':4,\nu'кренделев':3,\nu'яремчук':2,\nu'бабарыкин':4,\nu'рукасов':2,\nu'смородина':2,\nu'саврухин':2,\nu'цапаева':2,\nu'семилетов':2,\nu'пушкин':10,\nu'ложкин':10,\nu'савков':3,\nu'батурова':2,\nu'сорокина':48,\nu'димитрова':2,\nu'ганин':10,\nu'арапова':3,\nu'остапчук':11,\nu'шевчук':31,\nu'горожанин':2,\nu'соловов':4,\nu'хворостянов':2,\nu'запорожский':3,\nu'войтановский':2,\nu'минчев':2,\nu'манин':5,\nu'хуснутдинов':13,\nu'спевак':3,\nu'слепых':2,\nu'сохиев':3,\nu'малофеев':4,\nu'черниченко':2,\nu'демков':2,\nu'бутов':4,\nu'дубинецкая':2,\nu'тукаева':2,\nu'артемов':28,\nu'костерин':6,\nu'умеров':3,\nu'лебединский':3,\nu'безлепкин':5,\nu'погребняк':7,\nu'трохина':2,\nu'здор':2,\nu'керенцев':3,\nu'дон':2,\nu'ростов':3,\nu'брюхнова':2,\nu'шеренешев':2,\nu'дроков':2,\nu'кузнецов':265,\nu'шмаков':16,\nu'огнев':8,\nu'каминская':6,\nu'борисевич':3,\nu'глебов':13,\nu'засухин':2,\nu'нелысов':2,\nu'еговцев':2,\nu'кузьмич':7,\nu'джалилов':4,\nu'гоенко':2,\nu'лебедева':85,\nu'ременников':2,\nu'гурвич':2,\nu'скрынников':2,\nu'картаполов':2,\nu'матлин':2,\nu'пожарский':3,\nu'мицкевич':5,\nu'ступин':8,\nu'ковтун':23,\nu'дорин':3,\nu'нефёдов':6,\nu'бобырь':4,\nu'педанов':3,\nu'алимова':9,\nu'ваганов':18,\nu'арцимович':2,\nu'барсукова':10,\nu'гамзаев':3,\nu'леви':2,\nu'окопный':2,\nu'сосунов':5,\nu'качко':2,\nu'чероков':2,\nu'коротенко':5,\nu'сажинов':2,\nu'асессоров':2,\n
u'краснянский':2,\nu'лубенец':2,\nu'голубков':15,\nu'мартиросян':11,\nu'сысоев':33,\nu'тигиняну':2,\nu'коробов':18,\nu'тоскин':3,\nu'лазеев':2,\nu'кобозев':2,\nu'дубенко':4,\nu'мкртчян':4,\nu'тахмазян':2,\nu'барщевский':2,\nu'ягубова':2,\nu'солодовников':12,\nu'швидко':2,\nu'черкасова':17,\nu'баркова':8,\nu'челышев':5,\nu'щербович':2,\nu'кораблина':6,\nu'пряничников':3,\nu'здвижков':2,\nu'полесский':2,\nu'шестаков':40,\nu'черкесов':5,\nu'марчишин':3,\nu'хапсироков':2,\nu'плясов':2,\nu'большев':3,\nu'беляева':45,\nu'лукашук':4,\nu'лелеков':3,\nu'трапезин':2,\nu'панькова':3,\nu'кудринский':2,\nu'курылев':3,\nu'балчугов':2,\nu'басаргин':2,\nu'красинский':4,\nu'гребенников':8,\nu'клементьева':8,\nu'токарь':5,\nu'рудак':2,\nu'рожновский':2,\nu'шарманов':2,\nu'кочетова':7,\nu'яров':2,\nu'якимчук':5,\nu'шабурова':2,\nu'харин':12,\nu'лукинова':2,\nu'сокол':5,\nu'смолянинов':5,\nu'симаков':22,\nu'дедова':2,\nu'гааг':2,\nu'подейко':2,\nu'гаевская':4,\nu'случак':2,\nu'масалов':3,\nu'корнева':9,\nu'сединкин':3,\nu'кадурин':3,\nu'куханов':2,\nu'вдовин':22,\nu'навроцкий':3,\nu'орел':8,\nu'егорычев':4,\nu'стародубцева':3,\nu'жаднов':2,\nu'мингалев':2,\nu'белогорская':2,\nu'бывшев':2,\nu'забайкин':3,\nu'подольский':9,\nu'вагина':4,\nu'макиенко':5,\nu'немоляев':2,\nu'хисматулин':3,\nu'торгашов':2,\nu'бучнев':7,\nu'солнцева':8,\nu'олексюк':3,\nu'позырайло':2,\nu'брянцев':4,\nu'ерещенко':2,\nu'барченков':2,\nu'седова':14,\nu'иванькова':2,\nu'дроздовский':2,\nu'аюшеева':3,\nu'курмангалиев':2,\nu'авдеева':27,\nu'шеуджен':2,\nu'воловик':8,\nu'шипицин':4,\nu'затонацкий':2,\nu'овечко':2,\nu'рудь':13,\nu'белоконев':2,\nu'папанов':2,\nu'балан':4,\nu'здиорук':3,\nu'дульская':2,\nu'минаков':11,\nu'туранов':3,\nu'голобородько':6,\nu'теряева':2,\nu'аблязов':3,\nu'ханин':6,\nu'бурняшева':2,\nu'касьяненко':5,\nu'гаспарян':4,\nu'улитин':3,\nu'варламов':16,\nu'нежинский':3,\nu'сикоза':2,\nu'рузаев':4,\nu'побережная':2,\nu'черняева':5,\nu'нечеса':2,\nu'дулина':2,\nu'гуров':28,\nu'алтухов':13,\nu'дмитроняк':2,\nu'ревенко':7,\nu'мирсаяпов':2,\nu'слюсарев':4,\nu'васькова':2,\nu'гурьев':23,\nu'покрышкина':2,\nu'алешин':26,\nu'колмогорова':4,\nu'гуськова':7,\nu'уфаев':2,\nu'сосина':3,\nu'михайленко':20,\nu'бухтияров':7,\nu'акбаев':2,\nu'космин':2,\nu'ардашев':2,\nu'мурзак':2,\nu'бондаренко':114,\nu'поваляев':2,\nu'горковенко':2,\nu'ермакович':2,\nu'ветров':22,\nu'козодеров':2,\nu'ветлужских':4,\nu'меркушкин':2,\nu'черхигов':2,\nu'слуцкая':4,\nu'падерина':2,\nu'першина':5,\nu'полушин':4,\nu'бадяев':2,\nu'аксененко':5,\nu'ямолова':2,\nu'семенкова':3,\nu'цимбалюк':3,\nu'асадулаев':3,\nu'пальчикова':2,\nu'бабошина':2,\nu'грохотов':2,\nu'швабский':2,\nu'гибадуллин':5,\nu'шарашов':2,\nu'груша':2,\nu'кругликова':3,\nu'сенченко':5,\nu'замша':2,\nu'троян':6,\nu'прилепина':2,\nu'омельчук':3,\nu'сюзев':2,\nu'киселёв':10,\nu'тугаев':3,\nu'бортникова':4,\nu'оводов':2,\nu'шайхутдинова':2,\nu'кирьянов':11,\nu'наговицын':9,\nu'порохин':2,\nu'шиман':2,\nu'константинова':22,\nu'локтионова':5,\nu'зюзина':4,\nu'ларкин':3,\nu'положий':2,\nu'белоцкий':2,\nu'тулунов':2,\nu'махнёв':4,\nu'калачина':2,\nu'руцкий':3,\nu'копенкина':2,\nu'тимошинин':3,\nu'лапшин':25,\nu'соловьёв':6,\nu'запорожец':7,\nu'безрученков':2,\nu'машин':5,\nu'стекольников':3,\nu'аюшеев':2,\nu'панычев':4,\nu'мединский':4,\nu'кашуба':6,\nu'дорожкин':8,\nu'булатников':2,\nu'поночевный':2,\nu'зейналов':5,\nu'карманова':9,\nu'вербицкий':9,\nu'ямов':2,\nu'булаев':6,\nu'барабанщикова':2,\nu'зенкин':5,\nu'вялов':3,\nu'четвертков':2,\nu'бредихина':2,\nu'струговщиков':2,\nu'пантюхин':3,\nu'те
тюхин':3,\nu'васев':4,\nu'эльжуркаев':2,\nu'шаляпина':2,\nu'асламов':3,\nu'травникова':2,\nu'золотухин':17,\nu'рустамова':2,\nu'щуров':4,\nu'дегтяренко':7,\nu'антипина':6,\nu'кошелева':14,\nu'ясонов':2,\nu'мещерский':3,\nu'овсиенко':3,\nu'колганов':7,\nu'вознесенский':3,\nu'шамай':2,\nu'глаголев':6,\nu'вшивцев':2,\nu'лукашина':2,\nu'тарасова':57,\nu'владыкин':2,\nu'сивицкий':2,\nu'сурмило':2,\nu'базылева':3,\nu'попель':2,\nu'кытманов':2,\nu'елькин':6,\nu'кунцман':2,\nu'карпова':39,\nu'рыбчак':2,\nu'шабанова':6,\nu'кохан':4,\nu'байрамгазиев':2,\nu'елизарова':11,\nu'вагурин':2,\nu'жерегеля':2,\nu'обиход':2,\nu'дергунова':3,\nu'бурин':4,\nu'клишин':5,\nu'жердев':10,\nu'островский':13,\nu'войтов':9,\nu'климашин':2,\nu'демчишин':2,\nu'сукманов':2,\nu'цицкиев':2,\nu'медведникова':2,\nu'летягин':3,\nu'матвеев':93,\nu'янкина':2,\nu'трегуб':4,\nu'томчук':4,\nu'хрулев':4,\nu'журавская':4,\nu'бондарук':5,\nu'хомич':5,\nu'аристархов':2,\nu'манджиева':2,\nu'кибирев':4,\nu'тощев':3,\nu'кузовов':2,\nu'харкевич':2,\nu'мелюк':2,\nu'казаковцев':2,\nu'грушевский':2,\nu'киселевич':2,\nu'кругов':5,\nu'седляр':2,\nu'музыка':2,\nu'чмихаленко':2,\nu'копосов':5,\nu'милушев':2,\nu'кутин':2,\nu'бегунова':3,\nu'тищенко':24,\nu'притула':2,\nu'таланова':2,\nu'шведова':6,\nu'подхалюзин':2,\nu'хуторянская':2,\nu'шилохвостов':3,\nu'малкаров':2,\nu'бармина':7,\nu'кондакова':4,\nu'волин':3,\nu'гребенюков':2,\nu'размахнин':2,\nu'железнов':7,\nu'лантратов':3,\nu'шигорев':2,\nu'минькова':4,\nu'смелов':2,\nu'мячин':9,\nu'кухарский':2,\nu'слобожан':2,\nu'демичева':2,\nu'пискайкин':2,\nu'махортов':3,\nu'злобина':13,\nu'маханов':4,\nu'суровцев':10,\nu'козляков':2,\nu'черняховский':2,\nu'десницкий':2,\nu'микрюков':10,\nu'вишнякова':8,\nu'хатунцев':6,\nu'фефелов':6,\nu'болотин':2,\nu'матковский':3,\nu'ильинская':3,\nu'небиев':2,\nu'рымарь':4,\nu'дешевых':3,\nu'какалин':2,\nu'таразевич':2,\nu'максимович':5,\nu'пятенко':2,\nu'кирдянов':3,\nu'горяинов':4,\nu'сивак':5,\nu'чемоданов':2,\nu'цаликов':2,\nu'лашманов':4,\nu'соков':3,\nu'карабут':2,\nu'шурыгин':9,\nu'лыскова':2,\nu'аппаев':2,\nu'пяткин':5,\nu'брызгалова':4,\nu'поздеева':5,\nu'рассудов':2,\nu'марьясова':3,\nu'тарасов':108,\nu'сатаров':3,\nu'говорков':2,\nu'звонарева':4,\nu'шварцман':2,\nu'дерепаско':2,\nu'маловичко':2,\nu'звездочкин':2,\nu'альтергот':2,\nu'савицкий':12,\nu'зыбинский':3,\nu'шунин':4,\nu'ушакова':18,\nu'савранский':3,\nu'заворотный':2,\nu'тебенькова':3,\nu'огурцов':10,\nu'дубровский':9,\nu'портная':2,\nu'холкин':3,\nu'караваев':17,\nu'заболотная':4,\nu'шахбазов':3,\nu'шиловская':2,\nu'тишкова':2,\nu'тарабаева':2,\nu'романьков':2,\nu'сурмач':2,\nu'демихов':2,\nu'тужилкин':3,\nu'дзусов':3,\nu'макушева':2,\nu'ядревский':2,\nu'барышников':24,\nu'широков':24,\nu'заверуха':2,\nu'криворученко':2,\nu'рафаелян':2,\nu'подласенко':2,\nu'зубрилин':2,\nu'маценок':2,\nu'кутырев':4,\nu'майков':2,\nu'лукошников':2,\nu'алиханов':4,\nu'гудков':25,\nu'топилин':2,\nu'калягин':2,\nu'борзунов':3,\nu'васечкин':3,\nu'вдовина':9,\nu'шупенько':2,\nu'усенкова':2,\nu'гуличева':2,\nu'шандыба':4,\nu'кукушкина':7,\nu'мамонова':5,\nu'евреинов':2,\nu'пикас':2,\nu'ширнина':2,\nu'машанов':2,\nu'бибик':7,\nu'будник':3,\nu'бибин':2,\nu'силантьева':4,\nu'строгов':2,\nu'кудря':5,\nu'подоляк':2,\nu'богуш':7,\nu'подолян':3,\nu'байбаков':5,\nu'ефимова':43,\nu'шепилова':2,\nu'буторина':2,\nu'прилатов':2,\nu'марголин':2,\nu'новак':16,\nu'мелентьев':5,\nu'чернецкая':3,\nu'соснин':14,\nu'панкратов':36,\nu'широкова':13,\nu'ворсина':3,\nu'янькова':2,\nu'роганов':4,\nu'мамедов':39,\nu'горбушин':2,\nu'савочкин':
3,\nu'салыкин':2,\nu'корсакова':6,\nu'аксиненко':2,\nu'чорнобрывая':2,\nu'паскеев':2,\nu'куропаткин':2,\nu'нестин':2,\nu'дмитриенко':16,\nu'папуша':5,\nu'половинкин':9,\nu'кухаренко':5,\nu'лазуткин':8,\nu'дюкарева':2,\nu'родионов':54,\nu'гагарин':9,\nu'носырев':3,\nu'ялынич':2,\nu'проскурякова':5,\nu'мицкий':2,\nu'кривцова':6,\nu'серов':29,\nu'перунов':3,\nu'шакиров':20,\nu'ахундов':2,\nu'мирзоев':9,\nu'мальченко':2,\nu'терещук':5,\nu'калинникова':2,\nu'чепцов':2,\nu'ходаковский':3,\nu'слободянюк':4,\nu'скобликов':2,\nu'крыжановская':2,\nu'кабанов':26,\nu'кафанов':3,\nu'шипов':5,\nu'карпенков':3,\nu'чухломин':2,\nu'астраханцев':3,\nu'шпилько':2,\nu'каракулина':2,\nu'гладышева':11,\nu'оганисян':4,\nu'спивак':5,\nu'анастасов':2,\nu'иваськова':2,\nu'ратушняк':2,\nu'юшкевич':5,\nu'салий':2,\nu'салин':2,\nu'золотавин':2,\nu'солнышков':2,\nu'нижник':3,\nu'гибадулин':4,\nu'мануйлова':3,\nu'будылин':3,\nu'кувшинова':5,\nu'окладников':5,\nu'сайдяшев':2,\nu'морозова':97,\nu'еганов':2,\nu'липин':3,\nu'тучнолобов':2,\nu'каурова':2,\nu'ращупкин':5,\nu'антохин':4,\nu'черняк':10,\nu'татаринова':2,\nu'бучнева':2,\nu'синявин':4,\nu'наумкина':2,\nu'озерчук':3,\nu'грипась':2,\nu'гуринов':2,\nu'канаев':9,\nu'пастухова':7,\nu'набокин':2,\nu'овчар':2,\nu'чутков':2,\nu'аношин':6,\nu'щелканов':3,\nu'бакалдин':4,\nu'леднева':2,\nu'петунин':4,\nu'вишняков':19,\nu'клочихин':2,\nu'бугров':5,\nu'бабиева':2,\nu'болохов':2,\nu'давидов':2,\nu'демьянова':4,\nu'самофал':2,\nu'жемчугова':3,\nu'дарбинян':2,\nu'буркин':6,\nu'раков':8,\nu'гурьянова':8,\nu'деменчук':2,\nu'рахимова':7,\nu'аникин':23,\nu'выскребенцев':3,\nu'трухачев':3,\nu'фаддеева':2,\nu'ласый':2,\nu'ивкин':4,\nu'гринина':2,\nu'лисянский':2,\nu'санников':6,\nu'рябинин':16,\nu'мольков':3,\nu'юсова':4,\nu'замерлюк':2,\nu'плахова':4,\nu'щербань':11,\nu'казанова':2,\nu'кириченко':44,\nu'морин':3,\nu'рябикова':3,\nu'вараксова':2,\nu'фищук':2,\nu'кирюшкин':3,\nu'чалов':10,\nu'мусихина':2,\nu'сирченко':2,\nu'гвоздев':11,\nu'боряев':2,\nu'миняйло':5,\nu'норенко':4,\nu'ступак':6,\nu'шеповалова':2,\nu'катасонов':2,\nu'бодунов':2,\nu'шелудько':2,\nu'дедкова':4,\nu'бурлакова':3,\nu'хрулёв':2,\nu'ховрин':3,\nu'шипицын':10,\nu'яшин':16,\nu'гужва':6,\nu'серёгин':4,\nu'дурова':3,\nu'мазалов':3,\nu'кострикин':2,\nu'краснопольская':2,\nu'бербеков':2,\nu'нос':3,\nu'ничик':2,\nu'счастливенко':2,\nu'овсянкин':2,\nu'рамонов':2,\nu'ромащенко':8,\nu'васильченко':27,\nu'маринчук':2,\nu'мустафин':12,\nu'постельник':2,\nu'сясина':2,\nu'рахаев':3,\nu'наймушин':2,\nu'богачева':14,\nu'баутина':3,\nu'ворсин':3,\nu'федякин':4,\nu'шарабарин':2,\nu'голубева':30,\nu'булка':2,\nu'солтан':3,\nu'панферов':6,\nu'балановский':2,\nu'хромых':2,\nu'хатуев':2,\nu'ильенко':2,\nu'мачнев':4,\nu'чернобай':3,\nu'машков':22,\nu'дорохин':8,\nu'шаменков':2,\nu'ермохин':2,\nu'стешенко':5,\nu'кальченко':3,\nu'биче-оол':3,\nu'терешко':3,\nu'фетисова':4,\nu'кочурова':2,\nu'капков':3,\nu'сергиевский':2,\nu'евстигнеев':11,\nu'дворецкий':7,\nu'лапченко':2,\nu'лутов':2,\nu'малыгин':13,\nu'сопнев':2,\nu'пяткина':3,\nu'нетесов':4,\nu'королев':74,\nu'розанов':5,\nu'максименков':5,\nu'гречишкин':3,\nu'полевая':2,\nu'полянская':12,\nu'кульбацкий':2,\nu'струкова':7,\nu'сагайдак':2,\nu'толстая':2,\nu'нагиев':2,\nu'барило':2,\nu'кухарь':5,\nu'вязовский':2,\nu'кашапов':3,\nu'трачук':5,\nu'сидоров':80,\nu'николаев':123,\nu'кривенченко':3,\nu'куянец':2,\nu'байкалова':2,\nu'гордей':2,\nu'юренко':2,\nu'байраков':2,\nu'скрипченко':3,\nu'локшин':2,\nu'болнокин':2,\nu'патрина':2,\nu'моисеев':62,\nu'сопин':3,\nu'дибирова':2,\nu'манахова':
2,\nu'запольских':2,\nu'низовцева':2,\nu'чудов':4,\nu'сметанин':19,\nu'курбанмагомедов':2,\nu'кориков':2,\nu'губко':3,\nu'багомедов':2,\nu'поцелуев':3,\nu'низамова':4,\nu'карпиевич':2,\nu'щедрова':3,\nu'шафоростов':2,\nu'батуева':2,\nu'письменский':2,\nu'мешкова':4,\nu'толмачева':9,\nu'донченко':6,\nu'натхо':2,\nu'филатенков':2,\nu'бутковский':2,\nu'валыка':3,\nu'щербакова':47,\nu'самсонов':25,\nu'сыромятников':4,\nu'семченко':8,\nu'сидаш':3,\nu'щепина':3,\nu'занина':4,\nu'краснослабодцев':2,\nu'браташ':2,\nu'шестерняк':2,\nu'кундрюков':2,\nu'ражева':2,\nu'кочкуров':2,\nu'гришкевич':2,\nu'соболевский':3,\nu'волошин':13,\nu'голомолзин':2,\nu'пташник':4,\nu'сосновский':8,\nu'янковский':3,\nu'дудукин':3,\nu'глицевич':2,\nu'федосеенко':2,\nu'хлопонин':2,\nu'амельченко':4,\nu'ткаленко':2,\nu'дорджиев':2,\nu'шлапакова':3,\nu'пушкарь':3,\nu'солодков':3,\nu'веллер':2,\nu'забродин':7,\nu'перепелов':3,\nu'жуленков':2,\nu'мукин':2,\nu'измайлов':14,\nu'баева':4,\nu'улыбина':2,\nu'уласевич':4,\nu'щелконогов':2,\nu'шиян':3,\nu'гавриков':17,\nu'александров':77,\nu'шишалов':2,\nu'геккиев':3,\nu'романченко':2,\nu'безбородов':10,\nu'гречуха':2,\nu'маркачев':2,\nu'конончук':2,\nu'башкин':3,\nu'мариненко':2,\nu'салыган':2,\nu'иващенков':2,\nu'польшин':2,\nu'печерина':2,\nu'логвинова':4,\nu'чугунов':14,\nu'слепов':6,\nu'козырева':12,\nu'максимкин':4,\nu'еркина':2,\nu'веревкин':7,\nu'бегун':2,\nu'бадмаева':5,\nu'юханов':2,\nu'муллин':2,\nu'ивочкина':2,\nu'магомедкеримов':2,\nu'шаталова':7,\nu'барыкин':3,\nu'кулахметов':2,\nu'бахирева':2,\nu'прасол':2,\nu'соболев':49,\nu'борщевская':3,\nu'бегларян':2,\nu'агапонова':2,\nu'тыцкий':2,\nu'никульников':2,\nu'чернышев':50,\nu'окунев':12,\nu'кныш':2,\nu'карпенко':46,\nu'исаакян':2,\nu'анасова':2,\nu'баскаев':2,\nu'бурлаков':19,\nu'мигаль':2,\nu'голощапова':2,\nu'чекалин':4,\nu'каличенко':2,\nu'захарцев':2,\nu'бурмистров':16,\nu'алтынбаев':2,\nu'козарезов':2,\nu'аладин':2,\nu'лохова':5,\nu'голубцов':11,\nu'бисеров':2,\nu'трушкина':2,\nu'сёмин':11,\nu'бальжинимаев':4,\nu'трегубенко':2,\nu'штогрин':2,\nu'марданова':2,\nu'ныркова':2,\nu'белый':13,\nu'ляшук':3,\nu'гончарова':45,\nu'гузь':5,\nu'серебренникова':4,\nu'дац':2,\nu'томашевский':3,\nu'бакунова':2,\nu'титков':10,\nu'дерябкин':3,\nu'меньков':2,\nu'рунов':3,\nu'драчева':2,\nu'домашов':2,\nu'стукало':2,\nu'гуреева':3,\nu'купеев':2,\nu'топорков':8,\nu'шевкопляс':3,\nu'любенко':3,\nu'потий':2,\nu'перепечин':3,\nu'ладаткин':2,\nu'савчук':9,\nu'кушнеревич':2,\nu'ригованов':2,\nu'колесникова':39,\nu'абдулкаримов':2,\nu'керопян':2,\nu'крысько':2,\nu'барикаев':2,\nu'мокин':4,\nu'дудкина':3,\nu'макитов':2,\nu'седнев':4,\nu'калита':6,\nu'овдиенко':3,\nu'саклакова':2,\nu'кисляк':2,\nu'пригорнева':3,\nu'кутузов':13,\nu'клычников':2,\nu'полубояров':5,\nu'даньшин':7,\nu'свалов':2,\nu'перекрестова':2,\nu'язынин':3,\nu'леонтьева':17,\nu'шумилова':8,\nu'аимов':2,\nu'челомбитко':2,\nu'труфанова':9,\nu'першин':22,\nu'гераскин':2,\nu'сячин':2,\nu'вахрушев':14,\nu'львова':10,\nu'акжигитов':2,\nu'драгунов':6,\nu'ситникова':15,\nu'самодурова':2,\nu'смелянец':2,\nu'минченков':3,\nu'рогова':11,\nu'тясто':3,\nu'чуприна':4,\nu'старкин':4,\nu'фризен':4,\nu'гаевая':2,\nu'усенко':12,\nu'голофаст':2,\nu'мураев':5,\nu'матханов':2,\nu'алпатов':10,\nu'боева':6,\nu'анпилов':3,\nu'мунтян':4,\nu'бабаков':4,\nu'кургузова':2,\nu'цеханович':2,\nu'радько':6,\nu'шамрина':2,\nu'братухин':5,\nu'салихов':16,\nu'малашенкова':2,\nu'лифанов':2,\nu'кубкин':2,\nu'лёвкина':2,\nu'лукашов':7,\nu'белов':92,\nu'криворучко':4,\nu'якубчик':4,\nu'паньшин':4,\nu'пилюгин':13,\
nu'скоморохов':5,\nu'коротыч':3,\nu'шувалова':11,\nu'иноземцев':8,\nu'мартюшев':2,\nu'понамарев':3,\nu'рачишен':2,\nu'кульпина':2,\nu'сергунина':3,\nu'отрощенко':2,\nu'сливинский':2,\nu'шерстнёв':2,\nu'гринчук':2,\nu'бритвин':3,\nu'демушкин':2,\nu'каргинов':3,\nu'костомаров':3,\nu'арушанян':3,\nu'омарова':4,\nu'кожевникова':20,\nu'бавыкина':2,\nu'рыбаченко':2,\nu'агишев':2,\nu'бабурин':6,\nu'беспалова':10,\nu'федько':2,\nu'курако':4,\nu'комякова':2,\nu'мальчиков':3,\nu'аминов':8,\nu'арешкин':2,\nu'родичев':2,\nu'лепендин':2,\nu'созинова':3,\nu'жеребятьев':2,\nu'нацаренус':2,\nu'зарецких':2,\nu'аникина':16,\nu'храпов':4,\nu'камалов':11,\nu'сухопаров':3,\nu'маркова':34,\nu'саидова':2,\nu'остроухова':2,\nu'кочура':6,\nu'рахманин':3,\nu'мотченко':2,\nu'перин':2,\nu'лаврентьева':16,\nu'полтавченко':2,\nu'останина':4,\nu'шредер':2,\nu'роменский':4,\nu'петушкова':2,\nu'георгиева':4,\nu'гринев':5,\nu'поликарпова':3,\nu'тутова':2,\nu'бибикова':2,\nu'хлебалин':2,\nu'рвачев':2,\nu'кужугет':4,\nu'хисамов':5,\nu'богушевич':2,\nu'просяник':2,\nu'изергин':2,\nu'корючин':2,\nu'захлыстов':2,\nu'конушева':2,\nu'гаранжа':2,\nu'сивков':8,\nu'николайчик':2,\nu'сапа':2,\nu'брындин':2,\nu'тарханов':3,\nu'слепнева':3,\nu'курнаев':3,\nu'варенцов':3,\nu'кудинов':20,\nu'коржаков':2,\nu'тазетдинов':4,\nu'быченков':4,\nu'рыженкова':3,\nu'льянов':3,\nu'беспечук':2,\nu'гашимов':4,\nu'поимцев':3,\nu'елфимова':2,\nu'качан':8,\nu'головинова':2,\nu'окшин':2,\nu'васюков':11,\nu'авдонин':3,\nu'калюжина':2,\nu'хохорин':2,\nu'плешкова':4,\nu'аришина':2,\nu'романов':124,\nu'моторин':9,\nu'ловцова':2,\nu'харьков':6,\nu'чалых':2,\nu'пронина':16,\nu'ковальский':7,\nu'донец':2,\nu'гамбарян':2,\nu'стебунова':2,\nu'далакян':2,\nu'пальчик':2,\nu'салманов':2,\nu'соин':2,\nu'свердлова':2,\nu'кривушин':2,\nu'балясников':4,\nu'скоробогатых':4,\nu'доржиева':5,\nu'ежова':6,\nu'гаев':5,\nu'феденко':2,\nu'шаховский':2,\nu'гречко':13,\nu'муковнин':2,\nu'богодухов':4,\nu'ильяшевич':4,\nu'кокорина':6,\nu'городный':2,\nu'шлык':4,\nu'пехтерев':3,\nu'шейкина':2,\nu'ореховский':3,\nu'винокуров':26,\nu'липовский':2,\nu'цаплин':3,\nu'ольгин':2,\nu'сперанский':5,\nu'гнетова':2,\nu'кривицкая':3,\nu'тришкин':3,\nu'миргородская':3,\nu'григорьев':104,\nu'курбатов':24,\nu'мухитдинов':2,\nu'нельга':2,\nu'халин':3,\nu'мосяков':2,\nu'монгуш':8,\nu'колотилов':2,\nu'токарев':41,\nu'шуйский':2,\nu'можаров':3,\nu'устимкин':2,\nu'рудской':2,\nu'алхименко':2,\nu'бочков':14,\nu'веревкина':4,\nu'гречкин':5,\nu'агуреев':2,\nu'довгаль':6,\nu'меденкова':2,\nu'розум':2,\nu'киприянов':2,\nu'ванюшкин':2,\nu'филь':4,\nu'рак':3,\nu'девятков':3,\nu'куртов':2,\nu'ардаев':2,\nu'лайковский':2,\nu'ермилов':16,\nu'муравьева':18,\nu'ковригина':2,\nu'кошкарев':4,\nu'чечель':4,\nu'краснова':21,\nu'полянский':19,\nu'кармадонов':2,\nu'хакимов':17,\nu'фургал':4,\nu'чахкиев':3,\nu'кухарская':2,\nu'крылов':54,\nu'дегтярёв':2,\nu'никаноров':7,\nu'алибаева':2,\nu'курбанов':23,\nu'фазылзянов':2,\nu'глуховский':2,\nu'аликберов':2,\nu'букреева':2,\nu'леонтьев':39,\nu'алимбеков':2,\nu'зиновкин':3,\nu'данов':2,\nu'куприна':4,\nu'русских':17,\nu'дюжев':2,\nu'петухова':24,\nu'ивин':3,\nu'гоц':2,\nu'гаврюшин':3,\nu'ремезова':2,\nu'ахметова':4,\nu'петлеванный':2,\nu'орешников':3,\nu'венкова':2,\nu'пашкина':2,\nu'павлюченко':9,\nu'лисов':10,\nu'зотьев':2,\nu'анчина':2,\nu'лбов':2,\nu'горюк':2,\nu'павлович':8,\nu'албаков':4,\nu'козодаев':2,\nu'ахметзянова':2,\nu'перегудов':6,\nu'камышова':3,\nu'курмакаев':3,\nu'земскова':7,\nu'приставка':3,\nu'синегуб':2,\nu'гольцов':7,\nu'малых':8,\nu'ветчинкин':3,\nu'м
алыш':3,\nu'луковкин':2,\nu'максимов':86,\nu'чугункин':2,\nu'прокопова':3,\nu'выходец':2,\nu'анненков':4,\nu'мастрюков':2,\nu'шкурко':3,\nu'шиманская':3,\nu'белянский':2,\nu'козленко':4,\nu'хапочкин':2,\nu'манченко':2,\nu'коневец':2,\nu'худолеев':2,\nu'мансурова':3,\nu'егоян':2,\nu'мелешков':2,\nu'рубис':3,\nu'кулешова':25,\nu'базанова':6,\nu'дикач':2,\nu'крутько':5,\nu'москалева':11,\nu'косяк':3,\nu'барковская':7,\nu'саркисов':5,\nu'усов':24,\nu'романюта':2,\nu'юрчик':3,\nu'адаменко':3,\nu'гармашов':2,\nu'деревяго':2,\nu'козин':16,\nu'чухарев':3,\nu'шкуратов':4,\nu'аракелян':6,\nu'бахарев':14,\nu'савицкая':15,\nu'чермошенцев':2,\nu'шиманов':2,\nu'конохов':2,\nu'ворожейкин':4,\nu'кабалоев':2,\nu'войтик':3,\nu'березняков':2,\nu'каюмов':5,\nu'кизима':2,\nu'гаскаров':2,\nu'симагин':2,\nu'рычагова':3,\nu'халтурин':4,\nu'навалова':2,\nu'коваль':45,\nu'бахмудов':2,\nu'цапенко':3,\nu'варакин':5,\nu'пестерева':3,\nu'замышляева':3,\nu'подымов':2,\nu'изиев':3,\nu'перегудова':2,\nu'кислякова':2,\nu'шнурков':3,\nu'шапарев':2,\nu'бывальцева':2,\nu'водолазский':3,\nu'ахбердилов':2,\nu'светоносов':2,\nu'понамаренко':2,\nu'бурсагов':2,\nu'житинев':2,\nu'миннуллин':2,\nu'куденко':2,\nu'мухаметзянов':5,\nu'каширин':11,\nu'поминова':3,\nu'думанский':2,\nu'сакулин':2,\nu'погодина':5,\nu'русак':3,\nu'таланов':6,\nu'дутко':2,\nu'аскеров':4,\nu'стрижаков':2,\nu'краюхин':2,\nu'рахимзянов':2,\nu'пустовит':2,\nu'абдуллаева':2,\nu'власова':34,\nu'мочалкин':2,\nu'калиничев':6,\nu'нырненко':2,\nu'лапицкий':3,\nu'москвитина':4,\nu'ощепков':6,\nu'соколов':133,\nu'земченкова':2,\nu'черномырдин':2,\nu'удалов':8,\nu'марковский':2,\nu'сидоркевич':2,\nu'монастырев':2,\nu'сазонов':34,\nu'силин':8,\nu'строкань':2,\nu'кормилицына':4,\nu'савилова':2,\nu'мушегян':2,\nu'коликов':2,\nu'москвина':8,\nu'гиндулина':2,\nu'одинец':3,\nu'ганшин':2,\nu'познанский':2,\nu'радионов':7,\nu'горенков':3,\nu'устименко':14,\nu'цоколенко':2,\nu'сенкевич':3,\nu'андросов':10,\nu'пахмутова':3,\nu'андреянов':2,\nu'вершинина':10,\nu'плиско':3,\nu'дёмочкин':2,\nu'цуканов':23,\nu'бер':3,\nu'бех':2,\nu'бец':2,\nu'скрыпникова':2,\nu'барановская':4,\nu'ростиславов':2,\nu'смоляков':6,\nu'алешкин':9,\nu'хутиева':2,\nu'галигузов':2,\nu'матвеева':64,\nu'остапенко':32,\nu'ерошенко':5,\nu'корзун':5,\nu'аитова':2,\nu'гарипова':3,\nu'ельцова':2,\nu'боярский':6,\nu'рыжаков':3,\nu'рыбальченко':9,\nu'карасов':2,\nu'бирюкова':19,\nu'крутик':2,\nu'мурин':2,\nu'ахметшин':13,\nu'алиева':10,\nu'брежнев':10,\nu'беланова':2,\nu'тульский':3,\nu'паксимади':2,\nu'завальный':2,\nu'передельский':2,\nu'порядин':3,\nu'пегова':2,\nu'курапов':3,\nu'бухаров':7,\nu'гошовская':2,\nu'дьячихин':2,\nu'шкитина':2,\nu'абдуллаев':15,\nu'касымов':2,\nu'семак':2,\nu'инютин':3,\nu'журкина':2,\nu'полякова':48,\nu'пилипчук':7,\nu'бегма':2,\nu'олейников':23,\nu'покровский':11,\nu'пилюков':3,\nu'муканов':3,\nu'гасанова':3,\nu'раёв':2,\nu'семеренко':2,\nu'бургучев':2,\nu'макарова':79,\nu'жигин':2,\nu'гаврик':2,\nu'морякова':2,\nu'солдаткин':6,\nu'однолько':4,\nu'федореев':3,\nu'воинов':11,\nu'федькин':2,\nu'боровков':12,\nu'хейфец':2,\nu'большова':2,\nu'шангина':4,\nu'скрябина':2,\nu'коробейникова':6,\nu'бадулин':2,\nu'гербеков':2,\nu'галян':2,\nu'кречетова':7,\nu'манякин':3,\nu'комар':4,\nu'литвинов':56,\nu'баюра':2,\nu'поспелова':5,\nu'ягудин':8,\nu'ляшев':2,\nu'курдюмов':2,\nu'шляхов':8,\nu'апостолов':2,\nu'бакиров':8,\nu'яковлева':81,\nu'туганова':2,\nu'юсупов':29,\nu'халиков':6,\nu'грановский':2,\nu'елкин':2,\nu'убушаев':2,\nu'петряйкин':2,\nu'самус':2,\nu'коньшина':2,\nu'клычникова':2,\nu'теплу
хин':2,\nu'пикунов':2,\nu'елтышев':3,\nu'бубнова':6,\nu'кураченко':2,\nu'лисенко':2,\nu'стасенко':4,\nu'полуянов':4,\nu'прохватилов':2,\nu'башкирев':2,\nu'тренин':5,\nu'твердохлеб':3,\nu'бровкин':6,\nu'кибец':2,\nu'яковлев':125,\nu'непомнящих':2,\nu'аверичев':2,\nu'ленберг':2,\nu'илющенко':6,\nu'мулюков':2,\nu'трухина':6,\nu'неверов':6,\nu'любимова':6,\nu'матросова':5,\nu'татьяненко':2,\nu'буряченко':2,\nu'редько':8,\nu'вильданов':2,\nu'нырков':2,\nu'сутягина':3,\nu'васкецов':2,\nu'суздальцева':2,\nu'парасюк':2,\nu'белоножко':3,\nu'гайнутдинова':4,\nu'ширшова':6,\nu'андрейцев':2,\nu'проскурин':8,\nu'анай-оол':3,\nu'костикова':2,\nu'урванов':2,\nu'сибгатуллин':2,\nu'бакина':3,\nu'стрепетов':2,\nu'первых':2,\nu'граф':4,\nu'гордон':2,\nu'заляев':2,\nu'казначеев':5,\nu'слесаренко':5,\nu'гордов':2,\nu'пророченко':2,\nu'чекмазов':2,\nu'рубцов':27,\nu'толстошеев':3,\nu'мусина':6,\nu'калиновская':5,\nu'карпунов':2,\nu'дроздецкая':2,\nu'костюченко':18,\nu'лебедь':11,\nu'зиброва':2,\nu'деревянкин':5,\nu'юдинцев':2,\nu'баркалова':2,\nu'головатюк':3,\nu'космынин':5,\nu'мельников':95,\nu'стефанова':2,\nu'мазур':18,\nu'хрипунков':3,\nu'танцырев':2,\nu'савенков':15,\nu'воликов':6,\nu'халилов':10,\nu'гумерова':2,\nu'юденко':3,\nu'филипчук':2,\nu'рябчиков':7,\nu'данилевский':3,\nu'поткин':2,\nu'паранин':3,\nu'сковородкин':3,\nu'корняков':3,\nu'звездин':4,\nu'чистов':7,\nu'желтышев':2,\nu'купцов':12,\nu'каратаев':7,\nu'киевская':3,\nu'петрякова':2,\nu'дрыгин':3,\nu'вольфсон':2,\nu'честных':3,\nu'гуденко':4,\nu'миловидов':3,\nu'калмыков':22,\nu'косьяненко':2,\nu'копысов':3,\nu'ермолаев':43,\nu'годына':2,\nu'сюн-цин-фан':3,\nu'космынина':2,\nu'чуканова':4,\nu'гужин':4,\nu'гуторов':6,\nu'глухенко':2,\nu'максачев':2,\nu'бузин':2,\nu'шаров':24,\nu'неяскина':2,\nu'ружников':2,\nu'яхин':8,\nu'чукреев':3,\nu'аронова':2,\nu'губайдулин':2,\nu'гарманов':2,\nu'зырянов':21,\nu'сенько':8,\nu'дзиов':2,\nu'емтыль':2,\nu'трунова':9,\nu'хамидулина':2,\nu'жданкин':2,\nu'дыкин':3,\nu'пелин':2,\nu'присяжнюк':4,\nu'бережнова':2,\nu'власенко':31,\nu'бобрышев':5,\nu'силина':9,\nu'головин':38,\nu'рашев':2,\nu'захаренкова':2,\nu'снегов':3,\nu'коняхина':2,\nu'зворыгина':2,\nu'городнов':3,\nu'второв':3,\nu'эрдынеева':2,\nu'воротилин':2,\nu'озов':2,\nu'паневин':3,\nu'кострикина':2,\nu'волдырев':2,\nu'логвин':4,\nu'кречетов':5,\nu'кукса':7,\nu'белкина':4,\nu'прозоров':12,\nu'севостьянова':7,\nu'лабутин':4,\nu'струнин':3,\nu'сатаев':4,\nu'арестов':4,\nu'кондауров':2,\nu'сапрыкина':8,\nu'земская':4,\nu'аганина':2,\nu'атаманова':3,\nu'борисовский':4,\nu'тряпкин':2,\nu'азовцев':3,\nu'самошкин':4,\nu'осадчий':15,\nu'исакин':2,\nu'мальцева':30,\nu'меджидов':3,\nu'рягузов':3,\nu'гаджиалиев':2,\nu'кистанов':2,\nu'авцынова':2,\nu'олькина':2,\nu'елесин':2,\nu'алексеева':90,\nu'нижегородцев':3,\nu'пустовалова':3,\nu'жигулин':6,\nu'лежнев':5,\nu'евсюков':7,\nu'корбан':2,\nu'таболова':2,\nu'сабаев':2,\nu'софьин':3,\nu'шалагин':3,\nu'волчков':8,\nu'чепарский':2,\nu'попкова':11,\nu'головань':7,\nu'акатова':3,\nu'цыпкина':2,\nu'шапкарина':2,\nu'мазурин':4,\nu'мазурик':2,\nu'завадский':6,\nu'гайдукова':3,\nu'таценко':2,\nu'аистов':3,\nu'бахтина':3,\nu'маргелов':2,\nu'павлик':3,\nu'хазов':5,\nu'алакаев':2,\nu'мушинский':2,\nu'загайнова':3,\nu'есенеев':3,\nu'стахеев':2,\nu'березюк':3,\nu'спицина':2,\nu'галицкая':4,\nu'лущиц':2,\nu'клюкин':8,\nu'василишин':3,\nu'рябов':52,\nu'салмина':4,\nu'поздышев':2,\nu'исаченко':6,\nu'чуб':16,\nu'лавриченко':2,\nu'кошеленко':2,\nu'бессолов':3,\nu'зизевский':2,\nu'веретин':2,\nu'ляпина':8,\nu'олисова':2,\nu'лымарь':3,\nu
'кургин':3,\nu'остроумов':4,\nu'середин':5,\nu'тульнев':2,\nu'танцюра':3,\nu'савоськин':2,\nu'холодилин':2,\nu'ельников':3,\nu'прокопец':7,\nu'лозинский':2,\nu'вохмин':2,\nu'солуянова':2,\nu'зырин':2,\nu'красичков':3,\nu'горьков':3,\nu'гаврилова':39,\nu'завгороднев':7,\nu'варанкин':2,\nu'лазарева':28,\nu'леонгардт':2,\nu'карюков':2,\nu'патрикеева':2,\nu'манякина':4,\nu'тетенькин':2,\nu'григолия':2,\nu'музыченко':3,\nu'шляхтов':4,\nu'канунников':7,\nu'семиколенов':2,\nu'михневич':4,\nu'демиденко':12,\nu'чанкаев':2,\nu'холодов':8,\nu'булаева':4,\nu'щевелев':2,\nu'мурзина':2,\nu'грузинова':2,\nu'суржик':2,\nu'севастьянова':7,\nu'клиндух':2,\nu'кулькин':2,\nu'челенков':2,\nu'юмшанов':2,\nu'дубовский':2,\nu'севрюков':8,\nu'кудрина':5,\nu'дериглазов':2,\nu'маренков':4,\nu'дмитров':4,\nu'буряк':12,\nu'торхов':3,\nu'аванесян':4,\nu'московский':3,\nu'рихтерман':2,\nu'заболотнев':2,\nu'хоркина':2,\nu'гейдаров':2,\nu'сердюк':27,\nu'крячко':2,\nu'рунова':2,\nu'нагайцева':3,\nu'судакова':11,\nu'бугулова':2,\nu'ленда':2,\nu'рязанцева':9,\nu'городилов':4,\nu'казачек':3,\nu'данильчук':2,\nu'старшов':2,\nu'кара-сал':2,\nu'коноваленко':9,\nu'хабибулин':5,\nu'горшенин':9,\nu'красуля':2,\nu'любин':2,\nu'байдала':2,\nu'бобровская':5,\nu'кулагин':25,\nu'гарбузов':6,\nu'сочкова':2,\nu'котомин':2,\nu'жидкова':3,\nu'крайнюков':2,\nu'оськина':4,\nu'бодрова':7,\nu'барченкова':2,\nu'ульев':2,\nu'фалилеев':4,\nu'сенникова':3,\nu'корытин':3,\nu'барановский':18,\nu'терехина':4,\nu'раев':4,\nu'чириков':5,\nu'шатайло':3,\nu'арзиманов':2,\nu'тархов':2,\nu'топтыгина':2,\nu'лободенко':2,\nu'кодочигов':5,\nu'буценко':4,\nu'булыгин':11,\nu'томилин':7,\nu'гонтарев':2,\nu'абушаев':2,\nu'кулигин':5,\nu'гуков':4,\nu'епифанов':13,\nu'колядин':3,\nu'колыванов':3,\nu'разумовский':9,\nu'говоров':10,\nu'резниченко':15,\nu'бунина':2,\nu'юшина':6,\nu'бельтюков':2,\nu'краснобаев':5,\nu'логиновский':2,\nu'кожевина':2,\nu'еланцев':6,\nu'бусыгин':6,\nu'трегубов':11,\nu'ооржак':6,\nu'нечипоренко':2,\nu'ботов':4,\nu'набиуллин':6,\nu'гололобов':3,\nu'безрукова':5,\nu'ламонов':3,\nu'кошельников':2,\nu'кичигина':5,\nu'берников':2,\nu'шнейдер':3,\nu'улин':2,\nu'никульшина':2,\nu'васянович':2,\nu'дунин':6,\nu'терещенков':2,\nu'горбовский':2,\nu'валиулин':2,\nu'абразумов':2,\nu'костенич':2,\nu'фоменкова':2,\nu'баянов':4,\nu'кучумов':7,\nu'зернов':7,\nu'акатьев':2,\nu'новохатько':2,\nu'тарабукин':2,\nu'бурнос':2,\nu'земсков':12,\nu'рыжикова':2,\nu'курьянович':3,\nu'шалыгин':5,\nu'семина':8,\nu'сукач':2,\nu'ненашева':4,\nu'загородникова':3,\nu'шик':2,\nu'новикова':70,\nu'баруздин':2,\nu'бердов':2,\nu'мухатов':2,\nu'веденин':4,\nu'тарасенков':2,\nu'ларченков':2,\nu'башкирцев':5,\nu'вертий':2,\nu'бобринев':3,\nu'душенко':3,\nu'козенко':2,\nu'крысов':2,\nu'акулинин':2,\nu'исаенков':2,\nu'сократилин':2,\nu'кащеева':2,\nu'жидецкий':3,\nu'абрамчук':2,\nu'лапаева':2,\nu'цыба':7,\nu'савилов':5,\nu'новичихин':4,\nu'сумин':9,\nu'оборин':2,\nu'езерский':3,\nu'поваров':5,\nu'спасская':2,\nu'черепов':4,\nu'рыжов':29,\nu'брыжко':2,\nu'каратай':2,\nu'чипизубов':2,\nu'комогорцев':3,\nu'озерова':10,\nu'бушуева':10,\nu'вартанов':3,\nu'и':3,\nu'гималетдинов':2,\nu'васильева':132,\nu'заруба':2,\nu'демьяненко':11,\nu'золотых':7,\nu'ярошенко':12,\nu'кривцов':6,\nu'витко':2,\nu'шек':2,\nu'христолюбов':4,\nu'шевченко':124,\nu'затонский':3,\nu'забродская':2,\nu'шпартюк':2,\nu'ерошевич':2,\nu'ворошилова':6,\nu'цыкин':4,\nu'бикташев':2,\nu'василенков':3,\nu'падалкин':2,\nu'казанков':6,\nu'горовая':3,\nu'голубничий':2,\nu'лапухин':3,\nu'пашуканис':2,\nu'житков':2,\nu'сенча':2,\n
u'боярищев':2,\nu'подобед':2,\nu'мозжегорова':2,\nu'кулагина':14,\nu'лёвина':2,\nu'некрылова':2,\nu'красоткин':3,\nu'лагунов':4,\nu'гиниятуллин':2,\nu'шитиков':9,\nu'воропай':4,\nu'голдобин':3,\nu'чечерин':2,\nu'сытов':2,\nu'гизатуллин':10,\nu'дерлыш':2,\nu'карташева':2,\nu'филипенко':20,\nu'настенко':3,\nu'лопатин':32,\nu'монина':2,\nu'амбаев':2,\nu'гельм':2,\nu'буранова':2,\nu'кущенко':3,\nu'шиндин':3,\nu'татьянин':4,\nu'шигин':2,\nu'потявина':3,\nu'палютин':3,\nu'артюшкина':2,\nu'осташко':2,\nu'черезова':2,\nu'винс':2,\nu'кукушкин':18,\nu'корюшкин':2,\nu'малыгина':6,\nu'алхимин':2,\nu'лозовая':3,\nu'кокорев':8,\nu'чихирев':2,\nu'тюгай':2,\nu'габитова':4,\nu'бакун':2,\nu'подберезкин':2,\nu'сабада':2,\nu'шамраев':2,\nu'горностаев':4,\nu'белошапкин':2,\nu'летунов':4,\nu'петрук':4,\nu'гудова':2,\nu'новинский':2,\nu'пешко':2,\nu'болдинов':2,\nu'шатский':2,\nu'потехин':9,\nu'гаджиева':5,\nu'гамов':4,\nu'кокин':5,\nu'шадрина':8,\nu'тепляков':8,\nu'красковская':2,\nu'олерский':2,\nu'юлин':3,\nu'кузема':2,\nu'тюжин':2,\nu'помогаева':3,\nu'галченков':2,\nu'божин':2,\nu'васильковский':2,\nu'колдин':2,\nu'малина':2,\nu'кажаров':4,\nu'котович':4,\nu'лосевич':2,\nu'нигматулина':2,\nu'гарев':2,\nu'андропова':3,\nu'ерышев':2,\nu'агафонова':25,\nu'гильмутдинов':6,\nu'губжоков':2,\nu'загорский':4,\nu'богма':2,\nu'пилипец':2,\nu'кретов':13,\nu'абрамян':6,\nu'гараев':10,\nu'демешко':2,\nu'колбасникова':2,\nu'детков':3,\nu'базарова':3,\nu'тунчик':2,\nu'завьялова':12,\nu'шейхов':2,\nu'командирова':2,\nu'килин':7,\nu'шартон':2,\nu'ашифин':2,\nu'щеголев':7,\nu'мильчевич':2,\nu'рыбкина':3,\nu'стадникова':2,\nu'подгорная':4,\nu'колоев':3,\nu'серко':3,\nu'гайдукевич':3,\nu'полторацкий':4,\nu'кучук':2,\nu'керимова':4,\nu'грошев':7,\nu'полищук':29,\nu'галиева':3,\nu'мутков':2,\nu'воронежцев':2,\nu'безуглый':6,\nu'лапиков':4,\nu'аношкин':4,\nu'лозовская':3,\nu'гуцев':2,\nu'порхунов':4,\nu'аббясова':2,\nu'подлесный':7,\nu'несвит':3,\nu'скибин':2,\nu'щукин':31,\nu'баяндин':5,\nu'панина':23,\nu'рустамов':8,\nu'истомин':23,\nu'поликарпов':11,\nu'глонти':2,\nu'машковцев':4,\nu'горчаков':7,\nu'ходос':2,\nu'васютинский':2,\nu'калеганова':2,\nu'горбунов':75,\nu'кривко':5,\nu'агеева':20,\nu'пуканов':3,\nu'колодина':3,\nu'плевако':2,\nu'безносова':2,\nu'утяшева':2,\nu'кибальник':2,\nu'данько':3,\nu'башмаков':6,\nu'маклаков':6,\nu'оразов':2,\nu'корнеева':12,\nu'бевзюк':2,\nu'смолина':8,\nu'самков':2,\nu'шиманский':4,\nu'серегина':14,\nu'ференец':3,\nu'вахромеев':2,\nu'павлюк':9,\nu'аргасцев':2,\nu'могиленко':3,\nu'стрижак':3,\nu'добряков':6,\nu'лагуткин':4,\nu'молодчиков':2,\nu'одиноков':3,\nu'бураков':12,\nu'шешуков':4,\nu'гулевский':2,\nu'шекунов':2,\nu'набатова':5,\nu'сыроватский':7,\nu'болгарев':2,\nu'ширшиков':3,\nu'брыков':7,\nu'гамаюнов':3,\nu'курышова':3,\nu'шорина':5,\nu'порватов':2,\nu'пипко':3,\nu'рабданов':2,\nu'старостина':15,\nu'акулов':16,\nu'онохов':2,\nu'безденежных':7,\nu'лузан':6,\nu'кривова':2,\nu'африкантов':2,\nu'линева':2,\nu'орехова':12,\nu'хайруллин':16,\nu'косачев':6,\nu'сибагатуллин':2,\nu'воскобойник':4,\nu'меркулова':9,\nu'кожаков':2,\nu'муравьев':34,\nu'конышева':6,\nu'усс':2,\nu'рудой':5,\nu'незговоров':2,\nu'мышков':2,\nu'нагалин':2,\nu'брусило':2,\nu'толстикова':8,\nu'мисник':4,\nu'долгушев':2,\nu'погудин':5,\nu'золотухина':6,\nu'сычева':14,\nu'путинцева':3,\nu'бруй':3,\nu'карлаш':4,\nu'ольховиков':2,\nu'бессонов':27,\nu'хабаров':19,\nu'овчаров':10,\nu'абзалов':5,\nu'самылин':2,\nu'конова':3,\nu'полковников':2,\nu'арановский':2,\nu'цыган':2,\nu'гомзин':2,\nu'космачев':3,\nu'сташкова':2,\nu'тхаку
шинов':3,\nu'хафизова':5,\nu'петрухина':2,\nu'оленев':5,\nu'выродов':2,\nu'кидяев':3,\nu'ступакова':4,\nu'дурнев':10,\nu'линкевич':2,\nu'горбунова':39,\nu'оловянишникова':2,\nu'чуприн':6,\nu'сологубов':4,\nu'пашовкин':3,\nu'пронин':29,\nu'старосельская':2,\nu'стариченко':2,\nu'замятин':8,\nu'иванцова':6,\nu'рычагов':5,\nu'плаксицкий':2,\nu'городницкий':2,\nu'комисаров':2,\nu'африканов':2,\nu'сараев':12,\nu'горюшкин':2,\nu'гонта':4,\nu'мурга':3,\nu'кочегаров':2,\nu'ишин':3,\nu'сайтиев':2,\nu'жижикин':2,\nu'прусс':2,\nu'мартынов':58,\nu'пятовский':2,\nu'осауленко':6,\nu'наконечная':2,\nu'близнецов':2,\nu'горда':2,\nu'федечкин':2,\nu'папонов':2,\nu'судьин':3,\nu'гученко':2,\nu'корабельников':9,\nu'малик':7,\nu'усманова':3,\nu'бояринцев':3,\nu'мотин':5,\nu'афонькин':4,\nu'мозговая':2,\nu'березинец':2,\nu'хряпин':3,\nu'павин':2,\nu'шабров':3,\nu'битяй':2,\nu'уренков':2,\nu'мигин':2,\nu'каган':2,\nu'горбунков':2,\nu'дробышева':2,\nu'паршенков':2,\nu'глазова':3,\nu'клименченко':3,\nu'адигюзелов':2,\nu'стребкова':4,\nu'черноиванов':5,\nu'вегера':2,\nu'пескова':7,\nu'пьянкова':3,\nu'лохов':3,\nu'гришаев':14,\nu'заурбеков':3,\nu'жигулина':2,\nu'астанина':2,\nu'матора':2,\nu'ершов':54,\nu'сергеева':85,\nu'грибова':4,\nu'каптурович':3,\nu'полосин':2,\nu'гайдуков':7,\nu'артемьева':10,\nu'толстик':3,\nu'теняев':2,\nu'муслимов':9,\nu'скоринов':2,\nu'тараканова':3,\nu'бирук':2,\nu'смекалин':2,\nu'дятлова':2,\nu'борейко':2,\nu'латыш':2,\nu'кожурин':2,\nu'клюквин':3,\nu'вахтин':3,\nu'борщев':9,\nu'лимарева':2,\nu'балахонов':4,\nu'сабирзянов':2,\nu'семчишин':2,\nu'цветнов':3,\nu'ниязов':4,\nu'якушкин':4,\nu'кудачкин':2,\nu'кондратюк':11,\nu'король':13,\nu'балаба':2,\nu'винтовкина':2,\nu'квашнина':2,\nu'сайфуллин':7,\nu'середа':31,\nu'аниськов':2,\nu'богатырев':24,\nu'коржов':10,\nu'голодов':2,\nu'лукин':39,\nu'балашов':40,\nu'постовалов':3,\nu'попова':140,\nu'зеленюк':2,\nu'дворянчиков':2,\nu'рудых':2,\nu'вороновская':2,\nu'мальгина':2,\nu'можаев':5,\nu'палей':2,\nu'коротеев':7,\nu'момот':8,\nu'копачев':4,\nu'харченков':2,\nu'чиженков':2,\nu'газеева':2,\nu'конорев':4,\nu'гаврюшенко':3,\nu'доброва':2,\nu'саулькин':2,\nu'журавель':6,\nu'шульга':19,\nu'бостанова':2,\nu'шкурина':2,\nu'маркин':33,\nu'маркив':2,\nu'величко':36,\nu'богинский':4,\nu'саламов':3,\nu'зиятдинов':4,\nu'фролков':2,\nu'бадунов':2,\nu'дамбаева':3,\nu'гузаиров':3,\nu'цыбикдоржиев':2,\nu'кекеев':2,\nu'гуляев':26,\nu'федяшев':2,\nu'пенза':2,\nu'гонтарева':3,\nu'галета':3,\nu'синюк':2,\nu'мамеев':2,\nu'киреев':43,\nu'валуев':6,\nu'сасов':4,\nu'деева':8,\nu'плетнева':6,\nu'михальченко':7,\nu'ивахина':2,\nu'плиева':3,\nu'смыков':6,\nu'щеголева':3,\nu'гуль':2,\nu'клепинин':2,\nu'мартыщенко':4,\nu'ахапкин':2,\nu'кропотин':3,\nu'бражник':4,\nu'мелешенко':4,\nu'андрианова':12,\nu'колонтай':2,\nu'стрепетова':2,\nu'папин':4,\nu'березкина':5,\nu'вовчик':2,\nu'гриб':5,\nu'гутман':4,\nu'шульгина':5,\nu'боярко':2,\nu'загородников':5,\nu'кипкеева':2,\nu'лоскутова':6,\nu'кюршин':2,\nu'портнов':7,\nu'клочков':18,\nu'бекренев':2,\nu'демьянов':17,\nu'егорченков':2,\nu'дударев':12,\nu'керимов':11,\nu'заблоцкий':2,\nu'каменко':2,\nu'карапетян':11,\nu'щербинин':16,\nu'шелудяков':3,\nu'чеботарёв':2,\nu'коханов':5,\nu'конин':3,\nu'ноздрин':4,\nu'аронов':2,\nu'токмаков':3,\nu'путилин':5,\nu'ханжин':2,\nu'клочек':2,\nu'шатская':2,\nu'чуяков':2,\nu'окороков':5,\nu'капник':2,\nu'дулова':3,\nu'бакуев':2,\nu'синицына':7,\nu'свечников':5,\nu'карпикова':2,\nu'сундуков':5,\nu'безруких':2,\nu'майбуров':2,\nu'байрамова':2,\nu'жабина':3,\nu'сизинцев':4,\nu'горелкин':6,\nu'зинкев
ич':5,\nu'верба':4,\nu'долматова':2,\nu'комкова':7,\nu'евсеенкова':2,\nu'катамадзе':2,\nu'колбаев':2,\nu'дианов':9,\nu'бисембаева':2,\nu'авдошина':2,\nu'белицкий':3,\nu'литвинюк':2,\nu'головина':14,\nu'беглов':3,\nu'брусницын':2,\nu'сейфуллаев':3,\nu'муртазалиев':4,\nu'ситдиков':3,\nu'невзоров':7,\nu'сиренко':5,\nu'арсланов':6,\nu'каргин':14,\nu'куницын':6,\nu'боженов':4,\nu'журавлёв':6,\nu'масаев':4,\nu'челпанова':3,\nu'шейнин':2,\nu'цицилин':4,\nu'савотин':2,\nu'рожкова':14,\nu'бабинцева':2,\nu'одер':2,\nu'соблиров':2,\nu'рыкина':2,\nu'бажанов':10,\nu'боровской':4,\nu'белашов':2,\nu'польских':2,\nu'дяченко':7,\nu'бабушкин':24,\nu'симков':2,\nu'квасница':2,\nu'стрекаловский':2,\nu'лещев':6,\nu'никулова':2,\nu'вениченко':3,\nu'крицкая':4,\nu'низовцев':3,\nu'салманова':2,\nu'зданович':4,\nu'смеликов':2,\nu'грибкова':5,\nu'зорина':11,\nu'букатин':2,\nu'колбин':4,\nu'цыбизова':2,\nu'романенко':48,\nu'вахнина':3,\nu'луканова':2,\nu'рафальская':2,\nu'нонко':2,\nu'ахмерова':2,\nu'варфоломеева':6,\nu'вавин':2,\nu'шрамко':5,\nu'будкина':2,\nu'абачев':2,\nu'солдатова':18,\nu'карпун':2,\nu'мучинский':2,\nu'варич':2,\nu'жеребилов':3,\nu'тарарыкин':2,\nu'насиковский':2,\nu'бабченко':5,\nu'погорелова':5,\nu'абсалямов':4,\nu'варнаков':2,\nu'зернова':2,\nu'гусева':69,\nu'бабенко':24,\nu'мандзюк':3,\nu'мисюра':5,\nu'азрапкин':2,\nu'фадзаев':2,\nu'нуретдинов':2,\nu'забродина':4,\nu'гаврилов':85,\nu'масальская':2,\nu'бодягин':3,\nu'зубковский':2,\nu'зосимов':2,\nu'габидуллин':2,\nu'юн':2,\nu'тушнолобов':2,\nu'седина':2,\nu'моргачев':2,\nu'ряшенцев':2,\nu'щеглов':18,\nu'нетреба':4,\nu'данилкин':5,\nu'мудрый':2,\nu'гудалин':2,\nu'харлан':2,\nu'грипас':2,\nu'дедушев':2,\nu'дарьин':2,\nu'зюзин':8,\nu'сватковский':2,\nu'макаров':141,\nu'клоков':6,\nu'синитенко':2,\nu'гулевич':4,\nu'бумагин':3,\nu'абрамченко':2,\nu'горбачевский':2,\nu'демочкин':2,\nu'убугунов':2,\nu'аршба':2,\nu'томащук':2,\nu'суходолов':2,\nu'котунов':2,\nu'сиделин':2,\nu'крысанов':2,\nu'харитонов':49,\nu'асеев':14,\nu'маркевич':7,\nu'малетин':4,\nu'кривых':4,\nu'тюляков':2,\nu'кандалов':2,\nu'курушин':2,\nu'митин':18,\nu'акчурина':3,\nu'черноусова':2,\nu'лемешев':2,\nu'шишкунова':2,\nu'корнейко':3,\nu'голдин':4,\nu'мирошникова':7,\nu'марголина':2,\nu'жужма':2,\nu'клейменова':6,\nu'шагин':7,\nu'рябыкин':2,\nu'картышев':2,\nu'азимов':2,\nu'азаренко':4,\nu'вилль':2,\nu'авакян':4,\nu'долина':3,\nu'фельк':2,\nu'чиньков':2,\nu'кулак':4,\nu'кулай':5,\nu'дырда':2,\nu'плохих':3,\nu'турсунова':2,\nu'немтырева':2,\nu'бушуев':12,\nu'шведов':13,\nu'чурикова':2,\nu'бардышева':2,\nu'ефименко':24,\nu'шаихов':2,\nu'крупский':2,\nu'курилкин':2,\nu'перескокова':3,\nu'балабанов':5,\nu'станкевич':8,\nu'друзин':3,\nu'жемчугов':2,\nu'гнеушев':3,\nu'гетманчук':2,\nu'галлямова':3,\nu'разиньков':2,\nu'коцюк':2,\nu'паламаренко':2,\nu'алеева':2,\nu'мамыкина':3,\nu'штань':2,\nu'каштанова':3,\nu'черемисин':6,\nu'корноухов':2,\nu'дятлов':14,\nu'шумкина':2,\nu'киданов':2,\nu'головкина':2,\nu'куксин':5,\nu'сосин':4,\nu'гайфуллин':3,\nu'селезенев':2,\nu'чикишева':2,\nu'вольвач':4,\nu'михайличенко':9,\nu'минко':5,\nu'яньшин':5,\nu'каверина':4,\nu'хлебников':13,\nu'зенченко':2,\nu'шинов':2,\nu'кушнеров':2,\nu'шарифуллин':2,\nu'давлетшина':2,\nu'простов':3,\nu'голышева':5,\nu'козьмин':2,\nu'ганов':3,\nu'янчук':2,\nu'милованова':3,\nu'кротких':3,\nu'тучков':3,\nu'шевяков':6,\nu'биланчук':2,\nu'микушева':2,\nu'тазиев':2,\nu'кушнарев':10,\nu'ганцев':2,\nu'реутов':7,\nu'караванов':2,\nu'щебетков':2,\nu'полукарова':2,\nu'хачатуров':3,\nu'чукин':2,\nu'пасичник':2,\nu'брусникова':2,\nu'ш
ереметьева':4,\nu'ляхов':15,\nu'чукавина':2,\nu'кит':2,\nu'петрова':151,\nu'тришкина':2,\nu'храмов':23,\nu'палиенко':2,\nu'курносов':11,\nu'емельянцев':3,\nu'тихонова':31,\nu'василюк':5,\nu'кривопустов':2,\nu'кудашева':2,\nu'швецова':12,\nu'некрасов':37,\nu'болдин':2,\nu'шепелёв':3,\nu'дымочка':2,\nu'дружинина':13,\nu'полунин':10,\nu'данилевская':2,\nu'шабаева':3,\nu'бахметьев':7,\nu'зобов':5,\nu'малютин':11,\nu'исрапилов':2,\nu'ефаров':2,\nu'манько':3,\nu'козорез':2,\nu'матросов':7,\nu'ионин':5,\nu'схашок':2,\nu'первушов':2,\nu'лиханов':5,\nu'долженков':2,\nu'стародумов':2,\nu'челяпов':2,\nu'князьков':7,\nu'похожаев':2,\nu'легкий':7,\nu'антонович':3,\nu'денискин':7,\nu'ковалькова':2,\nu'оконечников':2,\nu'шипунов':3,\nu'пивоваров':21,\nu'зацепина':5,\nu'бочарова':4,\nu'пушкарская':2,\nu'кретова':3,\nu'ворожцов':5,\nu'золотовская':3,\nu'перунова':2,\nu'таранов':5,\nu'савичев':4,\nu'зайнуллин':5,\nu'серова':20,\nu'драганов':2,\nu'будко':6,\nu'дикарев':3,\nu'мордвинов':6,\nu'крыгин':3,\nu'качигина':2,\nu'миненков':3,\nu'камалутдинов':2,\nu'шкуров':4,\nu'доржиев':8,\nu'зорькин':5,\nu'солоха':4,\nu'ташкин':2,\nu'рощин':10,\nu'коноплева':5,\nu'салахов':6,\nu'пучкова':3,\nu'тюник':2,\nu'мандрик':2,\nu'варичев':2,\nu'патрахин':2,\nu'огиенко':7,\nu'метов':2,\nu'микеров':4,\nu'ткаченко':84,\nu'хан':10,\nu'казанов':2,\nu'таджиева':2,\nu'алафинов':2,\nu'шарыгина':2,\nu'талаев':3,\nu'колычев':5,\nu'вискова':2,\nu'селивёрстов':3,\nu'величутина':2,\nu'якин':2,\nu'базулько':2,\nu'крюк':2,\nu'пеньков':14,\nu'наполов':3,\nu'посохов':4,\nu'масычев':2,\nu'рогозин':9,\nu'набатов':4,\nu'сеитов':2,\nu'звездина':2,\nu'тарнавский':5,\nu'кобелев':8,\nu'кожурова':2,\nu'фесик':2,\nu'данильчик':2,\nu'инвияев':2,\nu'подымова':2,\nu'смелик':3,\nu'забелин':13,\nu'моряков':4,\nu'меняйло':2,\nu'сивакова':2,\nu'витковский':5,\nu'громенко':3,\nu'агальцов':3,\nu'хрусталева':11,\nu'чечин':5,\nu'лаврентьев':18,\nu'ожиганов':2,\nu'поносов':2,\nu'плюхин':3,\nu'хоменко':23,\nu'охрименко':10,\nu'ковалев':100,\nu'санковский':2,\nu'макашина':2,\nu'галямов':2,\nu'болховитина':2,\nu'малеев':5,\nu'коржевский':2,\nu'эльяс':2,\nu'шалов':3,\nu'солодкий':6,\nu'колмогоров':4,\nu'степкин':3,\nu'латанов':3,\nu'водоватов':2,\nu'камбур':2,\nu'тузова':2,\nu'белоконь':10,\nu'лузина':2,\nu'панчук':3,\nu'осоченко':2,\nu'листопад':3,\nu'житников':3,\nu'пшуков':3,\nu'балов':2,\nu'жданова':37,\nu'коровина':4,\nu'кутяев':2,\nu'хлебова':2,\nu'георгиев':4,\nu'кунцевич':3,\nu'баулина':2,\nu'затолокин':2,\nu'очур':3,\nu'барсуков':21,\nu'ануфриенко':3,\nu'котобан':2,\nu'матущак':2,\nu'цылев':2,\nu'купин':5,\nu'бычков':40,\nu'бровкина':2,\nu'перелыгина':2,\nu'кочкарев':2,\nu'пензина':2,\nu'маренов':3,\nu'королёва':4,\nu'семёнов':17,\nu'кийкова':3,\nu'растегаев':3,\nu'виноградов':68,\nu'ачкасов':3,\nu'дадаев':5,\nu'тереня':2,\nu'чачин':2,\nu'игошева':5,\nu'жерихов':2,\nu'тимченко':14,\nu'алехин':20,\nu'миллер':7,\nu'зимич':3,\nu'колосов':34,\nu'грицай':10,\nu'гвозденко':2,\nu'грицак':5,\nu'пучков':22,\nu'замуруева':2,\nu'кузяева':2,\nu'шукаева':2,\nu'коровченко':4,\nu'савкова':2,\nu'патахов':2,\nu'алалыкин':2,\nu'веселовский':2,\nu'самарин':15,\nu'мясникова':4,\nu'нигматуллин':8,\nu'курохтина':3,\nu'докучаев':5,\nu'ивашин':7,\nu'зарубин':19,\nu'жаромских':2,\nu'тумхаджиев':2,\nu'лешов':2,\nu'аверьянов':16,\nu'купавцев':3,\nu'реснянская':3,\nu'сучков':21,\nu'полухина':2,\nu'матющенко':4,\nu'орешков':2,\nu'затиев':2,\nu'сухин':2,\nu'габдуллин':8,\nu'косякин':3,\nu'тамилина':2,\nu'калоев':2,\nu'кривулец':3,\nu'молостов':3,\nu'гаркуша':8,\nu'черныш':9,\nu'кухарева
':2,\nu'черных':44,\nu'соломаха':2,\nu'ширяевская':2,\nu'крестов':2,\nu'крючков':26,\nu'кожевин':4,\nu'горбатенко':10,\nu'кунова':2,\nu'графов':4,\nu'гензе':2,\nu'гагаринов':2,\nu'теленков':2,\nu'скулкин':2,\nu'худаев':3,\nu'ахметьянов':2,\nu'недоступ':3,\nu'шейкин':4,\nu'лучинина':2,\nu'афонасова':2,\nu'горский':6,\nu'григорова':4,\nu'векшин':7,\nu'верховодов':2,\nu'меркушева':3,\nu'хомякова':8,\nu'луговская':6,\nu'лыгин':5,\nu'нимгиров':3,\nu'тутынин':2,\nu'жижин':6,\nu'функ':2,\nu'лапицкая':3,\nu'галатюк':2,\nu'ворновских':2,\nu'дворцова':2,\nu'кузичкин':3,\nu'доля':3,\nu'макин':2,\nu'смольков':2,\nu'франк':5,\nu'андрейченко':6,\nu'костюкевич':4,\nu'шемякин':13,\nu'таранец':2,\nu'ромашков':2,\nu'левин':32,\nu'перель':2,\nu'маринченко':2,\nu'топольник':3,\nu'просин':3,\nu'столбова':2,\nu'искендеров':3,\nu'ярочкин':2,\nu'вотинцева':3,\nu'стегниенко':3,\nu'хамидулин':4,\nu'белоцерковский':4,\nu'бугаенко':8,\nu'кеняйкин':3,\nu'назаров':76,\nu'юрцева':2,\nu'ушанов':2,\nu'ромазанов':2,\nu'занин':7,\nu'мошкович':2,\nu'перевезенцев':3,\nu'журавлев':76,\nu'воскобойников':3,\nu'живаев':9,\nu'бегунов':3,\nu'барканов':4,\nu'топчиев':2,\nu'фаттахов':5,\nu'деркач':18,\nu'лытаев':2,\nu'баланин':2,\nu'донгак':5,\nu'шкильнюк':3,\nu'торопо':2,\nu'слепченко':4,\nu'сутурин':2,\nu'рыкалов':2,\nu'манжула':5,\nu'малашук':2,\nu'хадиков':4,\nu'иванисов':2,\nu'мацаренко':2,\nu'канищев':9,\nu'вавилова':6,\nu'ряписов':2,\nu'ветошкин':6,\nu'окишев':5,\nu'дзись':2,\nu'патапеня':2,\nu'мустафаева':3,\nu'пастернак':2,\nu'абрамов':79,\nu'левачева':2,\nu'левада':2,\nu'бутенко':18,\nu'штрек':2,\nu'левинсон':2,\nu'перелыгин':8,\nu'ананин':2,\nu'джиджавадзе':2,\nu'базаров':10,\nu'димитриев':4,\nu'труш':2,\nu'апишев':2,\nu'клюка':2,\nu'сергаев':2,\nu'емцев':2,\nu'химич':4,\nu'вахлаков':2,\nu'карпинская':3,\nu'камардина':2,\nu'шилова':17,\nu'бураченок':2,\nu'лаврусенко':2,\nu'войтович':3,\nu'голованова':6,\nu'зубарев':31,\nu'шемет':5,\nu'кильян':3,\nu'капкаев':2,\nu'казьмин':10,\nu'бабашова':2,\nu'маковская':3,\nu'кайсин':3,\nu'ерастов':2,\nu'обрезков':2,\nu'корниенко':21,\nu'шайдуко':2,\nu'жаров':10,\nu'волощук':2,\nu'огнева':5,\nu'галсанова':2,\nu'сигаев':2,\nu'брылева':3,\nu'литовский':2,\nu'дядин':2,\nu'шуранов':2,\nu'еременко':36,\nu'дюмин':3,\nu'никонорова':3,\nu'карлин':2,\nu'каракотов':2,\nu'царьков':7,\nu'зелепукин':3,\nu'буймов':2,\nu'аляев':3,\nu'шестериков':2,\nu'квашнин':4,\nu'готовцев':2,\nu'ясько':3,\nu'трухан':2,\nu'леванов':3,\nu'фомичев':16,\nu'кудряшов':32,\nu'леднев':3,\nu'ракша':2,\nu'шулепова':3,\nu'грицкова':3,\nu'катаев':11,\nu'тимощенко':4,\nu'колисниченко':2,\nu'степовиков':2,\nu'викулов':4,\nu'лавренов':5,\nu'муренький':2,\nu'поддубный':13,\nu'чабаненко':5,\nu'тонюк':2,\nu'рогожников':6,\nu'штерн':2,\nu'козловская':13,\nu'данилко':2,\nu'бородай':7,\nu'гринько':7,\nu'серебряков':25,\nu'раицкий':2,\nu'волов':2,\nu'евмененко':2,\nu'бабинцев':2,\nu'малахова':14,\nu'тирских':2,\nu'можарова':3,\nu'мезужок':2,\nu'верховец':2,\nu'меньшаков':2,\nu'леонов':51,\nu'алёшкин':2,\nu'самохина':4,\nu'строгонов':3,\nu'хухарев':2,\nu'дубешко':3,\nu'атапин':2,\nu'елагин':8,\nu'брыкалов':2,\nu'шурыгина':11,\nu'эфендиев':6,\nu'едовин':2,\nu'ишмухаметова':3,\nu'сорокин':101,\nu'давыдов':73,\nu'щедрин':5,\nu'долбилин':2,\nu'левченков':2,\nu'каркусов':2,\nu'кладов':2,\nu'авдошин':5,\nu'лябин':2,\nu'цукур':2,\nu'карлова':2,\nu'точиев':3,\nu'рожко':10,\nu'окунева':5,\nu'андронов':10,\nu'мухутдинов':3,\nu'ильянов':2,\nu'болотов':12,\nu'павлюков':4,\nu'донцова':4,\nu'хватова':3,\nu'тулупова':2,\nu'ртищева':2,\nu'храмова':9,\nu'б
уриличев':2,\nu'каменская':5,\nu'тараев':3,\nu'палехов':3,\nu'прямов':2,\nu'шварц':4,\nu'черемисина':3,\nu'винокур':2,\nu'балюра':2,\nu'асанова':3,\nu'давтян':3,\nu'лысюк':5,\nu'добронравов':3,\nu'мильшин':2,\nu'липовской':2,\nu'глеба':3,\nu'гладковский':2,\nu'копорулин':2,\nu'сажин':8,\nu'хадан':2,\nu'заичко':3,\nu'пахоменко':7,\nu'бражников':11,\nu'бадамшина':3,\nu'яблонская':2,\nu'розгон':2,\nu'степанян':4,\nu'люфт':2,\nu'ильюшенко':2,\nu'тагиев':3,\nu'солнцев':7,\nu'шемчук':5,\nu'жбанов':2,\nu'подкопай':2,\nu'кунаков':2,\nu'василец':4,\nu'балашова':17,\nu'винник':13,\nu'скидан':3,\nu'дернова':2,\nu'пермитина':2,\nu'михайловский':15,\nu'грива':2,\nu'вороновский':3,\nu'минин':18,\nu'наминов':2,\nu'галенко':2,\nu'топоров':5,\nu'аминова':4,\nu'мустаев':3,\nu'сойкина':2,\nu'логинов':51,\nu'семьянинов':3,\nu'чудаков':3,\nu'сметанкин':2,\nu'слащев':2,\nu'кинаш':2,\nu'салтанов':5,\nu'тураев':2,\nu'елыкомов':2,\nu'кращенко':2,\nu'головачёв':3,\nu'волчанский':2,\nu'бревнов':2,\nu'родыгина':3,\nu'фатуев':2,\nu'железниченко':2,\nu'тюкавина':2,\nu'лавров':20,\nu'дегтярев':30,\nu'таланцев':3,\nu'уразметов':2,\nu'скурихина':2,\nu'крыжановский':11,\nu'московцева':2,\nu'алаторцева':3,\nu'синченко':3,\nu'шикин':7,\nu'иншаков':4,\nu'ежкова':3,\nu'дергачева':2,\nu'шарапова':9,\nu'филонич':2,\nu'никитаев':3,\nu'бас':3,\nu'никишкин':4,\nu'нечипорук':4,\nu'камалова':4,\nu'лобань':2,\nu'антипьева':2,\nu'дымов':6,\nu'брячак':2,\nu'кулиева':4,\nu'минаев':17,\nu'гулягин':2,\nu'ахмедбеков':2,\nu'погодин':12,\nu'сумарокова':6,\nu'ленский':3,\nu'руденок':3,\nu'хороших':2,\nu'бастраков':2,\nu'кожеко':3,\nu'дудка':4,\nu'силкина':5,\nu'дудко':11,\nu'савиных':7,\nu'имангулов':2,\nu'донецков':2,\nu'галий':2,\nu'жабин':11,\nu'манцуров':2,\nu'мамилов':4,\nu'чебурков':2,\nu'андрус':2,\nu'тарасюк':8,\nu'воронцова':16,\nu'башилов':2,\nu'кононенко':22,\nu'байсултанов':3,\nu'тамбовцев':4,\nu'костылев':15,\nu'гнездилов':6,\nu'торубаров':4,\nu'павлова':113,\nu'калимулин':4,\nu'отке':2,\nu'масленкова':2,\nu'кунгуров':3,\nu'арсенова':2,\nu'дейнек':2,\nu'хуторная':2,\nu'наумчук':2,\nu'чурин':3,\nu'парыгин':2,\nu'седаков':3,\nu'тамаров':3,\nu'белышев':3,\nu'храмченко':2,\nu'землянко':2,\nu'скрынник':4,\nu'бурмистрова':5,\nu'гайдаров':3,\nu'лужина':2,\nu'белоглазов':7,\nu'ивасенко':4,\nu'каневская':4,\nu'строцкий':3,\nu'арасланов':4,\nu'рябцев':8,\nu'таибов':5,\nu'проводин':2,\nu'балахнов':2,\nu'баранникова':3,\nu'благинин':3,\nu'буравцев':3,\nu'кулаков':42,\nu'исаенко':8,\nu'егиян':3,\nu'мордасов':3,\nu'мокляк':2,\nu'жуйко':2,\nu'борзенко':4,\nu'лубенский':3,\nu'аргунов':3,\nu'мурзинцев':2,\nu'шутко':2,\nu'скрыпников':4,\nu'бобылев':12,\nu'азизов':11,\nu'дебелый':2,\nu'дембицкий':3,\nu'самарина':5,\nu'ампелонский':2,\nu'исайкин':6,\nu'павлоградский':2,\nu'каширских':4,\nu'сухачев':2,\nu'чекунова':3,\nu'кропачев':3,\nu'кокурин':3,\nu'черненок':2,\nu'вотинов':2,\nu'губская':4,\nu'ридель':2,\nu'берчук':2,\nu'ерофеева':13,\nu'сойкин':2,\nu'гулакова':2,\nu'янкин':3,\nu'татаринцев':2,\nu'лавренюк':2,\nu'цыдыпова':2,\nu'горевой':2,\nu'гартман':3,\nu'соломатина':4,\nu'дорохов':19,\nu'маликова':7,\nu'савинцева':4,\nu'тимербулатов':3,\nu'евсеев':32,\nu'лесовой':5,\nu'юричева':2,\nu'крупина':7,\nu'равинский':2,\nu'фрадков':3,\nu'сяткина':2,\nu'галанов':7,\nu'русина':5,\nu'корепанова':3,\nu'ерохин':27,\nu'карпюк':3,\nu'кротова':4,\nu'поливанова':3,\nu'первов':2,\nu'мухрыгина':2,\nu'гусев':101,\nu'куринной':2,\nu'боровин':2,\nu'боровик':13,\nu'петропавловский':4,\nu'андрощук':2,\nu'неботов':2,\nu'козулин':4,\nu'колеватых':2,\nu'мусабиров':2,\nu'
косолапов':16,\nu'шидловский':6,\nu'марченков':9,\nu'канаева':5,\nu'хмелевская':3,\nu'брынцева':2,\nu'кузьминская':2,\nu'лащук':2,\nu'шаворский':2,\nu'ахмедов':30,\nu'колосков':7,\nu'вуколов':3,\nu'головатый':2,\nu'богомаз':4,\nu'ташкинов':2,\nu'потапова':31,\nu'лоскутников':3,\nu'шушин':2,\nu'шехорина':2,\nu'минашкин':2,\nu'скарлыгина':2,\nu'лапшова':4,\nu'чаусова':2,\nu'треногин':3,\nu'таганов':3,\nu'терновский':4,\nu'багрецов':2,\nu'яндола':2,\nu'коробицын':4,\nu'спиридонова':19,\nu'коровин':23,\nu'рахматуллина':2,\nu'хуако':4,\nu'сусоев':2,\nu'вакулина':3,\nu'яцевич':5,\nu'шавырин':2,\nu'леснов':4,\nu'будажапова':3,\nu'лесной':2,\nu'пороскун':2,\nu'самаркин':3,\nu'панченков':2,\nu'довгань':3,\nu'пискун':13,\nu'дремов':5,\nu'ванюков':4,\nu'игошев':5,\nu'ершова':29,\nu'кривеня':2,\nu'дудник':8,\nu'пироженко':5,\nu'макушенко':4,\nu'чагай':3,\nu'краев':10,\nu'исаевич':2,\nu'сосков':2,\nu'кадакин':2,\nu'корнюшенко':2,\nu'зольников':2,\nu'доспан-самбу':2,\nu'бабаев':10,\nu'гарин':6,\nu'фунтиков':4,\nu'черкасская':2,\nu'дорошин':2,\nu'володин':29,\nu'рыженко':8,\nu'беспалов':33,\nu'фомичёв':2,\nu'масютин':3,\nu'бакулев':3,\nu'комардин':2,\nu'семушин':2,\nu'копейкина':2,\nu'лещанов':2,\nu'губернаторов':2,\nu'стексов':2,\nu'бешенцева':2,\nu'шихов':6,\nu'алейник':4,\nu'залесова':2,\nu'полукеева':2,\nu'антропов':18,\nu'кобяков':7,\nu'тимаков':5,\nu'резанова':3,\nu'корелин':4,\nu'дудукян':2,\nu'сарыев':2,\nu'шароватова':2,\nu'даньшина':2,\nu'дрёмов':2,\nu'еньков':2,\nu'ральников':2,\nu'сергеенко':7,\nu'ухалина':2,\nu'босенко':3,\nu'басов':13,\nu'кузьминова':3,\nu'голощапов':8,\nu'лощаков':2,\nu'сенаторов':3,\nu'скляренко':8,\nu'абдрахимов':2,\nu'сиротенко':3,\nu'алдошин':2,\nu'латышева':7,\nu'зык':6,\nu'копосова':2,\nu'харькова':4,\nu'инюшкин':2,\nu'твердохлебова':5,\nu'щебуняев':2,\nu'бут':6,\nu'зыбин':4,\nu'богданов':85,\nu'юферева':3,\nu'гильманов':7,\nu'кудрявцева':29,\nu'мышковский':2,\nu'турчанинов':2,\nu'пушнякова':2,\nu'рабаданов':6,\nu'ягафаров':3,\nu'охотников':9,\nu'волчек':6,\nu'анпилогов':5,\nu'макаренков':5,\nu'шубин':32,\nu'сазонова':14,\nu'саввин':6,\nu'неделько':4,\nu'климюк':2,\nu'алещенков':2,\nu'курочка':4,\nu'гальцова':4,\nu'фесун':2,\nu'клюшников':3,\nu'шастина':2,\nu'ланкин':2,\nu'дворовенко':3,\nu'малашенко':8,\nu'сулимов':4,\nu'мухаметжанов':2,\nu'ерко':2,\nu'ивахненко':9,\nu'каргапольцев':2,\nu'лиджиев':4,\nu'шаронов':9,\nu'трусов':18,\nu'южанина':2,\nu'бурсиков':3,\nu'бросалин':3,\nu'кастрель':2,\nu'апциаури':2,\nu'андрюшечкина':2,\nu'трескова':2,\nu'чекан':5,\nu'сарафанов':2,\nu'ахадова':2,\nu'авилова':2,\nu'сверчков':10,\nu'конева':14,\nu'сулайманов':2,\nu'абдулов':2,\nu'чивчян':2,\nu'яценко':17,\nu'кобченко':2,\nu'акатьева':2,\nu'борзиков':2,\nu'тарасевич':11,\nu'дзуцев':2,\nu'обухова':11,\nu'горлачева':2,\nu'лахова':3,\nu'абросимова':9,\nu'габелашвили':2,\nu'бабыкин':2,\nu'баданов':3,\nu'шмарков':2,\nu'жихарев':12,\nu'ларченкова':3,\nu'кумиров':2,\nu'лябах':2,\nu'любцов':2,\nu'купчишин':2,\nu'кононец':2,\nu'дьячук':4,\nu'дубин':5,\nu'дубик':2,\nu'витренко':3,\nu'миненок':2,\nu'чепкин':2,\nu'дайнеко':4,\nu'жданько':2,\nu'дегтярь':4,\nu'барсук':2,\nu'патракеев':2,\nu'дементьева':22,\nu'манкевич':2,\nu'цурцумия':3,\nu'пироговская':2,\nu'аникеев':12,\nu'худокормов':2,\nu'спиченко':2,\nu'стерликов':2,\nu'щенников':6,\nu'марченко':65,\nu'жоголева':2,\nu'пушкар':2,\nu'николашин':2,\nu'разинкин':2,\nu'котельникова':2,\nu'куканова':2,\nu'курносенко':2,\nu'рем':2,\nu'фаустова':2,\nu'крук':5,\nu'шуляк':2,\nu'сошина':2,\nu'михайлов':144,\nu'строкова':4,\nu'берхеев':2,\nu'мелентье
ва':3,\nu'кротиков':2,\nu'зеленев':4,\nu'гребенев':3,\nu'луцишин':2,\nu'вылегжанин':5,\nu'шаманаев':3,\nu'рыбенко':2,\nu'черепухина':2,\nu'костяева':2,\nu'собко':4,\nu'тетерюкова':2,\nu'хмелева':5,\nu'лепехин':2,\nu'гуринович':2,\nu'меремьянина':3,\nu'субботин':26,\nu'чотчаев':2,\nu'стецюк':3,\nu'дравгелис':2,\nu'курьянов':8,\nu'симчук':2,\nu'жуков':89,\nu'лисунов':3,\nu'панков':30,\nu'сивоплясов':2,\nu'лисин':11,\nu'пахомов':41,\nu'губарева':6,\nu'крукович':2,\nu'никандрова':2,\nu'пурис':2,\nu'субочева':3,\nu'оболенский':3,\nu'гирин':4,\nu'гарник':2,\nu'собченко':2,\nu'тажитдинов':3,\nu'савин':44,\nu'кобякова':3,\nu'братусь':2,\nu'рахматуллин':10,\nu'баранников':4,\nu'жилкин':5,\nu'величковский':2,\nu'заманов':2,\nu'травин':7,\nu'архиреев':2,\nu'шаргаев':2,\nu'голодникова':2,\nu'осипенков':2,\nu'фролкин':4,\nu'поречный':2,\nu'есикова':2,\nu'балденков':3,\nu'ляпин':13,\nu'решетняк':14,\nu'шаблий':3,\nu'салтыкова':6,\nu'щепин':8,\nu'старков':18,\nu'шалаева':4,\nu'бутаев':4,\nu'бельских':5,\nu'бабанов':6,\nu'берулава':2,\nu'паршиков':9,\nu'крикунова':2,\nu'климентов':2,\nu'цымбалюк':6,\nu'постнов':8,\nu'яшина':8,\nu'ежков':2,\nu'речкин':3,\nu'неклюдов':7,\nu'коновод':2,\nu'шашин':5,\nu'тимшин':2,\nu'панфёров':2,\nu'сеин':2,\nu'кирилов':4,\nu'федоровская':2,\nu'карташова':9,\nu'ли':10,\nu'горнаев':2,\nu'криворотов':3,\nu'чивилев':3,\nu'степанченко':2,\nu'елизарьев':3,\nu'чернышков':3,\nu'большанин':2,\nu'потылицын':2,\nu'дорохова':3,\nu'сокерин':2,\nu'зибров':4,\nu'опекунов':2,\nu'тимчук':5,\nu'томилов':9,\nu'уткин':25,\nu'гончарук':13,\nu'пахоруков':2,\nu'строганова':4,\nu'хахулин':2,\nu'райхман':2,\nu'алешина':13,\nu'мухетдинов':2,\nu'дмитренко':13,\nu'говорухин':3,\nu'гаева':3,\nu'каунов':2,\nu'малкарова':2,\nu'карева':4,\nu'илюшечкин':3,\nu'яцкий':4,\nu'тювина':2,\nu'толчеев':4,\nu'степнова':3,\nu'бобрышева':3,\nu'белогубов':2,\nu'щепкина':3,\nu'заикин':11,\nu'мигда':2,\nu'булахов':2,\nu'косовский':2,\nu'черепанова':17,\nu'болотников':4,\nu'олефиренко':3,\nu'буденный':2,\nu'белоглазова':8,\nu'рабинович':2,\nu'федорин':4,\nu'копотилов':2,\nu'вернигоров':5,\nu'багаев':7,\nu'родькин':6,\nu'васенко':3,\nu'третьяк':5,\nu'желнин':4,\nu'ярковой':2,\nu'посаженников':4,\nu'феденков':2,\nu'фахрутдинова':3,\nu'тарханова':3,\nu'шишмаков':2,\nu'молодых':5,\nu'конопля':2,\nu'безяева':2,\nu'баев':22,\nu'иванюков':2,\nu'курило':5,\nu'асланян':4,\nu'леонович':5,\nu'мурадов':8,\nu'генерозова':2,\nu'киреева':19,\nu'широкорад':3,\nu'матафонов':2,\nu'сероштанов':5,\nu'лобынцев':3,\nu'сивер':2,\nu'сивец':3,\nu'трунилин':2,\nu'косолапова':4,\nu'мананникова':2,\nu'самодова':2,\nu'шавандина':2,\nu'каленский':4,\nu'сивачев':2,\nu'слободчикова':4,\nu'шарова':9,\nu'горлатых':3,\nu'апраксин':2,\nu'уланов':16,\nu'половцев':2,\nu'сливка':3,\nu'рафеева':2,\nu'лобанов':40,\nu'щекотихин':2,\nu'якимов':23,\nu'бороздина':3,\nu'курда':2,\nu'антоненко':18,\nu'маркелова':2,\nu'стаханов':2,\nu'булавкин':3,\nu'неворотова':2,\nu'савинов':21,\nu'колупаев':8,\nu'бышков':2,\nu'нескоромный':2,\nu'прытков':5,\nu'спиридонов':37,\nu'киселев':106,\nu'лежнин':4,\nu'сандер':3,\nu'евченко':2,\nu'халявин':3,\nu'ванин':8,\nu'носулев':3,\nu'годун':2,\nu'рахманова':2,\nu'храбров':2,\nu'комлева':9,\nu'марьев':3,\nu'ананьин':5,\nu'аристова':2,\nu'оленичев':3,\nu'усова':7,\nu'еремеев':27,\nu'белокуров':3,\nu'захаркин':4,\nu'плюхина':2,\nu'нартов':2,\nu'пономарёв':11,\nu'буканов':2,\nu'фефилов':2,\nu'пластун':2,\nu'ищук':5,\nu'дудченко':4,\nu'большунов':2,\nu'целищева':3,\nu'межонов':2,\nu'князев':55,\nu'куликов':79,\nu'фирсанова':3,\nu'шадырко':2,
\nu'оздоев':7,\nu'гаврилюк':16,\nu'шилин':8,\nu'пчелинцев':12,\nu'деменев':2,\nu'гарина':2,\nu'илюшин':5,\nu'шелест':4,\nu'абдулганиев':2,\nu'серикова':3,\nu'гриднева':2,\nu'ежиков':3,\nu'гульчук':2,\nu'гераськин':3,\nu'бояров':6,\nu'горяев':2,\nu'эрдыниев':2,\nu'шлыков':11,\nu'шойгу':3,\nu'лященко':7,\nu'русакевич':2,\nu'совгир':2,\nu'будникова':4,\nu'сердюков':19,\nu'бурачек':2,\nu'пашаев':4,\nu'кондрусева':2,\nu'землянский':5,\nu'шевель':2,\nu'деревенец':2,\nu'кукина':3,\nu'лычагин':2,\nu'скрипкин':2,\nu'проскурнин':2,\nu'меренкова':2,\nu'чепелкина':2,\nu'федотюк':2,\nu'хилько':6,\nu'лещик':2,\nu'плеханов':8,\nu'тен':4,\nu'гурьева':10,\nu'севидова':2,\nu'скоркин':3,\nu'негря':2,\nu'пищугин':4,\nu'менин':2,\nu'карлов':8,\nu'галанцева':2,\nu'пахмутов':3,\nu'крымов':6,\nu'никонов':41,\nu'санаев':2,\nu'чесалин':2,\nu'чуева':5,\nu'низов':6,\nu'подсевалов':2,\nu'антипенко':2,\nu'бекетова':3,\nu'келин':2,\nu'москвин':18,\nu'дергунов':8,\nu'кабанцов':2,\nu'крупникова-балашова':2,\nu'прошунин':3,\nu'пирогова':10,\nu'огородников':11,\nu'газдиев':3,\nu'федин':8,\nu'андросова':8,\nu'федик':3,\nu'красносельский':2,\nu'астаев':2,\nu'моисеенко':23,\nu'новичихина':2,\nu'ляхова':7,\nu'лазутин':3,\nu'распопина':2,\nu'ещенко':5,\nu'закатова':2,\nu'сенчуров':3,\nu'ивлиева':2,\nu'чередов':2,\nu'коршикова':4,\nu'курчатов':2,\nu'абакумова':6,\nu'аброськина':2,\nu'голик':4,\nu'ромашов':9,\nu'суднишников':2,\nu'меркурьев':4,\nu'чигаев':2,\nu'пигарев':4,\nu'грунин':3,\nu'велиев':6,\nu'сурхаев':3,\nu'мухина':15,\nu'калиберда':2,\nu'двинских':4,\nu'вашуков':2,\nu'пелевин':7,\nu'асомчик':2,\nu'подъяблонский':2,\nu'гатауллина':2,\nu'вайнер':2,\nu'сайгин':2,\nu'мангушева':2,\nu'шамшадынов':2,\nu'сарваров':2,\nu'филинова':2,\nu'гайдак':2,\nu'гайдай':3,\nu'зленко':3,\nu'белан':7,\nu'лучкин':5,\nu'пожитков':2,\nu'вохмянин':6,\nu'шорников':7,\nu'москвитин':6,\nu'яровая':5,\nu'фокина':21,\nu'целищев':5,\nu'торчинов':2,\nu'баринов':29,\nu'федорец':2,\nu'целых':2,\nu'буслаева':3,\nu'акмалов':2,\nu'божков':7,\nu'скутин':3,\nu'мельченко':3,\nu'глущенко':27,\nu'белогуров':4,\nu'петкевич':4,\nu'каравайцев':3,\nu'конохова':2,\nu'миленин':2,\nu'чегодаев':2,\nu'андрюшин':2,\nu'шкуратова':3,\nu'каплунов':5,\nu'нитченко':2,\nu'сафронова':26,\nu'маковкин':3,\nu'алеханов':2,\nu'будревич':2,\nu'шендо':2,\nu'корецкий':4,\nu'смоленцев':5,\nu'онуфрийчук':3,\nu'акинин':5,\nu'туник':2,\nu'кирилюк':4,\nu'бовт':2,\nu'пляскина':2,\nu'бердинских':3,\nu'щеглова':12,\nu'буданова':6,\nu'горбатюк':6,\nu'щипанов':3,\nu'зеренков':3,\nu'семенихин':12,\nu'голядкина':2,\nu'лебеда':2,\nu'папст':2,\nu'харитоненко':2,\nu'музалева':2,\nu'кузовлева':2,\nu'кокоткин':2,\nu'курзин':2,\nu'большаков':31,\nu'фатхуллин':3,\nu'гагарина':6,\nu'петрушина':2,\nu'щипачева':2,\nu'круглов':30,\nu'пугачев':19,\nu'султанова':5,\nu'бояркина':3,\nu'помещенко':2,\nu'рябченко':8,\nu'хуснетдинов':4,\nu'ушаков':53,\nu'левакова':3,\nu'батанова':3,\nu'тулегенов':2,\nu'богатиков':2,\nu'салюк':3,\nu'никулин':55,\nu'абасова':2,\nu'губенко':6,\nu'елистратов':15,\nu'лунина':4,\nu'стасов':3,\nu'начева':2,\nu'стефаненко':3,\nu'яцуненко':2,\nu'кайдалов':3,\nu'трушко':2,\nu'фурсин':3,\nu'падерин':2,\nu'лаптева':19,\nu'красиков':11,\nu'андреева':84,\nu'иванчик':2,\nu'абдурахманов':12,\nu'лютый':4,\nu'филипов':8,\nu'ремезов':2,\nu'гончуков':3,\nu'маликов':14,\nu'тугов':2,\nu'колегов':6,\nu'олейникова':9,\nu'посохова':2,\nu'трацевская':2,\nu'зверева':26,\nu'рымарев':2,\nu'василевский':15,\nu'уюсов':2,\nu'нарыжный':2,\nu'парахневич':3,\nu'халецкий':4,\nu'голуб':26,\nu'матвеевский':2,\nu'кашкаро
в':3,\nu'ботвин':3,\nu'абрашкина':2,\nu'скрипниченко':4,\nu'чеканова':4,\nu'ходыкин':3,\nu'артюх':4,\nu'лиев':2,\nu'потапович':2,\nu'пикалов':12,\nu'скороход':4,\nu'головнев':2,\nu'маркушин':3,\nu'епишин':2,\nu'боровкова':4,\nu'головцов':2,\nu'рыжиков':4,\nu'верескунов':2,\nu'подгурский':2,\nu'кречин':2,\nu'савкин':8,\nu'тарасенко':38,\nu'баталина':3,\nu'рожин':3,\nu'шпилевой':3,\nu'федонюк':2,\nu'кувшинов':14,\nu'строков':2,\nu'низаметдинова':2,\nu'азаркин':2,\nu'умяров':3,\nu'тесленко':8,\nu'анкудинов':2,\nu'шустов':7,\nu'тишенков':2,\nu'кирпичников':3,\nu'дмитраш':2,\nu'скачко':3,\nu'ставровский':2,\nu'бородуля':4,\nu'староверов':3,\nu'васильчук':5,\nu'важенин':3,\nu'шешина':2,\nu'коробков':12,\nu'бородкин':6,\nu'безбах':2,\nu'корнаухов':4,\nu'самылов':2,\nu'вайнштейн':3,\nu'протопопова':5,\nu'карпеев':2,\nu'горбенко':10,\nu'додин':2,\nu'дубатовка':2,\nu'звонов':5,\nu'евлахов':2,\nu'матус':3,\nu'кузьмицкий':2,\nu'пашковский':6,\nu'малинкин':3,\nu'табачников':2,\nu'емельянов':77,\nu'муджиков':2,\nu'исмаилова':2,\nu'бадалов':2,\nu'харитонова':19,\nu'дерико':2,\nu'белинский':2,\nu'милько':3,\nu'нигматуллина':4,\nu'стафеева':3,\nu'евстифеев':7,\nu'синева':2,\nu'бударина':7,\nu'сосновцев':3,\nu'аутлев':3,\nu'кремер':9,\nu'колпащиков':5,\nu'бабков':6,\nu'сычёв':5,\nu'мамсуров':2,\nu'бакланов':16,\nu'зворыгин':4,\nu'бузанов':2,\nu'белоногова':3,\nu'бодров':12,\nu'ярошевский':2,\nu'губский':5,\nu'чичерин':2,\nu'утюгов':2,\nu'супруненко':2,\nu'филина':8,\nu'начинов':2,\nu'липунова':3,\nu'литвинович':5,\nu'бессонова':4,\nu'суетин':3,\nu'сизоненко':2,\nu'москул':3,\nu'уразмамбетов':2,\nu'маланин':3,\nu'добыш':3,\nu'прокин':2,\nu'борис':2,\nu'луковников':2,\nu'пивненко':3,\nu'поддубная':3,\nu'драгин':2,\nu'мясников':20,\nu'троценко':5,\nu'кузмин':3,\nu'воеводина':4,\nu'андриянова':5,\nu'покровская':4,\nu'долин':3,\nu'хабалова':2,\nu'серицкий':2,\nu'петроченко':8,\nu'большакова':19,\nu'мусалимов':2,\nu'телицын':4,\nu'шаврова':4,\nu'артеева':3,\nu'сенченкова':3,\nu'гетьман':2,\nu'щемелев':5,\nu'сырбу':3,\nu'жуйков':4,\nu'берегов':2,\nu'авзалов':2,\nu'камнева':3,\nu'козел':3,\nu'гордийчук':2,\nu'немиров':4,\nu'талызина':2,\nu'картышов':2,\nu'шерстов':2,\nu'хасаншин':2,\nu'шалашов':4,\nu'педченко':4,\nu'зварич':4,\nu'дубовицкий':9,\nu'николаенко':29,\nu'мачехин':3,\nu'лапушкина':2,\nu'домбровский':5,\nu'чухиль':2,\nu'жарких':4,\nu'макушев':4,\nu'лончаков':3,\nu'проскура':2,\nu'асташин':2,\nu'севостьянов':21,\nu'ивакина':2,\nu'яроцкая':2,\nu'даниелян':2,\nu'холод':9,\nu'горохова':14,\nu'шаульская':2,\nu'солодилов':2,\nu'зенкина':2,\nu'волошина':10,\nu'подковырова':2,\nu'кожуров':2,\nu'апалько':2,\nu'широбокова':2,\nu'кац':4,\nu'покацкий':2,\nu'шестак':6,\nu'филипцов':3,\nu'булынин':3,\nu'бушков':4,\nu'денисов':86,\nu'калинченко':5,\nu'стрыгина':3,\nu'глот':2,\nu'кривонос':4,\nu'сметанников':3,\nu'бахин':3,\nu'черноусов':7,\nu'акбашев':2,\nu'белоусов':72,\nu'харламов':19,\nu'казмерчук':2,\nu'шмаль':3,\nu'губина':8,\nu'кашкин':2,\nu'зиновьев':32,\nu'буря':3,\nu'черкасский':3,\nu'пазий':4,\nu'андрианов':24,\nu'марфин':3,\nu'шкилев':2,\nu'мальков':11,\nu'киршина':2,\nu'куделя':2,\nu'ша��даков':2,\nu'нам':4,\nu'бурдюгов':3,\nu'крикун':3,\nu'абасов':12,\nu'древаль':2,\nu'драгункина':3,\nu'водопьянова':2,\nu'смык':3,\nu'лактионов':6,\nu'кулин':3,\nu'кулик':34,\nu'рожнов':7,\nu'клинцевич':2,\nu'кириллин':2,\nu'козюков':2,\nu'алимов':14,\nu'бикбулатова':3,\nu'каплунова':2,\nu'лютиков':4,\nu'чиненков':2,\nu'чупрова':4,\nu'осадченко':3,\nu'лев':2,\nu'даренкова':2,\nu'ослопов':2,\nu'селин':12,\nu'полухтин':2,\nu'дом
ахина':2,\nu'маташук':2,\nu'саламатина':3,\nu'андрюнин':3,\nu'сухова':10,\nu'лысогорский':3,\nu'сизова':7,\nu'рыжман':2,\nu'курская':2,\nu'казмирук':3,\nu'гребенец':2,\nu'омаров':15,\nu'модин':4,\nu'прилепский':7,\nu'семкин':4,\nu'иванищев':2,\nu'тарануха':2,\nu'куц':18,\nu'кормилицина':2,\nu'лопатников':2,\nu'безруков':19,\nu'волынец':6,\nu'коноплев':14,\nu'буянкин':3,\nu'сапожков':2,\nu'саклаков':2,\nu'корсунов':2,\nu'мареев':5,\nu'закомолдин':2,\nu'коломийцев':2,\nu'гречка':2,\nu'мокроусов':7,\nu'приезжева':2,\nu'мышкина':2,\nu'миньков':4,\nu'шуверов':2,\nu'баканов':10,\nu'будкин':4,\nu'минкин':2,\nu'гарцев':2,\nu'караяков':2,\nu'холманских':2,\nu'чеканов':5,\nu'джалалов':2,\nu'банников':9,\nu'инкин':2,\nu'бетехтин':2,\nu'бебешко':5,\nu'михальченков':2,\nu'савушкин':5,\nu'никонова':14,\nu'брандт':2,\nu'кольцова':5,\nu'винников':10,\nu'чермошенцева':2,\nu'шишкин':53,\nu'бурлуцкая':2,\nu'халиев':2,\nu'лонин':2,\nu'куваев':2,\nu'печёнкин':3,\nu'сбоева':2,\nu'агошков':2,\nu'мащенко':6,\nu'мерзликин':5,\nu'торохов':2,\nu'дремлюга':2,\nu'трошенков':2,\nu'смазнов':3,\nu'вахрина':2,\nu'янгиров':2,\nu'поморов':3,\nu'юрасов':7,\nu'ступников':5,\nu'загоруйко':4,\nu'кочкина':7,\nu'сирота':7,\nu'давлетбаев':2,\nu'фархутдинова':2,\nu'воеводова':2,\nu'мелехина':3,\nu'шамарин':4,\nu'шабалина':7,\nu'агабеков':3,\nu'данилович':2,\nu'кривовяз':3,\nu'фриюк':2,\nu'кабаргин':2,\nu'куранова':2,\nu'буглак':6,\nu'хаметов':3,\nu'савченко':66,\nu'балдашинов':2,\nu'балакирев':5,\nu'матевосян':3,\nu'черноус':4,\nu'шугайло':2,\nu'опенышева':2,\nu'вепрев':2,\nu'выборов':2,\nu'лежепекова':2,\nu'дубовая':3,\nu'шлаев':2,\nu'беланов':2,\nu'чайковская':5,\nu'колегова':2,\nu'швалев':2,\nu'зенков':9,\nu'устюжанин':2,\nu'трафимов':2,\nu'юртаев':4,\nu'ганичев':5,\nu'авсеенко':2,\nu'муромцев':3,\nu'коняшкин':2,\nu'хоробров':2,\nu'цейтина':2,\nu'тугарин':2,\nu'костюнин':5,\nu'данилкина':2,\nu'порошин':6,\nu'загребнев':3,\nu'усачев':15,\nu'плешков':7,\nu'хитров':4,\nu'ольшевский':5,\nu'пупкова':2,\nu'сарвадий':2,\nu'финогенов':5,\nu'крысь':2,\nu'побединский':4,\nu'чарыков':3,\nu'алейникова':2,\nu'крутяков':3,\nu'тимохин':4,\nu'устинова':22,\nu'елсуков':3,\nu'семынин':2,\nu'мошкова':2,\nu'кручинина':5,\nu'глазков':14,\nu'рыжова':17,\nu'шальнев':3,\nu'салихова':4,\nu'агаев':11,\nu'губанов':20,\nu'домничева':3,\nu'шумов':3,\nu'каштанов':6,\nu'политов':6,\nu'золотарева':13,\nu'потанин':6,\nu'руднева':6,\nu'ильтяков':3,\nu'пчельников':2,\nu'мурасов':2,\nu'левичева':2,\nu'вяткина':5,\nu'микулич':3,\nu'карасева':10,\nu'грек':4,\nu'сушкова':4,\nu'киричук':3,\nu'трегубова':8,\nu'сиднева':3,\nu'велиева':2,\nu'солодский':2,\nu'шляпников':6,\nu'лазарчук':4,\nu'пивкин':2,\nu'сангаджиева':2,\nu'налейкин':2,\nu'нестерук':3,\nu'бродская':3,\nu'стрелец':4,\nu'коченкова':2,\nu'манаенков':5,\nu'ивагин':2,\nu'носко':3,\nu'хруцкий':2,\nu'жидких':5,\nu'царева':12,\nu'рывкин':4,\nu'сова':4,\nu'савватеев':5,\nu'милютина':2,\nu'воловенко':2,\nu'шабашова':2,\nu'карманников':2,\nu'ярцев':10,\nu'зубенко':15,\nu'татаренко':15,\nu'топалов':3,\nu'дробин':3,\nu'реуцкий':2,\nu'козадаев':2,\nu'сидоркин':10,\nu'финагин':2,\nu'калиниченко':22,\nu'масякин':2,\nu'гайнетдинов':5,\nu'шклярук':2,\nu'нода':2,\nu'стукалин':2,\nu'климченко':2,\nu'студеникин':5,\nu'плисов':4,\nu'берг':4,\nu'маряхин':2,\nu'кустова':2,\nu'роговая':2,\nu'бобов':2,\nu'колодий':8,\nu'колодин':5,\nu'максимчук':6,\nu'михно':3,\nu'корытцев':2,\nu'плясунов':2,\nu'якимович':6,\nu'кочубей':3,\nu'бекетов':11,\nu'галузина':2,\nu'емелин':11,\nu'оюн':3,\nu'комраков':2,\nu'вавилина':2,\nu'елохин':2,\nu'кр
асюк':4,\nu'слизков':3,\nu'климкина':3,\nu'федоткина':2,\nu'камалтынов':2,\nu'богдашкина':2,\nu'солодуха':2,\nu'махов':14,\nu'асташкин':4,\nu'горюнов':9,\nu'дзасохов':3,\nu'ханиев':2,\nu'селезень':3,\nu'мокеев':6,\nu'киселёва':2,\nu'евдокимов':47,\nu'алехина':7,\nu'жукова':61,\nu'низамутдинов':5,\nu'новичков':9,\nu'довбня':5,\nu'аникеева':6,\nu'халимов':4,\nu'пушкарёв':2,\nu'цапко':6,\nu'пыхтин':4,\nu'дубровин':25,\nu'ананьина':4,\nu'кузьмичева':8,\nu'кожуховский':3,\nu'коржова':2,\nu'баркин':2,\nu'гутова':3,\nu'юдаев':3,\nu'крайнова':7,\nu'усанов':6,\nu'выставкин':2,\nu'нейман':3,\nu'бугаев':13,\nu'кулыгин':3,\nu'соловьев':115,\nu'зорин':32,\nu'файзуллин':10,\nu'репкин':6,\nu'петухов':40,\nu'скутель':2,\nu'валиев':13,\nu'сакаев':4,\nu'трещев':3,\nu'мосунов':2,\nu'тупикин':7,\nu'терлецкая':6,\nu'жарова':9,\nu'анохин':31,\nu'чванова':3,\nu'зарудный':2,\nu'носкова':15,\nu'каракозова':2,\nu'сикорский':4,\nu'мигунов':6,\nu'мазина':2,\nu'корнакова':2,\nu'артюшин':5,\nu'морев':7,\nu'павлухин':3,\nu'рысев':2,\nu'платов':8,\nu'шабунов':2,\nu'евсиков':2,\nu'шалавин':2,\nu'курбатова':9,\nu'цыкунов':5,\nu'николаева':67,\nu'каплий':2,\nu'стряпухин':2,\nu'каплин':7,\nu'ветлугин':2,\nu'нахов':2,\nu'колпаков':23,\nu'замков':2,\nu'кушниренко':5,\nu'мельник':43,\nu'гогунский':2,\nu'соломин':8,\nu'ишмухаметов':5,\nu'корнишин':2,\nu'акишин':3,\nu'мохнаткин':2,\nu'болтовский':2,\nu'карпов':84,\nu'бобриков':6,\nu'сивцов':6,\nu'снеткова':4,\nu'виноходов':4,\nu'неустроева':3,\nu'зенькович':2,\nu'вознесенская':2,\nu'коченков':3,\nu'коков':7,\nu'михалёв':2,\nu'марьенко':2,\nu'ковешников':4,\nu'щипкова':2,\nu'григорян':15,\nu'покачалов':2,\nu'кожанов':6,\nu'шумова':3,\nu'жугин':2,\nu'фоменко':29,\nu'плиев':11,\nu'шавель':2,\nu'юркин':9,\nu'ларшина':3,\nu'хритинин':2,\nu'шостак':4,\nu'полубинский':2,\nu'яушев':2,\nu'мушкудиани':2,\nu'рекало':3,\nu'клец':2,\nu'дейберт':2,\nu'аксарин':2,\nu'ванюшин':5,\nu'мурадян':2,\nu'ярошевич':3,\nu'мошкин':3,\nu'лавренко':3,\nu'турчанов':2,\nu'крамаренко':7,\nu'беловол':2,\nu'бедулин':2,\nu'тишков':6,\nu'костарев':6,\nu'будиева':2,\nu'кошевой':5,\nu'кулибабин':2,\nu'бутусова':3,\nu'селезнев':51,\nu'богатырь':2,\nu'малинин':20,\nu'загребаев':2,\nu'кучерявая':2,\nu'верещагин':13,\nu'метелкин':7,\nu'голубятникова':2,\nu'замалеев':2,\nu'арифуллина':2,\nu'захаров':116,\nu'пешков':29,\nu'сурина':6,\nu'буев':2,\nu'пастушенко':4,\nu'кунафин':2,\nu'черногоров':2,\nu'исмагулов':3,\nu'тамазаев':2,\nu'лящук':3,\nu'лавровский':2,\nu'лагунова':2,\nu'ломовцев':5,\nu'куклин':14,\nu'прохорова':14,\nu'халяпин':5,\nu'тулинов':2,\nu'рудник':3,\nu'брыкова':4,\nu'леушин':4,\nu'шаныгин':2,\nu'золина':3,\nu'богданенко':2,\nu'чиняев':2,\nu'бедняков':3,\nu'чащин':6,\nu'вовченко':4,\nu'сазыкина':2,\nu'самохвалов':12,\nu'рогов':23,\nu'шестерин':2,\nu'ходин':2,\nu'малаев':4,\nu'горячева':9,\nu'гойхман':3,\nu'тесля':6,\nu'кремнев':3,\nu'баляев':2,\nu'баталова':8,\nu'повх':2,\nu'петелин':4,\nu'коротких':16,\nu'машуков':2,\nu'абросимов':14,\nu'долинский':3,\nu'габринович':2,\nu'панкрашкин':2,\nu'умарова':4,\nu'шарко':3,\nu'платицын':2,\nu'этманов':2,\nu'бережная':6,\nu'боровский':6,\nu'голев':5,\nu'хазанов':2,\nu'скорочкина':2,\nu'кнышов':3,\nu'новичкова':4,\nu'павлишин':2,\nu'шмелев':24,\nu'сташенко':2,\nu'большунова':2,\nu'мамонтова':9,\nu'балыкин':3,\nu'лашкова':3,\nu'мушланов':2,\nu'шендрик':3,\nu'дворникова':3,\nu'полонский':6,\nu'новгородова':3,\nu'мантул':2,\nu'мулин':2,\nu'копенкин':2,\nu'салпагаров':5,\nu'троц':2,\nu'казиахмедов':2,\nu'шарагов':2,\nu'зевако':3,\nu'лучинин':2,\nu'барбашина':2,\nu'шишкова'
:10,\nu'кишко':2,\nu'сумский':2,\nu'кондин':4,\nu'сеченова':3,\nu'аширов':3,\nu'адамов':10,\nu'тютюнов':2,\nu'безумов':2,\nu'малярова':2,\nu'хабибуллин':19,\nu'страшнов':2,\nu'кудрин':11,\nu'лунева':8,\nu'резник':13,\nu'голубь':5,\nu'фирстов':2,\nu'котеев':2,\nu'кусов':6,\nu'кихтенко':2,\nu'марычева':2,\nu'паданов':2,\nu'галина':2,\nu'клешнин':3,\nu'барбашин':3,\nu'дронов':15,\nu'блинкова':3,\nu'зарипова':2,\nu'климухин':3,\nu'мещеряков':33,\nu'снопков':4,\nu'бирюков':36,\nu'жевлакова':2,\nu'лучин':4,\nu'режнов':3,\nu'кобылинский':2,\nu'завгородний':12,\nu'лемешов':2,\nu'литвинцева':2,\nu'добров':4,\nu'рыбаков':30,\nu'авдохин':2,\nu'тушев':2,\nu'терешин':6,\nu'надеин':4,\nu'плющева':2,\nu'делюкин':3,\nu'плаксина':4,\nu'куценко':10,\nu'стрюков':2,\nu'провоторов':4,\nu'петрович':5,\nu'сахнов':6,\nu'фонина':2,\nu'ульянцев':2,\nu'рябоштан':2,\nu'бебенин':3,\nu'кулапин':2,\nu'сахарчук':4,\nu'шарафутдинов':13,\nu'рулев':3,\nu'лужецкий':3,\nu'азнабаев':2,\nu'авдошкина':2,\nu'бакай':2,\nu'шайнога':2,\nu'стоякин':4,\nu'доценко':21,\nu'чуев':7,\nu'садова':3,\nu'хромушина':3,\nu'зеленцова':2,\nu'пак':19,\nu'бобков':16,\nu'сарсенбаев':2,\nu'мирный':2,\nu'мошкина':2,\nu'стельмащук':2,\nu'булдаев':2,\nu'пестриков':3,\nu'титенко':4,\nu'стребков':8,\nu'гайфутдинов':4,\nu'арабов':2,\nu'казачков':3,\nu'липина':2,\nu'кутищев':2,\nu'гутка':2,\nu'авершин':3,\nu'коцина':4,\nu'тепикин':2,\nu'гельфгат':2,\nu'сергиюк':3,\nu'марцинкевич':2,\nu'сулейманова':4,\nu'догаев':4,\nu'локтев':12,\nu'гатамов':2,\nu'бучин':3,\nu'свиридова':16,\nu'стенякина':2,\nu'талызин':2,\nu'трипутин':2,\nu'овчинникова':38,\nu'романец':3,\nu'чеботаев':4,\nu'якубовский':5,\nu'просветов':4,\nu'столяров':32,\nu'седунова':5,\nu'курякова':2,\nu'лелина':2,\nu'цатуров':6,\nu'куршин':2,\nu'белобрыкина':2,\nu'ефремова':24,\nu'самойлов':33,\nu'климушин':2,\nu'ляшкевич':2,\nu'чванов':2,\nu'кирюшина':4,\nu'гареев':6,\nu'манузин':2,\nu'ященко':14,\nu'дробот':4,\nu'артюхов':15,\nu'хапаев':4,\nu'калугина':8,\nu'савченков':3,\nu'морарь':3,\nu'подзарей':2,\nu'жильцова':4,\nu'митирева':2,\nu'рыжков':37,\nu'домнина':3,\nu'кобылянский':4,\nu'стрелкова':12,\nu'волобоева':2,\nu'долганин':3,\nu'ревенчук':2,\nu'анучин':4,\nu'луцай':2,\nu'гигель':2,\nu'гулиев':3,\nu'шереметов':4,\nu'амелина':3,\nu'никончук':2,\nu'копьева':2,\nu'сизиков':2,\nu'ширяева':11,\nu'ермолина':6,\nu'асташкина':2,\nu'головинов':2,\nu'хомутова':4,\nu'дружин':2,\nu'лызин':2,\nu'корольков':10,\nu'горбань':15,\nu'качанова':2,\nu'кучин':10,\nu'ковальская':5,\nu'апшев':3,\nu'нецкин':2,\nu'кочков':2,\nu'крисюк':2,\nu'банная':2,\nu'дронь':2,\nu'сперанская':2,\nu'петрищев':3,\nu'липовская':3,\nu'семичев':5,\nu'залов':2,\nu'горяинова':4,\nu'куприенко':2,\nu'багдасаров':2,\nu'дроздов':42,\nu'скопин':3,\nu'тарабанько':2,\nu'платонов':24,\nu'кащенков':2,\nu'вакулин':3,\nu'форш':2,\nu'поротников':3,\nu'махмудова':2,\nu'насиров':2,\nu'туров':9,\nu'дегтярева':15,\nu'азарова':7,\nu'ахмадов':3,\nu'рогатин':3,\nu'белкова':2,\nu'минина':8,\nu'зундуева':2,\nu'васьков':4,\nu'коростелев':11,\nu'склярова':8,\nu'лахтина':2,\nu'сидельникова':5,\nu'погорельский':4,\nu'измайлова':7,\nu'шошин':7,\nu'зиненко':4,\nu'шатохин':14,\nu'башаев':3,\nu'шукюров':2,\nu'войтова':3,\nu'шустрова':2,\nu'мудрова':3,\nu'щепетильников':2,\nu'гилязова':2,\nu'веретенникова':4,\nu'качалов':7,\nu'пулатов':2,\nu'аландаренко':2,\nu'битнер':2,\nu'ерёмин':9,\nu'айрапетян':3,\nu'гомонов':4,\nu'ватанина':2,\nu'шашкова':9,\nu'орловская':3,\nu'пичугов':2,\nu'макурин':7,\nu'мазилкин':2,\nu'радкевич':6,\nu'жеглова':4,\nu'кучеров':10,\nu'белозор':2,\nu
'сень':2,\nu'невоструев':2,\nu'товпик':2,\nu'медведский':2,\nu'петрашов':3,\nu'петрик':9,\nu'петрин':2,\nu'трутнева':4,\nu'тиханин':2,\nu'хлынов':4,\nu'мазаева':2,\nu'плахих':3,\nu'демкина':2,\nu'гайнулина':2,\nu'суханова':20,\nu'шпаков':11,\nu'ласков':2,\nu'сидорук':2,\nu'исмайлов':2,\nu'фомичева':15,\nu'скиба':5,\nu'клинов':3,\nu'болотских':3,\nu'перфилов':3,\nu'фахрутдинов':3,\nu'агибалов':8,\nu'бараненко':2,\nu'буканова':3,\nu'стручков':4,\nu'рыбакин':2,\nu'матвиенко':22,\nu'щеников':2,\nu'кодин':2,\nu'мень':2,\nu'лобков':5,\nu'русинов':12,\nu'зарипов':17,\nu'вышегуров':2,\nu'гришунин':2,\nu'малева':3,\nu'елистратова':4,\nu'дмитриева':71,\nu'егошин':2,\nu'макухин':3,\nu'кожухарь':2,\nu'очередько':3,\nu'губайдуллин':3,\nu'грибанова':6,\nu'головизнина':2,\nu'крохин':2,\nu'самедов':3,\nu'ольков':3,\nu'катков':15,\nu'кузовенков':3,\nu'охотникова':2,\nu'боронин':3,\nu'дюбанов':2,\nu'черноморец':2,\nu'шайдуллов':2,\nu'капишон':2,\nu'житник':2,\nu'наврузова':2,\nu'садыкова':8,\nu'беззубова':2,\nu'кулаев':3,\nu'синопальников':3,\nu'тюленева':2,\nu'арутюнова':2,\nu'осина':2,\nu'уколов':8,\nu'бинько':2,\nu'купченко':3,\nu'долбня':2,\nu'проскурина':4,\nu'брусенин':2,\nu'харланова':2,\nu'макушин':3,\nu'ртищев':4,\nu'солоницына':2,\nu'русяев':4,\nu'спичкин':3,\nu'рогалёв':2,\nu'зибарев':2,\nu'сторчевой':2,\nu'менчиков':2,\nu'левчин':2,\nu'кирьянова':8,\nu'левчик':2,\nu'стаценко':8,\nu'маношкин':2,\nu'никитков':2,\nu'куделин':4,\nu'краморенко':2,\nu'варгатюк':2,\nu'коломин':2,\nu'шалин':2,\nu'лапшина':13,\nu'хамин':2,\nu'палкин':8,\nu'сидорович':4,\nu'хасамутдинов':2,\nu'гукасов':2,\nu'канышев':2,\nu'авдюков':2,\nu'красавин':8,\nu'степанец':2,\nu'довбыш':4,\nu'душкина':3,\nu'кузьменко':41,\nu'пастухов':15,\nu'непочатых':2,\nu'бархатов':6,\nu'синякин':2,\nu'матыцин':8,\nu'гочияев':2,\nu'поташова':2,\nu'якоб':3,\nu'рагимова':2,\nu'вострухин':4,\nu'иманов':6,\nu'капуста':3,\nu'шальнева':3,\nu'коняев':10,\nu'томшаков':2,\nu'санжаров':2,\nu'уварова':11,\nu'сухонос':2,\nu'бабчук':2,\nu'батечко':2,\nu'барабанова':5,\nu'павленков':2,\nu'добрыднев':3,\nu'акбердин':2,\nu'салькова':3,\nu'любавин':3,\nu'колотухин':2,\nu'напалков':4,\nu'макеенко':4,\nu'жогин':3,\nu'чаткин':2,\nu'левичев':5,\nu'кожинов':2,\nu'янин':11,\nu'майер':9,\nu'труфанов':11,\nu'андропов':2,\nu'цыренова':3,\nu'бабиев':2,\nu'будагов':2,\nu'дашкевич':4,\nu'бутырский':2,\nu'шитикова':2,\nu'брик':2,\nu'полуян':4,\nu'кайгородова':2,\nu'хисматуллина':3,\nu'горяшин':2,\nu'задорина':2,\nu'хиль':2,\nu'кривошеина':2,\nu'марюшко':3,\nu'поэта':2,\nu'махновский':4,\nu'ерохина':12,\nu'турусов':2,\nu'биктимеров':2,\nu'агаджанян':3,\nu'турищев':4,\nu'несмеянов':4,\nu'мирзаева':2,\nu'лощинин':3,\nu'костяев':4,\nu'коновалов':83,\nu'шигонов':2,\nu'евтушенко':27,\nu'лякина':2,\nu'жегалов':2,\nu'нестерчук':4,\nu'нагаев':8,\nu'хафизов':13,\nu'чудинова':5,\nu'колесников':93,\nu'вандышева':3,\nu'палаткина':2,\nu'балдин':6,\nu'назранов':4,\nu'чиганов':2,\nu'коломоец':3,\nu'паршков':2,\nu'швалов':2,\nu'рахманов':8,\nu'проничкин':2,\nu'сиваков':8,\nu'бариев':4,\nu'новосад':2,\nu'лепешкин':3,\nu'перов':16,\nu'клишева':2,\nu'лобжанидзе':4,\nu'истомина':8,\nu'москалев':16,\nu'янина':2,\nu'родкин':2,\nu'гатилов':6,\nu'дурандин':2,\nu'звягинцев':16,\nu'тарасенок':3,\nu'грузинов':4,\nu'селютин':4,\nu'малько':4,\nu'хмыров':3,\nu'зелинская':2,\nu'кощенко':2,\nu'ярушина':3,\nu'пудиков':3,\nu'стойко':2,\nu'дик':4,\nu'корепин':5,\nu'мелихов':12,\nu'останин':9,\nu'андрюнина':2,\nu'сергадеев':2,\nu'свинцова':2,\nu'кустарев':2,\nu'онуфриев':2,\nu'драницын':2,\nu'зеленко':6,\nu'коню
хов':5,\nu'грабчак':2,\nu'дулькин':2,\nu'лунин':9,\nu'новрузов':4,\nu'панкевич':2,\nu'набока':4,\nu'харлов':3,\nu'бажин':4,\nu'пазынич':2,\nu'рытиков':3,\nu'сенюк':3,\nu'климкин':4,\nu'щетинкин':2,\nu'пасечников':2,\nu'гарибян':2,\nu'баторов':4,\nu'храменков':3,\nu'зюлин':2,\nu'сысенко':2,\nu'ветохин':2,\nu'серых':2,\nu'костюшина':2,\nu'снегирева':7,\nu'беднов':4,\nu'донич':2,\nu'климанов':8,\nu'пилипенко':26,\nu'финько':3,\nu'великородов':2,\nu'гольц':3,\nu'самофалов':3,\nu'засов':3,\nu'панченко':31,\nu'куленков':2,\nu'удачин':3,\nu'касилов':2,\nu'мануилова':4,\nu'линёв':2,\nu'фарафонова':6,\nu'талышев':2,\nu'лазукин':3,\nu'морозов':161,\nu'прошутинская':2,\nu'вертебный':2,\nu'хайрутдинов':7,\nu'порядина':2,\nu'коротов':4,\nu'меликов':6,\nu'пашенцев':3,\nu'лихачев':20,\nu'бутова':2,\nu'питкевич':2,\nu'козловский':23,\nu'набиуллина':3,\nu'богородская':2,\nu'ставила':2,\nu'сапегин':3,\nu'глозман':2,\nu'сичевский':2,\nu'кулова':2,\nu'петрыкин':2,\nu'липатов':15,\nu'фабер':2,\nu'афанасьев':82,\nu'самарский':2,\nu'чернавский':4,\nu'иконников':8,\nu'найденов':11,\nu'манушин':4,\nu'мазепов':3,\nu'салов':10,\nu'арешин':2,\nu'скомороха':2,\nu'шамхалов':7,\nu'крачун':2,\nu'трошин':18,\nu'ланин':8,\nu'калитин':4,\nu'денисевич':3,\nu'сушенцов':2,\nu'кулев':4,\nu'басыров':3,\nu'шамрай':2,\nu'туликов':2,\nu'латышев':12,\nu'санин':2,\nu'котив':2,\nu'львов':13,\nu'машкин':11,\nu'котин':4,\nu'котик':2,\nu'гунченко':3,\nu'ольховик':5,\nu'умеренков':4,\nu'нилова':5,\nu'пучнин':3,\nu'быстров':22,\nu'узлов':2,\nu'гасанов':31,\nu'ковин':5,\nu'никольский':17,\nu'головач':9,\nu'федун':2,\nu'турутин':2,\nu'рыбина':5,\nu'сытник':5,\nu'долбиков':3,\nu'ситов':2,\nu'раткевич':3,\nu'порубай':2,\nu'немыкин':2,\nu'федорищев':3,\nu'бунятов':2,\nu'кузьменок':2,\nu'ганичева':3,\nu'фролов':119,\nu'эйсмонт':2,\nu'дашичев':2,\nu'полубояринов':3,\nu'канивец':3,\nu'алферов':8,\nu'жуковец':2,\nu'бражникова':4,\nu'джумаев':2,\nu'капитонов':8,\nu'заботина':2,\nu'параничев':2,\nu'часовитин':2,\nu'святкин':2,\nu'смола':2,\nu'сывук':2,\nu'петряев':5,\nu'булычев':7,\nu'ипполитов':4,\nu'шатун':2,\nu'булдин':2,\nu'бибяков':2,\nu'никулина':12,\nu'каданцев':3,\nu'парфирьев':3,\nu'бутаков':13,\nu'журавский':3,\nu'михайлова':94,\nu'лаухин':3,\nu'бессмертный':5,\nu'мишукова':4,\nu'раднаев':2,\nu'юрова':6,\nu'шматченко':3,\nu'шахов':15,\nu'дуров':3,\nu'гондаревский':2,\nu'лесов':2,\nu'островская':8,\nu'мироедов':2,\nu'назарьев':5,\nu'луговой':11,\nu'басков':2,\nu'кушнарева':6,\nu'кулибаба':3,\nu'линенко':2,\nu'падалко':2,\nu'владыка':2,\nu'краковский':2,\nu'мурзин':15,\nu'недорезов':3,\nu'егошина':4,\nu'баркалов':3,\nu'вшивков':3,\nu'багрянцев':4,\nu'монако':2,\nu'макарченко':2,\nu'шибалко':2,\nu'мельниченко':19,\nu'степичев':3,\nu'помазкин':3,\nu'тулин':2,\nu'хрипков':3,\nu'савватеева':3,\nu'маньшин':3,\nu'стасюк':9,\nu'синьков':2,\nu'гришанина':2,\nu'гаглоев':5,\nu'куксина':2,\nu'ерёмкин':2,\nu'чудаев':2,\nu'корсков':2,\nu'сырцева':2,\nu'теплов':6,\nu'копытова':3,\nu'иермонахова':2,\nu'таймасханов':6,\nu'шадрин':26,\nu'гарифуллин':7,\nu'казибеков':2,\nu'таратынко':2,\nu'антонова':61,\nu'гончар':14,\nu'худин':2,\nu'беляевская':2,\nu'лепихин':2,\nu'толстиков':9,\nu'кононович':2,\nu'вахруков':2,\nu'лексин':3,\nu'яговкин':2,\nu'мамонов':17,\nu'ишмуратова':2,\nu'сибирева':2,\nu'сеикаев':2,\nu'богатырева':10,\nu'зинчук':4,\nu'потеряев':3,\nu'дергачев':10,\nu'шелехов':2,\nu'макарчук':6,\nu'газимагомедов':2,\nu'иноземцева':3,\nu'сурайкин':2,\nu'сочнева':2,\nu'поротиков':2,\nu'каретников':3,\nu'кобелева':3,\nu'финогенова':4,\nu'васюк':2,\nu'карачаев'
:3,\nu'левченко':47,\nu'кабакова':3,\nu'гусаров':14,\nu'гайнутдинов':5,\nu'потопальский':2,\nu'дрожжина':3,\nu'демчук':3,\nu'ларичев':4,\nu'судницына':3,\nu'фалалеева':2,\nu'адушкин':3,\nu'драницына':2,\nu'кротенко':3,\nu'аулов':3,\nu'рябко':6,\nu'салеев':3,\nu'кокоев':6,\nu'олешко':3,\nu'куницин':2,\nu'томиленко':4,\nu'клокова':2,\nu'назарчук':3,\nu'тришина':4,\nu'морозкин':6,\nu'тураева':2,\nu'онищук':10,\nu'леонидова':2,\nu'кусинова':2,\nu'ведерникова':7,\nu'сотников':23,\nu'введенская':4,\nu'маршалкин':2,\nu'короткова':19,\nu'жирков':8,\nu'поддуев':2,\nu'садиков':3,\nu'красноперова':4,\nu'адамский':3,\nu'юхно':4,\nu'бублик':5,\nu'мерзлякова':8,\nu'мизгир':2,\nu'дудоров':7,\nu'кузнецова':162,\nu'берков':2,\nu'неборачко':2,\nu'дубровская':5,\nu'касабиев':2,\nu'мазанов':9,\nu'недяк':2,\nu'крапивин':3,\nu'канатов':2,\nu'сидарчук':2,\nu'козубова':2,\nu'зимонин':2,\nu'белавин':2,\nu'оспанов':2,\nu'войнова':4,\nu'сигарева':2,\nu'фунтусов':2,\nu'сенчихин':2,\nu'сатдинова':2,\nu'слепцов':12,\nu'глушакова':2,\nu'числов':2,\nu'ивченко':16,\nu'мутин':3,\nu'постников':17,\nu'зарембо':3,\nu'вахрин':2,\nu'путин':5,\nu'жиделева':2,\nu'чижов':21,\nu'бордовский':2,\nu'теньков':2,\nu'суханкин':2,\nu'халатян':2,\nu'борзов':7,\nu'дорошев':5,\nu'раимов':2,\nu'меер':2,\nu'малазония':2,\nu'субботина':12,\nu'самусенко':6,\nu'дандамаев':2,\nu'фаткуллин':2,\nu'хмиль':2,\nu'устюшенкова':2,\nu'атрощенко':3,\nu'задков':3,\nu'стриженкова':2,\nu'ласточкина':2,\nu'лойко':6,\nu'кара-оол':4,\nu'вожов':2,\nu'кокарев':6,\nu'огарков':5,\nu'шугалей':2,\nu'баглай':4,\nu'шовгеня':2,\nu'промыслова':2,\nu'жовнер':2,\nu'копылова':19,\nu'кудрявцев':58,\nu'купчин':2,\nu'маков':5,\nu'романчук':7,\nu'шалатов':2,\nu'аполлонова':2,\nu'тучин':5,\nu'якушева':7,\nu'сердечный':2,\nu'конушкин':2,\nu'семенчин':3,\nu'железняк':9,\nu'горобчук':2,\nu'чаркин':2,\nu'маркович':5,\nu'рукавишников':9,\nu'ермаков':54,\nu'орус-оол':2,\nu'прилуцкий':3,\nu'шагун':2,\nu'никора':3,\nu'бесштанько':2,\nu'ясенев':2,\nu'тихомирова':31,\nu'гуришкин':2,\nu'чмиль':2,\nu'рязанова':7,\nu'ярмоленко':2,\nu'дьякова':8,\nu'козаев':3,\nu'павленко':48,\nu'пупко':2,\nu'нафиев':2,\nu'коломиец':23,\nu'герцев':3,\nu'яворский':10,\nu'герцен':3,\nu'турта':3,\nu'батаков':2,\nu'дегтев':5,\nu'штатнов':2,\nu'вершинин':29,\nu'аюпов':8,\nu'хабарова':3,\nu'судоргин':5,\nu'лившиц':3,\nu'рыжко':3,\nu'николаевский':2,\nu'манухин':3,\nu'свистельников':2,\nu'прибыш':2,\nu'залилов':2,\nu'колыванова':2,\nu'белокобыльский':6,\nu'гречишников':3,\nu'слуцкий':7,\nu'заварин':2,\nu'лобачев':14,\nu'муркин':2,\nu'молодцова':4,\nu'протасова':3,\nu'разин':11,\nu'соломко':5,\nu'хорват':2,\nu'комков':8,\nu'щеткин':2,\nu'вязников':3,\nu'гутник':4,\nu'чекушина':2,\nu'барыкина':3,\nu'горшенина':2,\nu'хамазин':2,\nu'рябкин':2,\nu'ногин':5,\nu'карпунин':7,\nu'миронова':39,\nu'слинкин':2,\nu'белоцерковская':2,\nu'мотовилов':5,\nu'корякин':11,\nu'малкин':10,\nu'баранов':110,\nu'фесенко':15,\nu'балюк':4,\nu'алуев':2,\nu'запольский':4,\nu'медяный':2,\nu'буханов':3,\nu'государев':2,\nu'михолап':3,\nu'цеков':2,\nu'кобзарев':4,\nu'ханмурзаев':2,\nu'портных':2,\nu'байнов':2,\nu'корнетова':3,\nu'гетманский':3,\nu'цыганенко':2,\nu'деревянко':11,\nu'акиньшин':2,\nu'климачева':2,\nu'марценюк':5,\nu'губанова':8,\nu'зубарева':7,\nu'алейников':8,\nu'ефременков':4,\nu'деменков':4,\nu'ус':3,\nu'климовская':2,\nu'анцупов':2,\nu'кабулова':3,\nu'ташлык':2,\nu'мастерова':2,\nu'хардина':2,\nu'быстрякова':2,\nu'солдатенко':6,\nu'безрученко':3,\nu'дедов':11,\nu'красноперов':5,\nu'жаравин':4,\nu'веремеенко':3,\nu'стефановская':3,\
nu'мотов':2,\nu'мирчук':2,\nu'бусоргин':2,\nu'асмыкович':2,\nu'васина':15,\nu'нигматзянов':5,\nu'смагина':4,\nu'мусатова':4,\nu'щёголев':4,\nu'кротов':20,\nu'помигуев':2,\nu'шафиков':2,\nu'хорев':7,\nu'воронова':27,\nu'душина':2,\nu'мотрич':2,\nu'рогожин':12,\nu'юрина':2,\nu'габриелян':2,\nu'старостин':20,\nu'мадеев':2,\nu'брауэр':2,\nu'бахтин':11,\nu'черкунов':3,\nu'зубкова':16,\nu'кривошеева':2,\nu'вавилов':13,\nu'локутов':2,\nu'душков':2,\nu'гордюшов':2,\nu'клепиков':10,\nu'шамаева':5,\nu'варчук':2,\nu'абдулкадыров':2,\nu'лукьяненко':14,\nu'татевосян':2,\nu'вилков':13,\nu'блинников':3,\nu'логвиненко':8,\nu'щекочихин':3,\nu'гахов':2,\nu'остриков':7,\nu'штром':2,\nu'батракова':2,\nu'ходырева':2,\nu'румынина':2,\nu'медведев':113,\nu'шуканов':2,\nu'чумак':15,\nu'потешкин':3,\nu'самарханов':2,\nu'скорикова':3,\nu'сакиркин':2,\nu'дудочкин':2,\nu'гайков':2,\nu'нуждина':4,\nu'яркин':7,\nu'глухов':32,\nu'залогин':2,\nu'диченсков':2,\nu'никифоров':64,\nu'смыслов':4,\nu'шалабаев':2,\nu'якупова':4,\nu'чурсин':10,\nu'болтунов':3,\nu'довганюк':2,\nu'шишмарев':4,\nu'камышников':2,\nu'гладков':17,\nu'русанов':14,\nu'рочев':3,\nu'добровольская':8,\nu'плешкань':2,\nu'дорогонов':2,\nu'земляной':4,\nu'шушков':5,\nu'гольдберг':3,\nu'жаданов':2,\nu'беликова':14,\nu'бреус':7,\nu'ракитин':12,\nu'дурманов':2,\nu'щепетов':2,\nu'коровиков':2,\nu'задворная':2,\nu'крыканов':2,\nu'зезюлин':4,\nu'зданевич':4,\nu'стародубов':5,\nu'лощенов':2,\nu'гусаев':2,\nu'кабаева':3,\nu'бардин':4,\nu'алмакаев':3,\nu'павлюкова':4,\nu'кружков':2,\nu'мосолов':5,\nu'янович':2,\nu'чистилин':2,\nu'орловский':4,\nu'ананченко':2,\nu'погарский':2,\nu'гузун':3,\nu'дегтерев':7,\nu'шатских':4,\nu'баишев':3,\nu'оганов':3,\nu'быкадоров':7,\nu'манилова':2,\nu'фирсов':30,\nu'каретина':2,\nu'якушкина':3,\nu'стененко':2,\nu'аниченков':2,\nu'гриненко':11,\nu'калюта':3,\nu'чибизов':3,\nu'петровский':13,\nu'газин':2,\nu'перепелова':2,\nu'шишин':3,\nu'гаязов':5,\nu'ларьков':2,\nu'шибанов':4,\nu'фахриев':2,\nu'вакар':2,\nu'грудцын':2,\nu'булавкина':2,\nu'семикоз':2,\nu'сульдин':2,\nu'тихненко':2,\nu'нестеркин':2,\nu'берсенев':3,\nu'лыхин':2,\nu'скоробогатько':4,\nu'краузе':2,\nu'перепелкин':4,\nu'антипова':10,\nu'вахрамеев':5,\nu'шабанов':29,\nu'горшкова':13,\nu'хрипунова':2,\nu'полтавец':3,\nu'рузавин':2,\nu'лобиков':2,\nu'чертов':5,\nu'свирко':2,\nu'кошкин':23,\nu'тараненко':15,\nu'костюк':14,\nu'кунин':2,\nu'хижняк':12,\nu'цыбулина':3,\nu'кортунова':2,\nu'дедюхин':2,\nu'фарзалиева':2,\nu'кучинский':7,\nu'курова':3,\nu'ромашин':4,\nu'нимаева':2,\nu'бережнов':5,\nu'кульков':8,\nu'корсаков':8,\nu'рослова':3,\nu'лаврухина':2,\nu'притворов':2,\nu'харланов':2,\nu'гуржий':2,\nu'медведюк':2,\nu'сосов':5,\nu'перепелицын':2,\nu'мандыч':2,\nu'кочнов':4,\nu'талалуев':2,\nu'слободян':4,\nu'мищенков':2,\nu'малеванный':2,\nu'гриднев':9,\nu'достай':2,\nu'цветова':3,\nu'милехина':2,\nu'суслов':36,\nu'шатрова':6,\nu'рубцова':13,\nu'лобода':13,\nu'знаменская':3,\nu'свинарев':3,\nu'вакульчик':2,\nu'кабанцева':2,\nu'чеботаревская':2,\nu'голышев':5,\nu'павлушкин':3,\nu'прядко':5,\nu'хайбуллин':3,\nu'муртазов':2,\nu'манохин':7,\nu'чашникова':2,\nu'лынник':2,\nu'плющ':6,\nu'туровский':3,\nu'резванов':2,\nu'козубенко':2,\nu'ларцева':2,\nu'медовиков':2,\nu'сабурова':6,\nu'кардакова':2,\nu'толстобров':4,\nu'скибо':2,\nu'марьин':6,\nu'кальницкая':2,\nu'гайзатуллин':2,\nu'носов':34,\nu'каграманян':2,\nu'рощупкин':11,\nu'бутина':6,\nu'богданчиков':2,\nu'миляева':4,\nu'шумкова':2,\nu'чибисов':7,\nu'гафиятуллин':2,\nu'крылова':40,\nu'немков':5,\nu'щербатых':6,\nu'ахременко':2,\nu'иг
онин':5,\nu'соколова':109,\nu'трофименко':14,\nu'антонов':92,\nu'халецкая':2,\nu'кутенков':2,\nu'саков':3,\nu'нагорных':3,\nu'суконкин':3,\nu'карманов':9,\nu'франчук':4,\nu'жигульская':2,\nu'пунтус':3,\nu'кугушева':2,\nu'всеволодова':2,\nu'пузанов':3,\nu'шершакова':2,\nu'фокин':31,\nu'лихошерстов':2,\nu'чуклов':2,\nu'абашев':5,\nu'липовой':4,\nu'мазайкин':3,\nu'галимова':4,\nu'сажина':4,\nu'дронова':7,\nu'кочергин':12,\nu'кулигина':2,\nu'карташов':19,\nu'русакова':11,\nu'иванашкин':2,\nu'поздеев':16,\nu'негода':3,\nu'сухорукова':13,\nu'гусарова':10,\nu'залуцкий':2,\nu'бастрыкин':4,\nu'грекова':5,\nu'анисимов':55,\nu'егоркина':2,\nu'фарзалиев':2,\nu'зуйкова':2,\nu'сауть':2,\nu'ряднов':3,\nu'мутовкина':2,\nu'душутин':3,\nu'дударева':3,\nu'вересов':2,\nu'хилов':3,\nu'галаева':2,\nu'спешилова':2,\nu'желтухин':4,\nu'мезенцев':10,\nu'мирошкин':9,\nu'крупенин':2,\nu'наролин':2,\nu'малюга':3,\nu'миронец':2,\nu'милентьева':2,\nu'ельсуков':2,\nu'лунев':21,\nu'зубаков':2,\nu'кузьмин':115,\nu'пушин':5,\nu'зобнин':6,\nu'недошивина':2,\nu'строганов':6,\nu'курганов':5,\nu'шутова':9,\nu'турапин':4,\nu'кривоносова':2,\nu'орлова':86,\nu'молчанов':42,\nu'хохлачев':3,\nu'колбасов':3,\nu'буравцов':2,\nu'егупов':4,\nu'мишанов':2,\nu'санкин':2,\nu'балаев':10,\nu'горбова':2,\nu'карев':14,\nu'разживин':5,\nu'кудашов':3,\nu'прудникова':9,\nu'табаченко':2,\nu'нестерова':27,\nu'залозный':5,\nu'гридюшко':2,\nu'овсепян':3,\nu'тухтаметов':2,\nu'рагозин':4,\nu'филяев':2,\nu'комзолов':2,\nu'багаутдинов':4,\nu'мокроусова':2,\nu'крупин':13,\nu'безроднов':2,\nu'широкий':4,\nu'землянухин':7,\nu'юшкова':5,\nu'малашенков':2,\nu'двоеглазов':2,\nu'багликов':2,\nu'пеков':4,\nu'бумагина':2,\nu'дубовец':3,\nu'тюлев':2,\nu'макаренкова':3,\nu'бычкова':18,\nu'дульнев':2,\nu'лузянин':4,\nu'якубовская':4,\nu'шабельников':2,\nu'гусов':2,\nu'жданов':44,\nu'томилина':2,\nu'высоченко':2,\nu'моргун':7,\nu'пикулин':3,\nu'кувалдин':8,\nu'неровный':4,\nu'шаповалов':34,\nu'горшунов':4,\nu'вахитова':4,\nu'лопатко':3,\nu'бесова':2,\nu'кебеков':2,\nu'саленко':2,\nu'глухаркин':2,\nu'скоков':11,\nu'скороходов':5,\nu'хомутов':6,\nu'дудров':2,\nu'катунин':3,\nu'трошкин':4,\nu'курилов':10,\nu'астахова':16,\nu'манчук':3,\nu'балакина':2,\nu'кузьминский':2,\nu'селицкий':3,\nu'галянин':2,\nu'зоря':5,\nu'рудая':2,\nu'астафьев':18,\nu'янюк':2,\nu'перевалов':8,\nu'терпугов':2,\nu'ежов':15,\nu'хасиев':4,\nu'шукшин':7,\nu'тропникова':3,\nu'ромашова':3,\nu'бабушкина':7,\nu'игнаткин':3,\nu'воистинов':2,\nu'советов':3,\nu'конаков':3,\nu'клочко':6,\nu'караулов':3,\nu'баринова':18,\nu'бакова':2,\nu'стенин':3,\nu'шахназаров':3,\nu'анищенко':17,\nu'черкашин':16,\nu'сухоручкин':4,\nu'бровкова':2,\nu'акопян':13,\nu'терешкова':5,\nu'кособрюхов':2,\nu'сеничев':3,\nu'ярош':10,\nu'бажан':6,\nu'малютина':4,\nu'костромина':3,\nu'маслаков':4,\nu'гилета':2,\nu'казак':9,\nu'мацюк':3,\nu'волосюк':4,\nu'завгородняя':3,\nu'носарев':4,\nu'коняева':4,\nu'ермилова':7,\nu'болдырев':33,\nu'журова':4,\nu'духанин':3,\nu'кривенков':3,\nu'куркина':10,\nu'гайворонский':6,\nu'вологжина':2,\nu'алябьев':8,\nu'менделеев':2,\nu'шмунк':2,\nu'щур':7,\nu'галсанов':3,\nu'миронович':5,\nu'гаврин':2,\nu'бучина':2,\nu'чуров':2,\nu'ржевский':2,\nu'бунаков':2,\nu'иноценко':2,\nu'хрусталев':7,\nu'абдусаламов':2,\nu'кусакин':2,\nu'свириденко':10,\nu'пилипчик':2,\nu'маранин':2,\nu'саврасов':2,\nu'янов':5,\nu'бердникова':11,\nu'шабаров':4,\nu'лемко':2,\nu'шарапов':22,\nu'дербин':3,\nu'ширяев':25,\nu'билоус':2,\nu'смехов':3,\nu'хайдаров':3,\nu'гирчев':2,\nu'маханько':2,\nu'прокопьева':8,\nu'власов':84,\nu'лисицын
а':5,\nu'ядыкин':2,\nu'салмин':6,\nu'климонтов':3,\nu'мишина':9,\nu'ницевич':2,\nu'валишин':3,\nu'баженов':22,\nu'захаренко':10,\nu'шеменев':2,\nu'картошкин':2,\nu'угрюмова':2,\nu'ленец':2,\nu'скородумов':4,\nu'камаева':5,\nu'криворот':3,\nu'волнухина':2,\nu'пепеляев':4,\nu'феоктистов':10,\nu'количенко':2,\nu'миргородский':3,\nu'лысаков':2,\nu'казацкий':2,\nu'кобенок':2,\nu'гореликов':2,\nu'герасимов':71,\nu'нистор':2,\nu'сухинина':3,\nu'вдовиченко':9,\nu'ковалюк':3,\nu'бывальцев':2,\nu'заболотный':8,\nu'николенко':9,\nu'синенко':7,\nu'говорова':3,\nu'моталин':2,\nu'парфёнов':2,\nu'осин':8,\nu'лукошков':2,\nu'жуковская':4,\nu'магдеев':2,\nu'шишкина':17,\nu'шубакин':2,\nu'камышов':3,\nu'кобзарь':6,\nu'алексенко':3,\nu'евсикова':3,\nu'кононова':12,\nu'калитина':2,\nu'бедин':2,\nu'буренков':4,\nu'безверхов':2,\nu'ростовщикова':2,\nu'аппакова':2,\nu'богомазова':6,\nu'чекотова':2,\nu'расторгуева':4,\nu'смоляк':4,\nu'дузь':3,\nu'веселова':10,\nu'носиков':2,\nu'скрипников':6,\nu'шуленин':3,\nu'тлисов':2,\nu'садкова':2,\nu'кувалдина':3,\nu'тяпкин':3,\nu'алпеев':3,\nu'ракитина':2,\nu'рогаткин':2,\nu'рудакова':6,\nu'зарецкая':6,\nu'шелег':3,\nu'чеботов':2,\nu'балуев':6,\nu'секацкий':2,\nu'соснина':8,\nu'москалькова':3,\nu'бугай':4,\nu'матюшин':4,\nu'щеблыкин':2,\nu'зятев':2,\nu'скороспелова':2,\nu'рекин':2,\nu'дорошков':3,\nu'абаев':3,\nu'рыбалка':2,\nu'панкин':8,\nu'ворончук':2,\nu'ельцов':6,\nu'сорочкин':2,\nu'старотиторов':2,\nu'раева':3,\nu'бурякова':3,\nu'мокеева':3,\nu'федосенков':2,\nu'маньковский':2,\nu'балакаева':2,\nu'игнатович':7,\nu'карачевцев':3,\nu'парфенова':13,\nu'голов':7,\nu'голод':4,\nu'калыгин':2,\nu'говорухина':2,\nu'ищенко':39,\nu'киреенко':2,\nu'бакеев':2,\nu'бобошко':2,\nu'климов':65,\nu'пономарева':29,\nu'глебова':5,\nu'шклярик':2,\nu'лаврова':12,\nu'подолинский':2,\nu'рудаков':27,\nu'козлюк':2,\nu'шапкин':6,\nu'швейкин':2,\nu'геращенко':7,\nu'суходоев':2,\nu'музалевский':3,\nu'каримов':18,\nu'недилько':2,\nu'рукин':6,\nu'сидельников':19,\nu'калугин':28,\nu'шаромов':3,\nu'журбенко':5,\nu'идрисов':11,\nu'шмырин':2,\nu'куракин':9,\nu'попов':270,\nu'черемных':8,\nu'степулев':2,\nu'лёвкин':3,\nu'сдвижков':2,\nu'лескин':2,\nu'долженкова':3,\nu'мижарев':2,\nu'зеленухин':2,\nu'бабайцев':2,\nu'даржа��':2,\nu'примак':5,\nu'глузман':2,\nu'воскресенсков':2,\nu'каширина':8,\nu'митусов':2,\nu'быченко':3,\nu'окулова':2,\nu'магденко':2,\nu'зубрин':2,\nu'мариничев':3,\nu'хакова':2,\nu'шпак':9,\nu'мугинов':3,\nu'жаркова':9,\nu'машевский':2,\nu'греков':9,\nu'аршинов':2,\nu'танчук':2,\nu'мажирин':2,\nu'ляпкин':2,\nu'макашов':2,\nu'ломакина':16,\nu'гребенщикова':7,\nu'сажнов':3,\nu'маев':2,\nu'батченко':2,\nu'мыслен':2,\nu'пронченко':4,\nu'аксёнов':8,\nu'ишкильдин':2,\nu'лотарева':2,\nu'кострыкин':2,\nu'куршев':2,\nu'великанов':9,\nu'кирилкин':2,\nu'лысых':6,\nu'пигарева':2,\nu'крючкова':12,\nu'ефремов':62,\nu'базыкин':2,\nu'нагибин':4,\nu'шамсутдинова':4,\nu'черницов':2,\nu'горюхов':2,\nu'шейнина':2,\nu'скачкова':7,\nu'саунин':2,\nu'закирова':6,\nu'заярко':2,\nu'коношенко':2,\nu'карнаухов':15,\nu'каракулова':3,\nu'терехин':16,\nu'меркулов':29,\nu'мудров':3,\nu'тарабрин':7,\nu'рюмшин':4,\nu'чернова':29,\nu'беленков':5,\nu'земцова':3,\nu'просвиров':3,\nu'арчаков':5,\nu'гусаков':16,\nu'брагин':20,\nu'извекова':2,\nu'колоколова':2,\nu'навоев':2,\nu'кирнос':2,\nu'тропин':3,\nu'дереза':2,\nu'гришин':68,\nu'щиров':2,\nu'сулименко':2,\nu'немтина':2,\nu'растворцев':2,\nu'кувардин':2,\nu'ивашкин':5,\nu'московцев':3,\nu'коротышев':2,\nu'даргель':2,\nu'прохоренков':4,\nu'солодов':9,\nu'буренин':2,\nu'богат
ов':6,\nu'сухорученков':2,\nu'усанова':2,\nu'чуйков':6,\nu'парамонова':12,\nu'турбовец':2,\nu'корнилов':21,\nu'кудряшова':23,\nu'недвигин':2,\nu'кель':2,\nu'куликова':38,\nu'загаевский':2,\nu'юдинцева':3,\nu'курчин':2,\nu'качурин':3,\nu'шапиро':3,\nu'цвелев':2,\nu'богапов':3,\nu'куров':6,\nu'георгиевский':2,\nu'чумаков':32,\nu'есин':13,\nu'рязанов':24,\nu'ледовской':3,\nu'кабаков':5,\nu'ваулин':4,\nu'бойцов':12,\nu'ладонкин':3,\nu'барков':8,\nu'малявко':3,\nu'дутова':3,\nu'дулин':4,\nu'кобылин':3,\nu'фофанов':4,\nu'падалка':2,\nu'тремасов':2,\nu'бочаров':32,\nu'грызлов':3,\nu'кочмарев':2,\nu'шушкова':2,\nu'бородулин':11,\nu'волобуев':12,\nu'сопова':3,\nu'костенко':39,\nu'цивилев':3,\nu'станковская':2,\nu'карбаинов':2,\nu'лукашова':3,\nu'андронова':5,\nu'коромыслова':3,\nu'маслякова':2,\nu'сачук':3,\nu'краснюков':3,\nu'молоткова':4,\nu'боярская':3,\nu'чуянов':2,\nu'мацепуро':4,\nu'глазкова':7,\nu'юнусова':4,\nu'довженко':2,\nu'кухта':3,\nu'кухто':2,\nu'валуева':3,\nu'колотов':2,\nu'зыкова':13,\nu'плотник':5,\nu'лобов':15,\nu'илюхин':13,\nu'серебрякова':14,\nu'узденов':2,\nu'ибатуллин':4,\nu'забровский':2,\nu'хонин':3,\nu'кулемин':3,\nu'алещенко':4,\nu'головкин':14,\nu'богук':2,\nu'барабашов':2,\nu'сыч':5,\nu'воробец':4,\nu'буханова':2,\nu'дикунов':2,\nu'немолякин':2,\nu'шибаева':3,\nu'скляров':14,\nu'курочкин':33,\nu'клопова':2,\nu'заболотский':4,\nu'ножкин':3,\nu'сагитов':7,\nu'чебан':4,\nu'курган':6,\nu'купцова':6,\nu'булдакова':2,\nu'хайров':3,\nu'бурыкин':6,\nu'чебаков':2,\nu'фалин':2,\nu'гнутов':3,\nu'ионова':5,\nu'смородинская':2,\nu'писарюк':2,\nu'панюшин':2,\nu'щетников':2,\nu'легков':4,\nu'макарихин':5,\nu'балабаева':2,\nu'гусейнов':19,\nu'шпорт':3,\nu'уралова':2,\nu'кокорева':2,\nu'заварзина':2,\nu'акульшина':2,\nu'терешкина':2,\nu'яганова':2,\nu'сухомлинов':3,\nu'шутько':3,\nu'нефедов':29,\nu'марина':4,\nu'путилов':2,\nu'позняк':3,\nu'хижинский':2,\nu'воробьев':127,\nu'мосеев':4,\nu'текутьев':2,\nu'мнацаканян':2,\nu'тагиров':8,\nu'ивашкина':4,\nu'ковров':5,\nu'малащенко':5,\nu'селезнева':23,\nu'славнова':3,\nu'ромашкина':2,\nu'дрейман':2,\nu'котенков':3,\nu'посадский':2,\nu'таранин':3,\nu'некоз':3,\nu'бабко':5,\nu'тараник':2,\nu'слесарев':7,\nu'мостовщиков':4,\nu'белина':2,\nu'акулич':7,\nu'сидорчук':6,\nu'шараев':2,\nu'корж':8,\nu'трефилов':9,\nu'кунов':2,\nu'телятников':4,\nu'зенцова':2,\nu'кармазин':2,\nu'абузяров':5,\nu'малахов':26,\nu'бочарникова':2,\nu'понизов':2,\nu'чернавин':4,\nu'бельцева':2,\nu'максимцев':2,\nu'колинько':2,\nu'ворона':4,\nu'шаповалова':13,\nu'бенцлер':2,\nu'веснина':6,\nu'доброскок':2,\nu'сайфулин':2,\nu'сухоруков':20,\nu'калашник':11,\nu'муравьёв':2,\nu'чапаев':2,\nu'шабунина':3,\nu'староверова':4,\nu'крицук':2,\nu'собенин':2,\nu'стародубцев':16,\nu'машкина':3,\nu'еремина':16,\nu'петух':3,\nu'клюжев':3,\nu'капцов':3,\nu'либин':2,\nu'сарыглар':2,\nu'петерсон':2,\nu'кумарова':2,\nu'протопопов':9,\nu'лекарев':4,\nu'голубниченко':2,\nu'шошина':3,\nu'горнов':3,\nu'федянин':5,\nu'куркин':8,\nu'локтева':4,\nu'лукманов':2,\nu'резников':14,\nu'скориков':6,\nu'позднякова':18,\nu'аносова':8,\nu'латушкина':2,\nu'рябинина':9,\nu'сокаев':2,\nu'алюшев':2,\nu'бухтояров':5,\nu'фурсенко':2,\nu'хомушку':2,\nu'толмачев':21,\nu'кондрашова':12,\nu'конько':2,\nu'пермяков':22,\nu'гранкин':8,\nu'ултургашев':2,\nu'гарькуша':2,\nu'слепухин':5,\nu'сеньков':6,\nu'мурашко':6,\nu'аткарский':2,\nu'гербер':3,\nu'карабасов':2,\nu'саломатина':3,\nu'беликов':27,\nu'половинко':5,\nu'половинка':2,\nu'гридин':5,\nu'луцков':2,\nu'багрий':3,\nu'кашников':4,\nu'тонкачев':2,\nu'эбзеев':3,\nu'кок
ленков':2,\nu'лучников':4,\nu'бухарова':2,\nu'опутин':3,\nu'натсак':2,\nu'левицкий':11,\nu'комиссарова':14,\nu'чулкова':2,\nu'путинцев':5,\nu'кошилев':2,\nu'чистова':3,\nu'олькова':3,\nu'воевода':3,\nu'осадчая':8,\nu'лялин':5,\nu'калиш':2,\nu'кирсанов':11,\nu'балыбердина':2,\nu'волотовский':3,\nu'плотников':55,\nu'мухаметкулова':2,\nu'магомедалиев':2,\nu'хамитов':5,\nu'волегов':2,\nu'кострюкова':2,\nu'жмурко':3,\nu'чиненов':2,\nu'тумашов':2,\nu'фирстова':4,\nu'гудзь':2,\nu'мишустин':6,\nu'костюхин':2,\nu'шанцева':2,\nu'мильков':5,\nu'поволоцкий':3,\nu'волошенюк':2,\nu'билецкий':2,\nu'седякин':2,\nu'ненашев':12,\nu'батанов':6,\nu'копасов':2,\nu'зеленов':13,\nu'михалевич':5,\nu'хлопонина':2,\nu'маненков':2,\nu'пискунов':11,\nu'трамбовецкий':2,\nu'букарев':2,\nu'самокрутов':2,\nu'белозеров':13,\nu'асадов':2,\nu'молдованов':4,\nu'сутягин':3,\nu'гаврилкин':3,\nu'натаров':3,\nu'куликовский':5,\nu'белецкий':13,\nu'виноградова':52,\nu'поличев':2,\nu'трибунский':4,\nu'волченков':2,\nu'чурашов':2,\nu'гуминский':3,\nu'ромашев':3,\nu'кузякин':3,\nu'цыбулин':3,\nu'артищев':3,\nu'алдакишкин':2,\nu'савина':22,\nu'демченко':28,\nu'ихсанов':2,\nu'турбин':6,\nu'мелихова':4,\nu'бахарева':9,\nu'рулёв':2,\nu'балык':2,\nu'назимов':2,\nu'кокоткина':2,\nu'шевляков':3,\nu'галицкий':5,\nu'пилевин':2,\nu'северов':4,\nu'кашеваров':4,\nu'ильичева':16,\nu'нистратов':3,\nu'трутнев':5,\nu'лисенков':3,\nu'агеенко':3,\nu'шамина':2,\nu'саламадина':2,\nu'колбунова':2,\nu'сурда':2,\nu'щербак':14,\nu'сгибнев':3,\nu'гараева':2,\nu'баюнов':2,\nu'мирошник':9,\nu'коптева':4,\nu'лалетина':3,\nu'милюкова':2,\nu'гузеева':4,\nu'шевелев':31,\nu'говор':6,\nu'смолев':2,\nu'ямцова':2,\nu'музалев':2,\nu'хощенко':2,\nu'кочанова':2,\nu'ивонин':2,\nu'казарян':7,\nu'куцева':2,\nu'арцыбашев':3,\nu'ярочкина':2,\nu'королькова':18,\nu'карцева':6,\nu'шуховцев':2,\nu'стельмах':6,\nu'садков':9,\nu'москвичев':5,\nu'поликанова':2,\nu'пашкевич':5,\nu'романцов':7,\nu'щерба':10,\nu'буянов':16,\nu'микова':2,\nu'кучма':2,\nu'заремба':3,\nu'куля':2,\nu'рудица':2,\nu'талалаева':2,\nu'шкурат':2,\nu'вольф':6,\nu'боталова':2,\nu'харитонович':2,\nu'мизгирев':2,\nu'кругликов':5,\nu'батыров':5,\nu'дрига':4,\nu'лопаткин':12,\nu'лудов':2,\nu'траханов':2,\nu'котелкин':3,\nu'качура':4,\nu'патраков':4,\nu'жаворонкова':3,\nu'бобровский':7,\nu'корсунский':3,\nu'шелягин':3,\nu'додов':3,\nu'мартынович':3,\nu'данилов':67,\nu'егорова':90,\nu'чечеткин':2,\nu'пудов':3,\nu'рекунов':3,\nu'щепкин':3,\nu'кумпилов':2,\nu'чаплыгина':9,\nu'ватутин':4,\nu'нагорнова':4,\nu'рахимов':8,\nu'шабельник':2,\nu'деришев':2,\nu'плескач':5,\nu'коппа':3,\nu'чухлов':2,\nu'бурьянов':3,\nu'бровцева':2,\nu'капитонова':5,\nu'бородовицын':2,\nu'оздоева':3,\nu'копылков':2,\nu'дерновой':2,\nu'самогов':2,\nu'окунь':3,\nu'олехнович':2,\nu'лепилов':3,\nu'кривоносов':10,\nu'роговой':4,\nu'крахмалев':2,\nu'шевердин':3,\nu'расулова':3,\nu'госсен':2,\nu'пащенко':17,\nu'бакшеев':3,\nu'шутенко':4,\nu'симанов':3,\nu'клещенко':3,\nu'ступакевич':2,\nu'русин':14,\nu'дубровина':8,\nu'середюк':5,\nu'агапов':22,\nu'школьник':2,\nu'григоренко':15,\nu'варганов':2,\nu'лаврикова':2,\nu'сипягин':3,\nu'ибрашева':2,\nu'холодков':4,\nu'грибков':7,\nu'обрезанов':2,\nu'домогатский':2,\nu'восканян':2,\nu'новосадов':3,\nu'сильнов':3,\nu'шокиров':4,\nu'кочуров':5,\nu'кривулько':2,\nu'ганиева':3,\nu'егорычева':4,\nu'минюрова':2,\nu'брайко':5,\nu'лунькин':2,\nu'косов':19,\nu'пикалев':2,\nu'медянцев':3,\nu'зинин':13,\nu'кондратенкова':2,\nu'прасолов':6,\nu'умнов':4,\nu'шумкин':6,\nu'ручка':2,\nu'берестовой':3,\nu'заец':4,\nu'гавриленко'
:23,\nu'винокурова':15,\nu'гостева':5,\nu'лютов':4,\nu'майданюк':2,\nu'чуканов':4,\nu'дудоладов':2,\nu'ковалева':57,\nu'пиляк':2,\nu'заболотний':5,\nu'семерня':2,\nu'пенкина':3,\nu'шатилов':7,\nu'ившина':2,\nu'корнякова':2,\nu'захарук':3,\nu'украинский':3,\nu'судавцов':2,\nu'лобанова':28,\nu'казаков':68,\nu'бутин':7,\nu'лебедьков':2,\nu'ольхов':3,\nu'новожилов':12,\nu'гацко':3,\nu'галочкина':2,\nu'закатов':2,\nu'деревцов':5,\nu'плахов':3,\nu'капитанов':4,\nu'соколович':2,\nu'добровольский':11,\nu'сумароков':8,\nu'черновалов':2,\nu'сафиуллин':9,\nu'белых':18,\nu'темникова':2,\nu'царегородцева':2,\nu'шульпин':3,\nu'панкратьев':3,\nu'ожередов':2,\nu'янченков':2,\nu'кретинин':4,\nu'комин':2,\nu'степанцова':2,\nu'кищенко':2,\nu'разумов':9,\nu'осколков':5,\nu'серкин':3,\nu'сираев':2,\nu'задворнов':2,\nu'наливайко':4,\nu'шац':2,\nu'шах':2,\nu'дроздецкий':3,\nu'вяткин':13,\nu'ряховский':3,\nu'баранова':53,\nu'клюстер':2,\nu'семенихина':6,\nu'левитский':2,\nu'булгакова':10,\nu'козырь':4,\nu'жирова':5,\nu'дерябин':8,\nu'кончина':3,\nu'сурова':2,\nu'фатыхов':4,\nu'полетаева':3,\nu'белякова':28,\nu'тунеля':2,\nu'лисица':3,\nu'неудачина':2,\nu'канюка':4,\nu'шмакотина':2,\nu'кучерук':5,\nu'матвеичев':3,\nu'эфендиева':2,\nu'брызгалов':7,\nu'онучин':2,\nu'поливанов':2,\nu'конькова':9,\nu'ожегов':2,\nu'конивец':4,\nu'рябоконь':4,\nu'такун':2,\nu'руденко':59,\nu'юрковский':3,\nu'красавина':6,\nu'акимов':42,\nu'турков':5,\nu'нальгиев':6,\nu'додобаев':2,\nu'гершгорин':2,\nu'булганин':2,\nu'мецгер':2,\nu'быков':69,\nu'мотовых':2,\nu'громыко':9,\nu'овчаренко':22,\nu'панарина':2,\nu'моргоев':2,\nu'радьков':4,\nu'литвинчук':7,\nu'бурых':5,\nu'вершков':2,\nu'цховребова':4,\nu'бардуков':2,\nu'бурнаев':2,\nu'дроздова':18,\nu'богословская':3,\nu'зяблов':3,\nu'шепелин':2,\nu'ламок':2,\nu'покатович':2,\nu'леликова':2,\nu'колобов':11,\nu'должиков':3,\nu'филоненко':14,\nu'оверко':2,\nu'логанов':2,\nu'сарапин':2,\nu'иванович':3,\nu'галат':2,\nu'степченков':3,\nu'карабашев':2,\nu'горяйнов':6,\nu'кочетков':29,\nu'говоркова':3,\nu'фурсова':4,\nu'кузин':38,\nu'цепляева':2,\nu'завалин':3,\nu'кутявин':2,\nu'пятин':3,\nu'кадырова':8,\nu'блохин':22,\nu'новик':10,\nu'самойлова':26,\nu'шамрин':2,\nu'тукаев':3,\nu'гертель':2,\nu'языков':6,\nu'ясинский':8,\nu'ошкин':2,\nu'пучко':2,\nu'крутикова':2,\nu'гапеенко':3,\nu'шлегель':2,\nu'курский':2,\nu'фуженко':2,\nu'образцова':6,\nu'лыков':17,\nu'абозин':2,\nu'ошкина':2,\nu'землякова':2,\nu'шутов':11,\nu'афанасов':6,\nu'еремин':45,\nu'плужников':8,\nu'байханов':3,\nu'косицына':2,\nu'дёмина':2,\nu'черняев':12,\nu'гнатенко':2,\nu'чайников':5,\nu'большухина':2,\nu'магомедова':9,\nu'можейко':3,\nu'ахметов':24,\nu'шекунова':2,\nu'саушин':3,\nu'кива':2,\nu'лопаткина':4,\nu'климашевский':3,\nu'глазунова':10,\nu'насуханов':2,\nu'кривогузов':2,\nu'азбукин':2,\nu'одегов':4,\nu'давлетова':5,\nu'чернышева':18,\nu'бабюк':3,\nu'канцуров':2,\nu'мязин':2,\nu'бартеньев':2,\nu'хахалев':3,\nu'зарецкий':5,\nu'береза':2,\nu'лаврушин':2,\nu'мустафаев':4,\nu'фадеева':21,\nu'саламатов':3,\nu'кущ':3,\nu'провоторова':3,\nu'криулин':4,\nu'степнов':3,\nu'култышев':2,\nu'кантемиров':4,\nu'новоселова':8,\nu'сорин':2,\nu'лисичкин':2,\nu'неделин':3,\nu'пронько':2,\nu'байкин':8,\nu'козочкин':2,\nu'роот':5,\nu'колпиков':3,\nu'чубуков':5,\nu'атаева':2,\nu'капинос':3,\nu'трапезников':12,\nu'роднина':2,\nu'назин':5,\nu'люшин':4,\nu'жихарева':3,\nu'вьюнов':4,\nu'каравашкина':3,\nu'рябишин':3,\nu'евграфова':7,\nu'сташков':2,\nu'проскуряков':8,\nu'куранда':2,\nu'волынкин':3,\nu'гринкевич':3,\nu'лозинская':2,\nu'тимошенко':36,\nu
'гурин':15,\nu'нахапетян':2,\nu'рябчинская':2,\nu'вилкова':2,\nu'чураков':7,\nu'клименок':3,\nu'артёмов':2,\nu'нургалиев':3,\nu'косоуров':2,\nu'ядрышников':2,\nu'гутников':3,\nu'осипович':3,\nu'каверин':3,\nu'писной':2,\nu'иголкин':2,\nu'репетуха':2,\nu'клевцова':5,\nu'мамбетов':3,\nu'рыбалкин':5,\nu'карелов':2,\nu'саржан':2,\nu'гимадеев':3,\nu'уманский':5,\nu'хонькин':2,\nu'скворцова':25,\nu'петропавловская':2,\nu'башкатова':2,\nu'волокитин':5,\nu'терешкин':5,\nu'крень':2,\nu'скопинцев':2,\nu'вахрамеева':4,\nu'переслегина':2,\nu'журин':2,\nu'карельский':4,\nu'мызников':3,\nu'чурина':2,\nu'перцев':5,\nu'парыгина':2,\nu'долотин':2,\nu'федорашко':2,\nu'воропаев':16,\nu'хорошев':7,\nu'егерев':2,\nu'калашникова':19,\nu'шигапов':6,\nu'верин':2,\nu'черниговский':2,\nu'шананин':2,\nu'масловский':5,\nu'исмагилов':12,\nu'чернявский':17,\nu'латыпов':15,\nu'качурина':2,\nu'ненадов':2,\nu'любас':2,\nu'сапелкин':4,\nu'садыков':12,\nu'кожаев':4,\nu'прилепов':3,\nu'христенко':8,\nu'макущенко':2,\nu'мандрыкин':4,\nu'буров':25,\nu'пятов':2,\nu'липухин':3,\nu'чайкина':2,\nu'дорожков':2,\nu'царевский':2,\nu'андрейкин':3,\nu'гулый':2,\nu'розанова':5,\nu'колбасова':2,\nu'лапшинов':2,\nu'долгалев':5,\nu'какунин':2,\nu'жигалев':2,\nu'кукаркин':2,\nu'лукьянчик':2,\nu'шерстнева':3,\nu'дерюгин':7,\nu'рау':3,\nu'малышко':4,\nu'очирова':2,\nu'просеков':3,\nu'рекун':2,\nu'бондарев':46,\nu'сбитнев':4,\nu'максаков':4,\nu'кавалеров':2,\nu'ермачков':4,\nu'федорук':3,\nu'печерских':3,\nu'погожев':3,\nu'ломтева':2,\nu'гозман':2,\nu'малышева':30,\nu'козельский':2,\nu'шеховцов':5,\nu'авсанов':2,\nu'четырин':2,\nu'трифонов':28,\nu'галь':2,\nu'бакаева':6,\nu'матюхина':3,\nu'исаев':70,\nu'луганский':3,\nu'удовиченко':4,\nu'полянских':3,\nu'горин':17,\nu'стукалов':4,\nu'грязев':4,\nu'женетль':2,\nu'арисов':2,\nu'новосельцев':13,\nu'берман':2,\nu'бобовников':2,\nu'подгорних':2,\nu'стрекозов':3,\nu'ланцман':3,\nu'фешин':3,\nu'гоголевский':2,\nu'алымов':4,\nu'шарифов':2,\nu'хренов':11,\nu'сильченко':7,\nu'насевич':2,\nu'тимонина':3,\nu'пахомова':22,\nu'шляпцев':2,\nu'погадаев':3,\nu'костюков':11,\nu'гамидов':4,\nu'цибульская':2,\nu'коршак':2,\nu'селиверстова':7,\nu'рубленко':2,\nu'гордеева':19,\nu'абаева':2,\nu'решетов':11,\nu'хорунжий':2,\nu'иванюк':6,\nu'шуйкова':2,\nu'хромцов':3,\nu'стрельников':19,\nu'найданов':4,\nu'банникова':5,\nu'алашеев':2,\nu'остудин':2,\nu'минеев':15,\nu'цуканова':3,\nu'чепиков':4,\nu'аслямов':2,\nu'заварина':3,\nu'бортников':7,\nu'саввиди':2,\nu'тутуков':2,\nu'рябых':8,\nu'костогладов':2,\nu'симон':3,\nu'лямин':10,\nu'шалимов':9,\nu'карайченцев':2,\nu'карпухина':6,\nu'цибизов':3,\nu'буйволов':3,\nu'кабатов':2,\nu'иваненко':21,\nu'литвяков':5,\nu'апанович':5,\nu'бакаев':4,\nu'ульянова':20,\nu'угаров':3,\nu'фоминова':2,\nu'доможаков':2,\nu'орёл':4,\nu'ласточкин':7,\nu'борюшкина':2,\nu'гринева':3,\nu'грабко':3,\nu'галушкина':5,\nu'гелунов':2,\nu'шуман':2,\nu'шайбулатов':2,\nu'филькин':4,\nu'шумай':2,\nu'баракова':2,\nu'горчакова':9,\nu'кобенко':3,\nu'валова':5,\nu'кузовкин':2,\nu'решетник':2,\nu'ломовцева':2,\nu'красняков':2,\nu'феськов':2,\nu'жиров':7,\nu'кобзева':7,\nu'холостова':2,\nu'антропова':7,\nu'баркинхоев':2,\nu'вавилкин':2,\nu'тарусов':3,\nu'богомолова':9,\nu'гайдученко':3,\nu'скубий':2,\nu'мальчихин':2,\nu'авилов':12,\nu'стрюк':2,\nu'дзуцева':2,\nu'пырков':7,\nu'федосов':17,\nu'сухоплюев':2,\nu'кокурина':2,\nu'бякова':2,\nu'дзуццати':2,\nu'гармаш':6,\nu'кострова':5,\nu'буйнова':3,\nu'рейзвих':2,\nu'качалин':3,\nu'юрьев':20,\nu'неугодов':2,\nu'шубина':13,\nu'каменева':4,\nu'тулякова':2,\nu'дзюбенк
о':4,\nu'самарцев':2,\nu'абабков':2,\nu'плотникова':22,\nu'гонцова':2,\nu'кохно':2,\nu'момотова':3,\nu'кадомцев':2,\nu'заяц':8,\nu'копшев':2,\nu'файзулин':6,\nu'шелухина':3,\nu'чахлов':2,\nu'пирожкова':2,\nu'лузанова':2,\nu'крижановский':2,\nu'язев':2,\nu'кутырева':2,\nu'мороз':37,\nu'фирюлин':2,\nu'баландин':11,\nu'тамарин':2,\nu'чепец':2,\nu'редькина':4,\nu'вялова':3,\nu'коригова':2,\nu'питель':3,\nu'худякова':4,\nu'синицын':20,\nu'мищерин':2,\nu'карпушин':3,\nu'сучилин':4,\nu'шепилов':4,\nu'гайфутдинова':3,\nu'беглова':2,\nu'алов':3,\nu'колодяжная':2,\nu'силич':2,\nu'дорофеева':16,\nu'митрофанов':24,\nu'бутусов':8,\nu'кирбай':2,\nu'демяшкина':2,\nu'ахметханов':2,\nu'колчин':9,\nu'яшкова':2,\nu'грицаенко':3,\nu'давлетшин':6,\nu'изилов':2,\nu'хрипун':2,\nu'рашкевич':2,\nu'железов':3,\nu'бочкарев':18,\nu'сухарников':2,\nu'нелида':2,\nu'филиппенков':2,\nu'фахритдинов':2,\nu'ажинов':2,\nu'божьева':2,\nu'корнейчук':7,\nu'кадиров':2,\nu'казбеков':2,\nu'таций':2,\nu'токарева':17,\nu'шелепов':2,\nu'дубинин':23,\nu'язенцева':2,\nu'брянцева':4,\nu'коршунов':21,\nu'горшенев':2,\nu'шулаев':2,\nu'саакян':13,\nu'ляпунов':4,\nu'бахтурин':3,\nu'маршин':2,\nu'шинкарева':3,\nu'колягин':2,\nu'конюшенко':2,\nu'демчик':2,\nu'абушаева':2,\nu'черепахин':2,\nu'бакулина':3,\nu'зима':3,\nu'сапожникова':9,\nu'водолазов':2,\nu'кусиди':2,\nu'савинков':2,\nu'карпиков':3,\nu'рамазанова':3,\nu'ляльков':2,\nu'платова':3,\nu'ляшко':5,\nu'кишмахов':2,\nu'асатрян':2,\nu'сильвестров':2,\nu'федотов':65,\nu'никитенков':3,\nu'муратова':9,\nu'зимнухова':2,\nu'сахарова':12,\nu'золотова':12,\nu'шолохова':2,\nu'китайгородский':2,\nu'янковская':7,\nu'алтынов':2,\nu'сладкова':2,\nu'пирог':3,\nu'эдгулов':2,\nu'тунденков':2,\nu'байдаров':4,\nu'менщиков':2,\nu'шмаров':2,\nu'соболева':24,\nu'шмулевич':2,\nu'махмутов':7,\nu'батуркин':2,\nu'костицын':3,\nu'огольцов':3,\nu'шефер':4,\nu'клейменов':9,\nu'хомяков':12,\nu'хорошавин':3,\nu'лукиных':2,\nu'ханхалаев':2,\nu'бушин':3,\nu'злыгостева':2,\nu'мансуров':7,\nu'семакова':2,\nu'кашаев':3,\nu'фандеев':2,\nu'владыкина':2,\nu'мисинев':2,\nu'манаенкова':4,\nu'кутуева':2,\nu'шуньков':2,\nu'старченко':6,\nu'габибов':3,\nu'епифанцев':3,\nu'бабешко':2,\nu'мингазов':7,\nu'серганов':2,\nu'карабутов':2,\nu'комиссаренко':2,\nu'паклина':2,\nu'бутырина':2,\nu'серебрянский':6,\nu'ярмухаметов':3,\nu'щетинин':10,\nu'бохан':2,\nu'мишенькин':2,\nu'репка':3,\nu'сидоренко':61,\nu'угольникова':2,\nu'гольдман':3,\nu'беспаликов':2,\nu'минченко':7,\nu'байкалов':4,\nu'калачева':8,\nu'шканов':2,\nu'редькин':15,\nu'шатова':7,\nu'скрипачев':2,\nu'рыльцов':2,\nu'гнедой':2,\nu'гнедов':2,\nu'толочко':3,\nu'кононов':39,\nu'петряшкин':2,\nu'шахбанов':4,\nu'яровой':7,\nu'кехлеров':3,\nu'бердар':2,\nu'гребеньков':4,\nu'кудинова':16,\nu'макаренко':43,\nu'владимирова':12,\nu'ипатов':11,\nu'давлетов':2,\nu'полоцкий':2,\nu'боталов':4,\nu'видякин':4,\nu'красина':2,\nu'горегляд':2,\nu'прокуронов':2,\nu'аркадьев':2,\nu'сапонов':3,\nu'петровичев':3,\nu'малецкий':2,\nu'бердников':15,\nu'никифорова':40,\nu'евплов':2,\nu'карпяк':2,\nu'барвенко':2,\nu'сактоев':2,\nu'дубров':4,\nu'билан':3,\nu'махмудов':10,\nu'зеленова':3,\nu'рукавичников':2,\nu'гоман':4,\nu'дюрягин':3,\nu'мишин':30,\nu'беленьков':2,\nu'вишневецкий':2,\nu'малков':15,\nu'бурлака':3,\nu'вихляев':3,\nu'стадник':9,\nu'северинов':2,\nu'дмитрук':5,\nu'федорович':4,\nu'непомнящий':4,\nu'мосенцев':2,\nu'шамшурин':5,\nu'огрызько':2,\nu'чаниев':2,\nu'лавринович':4,\nu'сарапульцев':2,\nu'залиханов':3,\nu'откидач':3,\nu'салычев':2,\nu'устимов':3,\nu'маштаков':5,\nu'вьюгин':2,\nu'шува
ев':6,\nu'шмагин':2,\nu'киктев':3,\nu'пасюк':2,\nu'необутов':2,\nu'сахаров':24,\nu'сибирякова':2,\nu'каракаев':3,\nu'кумаров':2,\nu'докаш':2,\nu'гарбузова':2,\nu'гафаров':5,\nu'касьянова':8,\nu'басак':2,\nu'мацко':8,\nu'дуплякин':3,\nu'вакула':3,\nu'чернов':75,\nu'газаев':3,\nu'шепталов':2,\nu'колокольцев':3,\nu'кудашев':2,\nu'грушин':2,\nu'кушхов':3,\nu'ванеев':5,\nu'тихоненко':4,\nu'горькова':2,\nu'яцеленко':3,\nu'бадьин':2,\nu'земляная':2,\nu'сафин':20,\nu'ахмедзянов':2,\nu'свистельник':2,\nu'полевой':8,\nu'рыжманов':2,\nu'дубков':2,\nu'пшено':3,\nu'серга':3,\nu'торба':3,\nu'кормашов':3,\nu'феофилактов':2,\nu'копанев':5,\nu'добряк':2,\nu'ермолов':7,\nu'щекина':2,\nu'щербина':26,\nu'коряков':4,\nu'горнев':2,\nu'головченко':13,\nu'патрикеев':3,\nu'савостин':6,\nu'юшко':2,\nu'заиграев':3,\nu'саврасова':2,\nu'сунцов':7,\nu'тяпкина':2,\nu'бурматов':3,\nu'пятаков':5,\nu'пичугин':19,\nu'кремененко':2,\nu'мишарин':3,\nu'камалудинов':2,\nu'тузов':7,\nu'сташевская':3,\nu'красилов':5,\nu'шелухин':6,\nu'ромаданов':2,\nu'чернышенко':2,\nu'цымбал':9,\nu'чистоходов':2,\nu'милькин':2,\nu'чепурина':2,\nu'агапитов':3,\nu'андрейчук':4,\nu'зуев':41,\nu'петраков':8,\nu'сивцова':2,\nu'осьмак':2,\nu'косова':3,\nu'соляников':2,\nu'назина':5,\nu'егоренков':2,\nu'баскакова':6,\nu'абдуллин':13,\nu'стройкова':2,\nu'жемчужников':3,\nu'петрикеева':3,\nu'одинаев':2,\nu'иванченко':26,\nu'головчанский':2,\nu'калачев':11,\nu'шатов':5,\nu'силютин':2,\nu'дюк':2,\nu'козюра':4,\nu'швырев':2,\nu'золочевский':2,\nu'хасанов':17,\nu'василькин':2,\nu'доненко':2,\nu'пушина':3,\nu'кужим':2,\nu'оплачко':3,\nu'багрова':5,\nu'аммосов':3,\nu'халикова':2,\nu'самойлюк':3,\nu'торопов':12,\nu'маскаев':2,\nu'загороднов':2,\nu'лебединец':4,\nu'дикий':5,\nu'нефедова':13,\nu'белявский':7,\nu'цыганкова':11,\nu'замараев':3,\nu'урусова':4,\nu'гавага':2,\nu'латушко':2,\nu'шалимова':5,\nu'левкина':2,\nu'козлов':162,\nu'губарев':10,\nu'дерунов':2,\nu'литвишко':2,\nu'самбурский':2,\nu'бардина':5,\nu'шпакова':4,\nu'гавва':2,\nu'сасин':2,\nu'окладникова':2,\nu'рублева':2,\nu'лысков':4,\nu'магомедрасулов':2,\nu'копытько':2,\nu'гальцев':3,\nu'линев':2,\nu'хучиев':2,\nu'шинкевич':4,\nu'пирожков':4,\nu'воротилкин':2,\nu'кирова':2,\nu'братчикова':4,\nu'аниканова':3,\nu'писклов':3,\nu'стрельцова':3,\nu'салимова':2,\nu'гузев':3,\nu'вологдин':4,\nu'печников':2,\nu'толстихин':4,\nu'стяжкин':8,\nu'лобач':4,\nu'логвинов':14,\nu'ложка':2,\nu'абашева':2,\nu'глазырин':5,\nu'чуриков':9,\nu'осипова':29,\nu'ротова':2,\nu'криушкин':2,\nu'васильчиков':2,\nu'любецкая':3,\nu'фахретдинов':3,\nu'кодзоков':2,\nu'мартемьянов':6,\nu'вакутина':2,\nu'кавинов':2,\nu'неверова':2,\nu'ермолаева':18,\nu'сичинава':2,\nu'нижников':2,\nu'синельников':8,\nu'яцков':4,\nu'афзалов':2,\nu'кузькина':2,\nu'клепикова':2,\nu'врублевский':2,\nu'козарез':2,\nu'бугаков':6,\nu'каркач':2,\nu'шабаев':8,\nu'волосатов':4,\nu'вергун':5,\nu'апаев':2,\nu'смолянкин':2,\nu'объедков':2,\nu'воронович':3,\nu'пехов':2,\nu'солоников':2,\nu'ярема':3,\nu'нечаев':36,\nu'меньщиков':6,\nu'петросов':5,\nu'васютин':4,\nu'капленко':2,\nu'бикмухаметов':2,\nu'смольников':8,\nu'синюков':4,\nu'хайбулаев':2,\nu'ряховская':2,\nu'бородачева':2,\nu'асанов':12,\nu'пухова':7,\nu'козовой':2,\nu'юмашева':5,\nu'чернуха':5,\nu'яковенко':35,\nu'дрозденко':3,\nu'дубинина':10,\nu'матвейчук':4,\nu'делимханов':2,\nu'скорняков':3,\nu'фомкин':2,\nu'пятаев':2,\nu'танков':2,\nu'вебер':5,\nu'петерс':3,\nu'кошевая':5,\nu'орлов':126,\nu'снытко':3,\nu'попытаев':2,\nu'любич':2,\nu'батманов':3,\nu'анашкина':4,\nu'гудкова':7,\nu'денисова':38,\nu'стась
':2,\nu'пылаев':2,\nu'городкова':2,\nu'шелехова':2,\nu'митянина':4,\nu'аксельрод':2,\nu'страхова':5,\nu'ердакова':2,\nu'мухортова':2,\nu'поникаров':3,\nu'пошивай':2,\nu'шель':2,\nu'казарина':3,\nu'федотенков':2,\nu'ляшова':2,\nu'фурдуй':2,\nu'филипповский':3,\nu'муромский':2,\nu'евлампьев':2,\nu'новосёлов':3,\nu'жидков':15,\nu'мох':2,\nu'комбарова':2,\nu'петинов':3,\nu'гущин':30,\nu'улиско':3,\nu'ужахов':2,\nu'ганжа':5,\nu'чич':2,\nu'манылов':3,\nu'буткова':2,\nu'стешин':3,\nu'макушкина':2,\nu'астанин':5,\nu'осьмуха':2,\nu'меньшиков':11,\nu'силантьев':12,\nu'кобыляцкий':2,\nu'пакулина':2,\nu'бочарников':10,\nu'шутиков':2,\nu'шерин':3,\nu'шевелёв':4,\nu'хабло':2,\nu'таиров':5,\nu'подрезов':2,\nu'абисалов':2,\nu'дворцов':3,\nu'катышева':3,\nu'плетнев':6,\nu'барбосов':2,\nu'шапорев':2,\nu'угрюмов':6,\nu'антюхов':3,\nu'сухарева':5,\nu'санталов':3,\nu'воронцов':41,\nu'кухарчук':2,\nu'ларионова':19,\nu'касатиков':2,\nu'трофимов':72,\nu'батин':3,\nu'атласов':2,\nu'кобылкин':4,\nu'гронский':4,\nu'мухаметов':6,\nu'фетисов':18,\nu'лосева':10,\nu'портянко':3,\nu'свиридченко':2,\nu'кочиев':3,\nu'лифинцев':2,\nu'гвоздиков':2,\nu'шикунов':4,\nu'ясеновский':2,\nu'ермолович':4,\nu'исупов':13,\nu'касаев':3,\nu'крицкий':3,\nu'ливандовский':2,\nu'санникова':3,\nu'федчук':2,\nu'стрыгин':3,\nu'котвицкий':2,\nu'пономарев':86,\nu'федяев':9,\nu'теплоухов':2,\nu'ошурков':2,\nu'селиванова':12,\nu'писаренко':18,\nu'сторожев':6,\nu'бутыркин':2,\nu'груздев':15,\nu'романчев':2,\nu'макар':3,\nu'газарян':4,\nu'деветьяров':2,\nu'захарчук':6,\nu'саталкин':3,\nu'молочный':2,\nu'полушкин':10,\nu'обухов':26,\nu'калянов':3,\nu'солодовникова':3,\nu'камкин':2,\nu'молоков':6,\nu'барковский':3,\nu'ивакин':2,\nu'тригубенко':2,\nu'матишевский':3,\nu'горская':6,\nu'ходакова':4,\nu'сердюченко':2,\nu'семенко':2,\nu'мусифуллин':2,\nu'жигарев':7,\nu'омётов':2,\nu'черняков':6,\nu'ячменева':4,\nu'анисин':3,\nu'гребенюк':10,\nu'луцюк':2,\nu'лебедев':136,\nu'авдонина':4,\nu'масько':3,\nu'алексашкина':2,\nu'небогов':2,\nu'лыкова':3,\nu'гузеев':3,\nu'теплинский':4,\nu'лузанов':2,\nu'цепков':2,\nu'маврин':6,\nu'якубов':4,\nu'лохман':7,\nu'авакумов':2,\nu'семиков':5,\nu'нагорная':5,\nu'сакун':3,\nu'шлома':2,\nu'бологов':3,\nu'шеповалов':2,\nu'мишенкова':2,\nu'верещагина':2,\nu'бабикова':2,\nu'галаев':3,\nu'троянова':3,\nu'каверзин':2,\nu'фортуна':3,\nu'чеботарь':4,\nu'голенко':2,\nu'сочнев':2,\nu'симакин':4,\nu'мукаев':3,\nu'яйли':2,\nu'травнев':2,\nu'панин':33,\nu'паценко':2,\nu'дениско':2,\nu'каменских':7,\nu'городничева':5,\nu'комарицын':2,\nu'лобко':2,\nu'болсуновский':3,\nu'попков':17,\nu'умаров':6,\nu'кораблев':10,\nu'иванушко':2,\nu'ковальчук':42,\nu'нигматулин':3,\nu'лободин':2,\nu'твердохлебов':3,\nu'литвинова':24,\nu'гунбин':2,\nu'богатырёв':3,\nu'конкин':5,\nu'фомиченко':2,\nu'шиханов':2,\nu'гаращенко':5,\nu'зиновьева':17,\nu'щеглеватых':2,\nu'разуменко':2,\nu'амрахов':2,\nu'березина':7,\nu'басалаев':2,\nu'набиева':2,\nu'дворников':7,\nu'коблов':6,\nu'гатауллин':3,\nu'звягинцева':9,\nu'шаваев':2,\nu'градинар':2,\nu'хашиев':2,\nu'иринчеев':3,\nu'ахатов':2,\nu'людвиг':2,\nu'гриценко':21,\nu'баскин':3,\nu'ахмеров':6,\nu'пасечник':13,\nu'шурандин':2,\nu'игнатченко':2,\nu'зуйков':2,\nu'симанков':2,\nu'астафьева':12,\nu'живцов':2,\nu'макаркина':2,\nu'якобчук':2,\nu'колясников':3,\nu'ханов':7,\nu'елина':6,\nu'турчин':4,\nu'кашлев':3,\nu'милентьев':2,\nu'селиванов':23,\nu'стогова':3,\nu'тычина':2,\nu'манаев':4,\nu'темираев':3,\nu'ермакова':34,\nu'южакова':2,\nu'шмыков':5,\nu'куцев':6,\nu'бакаушин':2,\nu'ширинкин':2,\nu'харькин':3,\nu'светлична
я':5,\nu'пирумов':2,\nu'гаджилов':2,\nu'чичерина':3,\nu'василенкова':2,\nu'терентьев':36,\nu'ивашко':3,\nu'мартьянов':9,\nu'недашковская':2,\nu'лесников':5,\nu'бабкова':2,\nu'плетников':3,\nu'лесик':2,\nu'лесив':2,\nu'калякина':2,\nu'статикова':2,\nu'ганюшкин':2,\nu'александронец':2,\nu'дудников':13,\nu'ольшанский':4,\nu'гулин':11,\nu'гамзатов':7,\nu'борин':2,\nu'копылов':29,\nu'чебанова':2,\nu'мустафина':5,\nu'дроботов':2,\nu'шанин':6,\nu'скобелкин':3,\nu'махова':3,\nu'понкратов':4,\nu'яшенков':2,\nu'комушев':2,\nu'самсоненко':7,\nu'ларионов':37,\nu'улезько':2,\nu'трифанов':3,\nu'перескоков':2,\nu'вавилычев':2,\nu'рейман':2,\nu'выскуб':2,\nu'водопьянов':5,\nu'игнатенков':3,\nu'гречухин':2,\nu'андриевская':5,\nu'бойков':10,\nu'немашкалов':2,\nu'таранова':6,\nu'какаулин':2,\nu'шокин':2,\nu'игошина':2,\nu'зурабов':2,\nu'бродский':3,\nu'чайко':3,\nu'полозова':5,\nu'чайка':13,\nu'пожидаева':2,\nu'василенко':41,\nu'аракелова':2,\nu'копейкин':12,\nu'найденова':2,\nu'финчук':2,\nu'абашкин':3,\nu'вязов':2,\nu'пресняков':5,\nu'чаус':2,\nu'буткевич':2,\nu'архипов':40,\nu'балушкин':2,\nu'громов':53,\nu'ляховецкий':2,\nu'воеводин':8,\nu'силкин':8,\nu'гаврилина':4,\nu'бахтизин':2,\nu'юсов':7,\nu'правдина':2,\nu'федорина':2,\nu'никоноров':13,\nu'рослякова':3,\nu'белозерова':7,\nu'сысуева':2,\nu'сапиев':3,\nu'самотохин':2,\nu'зяблицев':2,\nu'мусиенко':7,\nu'сурин':12,\nu'клемешев':2,\nu'доленко':2,\nu'коцарев':2,\nu'ризенко':2,\nu'васягин':3,\nu'налбандян':2,\nu'саликов':8,\nu'брусов':3,\nu'рапопорт':2,\nu'долганов':4,\nu'силаев':15,\nu'татаркин':4,\nu'середенко':2,\nu'шипилин':2,\nu'канин':3,\nu'оленин':3,\nu'келехсаев':2,\nu'карий':2,\nu'колганова':10,\nu'усачева':8,\nu'соседов':6,\nu'плаксин':15,\nu'ткачук':21,\nu'хливак':2,\nu'густов':2,\nu'справцев':2,\nu'шебзухов':2,\nu'балакшин':2,\nu'политова':2,\nu'сугак':6,\nu'сафиуллина':2,\nu'конов':4,\nu'хохлов':44,\nu'тройнин':3,\nu'болов':3,\nu'чернякова':10,\nu'галочкин':2,\nu'яблонский':4,\nu'абдулин':2,\nu'пасынков':6,\nu'маковецкий':3,\nu'горбатых':3,\nu'мосиенко':3,\nu'сторчак':5,\nu'майстер':2,\nu'трофимович':2,\nu'гуз':2,\nu'богомазов':9,\nu'синдеев':2,\nu'конобеев':2,\nu'гук':7,\nu'антипов':33,\nu'кучиев':3,\nu'потемкин':11,\nu'зеленский':15,\nu'янсон':4,\nu'волосков':10,\nu'качинский':2,\nu'ямолов':2,\nu'манаков':9,\nu'дрюков':3,\nu'яковченко':3,\nu'муромцева':2,\nu'живаева':2,\nu'зудов':3,\nu'буслаев':4,\nu'шитова':8,\nu'сустатов':2,\nu'фенёва':2,\nu'федосцев':2,\nu'позняков':2,\nu'алибаев':2,\nu'чекарев':2,\nu'ульянов':33,\nu'корешкова':2,\nu'шашкина':2,\nu'дечкин':2,\nu'лопарев':3,\nu'игнатенко':26,\nu'трефилова':2,\nu'вахрушева':5,\nu'малявин':3,\nu'пекарев':2,\nu'филиппе':2,\nu'залетин':2,\nu'шеенко':2,\nu'денисенко':27,\nu'юринов':2,\nu'загорулько':8,\nu'ганина':5,\nu'успенский':4,\nu'раджабов':7,\nu'ермолова':3,\nu'лесина':2,\nu'удинцев':2,\nu'мышинская':2,\nu'вотяков':6,\nu'юрьева':5,\nu'ролдугин':3,\nu'берёзкин':2,\nu'бузенков':2,\nu'матяш':2,\nu'укладов':2,\nu'волкова':75,\nu'смолин':16,\nu'смолий':4,\nu'суховерхова':3,\nu'опалев':2,\nu'фадина':3,\nu'тронина':2,\nu'ихо':2,\nu'шалагинов':4,\nu'земский':3,\nu'скрылева':2,\nu'стрижов':4,\nu'ракчеев':2,\nu'пинаевский':2,\nu'шарипов':21,\nu'песецкая':2,\nu'бабкин':29,\nu'чернобаев':6,\nu'прошина':4,\nu'латипов':3,\nu'холопов':5,\nu'первушин':9,\nu'чоп':3,\nu'портнова':4,\nu'гуляков':3,\nu'меремьянин':3,\nu'гафарова':3,\nu'велесевич':2,\nu'тагирова':3,\nu'надысев':3,\nu'храмцов':10,\nu'гудз':2,\nu'белков':8,\nu'чистюхин':2,\nu'киреенков':2,\nu'саковский':3,\nu'бабаян':3,\nu'рыгалов':2,\nu'дышек
ов':3,\nu'селина':4,\nu'юхименко':2,\nu'бугакова':2,\nu'башкова':3,\nu'дрога':2,\nu'барсамов':2,\nu'шкарупа':6,\nu'гарбуз':4,\nu'коростылев':2,\nu'жуланов':3,\nu'ефанов':14,\nu'модина':2,\nu'кочарян':3,\nu'байкова':7,\nu'науменков':2,\nu'кайда':2,\nu'белобров':2,\nu'лапина':14,\nu'скиданов':4,\nu'гранин':2,\nu'парфентьев':2,\nu'молчанова':11,\nu'цапов':2,\nu'сырых':2,\nu'шафигулина':2,\nu'кунгурцев':4,\nu'скугарев':4,\nu'матюнин':5,\nu'кубарева':3,\nu'чуксеев':2,\nu'голубченко':2,\nu'сологуб':6,\nu'лайшев':3,\nu'яцкова':2,\nu'сизов':20,\nu'замалтдинов':2,\nu'грачев':34,\nu'есарева':2,\nu'гуторова':3,\nu'кадина':2,\nu'птиченко':2,\nu'пинчук':15,\nu'дахно':2,\nu'шиянова':2,\nu'гречушкин':3,\nu'козачек':3,\nu'прусов':3,\nu'кривченкова':2,\nu'наговицина':2,\nu'яблоков':9,\nu'вагин':10,\nu'шанько':2,\nu'безверхий':2,\nu'бучман':2,\nu'папикян':2,\nu'толстопятов':3,\nu'пузанова':5,\nu'смертин':4,\nu'плетнёв':2,\nu'шерман':2,\nu'клементьев':14,\nu'трофимец':4,\nu'потемин':3,\nu'сущенко':6,\nu'шабардин':3,\nu'косарева':10,\nu'шершнев':8,\nu'ширин':7,\nu'кисилев':8,\nu'кулов':3,\nu'стародубова':2,\nu'майхиев':2,\nu'бекиров':2,\nu'терновская':2,\nu'балахтин':2,\nu'зеленков':3,\nu'игнатов':32,\nu'князькова':3,\nu'муругов':2,\nu'аверкиев':2,\nu'лукьянченко':2,\nu'гиберт':2,\nu'росляков':6,\nu'юрицын':2,\nu'уланкин':2,\nu'щегольков':3,\nu'персиянцев':3,\nu'пундель':2,\nu'долгих':35,\nu'костылева':6,\nu'голенков':2,\nu'преснякова':2,\nu'бульба':4,\nu'маслюк':3,\nu'курышов':2,\nu'темиров':4,\nu'баган':2,\nu'соловей':12,\nu'меркушов':3,\nu'майорова':16,\nu'лазев':2,\nu'пивцаева':2,\nu'бирюлин':7,\nu'пупонин':2,\nu'щепилов':2,\nu'тараскин':2,\nu'галимуллин':2,\nu'савостиков':2,\nu'биндасов':2,\nu'гурина':2,\nu'братчук':2,\nu'кожушный':2,\nu'дунаева':13,\nu'кабак':3,\nu'евграфов':8,\nu'софина':2,\nu'солопова':3,\nu'авчинников':2,\nu'шайхутдинов':7,\nu'мальгин':5,\nu'мурыгина':2,\nu'стафеев':4,\nu'савельев':63,\nu'кулишов':6,\nu'бельков':5,\nu'гнедых':2,\nu'погорелый':3,\nu'кашпур':2,\nu'вайцехович':2,\nu'захарко':2,\nu'анохина':7,\nu'конищев':2,\nu'фатеева':10,\nu'чеховская':3,\nu'шахматов':2,\nu'кондаков':13,\nu'сердцев':2,\nu'атанов':9,\nu'летов':3,\nu'сойко':3,\nu'морева':4,\nu'садовая':3,\nu'лосев':21,\nu'шулик':2,\nu'уфимцев':6,\nu'касаткин':15,\nu'пузыревский':2,\nu'дундуков':2,\nu'бармашев':2,\nu'кадомцева':2,\nu'серегин':16,\nu'сукачев':3,\nu'трипольский':2,\nu'гололобова':3,\nu'веселов':30,\nu'немченко':6,\nu'арбузова':2,\nu'есипова':4,\nu'веприков':3,\nu'тимлина':2,\nu'бурганова':2,\nu'курчев':2,\nu'цалко':2,\nu'бауэр':7,\nu'корда':2,\nu'сливина':2,\nu'печенкин':7,\nu'линьков':4,\nu'атаманенко':3,\nu'загидулин':2,\nu'волынская':2,\nu'браун':4,\nu'нехаев':8,\nu'лесникова':5,\nu'сивов':4,\nu'денисенков':4,\nu'сенин':9,\nu'семяшкин':2,\nu'сиротинин':2,\nu'воронин':52,\nu'офицеров':4,\nu'савкина':7,\nu'митрик':2,\nu'белхороев':4,\nu'абдуразаков':3,\nu'евсюкова':11,\nu'гримов':2,\nu'булеева':2,\nu'нуркенов':2,\nu'якубо':2,\nu'якуба':4,\nu'шепель':5,\nu'рябцева':7,\nu'шалаев':18,\nu'кайль':2,\nu'акатов':2,\nu'рачков':6,\nu'понкратьев':2,\nu'швец':20,\nu'швех':2,\nu'неудахин':5,\nu'шинкарь':2,\nu'хохряков':9,\nu'доставалов':2,\nu'пулин':3,\nu'чукалкин':2,\nu'каприн':2,\nu'шеломенцев':2,\nu'старостенко':8,\nu'яновский':6,\nu'куприн':7,\nu'клюева':9,\nu'любченко':9,\nu'лавриненко':15,\nu'полещук':12,\nu'овечкина':5,\nu'высотченко':2,\nu'казарин':10,\nu'денисюк':8,\nu'гушанов':2,\nu'рыков':13,\nu'токаренко':4,\nu'павловская':5,\nu'буевич':2,\nu'даудов':5,\nu'борычев':2,\nu'жолоб':2,\nu'каминский':14,\nu'окудж
ава':2,\nu'боровская':4,\nu'конашенков':2,\nu'дьяков':23,\nu'топорова':2,\nu'вирясов':2,\nu'афанасенков':5,\nu'левковская':2,\nu'маляренко':2,\nu'белокопытов':5,\nu'палий':5,\nu'руф':2,\nu'сушков':15,\nu'епихина':2,\nu'демидова':25,\nu'хусаинов':16,\nu'лисицин':3,\nu'битков':2,\nu'кораблева':4,\nu'федоренко':34,\nu'михайлина':4,\nu'евстафьева':2,\nu'чубарь':3,\nu'бикбулатов':3,\nu'райский':2,\nu'неволин':2,\nu'якушенко':4,\nu'бажутов':3,\nu'урманов':3,\nu'джабраилов':5,\nu'мирзоян':4,\nu'гарасевич':2,\nu'ваулина':3,\nu'чадаев':4,\nu'светлаков':5,\nu'илларионов':11,\nu'поплевко':2,\nu'долуда':2,\nu'шигирданов':2,\nu'юр':2,\nu'гришкин':3,\nu'семегук':2,\nu'чаплин':5,\nu'хрущев':4,\nu'носик':2,\nu'строгий':2,\nu'циферов':2,\nu'веденеева':2,\nu'хорольский':2,\nu'матико':2,\nu'петюшик':2,\nu'гольман':2,\nu'шагаров':3,\nu'кулабухов':4,\nu'кравцов':38,\nu'теплова':6,\nu'батрак':2,\nu'гриппа':2,\nu'брант':2,\nu'ильиных':13,\nu'добрынин':11,\nu'фисенко':18,\nu'таркин':2,\nu'грубов':2,\nu'егоркин':2,\nu'лохматов':2,\nu'ляшенко':20,\nu'цурин':3,\nu'цурик':3,\nu'шамилов':3,\nu'чайкин':8,\nu'баранков':2,\nu'артюшкин':5,\nu'свиридов':34,\nu'чуднов':3,\nu'лямкин':4,\nu'логинова':37,\nu'лаврухин':7,\nu'четвергова':2,\nu'белоцерковец':2,\nu'арутюнян':16,\nu'сулейманов':26,\nu'стеценко':9,\nu'соленова':2,\nu'снопкова':2,\nu'каспаров':2,\nu'смотрин':2,\nu'кузьминых':9,\nu'молокова':4,\nu'пасько':6,\nu'ковылин':4,\nu'бахтияров':3,\nu'литвак':3,\nu'ступина':2,\nu'жабровец':2,\nu'балакирева':6,\nu'звягин':12,\nu'кистанова':2,\nu'балаганский':2,\nu'великий':2,\nu'губин':19,\nu'вирронен':2,\nu'норполов':2,\nu'хидиров':3,\nu'толкачев':20,\nu'кузьменков':10,\nu'нургатина':2,\nu'бакуменко':6,\nu'ерошкина':2,\nu'балыхин':2,\nu'задорожный':16,\nu'коробченко':4,\nu'игнатенкова':2,\nu'санчик':2,\nu'байдина':2,\nu'бараева':2,\nu'грехова':2,\nu'горностаева':3,\nu'мигунова':2,\nu'оглоблина':3,\nu'скарюкина':2,\nu'чигарев':4,\nu'кузьмина':79,\nu'красулин':2,\nu'кушнир':10,\nu'коннов':12,\nu'галайда':4,\nu'гончаренко':26,\nu'шапков':2,\nu'фаталиев':2,\nu'протасевич':2,\nu'подгорнова':3,\nu'милославская':2,\nu'евдокимова':18,\nu'рузанов':2,\nu'курсаков':3,\nu'маслюков':4,\nu'бызов':3,\nu'хованов':2,\nu'степина':2,\nu'соломахин':4,\nu'бармин':9,\nu'корсуков':2,\nu'махин':3,\nu'атаманюк':2,\nu'цыденов':3,\nu'конякин':2,\nu'вильк':2,\nu'мухаметгалиев':2,\nu'кудаев':6,\nu'устенко':2,\nu'ерофеев':22,\nu'кривошапкин':2,\nu'гатин':2,\nu'костов':2,\nu'жигалов':6,\nu'лемешевский':2,\nu'старовойт':3,\nu'дубчак':2,\nu'подрез':3,\nu'боровая':3,\nu'бурко':3,\nu'трясцин':2,\nu'бурка':2,\nu'воронкин':4,\nu'пустовалов':8,\nu'заборский':3,\nu'товкач':3,\nu'шакуров':5,\nu'кохановский':3,\nu'чебанов':4,\nu'ревякина':2,\nu'ананьев':28,\nu'самара':2,\nu'кувыклин':2,\nu'красников':10,\nu'шкатов':2,\nu'шут':2,\nu'козьменко':4,\nu'новопашин':2,\nu'петрашко':3,\nu'заводчиков':2,\nu'кожевников':38,\nu'пищелевский':2,\nu'крутских':4,\nu'козырина':2,\nu'скоробогатов':14,\nu'ремарчук':2,\nu'стручкова':5,\nu'мишуткин':2,\nu'каширский':2,\nu'исмаилов':13,\nu'мурашев':2,\nu'ожгибесов':2,\nu'шелгунов':2,\nu'козинцев':2,\nu'зыкина':4,\nu'шапина':2,\nu'мажара':2,\nu'коломыцев':5,\nu'калюжная':3,\nu'проценко':23,\nu'семененко':11,\nu'глухих':3,\nu'федулина':2,\nu'кирильчук':3,\nu'скосарев':2,\nu'коробцов':2,\nu'сухинин':7,\nu'трембач':2,\nu'мубаракшин':2,\nu'шайдаров':3,\nu'глазов':5,\nu'боровых':4,\nu'кондратов':10,\nu'вышинский':4,\nu'гудилин':2,\nu'геевский':2,\nu'филин':18,\nu'юрченков':3,\nu'губкин':2,\nu'ченский':3,\nu'бирин':4,\nu'попрядухин':3,\nu'поздяк
ов':2,\nu'степанская':2,\nu'петрашова':2,\nu'гатина':2,\nu'чусова':4,\nu'прошин':7,\nu'мальсагов':3,\nu'корнилин':2,\nu'феоктистова':7,\nu'абдульманов':3,\nu'балабин':2,\nu'бабичев':10,\nu'пан':3,\nu'соломон':2,\nu'аржанников':2,\nu'боголюбов':3,\nu'нижельский':2,\nu'пелипенко':3,\nu'осипян':2,\nu'широкорадюк':2,\nu'венедиктов':7,\nu'тишин':9,\nu'эльмурзаев':3,\nu'архангельский':5,\nu'кандаков':2,\nu'жемков':2,\nu'давлятшин':2,\nu'масленникова':8,\nu'долотченко':2,\nu'боровикова':8,\nu'огородов':2,\nu'зубакин':2,\nu'шулепа':2,\nu'туренко':2,\nu'зайков':8,\nu'голушко':4,\nu'цветкова':31,\nu'фотеев':2,\nu'кужелев':2,\nu'тихонов':69,\nu'лановенко':3,\nu'угнивенко':2,\nu'уржумов':3,\nu'аляева':2,\nu'насыров':8,\nu'кулебякин':2,\nu'екимова':2,\nu'пивень':4,\nu'паюсов':2,\nu'барышникова':12,\nu'резникова':3,\nu'алекса':2,\nu'черешнева':2,\nu'новожилова':14,\nu'наделяев':2,\nu'басангов':4,\nu'богодаев':2,\nu'утимишев':3,\nu'трубачев':4,\nu'файфер':2,\nu'хоц':3,\nu'козловцев':2,\nu'трошкова':2,\nu'петряков':2,\nu'никифоренко':4,\nu'атласова':3,\nu'тюльпанов':2,\nu'голдобина':2,\nu'васильцов':3,\nu'жилина':7,\nu'челноков':3,\nu'ковешникова':4,\nu'рзаев':2,\nu'липатников':2,\nu'путков':2,\nu'мирошин':3,\nu'сучкова':10,\nu'ашиток':2,\nu'чураев':5,\nu'василисин':2,\nu'ананских':2,\nu'хардикова':2,\nu'пошехонова':2,\nu'дудинов':2,\nu'климовских':2,\nu'шамков':2,\nu'ефимов':66,\nu'голубев':53,\nu'балыков':2,\nu'бештоев':2,\nu'сокур':2,\nu'когут':4,\nu'пашко':3,\nu'исаков':39,\nu'зарин':2,\nu'украинец':2,\nu'пархоменко':29,\nu'бельская':4,\nu'фомин':69,\nu'исламов':12,\nu'жаворонок':4,\nu'бекеш':2,\nu'кирина':2,\nu'жилин':24,\nu'юхневич':4,\nu'шестопалов':12,\nu'прибавченков':2,\nu'чекалина':4,\nu'бабич':23,\nu'эрдниев':3,\nu'гаршин':2,\nu'симашов':2,\nu'камынин':6,\nu'боровиков':18,\nu'голоденко':2,\nu'токаев':5,\nu'куракова':2,\nu'кудаков':2,\nu'мануковский':4,\nu'грушецкий':2,\nu'шваб':3,\nu'франц':3,\nu'грузин':2,\nu'подопригора':5,\nu'куцов':2,\nu'фокеев':2,\nu'могильникова':2,\nu'саутин':2,\nu'маргарян':2,\nu'кравчук':33,\nu'землянская':2,\nu'пиннекер':2,\nu'олеников':2,\nu'ильюченко':2,\nu'жариков':11,\nu'жданович':3,\nu'разинов':3,\nu'ямалетдинов':3,\nu'чухрай':2,\nu'дуля':2,\nu'булкина':2,\nu'сухачева':4,\nu'жилинков':2,\nu'синельщиков':2,\nu'захариков':2,\nu'мамбеталиев':2,\nu'семенец':2,\nu'басюк':2,\nu'золкин':3,\nu'теркин':2,\nu'козина':12,\nu'шатунов':3,\nu'плохотнюк':3,\nu'углов':5,\nu'махачев':3,\nu'шаламова':4,\nu'селищев':5,\nu'яковец':2,\nu'головачев':12,\nu'чижова':6,\nu'евпак':2,\nu'тикк':2,\nu'ремизов':11,\nu'храмушин':3,\nu'гринченко':4,\nu'зайдуллин':2,\nu'манжосова':2,\nu'шматов':4,\nu'юдина':20,\nu'мурсалов':2,\nu'дэжур':2,\nu'мисиков':2,\nu'милушкин':2,\nu'цыдыпов':7,\nu'иванилов':3,\nu'корытина':2,\nu'лукашева':2,\nu'гаджиметова':2,\nu'донская':3,\nu'сальникова':7,\nu'прусаков':5,\nu'малчинов':2,\nu'бортник':2,\nu'гаттаров':2,\nu'глазьев':3,\nu'вашуркин':2,\nu'илюхина':9,\nu'маннанов':4,\nu'бабинов':4,\nu'шинкаренко':10,\nu'палагин':4,\nu'ермошин':8,\nu'алексеев':117,\nu'пугач':10,\nu'московкин':3,\nu'чепайкин':2,\nu'теребов':2,\nu'михалева':14,\nu'тумаков':6,\nu'шигапова':2,\nu'самойлович':2,\nu'мясоедова':2,\nu'сагалов':2,\nu'осокин':14,\nu'желябин':2,\nu'хутов':2,\nu'гильмуллин':2,\nu'урин':3,\nu'лысенков':6,\nu'гереев':4,\nu'барышева':6,\nu'сумина':2,\nu'вараксин':7,\nu'заборовский':2,\nu'чесноков':28,\nu'конев':18,\nu'микушин':4,\nu'ярцева':4,\nu'будков':3,\nu'баннов':2,\nu'бочкарева':8,\nu'ишимов':4,\nu'безносик':2,\nu'гурулев':4,\nu'маньковская':2,\nu'двуреченская':3,\nu
'воложинский':2,\nu'червяков':7,\nu'кружалин':3,\nu'федотовский':3,\nu'трушин':10,\nu'перова':11,\nu'короткевич':5,\nu'варченко':3,\nu'стрелков':9,\nu'сенников':3,\nu'шлычков':4,\nu'шинин':2,\nu'гоманов':3,\nu'бауман':2,\nu'ильчук':4,\nu'сенокосов':2,\nu'бондарчук':12,\nu'саютина':2,\nu'тонких':11,\nu'юферов':3,\nu'лозобко':2,\nu'земцов':7,\nu'евтеева':4,\nu'никитюк':8,\nu'легенький':3,\nu'беда':2,\nu'кальянов':2,\nu'маслин':3,\nu'кривченко':3,\nu'габрусенас':2,\nu'лой':2,\nu'занорин':2,\nu'старшинов':3,\nu'рыбас':3,\nu'курдюков':5,\nu'филюшин':2,\nu'сечин':4,\nu'тырин':2,\nu'чернецкий':3,\nu'перцева':2,\nu'немцев':4,\nu'абдрахманов':9,\nu'потоцкий':2,\nu'голубов':5,\nu'ревичев':2,\nu'брыкин':5,\nu'гладких':19,\nu'кормилицын':2,\nu'фидаров':4,\nu'бриль':2,\nu'шпис':2,\nu'лоскутов':20,\nu'байдин':2,\nu'степанко':2,\nu'метелева':3,\nu'нугуманов':2,\nu'сыроненко':2,\nu'дибиров':4,\nu'каледин':3,\nu'какорина':2,\nu'девяткин':11,\nu'братенков':2,\nu'кожукова':2,\nu'рахматулин':4,\nu'халидов':3,\nu'чистохин':2,\nu'емельяненко':6,\nu'турлай':2,\nu'дегтева':3,\nu'мусин':7,\nu'хисматуллин':4,\nu'риммер':2,\nu'арсентьева':6,\nu'екимов':14,\nu'буракова':2,\nu'серёгина':2,\nu'кикоть':7,\nu'клепалов':5,\nu'никанорова':2,\nu'бабарицкий':2,\nu'торощин':2,\nu'киршин':3,\nu'башлыков':3,\nu'шаляпин':2,\nu'просвирин':2,\nu'дулов':4,\nu'ненахов':3,\nu'фролова':64,\nu'демин':35,\nu'волох':4,\nu'рахальский':2,\nu'ключкин':5,\nu'ряполов':3,\nu'хоружий':4,\nu'елагина':5,\nu'рудченко':3,\nu'моченов':2,\nu'шавкин':2,\nu'федорова':93,\nu'пивовар':3,\nu'дубровкин':2,\nu'сафиулин':2,\nu'недбайлов':2,\nu'кузь':3,\nu'сидорин':3,\nu'димова':2,\nu'ковнацкий':2,\nu'прилепо':2,\nu'вальтер':4,\nu'евстафьев':4,\nu'колмыкова':6,\nu'федичев':2,\nu'семиглазов':3,\nu'евсеева':8,\nu'гуркина':3,\nu'карякин':8,\nu'афанасенко':10,\nu'манеров':2,\nu'гуцан':3,\nu'короленко':2,\nu'сопко':3,\nu'янусов':2,\nu'куслин':2,\nu'левина':10,\nu'дубинская':2,\nu'городничев':5,\nu'цуркан':3,\nu'ильичёв':2,\nu'калинин':70,\nu'абдулхаеров':2,\nu'лихов':4,\nu'гречаный':4,\nu'зудова':2,\nu'пенькова':3,\nu'ивлева':12,\nu'тутаев':4,\nu'маньков':3,\nu'пентюшенков':2,\nu'корябин':2,\nu'исайчев':2,\nu'глинская':6,\nu'дашкова':6,\nu'ахмадиев':2,\nu'любимкин':2,\nu'штефан':4,\nu'шиленин':2,\nu'другова':3,\nu'тактаров':2,\nu'спицын':9,\nu'будаева':4,\nu'горшков':48,\nu'митькин':5,\nu'вербицкая':4,\nu'веретельников':3,\nu'шушукин':2,\nu'андриенко':11,\nu'данилюк':15,\nu'литасов':2,\nu'роженко':2,\nu'тюкавкин':2,\nu'саражаков':2,\nu'турчанинова':3,\nu'опанасенко':4,\nu'чинихин':2,\nu'юрченко':39,\nu'печкин':3,\nu'козлова':80,\nu'потапенко':7,\nu'кость':2,\nu'офицерова':2,\nu'галкина':15,\nu'максимова':59,\nu'дмитракова':2,\nu'менщикова':3,\nu'курдаков':2,\nu'кичук':2,\nu'кондратьева':24,\nu'астахов':24,\nu'лейкин':2,\nu'фиалкин':2,\nu'козельцев':2,\nu'тимонин':9,\nu'батищев':2,\nu'сурикова':8,\nu'гоголев':14,\nu'белоусова':21,\nu'денщиков':2,\nu'шушаков':5,\nu'кожихов':2,\nu'олеха':2,\nu'корчмин':2,\nu'семчин':2,\nu'богуцкая':4,\nu'мордовин':8,\nu'масич':2,\nu'черепанов':24,\nu'преснухин':2,\nu'бурдуковский':2,\nu'меженов':2,\nu'коваленко':95,\nu'табаков':10,\nu'саитбаталов':2,\nu'мищук':3,\nu'мотузенко':2,\nu'малинина':7,\nu'барабаш':9,\nu'диев':2,\nu'башков':6,\nu'фомина':38,\nu'силко':2,\nu'енгуразов':2,\nu'белясов':2,\nu'галямина':2,\nu'тоболова':2,\nu'казарцев':3,\nu'сергеев':116,\nu'томилова':3,\nu'галеев':8,\nu'мохова':2,\nu'нугаева':2,\nu'косаревич':2,\nu'валеев':11,\nu'бессолицын':2,\nu'туаев':2,\nu'зембатов':2,\nu'букреев':8,\nu'чуракова':6,\nu'халиулл
ин':2,\nu'берестова':2,\nu'бойчук':5,\nu'кропотов':6,\nu'муратов':20,\nu'абенякова':2,\nu'юмашев':3,\nu'толпекин':2,\nu'телепнев':2,\nu'кудровский':2,\nu'крамарь':2,\nu'цечоев':7,\nu'былков':2,\nu'майстренко':4,\nu'дашкин':4,\nu'кириков':3,\nu'балданова':3,\nu'бойко':71,\nu'шаргаева':2,\nu'хилькевич':2,\nu'сундеев':2,\nu'коврижкин':2,\nu'кузькин':3,\nu'богданович':9,\nu'самородова':2,\nu'маруженко':2,\nu'лысак':6,\nu'семыкина':3,\nu'фельдман':4,\nu'колесниченко':12,\nu'бухарин':2,\nu'воропаева':4,\nu'саттаров':5,\nu'задворный':2,\nu'сухов':25,\nu'косяков':4,\nu'свищев':2,\nu'панкстьянов':2,\nu'бичев':3,\nu'музипов':3,\nu'казеева':3,\nu'макарцев':5,\nu'охлопков':6,\nu'валитов':8,\nu'кочерга':6,\nu'кухно':2,\nu'каплун':3,\nu'поварова':2,\nu'гарцева':2,\nu'грицков':6,\nu'максимкина':3,\nu'бежанов':2,\nu'щелкунов':4,\nu'фирсова':12,\nu'хлопков':2,\nu'внуков':5,\nu'бердник':2,\nu'красикова':4,\nu'габдрашитов':4,\nu'сигарев':2,\nu'ильясов':19,\nu'янкович':2,\nu'малышев':48,\nu'коточигова':2,\nu'сотсков':3,\nu'платунова':2,\nu'кабанец':2,\nu'марютин':2,\nu'якунин':20,\nu'вязгин':2,\nu'викторова':9,\nu'прохоров':44,\nu'шамшур':2,\nu'карпушов':2,\nu'волынский':2,\nu'яськова':2,\nu'юровская':2,\nu'кожемякина':5,\nu'липатова':5,\nu'герасина':2,\nu'шмалько':2,\nu'бондарь':39,\nu'миколенко':2,\nu'трушкин':6,\nu'тополев':2,\nu'козелов':2,\nu'гаджиев':23,\nu'милахин':2,\nu'михина':3,\nu'власкина':2,\nu'унканжинов':2,\nu'бруев':2,\nu'быстрова':18,\nu'алборов':3,\nu'корсунова':3,\nu'плеханова':8,\nu'шафеев':2,\nu'мамай':3,\nu'ашуров':2,\nu'чубарев':3,\nu'костянов':2,\nu'лысаченко':2,\nu'таранюк':2,\nu'азаренков':2,\nu'мартыновский':2,\nu'соколовская':8,\nu'агибалова':3,\nu'чишко':2,\nu'бокарев':5,\nu'прилуцкая':2,\nu'симачков':3,\nu'демидов':40,\nu'гиззатуллин':3,\nu'юхта':2,\nu'галактионова':8,\nu'крупенков':2,\nu'пляскин':4,\nu'малькова':4,\nu'азоркин':3,\nu'сороколет':2,\nu'коренев':9,\nu'кулида':2,\nu'рябчикова':2,\nu'бостан':2,\nu'чиркин':6,\nu'баутин':4,\nu'балуева':6,\nu'слабов':2,\nu'матюшина':2,\nu'трухманова':2,\nu'косицин':2,\nu'ильинов':6,\nu'левковский':4,\nu'дуненков':2,\nu'поспелов':18,\nu'окрушко':2,\nu'габриэлян':2,\nu'чагина':2,\nu'сомов':10,\nu'шепуленко':2,\nu'атанова':3,\nu'полуэктов':7,\nu'есипенко':7,\nu'носова':21,\nu'царёв':5,\nu'кислый':3,\nu'знова':2,\nu'плавунов':2,\nu'семенков':5,\nu'нисковских':3,\nu'сабанцев':3,\nu'пшенников':3,\nu'зиннатуллин':4,\nu'петросян':12,\nu'повзык':2,\nu'багаева':6,\nu'згибай':2,\nu'полтавский':4,\nu'терегулов':2,\nu'куринный':3,\nu'кадушкин':2,\nu'репетенко':2,\nu'завирюха':2,\nu'надежин':2,\nu'маршев':2,\nu'сазонкин':3,\nu'метелёв':3,\nu'домнич':3,\nu'школа':4,\nu'кулемзин':2,\nu'гурская':2,\nu'коняхин':3,\nu'бормотов':6,\nu'огородникова':5,\nu'шлотов':2,\nu'жупаков':2,\nu'могильный':2,\nu'касперович':2,\nu'махонин':2,\nu'моденов':3,\nu'карашевский':2,\nu'амирханов':2,\nu'ашурков':5,\nu'верещак':8,\nu'есина':6,\nu'юдакова':2,\nu'лихоманов':3,\nu'букач':2,\nu'чумакова':5,\nu'балакшина':2,\nu'полозов':9,\nu'пнев':2,\nu'молодцов':9,\nu'галыгин':2,\nu'агеев':31,\nu'елин':8,\nu'оводова':2,\nu'воронко':2,\nu'торопова':5,\nu'меньшова':4,\nu'котов':45,\nu'якименко':14,\nu'жуляев':2,\nu'нестеренко':33,\nu'цыбиков':5,\nu'лазурин':2,\nu'галустян':3,\nu'алмаев':4,\nu'баулин':5,\nu'чирухин':2,\nu'левочкин':2,\nu'казаев':2,\nu'комаров':79,\nu'басос':2,\nu'паринов':3,\nu'цимбалов':2,\nu'дымова':2,\nu'ганихин':2,\nu'долганова':3,\nu'шиц':4,\nu'веселков':7,\nu'фадин':7,\nu'шляпин':2,\nu'видинеев':2,\nu'головня':4,\nu'мишаков':4,\nu'долженко':16,\nu'безуглова':2,\n
u'бурова':11,\nu'пушечников':2,\nu'сенцов':2,\nu'нелюбин':4,\nu'михайлюк':10,\nu'кораблёв':3,\nu'поливода':2,\nu'зубаиров':6,\nu'аскерова':4,\nu'деньгин':3,\nu'зорькина':2,\nu'игбаева':2,\nu'степкина':3,\nu'замышляев':3,\nu'стародуб':2,\nu'сафина':2,\nu'барабанов':10,\nu'городецкий':6,\nu'содель':2,\nu'савосин':3,\nu'долгачева':2,\nu'хромой':2,\nu'хромов':12,\nu'девятайкин':2,\nu'зубова':8,\nu'родина':17,\nu'енин':8,\nu'кузьминов':6,\nu'кудрявых':2,\nu'шальнов':3,\nu'рубанов':5,\nu'песков':10,\nu'пашнин':2,\nu'свешникова':2,\nu'дыбин':2,\nu'чеботарев':25,\nu'степанова':95,\nu'гришанов':8,\nu'куксов':3,\nu'зинатуллин':5,\nu'коробкин':11,\nu'краюшкин':2,\nu'гидулянов':2,\nu'домнин':6,\nu'подкопаев':8,\nu'берлина':3,\nu'бондар':3,\nu'голиков':26,\nu'ануфриев':20,\nu'леготин':2,\nu'батурин':11,\nu'минеева':6,\nu'кочина':3,\nu'балакишиев':2,\nu'пудовкин':4,\nu'исаева':34,\nu'мазуренко':11,\nu'серков':11,\nu'абдрафиков':2,\nu'петрухин':15,\nu'тихончук':4,\nu'батарин':2,\nu'нуреев':2,\nu'решетова':4,\nu'болтунова':2,\nu'перминов':22,\nu'мельничук':12,\nu'булдаков':6,\nu'сивоконь':3,\nu'чеплаков':2,\nu'гадаев':2,\nu'зюзько':4,\nu'кузенин':2,\nu'гагонин':2,\nu'белинин':3,\nu'домбаев':2,\nu'мехоношин':3,\nu'семочкина':2,\nu'годунов':2,\nu'брижань':2,\nu'каргаполова':2,\nu'клягин':2,\nu'шерстнев':12,\nu'ким':66,\nu'лось':10,\nu'толоконников':5,\nu'рачковский':3,\nu'рыбалко':11,\nu'ильков':2,\nu'дикун':2,\nu'петровская':6,\nu'филев':2,\nu'самочернов':2,\nu'милютин':4,\nu'чаленко':5,\nu'горецкий':2,\nu'пантюхов':8,\nu'бурак':4,\nu'кадыров':16,\nu'ловягин':4,\nu'евсеенко':7,\nu'зенов':2,\nu'печеный':4,\nu'осинцева':5,\nu'сенаторова':3,\nu'базылев':2,\nu'кесаев':4,\nu'донских':7,\nu'ерёмина':2,\nu'прокофьева':17,\nu'сусликова':3,\nu'круш':2,\nu'летникова':3,\nu'владимирская':2,\nu'красков':2,\nu'седых':15,\nu'буга':3,\nu'машичев':2,\nu'полосина':2,\nu'чумикова':2,\nu'бардадын':2,\nu'замятина':9,\nu'сологубова':3,\nu'маковский':5,\nu'табакаев':2,\nu'вакаев':2,\nu'полников':5,\nu'адамович':6,\nu'астафурова':2,\nu'чабан':13,\nu'гаязова':2,\nu'немаев':2,\nu'князькин':7,\nu'малев':3,\nu'меньшенина':2,\nu'егин':2,\nu'ляховский':2,\nu'давудов':2,\nu'котомина':2,\nu'герасименко':28,\nu'кудряев':2,\nu'ротов':3,\nu'самаргин':2,\nu'тебеньков':5,\nu'базулев':2,\nu'садовский':8,\nu'дорогова':2,\nu'кацуба':4,\nu'кацубо':3,\nu'хачатурян':6,\nu'канищева':3,\nu'эпов':7,\nu'павловский':18,\nu'турыгин':3,\nu'норец':2,\nu'хрущева':3,\nu'пичугина':5,\nu'портнягин':3,\nu'волковский':2,\nu'коркин':5,\nu'краева':3,\nu'сокольникова':2,\nu'цыпкин':2,\nu'бережной':20,\nu'толстых':12,\nu'михин':6,\nu'бойкова':7,\nu'цыганова':4,\nu'камышенков':2,\nu'шмаровоз':2,\nu'тикунов':2,\nu'калядин':2,\nu'балаян':4,\nu'артюхова':2,\nu'полканов':3,\nu'бедретдинова':2,\nu'галицин':3,\nu'чепышко':2,\nu'литовченко':16,\nu'гасымов':2,\nu'синцов':2,\nu'горлов':16,\nu'оверченко':2,\nu'касторнов':2,\nu'котломин':2,\nu'пшеничная':2,\nu'шипилова':10,\nu'давыденко':21,\nu'меняйленко':2,\nu'гарбаренко':2,\nu'ларина':21,\nu'умнова':3,\nu'савостина':5,\nu'минюков':2,\nu'мифтахов':2,\nu'сенченков':3,\nu'никольская':7,\nu'белименко':2,\nu'воронкова':5,\nu'баландина':6,\nu'журкин':5,\nu'чиркова':14,\nu'бельский':12,\nu'палкина':3,\nu'мятечкин':2,\nu'заруднев':2,\nu'целковиков':2,\nu'холопова':2,\nu'бандура':4,\nu'жакина':2,\nu'синкевич':4,\nu'кострица':2,\nu'гордикова':2,\nu'сиденко':2,\nu'светлов':4,\nu'фогель':2,\nu'русов':3,\nu'чичеров':3,\nu'рыжкова':10,\nu'зайкова':3,\nu'назаренко':43,\nu'алистратов':3,\nu'трещева':2,\nu'арашуков':2,\nu'ледовская':3,\nu'ив
ашкевич':2,\nu'орлянский':2,\nu'милюков':2,\nu'драгун':3,\nu'буравлев':2,\nu'лачугина':2,\nu'богдашкин':4,\nu'горячкин':6,\nu'пряхина':2,\nu'перфилова':4,\nu'самотаева':4,\nu'гетман':6,\nu'лелюк':2,\nu'червоненко':4,\nu'мержуев':2,\nu'дурасов':5,\nu'тимин':6,\nu'якубенко':2,\nu'мурашов':5,\nu'хачемизов':2,\nu'чупин':5,\nu'захарченко':16,\nu'шестопалова':3,\nu'ярославцев':11,\nu'лабутина':2,\nu'титовский':2,\nu'меченко':2,\nu'чупраков':9,\nu'лисина':5,\nu'панкова':7,\nu'налимов':5,\nu'еделев':2,\nu'сыров':2,\nu'генне':2,\nu'долотов':2,\nu'тюкин':2,\nu'якимова':7,\nu'муравский':2,\nu'балицкий':2,\nu'францев':3,\nu'ковязин':6,\nu'кияшко':5,\nu'вяселева':2,\nu'бараник':2,\nu'контарев':3,\nu'кожина':6,\nu'кислов':16,\nu'ахтямова':2,\nu'бида':4,\nu'шароватов':2,\nu'сошникова':2,\nu'грицко':2,\nu'тетерятникова':2,\nu'семионова':2,\nu'чеснакова':2,\nu'луц':2,\nu'саранчина':2,\nu'ребус':2,\nu'самоделкин':5,\nu'глотова':7,\nu'олешкевич':2,\nu'овчарук':3,\nu'сурков':26,\nu'домрачев':6,\nu'майданник':2,\nu'каранкевич':2,\nu'рябухин':6,\nu'павлычев':4,\nu'грудинин':4,\nu'моргунов':11,\nu'пакулин':2,\nu'старусев':2,\nu'горшечников':3,\nu'новицкая':13,\nu'аношкина':2,\nu'кондрашин':9,\nu'сыпков':2,\nu'лукьянова':23,\nu'данильцев':2,\nu'мазурец':2,\nu'ворошилов':4,\nu'матюшенко':3,\nu'королёв':10,\nu'туз':2,\nu'шпаковский':6,\nu'подгорный':7,\nu'савинкин':3,\nu'чинёнов':2,\nu'межиев':2,\nu'лысиков':4,\nu'негодов':2,\nu'барашков':4,\nu'емельянович':2,\nu'сайко':3,\nu'нагорнов':10,\nu'окишева':2,\nu'хижняков':3,\nu'карамышев':15,\nu'назарян':2,\nu'лялина':3,\nu'букин':13,\nu'мотова':3,\nu'суглобов':3,\nu'буланова':6,\nu'бадеева':2,\nu'мекеня':2,\nu'нифонтов':2,\nu'кислицын':10,\nu'гильмутдинова':2,\nu'горелышев':2,\nu'стрекалов':3,\nu'бузмаков':4,\nu'юдочкин':2,\nu'зиганшин':7,\nu'сысолятин':2,\nu'иваков':4,\nu'дыдыкин':2,\nu'рыженков':8,\nu'шкунов':3,\nu'сербиненко':3,\nu'каманин':2,\nu'ахатова':2,\nu'дойников':2,\nu'мынкина':2,\nu'хамлов':2,\nu'желудков':5,\nu'герасимчук':7,\nu'малкова':7,\nu'дианова':6,\nu'шатров':8,\nu'рабазанов':2,\nu'кучина':3,\nu'ковалевич':5,\nu'чепкасов':4,\nu'кривоус':3,\nu'коцарева':2,\nu'дума':2,\nu'сащенко':3,\nu'верзаков':2,\nu'ржевская':3,\nu'цыкина':3,\nu'лебедкина':2,\nu'чекин':2,\nu'рогоза':3,\nu'карпец':4,\nu'машкова':8,\nu'шишечкин':2,\nu'бурлак':2,\nu'чижикова':4,\nu'букачаков':2,\nu'сорокун':2,\nu'бауков':3,\nu'осинцев':10,\nu'песня':2,\nu'ларченко':6,\nu'демешин':2,\nu'петрунина':2,\nu'дуброва':2,\nu'лиманова':2,\nu'эркенов':4,\nu'зосимчук':2,\nu'абдулаев':4,\nu'радюк':3,\nu'краснослободцев':2,\nu'скакунов':2,\nu'шелякина':2,\nu'скосырская':2,\nu'папков':6,\nu'хайруллина':9,\nu'кизилов':3,\nu'меньшикова':13,\nu'удычак':2,\nu'ганиев':9,\nu'сапрыкин':14,\nu'кадников':2,\nu'антуфьев':3,\nu'шишков':7,\nu'заверза':2,\nu'гоноченко':2,\nu'смородинов':3,\nu'короткая':3,\nu'самошин':3,\nu'буртный':2,\nu'кибанов':2,\nu'оленников':5,\nu'шутков':4,\nu'лупашко':4,\nu'бороденко':3,\nu'мишкин':3,\nu'шорохова':4,\nu'аношина':3,\nu'федюшкин':4,\nu'масленников':26,\nu'рассказов':8,\nu'марковская':3,\nu'файзрахманова':3,\nu'вещеникин':2,\nu'лесоклинская':2,\nu'алмазов':7,\nu'аллилуева':2,\nu'шатыркин':3,\nu'рюмин':3,\nu'иконникова':2,\nu'курганова':2,\nu'подобедов':2,\nu'кондраков':2,\nu'хромченко':2,\nu'гилязов':3,\nu'красненков':2,\nu'курникова':2,\nu'шишигин':4,\nu'симонов':40,\nu'питько':2,\nu'туранова':3,\nu'одинцова':8,\nu'федченко':5,\nu'мелконян':2,\nu'буйневич':3,\nu'милорадов':2,\nu'зелинский':3,\nu'прокаев':2,\nu'слободской':2,\nu'оверчук':4,\nu'секерин':2,\nu'шебанов':2,\nu'п
ачковский':2,\nu'ашлапов':2,\nu'сараева':5,\nu'ходько':3,\nu'бурнашева':3,\nu'романцова':5,\nu'синев':4,\nu'лищук':3,\nu'рублевский':3,\nu'гребнев':10,\nu'беззубенко':2,\nu'косогоров':4,\nu'белая':6,\nu'котляров':20,\nu'белаш':4,\nu'ерецкий':2,\nu'ромашко':2,\nu'полухин':9,\nu'литюшкин':2,\nu'фишер':3,\nu'очкина':2,\nu'балабанова':5,\nu'вишневская':5,\nu'тютюнникова':2,\nu'кадиев':3,\nu'костенков':3,\nu'атаманов':3,\nu'якубова':3,\nu'шлемин':2,\nu'якомазов':2,\nu'аблов':2,\nu'мачин':3,\nu'бокова':5,\nu'мышкин':5,\nu'федюкин':6,\nu'фалеева':3,\nu'прусакова':5,\nu'цевелев':2,\nu'кортава':3,\nu'чурилина':2,\nu'дудаев':2,\nu'глухова':17,\nu'картавая':2,\nu'питиримов':3,\nu'бало':2,\nu'мусинов':2,\nu'приходько':46,\nu'кубарев':8,\nu'рева':11,\nu'самохин':18,\nu'рясков':3,\nu'герасимова':37,\nu'коробова':3,\nu'путятин':3,\nu'нефидов':2,\nu'мущенко':3,\nu'ныров':2,\nu'строителев':2,\nu'митягин':2,\nu'палеев':2,\nu'потехина':4,\nu'паркин':3,\nu'катаева':8,\nu'васьковский':3,\nu'лиманов':2,\nu'чубаров':7,\nu'холкина':3,\nu'фридман':4,\nu'калистратов':4,\nu'козуб':3,\nu'кочергина':8,\nu'мясоутов':2,\nu'белькова':4,\nu'ильина':55,\nu'рябкова':3,\nu'михель':4,\nu'хвостов':10,\nu'айткулова':2,\nu'езиков':2,\nu'попко':7,\nu'бушмакин':5,\nu'безгодов':3,\nu'матери':2,\nu'ахпашев':2,\nu'жуйкова':2,\nu'джурко':2,\nu'берегова':2,\nu'шаяхметов':5,\nu'толбоев':2,\nu'галуза':3,\nu'акаев':3,\nu'леликов':2,\nu'арсеньева':6,\nu'подрезова':3,\nu'орленко':5,\nu'сосунова':2,\nu'сарницкий':2,\nu'байдиков':3,\nu'мордвинцев':3,\nu'коцюбенко':3,\nu'мишуров':2,\nu'подкорытов':5,\nu'лозина':3,\nu'калинкина':10,\nu'зайчиков':3,\nu'бровченко':4,\nu'петрищева':2,\nu'федорчук':3,\nu'авдеенко':4,\nu'самохотина':2,\nu'косых':7,\nu'суров':8,\nu'бабак':6,\nu'аракелов':3,\nu'лозовой':8,\nu'шило':5,\nu'аксенов':35,\nu'макушкин':3,\nu'саляхов':2,\nu'вязовцев':2,\nu'меновщиков':4,\nu'логунов':11,\nu'надежкина':2,\nu'журавлёва':2,\nu'ванжа':2,\nu'лунёв':4,\nu'хуртин':4,\nu'кадильников':2,\nu'водянова':2,\nu'карцев':7,\nu'фетищев':3,\nu'володченков':2,\nu'полуянова':3,\nu'жирнова':3,\nu'пухов':12,\nu'коровянский':2,\nu'шацкая':2,\nu'федулкин':2,\nu'абдулатипов':2,\nu'дячук':2,\nu'гаврищук':2,\nu'закусилов':3,\nu'багмет':3,\nu'сербин':4,\nu'климанова':3,\nu'еремченко':4,\nu'чеусов':2,\nu'жохов':5,\nu'давыдова':60,\nu'кравченкова':2,\nu'максин':2,\nu'марчук':14,\nu'разуваев':7,\nu'жучкина':3,\nu'булатова':7,\nu'бычихин':3,\nu'докшукин':2,\nu'игумнов':3,\nu'лоза':3,\nu'паршин':30,\nu'гладилин':6,\nu'аверьянова':11,\nu'крымская':2,\nu'абузаров':2,\nu'суриков':10,\nu'усачёв':3,\nu'мажаров':3,\nu'амосов':9,\nu'цахилов':2,\nu'катричко':2,\nu'антипин':8,\nu'колотилин':2,\nu'ладыка':3,\nu'шуликов':2,\nu'лещева':3,\nu'иовлева':2,\nu'прокофьев':23,\nu'камаев':7,\nu'воюев':2,\nu'верескун':4,\nu'коряковский':2,\nu'козаченко':5,\nu'кривошей':3,\nu'мужикова':3,\nu'габбасова':2,\nu'колюшок':2,\nu'каваносян':2,\nu'мурашова':8,\nu'загороднев':3,\nu'хаджиев':2,\nu'витченко':2,\nu'корсун':7,\nu'семин':12,\nu'парамонов':21,\nu'лутков':2,\nu'колдунов':4,\nu'сидорова-бирюкова':2,\nu'болсун':2,\nu'подсекин':3,\nu'дьяконов':15,\nu'мурашева':2,\nu'лужнов':2,\nu'кочетов':20,\nu'кожушко':5,\nu'солодухин':3,\nu'махно':3,\nu'саблина':5,\nu'асеева':5,\nu'шумейко':21,\nu'евстратов':6,\nu'черная':3,\nu'лескова':3,\nu'кан':8,\nu'игумнова':2,\nu'желтоногов':2,\nu'светлицкий':2,\nu'бондарева':18,\nu'лозовский':2,\nu'карпекин':2,\nu'шаимова':2,\nu'аюшев':2,\nu'салина':2,\nu'неговора':2,\nu'зайченко':5,\nu'малеева':2,\nu'шумилов':16,\nu'сидоренкова':2,\nu'копцев':3,\nu'гриш
анович':2,\nu'шаврин':3,\nu'крысина':2,\nu'базилевская':2,\nu'мархаев':2,\nu'кубышкин':5,\nu'мамедова':3,\nu'селянина':2,\nu'страшко':4,\nu'звягина':4,\nu'юнусов':14,\nu'урусов':9,\nu'щербаков':79,\nu'павкин':2,\nu'кубасов':8,\nu'голишевский':2,\nu'захаренков':5,\nu'мирзоева':4,\nu'коренева':6,\nu'вихлянцев':2,\nu'митюшкин':2,\nu'андрусенко':4,\nu'леонова':26,\nu'довыденко':2,\nu'забашта':2,\nu'акимцева':2,\nu'поворознюк':3,\nu'ракович':2,\nu'волгина':2,\nu'арефьев':19,\nu'комарницкий':3,\nu'юденков':2,\nu'кожемякин':7,\nu'лепилкин':2,\n}\n","sub_path":"tools/disclosures_site/declarations/management/commands/surname_freq.py","file_name":"surname_freq.py","file_ext":"py","file_size_in_byte":347041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"279808478","text":"'''\n23. Section structure\nDisplay the section names contained in the article and their levels\n(e.g. 1 for \"== section name ==\").\n'''\nimport re\n\n\ndef extract_sections(pattern):\n    res = []\n    reg = re.compile(pattern)\n    with open('wiki_UK.txt') as f:\n        for line in f:\n            m = reg.match(line)\n            # m = re.search(r'^' + pattern, line)\n            if m:\n                # heading, level = m.group(1, 2)\n                heading = m.group('Heading')\n                level = len(m.group('Level')) - 1\n                res.append((heading, level))\n    return res\n\n\nif __name__ == '__main__':\n    tgt = r'(?P<Level>=+)\\s*(?P<Heading>.+?)\\s*(?P=Level)'\n    print(*extract_sections(pattern=tgt), sep='\\n')\n\n\n''' NOTE\n* Wikipedia sections\n-> https://ja.wikipedia.org/wiki/Help:セクション\n    [[Category:category name]] or [[Category:category name|sort key]]\n\n* search() vs. match()\n-> https://docs.python.org/ja/3/library/re.html#search-vs-match\n    re.match(): checks for a match only at the beginning of the string\n    re.search(): checks for a match at any position in the string\n\n* regular expression syntax\n-> https://docs.python.org/ja/3/library/re.html#regular-expression-syntax\n    * special characters\n        *`(?P=name)`:\n            backreference to a named group\n        * `^`:\n            (caret) matches at the start of the string\n    * special sequences\n        * `\\s`:\n            matches Unicode whitespace characters (this includes [ \\t\\n\\r\\f\\v] and many other characters).\n'''\n","sub_path":"kiyuna/chapter03/knock23.py","file_name":"knock23.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"602500925","text":"import db_libs\nimport MySQLdb\nimport numpy as np\nimport openvpn\nimport pandas as pd\nimport random\nimport sys\nimport timeout\n\nfrom alpha_vantage.timeseries import TimeSeries\nfrom datetime import datetime\nfrom tqdm import tqdm\n\ndate_format = '%Y-%m-%d %H:%M:%S'\n\nsymbol_file = '../tickers/symbol_list.csv'\nexchange_file = '../tickers/exchanges.csv'\nexchange_prefix = '../tickers/{}.csv'\n\nlog_filename = './log/' + datetime.now().strftime('%Y-%m-%d') + '.log'\n\napi_keys = pd.read_csv('alphavantage.csv')['alphavantage_key'].values\n\n\ndef report_error(err_type, symb, err):\n\twith open(log_filename, 'a') as f:\n\t\tf.write(err_type + '\\n')\n\t\tf.write(str(datetime.now()) + '\\n')\n\t\tf.write(symb + '\\n')\n\t\tf.write(str(err) + '\\n')\n\t\tf.write('\\n\\n')\n\treturn False\n\n\ndef connect_ts():\n\treturn TimeSeries(key=random.choice(api_keys), output_format='pandas')\n\n\n@timeout.timeout()\ndef get_intraday(symbol, ts=None):\n\tif ts is None:\n\t\tts = connect_ts()\n\ttry:\n\t\treturn ts.get_intraday(symbol=symbol, interval='1min', outputsize='full')\n\texcept KeyError:\n\t\tprint(symbol)\n\t\traise RuntimeError\n\n\n\ndef update_symbol(symbol, ts=None, conn=None):\n\ttry:\n\t\tdf = get_intraday(symbol, ts)[0]\n\t\tdf['index'] = df.index.map(lambda x: datetime.strptime(x, date_format))\n\n\t\tlatest_date = 
db_libs.get_latest_entry(symbol, conn)\n\t\tlatest_date = latest_date if latest_date is not None else datetime.min\n\t\tmask = df.apply(lambda x: datetime.strptime(x.name, date_format) > latest_date, axis=1)\n\n\t\tdb_libs.insert_1min_table(np.insert(df[mask].values, 0, symbol, axis=1))\n\n\t\treturn True\n\texcept (ValueError) as e:\n\t\treturn report_error('ValueError', symbol, e)\n\n\texcept (RuntimeError) as e:\n\t\treturn report_error('RuntimeError', symbol, e)\n\n\texcept (timeout.TimeoutError) as e:\n\t\treturn report_error('TimeoutError', symbol, e)\n\n\texcept Exception as e:\n\t\treturn report_error('Unknown Error', symbol, e)\n\n\n\ndef build_symbol_file():\n\texchanges = pd.read_csv(exchange_file)\n\tsymbol_list = [\n\t\t\t\t\t{'listing' : symbol, 'exchange' : xchang} \n\t\t\t\t\tfor xchang in pd.read_csv(exchange_file)['name'].values\n\t\t\t\t\tfor symbol in pd.read_csv(exchange_prefix.format(xchang))['Symbol'].values \n\t\t\t\t\tif '^' not in symbol and '.' not in symbol\n\t\t\t\t]\n\n\tsymbol_df = pd.DataFrame(symbol_list)\n\tsymbol_df.to_csv(symbol_file)\n\n\ndef run():\n\tprint(62)\n\tconn = db_libs.create_connection()\n\n\tsymbol_list = np.unique(pd.read_csv(symbol_file)['listing'].values)\n\tnp.random.shuffle(symbol_list)\n\n\tfor i,symbol in tqdm(enumerate(symbol_list)):\n\t\tprint(i,symbol)\n\t\tts = connect_ts()\n\t\tif not update_symbol(symbol, connect_ts(), conn):\n\t\t\tts = connect_ts()\n\t\tif i % 5 == 0:\n\t\t\topenvpn.change_server()\n\t\t\tts = connect_ts()\n\n\nif __name__ == '__main__':\n\tif openvpn.change_server():\n\t\trun()\n\telse:\n\t\tprint(78)\n\tprint(79)","sub_path":"alphavantage_libs.py","file_name":"alphavantage_libs.py","file_ext":"py","file_size_in_byte":2745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"236400460","text":"import inspect\n\nclass VoidReturn(object):\n    def __nonzero__(self):\n        return False\n\nclass ProgramPoint(object):\n    ENTER = \"ENTER\"\n    EXIT = \"EXIT\"\n    LOOP = \"LOOP\"\n    RETURN = \"RETURN\"\n\nclass VariableTrace(object):\n    def __init__(self):\n        self.variables = {\n            ProgramPoint.ENTER: None,\n            ProgramPoint.EXIT: {},\n            ProgramPoint.LOOP: {},\n            ProgramPoint.RETURN: {}\n        }\n\n    def add(self, program_point, variables, lineno):\n        if program_point == ProgramPoint.ENTER:\n            self.variables[program_point] = variables\n        else:\n            self.variables[program_point][lineno] = variables\n\n__variable_trace = VariableTrace()\n\ndef log_variables(point_name):\n    func_frame_record = inspect.stack()[1]\n    func_frame = func_frame_record[0]\n    variables = inspect.getargvalues(func_frame)\n    __variable_trace.add(point_name, variables, func_frame_record[2])\n\ndef log_return(retval):\n    func_frame_record = inspect.stack()[1]\n    __variable_trace.add(ProgramPoint.RETURN, retval, func_frame_record[2])\n    return retval\n","sub_path":"qualiPy/inject/execution.py","file_name":"execution.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"222879799","text":"#!/usr/bin/env python\nfrom flask import url_for, g\nfrom okno_service.resources.base import EPBase\nfrom okno_service.resources.authentication import auth\n\nclass EPAccount(EPBase):\n    @auth.login_required\n    def get(self):\n        return super(EPAccount, self).get(\n            {'Name': g.user.imie_1,\n             'Lastname': g.user.nazwisko,\n             'email': g.user.email,\n             'phone': g.user.telefon,\n             'birth': '%s' % g.user.data_urodzenia,\n             'active': g.user.aktywny,\n             'uri': 
url_for('eptoken').lower()})","sub_path":"service/resources/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
{"seq_id":"295041318","text":"import smbus\nimport sys\nimport threading\nimport time\nimport queue\nimport math\n\nI2C_NUM = 1 # device at /dev/i2c-1\nDEVICE_ADDRESS = 0x68 # MPU 6050 at address 0x68 of /dev/i2c-1\n\n# address of registers\nGYRO_XOUT_L = 0x44\nGYRO_XOUT_H = 0x43\nGYRO_YOUT_L = 0x46\nGYRO_YOUT_H = 0x45\nGYRO_ZOUT_L = 0x48\nGYRO_ZOUT_H = 0x47\nACC_XOUT_L = 0x3c\nACC_XOUT_H = 0x3b\nACC_YOUT_L = 0x3e\nACC_YOUT_H = 0x3d\nACC_ZOUT_L = 0x40\nACC_ZOUT_H = 0x3f\nPOWER_MGMT_1 = 0x6b\n\n# some constants\nGYRO_DIV_SCALE_FACTOR = 131\nACC_DIV_SCALE_FACTOR = 16384\n\nbus = smbus.SMBus(I2C_NUM)\n\n\n################## basic functions #########################\n\ndef get_data(register_address):\n    \"\"\" Reads a word from 'DEVICE_ADDRESS' and uses 'bus'.\n    'register_address' is that of the least significant byte.\n    \"\"\"\n    bus.write_byte_data(DEVICE_ADDRESS, POWER_MGMT_1, 0)\n    lsb = bus.read_byte_data(DEVICE_ADDRESS, register_address) # lsb is the least significant byte\n    msb = bus.read_byte_data(DEVICE_ADDRESS, register_address - 1) # msb is the most significant byte. -1 because big endian\n    word_value = (msb << 8) + lsb\n    if (word_value >= 0x8000):\n        word_value = word_value - 65536 # convert from 2's complement\n    return word_value\n\ndef get_scaled_acc_reading(axis):\n    \"\"\" Uses constants defined in the beginning of the file\n    for scaling\n    \"\"\"\n    if (axis == \"x\"):\n        reading = get_data(ACC_XOUT_L)\n    elif (axis == \"y\"):\n        reading = get_data(ACC_YOUT_L)\n    elif (axis == \"z\"):\n        reading = get_data(ACC_ZOUT_L)\n    else:\n        print(\"Unexpected argument to get_acc(axis) received\")\n        sys.exit()\n    return reading / ACC_DIV_SCALE_FACTOR\n\ndef get_scaled_gyro_reading(axis):\n    \"\"\" Uses constants defined in the beginning of the file\n    for scaling\n    \"\"\"\n    if (axis == \"x\"):\n        reading = get_data(GYRO_XOUT_L)\n    elif (axis == \"y\"):\n        reading = get_data(GYRO_YOUT_L)\n    elif (axis == \"z\"):\n        reading = get_data(GYRO_ZOUT_L)\n    else:\n        print(\"Unexpected argument to get_gyro(axis) received\")\n        sys.exit()\n    return reading / GYRO_DIV_SCALE_FACTOR\n\ndef get_inclination_using_acc(axis):\n    \"\"\" Uses accelerometer readings along all axes to obtain\n    inclination along required axis\n    \"\"\"\n    def root_of_squares(a, b):\n        return math.sqrt((a*a) + (b*b))\n\n    acc_x = get_scaled_acc_reading('x')\n    acc_y = get_scaled_acc_reading('y')\n    acc_z = get_scaled_acc_reading('z')\n\n    if (axis == 'x'):\n        return math.degrees(math.atan2(acc_x, root_of_squares(acc_y, acc_z)))\n    elif (axis == 'y'):\n        return math.degrees(math.atan2(acc_y, root_of_squares(acc_z, acc_x)))\n    elif (axis == 'z'):\n        return math.degrees(math.atan2(acc_z, root_of_squares(acc_x, acc_y)))\n    else:\n        print(\"Unexpected argument to get_inclination_using_acc(axis) received\")\n        sys.exit()\n\n#########################################################\n\n##################### classes ###########################\n\nclass AngularAccFetcher:\n    \"\"\" Instance of this can be used to fetch angular\n        acceleration along three axes\n        Instance variables are (no class variables exist here)\n        n\n            Denotes the size of array 'inst_ang_acc'\n        inst_ang_acc[n]\n            This array stores the values of angular acceleration\n            calculated at intervals of 'ang_vel_read_interval'\n            Average of all the elements in this array is used when\n            angular acceleration is 
 ang_vel_read_interval\n Two angular velocity readings (a1, a2) are obtained with time\n difference equal to 'ang_vel_read_interval' seconds, and then\n an element is added/substituted in the array 'inst_ang_acc'\n with value equal to (a2 - a1) / 'ang_vel_read_interval'\n compute_thread\n The thread which represents the calculation of instantaneous\n angular acceleration and puts them in the array 'inst_ang_acc'\n should_compute\n This Boolean variable when False terminates the loop in function\n '_compute' which calculates instantaneous angular\n acceleration and puts them in the array 'inst_ang_acc'\n \"\"\"\n\n def __init__(self, n = 100, ang_vel_read_interval = 0.02):\n self.n = n\n self.inst_ang_acc = [{} for _ in range(n)] # n independent dicts; [{}] * n would alias one dict n times\n self.ang_vel_read_interval = ang_vel_read_interval\n self.compute_thread = threading.Thread(target = AngularAccFetcher._compute, args = (self,))\n self.should_compute = False\n\n def _compute(self):\n i = 0\n print('should_compute', self.should_compute)\n while (self.should_compute):\n x1 = get_scaled_gyro_reading('x')\n y1 = get_scaled_gyro_reading('y')\n z1 = get_scaled_gyro_reading('z')\n time.sleep(self.ang_vel_read_interval)\n x2 = get_scaled_gyro_reading('x')\n y2 = get_scaled_gyro_reading('y')\n z2 = get_scaled_gyro_reading('z')\n self.inst_ang_acc[i] = {\n 'x': ((x2 - x1) / self.ang_vel_read_interval),\n 'y': ((y2 - y1) / self.ang_vel_read_interval),\n 'z': ((z2 - z1) / self.ang_vel_read_interval)\n }\n i = ((i + 1) % (self.n))\n\n def start(self):\n self.should_compute = True\n self.compute_thread.start()\n\n def stop(self):\n self.should_compute = False\n\n def get_ang_acc(self):\n avx = 0\n avy = 0\n avz = 0\n valid = 0\n for i in self.inst_ang_acc:\n if (('x' in i) and ('y' in i) and ('z' in i)):\n avx += i['x']\n avy += i['y']\n avz += i['z']\n valid += 1\n if (valid == 0):\n return {'x': 0.0, 'y': 0.0, 'z': 0.0}\n # average over the slots that have been filled, not over n, so the\n # result is not biased towards zero before the ring buffer fills up\n return {\n 'x': avx / valid,\n 'y': avy / valid,\n 'z': avz / valid\n }\n\nclass InclinationFetcher:\n \"\"\" filter_gyro_weight\n A complementary filter is used to compute inclinations.\n 'filter_gyro_weight' is the weight associated with the\n component that uses gyro readings to calculate current\n inclination. 1 - 'filter_gyro_weight' is the weight associated\n with the component that uses accelerometer readings to\n calculate current inclination. A linear combination of these\n two is used to obtain current inclination\n loop_interval\n The time interval between successive computations of inclination\n data. Suppose inclination data is obtained at some instant 't'.\n Next inclination data is computed at approximately 't + loop_interval'\n max_queue_size\n Maximum size of the queue. This should be selected such that\n the queue doesn't get full between successive 'get_inclination'\n calls to prevent loss of data. Loss of data would occur because\n when queue gets full, '_compute' waits until a free slot is available\n to put in the next data element, and during this waiting inclination\n data is not obtained and hence this data is lost.\n compute_thread\n The thread which represents the calculation of inclination\n along all axes and puts them in the queue 'inclinations'\n should_compute\n This Boolean variable when False terminates the loop in function\n '_compute' which calculates inclination along all axes\n and puts them in the queue 'inclinations'\n \"\"\"\n\n def __init__(self, filter_gyro_weight = 0.7, loop_interval = 0.0001, max_queue_size = 100000):\n self.filter_gyro_weight = filter_gyro_weight\n self.loop_interval = loop_interval\n self.max_queue_size = max_queue_size\n self.should_compute = False\n self.inclinations = queue.Queue(maxsize = max_queue_size)\n self.compute_thread = threading.Thread(target = InclinationFetcher._compute, args = (self,))\n\n def _compute(self):\n curr_inclination = {\n 'x': get_inclination_using_acc('x'),\n 'y': get_inclination_using_acc('y'),\n 'z': get_inclination_using_acc('z')\n }\n while (self.should_compute):\n for axis in curr_inclination:\n if (axis == 'x'):\n req_axis = 'y'\n elif (axis == 'y'):\n req_axis = 'z'\n else:\n req_axis = 'x'\n curr_inclination[axis] = (self.filter_gyro_weight * (curr_inclination[axis] + (get_scaled_gyro_reading(req_axis) * self.loop_interval))) + ((1 - self.filter_gyro_weight) * (get_inclination_using_acc(axis)))\n # Put a copy: curr_inclination is mutated in place each loop, so queueing\n # the dict itself would let later updates corrupt already-queued entries.\n # Will block until free slot available in queue, if queue is full.\n self.inclinations.put(dict(curr_inclination))\n time.sleep(self.loop_interval)\n\n def start(self):\n self.should_compute = True\n self.compute_thread.start()\n\n def stop(self):\n self.should_compute = False\n\n def get_inclination(self):\n # will block until queue has some element, if queue doesn't have one\n return self.inclinations.get()\n\n\n#########################################################\n","sub_path":"read_modified.py","file_name":"read_modified.py","file_ext":"py","file_size_in_byte":8472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"631346092","text":"#-*-coding:utf-8-*-\nfrom PyQt4.QtGui import *\nimport matplotlib.pyplot as plt\nimport matplotlib.legend as legend\nimport csv\nimport sys\nimport os\n\n#Initial Setting\n#NUM=sys.argv[1]\n\nclass Window(QWidget):\n\t\n\tdef __init__(self,coin_dic,num):\n\t\tQWidget.__init__(self)\n\t\tself.list = QListWidget(self)\n\t\tlayout = QVBoxLayout(self)\n\t\tlayout.addWidget(self.list)\n\t\tself.select_list=[]\n\t\tself.cap_list=[]\n\t\tself.dic={}\n\t\tself.coin_dic=coin_dic\n\t\tself.date_list=[]\n\t\tself.num=num\n\n\tdef addListItem(self, text):\n\t\titem = QListWidgetItem(text)\n\t\tself.list.addItem(item)\n\t\twidget = QWidget(self.list)\n\t\tbutton = QToolButton(widget)\n\t\tlayout = QHBoxLayout(widget)\n\t\tlayout.setContentsMargins(0, 0, 0, 0)\n\t\tlayout.addStretch()\n\t\tlayout.addWidget(button)\n\t\tself.list.setItemWidget(item, widget)\n\t\tbutton.clicked[()].connect(lambda: self.handleButtonClicked(item))\n\n\tdef handleButtonClicked(self, item):\n\t\titem=item.text()\n\t\tself.select_list.append(item)\n\t\tself.select_list = list(set(self.select_list))\n\t\tprint(self.select_list)\n\t\t\n\t\tif len(self.select_list)==int(self.num):\n\t\t\tself.show_cap_graph()\n\t\t\t\n\tdef show_cap_graph(self):\n\t\tcap=[]\n\t\t\n\t\tfile_list=os.listdir('data')\n\t\tfor file in file_list:\n\t\t\ttmp=file.split('_')\n\t\t\ttmp=tmp[2].replace('.csv','')\n\t\t\tdate=tmp.replace(',','/')\n\t\t\tself.date_list.append(date)\n\t\t\n\t\tselected_index=[]\n\t\tfor i,v in enumerate(self.select_list):\n\t\t\tindex=v.split()[0]\n\t\t\tindex=str(index)\n\t\t\tselected_index.append(str(index))\n\t\tselected_index.sort()\n\t\t\n\t\tfor v in selected_index:\n\t\t\tself.dic[v]=''\n\t\t\t\n\t\tfor i,file in enumerate(file_list):\n\t\t\tf=open('data\\\\'+file, 'r')\n\t\t\tline=csv.reader(f)\n\t\t\t\n\t\t\tfor l in line:\n\t\t\t\tif l[0] in selected_index:\n\t\t\t\t\tif type(self.dic[l[0]]) != type(list()):\n\t\t\t\t\t\tself.dic[l[0]]=list()\n\t\t\t\t\ttmp=l[3].split()\n\t\t\t\t\t'''\n\t\t\t\t\tif tmp[1]=='조':\n\t\t\t\t\t\tcap=float(tmp[0])*1000000000000\n\t\t\t\t\tif tmp[1]=='십억':\n\t\t\t\t\t\tcap=float(tmp[0])*1000000000\n\t\t\t\t\tif tmp[1]=='백만':\n\t\t\t\t\t\tcap=float(tmp[0])*1000000\n\t\t\t\t\t'''\t\n\t\t\t\t\t# Korean magnitude words: '조' = trillion, '십억' = billion, '백만' = million;\n\t\t\t\t\t# the active branch below keeps market caps in units of millions\n\t\t\t\t\tif tmp[1]=='조':\n\t\t\t\t\t\tcap=float(tmp[0])*1000000\n\t\t\t\t\tif tmp[1]=='십억':\n\t\t\t\t\t\tcap=float(tmp[0])*1000\n\t\t\t\t\tif tmp[1]=='백만':\n\t\t\t\t\t\tcap=float(tmp[0])*1\n\t\t\t\t\t\t\n\t\t\t\t\tself.dic[l[0]].append(cap)\n\t\t\n\t\t#get selected_coin_list's market cap\n\t\t#print(self.dic)\n\t\t\n\t\t#graph_x=['12/12','12/13','12/14','12/15']\n\t\tgraph_x=self.date_list\n\t\tlength=len(self.dic)\n\t\tfor k,v in self.dic.items():\n\t\t\tcoin_name=self.coin_dic[k]\n\t\t\tgraph_y=v #list\n\t\t\tplt.plot(graph_x,graph_y,label=coin_name)\n\t\t\tplt.xlabel('date')\n\t\t\tplt.ylabel('Market Cap')\n\t\t\tplt.title('Coin Market Cap')\n\t\t\tplt.legend(bbox_to_anchor=(1.1,1.05))\n\t\tplt.show()\n\t\ndef get_coin_list():\n\tcoin_list=[]\n\tf=open('coin_list.csv', 'r')\n\tline=csv.reader(f)\n\t\n\tcount=0\n\tfor i,v in enumerate(line):\n\t\tif i==0:\n\t\t\tpass\n\t\telse:\n\t\t\tlabel=v[0]+' '+v[1]+'('+v[2]+')' # 'label', not 'str', to avoid shadowing the builtin\n\t\t\tcoin_list.append(label)\n\t\t\tcount+=1\n\t\t\n\t\tif count>30:\n\t\t\tbreak\n\t\n\treturn coin_list\n\t\t\t\ndef main():\n\t\n\tnum=sys.argv[1]\n\tcoin_list=get_coin_list()\n\tcoin_dic={}\n\tfor coin in coin_list:\n\t\ttmp=coin.split()\n\t\tcoin_dic[tmp[0]]=tmp[1]\n\t\n\tapp = QApplication(sys.argv)\n\twindow = Window(coin_dic,num)\n\tfor label in coin_list:\n\t\twindow.addListItem(label)\n\twindow.setGeometry(500, 300, 300, 200)\n\twindow.show()\n\tsys.exit(app.exec_())\n\t\n\t\nif __name__ == '__main__':\n\tmain()","sub_path":"CoinCrawling/cap_graph.py","file_name":"cap_graph.py","file_ext":"py","file_size_in_byte":3225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"145706539","text":"import math\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\n\ndef plot(px, py):\n plt.plot(px, py)\n ax = plt.gca()\n ax.spines['right'].set_color('none')\n ax.spines['top'].set_color('none')\n ax.xaxis.set_ticks_position('bottom')\n ax.spines['bottom'].set_position(('data',0))\n ax.yaxis.set_ticks_position('left')\n ax.spines['left'].set_position(('data',0))\n plt.show()\n\n\ndef main():\n # Init\n x = []\n dx = -20\n while dx <= 20:\n x.append(dx)\n dx += 0.1\n\n # Use sigmoid() function\n px = [xv for xv in x]\n print(px)\n py = [sigmoid(xv) for xv in x]\n print(py)\n\n # Plot\n plot(px, py)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"ML_2021/Regression/Lesson/Sigmoid.py","file_name":"Sigmoid.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
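A side note on the Sigmoid.py record above: evaluating 1 / (1 + np.exp(-x)) directly overflows inside np.exp once x is a large negative number. A minimal numerically stable sketch, assuming array input; the name stable_sigmoid is my own, not part of the record:

import numpy as np

def stable_sigmoid(x):
    # Split on sign so np.exp only ever sees non-positive arguments.
    x = np.asarray(x, dtype=float)
    out = np.empty_like(x)
    pos = x >= 0
    out[pos] = 1.0 / (1.0 + np.exp(-x[pos]))
    expx = np.exp(x[~pos])  # x[~pos] < 0, so expx lies in (0, 1) and cannot overflow
    out[~pos] = expx / (1.0 + expx)
    return out

Both branches are algebraically equal to 1 / (1 + exp(-x)); the split only changes which exponential is evaluated, so neither branch can overflow.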
+{"seq_id":"460599982","text":"#!/usr/local/bin/python3\nimport srt\nimport argparse\nfrom datetime import timedelta\n\ndef main():\n \"\"\"Main routine.\n\n This function allows for an .srt file to be created from the model prediction\n text file. The predictions can then be viewed in sync with the dashcam footage\n by setting the .srt as the subtitle file of the video.\n \"\"\"\n\n parser = argparse.ArgumentParser(description=\"Subtitle Renderer\")\n parser.add_argument(\"-i\", \"--input\", type=str, default=\"\", help=\"Path to speed predictions text.\")\n parser.add_argument(\"-o\", \"--output\", type=str, default=\"\", help=\"Output file name.\")\n\n args = parser.parse_args()\n filepath, output_path = args.input, args.output\n\n with open(filepath) as file:\n raw = file.read()\n result = raw.split(\"\\n\")\n\n subtitles = []\n td = timedelta(microseconds=50000)\n cur_time = timedelta(microseconds=0)\n for i, line in enumerate(result):\n subtitles.append(srt.Subtitle(i, cur_time, cur_time + td, line))\n cur_time = cur_time + td\n\n with open(output_path, 'w') as f:\n f.write(srt.compose(subtitles))\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"render_subs.py","file_name":"render_subs.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"347164978","text":"ones = ['one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine']\ntens = ['ten', 'twenty', 'thirty', 'forty', 'fifty', 'sixty', 'seventy', 'eighty', 'ninety']\nteens = ['eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen', 'sixteen', 'seventeen', 'eighteen', 'nineteen']\n\nans = 0\nfor n in range(1, 1001):\n is_top_digit = True\n remain = n\n\n th = int(remain / 1000)\n if th > 0:\n ans += (len(ones[th - 1]) + len('thousand'))\n is_top_digit = False\n remain = remain % 1000\n\n hu = int(remain / 100)\n if hu > 0:\n if not is_top_digit:\n ans += len('and')\n ans += (len(ones[hu - 1]) + len('hundred'))\n is_top_digit = False\n remain = remain % 100\n\n ten = int(remain / 10)\n one = remain % 10\n if ten == 1:\n if not is_top_digit:\n ans += len('and')\n if one > 0:\n ans += len(teens[one - 1])\n else:\n ans += len(tens[ten - 1])\n else:\n if ten >= 2:\n if not is_top_digit:\n ans += len('and')\n ans += len(tens[ten - 1])\n if one > 0:\n ans += len(ones[one - 1])\n else:\n if one > 0:\n if not is_top_digit:\n ans += len('and')\n ans += len(ones[one - 1])\n\nprint(ans)\n","sub_path":"p0001-0050/p0017/p0017.py","file_name":"p0017.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"465403874","text":"import os\n\n#EX1: Calculate sum of size all .py files in your working directory. 
Convert in in MegaBytes.\ndef ex1():\n\tfiles = os.scandir('.')\n\tlis = []\n\tlis2 = []\n\tfor i in files:\n\t\ta = i.name\n\t\tif a[-3:] == '.py':\n\t\t\tlis.append(a)\n\t\t\ty = i.stat()\n\t\t\tlis2.append(y.st_size)\n\n\tprint(f'There are: {len(lis)} .py files',\n\t\tf'\\nSum size in MegaBytes: {str(sum(lis2)/1000000)}')\n\n#EX2: Find name of last modified file in your current working directory.\ndef ex2():\n\tlis = []\n\tfor i in os.scandir('.'):\n\t\ti = i.stat()\n\t\tlis.append(i.st_mtime)\n\t\tlis.sort()\n\tfor n in os.scandir('.'):\n\t\tns = n.stat()\n\t\tif ns.st_mtime == lis[-1]:\n\t\t\tprint(f'Last modified file: {n.name} {lis[-1]}')\n\n#EX3: Find name of last accessed filed your current working directory.\ndef ex3():\n\tlis = []\n\tfor i in os.scandir('.'):\n\t\ti = i.stat()\n\t\tlis.append(i.st_atime)\n\t\tlis.sort()\n\tfor n in os.scandir('.'):\n\t\tns = n.stat()\n\t\tif ns.st_atime == lis[-1]:\n\t\t\tprint(f'Last accessed file: {n.name} {lis[-1]}')\n\n#EX4: Create my_data_folder\ndef ex4():\n\tdef datacolector():\n\t\twhile True:\n\t\t\tname = input('Name: ')\n\t\t\tif name.isalpha():\n\t\t\t\tage = input('Age: ')\n\t\t\t\tif age.isdigit():\n\t\t\t\t\toccupation = input('Occupation: ')\n\t\t\t\t\treturn {'name':name, 'age':age, 'occupation':occupation}\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tprint('ERROR - Incorect Age Format')\n\t\t\telse:\n\t\t\t\tprint('ERROR - Incorect Name Format !')\n\n\tdata = datacolector()\n\tname = data['name']\n\tage = data['age']\n\toccupation = data['occupation']\n\n\tos.mkdir('my_data_folder')\n\tos.chdir('my_data_folder')\n\n\twrite = open('client_data.txt', 'w')\n\twrite.writelines([f'Name: {name}',\n\t\tf'\\nAge: {age}',\n\t\tf'\\nOccupation: {occupation}'])\n\n\tprint('Data folder created and updated successefuly !')\n\nwhile True:\n\texercitiul = input('Alegeti exercitiul 1-4: ')\n\tif exercitiul == '1':\n\t\tprint('\\nTask - Calculate sum size of all .py files in your working directory. 
Convert in MegaBytes.')\n\t\tex1()\n\t\tprint('')\n\tif exercitiul == '2':\n\t\tprint('\\nTask - Find name of last modified file in your current working directory.')\n\t\tex2()\n\t\tprint('')\n\tif exercitiul == '3':\n\t\tprint('\\nTask - Find name of last accessed filed your current working directory.')\n\t\tex3()\n\t\tprint('')\n\tif exercitiul == '4':\n\t\tprint('\\nTask - Create my_data_folder with user input data.')\n\t\tex4()\n\t\tprint('')\n\tif exercitiul == 'exit':\n\t\tbreak\n\tprint('Pentru a iesi, tapati \"exit\"')","sub_path":"EXL7.py","file_name":"EXL7.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"492894558","text":"# This file writes a csv containing player info and inventory\nimport csv\n\n\nclass File():\n def __init__(self, filename, writedinfo):\n # super().__init(self, fname, lname, race, gender, description, health=100, renown=0, injuries=None):\n self.filename = filename\n self.writedinfo = writedinfo\n\n def writechar(self):\n with open(self.filename, mode=\"w\") as file:\n fieldnames = [\"fname\", \"lname\", \"race\", \"gender\", \"description\", \"health\", \"renown\", \"injuries\"]\n file_writer = csv.DictWriter(file, fieldnames=fieldnames)\n file_writer.writeheader()\n file_writer.writerow(self.writedinfo)\n","sub_path":"Write.py","file_name":"Write.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"} +{"seq_id":"319971867","text":"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the BSD 3-Clause License (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Copyright (c) Open-MMLab. 
All rights reserved.\nimport functools\nimport os\nimport subprocess\n\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\n\n\ndef init_dist(launcher, backend='nccl', **kwargs):\n if mp.get_start_method(allow_none=True) is None:\n mp.set_start_method('spawn')\n if launcher == 'pytorch':\n _init_dist_pytorch(backend, **kwargs)\n elif launcher == 'mpi':\n _init_dist_mpi(backend, **kwargs)\n elif launcher == 'slurm':\n _init_dist_slurm(backend, **kwargs)\n else:\n raise ValueError('Invalid launcher type: {}'.format(launcher))\n\n\ndef _init_dist_pytorch(backend, **kwargs):\n # TODO: use local_rank instead of rank % num_gpus\n local_rank = int(os.environ['RANK'])\n # npu diff\n # fox:根据GPU与NPU环境设置分布式代码\n if torch.cuda.is_available():\n num_gpus = torch.cuda.device_count()\n rank = local_rank\n torch.cuda.set_device(local_rank % num_gpus)\n else:\n offset = 0 if os.getenv('NPUID', None) is None else int(os.environ['NPUID'])\n num_gpus = int(os.environ['RANK_SIZE'])\n rank = local_rank + offset\n torch.npu.set_device(rank % num_gpus)\n world_size = num_gpus if os.getenv(\"WORLD_SIZE\", None) is None else int(os.environ[\"WORLD_SIZE\"])\n\n dist.init_process_group(backend=backend, world_size=world_size, rank=rank)\n\n\ndef _init_dist_mpi(backend, **kwargs):\n raise NotImplementedError\n\n\ndef _init_dist_slurm(backend, port=29500, **kwargs):\n proc_id = int(os.environ['SLURM_PROCID'])\n ntasks = int(os.environ['SLURM_NTASKS'])\n node_list = os.environ['SLURM_NODELIST']\n num_gpus = torch.cuda.device_count()\n torch.cuda.set_device(proc_id % num_gpus)\n addr = subprocess.getoutput(\n 'scontrol show hostname {} | head -n1'.format(node_list))\n os.environ['MASTER_PORT'] = str(port)\n os.environ['MASTER_ADDR'] = addr\n os.environ['WORLD_SIZE'] = str(ntasks)\n os.environ['RANK'] = str(proc_id)\n dist.init_process_group(backend=backend)\n\n\ndef get_dist_info():\n if torch.__version__ < '1.0':\n initialized = dist._initialized\n else:\n if dist.is_available():\n initialized = dist.is_initialized()\n else:\n initialized = False\n if initialized:\n rank = dist.get_rank()\n world_size = dist.get_world_size()\n else:\n rank = 0\n world_size = 1\n return rank, world_size\n\n\ndef master_only(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n rank, _ = get_dist_info()\n if rank == 0:\n return func(*args, **kwargs)\n\n return wrapper\n","sub_path":"PyTorch/contrib/cv/detection/SOLOv1/mmcv/mmcv/runner/dist_utils.py","file_name":"dist_utils.py","file_ext":"py","file_size_in_byte":3243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
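A usage sketch for the dist_utils record above, assuming the module is importable as dist_utils and that a PyTorch launcher (e.g. torchrun) has set RANK and WORLD_SIZE in the environment; the script below is illustrative, not part of the original record:

import torch
import torch.distributed as dist

from dist_utils import init_dist, get_dist_info  # assumed import path

def main():
    # The 'pytorch' launcher path reads RANK from the environment
    # (and RANK_SIZE / NPUID on NPU machines, per the record above).
    init_dist('pytorch', backend='nccl' if torch.cuda.is_available() else 'gloo')
    rank, world_size = get_dist_info()
    print('initialized rank {} of {}'.format(rank, world_size))

    # Smoke test: summing ones across ranks should give world_size everywhere.
    t = torch.ones(1).cuda() if torch.cuda.is_available() else torch.ones(1)
    dist.all_reduce(t)
    assert t.item() == world_size

if __name__ == '__main__':
    main()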