diff --git "a/4579.jsonl" "b/4579.jsonl" new file mode 100644--- /dev/null +++ "b/4579.jsonl" @@ -0,0 +1,779 @@ +{"seq_id":"42535858","text":"def solve(list_num):\n s = []\n for item in list_num:\n s.append([item])\n sub_list = s[:len(s)-1]\n if sub_list:\n for sub_item in sub_list:\n s.append([])\n new_entry = s[len(s)-1]\n for ssub_item in sub_item:\n new_entry.append(ssub_item)\n new_entry.append(item)\n return s\n\nresult = solve([1, 2, 3])\nfor r in result:\n print(r)","sub_path":"combination2.py","file_name":"combination2.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"145081656","text":"# Time Complexity : O(NM) (Where N is total rows and M is toatl columns in the board)\n# Space Complexity : O(1) (We are doing all operations in place)\n# Did this code successfully run on Leetcode : Yes\n# Three line explanation of solution in plain english:\n# - When we change value of cell, we use some kind of marker other than given 2 values for dead and live cell.\n# - Consider the new marker when counting the neightbour, and in the end replace marker with original value.\n\n\nclass Solution:\n# Function to count live neighbours.\n def countNeighbour(self, board, row, col):\n# Storing all directions\n dx = [-1,-1,-1,0,0,1,1,1]\n dy = [-1,0,1,-1,1,-1,0,1]\n# Initialzie count\n count = 0\n# Iterate over all directions that means take all neighbours one by one.\n for i in range(len(dx)):\n# Calculate neighbours row and column.\n nx = row + dx[i]\n ny = col + dy[i]\n# Check that neighbour is valid and cell is live using original value or marker.\n if 0 <= nx < len(board) and 0 <= ny < len(board[0]) and (board[nx][ny] == 1 or board[nx][ny] == -1):\n# If all conditions pass increament the count\n count += 1\n# return total neighbours count.\n return count\n \n \n def gameOfLife(self, board: List[List[int]]) -> None:\n \"\"\"\n Do not return anything, modify board in-place instead.\n \"\"\"\n# Iterate over each cell\n for row in range(len(board)):\n for col in range(len(board[0])):\n# Find no of live neighbours for that cell\n count = self.countNeighbour(board, row, col)\n# Change value fo cell according to the question and put marker for updated cell.\n if board[row][col] == 1:\n if count < 2 or count > 3:\n board[row][col] = -1 # 1 -> 0 : -1\n if board[row][col] == 0:\n if count == 3:\n board[row][col] = 2 # 0 -> 1: 2\n \n# Change back marker value to original value\n for row in range(len(board)):\n for col in range(len(board[0])):\n if board[row][col] == -1:\n board[row][col] = 0\n if board[row][col] == 2:\n board[row][col] = 1\n","sub_path":"Problem3.py","file_name":"Problem3.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"219234271","text":"import glob\nimport os\nimport pandas as pd\n\nfrom PIL import Image\n\nIMG_GLOB = os.environ.get('IMG', '/home/csae8092/Documents/kem_img/kem-img-process/*/*.jpg')\nIMG_LIST = glob.glob(IMG_GLOB)\nGESAMT_DF = pd.read_csv('gesamt_liste.csv')\n\n\ndef yield_img_dict(images):\n for x in images:\n item = {}\n item['Dateiname'] = os.path.basename(x)\n item['folder'] = os.path.basename(os.path.split(x)[0])\n with Image.open(x) as image:\n item['width'], item['height'] = image.width, image.height\n yield item\n\n\nimages = sorted(IMG_LIST)\nsize_df = pd.DataFrame(yield_img_dict(images), columns=['Dateiname', 'folder', 'width', 'height'])\nnew = pd.merge(GESAMT_DF, 
size_df)\nnew.to_csv('enriched_gesamt.csv', index=False)\n","sub_path":"enrich.py","file_name":"enrich.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"632661171","text":"from sklearn.mixture import GMM\nfrom Coordinates import *\nimport numpy as np\n\nclass PairPotential:\n\n\tdef __init__(self, initial, final, n_gms):\n\t\tself.angm = None\n\t\t\n\t\tmag, angc = self.transform( initial, final )\n\t\n\t\tgmm = GMM( n_components = n_gms )\n\n\t\tgmm.fit( np.vstack( (mag, angc) ).T )\n\t\tself.gmm = gmm\n\n\tdef transform( self, initial, final):\n\n\t\tdx = final.x - initial.x\n\t\tdy = final.y - initial.y\n\n\t\tmag, angc, angm = centeredPolar( dx, dy, self.angm )\n\n\t\tif self.angm is None:\n\t\t\tself.angm = angm\n\n\t\treturn mag, angc\n\n\tdef eval( self, initial, final ):\n\n\t\tmag, angc = self.transform( initial, final )\n\n\t\treturn self.gmm.eval( np.vstack( (mag, angc) ).T )[0]\n","sub_path":"ver0/pairwise/PairPotential.py","file_name":"PairPotential.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"184540603","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/src/sentry/src/sentry/api/endpoints/project_key_details.py\n# Compiled at: 2019-08-16 17:27:45\nfrom __future__ import absolute_import\nfrom django.db.models import F\nfrom rest_framework import serializers, status\nfrom rest_framework.response import Response\nfrom sentry import features\nfrom sentry.api.base import DocSection\nfrom sentry.api.bases.project import ProjectEndpoint\nfrom sentry.api.exceptions import ResourceDoesNotExist\nfrom sentry.api.fields.empty_integer import EmptyIntegerField\nfrom sentry.api.serializers import serialize\nfrom sentry.models import AuditLogEntryEvent, ProjectKey, ProjectKeyStatus\nfrom sentry.utils.apidocs import scenario, attach_scenarios\nfrom sentry.loader.browsersdkversion import get_default_sdk_version_for_project, get_browser_sdk_version_choices\n\n@scenario('DeleteClientKey')\ndef delete_key_scenario(runner):\n key = runner.utils.create_client_key(runner.default_project)\n runner.request(method='DELETE', path='/projects/%s/%s/keys/%s/' % (\n runner.org.slug, runner.default_project.slug, key.public_key))\n\n\n@scenario('UpdateClientKey')\ndef update_key_scenario(runner):\n key = runner.utils.create_client_key(runner.default_project)\n runner.request(method='PUT', path='/projects/%s/%s/keys/%s/' % (\n runner.org.slug, runner.default_project.slug, key.public_key), data={'name': 'Quite Positive Key'})\n\n\nclass RateLimitSerializer(serializers.Serializer):\n count = EmptyIntegerField(min_value=0, required=False, allow_null=True)\n window = EmptyIntegerField(min_value=0, max_value=86400, required=False, allow_null=True)\n\n\nclass KeySerializer(serializers.Serializer):\n name = serializers.CharField(max_length=200, required=False, allow_blank=True, allow_null=True)\n isActive = serializers.BooleanField(required=False)\n rateLimit = RateLimitSerializer(allow_null=True)\n browserSdkVersion = serializers.ChoiceField(choices=get_browser_sdk_version_choices(), required=False)\n\n\nclass ProjectKeyDetailsEndpoint(ProjectEndpoint):\n doc_section = DocSection.PROJECTS\n\n def get(self, request, project, key_id):\n try:\n key = ProjectKey.objects.get(project=project, 
public_key=key_id, roles=F('roles').bitor(ProjectKey.roles.store))\n except ProjectKey.DoesNotExist:\n raise ResourceDoesNotExist\n\n return Response(serialize(key, request.user), status=200)\n\n def put(self, request, project, key_id):\n \"\"\"\n Update a Client Key\n ```````````````````\n\n Update a client key. This can be used to rename a key.\n\n :pparam string organization_slug: the slug of the organization the\n client keys belong to.\n :pparam string project_slug: the slug of the project the client keys\n belong to.\n :pparam string key_id: the ID of the key to update.\n :param string name: the new name for the client key.\n :auth: required\n \"\"\"\n try:\n key = ProjectKey.objects.get(project=project, public_key=key_id, roles=F('roles').bitor(ProjectKey.roles.store))\n except ProjectKey.DoesNotExist:\n raise ResourceDoesNotExist\n\n serializer = KeySerializer(data=request.data, partial=True)\n default_version = get_default_sdk_version_for_project(project)\n if serializer.is_valid():\n result = serializer.validated_data\n if result.get('name'):\n key.label = result['name']\n if not result.get('browserSdkVersion'):\n key.data = {'browserSdkVersion': default_version}\n else:\n key.data = {'browserSdkVersion': result['browserSdkVersion']}\n if result.get('isActive') is True:\n key.status = ProjectKeyStatus.ACTIVE\n elif result.get('isActive') is False:\n key.status = ProjectKeyStatus.INACTIVE\n if features.has('projects:rate-limits', project):\n ratelimit = result.get('rateLimit', -1)\n if ratelimit is None or ratelimit != -1 and ratelimit and (ratelimit['count'] is None or ratelimit['window'] is None):\n key.rate_limit_count = None\n key.rate_limit_window = None\n elif result.get('rateLimit'):\n key.rate_limit_count = result['rateLimit']['count']\n key.rate_limit_window = result['rateLimit']['window']\n key.save()\n self.create_audit_entry(request=request, organization=project.organization, target_object=key.id, event=AuditLogEntryEvent.PROJECTKEY_EDIT, data=key.get_audit_log_data())\n return Response(serialize(key, request.user), status=200)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n @attach_scenarios([delete_key_scenario])\n def delete(self, request, project, key_id):\n \"\"\"\n Delete a Client Key\n ```````````````````\n\n Delete a client key.\n\n :pparam string organization_slug: the slug of the organization the\n client keys belong to.\n :pparam string project_slug: the slug of the project the client keys\n belong to.\n :pparam string key_id: the ID of the key to delete.\n :auth: required\n \"\"\"\n try:\n key = ProjectKey.objects.get(project=project, public_key=key_id, roles=F('roles').bitor(ProjectKey.roles.store))\n except ProjectKey.DoesNotExist:\n raise ResourceDoesNotExist\n\n self.create_audit_entry(request=request, organization=project.organization, target_object=key.id, event=AuditLogEntryEvent.PROJECTKEY_REMOVE, data=key.get_audit_log_data())\n key.delete()\n return Response(status=204)","sub_path":"pycfiles/sentry-10.0.0-py27-none-any/project_key_details.py","file_name":"project_key_details.py","file_ext":"py","file_size_in_byte":5988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"338480260","text":"import numpy as np\nfrom argparse import ArgumentParser\nfrom dpu_utils.utils import RichPath\n\n\ndef find_best_model(model_type: str, dataset: str, model_folder: RichPath) -> str:\n\n best_loss = 1e7\n best_model = None\n\n model_filter = 
f'model-train-log-{model_type}_rnn_model-{dataset}-*.pkl.gz'\n for model_train_log in model_folder.iterate_filtered_files_in_dir(model_filter):\n train_log = model_train_log.read_by_file_suffix()\n\n best_model_loss = 1e7\n for loss_dict in train_log:\n valid_losses = loss_dict['valid_losses']\n avg_valid_loss = np.average(list(valid_losses.values()))\n\n if avg_valid_loss < best_model_loss:\n best_model_loss = avg_valid_loss\n\n print(model_train_log)\n print(best_model_loss)\n if best_model_loss < best_loss:\n best_loss = best_model_loss\n\n model_train_log_name = model_train_log.path.split('/')[-1]\n model_name = model_train_log_name.replace('-train-log-', '-').replace('.pkl.gz', '')\n best_model = model_name\n\n return best_model\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('--model-types', type=str, nargs='+')\n parser.add_argument('--datasets', type=str, nargs='+')\n parser.add_argument('--model-folder', type=str, required=True)\n args = parser.parse_args()\n\n model_folder = RichPath.create(args.model_folder)\n assert model_folder.exists(), f'The folder {model_folder} does not exist!'\n\n for model_type in args.model_types:\n for dataset in args.datasets:\n best = find_best_model(model_type, dataset, model_folder)\n print(f'Best model for {model_type} and {dataset}: {best}')\n","sub_path":"src/data_preparation/select_best_model.py","file_name":"select_best_model.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"540383198","text":"import vampytest\n\nfrom ....user import User\n\nfrom ..fields import validate_users\n\n\ndef test__validate_users__0():\n \"\"\"\n Tests whether ``validate_users`` works as intended.\n \n Case: passing.\n \"\"\"\n user_id = 202211050022\n user_name = 'Faker'\n \n user = User.precreate(\n user_id,\n name = user_name,\n )\n \n for input_value, expected_output in (\n (None, None),\n ([], None),\n ({}, None),\n ([user], {user_id: user}),\n ({user_id: user}, {user_id: user}),\n ):\n output = validate_users(input_value)\n vampytest.assert_eq(output, expected_output)\n\n\ndef test__validate_users__1():\n \"\"\"\n Tests whether ``validate_users`` works as intended.\n \n Case: raising.\n \"\"\"\n for input_value in (\n 12.6,\n [12.6],\n {12.6: 12.6},\n ):\n with vampytest.assert_raises(TypeError):\n validate_users(input_value)\n","sub_path":"hata/discord/interaction/resolved/tests/test__validate_users.py","file_name":"test__validate_users.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"337391358","text":"import webbrowser\n\nclass Video():\n \"\"\"The abstract class that will be used to construct the Movie & Series Class\"\"\"\n def __init__(self, title, story_line, poster_image_url, trailerURL):\n self.title = title\n self.story_line = story_line\n self.poster_image_url = poster_image_url\n self.trailerURL = trailerURL\n \n\n def showTrailer(self):\n webbrowser.open(self.trailerURL)\n \n\nclass Movie(Video):\n \"\"\" This class provides a way to store movie related info by extending the Video class\"\"\"\n \n def __init__(self, movie_title, movie_story_line, trailerURL,\n poster_image_url, duration):\n \n Video.__init__(self,\n movie_title,\n movie_story_line,\n trailerURL, poster_image_url)\n self.duration = duration\n\nclass Series(Video):\n \"\"\" This class provides a way to store series related info by extending the Video 
class\"\"\"\n\n def __init__(self, movie_title,\n movie_story_line,\n trailerURL, poster_image_url,\n total_seasons, ongoing):\n \n Video.__init__(self, movie_title,\n movie_story_line, trailerURL,\n poster_image_url)\n \n self.total_seasons = total_seasons\n self.ongoing = ongoing\n\n","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"184864450","text":"from random import shuffle\n\nclass Quiz:\n \"\"\"\n A quiz consists of a bank of questions and a unique identifier.\n It is also associated with a specific professor.\n\n Name must be unique due for purposes of uniquely identifying\n specific quizzes.\n \"\"\"\n def __init__(self, prof, name, students, attempts,\n start, end):\n \n self.professor = prof\n self.name = name\n self.questions = []\n self.students = students\n self.attempts = attempts\n self.start = start\n self.end = end\n self.weight = 0\n self.bonusMarks = 0\n self.notes = []\n\n def __str__(self):\n s = self.name + \" \" + self.professor\n return s\n\n def __eq__(self, other):\n return (self.name == other.name and self.attempts == other.attempts \n and self.students == other.students)\n\nclass Question:\n \"\"\"\n A question consists of the text of the question, the correct answer(s),\n the answers collectively and notes indicating modifications made.\n\n Question text must be unique for purposes of uniquely identifying\n specific questions.\n \"\"\"\n def __init__(self, text, correct, options, weight=1):\n ans = correct + options\n shuffle(ans)\n self.text = text\n self.weight = weight\n self.correctAnswers = correct\n self.options = ans\n \n\n def __str__(self):\n s = self.text + ' '\n for ans in self.options:\n s += ' ' + ans\n return s\n\n def __eq__(self, other):\n return (self.text == other.text and self.weight == other.weight and \n self.correctAnswers == other.correctAnswers)\n\nclass Answers:\n \"\"\"\n Provides object with attributes containing information for quiz and answers\n attributes:\n ansAttempts - list of lists\n attemptSubmitted - list\n stuID - string\n profID - string\n quizID - string\n quiz - Quiz object\n currentAttempt - int\n \"\"\"\n def __init__(self, stuID, profID, quizID):\n self.ansAttempts = []\n self.attemptSubmitted = []\n self.stuID = stuID\n self.profID = profID\n self.quizID = quizID\n self.currentAttempt = None\n self.bestAttempt = []","sub_path":"cawadden/Final Project/quiz_tools.py","file_name":"quiz_tools.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"2847650","text":"#!c:/Python34/python.exe\n\n#######################################################################\n# edge enhance the given image\n#\n# Author: Garry Morrison\n# email: garry -at- semantic-db.org\n# Date: 2016-03-05\n# Update: \n# Copyright: GPLv3\n#\n# Usage: ./image_edge_enhance_v2.py image.{png,jpg} [enhance-factor]\n#\n# eg: ./image_edge_enhance_v2.py Lenna.png\n# eg: ./image_edge_enhance_v2.py Lenna.png 40\n#\n#######################################################################\n\n\nimport sys\nfrom PIL import Image # if this line bugs out, you need to install Pillow, a python image library.\n\nif len(sys.argv) < 2:\n print(\"\\nUsage:\")\n print(\" ./image_edge_enhance_v2.py image.{png,jpg} [enhance-factor]\")\n sys.exit(1)\nfilename = sys.argv[1]\n\ntry:\n enhance_factor = int(sys.argv[2])\nexcept:\n enhance_factor = 
20 # set default to 20 iterations of smooth\n\ntry:\n im = Image.open(filename)\nexcept:\n print(\"couldn't open image file:\",filename)\n sys.exit(1)\n\n# implements a Gaussian smooth.\n# the 1D version: f[k] -> f[k-1]/4 + f[k]/2 + f[k+1]/4 rapidly approaches a bell curve if you apply it several times.\n# image_smooth() implements a 2D version of that equation.\n#\ndef image_smooth(image):\n def smooth_pixel(image,w,h):\n pix = image.load()\n r = pix[w-1,h-1][0]/16 + pix[w,h-1][0]/16 + pix[w+1,h-1][0]/16 + pix[w-1,h][0]/16 + pix[w,h][0]/2 + pix[w+1,h][0]/16 + pix[w-1,h+1][0]/16 + pix[w,h+1][0]/16 + pix[w+1,h+1][0]/16\n g = pix[w-1,h-1][1]/16 + pix[w,h-1][1]/16 + pix[w+1,h-1][1]/16 + pix[w-1,h][1]/16 + pix[w,h][1]/2 + pix[w+1,h][1]/16 + pix[w-1,h+1][1]/16 + pix[w,h+1][1]/16 + pix[w+1,h+1][1]/16\n b = pix[w-1,h-1][2]/16 + pix[w,h-1][2]/16 + pix[w+1,h-1][2]/16 + pix[w-1,h][2]/16 + pix[w,h][2]/2 + pix[w+1,h][2]/16 + pix[w-1,h+1][2]/16 + pix[w,h+1][2]/16 + pix[w+1,h+1][2]/16\n return (int(r),int(g),int(b))\n\n width = image.size[0]\n height = image.size[1]\n im2 = image.crop((-1,-1,width + 1,height + 1))\n# im2.show()\n\n out_image = Image.new('RGB',(width,height))\n pixels = out_image.load()\n for h in range(height):\n for w in range(width):\n pixels[w,h] = smooth_pixel(im2,w+1,h+1)\n# out_image.show()\n return out_image\n\n\ndef old_edge_enhance(image,k):\n def pixel_difference(im1,im2,w,h):\n def massage_pixel(x):\n if x < 0:\n x = 0\n x *= 20\n x = int(x)\n if x > 255:\n x = 255\n return 255 - x\n\n pix1 = im1.load()\n pix2 = im2.load()\n r = pix1[w,h][0] - pix2[w,h][0]\n g = pix1[w,h][1] - pix2[w,h][1]\n b = pix1[w,h][2] - pix2[w,h][2]\n\n r = massage_pixel(r)\n g = massage_pixel(g)\n b = massage_pixel(b)\n \n return (r,g,b)\n\n smoothed_image = image\n for _ in range(k):\n smoothed_image = image_smooth(smoothed_image)\n smoothed_image.show()\n\n width = image.size[0]\n height = image.size[1]\n\n out_image = Image.new('RGB',(width,height))\n pixels = out_image.load()\n for h in range(height):\n for w in range(width):\n pixels[w,h] = pixel_difference(smoothed_image,image,w,h)\n return out_image\n\n\n# the old_edge_enhance() had a subtle bug.\n# because image_smooth() was returning an integer based image at each iteration, small features ended up being lost!\n# So I had to completely re-implement the thing, but this time allowing floats at each iteration of smooth.\n# This had a massive improvement in quality.\n# The old way also seemed to converge, in that if you applied k above some threshold, any larger k didn't seem to make much difference.\n# Now, larger k has a noticable improvement.\n# Very happy with the results this thing spits out!!\n#\ndef edge_enhance(image,k):\n width = image.size[0]\n height = image.size[1]\n original_pixels = image.load()\n\n # create an image with a 1*1 border:\n border_image = image.crop((-1,-1,width + 1,height + 1))\n border_pixels = border_image.load()\n\n # load the border_image into 3 image matrices, one for each of R,G,B:\n M_r = [[border_pixels[w,h][0] for w in range(width+2)] for h in range(height+2)]\n M_g = [[border_pixels[w,h][1] for w in range(width+2)] for h in range(height+2)]\n M_b = [[border_pixels[w,h][2] for w in range(width+2)] for h in range(height+2)]\n\n def smooth_pixel(M,w,h):\n r = M[h-1][w-1]/16 + M[h][w-1]/16 + M[h+1][w-1]/16 + M[h-1][w]/16 + M[h][w]/2 + M[h+1][w]/16 + M[h-1][w+1]/16 + M[h][w+1]/16 + M[h+1][w+1]/16\n return r\n\n # smooth our image matrices:\n # NB: we have to work with matrices and not images because we need to 
preserve floats at each step of smooth. Otherwise it harms the algo.\n # first, define some work-space matrices:\n new_M_r = [[0 for w in range(width+2)] for h in range(height+2)]\n new_M_g = [[0 for w in range(width+2)] for h in range(height+2)]\n new_M_b = [[0 for w in range(width+2)] for h in range(height+2)]\n\n # smooth k times:\n for _ in range(k):\n for h in range(height):\n for w in range(width):\n new_M_r[h+1][w+1] = smooth_pixel(M_r,w+1,h+1)\n new_M_g[h+1][w+1] = smooth_pixel(M_g,w+1,h+1)\n new_M_b[h+1][w+1] = smooth_pixel(M_b,w+1,h+1)\n M_r = new_M_r\n M_g = new_M_g\n M_b = new_M_b\n\n def massage_pixel(x):\n if x < 0:\n x = 0\n x *= 20\n x = int(x)\n if x > 255:\n x = 255\n return 255 - x\n\n # output the final matrix into image form:\n out_image = Image.new('RGB',(width,height))\n pixels = out_image.load()\n for h in range(height):\n for w in range(width):\n r = massage_pixel(M_r[h+1][w+1] - original_pixels[w,h][0])\n g = massage_pixel(M_g[h+1][w+1] - original_pixels[w,h][1])\n b = massage_pixel(M_b[h+1][w+1] - original_pixels[w,h][2])\n\n pixels[w,h] = (r,g,b)\n# out_image.show()\n return out_image\n\n\nim2 = edge_enhance(im,enhance_factor)\nim2.show()\n\n# now save it:\nfilename, ext = filename.rsplit('.',1)\nfilename = filename + \"--edge-enhanced-\" + str(enhance_factor) + \".\" + ext\nim2.save(filename)\n\n","sub_path":"tools/image_edge_enhance_v2.py","file_name":"image_edge_enhance_v2.py","file_ext":"py","file_size_in_byte":5833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"255270185","text":"# -*- coding: utf-8 -*-\n\n\nimport os\nfrom oujago.utils.common import is_list\nfrom oujago.utils import DATA_PATH\n\n\nclass ChineseStopWords(object):\n \"\"\"Chinese Stop Words.\n\n You can specify what kind of stop words you will use.\n\n Parameters\n ==========\n modes : str\n If ``all`` or ``ALL``, integrate all stopwords in ``_files/stopwords_zh`` directory.\n If ``hit`` or ``HIT``, use \"hit_stopwords.txt\".\n If ``baidu`` or ``Baidu``, use \"baidu_stopwords.txt\".\n If ``normal``, use \"normal_stopwords.txt\".\n\n \"\"\"\n\n def __init__(self, modes='all'):\n _base_path = os.path.join(DATA_PATH, 'stopwords_zh')\n self._stopwords = set()\n self._path_dict = {'all': os.listdir(_base_path),\n 'hit': 'hit_stopwords.txt',\n 'HIT': 'hit_stopwords.txt',\n 'baidu': \"baidu_stopwords.txt\",\n 'Baidu': \"baidu_stopwords.txt\",\n 'normal': \"normal_stopwords.txt\"}\n\n if not is_list(modes):\n modes = (modes,)\n\n for mode in modes:\n if mode not in self._path_dict:\n raise ValueError(\"Unknown mode: {}. 
Please specify mode \"\n \"using following types: {}.\".format(mode, list(self._path_dict.keys())))\n\n paths = self._path_dict[mode]\n if not is_list(paths):\n paths = (paths,)\n\n for path in paths:\n with open(os.path.join(_base_path, path), encoding='gbk') as fin:\n stopwords = [word.strip() for word in fin.readlines()]\n self._stopwords |= set(stopwords)\n\n def check(self, word):\n \"\"\"Check whether ``word`` is a stop word.\n\n Parameters\n ----------\n word : str\n\n Returns\n -------\n boolean\n True or False\n \"\"\"\n if word in self._stopwords:\n return True\n else:\n return False\n\n\n","sub_path":"oujago/nlp/stopwords.py","file_name":"stopwords.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"450611001","text":"from sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.ensemble import VotingClassifier\nfrom sklearn.ensemble import BaggingClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nimport xgboost as xgb\nfrom sklearn.linear_model import LogisticRegression\nfrom mlxtend.classifier import StackingClassifier\nfrom sklearn import metrics\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport itertools\n\n\nclass Danny_ML_CLF:\n def __init__(self):\n self.X = ''\n self.y = ''\n\n self.svm = ''\n self.tree = ''\n self.bayes = ''\n self.knn = ''\n self.xgb = ''\n self.stacking = ''\n self.voting = ''\n self.bagging = ''\n self.rf = '' # random forest\n self.adaboost = ''\n\n self.svm_pred = ''\n self.tree_pred = ''\n self.bayes_pred = ''\n self.knn_pred = ''\n self.xgb_pred = ''\n self.stacking_pred = ''\n self.voting_pred = ''\n self.bagging_pred = ''\n self.rf_pred = ''\n self.adaboost_pred = ''\n\n self.svm_report = ''\n self.tree_report = ''\n self.bayes_report = ''\n self.knn_report = ''\n self.xgb_report = ''\n self.stacking_report = ''\n self.voting_report = ''\n self.bagging_report = ''\n self.rf_report = ''\n self.adaboost_report = ''\n\n self.svm_cm = ''\n self.tree_cm = ''\n self.bayes_cm = ''\n self.knn_cm = ''\n self.xgb_cm = ''\n self.stacking_cm = ''\n self.voting_cm = ''\n self.bagging_cm = ''\n self.rf_cm = ''\n self.adaboost_cm = ''\n\n self.svm_score = ''\n self.tree_score = ''\n self.bayes_score = ''\n self.knn_score = ''\n self.xgb_score = ''\n self.stacking_score = ''\n self.voting_score = ''\n self.bagging_score = ''\n self.rf_score = ''\n self.adaboost_score = ''\n\n def Fit_value(self, x, y):\n self.X = x\n self.y = y\n\n def Split_data(self,raw_X, raw_y, test_size, Standard=True):\n train_X, test_X, train_y, test_y = train_test_split(raw_X, raw_y, test_size=test_size, shuffle=True)\n if Standard:\n sc = StandardScaler()\n sc.fit(train_X)\n train_X = sc.transform(train_X)\n test_X = sc.transform(test_X)\n self.X = train_X\n self.y = train_y\n return train_X, test_X, train_y, test_y\n\n def SVM(self,C=1,kernel='rbf'):\n self.svm = SVC(C=C,kernel=kernel, degree=5, probability=True)\n self.svm.fit(self.X, self.y)\n def SVM_predict(self,pred_x):\n self.svm_pred = self.svm.predict(pred_x)\n return self.svm_pred\n\n def Tree(self,criterion='gini', max_depth=5):\n self.tree = DecisionTreeClassifier(criterion=criterion,max_depth=max_depth)\n self.tree.fit(self.X, 
self.y)\n def Tree_predict(self, pred_x):\n self.tree_pred = self.tree.predict(pred_x)\n return self.tree_pred\n\n def Bayes(self):\n self.bayes = GaussianNB()\n self.bayes.fit(self.X, self.y)\n def Bayes_predict(self, pred_x):\n self.bayes_pred = self.bayes.predict(pred_x)\n return self.bayes_pred\n\n def KNN(self, n_neighbors=3, weights='distance'):\n self.knn = KNeighborsClassifier(n_neighbors=n_neighbors, weights=weights)\n self.knn.fit(self.X, self.y)\n def KNN_predict(self, pred_x):\n self.knn_pred = self.knn.predict(pred_x)\n return self.knn_pred\n\n def XGB(self):\n self.xgb = xgb.XGBClassifier()\n self.xgb.fit(self.X, self.y)\n def XGB_prediction(self, pred_x):\n self.xgb_pred = self.xgb.predict(pred_x)\n return self.xgb_pred\n\n def Stacking(self):\n meta_clf = LogisticRegression()\n self.stacking = StackingClassifier(classifiers=[self.svm,\n self.tree,\n self.bayes,\n self.knn,\n self.xgb], meta_classifier=meta_clf)\n self.stacking.fit(self.X, self.y)\n def Stacking_prediction(self, pred_x):\n self.stacking_pred = self.stacking.predict(pred_x)\n return self.stacking_pred\n\n def Voting(self):\n self.voting = VotingClassifier(estimators=[('svm',self.svm),\n ('tree',self.tree), ('bayes',self.bayes),\n ('knn',self.knn), ('xgb',self.xgb)],\n voting='soft', weights=[1,1,1,1,1])\n self.voting.fit(self.X, self.y)\n def Voting_prediction(self, pred_x):\n self.voting_pred = self.voting.predict(pred_x)\n return self.voting_pred\n\n def Bagging(self,n_estimators=100, oob_score=False):\n self.bagging = BaggingClassifier(n_estimators=n_estimators,oob_score=oob_score)\n self.bagging.fit(self.X, self.y)\n def Bagging_prediction(self, pred_x):\n self.bagging_pred = self.bagging.predict(pred_x)\n return self.bagging_pred\n\n def RF(self,n_estimators=200, criterion='gini', max_features='auto', oob_score=False):\n self.rf = RandomForestClassifier(n_estimators=n_estimators,criterion=criterion,\n max_features=max_features, oob_score=oob_score)\n self.rf.fit(self.X, self.y)\n\n def RF_prediction(self, pred_x):\n self.rf_pred = self.rf.predict(pred_x)\n return self.rf_pred\n\n def Adaboost(self, n_estimators=100):\n self.adaboost = AdaBoostClassifier(n_estimators=n_estimators)\n self.adaboost.fit(self.X, self.y)\n def Adaboost_prediction(self, pred_x):\n self.adaboost_pred = self.adaboost.predict(pred_x)\n return self.adaboost_pred\n\n def Train(self):\n self.SVM()\n self.Tree()\n self.Bayes()\n self.KNN()\n self.XGB()\n self.Stacking()\n self.Voting()\n self.Bagging()\n self.RF()\n self.Adaboost()\n\n def Report(self, test_X, test_y, labels, show_cm=True):\n self.SVM_predict(test_X)\n self.Tree_predict(test_X)\n self.Bayes_predict(test_X)\n self.KNN_predict(test_X)\n self.XGB_prediction(test_X)\n self.Stacking_prediction(test_X)\n self.Voting_prediction(test_X)\n self.Bagging_prediction(test_X)\n self.RF_prediction(test_X)\n self.Adaboost_prediction(test_X)\n\n self.svm_score = self.svm.score(test_X, test_y)\n self.tree_score = self.tree.score(test_X, test_y)\n self.bayes_score = self.bayes.score(test_X, test_y)\n self.knn_score = self.knn.score(test_X, test_y)\n self.xgb_score = self.xgb.score(test_X, test_y)\n self.stacking_score = self.stacking.score(test_X, test_y)\n self.voting_score = self.voting.score(test_X, test_y)\n self.bagging_score = self.bagging.score(test_X, test_y)\n self.rf_score = self.rf.score(test_X, test_y)\n self.adaboost_score = self.adaboost.score(test_X, test_y)\n\n\n self.svm_report = metrics.classification_report(test_y, self.svm_pred)\n self.tree_report = 
metrics.classification_report(test_y, self.tree_pred)\n self.bayes_report = metrics.classification_report(test_y, self.bayes_pred)\n self.knn_report = metrics.classification_report(test_y, self.knn_pred)\n self.xgb_report = metrics.classification_report(test_y, self.xgb_pred)\n self.voting_report = metrics.classification_report(test_y, self.voting_pred)\n self.stacking_report = metrics.classification_report(test_y, self.stacking_pred)\n self.bagging_report = metrics.classification_report(test_y, self.bagging_pred)\n self.rf_report = metrics.classification_report(test_y, self.rf_pred)\n self.adaboost_report = metrics.classification_report(test_y, self.adaboost_pred)\n\n self.svm_cm = metrics.confusion_matrix(test_y, self.svm_pred,labels=labels)\n self.tree_cm = metrics.confusion_matrix(test_y, self.tree_pred,labels=labels)\n self.bayes_cm = metrics.confusion_matrix(test_y, self.bayes_pred,labels=labels)\n self.knn_cm = metrics.confusion_matrix(test_y, self.knn_pred,labels=labels)\n self.xgb_cm = metrics.confusion_matrix(test_y, self.xgb_pred, labels=labels)\n self.stacking_cm = metrics.confusion_matrix(test_y, self.stacking_pred, labels=labels)\n self.voting_cm = metrics.confusion_matrix(test_y, self.voting_pred, labels=labels)\n self.bagging_cm = metrics.confusion_matrix(test_y, self.bagging_pred, labels=labels)\n self.rf_cm = metrics.confusion_matrix(test_y, self.rf_pred, labels=labels)\n self.adaboost_cm = metrics.confusion_matrix(test_y, self.adaboost_pred, labels=labels)\n\n if show_cm:\n self.plot_confusion_matrix(self.svm_cm, classes=labels, title='SVM')\n self.plot_confusion_matrix(self.tree_cm, classes=labels, title='Tree')\n self.plot_confusion_matrix(self.bayes_cm, classes=labels, title='Bayes')\n self.plot_confusion_matrix(self.knn_cm, classes=labels, title='KNN')\n self.plot_confusion_matrix(self.xgb_cm, classes=labels, title='XGB')\n self.plot_confusion_matrix(self.stacking_cm, classes=labels, title='Stacking')\n self.plot_confusion_matrix(self.voting_cm, classes=labels, title='Voting')\n self.plot_confusion_matrix(self.bagging_cm, classes=labels, title='Bagging')\n self.plot_confusion_matrix(self.rf_cm, classes=labels, title='RF')\n self.plot_confusion_matrix(self.adaboost_cm, classes=labels, title='Adaboost')\n\n def History(self):\n print('******************\\nSVM : ',self.svm_report)\n print('******************\\nTree : ',self.tree_report)\n print('******************\\nBayes : ',self.bayes_report)\n print('******************\\nKNN : ',self.knn_report)\n print('******************\\nXGB : ', self.xgb_report)\n print('******************\\nStacking : ', self.stacking_report)\n print('******************\\nVoting : ', self.voting_report)\n print('******************\\nBagging : ', self.bagging_report)\n print('******************\\nRF : ', self.rf_report)\n print('******************\\nAdaboost : ', self.adaboost_report)\n\n def Score(self):\n print('SVM Score : ', self.svm_score)\n print('Tree Score : ', self.tree_score)\n print('Bayes Score : ', self.bayes_score)\n print('KNN Score : ', self.knn_score)\n print('XGB Score : ', self.xgb_score)\n print('Stacking Score : ', self.stacking_score)\n print('Voting Score : ', self.voting_score)\n print('Bagging Score : ', self.bagging_score)\n print('RF Score : ', self.rf_score)\n print('Adaboost Score : ', self.adaboost_score)\n\n def Report2txt(self, filename):\n f = open(filename, 'w')\n f.write('SVM Score : '+ str(self.svm_score) + '\\n')\n f.write('Tree Score : '+ str(self.tree_score) +'\\n')\n f.write('Bayes Score : '+ 
str(self.bayes_score) + '\\n')\n f.write('KNN Score : '+ str(self.knn_score) + '\\n')\n f.write('XGB Score : '+ str(self.xgb_score) + '\\n')\n f.write('Stacking Score : '+ str(self.stacking_score) + '\\n')\n f.write('Voting Score : '+ str(self.voting_score) + '\\n')\n f.write('Bagging Score : '+ str(self.bagging_score) + '\\n')\n f.write('RF Score : '+ str(self.rf_score) + '\\n')\n f.write('Adaboost Score : '+ str(self.adaboost_score) + '\\n')\n f.write('Adaboost Score : '+ str(self.adaboost_score) + '\\n')\n f.write('XXXX\\n')\n f.write('******************\\nSVM : '+ str(self.svm_report) + '\\n')\n f.write('******************\\nTree : '+ str(self.tree_report) + '\\n')\n f.write('******************\\nBayes : '+ str(self.bayes_report) + '\\n')\n f.write('******************\\nKNN : '+ str(self.knn_report) + '\\n')\n f.write('******************\\nXGB : '+ str(self.xgb_report) + '\\n')\n f.write('******************\\nStacking : '+ str(self.stacking_report) + '\\n')\n f.write('******************\\nVoting : '+ str(self.voting_report) + '\\n')\n f.write('******************\\nBagging : '+ str(self.bagging_report) + '\\n')\n f.write('******************\\nRF : '+ str(self.rf_report) + '\\n')\n f.write('******************\\nAdaboost : '+ str(self.adaboost_report) + '\\n')\n f.close()\n\n def plot_confusion_matrix(self,cm, classes,normalize=False,title='Confusion matrix', cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n plt.figure()\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print(title, ' Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.show()\n # Source code from: http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html#sphx-glr-auto-examples-model-selection-plot-confusion-matrix-py\n\n\n","sub_path":"Danny_ML_CLF.py","file_name":"Danny_ML_CLF.py","file_ext":"py","file_size_in_byte":14083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"400733480","text":"from bayesian_model.data_processor import ProcessData\nfrom bayesian_model.bayesian_regression import Prediction\nfrom bayesian_model.performance_evaluation import Evaluation\nfrom bayesian_model.index import Calculate_index\nimport numpy as np\n\n\nclass Test:\n def __init__(self, input_data):\n self.input_data = input_data\n\n def run_model(self, n, n_cluster, n_effective, step, threshold):\n data = self.input_data\n data = data.values.reshape(1, -1)\n index = round(len(data[0]) / 3)\n p1 = data[0, 0: index]\n p2 = data[0, index: 2 * index]\n p_eval = data[0, 2 * index:]\n\n data_pro = ProcessData(p1, n, n_cluster, n_effective)\n effective = data_pro.select_effective_clusters()\n\n test_model = Prediction(effective, p2, n, p_eval)\n p = test_model.predict_delta_p()\n\n bench = np.random.randn(100, 1)\n hold = np.random.randn(1, 100)\n\n eval_result = 
Evaluation(p_eval, max(n), p, step, threshold, bench, hold, 100, True, 5000, 5000, 4)\n returns = eval_result.periodic_return()[0]\n market = eval_result.periodic_return()[1]\n temp = Calculate_index(returns, market, 0.05, 0.04, 1, 500, 4)\n sharpe = temp.sharpe_ratio()\n return sharpe, eval_result.visual_account(threshold)[0], eval_result.visual_account(threshold)[2]\n","sub_path":"bayesian_model/tuning_test.py","file_name":"tuning_test.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"62910411","text":"\"\"\"\nthe purpose of this script is to use standarnd off the shelf HoG and KNN from scikit and apply to the ORL dataset\n\"\"\"\nfrom skimage.feature import hog\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import accuracy_score, classification_report\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.multiclass import OutputCodeClassifier\nfrom sklearn.svm import LinearSVC\nimport time\nfrom orl_face_dataset_examples.read_pgm_file import fetch_sw_orl\n\nPPC = 20\n\ncontrol = [False, False, True]\n\n# grab the data (is contained in Bunch object)\nb = fetch_sw_orl()\ntic = time.time()\nif control[0]:\n # hog() returns feature vector, and hog_image if visualize=True\n # apply this to the first image of b.data\n fd, hog_image = hog(b.data[0].reshape(b.shape), orientations=8, pixels_per_cell=(PPC, PPC),\n cells_per_block=(1, 1), visualize=True, multichannel=False)\n\n print(f'Original image size is {b.shape}')\n print(f'HoG size is {hog_image.size}')\n print(f'HoG features size is {fd.size}')\n fig = plt.figure()\n fig.add_subplot(1,2, 1)\n plt.imshow(b.data[0].reshape(b.shape), cmap='gray')\n fig.add_subplot(1, 2, 2)\n plt.imshow(hog_image, cmap='gray')\n plt.show()\n\n# split the data in\nX_train, X_test, y_train, y_true = train_test_split(b.data, b.target, test_size=0.2, stratify=b.target)\n\n\n\n# apply HoG to all the images in b.data\nhog_train = []\nfor img_array in X_train:\n img = img_array.reshape(b.shape)\n fd, _ = hog(img, orientations=8, pixels_per_cell=(PPC , PPC ), cells_per_block=(1, 1), visualize=True, multichannel=False)\n hog_train.append(fd)\n\n\nclf = OutputCodeClassifier(LinearSVC(random_state=0), code_size=2, random_state=42)\nclf.fit(hog_train, y_train)\ntok = time.time()\n\nif control[1]:\n # create the hog fro the X_test\n hog_test = []\n for img_arry in X_test:\n fd, _ = hog(img_arry.reshape(b.shape), orientations=8, pixels_per_cell=(PPC , PPC ), cells_per_block=(1, 1), visualize=True, multichannel=False)\n hog_test.append(fd)\n y_pred = clf.predict(hog_test)\n\n print(f'the number of correct example is {accuracy_score(y_true, y_pred, normalize=False)}, with accuracy score of {accuracy_score(y_true, y_pred)}')\n print(classification_report(y_true, y_pred, zero_division=0.0))\n print(f'time to train : {tok - tic:.5}')\n\n\ndef run_test(**kwargs):\n b = fetch_sw_orl()\n tic = time.time()\n\n # split the data in\n X_train, X_test, y_train, y_true = train_test_split(b.data, b.target, test_size=0.2, stratify=b.target)\n\n hog_train = []\n for img_array in X_train:\n fd, _ = hog(img_array.reshape(b.shape), orientations=8, pixels_per_cell=(PPC , PPC ), cells_per_block=(1, 1), visualize=True, multichannel=False)\n hog_train.append(fd)\n\n clf = OutputCodeClassifier(LinearSVC(random_state=0), code_size=2)\n clf.fit(hog_train, y_train)\n tok = time.time()\n\n hog_test = []\n for img_arry in X_test:\n fd, _ = hog(img_arry.reshape(b.shape), 
orientations=8, pixels_per_cell=(PPC, PPC), cells_per_block=(1, 1), visualize=True, multichannel=False)\n hog_test.append(fd)\n y_pred = clf.predict(hog_test)\n return tok - tic, accuracy_score(y_true, y_pred)\n\nif control[2]:\n\n results = []\n times = []\n\n for i in range(10):\n print(i)\n r, t = run_test()\n results.append(r)\n times.append(t)\n\n","sub_path":"orl_face_dataset_examples/test_HoG_with_KNN.py","file_name":"test_HoG_with_KNN.py","file_ext":"py","file_size_in_byte":3387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"549316252","text":"\nimport json\nimport urllib.parse\nimport boto3\nfrom elasticsearch import Elasticsearch\nimport requests\nfrom datetime import datetime\nfrom s3logparse import s3logparse\nimport os\nimport sys\nfrom tempfile import NamedTemporaryFile\nimport traceback\nimport logging\nfrom aws_xray_sdk.core import xray_recorder\nfrom aws_xray_sdk.core import patch_all\npatch_all()\n\n######################################################################\n# Notes:\n######################################################################\n# https://docs.aws.amazon.com/code-samples/latest/catalog/python-s3-get_object.py.html\n# https://forums.aws.amazon.com/thread.jspa?threadID=221549\n# https://stackoverflow.com/questions/32000934/python-print-a-variables-name-and-value\n# https://pypi.org/project/s3-log-parse/\n# https://www.geeksforgeeks.org/python-dictionary/\n# https://stackoverflow.com/questions/44381249/treat-a-string-as-a-file-in-python\n# https://github.com/elastic/elasticsearch-py\n# https://docs.aws.amazon.com/lambda/latest/dg/running-lambda-code.html\n# https://www.geeksforgeeks.org/python-interconversion-between-dictionary-and-bytes/\n# https://stackoverflow.com/questions/2266646/how-to-disable-and-re-enable-console-logging-in-python/2267567#2267567\n\n\n######################################################################\n# Initialize boto3 client at global scope for connection reuse\n######################################################################\nprint('Loading function')\nclient = boto3.client('ssm')\ns3 = boto3.client('s3')\n\n\ndef lambda_handler(event, context):\n ######################################################################\n # Create and Configure Python logging \n ######################################################################\n enable_logging = os.getenv('enable_logging')\n if enable_logging == 'True':\n enable_logging = True\n logging.Logger.disabled = False\n else: \n enable_logging = False\n logging.Logger.disabled = True\n\n # log = logging.getLogger(\"accesslogstoelasticcloud\")\n log = logging.getLogger()\n log.setLevel(logging.DEBUG)\n # log.addHandler(handler)\n log.debug(\"Received event: \" + json.dumps(event, indent=2))\n # print(\"Received event: \" + json.dumps(event, indent=2))\n\n ######################################################################\n # Get all parameters containing credentials for this app\n # If not -> user credentials from environment variables\n ######################################################################\n parent_stack_name = os.getenv('parent_stack_name')\n try:\n param_name = '/' + parent_stack_name + '/cloud_id'\n param_details = client.get_parameter(Name=param_name,WithDecryption=True)\n if 'Parameter' in param_details and len(param_details.get('Parameter')) > 0:\n parameter = param_details.get('Parameter')\n cloud_id = parameter.get('Value')\n log.info('cloud_id=' + cloud_id)\n\n param_name = '/' + 
parent_stack_name + '/http_auth_username'\n param_details = client.get_parameter(Name=param_name,WithDecryption=True)\n if 'Parameter' in param_details and len(param_details.get('Parameter')) > 0:\n parameter = param_details.get('Parameter')\n http_auth_username = parameter.get('Value')\n log.info('http_auth_username=' + http_auth_username)\n \n param_name = '/' + parent_stack_name + '/http_auth_password'\n param_details = client.get_parameter(Name=param_name,WithDecryption=True)\n if 'Parameter' in param_details and len(param_details.get('Parameter')) > 0:\n parameter = param_details.get('Parameter')\n http_auth_password = parameter.get('Value')\n log.info('http_auth_password=' + http_auth_password)\n\n param_name = '/' + parent_stack_name + '/index_name'\n param_details = client.get_parameter(Name=param_name,WithDecryption=True)\n if 'Parameter' in param_details and len(param_details.get('Parameter')) > 0:\n parameter = param_details.get('Parameter')\n index_name = parameter.get('Value')\n log.info('index_name=' + index_name)\n\n except:\n log.debug(\"Encountered an error loading credentials from SSM.\")\n traceback.print_exc()\n cloud_id = os.getenv('cloud_id')\n http_auth_username = os.getenv('http_auth_username')\n http_auth_password = os.getenv('http_auth_password')\n index_name = os.getenv('index_name')\n \n\n ######################################################################\n # Get the object from the event and show its content type\n ######################################################################\n bucket = event['Records'][0]['s3']['bucket']['name']\n key = urllib.parse.unquote_plus(event['Records'][0]['s3']['object']['key'], encoding='utf-8')\n try:\n response = s3.get_object(Bucket=bucket, Key=key)\n log.info(\"CONTENT TYPE: \" + response['ContentType'])\n except Exception as e:\n log.debug('Error getting object {} from bucket {}. Make sure they exist and your bucket is in the same region as this function.'.format(key, bucket))\n log.debug(e)\n # print(e)\n # print('Error getting object {} from bucket {}. 
Make sure they exist and your bucket is in the same region as this function.'.format(key, bucket))\n raise e\n \n StreamingBody=response['Body']\n access_log=StreamingBody.read()\n\n ######################################################################\n # Example Access Log:\n ######################################################################\n # access_log='2279185f7619a617e0a834c7f0660e4b09ea7f842f9d768d39109ee6e4cdf522 bucket [20/Dec/2019:06:36:32 +0000] 174.65.125.92 arn:aws:sts::696965430234:assumed-role/AWSReservedSSO_AdministratorAccess_563d3ebb7af9cd35/dev@company.com 6ED2206C36ABCD61 REST.GET.ACL object.mov \"GET /bucket/object.mov?acl= HTTP/1.1\" 200 - 550 - 277 - \"-\" \"S3Console/0.4, aws-internal/3 aws-sdk-java/1.11.666 Linux/4.9.184-0.1.ac.235.83.329.metal1.x86_64 OpenJDK_64-Bit_Server_VM/25.232-b09 java/1.8.0_232 vendor/Oracle_Corporation\" - eGkU7fkbpX9QOfaV1GDHSXQ9zVEokrE0KgIhdVMr63PbSCxWwZoEtr5GDbaDGr1/LFf9lTpiJ3U= SigV4 ECDHE-RSA-AES128-SHA AuthHeader s3-us-west-2.amazonaws.com TLSv1.2\\n'\n log.info(f\"access_log={access_log}\\n\")\n\n f = NamedTemporaryFile(mode='w+', delete=False)\n f.write(str(access_log))\n f.close()\n # with open(f.name, \"r\") as new_f:\n # print(new_f.read())\n\n with open(f.name, \"r\") as fh:\n for log_entry in s3logparse.parse_log_lines(fh.readlines()):\n log.info(log_entry)\n\n os.unlink(f.name) # delete the file after usage\n\n ######################################################################\n # Start the X-Ray sub-segment\n ######################################################################\n subsegment = xray_recorder.begin_subsegment('accesslogstoelasticcloud - send data to ElasticCloud')\n subsegment.put_annotation('function', 'accesslogstoelasticcloud')\n xray_recorder.put_metadata(\"access_log\", access_log)\n\n ##################################################################################################\n #Now put that data in ElasticCloud! 
\n ##################################################################################################\n es = Elasticsearch(cloud_id=cloud_id, http_auth=(http_auth_username, http_auth_password))\n es.info()\n\n # create an index in elasticsearch, ignore status code 400 (index already exists)\n # es.indices.create(index='accesslogstoelasticcloud', ignore=400)\n es.indices.create(index=index_name, ignore=400)\n # {'acknowledged': True, 'shards_acknowledged': True, 'index': 'my-index'}\n # datetimes will be serialized\n # es.index(index=\"my-index\", id=44, body={\"any\": \"data44\", \"timestamp\": datetime.now()})\n \n \n es_body={\n \"bucket_owner\": log_entry.bucket_owner,\n \"bucket\": log_entry.bucket,\n \"timestamp\": log_entry.timestamp,\n \"remote_ip\": log_entry.remote_ip,\n \"requester\": log_entry.requester,\n \"request_id\": log_entry.request_id,\n \"operation\": log_entry.operation,\n \"s3_key\": log_entry.s3_key,\n \"request_uri\": log_entry.request_uri,\n \"status_code\": log_entry.status_code,\n \"error_code\": log_entry.error_code,\n \"bytes_sent\": log_entry.bytes_sent,\n \"object_size\": log_entry.object_size,\n \"total_time\": log_entry.total_time,\n \"turn_around_time\": log_entry.turn_around_time,\n \"referrer\": log_entry.referrer,\n \"user_agent\": log_entry.user_agent,\n \"version_id\": log_entry.version_id\n }\n\n es.index(index=index_name, body=es_body)\n\n ######################################################################\n # End the X-Ray sub-segment\n ######################################################################\n xray_recorder.end_subsegment()\n\n","sub_path":"sam-app/accesslogstoelasticcloud/accesslogstoelasticcloud.py","file_name":"accesslogstoelasticcloud.py","file_ext":"py","file_size_in_byte":8931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"571207482","text":"import math # This will import math module\nimport glob\n\n# max ' ' = 32, max = '~'\ndef unigram(text):\n list = [0] * 95\n for i, c in enumerate(text):\n index = ord(c)-32\n if index > 94 or index < 0:\n continue\n list[index] = list[index]+1\n return list\n\n\ndef countCharacters(text):\n count = 0\n for i, c in enumerate(text):\n index = ord(c)-32\n if index > 94 or index < 0: # Count only characters that we use\n continue\n count = count+1\n return count\n\n\ndef countWords(text):\n return len(text.split()) # split whitespaces & get their length\n\n\ndef countSentences(text):\n return len(text.split('.')) # split dots & get their length\n\n\ndef normalize(vector):\n sum = 0\n res = [0.0] * len(vector)\n for c in vector:\n sum += c*c\n dist = math.sqrt(sum)\n for i,c in enumerate(vector):\n res[i] = c/dist\n return res\n\n\nncu = open('./results_ncu.txt', 'w')\nrawcu = open('./results_rawcu.txt', 'w')\n\n\navg = [0, 0, 0] # [#Character, #Word, #Sentece]\nfor x in range(1000, 1025):\n for y in range(1, 5):\n file = open(\"./CASIS/%d_%d.txt\" % (x, y), \"r\")\n raw = file.read() # Read contents of the file\n file.close() # We don't need that file anymore.\n rawResult = unigram(raw)\n # Counters\n avg[0] += countCharacters(raw)\n avg[1] += countWords(raw)\n avg[2] += countSentences(raw)\n\n rawString = ','.join(str(v) for v in rawResult)\n normalizedResult = normalize(rawResult)\n normalizedString = ','.join(str(v) for v in normalizedResult)\n rawcu.write(\"%d_%d,%s\\n\" % (x, y, rawString))\n ncu.write(\"%d_%d,%s\\n\" % (x, y, normalizedString))\n\nncu.close()\nrawcu.close()\n\n\n# Statistics file\nstats = 
open('./casis25_stats.txt', 'w')\nstats.write('Character #%d\\nWord #%d\\nSentence #%d\\n' % (avg[0]/100, avg[1]/100, avg[2]/100))\nstats.close()\n\n\nsncu = open('./msst_ncu.txt', 'w')\nsrawcu = open('./msst_rawcu.txt', 'w')\n\navg = [0, 0, 0] # [#Character, #Word, #Sentece]\nsamples = glob.glob(\"./MSST/*.txt\") # Put every text file in the MSST folder into a list\nfor fileName in samples:\n baseName = fileName.split('\\\\')[-1].split('.')[0] # Delete folder names & extension\n file = open(fileName, \"r\", encoding='utf-8') # Force encoding to utf8\n raw = file.read() # Read contents of the file\n file.close() # We don't need that file anymore.\n # Counters\n avg[0] += countCharacters(raw)\n avg[1] += countWords(raw)\n avg[2] += countSentences(raw)\n rawResult = unigram(raw) # Generate unigram from text\n rawString = ','.join(str(v) for v in rawResult)\n normalizedResult = normalize(rawResult)\n normalizedString = ','.join(str(v) for v in normalizedResult)\n srawcu.write(\"%s,%s\\n\" % (baseName, rawString))\n sncu.write(\"%s,%s\\n\" % (baseName, normalizedString))\n\nsncu.close()\nsrawcu.close()\n\n# Statistics file\nstats = open('./msst_stats.txt', 'w')\nstats.write('Character #%d\\nWord #%d\\nSentence #%d\\n' % (avg[0]/len(samples), avg[1]/len(samples), avg[2]/len(samples)))\nstats.close()\n","sub_path":"1 Data Collection & Feature Extraction/Machine Learning/unigram.py","file_name":"unigram.py","file_ext":"py","file_size_in_byte":3097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"485072676","text":"import numpy as np\nimport tensorflow as tf\nfrom scipy.linalg import solve_discrete_are\nfrom scipy.linalg import sqrtm\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport matplotlib.lines as mlines\nfrom Classifier import Classifier\nimport ipdb\n\nclass LQRlayer(Classifier):\n def __init__(self, n, m, T, dt, n_modes, iLQR_niter, lr, momentum, mass, A, B, temperature):\n super().__init__(n, m, T, dt, n_modes, temperature)\n # Constructing the cvxpy layer\n self.iLQR_niter = iLQR_niter\n self.lr = lr\n self.momentum = momentum\n self.n_objects = 3\n self.ss_len = n*self.n_objects\n\n self.massinv_tf = tf.Variable( tf.random.uniform((self.n_objects,n_modes), minval=0, maxval=2, dtype=tf.dtypes.float64) )\n self.C1train_tf = tf.Variable( tf.random.uniform((self.n_objects,self.n_objects), minval=0, maxval=2, dtype=tf.dtypes.float64) )\n self.C2train_tf = tf.Variable( tf.random.uniform((self.n_objects,self.n_objects), minval=0, maxval=2, dtype=tf.dtypes.float64) )\n self.C3train_tf = tf.Variable( tf.random.uniform((self.n_objects,self.n_objects), minval=0, maxval=2, dtype=tf.dtypes.float64) )\n\n self.Qtrain_tf = tf.Variable( tf.random.uniform((n,n_modes), minval=0, maxval=2, dtype=tf.dtypes.float64) )\n self.xfs_train_tf = tf.Variable( tf.zeros((int(self.n/2),n_modes), dtype=tf.dtypes.float64) )\n self.Qftrain_tf = tf.Variable(tf.random.uniform((n,), minval=0, maxval=2, dtype=tf.dtypes.float64))\n self.Rtrain_tf = tf.Variable( tf.random.uniform((m,n_modes), minval=0, maxval=2, dtype=tf.dtypes.float64) )\n\n #Expert values\n mass_g = 1\n mass_o1 = 1\n mass_o2 = 1\n\n _mass = np.array(([[1/mass_g, 1/(mass_g+mass_o1), 1/(mass_g+mass_o1+mass_o2)],\n [0, 1/(mass_g+mass_o1), 1/(mass_g+mass_o1+mass_o2)],\n [0, 0, 1/(mass_g+mass_o1+mass_o2)]]), dtype=np.float64)\n\n xfs = np.zeros((self.ss_len, n_modes))\n xfs_part = np.array([[-0.5, 0.5, 1], [-1,-1,0]])\n xfs[:2, :] = xfs_part\n xfs[4:6, :] = xfs_part\n xfs[8:10, 
:] = xfs_part\n\n Q = 1e-6*np.ones((self.ss_len, n_modes))\n Q[:n,:] = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]], dtype=np.float64)\n Qf = 1e-6*np.ones(self.ss_len)\n Qf[:n] = np.array([1, 1, 1, 1], dtype=np.float64)\n\n R = np.array([[1, 0.5, 0.5], [1, 0.5, 0.5]], dtype=np.float64)\n\n C1 = np.array([[mass_g/(mass_g+mass_o1), mass_o1/(mass_g+mass_o1), 0], \n [mass_g/(mass_g+mass_o1), mass_o1/(mass_g+mass_o1), 0],\n [0, 0, 1]], dtype=np.float64)\n C2 = np.array([[mass_g/(mass_g+mass_o1+mass_o2), mass_o1/(mass_g+mass_o1+mass_o2), mass_o2/(mass_g+mass_o1+mass_o2)], \n [mass_g/(mass_g+mass_o1+mass_o2), mass_o1/(mass_g+mass_o1+mass_o2), mass_o2/(mass_g+mass_o1+mass_o2)],\n [mass_g/(mass_g+mass_o1+mass_o2), mass_o1/(mass_g+mass_o1+mass_o2), mass_o2/(mass_g+mass_o1+mass_o2)]], dtype=np.float64)\n C3 = np.array([[mass_g/(mass_g+mass_o1+mass_o2), mass_o1/(mass_g+mass_o1+mass_o2), mass_o2/(mass_g+mass_o1+mass_o2)], \n [mass_g/(mass_g+mass_o1+mass_o2), mass_o1/(mass_g+mass_o1+mass_o2), mass_o2/(mass_g+mass_o1+mass_o2)],\n [mass_g/(mass_g+mass_o1+mass_o2), mass_o1/(mass_g+mass_o1+mass_o2), mass_o2/(mass_g+mass_o1+mass_o2)]], dtype=np.float64)\n\n temp_c = np.zeros((self.ss_len, self.ss_len))\n temp_c[:2,:2] = np.eye(int(n/2))\n temp_c[4:6,4:6] = np.eye(int(n/2))\n temp_c[8:10,8:10] = np.eye(int(n/2))\n\n temp_l1 = np.zeros((self.ss_len, self.n_objects))\n temp_l1[2,0], temp_l1[6,1], temp_l1[10,2] = 1, 1, 1\n temp_l2 = np.zeros((self.ss_len, self.n_objects))\n temp_l2[3,0], temp_l2[7,1], temp_l2[11,2] = 1, 1, 1\n temp_r1 = np.zeros((self.n_objects, self.ss_len))\n temp_r1[0, 2], temp_r1[1, 6], temp_r1[2, 10] = 1, 1, 1\n temp_r2 = np.zeros((self.n_objects, self.ss_len))\n temp_r2[0, 3], temp_r2[1, 7], temp_r2[2, 11] = 1, 1, 1\n\n with tf.device('gpu:0'):\n # Variables\n # self.C1train_tf = tf.Variable(C1)\n # self.C2train_tf = tf.Variable(C2)\n # self.C3train_tf = tf.Variable(C3)\n # self.massinv_tf = tf.Variable(_mass)\n # self.Qtrain_tf = tf.Variable(Q)\n # self.Qftrain_tf = tf.Variable(Qf)\n # # self.xfs_train_tf = tf.Variable(xfs)\n # self.Rtrain_tf = tf.Variable(R)\n\n # Constants\n self.A_tf = tf.constant(A)\n self.Bpart_tf = tf.constant(B)\n\n ## x-z case\n self.tempm_r1 = tf.constant(np.array( [[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]] ), dtype=tf.dtypes.float64)\n self.tempm_r2 = tf.constant(np.array( [[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]] ), dtype=tf.dtypes.float64)\n self.temp_c = tf.constant(temp_c, dtype=tf.dtypes.float64)\n self.temp_l1 = tf.constant(temp_l1, dtype=tf.dtypes.float64)\n self.temp_l2 = tf.constant(temp_l2, dtype=tf.dtypes.float64)\n self.temp_r1 = tf.constant(temp_r1, dtype=tf.dtypes.float64)\n self.temp_r2 = tf.constant(temp_r2, dtype=tf.dtypes.float64)\n self.tempm_l1 = tf.constant(temp_l1, dtype=tf.dtypes.float64)\n self.tempm_l2 = tf.constant(temp_l2, dtype=tf.dtypes.float64)\n self.xobj_init = tf.constant(xfs, dtype=tf.dtypes.float64)\n\n def LQR_tf_xz(self, x0, rho):\n # vel_f = tf.zeros((int(self.ss_len/2),self.n_modes), dtype=tf.dtypes.float64)\n\n # xfs_full = tf.concat( [ tf.concat( [self.xfs_train_tf[int(i*self.n/2):int((i+1)*self.n/2), :], vel_f[int(i*self.n/2):int((i+1)*self.n/2), :]], axis = 0 ) \n # for i in range(int(self.ss_len/self.n)) ], axis = 0) + self.xobj_init\n xfs_full = tf.concat([self.xfs_train_tf, tf.zeros((int(self.ss_len - self.n/2),self.n_modes), dtype=tf.dtypes.float64)], axis = 0) + self.xobj_init\n\n # Ptp1 = tf.identity(tf.linalg.diag(self.Qftrain_tf))\n Ptp1 = tf.identity(tf.linalg.diag( tf.concat([self.Qftrain_tf, 
tf.zeros((int(self.ss_len - self.n),),dtype=tf.dtypes.float64)], axis=0) ))\n K = []\n At_all = []\n Bt_all = []\n # Backward ricatti\n for t in range(self.T-1,0,-1):\n # Qt = tf.linalg.diag( tf.squeeze(self.Qtrain_tf@tf.expand_dims(rho[:,t], axis=1)) )\n Qt = tf.linalg.diag( tf.squeeze( (tf.concat([self.Qtrain_tf, 1e-6*tf.ones((int(self.ss_len - self.n),self.n_modes), dtype=tf.dtypes.float64)], axis = 0) )@tf.expand_dims(rho[:,t], axis=1)) )\n Rt = tf.linalg.diag( tf.reshape(self.Rtrain_tf@tf.expand_dims(rho[:,t], axis=1), (self.m,)) )\n\n Bt_all.append( (self.tempm_l1@self.massinv_tf@tf.expand_dims(rho[:,t], axis=1)@self.tempm_r1 + self.tempm_l2@self.massinv_tf@tf.expand_dims(rho[:,t], axis=1)@self.tempm_r2 )@self.Bpart_tf )\n Bt = tf.identity(Bt_all[self.T-1-t][:self.ss_len,:])\n if (tf.math.argmax(rho[:,t-1]) == 0 and tf.math.argmax(rho[:,t]) == 1) or (tf.math.argmax(rho[:,t-1]) == 1 and tf.math.argmax(rho[:,t]) == 0):\n C_full = (self.temp_c + self.temp_l1@self.C1train_tf@self.temp_r1 + self.temp_l2@self.C1train_tf@self.temp_r2)\n elif (tf.math.argmax(rho[:,t-1]) == 1 and tf.math.argmax(rho[:,t]) == 2) or (tf.math.argmax(rho[:,t-1]) == 2 and tf.math.argmax(rho[:,t]) == 1):\n C_full = (self.temp_c + self.temp_l1@self.C2train_tf@self.temp_r1 + self.temp_l2@self.C2train_tf@self.temp_r2)\n elif(tf.math.argmax(rho[:,t-1]) == 0 and tf.math.argmax(rho[:,t]) == 2) or (tf.math.argmax(rho[:,t-1]) == 2 and tf.math.argmax(rho[:,t]) == 0):\n C_full = (self.temp_c + self.temp_l1@self.C3train_tf@self.temp_r1 + self.temp_l2@self.C3train_tf@self.temp_r2)\n else:\n C_full = tf.eye(self.ss_len, dtype=tf.float64)\n At_all.append(self.A_tf@C_full)\n At = tf.identity(At_all[self.T-1-t][:self.ss_len,:self.ss_len])\n\n Kt = -tf.linalg.inv(Rt+ tf.transpose(Bt)@Ptp1@Bt)@tf.transpose(Bt)@Ptp1@At\n # Pt = Qt + tf.transpose(At)@tf.linalg.inv(tf.eye(self.n_objects*self.n, dtype=tf.dtypes.float64) + Ptp1@Bt@tf.linalg.inv(Rt)@tf.transpose(Bt))@Ptp1@At\n Pt = Qt + tf.transpose(At)@tf.linalg.inv(tf.eye(self.ss_len, dtype=tf.dtypes.float64) + Ptp1@Bt@tf.linalg.inv(Rt)@tf.transpose(Bt))@Ptp1@At\n Ptp1 = tf.identity(Pt)\n K.append(Kt)\n\n # Qt = tf.linalg.diag( tf.squeeze(self.Qtrain_tf@tf.expand_dims(rho[:,0], axis=1)) )\n Qt = tf.linalg.diag( tf.squeeze( (tf.concat([self.Qtrain_tf, 1e-6*tf.ones((int(self.ss_len - self.n),self.n_modes), dtype=tf.dtypes.float64)], axis = 0) )@tf.expand_dims(rho[:,0], axis=1)) )\n Rt = tf.linalg.diag( tf.reshape(self.Rtrain_tf@tf.expand_dims(rho[:,0], axis=1), (self.m,)) ) \n \n Bt_all.append( (self.tempm_l1@self.massinv_tf@tf.expand_dims(rho[:,0], axis=1)@self.tempm_r1 + self.tempm_l2@self.massinv_tf@tf.expand_dims(rho[:,0], axis=1)@self.tempm_r2 )@self.Bpart_tf )\n Bt = tf.identity(Bt_all[self.T-1][:self.ss_len,:])\n\n At_all.append(self.A_tf)\n At = tf.identity(At_all[self.T-1][:self.ss_len,:self.ss_len])\n\n Kt = -tf.linalg.inv(Rt+ tf.transpose(Bt)@Ptp1@Bt)@tf.transpose(Bt)@Ptp1@At\n # Pt = Qt + tf.transpose(At)@tf.linalg.inv(tf.eye(self.n_objects*self.n, dtype=tf.dtypes.float64) + Ptp1@Bt@tf.linalg.inv(Rt)@tf.transpose(Bt))@Ptp1@At\n Pt = Qt + tf.transpose(At)@tf.linalg.inv(tf.eye(self.ss_len, dtype=tf.dtypes.float64) + Ptp1@Bt@tf.linalg.inv(Rt)@tf.transpose(Bt))@Ptp1@At\n Ptp1 = tf.identity(Pt)\n K.append(Kt)\n\n # Forward pass\n x = []\n x.append(x0)\n u = []\n xft =xfs_full@tf.expand_dims(rho[:,0], axis=1)\n u.append( K[self.T-1]@(x[0][:self.ss_len,:]-xft) )\n xtp1 = At_all[self.T-1]@x[0] + Bt_all[self.T-1]@u[0]\n x.append(xtp1)\n for t in range(1,self.T):\n xft = 
xfs_full@tf.expand_dims(rho[:,t], axis=1)\n u.append( K[self.T-1-t]@(x[t][:self.ss_len,:] - xft) )\n xtp1 = At_all[self.T-1-t]@x[t] + Bt_all[self.T-1-t]@u[t]\n x.append( xtp1 )\n\n u = tf.concat([tf.concat(u, axis = 1), tf.zeros((self.m, 1), dtype=tf.dtypes.float64)], axis = 1)\n x = tf.concat(x, axis = 1)\n return u, x\n\n def sysID_traj_rollout_impact_xz(self, x_star, u_star):\n x_rollout = []\n x_rollout.append(tf.expand_dims(x_star[:,0], axis = 1))\n dist1 = tf.reduce_sum((x_star[:2,:]-x_star[4:6, :])**2, axis=0, keepdims=True)\n dist2 = tf.reduce_sum((x_star[4:6,:]-x_star[8:10, :])**2, axis=0, keepdims=True)\n dist3 = tf.reduce_sum((x_star[:2,:]-x_star[8:10, :])**2, axis=0, keepdims=True)\n \n # dist4 = tf.reduce_sum((x_star[:2,:self.T]-x_star[:2, 1:])**2, axis=0, keepdims=True)\n # dist4 = tf.concat((dist4, [dist4[:,-1]]), axis = 1)\n # dist5 = tf.reduce_sum((x_star[4:6,:self.T]-x_star[4:6, 1:])**2, axis=0, keepdims=True)\n # dist5 = tf.concat((dist5, [dist5[:,-1]]), axis = 1)\n # dist6 = tf.reduce_sum((x_star[8:10,:self.T]-x_star[8:10, 1:])**2, axis=0, keepdims=True)\n # dist6 = tf.concat((dist6, [dist6[:,-1]]), axis = 1)\n\n rho = self.mode_update_logReg(tf.concat([dist1, dist2, dist3], axis=0))\n # rho = self.mode_update_logReg(tf.concat([dist1, dist2, dist3, dist4, dist5, dist6], axis=0))\n xtp1 = self.A_tf@tf.expand_dims(x_star[:,0], axis = 1) + (self.tempm_l1@self.massinv_tf@tf.expand_dims(rho[:,0], axis=1)@self.tempm_r1 + self.tempm_l2@self.massinv_tf@tf.expand_dims(rho[:,0], axis=1)@self.tempm_r2 )@self.Bpart_tf@tf.expand_dims(u_star[:,0], axis = 1)\n x_rollout.append(xtp1)\n for i in range(1, self.T):\n if (tf.math.argmax(rho[:,i-1]) == 0 and tf.math.argmax(rho[:,i]) == 1) or (tf.math.argmax(rho[:,i-1]) == 1 and tf.math.argmax(rho[:,i]) == 0):\n C_full = (self.temp_c + self.temp_l1@self.C1train_tf@self.temp_r1 + self.temp_l2@self.C1train_tf@self.temp_r2)\n elif (tf.math.argmax(rho[:,i-1]) == 1 and tf.math.argmax(rho[:,i]) == 2) or (tf.math.argmax(rho[:,i-1]) == 2 and tf.math.argmax(rho[:,i]) == 1):\n C_full = (self.temp_c + self.temp_l1@self.C2train_tf@self.temp_r1 + self.temp_l2@self.C2train_tf@self.temp_r2)\n elif(tf.math.argmax(rho[:,i-1]) == 0 and tf.math.argmax(rho[:,i]) == 2) or (tf.math.argmax(rho[:,i-1]) == 2 and tf.math.argmax(rho[:,i]) == 0):\n C_full = (self.temp_c + self.temp_l1@self.C3train_tf@self.temp_r1 + self.temp_l2@self.C3train_tf@self.temp_r2)\n else:\n C_full = tf.eye(self.ss_len, dtype=tf.float64)\n xtp1 = self.A_tf@C_full@tf.expand_dims(x_star[:,i], axis = 1) + (self.tempm_l1@self.massinv_tf@tf.expand_dims(rho[:,i], axis=1)@self.tempm_r1 + self.tempm_l2@self.massinv_tf@tf.expand_dims(rho[:,i], axis=1)@self.tempm_r2 )@self.Bpart_tf@tf.expand_dims(u_star[:,i], axis = 1)\n x_rollout.append(xtp1)\n # x_rollout = tf.transpose(tf.squeeze(tf.stack(x_rollout))) # converting y to tf array\n x_rollout = tf.concat(x_rollout, axis = 1)\n return x_rollout, rho\n \n def iLQR(self, x0_tf):\n \n rho_tf = tf.Variable(tf.concat( [tf.tile( tf.constant([[1.], [0.]] , dtype=tf.dtypes.float64), [1, int( self.T/2)]), tf.tile(tf.constant([[0.], [1.]] , dtype=tf.dtypes.float64), [1,self.T - int(self.T/2)] )], axis=1), dtype=tf.dtypes.float64)\n\n tol = 100\n thresh = 0.1\n i=0\n while thresh>1e-6 and i < self.iLQR_niter:\n # Forward pass\n # rho_tf = tf.Variable(rho[:,0:self.T]) # if you create a new variable everytime it loses information of everything before that\n if i > 0:\n tol_old = tol\n xold = tf.identity(x)\n uold = tf.identity(u)\n # with tf.GradientTape() as tape:\n 
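# a sketch of the alternation happening in this loop (inferred from the calls below, not stated by the author):\n            # each pass re-solves the time-varying LQR under the current mode probabilities, then re-estimates the\n            # mode probabilities from the fresh trajectory; iteration stops once the squared change in the combined\n            # state/control discrepancy stalls below 1e-6 or iLQR_niter passes are exhausted.\n            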
# try:\n u, x = self.LQR_tf(x0_tf, rho_tf[:,:self.T])\n rho_tf = self.mode_update_logReg_expert(x)\n if i > 0:\n tol = tf.reduce_sum(tf.square(x-xold)) + tf.reduce_sum(tf.square(u-uold))\n thresh = (tol-tol_old)**2\n i = i+1\n # print('tol:',tol)\n # print('iLQR iterations: ', i)\n # ipdb.set_trace()\n return u, x, rho_tf\n\n def evaluate(self, train_xinit, expert_traj, converged_rho_tf):\n\n cost = 0.\n x_star = expert_traj[:self.n_objects*self.n, :]\n u_star = expert_traj[self.n_objects*self.n:, :]\n\n u_tr, x_tr = self.LQR_tf_xz(train_xinit, converged_rho_tf[:,:self.T])\n ## Considering SysID cost using expert controls\n # x_rollout, _ = self.sysID_traj_rollout_impact(x_star, u_star)\n # sysIDcost = tf.reduce_sum( tf.square(tf.subtract(x_star, x_rollout)))\n\n cost = tf.reduce_sum( tf.square(tf.subtract(x_star, x_tr))) + tf.reduce_sum( tf.square(tf.subtract(u_star, u_tr)) )\n cost = cost/(x_star.shape[1])#/(x_star.shape[0])\n # print('cost:', cost)\n # cost = (cost + 0.5*sysIDcost)/(x_star.shape[1])\n return cost\n\n def evaluate_sysID(self, expert_traj):\n cost = 0.\n x_star = expert_traj[:self.n_objects*self.n, :]\n u_star = expert_traj[self.n_objects*self.n:, :]\n\n x_rollout, _ = self.sysID_traj_rollout_impact_xz(x_star, u_star)\n sysIDcost = tf.reduce_sum( tf.square(tf.subtract(x_star, x_rollout)))\n\n cost = 100*(sysIDcost)#/(x_star.shape[1])\n\n # cost = self.sysID_traj_rollout_impact_prob(x_star, u_star)\n return cost\n\n def eval_loss(self, train_xinit, N, expert_trajs, converged_rho_tf):\n return sum([self.evaluate(train_xinit[i], expert_trajs[i], converged_rho_tf[i])\n for i in range(N)]) / N\n\n def eval_loss_sysID(self, N, expert_trajs):\n return sum([self.evaluate_sysID(expert_trajs[i])\n for i in range(N)]) / N\n\n def train(self, train_xinit, expert_trajs, applygradient):\n # optimizer = tf.keras.optimizers.SGD(learning_rate=gamma, momentum=self.hparams['momentum'])\n optimizer = tf.keras.optimizers.Adam(learning_rate=self.lr, beta_1=0.9, beta_2=0.999, epsilon=1e-04)\n # converged_rho_tf = [self.iLQR(train_xinit[i])[2] for i in range(train_xinit.shape[0])]\n # converged_rho_tf = [self.mode_update_logReg_expert(expert_trajs[i]) for i in range(train_xinit.shape[0])]\n # converged_rho_tf = [self.mode_update_logReg_expert_exp1(expert_trajs[i]) for i in range(train_xinit.shape[0])]\n # converged_rho_tf = [self.mode_update_logReg(tf.square(expert_trajs[i,:self.n,:]-expert_trajs[i,self.n:self.n_objects*self.n,:])) for i in range(train_xinit.shape[0])]\n\n # converged_rho_tf = [out[0] for out in outputs]\n # converged_Ct_tf = [out[1] for out in outputs]\n\n # with tf.GradientTape(persistent=True) as tape:\n with tf.GradientTape() as tape:\n # converged_rho_tf = [self.mode_update_logReg(tf.square(expert_trajs[i,:self.n,:]-expert_trajs[i,self.n:self.n_objects*self.n,:])) for i in range(train_xinit.shape[0])]\n # loss = self.eval_loss(train_xinit, train_xinit.shape[0], expert_trajs, converged_rho_tf)\n loss_sysID = self.eval_loss_sysID(train_xinit.shape[0], expert_trajs)\n\n # variables = [self.Qtrain_tf, self.xfs_train_tf, self.Rtrain_tf, self.Qftrain_tf]\n # variables = [self.Qtrain_tf, self.xfs_train_tf, self.Rtrain_tf, self.Qftrain_tf, self.massinv_tf, *self.classifier.variables, self.Ctrain_tf]\n # variables_sysID = [self.massinv_tf]\n variables_sysID = [self.massinv_tf, *self.classifier.variables, self.C1train_tf, self.C2train_tf, self.C3train_tf]\n # variables_sysID = [self.massinv_tf, self.Ctrain_tf]\n\n # gradients = tape.gradient(loss, variables)\n\n gradients_sysID = 
tape.gradient(loss_sysID, variables_sysID)\n \n if applygradient == True:\n # optimizer.apply_gradients(zip(gradients, variables))\n optimizer.apply_gradients(zip(gradients_sysID, variables_sysID))\n\n self.massinv_tf.assign(tf.abs(self.massinv_tf)) # IMPORTANT WAY TO REASSSIGN VALUE SUCH THAT IT REMAINS A TF.VARIABLE AND DOESNT BECOME TF.TENSOR\n self.C1train_tf.assign(tf.abs(self.C1train_tf))\n self.C2train_tf.assign(tf.abs(self.C2train_tf))\n self.C3train_tf.assign(tf.abs(self.C3train_tf))\n # self.Rtrain_tf.assign(tf.abs(self.Rtrain_tf))\n # self.Qtrain_tf.assign(tf.abs(self.Qtrain_tf))\n # self.Qftrain_tf.assign(tf.abs(self.Qftrain_tf))\n return loss_sysID","sub_path":"LQRlayer_train_self_xz_exp1.py","file_name":"LQRlayer_train_self_xz_exp1.py","file_ext":"py","file_size_in_byte":17664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"528967500","text":"import numpy as np\n\nimport sys\nsys.path.append(r'C:\\Users\\woottenm\\Documents\\Code\\zebrafish-analysis')\nfrom using_cropped import *\nfrom image_process import crop_to_nonzero\n\nimport skimage\nimport skimage.io\nimport skimage.draw\nimport skimage.color\nimport skimage.measure\nimport skimage.exposure\nimport skimage.morphology\n\ndef get_major_axis_line(blob, length=2):\n (cy, cx) = blob.centroid\n (dx, dy) = (length * np.cos(blob.orientation), length * np.sin(blob.orientation))\n xs = [cx + dx, cx - dx]\n ys = [cy - dy, cy + dy]\n return (xs, ys)\n\ndef extend_blobs(image, length=50):\n scaled = np.copy(skimage.exposure.rescale_intensity(skimage.img_as_float(image)))\n for blob in get_blobs(image):\n ([x0, x1], [y0, y1]) = get_major_axis_line(blob, length=length)\n line = skimage.draw.line(int(y0), int(x0), int(y1), int(x1))\n for (r, c) in zip(*line):\n try:\n scaled[r, c] = 0.5\n except IndexError:\n pass\n return scaled\n\nI_with_lines = np.array(list(map(extend_blobs, I_crop_just_eyes)))\n\ndef angle_difference(θ1, θ2):\n ordinary_diff = (θ2 - θ1) % np.pi\n return (np.pi / 2) - np.abs(ordinary_diff - (np.pi / 2))\n\ndef angle_between_eyes(image):\n blobs = get_blobs(image)\n if len(blobs) != 2:\n return float('nan')\n [blob1, blob2] = blobs\n return angle_difference(blob1.orientation, blob2.orientation)\n\nangle_diffs = np.array(list(map(angle_between_eyes, I_crop_just_eyes)))\n","sub_path":"mwCode/wednesday_orienting.py","file_name":"wednesday_orienting.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"5819739","text":"#!flask/bin/python\nimport six\nfrom flask import Flask, jsonify, abort, request, make_response, url_for\nfrom flask_httpauth import HTTPBasicAuth\n\napp = Flask(__name__, static_url_path=\"\")\nauth = HTTPBasicAuth()\n\n\n@auth.get_password\ndef get_password(username):\n if username == 'grholl':\n return 'python'\n return None\n\n\n@auth.error_handler\ndef unauthorized():\n # return 403 instead of 401 to prevent browsers from displaying the default\n # auth dialog\n return make_response(jsonify({'error': 'Unauthorized access'}), 403)\n\n\n@app.errorhandler(400)\ndef bad_request(error):\n return make_response(jsonify({'error': 'Bad request'}), 400)\n\n\n@app.errorhandler(404)\ndef not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)\n\n\ntokens = [\n {\n 'id': 1,\n 'title': u'Silver Token',\n 'description': u'Good for: Domestic Beer (Budweiser, Miller, Coors)',\n 'done': False\n },\n {\n 'id': 2,\n 'title': u'Gold Token',\n 
'description': u'Good for: Premium Beer (Ballast Point, Lakefront Brewery, Goose Island, Summit)',\n        'done': False\n    }\n]\n\n\ndef make_public_token(token):\n    new_token = {}\n    for field in token:\n        if field == 'id':\n            new_token['uri'] = url_for('get_token', token_id=token['id'],\n                                       _external=True)\n        else:\n            new_token[field] = token[field]\n    return new_token\n\n\n@app.route('/pintshare/api/v1.0/tokens', methods=['GET'])\n@auth.login_required\ndef get_tokens():\n    return jsonify({'tokens': [make_public_token(token) for token in tokens]})\n\n\n@app.route('/pintshare/api/v1.0/tokens/<int:token_id>', methods=['GET'])\n@auth.login_required\ndef get_token(token_id):\n    token = [token for token in tokens if token['id'] == token_id]\n    if len(token) == 0:\n        abort(404)\n    return jsonify({'token': make_public_token(token[0])})\n\n\n@app.route('/pintshare/api/v1.0/tokens', methods=['POST'])\n@auth.login_required\ndef create_token():\n    if not request.json or 'title' not in request.json:\n        abort(400)\n    token = {\n        'id': tokens[-1]['id'] + 1,\n        'title': request.json['title'],\n        'description': request.json.get('description', \"\"),\n        'done': False\n    }\n    tokens.append(token)\n    return jsonify({'token': make_public_token(token)}), 201\n\n\n@app.route('/pintshare/api/v1.0/tokens/<int:token_id>', methods=['PUT'])\n@auth.login_required\ndef update_token(token_id):\n    token = [token for token in tokens if token['id'] == token_id]\n    if len(token) == 0:\n        abort(404)\n    if not request.json:\n        abort(400)\n    if 'title' in request.json and \\\n            not isinstance(request.json['title'], six.string_types):\n        abort(400)\n    if 'description' in request.json and \\\n            not isinstance(request.json['description'], six.string_types):\n        abort(400)\n    token[0]['title'] = request.json.get('title', token[0]['title'])\n    token[0]['description'] = request.json.get('description',\n                                                token[0]['description'])\n    token[0]['done'] = request.json.get('done', token[0]['done'])\n    return jsonify({'token': make_public_token(token[0])})\n\n\n@app.route('/pintshare/api/v1.0/tokens/<int:token_id>', methods=['DELETE'])\n@auth.login_required\ndef delete_token(token_id):\n    token = [token for token in tokens if token['id'] == token_id]\n    if len(token) == 0:\n        abort(404)\n    tokens.remove(token[0])\n    return jsonify({'result': True})\n\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0')\n    #app.run(debug=True)\n","sub_path":"rest.py","file_name":"rest.py","file_ext":"py","file_size_in_byte":3600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"565345825","text":"from bs4 import BeautifulSoup\nimport requests\nimport json\n\nbuild_name = []\nstatus = []\ntimestamp = []\n\nwith open(\"config.json\", \"r\") as f:\n    config = json.load(f)\npage = requests.get(config[\"serverurl\"]).text\nsoup = BeautifulSoup(page, \"lxml\")\ntable = soup.find('table', {\"id\": \"projectstatus\"}).find_all(\"tr\")[1::]\n#fetches the data from a table on the server specified in the config file\nfor n in table:\n    build_name.append(str(n.find(\"a\", {\"class\": \"model-link inside\"}).text))\n    status.append(str(n.find(\"img\").get('alt', '')))\n    timestamp.append(str(n.find_all(\"td\")[2].get('data','')))\nbuild_data = [build_name, status, timestamp]\nwith open(\"last_build_data_log.json\", \"w\") as f:\n    json.dump(build_data, f)\n","sub_path":"GetData.py","file_name":"GetData.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"265324168","text":"import netCDF4 as nc\nfrom os import 
path,chmod, remove\nimport stat\nimport numpy as np\nimport csv\nimport pandas\nimport re\nimport linecache\nimport time\n\nSITENUM = 61353\nDATA_HOME='/home/scr/Data'\nCOOR_PATH = DATA_HOME + '/IBIS_Data/5b9012e4c29ca433443dcfab/IBIS_site_info.txt'\n\nBIOME_OUT_PATH = DATA_HOME + '/Biome_BGC_Data/5b9012e4c29ca433443dcfab/outputs'\nBIOME_OUT_SUFFIX = '.annual-avg.ascii'\nBIOME_NC_PATH = 'data/Biome-BGC-annual-output.nc'\n\nIBIS_OUT_PATH = DATA_HOME + '/IBIS_Data/5b9012e4c29ca433443dcfab/outputs'\nIBIS_OUT_SUFFIX = '.annual.txt'\nIBIS_NC_PATH = 'data/IBIS-annual-out.nc'\n\nLPJ_OUT_PATH = DATA_HOME + '/LPJ/5b9012e4c29ca433443dcfab/outputs'\nLPJ_OUT_SUFFIX = '.daily.ascii'\nLPJ_NC_PATH = 'data/LPJ-annual-out.nc'\ngrid_path = DATA_HOME + '/LPJ/5b9012e4c29ca433443dcfab/grid/'\ngrid_suffix = '_grid.ascii'\n\nGRID_LENGTH = 0.5\nLON_START = -179.75\nLON_END = 179.75 + GRID_LENGTH\nLAT_START = -54.75\nLAT_END = 82.25 + GRID_LENGTH\nLAT_COUNT = (LAT_END-LAT_START)/GRID_LENGTH\nLON_COUNT = (LON_END-LON_START)/GRID_LENGTH\n\nYEAR_NUM = 32\nTIME_START = 1982\nTIME_END = TIME_START + YEAR_NUM\n\nLONS = np.arange(LON_START, LON_END, GRID_LENGTH)\nLATS = np.arange(LAT_START, LAT_END, GRID_LENGTH)\n\ndef readNC():\n # chmod(LPJ_NC_PATH, stat.S_IRWXU)\n # chmod(LPJ_NC_PATH, stat.S_IRWXG)\n # chmod(LPJ_NC_PATH, stat.S_IRWXO)\n\n dataset = nc.Dataset(LPJ_NC_PATH, 'r+', format='NETCDF4')\n\n lonDimension = dataset.dimensions['long']\n latDimension = dataset.dimensions['lat']\n timeDimension = dataset.dimensions['time']\n\n lonVariable = dataset.variables['long']\n latVariable = dataset.variables['lat']\n timeVariable = dataset.variables['time']\n gppVariable = dataset.variables['GPP']\n nppVariable = dataset.variables['NPP']\n # nepVariable = dataset.variables['NEP']\n # neeVariable = dataset.variables['NEE']\n\n\n # timeVariable.datatype = 'f4'\n # timeVariable.units = 'days since ' + str(TIME_START) + '-01-01'\n # timeVariable.calendar = '365_day'\n\n # lonVariable[:] = LONS\n # latVariable[:] = LATS\n # timeVariable[:] = [n * 365 for n in range(YEAR_NUM)]\n\n dataset.close()\n print('finished!')\n\ndef writeNC():\n if path.exists(LPJ_NC_PATH):\n remove(LPJ_NC_PATH)\n # chmod(LPJ_NC_PATH, stat.S_IRWXO)\n # chmod(LPJ_NC_PATH, stat.S_IRWXG)\n # chmod(LPJ_NC_PATH, stat.S_IRWXU)\n\n dataset = nc.Dataset(LPJ_NC_PATH, 'w', format='NETCDF4')\n\n lonDimension = dataset.createDimension('long', len(LONS))\n latDimension = dataset.createDimension('lat', len(LATS))\n timeDimension = dataset.createDimension('time', None)\n\n lonVariable = dataset.createVariable(\"long\", 'f4', (\"long\"))\n latVariable = dataset.createVariable(\"lat\", 'f4', (\"lat\"))\n timeVariable = dataset.createVariable(\"time\", 'f4', (\"time\"))\n\n gppVariable = dataset.createVariable('GPP', 'f4', ('time', 'lat', 'long'), zlib=True, least_significant_digit=4)\n nppVariable = dataset.createVariable('NPP', 'f4', ('time', 'lat', 'long'), zlib=True, least_significant_digit=4)\n\n # gppVariable.set_auto_mask(True)\n # nppVariable.set_auto_mask(True)\n # nepVariable.set_auto_mask(True)\n # neeVariable.set_auto_mask(True)\n # gppVariable.setncattr('missing_value', 0)\n # nppVariable.setncattr('missing_value', 0)\n # nepVariable.setncattr('missing_value', 0)\n # neeVariable.setncattr('missing_value', 0)\n\n lonDimension = dataset.dimensions['long']\n latDimension = dataset.dimensions['lat']\n timeDimension = dataset.dimensions['time']\n\n lonVariable = dataset.variables['long']\n latVariable = dataset.variables['lat']\n timeVariable = 
dataset.variables['time']\n    gppVariable = dataset.variables['GPP']\n    nppVariable = dataset.variables['NPP']\n\n    lonVariable.units = 'degrees_east'\n    latVariable.units = 'degrees_north'\n    timeVariable.units = 'days since ' + str(TIME_START) + '-01-01'\n    timeVariable.calendar = '365_day'\n\n    lonVariable[:] = LONS\n    latVariable[:] = LATS\n    timeVariable[:] = [n*365 for n in range(YEAR_NUM)]\n\n    lanNum=int((LAT_END-LAT_START)/GRID_LENGTH)\n    LON_NUM=int((LON_END-LON_START)/GRID_LENGTH)\n    gpp = np.empty([YEAR_NUM, lanNum, LON_NUM])\n    npp = np.empty([YEAR_NUM, lanNum, LON_NUM])\n    for i in range(SITENUM):\n        siteCoorStr = linecache.getline(grid_path + str(i + 1) + grid_suffix , 1)\n        lonLat = re.split('\\s+', siteCoorStr)\n        siteLon = float(lonLat[0])\n        siteLat = float(lonLat[1])\n        lonIndex = int((siteLon - LON_START) / 0.5)\n        latIndex = int((siteLat - LAT_START) / 0.5)\n        if (latIndex < LAT_COUNT) and (latIndex >= 0) and (lonIndex < LON_COUNT) and(lonIndex >=0):\n            filepath = LPJ_OUT_PATH + '/' + str(i+1) + LPJ_OUT_SUFFIX\n            if path.exists(filepath):\n                siteData = pandas.read_csv(filepath, sep='\\s+', header=None)\n                colNPP = np.array(siteData.iloc[:, [0]]).reshape(32, -1).mean(axis=1)\n                colGPP = np.array(siteData.iloc[:, [1]]).reshape(32, -1).mean(axis=1)\n                npp[:, latIndex, lonIndex] = colNPP\n                gpp[:, latIndex, lonIndex] = colGPP\n            print(i+1, SITENUM)\n        else:\n            # valid site indices run from 1 to 61233\n            print('out of range: ', i+1)\n    gppVariable[:] = gpp\n    nppVariable[:] = npp\n\n    gppVariable[:] = np.ma.masked_where((gppVariable[:] == 0), gppVariable)\n    nppVariable[:] = np.ma.masked_where((nppVariable[:] == 0), nppVariable)\n\n    dataset.close()\n    print('finished!')\n\n# readNC()\nwriteNC()\n","sub_path":"scripts/LPJ-nc-convertor-LPJ-index.py","file_name":"LPJ-nc-convertor-LPJ-index.py","file_ext":"py","file_size_in_byte":5491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"36253431","text":"from datetime import datetime\n\nfrom densenetocr import DenseNetOCR\nfrom densenetocr.data_loader import load_dict\n\nif __name__ == '__main__':\n    dict_file_path = \"data/char_std_5990.txt\"\n    weight_path = \"model/weights-densent-init.hdf5\"\n    config_path = \"config/densent-default.json\"\n    id_to_char = load_dict(dict_file_path, \"UTF-8\")\n\n    config = DenseNetOCR.load_config(config_path)\n    config['weight_path'] = weight_path\n\n    ocr = DenseNetOCR(**config)\n\n    start = datetime.now()\n    print(ocr.predict(\"data/20437812_1996125331.jpg\", id_to_char)[0])\n    print(f\"cost {(datetime.now() - start).microseconds / 1000} ms\")\n","sub_path":"densenetocr_predict.py","file_name":"densenetocr_predict.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"331210807","text":"'''\n1. CRIM: per capita crime rate by town\n 2. ZN: proportion of residential land zoned for lots over 25,000 sq.ft.\n 3. INDUS: proportion of non-retail business acres per town\n 4. CHAS: Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)\n 5. NOX: nitric oxides concentration (parts per 10 million)\n 6. RM: average number of rooms per dwelling\n 7. AGE: proportion of owner-occupied units built prior to 1940\n 8. DIS: weighted distances to five Boston employment centres\n 9. RAD: index of accessibility to radial highways\n10. TAX: full-value property-tax rate per 10,000 US Dollars\n11. PTRATIO: pupil-teacher ratio by town\n12. B: 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town\n13. 
LSTAT: % lower status of the population\n14. MEDV: Median value of owner-occupied homes in 1000's US Dollars\n'''\n\n\n\n\n#B) Linear Regression Algorithm Performance Evaluation Metrics\n##----------------------------------------------------------------\n #1) Mean Absolute Error.\n #2) Mean Squared Error.\n #3) R2 Error\n\n#1) Mean Absolute Error.\n\n\nimport pandas as pd\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.linear_model import LinearRegression\n\nfilename = 'housing.csv'\n\nhnames=['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE',\n 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']\n\ndataframe = pd.read_csv(filename, names=hnames)\narray = dataframe.values\n\nX = array[: , 0:13]\nY = array[:,13]\n\nkfold = KFold(n_splits=10 )\nmodel = LinearRegression()\n\nscoringMethod = 'neg_mean_absolute_error' #MAE\nresults = cross_val_score(model, X, Y, cv=kfold, scoring=scoringMethod)\n\nprint( \"MAE: %.3f (%.3f)\" % ( results.mean(), results.std() ) )\n","sub_path":"mlpackage/MLPractical60.py","file_name":"MLPractical60.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"586732593","text":"from flask import Flask, jsonify\nfrom flask import request\nfrom flask import render_template\nimport ordrin\nimport json\nimport pprint\n\napp = Flask(__name__)\n\napi = ordrin.APIs(\"8cJxVC2vY0e4AvOUMo0wRM1VNeHvu_22QUWjYO0A-_E\", ordrin.TEST)\n\n@app.route('/')\ndef my_form():\n return render_template(\"derp.html\")\n\n@app.route('/', methods=['POST'])\ndef my_form_post():\n\n global time\n time = request.form['time']\n global address\n address = request.form['address']\n global city\n city = request.form['city']\n global zipcode\n zipcode = request.form['zipcode']\n\n delivery_list_immediate = api.delivery_list(time, address, city, zipcode)\n\n\n restaurants=\"\"\n for x in range(0, len(delivery_list_immediate)):\n \trestaurants+=str(delivery_list_immediate[x]['na']+ \" \")\n \tfor k in range(0, len(delivery_list_immediate[x]['cu'])):\n \t\trestaurants+=str(delivery_list_immediate[x]['cu'][k]+ \" \")\n \trestaurants+=str(delivery_list_immediate[x]['cs_phone'] + \"
\")\n\n return render_template('derp2.html',x = restaurants)\n\nif __name__ == \"__main__\":\n app.debug = True\n app.run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"340415323","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nTest using the NIST Test Vectors\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport binascii\nimport os\n\nimport pytest\n\nfrom cryptography.hazmat.primitives.ciphers import algorithms, modes\n\nfrom .utils import generate_encrypt_test\nfrom ...utils import load_nist_vectors\n\n\n@pytest.mark.supported(\n only_if=lambda backend: backend.cipher_supported(\n algorithms.TripleDES(\"\\x00\" * 8), modes.CBC(\"\\x00\" * 8)\n ),\n skip_message=\"Does not support TripleDES CBC\",\n)\n@pytest.mark.cipher\nclass TestTripleDES_CBC(object):\n test_KAT = generate_encrypt_test(\n load_nist_vectors,\n os.path.join(\"ciphers\", \"3DES\", \"CBC\"),\n [\n \"TCBCinvperm.rsp\",\n \"TCBCpermop.rsp\",\n \"TCBCsubtab.rsp\",\n \"TCBCvarkey.rsp\",\n \"TCBCvartext.rsp\",\n ],\n lambda keys, **kwargs: algorithms.TripleDES(binascii.unhexlify(keys)),\n lambda iv, **kwargs: modes.CBC(binascii.unhexlify(iv)),\n )\n\n test_MMT = generate_encrypt_test(\n load_nist_vectors,\n os.path.join(\"ciphers\", \"3DES\", \"CBC\"),\n [\n \"TCBCMMT1.rsp\",\n \"TCBCMMT2.rsp\",\n \"TCBCMMT3.rsp\",\n ],\n lambda key1, key2, key3, **kwargs: algorithms.TripleDES(\n binascii.unhexlify(key1 + key2 + key3)\n ),\n lambda iv, **kwargs: modes.CBC(binascii.unhexlify(iv)),\n )\n\n\n@pytest.mark.supported(\n only_if=lambda backend: backend.cipher_supported(\n algorithms.TripleDES(\"\\x00\" * 8), modes.OFB(\"\\x00\" * 8)\n ),\n skip_message=\"Does not support TripleDES OFB\",\n)\n@pytest.mark.cipher\nclass TestTripleDES_OFB(object):\n test_KAT = generate_encrypt_test(\n load_nist_vectors,\n os.path.join(\"ciphers\", \"3DES\", \"OFB\"),\n [\n \"TOFBpermop.rsp\",\n \"TOFBsubtab.rsp\",\n \"TOFBvarkey.rsp\",\n \"TOFBvartext.rsp\",\n \"TOFBinvperm.rsp\",\n ],\n lambda keys, **kwargs: algorithms.TripleDES(binascii.unhexlify(keys)),\n lambda iv, **kwargs: modes.OFB(binascii.unhexlify(iv)),\n )\n\n test_MMT = generate_encrypt_test(\n load_nist_vectors,\n os.path.join(\"ciphers\", \"3DES\", \"OFB\"),\n [\n \"TOFBMMT1.rsp\",\n \"TOFBMMT2.rsp\",\n \"TOFBMMT3.rsp\",\n ],\n lambda key1, key2, key3, **kwargs: algorithms.TripleDES(\n binascii.unhexlify(key1 + key2 + key3)\n ),\n lambda iv, **kwargs: modes.OFB(binascii.unhexlify(iv)),\n )\n\n\n@pytest.mark.supported(\n only_if=lambda backend: backend.cipher_supported(\n algorithms.TripleDES(\"\\x00\" * 8), modes.CFB(\"\\x00\" * 8)\n ),\n skip_message=\"Does not support TripleDES CFB\",\n)\n@pytest.mark.cipher\nclass TestTripleDES_CFB(object):\n test_KAT = generate_encrypt_test(\n load_nist_vectors,\n os.path.join(\"ciphers\", \"3DES\", \"CFB\"),\n [\n \"TCFB64invperm.rsp\",\n 
\"TCFB64permop.rsp\",\n \"TCFB64subtab.rsp\",\n \"TCFB64varkey.rsp\",\n \"TCFB64vartext.rsp\",\n ],\n lambda keys, **kwargs: algorithms.TripleDES(binascii.unhexlify(keys)),\n lambda iv, **kwargs: modes.CFB(binascii.unhexlify(iv)),\n )\n\n test_MMT = generate_encrypt_test(\n load_nist_vectors,\n os.path.join(\"ciphers\", \"3DES\", \"CFB\"),\n [\n \"TCFB64MMT1.rsp\",\n \"TCFB64MMT2.rsp\",\n \"TCFB64MMT3.rsp\",\n ],\n lambda key1, key2, key3, **kwargs: algorithms.TripleDES(\n binascii.unhexlify(key1 + key2 + key3)\n ),\n lambda iv, **kwargs: modes.CFB(binascii.unhexlify(iv)),\n )\n","sub_path":"tests/hazmat/primitives/test_3des.py","file_name":"test_3des.py","file_ext":"py","file_size_in_byte":4186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"513855263","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.distributions import Categorical\n\n\ndef regression_loss(x, y):\n # x, y are in shape (N, C)\n x = F.normalize(x, dim=1)\n y = F.normalize(y, dim=1)\n return 2 - 2 * (x * y).sum(dim=-1)\n\n\ndef entropy(p):\n return Categorical(probs=p).entropy()\n\n\ndef entropy_loss(logits, reduction='mean'):\n\n losses = entropy(F.softmax(logits, dim=1)) # (N)\n if reduction == 'none':\n return losses\n elif reduction == 'mean':\n return torch.sum(losses) / logits.size(0)\n elif reduction == 'sum':\n return torch.sum(losses)\n else:\n raise AssertionError('reduction has to be none, mean or sum')\n\n\ndef cross_entropy(logits, labels, reduction='mean'):\n \"\"\"\n :param logits: shape: (N, C)\n :param labels: shape: (N, C)\n :param reduction: options: \"none\", \"mean\", \"sum\"\n :return: loss or losses\n \"\"\"\n N, C = logits.shape\n assert labels.size(0) == N and labels.size(1) == C, f'label tensor shape is {labels.shape}, while logits tensor shape is {logits.shape}'\n\n log_logits = F.log_softmax(logits, dim=1)\n losses = -torch.sum(log_logits * labels, dim=1) # (N)\n\n if reduction == 'none':\n return losses\n elif reduction == 'mean':\n return torch.sum(losses) / logits.size(0)\n elif reduction == 'sum':\n return torch.sum(losses)\n else:\n raise AssertionError('reduction has to be none, mean or sum')\n\n\ndef label_smoothing_cross_entropy(logits, labels, epsilon=0.1, reduction='none'):\n N = logits.size(0)\n C = logits.size(1)\n smoothed_label = torch.full(size=(N, C), fill_value=epsilon / (C - 1))\n smoothed_label.scatter_(dim=1, index=torch.unsqueeze(labels, dim=1).cpu(), value=1 - epsilon)\n if logits.is_cuda:\n smoothed_label = smoothed_label.cuda()\n return cross_entropy(logits, smoothed_label, reduction)\n\n\nclass SmoothingLabelCrossEntropyLoss(nn.Module):\n def __init__(self, epsilon=0.1, reduction='mean'):\n super().__init__()\n self._epsilon = epsilon\n self._reduction = reduction\n\n def forward(self, logits, labels):\n return label_smoothing_cross_entropy(logits, labels, self._epsilon, self._reduction)\n\n\nclass ScatteredCrossEntropyLoss(nn.Module):\n def __init__(self, reduction='mean'):\n super().__init__()\n self._reduction = reduction\n\n def forward(self, logits, labels):\n return cross_entropy(logits, labels, self._reduction)\n","sub_path":"utils/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"18824171","text":"import socket\nimport threading\nimport time\n\nimport gameBoard \nfrom PyQt5 import QtCore, QtGui, QtWidgets\nimport sys \nimport json\nimport 
socket\n\n\nclass NetworkConfig:\n    def __init__(self, playerName):\n        self.playerName = playerName\n        self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        #self.client.settimeout(5) # 5s\n        self.host = \"localhost\" # For this to work on your machine this must be equal to the ipv4 address of the machine running the server\n        # You can find this address by typing ipconfig in CMD and copying the ipv4 address. Again this must be the server's\n        # ipv4 address. This field will be the same for all your clients.\n        self.port = 8080\n        self.addr = (self.host, self.port)\n        self.id = self.connect()\n        self.connected = False\n\n    def connect(self):\n        try:\n            print(\"{} is trying to connect to the server...\".format(self.playerName))\n            self.client.connect(self.addr)\n        except:\n            print(\"Could not make a connection to the server\")\n            input(\"Press enter to quit\")\n            sys.exit(0)\n\n        return self.client.recv(2048).decode()\n\n    def send(self, data):\n        try:\n            self.client.send(str.encode(data))\n            reply = json.loads(self.client.recv(2048).decode())\n            return reply # in json\n        except socket.error as e:\n            return str(e)\n\n    def recv(self):\n        try:\n            reply = self.client.recv(2048).decode()\n            return reply\n        except socket.error as e:\n            return str(e) \n\nclass Player:\n    def __init__(self,name):\n        self.playerName = name\n        self.net = NetworkConfig(self.playerName)\n        self.signal = True\n        self.connected = False\n\n    def run(self):\n        if self.net.id == \"connected\":\n            print(\"Player game status: \", self.net.id)\n\n            received = self.net.recv()\n\n            if received != \"\":\n                print(\"Received (from Server): {}\\n\".format(received))\n\n                if received == \"start\" or received == \"start_first\":  # compare both values; a bare or \"start_first\" is always truthy, which made the else branch unreachable\n                    print(\"Game is starting\\n\")\n                    if received == \"start_first\":\n                        print(\"I am starting first\\n\") \n                    print(\"Starting GUI...\")\n                    self.run_gui()\n                else:\n                    print(\"Wait to start...\")\n            else:\n                print(\"Empty package\\n\") \n\n    def run_gui(self):\n        app = QtWidgets.QApplication(sys.argv)\n        MainWindow = QtWidgets.QMainWindow()\n        ui = gameBoard.Ui_MainWindow(self.net, self.playerName)\n        ui.setupUi(MainWindow)\n        MainWindow.show()\n        sys.exit(app.exec_())\n","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"353988459","text":"\n# This is the file where you must work.\n# Write code in the functions (and create new functions) so that they work\n# according to the requirements.\nimport os\nimport csv\nfrom pathlib import Path\n\n\ndef isWritable(directory):\n    try:\n        tmp_prefix = \"write_tester\"\n        count = 0\n        filename = os.path.join(directory, tmp_prefix)\n        while(os.path.exists(filename)):\n            filename = \"{}.{}\".format(os.path.join(directory, tmp_prefix),count)\n            count = count + 1\n        f = open(filename,\"w\")\n        f.close()\n        os.remove(filename)\n        return True\n    except Exception as e:\n        #print \"{}\".format(e)\n        return False\n\n\ndef display_inventory(inventory):\n    \"\"\"Display the contents of the inventory in a simple way.\"\"\"\n\n    for key, value in inventory.items():\n        print(f'{key}:{value}')\n\n\ndef add_to_inventory(inventory, added_items):\n    \"\"\"Add to the inventory dictionary a list of items from added_items.\"\"\"\n    if added_items in inventory:\n        inventory[added_items] += 1\n    else:\n        inventory[added_items] = 1\n\n\ndef remove_from_inventory(inventory, removed_items):\n    \"\"\"Remove from the inventory dictionary a list of items from removed_items.\"\"\"\n    if removed_items in inventory:\n        
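# decrement the stored count; the zero check below pops the key entirely so that\n        # exhausted items never linger in the inventory dict\n        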
inventory[removed_items] -= 1\n        if inventory[removed_items] == 0:\n            inventory.pop(removed_items)\n    else:\n        pass\n\n\ndef print_table(inventory, order):\n    \"\"\"\n    Display the contents of the inventory in an ordered, well-organized table with\n    each column right-aligned.\n    \"\"\"\n    if order == 'count,desc':\n        sorted_inventory = sorted(inventory.items(), key=lambda x: x[1], reverse=True)\n    elif order == 'count,asc':\n        sorted_inventory = sorted(inventory.items(), key=lambda x: x[1])\n    else: \n        sorted_inventory = [(key,value) for key,value in inventory.items()]\n\n    print(\"-\"*23)\n    print(\"{:<10} | {:>10}\".format(\"item name\",\"count\"))\n    print(\"-\"*23)\n    for key,value in sorted_inventory:\n        print(\"{:<10} | {:>10}\".format(key,value))\n    print(\"-\"*23)\n\n\ndef import_inventory(inventory, filename):\n    \"\"\"Import new inventory items from a CSV file.\"\"\"\n    try:\n        path = Path(__file__).parent\n        csv_file = open(f'{path}\\\\{filename}',\"rt\", encoding=\"utf8\",)\n        csv_reader = csv.reader(csv_file)\n        for row in csv_reader:\n            for element in row:\n                if element in inventory:\n                    inventory[element] += 1\n                else:\n                    inventory[element] = 1 \n    except:\n        print(f\"File {filename} not found!\")\n\n\ndef export_inventory(inventory, filename):\n    \"\"\"Export the inventory into a CSV file.\"\"\"\n    try:\n        path = Path(__file__).parent\n        if not isWritable(path):\n            raise ValueError(f'You don\\'t have permission to create file {filename}!')\n\n        csv_file = open(f'{path}\\\\{filename}',\"w\", encoding=\"utf8\",)\n\n        csv_writer = csv.writer(csv_file)\n\n        # inventory_to_export = ['One','Twoo',12343]\n        # csv_writer.writerow(inventory_to_export)\n\n        for element in inventory:\n            element_times = inventory[element]\n            while element_times > 0:\n                csv_writer.writerow([element])\n                element_times -= 1 \n    except Exception as e:\n        print(f\"Could not export to {filename}: {e}\")\n\ndef main_menu():\n\n    user_inventory = {}\n\n    # default filenames\n    filename_to_import = \"import_inventory.csv\"\n    filename_to_export = \"export_inventory.csv\"\n\n    import_inventory(user_inventory,filename_to_import)\n    #import_inventory(user_inventory,filename_to_export)\n    display_inventory(user_inventory)\n    print_table(user_inventory, 'count,asc')\n    #print_table(user_inventory,'')\n    add_to_inventory(user_inventory,\"diamond\")\n    remove_from_inventory(user_inventory,\"rope\")\n    remove_from_inventory(user_inventory,\"hammer\")\n    print_table(user_inventory, 'count,asc')\n    #export_inventory(user_inventory,filename_to_export)\n\nif __name__ == '__main__':\n    main_menu()","sub_path":"task/game_inventory (original).py","file_name":"game_inventory (original).py","file_ext":"py","file_size_in_byte":4105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"373047604","text":"from selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nimport re\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\nimport os\r\nimport csv\r\nfrom selenium.webdriver.chrome.options import Options\r\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\r\nfrom selenium.webdriver.common.action_chains import ActionChains\r\nfrom time import sleep\r\nimport time\r\nfrom Crypto.Cipher import AES\r\nimport base64\r\nfrom hashlib import md5\r\n\r\nclass Sky():\r\n    def __init__(self):\r\n        self.option = webdriver.ChromeOptions()\r\n        
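# hide the \"Chrome is being controlled by automated software\" infobar; together with the CDP\r\n        # script injected below, this masks the navigator.webdriver automation hint from the page\r\n        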
self.option.add_experimental_option('excludeSwitches', ['enable-automation'])\r\n        self.driver = webdriver.Chrome(options=self.option)\r\n        self.driver.execute_cdp_cmd(\"Page.addScriptToEvaluateOnNewDocument\", {\r\n            \"source\": \"\"\"\r\n            Object.defineProperty(navigator, 'webdriver', {\r\n              get: () => undefined\r\n            })\r\n          \"\"\"\r\n        })\r\n        self.driver.implicitly_wait(10)\r\n        self.driver.maximize_window()\r\n        self.url = 'https://www.aqistudy.cn/historydata/daydata.php?city=%E6%88%90%E9%83%BD&month=201612'\r\n        self.wait = WebDriverWait(self.driver, 15)\r\n        self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.92 Safari/537.36',}\r\n        self.local_key = 'emhlbnFpcGFsbWtleQ=='\r\n        self.local_vi = 'emhlbnFpcGFsbWl2'\r\n\r\n    def get_data_by_selenium(self):\r\n        self.driver.get(self.url)\r\n        time.sleep(1)\r\n        # items = self.driver.execute_script(\"return localStorage.getItem('781d10706b2d2ed381e835e06a3c5205')\")\r\n        items = self.driver.execute_script(\"return localStorage.key(0)\")\r\n        print(items)\r\n        script = \"return localStorage.getItem('{}')\".format(items)\r\n        print(script)\r\n        items = self.driver.execute_script(script)\r\n        print(items)\r\n        return items\r\n\r\n    def get_data_by_req(self):\r\n        resp = requests.get(self.url, headers=self.headers)\r\n        print(resp.status_code)\r\n        print(resp.text)\r\n\r\n    def AES_Decrypt(self, data):\r\n        secretkey = md5(self.local_key.encode('utf-8')).hexdigest()[16:32]\r\n        secretiv = md5(self.local_vi.encode('utf-8')).hexdigest()[0:16]\r\n        print(secretkey,secretiv)\r\n        data = data.encode('utf8')\r\n        encodebytes = base64.decodebytes(data)\r\n        # decode the base64-encoded ciphertext back into raw bytes\r\n        cryptos = AES.new(secretkey.encode('utf8'), AES.MODE_CBC, secretiv.encode('utf8'))\r\n        text_decrypted = cryptos.decrypt(encodebytes)\r\n        unpad = lambda s: s[0:-s[-1]]\r\n        text_decrypted = unpad(text_decrypted)\r\n        # strip the block-cipher padding\r\n        text_decrypted = text_decrypted.decode('utf8')\r\n        # if text_decrypted:\r\n        #     self.driver.close()\r\n        return text_decrypted\r\n\r\n\r\nif __name__ == '__main__':\r\n    s = Sky()\r\n    items = s.get_data_by_selenium()\r\n    print(items)\r\n    text_decrypted = s.AES_Decrypt(items)\r\n    print(text_decrypted)\r\n    print(base64.b64decode(text_decrypted.encode('utf-8')).decode('utf-8'))\r\n","sub_path":"PycharmProjects/Reptile/seleniun_Learn/sky.py","file_name":"sky.py","file_ext":"py","file_size_in_byte":3226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"221359039","text":"__author__ = 'ian'\ndef geminator():\n    number_of_rocks = int(input())\n    rock_array = []\n    for i in range(0, number_of_rocks):\n        stbw = \"\".join(set(str(input())))\n        rock_array.append(stbw)\n    rock_dict = {\"a\": 0, \"b\": 0, \"c\": 0, \"d\": 0, \"e\": 0, \"f\": 0, \"g\": 0, \"h\": 0, \"i\": 0, \"j\": 0, \"k\": 0, \"l\": 0, \"m\": 0, \"n\": 0, \"o\": 0, \"p\": 0, \"q\": 0, \"r\": 0, \"s\": 0, \"t\": 0, \"u\": 0, \"v\": 0, \"w\": 0, \"x\": 0, \"y\": 0, \"z\": 0}\n    for i in range(0, len(rock_array)):\n        for s in range(0, len(rock_array[i])):\n            rock_dict[str(rock_array[i][s])] += 1\n    counter = 0\n    for k, v in rock_dict.items():\n        print(k, v)\n        if v == number_of_rocks:\n            counter += 1\n    print(counter)\ngeminator()\n","sub_path":"Raw_cut.py","file_name":"Raw_cut.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"467654317","text":"from sense_hat import SenseHat\r\nimport requests\r\nimport json\r\nfrom random 
import randint\r\nfrom time import sleep\r\nimport sys\r\n\r\nsense = SenseHat()\r\nsense.clear()\r\n\r\ndef load_data():\r\n    with open('data.json') as json_data:\r\n        global data_file\r\n        data_file = json.load(json_data)\r\n\r\ndef save(name, choice):\r\n    global data_file\r\n    if(choice == 'like'):\r\n        data_file['liked'].append(name)\r\n    else:\r\n        data_file['disliked'].append(name)\r\n    with open('data.json', 'w') as outfile:\r\n        json.dump(data_file, outfile)\r\n\r\ndef get_user():\r\n    global full_name  # the main loop below reads full_name, so it must be published as a global\r\n\r\n    response = requests.get(\"https://randomuser.me/api/\")\r\n    jsonn = response.json()\r\n\r\n    title = jsonn['results'][0]['name']['title']\r\n    first_name = jsonn['results'][0]['name']['first']\r\n    last_name = jsonn['results'][0]['name']['last']\r\n\r\n    full_name = title + ' ' + first_name + ' ' + last_name\r\n\r\n\r\nwhile True:\r\n    try:\r\n        load_data()\r\n        get_user()\r\n        events = sense.stick.get_events()\r\n\r\n        sense.show_message(full_name)\r\n\r\n        current_event = sense.stick.wait_for_event()\r\n\r\n        if(current_event.direction == 'right'):\r\n            choice = 'like'\r\n            sense.clear(0, 255, 0)\r\n        else:\r\n            choice = 'dislike'\r\n            sense.clear(255, 0, 0)\r\n\r\n        sleep(1)\r\n\r\n        save(full_name, choice)\r\n\r\n    except KeyboardInterrupt:\r\n        sense.clear()\r\n        sys.exit(0)","sub_path":"tinder/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"433752842","text":"from .config import DiggicampConf, CONFIG_VERSION\n\n\n# migrate from no version tag to version tag\ndef migrate_none(conf: DiggicampConf):\n    print(\"Migrating to version 1.1.0...\")\n    conf.set('version', '1.1.0')\n\n    if not conf.get('downloads'):\n        return conf\n\n    old_dl = conf.get('downloads')\n    downloads = []\n    for fid in old_dl:\n        if isinstance(old_dl[fid], str):\n            downloads.append({\n                'folder': fid,\n                'target': old_dl[fid]\n            })\n        else:\n            downloads.append({\n                'folder': fid,\n                'target': old_dl[fid]['target'],\n                'regex': old_dl[fid]['regex']\n            })\n\n    conf.set('downloads', downloads)\n    return conf  # migrate_config reassigns conf from this return value, so it must not fall through to None\n\n\nMIGRATIONS = {\n    'None': migrate_none\n}\n\n\ndef migrate_config(conf: DiggicampConf):\n    while conf.version() != CONFIG_VERSION:\n        if conf.version() is None:\n            conf = MIGRATIONS['None'](conf)\n            continue\n\n        if conf.version() in MIGRATIONS:\n            conf = MIGRATIONS[conf.version()](conf)\n        else:\n            raise Exception(\"Cannot migrate from \" + conf.version() + \" - No migration found!\")\n","sub_path":"diggicamp/config_migrations.py","file_name":"config_migrations.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"185199629","text":"import numpy as np \r\nimport cv2 \r\nimport scipy.io \r\n#from scipy.misc import imread \r\n#import matplotlib.pyplot as plt\r\nimport imageio\r\nimport open3d\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nimport time\r\nfrom skimage.transform import downscale_local_mean\r\nfrom scipy.stats import entropy \r\nfrom collections import defaultdict\r\nfrom functools import reduce\r\n#from cam_funcs import *\r\n\r\nclass SpaceCarve(object):\r\n    def __init__(self, resolution, p_sense, p_change, K, rgb_lower=[180, 180, 0], rgb_upper=[255, 255, 20], frame_width=576, frame_height=694, voxel_center=(0,0,-.6), voxbox_size=.4, mode='mujoco', version=(1, 1), update=3, z_prob_occ=.55):\r\n        #self.resolution = resolution\r\n        self.p_sense = p_sense\r\n        self.p_change = p_change \r\n        self.voxelCoords = 
self.makeVoxels(resolution, voxel_center, voxbox_size)\r\n self.voxelVals = np.divide(np.ones(np.shape(self.voxelCoords)[0]), 2)\r\n self.rgb_lower = rgb_lower\r\n self.rgb_upper = rgb_upper\r\n self.frame_width = frame_width\r\n self.frame_height = frame_height\r\n self.K = K \r\n self.num_carves = 0\r\n self.resolution = resolution \r\n self.mode = mode \r\n self.version = version \r\n self.update = update\r\n self.z_prob_occ = z_prob_occ\r\n\r\n def reset(self):\r\n self.num_carves = 0\r\n self.voxelVals = np.divide(np.ones(np.shape(self.voxelCoords)[0]), 2)\r\n\r\n def getVoxelCoords(self):\r\n return self.voxelCoords\r\n\r\n def getVoxelVals(self):\r\n return self.voxelVals\r\n\r\n def carve(self, cam_ext, img, segment=True, fpath=None):\r\n\r\n self.num_carves += 1\r\n\r\n proj = self.project(cam_ext, np.transpose(self.voxelCoords))\r\n\r\n if segment:\r\n img_mask = self.applySegmentation(img)\r\n else:\r\n img_mask = img\r\n\r\n cv2.imshow('mask', img_mask)\r\n cv2.waitKey(10)\r\n\r\n #imageio.imwrite('imgs/mask_{}.jpg'.format(self.num_carves), img_mask)\r\n if fpath is not None:\r\n imageio.imwrite('{}_mask_{}.jpg'.format(fpath, self.num_carves), img_mask)\r\n\r\n # if self.mode == 'mujoco':\r\n # hit_pixels = [(u, v) for u in range(0, self.frame_width) for v in range(0, self.frame_height) if img_mask[v, u] > 0]\r\n # voxel_hits = []\r\n #print(np.shape(img_mask))\r\n\r\n hit_dict = {}\r\n unhit_dict = {}\r\n count = 0\r\n \r\n for i in range(0, np.shape(proj)[1]):\r\n p_prev = self.p_change * self.voxelVals[i] + (1 - self.p_change) * (1 - self.voxelVals[i]) \r\n v = int(proj[1][i]/proj[2][i])\r\n u = int(proj[0][i]/proj[2][i])\r\n \r\n \r\n if u >=0 and v >= 0 and u < self.frame_width and v < self.frame_height: \r\n count += 1\r\n \r\n if img_mask[v, u] > 0 and self.mode == 'mujoco': # change back\r\n occupied = 1 \r\n if self.update == 2:\r\n self.voxelVals[i] = (self.z_prob_occ * p_prev)/(self.z_prob_occ * p_prev + (1 - self.z_prob_occ) * (1 - p_prev))\r\n \r\n if self.update == 3:\r\n if (u, v) not in hit_dict.keys():\r\n hit_dict[(u, v)] = [i]\r\n else:\r\n hit_dict.get((u, v)).append(i)\r\n\r\n elif img_mask[v, u] == 0 and self.mode == 'dino':\r\n occupied = 1\r\n if self.update == 2:\r\n self.voxelVals[i] = (self.z_prob_occ * p_prev)/(self.z_prob_occ * p_prev + (1 - self.z_prob_occ) * (1 - p_prev))\r\n \r\n if self.update == 3:\r\n if (u, v) not in hit_dict.keys():\r\n hit_dict[(u, v)] = [i]\r\n else:\r\n hit_dict.get((u, v)).append(i)\r\n \r\n else:\r\n occupied = 0\r\n if self.update != 3:\r\n likelihood = (1 - self.p_sense) # p(z = 0 | x = 1)\r\n nlikelihood = self.p_sense # p(z = 0 | x = 0)\r\n # p(z = 0 | x = 1) * p(x = 1) / (p(z = 0 | x = 1) * p(x = 1) + p(z = 0 | x = 0) * p(x = 0))\r\n self.voxelVals[i] = (likelihood * p_prev)/(likelihood * p_prev + nlikelihood * (1 - p_prev))\r\n\r\n else:\r\n if (u, v) not in unhit_dict.keys():\r\n unhit_dict[(u, v)] = [i]\r\n elif (u, v) in unhit_dict.keys(): \r\n unhit_dict.get((u, v)).append(i) \r\n\r\n\r\n if self.update == 3:\r\n voxelValsCopy = self.voxelVals\r\n for k in unhit_dict.keys():\r\n v_list = unhit_dict.get(k)\r\n for i in v_list:\r\n if len(v_list) > 1:\r\n p_others_empty = reduce(lambda x, y: x * y, map(lambda x: 1 - voxelValsCopy[x], list(set(v_list) - set([i]))))\r\n nlikelihood = p_others_empty * self.p_sense + (1 - p_others_empty) * (1 - self.p_sense)\r\n else:\r\n nlikelihood = self.p_sense\r\n likelihood = 1 - self.p_sense\r\n p_prev = self.p_change * voxelValsCopy[i] + (1 - self.p_change) * (1 - 
voxelValsCopy[i])\r\n self.voxelVals[i] = likelihood * p_prev/(likelihood * p_prev + nlikelihood * (1 - p_prev))\r\n for k in hit_dict.keys():\r\n v_list = hit_dict.get(k)\r\n for i in v_list:\r\n if len(v_list) > 1:\r\n p_others_empty = reduce(lambda x, y: x * y, map(lambda x: 1 - voxelValsCopy[x], list(set(v_list) - set([i]))))\r\n nlikelihood = (1 - p_others_empty) * self.p_sense + p_others_empty * (1 - self.p_sense)\r\n else:\r\n nlikelihood = (1 - self.p_sense) \r\n likelihood = self.p_sense \r\n p_prev = self.p_change * voxelValsCopy[i] + (1 - self.p_change) * (1 - voxelValsCopy[i])\r\n self.voxelVals[i] = likelihood * p_prev/(likelihood * p_prev + nlikelihood * (1 - p_prev))\r\n \r\n #projection = self.projectThresh(cam_ext, threshold=.5)\r\n #imageio.imwrite('imgs/camprojection_{}.jpg'.format(self.num_carves), projection)\r\n \r\n return \r\n\r\n def project(self, camE, worldCoords):\r\n transformedCoords = np.matmul(camE, worldCoords)\r\n\r\n if self.mode == 'mujoco':\r\n camX = -transformedCoords[0, :] - 0.23571429 +.01\r\n camY = transformedCoords[1, :] - 0.1744898 \r\n camZ = -transformedCoords[2, :] - .25 #+ 0.225 - .08 \r\n \r\n camCoords = np.vstack((camX, camY, camZ, transformedCoords[3, :]))\r\n res = np.matmul(self.K, camCoords)\r\n\r\n else:\r\n res = transformedCoords\r\n return res\r\n\r\n def projectThresh(self, camE, threshold=.5, fname=None):\r\n worldCoords = np.transpose(np.array([self.voxelCoords[i, :] for i in range(np.shape(self.voxelCoords)[0]) if self.voxelVals[i] >= threshold]))\r\n\r\n if np.shape(worldCoords)[0] == 0:\r\n thresh = max(self.voxelVals)\r\n print(\"threshold too high, use {}\".format(thresh))\r\n worldCoords = np.transpose(np.array([self.voxelCoords[i, :] for i in range(0, np.shape(self.voxelCoords)[0]) if self.voxelVals[i] >= thresh]))\r\n\r\n proj = self.project(camE, worldCoords)\r\n \r\n proj_img = np.zeros((self.frame_height, self.frame_width))\r\n\r\n distToCenter = []\r\n\r\n for i in range(0, np.shape(proj)[1]):\r\n v = int(proj[1, i]/proj[2, i])\r\n u = int(proj[0, i]/proj[2, i])\r\n\r\n distToCenter.append(np.linalg.norm(np.array([v, u]) - np.array([self.frame_height/2, self.frame_width/2])))\r\n \r\n if u >=0 and v >= 0 and u < self.frame_width and v < self.frame_height: \r\n proj_img[v, u] = 1\r\n #if np.isclose(u, self.frame_width/2, atol=20) and np.isclose(v, self.frame_height/2, atol=20):\r\n #print(\"index: {}\".format(i))\r\n\r\n if fname is not None:\r\n imageio.imwrite('{}_proj_{}.jpg'.format(fname, self.num_carves), proj_img)\r\n\r\n return proj_img\r\n\r\n\r\n def projectUncertainty(self, cam, fpath=None):\r\n proj = self.project(cam, np.transpose(self.voxelCoords))\r\n \r\n proj_img = np.ones((self.frame_height, self.frame_width))\r\n counts = np.zeros((self.frame_height, self.frame_width))\r\n max_vals = np.zeros((self.frame_height, self.frame_width))\r\n\r\n hit_dict = defaultdict(lambda: 1)\r\n \r\n for i in range(0, np.shape(proj)[1]):\r\n v = int(proj[1][i]/proj[2][i])\r\n u = int(proj[0][i]/proj[2][i])\r\n \r\n if u >=0 and v >= 0 and u < self.frame_width and v < self.frame_height: \r\n if self.version[0] == 1:\r\n proj_img[v, u] *= 1 - self.voxelVals[i] # fix to deal with unhit pixels\r\n hit_dict[(u, v)] = hit_dict[(u, v)] * (1 - self.voxelVals[i])\r\n elif self.version[0] == 2:\r\n counts[v, u] += 1\r\n # VERSION 2\r\n # running avg of sqr dist from .5\r\n #proj_img[v, u] += ((self.voxelVals[i] - 0.5)**2 - proj_img[v, u])/counts[v, u] # UNCOMMENT TO RUN\r\n if self.voxelVals[i] > max_vals[v, u]:\r\n max_vals[v, 
u] = self.voxelVals[i]\r\n hit_dict[(u, v)] = max_vals[v, u] \r\n\r\n # if fpath is not None:\r\n # imageio.imwrite('{}_{}.jpg'.format(fpath, self.num_carves), proj_img)\r\n \r\n # VERSION 1\r\n if self.version[0] == 1:\r\n res = np.ones((self.frame_height, self.frame_width)) - np.array(proj_img)\r\n for k in hit_dict.keys():\r\n hit_dict[k] = 1 - hit_dict[k]\r\n\r\n # VERSION 2\r\n else:\r\n #res = 0.5 - np.array(proj_img) # UNCOMMENT TO RUN \r\n res = np.array(proj_img)\r\n\r\n return res, hit_dict \r\n\r\n def view_certainty(self, cam_viewpoint, fpath=None):\r\n proj, value_dict = self.projectUncertainty(cam_viewpoint)\r\n value_array = np.array([value_dict.get(k) for k in value_dict.keys()])\r\n\r\n if fpath is not None:\r\n imageio.imwrite('{}_{}.jpg'.format(fpath, self.num_carves), proj)\r\n\r\n # VERSION 1\r\n if self.version[1] == 1:\r\n #dist = np.square(proj - np.ones((np.shape(proj)[0], np.shape(proj)[1])) * .5)\r\n dist = np.square(value_array - np.ones(np.shape(value_array)[0]) * .5)\r\n res = np.average(dist) # add percentage of visible voxels\r\n\r\n # VERSION 2\r\n elif self.version[1] == 2:\r\n # proj_distribution = proj/np.sum(proj)\r\n # res = entropy(proj_distribution)\r\n res = -entropy(value_array)\r\n\r\n return res \r\n\r\n def voxel_uncertainty(self):\r\n dist = np.square(self.voxelVals - 0.5)\r\n return (0.5**2 - np.average(dist))/(0.5**2)\r\n\r\n def pcd_count(self, thresh=.5):\r\n vox_count = [v for v in self.voxelVals if v >= thresh]\r\n return len(vox_count)\r\n\r\n\r\n def applySegmentation(self, img):\r\n mask = cv2.inRange(img, np.array(self.rgb_lower), np.array(self.rgb_upper))\r\n return cv2.medianBlur(mask, 25)\r\n \r\n def makeVoxels(self, res, center, size):\r\n (x_c, y_c, z_c) = center\r\n x_s = np.linspace(x_c - size/2, x_c + size/2, res)\r\n y_s = np.linspace(y_c - size/2, y_c + size/2, res)\r\n z_s = np.linspace(z_c - size/2, z_c + size/2, res)\r\n voxels = np.array([[x, y, z, 1] for x in x_s for y in y_s for z in z_s])\r\n return voxels \r\n\r\n def toVoxelRep(self, downscale=True, d_scale=10):\r\n xmin = min(self.voxelCoords[:, 0])\r\n ymin = min(self.voxelCoords[:, 1])\r\n zmin = min(self.voxelCoords[:, 2])\r\n xmax = max(self.voxelCoords[:, 0])\r\n vox_range = xmax - xmin\r\n \r\n scale = (self.resolution - 1)/vox_range \r\n\r\n shift_vec = np.array([xmin, ymin, zmin, 0])\r\n voxels = self.voxelCoords - shift_vec\r\n voxels *= scale\r\n voxels = np.rint(voxels)\r\n voxelCube = np.zeros((self.resolution, self.resolution, self.resolution))\r\n\r\n for i in range(np.size(voxels[:, 0])):\r\n voxelCube[int(voxels[i, 0]), int(voxels[i, 1]), int(voxels[i, 2])] = self.voxelVals[i]\r\n\r\n if downscale == True:\r\n factor = int(self.resolution/d_scale)\r\n res = downscale_local_mean(voxelCube, (factor, factor, factor))\r\n else:\r\n res = voxelCube\r\n return res\r\n\r\n\r\n def visualize(self, save=False, fname=\"./dino.ply\", threshold=0.5, show_frame=False):\r\n\r\n pcd = open3d.geometry.PointCloud()\r\n X = np.array([self.voxelCoords[i, :] for i in range(0, np.shape(self.voxelCoords)[0]) if self.voxelVals[i] >= threshold])\r\n if np.shape(X)[0] == 0:\r\n thresh = max(self.voxelVals)\r\n print(\"threshold too high, use {}\".format(thresh))\r\n X = np.array([self.voxelCoords[i, :] for i in range(0, np.shape(self.voxelCoords)[0]) if self.voxelVals[i] >= thresh])\r\n\r\n pcd.points = open3d.Vector3dVector(X[:, 0:3])\r\n open3d.estimate_normals(pcd, search_param = open3d.KDTreeSearchParamHybrid(radius = 0.1, max_nn = 100))\r\n 
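# note: these module-level calls (estimate_normals, Vector3dVector, write_point_cloud) appear to\r\n        # target a pre-0.8 open3d release; on newer versions they live under open3d.utility / open3d.io\r\n        # or as methods on the point cloud itself, e.g. pcd.estimate_normals(...)\r\n        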
open3d.orient_normals_to_align_with_direction(pcd)\r\n pcd.paint_uniform_color([1,0.706,0])\r\n mesh_frame = open3d.geometry.create_mesh_coordinate_frame(size=0.6, origin=[.6, 0, .75])\r\n \r\n if show_frame: \r\n open3d.visualization.draw_geometries([pcd, mesh_frame])\r\n else:\r\n open3d.visualization.draw_geometries([pcd])\r\n\r\n if save == True:\r\n open3d.write_point_cloud(fname, pcd)\r\n\r\n\r\n def visualize_plt(self, threshold=0.5):\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111, projection='3d')\r\n resVox = np.array([self.voxelCoords[i, :] for i in range(0, np.shape(self.voxelCoords)[0]) if self.voxelVals[i] >= threshold])\r\n #Occupancy values of the voxels that survive the threshold; used to colour the scatter points.\r\n voxVals = np.array([v for v in self.voxelVals if v >= threshold])\r\n\r\n x = resVox[:, 0]\r\n y = resVox[:, 1]\r\n z = resVox[:, 2]\r\n\r\n ax.scatter(x, y, z, c=voxVals, marker='o')\r\n\r\n ax.set_xlabel('X Label')\r\n ax.set_ylabel('Y Label')\r\n ax.set_zlabel('Z Label')\r\n #plt.gray()\r\n plt.show()\r\n","sub_path":"spacecarve.py","file_name":"spacecarve.py","file_ext":"py","file_size_in_byte":14660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"16519827","text":"import json\nimport argparse\n\n\ndef get_arguments():\n parser = argparse.ArgumentParser(\n description=\"Path to json file\"\n )\n parser.add_argument(\n \"filepath\",\n help=\"Path to json file\",\n )\n args = parser.parse_args()\n return args\n\n\ndef load_data(filepath):\n with open(filepath, \"r\", encoding=\"UTF-8\") as json_file:\n return json.load(json_file)\n\n\ndef pretty_print_json(json_content):\n print(json.dumps(\n json_content,\n sort_keys=True,\n indent=4,\n ensure_ascii=False,\n separators=(\",\", \": \")\n ))\n\n\ndef main():\n file_path = get_arguments().filepath\n data_result = load_data(file_path)\n pretty_print_json(data_result)\n\n\nif __name__ == \"__main__\":\n try:\n main()\n except ValueError:\n print(\"This is not a json file\")\n","sub_path":"pprint_json.py","file_name":"pprint_json.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"350874467","text":"import numpy as np\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nimport nltk\nfrom nltk import wordpunct_tokenize\nfrom nltk.stem.snowball import EnglishStemmer\nfrom nltk.corpus import stopwords\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.pipeline import make_pipeline, make_union\nfrom tpot.builtins import StackingEstimator\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.model_selection import train_test_split\nfrom tqdm import tqdm\n\nvectorizer = TfidfVectorizer(input='content', analyzer='word')\nsvd = TruncatedSVD(n_components=500, n_iter=5, random_state=27)\n\n#Shared data directory; get_html, build_model and prep_test all read from it.\ndata_dir = \"../data/2018-08-10_AV_Innoplexus/\"\n\nnltk.download('punkt')\nnltk.download('stopwords')\nstop_words = set(stopwords.words('english'))\n\n#Register progress_apply on pandas objects (used in get_html).\ntqdm.pandas()\n\n#After extracting text with get_text, tokenize, filter and stem it.\ndef nltkPipe(soup_text):\n #Convert to tokens\n tokens = [x.lower() for x in wordpunct_tokenize(soup_text)]\n text = nltk.Text(tokens)\n #Get lowercase words.
No single letters, and no stop words\n words = [w.lower() for w in text if w.isalpha() and len(w) > 1 and w.lower() not in stop_words]\n #Remove prefixes/suffixes to cut down on vocab\n stemmer = EnglishStemmer()\n words_nostems = [stemmer.stem(w) for w in words]\n return words_nostems\n\ndef getTitleTokens(html):\n soup = BeautifulSoup(html,'html.parser')\n soup_title = soup.title\n if soup_title is not None:\n soup_title_text = soup_title.get_text()\n text_arr = nltkPipe(soup_title_text)\n return text_arr\n else:\n return []\n \ndef getBodyTokens(html):\n soup = BeautifulSoup(html,'html.parser')\n #Get the text body, skipping paragraphs that contain spans or links\n soup_para = soup.find_all('p')\n soup_para_clean = ' '.join([x.get_text() for x in soup_para if x.span is None and x.a is None])\n text_arr = nltkPipe(soup_para_clean)\n return text_arr\n\n#Fetch and tokenize the HTML for the pages referenced by in_df\ndef get_html(in_df):\n keep_cols = [\"Webpage_id\",\"Tag\"]\n use_df = in_df[keep_cols]\n html_reader_obj = pd.read_csv(data_dir+'html_data.csv',iterator=True, chunksize=10000)\n frames = []\n match_indices = use_df['Webpage_id'].values.tolist()\n print(len(match_indices),' indices left...')\n #Single pass over the chunked reader, stopping once every requested page is matched\n for chunk in html_reader_obj:\n merge_df = pd.merge(use_df,chunk,how='inner',on='Webpage_id')\n merge_indices = merge_df['Webpage_id'].values.tolist()\n match_indices = [x for x in match_indices if x not in merge_indices]\n print(len(match_indices),' indices left...')\n frames.append(merge_df)\n if not match_indices:\n break\n #Process HTML for bags of words of the body and title.\n process_df = pd.concat(frames)\n print(\"Getting tokens...\")\n title_tokens = process_df['Html'].progress_apply(getTitleTokens)\n body_tokens = process_df['Html'].progress_apply(getBodyTokens)\n process_df['all_tokens'] = title_tokens + body_tokens\n process_df.drop(['Html'],axis=1,inplace=True)\n print(\"Done!\")\n return process_df\n\ndef build_model():\n \"\"\"Return the estimator and the objects to transform the test data.\"\"\"\n print(\"Getting HTML tokens\")\n train_df = pd.read_csv(data_dir+'train.csv')\n \n #Get tokens\n train_df_tokens = get_html(train_df)\n #Fit-transform to a tf-idf matrix\n train_df_tdif = vectorizer.fit_transform(train_df_tokens['all_tokens'])\n #Prune unneeded features\n svd_array = svd.fit_transform(train_df_tdif)\n \n vector_features = vectorizer.get_feature_names()\n eigen_features = [vector_features[i] for i in svd.components_[0].argsort()[::-1]][:500]\n\n train_df_svd = pd.DataFrame(svd_array,columns=eigen_features)\n train_df_svd['Tag'] = train_df['Tag']\n \n tags = train_df_svd['Tag'].unique().tolist()\n tags.sort()\n\n tag_dict = {key: value for (key, value) in zip(tags,range(len(tags)))}\n\n train_df_svd['Tag_encoded'] = train_df_svd['Tag'].map(tag_dict)\n train_df_svd = train_df_svd.drop('Tag',axis=1)\n \n exported_pipeline = make_pipeline(\n StackingEstimator(\n estimator=ExtraTreesClassifier(\n bootstrap=False, criterion=\"gini\", max_features=0.2, \n min_samples_leaf=11, min_samples_split=17, n_estimators=100)\n ),\n ExtraTreesClassifier(\n bootstrap=False, criterion=\"entropy\", max_features=0.5, \n min_samples_leaf=6, min_samples_split=9, n_estimators=100\n )\n )\n \n x_cols = [x for x in train_df_svd.columns if x != \"Tag_encoded\"]\n X_train, X_test, y_train, y_test = train_test_split(\n train_df_svd[x_cols],\n train_df_svd['Tag_encoded'],\n test_size=0.33\n )\n \n exported_pipeline.fit(X_train, y_train)\n return exported_pipeline,
vectorizer, svd, tag_dict\n\ndef prep_test(vectorizer_obj, svd_obj):\n \"\"\"Transform the test dataset for predicting.\"\"\"\n test_df = pd.read_csv(data_dir+'test.csv')\n #Get the HTML tokens\n test_df_tokens = get_html(test_df)\n #Transform to a tf-idf matrix\n test_df_tdif = vectorizer_obj.transform(test_df_tokens['all_tokens'])\n #Prune unneeded features\n test_svd_array = svd_obj.transform(test_df_tdif)\n \n vector_features = vectorizer_obj.get_feature_names()\n eigen_features = [vector_features[i] for i in svd_obj.components_[0].argsort()[::-1]][:500]\n #Map to dataframe\n test_df_svd = pd.DataFrame(test_svd_array,columns=eigen_features)\n test_df_svd['Tag'] = test_df['Tag']\n return test_df_svd\n\ndef main():\n #Get the model\n model, vectorizer_obj, svd_obj, tag_dict = build_model()\n #Prep the test set\n test_df = prep_test(vectorizer_obj, svd_obj)\n #Predict on the eigenfeature columns only; 'Tag' is not a model input\n predictions = model.predict(test_df.drop('Tag', axis=1))\n return predictions\n\nif __name__ == \"__main__\":\n main()","sub_path":"2018-08-10_AV_Innoplexus/submission_01.py","file_name":"submission_01.py","file_ext":"py","file_size_in_byte":5726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"69506520","text":"from math import*\r\nx=sqrt(2)\r\na=2\r\npi=2*(a/x)\r\nwhile x<2:\r\n x=(sqrt(2+x))\r\n pi=(pi*a/x)\r\nprint(\"Approximation of pi:\",round(pi,3))\r\nc=float(input(\"Enter the radius:\\n\"))\r\nprint(\"Area:\",round(c**2*pi,3))","sub_path":"examples/data/Assignment_2/mphnok005/question3.py","file_name":"question3.py","file_ext":"py","file_size_in_byte":207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"103327437","text":"import sys\nimport getopt\nimport cv2 as cv\nimport numpy as np\n\ndef combine(str):\n source=cv.imread(str[0],1)\n hidden=cv.imread(str[1],1)\n if source.shape[0]