diff --git "a/4579.jsonl" "b/4579.jsonl"
new file mode 100644
--- /dev/null
+++ "b/4579.jsonl"
@@ -0,0 +1,779 @@
+{"seq_id":"42535858","text":"def solve(list_num):\n s = []\n for item in list_num:\n s.append([item])\n sub_list = s[:len(s)-1]\n if sub_list:\n for sub_item in sub_list:\n s.append([])\n new_entry = s[len(s)-1]\n for ssub_item in sub_item:\n new_entry.append(ssub_item)\n new_entry.append(item)\n return s\n\nresult = solve([1, 2, 3])\nfor r in result:\n print(r)","sub_path":"combination2.py","file_name":"combination2.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"145081656","text":"# Time Complexity : O(NM) (Where N is total rows and M is toatl columns in the board)\n# Space Complexity : O(1) (We are doing all operations in place)\n# Did this code successfully run on Leetcode : Yes\n# Three line explanation of solution in plain english:\n# - When we change value of cell, we use some kind of marker other than given 2 values for dead and live cell.\n# - Consider the new marker when counting the neightbour, and in the end replace marker with original value.\n\n\nclass Solution:\n# Function to count live neighbours.\n def countNeighbour(self, board, row, col):\n# Storing all directions\n dx = [-1,-1,-1,0,0,1,1,1]\n dy = [-1,0,1,-1,1,-1,0,1]\n# Initialzie count\n count = 0\n# Iterate over all directions that means take all neighbours one by one.\n for i in range(len(dx)):\n# Calculate neighbours row and column.\n nx = row + dx[i]\n ny = col + dy[i]\n# Check that neighbour is valid and cell is live using original value or marker.\n if 0 <= nx < len(board) and 0 <= ny < len(board[0]) and (board[nx][ny] == 1 or board[nx][ny] == -1):\n# If all conditions pass increament the count\n count += 1\n# return total neighbours count.\n return count\n \n \n def gameOfLife(self, board: List[List[int]]) -> None:\n \"\"\"\n Do not return anything, modify board in-place instead.\n \"\"\"\n# Iterate over each cell\n for row in range(len(board)):\n for col in range(len(board[0])):\n# Find no of live neighbours for that cell\n count = self.countNeighbour(board, row, col)\n# Change value fo cell according to the question and put marker for updated cell.\n if board[row][col] == 1:\n if count < 2 or count > 3:\n board[row][col] = -1 # 1 -> 0 : -1\n if board[row][col] == 0:\n if count == 3:\n board[row][col] = 2 # 0 -> 1: 2\n \n# Change back marker value to original value\n for row in range(len(board)):\n for col in range(len(board[0])):\n if board[row][col] == -1:\n board[row][col] = 0\n if board[row][col] == 2:\n board[row][col] = 1\n","sub_path":"Problem3.py","file_name":"Problem3.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"219234271","text":"import glob\nimport os\nimport pandas as pd\n\nfrom PIL import Image\n\nIMG_GLOB = os.environ.get('IMG', '/home/csae8092/Documents/kem_img/kem-img-process/*/*.jpg')\nIMG_LIST = glob.glob(IMG_GLOB)\nGESAMT_DF = pd.read_csv('gesamt_liste.csv')\n\n\ndef yield_img_dict(images):\n for x in images:\n item = {}\n item['Dateiname'] = os.path.basename(x)\n item['folder'] = os.path.basename(os.path.split(x)[0])\n with Image.open(x) as image:\n item['width'], item['height'] = image.width, image.height\n yield item\n\n\nimages = sorted(IMG_LIST)\nsize_df = pd.DataFrame(yield_img_dict(images), columns=['Dateiname', 'folder', 'width', 'height'])\nnew = pd.merge(GESAMT_DF, size_df)\nnew.to_csv('enriched_gesamt.csv', index=False)\n","sub_path":"enrich.py","file_name":"enrich.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"632661171","text":"from sklearn.mixture import GMM\nfrom Coordinates import *\nimport numpy as np\n\nclass PairPotential:\n\n\tdef __init__(self, initial, final, n_gms):\n\t\tself.angm = None\n\t\t\n\t\tmag, angc = self.transform( initial, final )\n\t\n\t\tgmm = GMM( n_components = n_gms )\n\n\t\tgmm.fit( np.vstack( (mag, angc) ).T )\n\t\tself.gmm = gmm\n\n\tdef transform( self, initial, final):\n\n\t\tdx = final.x - initial.x\n\t\tdy = final.y - initial.y\n\n\t\tmag, angc, angm = centeredPolar( dx, dy, self.angm )\n\n\t\tif self.angm is None:\n\t\t\tself.angm = angm\n\n\t\treturn mag, angc\n\n\tdef eval( self, initial, final ):\n\n\t\tmag, angc = self.transform( initial, final )\n\n\t\treturn self.gmm.eval( np.vstack( (mag, angc) ).T )[0]\n","sub_path":"ver0/pairwise/PairPotential.py","file_name":"PairPotential.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"184540603","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/src/sentry/src/sentry/api/endpoints/project_key_details.py\n# Compiled at: 2019-08-16 17:27:45\nfrom __future__ import absolute_import\nfrom django.db.models import F\nfrom rest_framework import serializers, status\nfrom rest_framework.response import Response\nfrom sentry import features\nfrom sentry.api.base import DocSection\nfrom sentry.api.bases.project import ProjectEndpoint\nfrom sentry.api.exceptions import ResourceDoesNotExist\nfrom sentry.api.fields.empty_integer import EmptyIntegerField\nfrom sentry.api.serializers import serialize\nfrom sentry.models import AuditLogEntryEvent, ProjectKey, ProjectKeyStatus\nfrom sentry.utils.apidocs import scenario, attach_scenarios\nfrom sentry.loader.browsersdkversion import get_default_sdk_version_for_project, get_browser_sdk_version_choices\n\n@scenario('DeleteClientKey')\ndef delete_key_scenario(runner):\n key = runner.utils.create_client_key(runner.default_project)\n runner.request(method='DELETE', path='/projects/%s/%s/keys/%s/' % (\n runner.org.slug, runner.default_project.slug, key.public_key))\n\n\n@scenario('UpdateClientKey')\ndef update_key_scenario(runner):\n key = runner.utils.create_client_key(runner.default_project)\n runner.request(method='PUT', path='/projects/%s/%s/keys/%s/' % (\n runner.org.slug, runner.default_project.slug, key.public_key), data={'name': 'Quite Positive Key'})\n\n\nclass RateLimitSerializer(serializers.Serializer):\n count = EmptyIntegerField(min_value=0, required=False, allow_null=True)\n window = EmptyIntegerField(min_value=0, max_value=86400, required=False, allow_null=True)\n\n\nclass KeySerializer(serializers.Serializer):\n name = serializers.CharField(max_length=200, required=False, allow_blank=True, allow_null=True)\n isActive = serializers.BooleanField(required=False)\n rateLimit = RateLimitSerializer(allow_null=True)\n browserSdkVersion = serializers.ChoiceField(choices=get_browser_sdk_version_choices(), required=False)\n\n\nclass ProjectKeyDetailsEndpoint(ProjectEndpoint):\n doc_section = DocSection.PROJECTS\n\n def get(self, request, project, key_id):\n try:\n key = ProjectKey.objects.get(project=project, public_key=key_id, roles=F('roles').bitor(ProjectKey.roles.store))\n except ProjectKey.DoesNotExist:\n raise ResourceDoesNotExist\n\n return Response(serialize(key, request.user), status=200)\n\n def put(self, request, project, key_id):\n \"\"\"\n Update a Client Key\n ```````````````````\n\n Update a client key. 
This can be used to rename a key.\n\n :pparam string organization_slug: the slug of the organization the\n client keys belong to.\n :pparam string project_slug: the slug of the project the client keys\n belong to.\n :pparam string key_id: the ID of the key to update.\n :param string name: the new name for the client key.\n :auth: required\n \"\"\"\n try:\n key = ProjectKey.objects.get(project=project, public_key=key_id, roles=F('roles').bitor(ProjectKey.roles.store))\n except ProjectKey.DoesNotExist:\n raise ResourceDoesNotExist\n\n serializer = KeySerializer(data=request.data, partial=True)\n default_version = get_default_sdk_version_for_project(project)\n if serializer.is_valid():\n result = serializer.validated_data\n if result.get('name'):\n key.label = result['name']\n if not result.get('browserSdkVersion'):\n key.data = {'browserSdkVersion': default_version}\n else:\n key.data = {'browserSdkVersion': result['browserSdkVersion']}\n if result.get('isActive') is True:\n key.status = ProjectKeyStatus.ACTIVE\n elif result.get('isActive') is False:\n key.status = ProjectKeyStatus.INACTIVE\n if features.has('projects:rate-limits', project):\n ratelimit = result.get('rateLimit', -1)\n if ratelimit is None or ratelimit != -1 and ratelimit and (ratelimit['count'] is None or ratelimit['window'] is None):\n key.rate_limit_count = None\n key.rate_limit_window = None\n elif result.get('rateLimit'):\n key.rate_limit_count = result['rateLimit']['count']\n key.rate_limit_window = result['rateLimit']['window']\n key.save()\n self.create_audit_entry(request=request, organization=project.organization, target_object=key.id, event=AuditLogEntryEvent.PROJECTKEY_EDIT, data=key.get_audit_log_data())\n return Response(serialize(key, request.user), status=200)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n @attach_scenarios([delete_key_scenario])\n def delete(self, request, project, key_id):\n \"\"\"\n Delete a Client Key\n ```````````````````\n\n Delete a client key.\n\n :pparam string organization_slug: the slug of the organization the\n client keys belong to.\n :pparam string project_slug: the slug of the project the client keys\n belong to.\n :pparam string key_id: the ID of the key to delete.\n :auth: required\n \"\"\"\n try:\n key = ProjectKey.objects.get(project=project, public_key=key_id, roles=F('roles').bitor(ProjectKey.roles.store))\n except ProjectKey.DoesNotExist:\n raise ResourceDoesNotExist\n\n self.create_audit_entry(request=request, organization=project.organization, target_object=key.id, event=AuditLogEntryEvent.PROJECTKEY_REMOVE, data=key.get_audit_log_data())\n key.delete()\n return Response(status=204)","sub_path":"pycfiles/sentry-10.0.0-py27-none-any/project_key_details.py","file_name":"project_key_details.py","file_ext":"py","file_size_in_byte":5988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"338480260","text":"import numpy as np\nfrom argparse import ArgumentParser\nfrom dpu_utils.utils import RichPath\n\n\ndef find_best_model(model_type: str, dataset: str, model_folder: RichPath) -> str:\n\n best_loss = 1e7\n best_model = None\n\n model_filter = f'model-train-log-{model_type}_rnn_model-{dataset}-*.pkl.gz'\n for model_train_log in model_folder.iterate_filtered_files_in_dir(model_filter):\n train_log = model_train_log.read_by_file_suffix()\n\n best_model_loss = 1e7\n for loss_dict in train_log:\n valid_losses = loss_dict['valid_losses']\n avg_valid_loss = np.average(list(valid_losses.values()))\n\n if avg_valid_loss < best_model_loss:\n best_model_loss = avg_valid_loss\n\n print(model_train_log)\n print(best_model_loss)\n if best_model_loss < best_loss:\n best_loss = best_model_loss\n\n model_train_log_name = model_train_log.path.split('/')[-1]\n model_name = model_train_log_name.replace('-train-log-', '-').replace('.pkl.gz', '')\n best_model = model_name\n\n return best_model\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('--model-types', type=str, nargs='+')\n parser.add_argument('--datasets', type=str, nargs='+')\n parser.add_argument('--model-folder', type=str, required=True)\n args = parser.parse_args()\n\n model_folder = RichPath.create(args.model_folder)\n assert model_folder.exists(), f'The folder {model_folder} does not exist!'\n\n for model_type in args.model_types:\n for dataset in args.datasets:\n best = find_best_model(model_type, dataset, model_folder)\n print(f'Best model for {model_type} and {dataset}: {best}')\n","sub_path":"src/data_preparation/select_best_model.py","file_name":"select_best_model.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"540383198","text":"import vampytest\n\nfrom ....user import User\n\nfrom ..fields import validate_users\n\n\ndef test__validate_users__0():\n \"\"\"\n Tests whether ``validate_users`` works as intended.\n \n Case: passing.\n \"\"\"\n user_id = 202211050022\n user_name = 'Faker'\n \n user = User.precreate(\n user_id,\n name = user_name,\n )\n \n for input_value, expected_output in (\n (None, None),\n ([], None),\n ({}, None),\n ([user], {user_id: user}),\n ({user_id: user}, {user_id: user}),\n ):\n output = validate_users(input_value)\n vampytest.assert_eq(output, expected_output)\n\n\ndef test__validate_users__1():\n \"\"\"\n Tests whether ``validate_users`` works as intended.\n \n Case: raising.\n \"\"\"\n for input_value in (\n 12.6,\n [12.6],\n {12.6: 12.6},\n ):\n with vampytest.assert_raises(TypeError):\n validate_users(input_value)\n","sub_path":"hata/discord/interaction/resolved/tests/test__validate_users.py","file_name":"test__validate_users.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"337391358","text":"import webbrowser\n\nclass Video():\n \"\"\"The abstract class that will be used to construct the Movie & Series Class\"\"\"\n def __init__(self, title, story_line, poster_image_url, trailerURL):\n self.title = title\n self.story_line = story_line\n self.poster_image_url = poster_image_url\n self.trailerURL = trailerURL\n \n\n def showTrailer(self):\n webbrowser.open(self.trailerURL)\n \n\nclass Movie(Video):\n \"\"\" This class provides a way to store movie related info by extending the Video class\"\"\"\n \n def __init__(self, movie_title, movie_story_line, trailerURL,\n poster_image_url, duration):\n \n Video.__init__(self,\n movie_title,\n movie_story_line,\n trailerURL, poster_image_url)\n self.duration = duration\n\nclass Series(Video):\n \"\"\" This class provides a way to store series related info by extending the Video class\"\"\"\n\n def __init__(self, movie_title,\n movie_story_line,\n trailerURL, poster_image_url,\n total_seasons, ongoing):\n \n Video.__init__(self, movie_title,\n movie_story_line, trailerURL,\n poster_image_url)\n \n self.total_seasons = total_seasons\n self.ongoing = ongoing\n\n","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"184864450","text":"from random import shuffle\n\nclass Quiz:\n \"\"\"\n A quiz consists of a bank of questions and a unique identifier.\n It is also associated with a specific professor.\n\n Name must be unique due for purposes of uniquely identifying\n specific quizzes.\n \"\"\"\n def __init__(self, prof, name, students, attempts,\n start, end):\n \n self.professor = prof\n self.name = name\n self.questions = []\n self.students = students\n self.attempts = attempts\n self.start = start\n self.end = end\n self.weight = 0\n self.bonusMarks = 0\n self.notes = []\n\n def __str__(self):\n s = self.name + \" \" + self.professor\n return s\n\n def __eq__(self, other):\n return (self.name == other.name and self.attempts == other.attempts \n and self.students == other.students)\n\nclass Question:\n \"\"\"\n A question consists of the text of the question, the correct answer(s),\n the answers collectively and notes indicating modifications made.\n\n Question text must be unique for purposes of uniquely identifying\n specific questions.\n \"\"\"\n def __init__(self, text, correct, options, weight=1):\n ans = correct + options\n shuffle(ans)\n self.text = text\n self.weight = weight\n self.correctAnswers = correct\n self.options = ans\n \n\n def __str__(self):\n s = self.text + ' '\n for ans in self.options:\n s += ' ' + ans\n return s\n\n def __eq__(self, other):\n return (self.text == other.text and self.weight == other.weight and \n self.correctAnswers == other.correctAnswers)\n\nclass Answers:\n \"\"\"\n Provides object with attributes containing information for quiz and answers\n attributes:\n ansAttempts - list of lists\n attemptSubmitted - list\n stuID - string\n profID - string\n quizID - string\n quiz - Quiz object\n currentAttempt - int\n \"\"\"\n def __init__(self, stuID, profID, quizID):\n self.ansAttempts = []\n self.attemptSubmitted = []\n self.stuID = stuID\n self.profID = profID\n self.quizID = quizID\n self.currentAttempt = None\n self.bestAttempt = []","sub_path":"cawadden/Final Project/quiz_tools.py","file_name":"quiz_tools.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"2847650","text":"#!c:/Python34/python.exe\n\n#######################################################################\n# edge enhance the given image\n#\n# Author: Garry Morrison\n# email: garry -at- semantic-db.org\n# Date: 2016-03-05\n# Update: \n# Copyright: GPLv3\n#\n# Usage: ./image_edge_enhance_v2.py image.{png,jpg} [enhance-factor]\n#\n# eg: ./image_edge_enhance_v2.py Lenna.png\n# eg: ./image_edge_enhance_v2.py Lenna.png 40\n#\n#######################################################################\n\n\nimport sys\nfrom PIL import Image # if this line bugs out, you need to install Pillow, a python image library.\n\nif len(sys.argv) < 2:\n print(\"\\nUsage:\")\n print(\" ./image_edge_enhance_v2.py image.{png,jpg} [enhance-factor]\")\n sys.exit(1)\nfilename = sys.argv[1]\n\ntry:\n enhance_factor = int(sys.argv[2])\nexcept:\n enhance_factor = 20 # set default to 20 iterations of smooth\n\ntry:\n im = Image.open(filename)\nexcept:\n print(\"couldn't open image file:\",filename)\n sys.exit(1)\n\n# implements a Gaussian smooth.\n# the 1D version: f[k] -> f[k-1]/4 + f[k]/2 + f[k+1]/4 rapidly approaches a bell curve if you apply it several times.\n# image_smooth() implements a 2D version of that equation.\n#\ndef image_smooth(image):\n def smooth_pixel(image,w,h):\n pix = image.load()\n r = pix[w-1,h-1][0]/16 + pix[w,h-1][0]/16 + pix[w+1,h-1][0]/16 + pix[w-1,h][0]/16 + pix[w,h][0]/2 + pix[w+1,h][0]/16 + pix[w-1,h+1][0]/16 + pix[w,h+1][0]/16 + pix[w+1,h+1][0]/16\n g = pix[w-1,h-1][1]/16 + pix[w,h-1][1]/16 + pix[w+1,h-1][1]/16 + pix[w-1,h][1]/16 + pix[w,h][1]/2 + pix[w+1,h][1]/16 + pix[w-1,h+1][1]/16 + pix[w,h+1][1]/16 + pix[w+1,h+1][1]/16\n b = pix[w-1,h-1][2]/16 + pix[w,h-1][2]/16 + pix[w+1,h-1][2]/16 + pix[w-1,h][2]/16 + pix[w,h][2]/2 + pix[w+1,h][2]/16 + pix[w-1,h+1][2]/16 + pix[w,h+1][2]/16 + pix[w+1,h+1][2]/16\n return (int(r),int(g),int(b))\n\n width = image.size[0]\n height = image.size[1]\n im2 = image.crop((-1,-1,width + 1,height + 1))\n# im2.show()\n\n out_image = Image.new('RGB',(width,height))\n pixels = out_image.load()\n for h in range(height):\n for w in range(width):\n pixels[w,h] = smooth_pixel(im2,w+1,h+1)\n# out_image.show()\n return out_image\n\n\ndef old_edge_enhance(image,k):\n def pixel_difference(im1,im2,w,h):\n def massage_pixel(x):\n if x < 0:\n x = 0\n x *= 20\n x = int(x)\n if x > 255:\n x = 255\n return 255 - x\n\n pix1 = im1.load()\n pix2 = im2.load()\n r = pix1[w,h][0] - pix2[w,h][0]\n g = pix1[w,h][1] - pix2[w,h][1]\n b = pix1[w,h][2] - pix2[w,h][2]\n\n r = massage_pixel(r)\n g = massage_pixel(g)\n b = massage_pixel(b)\n \n return (r,g,b)\n\n smoothed_image = image\n for _ in range(k):\n smoothed_image = image_smooth(smoothed_image)\n smoothed_image.show()\n\n width = image.size[0]\n height = image.size[1]\n\n out_image = Image.new('RGB',(width,height))\n pixels = out_image.load()\n for h in range(height):\n for w in range(width):\n pixels[w,h] = pixel_difference(smoothed_image,image,w,h)\n return out_image\n\n\n# the old_edge_enhance() had a subtle bug.\n# because image_smooth() was returning an integer based image at each iteration, small features ended up being lost!\n# So I had to completely re-implement the thing, but this time allowing floats at each iteration of smooth.\n# This had a massive improvement in quality.\n# The old way also seemed to converge, in that if you applied k above some threshold, any larger k didn't seem to make much difference.\n# Now, larger k has a noticable improvement.\n# Very happy with the results this thing spits 
out!!\n#\ndef edge_enhance(image,k):\n width = image.size[0]\n height = image.size[1]\n original_pixels = image.load()\n\n # create an image with a 1*1 border:\n border_image = image.crop((-1,-1,width + 1,height + 1))\n border_pixels = border_image.load()\n\n # load the border_image into 3 image matrices, one for each of R,G,B:\n M_r = [[border_pixels[w,h][0] for w in range(width+2)] for h in range(height+2)]\n M_g = [[border_pixels[w,h][1] for w in range(width+2)] for h in range(height+2)]\n M_b = [[border_pixels[w,h][2] for w in range(width+2)] for h in range(height+2)]\n\n def smooth_pixel(M,w,h):\n r = M[h-1][w-1]/16 + M[h][w-1]/16 + M[h+1][w-1]/16 + M[h-1][w]/16 + M[h][w]/2 + M[h+1][w]/16 + M[h-1][w+1]/16 + M[h][w+1]/16 + M[h+1][w+1]/16\n return r\n\n # smooth our image matrices:\n # NB: we have to work with matrices and not images because we need to preserve floats at each step of smooth. Otherwise it harms the algo.\n # first, define some work-space matrices:\n new_M_r = [[0 for w in range(width+2)] for h in range(height+2)]\n new_M_g = [[0 for w in range(width+2)] for h in range(height+2)]\n new_M_b = [[0 for w in range(width+2)] for h in range(height+2)]\n\n # smooth k times:\n for _ in range(k):\n for h in range(height):\n for w in range(width):\n new_M_r[h+1][w+1] = smooth_pixel(M_r,w+1,h+1)\n new_M_g[h+1][w+1] = smooth_pixel(M_g,w+1,h+1)\n new_M_b[h+1][w+1] = smooth_pixel(M_b,w+1,h+1)\n M_r = new_M_r\n M_g = new_M_g\n M_b = new_M_b\n\n def massage_pixel(x):\n if x < 0:\n x = 0\n x *= 20\n x = int(x)\n if x > 255:\n x = 255\n return 255 - x\n\n # output the final matrix into image form:\n out_image = Image.new('RGB',(width,height))\n pixels = out_image.load()\n for h in range(height):\n for w in range(width):\n r = massage_pixel(M_r[h+1][w+1] - original_pixels[w,h][0])\n g = massage_pixel(M_g[h+1][w+1] - original_pixels[w,h][1])\n b = massage_pixel(M_b[h+1][w+1] - original_pixels[w,h][2])\n\n pixels[w,h] = (r,g,b)\n# out_image.show()\n return out_image\n\n\nim2 = edge_enhance(im,enhance_factor)\nim2.show()\n\n# now save it:\nfilename, ext = filename.rsplit('.',1)\nfilename = filename + \"--edge-enhanced-\" + str(enhance_factor) + \".\" + ext\nim2.save(filename)\n\n","sub_path":"tools/image_edge_enhance_v2.py","file_name":"image_edge_enhance_v2.py","file_ext":"py","file_size_in_byte":5833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"255270185","text":"# -*- coding: utf-8 -*-\n\n\nimport os\nfrom oujago.utils.common import is_list\nfrom oujago.utils import DATA_PATH\n\n\nclass ChineseStopWords(object):\n \"\"\"Chinese Stop Words.\n\n You can specify what kind of stop words you will use.\n\n Parameters\n ==========\n modes : str\n If ``all`` or ``ALL``, integrate all stopwords in ``_files/stopwords_zh`` directory.\n If ``hit`` or ``HIT``, use \"hit_stopwords.txt\".\n If ``baidu`` or ``Baidu``, use \"baidu_stopwords.txt\".\n If ``normal``, use \"normal_stopwords.txt\".\n\n \"\"\"\n\n def __init__(self, modes='all'):\n _base_path = os.path.join(DATA_PATH, 'stopwords_zh')\n self._stopwords = set()\n self._path_dict = {'all': os.listdir(_base_path),\n 'hit': 'hit_stopwords.txt',\n 'HIT': 'hit_stopwords.txt',\n 'baidu': \"baidu_stopwords.txt\",\n 'Baidu': \"baidu_stopwords.txt\",\n 'normal': \"normal_stopwords.txt\"}\n\n if not is_list(modes):\n modes = (modes,)\n\n for mode in modes:\n if mode not in self._path_dict:\n raise ValueError(\"Unknown mode: {}. Please specify mode \"\n \"using following types: {}.\".format(mode, list(self._path_dict.keys())))\n\n paths = self._path_dict[mode]\n if not is_list(paths):\n paths = (paths,)\n\n for path in paths:\n with open(os.path.join(_base_path, path), encoding='gbk') as fin:\n stopwords = [word.strip() for word in fin.readlines()]\n self._stopwords |= set(stopwords)\n\n def check(self, word):\n \"\"\"Check whether ``word`` is a stop word.\n\n Parameters\n ----------\n word : str\n\n Returns\n -------\n boolean\n True or False\n \"\"\"\n if word in self._stopwords:\n return True\n else:\n return False\n\n\n","sub_path":"oujago/nlp/stopwords.py","file_name":"stopwords.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"450611001","text":"from sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.ensemble import VotingClassifier\nfrom sklearn.ensemble import BaggingClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nimport xgboost as xgb\nfrom sklearn.linear_model import LogisticRegression\nfrom mlxtend.classifier import StackingClassifier\nfrom sklearn import metrics\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport itertools\n\n\nclass Danny_ML_CLF:\n def __init__(self):\n self.X = ''\n self.y = ''\n\n self.svm = ''\n self.tree = ''\n self.bayes = ''\n self.knn = ''\n self.xgb = ''\n self.stacking = ''\n self.voting = ''\n self.bagging = ''\n self.rf = '' # random forest\n self.adaboost = ''\n\n self.svm_pred = ''\n self.tree_pred = ''\n self.bayes_pred = ''\n self.knn_pred = ''\n self.xgb_pred = ''\n self.stacking_pred = ''\n self.voting_pred = ''\n self.bagging_pred = ''\n self.rf_pred = ''\n self.adaboost_pred = ''\n\n self.svm_report = ''\n self.tree_report = ''\n self.bayes_report = ''\n self.knn_report = ''\n self.xgb_report = ''\n self.stacking_report = ''\n self.voting_report = ''\n self.bagging_report = ''\n self.rf_report = ''\n self.adaboost_report = ''\n\n self.svm_cm = ''\n self.tree_cm = ''\n self.bayes_cm = ''\n self.knn_cm = ''\n self.xgb_cm = ''\n self.stacking_cm = ''\n self.voting_cm = ''\n self.bagging_cm = ''\n self.rf_cm = ''\n self.adaboost_cm = ''\n\n self.svm_score = ''\n self.tree_score = ''\n self.bayes_score = ''\n self.knn_score = ''\n self.xgb_score = ''\n self.stacking_score = ''\n self.voting_score = ''\n self.bagging_score = ''\n self.rf_score = ''\n self.adaboost_score = ''\n\n def Fit_value(self, x, y):\n self.X = x\n self.y = y\n\n def Split_data(self,raw_X, raw_y, test_size, Standard=True):\n train_X, test_X, train_y, test_y = train_test_split(raw_X, raw_y, test_size=test_size, shuffle=True)\n if Standard:\n sc = StandardScaler()\n sc.fit(train_X)\n train_X = sc.transform(train_X)\n test_X = sc.transform(test_X)\n self.X = train_X\n self.y = train_y\n return train_X, test_X, train_y, test_y\n\n def SVM(self,C=1,kernel='rbf'):\n self.svm = SVC(C=C,kernel=kernel, degree=5, probability=True)\n self.svm.fit(self.X, self.y)\n def SVM_predict(self,pred_x):\n self.svm_pred = self.svm.predict(pred_x)\n return self.svm_pred\n\n def Tree(self,criterion='gini', max_depth=5):\n self.tree = DecisionTreeClassifier(criterion=criterion,max_depth=max_depth)\n self.tree.fit(self.X, self.y)\n def Tree_predict(self, pred_x):\n self.tree_pred = self.tree.predict(pred_x)\n return self.tree_pred\n\n def Bayes(self):\n self.bayes = GaussianNB()\n self.bayes.fit(self.X, self.y)\n def Bayes_predict(self, pred_x):\n self.bayes_pred = self.bayes.predict(pred_x)\n return self.bayes_pred\n\n def KNN(self, n_neighbors=3, weights='distance'):\n self.knn = KNeighborsClassifier(n_neighbors=n_neighbors, weights=weights)\n self.knn.fit(self.X, self.y)\n def KNN_predict(self, pred_x):\n self.knn_pred = self.knn.predict(pred_x)\n return self.knn_pred\n\n def XGB(self):\n self.xgb = xgb.XGBClassifier()\n self.xgb.fit(self.X, self.y)\n def XGB_prediction(self, pred_x):\n self.xgb_pred = self.xgb.predict(pred_x)\n return self.xgb_pred\n\n def Stacking(self):\n meta_clf = LogisticRegression()\n 
self.stacking = StackingClassifier(classifiers=[self.svm,\n self.tree,\n self.bayes,\n self.knn,\n self.xgb], meta_classifier=meta_clf)\n self.stacking.fit(self.X, self.y)\n def Stacking_prediction(self, pred_x):\n self.stacking_pred = self.stacking.predict(pred_x)\n return self.stacking_pred\n\n def Voting(self):\n self.voting = VotingClassifier(estimators=[('svm',self.svm),\n ('tree',self.tree), ('bayes',self.bayes),\n ('knn',self.knn), ('xgb',self.xgb)],\n voting='soft', weights=[1,1,1,1,1])\n self.voting.fit(self.X, self.y)\n def Voting_prediction(self, pred_x):\n self.voting_pred = self.voting.predict(pred_x)\n return self.voting_pred\n\n def Bagging(self,n_estimators=100, oob_score=False):\n self.bagging = BaggingClassifier(n_estimators=n_estimators,oob_score=oob_score)\n self.bagging.fit(self.X, self.y)\n def Bagging_prediction(self, pred_x):\n self.bagging_pred = self.bagging.predict(pred_x)\n return self.bagging_pred\n\n def RF(self,n_estimators=200, criterion='gini', max_features='auto', oob_score=False):\n self.rf = RandomForestClassifier(n_estimators=n_estimators,criterion=criterion,\n max_features=max_features, oob_score=oob_score)\n self.rf.fit(self.X, self.y)\n\n def RF_prediction(self, pred_x):\n self.rf_pred = self.rf.predict(pred_x)\n return self.rf_pred\n\n def Adaboost(self, n_estimators=100):\n self.adaboost = AdaBoostClassifier(n_estimators=n_estimators)\n self.adaboost.fit(self.X, self.y)\n def Adaboost_prediction(self, pred_x):\n self.adaboost_pred = self.adaboost.predict(pred_x)\n return self.adaboost_pred\n\n def Train(self):\n self.SVM()\n self.Tree()\n self.Bayes()\n self.KNN()\n self.XGB()\n self.Stacking()\n self.Voting()\n self.Bagging()\n self.RF()\n self.Adaboost()\n\n def Report(self, test_X, test_y, labels, show_cm=True):\n self.SVM_predict(test_X)\n self.Tree_predict(test_X)\n self.Bayes_predict(test_X)\n self.KNN_predict(test_X)\n self.XGB_prediction(test_X)\n self.Stacking_prediction(test_X)\n self.Voting_prediction(test_X)\n self.Bagging_prediction(test_X)\n self.RF_prediction(test_X)\n self.Adaboost_prediction(test_X)\n\n self.svm_score = self.svm.score(test_X, test_y)\n self.tree_score = self.tree.score(test_X, test_y)\n self.bayes_score = self.bayes.score(test_X, test_y)\n self.knn_score = self.knn.score(test_X, test_y)\n self.xgb_score = self.xgb.score(test_X, test_y)\n self.stacking_score = self.stacking.score(test_X, test_y)\n self.voting_score = self.voting.score(test_X, test_y)\n self.bagging_score = self.bagging.score(test_X, test_y)\n self.rf_score = self.rf.score(test_X, test_y)\n self.adaboost_score = self.adaboost.score(test_X, test_y)\n\n\n self.svm_report = metrics.classification_report(test_y, self.svm_pred)\n self.tree_report = metrics.classification_report(test_y, self.tree_pred)\n self.bayes_report = metrics.classification_report(test_y, self.bayes_pred)\n self.knn_report = metrics.classification_report(test_y, self.knn_pred)\n self.xgb_report = metrics.classification_report(test_y, self.xgb_pred)\n self.voting_report = metrics.classification_report(test_y, self.voting_pred)\n self.stacking_report = metrics.classification_report(test_y, self.stacking_pred)\n self.bagging_report = metrics.classification_report(test_y, self.bagging_pred)\n self.rf_report = metrics.classification_report(test_y, self.rf_pred)\n self.adaboost_report = metrics.classification_report(test_y, self.adaboost_pred)\n\n self.svm_cm = metrics.confusion_matrix(test_y, self.svm_pred,labels=labels)\n self.tree_cm = metrics.confusion_matrix(test_y, 
self.tree_pred,labels=labels)\n self.bayes_cm = metrics.confusion_matrix(test_y, self.bayes_pred,labels=labels)\n self.knn_cm = metrics.confusion_matrix(test_y, self.knn_pred,labels=labels)\n self.xgb_cm = metrics.confusion_matrix(test_y, self.xgb_pred, labels=labels)\n self.stacking_cm = metrics.confusion_matrix(test_y, self.stacking_pred, labels=labels)\n self.voting_cm = metrics.confusion_matrix(test_y, self.voting_pred, labels=labels)\n self.bagging_cm = metrics.confusion_matrix(test_y, self.bagging_pred, labels=labels)\n self.rf_cm = metrics.confusion_matrix(test_y, self.rf_pred, labels=labels)\n self.adaboost_cm = metrics.confusion_matrix(test_y, self.adaboost_pred, labels=labels)\n\n if show_cm:\n self.plot_confusion_matrix(self.svm_cm, classes=labels, title='SVM')\n self.plot_confusion_matrix(self.tree_cm, classes=labels, title='Tree')\n self.plot_confusion_matrix(self.bayes_cm, classes=labels, title='Bayes')\n self.plot_confusion_matrix(self.knn_cm, classes=labels, title='KNN')\n self.plot_confusion_matrix(self.xgb_cm, classes=labels, title='XGB')\n self.plot_confusion_matrix(self.stacking_cm, classes=labels, title='Stacking')\n self.plot_confusion_matrix(self.voting_cm, classes=labels, title='Voting')\n self.plot_confusion_matrix(self.bagging_cm, classes=labels, title='Bagging')\n self.plot_confusion_matrix(self.rf_cm, classes=labels, title='RF')\n self.plot_confusion_matrix(self.adaboost_cm, classes=labels, title='Adaboost')\n\n def History(self):\n print('******************\\nSVM : ',self.svm_report)\n print('******************\\nTree : ',self.tree_report)\n print('******************\\nBayes : ',self.bayes_report)\n print('******************\\nKNN : ',self.knn_report)\n print('******************\\nXGB : ', self.xgb_report)\n print('******************\\nStacking : ', self.stacking_report)\n print('******************\\nVoting : ', self.voting_report)\n print('******************\\nBagging : ', self.bagging_report)\n print('******************\\nRF : ', self.rf_report)\n print('******************\\nAdaboost : ', self.adaboost_report)\n\n def Score(self):\n print('SVM Score : ', self.svm_score)\n print('Tree Score : ', self.tree_score)\n print('Bayes Score : ', self.bayes_score)\n print('KNN Score : ', self.knn_score)\n print('XGB Score : ', self.xgb_score)\n print('Stacking Score : ', self.stacking_score)\n print('Voting Score : ', self.voting_score)\n print('Bagging Score : ', self.bagging_score)\n print('RF Score : ', self.rf_score)\n print('Adaboost Score : ', self.adaboost_score)\n\n def Report2txt(self, filename):\n f = open(filename, 'w')\n f.write('SVM Score : '+ str(self.svm_score) + '\\n')\n f.write('Tree Score : '+ str(self.tree_score) +'\\n')\n f.write('Bayes Score : '+ str(self.bayes_score) + '\\n')\n f.write('KNN Score : '+ str(self.knn_score) + '\\n')\n f.write('XGB Score : '+ str(self.xgb_score) + '\\n')\n f.write('Stacking Score : '+ str(self.stacking_score) + '\\n')\n f.write('Voting Score : '+ str(self.voting_score) + '\\n')\n f.write('Bagging Score : '+ str(self.bagging_score) + '\\n')\n f.write('RF Score : '+ str(self.rf_score) + '\\n')\n f.write('Adaboost Score : '+ str(self.adaboost_score) + '\\n')\n f.write('Adaboost Score : '+ str(self.adaboost_score) + '\\n')\n f.write('XXXX\\n')\n f.write('******************\\nSVM : '+ str(self.svm_report) + '\\n')\n f.write('******************\\nTree : '+ str(self.tree_report) + '\\n')\n f.write('******************\\nBayes : '+ str(self.bayes_report) + '\\n')\n f.write('******************\\nKNN : '+ str(self.knn_report) + 
'\\n')\n f.write('******************\\nXGB : '+ str(self.xgb_report) + '\\n')\n f.write('******************\\nStacking : '+ str(self.stacking_report) + '\\n')\n f.write('******************\\nVoting : '+ str(self.voting_report) + '\\n')\n f.write('******************\\nBagging : '+ str(self.bagging_report) + '\\n')\n f.write('******************\\nRF : '+ str(self.rf_report) + '\\n')\n f.write('******************\\nAdaboost : '+ str(self.adaboost_report) + '\\n')\n f.close()\n\n def plot_confusion_matrix(self,cm, classes,normalize=False,title='Confusion matrix', cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n plt.figure()\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print(title, ' Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.show()\n # Source code from: http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html#sphx-glr-auto-examples-model-selection-plot-confusion-matrix-py\n\n\n","sub_path":"Danny_ML_CLF.py","file_name":"Danny_ML_CLF.py","file_ext":"py","file_size_in_byte":14083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"400733480","text":"from bayesian_model.data_processor import ProcessData\nfrom bayesian_model.bayesian_regression import Prediction\nfrom bayesian_model.performance_evaluation import Evaluation\nfrom bayesian_model.index import Calculate_index\nimport numpy as np\n\n\nclass Test:\n def __init__(self, input_data):\n self.input_data = input_data\n\n def run_model(self, n, n_cluster, n_effective, step, threshold):\n data = self.input_data\n data = data.values.reshape(1, -1)\n index = round(len(data[0]) / 3)\n p1 = data[0, 0: index]\n p2 = data[0, index: 2 * index]\n p_eval = data[0, 2 * index:]\n\n data_pro = ProcessData(p1, n, n_cluster, n_effective)\n effective = data_pro.select_effective_clusters()\n\n test_model = Prediction(effective, p2, n, p_eval)\n p = test_model.predict_delta_p()\n\n bench = np.random.randn(100, 1)\n hold = np.random.randn(1, 100)\n\n eval_result = Evaluation(p_eval, max(n), p, step, threshold, bench, hold, 100, True, 5000, 5000, 4)\n returns = eval_result.periodic_return()[0]\n market = eval_result.periodic_return()[1]\n temp = Calculate_index(returns, market, 0.05, 0.04, 1, 500, 4)\n sharpe = temp.sharpe_ratio()\n return sharpe, eval_result.visual_account(threshold)[0], eval_result.visual_account(threshold)[2]\n","sub_path":"bayesian_model/tuning_test.py","file_name":"tuning_test.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"62910411","text":"\"\"\"\nthe purpose of this script is to use standarnd off the shelf HoG and KNN from scikit and apply to the ORL dataset\n\"\"\"\nfrom skimage.feature import hog\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import accuracy_score, classification_report\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.multiclass import OutputCodeClassifier\nfrom sklearn.svm import LinearSVC\nimport time\nfrom orl_face_dataset_examples.read_pgm_file import fetch_sw_orl\n\nPPC = 20\n\ncontrol = [False, False, True]\n\n# grab the data (is contained in Bunch object)\nb = fetch_sw_orl()\ntic = time.time()\nif control[0]:\n # hog() returns feature vector, and hog_image if visualize=True\n # apply this to the first image of b.data\n fd, hog_image = hog(b.data[0].reshape(b.shape), orientations=8, pixels_per_cell=(PPC, PPC),\n cells_per_block=(1, 1), visualize=True, multichannel=False)\n\n print(f'Original image size is {b.shape}')\n print(f'HoG size is {hog_image.size}')\n print(f'HoG features size is {fd.size}')\n fig = plt.figure()\n fig.add_subplot(1,2, 1)\n plt.imshow(b.data[0].reshape(b.shape), cmap='gray')\n fig.add_subplot(1, 2, 2)\n plt.imshow(hog_image, cmap='gray')\n plt.show()\n\n# split the data in\nX_train, X_test, y_train, y_true = train_test_split(b.data, b.target, test_size=0.2, stratify=b.target)\n\n\n\n# apply HoG to all the images in b.data\nhog_train = []\nfor img_array in X_train:\n img = img_array.reshape(b.shape)\n fd, _ = hog(img, orientations=8, pixels_per_cell=(PPC , PPC ), cells_per_block=(1, 1), visualize=True, multichannel=False)\n hog_train.append(fd)\n\n\nclf = OutputCodeClassifier(LinearSVC(random_state=0), code_size=2, random_state=42)\nclf.fit(hog_train, y_train)\ntok = time.time()\n\nif control[1]:\n # create the hog fro the X_test\n hog_test = []\n for img_arry in X_test:\n fd, _ = hog(img_arry.reshape(b.shape), orientations=8, pixels_per_cell=(PPC , PPC ), cells_per_block=(1, 1), visualize=True, multichannel=False)\n hog_test.append(fd)\n y_pred = clf.predict(hog_test)\n\n print(f'the number of correct example is {accuracy_score(y_true, y_pred, normalize=False)}, with accuracy score of {accuracy_score(y_true, y_pred)}')\n print(classification_report(y_true, y_pred, zero_division=0.0))\n print(f'time to train : {tok - tic:.5}')\n\n\ndef run_test(**kwargs):\n b = fetch_sw_orl()\n tic = time.time()\n\n # split the data in\n X_train, X_test, y_train, y_true = train_test_split(b.data, b.target, test_size=0.2, stratify=b.target)\n\n hog_train = []\n for img_array in X_train:\n fd, _ = hog(img_array.reshape(b.shape), orientations=8, pixels_per_cell=(PPC , PPC ), cells_per_block=(1, 1), visualize=True, multichannel=False)\n hog_train.append(fd)\n\n clf = OutputCodeClassifier(LinearSVC(random_state=0), code_size=2)\n clf.fit(hog_train, y_train)\n tok = time.time()\n\n hog_test = []\n for img_arry in X_test:\n fd, _ = hog(img_arry.reshape(b.shape), orientations=8, pixels_per_cell=(PPC, PPC), cells_per_block=(1, 1), visualize=True, multichannel=False)\n hog_test.append(fd)\n y_pred = clf.predict(hog_test)\n return tok - tic, accuracy_score(y_true, y_pred)\n\nif control[2]:\n\n results = []\n times = []\n\n for i in range(10):\n print(i)\n r, t = run_test()\n results.append(r)\n times.append(t)\n\n","sub_path":"orl_face_dataset_examples/test_HoG_with_KNN.py","file_name":"test_HoG_with_KNN.py","file_ext":"py","file_size_in_byte":3387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"549316252","text":"\nimport json\nimport urllib.parse\nimport boto3\nfrom elasticsearch import Elasticsearch\nimport requests\nfrom datetime import datetime\nfrom s3logparse import s3logparse\nimport os\nimport sys\nfrom tempfile import NamedTemporaryFile\nimport traceback\nimport logging\nfrom aws_xray_sdk.core import xray_recorder\nfrom aws_xray_sdk.core import patch_all\npatch_all()\n\n######################################################################\n# Notes:\n######################################################################\n# https://docs.aws.amazon.com/code-samples/latest/catalog/python-s3-get_object.py.html\n# https://forums.aws.amazon.com/thread.jspa?threadID=221549\n# https://stackoverflow.com/questions/32000934/python-print-a-variables-name-and-value\n# https://pypi.org/project/s3-log-parse/\n# https://www.geeksforgeeks.org/python-dictionary/\n# https://stackoverflow.com/questions/44381249/treat-a-string-as-a-file-in-python\n# https://github.com/elastic/elasticsearch-py\n# https://docs.aws.amazon.com/lambda/latest/dg/running-lambda-code.html\n# https://www.geeksforgeeks.org/python-interconversion-between-dictionary-and-bytes/\n# https://stackoverflow.com/questions/2266646/how-to-disable-and-re-enable-console-logging-in-python/2267567#2267567\n\n\n######################################################################\n# Initialize boto3 client at global scope for connection reuse\n######################################################################\nprint('Loading function')\nclient = boto3.client('ssm')\ns3 = boto3.client('s3')\n\n\ndef lambda_handler(event, context):\n ######################################################################\n # Create and Configure Python logging \n ######################################################################\n enable_logging = os.getenv('enable_logging')\n if enable_logging == 'True':\n enable_logging = True\n logging.Logger.disabled = False\n else: \n enable_logging = False\n logging.Logger.disabled = True\n\n # log = logging.getLogger(\"accesslogstoelasticcloud\")\n log = logging.getLogger()\n log.setLevel(logging.DEBUG)\n # log.addHandler(handler)\n log.debug(\"Received event: \" + json.dumps(event, indent=2))\n # print(\"Received event: \" + json.dumps(event, indent=2))\n\n ######################################################################\n # Get all parameters containing credentials for this app\n # If not -> user credentials from environment variables\n ######################################################################\n parent_stack_name = os.getenv('parent_stack_name')\n try:\n param_name = '/' + parent_stack_name + '/cloud_id'\n param_details = client.get_parameter(Name=param_name,WithDecryption=True)\n if 'Parameter' in param_details and len(param_details.get('Parameter')) > 0:\n parameter = param_details.get('Parameter')\n cloud_id = parameter.get('Value')\n log.info('cloud_id=' + cloud_id)\n\n param_name = '/' + parent_stack_name + '/http_auth_username'\n param_details = client.get_parameter(Name=param_name,WithDecryption=True)\n if 'Parameter' in param_details and len(param_details.get('Parameter')) > 0:\n parameter = param_details.get('Parameter')\n http_auth_username = parameter.get('Value')\n log.info('http_auth_username=' + http_auth_username)\n \n param_name = '/' + parent_stack_name + '/http_auth_password'\n param_details = client.get_parameter(Name=param_name,WithDecryption=True)\n if 'Parameter' in param_details and len(param_details.get('Parameter')) > 0:\n parameter = 
param_details.get('Parameter')\n http_auth_password = parameter.get('Value')\n log.info('http_auth_password=' + http_auth_password)\n\n param_name = '/' + parent_stack_name + '/index_name'\n param_details = client.get_parameter(Name=param_name,WithDecryption=True)\n if 'Parameter' in param_details and len(param_details.get('Parameter')) > 0:\n parameter = param_details.get('Parameter')\n index_name = parameter.get('Value')\n log.info('index_name=' + index_name)\n\n except:\n log.debug(\"Encountered an error loading credentials from SSM.\")\n traceback.print_exc()\n cloud_id = os.getenv('cloud_id')\n http_auth_username = os.getenv('http_auth_username')\n http_auth_password = os.getenv('http_auth_password')\n index_name = os.getenv('index_name')\n \n\n ######################################################################\n # Get the object from the event and show its content type\n ######################################################################\n bucket = event['Records'][0]['s3']['bucket']['name']\n key = urllib.parse.unquote_plus(event['Records'][0]['s3']['object']['key'], encoding='utf-8')\n try:\n response = s3.get_object(Bucket=bucket, Key=key)\n log.info(\"CONTENT TYPE: \" + response['ContentType'])\n except Exception as e:\n log.debug('Error getting object {} from bucket {}. Make sure they exist and your bucket is in the same region as this function.'.format(key, bucket))\n log.debug(e)\n # print(e)\n # print('Error getting object {} from bucket {}. Make sure they exist and your bucket is in the same region as this function.'.format(key, bucket))\n raise e\n \n StreamingBody=response['Body']\n access_log=StreamingBody.read()\n\n ######################################################################\n # Example Access Log:\n ######################################################################\n # access_log='2279185f7619a617e0a834c7f0660e4b09ea7f842f9d768d39109ee6e4cdf522 bucket [20/Dec/2019:06:36:32 +0000] 174.65.125.92 arn:aws:sts::696965430234:assumed-role/AWSReservedSSO_AdministratorAccess_563d3ebb7af9cd35/dev@company.com 6ED2206C36ABCD61 REST.GET.ACL object.mov \"GET /bucket/object.mov?acl= HTTP/1.1\" 200 - 550 - 277 - \"-\" \"S3Console/0.4, aws-internal/3 aws-sdk-java/1.11.666 Linux/4.9.184-0.1.ac.235.83.329.metal1.x86_64 OpenJDK_64-Bit_Server_VM/25.232-b09 java/1.8.0_232 vendor/Oracle_Corporation\" - eGkU7fkbpX9QOfaV1GDHSXQ9zVEokrE0KgIhdVMr63PbSCxWwZoEtr5GDbaDGr1/LFf9lTpiJ3U= SigV4 ECDHE-RSA-AES128-SHA AuthHeader s3-us-west-2.amazonaws.com TLSv1.2\\n'\n log.info(f\"access_log={access_log}\\n\")\n\n f = NamedTemporaryFile(mode='w+', delete=False)\n f.write(str(access_log))\n f.close()\n # with open(f.name, \"r\") as new_f:\n # print(new_f.read())\n\n with open(f.name, \"r\") as fh:\n for log_entry in s3logparse.parse_log_lines(fh.readlines()):\n log.info(log_entry)\n\n os.unlink(f.name) # delete the file after usage\n\n ######################################################################\n # Start the X-Ray sub-segment\n ######################################################################\n subsegment = xray_recorder.begin_subsegment('accesslogstoelasticcloud - send data to ElasticCloud')\n subsegment.put_annotation('function', 'accesslogstoelasticcloud')\n xray_recorder.put_metadata(\"access_log\", access_log)\n\n ##################################################################################################\n #Now put that data in ElasticCloud! 
\n ##################################################################################################\n es = Elasticsearch(cloud_id=cloud_id, http_auth=(http_auth_username, http_auth_password))\n es.info()\n\n # create an index in elasticsearch, ignore status code 400 (index already exists)\n # es.indices.create(index='accesslogstoelasticcloud', ignore=400)\n es.indices.create(index=index_name, ignore=400)\n # {'acknowledged': True, 'shards_acknowledged': True, 'index': 'my-index'}\n # datetimes will be serialized\n # es.index(index=\"my-index\", id=44, body={\"any\": \"data44\", \"timestamp\": datetime.now()})\n \n \n es_body={\n \"bucket_owner\": log_entry.bucket_owner,\n \"bucket\": log_entry.bucket,\n \"timestamp\": log_entry.timestamp,\n \"remote_ip\": log_entry.remote_ip,\n \"requester\": log_entry.requester,\n \"request_id\": log_entry.request_id,\n \"operation\": log_entry.operation,\n \"s3_key\": log_entry.s3_key,\n \"request_uri\": log_entry.request_uri,\n \"status_code\": log_entry.status_code,\n \"error_code\": log_entry.error_code,\n \"bytes_sent\": log_entry.bytes_sent,\n \"object_size\": log_entry.object_size,\n \"total_time\": log_entry.total_time,\n \"turn_around_time\": log_entry.turn_around_time,\n \"referrer\": log_entry.referrer,\n \"user_agent\": log_entry.user_agent,\n \"version_id\": log_entry.version_id\n }\n\n es.index(index=index_name, body=es_body)\n\n ######################################################################\n # End the X-Ray sub-segment\n ######################################################################\n xray_recorder.end_subsegment()\n\n","sub_path":"sam-app/accesslogstoelasticcloud/accesslogstoelasticcloud.py","file_name":"accesslogstoelasticcloud.py","file_ext":"py","file_size_in_byte":8931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"571207482","text":"import math # This will import math module\nimport glob\n\n# max ' ' = 32, max = '~'\ndef unigram(text):\n list = [0] * 95\n for i, c in enumerate(text):\n index = ord(c)-32\n if index > 94 or index < 0:\n continue\n list[index] = list[index]+1\n return list\n\n\ndef countCharacters(text):\n count = 0\n for i, c in enumerate(text):\n index = ord(c)-32\n if index > 94 or index < 0: # Count only characters that we use\n continue\n count = count+1\n return count\n\n\ndef countWords(text):\n return len(text.split()) # split whitespaces & get their length\n\n\ndef countSentences(text):\n return len(text.split('.')) # split dots & get their length\n\n\ndef normalize(vector):\n sum = 0\n res = [0.0] * len(vector)\n for c in vector:\n sum += c*c\n dist = math.sqrt(sum)\n for i,c in enumerate(vector):\n res[i] = c/dist\n return res\n\n\nncu = open('./results_ncu.txt', 'w')\nrawcu = open('./results_rawcu.txt', 'w')\n\n\navg = [0, 0, 0] # [#Character, #Word, #Sentece]\nfor x in range(1000, 1025):\n for y in range(1, 5):\n file = open(\"./CASIS/%d_%d.txt\" % (x, y), \"r\")\n raw = file.read() # Read contents of the file\n file.close() # We don't need that file anymore.\n rawResult = unigram(raw)\n # Counters\n avg[0] += countCharacters(raw)\n avg[1] += countWords(raw)\n avg[2] += countSentences(raw)\n\n rawString = ','.join(str(v) for v in rawResult)\n normalizedResult = normalize(rawResult)\n normalizedString = ','.join(str(v) for v in normalizedResult)\n rawcu.write(\"%d_%d,%s\\n\" % (x, y, rawString))\n ncu.write(\"%d_%d,%s\\n\" % (x, y, normalizedString))\n\nncu.close()\nrawcu.close()\n\n\n# Statistics file\nstats = open('./casis25_stats.txt', 'w')\nstats.write('Character #%d\\nWord #%d\\nSentence #%d\\n' % (avg[0]/100, avg[1]/100, avg[2]/100))\nstats.close()\n\n\nsncu = open('./msst_ncu.txt', 'w')\nsrawcu = open('./msst_rawcu.txt', 'w')\n\navg = [0, 0, 0] # [#Character, #Word, #Sentece]\nsamples = glob.glob(\"./MSST/*.txt\") # Put every text file in the MSST folder into a list\nfor fileName in samples:\n baseName = fileName.split('\\\\')[-1].split('.')[0] # Delete folder names & extension\n file = open(fileName, \"r\", encoding='utf-8') # Force encoding to utf8\n raw = file.read() # Read contents of the file\n file.close() # We don't need that file anymore.\n # Counters\n avg[0] += countCharacters(raw)\n avg[1] += countWords(raw)\n avg[2] += countSentences(raw)\n rawResult = unigram(raw) # Generate unigram from text\n rawString = ','.join(str(v) for v in rawResult)\n normalizedResult = normalize(rawResult)\n normalizedString = ','.join(str(v) for v in normalizedResult)\n srawcu.write(\"%s,%s\\n\" % (baseName, rawString))\n sncu.write(\"%s,%s\\n\" % (baseName, normalizedString))\n\nsncu.close()\nsrawcu.close()\n\n# Statistics file\nstats = open('./msst_stats.txt', 'w')\nstats.write('Character #%d\\nWord #%d\\nSentence #%d\\n' % (avg[0]/len(samples), avg[1]/len(samples), avg[2]/len(samples)))\nstats.close()\n","sub_path":"1 Data Collection & Feature Extraction/Machine Learning/unigram.py","file_name":"unigram.py","file_ext":"py","file_size_in_byte":3097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"485072676","text":"import numpy as np\nimport tensorflow as tf\nfrom scipy.linalg import solve_discrete_are\nfrom scipy.linalg import sqrtm\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport matplotlib.lines as mlines\nfrom Classifier import Classifier\nimport ipdb\n\nclass LQRlayer(Classifier):\n def __init__(self, n, m, T, dt, n_modes, iLQR_niter, lr, momentum, mass, A, B, temperature):\n super().__init__(n, m, T, dt, n_modes, temperature)\n # Constructing the cvxpy layer\n self.iLQR_niter = iLQR_niter\n self.lr = lr\n self.momentum = momentum\n self.n_objects = 3\n self.ss_len = n*self.n_objects\n\n self.massinv_tf = tf.Variable( tf.random.uniform((self.n_objects,n_modes), minval=0, maxval=2, dtype=tf.dtypes.float64) )\n self.C1train_tf = tf.Variable( tf.random.uniform((self.n_objects,self.n_objects), minval=0, maxval=2, dtype=tf.dtypes.float64) )\n self.C2train_tf = tf.Variable( tf.random.uniform((self.n_objects,self.n_objects), minval=0, maxval=2, dtype=tf.dtypes.float64) )\n self.C3train_tf = tf.Variable( tf.random.uniform((self.n_objects,self.n_objects), minval=0, maxval=2, dtype=tf.dtypes.float64) )\n\n self.Qtrain_tf = tf.Variable( tf.random.uniform((n,n_modes), minval=0, maxval=2, dtype=tf.dtypes.float64) )\n self.xfs_train_tf = tf.Variable( tf.zeros((int(self.n/2),n_modes), dtype=tf.dtypes.float64) )\n self.Qftrain_tf = tf.Variable(tf.random.uniform((n,), minval=0, maxval=2, dtype=tf.dtypes.float64))\n self.Rtrain_tf = tf.Variable( tf.random.uniform((m,n_modes), minval=0, maxval=2, dtype=tf.dtypes.float64) )\n\n #Expert values\n mass_g = 1\n mass_o1 = 1\n mass_o2 = 1\n\n _mass = np.array(([[1/mass_g, 1/(mass_g+mass_o1), 1/(mass_g+mass_o1+mass_o2)],\n [0, 1/(mass_g+mass_o1), 1/(mass_g+mass_o1+mass_o2)],\n [0, 0, 1/(mass_g+mass_o1+mass_o2)]]), dtype=np.float64)\n\n xfs = np.zeros((self.ss_len, n_modes))\n xfs_part = np.array([[-0.5, 0.5, 1], [-1,-1,0]])\n xfs[:2, :] = xfs_part\n xfs[4:6, :] = xfs_part\n xfs[8:10, :] = xfs_part\n\n Q = 1e-6*np.ones((self.ss_len, n_modes))\n Q[:n,:] = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]], dtype=np.float64)\n Qf = 1e-6*np.ones(self.ss_len)\n Qf[:n] = np.array([1, 1, 1, 1], dtype=np.float64)\n\n R = np.array([[1, 0.5, 0.5], [1, 0.5, 0.5]], dtype=np.float64)\n\n C1 = np.array([[mass_g/(mass_g+mass_o1), mass_o1/(mass_g+mass_o1), 0], \n [mass_g/(mass_g+mass_o1), mass_o1/(mass_g+mass_o1), 0],\n [0, 0, 1]], dtype=np.float64)\n C2 = np.array([[mass_g/(mass_g+mass_o1+mass_o2), mass_o1/(mass_g+mass_o1+mass_o2), mass_o2/(mass_g+mass_o1+mass_o2)], \n [mass_g/(mass_g+mass_o1+mass_o2), mass_o1/(mass_g+mass_o1+mass_o2), mass_o2/(mass_g+mass_o1+mass_o2)],\n [mass_g/(mass_g+mass_o1+mass_o2), mass_o1/(mass_g+mass_o1+mass_o2), mass_o2/(mass_g+mass_o1+mass_o2)]], dtype=np.float64)\n C3 = np.array([[mass_g/(mass_g+mass_o1+mass_o2), mass_o1/(mass_g+mass_o1+mass_o2), mass_o2/(mass_g+mass_o1+mass_o2)], \n [mass_g/(mass_g+mass_o1+mass_o2), mass_o1/(mass_g+mass_o1+mass_o2), mass_o2/(mass_g+mass_o1+mass_o2)],\n [mass_g/(mass_g+mass_o1+mass_o2), mass_o1/(mass_g+mass_o1+mass_o2), mass_o2/(mass_g+mass_o1+mass_o2)]], dtype=np.float64)\n\n temp_c = np.zeros((self.ss_len, self.ss_len))\n temp_c[:2,:2] = np.eye(int(n/2))\n temp_c[4:6,4:6] = np.eye(int(n/2))\n temp_c[8:10,8:10] = np.eye(int(n/2))\n\n temp_l1 = np.zeros((self.ss_len, self.n_objects))\n temp_l1[2,0], temp_l1[6,1], temp_l1[10,2] = 1, 1, 1\n temp_l2 = np.zeros((self.ss_len, self.n_objects))\n temp_l2[3,0], temp_l2[7,1], temp_l2[11,2] = 1, 1, 1\n temp_r1 = 
np.zeros((self.n_objects, self.ss_len))\n temp_r1[0, 2], temp_r1[1, 6], temp_r1[2, 10] = 1, 1, 1\n temp_r2 = np.zeros((self.n_objects, self.ss_len))\n temp_r2[0, 3], temp_r2[1, 7], temp_r2[2, 11] = 1, 1, 1\n\n with tf.device('gpu:0'):\n # Variables\n # self.C1train_tf = tf.Variable(C1)\n # self.C2train_tf = tf.Variable(C2)\n # self.C3train_tf = tf.Variable(C3)\n # self.massinv_tf = tf.Variable(_mass)\n # self.Qtrain_tf = tf.Variable(Q)\n # self.Qftrain_tf = tf.Variable(Qf)\n # # self.xfs_train_tf = tf.Variable(xfs)\n # self.Rtrain_tf = tf.Variable(R)\n\n # Constants\n self.A_tf = tf.constant(A)\n self.Bpart_tf = tf.constant(B)\n\n ## x-z case\n self.tempm_r1 = tf.constant(np.array( [[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]] ), dtype=tf.dtypes.float64)\n self.tempm_r2 = tf.constant(np.array( [[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]] ), dtype=tf.dtypes.float64)\n self.temp_c = tf.constant(temp_c, dtype=tf.dtypes.float64)\n self.temp_l1 = tf.constant(temp_l1, dtype=tf.dtypes.float64)\n self.temp_l2 = tf.constant(temp_l2, dtype=tf.dtypes.float64)\n self.temp_r1 = tf.constant(temp_r1, dtype=tf.dtypes.float64)\n self.temp_r2 = tf.constant(temp_r2, dtype=tf.dtypes.float64)\n self.tempm_l1 = tf.constant(temp_l1, dtype=tf.dtypes.float64)\n self.tempm_l2 = tf.constant(temp_l2, dtype=tf.dtypes.float64)\n self.xobj_init = tf.constant(xfs, dtype=tf.dtypes.float64)\n\n def LQR_tf_xz(self, x0, rho):\n # vel_f = tf.zeros((int(self.ss_len/2),self.n_modes), dtype=tf.dtypes.float64)\n\n # xfs_full = tf.concat( [ tf.concat( [self.xfs_train_tf[int(i*self.n/2):int((i+1)*self.n/2), :], vel_f[int(i*self.n/2):int((i+1)*self.n/2), :]], axis = 0 ) \n # for i in range(int(self.ss_len/self.n)) ], axis = 0) + self.xobj_init\n xfs_full = tf.concat([self.xfs_train_tf, tf.zeros((int(self.ss_len - self.n/2),self.n_modes), dtype=tf.dtypes.float64)], axis = 0) + self.xobj_init\n\n # Ptp1 = tf.identity(tf.linalg.diag(self.Qftrain_tf))\n Ptp1 = tf.identity(tf.linalg.diag( tf.concat([self.Qftrain_tf, tf.zeros((int(self.ss_len - self.n),),dtype=tf.dtypes.float64)], axis=0) ))\n K = []\n At_all = []\n Bt_all = []\n # Backward ricatti\n for t in range(self.T-1,0,-1):\n # Qt = tf.linalg.diag( tf.squeeze(self.Qtrain_tf@tf.expand_dims(rho[:,t], axis=1)) )\n Qt = tf.linalg.diag( tf.squeeze( (tf.concat([self.Qtrain_tf, 1e-6*tf.ones((int(self.ss_len - self.n),self.n_modes), dtype=tf.dtypes.float64)], axis = 0) )@tf.expand_dims(rho[:,t], axis=1)) )\n Rt = tf.linalg.diag( tf.reshape(self.Rtrain_tf@tf.expand_dims(rho[:,t], axis=1), (self.m,)) )\n\n Bt_all.append( (self.tempm_l1@self.massinv_tf@tf.expand_dims(rho[:,t], axis=1)@self.tempm_r1 + self.tempm_l2@self.massinv_tf@tf.expand_dims(rho[:,t], axis=1)@self.tempm_r2 )@self.Bpart_tf )\n Bt = tf.identity(Bt_all[self.T-1-t][:self.ss_len,:])\n if (tf.math.argmax(rho[:,t-1]) == 0 and tf.math.argmax(rho[:,t]) == 1) or (tf.math.argmax(rho[:,t-1]) == 1 and tf.math.argmax(rho[:,t]) == 0):\n C_full = (self.temp_c + self.temp_l1@self.C1train_tf@self.temp_r1 + self.temp_l2@self.C1train_tf@self.temp_r2)\n elif (tf.math.argmax(rho[:,t-1]) == 1 and tf.math.argmax(rho[:,t]) == 2) or (tf.math.argmax(rho[:,t-1]) == 2 and tf.math.argmax(rho[:,t]) == 1):\n C_full = (self.temp_c + self.temp_l1@self.C2train_tf@self.temp_r1 + self.temp_l2@self.C2train_tf@self.temp_r2)\n elif(tf.math.argmax(rho[:,t-1]) == 0 and tf.math.argmax(rho[:,t]) == 2) or (tf.math.argmax(rho[:,t-1]) == 2 and tf.math.argmax(rho[:,t]) == 0):\n C_full = (self.temp_c + self.temp_l1@self.C3train_tf@self.temp_r1 + 
self.temp_l2@self.C3train_tf@self.temp_r2)\n else:\n C_full = tf.eye(self.ss_len, dtype=tf.float64)\n At_all.append(self.A_tf@C_full)\n At = tf.identity(At_all[self.T-1-t][:self.ss_len,:self.ss_len])\n\n Kt = -tf.linalg.inv(Rt+ tf.transpose(Bt)@Ptp1@Bt)@tf.transpose(Bt)@Ptp1@At\n # Pt = Qt + tf.transpose(At)@tf.linalg.inv(tf.eye(self.n_objects*self.n, dtype=tf.dtypes.float64) + Ptp1@Bt@tf.linalg.inv(Rt)@tf.transpose(Bt))@Ptp1@At\n Pt = Qt + tf.transpose(At)@tf.linalg.inv(tf.eye(self.ss_len, dtype=tf.dtypes.float64) + Ptp1@Bt@tf.linalg.inv(Rt)@tf.transpose(Bt))@Ptp1@At\n Ptp1 = tf.identity(Pt)\n K.append(Kt)\n\n # Qt = tf.linalg.diag( tf.squeeze(self.Qtrain_tf@tf.expand_dims(rho[:,0], axis=1)) )\n Qt = tf.linalg.diag( tf.squeeze( (tf.concat([self.Qtrain_tf, 1e-6*tf.ones((int(self.ss_len - self.n),self.n_modes), dtype=tf.dtypes.float64)], axis = 0) )@tf.expand_dims(rho[:,0], axis=1)) )\n Rt = tf.linalg.diag( tf.reshape(self.Rtrain_tf@tf.expand_dims(rho[:,0], axis=1), (self.m,)) ) \n \n Bt_all.append( (self.tempm_l1@self.massinv_tf@tf.expand_dims(rho[:,0], axis=1)@self.tempm_r1 + self.tempm_l2@self.massinv_tf@tf.expand_dims(rho[:,0], axis=1)@self.tempm_r2 )@self.Bpart_tf )\n Bt = tf.identity(Bt_all[self.T-1][:self.ss_len,:])\n\n At_all.append(self.A_tf)\n At = tf.identity(At_all[self.T-1][:self.ss_len,:self.ss_len])\n\n Kt = -tf.linalg.inv(Rt+ tf.transpose(Bt)@Ptp1@Bt)@tf.transpose(Bt)@Ptp1@At\n # Pt = Qt + tf.transpose(At)@tf.linalg.inv(tf.eye(self.n_objects*self.n, dtype=tf.dtypes.float64) + Ptp1@Bt@tf.linalg.inv(Rt)@tf.transpose(Bt))@Ptp1@At\n Pt = Qt + tf.transpose(At)@tf.linalg.inv(tf.eye(self.ss_len, dtype=tf.dtypes.float64) + Ptp1@Bt@tf.linalg.inv(Rt)@tf.transpose(Bt))@Ptp1@At\n Ptp1 = tf.identity(Pt)\n K.append(Kt)\n\n # Forward pass\n x = []\n x.append(x0)\n u = []\n xft =xfs_full@tf.expand_dims(rho[:,0], axis=1)\n u.append( K[self.T-1]@(x[0][:self.ss_len,:]-xft) )\n xtp1 = At_all[self.T-1]@x[0] + Bt_all[self.T-1]@u[0]\n x.append(xtp1)\n for t in range(1,self.T):\n xft = xfs_full@tf.expand_dims(rho[:,t], axis=1)\n u.append( K[self.T-1-t]@(x[t][:self.ss_len,:] - xft) )\n xtp1 = At_all[self.T-1-t]@x[t] + Bt_all[self.T-1-t]@u[t]\n x.append( xtp1 )\n\n u = tf.concat([tf.concat(u, axis = 1), tf.zeros((self.m, 1), dtype=tf.dtypes.float64)], axis = 1)\n x = tf.concat(x, axis = 1)\n return u, x\n\n def sysID_traj_rollout_impact_xz(self, x_star, u_star):\n x_rollout = []\n x_rollout.append(tf.expand_dims(x_star[:,0], axis = 1))\n dist1 = tf.reduce_sum((x_star[:2,:]-x_star[4:6, :])**2, axis=0, keepdims=True)\n dist2 = tf.reduce_sum((x_star[4:6,:]-x_star[8:10, :])**2, axis=0, keepdims=True)\n dist3 = tf.reduce_sum((x_star[:2,:]-x_star[8:10, :])**2, axis=0, keepdims=True)\n \n # dist4 = tf.reduce_sum((x_star[:2,:self.T]-x_star[:2, 1:])**2, axis=0, keepdims=True)\n # dist4 = tf.concat((dist4, [dist4[:,-1]]), axis = 1)\n # dist5 = tf.reduce_sum((x_star[4:6,:self.T]-x_star[4:6, 1:])**2, axis=0, keepdims=True)\n # dist5 = tf.concat((dist5, [dist5[:,-1]]), axis = 1)\n # dist6 = tf.reduce_sum((x_star[8:10,:self.T]-x_star[8:10, 1:])**2, axis=0, keepdims=True)\n # dist6 = tf.concat((dist6, [dist6[:,-1]]), axis = 1)\n\n rho = self.mode_update_logReg(tf.concat([dist1, dist2, dist3], axis=0))\n # rho = self.mode_update_logReg(tf.concat([dist1, dist2, dist3, dist4, dist5, dist6], axis=0))\n xtp1 = self.A_tf@tf.expand_dims(x_star[:,0], axis = 1) + (self.tempm_l1@self.massinv_tf@tf.expand_dims(rho[:,0], axis=1)@self.tempm_r1 + self.tempm_l2@self.massinv_tf@tf.expand_dims(rho[:,0], axis=1)@self.tempm_r2 
)@self.Bpart_tf@tf.expand_dims(u_star[:,0], axis = 1)\n x_rollout.append(xtp1)\n for i in range(1, self.T):\n if (tf.math.argmax(rho[:,i-1]) == 0 and tf.math.argmax(rho[:,i]) == 1) or (tf.math.argmax(rho[:,i-1]) == 1 and tf.math.argmax(rho[:,i]) == 0):\n C_full = (self.temp_c + self.temp_l1@self.C1train_tf@self.temp_r1 + self.temp_l2@self.C1train_tf@self.temp_r2)\n elif (tf.math.argmax(rho[:,i-1]) == 1 and tf.math.argmax(rho[:,i]) == 2) or (tf.math.argmax(rho[:,i-1]) == 2 and tf.math.argmax(rho[:,i]) == 1):\n C_full = (self.temp_c + self.temp_l1@self.C2train_tf@self.temp_r1 + self.temp_l2@self.C2train_tf@self.temp_r2)\n elif(tf.math.argmax(rho[:,i-1]) == 0 and tf.math.argmax(rho[:,i]) == 2) or (tf.math.argmax(rho[:,i-1]) == 2 and tf.math.argmax(rho[:,i]) == 0):\n C_full = (self.temp_c + self.temp_l1@self.C3train_tf@self.temp_r1 + self.temp_l2@self.C3train_tf@self.temp_r2)\n else:\n C_full = tf.eye(self.ss_len, dtype=tf.float64)\n xtp1 = self.A_tf@C_full@tf.expand_dims(x_star[:,i], axis = 1) + (self.tempm_l1@self.massinv_tf@tf.expand_dims(rho[:,i], axis=1)@self.tempm_r1 + self.tempm_l2@self.massinv_tf@tf.expand_dims(rho[:,i], axis=1)@self.tempm_r2 )@self.Bpart_tf@tf.expand_dims(u_star[:,i], axis = 1)\n x_rollout.append(xtp1)\n # x_rollout = tf.transpose(tf.squeeze(tf.stack(x_rollout))) # converting y to tf array\n x_rollout = tf.concat(x_rollout, axis = 1)\n return x_rollout, rho\n \n def iLQR(self, x0_tf):\n \n rho_tf = tf.Variable(tf.concat( [tf.tile( tf.constant([[1.], [0.]] , dtype=tf.dtypes.float64), [1, int( self.T/2)]), tf.tile(tf.constant([[0.], [1.]] , dtype=tf.dtypes.float64), [1,self.T - int(self.T/2)] )], axis=1), dtype=tf.dtypes.float64)\n\n tol = 100\n thresh = 0.1\n i=0\n while thresh>1e-6 and i < self.iLQR_niter:\n # Forward pass\n # rho_tf = tf.Variable(rho[:,0:self.T]) # if you create a new variable everytime it loses information of everything before that\n if i > 0:\n tol_old = tol\n xold = tf.identity(x)\n uold = tf.identity(u)\n # with tf.GradientTape() as tape:\n # try:\n u, x = self.LQR_tf(x0_tf, rho_tf[:,:self.T])\n rho_tf = self.mode_update_logReg_expert(x)\n if i > 0:\n tol = tf.reduce_sum(tf.square(x-xold)) + tf.reduce_sum(tf.square(u-uold))\n thresh = (tol-tol_old)**2\n i = i+1\n # print('tol:',tol)\n # print('iLQR iterations: ', i)\n # ipdb.set_trace()\n return u, x, rho_tf\n\n def evaluate(self, train_xinit, expert_traj, converged_rho_tf):\n\n cost = 0.\n x_star = expert_traj[:self.n_objects*self.n, :]\n u_star = expert_traj[self.n_objects*self.n:, :]\n\n u_tr, x_tr = self.LQR_tf_xz(train_xinit, converged_rho_tf[:,:self.T])\n ## Considering SysID cost using expert controls\n # x_rollout, _ = self.sysID_traj_rollout_impact(x_star, u_star)\n # sysIDcost = tf.reduce_sum( tf.square(tf.subtract(x_star, x_rollout)))\n\n cost = tf.reduce_sum( tf.square(tf.subtract(x_star, x_tr))) + tf.reduce_sum( tf.square(tf.subtract(u_star, u_tr)) )\n cost = cost/(x_star.shape[1])#/(x_star.shape[0])\n # print('cost:', cost)\n # cost = (cost + 0.5*sysIDcost)/(x_star.shape[1])\n return cost\n\n def evaluate_sysID(self, expert_traj):\n cost = 0.\n x_star = expert_traj[:self.n_objects*self.n, :]\n u_star = expert_traj[self.n_objects*self.n:, :]\n\n x_rollout, _ = self.sysID_traj_rollout_impact_xz(x_star, u_star)\n sysIDcost = tf.reduce_sum( tf.square(tf.subtract(x_star, x_rollout)))\n\n cost = 100*(sysIDcost)#/(x_star.shape[1])\n\n # cost = self.sysID_traj_rollout_impact_prob(x_star, u_star)\n return cost\n\n def eval_loss(self, train_xinit, N, expert_trajs, converged_rho_tf):\n return 
sum([self.evaluate(train_xinit[i], expert_trajs[i], converged_rho_tf[i])\n for i in range(N)]) / N\n\n def eval_loss_sysID(self, N, expert_trajs):\n return sum([self.evaluate_sysID(expert_trajs[i])\n for i in range(N)]) / N\n\n def train(self, train_xinit, expert_trajs, applygradient):\n # optimizer = tf.keras.optimizers.SGD(learning_rate=gamma, momentum=self.hparams['momentum'])\n optimizer = tf.keras.optimizers.Adam(learning_rate=self.lr, beta_1=0.9, beta_2=0.999, epsilon=1e-04)\n # converged_rho_tf = [self.iLQR(train_xinit[i])[2] for i in range(train_xinit.shape[0])]\n # converged_rho_tf = [self.mode_update_logReg_expert(expert_trajs[i]) for i in range(train_xinit.shape[0])]\n # converged_rho_tf = [self.mode_update_logReg_expert_exp1(expert_trajs[i]) for i in range(train_xinit.shape[0])]\n # converged_rho_tf = [self.mode_update_logReg(tf.square(expert_trajs[i,:self.n,:]-expert_trajs[i,self.n:self.n_objects*self.n,:])) for i in range(train_xinit.shape[0])]\n\n # converged_rho_tf = [out[0] for out in outputs]\n # converged_Ct_tf = [out[1] for out in outputs]\n\n # with tf.GradientTape(persistent=True) as tape:\n with tf.GradientTape() as tape:\n # converged_rho_tf = [self.mode_update_logReg(tf.square(expert_trajs[i,:self.n,:]-expert_trajs[i,self.n:self.n_objects*self.n,:])) for i in range(train_xinit.shape[0])]\n # loss = self.eval_loss(train_xinit, train_xinit.shape[0], expert_trajs, converged_rho_tf)\n loss_sysID = self.eval_loss_sysID(train_xinit.shape[0], expert_trajs)\n\n # variables = [self.Qtrain_tf, self.xfs_train_tf, self.Rtrain_tf, self.Qftrain_tf]\n # variables = [self.Qtrain_tf, self.xfs_train_tf, self.Rtrain_tf, self.Qftrain_tf, self.massinv_tf, *self.classifier.variables, self.Ctrain_tf]\n # variables_sysID = [self.massinv_tf]\n variables_sysID = [self.massinv_tf, *self.classifier.variables, self.C1train_tf, self.C2train_tf, self.C3train_tf]\n # variables_sysID = [self.massinv_tf, self.Ctrain_tf]\n\n # gradients = tape.gradient(loss, variables)\n\n gradients_sysID = tape.gradient(loss_sysID, variables_sysID)\n \n if applygradient == True:\n # optimizer.apply_gradients(zip(gradients, variables))\n optimizer.apply_gradients(zip(gradients_sysID, variables_sysID))\n\n self.massinv_tf.assign(tf.abs(self.massinv_tf)) # IMPORTANT WAY TO REASSSIGN VALUE SUCH THAT IT REMAINS A TF.VARIABLE AND DOESNT BECOME TF.TENSOR\n self.C1train_tf.assign(tf.abs(self.C1train_tf))\n self.C2train_tf.assign(tf.abs(self.C2train_tf))\n self.C3train_tf.assign(tf.abs(self.C3train_tf))\n # self.Rtrain_tf.assign(tf.abs(self.Rtrain_tf))\n # self.Qtrain_tf.assign(tf.abs(self.Qtrain_tf))\n # self.Qftrain_tf.assign(tf.abs(self.Qftrain_tf))\n return loss_sysID","sub_path":"LQRlayer_train_self_xz_exp1.py","file_name":"LQRlayer_train_self_xz_exp1.py","file_ext":"py","file_size_in_byte":17664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"528967500","text":"import numpy as np\n\nimport sys\nsys.path.append(r'C:\\Users\\woottenm\\Documents\\Code\\zebrafish-analysis')\nfrom using_cropped import *\nfrom image_process import crop_to_nonzero\n\nimport skimage\nimport skimage.io\nimport skimage.draw\nimport skimage.color\nimport skimage.measure\nimport skimage.exposure\nimport skimage.morphology\n\ndef get_major_axis_line(blob, length=2):\n (cy, cx) = blob.centroid\n (dx, dy) = (length * np.cos(blob.orientation), length * np.sin(blob.orientation))\n xs = [cx + dx, cx - dx]\n ys = [cy - dy, cy + dy]\n return (xs, ys)\n\ndef extend_blobs(image, length=50):\n scaled = np.copy(skimage.exposure.rescale_intensity(skimage.img_as_float(image)))\n for blob in get_blobs(image):\n ([x0, x1], [y0, y1]) = get_major_axis_line(blob, length=length)\n line = skimage.draw.line(int(y0), int(x0), int(y1), int(x1))\n for (r, c) in zip(*line):\n try:\n scaled[r, c] = 0.5\n except IndexError:\n pass\n return scaled\n\nI_with_lines = np.array(list(map(extend_blobs, I_crop_just_eyes)))\n\ndef angle_difference(θ1, θ2):\n ordinary_diff = (θ2 - θ1) % np.pi\n return (np.pi / 2) - np.abs(ordinary_diff - (np.pi / 2))\n\ndef angle_between_eyes(image):\n blobs = get_blobs(image)\n if len(blobs) != 2:\n return float('nan')\n [blob1, blob2] = blobs\n return angle_difference(blob1.orientation, blob2.orientation)\n\nangle_diffs = np.array(list(map(angle_between_eyes, I_crop_just_eyes)))\n","sub_path":"mwCode/wednesday_orienting.py","file_name":"wednesday_orienting.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"5819739","text":"#!flask/bin/python\nimport six\nfrom flask import Flask, jsonify, abort, request, make_response, url_for\nfrom flask_httpauth import HTTPBasicAuth\n\napp = Flask(__name__, static_url_path=\"\")\nauth = HTTPBasicAuth()\n\n\n@auth.get_password\ndef get_password(username):\n if username == 'grholl':\n return 'python'\n return None\n\n\n@auth.error_handler\ndef unauthorized():\n # return 403 instead of 401 to prevent browsers from displaying the default\n # auth dialog\n return make_response(jsonify({'error': 'Unauthorized access'}), 403)\n\n\n@app.errorhandler(400)\ndef bad_request(error):\n return make_response(jsonify({'error': 'Bad request'}), 400)\n\n\n@app.errorhandler(404)\ndef not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)\n\n\ntokens = [\n {\n 'id': 1,\n 'title': u'Silver Token',\n 'description': u'Good for: Domestic Beer (Budweiser, Miller, Coors)',\n 'done': False\n },\n {\n 'id': 2,\n 'title': u'Gold Token',\n 'description': u'Good for: Premium Beer (Ballast Point, Lakefront Brewery, Goose Island, Summit)',\n 'done': False\n }\n]\n\n\ndef make_public_token(token):\n new_token = {}\n for field in token:\n if field == 'id':\n new_token['uri'] = url_for('get_token', token_id=token['id'],\n _external=True)\n else:\n new_token[field] = token[field]\n return new_token\n\n\n@app.route('/pintshare/api/v1.0/tokens', methods=['GET'])\n@auth.login_required\ndef get_tokens():\n return jsonify({'tokens': [make_public_token(token) for token in tokens]})\n\n\n@app.route('/pintshare/api/v1.0/tokens/ \\r\\n')\n f.write('순위: ' + str(rank) + '
\")\n\n return render_template('derp2.html',x = restaurants)\n\nif __name__ == \"__main__\":\n app.debug = True\n app.run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"340415323","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nTest using the NIST Test Vectors\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport binascii\nimport os\n\nimport pytest\n\nfrom cryptography.hazmat.primitives.ciphers import algorithms, modes\n\nfrom .utils import generate_encrypt_test\nfrom ...utils import load_nist_vectors\n\n\n@pytest.mark.supported(\n only_if=lambda backend: backend.cipher_supported(\n algorithms.TripleDES(\"\\x00\" * 8), modes.CBC(\"\\x00\" * 8)\n ),\n skip_message=\"Does not support TripleDES CBC\",\n)\n@pytest.mark.cipher\nclass TestTripleDES_CBC(object):\n test_KAT = generate_encrypt_test(\n load_nist_vectors,\n os.path.join(\"ciphers\", \"3DES\", \"CBC\"),\n [\n \"TCBCinvperm.rsp\",\n \"TCBCpermop.rsp\",\n \"TCBCsubtab.rsp\",\n \"TCBCvarkey.rsp\",\n \"TCBCvartext.rsp\",\n ],\n lambda keys, **kwargs: algorithms.TripleDES(binascii.unhexlify(keys)),\n lambda iv, **kwargs: modes.CBC(binascii.unhexlify(iv)),\n )\n\n test_MMT = generate_encrypt_test(\n load_nist_vectors,\n os.path.join(\"ciphers\", \"3DES\", \"CBC\"),\n [\n \"TCBCMMT1.rsp\",\n \"TCBCMMT2.rsp\",\n \"TCBCMMT3.rsp\",\n ],\n lambda key1, key2, key3, **kwargs: algorithms.TripleDES(\n binascii.unhexlify(key1 + key2 + key3)\n ),\n lambda iv, **kwargs: modes.CBC(binascii.unhexlify(iv)),\n )\n\n\n@pytest.mark.supported(\n only_if=lambda backend: backend.cipher_supported(\n algorithms.TripleDES(\"\\x00\" * 8), modes.OFB(\"\\x00\" * 8)\n ),\n skip_message=\"Does not support TripleDES OFB\",\n)\n@pytest.mark.cipher\nclass TestTripleDES_OFB(object):\n test_KAT = generate_encrypt_test(\n load_nist_vectors,\n os.path.join(\"ciphers\", \"3DES\", \"OFB\"),\n [\n \"TOFBpermop.rsp\",\n \"TOFBsubtab.rsp\",\n \"TOFBvarkey.rsp\",\n \"TOFBvartext.rsp\",\n \"TOFBinvperm.rsp\",\n ],\n lambda keys, **kwargs: algorithms.TripleDES(binascii.unhexlify(keys)),\n lambda iv, **kwargs: modes.OFB(binascii.unhexlify(iv)),\n )\n\n test_MMT = generate_encrypt_test(\n load_nist_vectors,\n os.path.join(\"ciphers\", \"3DES\", \"OFB\"),\n [\n \"TOFBMMT1.rsp\",\n \"TOFBMMT2.rsp\",\n \"TOFBMMT3.rsp\",\n ],\n lambda key1, key2, key3, **kwargs: algorithms.TripleDES(\n binascii.unhexlify(key1 + key2 + key3)\n ),\n lambda iv, **kwargs: modes.OFB(binascii.unhexlify(iv)),\n )\n\n\n@pytest.mark.supported(\n only_if=lambda backend: backend.cipher_supported(\n algorithms.TripleDES(\"\\x00\" * 8), modes.CFB(\"\\x00\" * 8)\n ),\n skip_message=\"Does not support TripleDES CFB\",\n)\n@pytest.mark.cipher\nclass TestTripleDES_CFB(object):\n test_KAT = generate_encrypt_test(\n load_nist_vectors,\n os.path.join(\"ciphers\", \"3DES\", \"CFB\"),\n [\n \"TCFB64invperm.rsp\",\n \"TCFB64permop.rsp\",\n \"TCFB64subtab.rsp\",\n \"TCFB64varkey.rsp\",\n \"TCFB64vartext.rsp\",\n ],\n lambda keys, **kwargs: algorithms.TripleDES(binascii.unhexlify(keys)),\n lambda iv, **kwargs: modes.CFB(binascii.unhexlify(iv)),\n )\n\n test_MMT = generate_encrypt_test(\n load_nist_vectors,\n os.path.join(\"ciphers\", 
\"3DES\", \"CFB\"),\n [\n \"TCFB64MMT1.rsp\",\n \"TCFB64MMT2.rsp\",\n \"TCFB64MMT3.rsp\",\n ],\n lambda key1, key2, key3, **kwargs: algorithms.TripleDES(\n binascii.unhexlify(key1 + key2 + key3)\n ),\n lambda iv, **kwargs: modes.CFB(binascii.unhexlify(iv)),\n )\n","sub_path":"tests/hazmat/primitives/test_3des.py","file_name":"test_3des.py","file_ext":"py","file_size_in_byte":4186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"513855263","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.distributions import Categorical\n\n\ndef regression_loss(x, y):\n # x, y are in shape (N, C)\n x = F.normalize(x, dim=1)\n y = F.normalize(y, dim=1)\n return 2 - 2 * (x * y).sum(dim=-1)\n\n\ndef entropy(p):\n return Categorical(probs=p).entropy()\n\n\ndef entropy_loss(logits, reduction='mean'):\n\n losses = entropy(F.softmax(logits, dim=1)) # (N)\n if reduction == 'none':\n return losses\n elif reduction == 'mean':\n return torch.sum(losses) / logits.size(0)\n elif reduction == 'sum':\n return torch.sum(losses)\n else:\n raise AssertionError('reduction has to be none, mean or sum')\n\n\ndef cross_entropy(logits, labels, reduction='mean'):\n \"\"\"\n :param logits: shape: (N, C)\n :param labels: shape: (N, C)\n :param reduction: options: \"none\", \"mean\", \"sum\"\n :return: loss or losses\n \"\"\"\n N, C = logits.shape\n assert labels.size(0) == N and labels.size(1) == C, f'label tensor shape is {labels.shape}, while logits tensor shape is {logits.shape}'\n\n log_logits = F.log_softmax(logits, dim=1)\n losses = -torch.sum(log_logits * labels, dim=1) # (N)\n\n if reduction == 'none':\n return losses\n elif reduction == 'mean':\n return torch.sum(losses) / logits.size(0)\n elif reduction == 'sum':\n return torch.sum(losses)\n else:\n raise AssertionError('reduction has to be none, mean or sum')\n\n\ndef label_smoothing_cross_entropy(logits, labels, epsilon=0.1, reduction='none'):\n N = logits.size(0)\n C = logits.size(1)\n smoothed_label = torch.full(size=(N, C), fill_value=epsilon / (C - 1))\n smoothed_label.scatter_(dim=1, index=torch.unsqueeze(labels, dim=1).cpu(), value=1 - epsilon)\n if logits.is_cuda:\n smoothed_label = smoothed_label.cuda()\n return cross_entropy(logits, smoothed_label, reduction)\n\n\nclass SmoothingLabelCrossEntropyLoss(nn.Module):\n def __init__(self, epsilon=0.1, reduction='mean'):\n super().__init__()\n self._epsilon = epsilon\n self._reduction = reduction\n\n def forward(self, logits, labels):\n return label_smoothing_cross_entropy(logits, labels, self._epsilon, self._reduction)\n\n\nclass ScatteredCrossEntropyLoss(nn.Module):\n def __init__(self, reduction='mean'):\n super().__init__()\n self._reduction = reduction\n\n def forward(self, logits, labels):\n return cross_entropy(logits, labels, self._reduction)\n","sub_path":"utils/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"18824171","text":"import socket\nimport threading\nimport time\n\nimport gameBoard \nfrom PyQt5 import QtCore, QtGui, QtWidgets\nimport sys \nimport json\nimport socket\n\n\nclass NetworkConfig:\n def __init__(self, playerName):\n self.playerName = playerName\n self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n #self.client.settimeout(5) # 5s\n self.host = \"localhost\" # For this to work on your machine this must be equal to the ipv4 address of the machine running the server\n # You can find this address by typing ipconfig in CMD and copying the ipv4 address. Again this must be the servers\n # ipv4 address. This feild will be the same for all your clients.\n self.port = 8080\n self.addr = (self.host, self.port)\n self.id = self.connect()\n self.connected = False\n \n def connect(self):\n try:\n print(\"{} is trying to connect to the server...\".format(self.playerName))\n self.client.connect(self.addr)\n except:\n print(\"Could not make a connection to the server\")\n input(\"Press enter to quit\")\n sys.exit(0)\n\n return self.client.recv(2048).decode()\n\n def send(self, data):\n try:\n self.client.send(str.encode(data))\n reply = json.loads(self.client.recv(2048).decode())\n return reply # in json\n except socket.error as e:\n return str(e)\n \n def recv(self):\n try:\n reply = self.client.recv(2048).decode()\n return reply\n except socket.error as e:\n return str(e) \n\nclass Player:\n def __init__(self,name):\n self.playerName = name\n self.net = NetworkConfig(self.playerName)\n self.signal = True\n self.connected = False\n \n \n def run(self):\n if self.net.id == \"connected\":\n print(\"Player game status: \", self.net.id)\n \n received = self.net.recv()\n \n if received != \"\":\n print(\"Received (from Server): {}\\n\".format(received))\n \n if received == \"start\" or \"start_first\":\n print(\"Game is starting\\n\")\n if received == \"start_first\":\n print(\"I am starting first\\n\") \n print(\"Starting GUI...\")\n self.run_gui()\n else:\n print(\"Wait to start...\")\n else:\n print(\"Empty package\\n\") \n \n def run_gui(self):\n app = QtWidgets.QApplication(sys.argv)\n MainWindow = QtWidgets.QMainWindow()\n ui = gameBoard.Ui_MainWindow(self.net, self.playerName)\n ui.setupUi(MainWindow)\n MainWindow.show()\n sys.exit(app.exec_())\n","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"353988459","text":"\n# This is the file where you must work.\n# Write code in the functions (and create new functions) so that they work\n# according to the requirements.\nimport os\nimport csv\nfrom pathlib import Path\n\n\ndef isWritable(directory):\n try:\n tmp_prefix = \"write_tester\";\n count = 0\n filename = os.path.join(directory, tmp_prefix)\n while(os.path.exists(filename)):\n filename = \"{}.{}\".format(os.path.join(directory, tmp_prefix),count)\n count = count + 1\n f = open(filename,\"w\")\n f.close()\n os.remove(filename)\n return True\n except Exception as e:\n #print \"{}\".format(e)\n return False\n\n\ndef display_inventory(inventory):\n \"\"\"Display the contents of the inventory in a simple way.\"\"\"\n\n for key, value in inventory.items():\n print(f'{key}:{value}')\n\n\ndef add_to_inventory(inventory, added_items):\n \"\"\"Add to the inventory dictionary a list of items from added_items.\"\"\"\n if added_items in inventory:\n inventory[added_items] += 1\n else:\n inventory[element] = 1\n \n\n\ndef remove_from_inventory(inventory, removed_items):\n \"\"\"Remove from the inventory dictionary a list of items from removed_items.\"\"\"\n if removed_items in inventory:\n inventory[removed_items] -= 1\n if inventory[removed_items] == 0:\n inventory.pop(removed_items)\n else:\n pass\n\n\ndef print_table(inventory, order):\n \"\"\"\n Display the contents of the inventory in an ordered, well-organized table with\n each column right-aligned.\n \"\"\"\n if order == 'count,desc':\n sorted_inventory = sorted(inventory.items(), key=lambda x: x[1])\n elif order == 'count,asc':\n sorted_inventory = sorted(inventory.items(), key=lambda x: x[1], reverse=True)\n else: \n sorted_inventory = [(key,value) for key,value in inventory.items()]\n \n print(\"-\"*23)\n print(\"{:<10} | {:>10}\".format(\"item name\",\"count\"))\n print(\"-\"*23)\n for key,value in sorted_inventory:\n print(\"{:<10} | {:>10}\".format(key,value))\n print(\"-\"*23)\n\n\ndef import_inventory(inventory, filename):\n \"\"\"Import new inventory items from a CSV file.\"\"\"\n try:\n path = Path(__file__).parent\n csv_file = open(f'{path}\\\\{filename}',\"rt\", encoding=\"utf8\",)\n csv_reader = csv.reader(csv_file)\n lista = []\n for row in csv_reader:\n for element in row:\n if element in inventory:\n inventory[element] += 1\n else:\n inventory[element] = 1 \n except:\n print(f\"File {filename} not found!\")\n\n\n\n \ndef export_inventory(inventory, filename):\n \"\"\"Export the inventory into a CSV file.\"\"\"\n try:\n if not isWritable(path):\n raise ValueError(f'You don\\'t have permission creating file {filename}!')\n\n path = Path(__file__).parent\n csv_file = open(f'{path}\\\\{filename}',\"w\", encoding=\"utf8\",)\n\n csv_writer = csv.writer(csv_file)\n\n\n # inventory_to_export = ['One','Twoo',12343]\n # csv_writer.writerow(inventory_to_export)\n\n for element in inventory:\n element_times = inventory[element]\n while element_times > 0:\n csv_writer.writerow([element])\n print(element)\n element_times -= 1 \n except:\n print(f\"File {filename} not found!\")\n pass\n\ndef main_menu():\n\n user_inventory = {}\n\n # default filenames\n filename_to_import = \"import_inventory.csv\"\n filename_to_export = \"export_inventory.csv\"\n\n import_inventory(user_inventory,filename_to_import)\n #import_inventory(user_inventory,filename_to_export)\n display_inventory(user_inventory)\n print_table(user_inventory, 'count,asc')\n #print_table(user_inventory,'')\n add_to_inventory(user_inventory,\"diamond\")\n 
remove_from_inventory(user_inventory,\"rope\")\n remove_from_inventory(user_inventory,\"hammer\")\n print_table(user_inventory, 'count,asc')\n #export_inventory(user_inventory,filename_to_export)\n\nif __name__ == '__main__':\n main_menu()","sub_path":"task/game_inventory (original).py","file_name":"game_inventory (original).py","file_ext":"py","file_size_in_byte":4105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"373047604","text":"from selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nimport re\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\nimport os\r\nimport csv\r\nfrom selenium.webdriver.chrome.options import Options\r\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\r\nfrom selenium.webdriver.common.action_chains import ActionChains\r\nfrom time import sleep\r\nimport time\r\nfrom Crypto.Cipher import AES\r\nimport base64\r\nfrom hashlib import md5\r\n\r\nclass Sky():\r\n def __init__(self):\r\n self.option = webdriver.ChromeOptions()\r\n self.option.add_experimental_option('excludeSwitches', ['enable-automation'])\r\n self.driver = webdriver.Chrome(options=self.option)\r\n self.driver.execute_cdp_cmd(\"Page.addScriptToEvaluateOnNewDocument\", {\r\n \"source\": \"\"\"\r\n Object.defineProperty(navigator, 'webdriver', {\r\n get: () => undefined\r\n })\r\n \"\"\"\r\n })\r\n self.driver.implicitly_wait(10)\r\n self.driver.maximize_window()\r\n self.url = 'https://www.aqistudy.cn/historydata/daydata.php?city=%E6%88%90%E9%83%BD&month=201612'\r\n self.wait = WebDriverWait(self.driver, 15)\r\n self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.92 Safari/537.36',}\r\n self.local_key = 'emhlbnFpcGFsbWtleQ=='\r\n self.local_vi = 'emhlbnFpcGFsbWl2'\r\n\r\n def get_data_by_selenium(self):\r\n self.driver.get(self.url)\r\n time.sleep(1)\r\n # items = self.driver.execute_script(\"return localStorage.getItem('781d10706b2d2ed381e835e06a3c5205')\")\r\n items = self.driver.execute_script(\"return localStorage.key(0)\")\r\n print(items)\r\n script = \"return localStorage.getItem('{}')\".format(items)\r\n print(script)\r\n items = self.driver.execute_script(script)\r\n print(items)\r\n return items\r\n\r\n def get_data_by_req(self):\r\n resp = requests.get(self.url, headers=self.headers)\r\n print(resp.status_code)\r\n print(resp.text)\r\n\r\n def AES_Decrypt(self, data):\r\n secretkey = md5(self.local_key.encode('utf-8')).hexdigest()[16:32]\r\n secretiv = md5(self.local_vi.encode('utf-8')).hexdigest()[0:16]\r\n print(secretkey,secretiv)\r\n data = data.encode('utf8')\r\n encodebytes = base64.decodebytes(data)\r\n # 将加密数据转换位bytes类型数据\r\n cryptos = AES.new(secretkey.encode('utf8'), AES.MODE_CBC, secretiv.encode('utf8'))\r\n text_decrypted = cryptos.decrypt(encodebytes)\r\n unpad = lambda s: s[0:-s[-1]]\r\n text_decrypted = unpad(text_decrypted)\r\n # 去补位\r\n text_decrypted = text_decrypted.decode('utf8')\r\n # if text_decrypted:\r\n # self.driver.close()\r\n return text_decrypted\r\n\r\n\r\nif __name__ == '__main__':\r\n s = Sky()\r\n items = s.get_data_by_selenium()\r\n print(items)\r\n text_decrypted = s.AES_Decrypt(items)\r\n print(text_decrypted)\r\n print(base64.b64decode(text_decrypted.encode('utf-8')).decode('utf-8'))\r\n","sub_path":"PycharmProjects/Reptile/seleniun_Learn/sky.py","file_name":"sky.py","file_ext":"py","file_size_in_byte":3226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"221359039","text":"__author__ = 'ian'\ndef geminator():\n number_of_rocks = int(input())\n rock_array = []\n for i in range(0, number_of_rocks):\n stbw = \"\".join(set(str(input())))\n rock_array.append(stbw)\n rock_dict = {\"a\": 0, \"b\": 0, \"c\": 0, \"d\": 0, \"e\": 0, \"f\": 0, \"g\": 0, \"h\": 0, \"i\": 0, \"j\": 0, \"k\": 0, \"l\": 0, \"m\": 0, \"n\": 0, \"o\": 0, \"p\": 0, \"q\": 0, \"r\": 0, \"s\": 0, \"t\": 0, \"u\": 0, \"v\": 0, \"w\": 0, \"x\": 0, \"y\": 0, \"z\": 0}\n for i in range(0, len(rock_array)):\n for s in range(0, len(rock_array[i])):\n rock_dict[str(rock_array[i][s])] += 1\n counter = 0\n for k, v in rock_dict.items():\n print(k, v)\n if v == number_of_rocks:\n counter += 1\n print(counter)\ngeminator()\n","sub_path":"Raw_cut.py","file_name":"Raw_cut.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"467654317","text":"from sense_hat import SenseHat\r\nimport requests\r\nimport json\r\nfrom random import randint\r\nfrom time import sleep\r\nimport json\r\nimport sys\r\n\r\nsense = SenseHat()\r\nsense.clear()\r\n\r\ndef load_data():\r\n with open('data.json') as json_data:\r\n global data_file\r\n data_file = json.load(json_data)\r\n\r\ndef save(name, choice):\r\n global data_file\r\n if(choice == 'like'):\r\n data_file['liked'].append(name)\r\n else:\r\n data_file['disliked'].append(name)\r\n with open('data.json', 'w') as outfile:\r\n json.dump(data_file, outfile)\r\n \r\ndef get_user():\r\n global name\r\n\r\n response = requests.get(\"https://randomuser.me/api/\")\r\n jsonn = response.json()\r\n\r\n title = jsonn['results'][0]['name']['title']\r\n first_name = jsonn['results'][0]['name']['first']\r\n last_name = jsonn['results'][0]['name']['last']\r\n\r\n full_name = title + ' ' + first_name + ' ' + last_name\r\n\r\n\r\n\r\nwhile True:\r\n try:\r\n load_data()\r\n get_user()\r\n events = sense.stick.get_events()\r\n\r\n sense.show_message(full_name)\r\n\r\n current_event = sense.stick.wait_for_event()\r\n \r\n if(current_event.direction == 'right'):\r\n choice = 'like'\r\n sense.clear(0, 255, 0)\r\n else:\r\n choice = 'dislike'\r\n sense.clear(255, 0, 0)\r\n\r\n sleep(1)\r\n\r\n save(full_name, choice)\r\n\r\n except KeyboardInterrupt:\r\n sense.clear()\r\n sys.exit(0)","sub_path":"tinder/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"433752842","text":"from .config import DiggicampConf, CONFIG_VERSION\n\n\n# migrate from no version tag to version tag\ndef migrate_none(conf: DiggicampConf):\n print(\"Migrating to version 1.1.0...\")\n conf.set('version', '1.1.0')\n\n if not conf.get('downloads'):\n return conf\n\n old_dl = conf.get('downloads')\n downloads = []\n for fid in old_dl:\n if isinstance(old_dl[fid], str):\n downloads.append({\n 'folder': fid,\n 'target': old_dl[fid]\n })\n else:\n downloads.append({\n 'folder': fid,\n 'target': old_dl[fid]['target'],\n 'regex': old_dl[fid]['regex']\n })\n\n conf.set('downloads', downloads)\n\n\nMIGRATIONS = {\n 'None': migrate_none\n}\n\n\ndef migrate_config(conf: DiggicampConf):\n while conf.version() != CONFIG_VERSION:\n if conf.version() == None:\n conf = MIGRATIONS['None'](conf)\n continue\n\n if conf.version() in MIGRATIONS:\n conf = MIGRATIONS[conf.version()](conf)\n else:\n raise Exception(\"Cannot migrate from \" + conf.version() + \" - No migration found!\")\n","sub_path":"diggicamp/config_migrations.py","file_name":"config_migrations.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"185199629","text":"import numpy as np \r\nimport cv2 \r\nimport scipy.io \r\n#from scipy.misc import imread \r\n#import matplotlib.pyplot as plt\r\nimport imageio\r\nimport open3d\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nimport time\r\nfrom skimage.transform import downscale_local_mean\r\nfrom scipy.stats import entropy \r\nfrom collections import defaultdict\r\nfrom functools import reduce\r\n#from cam_funcs import *\r\n\r\nclass SpaceCarve(object):\r\n def __init__(self, resolution, p_sense, p_change, K, rgb_lower=[180, 180, 0], rgb_upper=[255, 255, 20], frame_width=576, frame_height=694, voxel_center=(0,0,-.6), voxbox_size=.4, mode='mujoco', version=(1, 1), update=3, z_prob_occ=.55):\r\n #self.resolution = resolution\r\n self.p_sense = p_sense\r\n self.p_change = p_change \r\n self.voxelCoords = self.makeVoxels(resolution, voxel_center, voxbox_size)\r\n self.voxelVals = np.divide(np.ones(np.shape(self.voxelCoords)[0]), 2)\r\n self.rgb_lower = rgb_lower\r\n self.rgb_upper = rgb_upper\r\n self.frame_width = frame_width\r\n self.frame_height = frame_height\r\n self.K = K \r\n self.num_carves = 0\r\n self.resolution = resolution \r\n self.mode = mode \r\n self.version = version \r\n self.update = update\r\n self.z_prob_occ = z_prob_occ\r\n\r\n def reset(self):\r\n self.num_carves = 0\r\n self.voxelVals = np.divide(np.ones(np.shape(self.voxelCoords)[0]), 2)\r\n\r\n def getVoxelCoords(self):\r\n return self.voxelCoords\r\n\r\n def getVoxelVals(self):\r\n return self.voxelVals\r\n\r\n def carve(self, cam_ext, img, segment=True, fpath=None):\r\n\r\n self.num_carves += 1\r\n\r\n proj = self.project(cam_ext, np.transpose(self.voxelCoords))\r\n\r\n if segment:\r\n img_mask = self.applySegmentation(img)\r\n else:\r\n img_mask = img\r\n\r\n cv2.imshow('mask', img_mask)\r\n cv2.waitKey(10)\r\n\r\n #imageio.imwrite('imgs/mask_{}.jpg'.format(self.num_carves), img_mask)\r\n if fpath is not None:\r\n imageio.imwrite('{}_mask_{}.jpg'.format(fpath, self.num_carves), img_mask)\r\n\r\n # if self.mode == 'mujoco':\r\n # hit_pixels = [(u, v) for u in range(0, self.frame_width) for v in range(0, self.frame_height) if img_mask[v, u] > 0]\r\n # voxel_hits = []\r\n #print(np.shape(img_mask))\r\n\r\n hit_dict = {}\r\n unhit_dict = {}\r\n count = 0\r\n \r\n for i in range(0, np.shape(proj)[1]):\r\n p_prev = self.p_change * self.voxelVals[i] + (1 - self.p_change) * (1 - self.voxelVals[i]) \r\n v = int(proj[1][i]/proj[2][i])\r\n u = int(proj[0][i]/proj[2][i])\r\n \r\n \r\n if u >=0 and v >= 0 and u < self.frame_width and v < self.frame_height: \r\n count += 1\r\n \r\n if img_mask[v, u] > 0 and self.mode == 'mujoco': # change back\r\n occupied = 1 \r\n if self.update == 2:\r\n self.voxelVals[i] = (self.z_prob_occ * p_prev)/(self.z_prob_occ * p_prev + (1 - self.z_prob_occ) * (1 - p_prev))\r\n \r\n if self.update == 3:\r\n if (u, v) not in hit_dict.keys():\r\n hit_dict[(u, v)] = [i]\r\n else:\r\n hit_dict.get((u, v)).append(i)\r\n\r\n elif img_mask[v, u] == 0 and self.mode == 'dino':\r\n occupied = 1\r\n if self.update == 2:\r\n self.voxelVals[i] = (self.z_prob_occ * p_prev)/(self.z_prob_occ * p_prev + (1 - self.z_prob_occ) * (1 - p_prev))\r\n \r\n if self.update == 3:\r\n if (u, v) not in hit_dict.keys():\r\n hit_dict[(u, v)] = [i]\r\n else:\r\n hit_dict.get((u, v)).append(i)\r\n \r\n else:\r\n occupied = 0\r\n if self.update != 3:\r\n likelihood = (1 - self.p_sense) # p(z = 0 | x = 1)\r\n nlikelihood = self.p_sense # p(z = 0 | x = 0)\r\n # p(z = 0 | x = 1) * p(x = 1) / (p(z = 0 | x = 1) * p(x = 1) + 
p(z = 0 | x = 0) * p(x = 0))\r\n self.voxelVals[i] = (likelihood * p_prev)/(likelihood * p_prev + nlikelihood * (1 - p_prev))\r\n\r\n else:\r\n if (u, v) not in unhit_dict.keys():\r\n unhit_dict[(u, v)] = [i]\r\n elif (u, v) in unhit_dict.keys(): \r\n unhit_dict.get((u, v)).append(i) \r\n\r\n\r\n if self.update == 3:\r\n voxelValsCopy = self.voxelVals\r\n for k in unhit_dict.keys():\r\n v_list = unhit_dict.get(k)\r\n for i in v_list:\r\n if len(v_list) > 1:\r\n p_others_empty = reduce(lambda x, y: x * y, map(lambda x: 1 - voxelValsCopy[x], list(set(v_list) - set([i]))))\r\n nlikelihood = p_others_empty * self.p_sense + (1 - p_others_empty) * (1 - self.p_sense)\r\n else:\r\n nlikelihood = self.p_sense\r\n likelihood = 1 - self.p_sense\r\n p_prev = self.p_change * voxelValsCopy[i] + (1 - self.p_change) * (1 - voxelValsCopy[i])\r\n self.voxelVals[i] = likelihood * p_prev/(likelihood * p_prev + nlikelihood * (1 - p_prev))\r\n for k in hit_dict.keys():\r\n v_list = hit_dict.get(k)\r\n for i in v_list:\r\n if len(v_list) > 1:\r\n p_others_empty = reduce(lambda x, y: x * y, map(lambda x: 1 - voxelValsCopy[x], list(set(v_list) - set([i]))))\r\n nlikelihood = (1 - p_others_empty) * self.p_sense + p_others_empty * (1 - self.p_sense)\r\n else:\r\n nlikelihood = (1 - self.p_sense) \r\n likelihood = self.p_sense \r\n p_prev = self.p_change * voxelValsCopy[i] + (1 - self.p_change) * (1 - voxelValsCopy[i])\r\n self.voxelVals[i] = likelihood * p_prev/(likelihood * p_prev + nlikelihood * (1 - p_prev))\r\n \r\n #projection = self.projectThresh(cam_ext, threshold=.5)\r\n #imageio.imwrite('imgs/camprojection_{}.jpg'.format(self.num_carves), projection)\r\n \r\n return \r\n\r\n def project(self, camE, worldCoords):\r\n transformedCoords = np.matmul(camE, worldCoords)\r\n\r\n if self.mode == 'mujoco':\r\n camX = -transformedCoords[0, :] - 0.23571429 +.01\r\n camY = transformedCoords[1, :] - 0.1744898 \r\n camZ = -transformedCoords[2, :] - .25 #+ 0.225 - .08 \r\n \r\n camCoords = np.vstack((camX, camY, camZ, transformedCoords[3, :]))\r\n res = np.matmul(self.K, camCoords)\r\n\r\n else:\r\n res = transformedCoords\r\n return res\r\n\r\n def projectThresh(self, camE, threshold=.5, fname=None):\r\n worldCoords = np.transpose(np.array([self.voxelCoords[i, :] for i in range(np.shape(self.voxelCoords)[0]) if self.voxelVals[i] >= threshold]))\r\n\r\n if np.shape(worldCoords)[0] == 0:\r\n thresh = max(self.voxelVals)\r\n print(\"threshold too high, use {}\".format(thresh))\r\n worldCoords = np.transpose(np.array([self.voxelCoords[i, :] for i in range(0, np.shape(self.voxelCoords)[0]) if self.voxelVals[i] >= thresh]))\r\n\r\n proj = self.project(camE, worldCoords)\r\n \r\n proj_img = np.zeros((self.frame_height, self.frame_width))\r\n\r\n distToCenter = []\r\n\r\n for i in range(0, np.shape(proj)[1]):\r\n v = int(proj[1, i]/proj[2, i])\r\n u = int(proj[0, i]/proj[2, i])\r\n\r\n distToCenter.append(np.linalg.norm(np.array([v, u]) - np.array([self.frame_height/2, self.frame_width/2])))\r\n \r\n if u >=0 and v >= 0 and u < self.frame_width and v < self.frame_height: \r\n proj_img[v, u] = 1\r\n #if np.isclose(u, self.frame_width/2, atol=20) and np.isclose(v, self.frame_height/2, atol=20):\r\n #print(\"index: {}\".format(i))\r\n\r\n if fname is not None:\r\n imageio.imwrite('{}_proj_{}.jpg'.format(fname, self.num_carves), proj_img)\r\n\r\n return proj_img\r\n\r\n\r\n def projectUncertainty(self, cam, fpath=None):\r\n proj = self.project(cam, np.transpose(self.voxelCoords))\r\n \r\n proj_img = np.ones((self.frame_height, 
self.frame_width))\r\n counts = np.zeros((self.frame_height, self.frame_width))\r\n max_vals = np.zeros((self.frame_height, self.frame_width))\r\n\r\n hit_dict = defaultdict(lambda: 1)\r\n \r\n for i in range(0, np.shape(proj)[1]):\r\n v = int(proj[1][i]/proj[2][i])\r\n u = int(proj[0][i]/proj[2][i])\r\n \r\n if u >=0 and v >= 0 and u < self.frame_width and v < self.frame_height: \r\n if self.version[0] == 1:\r\n proj_img[v, u] *= 1 - self.voxelVals[i] # fix to deal with unhit pixels\r\n hit_dict[(u, v)] = hit_dict[(u, v)] * (1 - self.voxelVals[i])\r\n elif self.version[0] == 2:\r\n counts[v, u] += 1\r\n # VERSION 2\r\n # running avg of sqr dist from .5\r\n #proj_img[v, u] += ((self.voxelVals[i] - 0.5)**2 - proj_img[v, u])/counts[v, u] # UNCOMMENT TO RUN\r\n if self.voxelVals[i] > max_vals[v, u]:\r\n max_vals[v, u] = self.voxelVals[i]\r\n hit_dict[(u, v)] = max_vals[v, u] \r\n\r\n # if fpath is not None:\r\n # imageio.imwrite('{}_{}.jpg'.format(fpath, self.num_carves), proj_img)\r\n \r\n # VERSION 1\r\n if self.version[0] == 1:\r\n res = np.ones((self.frame_height, self.frame_width)) - np.array(proj_img)\r\n for k in hit_dict.keys():\r\n hit_dict[k] = 1 - hit_dict[k]\r\n\r\n # VERSION 2\r\n else:\r\n #res = 0.5 - np.array(proj_img) # UNCOMMENT TO RUN \r\n res = np.array(proj_img)\r\n\r\n return res, hit_dict \r\n\r\n def view_certainty(self, cam_viewpoint, fpath=None):\r\n proj, value_dict = self.projectUncertainty(cam_viewpoint)\r\n value_array = np.array([value_dict.get(k) for k in value_dict.keys()])\r\n\r\n if fpath is not None:\r\n imageio.imwrite('{}_{}.jpg'.format(fpath, self.num_carves), proj)\r\n\r\n # VERSION 1\r\n if self.version[1] == 1:\r\n #dist = np.square(proj - np.ones((np.shape(proj)[0], np.shape(proj)[1])) * .5)\r\n dist = np.square(value_array - np.ones(np.shape(value_array)[0]) * .5)\r\n res = np.average(dist) # add percentage of visible voxels\r\n\r\n # VERSION 2\r\n elif self.version[1] == 2:\r\n # proj_distribution = proj/np.sum(proj)\r\n # res = entropy(proj_distribution)\r\n res = -entropy(value_array)\r\n\r\n return res \r\n\r\n def voxel_uncertainty(self):\r\n dist = np.square(self.voxelVals - 0.5)\r\n return (0.5**2 - np.average(dist))/(0.5**2)\r\n\r\n def pcd_count(self, thresh=.5):\r\n vox_count = [v for v in self.voxelVals if v >= thresh]\r\n return len(vox_count)\r\n\r\n\r\n def applySegmentation(self, img):\r\n mask = cv2.inRange(img, np.array(self.rgb_lower), np.array(self.rgb_upper))\r\n return cv2.medianBlur(mask, 25)\r\n \r\n def makeVoxels(self, res, center, size):\r\n (x_c, y_c, z_c) = center\r\n x_s = np.linspace(x_c - size/2, x_c + size/2, res)\r\n y_s = np.linspace(y_c - size/2, y_c + size/2, res)\r\n z_s = np.linspace(z_c - size/2, z_c + size/2, res)\r\n voxels = np.array([[x, y, z, 1] for x in x_s for y in y_s for z in z_s])\r\n return voxels \r\n\r\n def toVoxelRep(self, downscale=True, d_scale=10):\r\n xmin = min(self.voxelCoords[:, 0])\r\n ymin = min(self.voxelCoords[:, 1])\r\n zmin = min(self.voxelCoords[:, 2])\r\n xmax = max(self.voxelCoords[:, 0])\r\n vox_range = xmax - xmin\r\n \r\n scale = (self.resolution - 1)/vox_range \r\n\r\n shift_vec = np.array([xmin, ymin, zmin, 0])\r\n voxels = self.voxelCoords - shift_vec\r\n voxels *= scale\r\n voxels = np.rint(voxels)\r\n voxelCube = np.zeros((self.resolution, self.resolution, self.resolution))\r\n\r\n for i in range(np.size(voxels[:, 0])):\r\n voxelCube[int(voxels[i, 0]), int(voxels[i, 1]), int(voxels[i, 2])] = self.voxelVals[i]\r\n\r\n if downscale == True:\r\n factor = 
int(self.resolution/d_scale)\r\n            res = downscale_local_mean(voxelCube, (factor, factor, factor))\r\n        else:\r\n            res = voxelCube\r\n        return res\r\n\r\n\r\n    def visualize(self, save=False, fname=\"./dino.ply\", threshold=0.5, show_frame=False):\r\n\r\n        pcd = open3d.geometry.PointCloud()\r\n        X = np.array([self.voxelCoords[i, :] for i in range(0, np.shape(self.voxelCoords)[0]) if self.voxelVals[i] >= threshold])\r\n        if np.shape(X)[0] == 0:\r\n            thresh = max(self.voxelVals)\r\n            print(\"threshold too high, use {}\".format(thresh))\r\n            X = np.array([self.voxelCoords[i, :] for i in range(0, np.shape(self.voxelCoords)[0]) if self.voxelVals[i] >= thresh])\r\n\r\n        pcd.points = open3d.Vector3dVector(X[:, 0:3])\r\n        open3d.estimate_normals(pcd, search_param = open3d.KDTreeSearchParamHybrid(radius = 0.1, max_nn = 100))\r\n        open3d.orient_normals_to_align_with_direction(pcd)\r\n        pcd.paint_uniform_color([1,0.706,0])\r\n        mesh_frame = open3d.geometry.create_mesh_coordinate_frame(size=0.6, origin=[.6, 0, .75])\r\n        \r\n        if show_frame: \r\n            open3d.visualization.draw_geometries([pcd, mesh_frame])\r\n        else:\r\n            open3d.visualization.draw_geometries([pcd])\r\n\r\n        if save == True:\r\n            open3d.write_point_cloud(fname, pcd)\r\n\r\n\r\n    def visualize_plt(self, threshold=0.5):\r\n        import matplotlib.pyplot as plt  # the module-level import is commented out above\r\n        fig = plt.figure()\r\n        ax = fig.add_subplot(111, projection='3d')\r\n        resVox = np.array([self.voxelCoords[i, :] for i in range(0, np.shape(self.voxelCoords)[0]) if self.voxelVals[i] >= threshold])\r\n        voxVals = np.array([self.voxelVals[i] for i in range(0, np.shape(self.voxelCoords)[0]) if self.voxelVals[i] >= threshold])\r\n\r\n        x = resVox[:, 0]\r\n        y = resVox[:, 1]\r\n        z = resVox[:, 2]\r\n\r\n        ax.scatter(x, y, z, c=voxVals, marker='o')\r\n\r\n        ax.set_xlabel('X Label')\r\n        ax.set_ylabel('Y Label')\r\n        ax.set_zlabel('Z Label')\r\n        #plt.gray()\r\n        plt.show()\r\n","sub_path":"spacecarve.py","file_name":"spacecarve.py","file_ext":"py","file_size_in_byte":14660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"16519827","text":"import json\nimport argparse\n\n\ndef get_arguments():\n parser = argparse.ArgumentParser(\n description=\"Path to json file\"\n )\n parser.add_argument(\n \"filepath\",\n help=\"Path to json file\",\n )\n args = parser.parse_args()\n return args\n\n\ndef load_data(filepath):\n with open(filepath, \"r\", encoding=\"UTF-8\") as json_file:\n return json.load(json_file)\n\n\ndef pretty_print_json(json_content):\n print(json.dumps(\n json_content,\n sort_keys=True,\n indent=4,\n ensure_ascii=False,\n separators=(\",\", \": \")\n ))\n\n\ndef main():\n file_path = get_arguments().filepath\n data_result = load_data(file_path)\n pretty_print_json(data_result)\n\n\nif __name__ == \"__main__\":\n try:\n main()\n except ValueError:\n print(\"This is not a json file\")\n","sub_path":"pprint_json.py","file_name":"pprint_json.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"350874467","text":"import numpy as np\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nimport nltk\nfrom nltk import wordpunct_tokenize\nfrom nltk.stem.snowball import EnglishStemmer\nfrom nltk.corpus import stopwords\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.pipeline import make_pipeline, make_union\nfrom tpot.builtins import StackingEstimator\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.model_selection import train_test_split\n\nvectorizer = TfidfVectorizer(input='content', analyzer='word')\nsvd = TruncatedSVD(n_components=500, n_iter=5, random_state=27)\n\nnltk.download('punkt')\nnltk.download('stopwords')\nstop_words = set(stopwords.words('english'))\n\n#After we use get_text, use nltk's clean_html function.\ndef nltkPipe(soup_text):\n #Convert to tokens\n tokens = [x.lower() for x in wordpunct_tokenize(soup_text)]\n text = nltk.Text(tokens)\n #Get lowercase words. No single letters, and no stop words\n words = [w.lower() for w in text if w.isalpha() and len(w) > 1 and w.lower() not in stop_words]\n #Remove prefix/suffixes to cut down on vocab\n stemmer = EnglishStemmer()\n words_nostems = [stemmer.stem(w) for w in words]\n return words_nostems\n\ndef getTitleTokens(html):\n soup = BeautifulSoup(html,'html.parser')\n soup_title = soup.title\n if soup_title != None:\n soup_title_text = soup.title.get_text()\n text_arr = nltkPipe(soup_title_text)\n return text_arr\n else:\n return []\n \ndef getBodyTokens(html):\n soup = BeautifulSoup(html,'html.parser')\n #Get the text body\n soup_para = soup.find_all('p')\n soup_para_clean = ' '.join([x.get_text() for x in soup_para if x.span==None and x.a==None])\n text_arr = nltkPipe(soup_para_clean)\n return text_arr\n\n#Build the model\ndef get_html(in_df):\n keep_cols = [\"Webpage_id\",\"Tag\"]\n use_df = in_df[keep_cols]\n html_reader_obj = pd.read_csv(data_dir+'html_data.csv',iterator=True, chunksize=10000)\n frames = []\n match_indices = use_df['Webpage_id'].values.tolist()\n print(len(match_indices),' indices left...')\n while len(match_indices) > 0:\n for chunk in html_reader_obj:\n merge_df = pd.merge(use_df,chunk,how='inner',on='Webpage_id')\n merge_indices = merge_df['Webpage_id'].values.tolist()\n match_indices = [x for x in match_indices if x not in merge_indices]\n print(len(match_indices),' indices left...')\n frames.append(merge_df)\n #Process HTMl for bags of words of the body and title.\n process_df = pd.concat(frames)\n print(\"Getting tokens...\")\n title_tokens = process_df['Html'].progress_apply(getTitleTokens)\n body_tokens = process_df['Html'].progress_apply(getBodyTokens)\n process_df['all_tokens'] = title_tokens + body_tokens\n process_df.drop(['Html'],axis=1,inplace=True)\n print(\"Done!\")\n return process_df\n\ndef build_model():\n \"\"\"Return the estimator and the object to transform the test data.\"\"\"\n print(\"Getting HTML tokens\")\n data_dir = \"../data/2018-08-10_AV_Innoplexus/\"\n\n html_data = pd.read_csv(data_dir+'html_data.csv',iterator=True, chunksize=1000)\n \n train_df = pd.read_csv(data_dir+'train.csv')\n \n #Get tokens\n train_df_tokens = get_html(train_df)\n #Fit_transform to tdfif matrix\n train_df_tdif = vectorizer.fit_transform(train_df_tokens['all_tokens'])\n #Prune unneeded features\n svd_array = svd.fit_transform(train_df_tdif)\n \n vector_features = vectorizer.get_feature_names()\n eigen_features = [vector_features[i] for i in svd.components_[0].argsort()[::-1]][:500]\n\n 
train_df_svd = pd.DataFrame(svd_array,columns=eigen_features)\n    train_df_svd['Tag'] = train_df['Tag']\n    \n    tags = train_df_svd['Tag'].unique().tolist()\n    tags.sort()\n\n    tag_dict = {key: value for (key, value) in zip(tags,range(len(tags)))}\n\n    train_df_svd['Tag_encoded'] = train_df_svd['Tag'].map(tag_dict)\n    train_df_svd = train_df_svd.drop('Tag',axis=1)\n    \n    exported_pipeline = make_pipeline(\n        StackingEstimator(\n            estimator=ExtraTreesClassifier(\n                bootstrap=False, criterion=\"gini\", max_features=0.2, \n                min_samples_leaf=11, min_samples_split=17, n_estimators=100)\n        ),\n        ExtraTreesClassifier(\n            bootstrap=False, criterion=\"entropy\", max_features=0.5, \n            min_samples_leaf=6, min_samples_split=9, n_estimators=100\n        )\n    )\n    \n    x_cols = [x for x in train_df_svd.columns if x != \"Tag_encoded\"]\n    X_train, X_test, y_train, y_test = train_test_split(\n        train_df_svd[x_cols],\n        train_df_svd['Tag_encoded'],\n        test_size=0.33\n    )\n    \n    exported_pipeline.fit(X_train, y_train)\n    return exported_pipeline, vectorizer, svd, tag_dict\n\ndef prep_test(vectorizer_obj, svd_obj):\n    \"\"\"Transform test dataset for predicting.\"\"\"\n    test_df = pd.read_csv(data_dir+'test.csv')\n    #Get the HTML\n    test_df_tokens = get_html(test_df)\n    #Transform to tfidf matrix (one string per document, as in training)\n    test_df_tdif = vectorizer_obj.transform(test_df_tokens['all_tokens'].apply(' '.join))\n    #Prune unneeded features\n    test_svd_array = svd_obj.transform(test_df_tdif)\n    \n    vector_features = vectorizer_obj.get_feature_names()\n    eigen_features = [vector_features[i] for i in svd_obj.components_[0].argsort()[::-1]][:500]\n    #Map to dataframe; the test set is unlabeled, so only the feature columns are returned\n    test_df_svd = pd.DataFrame(test_svd_array,columns=eigen_features)\n    return test_df_svd\n\ndef main():\n    #Get the model\n    model, vectorizer_obj, svd_obj, tag_dict = build_model()\n    #Prep the test set\n    test_df = prep_test(vectorizer_obj, svd_obj)\n    predictions = model.predict(test_df)\n    return predictions","sub_path":"2018-08-10_AV_Innoplexus/submission_01.py","file_name":"submission_01.py","file_ext":"py","file_size_in_byte":5726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"69506520","text":"from math import*\r\nx=sqrt(2)\r\na=2\r\npi=2*(a/x)\r\nwhile x<2:\r\n x=(sqrt(2+x))\r\n pi=(pi*a/x)\r\nprint(\"Approximation of pi:\",round(pi,3))\r\nc=eval(input(\"Enter the radius:\\n\"))\r\nprint(\"Area:\",round(c**2*pi,3))","sub_path":"examples/data/Assignment_2/mphnok005/question3.py","file_name":"question3.py","file_ext":"py","file_size_in_byte":207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"103327437","text":"import sys\nimport getopt\nimport cv2 as cv\nimport numpy as np\n\ndef combine(str):\n source=cv.imread(str[0],1)\n hidden=cv.imread(str[1],1)\n if source.shape[0]
\\r\\n' )\n a_url = str(title_list.find('a'))\n f.write(a_url + \"\\n
\\n\")\n rank += 1\n f.write('
New summary</p>"),\n (\"fiction\", \"nonfiction\"),\n ])\n response = self.manager.admin_work_controller.edit(lp.data_source.name, lp.identifier.identifier)\n\n eq_(200, response.status_code)\n eq_(\"New title\", self.english_1.title)\n assert \"New title\" in self.english_1.simple_opds_entry\n eq_(\"Adults Only\", self.english_1.audience)\n assert 'Adults Only' in self.english_1.simple_opds_entry\n eq_(\"<p>New summary</p>\", self.english_1.summary_text)\n assert \"<p>New summary</p>\" in self.english_1.simple_opds_entry\n eq_(False, self.english_1.fiction)\n assert \"Nonfiction\" in self.english_1.simple_opds_entry\n\n with self.app.test_request_context(\"/\"):\n # Change the audience and fiction status again, and add a target age\n flask.request.form = ImmutableMultiDict([\n (\"title\", \"New title\"),\n (\"audience\", \"Young Adult\"),\n (\"summary\", \"<p>New summary</p>\"),\n (\"fiction\", \"fiction\"),\n (\"target_age_min\", 13),\n (\"target_age_max\", 15),\n ])\n response = self.manager.admin_work_controller.edit(lp.data_source.name, lp.identifier.identifier)\n eq_(200, response.status_code)\n eq_(\"Young Adult\", self.english_1.audience)\n assert 'Young Adult' in self.english_1.simple_opds_entry\n assert 'Adults Only' not in self.english_1.simple_opds_entry\n eq_(True, self.english_1.fiction)\n assert \"Fiction\" in self.english_1.simple_opds_entry\n assert \"Nonfiction\" not in self.english_1.simple_opds_entry\n eq_(13, self.english_1.target_age.lower)\n eq_(15, self.english_1.target_age.upper)\n assert \"13-15\" in self.english_1.simple_opds_entry\n\n with self.app.test_request_context(\"/\"):\n # Change the summary again\n flask.request.form = ImmutableMultiDict([\n (\"title\", \"New title\"),\n (\"audience\", \"Young Adult\"),\n (\"summary\", \"abcd\"),\n (\"fiction\", \"fiction\"),\n (\"target_age_min\", 13),\n (\"target_age_max\", 15),\n ])\n response = self.manager.admin_work_controller.edit(lp.data_source.name, lp.identifier.identifier)\n eq_(200, response.status_code)\n eq_(\"abcd\", self.english_1.summary_text)\n assert 'New summary' not in self.english_1.simple_opds_entry\n\n with self.app.test_request_context(\"/\"):\n # Now delete the summary entirely and change the target age again\n flask.request.form = ImmutableMultiDict([\n (\"title\", \"New title\"),\n (\"audience\", \"Young Adult\"),\n (\"summary\", \"\"),\n (\"fiction\", \"fiction\"),\n (\"target_age_min\", 11),\n (\"target_age_max\", 14),\n ])\n response = self.manager.admin_work_controller.edit(lp.data_source.name, lp.identifier.identifier)\n eq_(200, response.status_code)\n eq_(\"\", self.english_1.summary_text)\n assert 'abcd' not in self.english_1.simple_opds_entry\n eq_(11, self.english_1.target_age.lower)\n eq_(14, self.english_1.target_age.upper)\n assert \"11-14\" in self.english_1.simple_opds_entry\n assert \"13-15\" not in self.english_1.simple_opds_entry\n\n with self.app.test_request_context(\"/\"):\n # Change audience and remove target age, so computed target age is based on audience\n flask.request.form = ImmutableMultiDict([\n (\"title\", \"New title\"),\n (\"audience\", \"Adult\"),\n (\"summary\", \"\"),\n (\"fiction\", \"fiction\"),\n ])\n response = self.manager.admin_work_controller.edit(lp.data_source.name, lp.identifier.identifier)\n eq_(200, response.status_code)\n eq_(\"Adult\", self.english_1.audience)\n assert 'Adult' in self.english_1.simple_opds_entry\n assert 'Young Adult' not in self.english_1.simple_opds_entry\n eq_(18, self.english_1.target_age.lower)\n eq_(None, self.english_1.target_age.upper)\n assert \"11-14\" not in self.english_1.simple_opds_entry\n assert \"18\" in self.english_1.simple_opds_entry\n\n def test_edit_invalid_input(self):\n [lp] = self.english_1.license_pools\n with self.app.test_request_context(\"/\"):\n # target age min greater than target age max\n flask.request.form = ImmutableMultiDict([\n (\"target_age_min\", 10),\n (\"target_age_max\", 5),\n ])\n response = self.manager.admin_work_controller.edit(lp.data_source.name, lp.identifier.identifier)\n eq_(400, response.status_code)\n eq_(INVALID_EDIT.uri, response.uri)\n\n def test_update_genres(self):\n # start with a couple genres\n [lp] = self.english_1.license_pools\n genre, ignore = Genre.lookup(self._db, \"Occult Horror\")\n lp.work.genres = [genre]\n\n # change genres\n with self.app.test_request_context(\"/\"):\n requested_genres = [\"Drama\", \"Urban Fantasy\", \"Women's Fiction\"]\n form = MultiDict()\n for genre in requested_genres:\n form.add(\"genres\", genre)\n flask.request.form = form\n response = self.manager.admin_work_controller.update_genres(lp.data_source.name, lp.identifier.identifier)\n\n new_genre_names = [work_genre.genre.name for work_genre in lp.work.work_genres]\n eq_(len(new_genre_names), len(requested_genres))\n for genre in requested_genres:\n eq_(True, genre in new_genre_names)\n\n # remove a genre\n with self.app.test_request_context(\"/\"):\n requested_genres = [\"Drama\", \"Women's Fiction\"]\n form = MultiDict()\n for genre in requested_genres:\n form.add(\"genres\", genre)\n flask.request.form = form\n response = self.manager.admin_work_controller.update_genres(lp.data_source.name, lp.identifier.identifier)\n\n new_genre_names = [work_genre.genre.name for work_genre in lp.work.work_genres]\n eq_(len(new_genre_names), len(requested_genres))\n for genre in requested_genres:\n eq_(True, genre in new_genre_names)\n\n previous_genres = requested_genres\n\n # try to add a nonfiction genre\n with self.app.test_request_context(\"/\"):\n requested_genres = [\"Drama\", \"Women's Fiction\", \"Cooking\"]\n form = MultiDict()\n for genre in requested_genres:\n form.add(\"genres\", genre)\n flask.request.form = form\n response = self.manager.admin_work_controller.update_genres(lp.data_source.name, lp.identifier.identifier)\n\n eq_(response, INCOMPATIBLE_GENRE)\n new_genre_names = [work_genre.genre.name for work_genre in lp.work.work_genres]\n eq_(len(new_genre_names), len(previous_genres))\n for genre in previous_genres:\n eq_(True, genre in new_genre_names)\n\n # try to add a nonexistent genre\n with self.app.test_request_context(\"/\"):\n requested_genres = [\"Drama\", \"Women's Fiction\", \"Epic Military Memoirs\"]\n form = MultiDict()\n for genre in requested_genres:\n form.add(\"genres\", genre)\n flask.request.form = form\n response = self.manager.admin_work_controller.update_genres(lp.data_source.name, lp.identifier.identifier)\n\n eq_(response, GENRE_NOT_FOUND)\n new_genre_names = [work_genre.genre.name for work_genre in lp.work.work_genres]\n eq_(len(new_genre_names), len(previous_genres))\n for genre in previous_genres:\n eq_(True, genre in new_genre_names)\n\n def test_suppress(self):\n [lp] = self.english_1.license_pools\n\n with self.app.test_request_context(\"/\"):\n response = self.manager.admin_work_controller.suppress(lp.data_source.name, lp.identifier.identifier)\n eq_(200, response.status_code)\n eq_(True, lp.suppressed)\n\n def test_unsuppress(self):\n [lp] = self.english_1.license_pools\n lp.suppressed = True\n\n with self.app.test_request_context(\"/\"):\n response = self.manager.admin_work_controller.unsuppress(lp.data_source.name, lp.identifier.identifier)\n eq_(200, response.status_code)\n eq_(False, lp.suppressed)\n\n def test_refresh_metadata(self):\n wrangler = DataSource.lookup(self._db, DataSource.METADATA_WRANGLER)\n success_provider = AlwaysSuccessfulCoverageProvider(\n \"Always successful\", [Identifier.GUTENBERG_ID], wrangler\n )\n failure_provider = NeverSuccessfulCoverageProvider(\n \"Never successful\", [Identifier.GUTENBERG_ID], wrangler\n )\n\n with self.app.test_request_context('/'):\n [lp] = self.english_1.license_pools\n response = self.manager.admin_work_controller.refresh_metadata(\n lp.data_source.name, lp.identifier.identifier, provider=success_provider\n )\n eq_(200, response.status_code)\n # Also, the work has a coverage record now for the wrangler.\n assert CoverageRecord.lookup(lp.identifier, wrangler)\n\n response = self.manager.admin_work_controller.refresh_metadata(\n lp.data_source.name, lp.identifier.identifier, provider=failure_provider\n )\n eq_(METADATA_REFRESH_FAILURE.status_code, response.status_code)\n eq_(METADATA_REFRESH_FAILURE.detail, response.detail)\n\n def test_complaints(self):\n type = iter(Complaint.VALID_TYPES)\n type1 = next(type)\n type2 = next(type)\n\n work = self._work(\n \"fiction work with complaint\",\n language=\"eng\",\n fiction=True,\n with_open_access_download=True)\n complaint1 = self._complaint(\n work.license_pools[0],\n type1,\n \"complaint1 source\",\n \"complaint1 detail\")\n complaint2 = self._complaint(\n work.license_pools[0],\n type1,\n \"complaint2 source\",\n \"complaint2 detail\")\n complaint3 = self._complaint(\n work.license_pools[0],\n type2,\n \"complaint3 source\",\n \"complaint3 detail\")\n\n SessionManager.refresh_materialized_views(self._db)\n [lp] = work.license_pools\n\n with self.app.test_request_context(\"/\"):\n response = self.manager.admin_work_controller.complaints(lp.data_source.name, lp.identifier.identifier)\n eq_(response['book']['data_source'], lp.data_source.name)\n eq_(response['book']['identifier'], lp.identifier.identifier)\n eq_(response['complaints'][type1], 2)\n eq_(response['complaints'][type2], 1)\n\n def test_resolve_complaints(self):\n type = iter(Complaint.VALID_TYPES)\n type1 = next(type)\n type2 = next(type)\n\n work = self._work(\n \"fiction work with complaint\",\n language=\"eng\",\n fiction=True,\n with_open_access_download=True)\n complaint1 = self._complaint(\n work.license_pools[0],\n type1,\n \"complaint1 source\",\n \"complaint1 detail\")\n complaint2 = self._complaint(\n work.license_pools[0],\n type1,\n \"complaint2 source\",\n \"complaint2 detail\")\n \n SessionManager.refresh_materialized_views(self._db)\n [lp] = work.license_pools\n\n # first attempt to resolve complaints of the wrong type\n with self.app.test_request_context(\"/\"):\n flask.request.form = ImmutableMultiDict([(\"type\", type2)])\n response = self.manager.admin_work_controller.resolve_complaints(lp.data_source.name, lp.identifier.identifier)\n unresolved_complaints = [complaint for complaint in lp.complaints if complaint.resolved == None]\n eq_(response.status_code, 404)\n eq_(len(unresolved_complaints), 2)\n\n # then attempt to resolve complaints of the correct type\n with self.app.test_request_context(\"/\"):\n flask.request.form = ImmutableMultiDict([(\"type\", type1)])\n response = self.manager.admin_work_controller.resolve_complaints(lp.data_source.name, lp.identifier.identifier)\n unresolved_complaints = [complaint for complaint in lp.complaints if complaint.resolved == None]\n eq_(response.status_code, 200)\n eq_(len(unresolved_complaints), 0)\n\n # then attempt to resolve the already-resolved complaints of the correct type\n with self.app.test_request_context(\"/\"):\n flask.request.form = ImmutableMultiDict([(\"type\", type1)])\n response = self.manager.admin_work_controller.resolve_complaints(lp.data_source.name, lp.identifier.identifier)\n eq_(response.status_code, 409)\n\n def test_classifications(self):\n e, pool = self._edition(with_license_pool=True)\n work = self._work(primary_edition=e)\n identifier = work.primary_edition.primary_identifier\n genres = self._db.query(Genre).all()\n subject1 = self._subject(type=\"type1\", identifier=\"subject1\")\n subject1.genre = genres[0]\n subject2 = self._subject(type=\"type2\", identifier=\"subject2\")\n subject2.genre = genres[1]\n subject3 = self._subject(type=\"type2\", identifier=\"subject3\")\n subject3.genre = None\n source = DataSource.lookup(self._db, DataSource.AXIS_360)\n classification1 = self._classification(\n identifier=identifier, subject=subject1, \n data_source=source, weight=1)\n classification2 = self._classification(\n identifier=identifier, subject=subject2, \n data_source=source, weight=2)\n classification3 = self._classification(\n identifier=identifier, subject=subject3, \n data_source=source, weight=1.5)\n\n SessionManager.refresh_materialized_views(self._db)\n [lp] = work.license_pools\n\n # first attempt to resolve complaints of the wrong type\n with self.app.test_request_context(\"/\"):\n response = self.manager.admin_work_controller.classifications(\n lp.data_source.name, lp.identifier.identifier)\n \n eq_(response['book']['data_source'], lp.data_source.name)\n eq_(response['book']['identifier'], lp.identifier.identifier)\n eq_(len(response['classifications']), 2)\n eq_(response['classifications'][0]['name'], subject2.identifier)\n eq_(response['classifications'][0]['type'], subject2.type)\n eq_(response['classifications'][0]['source'], source.name)\n eq_(response['classifications'][0]['weight'], classification2.weight)\n eq_(response['classifications'][1]['name'], subject1.identifier)\n eq_(response['classifications'][1]['type'], subject1.type)\n eq_(response['classifications'][1]['source'], source.name)\n eq_(response['classifications'][1]['weight'], classification1.weight)\n\n\nclass TestSignInController(AdminControllerTest):\n\n def setup(self):\n super(TestSignInController, self).setup()\n self.admin, ignore = create(\n self._db, Admin, email=u'example@nypl.org', access_token=u'abc123',\n credential=json.dumps({\n u'access_token': u'abc123',\n u'client_id': u'', u'client_secret': u'',\n u'refresh_token': u'', u'token_expiry': u'', u'token_uri': u'',\n u'user_agent': u'', u'invalid': u''\n })\n )\n\n def test_authenticated_admin_from_request(self):\n with self.app.test_request_context('/admin'):\n flask.session['admin_access_token'] = self.admin.access_token\n response = self.manager.admin_sign_in_controller.authenticated_admin_from_request()\n eq_(self.admin, response)\n\n # Returns an error if you aren't authenticated.\n with self.app.test_request_context('/admin'):\n # You get back a problem detail when you're not authenticated.\n response = self.manager.admin_sign_in_controller.authenticated_admin_from_request()\n eq_(401, response.status_code)\n eq_(INVALID_ADMIN_CREDENTIALS.detail, response.detail)\n\n def test_authenticated_admin(self):\n # Creates a new admin with fresh details.\n new_admin_details = {\n 'email' : u'admin@nypl.org',\n 'access_token' : u'tubular',\n 'credentials' : u'gnarly',\n }\n admin = self.manager.admin_sign_in_controller.authenticated_admin(new_admin_details)\n eq_('admin@nypl.org', admin.email)\n eq_('tubular', admin.access_token)\n eq_('gnarly', admin.credential)\n\n # Or overwrites credentials for an existing admin.\n existing_admin_details = {\n 'email' : u'example@nypl.org',\n 'access_token' : u'bananas',\n 'credentials' : u'b-a-n-a-n-a-s',\n }\n admin = self.manager.admin_sign_in_controller.authenticated_admin(existing_admin_details)\n eq_(self.admin.id, admin.id)\n eq_('bananas', self.admin.access_token)\n eq_('b-a-n-a-n-a-s', self.admin.credential)\n\n def test_admin_signin(self):\n with self.app.test_request_context('/admin/sign_in?redirect=foo'):\n flask.session['admin_access_token'] = self.admin.access_token\n response = self.manager.admin_sign_in_controller.sign_in()\n eq_(302, response.status_code)\n eq_(\"foo\", response.headers[\"Location\"])\n\n def test_staff_email(self):\n with temp_config() as config:\n config[Configuration.POLICIES] = {\n Configuration.ADMIN_AUTH_DOMAIN : \"alibrary.org\"\n }\n with self.app.test_request_context('/admin/sign_in'):\n staff_email = self.manager.admin_sign_in_controller.staff_email(\"working@alibrary.org\")\n interloper_email = self.manager.admin_sign_in_controller.staff_email(\"rando@gmail.com\")\n eq_(True, staff_email)\n eq_(False, interloper_email)\n\n\nclass TestFeedController(AdminControllerTest):\n\n def test_complaints(self):\n type = iter(Complaint.VALID_TYPES)\n type1 = next(type)\n type2 = next(type)\n \n work1 = self._work(\n \"fiction work with complaint 1\",\n language=\"eng\",\n fiction=True,\n with_open_access_download=True)\n complaint1 = self._complaint(\n work1.license_pools[0],\n type1,\n \"complaint source 1\",\n \"complaint detail 1\")\n complaint2 = self._complaint(\n work1.license_pools[0],\n type2,\n \"complaint source 2\",\n \"complaint detail 2\")\n work2 = self._work(\n \"nonfiction work with complaint\",\n language=\"eng\",\n fiction=False,\n with_open_access_download=True)\n complaint3 = self._complaint(\n work2.license_pools[0],\n type1,\n \"complaint source 3\",\n \"complaint detail 3\")\n\n SessionManager.refresh_materialized_views(self._db)\n with self.app.test_request_context(\"/\"):\n response = self.manager.admin_feed_controller.complaints()\n feed = feedparser.parse(response.data)\n entries = feed['entries']\n\n eq_(len(entries), 2)\n\n def test_suppressed(self):\n suppressed_work = self._work(with_open_access_download=True)\n suppressed_work.license_pools[0].suppressed = True\n\n unsuppressed_work = self._work()\n\n SessionManager.refresh_materialized_views(self._db)\n with self.app.test_request_context(\"/\"):\n response = self.manager.admin_feed_controller.suppressed()\n feed = feedparser.parse(response.data)\n entries = feed['entries']\n eq_(1, len(entries))\n eq_(suppressed_work.title, entries[0]['title'])\n\n def test_genres(self):\n with self.app.test_request_context(\"/\"):\n response = self.manager.admin_feed_controller.genres()\n \n for name in genres:\n top = \"Fiction\" if genres[name].is_fiction else \"Nonfiction\"\n eq_(response[top][name], dict({\n \"name\": name,\n \"parents\": [parent.name for parent in genres[name].parents],\n \"subgenres\": [subgenre.name for subgenre in genres[name].subgenres]\n })) \n","sub_path":"tests/admin/test_controller.py","file_name":"test_controller.py","file_ext":"py","file_size_in_byte":23980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"394980027","text":"#!/usr/bin/env python\n\nimport asyncio\nimport websockets\nimport json\nimport csv\nimport time\nimport logging\n\nwssname = \"wss://push.planetside2.com/streaming?environment=ps2&service-id=s:19920214\"\nregEventMsg = '{\"service\":\"event\",\"action\":\"subscribe\",\"worlds\":[\"all\"],\"eventNames\":[\"PlayerLogin\",\"PlayerLogout\"]}'\nfieldnames = ['character_id', 'event_name', 'timestamp', 'world_id']\nmaxRowsPerFile = 100\n\n\ndef validate_login_response(message):\n data = json.loads(message)\n if 1 == 1 and data.get(\"service\") == \"event\" and data.get(\"type\") == \"serviceMessage\":\n return True\n else:\n return False\n\n\ndef get_loginfo_params(message):\n data = json.loads(message)\n payload = data[\"payload\"]\n character_id = payload[\"character_id\"]\n event_name = payload[\"event_name\"]\n if event_name == \"PlayerLogin\":\n event_name = 1\n elif event_name == \"PlayerLogout\":\n event_name = 0\n timestamp = payload[\"timestamp\"]\n world_id = payload[\"world_id\"]\n return character_id, event_name, timestamp, world_id\n\n\ndef create_new_file_writer(file):\n if file is not None:\n file.close()\n file = open('events_' + str(int(time.time())) + '.csv', 'w', newline='')\n return file\n\n\n@asyncio.coroutine\ndef start_stream():\n with open('myfile.txt', 'a') as f:\n f.write('asd5\\n')\n eventCnt = 0\n websocket = yield from websockets.connect(wssname)\n with open('myfile.txt', 'a') as f:\n f.write('asd6\\n')\n try:\n with open('myfile.txt', 'a') as f:\n f.write('asd7\\n')\n yield from websocket.send(regEventMsg)\n with open('myfile.txt', 'a') as f:\n f.write('asd8\\n')\n message = yield from websocket.recv()\n file = create_new_file_writer(None)\n writer = csv.DictWriter(file, fieldnames=fieldnames)\n writer.writeheader()\n with open('myfile.txt', 'a') as f:\n f.write('asd9\\n')\n while message:\n if validate_login_response(message):\n character_id, event_name, timestamp, world_id = get_loginfo_params(message)\n if eventCnt % 10 == 0:\n print(eventCnt, character_id, event_name, timestamp, world_id)\n writer.writerow({'character_id': character_id, 'event_name': event_name, 'timestamp': timestamp,\n 'world_id': world_id})\n eventCnt += 1\n if eventCnt % maxRowsPerFile == 0:\n file = create_new_file_writer(file)\n writer = csv.DictWriter(file, fieldnames=fieldnames)\n writer.writeheader()\n else:\n pass\n message = yield from websocket.recv()\n finally:\n with open('myfile.txt', 'a') as f:\n f.write('asd10\\n')\n yield from websocket.close()\n\n\n# start_stream()\nlogging.basicConfig(filename='example.log',level=logging.NOTSET)\nlogger = logging.getLogger('websockets.server')\nlogger.setLevel(logging.NOTSET)\nlogger.addHandler(logging.StreamHandler())\nasyncio.get_event_loop().run_until_complete(start_stream())\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"210867778","text":"import xml.etree.ElementTree as ET\nfrom urllib import request, parse\nfrom copy import copy\n\n\ndef soap_request(url, data):\n req = request.Request(url, data=data.encode('utf-8'),\n headers={'content-type': 'text/xml;charset=utf-8'}, method='POST')\n rep = request.urlopen(req)\n return xml_to_dict(ET.fromstring(rep.read().decode('utf-8'))) if rep.getcode() == 200 else None\n\n\ndef url_decode(url):\n return parse.unquote(url)\n\n\ndef strip_tag_name(t):\n idx = t.rfind(\"}\")\n if idx != -1:\n t = t[idx + 1:]\n return t\n\n\ndef xml_to_dict(r, root=True):\n if root:\n return {strip_tag_name(r.tag): xml_to_dict(r, False)}\n d = copy(r.attrib)\n if r.text:\n d['text'] = r.text\n for x in r.findall(\"./*\"):\n if x.tag not in d:\n d[strip_tag_name(x.tag)] = []\n d[strip_tag_name(x.tag)].append(xml_to_dict(x, False))\n return d\n\n\n","sub_path":"soaper/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"251624642","text":"\"\"\"\n This file is part of gempy.\n\n gempy is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n gempy is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with gempy. If not, see <http://www.gnu.org/licenses/>.
" + " | " + plans_start[i] + " | " + plans_end[i] + " | " + "" + "\\n"\n\n text = text + "\\n" + Constants.BotInfo.BOT_USERNAME\n\n return text\n\n @staticmethod\n def get_channels_keyboard():\n keyboard =[\n [{"text": Constants.KeyboardButtons.KEYBOARD_BACK}],\n [{"text": "شبکه سه"}, {"text": "شبکه دو"}, {"text": "شبکه یک"}],\n [{"text": "شبکه خبر"}, {"text": "شبکه تهران"}, {"text": "شبکه چهار"}],\n [{"text": "شبکه مستند"}, {"text": "شبکه قرآن"}, {"text": "شبکه آموزش"}],\n [{"text": "شبکه ورزش"}, {"text": "شبکه افق"}, {"text": "شبکه نمایش"}],\n [{"text": "شبکه نسیم"}, {"text": "شبکه سلامت"}, {"text": "شبکه پویا"}],\n [{"text": "شبکه تماشا"}, {"text": "شبکه امید"}]\n ]\n reply_keyboard_markup = {\n "keyboard": keyboard,\n "resize_keyboard": True,\n "one_time_keyboard": True\n }\n\n return reply_keyboard_markup\n","sub_path":"Model/TvPlans.py","file_name":"TvPlans.py","file_ext":"py","file_size_in_byte":4935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"457252473","text":"#打印功能提示\nprint(\"*\"*50)\nprint(\" 名片管理系统v0.1\")\nprint(\"1.新增一个名片\")\nprint(\"2.删除一个名片\")\nprint(\"3.修改一个名片\")\nprint(\"4.查询一个名片\")\nprint(\"5.显示所有名片\")\nprint(\"6.退出系统\")\nprint(\"*\"*50)\n\ncard_infors=[]#存储名片字典\nwhile True:\n#获取用户输入\n num =int(input(\"请输入你要操作的编号:\"))\n#根据用户数据执行对应操作\n if num==1:\n new_name=input(\"请输入名字:\")\n new_qq = input(\"请输入qq:\")\n new_phone = input(\"请输入手机号:\")\n new_addr = input(\"请输入地址:\")\n new_infors={}#定义字典来存储新增名片信息\n new_infors[\"name\"]=new_name\n new_infors[\"qq\"]=new_qq\n new_infors[\"phone\"]=new_phone\n new_infors[\"addr\"]=new_addr\n card_infors.append(new_infors)#将新增名片字典添加到列表中\n print(card_infors)#for test 显示增加结果\n\n elif num==2:\n del_flag=0\n del_name=input(\"请输入想要删除的名字:\")\n for temp in card_infors:\n if del_name==temp[\"name\"]:\n card_infors.remove(temp)\n print(\"删除成功\")\n print(card_infors) # for test 显示删除结果\n del_flag=1\n break\n if del_flag==0:\n print(\"没有该名字,删除失败!\")\n\n\n\n elif num==3:\n cha_flag = 0\n cha_name = input(\"请输入想要修改的名字:\")\n for temp in card_infors:\n if cha_name == temp[\"name\"]:\n temp[\"name\"] = input(\"名字要改成什么?:\")\n temp[\"qq\"] = input(\"qq要改成什么?:\")\n temp[\"phone\"] = input(\"手机号要改成什么?:\")\n temp[\"addr\"] = input(\"地址要改成什么?:\")\n print(card_infors) # for test 显示修改结果\n cha_flag = 1\n break\n if cha_flag == 0:\n print(\"没有该名字,无法修改!\")\n\n\n\n elif num==4:\n find_flag=0\n find_name=input(\"请输入要查询的名字:\")\n for temp in card_infors:\n if find_name==temp[\"name\"]:\n print(\"%s\\t%s\\t%s\\t%s\\t\"%(temp[\"name\"],temp[\"qq\"],temp[\"phone\"],temp[\"addr\"]))\n find_flag=1\n break\n if find_flag==0:\n print(\"查无此人!\")\n\n\n elif num==5:\n for temp in card_infors:\n print(\"名字:%s\\tQQ:%s\\t手机号:%s\\t住址:%s\"%(temp[\"name\"],temp[\"qq\"],temp[\"phone\"],temp[\"addr\"]))\n\n elif num==6:\n exit()\n else:\n print(\"输入有误!\")","sub_path":"py3/名片管理系统.py","file_name":"名片管理系统.py","file_ext":"py","file_size_in_byte":2838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"434700355","text":"import sys\nimport time\nimport math\n\n#Set the maximum search range\nsearchRange = int(sys.argv[1])\n\n# Set up the boolean list (index corresponds to the number)\nnumberList = []\nfor i in range(searchRange):\n numberList.append(True)\n \nnumberList[0] = False\nnumberList[1] = False\n\n# Start measuring algorithm performance\nT1 = time.perf_counter()\n\nfor p in range(2, int(math.ceil(math.sqrt(searchRange)))):\n if(numberList[p]):\n j = p*p\n while(j < searchRange):\n numberList[j] = False\n j += p\n\n# The algorithm has finished, stop the timer\nT2 = time.perf_counter()\n\nprimeCounter = 0\n# Write all found primes to disk\nfirstPrimeWritten = False\nfout = open(sys.argv[2], 'w')\nfor i in range(searchRange):\n if(numberList[i]):\n if(firstPrimeWritten):\n fout.write('\\n')\n fout.write(str(i))\n firstPrimeWritten = True\n primeCounter += 1\n\nprint('Found', str(primeCounter), 'Prime numbers smaller than', str(searchRange), 'in', T2 - T1,'sec.')","sub_path":"Opdracht2/Sieve_of_E.py","file_name":"Sieve_of_E.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"500451896","text":"# Copyright (c) 2015, Daniele Venzano\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport threading\n\nfrom zoe_lib.sql_manager import Execution\n\nfrom zoe_master.exceptions import ZoeStartExecutionFatalException, ZoeStartExecutionRetryException\nfrom zoe_master.zapp_to_docker import execution_to_containers, terminate_execution\n\nlog = logging.getLogger(__name__)\n\n\nclass ZoeScheduler:\n def __init__(self):\n self.fifo_queue = []\n self.trigger_semaphore = threading.Semaphore(0)\n self.async_threads = []\n self.loop_quit = False\n self.loop_th = threading.Thread(target=self.loop_start_th, name='scheduler')\n self.loop_th.start()\n\n def trigger(self):\n self.trigger_semaphore.release()\n\n def incoming(self, execution: Execution):\n \"\"\"\n This method adds the execution to the end of the FIFO queue.\n :param execution: The execution\n :return:\n \"\"\"\n self.fifo_queue.append(execution)\n self.trigger()\n\n def terminate(self, execution: Execution) -> None:\n \"\"\"\n Inform the master that an execution has been terminated. This can be done asynchronously.\n :param execution: the terminated execution\n :return: None\n \"\"\"\n def async_termination():\n terminate_execution(execution)\n self.trigger()\n\n try:\n self.fifo_queue.remove(execution)\n except ValueError:\n pass\n th = threading.Thread(target=async_termination, name='termination_{}'.format(execution.id))\n th.start()\n self.async_threads.append(th)\n\n def remove_execution(self, execution: Execution):\n try:\n self.fifo_queue.remove(execution)\n except ValueError:\n pass\n\n def loop_start_th(self):\n while True:\n ret = self.trigger_semaphore.acquire()\n if not ret: # Semaphore timeout, do some thread cleanup\n counter = len(self.async_threads)\n while counter > 0:\n if len(self.async_threads) == 0:\n break\n th = self.async_threads.pop(0)\n th.join(0.1)\n if th.isAlive(): # join failed\n self.async_threads.append(th)\n counter -= 1\n continue\n if self.loop_quit:\n break\n\n log.debug(\"Scheduler start loop has been triggered\")\n if len(self.fifo_queue) == 0:\n continue\n\n e = self.fifo_queue[0]\n assert isinstance(e, Execution)\n e.set_starting()\n self.fifo_queue.pop(0) # remove the execution form the queue\n\n try:\n execution_to_containers(e)\n except ZoeStartExecutionRetryException as ex:\n log.warning('Temporary failure starting execution {}: {}'.format(e.id, ex.message))\n e.set_error_message(ex.message)\n terminate_execution(e)\n e.set_scheduled()\n self.fifo_queue.append(e)\n except ZoeStartExecutionFatalException as ex:\n log.error('Fatal error trying to start execution {}: {}'.format(e.id, ex.message))\n e.set_error_message(ex.message)\n terminate_execution(e)\n e.set_error()\n else:\n e.set_running()\n\n def quit(self):\n self.loop_quit = True\n self.trigger()\n 
self.loop_th.join()\n","sub_path":"zoe_master/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":4050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"21192823","text":"import urlparse\nimport jinja2\nimport cgi\nimport time\nimport StringIO\nimport Cookie\nimport sqlite3\nfrom app import render_template, fileData\n\ndef image_app(environ, start_response):\n method = environ['REQUEST_METHOD']\n path = environ['PATH_INFO']\n query_string = environ['QUERY_STRING']\n redirect = False\n\n # Set up jinja2\n loader = jinja2.FileSystemLoader('./templates')\n env = jinja2.Environment(loader=loader)\n\n vars = dict()\n if path == '/image_upload':\n # Get content\n content_type = environ['CONTENT_TYPE']\n content_length = int(environ['CONTENT_LENGTH'])\n content = environ['wsgi.input'].read(content_length)\n headers = {}\n for key, val in environ.iteritems():\n headers[key.lower().replace('_', '-')] = val\n fs = cgi.FieldStorage(fp=StringIO.StringIO(content),\n headers = headers, environ = environ)\n filename = fs['file'].filename\n image_type = filename.split('.')[-1]\n\n if image_type in {'png', 'jpg', 'tiff', 'jpeg'}:\n\n # connect to the already existing database\n db = sqlite3.connect('images.sqlite')\n\n # configure to allow binary insertions\n db.text_factory = bytes\n\n # data to be inserted into database\n r = fs['file'].value\n f = fs['file'].filename\n d = fs['description'].value\n u = 1\n\n # insert\n db.execute('INSERT INTO image_store (image, name, description, user_id) VALUES (?, ?, ?, ?)', (r, f, d, u))\n db.commit()\n \n start_response('302 Moved Temporarily',\n [('Content-type', 'text/plain'),\n ('Location', '/')])\n redirect = True\n if path == '/':\n db = sqlite3.connect('images.sqlite')\n\n # configure to retrieve bytes, not text\n db.text_factory = bytes\n\n # get a query handle (or \"cursor\")\n c = db.cursor()\n\n # select all of the images\n c.execute('SELECT iid, name, description, user_id FROM image_store ORDER BY iid DESC LIMIT 1')\n iid, name, description, user_id = c.fetchone()\n\n vars['name'] = name\n vars['description'] = description\n vars['time'] = time.time()\n start_response('200 OK', [('Content-type', 'text/html')])\n ret = render_template(env, 'imageapp.html', vars)\n\n elif path == 'imageapp_list':\n db = sqlite3.connect('images.sqlite')\n db.text_factory = bytes\n c = db.cursor()\n c.execute('SELECT iid, name, description, user_id FROM image_store ORDER BY iid DESC LIMIT 1')\n iid, name, description, user_id = c.fetchone()\n vars['iid'] = iid\n vars['name'] = name\n vars['description'] = description\n vars['time'] = time.time()\n start_response('200 OK', [('Content-type', 'text/html')])\n ret = render_template(env, 'imageapp.html', vars)\n elif path.startswith('/latest_image'):\n db = sqlite3.connect('images.sqlite')\n\n # configure to retrieve bytes, not text\n db.text_factory = bytes\n\n # get a query handle (or \"cursor\")\n c = db.cursor()\n\n # select all of the images\n c.execute('SELECT iid FROM image_store ORDER BY iid DESC LIMIT 1')\n iid, = c.fetchone()\n start_response('302 Moved Temporarily',\n [('Content-type', 'text/plain'),\n ('Location', '/image_raw/' + str(iid))])\n\n redirect = True\n \n elif path.startswith('/image_raw/'):\n image_no = path.lstrip('/image_raw/')\n db = sqlite3.connect('images.sqlite')\n\n # configure to retrieve bytes, not text\n db.text_factory = bytes\n\n # get a query handle (or \"cursor\")\n c = db.cursor()\n\n # select image with iid=image_no\n c.execute('SELECT image, name FROM image_store WHERE iid=?', (image_no,))\n\n image, name = c.fetchone()\n\n start_response('200 OK', [('Content-type',\n 'image/' + name.split('.')[-1])])\n ret = image\n\n else:\n 
start_response('200 OK', [('Content-type', 'text/html')])\n ret = render_template(env, '404.html', vars)\n\n # Needs to be a single-entry list for wsgi compliance\n if redirect:\n return ['']\n else:\n return [ret]\n\ndef make_image_app():\n return image_app\n","sub_path":"imageapp.py","file_name":"imageapp.py","file_ext":"py","file_size_in_byte":4430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"555871238","text":"import urllib\nimport urllib2\nimport httplib, mimetypes\n\ndef post_multipart(host, selector, fields, files):\n content_type, body = encode_multipart_formdata(fields, files)\n h = httplib.HTTPConnection(host)\n headers = {\n 'User-Agent': 'INSERT USERAGENTNAME',\n 'Content-Type': content_type\n }\n h.request('POST', selector, body, headers)\n res = h.getresponse()\n return res.status, res.reason, res.read()\n\ndef encode_multipart_formdata(fields, files):\n \"\"\"\n fields is a sequence of (name, value) elements for regular form fields.\n files is a sequence of (name, filename, value) elements for data to be uploaded as files\n Return (content_type, body) ready for httplib.HTTP instance\n \"\"\"\n BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'\n CRLF = '\\r\\n'\n L = []\n for (key, value) in fields:\n L.append('--' + BOUNDARY)\n L.append('Content-Disposition: form-data; name=\"%s\"' % key)\n L.append('')\n L.append(value)\n for (key, filename, value) in files:\n L.append('--' + BOUNDARY)\n L.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (key, filename))\n L.append('Content-Type: %s' % get_content_type(filename))\n L.append('')\n L.append(value)\n L.append('--' + BOUNDARY + '--')\n L.append('')\n body = CRLF.join(L)\n content_type = 'multipart/form-data; boundary=%s' % BOUNDARY\n return content_type, body\n\ndef get_content_type(filename):\n return mimetypes.guess_type(filename)[0] or 'application/octet-stream'\n\n\n\nclass MediaDBClient(object):\n server = 'http://localhost:8080/server'\n\n def isServerAlive(self):\n try:\n return urllib2.urlopen(\"http://localhost:8080/server\",\n data = urllib.urlencode({'action':'getstatus'})).read() == \"OK\"\n except urllib2.URLError:\n return False\n\n def addImage(self, name, content):\n data = urllib.urlencode({'action':'addimage', 'name':name})\n length = len(content)\n request = urllib2.Request(url, data=png_data)\n\n \n\n","sub_path":"client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"415078295","text":"def repo_hash(repo):\n return {\n \"id\": repo.id,\n \"full_name\": repo.full_name,\n \"owner\": repo.owner.login,\n \"name\": repo.name,\n \"ssh_url\": repo.ssh_url,\n \"private\": repo.private,\n \"fork\": repo.fork,\n \"html_url\": repo.html_url\n }\n","sub_path":"chamberlain/repo.py","file_name":"repo.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"636656158","text":"import matplotlib.pyplot as plt\nimport os\n\nplotname = 'mnist_ann'\n\n# ------------- Plot Setup ------------- #\nplt.ylim(0, 100)\n# plt.xlim(0, 1000)\nplt.title('ANN Training on MNIST dataset', fontsize='large', fontweight='bold')\nplt.xlabel('Trained batches', fontsize='large', fontweight='bold')\nplt.ylabel('Accuracy (%)', fontsize='large', fontweight='bold')\nplt.grid()\n# ------------- Plot Setup ------------- #\n\ndata = []\nwith open('MNIST_ANN_Results_3.txt') as f:\n data = f.readlines()\n\ndata = [[float(y) for y in x.strip().split(',')] for x in data]\n\nplt.plot([x[0] for x in data], [x[1]*100 for x in data], color='b', linewidth=1.5, label='Current batch')\nplt.plot([x[0] for x in data], [x[2]*100 for x in data], color='r', linewidth=1.5, label='Validation set')\n\n# ------------- Save and show plot ------------- #\nplt.legend(prop=dict(weight='bold', size='large'))\nplt.savefig(os.path.dirname(os.path.abspath(__file__)) + '/figures/' + plotname + '.png', bbox_inches='tight', dpi=300)\nplt.show()\n# ------------- Save and show plot ------------- #","sub_path":"Plotting/Results/MNIST_ANN_Results.py","file_name":"MNIST_ANN_Results.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"128999394","text":"import os\nimport json\nimport argparse\nimport numpy as np\nfrom factor import Factor\n\nPROJECT_DIR = os.path.abspath(os.path.dirname(__file__))\nDATA_DIR = os.path.join(PROJECT_DIR, 'data')\nANSWER_DIR = os.path.join(DATA_DIR, 'ground-truth')\nPREDICTION_DIR = os.path.join(DATA_DIR, 'predictions')\n\n\ndef parse_json(json_file: str):\n with open(json_file, 'r') as f:\n factor = json.load(f)\n factor = Factor(var=factor['var'], card=factor['card'], val=factor['val'])\n return factor\n\n\ndef check_answers(case: int, tolerance_decimal: int = 1):\n prediction_file = os.path.join(PREDICTION_DIR, '{}.json'.format(case))\n answer_file = os.path.join(ANSWER_DIR, '{}.json'.format(case))\n predictions = parse_json(json_file=prediction_file)\n answers = parse_json(json_file=answer_file)\n\n np.testing.assert_equal(actual=predictions.var, desired=answers.var)\n np.testing.assert_equal(actual=predictions.card, desired=answers.card)\n np.testing.assert_almost_equal(actual=predictions.val, desired=answers.val, decimal=tolerance_decimal)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--case', type=int, required=True)\n args = parser.parse_args()\n check_answers(case=args.case)\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"lab4/part2/check_answers.py","file_name":"check_answers.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"572979160","text":"from flask import Flask, render_template, jsonify, redirect\nfrom flask_pymongo import PyMongo\nimport scrape_mars\n\napp = Flask(__name__)\n\nmongo = PyMongo(app)\n\n\n@app.route(\"/\")\ndef index():\n listings = mongo.db.marsNews.find()\n return render_template(\"index.html\", listings=listings)\n\n@app.route(\"/clear\")\ndef clear():\n result = mongo.db.marsNews.delete_many({})\n return redirect(\"http://127.0.0.1:5000/\", code=302)\n\n@app.route(\"/scrape\")\ndef scrape():\n # listings = mongo.db.marsNews\n listings_data = scrape_mars.scrape() \n return redirect(\"http://127.0.0.1:5000/\", code=302)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"63664530","text":"from tkinter import *\nfrom tkinter import colorchooser\n\nwin = Tk()\nwin.title('hello world')\nwin.geometry('400x400')\nwin.resizable(False, False)\n#mylable1 = Label(win, text='1 label hhh')\n#mylable1.pack()\n#mylable2 = Label(win, text='2 lable kkk')\n#mylable3 = Label(win, text='3 lable jjj')\n#mylable1.grid(row=0, column=0)\n#mylable2.grid(row=1, column=1)\n#mylable3.grid(row=0, column=2)\n\n#colour = colorchooser.askcolor()\n#print(colour)\n\n#def onclick():\n# mylable = Label(win, text=f'hello {input_field.get()}')\n# mylable.pack()\n\n\ndef get_squares(number):\n input_field.delete(0, END)\n input_field.insert(0, int(number) ** 2)\n\ninput_field = Entry(win, width=50, fg='red', bg='blue', borderwidth=5)\ninput_field.pack()\n\n\nmyButton = Button(win, text='click me', padx=50, pady=50, fg='#FF0000', bg='#00ff00',\n command=lambda: get_squares(input_field.get()))\nmyButton.pack()\n\nwin.mainloop()","sub_path":"043_tkinter.py","file_name":"043_tkinter.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"583869792","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jan 18 09:34:30 2021\r\n\r\n@author: Bichhh\r\n\"\"\"\r\n\r\nimport json \r\n\r\n\r\n#%% DESERIALISED EXAMPLE\r\nwith open(\"data_file.json\", \"r\") as read_file:\r\n data = json.load(read_file)\r\n\r\n\r\n\r\n#%%\r\n\r\nf = open('nameofdocument.json')\r\ndata = json.load(f)\r\n\r\nfor i in data['emp_details']:\r\n print(i)\r\n\r\nf.close()\r\n\r\n\r\n#%% JSON TO ARRAY\r\n\r\nimport json\r\n\r\ninput_file = open ('stores-small.json')\r\njson_array = json.load(input_file)\r\nstore_list = []\r\n\r\nfor item in json_array:\r\n store_details = {\"name\":None, \"city\":None}\r\n store_details['name'] = item['name']\r\n store_details['city'] = item['city']\r\n store_list.append(store_details)\r\n\r\nprint(store_list)","sub_path":"Tnut/JSONimport.py","file_name":"JSONimport.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"421634100","text":"import sublime\nimport re\nimport os.path\nimport json\nimport glob\n\n##\n# Finds the closest .ml-sublime-options file in the hierarchy and loads it\n##\nclass MlOptions():\n\t__cached_options_file = None\n\n\tdef read_file_contents(self, file_name):\n\t\twith open(file_name, \"r\") as myfile:\n\t\t\tfile_contents = myfile.read()\n\t\treturn file_contents\n\n\tdef write_file_contents(self, file_name, contents):\n\t\twith open(file_name, \"w\") as myfile:\n\t\t\tmyfile.write(contents)\n\n\tdef find_options_file(self):\n\t\t# walk up the tree until our path matches one in the above array\n\t\tcurrent_filename = sublime.active_window().active_view().file_name()\n\t\tif current_filename:\n\t\t\tpwd = os.path.dirname(current_filename)\n\t\t\tlast_dir = ''\n\t\t\tcount = 0\n\t\t\twhile pwd != last_dir and count < 50:\n\t\t\t\ttest_path = os.path.join(pwd, '.ml-sublime-options')\n\t\t\t\tif os.path.exists(test_path):\n\t\t\t\t\treturn test_path\n\t\t\t\tlast_dir = pwd\n\t\t\t\tpwd = os.path.dirname(pwd)\n\t\t\t\tcount = count + 1\n\t\telse:\n\t\t\tpaths = []\n\n\t\t\t# find all the directories with options files\n\t\t\tfor folder in sublime.active_window().folders():\n\t\t\t\tfor dirname, dirnames, filenames in os.walk(folder, topdown=True):\n\t\t\t\t\tif '.ml-sublime-options' in filenames:\n\t\t\t\t\t\tpaths.append(dirname)\n\n\t\t\tpaths = sorted(paths)\n\t\t\tif len(paths) > 0:\n\t\t\t\treturn os.path.join(paths[0], '.ml-sublime-options')\n\n\t\treturn None\n\n\tdef get_pref(self, key):\n\t\tif self.options and key in self.options:\n\t\t\treturn self.options[key]\n\t\treturn None\n\n\tdef get_sub_pref(self, key, sub_key):\n\t\tif self.options:\n\t\t\tif key in self.options:\n\t\t\t\tif sub_key in self.options[key]:\n\t\t\t\t\treturn self.options[key][sub_key]\n\t\treturn None\n\n\tdef set_sub_pref(self, key, sub_key, value):\n\t\tif self.options:\n\t\t\tif key in self.options:\n\t\t\t\tself.options[key][sub_key] = value\n\t\t\t\tself.write_file_contents(self._options_file, json.dumps(self.options, sort_keys=True, indent=4))\n\n\tdef has_key(self, key):\n\t\treturn self.options and key in self.options\n\n\tdef has_subkey(self, key, sub_key):\n\t\treturn self.options and key in self.options and sub_key in self.options[key]\n\n\tdef options_file(self):\n\t\treturn self._options_file\n\n\tdef __init__(self, options_file = None):\n\t\tself.options = None\n\n\t\tif options_file:\n\t\t\tself._options_file = options_file\n\t\telif (MlOptions.__cached_options_file and os.path.exists(MlOptions.__cached_options_file)):\n\t\t\tself._options_file = MlOptions.__cached_options_file\n\t\telse:\n\t\t\tself._options_file = self.find_options_file()\n\t\t\tMlOptions.__cached_options_file = self._options_file\n\n\t\tif self._options_file:\n\t\t\tis_match = re.match(r\"^Packages.*\", self._options_file)\n\t\t\tif (is_match != None):\n\t\t\t\ttry:\n\t\t\t\t\tif hasattr(sublime, 'load_resource'):\n\t\t\t\t\t\tcontent = sublime.load_resource(self._options_file)\n\t\t\t\t\telse:\n\t\t\t\t\t\tself._options_file = os.path.join(sublime.packages_path(), self._options_file[9:])\n\t\t\t\t\t\tcontent = self.read_file_contents(self._options_file)\n\t\t\t\texcept OSError as e:\n\t\t\t\t\tself._options_file = os.path.join(sublime.packages_path(), self._options_file[9:])\n\t\t\t\t\tcontent = self.read_file_contents(self._options_file)\n\t\t\telse:\n\t\t\t\tcontent = self.read_file_contents(self._options_file)\n\t\t\t# remove any comments\n\t\t\t# taken from 
http://www.lifl.fr/~riquetd/parse-a-json-file-with-comments.html\n\t\t\tcomment_re = re.compile('(^)?[^\\S\\n]*/(?:\\*(.*?)\\*/[^\\S\\n]*|/[^\\n]*)($)?', re.DOTALL | re.MULTILINE)\n\t\t\tmatch = comment_re.search(content)\n\t\t\twhile match:\n\t\t\t\t# single line comment\n\t\t\t\tcontent = content[:match.start()] + content[match.end():]\n\t\t\t\tmatch = comment_re.search(content)\n\n\t\t\ttry:\n\t\t\t\tself.options = json.loads(content)\n\t\t\texcept ValueError as e:\n\t\t\t\tprint(\"Invalid Json Options file: %s\" % self._options_file)\n\t\t\t\tself.options = {}\n","sub_path":"ml/ml_options.py","file_name":"ml_options.py","file_ext":"py","file_size_in_byte":3651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"326991387","text":"def DTRecorder(current_dt, data, filename, mode=None):\n \"\"\"\n Record an array of data with the current time\n \"\"\"\n\n if mode==None:\n mode=\"a\"\n \n dt = current_dt\n \n with open(filename, mode) as f:\n f.write( dt + ',' )\n [f.write(i + ',') for i in data[:-1]]\n f.write(data[-1])\n f.write('\\n')","sub_path":"server/bilidash/recorder.py","file_name":"recorder.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"150500581","text":"import unittest\nimport setupFiles as base\nimport os\nimport re\n\nclass FakeFS(object):\n\t\"\"\"litle 'fake' file system\"\"\"\n\tdef __init__(self, fsdata, linesep='\\n'):\n\t\tself._fs = fsdata\n\t\tself.linesep = linesep\n\n\n\tdef _get_path_parts(self, path):\n\t\tif path == '/':\n\t\t\treturn ['/']\n\t\telif path[-1] == '/':\n\t\t\tpath = path[:-1]\n\n\t\tparts = path.split('/')\n\t\tresult = []\n\t\tfor part in parts:\n\t\t\tif part == '..':\n\t\t\t\tresult and result.pop()\n\t\t\telif part != '.':\n\t\t\t\tresult.append(part)\n\t\tif path[0] == '/':\n\t\t\tresult[0] = '/'\n\t\treturn result\n\n\tdef _match_path(self, path):\n\t\tparts = self._get_path_parts(path)\n\t\tremaining = self._fs\n\t\tfor part in parts:\n\t\t\tif not isinstance(remaining, dict):\n\t\t\t\treturn None\n\t\t\tremaining = remaining.get(part)\n\t\treturn remaining\n\n\tdef listdir(self, path):\n\t\trem = self._match_path(path)\n\t\tif rem is None:\n\t\t\traise FileNotFoundError(2, \"No such file or directory\", path)\n\t\telif isinstance(rem, dict):\n\t\t\treturn list(rem.keys())\n\t\telse:\n\t\t\traise NotADirectoryError(20, \"Not a directory\",path)\n\n\tdef isfile(self, path):\n\t\trem = self._match_path(path)\n\t\tif isinstance(rem,str):\n\t\t\treturn True\n\t\treturn False\n\n\tdef exists(self, path):\n\t\treturn not(self._match_path(path) is None)\n\n\tdef isdir(self, path):\n\t\trem = self._match_path(path)\n\t\treturn isinstance(rem, dict)\n\n\t\n\nclass TestFakeFS(unittest.TestCase):\n\n\tdef assertSortedEqual(self, a,b):\n\t\tself.assertListEqual(sorted(a), sorted(b))\n\n\tdef setUp(self):\n\t\tself.fs = FakeFS({\n\t\t\t'/' : {\n\t\t\t\t'a' : 'file1',\n\t\t\t\t'b' : 'fiel2',\n\t\t\t\t'dir' : {\n\t\t\t\t\t'e' : 'file3',\n\t\t\t\t\t'a' : { 'b' : 'file4' }\n\t\t\t\t},\n\t\t\t\t'dir2' : {}\n\t\t\t}\n\t\t})\n\n\tdef test_list_dir(self):\n\t\tself.assertSortedEqual(\n\t\t\tself.fs.listdir('/'), ['a','b','dir','dir2']\n\t\t)\n\t\tself.assertSortedEqual(\n\t\t\tself.fs.listdir('/dir'), ['e','a']\n\t\t)\n\t\tself.assertTrue(self.fs.isfile('/a'))\n\t\t\n\t\tself.assertSortedEqual(\n\t\t\tself.fs.listdir('/dir2'),[]\n\t\t)\n\n\tdef test_isfile(self):\n\t\tself.assertTrue(self.fs.isfile('/a'))\n\t\tself.assertTrue(self.fs.isfile('/dir/a/b'))\n\t\tself.assertFalse(self.fs.isfile('/a/lol'))\n\t\tself.assertFalse(self.fs.isfile('/dir/a'))\n\n\tdef test_isdir(self):\n\t\tself.assertTrue(self.fs.isdir('/dir'))\n\t\tself.assertTrue(self.fs.isdir('/dir/a'))\n\t\tself.assertFalse(self.fs.isdir('/a'))\n\t\tself.assertFalse(self.fs.isdir('/lol'))\n\n\tdef test_exists(self):\n\t\tself.assertTrue(self.fs.exists('/dir'))\n\t\tself.assertTrue(self.fs.exists('/dir/a'))\n\t\tself.assertTrue(self.fs.exists('/dir/a/b'))\n\t\tself.assertFalse(self.fs.exists('/a/lol'))\n\t\t\n\t\t\nclass TestDirFilter(unittest.TestCase):\n\t\n\t@classmethod\n\tdef setUpClass(cls):\n\t\tcls.fs = FakeFS({\n\t\t\t'/' : {\n\t\t\t\t'12_test.tex'\t\t:'file',\n\t\t\t\t'02_dir' : {\n\t\t\t\t\t'ab.txt' \t\t: '',\n\t\t\t\t\t'00_tex' \t\t: '',\n\t\t\t\t\t'01_e.tex' \t\t: ''\n\t\t\t\t},\n\t\t\t\t'03_dir' : {\n\t\t\t\t\t'234_.tex'\t\t: ''\n\t\t\t\t},\n\t\t\t\t'a13_anhang' : {\n\t\t\t\t\t'a00_ahf.tex'\t: ''\n\t\t\t\t},\n\t\t\t\t'01_test' \t\t\t: 'file1'\n\t\t\t}\n\t\t})\n\t\tcls.__os_path_isfile = os.path.isfile\n\t\tos.path.isfile = cls.fs.isfile\n\t\tcls.__os_path_isdir = os.path.isdir\n\t\tos.path.isdir = cls.fs.isdir\n\t\tcls.__os_listdir = os.listdir\n\t\tos.listdir = cls.fs.listdir\n\n\t@classmethod\n\tdef 
tearDownClass(cls):\n\t\tos.path.isfile = cls.__os_path_isfile\n\t\tos.path.isdir = cls.__os_path_isdir\n\t\tos.listdir = cls.__os_listdir\n\n\tdef test_dir_filter(self):\n\t\tdirf = r\"([0-9]+)_[\\w]+\"\n\t\tfilf = dirf + r\"\\.tex\"\n\t\tself.assertDictEqual(\n\t\t\tbase.dir_filter(\n\t\t\t\t'/',\n\t\t\t\tre.compile(filf),\n\t\t\t\tre.compile(dirf)\n\t\t\t),\n\t\t\t{ \n\t\t\t\t2 : {1: '/02_dir/01_e.tex'},\n\t\t\t\t3 : {},\n\t\t\t\t12 : '/12_test.tex'\n\t\t\t}\n\t\t)\n\n\tdef test_sort_and_flaten(self):\n\t\tself.assertSequenceEqual(\n\t\t\tbase.sort_and_flaten(\n\t\t\t\t{ 3: { 1: \"at3\", 12:\"tee\" }, 2: { 1000: { 2:'t2',1:'t1'} } }\n\t\t\t), \n\t\t\t[ 't1' ,'t2', 'at3', 'tee' ]\n\t\t)\n\t\t\t\t\n\n\tdef test_str_latex_input(self):\n\t\tself.assertEqual(\n\t\t\tbase.str_latex_input(['a','b']),\n\t\t\t\"\\\\input{a}\\n\\\\input{b}\"\n\t\t)\n\t\tself.assertEqual(\n\t\t\tbase.str_latex_input([]),\n\t\t\t\"\"\n\t\t)\n\n\nif __name__ == '__main__':\n\tunittest.main()\n\t\n","sub_path":"test_setupFiles.py","file_name":"test_setupFiles.py","file_ext":"py","file_size_in_byte":3883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"426274257","text":"'''\n\nAuthor : Fawad Ahmed\nSimple custom model for hand written digit recognition.\n\n'''\n\n\n\n\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport numpy as np\nmnist = tf.keras.datasets.mnist\n\n# Importig data set\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n# Normalizing data\nx_train = tf.keras.utils.normalize(x_train)\nx_test = tf.keras.utils.normalize(x_test)\n# Input shape for flatten layer\ninput_dem = x_train[0].shape\n# model Initialization\n\nmodel = tf.keras.models.Sequential()\n# model architecture....\nmodel.add(tf.keras.layers.Flatten(input_shape=input_dem))\nmodel.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))\nmodel.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))\nmodel.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))\n\n# model parameters for training of the model\n\"\"\"\nA neuro network doesnt attempt to optimize accuracy, \nit tries to minimize loss and,\nthe loss function we chose can impact alot on our neuronet work. \n\"\"\"\nmodel.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy']\n )\n\n# model training function\nmodel.fit(x_train, y_train, epochs=3)\n\n\"\"\"\nkeep in mind that neru net work are great at fitting data, \nbut the questions will our model overfit?.\nOur focus should be to make a model so that it can generalize on given Data.\n\"\"\"\n# next thing we do is calculate validation loss and validation accuracy.\nval_loss, val_acc = model.evaluate(x_test, y_test)\n\n\"\"\"\nWe should expect our loss to be relativly higher on out of sample data,\nNote: we should expect our out of sample accuracy to me slightly lower and our loss to be \n slightly higher.\n What we dont want to see is either to close or too much of a delta,\n if there is hugh delta chances are you have already overfit your model.\n\"\"\"\nprint(\"Loss : \", val_loss, \"Accuracy : \", val_acc)\n\n# Saving a model\nmodel.save(\"minist_custom.model\")\n","sub_path":"Minist_model/1_model_architecture.py","file_name":"1_model_architecture.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"126938046","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom .models import Emp\nfrom .serializers import EmpSerializer #EmpSerializer is resp for conv qs into json data\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\n\n#NON-ID based operations\nclass EmpListView(APIView):\n def get(self,request): #db-->qs\n qs = Emp.objects.all() #getting data using orm query(conv into sql)\n #qs ---> dict\n dict_data = EmpSerializer(qs,many=True) #if multiple records are there then we've keep many=True\n #dict ---> json\n return Response(dict_data.data) #serializer obj.data we've to use\n\n def post(self,request):\n #data = request.data #user sending data\n dict_data = EmpSerializer(data=request.data)\n\n if dict_data.is_valid():\n dict_data.save()\n return Response(dict_data.data,status=status.HTTP_201_CREATED)\n else:\n return Response(dict_data.errors,status=status.HTTP_400_BAD_REQUEST)\n\nimport json\nclass EmpDetailView(APIView):\n def get(self,request,id):\n try:\n emp = Emp.objects.get(id=id) #single obj\n except Emp.DoesNotExist:\n json_data = json.dumps({\"msg\":\"Requested data not found\"})\n return Response(json_data,status=status.HTTP_404_NOT_FOUND)\n else:\n dict_data = EmpSerializer(emp) #so many=true not req\n return Response(dict_data.data,status=status.HTTP_200_OK)\n\n def get_object_by_id(self,id):\n try:\n emp = Emp.objects.get(id=id) #single obj\n except Emp.DoesNotExist:\n emp=None\n return emp\n\n def put(self,request,id):\n emp = self.get_object_by_id(id) #will get object or None val\n\n if emp is None:\n json_data = json.dumps({\"msg\": \"Requested data not found to GET\"})\n return Response(json_data, status=status.HTTP_404_NOT_FOUND)\n\n dict_data = EmpSerializer(emp,data=request.data)\n\n if dict_data.is_valid():\n dict_data.save()\n return Response(dict_data.data,status=status.HTTP_200_OK)\n\n else:\n return Response(dict_data.data,status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self,request,id):\n emp = self.get_object_by_id(id)\n\n if emp is None:\n json_data = json.dumps({\"msg\": \"Requested data not found to Update\"})\n return Response(json_data, status=status.HTTP_404_NOT_FOUND)\n\n emp.delete()\n json_data = json.dumps({\"msg\": \"Requested data Deleted Successfully\"})\n return Response(json_data, status=status.HTTP_404_NOT_FOUND)\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"156356917","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 14 15:48:52 2019\n\n@author: digomattar\n\"\"\"\n\nimport pygame\nimport random\nimport time\nfrom os import path\n\n#diretorios de imagem e som\nimg_dir = path.join(path.dirname(__file__), 'sprites')\nsnd_dir = path.join(path.dirname(__file__), 'snd')\nfnt_dir = path.join(path.dirname(__file__), 'font')\n\nWIDTH = 380 # Largura da tela\nHEIGHT = 600 # Altura da tela\nFPS = 60 # F\n\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 255)\nYELLOW = (255, 255, 0)\n\n#Constantes\ngravidade = 2\nGROUND = HEIGHT*5//6\nvelocidadecano = -4\naceleracao = 0.5\n\n#3 possiveis estados do \nSTILL =0\nJUMPING = 1\nFALLING = 2\n#classe do passaro\nclass Player(pygame.sprite.Sprite):\n \n def __init__(self, player_img):\n #construtor e coletar imagem \n pygame.sprite.Sprite.__init__(self) \n self.image = player_img \n self.image = pygame.transform.scale(player_img, (50, 38)) \n self.image.set_colorkey(BLACK) \n self.rect = self.image.get_rect()\n \n #posicoes\n self.rect.y = HEIGHT/2\n self.rect.x = (WIDTH/2)-30\n #raio\n self.radius = 25\n \n self.speedx = 0 \n \n self.speedy = FALLING\n \n self.state = FALLING\n \n \n \n def update(self):\n self.rect.y += self.speedy \n if self.rect.bottom > GROUND:\n self.rect.bottom = GROUND\n self.speedy = 0 \n self.state = STILL\n state = DONE\n self.speedy += gravidade\n \nclass Cano_de_cima (pygame.sprite.Sprite):\n \n def __init__(self, canodecima):\n \n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.transform.scale(canodecima, (50, 100)) \n self.image.set_colorkey(BLACK)\n self.rect = self.image.get_rect()\n \n #poscicoes \n self.rect.y = 0\n self.rect.x = (WIDTH/2)\n \n self.speedx = 30\n self.speedy = 0 \n \n #def update(self):\n def update(self):\n self.speedy += aceleracao\n if self.speedy > 75:\n self.speedy = 75 \n if self.rect.x < 0:\n self.kill()\n \n\nclass Cano_de_baixo (pygame.sprite.Sprite):\n\n def __init__(self,canodebaixo):\n \n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.transform.scale(canodebaixo, (50, 100)) \n self.image.set_colorkey(BLACK)\n self.rect = self.image.get_rect()\n \n #posicao\n self.rect.y = HEIGHT-150\n self.rect.x =(WIDTH/2)\n \n self.speedx = 30\n self.speedy = 0\n \n #def update(self):\n def update(self):\n self.speedx += aceleracao\n if self.speedx > 75:\n self.speedx = 75 \n if self.rect.x < 0:\n self.kill()\n \nclass Base(pygame.sprite.Sprite): \n \n def __init__(self,base):\n \n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.transform.scale(base,(380,100))\n self.image.set_colorkey(BLACK)\n self.rect = self.image.get_rect()\n \n # posicao \n self.rect.x = 0\n self.rect.y = HEIGHT-100\n \n #velocidade\n self.speedx = 30\n self.speedy = 0 \n \n def update(self):\n self.speedx += aceleracao\n if self.speedx > 75:\n self.speedx = 75 \n \n\n\ndef load_assets(img_dir, snd_dir, fnt_dir):\n assets = {}\n assets[\"player_img\"] = pygame.image.load(path.join(img_dir, \"yellowbird-upflap.png\")).convert()\n assets[\"canodecima\"] = pygame.image.load(path.join(img_dir, \"upper_pipe_img.png\")).convert()\n assets['canodebaixo']=pygame.image.load(path.join(img_dir, \"lower_pipe_img.png\")).convert()\n assets['barulho_pulo']=pygame.mixer.Sound(path.join(img_dir, 'swoosh.wav')) \n assets['background']=pygame.image.load(path.join(img_dir, \"background-day.png\")).convert()\n assets['barulho_pulo']=pygame.mixer.Sound(path.join(img_dir, 'swoosh.wav')) 
\n assets['base'] = pygame.image.load(path.join(img_dir, \"base.png\")).convert()\n assets['hit'] = pygame.mixer.Sound(path.join(img_dir, 'hit.wav'))\n return assets\n\ndef game_screen(screen):\n \n assets = load_assets(img_dir, snd_dir, fnt_dir)\n \n clock = pygame.time.Clock()\n \n background = assets[\"background\"]\n background_rect = background.get_rect()\n \n #cria o passaro\n player = Player(assets[\"player_img\"])\n all_sprites = pygame.sprite.Group()\n all_sprites.add(player)\n \n canodebaixo = Cano_de_baixo(assets['canodebaixo'])\n\n all_sprites.add(canodebaixo)\n \n canodecima = Cano_de_cima(assets['canodecima'])\n all_sprites.add(canodecima)\n \n base = Base(assets['base'])\n\n all_sprites.add(base)\n \n PLAYING = 0\n\n DONE = 1\n\n state = PLAYING\n while state != DONE:\n #hits = pygame.sprite.groupcollide(canodecima,player, False, pygame.sprite.collide_circle)\n #hits2 = pygame.sprite.groupcollide(canodebaixo,player,False, pygame.sprite.collide_circle)\n \n #if hits:\n # assets['hit'].play()\n #state = DONE\n # elif hits2:\n # assets['hit'].play()\n # state = DONE\n \n clock.tick(FPS)\n \n if state == PLAYING:\n for event in pygame.event.get():\n #adiciona o quit\n if event.type == pygame.QUIT:\n state = DONE\n \n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n player.speedy = -15\n assets['barulho_pulo'].play()\n \n all_sprites.update()\n screen.fill(BLACK)\n screen.blit(background, background_rect)\n all_sprites.draw(screen)\n pygame.display.flip()\n \n\n# Inicialização do Pygame.\npygame.init()\npygame.mixer.init()\n\n# Tamanho da tela.\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\n\n# Nome do jogo\npygame.display.set_caption(\"Flappy_Bird\")\n \n\ntry:\n game_screen(screen)\nfinally:\n pygame.quit()\n\n\n \n \n \n \n ","sub_path":"Comeco do codigo flappy bird.py","file_name":"Comeco do codigo flappy bird.py","file_ext":"py","file_size_in_byte":6264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
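On the commented-out collision checks in game_screen above: pygame.sprite.groupcollide expects two groups, while testing one sprite against a group is what pygame.sprite.spritecollide does. A hedged sketch of that intent, assuming the pipes are kept in their own group (pipes_group is not a name from the record):

```python
pipes_group = pygame.sprite.Group(canodecima, canodebaixo)

# collide_circle uses the .radius attribute set on Player
hits = pygame.sprite.spritecollide(player, pipes_group, False,
                                   pygame.sprite.collide_circle)
if hits:
    assets['hit'].play()
    state = DONE
```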
+{"seq_id":"299463383","text":"from BluenetLib._EventBusInstance import BluenetEventBus\nfrom BluenetLib.lib.topics.SystemTopics import SystemTopics\nfrom BluenetLib.lib.topics.UsbTopics import UsbTopics\n\n\nclass StoneStateManager:\n def __init__(self):\n self.stones = {}\n BluenetEventBus.subscribe(SystemTopics.stateUpdate, self.handleStateUpdate)\n\n def handleStateUpdate(self,data):\n stoneId = data[0]\n stoneStatePacket = data[1]\n\n if stoneId in self.stones:\n if self.stones[stoneId][\"timestamp\"] < stoneStatePacket.timestamp:\n self.stones[stoneId] = stoneStatePacket.getSummary()\n self.emitNewData(stoneStatePacket)\n else:\n BluenetEventBus.emit(SystemTopics.newCrownstoneFound, stoneId)\n self.stones[stoneId] = stoneStatePacket.getSummary()\n self.emitNewData(stoneStatePacket)\n \n def emitNewData(self, stoneStatePacket):\n BluenetEventBus.emit(UsbTopics.newDataAvailable, stoneStatePacket.getSummary())\n\n def getIds(self):\n ids = []\n for stoneId, stoneData in self.stones.items():\n ids.append(stoneId)\n\n return ids\n","sub_path":"BluenetLib/lib/dataFlowManagers/StoneStateManager.py","file_name":"StoneStateManager.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"453546251","text":"from django.shortcuts import render, redirect\nfrom .forms import *\nfrom profiles.models import Profile\nfrom django.contrib.auth.decorators import login_required\n\n\n# Create your views here.\n\n@login_required(login_url='/accounts/login')\ndef newsfeed(request):\n me = request.user\n myprofile = Profile.objects.get(user=me)\n friends = myprofile.friends.all()\n post = Post.objects.all().order_by('-id')\n post_f = PostForm(initial={'visibility': 'public',\n 'comment_choice': 'allow-comment'})\n comment_f = CommentForm()\n notification = Notification.objects.filter(\n not_receiver=myprofile).order_by('-id')\n total_notification = Notification.objects.filter(\n not_receiver=myprofile).count()\n\n\n \n\n context = {'pf': post_f, 'post': post, 'cf': comment_f,\n 'myprofile': myprofile, 'nt': notification, 'tn': total_notification, 'friends': friends}\n\n return render(request, 'posts/newsfeed.html', context)\n\n#submitpost\n@login_required\ndef subpost(request):\n post_f = PostForm()\n me = request.user\n myprofile = Profile.objects.get(user=me)\n\n if 'submit_post' in request.POST:\n post_f = PostForm(request.POST, request.FILES)\n if post_f.is_valid():\n instance = post_f.save(commit=False)\n\n instance.author = myprofile\n instance.save()\n post_f = PostForm()\n return redirect('newsfeed')\n return redirect('newsfeed')\n\n\n#submitcomment\n@login_required\ndef subcom(request):\n me = request.user\n myprofile = Profile.objects.get(user=me)\n comment_f = CommentForm()\n if 'submit_comment' in request.POST:\n comment_f = CommentForm(request.POST)\n post_id = request.POST.get('post_id')\n post_obj = Post.objects.get(id=post_id)\n if comment_f.is_valid():\n instance = comment_f.save(commit=False)\n instance.user = myprofile\n instance.post = post_obj\n instance.save()\n if myprofile != post_obj.author:\n Notification.objects.create(\n not_sender=myprofile, not_receiver=post_obj.author, post=post_obj, status='commented')\n comment_f = CommentForm()\n return redirect('newsfeed')\n return redirect('newsfeed')\n\n#updatepost\n\n\n@login_required\ndef updatepost(request, id):\n me = request.user\n myprofile = Profile.objects.get(user=me)\n post_obj = Post.objects.get(id=id)\n post_f = PostForm(request.POST or None,\n request.FILES or None, instance=post_obj)\n\n if request.method == 'POST':\n\n if post_f.is_valid():\n post_f.save()\n return redirect('newsfeed')\n\n context = {'pf': post_f, 'mp': myprofile, 'pb': post_obj}\n return render(request, 'posts/updatepost.html', context)\n\n\n@login_required\ndef deletepost(request, id):\n me = request.user\n myprofile = Profile.objects.get(user=me)\n post_obj = Post.objects.get(id=id)\n\n if request.method == 'POST':\n post_obj.delete()\n return redirect('newsfeed')\n context = {'pb': post_obj}\n return render(request, 'posts/deletepost.html', context)\n\n\n@login_required\ndef updatecomment(request, id):\n me = request.user\n myprofile = Profile.objects.get(user=me)\n comment_obj = Comment.objects.get(id=id)\n comment_f = CommentForm(request.POST or None, instance=comment_obj)\n\n if request.method == 'POST':\n\n if comment_f.is_valid():\n comment_f.save()\n return redirect('newsfeed')\n\n context = {'cf': comment_f, 'mp': myprofile, 'cb': comment_obj}\n return render(request, 'posts/updatecomment.html', context)\n\n\n@login_required\ndef deletecomment(request, id):\n me = request.user\n myprofile = Profile.objects.get(user=me)\n comment_obj = Comment.objects.get(id=id)\n\n if request.method == 'POST':\n 
comment_obj.delete()\n\n return redirect('newsfeed')\n context = {'cb': comment_obj}\n return render(request, 'posts/deletecomment.html', context)\n\n\n@login_required\ndef detailpost(request, id):\n me = request.user\n myprofile = Profile.objects.get(user=me)\n detail_post = Post.objects.get(id=id)\n comment_f = CommentForm()\n\n context = {'myprofile': myprofile, 'dp': detail_post, 'cf': comment_f}\n\n return render(request, 'posts/detail.html', context)\n\n#like\n\n\n@login_required\ndef like(request):\n me = request.user\n myprofile = Profile.objects.get(user=me)\n if 'like' in request.POST:\n post_id = request.POST.get('like_id')\n post_obj = Post.objects.get(id=post_id)\n\n post_obj.liked.add(myprofile)\n\n post_obj.save()\n if myprofile != post_obj.author:\n\n Notification.objects.create(\n not_sender=myprofile, not_receiver=post_obj.author, post=post_obj, status='liked')\n\n return redirect('newsfeed')\n return redirect('newsfeed')\n\n\n#unlike\n@login_required\ndef unlike(request):\n me = request.user\n myprofile = Profile.objects.get(user=me)\n if 'unlike' in request.POST:\n post_id = request.POST.get('unlike_id')\n post_obj = Post.objects.get(id=post_id)\n\n post_obj.liked.remove(myprofile)\n\n post_obj.save()\n if myprofile != post_obj.author:\n nots = Notification.objects.get(\n not_sender=myprofile, not_receiver=post_obj.author, post=post_obj, status='liked')\n nots.delete()\n\n\n return redirect('newsfeed')\n return redirect('newsfeed')\n","sub_path":"sm/posts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"207556520","text":"from unittest import TestCase\nfrom random import randint\nfrom mock import MagicMock, patch\nfrom sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound\n\nfrom frontend.models.sqlobjects import FileWeb\nfrom lib.irma.common.exceptions import IrmaDatabaseError\nfrom lib.irma.common.exceptions import IrmaDatabaseResultNotFound\nfrom lib.irma.common.utils import IrmaProbeType\n\n\nclass TestFileWeb(TestCase):\n\n def setUp(self):\n self.file = MagicMock()\n self.name = \"name\"\n self.path = \"path\"\n self.scan = MagicMock()\n self.fw = FileWeb(self.file, self.name, self.path, self.scan)\n\n def tearDown(self):\n del self.fw\n\n def test001_load_from_ext_id(self):\n m_session = MagicMock()\n ext_id = \"whatever\"\n FileWeb.load_from_ext_id(ext_id, m_session)\n m_filter = m_session.query(FileWeb).filter\n m_filter.is_called_once_with(FileWeb.external_id == ext_id)\n\n def test002_load_from_ext_id_raises(self):\n m_session = MagicMock()\n ext_id = \"whatever\"\n m_session.query.side_effect = NoResultFound()\n with self.assertRaises(IrmaDatabaseResultNotFound):\n FileWeb.load_from_ext_id(ext_id, m_session)\n\n def test003_load_from_ext_id_raises(self):\n m_session = MagicMock()\n ext_id = \"whatever\"\n m_session.query.side_effect = MultipleResultsFound()\n with self.assertRaises(IrmaDatabaseError):\n FileWeb.load_from_ext_id(ext_id, m_session)\n\n def test004_load_by_scanid(self):\n m_session = MagicMock()\n scanid = \"scanid\"\n fileid = \"fileid\"\n FileWeb.load_by_scanid_fileid(scanid, fileid, m_session)\n m_filter = m_session.query(FileWeb).filter\n m_filter.is_called_once_with(FileWeb.id_scan == scanid,\n FileWeb.id_file == fileid)\n\n def test005_load_by_scanid_raises(self):\n m_session = MagicMock()\n m_session.query.side_effect = NoResultFound()\n with self.assertRaises(IrmaDatabaseError):\n FileWeb.load_by_scanid_fileid(None, None, m_session)\n\n @patch(\"frontend.models.sqlobjects.File\")\n @patch(\"frontend.models.sqlobjects.Tag\")\n def test006_find_by_name(self, m_Tag, m_File):\n m_session = MagicMock()\n name = \"something\"\n tag = MagicMock()\n tag.id = randint(0, 10)\n tags = [tag.id]\n FileWeb.query_find_by_name(name, tags, m_session)\n m_Tag.find_by_id.assert_called_once_with(tag.id, m_session)\n\n @patch(\"frontend.models.sqlobjects.File\")\n @patch(\"frontend.models.sqlobjects.Tag\")\n def test007_find_by_hash(self, m_Tag, m_File):\n m_session = MagicMock()\n hash_type, hash = \"something\", \"anotherthing\"\n tag = MagicMock()\n tag.id = randint(0, 10)\n tags = [tag.id]\n FileWeb.query_find_by_hash(hash_type, hash, tags, m_session)\n m_session.query.called_with(FileWeb)\n m_Tag.find_by_id.assert_called_once_with(tag.id, m_session)\n\n @patch(\"frontend.models.sqlobjects.File\")\n @patch(\"frontend.models.sqlobjects.Tag\")\n def test007_find_by_hash_distinct_false(self, m_Tag, m_File):\n m_session = MagicMock()\n hash_type, hash = \"something\", \"anotherthing\"\n tag = MagicMock()\n tag.id = randint(0, 10)\n tags = [tag.id]\n FileWeb.query_find_by_hash(hash_type, hash, tags, m_session,\n distinct_name=False)\n m_session.query.called_with(FileWeb)\n m_Tag.find_by_id.assert_called_once_with(tag.id, m_session)\n\n def test008_probes_finished(self):\n pr1, pr2 = MagicMock(), MagicMock()\n pr1.doc = None\n pr2.doc = \"whatever\"\n self.fw.probe_results = [pr1, pr2]\n self.assertEqual(self.fw.probes_finished, 1)\n\n def test009_probes_finished_all_none(self):\n pr1, pr2 = MagicMock(), MagicMock()\n pr1.doc = None\n pr2.doc = None\n 
self.fw.probe_results = [pr1, pr2]\n self.assertEqual(self.fw.probes_finished, 0)\n\n def test010_status_0(self):\n pr1, pr2 = MagicMock(), MagicMock()\n pr1.doc = {'type': IrmaProbeType.antivirus, 'status': 0}\n pr2.doc = {'type': IrmaProbeType.antivirus, 'status': 0}\n self.fw.probe_results = [pr1, pr2]\n self.assertEqual(self.fw.status, 0)\n\n def test010_status_1(self):\n pr1, pr2 = MagicMock(), MagicMock()\n pr1.doc = {'type': IrmaProbeType.antivirus, 'status': 0}\n pr2.doc = {'type': IrmaProbeType.antivirus, 'status': 1}\n self.fw.probe_results = [pr1, pr2]\n self.assertEqual(self.fw.status, 1)\n\n def test011_get_probe_results(self):\n pr1, pr2 = MagicMock(), MagicMock()\n pr1.doc = \"whatever\"\n pr2.doc = \"something\"\n self.fw.probe_results = [pr1, pr2]\n self.assertItemsEqual(self.fw.get_probe_results(),\n [pr1.get_details(True),\n pr2.get_details(True)])\n pr1.get_details.assert_called_with(True)\n pr2.get_details.assert_called_with(True)\n","sub_path":"frontend/tests/models/test_sqlobjects_FileWeb.py","file_name":"test_sqlobjects_FileWeb.py","file_ext":"py","file_size_in_byte":5080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
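A caution on the tests above: names like `is_called_once_with` and `called_with` are not mock assertions, so those lines pass silently regardless of what was called; only the `assert_*` family actually verifies anything. A standalone demonstration with the standard-library `unittest.mock` (the record uses the backported `mock` package, which behaves the same way):

```python
from unittest.mock import MagicMock

m = MagicMock()
m("right")

m.is_called_once_with("wrong")      # silently returns a new child mock -- no check at all
m.assert_called_once_with("right")  # real assertion, passes

try:
    m.assert_called_once_with("wrong")
except AssertionError as exc:
    print("caught:", exc)
```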
+{"seq_id":"216064787","text":"import pandas as pd \r\nimport numpy as np \r\nimport matplotlib.pyplot as plt \r\nfrom sklearn.metrics import mean_squared_error\r\nfrom math import sqrt\r\nfrom statsmodels.tsa.api import ExponentialSmoothing, SimpleExpSmoothing, Holt\r\nimport statsmodels.api as sm\r\n\r\n#Importing data\r\n# dataframe\r\n\r\n#Homework Starts\r\ndf = pd.read_csv(\"Danish Kron.txt\", sep='\\t')\r\n\r\n\r\ndef replacezeroeswith(array, newvalue):\r\n array[ array == 0 ] = newvalue \r\n\r\nindex=0\r\n\r\nif df.VALUE[index]==0:\r\n upper=df.VALUE[index-1]\r\n lower=df.VALUE[index+1]\r\n mean=(upper+lower)/ 2\r\n replacezeroeswith(index,mean)\r\n df.replace('index', mean, inplace=True)\r\n \r\nprint(df)\r\n \r\n#Homework ends\r\n\r\n\r\n# Create slices for training and test data\r\nsize = len(df)\r\nhead = df[0:5]\r\ntail = df [size-5:]\r\nprint(\"Head\")\r\nprint(head)\r\nprint(\"Tail\")\r\nprint(tail)\r\n\r\ntrain = df[0:size-201]\r\ntest = df[size-200:]\r\n\r\n#train = df.copy()\r\n#test = df.copy()\r\n\r\n\r\ndf.DATE = pd.to_datetime(df.DATE,format=\"%Y-%m-%d\")\r\ndf.index = df.DATE \r\ntrain.DATE = pd.to_datetime(train.DATE,format=\"%Y-%m-%d\") \r\ntrain.index = train.DATE \r\ntest.DATE = pd.to_datetime(train.DATE,format=\"%Y-%m-%d\") \r\ntest.index = test.DATE \r\n\r\n#Naive approach\r\nprint(\"Naive\")\r\ndd= np.asarray(train.VALUE)\r\ny_hat = test.copy()\r\ny_hat['naive'] = dd[len(dd)-1]\r\nrms = sqrt(mean_squared_error(test.VALUE, y_hat.naive))\r\nprint(\"RMSE: \",rms)\r\n\r\n#Simple average approach\r\nprint(\"Simple Average\")\r\ny_hat_avg = test.copy()\r\ny_hat_avg['avg_forecast'] = train['VALUE'].mean()\r\nrms = sqrt(mean_squared_error(test.VALUE, y_hat_avg.avg_forecast))\r\nprint(\"RMSE: \",rms)\r\n\r\n#Moving average approach\r\nprint(\"Moving Average\")\r\nwindowsize = 15\r\ny_hat_avg = test.copy()\r\ny_hat_avg['moving_avg_forecast'] = train['VALUE'].rolling(windowsize).mean().iloc[-1]\r\nrms = sqrt(mean_squared_error(test.VALUE, y_hat_avg.moving_avg_forecast))\r\nprint(\"RMSE: \",rms)\r\n\r\n# Simple Exponential Smoothing\r\nprint(\"Simple Exponential Smoothing\")\r\ny_hat_avg = test.copy()\r\nalpha = 0.2\r\nfit2 = SimpleExpSmoothing(np.asarray(train['VALUE'])).fit(smoothing_level=alpha,optimized=False)\r\ny_hat_avg['SES'] = fit2.forecast(len(test))\r\nrms = sqrt(mean_squared_error(test.VALUE, y_hat_avg.SES))\r\nprint(\"RMSE: \",rms)\r\n\r\n# Holt\r\nprint(\"Holt\")\r\nsm.tsa.seasonal_decompose(train.VALUE).plot()\r\nresult = sm.tsa.stattools.adfuller(train.VALUE)\r\n# plt.show()\r\n\r\ny_hat_avg = test.copy()\r\nalpha = 0.4\r\nfit1 = Holt(np.asarray(train['VALUE'])).fit(smoothing_level = alpha,smoothing_slope = 0.1)\r\ny_hat_avg['Holt_linear'] = fit1.forecast(len(test))\r\nrms = sqrt(mean_squared_error(test.VALUE, y_hat_avg.Holt_linear))\r\nprint(\"RMSE: \",rms)\r\n\r\n# Holt-Winters\r\nprint(\"Holt-Winters\")\r\ny_hat_avg = test.copy()\r\nseasons = 10\r\nfit1 = ExponentialSmoothing(np.asarray(train['VALUE']) ,seasonal_periods=seasons ,trend='add', seasonal='add',).fit()\r\ny_hat_avg['Holt_Winter'] = fit1.forecast(len(test))\r\nrms = sqrt(mean_squared_error(test.VALUE, y_hat_avg.Holt_Winter))\r\nprint(\"RMSE: \",rms)\r\n\r\n# Seasonal ARIMA\r\n# This is a naive use of the technique. 
See - http://www.seanabu.com/2016/03/22/time-series-seasonal-ARIMA-model-in-python/\r\n# print(\"Seasonal ARIMA\")\r\n# y_hat_avg = test.copy()\r\n# fit1 = sm.tsa.statespace.SARIMAX(train.VALUE, order=(1, 0, 0),seasonal_order=(0,1,1,1)).fit()\r\n# y_hat_avg['SARIMA'] = fit1.predict(start=\"2008-12-01\", end=\"2018-11-29\", dynamic=True)\r\n# rms = sqrt(mean_squared_error(test.VALUE, y_hat_avg.SARIMA))\r\n# print(\"RMSE: \",rms)","sub_path":"homework2.py","file_name":"homework2.py","file_ext":"py","file_size_in_byte":3419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
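A quick check of the neighbour-mean zero replacement used in the homework block above, on a made-up frame with the same VALUE column name:

```python
import pandas as pd

toy = pd.DataFrame({"VALUE": [7.0, 0.0, 7.5, 7.4]})
for index in toy.index[toy.VALUE == 0]:
    toy.loc[index, "VALUE"] = (toy.VALUE[index - 1] + toy.VALUE[index + 1]) / 2
print(toy.VALUE.tolist())  # [7.0, 7.25, 7.5, 7.4]
```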
+{"seq_id":"280490447","text":"# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nClasses for building YAML SparseML recipes without instantiating specific modifier\\\nimplementations\n\"\"\"\n\n\nimport textwrap\nfrom typing import Any, Dict, List, Optional, Type, Union\n\nimport yaml\n\nfrom sparseml.optim import BaseModifier, ModifierProp\nfrom sparseml.utils import create_parent_dirs\n\n\n__all__ = [\n \"ModifierYAMLBuilder\",\n \"RecipeYAMLBuilder\",\n \"to_yaml_str\",\n]\n\n\nclass ModifierYAMLBuilder(object):\n \"\"\"\n Class for building a YAML string representation of a modifier by setting\n various properties of it. Properties are automatically inferred through the\n serializable ModifierProps of the given modifier. They can be accessed through\n auto-generated set_{name} and get_{name}.\n\n :param modifier_class: reference to the class of modifier this object should create\n a YAML representation for\n \"\"\"\n\n def __init__(self, modifier_class: Type[BaseModifier]):\n assert issubclass(\n modifier_class, BaseModifier\n ), \"a subclass of Modifier must be used to instantiate a ModifierYAMLBuilder\"\n\n self._modifier_class = modifier_class\n self._properties = {}\n\n for attr in dir(modifier_class):\n attr_obj = getattr(modifier_class, attr)\n if isinstance(attr_obj, ModifierProp) and attr_obj.serializable:\n self._add_setter_getter(attr)\n\n @property\n def modifier_class(self) -> Type[BaseModifier]:\n \"\"\"\n :return: the class of the Modifier for which this object is building a string\n \"\"\"\n return self._modifier_class\n\n def build_yaml_str(self) -> str:\n \"\"\"\n :return: string representation of the built Modifier as a YAML list item\n \"\"\"\n class_name_yaml = f\"- !{self._modifier_class.__name__}\"\n properties_yaml = \"\\n\".join(\n [f\"{key}: {to_yaml_str(value)}\" for key, value in self._properties.items()]\n )\n properties_yaml = textwrap.indent(properties_yaml, \" \")\n return f\"{class_name_yaml}\\n{properties_yaml}\"\n\n def _add_setter_getter(self, property_name: str):\n # define generic setter and getter functions for this property\n def setter(self_, val: Any) -> ModifierYAMLBuilder:\n self_._properties[property_name] = val\n return self_\n\n def getter(self_) -> Optional[Any]:\n return self_._properties.get(property_name)\n\n # bind setter and getter functions to this class\n setter_bound = setter.__get__(self, self.__class__)\n setattr(self, f\"set_{property_name}\", setter_bound)\n getter_bound = getter.__get__(self, self.__class__)\n setattr(self, f\"get_{property_name}\", getter_bound)\n\n\nclass RecipeYAMLBuilder(object):\n \"\"\"\n Class for building a YAML SparseML recipe with standardized structure\n\n :param variables: dict of string initial variable names to non-modifier recipe\n variables to be included. 
Default is an empty dict\n :param modifier_groups: dict of string initial modifier group names to a list\n of ModifierYAMLBuilder objects of modifiers to be included in that group.\n All modifier group names must contain 'modifiers' in the string. Default is\n an empty dict\n \"\"\"\n\n def __init__(\n self,\n variables: Dict[str, Any] = None,\n modifier_groups: Dict[str, List[ModifierYAMLBuilder]] = None,\n ):\n self._variables = variables or {}\n self._modifier_groups = modifier_groups or {}\n\n self._validate()\n\n def add_modifier_group(\n self, name: str, modifier_builders: List[ModifierYAMLBuilder] = None\n ) -> \"RecipeYAMLBuilder\":\n \"\"\"\n Adds a modifier group with the given name to this builder\n\n :param name: name of new modifier group\n :param modifier_builders: list of modifier builder objects to initialize\n this group with. Default is an empty list\n :return: a reference to this object with the modifier group now added\n \"\"\"\n self._validate_modifier_group_name(name)\n if name in self._modifier_groups:\n raise KeyError(\n f\"{name} is already a modifier group name in this RecipeYAMLBuilder\"\n )\n\n modifier_builders = modifier_builders or []\n self._modifier_groups[name] = modifier_builders\n self._validate()\n return self\n\n def get_modifier_group(self, name: str) -> Optional[List[ModifierYAMLBuilder]]:\n \"\"\"\n :param name: name of the modifier group to retrieve the modifier builders of\n :return: reference to the list of modifier builders currently in this\n modifier group. if the modifier group does not exist, None will be\n returned\n \"\"\"\n return self._modifier_groups.get(name)\n\n def get_modifier_builders(\n self,\n modifier_type: Optional[Union[Type[BaseModifier], str]] = None,\n modifier_groups: Optional[Union[List[str], str]] = None,\n ):\n \"\"\"\n :param modifier_type: optional type of modifier to filter by. Can be\n a type reference that will match if the modifier is of that type\n or a subclass of it or a string where it will match if the class\n is exactly that name. Defaults to None\n :param modifier_groups: optional list of modifier group names to match\n to. 
Defaults to None\n :return: all modifier builders in this recipe, filtered by type and group\n \"\"\"\n if isinstance(modifier_groups, str):\n modifier_groups = [modifier_groups]\n\n modifier_builders = []\n for group, builders in self._modifier_groups.items():\n if modifier_groups is not None and group not in modifier_groups:\n continue\n for builder in builders:\n if modifier_type and not self._modifier_builder_is_instance(\n builder, modifier_type\n ):\n continue\n modifier_builders.append(builder)\n return modifier_builders\n\n def get_variable(self, name: str, default: Any = None) -> Any:\n \"\"\"\n :param name: name of the recipe variable to return\n :param default: default value that should be returned if the given\n name is not a current variable\n :return: current value of the given variable, or the default if\n the variable is not set in this builder\n \"\"\"\n return self._variables.get(name, default)\n\n def has_variable(self, name: str) -> bool:\n \"\"\"\n :param name: name of the recipe variable to check\n :return: True if this recipe builder has a variable with the given name.\n False otherwise\n \"\"\"\n return name in self._variables\n\n def set_variable(self, name: str, val: Any) -> \"RecipeYAMLBuilder\":\n \"\"\"\n Sets the given variable name to the given value\n\n :param name: variable name to set\n :param val: value to set the variable to\n :return: a reference to this object with the variable now set\n \"\"\"\n self._variables[name] = val\n return self\n\n def build_yaml_str(self) -> str:\n \"\"\"\n :return: yaml string representation of this recipe in standard format\n \"\"\"\n # write variables\n yaml_str = \"\\n\".join(\n [f\"{key}: {to_yaml_str(value)}\" for key, value in self._variables.items()]\n )\n # write modifier groups\n for group, builders in self._modifier_groups.items():\n if not builders:\n continue # do not write empty groups\n modifiers_yaml = \"\\n\\n\".join(\n [builder.build_yaml_str() for builder in builders]\n )\n modifiers_yaml = textwrap.indent(modifiers_yaml, \" \")\n yaml_str += f\"\\n\\n{group}:\\n{modifiers_yaml}\"\n return yaml_str\n\n def save_yaml(self, file_path: str):\n \"\"\"\n Saves this recipe as a yaml file to the given path\n\n :param file_path: file path to save file to. if no '.' character is found\n in the path, '.yaml' will be added to the path\n \"\"\"\n if \".\" not in file_path:\n file_path += \".yaml\"\n self._save_file_str(self.build_yaml_str(), file_path)\n\n def save_markdown(self, file_path: str, desc: str = \"\"):\n \"\"\"\n Saves this recipe as a markdown file to the given path with the\n recipe yaml contained in the frontmatter\n\n :param file_path: file path to save file to. if no '.' character is found\n in the path, '.md' will be added to the path\n :param desc: optional description to add to the markdown file after the recipe\n YAML in the frontmatter. 
Default is empty string\n \"\"\"\n if \".\" not in file_path:\n file_path += \".md\"\n\n md_content = f\"---\\n{self.build_yaml_str()}\\n---\\n{desc}\"\n self._save_file_str(md_content, file_path)\n\n @staticmethod\n def _save_file_str(content: str, file_path: str):\n create_parent_dirs(file_path)\n with open(file_path, \"w\") as file:\n file.write(content)\n\n @staticmethod\n def _validate_modifier_group_name(name: str) -> None:\n if \"modifiers\" not in name:\n raise ValueError(\n \"modifier groups must contain 'modifiers' in their name, received \"\n f\"group with name: {name}\"\n )\n\n @staticmethod\n def _modifier_builder_is_instance(\n builder: ModifierYAMLBuilder, type_: Union[Type[BaseModifier], str]\n ) -> bool:\n builder_class = builder.modifier_class\n if isinstance(type_, str):\n return builder_class.__name__ == type_\n return builder_class is type_ or issubclass(builder_class, type_)\n\n def _validate(self):\n if not isinstance(self._variables, Dict):\n raise ValueError(\n \"RecipeYAMLBuilder variables object must be a Dict, \"\n f\"found type {type(self._variables)}\"\n )\n\n for name, builders in self._modifier_groups.items():\n self._validate_modifier_group_name(name)\n\n if not isinstance(builders, List):\n raise ValueError(\n \"All modifier groups in RecipeYAMLBuilder must contain a list \"\n f\"of ModifierYAMLBuilder objects. Group {name} has value of \"\n f\"type {type(builders)}\"\n )\n\n for builder in builders:\n if not isinstance(builder, ModifierYAMLBuilder):\n raise ValueError(\n \"All modifier groups in RecipeYAMLBuilder must contain a \"\n f\"list of ModifierYAMLBuilder objects. Group {name} \"\n f\"contains an element of type {type(builder)}\"\n )\n\n\ndef to_yaml_str(val: Any) -> str:\n \"\"\"\n :param val: value to get yaml str value of\n :return: direct str cast of val if it is an int, float, or bool, otherwise\n the stripped output of yaml.dump\n \"\"\"\n if isinstance(val, (str, int, float, bool)):\n return str(val)\n else:\n return yaml.dump(val).strip()\n","sub_path":"src/sparseml/sparsification/recipe_builder.py","file_name":"recipe_builder.py","file_ext":"py","file_size_in_byte":11804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
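A side note on `_add_setter_getter` above: `function.__get__(instance, cls)` turns a plain function into a method bound to that instance, which is how the builder grows `set_{name}`/`get_{name}` accessors at runtime. A self-contained sketch of the same trick, with illustrative property names that are not from SparseML:

```python
class Builder:
    def __init__(self, prop_names):
        self._properties = {}
        for name in prop_names:
            self._bind(name)

    def _bind(self, name):
        def setter(self_, val):          # chainable, like the builder above
            self_._properties[name] = val
            return self_

        def getter(self_):
            return self_._properties.get(name)

        # __get__ binds the plain functions to this instance
        setattr(self, "set_" + name, setter.__get__(self, self.__class__))
        setattr(self, "get_" + name, getter.__get__(self, self.__class__))

b = Builder(["start_epoch", "end_epoch"])
b.set_start_epoch(0).set_end_epoch(10)
print(b.get_end_epoch())  # 10
```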
+{"seq_id":"630857032","text":"import os\nimport supervisely_lib as sly\nfrom sly_train_progress import init_progress\nimport sly_globals as g\nfrom tools.train import main as mm_train\nfrom supervisely.train.src.export_to_coco import export_to_coco\n_open_lnk_name = \"open_app.lnk\"\n\n\ndef init(data, state):\n init_progress(\"Epoch\", data)\n init_progress(\"Iter\", data)\n init_progress(\"UploadDir\", data)\n data[\"eta\"] = None\n\n init_charts(data, state)\n\n state[\"collapsed9\"] = True\n state[\"disabled9\"] = True\n state[\"done9\"] = False\n\n state[\"started\"] = False\n\n data[\"outputName\"] = None\n data[\"outputUrl\"] = None\n\n\ndef restart(data, state):\n data[\"done9\"] = False\n\n\ndef init_chart(title, names, xs, ys, smoothing=None, yrange=None, decimals=None, xdecimals=None):\n series = []\n for name, x, y in zip(names, xs, ys):\n series.append({\n \"name\": name,\n \"data\": [[px, py] for px, py in zip(x, y)]\n })\n result = {\n \"options\": {\n \"title\": title,\n # \"groupKey\": \"my-synced-charts\",\n },\n \"series\": series\n }\n if smoothing is not None:\n result[\"options\"][\"smoothingWeight\"] = smoothing\n if yrange is not None:\n result[\"options\"][\"yaxisInterval\"] = yrange\n if decimals is not None:\n result[\"options\"][\"decimalsInFloat\"] = decimals\n if xdecimals is not None:\n result[\"options\"][\"xaxisDecimalsInFloat\"] = xdecimals\n return result\n\n\ndef init_charts(data, state):\n # demo_x = [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]\n # demo_y = [[0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001]]\n data[\"chartLR\"] = init_chart(\"LR\", names=[\"LR\"], xs=[[]], ys=[[]], smoothing=None,\n yrange=[state[\"lr\"] - state[\"lr\"] / 2.0, state[\"lr\"] + state[\"lr\"] / 2.0],\n decimals=6, xdecimals=2)\n data[\"chartTrainLoss\"] = init_chart(\"Train Loss\", names=[\"train\"], xs=[[]], ys=[[]], smoothing=0.6, xdecimals=2)\n # names = [\"det_mAP\", \"seg_mAP\"]\n # if state['selectedType'] == 'solo':\n # names = [\"seg_mAP\"]\n data[\"chartValAccuracy\"] = init_chart(\"Val Acc\", names=[\"det_mAP\", \"seg_mAP\"], # names\n xs=[[], []], ys=[[], []], smoothing=0.6)\n\n data[\"chartTime\"] = init_chart(\"Time\", names=[\"time\"], xs=[[]], ys=[[]], xdecimals=2)\n data[\"chartDataTime\"] = init_chart(\"Data Time\", names=[\"data_time\"], xs=[[]], ys=[[]], xdecimals=2)\n data[\"chartMemory\"] = init_chart(\"Memory\", names=[\"memory\"], xs=[[]], ys=[[]], xdecimals=2)\n state[\"smoothing\"] = 0.6\n\n\ndef _save_link_to_ui(local_dir, app_url):\n # save report to file *.lnk (link to report)\n local_path = os.path.join(local_dir, _open_lnk_name)\n sly.fs.ensure_base_path(local_path)\n with open(local_path, \"w\") as text_file:\n print(app_url, file=text_file)\n\n\nfrom sly_train_progress import _update_progress_ui\nfrom sly_train_args import init_script_arguments\nfrom functools import partial\n\n\ndef upload_artifacts_and_log_progress():\n _save_link_to_ui(g.artifacts_dir, g.my_app.app_url)\n\n def upload_monitor(monitor, api: sly.Api, task_id, progress: sly.Progress):\n if progress.total == 0:\n progress.set(monitor.bytes_read, monitor.len, report=False)\n else:\n progress.set_current_value(monitor.bytes_read, report=False)\n _update_progress_ui(\"UploadDir\", g.api, g.task_id, progress)\n\n progress = sly.Progress(\"Upload directory with training artifacts to Team Files\", 0, is_size=True)\n progress_cb = partial(upload_monitor, api=g.api, task_id=g.task_id, progress=progress)\n\n remote_dir = f\"/mmdetection/{g.task_id}_{g.project_info.name}\"\n 
res_dir = g.api.file.upload_directory(g.team_id, g.artifacts_dir, remote_dir, progress_size_cb=progress_cb)\n return res_dir\n\n\n@g.my_app.callback(\"train\")\n@sly.timeit\n@g.my_app.ignore_errors_and_show_dialog_window()\ndef train(api: sly.Api, task_id, context, state, app_logger):\n try:\n sly.json.dump_json_file(state, os.path.join(g.info_dir, \"ui_state.json\"))\n\n train_split = os.path.join(g.project_dir, \"train.json\")\n export_to_coco(split_json=train_split)\n val_split = os.path.join(g.project_dir, \"val.json\")\n export_to_coco(split_json=val_split)\n\n init_script_arguments(state)\n mm_train()\n\n # hide progress bars and eta\n fields = [\n {\"field\": \"data.progressEpoch\", \"payload\": None},\n {\"field\": \"data.progressIter\", \"payload\": None},\n {\"field\": \"data.eta\", \"payload\": None},\n ]\n g.api.app.set_fields(g.task_id, fields)\n\n remote_dir = upload_artifacts_and_log_progress()\n file_info = api.file.get_info_by_path(g.team_id, os.path.join(remote_dir, _open_lnk_name))\n api.task.set_output_directory(task_id, file_info.id, remote_dir)\n\n # show result directory in UI\n fields = [\n {\"field\": \"data.outputUrl\", \"payload\": g.api.file.get_url(file_info.id)},\n {\"field\": \"data.outputName\", \"payload\": remote_dir},\n {\"field\": \"state.done9\", \"payload\": True},\n {\"field\": \"state.started\", \"payload\": False},\n ]\n g.api.app.set_fields(g.task_id, fields)\n except Exception as e:\n api.app.set_field(task_id, \"state.started\", False)\n raise e # app will handle this error and show modal window\n\n # stop application\n g.my_app.stop()\n","sub_path":"supervisely/train/detection/src/ui/monitoring.py","file_name":"monitoring.py","file_ext":"py","file_size_in_byte":5445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"145670171","text":"# -*- coding=UTF-8 -*-\n# pyright: strict, reportTypeCommentUsage=none\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n\nfrom wulifang._util import assert_isinstance, cast_str\n\nTYPE_CHECKING = False\nif TYPE_CHECKING:\n from typing import Text\n\n\ndef raise_panel(name):\n # type: (Text,) -> None\n \"\"\"raise panel by name.\n\n Args:\n name (str): panel name, (e.g. DopeSheet.1)\n\n Raises:\n RuntimeError: when panel not found.\n \"\"\"\n\n from wulifang.vendor.Qt import QtWidgets\n\n for i in QtWidgets.QApplication.topLevelWidgets():\n panel = i.findChild(QtWidgets.QWidget, cast_str(name))\n if not panel:\n continue\n\n parent = panel.parentWidget()\n if not isinstance(parent, QtWidgets.QStackedWidget):\n continue\n parent = assert_isinstance(parent, QtWidgets.QStackedWidget)\n index = parent.indexOf(panel)\n parent = parent.parentWidget()\n if not isinstance(parent, QtWidgets.QWidget):\n continue\n tab = parent.findChild(QtWidgets.QTabBar)\n if not tab:\n continue\n tab.setCurrentIndex(index)\n panel.window().raise_()\n return\n else:\n raise RuntimeError(\"no such panel: %s\" % (name,))\n","sub_path":"wulifang/nuke/_util/_raise_panel.py","file_name":"_raise_panel.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"212153341","text":"#\n# @lc app=leetcode id=128 lang=python3\n#\n# [128] Longest Consecutive Sequence\n#\n# https://leetcode.com/problems/longest-consecutive-sequence/description/\n#\n# algorithms\n# Hard (40.59%)\n# Total Accepted: 185.8K\n# Total Submissions: 457.6K\n# Testcase Example: '[100,4,200,1,3,2]'\n#\n# Given an unsorted array of integers, find the length of the longest\n# consecutive elements sequence.\n#\n# Your algorithm should run in O(n) complexity.\n#\n# Example:\n#\n#\n# Input: [100, 4, 200, 1, 3, 2]\n# Output: 4\n# Explanation: The longest consecutive elements sequence is [1, 2, 3, 4].\n# Therefore its length is 4.\n#\n#\n#\nclass Solution:\n def longestConsecutive(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n s = set(nums)\n maxcount = 0\n\n while s:\n el = s.pop()\n count = 1\n\n # print(\"0: poped\", el)\n # find all elements to the left\n tmp = el - 1\n while tmp in s:\n s.remove(tmp)\n count += 1\n # print(\"A: counting\", tmp, count)\n tmp -= 1\n\n tmp = el + 1\n while tmp in s:\n s.remove(tmp)\n count += 1\n # print(\"B: counting\", tmp, count)\n tmp += 1\n if count > maxcount:\n maxcount = count\n return maxcount\n\n\ntest = False\nif test:\n s = Solution()\n print(s.longestConsecutive([100,4,200,1,3,2]))\n","sub_path":"128/128.longest-consecutive-sequence.py","file_name":"128.longest-consecutive-sequence.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"538327800","text":"\"\"\"\nGiven a string of characters, check to see if square, round, and curly brackets \nare balanced. Function will return a boolean testing if brackets are balanced.\n\"\"\"\n\n# Complexity - Time O(N) | Space O(N) where N is the length of the given string\ndef balancedBrackets(string):\n openingBrackets = '([{'\n closingBrackets = ')]}'\n matchingBrackets = {\")\": \"(\", \"]\": \"[\", \"}\": \"{\"}\n stack = []\n for char in string:\n if char in openingBrackets:\n stack.append(char)\n elif char in closingBrackets:\n if len(stack) == 0:\n return False\n if stack[-1] == matchingBrackets[char]:\n stack.pop()\n else:\n return False\n return len(stack) == 0\n","sub_path":"AlgoExpert/balancedbrackets.py","file_name":"balancedbrackets.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"563846180","text":"from typing import Optional\n\nfrom fastapi import FastAPI\nfrom pydantic import BaseModel\n\ntodo_app_api = FastAPI()\n\ntodo_list = [\n {\n \"id\": 1,\n \"task\": \"Do the dishes\",\n \"isDone\": False\n }\n]\n\nclass todo(BaseModel):\n id: int\n task: str\n isDone: Optional[bool] = False\n\n@todo_app_api.get(\"/todos\")\nasync def return_all_todos():\n return {\"todo_list\": todo_list} \n\n@todo_app_api.get(\"/todo/{id}\")\nasync def return_specific_todo(id: int):\n try:\n for existing_todo in todo_list:\n if(existing_todo[\"id\"] == id):\n return existing_todo\n except Exception as err:\n return {\"message\": f\"{err}\"}\n return {\"message\": \"No such todo exists\"} \n\n@todo_app_api.post(\"/todo/add\")\nasync def add_todo(todo: todo):\n try:\n todo_list.append(dict(todo))\n except Exception as err:\n return {\"message\": f\"{err}\"}\n return {\"message\": \"Todo added\"} \n\n@todo_app_api.delete(\"/todo/{id}\")\nasync def delete_todo(id: int):\n try:\n for existing_todo in todo_list:\n if(existing_todo[\"id\"] == id):\n todo_list.remove(existing_todo)\n except Exception as err:\n return {\"message\": f\"{err}\"}\n return {\"message\": f\"Todo #{id} Deleted\"} \n \n@todo_app_api.put(\"/todo/{id}\")\nasync def update_todo(id: int, updated_todo: todo):\n try:\n for existing_todo in todo_list:\n if(existing_todo[\"id\"] == id):\n todo_list.remove(existing_todo)\n todo_list.append(dict(updated_todo))\n except Exception as err:\n return {\"message\": f\"{err}\"}\n return {\"message\": f\"Todo #{id} Updated\"} \n\n@todo_app_api.put(\"/todo/toggle/{id}\")\nasync def toggle_todo(id: int):\n try:\n for existing_todo in todo_list:\n if(existing_todo[\"id\"] == id):\n temp = {\n \"id\":existing_todo[\"id\"],\n \"task\":existing_todo[\"task\"],\n \"isDone\":not existing_todo[\"isDone\"]\n }\n todo_list.remove(existing_todo)\n todo_list.append(temp)\n except Exception as err:\n return {\"message\": f\"{err}\"}\n return {\"message\": f\"Todo #{id} Toggled\"} \n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"70066559","text":"''' Which starting number, under one million, produces the longest chain? '''\n\ndef main():\n ''' Removes all elements in sequence from nums, reducing the number of\n values to check '''\n nums = {x for x in range(1, 1000000)}\n maximum = [0, 0]\n while nums:\n num = nums.pop()\n sequence = collatz(num)\n length = len(sequence)\n if length > maximum[1]:\n maximum = [num, length]\n nums.difference_update(sequence)\n print(maximum[0])\n\ndef collatz(num):\n ''' Return collatz sequence starting with num '''\n sequence = []\n while num != 1:\n if num % 2 == 0:\n num = int(num / 2)\n elif num % 2 == 1:\n num = 3 * num + 1\n sequence.append(num)\n return sequence\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"problems/Python/e014.py","file_name":"e014.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"449023308","text":"# Use this class to check the accuracy of POS recognition\n\n\nfrom lib.nlp.recognpos import POSRecognizer\nfrom lib.db import DB\nfrom lib.params import Params\nfrom lib.ui import progress, done, percentage\nimport os\nimport json\nfrom datetime import datetime\nfrom time import time\n\n\ndef log(string, f):\n if makeLogs:\n return f.write(string)\n else:\n return False\n\n\ndef logjson(obj, f, string=\"JSON is\"):\n log(\n \"\\n%s:\\n```json\\n%s\\n```\\nLength=%d\\n\\n\" % (\n string,\n json.dumps(\n obj,\n indent=4,\n default=lambda o: \"{l}
')\n return ''.join(html)\n\n def get_date(self, s):\n m = re.search(\"([0-9]{4}\\-[0-9]{2}\\-[0-9]{2})-\", s)\n return m.group(1)\n\n def run(self):\n info_path = os.path.join(self.blog_input_path, self.blog_entry_name, 'info.yml')\n with open(info_path) as fp:\n info = yaml.load(fp)\n\n template = self._template_env.get_template( 'blog_entry.jinja' )\n with open(self._blog_output_html, 'w') as fp:\n fp.write(template.render({\n 'galleries': self._gallery_links,\n 'title': info['title'],\n 'date': self.get_date(self.blog_entry_name),\n 'text': self.create_html(info['text']),\n }))\n\n\n","sub_path":"app/run_tools/build_blog_entry.py","file_name":"build_blog_entry.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"75493458","text":"# -*- coding: utf-8 -*-\n\nfrom orm import User, SupportProvider, TroubleTicket, TroubleDealLog, TroubleTask, SupportProvider, session_scope\nfrom orm import TroubleCategory, ImpactArea, Region, Attachment, Tag, Wiki, wiki_tag, trouble_attachment\nfrom const import const\nimport logging, time, datetime\nfrom apis import APIValueError, APIError\nfrom sqlalchemy.exc import IntegrityError\n\nimport pdb\n\n\n\ndef addTroubleTicket(report_channel, type, region, level, description, \n\t\timpact, startTime, custid, mac, contact, contact_phone, \n\t\tcreate_user, create_user_name, deal_user, deal_user_name, attachments):\n\tlogging.info('创建工单,创建人: %s' % create_user_name)\n\tif not report_channel or not report_channel.strip():\n\t\traise APIValueError('report_channel','上报渠道不能为空')\n\tif not type or not type.strip():\n\t\traise APIValueError('trouble_type','故障类型不能为空')\n\tif not region or not region.strip():\n\t\traise APIValueError('region','故障地市不能为空')\n\tif not level or not level.strip():\n\t\traise APIValueError('level','故障级别不能为空')\n\tif not description or not description.strip():\n\t\traise APIValueError('description', '故障现象不能为空')\n\tif not contact or not contact.strip():\n\t\traise APIValueError('contact','故障联系人不能为空')\n\tif not create_user: \n\t\traise APIValueError('create_user', '工单创建人不能为空')\t\n\tif not deal_user: \n\t\traise APIValueError('deal_user', '当前故障处理人不能为空')\n\t\n\ttry:\n\t\t# 解析故障开始时间字符串到datetime对象\n\t\tst = datetime.datetime.strptime(startTime, '%Y-%m-%dT%H:%M')\n\texcept Exception as e:\n\t\traise APIValueError('datetime','故障日期格式不正确')\n\t\n\twith session_scope() as session:\n\t\ttroubleTicket = TroubleTicket(report_channel, type, region, level, description, \n\t\t\timpact, startTime, custid, mac, contact, contact_phone, \n\t\t\tcreate_user, create_user_name, deal_user, deal_user_name)\n\t\t#添加工单处理日志\n\t\tsession.add(troubleTicket)\n\t\tsession.flush()\n\t\tuser = session.query(User).join(User.support_provider).filter(User.id==deal_user).one()\n\t\tdealingLog = addTroubleLog(user, troubleTicket.id, '', const.DEALING_CREATE, None)\n\t\tsession.add(dealingLog)\n\t\t\n\t\tif len(attachments) > 0:\n\t\t\tfor attachment in attachments:\n\t\t\t\tstatement = trouble_attachment.insert().values(trouble_id=troubleTicket.id,attachment_id=attachment)\n\t\t\t\tsession.execute(statement)\n\t\tsession.commit()\n\n\n\tres = dict()\n\tres['returncode'] = const.RETURN_OK\n\tres['message'] = '故障工单添加成功'\n\treturn res\n\ndef updateTroubleTicket(tid, report_channel, type, region, level, description, \n\t\timpact, custid, mac, contact, contact_phone, \n\t\tdeal_user, deal_user_name):\n\tlogging.info('修改工单,修改人: %s' % deal_user_name)\n\tif not report_channel or not 
report_channel.strip():\n\t\traise APIValueError('report_channel','上报渠道不能为空')\n\tif not type or not type.strip():\n\t\traise APIValueError('trouble_type','故障类型不能为空')\n\tif not region or not region.strip():\n\t\traise APIValueError('region','故障地市不能为空')\n\tif not level or not level.strip():\n\t\traise APIValueError('level','故障级别不能为空')\n\tif not description or not description.strip():\n\t\traise APIValueError('description', '故障现象不能为空')\n\tif not contact or not contact.strip():\n\t\traise APIValueError('contact','故障联系人不能为空')\n\tif not deal_user: \n\t\traise APIValueError('deal_user', '当前故障处理人不能为空')\n\t\n\twith session_scope() as session:\n\t\tticket = session.query(TroubleTicket).filter('id=' + str(tid)).one()\n\t\tif not ticket:\n\t\t\traise APIValueError('troubleticket', '工单号不存在')\n\t\tticket.report_channel = report_channel\n\t\tticket.type = type\n\t\tticket.region = region\n\t\tticket.level = level\n\t\tticket.description = description\n\t\tticket.impact = impact\n\t\tticket.custid = custid\n\t\tticket.mac = mac\n\t\tticket.contact = contact\n\t\tticket.contact_phone = contact_phone\n\t\tticket.deal_user = deal_user\n\t\tticket.deal_user_name = deal_user_name\n\t\tsession.commit()\n\n\tres = dict()\n\tres['returncode'] = const.RETURN_OK\n\tres['message'] = '更新工单成功'\n\treturn res\n\n# 返回工单查询过滤字段\ndef getTroubleFilters(status, filterflag=False, stime='', etime='', region='', level='', confirmedType=''):\n\tfilters = ()\n\tif status.upper() != const.STATUS_ALL:\n\t\tfilters = filters + (TroubleTicket.status == status,)\n\tif filterflag:\n\t\tif(stime == ''):\n\t\t\tstime = '1970-1-1'\n\t\tif(etime == ''):\n\t\t\tetime = datetime.datetime.strftime(datetime.datetime.now() + datetime.timedelta(days=1),'%Y-%m-%d')\n\t\telse:\n\t\t\t# 结束日期+1 以在数据库查询比较日期时能包含当日数据\n\t\t\tetime = datetime.datetime.strftime(datetime.datetime.strptime(etime,'%Y-%m-%d') + datetime.timedelta(days=1),'%Y-%m-%d')\n\t\tfilters = filters + (TroubleTicket.startTime > stime,)\n\t\tfilters = filters + (TroubleTicket.startTime < etime,)\n\t\tif(region != ''):\n\t\t\tfilters = filters + (TroubleTicket.region == region,)\n\t\tif(level != ''):\n\t\t\tfilters = filters + (TroubleTicket.level == level,)\n\t\tif(confirmedType != ''):\n\t\t\tfilters = filters + (TroubleTicket.confirmed_type == confirmedType,)\n\treturn filters\n\ndef getAllTroubleCount(status, filterflag=False, stime='', etime='', region='', level='', confirmedType=''):\n\tfilters = getTroubleFilters(status, filterflag, stime, etime, region, level, confirmedType)\n\treturn TroubleTicket.getTroubleCount(*filters)\n\ndef getTroublePageByStatus(page, items_perpage, status, filterflag=False, stime='', etime='', region='', level='', confirmedType=''):\n\tfilters = getTroubleFilters(status, filterflag, stime, etime, region, level, confirmedType)\n\treturn TroubleTicket.getTroublePage(page, items_perpage, *filters)\n\n\ndef getTaskCountByProvider(providerID):\n\tfilters = {TroubleTask.support_provider == providerID, TroubleTask.status != 1}\n\treturn TroubleTask.getTaskCount(*filters)\n\ndef getTask(providerID):\n\tfilters = {TroubleTask.support_provider == providerID, TroubleTask.status != 1}\n\treturn TroubleTask.getTask(*filters)\n\ndef getTaskPage(page, items_perpage, providerID):\n\tfilters = {TroubleTask.support_provider == providerID, TroubleTask.status != 1}\n\treturn TroubleTask.getTaskPage(page, items_perpage, *filters)\n\ndef getDealLogByTrouble(troubleId):\n\treturn TroubleDealLog.getDealLogByTrouble(troubleId)\n\ndef getProvider():\n\treturn 
SupportProvider.getAll()\n\ndef dealingTrouble(troubleid, dealingtype, nextprovider, reply, uid):\n\tres = dict()\n\t#生成任务\n\twith session_scope() as session:\n\t\ttry:\n\t\t\tif(dealingtype != const.DEALING_FINISHED):\n\t\t\t\t#生成下一个任务\n\t\t\t\tcreatetime = datetime.datetime.now()\n\t\t\t\tassign_user = uid\n\t\t\t\tnewTask = TroubleTask(trouble_ticket=troubleid, support_provider=nextprovider,\n\t\t\t\t\tremark=reply, createtime=createtime, assign_user=assign_user, status=0)\n\t\t\t\tsession.add(newTask)\n\t\t\t#添加工单处理记录\n\t\t\tuser = session.query(User).join(User.support_provider).filter(User.id==uid).one()\n\t\t\tdealingLog = addTroubleLog(user, troubleid, reply, dealingtype, nextprovider)\n\t\t\tsession.add(dealingLog)\n\n\t\t\t#更新工单状态\n\t\t\t\n\t\t\ttroubleTicketStatus = ''\n\t\t\ttrouble = session.query(TroubleTicket).filter(TroubleTicket.id==troubleid).one()\n\n\t\t\tif(dealingtype == const.DEALING_FINISHED):\n\n\t\t\t\tif not checkPermission(user.permission,'FIN'):\n\t\t\t\t\traise APIError('结单失败', 'permission', '你没有该权限')\n\t\t\t\ttroubleTicketStatus = const.STATUS_FINISHED\n\t\t\t\ttrouble.endtime = datetime.datetime.now()\n\t\t\telse:\n\t\t\t\ttroubleTicketStatus = const.STATUS_DEALING\n\t\t\ttrouble.status = troubleTicketStatus\n\t\t\ttrouble.deal_user = uid\n\t\t\ttrouble.deal_user_name = user.name\n\t\t\ttrouble.dealingtime = datetime.datetime.now()\n\t\t\tsession.commit()\n\t\texcept IntegrityError:\n\t\t\traise APIError('处理失败', 'database', '转派厂家信息异常')\n\tres['returncode'] = const.RETURN_OK\n\tres['message'] = '任务工单处理成功' + dealingtype\n\treturn res\n\n\n# 工单处理\ndef dealingTask(dealingtype, taskid, nextprovider, reply, confirmedtype, uid):\n\tres = dict()\n\twith session_scope() as session:\n\t\ttry:\n\t\t\tuser = session.query(User).join(User.support_provider).filter(User.id==uid).one()\n\t\t\t#更新当前工单,接单不添加流转记录,仅更新状态和接单时间\n\t\t\ttask = session.query(TroubleTask).filter(TroubleTask.id==taskid).one()\n\t\t\tif(dealingtype == const.DEALING_ACCEPT):\n\t\t\t\ttask.status = const.TASK_ACCEPTED\n\t\t\t\ttask.accepttime = datetime.datetime.now()\n\t\t\telse:\n\t\t\t\ttask.status = const.TASK_FINISHED\n\t\t\t\ttask.reply = reply\n\t\t\t\ttask.endtime = datetime.datetime.now()\n\t\t\t\t#添加工程单处理记录\n\t\t\t\tdealingLog = addTroubleLog(user, task.trouble.id, reply, dealingtype, nextprovider)\n\t\t\t\tsession.add(dealingLog)\n\t\t\t\n\t\t\t#接单和完成任务均不生成下一个任务\n\t\t\tif(dealingtype != const.DEALING_FINISHED and dealingtype != const.DEALING_ACCEPT):\n\t\t\t\t#生成下一个任务\n\t\t\t\ttrouble_ticket = task.trouble.id\n\t\t\t\tsupport_provider = nextprovider\n\t\t\t\tremark = reply\n\t\t\t\tcreatetime = datetime.datetime.now()\n\t\t\t\tassign_user = uid\n\t\t\t\tnewTask = TroubleTask(trouble_ticket=trouble_ticket, support_provider=support_provider,\n\t\t\t\t\tremark=remark, createtime=createtime, assign_user=assign_user, status=0)\n\t\t\t\tsession.add(newTask)\n\t\t\n\t\t\t#更新工单状态\n\t\t\ttroubleTicketStatus = ''\n\t\t\ttrouble = session.query(TroubleTicket).filter(TroubleTicket.id==task.trouble.id).one()\n\n\t\t\tif(dealingtype == const.DEALING_FINISHED):\n\n\t\t\t\tif not checkPermission(user.permission,'FIN'):\n\t\t\t\t\traise APIError('结单失败', 'permission', '你没有该权限')\n\t\t\t\ttroubleTicketStatus = const.STATUS_FINISHED\n\t\t\t\ttrouble.endtime = datetime.datetime.now()\n\t\t\t\tif not confirmedtype or not confirmedtype.strip():\n\t\t\t\t\tsession.rollback()\n\t\t\t\t\traise APIValueError('confirmedtype','问题归类不能为空')\n\t\t\t\telse:\n\t\t\t\t\ttrouble.confirmed_type = 
confirmedtype\n\t\t\telse:\n\t\t\t\ttroubleTicketStatus = const.STATUS_DEALING\n\t\t\ttrouble.status = troubleTicketStatus\n\t\t\ttrouble.deal_user = uid\n\t\t\ttrouble.deal_user_name = user.name\n\t\t\ttrouble.dealingtime = datetime.datetime.now()\n\t\t\tsession.commit()\n\t\texcept IntegrityError:\n\t\t\traise APIError('处理失败', 'database', '转派厂家信息异常')\n\tres['returncode'] = const.RETURN_OK\n\tres['message'] = '任务工单处理成功' + dealingtype\n\treturn res\n\ndef addTroubleLog(user, troubleId, reply, dealingtype, nextprovider):\n\t\n\tdeal_user_name = user.name\n\tsupport_provider_name = user.support_provider.provider_name\n\ttrouble_ticket_id = troubleId\n\tdeal_user = user.id\n\tlog_type = dealingtype\n\tif(not nextprovider or int(nextprovider) < 1):\n\t\tnextprovider = None\n\tdealingLog = TroubleDealLog(trouble_ticket_id=trouble_ticket_id, deal_user=deal_user,remark=reply,\n\t\tlog_type=log_type, deal_user_name=deal_user_name, support_provider_name=support_provider_name,\n\t\tcreatetime=datetime.datetime.now(),next_provider_id=nextprovider)\n\treturn dealingLog\n\ndef getTroubleCategory(categoryType):\n\treturn TroubleCategory.getCategory(categoryType)\n\ndef getImpactArea():\n\treturn ImpactArea.getImpactArea()\n\ndef getRegion():\n\treturn Region.getRegion()\n\n# 通过用户或分组的权限字符串检查特定权限\ndef checkPermission(userPermission, needed):\n\tpermissionlist = userPermission.split('|')\n\treturn needed in permissionlist\n\ndef addAttachment(userId, uuid, docType, filename, size):\n\tattachementId = 0\n\twith session_scope() as session:\n\t\tattachment = Attachment(user_id=userId, uuid=uuid, doc_type=docType, filename=filename, size=size)\n\t\tsession.add(attachment)\n\t\tsession.commit()\n\t\tattachementId = attachment.id\n\treturn attachementId\n\ndef addTag(tagName):\n\ttagId = 0\n\twith session_scope() as session:\n\t\tnewTag = Tag(name=tagName)\n\t\tsession.add(newTag)\n\t\tsession.commit()\n\t\ttagId = newTag.id\n\treturn tagId\n\ndef getAllTags(*filters):\n\treturn Tag.getAll(*filters)\n\ndef addWiki(userid, username,\n\t\tsubject, summary, tags, attachmentId):\n\twikiId = 0\n\tres = dict()\n\twith session_scope() as session:\n\t\twiki = Wiki(subject=subject, summary=summary, attachment=attachmentId, \n\t\t\tcreate_time=datetime.datetime.now(), create_user=userid,\n\t\t\tcreate_user_name=username)\n\t\tsession.add(wiki)\n\t\tsession.commit()\n\t\twikiId = wiki.id\n\t\tfor tagid in tags:\n\t\t\tstatement = wiki_tag.insert().values(wiki_id=wikiId,tag_id=tagid)\n\t\t\tsession.execute(statement)\n\t\tsession.commit()\n\tres['returncode'] = const.RETURN_OK\n\tres['message'] = '添加案例成功, 案例名称:' + subject \n\treturn res\n\ndef getWikiCount(tag=''):\n\tif tag != '':\n\t\t# filters = ()\n\t\t# filters = filters + (Wiki.tags == status,)\n\t\treturn Wiki.getWikiCount(tag)\n\telse:\n\t\treturn Wiki.getWikiCount()\n\ndef getWikiPage(page, items_perpage, tag=''):\n\tif tag != '':\n\t\treturn Wiki.getWikiPage(page, items_perpage,tag)\n\telse:\n\t\treturn Wiki.getWikiPage(page, items_perpage)\n\n\n\n\n","sub_path":"www/trouble.py","file_name":"trouble.py","file_ext":"py","file_size_in_byte":12693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"105167104","text":"import os\nimport cv2\nimport glob\nimport time\nimport numpy as np\n\nCALIBRATION_SIZE = (8,6)\n\n\ndef multiview_reprojection_error(objpoints, imgpoints, rvecs, tvecs, mtx, dist):\n # Calculate reprojection error\n total_error = 0\n for i in range(len(objpoints)):\n imgpoints2, _ = 
cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)\n\n        error = cv2.norm(imgpoints[i],imgpoints2, cv2.NORM_L2)/len(imgpoints2)\n        total_error += error\n\n    mean_error = total_error/len(objpoints)\n    return mean_error\n\n\n\ndef reprojection_error(objpoints, imgpoints, rvec, tvec, mtx, dist):\n    \"\"\"Calculate reprojection error with a single view\"\"\"\n    imgpoints2, _ = cv2.projectPoints(objpoints, rvec, tvec, mtx, dist)\n    error = cv2.norm(imgpoints, imgpoints2, cv2.NORM_L2)/len(imgpoints2)\n    return error\n\n\n\ndef draw_cube(img, corners, imgpts):\n    imgpts = np.int32(imgpts).reshape(-1,2)\n\n    # draw ground floor in green\n    img = cv2.drawContours(img, [imgpts[:4]],-1,(0,255,0),-3)\n\n    # draw pillars in blue color\n    for i,j in zip(range(4),range(4,8)):\n        img = cv2.line(img, tuple(imgpts[i]), tuple(imgpts[j]),(255),3)\n\n    # draw top layer in red color\n    img = cv2.drawContours(img, [imgpts[4:]],-1,(0,0,255),3)\n\n    return img\n\n\n\ndef draw_axis(img, imgpts):\n    \"\"\"Draw an axis located at imgpts[0]\"\"\"\n    origin = tuple(imgpts[0].ravel())\n    img = cv2.line(img, origin, tuple(imgpts[1].ravel()), (255,0,0), 5)\n    img = cv2.line(img, origin, tuple(imgpts[2].ravel()), (0,255,0), 5)\n    img = cv2.line(img, origin, tuple(imgpts[3].ravel()), (0,0,255), 5)\n    return img\n\n\n\ndef imshow(img, scale=0.4):\n    \"\"\"Resize and display image\n    Wait for a key press to continue; press 's' to save the image\n    \"\"\"\n    img = cv2.resize(img, None, fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)\n    cv2.imshow('img',img)\n    k = cv2.waitKey(0) & 0xff\n    if k == ord('s'):\n        # 'fname' was undefined here; save under a fixed name instead\n        cv2.imwrite('capture.png', img)\n\n\n\ndef get_objp(calibration_size, z=0):\n    \"\"\"Return object points for chessboard\"\"\"\n    n = calibration_size[0]\n    m = calibration_size[1]\n    objp = np.zeros((m*n,3), np.float32)\n    objp[:,:2] = np.mgrid[0:n,0:m].T.reshape(-1,2)\n    objp[:,2] = z\n    return objp\n\n\n\ndef collect_calibration_images(camera, folder, n=100):\n    # termination criteria\n    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)\n\n    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\n    objp = get_objp(CALIBRATION_SIZE)\n\n    # Arrays to store object points and image points from all the images.\n    objpoints = [] # 3d point in real world space\n    imgpoints = [] # 2d points in image plane.\n\n    count = 0\n\n    raw_image_folder = os.path.join(folder, \"raw\")\n    test_image_folder = os.path.join(folder, \"pattern\")\n\n    os.makedirs(raw_image_folder)\n    os.makedirs(test_image_folder)\n\n    for _ in range(n):\n\n        # Get a camera image\n        img = camera.get_image(verbose=True)\n\n        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n        # Find the chess board corners\n        ret, corners = cv2.findChessboardCorners(gray, CALIBRATION_SIZE, None)\n\n        # If found, add object points, image points (after refining them)\n        if not ret:\n            print(\"Failed to find chessboard\")\n            time.sleep(1)\n        else:\n            # Save original image\n            filename = os.path.join(folder, \"raw\", \"%i.png\"%count)\n            cv2.imwrite(filename, img)\n\n            objpoints.append(objp)\n\n            corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)\n            imgpoints.append(corners2)\n\n            # Draw and display the corners (use the configured pattern size)\n            img = cv2.drawChessboardCorners(img, CALIBRATION_SIZE, corners2,ret)\n\n            # Draw the calibration image\n            filename = os.path.join(folder, \"pattern\", \"%i.png\"%count)\n            cv2.imwrite(filename, img)\n            count+=1\n\n\n\ndef calibrate_camera_intrinsics(camera, folder, debug=False):\n    # termination criteria\n    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)\n\n    # prepare 
object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\n    objp = get_objp(CALIBRATION_SIZE)\n\n    # Arrays to store object points and image points from all the images.\n    objpoints = [] # 3d point in real world space\n    imgpoints = [] # 2d points in image plane.\n    image_folder = os.path.join(folder, \"raw\")\n\n    for filename in os.listdir(image_folder):\n\n        img_path = os.path.join(image_folder, filename)\n        img = cv2.imread(img_path,-1)\n\n        if img is None:\n            raise ValueError(\"Cannot read file\", img_path)\n\n        gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\n        # Find the chess board corners\n        print(\"Searching for chessboard corners\")\n        ret, corners = cv2.findChessboardCorners(gray, CALIBRATION_SIZE, None)\n\n        # If found, add object points, image points (after refining them)\n        if ret:\n            corners2 = cv2.cornerSubPix(gray, corners,(11,11),(-1,-1),criteria)\n            imgpoints.append(corners2)\n            objpoints.append(objp)\n\n            # Draw and display the corners\n            if debug:\n                img = cv2.drawChessboardCorners(img, CALIBRATION_SIZE, corners2,ret)\n                imshow(img)\n                cv2.destroyAllWindows()\n        else:\n            print(\"Could not find chessboard corners\")\n\n    cv2.destroyAllWindows()\n\n    # Perform the calibration\n    print(\"Performing calibration\")\n    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)\n\n    if not ret:\n        raise ValueError(\"Failed to calibrate \", camera.name)\n\n    # Store the intrinsic parameters\n    camera.set_intrinsics(mtx, dist)\n    print(\"Successfully calibrated \",camera.name)\n\n    # Return the calibrated camera\n    return camera\n\n\n\ndef take_extrinsic_photo(camera, path, debug=True):\n    \"\"\"\n    Take a photo for extrinsic calculation\n    Check that the chessboard can be found\n    Save it to the correct folder\n    \"\"\"\n    # Arrays to store object points and image points from all the images.\n    imgpoints = [] # 2d points in image plane.\n    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)\n\n    message = \"Press enter to take photo = {0}\"\n    input(message.format(path))\n\n    while True:\n        # Take an image of the extrinsic\n        img = camera.get_image()\n\n        if img is None:\n            raise ValueError(\"Cannot read image from camera\", img)\n\n        gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\n        # Find the chess board corners\n        print(\"Searching for chessboard corners\")\n        ret, corners = cv2.findChessboardCorners(gray, CALIBRATION_SIZE, None)\n\n        # If found, add object points, image points (after refining them)\n        if ret:\n            corners2 = cv2.cornerSubPix(gray, corners,(11,11),(-1,-1),criteria)\n            chess_image = img.copy()\n            chess_image = cv2.drawChessboardCorners(chess_image, CALIBRATION_SIZE, corners2, ret)\n            # Draw and display the corners\n            if debug:\n                imshow(chess_image)\n                cv2.destroyAllWindows()\n            # Break as soon as we have a good photo\n            break\n        else:\n            print(\"Could not find chessboard corners\")\n\n    # Save the chessboard corners\n    print(\"Saving file to \", path)\n    cv2.imwrite(path, img)\n\n    # Save the chess image as well\n    chess_path = path.replace(\".png\",\"-chess.png\")\n    print(\"Saving chess version\", chess_path)\n    cv2.imwrite(chess_path, chess_image)\n\n    # Return the calibrated camera\n    return camera\n\n\n\n\ndef calibrate_camera_extrinsics(camera, config, debug=True):\n    # termination criteria\n    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)\n\n    # Arrays to store object points and image points from all the images.\n    objpoints = [] # 3d point in real world space\n    imgpoints = [] # 2d points in image plane.\n\n    for item in 
[config[0]]:\n        z = item[\"z\"]\n        img_path = item[\"file\"]\n\n        # prepare object points\n        objp = get_objp(CALIBRATION_SIZE, z=z)\n\n        img = cv2.imread(img_path,-1)\n        #img = cv2.resize(img, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_CUBIC)\n\n        if img is None:\n            raise ValueError(\"Cannot read file\", img_path)\n\n        gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\n        # Find the chess board corners\n        print(\"Searching for chessboard corners\")\n        ret, corners = cv2.findChessboardCorners(gray, CALIBRATION_SIZE, None)\n\n        # If found, add object points, image points (after refining them)\n        if ret:\n            corners2 = cv2.cornerSubPix(gray, corners,(11,11),(-1,-1),criteria)\n            imgpoints.append(corners2)\n            objpoints.append(objp)\n\n            # Draw and display the corners\n            img = cv2.drawChessboardCorners(img, CALIBRATION_SIZE, corners2,ret)\n\n            if debug:\n                imshow(img)\n                cv2.destroyAllWindows()\n        else:\n            print(\"Could not find chessboard corners\")\n\n    objpoints = np.vstack(objpoints)\n    imgpoints = np.vstack(imgpoints)\n\n    print(\"\\nCalibrating extrinsics ... \")\n    ret, rvec, tvec = cv2.solvePnP(objpoints, imgpoints, camera.mtx, camera.dist, flags=cv2.SOLVEPNP_ITERATIVE)\n\n    if ret:\n        camera.set_extrinsics(rvec, tvec)\n        draw_cube_on_chessboard(camera, img_path)\n        print(\"Successfully calibrated extrinsics for:\", camera.name)\n    else:\n        raise ValueError(\"Failed to calibrate \", camera.name)\n\n    # Return the calibrated camera\n    return camera\n\n\n\n\ndef calibrate(chessboard_image, test_image, debug=True):\n    # termination criteria\n    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)\n\n    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\n    objp = get_objp(CALIBRATION_SIZE)\n\n    # Arrays to store object points and image points from all the images.\n    objpoints = [] # 3d point in real world space\n    imgpoints = [] # 2d points in image plane.\n\n    img = chessboard_image\n    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\n    # Find the chess board corners\n    print(\"Searching for chessboard corners\")\n    ret, corners = cv2.findChessboardCorners(gray, CALIBRATION_SIZE, None)\n    print(\"Done\")\n\n    # If found, add object points, image points (after refining them)\n    if ret:\n        corners2 = cv2.cornerSubPix(gray, corners,(11,11),(-1,-1),criteria)\n\n        imgpoints.append(corners2)\n        objpoints.append(objp)\n\n        # Draw and display the corners\n        img = cv2.drawChessboardCorners(img, CALIBRATION_SIZE, corners2,ret)\n\n        output = cv2.resize(img, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_CUBIC)\n\n        if debug:\n            cv2.imshow('img',output)\n            cv2.waitKey(0)\n\n    else:\n        raise ValueError(\"Could not find chessboard corners\")\n\n    cv2.destroyAllWindows()\n\n    # Perform the calibration\n    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)\n\n    # Print the calibration error (use the per-view vectors from calibrateCamera;\n    # 'camera' is not defined in this function)\n    error = multiview_reprojection_error(objpoints, imgpoints, rvecs, tvecs, mtx, dist)\n    print(\"Calibration reprojection error: %.4f\"%error)\n\n    # Stack the calibration points unconditionally; solvePnP needs a single point set\n    if debug:\n        print(\"Stacking calibration points to determine rotation\")\n    objpoints = np.vstack(objpoints)\n    imgpoints = np.vstack(imgpoints)\n\n\n    # Get the rotation and translation matrices\n    print(\"\\nCalibrating...\")\n    ret, rvec, tvec = cv2.solvePnP(objpoints, imgpoints, mtx, dist, flags=cv2.SOLVEPNP_ITERATIVE)\n\n    # Stats\n    print(\"Success: \",ret)\n    print(\"Rvec:\\n\",rvec)\n    print(\"Tvec:\\n\",tvec)\n    print(\"Reprojection Error:\",reprojection_error(objpoints, imgpoints, rvec, tvec, mtx, dist))\n\n    # Draw cube\n    if debug:\n        box 
= np.float32([[0,0,0], [0,3,0], [3,3,0], [3,0,0],\n                   [0,0,-3],[0,3,-3],[3,3,-3],[3,0,-3] ])\n\n        imgpts, _ = cv2.projectPoints(box, rvec, tvec, mtx, dist)\n        img = test_image\n        img = draw_cube(img, None, imgpts)\n        imshow(img)\n        cv2.destroyAllWindows()\n\n    return rvec, tvec, mtx, dist\n\n\n\ndef draw_axis_on_image(camera, image_file, out_file):\n    \"\"\"\n    Draw an axis on the image for testing purposes\n    Returns the annotated image\n    \"\"\"\n    img = cv2.imread(image_file)\n    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\n    # Define the box coordinates\n    axis = np.float32([[0,0,0], [6,0,0], [0,6,0], [0,0,-6]]).reshape(-1,3)\n\n    # project 3D points to image plane\n    imgpts, _ = cv2.projectPoints(axis, camera.r, camera.t, camera.mtx, camera.dist)\n    rpe = reprojection_error(axis, imgpts, camera.r, camera.t, camera.mtx, camera.dist)\n    print(\"Axis reprojection error:\",rpe)\n\n    img = draw_axis(img, imgpts)\n\n    cv2.imwrite(out_file, img)\n\n    return img\n\n\ndef draw_cube_on_chessboard(camera, filename, debug=True):\n    \"\"\"\n    Draw a cube on the chessboard for testing purposes\n    Returns the rotation and translation vectors\n    \"\"\"\n    img = cv2.imread(filename)\n    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\n    # Find the chessboard corners\n    ret, corners = cv2.findChessboardCorners(gray, CALIBRATION_SIZE, None)\n\n    # Define the box coordinates\n    axis = np.float32([[0,0,0], [0,3,0], [3,3,0], [3,0,0],\n                   [0,0,-3],[0,3,-3],[3,3,-3],[3,0,-3] ])\n\n    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)\n    objp = get_objp(CALIBRATION_SIZE)\n\n    if not ret:\n        raise ValueError(\"Could not find chessboard corners\")\n\n    corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)\n\n    # Find the rotation and translation vectors.\n    ret, rvecs, tvecs = cv2.solvePnP(objp, corners2, camera.mtx, camera.dist, flags=cv2.SOLVEPNP_ITERATIVE)\n\n    # Get reprojection error\n    imgpts, _ = cv2.projectPoints(objp, rvecs, tvecs, camera.mtx, camera.dist)\n    rpe = reprojection_error(objp, imgpts, rvecs, tvecs, camera.mtx, camera.dist)\n    print(\"Chessboard reprojection error:\",rpe)\n\n    # project 3D points to image plane\n    imgpts, _ = cv2.projectPoints(axis, rvecs, tvecs, camera.mtx, camera.dist)\n    rpe = reprojection_error(axis, imgpts, rvecs, tvecs, camera.mtx, camera.dist)\n    print(\"Box reprojection error:\",rpe)\n\n    img = draw_cube(img, corners2, imgpts)\n\n    if debug:\n        imshow(img)\n\n    return rvecs, tvecs\n\n\n\n\ndef undistort(images, mtx, dist):\n    #for i,image in enumerate(sorted(glob.glob('photos/iphone/*.jpg'))):\n    for i,image in enumerate(images):\n        print(\"Processing image\",image)\n        img = cv2.imread(image)\n        h, w = img.shape[:2]\n        newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx,dist,(w,h),1,(w,h))\n\n        # Undistort the image\n        dst = cv2.undistort(img, mtx, dist, None, newcameramtx)\n\n        # crop the image\n        x,y,w,h = roi\n        dst = dst[y:y+h, x:x+w]\n        cv2.imwrite('output/image%i.png'%i,dst)\n\n\n\n\nif __name__==\"__main__\":\n    note_images = sorted(glob.glob('photos/note/cb*.jpg'))\n    note_test = 'photos/note/all.jpg'\n\n    iphone_images = sorted(glob.glob('photos/iphone/cb*.jpg'))\n    iphone_test = 'photos/iphone/all.jpg'\n\n    # calibrate() expects loaded images, so read the first chessboard capture and the test shot\n    print(\"Calibrating Note\")\n    calibrate(cv2.imread(note_images[0]), cv2.imread(note_test))\n\n    print(\"Calibrating iPhone\")\n    calibrate(cv2.imread(iphone_images[0]), cv2.imread(iphone_test))\n\n\n","sub_path":"calibrate.py","file_name":"calibrate.py","file_ext":"py","file_size_in_byte":15263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"298330809","text":"import numpy as np\nfrom types import MethodType, FunctionType\nfrom RamanControl_wrapper import *\nfrom 
ctypes import c_int, c_double, c_char_p, POINTER, Structure\n\n\nclass ADict(dict):\n \"\"\"\n Dictionary where you can access keys as attributes\n \"\"\"\n def __getattr__(self, item):\n try:\n return self[item]\n except KeyError:\n dict.__getattribute__(self, item)\n\n\nclass RamanControl:\n \"\"\"\n \"\"\"\n\n def __init__(self, params, **kwargs):\n \"\"\"\n __init__ function call to initialize variables from the\n parameters for the class instance provided in __main__ and\n add new variables for use in other functions in this class.\n \"\"\"\n\n for name, value in kwargs.items():\n if isinstance(value, FunctionType):\n setattr(self, name, MethodType(value, self, self.__class__))\n else:\n setattr(self, name, value)\n\n self.time_AE = np.linspace(-params.timeAMP_AE, params.timeAMP_AE, params.timeDIM_AE)\n self.time_VR = np.linspace(-params.timeAMP_VR, params.timeAMP_VR, params.timeDIM_VR)\n\n self.frequency_A = 1./np.linspace(1./params.frequencyMAX_A, 1./params.frequencyMIN_A, params.frequencyDIM_AE)\n self.frequency_E = 1./np.linspace(1./params.frequencyMAX_E, 1./params.frequencyMIN_E, params.frequencyDIM_AE)\n self.frequency_VR = 1./np.linspace(1./params.frequencyMAX_VR, 1./params.frequencyMIN_VR, params.frequencyDIM_VR)\n\n self.field_A = np.empty(params.timeDIM_AE, dtype=np.complex)\n self.field_E = np.empty(params.timeDIM_AE, dtype=np.complex)\n self.field_V = np.empty(params.timeDIM_VR, dtype=np.complex)\n self.field_R = np.empty(params.timeDIM_VR, dtype=np.complex)\n\n self.gamma_decay = np.ascontiguousarray(self.gamma_decay)\n self.gamma_pure_dephasingA = np.ascontiguousarray(self.gamma_pure_dephasingA)\n self.gamma_pure_dephasingB = np.ascontiguousarray(self.gamma_pure_dephasingB)\n self.mu = np.ascontiguousarray(self.mu)\n self.rho_0 = np.ascontiguousarray(params.rho_0_A)\n self.rhoA = np.ascontiguousarray(params.rho_0_A.copy())\n self.rhoB = np.ascontiguousarray(params.rho_0_A.copy())\n self.energies_A = np.ascontiguousarray(self.energies_A)\n self.energies_B = np.ascontiguousarray(self.energies_B)\n\n N = len(self.energies_A)\n\n self.abs_spectraA = np.ascontiguousarray(np.zeros(len(self.frequency_A)))\n self.abs_spectraB = np.ascontiguousarray(np.zeros(len(self.frequency_A)))\n self.ems_spectraA = np.ascontiguousarray(np.zeros(len(self.frequency_E)))\n self.ems_spectraB = np.ascontiguousarray(np.zeros(len(self.frequency_E)))\n self.vib_spectraA = np.ascontiguousarray(np.zeros(len(self.frequency_VR)))\n self.vib_spectraB = np.ascontiguousarray(np.zeros(len(self.frequency_VR)))\n self.Raman_spectraA = np.ascontiguousarray(np.zeros(len(self.frequency_VR)))\n self.Raman_spectraB = np.ascontiguousarray(np.zeros(len(self.frequency_VR)))\n\n self.dyn_rho_A = np.ascontiguousarray(np.zeros((N, params.timeDIM_VR)), dtype=np.complex)\n self.dyn_rho_B = np.ascontiguousarray(np.zeros((N, params.timeDIM_VR)), dtype=np.complex)\n\n def create_molecules(self, molA, molB):\n molA.nDIM = len(self.energies_A)\n molA.energies = self.energies_A.ctypes.data_as(POINTER(c_double))\n molA.gamma_decay = self.gamma_decay.ctypes.data_as(POINTER(c_double))\n molA.gamma_pure_dephasing = self.gamma_pure_dephasingA.ctypes.data_as(POINTER(c_double))\n molA.mu = self.mu.ctypes.data_as(POINTER(c_complex))\n molA.rho = self.rhoA.ctypes.data_as(POINTER(c_complex))\n molA.rho_0 = self.rho_0.ctypes.data_as(POINTER(c_complex))\n molA.abs_spectra = self.abs_spectraA.ctypes.data_as(POINTER(c_double))\n molA.ems_spectra = self.ems_spectraA.ctypes.data_as(POINTER(c_double))\n molA.vib_spectra = 
self.vib_spectraA.ctypes.data_as(POINTER(c_double))\n molA.Raman_spectra = self.Raman_spectraA.ctypes.data_as(POINTER(c_double))\n molA.dyn_rho = self.dyn_rho_A.ctypes.data_as(POINTER(c_complex))\n\n molB.nDIM = len(self.energies_B)\n molB.energies = self.energies_B.ctypes.data_as(POINTER(c_double))\n molB.gamma_decay = self.gamma_decay.ctypes.data_as(POINTER(c_double))\n molB.gamma_pure_dephasing = self.gamma_pure_dephasingB.ctypes.data_as(POINTER(c_double))\n molB.mu = self.mu.ctypes.data_as(POINTER(c_complex))\n molB.rho = self.rhoB.ctypes.data_as(POINTER(c_complex))\n molB.rho_0 = self.rho_0.ctypes.data_as(POINTER(c_complex))\n molB.abs_spectra = self.abs_spectraB.ctypes.data_as(POINTER(c_double))\n molB.ems_spectra = self.ems_spectraB.ctypes.data_as(POINTER(c_double))\n molB.vib_spectra = self.vib_spectraB.ctypes.data_as(POINTER(c_double))\n molB.Raman_spectra = self.Raman_spectraB.ctypes.data_as(POINTER(c_double))\n molB.dyn_rho = self.dyn_rho_B.ctypes.data_as(POINTER(c_complex))\n\n def create_parameters_spectra(self, spectra_params, params):\n spectra_params.rho_0_A = params.rho_0_A.ctypes.data_as(POINTER(c_complex))\n spectra_params.rho_0_E = params.rho_0_E.ctypes.data_as(POINTER(c_complex))\n spectra_params.time_AE = self.time_AE.ctypes.data_as(POINTER(c_double))\n spectra_params.time_VR = self.time_VR.ctypes.data_as(POINTER(c_double))\n spectra_params.frequency_A = self.frequency_A.ctypes.data_as(POINTER(c_double))\n spectra_params.frequency_E = self.frequency_E.ctypes.data_as(POINTER(c_double))\n spectra_params.frequency_VR = self.frequency_VR.ctypes.data_as(POINTER(c_double))\n\n spectra_params.field_amp_AE = params.field_amp_AE\n spectra_params.field_amp_VR = params.field_amp_VR\n spectra_params.omega_R = params.omega_R\n\n spectra_params.nDIM = len(self.energies_A)\n spectra_params.nEXC = params.nEXC\n\n spectra_params.timeDIM_AE = len(self.time_AE)\n spectra_params.timeDIM_VR = len(self.time_VR)\n\n spectra_params.freqDIM_A = len(self.frequency_A)\n spectra_params.freqDIM_E = len(self.frequency_E)\n spectra_params.freqDIM_VR = len(self.frequency_VR)\n\n spectra_params.field_A = self.field_A.ctypes.data_as(POINTER(c_complex))\n spectra_params.field_E = self.field_E.ctypes.data_as(POINTER(c_complex))\n spectra_params.field_V = self.field_V.ctypes.data_as(POINTER(c_complex))\n spectra_params.field_R = self.field_R.ctypes.data_as(POINTER(c_complex))\n\n spectra_params.omega_v1 = params.omega_v1\n spectra_params.omega_v2 = params.omega_v2\n spectra_params.omega_v3 = params.omega_v3\n spectra_params.omega_v4 = params.omega_v4\n spectra_params.omega_e1 = params.omega_e1\n\n def calculate_spectra(self, params):\n molA = Molecule()\n molB = Molecule()\n self.create_molecules(molA, molB)\n params_spectra = Parameters_Spectra()\n self.create_parameters_spectra(params_spectra, params)\n CalculateSpectra(molA, molB, params_spectra)\n return\n\n\nif __name__ == '__main__':\n\n import matplotlib.pyplot as plt\n import matplotlib.gridspec as gridspec\n import time\n\n np.set_printoptions(precision=4)\n energy_factor = 1. / 27.211385\n time_factor = .02418884 / 1000\n\n energies_A = np.array((0.000, 0.08233, 0.09832, 0.16304, 0.20209, 1.7679256, 1.85871, 1.87855, 1.96783, 2.02991)) * energy_factor\n energies_B = np.array((0.000, 0.08313, 0.09931, 0.15907, 0.19924, 1.5879256, 1.66871, 1.67546, 1.76783, 1.82991)) * energy_factor\n N = len(energies_A)\n N_vib = N - 5\n N_exc = N - N_vib\n rho_0_ems = np.zeros((N, N), dtype=np.complex)\n rho_0_ems[N_vib, N_vib] = 1. 
+ 0j\n rho_0_abs = np.zeros((N, N), dtype=np.complex)\n rho_0_abs[0, 0] = 1. + 0j\n\n mu = 4.97738 * np.ones_like(rho_0_abs)\n np.fill_diagonal(mu, 0j)\n population_decay = 2.418884e-8\n electronic_dephasingA = 2.7 * 2.418884e-4\n electronic_dephasingB = 5.0 * 2.418884e-4\n vibrational_dephasing = 0.1 * 2.418884e-5\n\n gamma_decay = np.ones((N, N)) * population_decay\n np.fill_diagonal(gamma_decay, 0.0)\n gamma_decay = np.tril(gamma_decay)\n\n gamma_pure_dephasingA = np.ones_like(gamma_decay) * vibrational_dephasing\n np.fill_diagonal(gamma_pure_dephasingA, 0.0)\n\n for i in range(N_vib):\n for j in range(N_vib, N):\n gamma_pure_dephasingA[i, j] = electronic_dephasingA\n gamma_pure_dephasingA[j, i] = electronic_dephasingA\n\n gamma_pure_dephasingA[5, 4] = electronic_dephasingA * 0.65\n gamma_pure_dephasingA[4, 5] = electronic_dephasingA * 0.65\n gamma_pure_dephasingA[0, 9] = electronic_dephasingA * 0.65\n gamma_pure_dephasingA[9, 0] = electronic_dephasingA * 0.65\n\n gamma_pure_dephasingA[5, 3] = electronic_dephasingA * 0.70\n gamma_pure_dephasingA[3, 5] = electronic_dephasingA * 0.70\n gamma_pure_dephasingA[0, 8] = electronic_dephasingA * 0.70\n gamma_pure_dephasingA[8, 0] = electronic_dephasingA * 0.70\n\n gamma_pure_dephasingA[5, 2] = electronic_dephasingA * 0.20\n gamma_pure_dephasingA[2, 5] = electronic_dephasingA * 0.20\n gamma_pure_dephasingA[0, 7] = electronic_dephasingA * 0.20\n gamma_pure_dephasingA[7, 0] = electronic_dephasingA * 0.20\n\n gamma_pure_dephasingA[5, 1] = electronic_dephasingA * 0.18\n gamma_pure_dephasingA[1, 5] = electronic_dephasingA * 0.18\n gamma_pure_dephasingA[0, 6] = electronic_dephasingA * 0.18\n gamma_pure_dephasingA[6, 0] = electronic_dephasingA * 0.18\n\n gamma_pure_dephasingA[5, 0] = electronic_dephasingA * 0.60\n gamma_pure_dephasingA[0, 5] = electronic_dephasingA * 0.60\n mu[5, 0] *= 0.10\n mu[0, 5] *= 0.10\n\n gamma_pure_dephasingB = np.ones_like(gamma_decay) * vibrational_dephasing\n np.fill_diagonal(gamma_pure_dephasingB, 0.0)\n for i in range(N_vib):\n for j in range(N_vib, N):\n gamma_pure_dephasingB[i, j] = electronic_dephasingB\n gamma_pure_dephasingB[j, i] = electronic_dephasingB\n\n gamma_pure_dephasingB[5, 4] = electronic_dephasingB * 0.65\n gamma_pure_dephasingB[4, 5] = electronic_dephasingB * 0.65\n gamma_pure_dephasingB[0, 9] = electronic_dephasingB * 0.65\n gamma_pure_dephasingB[9, 0] = electronic_dephasingB * 0.65\n\n gamma_pure_dephasingB[5, 3] = electronic_dephasingB * 0.70\n gamma_pure_dephasingB[3, 5] = electronic_dephasingB * 0.70\n gamma_pure_dephasingB[0, 8] = electronic_dephasingB * 0.70\n gamma_pure_dephasingB[8, 0] = electronic_dephasingB * 0.70\n\n gamma_pure_dephasingB[5, 2] = electronic_dephasingB * 0.20\n gamma_pure_dephasingB[2, 5] = electronic_dephasingB * 0.20\n gamma_pure_dephasingB[0, 7] = electronic_dephasingB * 0.20\n gamma_pure_dephasingB[7, 0] = electronic_dephasingB * 0.20\n\n gamma_pure_dephasingB[5, 1] = electronic_dephasingB * 0.18\n gamma_pure_dephasingB[1, 5] = electronic_dephasingB * 0.18\n gamma_pure_dephasingB[0, 6] = electronic_dephasingB * 0.18\n gamma_pure_dephasingB[6, 0] = electronic_dephasingB * 0.18\n\n gamma_pure_dephasingB[5, 0] = electronic_dephasingB * 0.60\n gamma_pure_dephasingB[0, 5] = electronic_dephasingB * 0.60\n\n np.set_printoptions(precision=2)\n\n print(gamma_pure_dephasingA)\n\n params = ADict(\n energy_factor=energy_factor,\n time_factor=time_factor,\n rho_0_A=rho_0_abs,\n rho_0_E=rho_0_ems,\n\n timeDIM_AE=1000,\n timeAMP_AE=2000,\n\n timeDIM_VR=5000,\n timeAMP_VR=50000,\n\n 
frequencyDIM_AE=250,\n frequencyMIN_A=1.5*energy_factor,\n frequencyMAX_A=2.7*energy_factor,\n\n frequencyMIN_E=1.2 * energy_factor,\n frequencyMAX_E=2.3 * energy_factor,\n\n frequencyDIM_VR=250,\n frequencyMIN_VR=0.075*energy_factor,\n frequencyMAX_VR=0.21*energy_factor,\n\n field_amp_AE=0.000003,\n field_amp_VR=0.000014,\n\n omega_R=0.5*energy_factor,\n nEXC=N_exc,\n\n omega_v1=energies_A[1],\n omega_v2=energies_A[2],\n omega_v3=energies_A[3],\n omega_v4=energies_A[4],\n\n omega_e1=energies_A[5]-energies_A[4]\n )\n\n FourLevels = dict(\n energies_A=energies_A,\n energies_B=energies_B,\n gamma_decay=gamma_decay,\n gamma_pure_dephasingA=gamma_pure_dephasingA,\n gamma_pure_dephasingB=gamma_pure_dephasingB,\n mu=mu,\n )\n\n def render_ticks(axes):\n axes.get_xaxis().set_tick_params(which='both', direction='in', width=1, labelrotation=0, labelsize='large')\n axes.get_yaxis().set_tick_params(which='both', direction='in', width=1, labelcolor='r', labelsize='large')\n axes.get_xaxis().set_ticks_position('both')\n axes.get_yaxis().set_ticks_position('both')\n axes.grid()\n\n\n molecules = RamanControl(params, **FourLevels)\n\n start = time.time()\n molecules.calculate_spectra(params)\n\n print(time.time() - start)\n\n # fig, axes = plt.subplots(nrows=3, ncols=1)\n # axes[0].plot(molecules.time_VR, molecules.field_R.real)\n # axes[0].plot(molecules.time_AE, molecules.field_A.real)\n # axes[1].plot(energy_factor * 1239.84 / molecules.frequency_A, molecules.abs_spectraA)\n # axes[1].plot(energy_factor * 1239.84 / molecules.frequency_A, molecules.abs_spectraB)\n # axes[2].plot(energy_factor * 1239.84 / molecules.frequency_VR, molecules.Raman_spectraA)\n # axes[2].plot(energy_factor * 1239.84 / molecules.frequency_VR, molecules.Raman_spectraB)\n # plt.show()\n\n fig, axes = plt.subplots(nrows=3, ncols=1)\n axes[0].plot(molecules.time_VR, molecules.field_R.real)\n\n axes[1].plot(molecules.time_VR, molecules.dyn_rho_A[0], label='g1')\n axes[1].plot(molecules.time_VR, molecules.dyn_rho_A[1], label='g2')\n axes[1].plot(molecules.time_VR, molecules.dyn_rho_A[2], label='g3')\n axes[1].plot(molecules.time_VR, molecules.dyn_rho_A[3], label='g4')\n axes[1].plot(molecules.time_VR, molecules.dyn_rho_A[4], label='g5')\n axes[1].plot(molecules.time_VR, molecules.dyn_rho_A[5])\n axes[1].plot(molecules.time_VR, molecules.dyn_rho_A[6])\n axes[1].plot(molecules.time_VR, molecules.dyn_rho_A[7])\n axes[1].plot(molecules.time_VR, molecules.dyn_rho_A[8])\n axes[1].plot(molecules.time_VR, molecules.dyn_rho_A[9])\n\n axes[2].plot(molecules.time_VR, molecules.dyn_rho_B[0], label='g1')\n axes[2].plot(molecules.time_VR, molecules.dyn_rho_B[1], label='g2')\n axes[2].plot(molecules.time_VR, molecules.dyn_rho_B[2], label='g3')\n axes[2].plot(molecules.time_VR, molecules.dyn_rho_B[3], label='g4')\n axes[2].plot(molecules.time_VR, molecules.dyn_rho_B[4], label='g5')\n axes[2].plot(molecules.time_VR, molecules.dyn_rho_B[5])\n axes[2].plot(molecules.time_VR, molecules.dyn_rho_B[6])\n axes[2].plot(molecules.time_VR, molecules.dyn_rho_B[7])\n axes[2].plot(molecules.time_VR, molecules.dyn_rho_B[8])\n axes[2].plot(molecules.time_VR, molecules.dyn_rho_B[9])\n\n render_ticks(axes[0])\n render_ticks(axes[1])\n render_ticks(axes[2])\n\n axes[1].legend()\n axes[2].legend()\n\n print(molecules.rhoA.diagonal())\n print()\n print(molecules.rhoB.diagonal())\n\n 
plt.show()\n","sub_path":"10LevelSystem_5plus5/Spectra.py","file_name":"Spectra.py","file_ext":"py","file_size_in_byte":15150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"468658295","text":"import readchar\r\nimport sys\r\nimport time\r\n\r\nimport oneservocontrol as controller\r\nfrom reset_arm import reset_arm\r\n\r\nsys.dont_write_bytecode = True\r\n\r\ndef move_as(base, char):\r\n\tspeed = 100\r\n\tif char == 's':\r\n\t\tbase.set_position_res(base.position - 1, speed)\r\n\t\tprint (base.position)\r\n\telif char == 'f':\r\n\t\tbase.set_position_res(base.position + 1, speed)\r\n\t\tprint (base.position)\r\n\t\t\r\n\telse:\r\n\t\tpass\r\n\r\ndef main(): \r\n\tarm = controller.Arm()\r\n\tarm.reset()\r\n\t\r\n\tpath = []\r\n\twhile True:\r\n\t# for char in path:\r\n\t\tchar = readchar.readchar()\r\n\t\thex_char = hex(ord(char))\r\n\t\r\n\t\tif hex_char == '0x3':\r\n\t\t\tprint(\"path followed: {}\".format(path))\r\n\t\t\t\r\n\t\t\tprint(\"received {} in hex, exiting\".format(hex_char))\r\n\r\n\t\t\tsys.exit()\r\n\r\n\t\tprint(char)\r\n\r\n\t\tpath.append(char)\r\n\r\n\t\tmove_as(arm.base, char)\r\n\r\n\t\tif char == 'r':\r\n\t\t\treset_arm(arm.base)\r\n\r\n\t\t\tfor char in path:\r\n\t\t\t\tmove_as(arm.base, char)\r\n\r\n\t\t\tpath = []\r\n\t\telif char == 'v':\r\n\t\t\t# filename = 'path_' + str(datetime.datetime.now()).replace(' ', '_')[:-3] + '.txt'\r\n\t\t\tfilename = 'path.txt'\r\n\t\t\t\r\n\t\t\twith open(filename, 'w') as f:\r\n\t\t\t\tf.write(''.join(path)[:-1])\r\n\t\t\t\t\r\n\t\telif char == 'c':\r\n\t\t\tfilename = 'path.txt'\r\n\t\t\t\r\n\t\t\twith open(filename, 'r') as f:\r\n\t\t\t\tpath = f.readlines()\r\n\t\t\t\tprint(path)\r\n\t\t\t\t\r\n\t\t\t\treset_arm(arm.base)\r\n\t\t\t\t\r\n\t\t\t\tfor char in path:\r\n\t\t\t\t\tmove_as(arm.base, char)\r\n\r\n\t\t\t\tpath = []\r\n\r\n\r\nif __name__ == '__main__':\r\n\tmain()\r\n\t\r\n\t","sub_path":"Robot/oneservotest/manualcontrol.py","file_name":"manualcontrol.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"217304128","text":"#!/usr/bin/python\r\n\r\nimport os, sys\r\ninput_file = sys.argv[1]\r\nrecords = open(input_file, 'r').read().replace(\"\\r\", \"\\n\").split(\"\\n\")\r\n\r\noutput_file = sys.argv[2]\r\nf = open(output_file, 'w')\r\n\r\nf.write(\"#!/bin/bash\\n\")\r\n\r\nf.write(\"pushd $MTURK_CMD_HOME/bin/\\n\")\r\ntitles=records[0].strip().split('\\t')\r\n\r\nassignIDIndex = titles.index('\"assignmentid\"')\r\nworkerIDindex = titles.index('\"workerid\"')\r\ncorrectIDindex = titles.index('\"Answer.correctBonus\"')\r\nsubmittedIDindex = titles.index('\"assignmentstatus\"')\r\n\r\nfor r in records[1:]:\r\n\tr2=r.strip().split('\\t')\r\n\tif len(r2) == len(titles) and r2[submittedIDindex] == \t'\"Submitted\"':\r\n\t\t(assignmentID,workerID,correct,comment) = (r2[assignIDIndex],r2[workerIDindex],r2[correctIDindex],\"performance bonus\")\r\n\t\tbonus=(correct.strip('[\\s\"]+'))\r\n\t\t#print(bonus)\r\n\t\tcmd=\"./grantBonus.sh -workerid %s -assignment %s -amount %s -reason %s\" %(workerID,assignmentID,bonus,comment)\r\n\t\tprint(cmd)\r\n\t\tf.write(cmd + \"\\n\")\r\n\r\nf.write(\"popd\\n\")\r\nf.close()\r\nos.system(\"chmod +x \" + output_file)\r\n","sub_path":"AMT.expt2/giveBonus2.py","file_name":"giveBonus2.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
+{"seq_id":"354780178","text":"from pyscipopt import Eventhdlr, SCIP_EVENTTYPE\nimport logging\n\n\nclass DualBoundEvent(Eventhdlr):\n \"\"\"This class is used to stop the solving process when an instance is proved to be verifiable or refutable.\"\"\"\n\n def __init__(self, mip):\n self.mip = mip\n self.log = logging.getLogger('main_log')\n\n\n def eventexec(self, event):\n \"\"\"Execute the event listener.\"\"\"\n\n node = event.getNode()\n if event.getType == SCIP_EVENTTYPE.NODEINFEASIBLE:\n self.log.debug(\"Node %i infeasible, addedcons %i\", node.getNumber(), node.getNAddedConss())\n\n if self.mip.model.getDualbound() > self.mip.eps:\n self.mip.model.interruptSolve()\n self.log.info(\"Interrupted solving, dual bound %f > 0.\", self.mip.model.getDualbound())\n self.log.info(\"total time: %f\", self.mip.model.getTotalTime())\n\n if self.mip.model.getPrimalbound() < -self.mip.eps:\n self.mip.model.interruptSolve()\n self.log.info(\"Interrupted solving, primal bound %f < 0.\", self.mip.model.getPrimalbound())\n self.log.info(\"total time: %f\", self.mip.model.getTotalTime())\n\n","sub_path":"src/dualbound_event.py","file_name":"dualbound_event.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"301868293","text":"import cs50\n\n\ndef main():\n\n string = cs50.get_string(\"Text: \")\n\n # string = \"It was a bright cold day in April, and the clocks were striking thirteen. Winston Smith, his chin nuzzled into his breast in an effort to escape the vile wind, slipped quickly through the glass doors of Victory Mansions, though not quickly enough to prevent a swirl of gritty dust from entering along with him.\"\n\n # string = \"Alice was beginning to get very tired of sitting by her sister on the bank, and of having nothing to do: once or twice she had peeped into the book her sister was reading, but it had no pictures or conversations in it, and what is the use of a book, thought Alice without pictures or conversation?\"\n\n grade(string)\n\n # print(N_words(string))\n # print(N_sentences(string))\n\n\ndef N_words(string):\n count = 0\n for i in range(len(string) - 1):\n if string[i].isspace() and not(string[i+1].isspace()):\n count += 1\n\n return count + 1\n\n\ndef N_sentences(string):\n count = 0\n for i in range(len(string)):\n if string[i] == '.' or string[i] == '!' 
or string[i] == '?':\n            count += 1\n\n    return count\n\n\ndef N_letters(string):\n    count = 0\n    for i in range(len(string)):\n        if string[i].isalpha():\n            count += 1\n\n    return count\n\n\ndef grade(string):\n\n    words = float(N_words(string))\n    points = float(N_sentences(string))\n    letters = float(N_letters(string))\n\n    L = float(letters / words) * 100\n    S = float(points / words) * 100\n\n    # debug output\n    # print(words)\n    # print(points)\n    # print(letters)\n\n    index = round((0.0588 * L) - (0.296 * S) - 15.8)\n\n    if index < 1:\n        print(\"Before Grade 1\")\n\n    elif index > 16:\n        print(\"Grade 16+\")\n\n    else:\n        print(f\"Grade {index}\")\n\n\nmain()","sub_path":"6_Python/pset6/readbility/readability.py","file_name":"readability.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"207122038","text":"# -*- coding:utf-8 -*-\n\nfrom i3pystatus.cpu_usage import CpuUsage\nfrom i3pystatus.core.util import make_bar\n\nclass CpuUsageBar(CpuUsage):\n    \"\"\"\n    Shows CPU usage as a bar (made with unicode box characters).\n    The first output will be inaccurate.\n\n    Linux only\n\n    Available formatters:\n\n    * {usage_bar} usage average of all cores\n    * {usage_bar_cpu*} usage of one specific core. replace \"*\"\n      by core number starting at 0\n\n    \"\"\"\n\n    format = \"{usage_bar}\"\n    settings = (\n        (\"format\", \"format string\"),\n    )\n\n    def run(self):\n        cpu_usage = self.get_usage()\n\n        cpu_usage_bar = {}\n\n        for core, usage in cpu_usage.items():\n            core = core.replace('usage', 'usage_bar')\n            cpu_usage_bar[core] = make_bar(usage)\n\n        cpu_usage.update(cpu_usage_bar)\n\n        # for backward compatibility\n        cpu_usage['usage_bar'] = cpu_usage['usage_bar_cpu']\n\n        self.output = {\n            \"full_text\": self.format.format_map(cpu_usage)\n        }\n\n","sub_path":"i3pystatus/cpu_usage_bar.py","file_name":"cpu_usage_bar.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"482635394","text":"import torch\nimport torch.nn.functional as F\nfrom torchvision.utils import save_image\n\nfrom tensorboardX import SummaryWriter\n\nfrom model import Generator\nfrom model import Discriminator\n\nimport numpy as np\n\nimport os\nimport time\nimport datetime\nimport random\nimport glob\nimport re\n\n\nclass InitializerClass(object):\n    def __init__(self, config):\n\n        # Model configurations.\n        self.c_dim = config.c_dim\n        self.image_size = config.image_size\n        self.g_conv_dim = config.g_conv_dim\n        self.d_conv_dim = config.d_conv_dim\n        self.g_repeat_num = config.g_repeat_num\n        self.d_repeat_num = config.d_repeat_num\n        self.lambda_cls = config.lambda_cls\n        self.lambda_rec = config.lambda_rec\n        self.lambda_gp = config.lambda_gp\n        self.lambda_smooth = config.lambda_smooth\n        self.lambda_sat = config.lambda_sat\n        self.alpha_rec = 0\n\n        # Training configurations.\n        self.batch_size = config.batch_size\n        self.num_epochs = config.num_epochs\n        self.num_epochs_decay = config.num_epochs_decay\n        self.first_epoch = config.first_epoch\n        self.g_lr = config.g_lr\n        self.d_lr = config.d_lr\n        self.n_critic = config.n_critic\n        self.beta1 = config.beta1\n        self.beta2 = config.beta2\n        self.resume_iters = config.resume_iters\n        self.use_virtual = config.use_virtual\n        self.first_iteration = 0\n        self.global_counter = 0\n\n        # Miscellaneous.\n        self.use_tensorboard = config.use_tensorboard\n        self.device = 'cuda:' + \\\n            str(config.gpu_id) if torch.cuda.is_available() else 'cpu'\n        self.num_sample_targets = 
config.num_sample_targets\n\n        print(f\"Running the model on {self.device}\")\n\n        # Directories.\n        self.log_dir = config.log_dir\n        self.sample_dir = config.sample_dir\n        self.model_save_dir = config.model_save_dir\n        self.result_dir = config.result_dir\n        self.outputs_dir = config.outputs_dir\n\n        # Test variables\n        self.test_images_dir = config.test_images_dir\n        self.test_attributes_path = config.test_attributes_path\n        self.test_models_dir = config.test_models_dir\n        self.test_results_dir = config.test_results_dir\n\n        # Step sizes.\n        self.log_step = config.log_step\n        self.sample_step = config.sample_step\n        self.model_save_step = config.model_save_step\n\n        # Build the model and tensorboard.\n        self.build_model()\n        if self.use_tensorboard:\n            self.build_tensorboard()\n        self.loss_visualization = {}\n\n    def build_model(self):\n        \"\"\"Create a generator and a discriminator.\"\"\"\n        self.G = Generator(self.g_conv_dim, self.c_dim,\n                           self.g_repeat_num).to(self.device)\n        self.D = Discriminator(\n            self.image_size, self.d_conv_dim, self.c_dim, self.d_repeat_num).to(self.device)\n\n        self.g_optimizer = torch.optim.Adam(\n            self.G.parameters(), self.g_lr, [self.beta1, self.beta2])\n        self.d_optimizer = torch.optim.Adam(\n            self.D.parameters(), self.d_lr, [self.beta1, self.beta2])\n\n        # TODO: implement data parallelization for multiple gpus\n        # self.gpu_ids = torch.cuda.device_count()\n        # print(\"GPUS AVAILABLE: \", self.gpu_ids)\n        # if self.gpu_ids > 1:\n        #     torch.nn.DataParallel(self.D, device_ids=list(range(self.gpu_ids)))\n        #     torch.nn.DataParallel(self.G, device_ids=list(range(self.gpu_ids)))\n\n    def build_tensorboard(self):\n        \"\"\"Build a tensorboard logger.\"\"\"\n        from logger import Logger\n        self.logger = Logger(self.log_dir)\n        self.writer = SummaryWriter(logdir=self.log_dir)\n\n    def smooth_loss(self, att):\n        return torch.mean(torch.mean(torch.abs(att[:, :, :, :-1] - att[:, :, :, 1:])) + torch.mean(torch.abs(att[:, :, :-1, :] - att[:, :, 1:, :])))\n\n    def print_network(self, model, name):\n        \"\"\"Print out the network information.\"\"\"\n        num_params = 0\n        for p in model.parameters():\n            num_params += p.numel()\n        print(model)\n        print(name)\n        print(\"The number of parameters: {}\".format(num_params))\n\n    def update_lr(self, g_lr, d_lr):\n        \"\"\"Decay learning rates of the generator and discriminator.\"\"\"\n        for param_group in self.g_optimizer.param_groups:\n            param_group['lr'] = g_lr\n        for param_group in self.d_optimizer.param_groups:\n            param_group['lr'] = d_lr\n\n    def reset_grad(self):\n        \"\"\"Reset the gradient buffers.\"\"\"\n        self.g_optimizer.zero_grad()\n        self.d_optimizer.zero_grad()\n\n    def denorm(self, x):\n        \"\"\"Convert the range from [-1, 1] to [0, 1].\"\"\"\n        out = (x + 1) / 2\n        return out.clamp_(0, 1)\n\n\nclass UtilsClass(object):\n    def gradient_penalty(self, y, x):\n        \"\"\"Compute gradient penalty: (L2_norm(dy/dx) - 1)**2.\"\"\"\n        weight = torch.ones(y.size()).to(self.device)\n        dydx = torch.autograd.grad(outputs=y,\n                                   inputs=x,\n                                   grad_outputs=weight,\n                                   retain_graph=True,\n                                   create_graph=True,\n                                   only_inputs=True)[0]\n\n        dydx = dydx.view(dydx.size(0), -1)\n        dydx_l2norm = torch.sqrt(torch.sum(dydx**2, dim=1))\n        return torch.mean((dydx_l2norm-1)**2)\n\n    def imFromAttReg(self, att, reg, x_real):\n        \"\"\"Mixes attention, color and real images\"\"\"\n        return (1-att)*reg + att*x_real\n\n    def create_labels(self, data_iter):\n        \"\"\"Return samples for visualization\"\"\"\n        x, c = [], []\n        # use the built-in iterator protocol instead of the removed .next() method\n        x_data, c_data = next(data_iter)\n\n        for i in range(self.num_sample_targets):\n            x.append(x_data[i].repeat(\n                
self.batch_size, 1, 1, 1).to(self.device))\n c.append(c_data[i].repeat(self.batch_size, 1).to(self.device))\n\n return x, c\n\n def save_models(self, iteration, epoch):\n try: # To avoid crashing on the first step\n os.remove(os.path.join(self.model_save_dir,\n '{}-{}-G.ckpt'.format(iteration+1-self.model_save_step, epoch)))\n os.remove(os.path.join(self.model_save_dir,\n '{}-{}-D.ckpt'.format(iteration+1-self.model_save_step, epoch)))\n os.remove(os.path.join(self.model_save_dir,\n '{}-{}-G_optim.ckpt'.format(iteration+1-self.model_save_step, epoch)))\n os.remove(os.path.join(self.model_save_dir,\n '{}-{}-D_optim.ckpt'.format(iteration+1-self.model_save_step, epoch)))\n except:\n pass\n\n G_path = os.path.join(self.model_save_dir,\n '{}-{}-G.ckpt'.format(iteration+1, epoch))\n D_path = os.path.join(self.model_save_dir,\n '{}-{}-D.ckpt'.format(iteration+1, epoch))\n torch.save(self.G.state_dict(), G_path)\n torch.save(self.D.state_dict(), D_path)\n\n G_path_optim = os.path.join(\n self.model_save_dir, '{}-{}-G_optim.ckpt'.format(iteration+1, epoch))\n D_path_optim = os.path.join(\n self.model_save_dir, '{}-{}-D_optim.ckpt'.format(iteration+1, epoch))\n torch.save(self.g_optimizer.state_dict(), G_path_optim)\n torch.save(self.d_optimizer.state_dict(), D_path_optim)\n\n print(f'Saved model checkpoints in {self.model_save_dir}...')\n\n def restore_model(self, resume_iters):\n \"\"\"Restore the trained generator and discriminator.\"\"\"\n print('Loading the trained models from step {}-{}...'.format(resume_iters, self.first_epoch))\n G_path = os.path.join(\n self.model_save_dir, '{}-{}-G.ckpt'.format(resume_iters, self.first_epoch))\n D_path = os.path.join(\n self.model_save_dir, '{}-{}-D.ckpt'.format(resume_iters, self.first_epoch))\n self.G.load_state_dict(torch.load(\n G_path, map_location=lambda storage, loc: storage))\n self.D.load_state_dict(torch.load(\n D_path, map_location=lambda storage, loc: storage))\n\n G_optim_path = os.path.join(\n self.model_save_dir, '{}-{}-G_optim.ckpt'.format(resume_iters, self.first_epoch))\n D_optim_path = os.path.join(\n self.model_save_dir, '{}-{}-D_optim.ckpt'.format(resume_iters, self.first_epoch))\n self.d_optimizer.load_state_dict(torch.load(D_optim_path))\n self.g_optimizer.load_state_dict(torch.load(G_optim_path))\n\n def numericalSort(self, value):\n numbers = re.compile(r'(\\d+)')\n parts = numbers.split(value)\n parts[1::2] = map(int, parts[1::2])\n return parts\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"181401666","text":"import pandas as pd\n\nfrom onecodex.exceptions import OneCodexException\nfrom onecodex.taxonomy import TaxonomyMixin\n\n\nclass DistanceMixin(TaxonomyMixin):\n def alpha_diversity(self, metric=\"simpson\", rank=\"auto\"):\n \"\"\"Calculate the diversity within a community.\n\n Parameters\n ----------\n metric : {'simpson', 'chao1', 'shannon'}\n The diversity metric to calculate.\n rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional\n Analysis will be restricted to abundances of taxa at the specified level.\n\n Returns\n -------\n pandas.DataFrame, a distance matrix.\n \"\"\"\n import skbio.diversity\n\n if metric not in (\"simpson\", \"chao1\", \"shannon\"):\n raise OneCodexException(\n \"For alpha diversity, metric must be one of: simpson, chao1, shannon\"\n )\n\n # needs read counts, not relative abundances\n if 
self._guess_normalized():\n raise OneCodexException(\"Alpha diversity requires unnormalized read counts.\")\n\n df = self.to_df(rank=rank, normalize=False)\n\n output = {\"classification_id\": [], metric: []}\n\n for c_id in df.index:\n output[\"classification_id\"].append(c_id)\n output[metric].append(\n skbio.diversity.alpha_diversity(metric, df.loc[c_id].tolist(), [c_id]).values[0]\n )\n\n return pd.DataFrame(output).set_index(\"classification_id\")\n\n def beta_diversity(self, metric=\"braycurtis\", rank=\"auto\"):\n \"\"\"Calculate the diversity between two communities.\n\n Parameters\n ----------\n metric : {'jaccard', 'braycurtis', 'cityblock'}\n The distance metric to calculate.\n rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional\n Analysis will be restricted to abundances of taxa at the specified level.\n\n Returns\n -------\n skbio.stats.distance.DistanceMatrix, a distance matrix.\n \"\"\"\n import skbio.diversity\n\n if metric not in (\"jaccard\", \"braycurtis\", \"cityblock\"):\n raise OneCodexException(\n \"For beta diversity, metric must be one of: jaccard, braycurtis, cityblock\"\n )\n\n df = self.to_df(rank=rank, normalize=self._guess_normalized())\n\n counts = []\n for c_id in df.index:\n counts.append(df.loc[c_id].tolist())\n\n # NOTE: see #291 for a discussion on using these metrics with normalized read counts. we are\n # explicitly disabling skbio's check for a counts matrix to allow normalized data to make\n # its way into this function.\n return skbio.diversity.beta_diversity(metric, counts, df.index.tolist(), validate=False)\n\n def unifrac(self, weighted=True, rank=\"auto\"):\n \"\"\"Calculate the UniFrac beta diversity metric.\n\n UniFrac takes into account the relatedness of community members. Weighted UniFrac considers\n abundances, unweighted UniFrac considers presence.\n\n Parameters\n ----------\n weighted : `bool`\n Calculate the weighted (True) or unweighted (False) distance metric.\n rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional\n Analysis will be restricted to abundances of taxa at the specified level.\n\n Returns\n -------\n skbio.stats.distance.DistanceMatrix, a distance matrix.\n \"\"\"\n # needs read counts, not relative abundances\n import skbio.diversity\n\n if self._guess_normalized():\n raise OneCodexException(\"UniFrac requires unnormalized read counts.\")\n\n df = self.to_df(rank=rank, normalize=False)\n\n counts = []\n for c_id in df.index:\n counts.append(df.loc[c_id].tolist())\n\n tax_ids = df.keys().tolist()\n\n tree = self.tree_build()\n tree = self.tree_prune_rank(tree, rank=df.ocx_rank)\n\n # there's a bug (?) 
in skbio where it expects the root to only have\n        # one child, so we do a little faking here\n        from skbio.tree import TreeNode\n\n        new_tree = TreeNode(name=\"fake root\")\n        new_tree.rank = \"no rank\"\n        new_tree.append(tree)\n\n        # then finally run the calculation and return\n        if weighted:\n            return skbio.diversity.beta_diversity(\n                \"weighted_unifrac\", counts, df.index.tolist(), tree=new_tree, otu_ids=tax_ids\n            )\n        else:\n            return skbio.diversity.beta_diversity(\n                \"unweighted_unifrac\", counts, df.index.tolist(), tree=new_tree, otu_ids=tax_ids\n            )\n","sub_path":"onecodex/distance.py","file_name":"distance.py","file_ext":"py","file_size_in_byte":4700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"176900241","text":"import os\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset\nfrom PIL import Image\nimport torchvision.transforms as tfs\nfrom torchvision.transforms import functional\nimport torchvision\nimport math\nimport torch.nn.functional as F\nfrom utils.precess import PreProc\nimport matplotlib.pyplot as plt\n\n#import cv2\n\ndef getdata(img_path,mask_path,ph,pw,npatches,valid=True):\n    images, labels = read(img_path,mask_path)\n    if valid:\n        # ph = 48\n        # pw = 48\n        # npatches = 120000\n        images_patches, labels_patches = extarct_patches_train(images, labels,ph,pw,npatches)\n        split_percent = 0.2\n        valid_num = int(npatches * split_percent)\n        n = valid_num\n        train_ipatches, train_lpatches = images_patches[:-n], labels_patches[:-n]\n        valid_ipatches, valid_lpatches = images_patches[-n:], labels_patches[-n:]\n\n        print('all patch num:{} | valid:{} | train:{}'.format(len(images_patches),\n                                                              valid_num, npatches - valid_num))\n        return train_ipatches,train_lpatches,valid_ipatches,valid_lpatches\n    else:\n        ph = 20\n        pw = 20\n        sh = 10\n        sw = 10\n        images_patches, labels_patches, _, _ = extract_patches_test(images, labels, ph, pw, sh, sw)\n        return images_patches,labels_patches\n\ndef getData(img_list,label_list):\n    img_data = []\n    label_data = []\n    for i in range(len(img_list)):\n        img = img_list[i]\n        label_name = label_list[i]\n\n        #read image and convert into numpy format\n        # WxHx3 -> HxWx3\n        img_matrix = Image.open(img).convert('RGB')\n        img_matrix = np.array(img_matrix)\n\n        #read segmentation images\n        # WxH -> 1xHxW\n        label_matrix = Image.open(label_name)\n        label_matrix = np.array(label_matrix)\n        label_matrix = np.expand_dims(label_matrix,axis=0)\n\n        #add to data matrix\n        img_data.append(img_matrix)\n        label_data.append(label_matrix)\n    return img_data,label_data\n\ndef read(img_path,label_path):\n    img_list = sorted(os.listdir(img_path))\n    label_list = sorted(os.listdir(label_path))\n    for i in range(len(img_list)):\n        if isinstance(img_list[i],str):\n            img_list[i] = img_path + '/'+img_list[i]\n            label_list[i] = label_path +'/' + label_list[i]\n    return img_list, label_list\n\ndef extarct_patches_train(img_list,label_list,patch_h,patch_w,n_patches):#PIL Image format\n    length = len(img_list)\n    k = n_patches // length\n    img_data = []\n    label_data = []\n    for i in range(len(img_list)):\n        img = Image.open(img_list[i])\n        H,W = img.size[0],img.size[1]\n        label = Image.open(label_list[i])\n        #preprocess PIL image\n        img = np.asarray(img)\n        #print(img.shape)\n        data = np.expand_dims(np.transpose(img,(2,0,1)),0)\n        gray_img = PreProc(data)\n        #To Image\n        data = np.transpose(np.squeeze(gray_img,0), (1,2,0))\n        #print(type(data))\n        # plt.figure()\n        # plt.imshow(data[:,:,0])\n        # plt.show()\n        img = Image.fromarray(data[:,:,0])\n        #img.show()\n        #img = 
data\n\n cnt = 0\n while cnt