diff --git "a/2682.jsonl" "b/2682.jsonl" new file mode 100644--- /dev/null +++ "b/2682.jsonl" @@ -0,0 +1,1648 @@ +{"seq_id":"40319639560","text":"#! /usr/bin/env python\n\nfrom bottle import Bottle, run, template, static_file, request, post, redirect, TEMPLATE_PATH\nimport argparse\nimport json\nfrom datetime import datetime\nfrom random import randrange\nimport hashlib\nimport os\n\nabs_app_dir_path = os.path.dirname(os.path.realpath(__file__))\nabs_views_path = os.path.join(abs_app_dir_path, 'views')\nTEMPLATE_PATH.insert(0, abs_views_path )\n\napp = Bottle()\n\nparse_arguments = argparse.ArgumentParser(description='zukunft')\nparse_arguments.add_argument('-p', '--path', required=True)\nparse_arguments.add_argument('-t', '--token', required=True)\narguments = parse_arguments.parse_args()\n\n@app.route('/datenverarbeitung')\ndef reroute():\n redirect(\"/\")\n\n@app.route('/datenverarbeitung', method='POST')\ndef submit():\n if request.query.token == arguments.token:\n with open(arguments.path + '/data/' + str(datetime.timestamp(datetime.now())) + '-' + hashlib.md5(str(randrange(0, 1312)).encode('UTF-8')).hexdigest() + '.txt', 'w') as file:\n payload = 'name: ' + request.forms.name + '\\nactive: ' + request.forms.active + \\\n '\\nmembership: ' + request.forms.membership + '\\ninterests: '+ \\\n request.forms.interests + '\\npositive: ' + request.forms.positive + \\\n '\\nnegative: ' + request.forms.negative\n \n file.write(str(payload))\n return template('thanks')\n else:\n redirect(\"/\")\n\n@app.route('/', method='GET')\ndef check():\n if request.query.token == arguments.token:\n token = request.query.token\n return template('survey', token=token)\n else:\n return template('start')\n\n@app.route('/static/')\ndef static(filename):\n return static_file(filename, root=arguments.path + '/resources/')\n\n\nrun(app, host='localhost', port=8161)\n","repo_name":"riotbib/zukunft","sub_path":"zukunft.py","file_name":"zukunft.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"11304113339","text":"from django.shortcuts import render, redirect, get_list_or_404\nfrom django.urls import is_valid_path\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework.exceptions import APIException, AuthenticationFailed\nfrom rest_framework.authentication import get_authorization_header\nfrom .authentication import create_access_token, create_refresh_token, decode_access_token, decode_refresh_token, access_token_exp\nfrom .serializer import UserSerializer, SubSerializer, Sub_pdSerializer\nfrom .models import User, Subscription, Sub_pd\nfrom datetime import datetime\nfrom django.utils import timezone\n\n# Create your views here.\n# @api_view(['GET']) # 전체 유저 조회\n# def getUsers(request): \n# users = User.objects.all()\n# serializer = UserSerializer(users, many = True)\n# return Response(serializer.data)\n\n# @api_view(['GET', 'PATCH', 'DELETE']) # 단일 회원 조회, 수정, 삭제\n# def userDetail(request, user_id): \n# user = User.objects.get(pk = user_id)\n# if request.method == 'GET':\n# serializer = UserSerializer(user)\n# return Response(serializer.data)\n# elif request.method == 'PATCH':\n# serializer = UserSerializer(user, data=request.data, partial = True)\n# if serializer.is_valid():\n# serializer.save()\n# return Response(serializer.data, status=status.HTTP_200_OK)\n# return 
Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n# elif request.method == 'DELETE':\n# user.delete()\n# return Response({'message':'sucess', 'code' : 200})\n\n# @api_view(['POST']) # 회원가입\n# def signup_view(request): \n# serializer = UserSerializer(data=request.data)\n# if serializer.is_valid():\n# serializer.save()\n# return Response(serializer.data, status=status.HTTP_201_CREATED)\n# return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n# @api_view(['POST']) # 로그인\n# def login_view(request): \n# serializer = UserSerializer(data=request.data)\n# username = serializer.initial_data['username']\n# password = serializer.initial_data['password']\n# user = authenticate(request=request, username=username, password=password)\n# token = Token.objects.get(user=user)\n# if user is not None:\n# login(request, user)\n# return Response(serializer.initial_data, {\"Token\" : token.key}, status=status.HTTP_201_CREATED)\n# else:\n# return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n# def logout_view(request): # 로그아웃\n# logout(request)\n# return redirect('getUsers')\n\nclass MyNotFoundException(APIException):\n status_code = 400\n default_detail = '로그인 실패 다시 확인해주세요'\n default_code = 'KeyNotFound'\n\nclass SignupException(APIException):\n status_code = 400\n default_detail = '아이디 혹은 패스워드를 다시 확인해주세요'\n default_code = 'KeynotFound'\n\nclass SignupAPIView(APIView):\n def post(self, request):\n serializer = UserSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n \n user = User.objects.filter(username=request.data['username']).first()\n if not user:\n raise SignupException()\n if not user.check_password(request.data['password']):\n raise SignupException()\n\n access_token = create_access_token(user.id)\n access_exp = access_token_exp(access_token) \n refresh_token = create_refresh_token(user.id)\n #serializer_data = serializer.data\n\n response = Response()\n response.set_cookie(key='refreshToken', value=refresh_token, httponly=True)\n response.data = {\n #'serializer_data' : serializer_data,\n 'access_token' : access_token, \n 'access_exp' : access_exp, \n 'refresh_token' : refresh_token \n }\n return response\n\nclass LoginAPIView(APIView):\n def post(self, request):\n user = User.objects.filter(username=request.data['username']).first()\n \n if not user:\n raise MyNotFoundException()\n if not user.check_password(request.data['password']):\n raise MyNotFoundException()\n\n access_token = create_access_token(user.id)\n access_exp = access_token_exp(access_token) # 생성된 access token의 decode된 만료기간 생성\n refresh_token = create_refresh_token(user.id)\n\n response = Response()\n response.set_cookie(key='refreshToken', value=refresh_token, httponly=True)\n response.data = {\n 'access_token' : access_token, # access token 반환\n 'access_exp' : access_exp, # access 만료기간 반환\n 'refresh_token' : refresh_token # refresh token 반환\n }\n return response\n\n\nclass UserAPIView(APIView):\n def get(self, request, **kwargs):\n if kwargs.get('user_id') is None:\n auth = get_authorization_header(request).split()\n if auth and len(auth) == 2:\n token = auth[1].decode('utf-8')\n id = decode_access_token(token)\n\n user = User.objects.filter(pk=id).first()\n return Response(UserSerializer(user).data)\n \n raise AuthenticationFailed('unauthenticated')\n else:\n user_id = kwargs.get('user_id') \n user_serializer = UserSerializer(User.objects.get(pk=user_id))\n response = Response()\n\n user_id = user_serializer.data.get('id')\n nickname 
= user_serializer.data.get('nickname')\n profile = user_serializer.data.get('profile')\n response.data = {\n 'id' : user_id,\n 'nickname' : nickname,\t\n 'profile' :profile\n }\n return response\n \n def patch(self, request):\n auth = get_authorization_header(request).split()\n if auth and len(auth) == 2:\n token = auth[1].decode('utf-8')\n id = decode_access_token(token)\n user = User.objects.filter(pk=id).first()\n serializer = UserSerializer(user, data=request.data, partial=True)\n \n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_200_OK)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass RefreshAPIView(APIView):\n def post(self, request):\n token = request.data['refresh_token']\n \n byt_token = bytes(token, 'utf-8')\n\n id = decode_refresh_token(byt_token)\n access_token = create_access_token(id)\n access_exp = access_token_exp(access_token)\n return Response({\n 'access_token': access_token,\n 'access_exp': access_exp\n })\n \n\n\nclass LogoutAPIView(APIView):\n def post(self, _):\n response = Response()\n response.delete_cookie(key=\"refreshToken\")\n response.data = {\n 'message': 'success'\n }\n\n return response\n\nclass SubscriptionAPIView(APIView):\n def get(self, request):\n auth = get_authorization_header(request).split()\n if auth and len(auth) == 2:\n token = auth[1].decode('utf-8')\n id = decode_access_token(token)\n subscription = Subscription.objects.filter(pk=id).first()\n serializer = SubSerializer(subscription, data=request.data, partial=True)\n\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_200_OK)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n \n def post(self, request):\n auth = get_authorization_header(request).split()\n if auth and len(auth) == 2:\n token = auth[1].decode('utf-8')\n id = decode_access_token(token)\n data = {\n \"user_id\": id, \"subpd_id\": request.data[\"subpd_id\"]\n }\n serializer = SubSerializer(data=data)\n\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n \n def patch(self, request):\n auth = get_authorization_header(request).split()\n if auth and len(auth) == 2:\n token = auth[1].decode('utf-8')\n id = decode_access_token(token)\n subscription = Subscription.objects.filter(id=request.data['sub_id']).first()\n \n \n date = timezone.now().strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\")\n data = {\n \"delete_time\" : date\n }\n\n serializer = SubSerializer(subscription, data=data, partial=True)\n \n if serializer.is_valid() :\n serializer.save()\n return Response(serializer.data, status=status.HTTP_200_OK)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\nclass Sub_pdAPIView(APIView):\n def get(self,request):\n sub_pd = Sub_pd.objects.all()\n serializer = Sub_pdSerializer(sub_pd, many=True)\n \n if sub_pd:\n return Response(serializer.data, status=status.HTTP_200_OK)\n else:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n\n# class SubAPIView(APIView):\n# def get(self, request):\n# auth = get_authorization_header(request).split()\n# if auth and len(auth) == 2:\n# token = auth[1].decode('utf-8')\n# id = decode_access_token(token)\n# subscription = Subscription.objects.filter(pk=id).first()\n# serializer = SubSerializer(subscription, data=request.data, partial=True)\n\n# if 
serializer.is_valid():\n# serializer.save()\n# return Response(serializer.data, status=status.HTTP_200_OK)\n# else:\n# return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n # def post(self, request):\n # serializer = SubSerializer(data=request.data)\n # if serializer.is_valid():\n # serializer.save()\n # return Response(serializer.data, status=status.HTTP_201_CREATED)\n # return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n # def patch(self, request):\n #user_id = kwargs.get('user_id')\n #subscription = Subscription.objects.filter(pk=1).first()\n # serializer = SubSerializer(subscription, data=request.data, partial = True)\n # print(serializer)\n # date = datetime.now()\n # serializer['delete_time'].save(date)\n # if serializer.is_valid() and serializer['delete_time'] is None:\n # serializer.save()\n # return Response(serializer.data, status=status.HTTP_200_OK)\n # else:\n # return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n\n","repo_name":"wodnrP/HobbyDiscovery","sub_path":"user/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70945208516","text":"import dload\nimport subprocess\nimport os\n\n\ndef get_celeba(path=\"datasets/celeba\"):\n \"\"\"\n Download and extract the CelebA dataset.\n\n Returns:\n str: Path to the extracted CelebA dataset.\n \"\"\"\n if os.path.exists(path):\n return path\n os.makedirs(\"/\".join(path.split(\"/\")[:-1]), exist_ok=True)\n print(\"Downloading CelebA dataset...\")\n url = \"https://link.eu1.storjshare.io/s/jurm4owtgpgrekgmrsvtz67n3wuq/datasets/celeba.zip?wrap=0\"\n return dload.save_unzip(url, \"/\".join(path.split(\"/\")[:-1]), True)\n\n\ndef get_dataset(path=\"datasets/dataset\"):\n \"\"\"\n Download and extract the masked dataset.\n\n Returns:\n str: Path to the extracted masks dataset.\n \"\"\"\n if os.path.exists(path):\n return path\n os.makedirs(\"/\".join(path.split(\"/\")[:-1]), exist_ok=True)\n print(\"Downloading dataset...\")\n url = \"https://link.eu1.storjshare.io/jxjaaumkj2zlbsadwkbu2dr4p7dq/datasets/dataset.zip?wrap=0\"\n return dload.save_unzip(url, \"/\".join(path.split(\"/\")[:-1]), True)\n\n\ndef get_masks_samples(path=\"datasets/mask\"):\n \"\"\"\n Download and extract the celebA masks dataset.\n\n Returns:\n str: Path to the extracted masked dataset.\n \"\"\"\n if os.path.exists(path):\n return path\n os.makedirs(\"/\".join(path.split(\"/\")[:-1]), exist_ok=True)\n print(\"Downloading mask samples dataset...\")\n url = \"https://link.eu1.storjshare.io/juhnpwlokhikmpmp3qczr2ukpega/datasets/mask.zip?wrap=0\"\n return dload.save_unzip(url, \"/\".join(path.split(\"/\")[:-1]), True)\n\n\ndef get_MaskTheFace(path=\"MaskTheFace/\"):\n \"\"\"\n Download and extract the MaskTheFace dataset.\n\n Returns:\n str: Path to the extracted MaskTheFace dataset.\n \"\"\"\n if os.path.exists(path):\n return path\n os.makedirs(\"/\".join(path.split(\"/\")[:-1]), exist_ok=True)\n print(\"Cloning MaskTheFace...\")\n url = \"https://github.com/aqeelanwar/MaskTheFace.git\"\n return dload.git_clone(url, path)\n\n\ndef get_YOLOv5_repo(path=\"mask_detection/YOLOv5\"):\n \"\"\"\n Download and extract the YOLOv5 repository.\n\n Returns:\n str: Path to the extracted YOLOv5 repository.\n \"\"\"\n if os.path.exists(path):\n return path\n print(\"Cloning YOLOv5...\")\n cloneCommand = f\"git clone -b adapt-yolo-to-unmask https://github.com/Arthemide/yolov5.git 
{path}\"\n cloneProcess = subprocess.Popen(cloneCommand.split(), stdout=subprocess.PIPE)\n cloneProcess.wait()\n requCommand = \"pip install -r mask_detection/YOLOv5/requirements.txt\"\n requProcess = subprocess.Popen(requCommand.split(), stdout=subprocess.PIPE)\n requProcess.wait()\n return path\n\n\ndef get_YOLOv5_dataset(path=\"../datasets/yolov5\"):\n \"\"\"\n Download and extract the YOLOv5 dataset.\n\n Returns:\n str: Path to the extracted YOLOv5 dataset.\n \"\"\"\n if os.path.exists(path):\n return path\n os.makedirs(path, exist_ok=True)\n print(\"Downloading YOLOv5 dataset...\")\n url = \"https://link.eu1.storjshare.io/jvrankoogai762vqn4foajoo4v4a/datasets/yolov5.zip?wrap=0\"\n return dload.save_unzip(url, path, True)\n\n\ndef get_YOLOv5_model(path=\"model_weights/mask_face_detector.pt\"):\n \"\"\"\n Download and extract the YOLOv5 model.\n\n Returns:\n str: Path to the extracted YOLOv5 model.\n \"\"\"\n if os.path.exists(path):\n return path\n os.makedirs(\"/\".join(path.split(\"/\")[:-1]), exist_ok=True)\n print(\"Downloading YOLOv5 model...\")\n url = \"https://link.eu1.storjshare.io/s/jwqmbuztmtpkoachps4qydrtq2ca/datasets/mask_face_detector.pt?wrap=0\"\n return dload.save(url, path)\n\n\ndef get_face_detector_model(path=\"model_weights/face_detector\"):\n \"\"\"\n Download and extract the FaceDetector model.\n\n Returns:\n str: Path to the extracted FaceDetector model.\n \"\"\"\n if os.path.exists(path):\n return path\n os.makedirs(\"/\".join(path.split(\"/\")[:-1]), exist_ok=True)\n print(\"Downloading face detector model...\")\n url = \"https://link.eu1.storjshare.io/s/juv6co67qia72ieiqziwg4ou7lpq/datasets/face_detector.zip?wrap=0\"\n return dload.save_unzip(url, \"/\".join(path.split(\"/\")[:-1]), True)\n\n\ndef get_mask_detector_model(path=\"model_weights/mask_detector_model.pth\"):\n \"\"\"\n Download and extract the MaskDetector model.\n\n Returns:\n str: Path to the extracted MaskDetector model.\n \"\"\"\n if os.path.exists(path):\n return path\n os.makedirs(\"/\".join(path.split(\"/\")[:-1]), exist_ok=True)\n print(\"Downloading mask detector model...\")\n url = \"https://link.eu1.storjshare.io/juktaddoxro75bg4irc55ewerevq/datasets/model_mask_detector.pth?wrap=0\"\n return dload.save(url, path)\n\n\ndef get_mask_segmentation_model(path=\"model_weights/model_mask_segmentation.pth\"):\n \"\"\"\n Download and extract the mask segmentation model.\n\n Returns:\n str: Path to the extracted mask segmentation model.\n \"\"\"\n if os.path.exists(path):\n return path\n os.makedirs(\"/\".join(path.split(\"/\")[:-1]), exist_ok=True)\n print(\"Downloading mask segmentation model...\")\n url = \"https://link.eu1.storjshare.io/juscxcz7e3k3hsw7h3tgocew477q/datasets/model_mask_segmentation.pth?wrap=0\"\n return dload.save(url, path)\n\n\ndef get_ccgan_model(path=\"model_weights/ccgan-110.pth\"):\n \"\"\"\n Download and extract the ccgan-110 model.\n\n Returns:\n str: Path to the extracted ccgan-110 model.\n \"\"\"\n if os.path.exists(path):\n return path\n os.makedirs(\"/\".join(path.split(\"/\")[:-1]), exist_ok=True)\n print(\"Downloading ccgan-110 model...\")\n url = \"https://link.eu1.storjshare.io/juznbc7nwnpecayfjhu4zmlwhpaa/datasets/ccgan-110.pth?wrap=0\"\n return dload.save(url, path)\n\n\ndef replace_face(image, gan_preds, locations):\n \"\"\"\n Replace the face in the image with the generated predictions.\n\n Args:\n image (numpy.ndarray): Image to be replaced.\n gan_preds (numpy.ndarray): Predictions from the GAN.\n locations (list): Locations of the face in the image.\n\n 
Returns:\n numpy.ndarray: Image with replaced face.\n \"\"\"\n for (box, pred) in zip(locations, gan_preds):\n (startX, startY, endX, endY) = box\n image[startY:endY, startX:endX] = pred\n return image\n","repo_name":"Arthemide/UnmaskMe","sub_path":"ressources.py","file_name":"ressources.py","file_ext":"py","file_size_in_byte":6178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17278796955","text":"def solve(input):\n ins = parse(input)\n cores = []\n for cuboid in ins:\n # cuboid = (power, [xmin, xmax, ymin, ymax, zmin, zmax])\n to_add = [cuboid] if cuboid[0] else []\n for core in cores:\n inter = inter_cuboid(cuboid, core)\n if inter:\n to_add.append(inter)\n cores.extend(to_add)\n\n return count_cores(cores)\n\n\ndef inter_cuboid(c1, c2):\n c1_ranges = c1[1]\n c2_ranges = c2[1]\n action = not c2[0]\n inter_ranges = [max, min, max, min, max, min]\n c3 = [inter_ranges[i](c1_ranges[i], c2_ranges[i]) for i in range(6)]\n if c3[0] > c3[1] or c3[2] > c3[3] or c3[4] > c3[5]:\n return None\n return (action, c3)\n\n\ndef count_cores(cores):\n count = 0\n for power, core in cores:\n if power:\n count += (\n (core[1] - core[0] + 1)\n * (core[3] - core[2] + 1)\n * (core[5] - core[4] + 1)\n )\n else:\n count -= (\n (core[1] - core[0] + 1)\n * (core[3] - core[2] + 1)\n * (core[5] - core[4] + 1)\n )\n return count\n\n\ndef process_coords(raw):\n coords = []\n raw = raw.split(\",\")\n for plane in raw:\n plane = list(map(int, plane[2:].split(\"..\")))\n coords.extend(plane)\n return coords\n\n\ndef parse(input):\n ins = []\n for line in input:\n line = line.split()\n power = True if line[0] == \"on\" else False\n cuboid = process_coords(line[1])\n ins.append((power, cuboid))\n return ins\n","repo_name":"DavidFM43/AoC2021","sub_path":"Day_22/part_2.py","file_name":"part_2.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4092298524","text":"'''Configuration file for kFlight'''\n\n#URL = 'http://127.0.0.1:8085/telemachus/datalink?'\nURL = 'http://192.168.0.183:8085/telemachus/datalink?'\n#URL = 'http://192.168.1.11:8085/telemachus/datalink?'\n# This is the URL that Telemachus can be found at.\n# Adjust it based on your firewall settings.\n\n#GoFullscreen = False\nGoFullscreen = True\n# The program should use fullscreen mode (True/False)\n\n#If you set fullscreen to false you must manually input the resolution\nScreenDimensions = (1366, 768)\n\nSpaceBetweenGaugesX = 5\n#Number of pizels in between the circular gauges\n\nRefreshRate = 10\n","repo_name":"KK4TEE/kFlightPanel","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"10071289028","text":"def diffBetweenTwoStrings(source, target):\n \"\"\"\n @param source: str\n @param target: str\n @return: str[]\n \"\"\"\n S = len(source)\n T = len(target)\n sol = [[None for _ in range(T+1)] for _ in range(S+1)]\n\n s_sol = []\n for x in range(S+1): # aa \"\" a aa\n sol[x][0] = s_sol[:]\n if x < S:\n s_sol.append(\"-\" + source[x])\n s_sol = []\n for x in range(T+1):\n sol[0][x] = s_sol[:]\n if x < T:\n s_sol.append(\"+\" + target[x])\n print(sol)\n\n # sol_st = dif_rec(source, S, target, T, sol)\n sol_st = tab(source, target, sol)\n print(sol)\n return sol_st\n\n\ndef dif_rec(source, i_s, target, i_t, sol):\n if sol[i_s][i_t] != None:\n return 
sol[i_s][i_t][:]\n if source[i_s-1] == target[i_t-1]:\n ans = dif_rec(source, i_s-1, target, i_t-1, sol)\n ans.append(source[i_s-1])\n sol[i_s][i_t] = ans\n else:\n # remove\n rem = dif_rec(source, i_s-1, target, i_t, sol)\n rem.append(\"-\" + source[i_s-1])\n print(\"rem is\")\n print(rem)\n # add\n add = dif_rec(source, i_s, target, i_t-1, sol)\n add.append(\"+\" + target[i_t-1])\n print(\"add is\")\n print(add)\n\n if len(add) <= len(rem):\n sol[i_s][i_t] = add\n else:\n sol[i_s][i_t] = rem\n\n return sol[i_s][i_t][:]\n\n\ndef tab(source, target, sol):\n for x in range(1, len(source) + 1):\n for y in range(1, len(target) + 1):\n if source[x-1] == target[y-1]:\n sol[x][y] = sol[x-1][y-1] + list(source[x-1])\n else:\n rem = sol[x-1][y].copy()\n rem.append(\"-\" + source[x-1])\n add = sol[x][y-1].copy()\n add.append(\"+\" + target[y-1])\n if len(add) <= len(rem):\n sol[x][y] = add\n else:\n sol[x][y] = rem\n return sol[-1][-1]\n\n\nprint(diffBetweenTwoStrings(\"ABCDEFG\", \"ABDFFGH\"))\n","repo_name":"xavierpjb/AlgoDataStruct","sub_path":"python/IkAlgs/dp/diffStrings.py","file_name":"diffStrings.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2163756894","text":"# tictactoe\nimport sys\nimport os\n\nimport curses\nfrom curses import wrapper\n\nimport display\n\n\ndef init_table(size):\n \"\"\"\n Returns a (2d) list[[]] with 0 values as table[row][column]\n \"\"\"\n t = []\n for i in range(size['y']):\n temp = []\n for j in range(size['x']):\n temp.append(0) # populating with 0s\n t.append(temp)\n return t\n\n\n# NOTE: game logic\n\n\ndef check_win(table, step_poz, player):\n \"\"\"\n Checks and returns whether a player won\n \"\"\"\n s = 3 # min(size['y'], size['x']) # biggest inner square edge length\n # horizontally ---\n nr = 0\n for cell in table[step_poz['y']]:\n if cell == player:\n nr += 1\n if nr == s:\n return True\n else:\n nr = 0\n # vertically |\n nc = 0\n for row in table:\n if row[step_poz['x']] == player:\n nc += 1\n if nc == s:\n return True\n else:\n nc = 0\n # diagonally\n ndl = 0 # diagonal left /\n ndr = 0 # diagonal right \\\n for i in range(s):\n if table[i][s - i - 1] == player:\n ndl += 1\n if ndl == s:\n return True\n else:\n ndl = 0\n if table[i][i] == player:\n ndr += 1\n if ndr == s:\n return True\n else:\n ndr = 0\n return False\n\n\ndef check_game_over(table):\n \"\"\" Checks and returns whether there's any empty cell \"\"\"\n game_over = True\n for row in table:\n for cell in row:\n if cell == 0:\n game_over = False\n return game_over\n\n\ndef check_step(table, step_poz):\n \"\"\"\n Checks whether step_poz is an empty cell (no player have stepped there before)\n \"\"\"\n if table[step_poz['y']][step_poz['x']] == 0:\n return True\n else:\n return False\n\n\ndef step(table, step_poz, player):\n \"\"\"\n Steps if it's possible, and returns it's possibility\n \"\"\"\n valid_step = check_step(table, step_poz)\n if valid_step:\n table[step_poz['y']][step_poz['x']] = player\n return valid_step\n\n\ndef color_win(stdscr, size, start_poz, table, locs):\n # prints table content\n for i in range(size['y']):\n for j in range(size['x']): # generate all rows\n if table[i][j] == 1: # player 1\n stdscr.addstr(locs[i][j]['y'], locs[i][j]['x'], \"X\")\n elif table[i][j] == 2: # player 2\n stdscr.addstr(locs[i][j]['y'], locs[i][j]['x'], \"O\")\n\n\nsize = {'y': 3, 'x': 3}\n\n\ndef main(stdscr):\n # init vars\n run = 1\n start_poz = {'y': 2, 'x': 4}\n current_poz = {'y': 
0, 'x': 0}\n table = init_table(size) # init an empty table[row][column]\n locations = display.generate_cell_locations(size, start_poz) # calc possible step locations[row][col]['y' or 'x']\n cursor_poz = {'y': locations[current_poz['y']][current_poz['x']]['y'],\n 'x': locations[current_poz['y']][current_poz['x']]['x']}\n player = 1\n\n display.print_table(stdscr, size, start_poz, table, locations)\n # XXX\n # stdscr.addstr(15, 0, str(curses.has_colors()))\n # stdscr.addstr(10, 0, str(table))\n # stdscr.move(cursor_poz['y'], cursor_poz['x'])\n\n stepped = False\n while run == 1:\n stdscr.addstr(0, 0, \"player \" + str(player))\n stdscr.move(cursor_poz['y'], cursor_poz['x'])\n k = stdscr.getkey()\n if k == \" \":\n stepped = step(table, current_poz, player)\n if check_game_over(table):\n run = 0\n elif check_win(table, current_poz, player):\n run = 2\n elif stepped:\n if player == 1:\n player = 2\n else:\n player = 1\n elif k == \"KEY_DOWN\":\n if current_poz['y'] < size['y'] - 1:\n current_poz['y'] += 1\n elif k == \"KEY_UP\":\n if current_poz['y'] > 0:\n current_poz['y'] -= 1\n elif k == \"KEY_LEFT\":\n if current_poz['x'] > 0:\n current_poz['x'] -= 1\n elif k == \"KEY_RIGHT\":\n if current_poz['x'] < size['x'] - 1:\n current_poz['x'] += 1\n else:\n continue\n display.print_table(stdscr, size, start_poz, table, locations)\n\n cursor_poz = {'y': locations[current_poz['y']][current_poz['x']]['y'],\n 'x': locations[current_poz['y']][current_poz['x']]['x']}\n\n # XXX\n # stdscr.move(0, 0)\n # stdscr.addstr(str(current_poz))\n # stdscr.move(0, 20)\n # stdscr.addstr(str(cursor_poz))\n # stdscr.addstr(15, 0, str(table))\n # --XXX\n stdscr.refresh()\n curses.init_pair(1, curses.COLOR_RED, curses.COLOR_BLACK)\n curses.init_pair(2, curses.COLOR_GREEN, curses.COLOR_BLACK)\n curses.curs_set(False)\n if run == 0:\n\n stdscr.addstr(1, 0, \"You lose!\", curses.color_pair(1))\n elif run == 2:\n stdscr.addstr(1, 0, \"Player \" + str(player) + \" won!\", curses.color_pair(2))\n stdscr.addstr(10, 0, \"Press any key to exit!\")\n stdscr.refresh()\n stdscr.getkey()\n\n\ndef intro():\n # 80x24 |\n print(\" \")\n print(\" \")\n print(\" WELCOME TO TICTACTOE! \")\n print(\" \")\n print(\" by László Székely-Tóth \")\n print(\" \")\n input_string = str(input(\"Enter the table size as ROWxCOLUMN (default: 3x3, max 5x5): \"))\n if input_string:\n col_size, row_size = int(input_string.split('x', 1)[0]), int(input_string.split('x', 1)[1])\n size['y'] = row_size\n size['x'] = col_size\n\nintro()\nwrapper(main)\n","repo_name":"racer01/codecool_tictactoe","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11537024226","text":"\nfrom django.conf.urls import url\n\nfrom . 
import views\n\napp_name='balance'\n\nurlpatterns = [\n url(r'^balance/badges/$', views.BadgesListView.as_view(), name='badge_list'),\n url(r'^balance/badges/create$', views.NewSocialBadge.as_view(), name='create_badge'),\n url(r'^balance/badges/(?P\\d+)/$', views.SocialBadgeDetailView.as_view(), name='badge_detail'),\n url(r'^balance/badges/(?P\\d+)/edit/$', views.SocialBadgeEditView.as_view(), name='badge_edit'),\n url(r'^balance/render/(?P\\d+)/$', views.SocialBadgeRender.as_view(), name='badge_render'),\n\n url(r'^balance/$', views.SocialBalanceYear.as_view(), name='balance'),\n url(r'^balance/(?P\\d+)/$', views.SocialBalanceYear.as_view(), name='balance_year'),\n url(r'^accounts/providers/(?P\\d+)/balance/(?P\\d+)/$', views.SocialBalanceEditView.as_view(), name='entity_year'),\n url(r'^accounts/providers/(?P\\d+)/balance/(?P\\d+)/renderbadge/$', views.generate_badge, name='generate_badge'),\n\n url(r'^balance/import/$', views.ImportSocialBalanceFormView.as_view(), name='bulk_import'),\n\n url(r'^balance/processes/$', views.BalanceProcessList.as_view(), name='process_list'),\n url(r'^balance/processes/generate/$', views.BalanceProcessGenerate.as_view(), name='process_generate'),\n url(r'^balance/processes/year/(?P\\d+)/$', views.BalanceProcessList.as_view(), name='process_list_year'),\n url(r'^balance/processes/(?P\\d+)/$', views.BalanceProcessDetail.as_view(), name='process_detail'),\n url(r'^balance/processes/cancel/$', views.cancel, name='cancel_process'),\n]\n\n\n","repo_name":"Mercado-Social-de-Madrid/gestionMES","sub_path":"social_balance/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"3893211757","text":"from audio import speach\r\nfrom random import randint, choice\r\nimport time\r\n\r\nlevels = {\r\n \"easy\": [\"dairy\", \"mouse\", \"computer\"],\r\n \"medium\": [\"programming\", \"algorithm\", \"developer\"],\r\n \"hard\": [\"neural network\", \"machine learning\", \"artificial intelligence\"]\r\n}\r\n\r\ndef play_game(level):\r\n words = levels.get(level, [])\r\n\r\n score = 0\r\n num_attempts = 3\r\n\r\n for _ in range(len(words)):\r\n random_word = choice(words)\r\n print(f\"Произнесите слово {random_word}\")\r\n recog_word = speach().lower()\r\n print(recog_word)\r\n\r\n if recog_word == random_word:\r\n print('ДААА')\r\n score += 1\r\n else:\r\n print(f\"Нет! Правильное слово: {random_word}\")\r\n time.sleep(2)\r\n print(f'Игра завершена! 
Ваш счёт: {score}/{len(words)}')\r\n\r\nsel_level = input(\"easy/medium/hard \")\r\nplay_game(sel_level)","repo_name":"mkevorkov/AIbot","sub_path":"bonus_game.py","file_name":"bonus_game.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32784492261","text":"# -*- encoding: utf-8 -*-\nimport base64\nimport cStringIO\nimport qrcode\n\nclass ImpuestosInternosHelper():\n\n def obtenerbase64(self, numero):\n \"\"\"Toma como parametro un numero y retorna una palabra en BASE64\"\"\"\n diccionario = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\",\n \"J\", \"K\", \"L\", \"M\", \"N\", \"O\", \"P\", \"Q\", \"R\", \"S\", \"T\", \"U\", \"V\", \"W\", \"X\", \"Y\", \"Z\", \"a\", \"b\",\n \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\", \"l\", \"m\", \"n\", \"o\", \"p\", \"q\", \"r\", \"s\", \"t\", \"u\",\n \"v\", \"w\", \"x\", \"y\", \"z\", \"+\", \"/\"]\n cociente = 1.0\n resto = 0\n palabra = \"\"\n while cociente > 0:\n cociente = numero / 64\n resto = numero % 64\n palabra = diccionario[resto] + palabra\n numero = cociente\n return palabra\n\n def inviertecadena(self, cadena):\n \"\"\"Invierte una cadena :v\"\"\"\n return cadena[::-1]\n\n def obtenerverhoeff(self, cifra):\n \"\"\"Genera por medio una cifra una cifra verhoeff\"\"\"\n mul = [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],\n [1, 2, 3, 4, 0, 6, 7, 8, 9, 5],\n [2, 3, 4, 0, 1, 7, 8, 9, 5, 6],\n [3, 4, 0, 1, 2, 8, 9, 5, 6, 7],\n [4, 0, 1, 2, 3, 9, 5, 6, 7, 8],\n [5, 9, 8, 7, 6, 0, 4, 3, 2, 1],\n [6, 5, 9, 8, 7, 1, 0, 4, 3, 2],\n [7, 6, 5, 9, 8, 2, 1, 0, 4, 3],\n [8, 7, 6, 5, 9, 3, 2, 1, 0, 4],\n [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]]\n per = [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],\n [1, 5, 7, 6, 2, 8, 3, 0, 9, 4],\n [5, 8, 0, 3, 7, 9, 6, 1, 4, 2],\n [8, 9, 1, 6, 0, 4, 3, 5, 2, 7],\n [9, 4, 5, 3, 1, 2, 6, 8, 7, 0],\n [4, 2, 8, 6, 5, 7, 3, 9, 0, 1],\n [2, 7, 9, 3, 8, 0, 6, 4, 1, 5],\n [7, 0, 4, 6, 9, 1, 3, 2, 5, 8]]\n inv = [0, 4, 3, 2, 1, 5, 6, 7, 8, 9]\n check = 0\n numeroinvertido = self.inviertecadena(str(cifra))\n\n for i in range(0, len(numeroinvertido)):\n pf = ((i + 1) % 8)\n pc = int(numeroinvertido[i])\n mf = check\n mc = per[pf][pc]\n check = mul[mf][mc]\n return str(inv[check])\n\n def allegedrc4(self, mensaje, key):\n \"\"\"Genera el cifrado Alleged Rc4 por medio de un mensaje y una llave\"\"\"\n state = []\n x = 0\n y = 0\n index1 = 0\n index2 = 0\n mensaje_cifrado = \"\"\n\n for position in range(0,256):\n state.append(position)\n\n for position in range(0, 256):\n index2 = (ord(key[index1]) + state[position] + index2) % 256\n state[position], state[index2] = state[index2],state[position]\n index1 = (index1 + 1) % len(key)\n\n for position in range(0,len(mensaje)):\n x = (x+1) % 256\n y = (state[x] + y) % 256\n state[x], state[y] = state[y], state[x]\n nuevo_mensaje = (ord(mensaje[position]) ^ (state[(state[x] + state[y]) % 256]))\n mensaje_cifrado = mensaje_cifrado + \"-\" + self.rellenaCero(hex(nuevo_mensaje))\n\n return mensaje_cifrado[1:len(mensaje_cifrado)]\n\n def rellenaCero(self, mensaje):\n \"\"\"Rellena una cadena de cero en caso de que solo tenga longitud 1\"\"\"\n mensaje = str(mensaje).split('x')[1].upper()\n if len(mensaje) == 1:\n return \"0\" + mensaje\n return mensaje\n\n def generar_verhoeff_n_veces(self, cifra, n=2):\n \"\"\"Llama a verhoeff una n cantidad de veces, por defecto son 2 iteraciones\"\"\"\n for i in range(n):\n cifra = cifra 
+ '' + self.obtenerverhoeff(cifra)\n return str(cifra)\n\n def generar_codigo_control(self, numero_autorizacion, numero_de_factura, nit_ci, fecha_transaccion, monto_total, llave_dosificacion):\n \"\"\"Genera un codigo de control valido para Impuestos internos\"\"\"\n verhoeff_numero_de_factura = self.generar_verhoeff_n_veces(numero_de_factura)\n verhoeff_nit_ci = self.generar_verhoeff_n_veces(nit_ci)\n verhoeff_fecha_trasaccion = self.generar_verhoeff_n_veces(fecha_transaccion.replace('/',''))\n verhoeff_monto_total = self.generar_verhoeff_n_veces(str(int(round(float(monto_total)))))\n\n suma_verhoeff = int(verhoeff_numero_de_factura) + int(verhoeff_nit_ci) + int(verhoeff_fecha_trasaccion) + int(verhoeff_monto_total)\n digito_verhoeff = str(self.generar_verhoeff_n_veces(str(suma_verhoeff), 5))[-5:]\n cifra_verhoeff = self.tratar_verhoeff(digito_verhoeff)\n cadena_array_verhoeff = self.tratar_dosificacion(llave_dosificacion, cifra_verhoeff)\n cadena_concatenada = self.generar_verhoeff_concatenado(\n [numero_autorizacion, verhoeff_numero_de_factura, verhoeff_nit_ci,verhoeff_fecha_trasaccion, verhoeff_monto_total],\n cadena_array_verhoeff)\n dosificacion_verhoeff = llave_dosificacion + digito_verhoeff\n cifra_alleged4rc = self.allegedrc4(cadena_concatenada, dosificacion_verhoeff).replace('-', '')\n # ARRAYS EMPIEZAN EN CERO!!!\n suma_total_alleged = self.suma_ascii(cifra_alleged4rc)\n suma_1_alleged = self.suma_ascii(cifra_alleged4rc, 0, 5)\n suma_2_alleged = self.suma_ascii(cifra_alleged4rc, 1, 5)\n suma_3_alleged = self.suma_ascii(cifra_alleged4rc, 2, 5)\n suma_4_alleged = self.suma_ascii(cifra_alleged4rc, 3, 5)\n suma_5_alleged = self.suma_ascii(cifra_alleged4rc, 4, 5)\n\n alleged_tratado = self.tratar_alleged(suma_total_alleged,\n [suma_1_alleged, suma_2_alleged, suma_3_alleged, suma_4_alleged, suma_5_alleged],\n cifra_verhoeff)\n mensaje_alleged = self.obtenerbase64(alleged_tratado)\n codigo_control = self.allegedrc4(mensaje_alleged, dosificacion_verhoeff)\n return codigo_control\n\n def tratar_alleged(self,total_alleged, alleged_data, verhoeff_data):\n \"\"\"Retorna una suma total del cifrado alleged y verhoeff\"\"\"\n suma = 0\n for position in range(len(alleged_data)):\n resultado = round(total_alleged * alleged_data[position] / verhoeff_data[position])\n suma += resultado\n return int(suma)\n\n def suma_ascii(self, mensaje, inicio=0, paso=1):\n \"\"\"Suma valores ASCII y retorna una suma total\"\"\"\n suma = 0\n while inicio < len(mensaje):\n suma += ord(mensaje[inicio])\n inicio += paso\n return suma\n\n def suma_verhoeff(self, array_data):\n \"\"\"Suma un array verhoeff y retorna la suma\"\"\"\n suma = 0\n for data in array_data:\n suma = suma + int(data)\n return suma\n\n def tratar_verhoeff(self, verhoeff):\n \"\"\"Retorna un array por medio del valor verhoeff aumentando 1 a cada valor\"\"\"\n cifra = []\n for actual in verhoeff:\n cifra.append(int(actual)+1)\n return cifra\n\n def tratar_dosificacion(self, llave_dosificacion, cifra_verhoeff):\n \"\"\"Retorna un array por medio de la llave de dosificacion y el valor verhoeff \"\"\"\n cifra = []\n inicio = 0\n for position in cifra_verhoeff:\n final = inicio + position\n cifra.append(llave_dosificacion[inicio:final])\n inicio = final\n return cifra\n\n def generar_verhoeff_concatenado(self, array_data, cadena_array_verhoeff):\n \"\"\"Por medio de una cadena de datos de array y una cadena verhoeff, retorna una cadena\"\"\"\n cadena = \"\"\n for actual in range(len(array_data)):\n cadena = cadena + array_data[actual] + 
cadena_array_verhoeff[actual]\n return cadena\n\n def generar_qr(self,\n nit_emisor,\n numero_factura,\n numero_autorizacion,\n fecha_emision,\n total,\n importe_base,\n codigo_control,\n nit_comprador,\n importe_ice_iehd_tasas=0,\n importe_ventas_gravada=0,\n importe_no_credito_fiscal=0,\n descuentos=0):\n \"\"\"Retorna una imagen en BASE64 JPEG\"\"\"\n cadena_qr = \"%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s\" % \\\n (nit_emisor,\n numero_factura,\n numero_autorizacion,\n fecha_emision,\n total,importe_base,\n codigo_control,\n nit_comprador,\n importe_ice_iehd_tasas,\n importe_ventas_gravada,\n importe_no_credito_fiscal,\n descuentos)\n\n qr_base = qrcode.make(cadena_qr)\n buffer_string = cStringIO.StringIO()\n qr_base.save(buffer_string, format=\"JPEG\")\n qr64 = base64.b64encode(buffer_string.getvalue())\n return qr64","repo_name":"drkpkg/codigo_control","sub_path":"data/lib/impuestos_internos.py","file_name":"impuestos_internos.py","file_ext":"py","file_size_in_byte":8942,"program_lang":"python","lang":"es","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"16924287165","text":"import os\n\nfrom ..UiTrader import UiTrader\nfrom .YhUiTrader import YHClientTrader\nfrom .log import log\n\nfrom DyCommon.DyCommon import *\nfrom ...DyStockTradeCommon import *\n\n\nclass YhTrader(UiTrader):\n \"\"\" 银河证券窗口交易类 \"\"\"\n brokerName = '银河证券'\n broker = 'yh'\n\n # consts set by config menu\n exePath = None\n account = None\n password = None\n\n heartBeatTimer = 0 # no heart beat\n\n curEntrustHeaderNoIndex = 11\n curEntrustHeaderStateIndex = 4\n\n\n def __init__(self, eventEngine, info):\n super().__init__(eventEngine, info)\n\n log.dyInfo = info\n\n self._uiClient = YHClientTrader(DyCommon.createPath('Stock/Program/Temp'))\n\n def _login(self):\n user = self.account\n password = self.password\n exePath = self.exePath\n\n try:\n self._uiClient.login(user, password, exePath)\n except Exception as ex:\n self._info.print('登录[银河证券]异常:{}'.format(ex), DyLogData.error)\n return False\n\n return True\n\n @UiTrader.retryWrapper\n def _logout(self, oneKeyHangUp=False):\n return self._uiClient.exit()\n\n def getBalance(self, parse=True, fromBroker=True):\n \"\"\"\n 获取账户资金状况\n @return: header, [[item]]\n \"\"\"\n if self._balanceHeader is not None and not fromBroker:\n return self._balanceHeader, self._balance\n\n df = self._uiClient.balance\n\n header, rows = list(df), df.values.tolist()\n\n self._balanceHeader = header\n self._balance = rows\n \n return header, rows\n\n def getPositions(self, fromBroker=True):\n \"\"\"\n 获取账户持仓\n @return: header, [[item]], autoForegroundColName\n \"\"\"\n if self._positionHeader is not None and not fromBroker:\n return self._positionHeader, self._positions, '参考盈亏'\n\n df = self._uiClient.position\n\n header, rows = list(df), df.values.tolist()\n\n self._positionHeader = header\n self._positions = rows\n\n return header, rows, '参考盈亏'\n\n def getCurEntrusts(self):\n \"\"\"\n 获取当日委托\n @return: header, [[item]]\n \"\"\"\n df = self._uiClient.today_entrusts\n\n return list(df), df.values.tolist()\n\n def getCurDeals(self):\n \"\"\"\n 获取当日成交\n @return: header, [[item]]\n \"\"\"\n df = self._uiClient.today_trades\n\n return list(df), df.values.tolist()\n\n def onTicks(self, ticks):\n \"\"\"\n For UI\n update stock price related data, e.g. 
stock market value, stock price, PnL\n \"\"\"\n if self._balance is None or self._positions is None:\n return\n\n # positions\n marketValue = 0 # 账户市值\n for pos in self._positions:\n code = DyStockCommon.getDyStockCode(pos[0])\n tick = ticks.get(code)\n if tick is None:\n self._info.print('银河证券: 无法获取{0}({1})的Tick数据'.format(code, pos[1]), DyLogData.warning)\n marketValue += float(pos[9])\n continue\n\n pos[9] = tick.price*float(pos[2]) # 市值\n pos[11] = tick.price # 最新价格\n pos[7] = (tick.price - float(pos[-4]))*float(pos[2]) # 浮动盈亏\n\n marketValue += pos[9]\n\n # balance\n if self._positions:\n balance = self._balance[0]\n\n marketValueDelta = marketValue - float(balance[-2])\n balance[-2] = marketValue\n balance[-1] = float(balance[-1]) + marketValueDelta\n\n def refresh(self):\n self._uiClient.refresh()\n\n @UiTrader.retryWrapper\n def buy(self, code, name, price, volume):\n try:\n ret = self._uiClient.buy(code[:6], price, volume)\n if ret is None:\n return False\n except:\n return False\n\n return True\n\n @UiTrader.retryWrapper\n def sell(self, code, name, price, volume):\n try:\n ret = self._uiClient.sell(code[:6], price, volume)\n if ret is None:\n return False\n except:\n return False\n\n return True\n\n @UiTrader.retryWrapper\n def cancel(self, entrust):\n ret = self._uiClient.cancel_entrust(entrust.brokerEntrustId)\n\n message = ret['message']\n if '错误' in message or 'unkown' in message:\n log.warning('银河证券客户端:撤单[{}({}), 委托号{}]错误:{}'.format(entrust.name, entrust.code, entrust.brokerEntrustId, message))\n return False\n\n return True","repo_name":"MicroEngine/DevilYuan","sub_path":"Stock/Trade/Broker/YhNew/YhTrader.py","file_name":"YhTrader.py","file_ext":"py","file_size_in_byte":4679,"program_lang":"python","lang":"en","doc_type":"code","stars":222,"dataset":"github-code","pt":"61"} +{"seq_id":"73731556353","text":"from flask import Blueprint, render_template, redirect, request, flash, url_for\nfrom .form import OrderForm, ContactForm\nfrom models import Order\nfrom flask_mail import Message, Mail\nfrom app import db\n\nmain = Blueprint('main', __name__)\n\n@main.route('/')\ndef index():\n return render_template(\"index.html\")\n\n\n@main.route('/user_change')\ndef user_change():\n return render_template(\"user_change.html\")\n\n@main.route('/cancelorder')\ndef cancelorder():\n return render_template(\"cancelorder.html\")\n\n@main.route('/contactform', methods=['POST', 'GET'])\ndef contactform():\n form = ContactForm(request.form)\n\n if request.method == 'POST':\n if form.validate() == False:\n flash(f\"All fields are required.\")\n return render_template('contactform.html', form=form)\n else:\n msg = Message(subject=form.subject.data, sender=form.email.data, recipients=[\"akorir233@gmail.com\"])\n msg.body = \"Thanks your message has been recieved. 
We will get back to you shortly\"\n # (form.name.data, form.email.data, form.message.data)\n # mail.send(msg)\n \n\n return redirect(url_for(\"main.index\"))\n elif request.method == 'GET':\n return render_template(\"contactform.html\", form=form)\n\n@main.route('/order', methods=['POST', 'GET'])\ndef order():\n form = OrderForm(request.form)\n if request.method == 'POST':\n parcel_name = request.form.get('parcel_name')\n parcel_number = request.form.get('parcel_number')\n order = Order(parcel_name=form.parcel_name.data, parcel_number=form.parcel_number.data)\n db.session.add(order)\n db.session.commit()\n flash(f\"Parcel ordered succesfully\")\n return redirect(url_for('main.order'))\n return render_template('order.html', form=form)\n\n\n@main.route('/orders', methods=['GET'])\ndef orders():\n if request.method == 'GET':\n order = Order.query.all()\n return render_template('order_items.html', order=order)\n\n@main.route('/items')\ndef status():\n return render_template(\"status.html\")\n","repo_name":"alexarirok/Flask-Project","sub_path":"resources/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"422146049","text":"# Created by william from lexueoude.com. 更多正版技术视频讲解,公众号1.乐学偶得(lexueoude)2.乐学FinTech (LoveShareFinTech)\n\nfrom random import seed\nfrom random import randrange\nfrom csv import reader\nfrom math import sqrt\n\n\n# 1.读取数据 csv\n\ndef read_our_csv_file(filename):\n dataset = list()\n with open(filename, 'r') as file:\n csv_reader = reader(file)\n for row in csv_reader:\n if not row:\n continue\n dataset.append(row)\n return dataset\n\n\n# 2.数据类型转换(str to float)\n\n\ndef change_string_to_float(dataset, column):\n for row in dataset:\n row[column] = float(row[column].strip())\n\n\n# 3.数据类型转换(str to int)\n\ndef change_string_to_int(dataset, column):\n class_value = [row[column] for row in dataset]\n find_the_unique_class = set(class_value)\n lookup = dict()\n for i, value in enumerate(find_the_unique_class):\n lookup[value] = i\n for row in dataset:\n row[column] = lookup[row[column]]\n return lookup\n\n\n# 4.正则化\ndef find_the_min_and_max_of_our_data(dataset):\n min_and_max_list = list()\n for i in range(len(dataset[0])):\n column_value = [row[i] for row in dataset]\n the_min_value = min(column_value)\n the_max_value = max(column_value)\n min_and_max_list.append([the_min_value, the_max_value])\n return min_and_max_list\n\n\ndef normalize_our_data(dataset, min_and_max_list):\n for row in dataset:\n for i in range(len(row)):\n row[i] = (row[i] - min_and_max_list[i][0]) / (min_and_max_list[i][1] - min_and_max_list[i][0])\n\n\n# 5. 
k fold切分数据,\n# 注意:不改变原数据\ndef k_fold_cross_validation(dataset, n_folds):\n dataset_split = list()\n dataset_copy = list(dataset)\n every_fold_size = int(len(dataset) / n_folds)\n for i in range(n_folds):\n fold = list()\n while len(fold) < every_fold_size:\n index = randrange(len(dataset_copy))\n fold.append(dataset_copy.pop(index))\n dataset_split.append(fold)\n return dataset_split\n\n\n# 6.判断准确性(accuracy)\ndef calculate_our_model_accuracy(actual, predicted):\n correct_counter = 0\n for i in range(len(actual)):\n if actual[i] == predicted[i]:\n correct_counter += 1\n return correct_counter / float(len(actual)) * 100.0\n\n\n# 7.给我们的算法进行评估(打分)\n\ndef how_good_is_our_algo(dataset, algo, n_folds, *args):\n folds = k_fold_cross_validation(dataset, n_folds)\n scores = list()\n for fold in folds:\n train_dataset = list(folds)\n train_dataset.remove(fold)\n train_dataset = sum(train_dataset, [])\n test_dataset = list()\n for row in fold:\n row_copy = list(row)\n test_dataset.append(row_copy)\n row_copy[-1] = None\n predicted = algo(train_dataset, test_dataset, *args)\n actual = [row[-1] for row in fold]\n accuracy = calculate_our_model_accuracy(actual, predicted)\n scores.append(accuracy)\n return scores\n\n\n# 8.计算欧几里德举例\n\ndef calculate_euclidiean_distance(row1, row2):\n distance = 0.0\n for i in range(len(row1) - 1):\n distance += (row1[i] - row2[i]) ** 2\n return sqrt(distance)\n\n\n# 9.找到最近的k个点\ndef get_our_neighbors(train_dataset, test_row, num_of_neighbors):\n distances = list()\n for train_dataset_row in train_dataset:\n dist = calculate_euclidiean_distance(test_row, train_dataset_row)\n distances.append((train_dataset_row, dist))\n distances.sort(key=lambda every_tuple: every_tuple[1])\n neighbors = list()\n for i in range(num_of_neighbors):\n neighbors.append(distances[i][0])\n return neighbors\n\n\n# 10.做预测\ndef make_prediction(train_dataset, test_row, num_of_neighbors):\n neighbors = get_our_neighbors(train_dataset, test_row, num_of_neighbors)\n output = [row[-1] for row in neighbors]\n our_prediction = max(set(output), key=output.count)\n return our_prediction\n\n\n# 11.运用KNN算法\ndef get_our_prediction_using_knn_algo(train_dataset, test_dataset, num_of_neighbors):\n predictions = list()\n for test_row in test_dataset:\n our_prediction = make_prediction(train_dataset, test_row, num_of_neighbors)\n predictions.append(our_prediction)\n return predictions\n\n\nseed(1)\ndataset = read_our_csv_file('abalone.csv')\nfor i in range(1, len(dataset[0])):\n change_string_to_float(dataset, i)\n\nchange_string_to_int(dataset, 0)\n\n# print(dataset)\n\nn_folds = 10\n\nnum_neighbors = 7\nscores = how_good_is_our_algo(dataset, get_our_prediction_using_knn_algo, n_folds, num_neighbors)\n\nprint('Our model\\'s scores are : %s' % scores)\nprint('The mean accuracy is :%.3f%%' % (sum(scores) / float(len(scores))))\n","repo_name":"williamjiamin/Pure_Python_for_DS_ML","sub_path":"X.Kaggle数据用PurePython实战/case05_abalone_age_using_KNN.py","file_name":"case05_abalone_age_using_KNN.py","file_ext":"py","file_size_in_byte":4787,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"61"} +{"seq_id":"5828244423","text":"#!/usr/bin/env python\nimport sys\n# ==================================================\n# EDIT THE FOLLOWING PATH TO POINT TO YOUR DIRECTORY\n# ==================================================\nsys.path.append('/home/ashwith/Development/pyMOSChar')\n# ==================================================\n\nimport charMOS\nimport numpy as np\n\n# Specify the 
name of the MOSFET model. Simple way to do so\n# is to create a schematic in Virtuoso that contains both\n# nmos and pmos transistors. Then generate the netlist in\n# ADE. You'll then be able to view the netlist and see what\n# the name of the model is.\nnmos = \"CMOSN\"\npmos = \"CMOSP\"\n\n# Specify the MOSFET width in microns.\nwidth = 1\n\n\n# Specify the MOSFET lengths you're interested\n# in. The following code creates an array of\n# values from 0.1 to 5.1 in steps of 0.1. Note\n# that the arange() function omits the last value\n# so if you call np.arange(0.1, 5.1, 0.1), the\n# last value in the array will be 0.5.\n# MOS lengths are in microns. Don't keep the\n# step size too small. Fine steps will use a \n# LOT of RAM can cause the machine to hang!\n# start, stop, step\nmosLengths = np.arange(0.1, 5.1, 0.1)\n\n## Example 2 for lenghs\n#mosLengths = np.concatenate(\n#np.arange(0.1, 1, 0.1),\n#np.arange(1, 10, 0.5),\n#np.arange(10, 100, 10))\n\n# Initialize the characterization process. Modify\n# the values below as per your requirements. Ensure\n# that the step values aren't too small. Otherwise\n# your RAM will get used up.\ncharMOS.init(\nsimulator='ngspice',\nmosLengths=mosLengths,\nmodelFiles=(\"/home/ashwith/Development/pyMOSChar/pdk.mod\",),\nmodelN=nmos,\nmodelP=pmos,\nsimOptions=\"\",\ncorners=(\"\",),\nsubcktPath=\"\",\ndatFileName=\"mosPDK_90_W{0}u.dat\".format(width),\nvgsMax=1,\nvgsStep=20e-3,\nvdsMax=1,\nvdsStep=20e-3,\nvsbMax=1,\nvsbStep=20e-3,\nnumfing=1,\ntemp=300,\nwidth=width)\n\n# This function call finally generates the required database.\ncharMOS.genDB()\n\n","repo_name":"ashwith/pyMOSChar","sub_path":"charMOSExample.py","file_name":"charMOSExample.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"61"} +{"seq_id":"37074746297","text":"\"\"\"ApiGate URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\n\nfrom django.conf.urls import url\nfrom apigate.main import http as api_view\n\nurlpatterns = [\n url(r'DatabaseManagement', api_view.DatabaseManagement, name=\"api_view_DatabaseManagement\"),\n url(r'DatabaseList', api_view.DatabaseList, name=\"api_view_DatabaseList\"),\n url(r'ChangeStatus', api_view.ChangeStatus, name=\"api_view_ChangeStatus\"),\n url(r'TestConnection', api_view.TestConnection, name=\"api_view_TestConnection\"),\n url(r'GetTablesByDB', api_view.GetTablesByDB, name=\"api_view_GetTablesByDB\"),\n url(r'ApiList', api_view.ApiList, name=\"api_view_ApiList\"),\n url(r'ApiConfig', api_view.ApiConfig, name=\"api_view_ApiConfig\"),\n url(r'DeleteConfig', api_view.delete, name=\"api_view_delete\"),\n url(r'', api_view.base, name=\"api_view_base\"),\n]\n","repo_name":"longmiaohao/NoCodeApi","sub_path":"apigate/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"73881873795","text":"from datetime import datetime, timedelta\nfrom glob import glob\nfrom os import chdir, remove\nfrom os.path import join, getsize, exists\nfrom shutil import disk_usage\nfrom time import sleep\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\n\nimport ffmpeg\nfrom humanize import naturalsize\nimport pandas as pd\nfrom pytz import UTC\n\nfrom camera.models import Camera\nfrom storage.models import Video\n\n\ndef check_storage(path, min_percent, min_bytes):\n print(\n f\"{datetime.now()}: Checking available storage space on {path} ...\",\n end=\"\",\n flush=True,\n )\n total, used, free = disk_usage(path)\n min_percent_bytes = total * min_percent / 100\n if free < min_bytes:\n print(f\"not good! {naturalsize(free)} < {naturalsize(min_bytes)}\")\n return min_bytes - free\n if free < min_percent_bytes:\n print(\n f\"not good! 
{naturalsize(free)} < {naturalsize(min_percent_bytes)} ({min_percent}% of {naturalsize(total)})\"\n )\n return min_percent_bytes - free\n print(\"good.\")\n return 0\n\n\ndef delete_old_videos(bytes_to_free):\n print(\n f\"{datetime.now()}: Deleting old videos to free up {naturalsize(bytes_to_free)} of space ...\",\n end=\"\",\n flush=True,\n )\n\n video_fields = [\"id\", \"camera_id\", \"start_date\", \"end_date\", \"file\"]\n video_qs = Video.objects.order_by(\"camera_id\", \"start_date\")\n video_values = video_qs.values_list(*video_fields)\n\n video_df = pd.DataFrame.from_records(video_values, columns=video_fields)\n video_df = video_df.set_index(\"id\")\n\n chdir(settings.STORAGE_DIR)\n video_df[\"size\"] = video_df[\"file\"].apply(\n lambda vf: getsize(vf) if exists(vf) else 0\n )\n video_df = video_df[video_df[\"size\"] != 0]\n\n # TODO: Update to pull actual priorities once added to model\n camera_fields = [\"camera_id\", \"priority\"]\n camera_qs = Camera.objects.all()\n camera_values = [(i, 1) for i, enabled in camera_qs.values_list(\"id\", \"enabled\")]\n\n camera_df = pd.DataFrame.from_records(camera_values, columns=camera_fields)\n camera_df = camera_df.set_index(\"camera_id\")\n\n camera_df[\"videocount\"] = video_df.groupby(\"camera_id\").size()\n camera_df = camera_df.fillna({\"videocount\": 0}).astype(\"int\")\n camera_df[\"vc_norm\"] = camera_df[\"videocount\"] / camera_df[\"priority\"]\n\n videos_to_delete = set()\n files_to_delete = []\n bytes_freed = 0\n while bytes_freed < bytes_to_free:\n camera_id = camera_df[\"vc_norm\"].idxmax()\n video_row = video_df[video_df[\"camera_id\"] == camera_id].iloc[0]\n video_id = video_row.name\n\n videos_to_delete.add(video_id)\n files_to_delete.append(video_row[\"file\"])\n bytes_freed += video_row[\"size\"]\n\n video_df.drop(video_id, inplace=True)\n camera_df.at[camera_id, \"vc_norm\"] -= 1 / camera_df.at[camera_id, \"priority\"]\n\n chdir(settings.STORAGE_DIR)\n for f in files_to_delete:\n remove(f)\n Video.objects.filter(id__in=videos_to_delete).delete()\n\n print(f\"deleted {len(videos_to_delete)} videos ({naturalsize(bytes_freed)})\")\n\n\ndef add_missing_db_records():\n print(\n f\"{datetime.now()}: Checking for missing video database entries ...\",\n end=\"\",\n flush=True,\n )\n\n records_to_create = []\n skipped = []\n\n chdir(settings.STORAGE_DIR)\n for video_path in glob(\"record/**/*.mp4\", recursive=True):\n if not Video.objects.filter(file=video_path).exists():\n path_parts = video_path.split(\"/\")\n camera_id = path_parts[-2]\n try:\n start_date = datetime.strptime(\n \"_\".join(path_parts[-1].split(\".\")[0].split(\"_\")[-2:]),\n \"%Y%m%d_%H%M%S\",\n ).replace(tzinfo=UTC)\n except ValueError:\n print(\n f\"{datetime.now()}: WARNING: unrecognized video file {video_path}\"\n )\n continue\n\n if datetime.utcnow().replace(tzinfo=UTC) - start_date < timedelta(\n minutes=15\n ):\n continue\n\n try:\n duration = float(ffmpeg.probe(video_path)[\"format\"][\"duration\"])\n except ffmpeg.Error:\n skipped.append(video_path)\n continue\n end_date = start_date + timedelta(seconds=duration)\n\n records_to_create.append(\n Video(\n camera_id=camera_id,\n start_date=start_date,\n end_date=end_date,\n file=video_path,\n )\n )\n\n Video.objects.bulk_create(records_to_create)\n\n if len(records_to_create):\n print(f\"created {len(records_to_create)} entries.\")\n else:\n print(\"none.\")\n\n if len(skipped):\n print(f\"WARN: Skipped {len(skipped)} broken videos:\")\n for video_path in skipped:\n print(f\"- {video_path}\")\n\n\ndef 
delete_stale_db_records():\n print(\n f\"{datetime.now()}: Checking for stale video database entries ...\",\n end=\"\",\n flush=True,\n )\n records_to_delete = {\n v.id\n for v in Video.objects.all()\n if not exists(join(settings.STORAGE_DIR, v.file))\n }\n Video.objects.filter(id__in=records_to_delete).delete()\n if len(records_to_delete):\n print(f\"deleted {len(records_to_delete)} entries.\")\n else:\n print(\"none.\")\n\n\ndef handle_cmr():\n print(\n f\"{datetime.now()}: Checking for concurrent multi recordings ...\",\n end=\"\",\n flush=True,\n )\n\n videos_to_delete = set()\n files_to_delete = []\n\n for camera in Camera.objects.all():\n tracks = []\n for video in camera.video_set.order_by(\"-start_date\"):\n added = False\n i = 0\n while not added and i < len(tracks):\n if video.end_date <= tracks[i][-1].start_date:\n tracks[i].append(video)\n added = True\n i += 1\n if not added:\n tracks.append([video])\n\n if len(tracks) == 0:\n continue\n\n max_i, _ = max(enumerate(tracks), key=lambda it: len(it[1]))\n for i, track in enumerate(tracks):\n if i != max_i:\n for video in track:\n videos_to_delete.add(video.id)\n files_to_delete.append(video.file)\n\n chdir(settings.STORAGE_DIR)\n for f in files_to_delete:\n remove(f)\n Video.objects.filter(id__in=videos_to_delete).delete()\n\n if len(videos_to_delete):\n print(f\"deleted {len(videos_to_delete)} videos.\")\n else:\n print(\"none.\")\n\n\ndef handle_housekeep(do_db_cleanup=True):\n if do_db_cleanup:\n add_missing_db_records()\n delete_stale_db_records()\n\n record_dir = f\"{settings.STORAGE_DIR}/record/\"\n bytes_to_free = check_storage(\n record_dir, settings.MIN_FREE_PERCENT, settings.MIN_FREE_BYTES\n )\n if bytes_to_free:\n delete_old_videos(bytes_to_free)\n\n\nclass Command(BaseCommand):\n help = \"Does housekeeping tasks\"\n\n def add_arguments(self, parser):\n parser.add_argument(\"--oneshot\", action=\"store_true\")\n parser.add_argument(\"--cmr\", action=\"store_true\")\n\n def handle(self, *args, **options):\n if options.get(\"cmr\", False):\n return handle_cmr()\n\n run_counter = 0\n while run_counter == 0 or not options.get(\"oneshot\", False):\n if run_counter > 0:\n print(\n f\"{datetime.now()}: Sleeping for 60 secs ...\",\n end=\"\",\n flush=True,\n )\n sleep(60)\n print(\"done.\")\n handle_housekeep(run_counter <= 1)\n run_counter += 1\n if run_counter > 60:\n run_counter = 1\n","repo_name":"abraha2d/mirador","sub_path":"worker/management/commands/housekeep.py","file_name":"housekeep.py","file_ext":"py","file_size_in_byte":7875,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"27615877906","text":"# 110. 
Balanced Binary Tree\n# Easy\n# Tree, Depth-First Search, Binary Tree\n# https://leetcode.com/problems/balanced-binary-tree\n\n# Determine if a binary tree is height-balanced.\n# def isBalanced(self, root: Optional[TreeNode]) -> bool:\n# Input: root = [1,2,2,3,3,null,null,4,4]\n# Output: false\n\nfrom typing import Optional\n\nclass TreeNode:\n # Definition for a binary tree node.\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\nclass Solution:\n # PostOrder DFS Recursion | Time: O(n log n) | Space: O(h)\n def isBalanced(self, root: Optional[TreeNode]) -> bool:\n if not root:\n return True\n\n def height(node: Optional[TreeNode]) -> int:\n # Base case must check the local node, not the enclosing root,\n # otherwise height(None) never terminates the recursion correctly.\n if not node:\n return -1\n return 1 + max(height(node.left), height(node.right))\n\n return (\n # Height in subtrees do not differ by more than 1.\n abs(height(root.left) - height(root.right)) < 2\n # Is the subtree balanced with the above condition?\n and self.isBalanced(root.left)\n and self.isBalanced(root.right))","repo_name":"daviscvance/Practice","sub_path":"Leetcode/Python/binary_trees/easy/110-balanced-binary-tree.py","file_name":"110-balanced-binary-tree.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"22816659537","text":"from bs4 import BeautifulSoup as b\r\nimport requests as r\r\nimport pandas as p\r\nimport openpyxl\r\n\r\nexcel = openpyxl.Workbook() # creates an excel file\r\nprint(excel.sheetnames) # how many sheets are created\r\nsheet = excel.active # make sure we're working off the main sheet\r\nsheet.title = 'Top Rated Movies' # changed the sheet name\r\nprint(excel.sheetnames)\r\nsheet.append(['Rank', 'Movie Title', 'Year Released',\r\n 'IMDB Ratings']) # created a row in the file\r\n\r\ntry: # this will print the error in case the url is not valid and avoid crashing the system\r\n myUrl = 'https://www.imdb.com/chart/top?pf_rd_m=A2FGELUUNOQJNL&pf_rd_p=470df400-70d9-4f35-bb05-8646a1195842&pf_rd_r=JSRC950SSY27FSK79KS0&pf_rd_s=right-4&pf_rd_t=15506&pf_rd_i=moviemeter&ref_=chtmvm_ql_3'\r\n download = r.get(myUrl)\r\n soup = b(download.text, \"html.parser\")\r\n\r\n movies = soup.find('tbody', class_=\"lister-list\").find_all('tr')\r\n\r\n for mov in movies:\r\n ranking = mov.find('td', class_=\"titleColumn\").get_text(\r\n strip=True).split('.')[0]\r\n title = mov.find('td', class_=\"titleColumn\").a.text\r\n year = mov.find('td', class_=\"titleColumn\").span.text.strip('()')\r\n ratings = mov.find('td', class_=\"ratingColumn imdbRating\").strong.text\r\n\r\n #print(ranking, title, year, ratings)\r\n sheet.append([ranking, title, year, ratings])\r\n\r\nexcept Exception as e:\r\n print(e)\r\n\r\nexcel.save('IMDB Top 250 Movies.xlsx') # save this to an excel file\r\n","repo_name":"chill800/Data-extraction-scraping-Projects","sub_path":"webscraping-imdb.py","file_name":"webscraping-imdb.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"72565257474","text":"'''\n\nDescription:\n\nIn LOL world, there is a hero called Teemo and his attacking can make his enemy Ashe be in poisoned condition. 
Now, given the Teemo's attacking ascending time series towards Ashe and the poisoning time duration per Teemo's attacking, you need to output the total time that Ashe is in poisoned condition.\n\nYou may assume that Teemo attacks at the very beginning of a specific time point, and makes Ashe be in poisoned condition immediately.\n\n\n\nExample 1:\n\nInput: [1,4], 2\nOutput: 4\nExplanation: At time point 1, Teemo starts attacking Ashe and makes Ashe be poisoned immediately. \nThis poisoned status will last 2 seconds until the end of time point 2. \nAnd at time point 4, Teemo attacks Ashe again, and causes Ashe to be in poisoned status for another 2 seconds. \nSo you finally need to output 4.\n \n \n\nExample 2:\n\nInput: [1,2], 2\nOutput: 3\nExplanation: At time point 1, Teemo starts attacking Ashe and makes Ashe be poisoned. \nThis poisoned status will last 2 seconds until the end of time point 2. \nHowever, at the beginning of time point 2, Teemo attacks Ashe again who is already in poisoned status. \nSince the poisoned status won't add up together, though the second poisoning attack will still work at time point 2, it will stop at the end of time point 3. \nSo you finally need to output 3.\n \n\nNote:\n\nYou may assume the length of given time series array won't exceed 10000.\nYou may assume the numbers in the Teemo's attacking time series and his poisoning time duration per attacking are non-negative integers, which won't exceed 10,000,000.\n\n'''\n\n\nfrom typing import List\n\nclass Solution:\n def findPoisonedDuration(self, timeSeries: List[int], duration: int) -> int:\n \n output = 0\n n = len(timeSeries)\n \n if n == 0:\n # Quick response for empty attack time-series\n return 0\n \n \n # for the first attack\n output += duration\n \n # scan from second attack to last attack\n for i in range(1, n):\n \n time_gap = timeSeries[i] - timeSeries[i-1]\n \n if time_gap < duration:\n # has overlap between two attacks\n output += time_gap\n \n else:\n # no overlap between two attacks\n output += duration\n \n return output\n\n\n\n# n : the length of timeSeries\n\n## Time Complexity: O( n )\n#\n# The overhead in time is the cost of for-loop iteration, which is of O( n )\n\n## Space Complexity: O( 1 )\n#\n# The overhead in space is the storage for loop index and temporary variable, which is of O( 1 )\n\n\nimport unittest\n\nclass Testing( unittest.TestCase ):\n\n def test_case_1( self ):\n\n result = Solution().findPoisonedDuration( timeSeries=[1,4], duration=2 )\n self.assertEqual(result, 4)\n\n\n def test_case_2( self ):\n\n result = Solution().findPoisonedDuration( timeSeries=[1,2], duration=3 )\n self.assertEqual(result, 4)\n\n\n\nif __name__ == '__main__':\n\n unittest.main() ","repo_name":"brianchiang-tw/leetcode","sub_path":"2020_September_Leetcode_30_days_challenge/Week_4_Teemo Attacking/by_iteration.py","file_name":"by_iteration.py","file_ext":"py","file_size_in_byte":3066,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"61"} +{"seq_id":"24364429172","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport argparse\nfrom prepare_dataset import *\nfrom source.models.mobilenet_bmai import *\nfrom trainer_bmai_2 import *\nimport torchvision\nimport torch\nfrom source.models.OpenPose_bmai import *\n\n\n# In[ ]:\n\n\nimg_size = 384\nmodel_name = 'mobilenet'\nSEXE=True\nAGE=True\nlr=0.005\nSEED=0\nmethod_sex_age=0\n\n\n# In[2]:\n\n\nmodel = torch.load('results/best_model_mobilenet_v2_cambodge.pt')\ntransforms = 
prepare_transforms()\ndataset_guinee = bmaiDataset(csv_file=['/hdd/data/bmai_clean/full_guinee_data.csv'],img_size=img_size,transform=transforms)\n\n\n# In[7]:\n\n\nimport numpy as np\nimport torch\nimport os\nfrom torch.utils.data import DataLoader, random_split, Dataset\nimport torch.optim as optim\nfrom torch import nn\nimport pandas as pd\nimport wandb\n\ndevice = ('cuda' if torch.cuda.is_available() else 'cpu')\nprint(device)\n\nmodel = model.to(device)\n\n# Split data (train/test)\ntrain_size = 0.8\n\nnum_train_entries = int(train_size * len(dataset_guinee))\nnum_test_entries = len(dataset_guinee) - num_train_entries\ntrain_dataset, test_dataset = torch.utils.data.random_split(dataset_guinee,[num_train_entries,num_test_entries],generator=torch.Generator().manual_seed(SEED))\n\n# Data loaders :\nbatch_size = 64\nnum_workers = 16\n\ntest_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True,num_workers=num_workers)\n\n# Loss function and stored losses\nlosses = []\nbatch_losses = []\n\ndef loss_fn(y_pred,y_true):\n diff = torch.abs(y_pred-y_true)\n return torch.where(diff < (0.05*y_true),torch.zeros(1, 2,dtype=float).to(device),diff)\n \ndef test():\n model.eval()\n batch_losses = []\n y_true = []\n predictions = []\n predictions_branch = []\n for batch_idx, data in enumerate(test_dataloader):\n\n\n ## data in form ['img',sexe','days','height','weight']\n\n imgs = data[0].to(device)\n target = data[1:][0]\n num_elems_in_batch = target.shape[0] ## Forward\n imgs = data[0].to(device)\n target = data[1:][0]\n num_elems_in_batch = target.shape[0]\n\n\n ## sex and age :\n sexe = target[:,0].reshape((num_elems_in_batch,1)).to(device)\n age = target[:,1].reshape((num_elems_in_batch,1)).to(device)\n\n ## Target:\n target = target[:,2:].to(device)\n\n ## Forward\n scores,mean_h_w = model(imgs,age,sexe)\n\n\n y_true.append(target.detach().numpy() if device=='cpu' else target.cpu().detach().numpy()) \n predictions.append(scores.detach().numpy() if device=='cpu' else scores.cpu().detach().numpy())\n predictions_branch.append(mean_h_w.cpu().detach().numpy())\n\n # loss\n loss = loss_fn(scores,target).sum()\n batch_losses.append(loss.item() if device=='cpu' else loss.cpu().item())\n\n\n average_loss = np.mean(batch_losses)\n print(f'Average test loss is {average_loss}')\n\n y_true= np.vstack(y_true)\n predictions = np.vstack(predictions)\n predictions_branch = np.vstack(predictions_branch) #### JUST TO SEE BRANCH PREDICTIONS\n\n mean_height_rel_error,mean_weight_rel_error = calculate_mean_absolute_error_results(y_true,predictions)\n print(f'mean_height_rel_error = {mean_height_rel_error}')\n print(f'mean_weight_rel_error = {mean_weight_rel_error}')\n\n# wandb.log({'epoch':epoch_num,'epoch_test_loss':average_loss, 'mean_height_rel_error':mean_height_rel_error, 'mean_weight_rel_error':mean_weight_rel_error})\n\n\n torch.save(y_true,'results/y_true_cambodge_on_guinee_mobilenet_v2_with_branch.pt')\n torch.save(predictions,f'results/predictions_cambodge_on_guinee_mobilenet_v2_with_branch.pt')\n torch.save(predictions_branch,f'results/branch_predictions_cambodge_on_guinee_mobilenet_v2_with_branch.pt')\n\n\n return mean_height_rel_error,mean_weight_rel_error#,average_loss\n\n\n\n\ndef calculate_mean_absolute_error_results(y_true,predictions):\n df = pd.DataFrame()\n df['true_height'] = y_true[:,0]\n df['true_weight'] = y_true[:,1]\n df['predicted_height'] = predictions[:,0]\n df['predicted_weight'] = predictions[:,1]\n\n df['height_rel_err'] = df.apply(lambda row : 
np.abs(row.true_height - row.predicted_height)/row.true_height,axis=1)\n df['weight_rel_err'] = df.apply(lambda row : np.abs(row.true_weight - row.predicted_weight)/row.true_weight,axis=1)\n\n mean_height_rel_error = df.height_rel_err.values.mean()\n mean_weight_rel_error = df.weight_rel_err.values.mean()\n\n return mean_height_rel_error,mean_weight_rel_error\n\n\n\n# In[ ]:\n\ntest()\n\n\n","repo_name":"benhaj/bmai_ahmed","sub_path":"test_cambodge_on_guinee.py","file_name":"test_cambodge_on_guinee.py","file_ext":"py","file_size_in_byte":4525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"41649615847","text":"\nimport tensorflow as tf\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Conv1D, Flatten\nfrom tensorflow.keras.utils import to_categorical\nfrom sklearn.model_selection import train_test_split\n\ndef solution_model():\n fashion_mnist = tf.keras.datasets.fashion_mnist\n\n (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()\n\n # print(x_train.shape, x_test.shape) # (60000, 28, 28) (10000, 28, 28)\n # print(y_train.shape, y_test.shape) # (60000,) (10000,)\n\n x_train, x_val, y_train, y_val = train_test_split(x_train, y_train,\n train_size=0.8, shuffle=True, random_state=6)\n\n y_train = to_categorical(y_train)\n y_test = to_categorical(y_test)\n y_val = to_categorical(y_val)\n\n # the shape is already (60000, 28, 28), so only preprocessing is needed\n x_train = x_train.astype('float32') / 255.\n x_test = x_test.astype('float32') / 255.\n x_val = x_val.astype('float32') / 255.\n\n x_train = x_train.reshape(-1, 28, 28)\n x_test = x_test.reshape(-1, 28, 28)\n x_val = x_val.reshape(-1, 28, 28)\n\n\n # ========= model ==============\n model = Sequential()\n model.add(Conv1D(filters=50, kernel_size=2, padding='same', input_shape = (28, 28)))\n # model.add(Dense(56, input_shape=(28, 28)))\n model.add(Dense(16))\n model.add(Dense(16))\n model.add(Dense(8))\n model.add(Flatten())\n model.add(Dense(16))\n model.add(Dense(8))\n model.add(Dense(10, activation='softmax'))\n\n # ============= compile, train ==============\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])\n model.fit(x_train, y_train, validation_data=(x_val, y_val), batch_size=8, epochs=30)\n\n # ============= evaluate, predict ================\n acc = model.evaluate(x_test, y_test, batch_size=8)\n print('acc', acc[1])\n # y_pred = model.predict(x_test)\n # print(y_pred)\n\n # YOUR CODE HERE\n return model\n\n\n# Note that you'll need to save your model as a .h5 like this.\n# When you press the Submit and Test button, your saved .h5 model will\n# be sent to the testing infrastructure for scoring\n# and the score will be returned to you.\nif __name__ == '__main__':\n model = solution_model()\n model.save(\"mymodel.h5\")\n","repo_name":"YoungriKIM/STUDY","sub_path":"tf_certificate/angel's_code_02.py","file_name":"angel's_code_02.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"28225873064","text":"import sys\nimport smtplib, ssl\n\nfrom tables import ticket_records, camera_location, handicapped_lp, candidate_record, license_plates\n\n\ndef get_camera_id():\n num_camera = 1;\n cameras = camera_location.objects()\n if cameras:\n num_camera = max(map(lambda item: item['num_camera'],cameras)) + 1\n return num_camera\ndef get_ticket_id():\n num_ticket = 1;\n tickets = ticket_records.objects()\n if tickets:\n num_ticket = max(map(lambda item: 
item['num_ticket'], tickets)) + 1\n return num_ticket\n\ndef get_img_name(num_ticket):\n return ticket_records.objects(num_ticket=num_ticket)[0]['img']\n\ndef get_candidate():\n candidate = candidate_record.objects().first()\n return candidate\ndef check_lp(lp,num_camera):#chek if this lp is candidate for getting ticket\n #need to check according the num camera type\n handicapped = handicapped_lp.objects()\n response = True\n print(handicapped)\n if handicapped:\n for h_lp in handicapped:\n print(h_lp['lp'],lp)\n if h_lp['lp'] == lp:\n print(\"its exist\")\n response = False\n break\n # if response:\n # send_warning_email(h_lp['email'])\n return response\n\ndef get_phone_number(license):\n print(license_plates.objects(lp=license)[0]['phone_number'])\n return license_plates.objects(lp=license)[0]['phone_number']","repo_name":"ElanaFelsi/Ticket-Prevention","sub_path":"EEE_Park_flask/queries.py","file_name":"queries.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10883453762","text":"import pika\nimport threading\nimport json\nfrom time import sleep\nfrom pyrabbit.api import Client\n\n\nclass Observer(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n self.connection = pika.BlockingConnection(\n pika.ConnectionParameters(host=\"localhost\")\n )\n self.queue = \"observer\"\n self.channel = self.connection.channel()\n self.channel.queue_declare(self.queue)\n self.adaptation = False\n self.steps_to_adapt = None\n self.steps_for_normal_behave = None\n self.messages_types = None\n self.exceptional_scenarios = None\n self.subscribe_in_all_queues()\n\n def run(self):\n print(\n \" [*] Observer is waiting for messages.\"\n + \" To exit press CTRL+C\"\n )\n self.channel.basic_consume(\n queue=self.queue, on_message_callback=self.callback,\n auto_ack=False\n )\n self.channel.start_consuming()\n\n def get_bindings(self):\n client = Client(\"localhost:15672\", \"guest\", \"guest\")\n bindings = client.get_bindings()\n bindings_result = []\n\n for b in bindings:\n if b[\"source\"] == \"exchange_baby_monitor\":\n bindings_result.append(b)\n\n return bindings_result\n\n def subscribe_in_all_queues(self):\n bindings = self.get_bindings()\n\n for bind in bindings:\n self.channel.queue_bind(\n exchange=bind[\"source\"],\n queue=self.queue,\n routing_key=bind[\"routing_key\"],\n )\n print(\"Subscribed in \", bind[\"routing_key\"])\n return bindings\n\n def callback(self, ch, method, properties, body):\n ch.basic_ack(delivery_tag=method.delivery_tag)\n print(\n \" [OBSERVER] Receive: %r Data: %r\" % (method.routing_key, body)\n )\n body = json.loads(body.decode(\"UTF-8\"))\n if body['type'] in self.messages_types:\n self.read_message(body, method.routing_key)\n\n def define_messages(self, types: list):\n self.messages_types = types\n\n def stop(self):\n raise SystemExit()\n\n def read_message(self, message, source):\n if message[\"type\"] == \"notification\":\n print(\"ACTION - I've received a notification message.\")\n if self.adaptation:\n print(\"ACTION - My adaptation failed.\")\n\n if message[\"type\"] == \"confirmation\":\n if self.adaptation:\n print(\"ACTION - My adaptation succeeded.\")\n self.adaptation = False\n self.return_normal_behave()\n\n if (\n message[\"type\"] == \"status\" and\n source == \"st_info\" and\n message[\"block\"]\n ):\n print(\"ACTION - I'm going to unlock the TV.\")\n self.adaptation = True\n self.adaptation_action()\n\n def 
adaptation_action(self):\n for function, params in self.steps_to_adapt:\n function(*params)\n sleep(2)\n\n def return_normal_behave(self):\n print(\"ACTION - I'm going to lock the TV.\")\n for function, params in self.steps_for_normal_behave:\n function(*params)\n","repo_name":"Adrilene/simpleBM_simulator","sub_path":"project/solution/observer.py","file_name":"observer.py","file_ext":"py","file_size_in_byte":3205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71076810754","text":"'''\nCreated on Jul 3, 2016\n\n@author: matuschd\n'''\nimport unittest\nimport time\nfrom datetime import datetime\n\nfrom knxip.ip import KNXIPTunnel\n\n\nclass TestKNXIPTunnel(unittest.TestCase):\n\n def testConnect(self):\n \"\"\"Test if the system can connect to an auto-discovered gateway\"\"\"\n # Try to connect to an auto-discovered KNX gateway\n tunnel = KNXIPTunnel(\"0.0.0.0\")\n self.assertTrue(tunnel.connect())\n tunnel.disconnect()\n\n # Try to connect to a non-existing gateway\n # Check if the timeout works as expected\n tunnel = KNXIPTunnel(\"240.0.0.0\")\n tick = datetime.now()\n self.assertFalse(tunnel.connect(2))\n tock = datetime.now()\n diff = tock - tick # the result is a datetime.timedelta object\n self.assertTrue(diff.total_seconds() >= 1 and diff.total_seconds() < 3)\n\n def testAutoConnect(self):\n \"\"\"Test if the KNX tunnel will be automatically connected.\"\"\"\n tunnel = KNXIPTunnel(\"0.0.0.0\")\n self.assertFalse(tunnel.connected)\n tunnel.group_read(1)\n self.assertTrue(tunnel.connected)\n \n \n def testKeepAlive(self):\n \"\"\"Test if the background thread runs and updated the state\"\"\"\n tunnel = KNXIPTunnel(\"0.0.0.0\")\n self.assertTrue(tunnel.connect())\n # Background thread should reset this to 0 if the connection is still\n # alive\n tunnel.connection_state=1\n time.sleep(66)\n self.assertEqual(tunnel.connection_state,0) \n\n def testReadTimeout(self):\n \"\"\"Test if read timeouts work and group_read operations\n\n group_read operations should never block\n \"\"\"\n tunnel = KNXIPTunnel(\"0.0.0.0\")\n tunnel.connect()\n\n # Read from some random address and hope nothing responds here\n tick = datetime.now()\n res = tunnel.group_read(37000, timeout=1)\n tock = datetime.now()\n diff = tock - tick # the result is a datetime.timedelta object\n self.assertTrue(diff.total_seconds() >= 1 and diff.total_seconds() < 3)\n self.assertIsNone(res)\n\n # Read from some random address and hope nothing responds here\n tick = datetime.now()\n res = tunnel.group_read(37000, timeout=5)\n tock = datetime.now()\n diff = tock - tick # the result is a datetime.timedelta object\n self.assertTrue(diff.total_seconds() >= 5 and diff.total_seconds() < 6)\n self.assertIsNone(res)\n\n tunnel.disconnect()\n\n def testCleanup(self):\n \"\"\"Test of disconnect works fine\n\n Makes sure that there are no connections left open\n \"\"\"\n for _i in range(0, 10):\n # Try to connect to an auto-discovered KNX gateway\n tunnel = KNXIPTunnel(\"0.0.0.0\")\n tunnel.connect()\n tunnel.disconnect()\n\n def testListeners(self):\n \"\"\"Test if listeners can be registered and unregistered.\"\"\"\n\n def message_received(address, data):\n pass\n\n tunnel = KNXIPTunnel(\"0.0.0.0\")\n tunnel.register_listener(0, message_received)\n res = tunnel.unregister_listener(0, message_received)\n assert(res)\n\n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testConnect']\n 
unittest.main()\n","repo_name":"daBONDi/pknx","sub_path":"knxip/tests/test_ip.py","file_name":"test_ip.py","file_ext":"py","file_size_in_byte":3292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"39325762508","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n@version: 1.0\n@author: li\n@file: configuration.py\n@time: 2019-03-04 10:13\n\"\"\"\n\n\nclass XGBConfig:\n def __init__(self):\n self.params = {}\n self.max_round = None\n self.early_stop_round = None\n self.ts_cv_folds = None\n self.cv_folds = None\n self.cv_seed = None\n self.save_model_path = None\n\n def xgb_config_r(self):\n # regression\n self.params = {'booster': 'gbtree',\n 'objective': 'reg:linear',\n 'eval_metric': ['rmse', 'logloss'],\n 'nthread': 4, # number of threads to run, -1 uses all threads\n 'silent': 1,\n 'learning_rate': 0.01,\n 'max_depth': 5,\n 'eta': 0.03,\n 'alpha': 0, # L1 regularization, can be increased when the trees get too deep\n 'lambda': 0, # L2 regularization\n 'subsample': 0.7, # row sampling ratio, i.e. how many samples are used for training; a ratio below 1 reduces variance and prevents overfitting\n 'colsample_bytree': 0.5, # column sampling ratio, i.e. how many features are used for training\n 'min_child_weight': 3, # minimum sum of instance weights in a leaf; a larger value keeps the model from learning overly local patterns, but too large a value causes underfitting\n 'seed': 2019, # fix the random seed to a constant so results stay the same across runs\n }\n self.max_round = 1000\n self.early_stop_round = 100\n self.cv_folds = None\n self.cv_seed = 2019\n self.save_model_path = '../bst_model/xgb/'\n\n def xgb_config_c(self):\n self.params = {'objective': 'multi:softmax', # objective function\n 'num_class': 3, # the number of classes must be given when objective is 'multi:softmax', e.g. 'num_class':33\n 'nthread': 4, # number of threads to run, -1 uses all threads\n 'silent': 0,\n 'learning_rate': 0.01,\n 'eta': 0.03,\n 'gamma': 0.1, # controls post-pruning; the larger, the more conservative, usually around 0.1 or 0.2\n \"eval_metric\": [\"mlogloss\", \"merror\"], # evaluation metrics; if not specified, a default matching the objective function is used\n 'max_depth': 5, # tree depth, which strongly affects results; deeper trees overfit more easily\n 'alpha': 0, # L1 regularization, can be increased when the trees get too deep\n 'lambda': 0, # L2 regularization\n 'subsample': 0.7, # row sampling ratio, i.e. how many samples are used for training; a ratio below 1 reduces variance and prevents overfitting\n 'colsample_bytree': 0.5, # column sampling ratio, i.e. how many features are used for training\n 'min_child_weight': 3, # minimum sum of instance weights in a leaf; a larger value keeps the model from learning overly local patterns, but too large a value causes underfitting\n 'seed': 2019, # fix the random seed to a constant so results stay the same across runs\n }\n\n self.max_round = 1000\n self.cv_folds = None\n self.early_stop_round = 100\n self.cv_seed = 2019\n\n\nclass LGBConfig:\n def __init__(self):\n self.params = {}\n self.max_round = None\n self.early_stop_round = None\n self.ts_cv_folds = None\n self.cv_folds = None\n self.cv_seed = None\n self.save_model_path = None\n\n def lgb_config_c(self):\n self.params = {'task': 'train',\n 'boosting': 'gbdt',\n 'objective': 'multiclass',\n 'num_class': 3,\n 'metric': ['multi_error', 'multi_logloss'],\n 'max_bin': 63, # maximum number of bins that feature values are bucketed into\n 'metric_freq': 1,\n 'num_leaves': 31, # since lightGBM grows leaf-wise, the docs recommend num_leaves<=2^max_depth; beyond that it easily overfits\n 'max_depth': 6, # maximum tree depth; pick a moderate value, anything in 4-10 works, but note that larger values overfit more easily\n 'learning_rate': 0.05,\n 'feature_fraction': 0.9, # feature sub-sampling per tree; makes bagging run faster while also reducing overfitting\n 'bagging_fraction': 0.95, # sub-sampling used to prevent overfitting and speed up training [0.5, 0.6, 0.7, 0.8, 0.9]\n 'bagging_freq': 5,\n 'lambda_l1': 0.9,\n 'lambda_l2': 0.95, # L2 regularization coefficient\n 'verbosity': -1,\n # 'device': 'gpu', # uses the integrated GPU by default\n # 'gpu_platform_id': 1, # selects the integrated vs. the discrete graphics card\n # 'gpu_device_id': 0 # the discrete GPU with id 0\n }\n\n self.max_round = 1000\n self.cv_folds = None\n self.early_stop_round = 100\n self.cv_seed = 2019\n self.save_model_path = 'bst_model/lgb/lgb.txt'\n\n def lgb_config_r(self):\n self.params = {\n 'task': 'train',\n 'boosting': 'gbdt', # boosting type\n 'objective': 'regression', # objective function\n 'metric': {'l2', 'mean_squared_error'}, # evaluation metrics\n 'num_leaves': 31, # number of leaves\n 'learning_rate': 0.05, # learning rate\n 'feature_fraction': 0.9, # fraction of features used per tree # column sampling\n 'bagging_fraction': 0.8, # fraction of samples used per tree\n 'bagging_freq': 5, # k means bagging is performed every k iterations\n 'lambda_l1': 0.90, # L1 regularization\n 'lambda_l2': 0.95, # L2 regularization\n 'bagging_seed': 100, # random seed, defaults to 100 in LightGBM\n 'verbosity': -1 # <0 shows fatal only, =0 shows errors (warnings), >0 shows info\n }\n self.max_round = 1000\n self.early_stop_round = 100\n self.cv_folds = None\n self.cv_seed = 2019\n\n\nxgb_conf = XGBConfig()\nlgb_conf = LGBConfig()\n","repo_name":"STHSF/MultiFactors","sub_path":"src/conf/configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":6501,"program_lang":"python","lang":"zh","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"}
+{"seq_id":"1889425247","text":"from bs4 import BeautifulSoup\nimport requests\n\nurl = 'http://ilo.org/dyn/seafarers/seafarersBrowse.list'\n#url = 'https://www.google.com'\n\ncounter = 0\nboat_details = {}\ndetailed_boat_list = []\n\nr = requests.get(url)\n#print(r.content[:100])\n\nsoup = BeautifulSoup(r.content, 'html.parser')\n#print(soup.prettify())\n\nboat_list = soup.find_all('p') #soup of all items wrapped in a p tag\n\nfor boats in boat_list: #for all the items wrapped in a p tag, extract their contents\n boat_contents = boats.contents #gets a list of the items that are wrapped in a p tag\n\n #boat_details['Link'] = boat_contents[1]\n link = boat_contents[1]\n boat_details['url'] = link['href'] #gets url for more info\n\n\n #boat_details['Boat Name'] = str(link.next_element.contents[0])[1:-1]\n #boat_details['Boat Status'] = link.next_element.contents[1].string\n\n if ' \\nFlag: ' in boat_contents:\n boat_details['Flag'] = str(boat_contents[boat_contents.index(' \\nFlag: ')+1])[3:-4]\n else:\n boat_details['Flag'] = ''\n\n if '; Abandoned: ' in boat_contents:\n boat_details['Abandoned'] = True\n boat_details['Abandoned Date'] = str(boat_contents[boat_contents.index('; Abandoned: ')+1])[3:-4]\n else:\n boat_details['Abandoned'] = False\n boat_details['Abandoned Date'] = ''\n\n if '; Notified: ' in boat_contents:\n boat_details['Notified'] = True\n boat_details['Notified Date'] = str(boat_contents[boat_contents.index('; Notified: ')+1])[3:-4]\n else:\n boat_details['Notified'] = False\n boat_details['Notified Date'] = ''\n\n if 'Port of abandonment: ' in boat_contents:\n boat_details['Notified'] = True\n boat_details['Port of Abandonment'] = str(boat_contents[boat_contents.index('Port of abandonment: ')+1])[3:-4]\n else:\n boat_details['Notified'] = False\n boat_details['Port of Abandonment'] = ''\n\n if '; Reported by: ' in boat_contents:\n boat_details['Reported by'] = str(boat_contents[boat_contents.index('; Reported by: ')+1])[3:-4]\n else:\n boat_details['Reported by'] = ''\n\n detailed_boat_list.append(boat_details.copy())\nprint(detailed_boat_list[len(detailed_boat_list)-1])\n\n","repo_name":"jamesrseal/seafarers","sub_path":"old/seafarers_scrape.py","file_name":"seafarers_scrape.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"73176495875","text":"import multimodel\nimport vgg_occ_dataset as vgg_occlusion\nfrom torchvision import transforms\nfrom torch.utils.data import DataLoader, SubsetRandomSampler\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom sklearn.model_selection import KFold\n\n#specify data directories\ndata_directory = '../datasets/VGG-Face2/'\ncsv_directory = '../datasets/MAAD_Face.csv'\n\ntransform = 
transforms.Compose([transforms.ToTensor(), transforms.CenterCrop(224)])\ndataset = vgg_occlusion.VggFace2(samples = 240000, path = data_directory , csvfile= csv_directory, transform = transform)\n\nsplits = KFold(n_splits = 10, shuffle = True,random_state=77)\n\n\ncriterion1 = nn.CrossEntropyLoss(weight = torch.FloatTensor([1.0, len(dataset)/dataset.eyeglasses]).to('cuda'))\ncriterion2 = nn.CrossEntropyLoss(weight = torch.FloatTensor([1.0, len(dataset)/dataset.beard]).to('cuda'))\ncriterion3 = nn.CrossEntropyLoss(weight = torch.FloatTensor([1.0, len(dataset)/dataset.hat]).to('cuda'))\n\n\ncriterion = [criterion1, criterion2, criterion3]\n\nbest_model = None\n\nbest_loss = np.Inf\n\nlosses = []\n\nepochs = 12\nfor train_idx, test_idx in splits.split(np.arange(len(dataset))):\n\n #get fold\n train_sampler = SubsetRandomSampler(train_idx)\n test_sampler = SubsetRandomSampler(test_idx)\n train_loader = DataLoader(dataset, batch_size=16, sampler=train_sampler)\n test_loader = DataLoader(dataset, batch_size=16, sampler=test_sampler)\n #define model\n model = multimodel.MultiCNN(True)\n model.setTrainableParams(60)\n\n #get optimizer with model params\n optimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n #train model\n\n model_trained, loss = multimodel.train(epochs, model, train_loader, test_loader, criterion, optimizer)\n \n losses.append(loss)\n\n if loss < best_loss:\n best_loss = loss\n best_model = model_trained\n\n\ntorch.save(best_model.state_dict(), 'best_3occ_model.pth')\n\nf = open('write_loss_multimodel.txt', 'w')\n\nfor i in losses:\n f.write(str(i) + '\\n')\n\nf.close()\n\n","repo_name":"JaimeAznar98/TFG","sub_path":"occlusion_attributes/train_multimodel.py","file_name":"train_multimodel.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"24562049956","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# This file is part of DRK Testerfassung.\n\n\nimport sys\nimport csv\nimport logging\n\nfrom datetime import datetime\nsys.path.append(\"..\")\nfrom utils.database import Database\n\n\nlogger = logging.getLogger('CSV Export Accounting')\nlogger.debug('Logger for createCSV was initialised')\n\n\ndef create_CSV(content, month, year):\n filename = \"../../Reports/export_abrechnung_\" + str(month) + \"_\" + str(year) + \".csv\"\n with open(filename, mode='w', newline='') as csvfile:\n writeEntry = csv.writer(csvfile, delimiter=';')\n writeEntry.writerow([\"Station\",\n \"Name\",\n \"Datum\",\n \"Anzahl\"\n ])\n for i in content:\n writeEntry.writerow(i)\n return filename\n\nlogFile = '../../Logs/AccountingExport.log'\nlogging.basicConfig(filename=logFile,level=logging.INFO,\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nlogger = logging.getLogger('CSV Export')\nlogger.debug('Starting')\n\n\nif __name__ == \"__main__\":\n try:\n \n if len(sys.argv) == 3:\n requestedMonth = sys.argv[1]\n requestedYear = sys.argv[2]\n sql = \"Select Teststation,Station.Ort,Date,Amount from Abrechnung JOIN Station on Abrechnung.Teststation=Station.id where MONTH(Date)=%s and YEAR(Date)=%s order by Teststation;\" % (requestedMonth,requestedYear)\n else:\n logger.debug(\n 'Input parameters are not correct, month and year needed')\n raise Exception\n logger.debug('Getting all Events for employee of the month and year with the following query: %s' % (sql))\n DatabaseConnect = Database()\n exportEvents = DatabaseConnect.read_all(sql)\n logger.debug('Received the following 
entries: %s' %(str(exportEvents)))\n filename = create_CSV(exportEvents, requestedMonth, requestedYear) \n print(filename.replace('../../Reports/', ''))\n logger.debug('Done')\n except Exception as e:\n logging.error(\"The following error occurred: %s\" % (e))\n finally:\n # DatabaseConnect may never have been created if the arguments were invalid\n if 'DatabaseConnect' in dir():\n DatabaseConnect.close_connection()\n","repo_name":"DRK-Odenwaldkreis/Testerfassung","sub_path":"AccountJob/createCSV.py","file_name":"createCSV.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"72832662274","text":"import paho.mqtt.client as mqtt\nimport subprocess\n\nimport yaml\nimport time\nimport numpy as np\n\n\n##\n## testParse Functions\n##\n\n# Turn the data String into a list\ndef stringToList(dataString):\n data = dataString.split(\",\")\n return data\n\n# db_name, table_name, data_name, data, mac_address, start_timestamp, interval\n# shake Z value 1,2,3 00.00.00.00.00 1692491796.001 .001\ndef parseString(exampleString):\n returnDict = {}\n \n stringList = exampleString.split(\" \")\n\n # clean up the ints\n returnDict[\"db_name\"] = stringList[0]\n returnDict[\"table_name\"] = stringList[1]\n returnDict[\"data_name\"] = stringList[2]\n returnDict[\"data\"] = stringToList(stringList[3])\n returnDict[\"mac_address\"] = stringList[4]\n returnDict[\"start_timestamp\"] = float(stringList[5])\n returnDict[\"interval\"] = float(stringList[6])\n \n \n return returnDict\n \n \n# New influx\ndef write_influx(influx):\n \n # put into config file soon\n influxConfig = {}\n influxConfig[\"influx_ip\"] = 'https://sensorweb.us'\n influxConfig[\"influx_user\"] = 'test'\n influxConfig[\"influx_pass\"] = 'sensorweb'\n \n start_timestamp = influx['start_timestamp']\n \n ## function to parse data\n http_post = \"curl -i -k -XPOST \\'\"+ influxConfig['influx_ip']+\":8086/write?db=\"+influx['db_name']+\"\\' -u \"+ influxConfig['influx_user']+\":\"+ influxConfig['influx_pass']+\" --data-binary \\' \"\n count = 0\n dataLength = len(influx['data'])\n for value in influx['data']:\n count += 1\n http_post += \"\\n\" + influx['table_name'] +\",location=\" + influx['mac_address'] + \" \"\n http_post += influx['data_name'] + \"=\" + str(value) + \" \" + str(int(start_timestamp*10e8))\n start_timestamp += influx['interval']\n \n http_post += \"\\' &\"\n print(http_post)\n subprocess.call(http_post, shell=True)\n print(\"printed to influx!\")\n\n\n\n##\n## ExampleString Functions\n##\n\ndef getEpochTime():\n return round(time.time(), 3)\n\ndef createSineWave(influx_packet,influx_frequency):\n \n maxTime = influx_packet * influx_frequency\n DataValues = 10 * np.sin(2 * np.pi * np.linspace(0, maxTime, influx_packet) / 1)\n DataValues = np.round(DataValues, decimals = 2)\n \n return DataValues\n\ndef arrayToString(data):\n returnDataString = \"\"\n # join all but the last value with commas, then append the last value\n for i in range(len(data) - 1):\n returnDataString += str(data[i]) + \",\"\n \n returnDataString += str(data[len(data)-1])\n return returnDataString\n\ndef getString():\n returnString = \"shake testData value \"\n returnString += arrayToString(createSineWave(200,.01))\n returnString += \" 00.00.00.00.00 \"\n returnString += str(getEpochTime())\n returnString += \" .01\"\n \n return(returnString)\n\n\n\n\n\n\n\n ","repo_name":"walkiisun/M5Dot","sub_path":"Server/Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":2749,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"75010977794","text":"s = \"\"\r\ninput=int(input(\"enter the decimal number\"))\r\nr=0\r\n\r\ndef chang(r):\r\n return (chr(ord(\"A\")+r-10)) \r\n\r\nwhile input>0:\r\n r=input%16\r\n input=input//16\r\n if r>9:\r\n r=chang(r)\r\n else:\r\n r=str(r)\r\n s=r+s\r\n \r\nprint(s) ","repo_name":"Arshdeep-kapoor/Python","sub_path":"chaptr4-q33.py","file_name":"chaptr4-q33.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"16888389182","text":"\"\"\"\r\nClass-based dict allowing tuple subscripting and sparse data\r\n\"\"\"\r\nfrom collections import defaultdict\r\n\r\nclass array:\r\n\r\n def __init__(self, X, Y, Z):\r\n \"Create a defaultdict subscripted to represent a 3D matrix.\"\r\n self._data = defaultdict(int)\r\n self._x = X\r\n self._y = Y\r\n self._z = Z\r\n\r\n def __getitem__(self, key):\r\n \"Returns the appropriate element.\"\r\n return self._data[self._validate_key(key)]\r\n \r\n def __setitem__(self, key, value):\r\n \"Sets the appropriate element.\"\r\n self._data[self._validate_key(key)] = value\r\n \r\n def _validate_key(self, key):\r\n \"\"\"Validates a key against the array's shape, returning good tuples.\r\n Raises KeyError on problems.\"\"\"\r\n x, y, z = key\r\n if (x in range(self._x) and\r\n y in range(self._y) and\r\n z in range(self._z)):\r\n return key\r\n raise KeyError(\"Subscript out of range\")","repo_name":"MTset/Python-Programming-Coursework","sub_path":"Python 04: Advanced Python/Lesson 02: Data Structures/arr.py","file_name":"arr.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"42576953061","text":"import numpy as np\n\n\ndef inference(data, weights1, biases1, weights2, biases2):\n\n\tlayer1 = relu(np.matmul(data, weights1) + biases1)\n\n\treturn np.matmul(layer1, weights2) + biases2\n\n\ndef relu(x_input):\n\tshape = x_input.shape\n\tif shape.__len__() == 1:\n\t\tfor index in range(shape[0]):\n\t\t\tif x_input[index] > 0:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tx_input[index] = 0\n\telse:\n\t\tfor index_i in range(shape[0]):\n\t\t\tfor index_j in range(shape[1]):\n\t\t\t\tif x_input[index_i][index_j] > 0:\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tx_input[index_i][index_j] = 0\n\treturn x_input\n\n\ndef loadtxt(directory, bit):\n\tweights1 = np.loadtxt(directory + bit + \"w1.txt\")\n\tweights2 = np.loadtxt(directory + bit + \"w2.txt\")\n\tbiases1 = np.loadtxt(directory + bit + \"b1.txt\")\n\tbiases2 = np.loadtxt(directory + bit + \"b2.txt\")\n\treturn weights1, biases1, weights2, biases2\n\n\ndef predict(data_input, bit):\n\t# data = np.array(list(\"10011110001111011111001110000110\"), dtype=np.int32)\n\tdata = np.array(data_input, dtype=np.int32)\n\tweights1, biases1, weights2, biases2 = loadtxt(\"./parameter/parity/leak0/\", \"data\" + str(bit))\n\ttemp = inference(data.T, weights1, biases1, weights2, biases2)\n\tif abs(temp-1) < temp:\n\t\tresult = 1\n\telse:\n\t\tresult = 0\n\treturn result\n\n","repo_name":"OliverPan/uci_test","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"39100044736","text":"import pandas as pd\nimport requests\n\nfrom airflow import DAG\nfrom airflow.operators.python_operator import PythonOperator\nfrom datetime import datetime\n\n\ngit_url = 
'https://raw.githubusercontent.com/dm-novikov/stepik_airflow_course/main/data_new/'\napi_url = 'https://api.exchangerate.host/timeseries?'\n\n\ndef push(**kwargs):\n ds = kwargs['ds']\n print(f'logical date is {ds}')\n # reading from github csv\n df = pd.read_csv(f'{git_url}{ds}.csv')\n df_key = str(df['date'][1])\n df_val = str(df['value'][1])\n kwargs['ti'].xcom_push(key=df_key, value=df_val)\n # get from API\n response = requests.get(f'{api_url}start_date={ds}&end_date={ds}')\n response_data = response.json()\n response_key = f'response_{ds}'\n response_val = str(response_data['rates'][ds]['RUB'])\n kwargs['ti'].xcom_push(key=response_key, value=response_val)\n\n\ndef pull(**kwargs):\n ds = kwargs['ds']\n print(f'logical date is {ds}')\n print(f\"git: {kwargs['ti'].xcom_pull(key=ds, task_ids='push')}\")\n print(f\"api: {kwargs['ti'].xcom_pull(key=f'response_{ds}', task_ids='push')}\")\n\n\nargs = {'owner': 'airflow',\n 'start_date': datetime(2020, 1, 1),\n 'end_date': datetime(2025, 1, 4),\n 'provide_context': True}\n\nwith DAG('url_xcom',\n schedule_interval='@once',\n default_args=args,\n tags=['stepik']\n ) as dag:\n\n push1 = PythonOperator(\n task_id='push',\n dag=dag,\n python_callable=push)\n pull1 = PythonOperator(\n task_id='pull',\n dag=dag,\n python_callable=pull)\n\n push1 >> pull1\n\n\n\n","repo_name":"osinin/stepik_airflow","sub_path":"currency_github_xcom.py","file_name":"currency_github_xcom.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16253304324","text":"import threading\r\nimport time\r\nfrom sensorHumedad_obj import sensorHumedad\r\nfrom sensorTemperatura_obj import sensorTemperatura\r\nfrom sensorMovimiento_obj import PIR\r\nfrom sensorhumoyGas_obj import SmokeGasSensor\r\nfrom Actuadores import Actuadores\r\n\r\ndef thread1():\r\n humedad = sensorHumedad(11)\r\n \r\n # execute script 1 here\r\n humedad.read()\r\n\r\ndef thread2():\r\n humo = SmokeGasSensor(18)\r\n\r\n # execute script 2 here\r\n humo.read()\r\n \r\n\r\ndef thread3():\r\n mov = PIR(23)\r\n while True:\r\n \r\n # execute script 3 here\r\n mov.connect()\r\n c = mov.read()\r\n if c == True:\r\n print(c)\r\n mov.disconnect()\r\n time.sleep(.5)\r\n \r\ndef thread4():\r\n temperatura = sensorTemperatura(11)\r\n temperatura.read()\r\n \r\ndef thread5():\r\n act = Actuadores()\r\n act.run()\r\n\r\n# create threads for each script\r\nt1 = threading.Thread(target=thread1)\r\nt2 = threading.Thread(target=thread2)\r\nt3 = threading.Thread(target=thread3)\r\nt4 = threading.Thread(target=thread4)\r\nt5 = threading.Thread(target=thread5)\r\n\r\n# start each thread\r\nt1.start()\r\nt2.start()\r\nt3.start()\r\nt4.start()\r\nt5.start()\r\n\r\n# wait for each thread to finish\r\nt1.join()\r\nt2.join()\r\nt3.join()\r\nt4.join()\r\nt5.join()","repo_name":"WhoopsDang/proyectoSeguridad","sub_path":"Raspberry Codes/mainDemo.py","file_name":"mainDemo.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23593935161","text":"\nclass Watersheds:\n def __init__(self, map, width, height):\n self.width = width\n self.height = height\n \n self.letter = 97\n \n self.map = []\n self.map_create(map)\n self.map[0][0].letter = self.get_letter()\n \n self.process()\n\n def map_create(self, map):\n self.map = []\n \n for y in range(self.height):\n self.map.insert(y, [])\n \n for x in range(self.width):\n self.map[y].insert(x, 
WatershedsCell(x, y, map[y][x]))\n\n def get_letter(self):\n char_letter = chr(self.letter)\n self.letter += 1\n return char_letter\n\n def process(self):\n for y in range(self.height):\n for x in range(self.width):\n cell = self.map[y][x]\n flow = self.flows(x, y)\n \n if not cell.letter and cell.fin:\n for fin in cell.fin:\n if fin.letter:\n cell.letter = fin.letter\n break\n \n if flow:\n flow.fin.append(cell)\n \n if not flow.letter:\n flow.letter = cell.letter\n if not cell.letter:\n cell.letter = flow.letter\n else:\n cell.sink = True\n \n if not cell.letter:\n cell.letter = self.get_letter()\n self.set_letter_rec(cell, cell.letter)\n \n for y in range(self.height):\n for x in range(self.width):\n cell = self.map[y][x]\n self.set_letter_rec(cell, cell.letter)\n\n def set_letter_rec(self, cell, letter):\n if cell.fin:\n for fin in cell.fin:\n if not fin.letter:\n fin.letter = letter\n self.set_letter_rec(fin, letter)\n\n def flows(self, x, y):\n cell = cmin = self.map[y][x]\n \n for neighbor in self.get_neighbors(x, y):\n if neighbor:\n if neighbor.alt < cmin.alt:\n cmin = neighbor\n \n if cell is cmin:\n return None\n return cmin\n\n def get_neighbors(self, x, y):\n return [\n self.get_neighbor_north(x, y),\n self.get_neighbor_west(x, y),\n self.get_neighbor_east(x, y),\n self.get_neighbor_south(x, y)\n ]\n\n def get_neighbor_north(self, x, y):\n if y - 1 >= 0 and y - 1 < self.height:\n return self.map[y - 1][x]\n return None\n\n def get_neighbor_west(self, x, y):\n if x - 1 >= 0 and x - 1 < self.width:\n return self.map[y][x - 1]\n return None\n\n def get_neighbor_east(self, x, y):\n if x + 1 >= 0 and x + 1 < self.width:\n return self.map[y][x + 1]\n return None\n\n def get_neighbor_south(self, x, y):\n if y + 1 >= 0 and y + 1 < self.height:\n return self.map[y + 1][x]\n return None\n\n def answer(self):\n r = ''\n for y in range(self.height):\n row = ''\n for x in range(self.width):\n cell = self.map[y][x]\n row += cell.letter + ' '\n r += row.strip() + '\\n'\n return r\n\nclass WatershedsCell:\n def __init__(self, x, y, alt):\n self.x = x\n self.y = y\n self.alt = int(alt)\n self.letter = None\n self.sink = False\n self.fin = []\n\ndef main():\n FILENAME = 'B-small-attempt0'\n \n output = open(FILENAME + '.out', 'w')\n lines = open(FILENAME + '.in', 'r').readlines()\n \n maps = int(lines[0])\n mapin = False\n mapn = 0\n \n lines = lines[1:]\n \n for i in range(len(lines)):\n line = lines[i].strip()\n \n if not mapin:\n parts = line.split()\n \n mapwidth = int(parts[1])\n mapheight = int(parts[0])\n mapin = True\n mapi = 0\n map_ = []\n mapn += 1\n else:\n mapi += 1\n map_.append(line.split())\n \n if mapi == mapheight:\n mapin = False\n \n w = Watersheds(map_, mapwidth, mapheight)\n \n output.write(\"Case #%d:\\n\" % (mapn))\n output.write(w.answer())\n \n output.close()\n\nif __name__ == '__main__':\n main()","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_35/464.py","file_name":"464.py","file_ext":"py","file_size_in_byte":4544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3729328889","text":"'''\nimage is N x N matrix, each pixel represented by an integer. 
Rotate image 90 degrees\n\n'''\n\ndef rotate90Clockwise(matrix) :\n if (len(matrix) == 0):\n return False\n n = len(matrix)\n for layer in range(n // 2):\n first, last = layer, n - layer - 1\n for i in range(first, last):\n # save top\n top = matrix[layer][i]\n\n # left -> top\n matrix[layer][i] = matrix[-i - 1][layer]\n\n # bottom -> left\n matrix[-i - 1][layer] = matrix[-layer - 1][-i - 1]\n\n # right -> bottom\n matrix[-layer - 1][-i - 1] = matrix[i][-layer - 1]\n\n # top -> right\n matrix[i][-layer - 1] = top\n return matrix\n\ndef solveCounterClockwise(matrix):\n if not matrix or not matrix[0]:\n return []\n n = len(matrix)\n for row in matrix:\n row.reverse()\n for i in range(n):\n for j in range(i):\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n return matrix\n\ndef rotate2(matrix):\n n = len(matrix)\n result = [[0] * n for i in range(n)]\n for i,j in zip(range(n), range(n - 1, -1, -1)):\n for k in range(n):\n result[k][i] = matrix[j][k]\n return result\n \nA = [[1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16]]\n\nprint(A)\n\n# print(rotate90Clockwise(A))\nprint(solveCounterClockwise(A))","repo_name":"kayvera/python_practice","sub_path":"array/rotatematrix.py","file_name":"rotatematrix.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"29554964401","text":"import discord\nfrom configparser import ConfigParser\nfrom discord.ext import commands\n\nfile= \"settings.ini\"\nconfig=ConfigParser()\nconfig.read(file)\n\nbot=commands.Bot(command_prefix='>')\n#Holajeje\n@bot.event\nasync def on_member_join(member):\n await member.send('Hola,Puto')\n\n@bot.command()\nasync def definir_futuro(ctx):\n await ctx.send('David será un wen ingeniero')\n\n@bot.command()\nasync def sumar(ctx,numOne:int, numTwo:int):\n await ctx.send(\"El resultado de la suma es:\",numOne+numTwo)\n\nbot.run(config['key']['public_key'])\n","repo_name":"jean0206/pythonExamples","sub_path":"BotDiscord/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3017291269","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def deleteNode(self, root: TreeNode, key: int) -> TreeNode:\n if root is None:\n return root\n \n if root.val > key:\n root.left = self.deleteNode(root.left, key)\n return root\n \n if root.val < key:\n root.right = self.deleteNode(root.right, key)\n return root\n \n # delete\n if root.left is None and root.right is None:\n return None\n \n if root.left is None:\n return root.right\n \n if root.right is None:\n return root.left\n \n p = root.left\n while p.right is not None:\n p = p.right\n \n root.val = p.val\n root.left = self.deleteNode(root.left, root.val)\n return root\n","repo_name":"lugy-bupt/algorithm","sub_path":"leet/0450-delete-node-in-a-BST/delete-node-in-a-BST.py","file_name":"delete-node-in-a-BST.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"3886529797","text":"import numpy as np\nimport torch\nimport os\n\nfrom pytorch_grad_cam import GradCAM\nfrom pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget\nfrom pytorch_grad_cam.utils.image import show_cam_on_image\nfrom PIL import Image\nfrom torchvision 
import transforms\nfrom typing import Callable, List, Tuple\n\nfrom src.target import target_id\n\ndef normalize(x):\n \"\"\"\n Normalize a list of sample image data in the range of 0 to 1\n : x: List of image data. The image shape is (32, 32, 3)\n : return: Numpy array of normalized data\n \"\"\"\n return np.array((x - np.min(x)) / (np.max(x) - np.min(x)))\n\nclass GradCamVisualize():\n def __init__(self, \n model,\n target_layers,\n use_cuda,\n preproc\n ):\n self.model = model\n self.preproc = preproc\n self.use_cuda = use_cuda\n self.gradcam = GradCAM(model=model, target_layers=target_layers, use_cuda=use_cuda)\n self.transform = transforms.ToTensor()\n \n def process(self, img_path: str)->np.ndarray:\n img = Image.open(img_path)\n input_t = torch.stack((self.transform(img),))\n targets = [ClassifierOutputTarget(target_id(self.model, img, self.preproc, self.use_cuda))]\n grayscale_cam = self.gradcam(input_tensor=input_t, targets=targets)\n grayscale_cam = grayscale_cam[0, :]\n visualization = show_cam_on_image(normalize(img), grayscale_cam, use_rgb=True)\n return visualization\n\ndef save_image(img, img_path, dir_to_save):\n img_name = os.path.splitext(os.path.basename(img_path))\n img = img.save(dir_to_save + img_name[0] + '_out' + img_name[1])\n \n ","repo_name":"mkeriy/gradcam_for_resnet","sub_path":"src/gradcam_vis.py","file_name":"gradcam_vis.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74796567554","text":"\nCIVILIZATIONS = {\n 'Chinese': {\n 'Archer': 2,\n 'Pikeman': 25,\n 'Chivalry': 2,\n },\n 'English': {\n 'Archer': 10,\n 'Pikeman': 10,\n 'Chivalry': 10,\n },\n 'Byzantine': {\n 'Archer': 5,\n 'Pikeman': 8,\n 'Chivalry': 15,\n }\n}\n\nBRANCH_POINTS = {\n 'Archer': 5,\n 'Pikeman': 10,\n 'Chivalry': 20,\n}\n\nBRANCH_TO_LEVEL = {\n 'Archer': 1,\n 'Pikeman': 2,\n 'Chivalry': 3,\n}\n\nLEVEL_TO_BRANCH = {\n 1: 'Archer',\n 2: 'Pikeman',\n 3: 'Chivalry',\n}\n\nTRAINING_FORCE_POINTS_COST = {\n 'Archer': {'points': 3, 'cost': 10},\n 'Pikeman': {'points': 7, 'cost': 20},\n 'Chivalry': {'points': 10, 'cost': 30},\n}\n\nTRASFORMATION_BRANCH_COST = {\n 'Pikeman': 30,\n 'Chivalry': 40,\n}\n\nTOTAL_COINS_FOR_BATTLE_WON = 100\n","repo_name":"diblasifrancisco/army_game","sub_path":"army_game/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13726888736","text":"from helper.helpers import *\n\ninput=\"\"\n\ndef run_check():\n print(\"starting iterating lines...\")\n\n count_valid = 0\n valids =[]\n\n for line in input:\n\n line = line.strip().split(\":\")\n # print(line)\n\n code = line[1]\n policy = line[0].split(\" \")\n char_to_check = policy[1]\n\n limits = policy[0].split(\"-\")\n\n counted_chars = code.count(char_to_check)\n\n if counted_chars >=int(limits[0]) and counted_chars<=int(limits[1]):\n print(\"valid:\",line)\n valids.append(code)\n count_valid+=1\n #print(\"-\"*10)\n\n print(\"Valid found:\",count_valid)\n print(valids)\n\n\n\n\n\n\ndef run():\n global input\n print(\"-----------\")\n print(\"starting s1\")\n\n input = get_input(\"real_input.input\")\n run_check()\n print(\"-----------\")","repo_name":"Lycea/AdventOfCode","sub_path":"2020/02/s1.py","file_name":"s1.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} 
+{"seq_id":"13327638809","text":"import random\n\ndef quick(A, index):\n lower = []\n same = []\n higher = []\n for i in range(len(A)):\n if(A[i] < A[index]):\n lower.append(A[i])\n elif(A[i] == A[index]):\n same.append(A[i])\n else:\n higher.append(A[i])\n A = []\n A.extend(lower)\n A.extend(same)\n A.extend(higher)\n return A\n\n#A = [70, 69, 4]\n# [69, 70, 4] \nA = [random.randint(1, 100) for _ in range(10)]\n#A = [10, 9, 8, 6]\n\nfor x in A:\n print(x)\n\nprint(\"\")\n#B = quick(A, 4)\n\n\n# The above uses O(n) extra space in the creation of the two new lists\n# could probably reduce this through swapping variables in place\n# index = 2\ndef quickPlace(A, index):\n comparison = A[index]\n j = len(A) - 1\n lo = 0\n #write up the algorithm written in the book\n # need to alter this\n # finish this Q tomorrow and then read the solution\n while(j != lo):\n if(A[j] < comparison):\n A[j], A[lo] = A[lo], A[j]\n lo += 1\n else:\n j -= 1\n checker = -1\n for i in range(len(A)):\n if(A[i] == comparison):\n checker = i\n break\n if(A[j] > comparison and j < checker):\n A[j], A[checker] = A[checker], A[j]\n elif (A[j] < comparison and j > checker):\n A[checker], A[j] = A[j], A[checker]\n return A\n\n\nB = quickPlace(A, 1)\n\nfor x in B:\n print(x)\n \n\n#if(lo < index):\n# if(A[lo] < comparison):\n# A[lo], A[index] = A[index], A[lo]\n# else:\n# if(A[lo] > comparison):\n# A[index], A[lo] = A[lo], A[index]\n ","repo_name":"lorcanj/EPI_practice","sub_path":"chapter5/quick.py","file_name":"quick.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"16587050000","text":"import spacy\nimport os\nimport csv\nimport json\nimport random\n\n\nclass NLP:\n def __init__(self):\n self.nlp = spacy.load('en', disable=['ner', 'parser', 'tagger'])\n self.nlp.add_pipe(self.nlp.create_pipe('sentencizer'))\n\n def word_tokenize(self, text, lower=False): # create a tokenizer function\n if text is None: return text\n text = ' '.join(text.split())\n if lower: text = text.lower()\n toks = [tok.text for tok in self.nlp.tokenizer(text)]\n return ' '.join(toks)\n\n\ndef train_valid_split(data, train_rate=0.8):\n random.shuffle(data)\n train = data[:int(len(data)*train_rate)]\n valid = data[int(len(data)*train_rate):]\n return train, valid\n\n\ndef train_valid_split_fixed(data, valid_size=800):\n random.shuffle(data)\n valid = data[:valid_size]\n train = data[valid_size:]\n return train, valid\n\n\ndef preprocess(path):\n ENT_1_START = '<e1>'\n ENT_1_END = '</e1>'\n ENT_2_START = '<e2>'\n ENT_2_END = '</e2>'\n\n nlp = NLP()\n data = []\n with open(path) as f:\n lines = [line.strip() for line in f]\n for idx in range(0, len(lines), 4):\n id = int(lines[idx].split(\"\\t\")[0])\n relation = lines[idx + 1]\n\n sentence = lines[idx].split(\"\\t\")[1][1:-1]\n sentence = sentence.strip()\n\n sentence = sentence.replace(ENT_1_START, ' ENT_1_START ')\n sentence = sentence.replace(ENT_1_END, ' ENT_1_END ')\n sentence = sentence.replace(ENT_2_START, ' ENT_2_START ')\n sentence = sentence.replace(ENT_2_END, ' ENT_2_END ')\n\n sentence = nlp.word_tokenize(sentence)\n\n ent1 = sentence.split(' ENT_1_START ')[-1].split(' ENT_1_END ')[0]\n ent2 = sentence.split(' ENT_2_START ')[-1].split(' ENT_2_END ')[0]\n\n data.append({\n 'label': relation,\n 'sentence': sentence,\n 'ent1': ent1,\n 'ent2': ent2,\n 'id': id,\n })\n\n return data\n\n\ndef sentence_preprocess(sentence):\n 
ENT_1_START = ''\n ENT_1_END = ''\n ENT_2_START = ''\n ENT_2_END = ''\n\n nlp = NLP()\n sentence = sentence.strip()\n\n sentence = sentence.replace(ENT_1_START, ' ENT_1_START ')\n sentence = sentence.replace(ENT_1_END, ' ENT_1_END ')\n sentence = sentence.replace(ENT_2_START, ' ENT_2_START ')\n sentence = sentence.replace(ENT_2_END, ' ENT_2_END ')\n\n sentence = nlp.word_tokenize(sentence)\n\n ent1 = sentence.split(' ENT_1_START ')[-1].split(' ENT_1_END ')[0]\n ent2 = sentence.split(' ENT_2_START ')[-1].split(' ENT_2_END ')[0]\n\n return sentence, ent1, ent2\n\n\ndef gen_csv(json_data, csv_path):\n csv_data = list()\n csv_line = dict()\n for line in json_data:\n sentence = line['sentence']\n csv_line = {\n 'tgt': line['label'],\n 'input': sentence,\n 'show_inp': sentence,\n 'ent1': line['ent1'],\n 'ent2': line['ent2'],\n 'id': line['id'],\n }\n csv_data += [csv_line]\n with open(csv_path, 'w') as f:\n writer = csv.DictWriter(f, fieldnames=csv_line.keys())\n writer.writeheader()\n writer.writerows(csv_data)\n\n\nif __name__ == '__main__':\n data_dir = './data'\n train_valid_path = os.path.join(data_dir, 'TRAIN_FILE.TXT')\n test_path = os.path.join(data_dir, 'TEST_FILE_FULL.TXT')\n\n train_valid_data = preprocess(train_valid_path)\n test_data = preprocess(test_path)\n\n data = dict()\n data['train'], data['valid'] = train_valid_split_fixed(train_valid_data, valid_size=800)\n data['test'] = test_data\n\n train_csv_path = os.path.join(data_dir, 'train.csv')\n valid_csv_path = os.path.join(data_dir, 'valid.csv')\n test_csv_path = os.path.join(data_dir, 'test.csv')\n\n gen_csv(data['train'], train_csv_path)\n gen_csv(data['valid'], valid_csv_path)\n gen_csv(data['test'], test_csv_path)\n\n\n","repo_name":"DuanXu-97/RelationExtraction","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":3861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70679356354","text":"import trifle.anyconfig.globals as G\nimport trifle.anyconfig.mergeabledict as M\nimport trifle.anyconfig.backend.backends as Backends\nimport trifle.anyconfig.backend.json_ as BJ\nimport trifle.anyconfig.parser as P\nimport trifle.anyconfig.utils as U\n\n# pylint: disable=W0611\n# Import some global constants will be re-exported:\nfrom trifle.anyconfig.mergeabledict import MS_REPLACE, MS_NO_REPLACE, \\\n MS_DICTS, MS_DICTS_AND_LISTS, MERGE_STRATEGIES\n# pylint: enable=W0611\n\n# pylint: disable=C0103\n# Re-export:\nlist_types = Backends.list_types\ngetLogger = G.get_logger\n\n# aliases:\ncontainer = M.MergeableDict\nlogging = G.LOGGER\n# pylint: enable=W0611\n\n\ndef set_loglevel(level):\n \"\"\"\n :param level: Log level, e.g. 
logging.INFO and logging.WARN.\n \"\"\"\n logging.setLevel(level)\n\n\ndef find_loader(config_path, forced_type=None):\n \"\"\"\n :param config_path: Configuration file path\n :param forced_type: Forced configuration parser type\n :return: ConfigParser-inherited class object\n \"\"\"\n if forced_type is not None:\n cparser = Backends.find_by_type(forced_type)\n if not cparser:\n logging.error(\n \"No parser found for given type: \" + forced_type\n )\n return None\n else:\n cparser = Backends.find_by_file(config_path)\n if not cparser:\n logging.error(\n \"No parser found for given file: \" + config_path\n )\n return None\n\n logging.debug(\"Using config parser of type: \" + cparser.type())\n return cparser\n\n\ndef single_load(config_path, forced_type=None, **kwargs):\n \"\"\"\n Load single config file.\n\n :param config_path: Configuration file path\n :param forced_type: Forced configuration parser type\n :param kwargs: Backend specific optional arguments, e.g. {\"indent\": 2} for\n JSON loader/dumper backend\n :return: Dict-like object (instance of\n anyconfig.mergeabledict.MergeableDict by default) supports merge\n operations.\n \"\"\"\n cparser = find_loader(config_path, forced_type)\n if cparser is None:\n return None\n\n logging.info(\"Loading: \" + config_path)\n return cparser.load(config_path, **kwargs)\n\n\ndef multi_load(paths, forced_type=None, merge=MS_DICTS, marker='*', **kwargs):\n \"\"\"\n Load multiple config files.\n\n The first argument `paths` may be a list of config file paths or\n a glob pattern specifying that. That is, if a.yml, b.yml and c.yml are in\n the dir /etc/foo/conf.d/, the followings give same results::\n\n multi_load([\"/etc/foo/conf.d/a.yml\", \"/etc/foo/conf.d/b.yml\",\n \"/etc/foo/conf.d/c.yml\", ])\n\n multi_load(\"/etc/foo/conf.d/*.yml\")\n\n :param paths: List of config file paths or a glob pattern to list paths\n :param forced_type: Forced configuration parser type\n :param merge: Strategy to merge config results of multiple config files\n loaded. see also: anyconfig.mergeabledict.MergeableDict.update()\n :param marker: Globbing markerer to detect paths patterns\n :param kwargs: Backend specific optional arguments, e.g. {\"indent\": 2} for\n JSON loader/dumper backend\n :return: Dict-like object (instance of\n anyconfig.mergeabledict.MergeableDict by default) supports merge\n operations.\n \"\"\"\n assert merge in MERGE_STRATEGIES, \"Invalid merge strategy: \" + merge\n\n if marker in paths:\n paths = U.sglob(paths)\n\n config = container()\n for path in paths:\n if marker in path: # Nested patterns like ['*.yml', '/a/b/c.yml'].\n conf_updates = multi_load(path, forced_type, merge, marker,\n **kwargs)\n else:\n conf_updates = single_load(path, forced_type, **kwargs)\n\n config.update(conf_updates, merge)\n\n return config\n\n\ndef load(path_specs, forced_type=None, merge=MS_DICTS, marker='*', **kwargs):\n \"\"\"\n Load single or multiple config files or multiple config files specified in\n given paths pattern.\n\n :param path_specs:\n Configuration file path or paths or its pattern such as '/a/b/*.json'\n :param forced_type: Forced configuration parser type\n :param merge: Merging strategy to use\n :param marker: Globbing marker to detect paths patterns\n :param kwargs: Backend specific optional arguments, e.g. 
{\"indent\": 2} for\n JSON loader/dumper backend\n :return: Dict-like object (instance of\n anyconfig.mergeabledict.MergeableDict by default) supports merge\n operations.\n \"\"\"\n if marker in path_specs or U.is_iterable(path_specs):\n return multi_load(path_specs, forced_type, merge, marker, **kwargs)\n else:\n return single_load(path_specs, forced_type, **kwargs)\n\n\ndef loads(config_content, forced_type=None, **kwargs):\n \"\"\"\n :param config_content: Configuration file's content\n :param forced_type: Forced configuration parser type\n :param kwargs: Backend specific optional arguments, e.g. {\"indent\": 2} for\n JSON loader/dumper backend\n :return: Dict-like object (instance of\n anyconfig.mergeabledict.MergeableDict by default) supports merge\n operations.\n \"\"\"\n if forced_type is None:\n return P.parse(config_content)\n\n cparser = find_loader(None, forced_type)\n if cparser is None:\n return P.parse(config_content)\n\n return cparser.loads(config_content, **kwargs)\n\n\ndef _find_dumper(config_path, forced_type=None):\n \"\"\"\n Find configuration parser to dump data.\n\n :param config_path: Output filename\n :param forced_type: Forced configuration parser type\n :return: ConfigParser-inherited class object\n \"\"\"\n cparser = find_loader(config_path, forced_type)\n\n if cparser is None or not getattr(cparser, \"dump\", False):\n logging.warn(\n \"Dump method not implemented. Fallback to JsonConfigParser\"\n )\n cparser = BJ.JsonConfigParser()\n\n return cparser\n\n\ndef dump(data, config_path, forced_type=None, **kwargs):\n \"\"\"\n Save `data` as `config_path`.\n\n :param data: Config data object to dump ::\n anyconfig.mergeabledict.MergeableDict by default\n :param config_path: Output filename\n :param forced_type: Forced configuration parser type\n :param kwargs: Backend specific optional arguments, e.g. {\"indent\": 2} for\n JSON loader/dumper backend\n \"\"\"\n dumper = _find_dumper(config_path, forced_type)\n\n logging.info(\"Dumping: \" + config_path)\n dumper.dump(data, config_path, **kwargs)\n\n\ndef dumps(data, forced_type, **kwargs):\n \"\"\"\n Return string representation of `data` in forced type format.\n\n :param data: Config data object to dump ::\n anyconfig.mergeabledict.MergeableDict by default\n :param forced_type: Forced configuration parser type\n :param kwargs: Backend specific optional arguments, e.g. 
{\"indent\": 2} for\n JSON loader/dumper backend\n :return: Backend-specific string representation for the given data\n \"\"\"\n return _find_dumper(None, forced_type).dumps(data, **kwargs)\n\n\n# vim:sw=4:ts=4:et:\n","repo_name":"gloaec/trifle","sub_path":"src/trifle/anyconfig/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":7056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70882944834","text":"input()\ns = list(map(int, input().split()))\nans = 0\nwhile True:\n for i, v in enumerate(s):\n if v % 2 != 0:\n break\n s[i] = v / 2\n else:\n ans += 1\n continue\n break\nprint(ans)\n","repo_name":"zakuro9715/atcoder","sub_path":"abc081/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18016486474","text":"import numpy as np\n\n\ndef get_face_key_point_info(kpts):\n all_pts = []\n all_P = []\n all_popse = []\n ldmk_bboxes_index = 0\n\n ots = kpts.reshape(-1)\n numBox = len(kpts)\n for i in range(numBox):\n offset = 62 * numBox + i * 219\n pts68 = ots[offset:offset + 204].reshape([3, -1])\n\n ldmk_bboxes_index += 1\n offset += 204\n P = ots[offset:offset + 12].reshape([3, -1])\n offset += 12\n pose = ots[offset:offset + 3]\n all_pts.append(pts68)\n all_P.append(P)\n all_popse.append(pose.tolist())\n\n return all_pts, np.asarray(all_P), all_popse\n","repo_name":"zhangyldanny/APIServer","sub_path":"pycode_jc/modules/models/model_deal/face_key_point.py","file_name":"face_key_point.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70489379716","text":"# Rafał Laskowski\n\"\"\"\n Generalnie mechanizm jest prosty funkcją przypominającą BST scalam plamy w jedno pole i towrzę z tego liniową tablice.\n Następnie dopóki mam opcje w kolejce lub nie starcza mi paliwa na dojsćie do docelowaego pola szukam najlpeszego\n wyniku zliczając przy tym liczbę tankowań.\n O(n*m)\n\"\"\"\n\nfrom zad8testy import runtests\nfrom collections import deque\nfrom heapq import heappush, heappop\n\n\ndef BFS(T, Visited, start_u, start_v):\n suma = 0\n Q = deque()\n Q.append((start_u, start_v))\n while Q:\n u, v = Q.popleft()\n if Visited[u][v]:\n continue\n Visited[u][v] = True\n suma += T[u][v]\n for off in [-1, 1]:\n if len(T[0]) > v + off >= 0 and not Visited[u][v + off] and T[u][v + off] != 0:\n Q.append((u, v + off))\n if 0 <= u + off < len(T) and not Visited[u + off][v] and T[u + off][v] != 0:\n Q.append((u + off, v))\n return suma\n\n\ndef plan(T):\n m = len(T[0])\n n = len(T)\n Visited = [[False for _ in range(m)] for _ in range(n)]\n Linear = [0 for i in range(m)]\n for v in range(m):\n if T[0][v] != 0 and not Visited[0][v]:\n Linear[v] = BFS(T, Visited, 0, v)\n i = 0\n tanked = 0\n Q = [-Linear[0]]\n while Q:\n fuel = -heappop(Q)\n tanked += 1\n if i + fuel >= m - 1:\n break\n else:\n for j in range(i + 1, i + fuel + 1):\n heappush(Q, -Linear[j])\n i += fuel\n else:\n return -1\n return tanked\n\n\n# zmien all_tests na True zeby uruchomic wszystkie testy\nruntests(plan, all_tests=True)\n","repo_name":"Deevo87/asd-algorithms","sub_path":"offline_exercises_2022_2023/offline_8/zad8.py","file_name":"zad8.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37877017402","text":"import psutil, re\n\n# Terminate a 
program \t\tO(1)\ndef terminate(process):\n\ttry:\n\t\tprocess.terminate()\n\t\treturn True\n\texcept psutil.Error as e:\n\t\treturn False\n\n#-------------------------------------------------------------------------------------------------\n\n# Get all programs that have currently running processes \tO(N)\ndef get_programs():\n\tprograms = []\n\tcounter = 0\n\n\tfor process in psutil.process_iter ():\t\t# O(N)\n\t\tif(counter == 0):\n\t\t\tprograms.append(process)\n\t\t\tcounter += 1\n\t\telif(process.exe() == \"\"):\n\t\t\tcontinue\n\t\telif(process.exe() == programs[counter - 1].exe()):\n\t\t\tcontinue\n\t\telse:\n\t\t\tprograms.append(process)\n\t\t\tcounter += 1\n\treturn programs\n\n#-------------------------------------------------------------------------------------------------\n\n# Get file name from file path \t\tO(N)\ndef get_file_name_from_path(path):\n\telements = path.split(\"/\")\t\t# O(N)\n\treturn elements[len(elements) - 1]\n\n#-------------------------------------------------------------------------------------------------\n\n# Get the process run by a specific file given its name\t\tO(N)\ndef get_process_run_by_file_name(file_name):\n\tfor process in psutil.process_iter ():\t\t# O(N)\n\t\tif(re.search(file_name + \"$\", str(process.exe()))):\n\t\t\treturn process\n\treturn False\n\n#-------------------------------------------------------------------------------------------------\n\n# Get the process run by a specific file given its path\tO(N)\ndef get_process_run_by_file(file):\n\tfor process in psutil.process_iter ():\t\t# O(N)\n\t\tif(str(process.exe()) == file):\n\t\t\treturn process\n\treturn False\n","repo_name":"Abdelrhman-CaT/Simple-Anti-Malware-Signature-Based-Detection","sub_path":"scanner/process_management.py","file_name":"process_management.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44268554120","text":"from random import *\nfrom time import *\n\nn = int(input(\"Number of participants: \"))\nname_lists = []\n\nprint(\"List of participants: \")\nfor i in range(n):\n name = input()\n name_lists.append(name)\n\nshuffle(name_lists)\nwinner=choice(name_lists)\nsleep(2)\nprint(\"Winner is: \",winner)\n\n","repo_name":"melias198/OOP-and-Python-Programming","sub_path":"Week 01/Module 03/Simple Project(PyAutoGui-OpenCv)/lottary_system.py","file_name":"lottary_system.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42519656059","text":"from dash.dependencies import Output, Input, State\nimport dash_core_components as dcc\nimport dash_bootstrap_components as dbc\nimport dash_html_components as html\nimport plotly.express as px\nfrom flask import Flask\nimport pandas as pd\nimport dash\n\nimport plotly.express as px\nimport plotly\nimport plotly.graph_objects as go\nimport pandas\n\nimport geopandas as gpd\nimport datetime\nimport json\nimport shapely\nimport numpy as np\n\nserver = Flask(__name__)\napp = dash.Dash(server=server, external_stylesheets=[dbc.themes.FLATLY])\napp.title = 'Dashboard'\n\n# Future proofing if we find easy way to add satellite image\ndisplay_type = \"open-street-map\"\n\n\n\napp.layout = dbc.Container([ \n\n dbc.Row(dbc.Col(html.H2(\"Houston Flooding Map\"), width={'size': 12, 'offset': 0, 'order': 0}), \n style = {'textAlign': 'center', 'paddingBottom': '1%'}),\n\n dbc.Row(dbc.Col(dcc.Loading(children=[dcc.Graph(id='Houston'),\n 
dcc.Slider(id='my_slider',min=1, max=12,value=10,\n marks={\n 1: 'Jan',\n 2: 'Feb',\n 3: 'Mar',\n 4: 'Apr',\n 5: 'May',\n 6: 'Jun',\n 7: 'Jul',\n 8: 'Aug',\n 9: 'Sep',\n 10: 'Oct',\n 11: 'Nov',\n 12: 'Dec'})\n ], color = '#000000', type = 'dot', fullscreen=True ) ))\n])\n\n@app.callback(\n Output(component_id='Houston', component_property='figure'),\n Input(component_id='my_slider', component_property='value')\n)\n\ndef update_figure(month):\n print(str(month))\n # Get the shapefile into a geopandas\n shapes = gpd.read_file('tl_2018_48201_roads.shp')\n\n # Get the heatmap data\n heatmap = pandas.read_csv('floodingheatmap12m.csv', sep='|')\n\n for i in heatmap.index:\n data = heatmap.iloc[i]['Create Date']\n data = data.replace('-',' ').replace(':',' ').replace('.',' ')\n data = data.split(' ')\n for num in range(len(data)):\n \n data[num] = int(data[num])\n \n heatmap.at[i,'Create Date'] = datetime.datetime(data[0],data[1],data[2],data[3],data[4],data[5])\n found = False\n indexsWithMonth = []\n heatmap = heatmap.sort_values(by = 'Create Date')\n\n # Get the indexes of the flood events occuring in the selected month.\n for index, row in enumerate(heatmap['Create Date']):\n if month == row.month:\n indexsWithMonth.append(index)\n\n\n # Heatmap data to a GeoDataFrame\n heatmap_gdf = gpd.GeoDataFrame(\n heatmap, geometry=gpd.points_from_xy(heatmap.lon, heatmap.lat))\n\n # Get it to right format\n heatmap_gdf = heatmap_gdf.set_crs('epsg:4326')\n\n # Get another copy of shapes (roads) before messing with it\n before_shapes = shapes.copy(deep=True)\n\n # Create a buffer on shapes.geometry\n shapes.geometry = shapes.buffer(.0002)\n\n # Spatial join the roads gdf and the heatmap gdf \n flooded_roads = gpd.sjoin(shapes,heatmap_gdf.iloc[indexsWithMonth[0]:indexsWithMonth[-1]],'inner',predicate='contains')\n\n # Get all the indexes of roads that \n\n fr_index = list(flooded_roads.index.values)\n \n # Future proofing if we find easy way to add sattelite image\n display_type = \"open-street-map\"\n\n # CREATE FIGURE AS SCATTER MAPBOX OF THE HEATMAP\n # https://plotly.com/python/lines-on-mapbox/\n fig=go.Figure(go.Scattermapbox(\n lat=heatmap.lat,\n lon=heatmap.lon,\n text=heatmap.Location,\n marker=go.scattermapbox.Marker(\n size=10\n ),\n hoverlabel=go.scattermapbox.Hoverlabel(\n bgcolor='darkslateblue',\n bordercolor='lightgrey'\n )))\n\n for i in fr_index: #Change back to list(fr_index)\n feature= before_shapes.iloc[i].geometry\n name = before_shapes.iloc[i].FULLNAME\n #print(f\"{feature} name {name}\")\n # Prints poly gon and name\n lats = []\n lons = []\n names = []\n if isinstance(feature, shapely.geometry.linestring.LineString):\n linestrings = [feature]\n elif isinstance(feature, shapely.geometry.multilinestring.MultiLineString):\n linestrings = feature.geoms\n else:\n continue\n for linestring in linestrings:\n x, y = linestring.xy\n lats = np.append(lats, y)\n lons = np.append(lons, x)\n names = np.append(names, name)\n lats = np.append(lats, None)\n lons = np.append(lons, None)\n names = np.append(names, None)\n # https://plotly.github.io/plotly.py-docs/generated/plotly.graph_objects.Scattermapbox.html\n #print(name)\n \n fig.add_trace(go.Scattermapbox(mode='lines',lat=lats, lon=lons, name=name,line=go.scattermapbox.Line(\n color = 'aqua',\n width = 5)))\n\n fig.update_layout(\n # mapbox_style=display_type,\n margin={\"r\": 0, \"t\": 0, \"l\": 0, \"b\": 0},\n title='Houston Flooding',\n width=800,\n height=600,\n mapbox={\n 'style': 'open-street-map',\n 'center': {'lat': 29.749907, 'lon': 
-95.358421},\n 'zoom': 10}\n )\n\n return fig\n\nif __name__=='__main__':\n app.run_server(debug=True)\n","repo_name":"apolvm/FloodOfCode","sub_path":"testApp.py","file_name":"testApp.py","file_ext":"py","file_size_in_byte":5775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23436282331","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Apr 12 19:16:55 2014\r\n\r\n@author: Shadow\r\n\"\"\"\r\n\r\ndef war(nao,ken) :\r\n count=0\r\n for x in nao :\r\n prev=0\r\n for y in ken :\r\n if x= 3:\n self.portCount = 1\n else:\n self.portCount = 1\n\n\nclass AutoHacker(QWidget):\n def __init__(self):\n super().__init__()\n self.initUI()\n\n # noinspection PyAttributeOutsideInit\n def initUI(self):\n self.Slider_EnterDelay = QSlider(Qt.Horizontal, self)\n # noinspection PyUnresolvedReferences\n self.Slider_EnterDelay.valueChanged.connect(self.setEDSliderValue)\n self.Slider_EnterDelay.setMaximum(3000)\n self.Slider_EnterDelay.setMinimum(200)\n self.Slider_EnterDelay.setSingleStep(200)\n self.Slider_EnterDelay.setTickInterval(200)\n self.Slider_EnterDelay.setTickPosition(1)\n self.Slider_InputDelay = QSlider(Qt.Horizontal, self)\n # noinspection PyUnresolvedReferences\n self.Slider_InputDelay.valueChanged.connect(self.setIDSliderValue)\n self.Slider_InputDelay.setMaximum(3000)\n self.Slider_InputDelay.setMinimum(500)\n self.Slider_InputDelay.setSingleStep(250)\n self.Slider_InputDelay.setTickInterval(250)\n self.Slider_InputDelay.setTickPosition(1)\n\n self.CB_AutoHacker = QCheckBox('AutoHacker Enable')\n # noinspection PyUnresolvedReferences\n self.CB_AutoHacker.stateChanged.connect(self.hackThread)\n self.CB_AutoContinue = QCheckBox('AutoContinue')\n\n self.AV_SetBox = QVBoxLayout()\n self.AV_SetBox.addWidget(self.Slider_EnterDelay)\n self.AV_SetBox.addWidget(self.Slider_InputDelay)\n\n box = QVBoxLayout()\n box.addWidget(self.CB_AutoHacker)\n box.addWidget(self.CB_AutoContinue)\n box.addLayout(self.AV_SetBox)\n\n self.setLayout(box)\n\n self.th = HackingThread()\n self.attackThState = False\n self.loginDiag = False\n\n self.setWindowTitle(\"SSerVe's AutoHack\")\n self.setWindowIcon(QIcon('ico.png'))\n self.setGeometry(0, 0, 300, 400)\n self.show()\n\n # noinspection PyAttributeOutsideInit\n def hackThread(self, state):\n if state == Qt.Checked:\n self.attackThState = True\n self.th.start()\n else:\n self.attackThState = False\n\n # noinspection PyCallByClass\n def setInputDiag_login(self):\n text, ok = QInputDialog.getText(self, 'Input Dialog', 'Enter your name:')\n if ok:\n login_nameInput = driver.find_element_by_xpath('//*[@id=\"login-input\"]')\n login_tutorialCheckBox = driver.find_element_by_xpath('//*[@id=\"checkbox-tutorial\"]')\n login_Button = driver.find_element_by_xpath('//*[@id=\"login-play\"]')\n login_nameInput.send_keys(str(text))\n login_tutorialCheckBox.click()\n login_Button.click()\n\n def setIDSliderValue(self, val):\n self.Slider_InputDelay.setValue(val)\n\n def setEDSliderValue(self, val):\n self.Slider_EnterDelay.setValue(val)\n\n\nclass ConfigWindow(QWidget):\n def __init__(self):\n super().__init__()\n self.outputIndex = 0\n self.cfg_len = 1\n self.cfg_list = []\n self.ScriptPath = sys.argv[0]\n self.Path = \"\"\n self.ConfigPath = \"\"\n for index, path in enumerate(self.ScriptPath.split(\"\\\\\")):\n if index == len(self.ScriptPath.split(\"\\\\\")) - 1:\n continue\n else:\n self.Path += (path + \"/\")\n self.ConfigPath = self.Path + \"Config/\"\n self.initUI()\n\n # noinspection 
PyUnresolvedReferences,PyAttributeOutsideInit\n def initUI(self):\n self.ConfigComboBox = QComboBox(self)\n self.updateConfigList()\n self.ConfigComboBox.activated[int].connect(self.changeInternalConfigVar)\n self.ConfigNum = 0\n self.nowApplied = 0\n\n self.loadButton = QPushButton(\"Loa&d\", self) # Load, Shortcut Alt + D\n self.saveButton = QPushButton(\"&Save\", self) # Save, Shortcut Alt + S\n self.createButton = QPushButton(\"&New Config\", self) # Create new config, Shortcut Alt + N\n self.refreshButton = QPushButton(\"&Refresh list\", self) # Refresh config list, Shortcut Alt + R\n self.loadButton.released.connect(self.load_apply_setting)\n self.saveButton.released.connect(self.newsaveJson)\n self.createButton.released.connect(self.createJsonWithoutsave)\n self.refreshButton.released.connect(self.updateConfigList)\n\n self.box = QVBoxLayout()\n\n self.HBoxAB = QHBoxLayout()\n self.HBoxAB.addWidget(self.ConfigComboBox)\n self.HBoxAB.addWidget(self.createButton)\n\n self.HBoxDN = QHBoxLayout()\n self.HBoxDN.addWidget(self.saveButton)\n self.HBoxDN.addWidget(self.loadButton)\n\n self.HBoxFN = QHBoxLayout()\n self.HBoxFN.addStretch(0)\n self.HBoxFN.addWidget(self.refreshButton)\n\n self.box.addLayout(self.HBoxAB)\n self.box.addLayout(self.HBoxDN)\n\n self.setLayout(self.box)\n\n self.setWindowTitle(\"Configs\")\n self.setGeometry(200, 200, 300, 200)\n self.show()\n\n # noinspection PyAttributeOutsideInit\n def changeInternalConfigVar(self, num):\n self.ConfigNum = num\n\n # noinspection PyAttributeOutsideInit\n def load_apply_setting(self):\n self.nowApplied = 0\n print(\"Config Applying...\")\n for _set in self.cfg_list[self.ConfigNum]:\n pt = self.cfg_list[self.ConfigNum][_set]\n if _set == 'AutoHacker':\n if pt[\"AutoHackerEnable\"]: # AutoHacker Enable\n print(\"AutoHackEnable: True\")\n if not ah_win.CB_AutoHacker.isChecked():\n print(\"AutoHack enabled.\")\n ah_win.CB_AutoHacker.toggle()\n elif ah_win.CB_AutoHacker.isChecked():\n print(\"AutoHack already enabled.\")\n elif not pt[\"AutoHackerEnable\"]: # AutoHacker Disable\n print(\"AutoHackerEnable: False\")\n if ah_win.CB_AutoHacker.isChecked():\n print(\"AutoHack disabled.\")\n ah_win.CB_AutoHacker.toggle()\n elif not ah_win.CB_AutoHacker.isChecked():\n print(\"AutoHack already disabled.\")\n if pt['AutoPortEnable']: # AutoPort Enable\n print(\"AutoPortEnable: True\")\n if not ah_win.CB_AutoContinue.isChecked():\n print(\"AutoPort enabled.\")\n ah_win.CB_AutoContinue.toggle()\n elif ah_win.CB_AutoContinue.isChecked():\n print(\"AutoPort already enabled.\")\n elif not pt['AutoPortEnable']: # AutoPort Disable\n print(\"AutoPortEnable: False\")\n if ah_win.CB_AutoContinue.isChecked():\n print(\"AutoPort disabled.\")\n ah_win.CB_AutoContinue.toggle()\n elif not ah_win.CB_AutoContinue.isChecked():\n print(\"AutoPort already disabled.\")\n ah_win.Slider_EnterDelay.setValue(int(pt[\"AutoHackerEnterDelay\"] * 1000))\n print(\"EnterDelay set to {}s\".format(pt[\"AutoHackerEnterDelay\"]))\n ah_win.Slider_InputDelay.setValue(int(pt[\"AutoHackerInputDelay\"] * 1000))\n print(\"InputDelay set to {}s\".format(pt[\"AutoHackerInputDelay\"]))\n\n def newsaveJson(self):\n AllItems = [self.ConfigComboBox.itemText(i) for i in range(self.ConfigComboBox.count())]\n llsaveJson = deepcopy(cfg_init)\n # CFG Name Setting\n text, ok = QInputDialog.getText(self, 'SAVE SETTING', 'Enter CFG name:')\n llsaveJson[\"Name\"] = text\n # Save current setting\n AH_Dict = llsaveJson[\"AutoHacker\"]\n AH_Dict[\"AutoHackerEnable\"] = 
ah_win.CB_AutoHacker.isChecked()\n AH_Dict[\"AutoPortEnable\"] = ah_win.CB_AutoContinue.isChecked()\n AH_Dict[\"AutoHackerEnterDelay\"] = ah_win.Slider_EnterDelay.value() / 1000\n AH_Dict[\"AutoHackerInputDelay\"] = ah_win.Slider_InputDelay.value() / 1000\n if ok:\n with open(\"{}Config_{}.cfg\".format(self.ConfigPath, str(len(AllItems)+int(1))), \"w\") as js:\n json.dump(llsaveJson, js, indent=4)\n print(\"JSON Created.\")\n del AllItems, llsaveJson\n\n def createJsonWithoutsave(self):\n AllItems = [self.ConfigComboBox.itemText(i) for i in range(self.ConfigComboBox.count())]\n with open(\"{}Config_{}.cfg\".format(self.ConfigPath, str(len(AllItems) + 1)), \"w\") as js:\n json.dump(cfg_init, js, indent=4)\n self.outputIndex += 1\n print(\"JSON Created\")\n del AllItems\n\n def updateConfigList(self):\n print(\"ConfigList Updating...\")\n self.cfg_list = []\n self.cfg_len = 1\n AllItems = [self.ConfigComboBox.itemText(i) for i in range(self.ConfigComboBox.count())]\n for i in range(len(AllItems)-1, 0-1, -1):\n self.ConfigComboBox.removeItem(i)\n\n while True:\n if os.path.exists(\"{}Config_{}.cfg\".format(self.ConfigPath, self.cfg_len)):\n print(\"Searching {}Config Files\".format(self.ConfigPath))\n if self.cfg_len == 1:\n print(\"Config 1st file exist.\")\n elif self.cfg_len == 2:\n print(\"Config 2nd file exist.\")\n elif self.cfg_len == 3:\n print(\"Config 3rd file exist.\")\n else:\n print(\"Config {}th file exist.\".format(self.cfg_len))\n self.cfg_len += 1\n else:\n break\n try:\n if self.cfg_len == 0:\n self.createJsonWithoutsave()\n for cfg_num in range(1, self.cfg_len + 1):\n configPath = \"{}Config_{}.cfg\".format(self.ConfigPath, cfg_num)\n with open(configPath, \"r\", encoding=\"UTF-8\") as file:\n print(str(file))\n self.cfg_list.append(json.load(file))\n except OSError:\n print(\"OSError, Warning\")\n for index, cfg in enumerate(self.cfg_list):\n print(str(cfg))\n self.ConfigComboBox.addItem(\"Config {}: {}\".format(index, cfg['Name']))\n print(str([self.ConfigComboBox.itemText(i) for i in range(self.ConfigComboBox.count())]))\n\n\ndriver = webdriver.Chrome(\"chromedriver\")\ndriver.get(\"http://s0urce.io\")\n\ncfg_init = {\n \"Name\": \"\",\n \"AutoHacker\": {\n \"AutoHackerEnable\": False,\n \"AutoHackerEnterDelay\": 0.2,\n \"AutoHackerInputDelay\": 0.5,\n \"AutoPortEnable\": False\n }\n}\n\napp = QApplication(sys.argv)\n\nconfig_win = ConfigWindow()\nwith open(\"{}words.json\".format(config_win.Path), \"r\") as djson:\n word_dict = json.load(djson)\nah_win = AutoHacker()\n\nsys.exit(app.exec_())\n","repo_name":"sserve-kr/Old-Typesense","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":14399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24354710077","text":"import numpy as np\nimport glob\nfrom scipy.spatial.distance import cdist\n\n\ndef FindClosest(T,X):\n return np.argmin((T-X[:,0])**2)\n\ndef Compute_LogDerivative(X):\n T0, T1 = 0.1*X[-1,0], X[-1,0]\n X0, X1 = np.log(X[FindClosest(T0,X),1:]), np.log(X[-1,1:])\n dX = abs(X1-X0)/(np.log(T1)-np.log(T0))\n return np.max(dX)\n\ndef ComputeAttractor(Folder,MinNumTrajectory,DerTol=1e-2,ConvTol=1e-4,Tmax=1e8,ShowCriteriaValues=False):\n\n files = glob.glob(Folder+'/conc*.dat')\n if len(files) < MinNumTrajectory:\n return False, []\n \n #Load the data\n FinalConc = []\n for f in files:\n try:\n X = np.loadtxt(f)\n except ValueError: #If the complex number is in the file (note that MATLAB uses \"j\" for the imaginary unit)) \n pass\n 
else:\n #Check if the final concentration is converged using the derivative\n dXdt = Compute_LogDerivative(X)\n if ShowCriteriaValues:\n print(dXdt)\n if X[-1,0] > Tmax*0.9 and np.all(X[:,1:] > -1e-12) and dXdt < DerTol:\n FinalConc.append(X[-1,1:])\n\n #Check if the length of the final concentration is larger than the minimum number of trajectories \n if len(FinalConc) < MinNumTrajectory:\n return False, []\n\n #Check if the final concentration are the same among the initial points\n Y = np.log(FinalConc)\n MaxDist = np.max(cdist(Y,Y))\n if ShowCriteriaValues:\n print(MaxDist)\n if MaxDist < ConvTol:\n return True, np.average(FinalConc,axis=0)\n else:\n return False, []\n","repo_name":"yhimeoka/Perturbation-Response-Analysis","sub_path":"CommonModule/ComputeAttractor.py","file_name":"ComputeAttractor.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31966527825","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# from .dependencies import softimport\nfrom .GetEvtType import getevttype\nfrom .utilities import *\nfrom .MoveJobs import Move, EosMove\nfrom .dependencies import softimport\nimport os\nimport subprocess\nimport time\n\n\ndef IsSlurm():\n ### Slurm\n try:\n P = subprocess.Popen([\"squeue\"], stdout=subprocess.PIPE)\n _, _ = P.communicate()\n except OSError:\n return False\n else:\n return True\n\n\ndef IsLSF():\n ### LSF\n try:\n P = subprocess.Popen([\"bjobs\"], stdout=subprocess.PIPE)\n _, _ = P.communicate()\n except OSError:\n return False\n else:\n return True\n\n\ndef IsHTCondor():\n ### HTCondor\n\n command = [\"which condor_q\"]\n\n if sys.version_info[0] > 2:\n process = subprocess.Popen(\n command,\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n encoding=\"utf8\",\n )\n else:\n process = subprocess.Popen(\n command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n\n time.sleep(0.03)\n out, _ = process.communicate()\n\n if \"condor_q\" in out:\n return True\n else:\n return False\n\n\nif IsSlurm():\n from .SlurmUtils import DeliveryClerk\n\nelif IsHTCondor():\n from .HTCondorUtils import DeliveryClerk, Scheduler\n\nelif IsLSF():\n from .LSFUtils import DeliveryClerk\n","repo_name":"marinang/SimProd","sub_path":"simprod/simjob/utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"37109762151","text":"#######################################################\n#\tDemo PyQt5 GUI created by Samarth Jugran\n#\t\n#\n#\trequirements- \n#\t\t\t\tPyQt5 - version 5.9.2\n#\t\t\t\tmatplotlib - version 3.0.2\n#\n#\n#######################################################\n\n\n\n#!/usr/bin/env python3\nimport random\n\nimport matplotlib.pyplot as plt\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import (QApplication, QWidget, QTabWidget, QHBoxLayout,\n QTableWidget, QVBoxLayout, QLabel, QComboBox,\n QLineEdit, QGridLayout, QSlider, QDial, QPushButton,\n QRadioButton)\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\n\n\nclass Window(QWidget):\n def __init__(self):\n super().__init__()\n self.mainLayout = QVBoxLayout()\n\n self.setGeometry(400, 400, 800, 600)\n\n self.setup_gui()\n self.setWindowTitle(\"Demo PyQt GUI\")\n self.show()\n\n def setup_gui(self):\n self.setup_topbar()\n self.setup_tabs()\n\n self.setLayout(self.mainLayout)\n\n def setup_topbar(self):\n 
top_bar = QHBoxLayout()\n\n drop_down_label = QLabel('Drop Down')\n\n drop_down = QComboBox()\n drop_down_list = ['option ' + str(i) for i in range(10)]\n drop_down.addItems(drop_down_list)\n\n top_bar.addWidget(drop_down_label)\n top_bar.addWidget(drop_down)\n top_bar.addStretch()\n\n self.mainLayout.addLayout(top_bar)\n\n def setup_tabs(self):\n tab_widget = QTabWidget()\n\n tab1 = QWidget()\n table = QTableWidget(10, 10)\n tab1_box = QHBoxLayout()\n tab1_box.addWidget(table)\n tab1.setLayout(tab1_box)\n\n tab2 = QWidget()\n tab2_box = QVBoxLayout()\n\n tab2_grid = QGridLayout()\n tab2_grid.setSpacing(10)\n\n line_label = QLabel('Text Field')\n line_edit = QLineEdit()\n\n slider_label = QLabel('Slider Value: 50')\n slider = QSlider(Qt.Horizontal)\n slider.setValue(50)\n slider.valueChanged[int].connect(lambda: self.change_value('Slider', slider.value(), slider_label))\n\n dial_label = QLabel('Dial Value: 50')\n dial = QDial()\n dial.setValue(50)\n dial.valueChanged[int].connect(lambda: self.change_value('Dial', dial.value(), dial_label))\n\n radio_button1 = QRadioButton('Radio Button 1')\n radio_button2 = QRadioButton('Radio Button 2')\n radio_button3 = QRadioButton('Radio Button 3')\n\n button_label = QLabel('None Selected')\n\n radio_button1.clicked.connect(lambda: self.radio_button(radio_button1, button_label))\n radio_button2.clicked.connect(lambda: self.radio_button(radio_button2, button_label))\n radio_button3.clicked.connect(lambda: self.radio_button(radio_button3, button_label))\n\n tab2_grid.addWidget(line_label, 1, 1, 1, 1)\n tab2_grid.addWidget(line_edit, 1, 2, 1, 3)\n tab2_grid.addWidget(slider_label, 2, 1, 1, 1)\n tab2_grid.addWidget(slider, 2, 2, 1, 2)\n tab2_grid.addWidget(dial_label, 3, 1, 1, 1)\n tab2_grid.addWidget(dial, 3, 2, 2, 2)\n tab2_grid.addWidget(button_label, 5, 1, 1, 1)\n tab2_grid.addWidget(radio_button1, 6, 2, 1, 1)\n tab2_grid.addWidget(radio_button2, 7, 2, 1, 1)\n tab2_grid.addWidget(radio_button3, 8, 2, 1, 1)\n\n tab2_box.addLayout(tab2_grid)\n tab2_box.addStretch()\n tab2.setLayout(tab2_box)\n\n tab3 = QWidget()\n tab3_box = QVBoxLayout()\n\n figure, axes = plt.subplots()\n self.canvas = FigureCanvas(figure)\n\n random_button = QPushButton('Random')\n random_button.clicked.connect(lambda: self.set_graph_values(figure, axes))\n\n tab3_box.addWidget(random_button)\n tab3_box.addWidget(self.canvas)\n\n tab3.setLayout(tab3_box)\n\n tab_widget.addTab(tab1, 'tab 1')\n tab_widget.addTab(tab2, 'tab 2')\n tab_widget.addTab(tab3, 'tab 3')\n self.mainLayout.addWidget(tab_widget)\n\n def change_value(self, name, value, label):\n new_value = name + ' Value: ' + str(value)\n label.setText(new_value)\n\n def set_graph_values(self, fig, ax):\n plt.cla()\n data = [random.random() for i in range(25)]\n ax.plot(data)\n ax.set_title('Random Plot')\n self.canvas.draw()\n\n def radio_button(self, button, label):\n text = 'Selected: ' + button.text()\n label.setText(text)\n\n\nif __name__ == '__main__':\n app = QApplication([])\n win = Window()\n app.exec_()\n","repo_name":"Jugran/test-projects","sub_path":"pyqt/Pyqt-gui/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34365749645","text":"import pytest\nimport math\nfrom timeit import timeit\nimport time\n\nfrom astropy import units as u\n\nfrom pocs.filterwheel.simulator import FilterWheel as SimFilterWheel\nfrom pocs.camera.simulator import Camera as SimCamera\nfrom pocs.utils import 
error\n\n\n@pytest.fixture(scope='module')\ndef filterwheel():\n sim_filterwheel = SimFilterWheel(filter_names=['one', 'deux', 'drei', 'quattro'],\n move_time=0.1 * u.second,\n timeout=0.5 * u.second)\n return sim_filterwheel\n\n# intialisation\n\n\ndef test_init(filterwheel):\n assert isinstance(filterwheel, SimFilterWheel)\n assert filterwheel.is_connected\n\n\ndef test_camera_init():\n sim_camera = SimCamera(filterwheel={'model': 'simulator',\n 'filter_names': ['one', 'deux', 'drei', 'quattro']})\n assert isinstance(sim_camera.filterwheel, SimFilterWheel)\n assert sim_camera.filterwheel.is_connected\n assert sim_camera.filterwheel.uid\n assert sim_camera.filterwheel.camera is sim_camera\n\n\ndef test_camera_no_filterwheel():\n sim_camera = SimCamera()\n assert sim_camera.filterwheel is None\n\n\ndef test_camera_association_on_init():\n sim_camera = SimCamera()\n sim_filterwheel = SimFilterWheel(filter_names=['one', 'deux', 'drei', 'quattro'],\n camera=sim_camera)\n assert sim_filterwheel.camera is sim_camera\n\n\ndef test_with_no_name():\n with pytest.raises(ValueError):\n SimFilterWheel()\n\n# Basic property getting and (not) setting\n\n\ndef test_model(filterwheel):\n model = filterwheel.model\n assert model == 'simulator'\n with pytest.raises(AttributeError):\n filterwheel.model = \"Airfix\"\n\n\ndef test_name(filterwheel):\n name = filterwheel.name\n assert name == 'Simulated Filter Wheel'\n with pytest.raises(AttributeError):\n filterwheel.name = \"Phillip\"\n\n\ndef test_uid(filterwheel):\n uid = filterwheel.uid\n assert uid.startswith('SW')\n assert len(uid) == 6\n with pytest.raises(AttributeError):\n filterwheel.uid = \"Can't touch this\"\n\n\ndef test_filter_names(filterwheel):\n names = filterwheel.filter_names\n assert isinstance(names, list)\n for name in names:\n assert isinstance(name, str)\n with pytest.raises(AttributeError):\n filterwheel.filter_names = [\"Unsharp mask\", \"Gaussian blur\"]\n\n# Movement\n\n\ndef test_move_number(filterwheel):\n assert filterwheel.position == 1\n e = filterwheel.move_to(2)\n assert math.isnan(filterwheel.position) # position is NaN while between filters\n e.wait()\n assert filterwheel.position == 2\n e = filterwheel.move_to(3, blocking=True)\n assert e.is_set()\n assert filterwheel.position == 3\n filterwheel.position = 4 # Move by assignment to position property blocks until complete\n assert filterwheel.position == 4\n\n\ndef test_move_bad_number(filterwheel):\n with pytest.raises(ValueError):\n filterwheel.move_to(0, blocking=True) # No zero based numbering here!\n with pytest.raises(ValueError):\n filterwheel.move_to(-1, blocking=True) # Definitely not\n with pytest.raises(ValueError):\n filterwheel.position = 99 # Problems.\n with pytest.raises(ValueError):\n filterwheel.move_to(filterwheel._n_positions + 1, blocking=True) # Close, but...\n filterwheel.move_to(filterwheel._n_positions, blocking=True) # OK\n\n\ndef test_move_name(filterwheel, caplog):\n filterwheel.position = 1 # Start from a known position\n e = filterwheel.move_to('quattro')\n assert filterwheel.current_filter == 'UNKNOWN' # I'm between filters right now\n e.wait()\n assert filterwheel.current_filter == 'quattro'\n e = filterwheel.move_to('o', blocking=True) # Matches leading substrings too\n assert filterwheel.current_filter == 'one'\n filterwheel.position = 'd' # In case of multiple matches logs a warning & uses the first match\n assert filterwheel.current_filter == 'deux'\n # WARNING followed by INFO level record about the move\n assert 
caplog.records[-2].levelname == 'WARNING'\n assert caplog.records[-1].levelname == 'INFO'\n filterwheel.position = 'deux' # Check null move. Earlier version of simulator failed this!\n assert filterwheel.current_filter == 'deux'\n\n\ndef test_move_bad_name(filterwheel):\n with pytest.raises(ValueError):\n filterwheel.move_to('cinco')\n\n\ndef test_move_timeout(caplog):\n slow_filterwheel = SimFilterWheel(filter_names=['one', 'deux', 'drei', 'quattro'],\n move_time=0.1,\n timeout=0.2)\n slow_filterwheel.position = 4 # Move should take 0.3 seconds, more than timeout.\n time.sleep(0.001) # For some reason takes a moment for the error to get logged.\n\n # Collect the logs\n levels = [rec.levelname for rec in caplog.records]\n assert 'ERROR' in levels # Should have logged an ERROR by now\n # It raises a pocs.utils.error.Timeout exception too, but because it's in another Thread it\n # doesn't get passes up to the calling code.\n\n\n@pytest.mark.parametrize(\"name,bidirectional, expected\",\n [(\"monodirectional\", False, 0.3),\n (\"bidirectional\", True, 0.1)])\ndef test_move_times(name, bidirectional, expected):\n sim_filterwheel = SimFilterWheel(filter_names=['one', 'deux', 'drei', 'quattro'],\n move_time=0.1 * u.second,\n move_bidirectional=bidirectional,\n timeout=0.5 * u.second)\n sim_filterwheel.position = 1\n assert timeit(\"sim_filterwheel.position = 2\", number=1, globals=locals()) == \\\n pytest.approx(0.1, rel=4e-2)\n assert timeit(\"sim_filterwheel.position = 4\", number=1, globals=locals()) == \\\n pytest.approx(0.2, rel=4e-2)\n assert timeit(\"sim_filterwheel.position = 3\", number=1, globals=locals()) == \\\n pytest.approx(expected, rel=4e-2)\n\n\ndef test_move_exposing(tmpdir, caplog):\n sim_camera = SimCamera(filterwheel={'model': 'simulator',\n 'filter_names': ['one', 'deux', 'drei', 'quattro']})\n fits_path = str(tmpdir.join('test_exposure.fits'))\n exp_event = sim_camera.take_exposure(filename=fits_path, seconds=0.1)\n with pytest.raises(error.PanError):\n sim_camera.filterwheel.move_to(2, blocking=True) # Attempt to move while camera is exposing\n assert caplog.records[-1].levelname == 'ERROR'\n assert sim_camera.filterwheel.position == 1 # Should not have moved\n exp_event.wait()\n\n\ndef test_is_moving(filterwheel):\n filterwheel.position = 1\n assert not filterwheel.is_moving\n assert filterwheel.is_ready\n e = filterwheel.move_to(2)\n assert filterwheel.is_moving\n assert not filterwheel.is_ready\n e.wait()\n assert not filterwheel.is_moving\n assert filterwheel.is_ready\n","repo_name":"asclepiusaka/POCS","sub_path":"pocs/tests/test_filterwheel.py","file_name":"test_filterwheel.py","file_ext":"py","file_size_in_byte":6909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"13701126539","text":"\"\"\"Handler for editting uncanny legends\"\"\"\nfrom typing import Any\n\nfrom . import event_stages\nfrom ... 
import user_input_handler\n\ndef edit_uncanny(save_stats: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Handler for editting uncanny legends\"\"\"\n stage_data = save_stats[\"uncanny\"]\n lengths = stage_data[\"Lengths\"]\n\n ids = []\n ids = user_input_handler.get_range(\n user_input_handler.colored_input(\n \"Enter stage ids (e.g &1& = a new legend, &2& = here be dragons)(You can enter &all& to get all, a range e.g &1&-&49&, or ids separate by spaces e.g &5 4 7&):\"\n ),\n lengths[\"total\"],\n )\n save_stats[\"uncanny\"] = event_stages.stage_handler(stage_data, ids, -1)\n\n return save_stats\n\ndef is_ancient_curse_clear(save_stats: dict[str, Any]) -> bool:\n \"\"\"\n Check if the ancient curse is cleared\n\n Args:\n save_stats (dict[str, Any]): The save stats\n\n Returns:\n bool: If the ancient curse is cleared\n \"\"\"\n return save_stats[\"uncanny\"][\"Value\"][\"clear_progress\"][0][0] >= 1\n","repo_name":"fieryhenry/BCSFE-Python","sub_path":"src/BCSFE_Python/edits/levels/uncanny.py","file_name":"uncanny.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"61"} +{"seq_id":"2984494661","text":"from config import board_size\n\n\nclass TicTacToeGameLogic:\n def __init__(self):\n self.step = 1\n self.board = [[0 for x in range(board_size[0])] for y in range(board_size[1])]\n\n def get_step(self):\n \"\"\"\n return step O or X\n \"\"\"\n return self.step % 2 + 1\n\n def get_board(self):\n \"\"\"\n return board inline string\n \"\"\"\n return str(self.get_step()) + \" \" + \"\".join(map(lambda x: \"\".join(map(str, x)), self.board))\n\n def make_step(self, player, x, y):\n \"\"\"\n check can make step or not\n :return True or False\n \"\"\"\n if player == self.get_step() and not self.board[y][x]:\n self.board[y][x] = player\n self.step += 1\n return True\n return False\n\n def set_board(self, board):\n \"\"\"\n set board for tests\n \"\"\"\n self.board = board\n\n def check_win(self):\n \"\"\"\n 0 - if no winner\n 2 - if O winner\n 1 - if X winner\n \"\"\"\n # check horizontal\n # check vertical\n # check main diagonal\n # check additional diagonal\n if (([1, 1, 1] in self.board or\n [1, 1, 1] in [[self.board[x][y] for x in range(board_size[0])] for y in range(board_size[1])] or\n [1, 1, 1] == [self.board[0][0], self.board[1][1], self.board[2][2]] or\n [1, 1, 1] == [self.board[2][0], self.board[1][1], self.board[0][2]])):\n return 1\n elif ([2, 2, 2] in self.board or\n [2, 2, 2] in [[self.board[x][y] for x in range(board_size[0])] for y in range(board_size[1])] or\n [2, 2, 2] == [self.board[0][0], self.board[1][1], self.board[2][2]] or\n [2, 2, 2] == [self.board[2][0], self.board[1][1], self.board[0][2]]):\n return 2\n return 0\n\n\nif __name__ == '__main__':\n # --- tests ---\n game_logic = TicTacToeGameLogic()\n\n # --- test make_step() ---\n board = [[1, 1, 1],\n [0, 2, 0],\n [0, 0, 0]]\n game_logic.set_board(board)\n assert game_logic.make_step(2, 0, 0) == False, \"test 1: make step at 0 0\"\n assert game_logic.make_step(2, 2, 0) == False, \"test 2: make step at 2 0\"\n assert game_logic.make_step(2, 1, 1) == False, \"test 3: make step at 1 1\"\n assert game_logic.make_step(2, 2, 1) == True, \"test 4: make step at 2 1\"\n\n # --- test get_board() ---\n board = [[1, 1, 1],\n [0, 0, 0],\n [0, 0, 0]]\n game_logic.set_board(board)\n assert game_logic.get_board() == \"111000000\", \"test 1: need 111000000\"\n\n board = [[1, 1, 1],\n [0, 1, 0],\n [0, 0, 1]]\n game_logic.set_board(board)\n assert 
game_logic.get_board() == \"111010001\", \"test 2: need 111010001\"\n\n board = [[1, 1, 1],\n [2, 1, 0],\n [0, 2, 1]]\n game_logic.set_board(board)\n assert game_logic.get_board() == \"111210021\", \"test 3: need 111210021\"\n\n # --- test check_win() ---\n board = [[1, 1, 1],\n [0, 0, 0],\n [0, 0, 0]]\n game_logic.set_board(board)\n assert game_logic.check_win() == 1, \"test 1: row 1 win 1\"\n\n board = [[2, 2, 2],\n [0, 0, 0],\n [0, 0, 0]]\n game_logic.set_board(board)\n assert game_logic.check_win() == 2, \"test 2: row 1 win 2\"\n\n board = [[0, 0, 0],\n [0, 0, 0],\n [1, 1, 1],]\n game_logic.set_board(board)\n assert game_logic.check_win() == 1, \"test 3: row 3 win 1\"\n\n board = [[0, 0, 0],\n [2, 2, 2],\n [0, 0, 0]]\n game_logic.set_board(board)\n assert game_logic.check_win() == 2, \"test 4: row 2 win 2\"\n\n board = [[1, 0, 1],\n [0, 0, 0],\n [0, 0, 0]]\n game_logic.set_board(board)\n assert game_logic.check_win() == 0, \"test 5: no row 1 win 0\"\n\n board = [[0, 0, 0],\n [0, 0, 0],\n [0, 0, 0]]\n game_logic.set_board(board)\n assert game_logic.check_win() == 0, \"test 6: empty win 0\"\n\n board = [[1, 0, 0],\n [0, 1, 0],\n [0, 0, 1]]\n game_logic.set_board(board)\n assert game_logic.check_win() == 1, \"test 7: main diagonal win 1\"\n\n board = [[0, 0, 1],\n [0, 1, 0],\n [1, 0, 0]]\n game_logic.set_board(board)\n assert game_logic.check_win() == 1, \"test 8: additional diagonal win 1\"\n\n board = [[0, 0, 2],\n [0, 2, 0],\n [2, 0, 0]]\n game_logic.set_board(board)\n assert game_logic.check_win() == 2, \"test 9: additional diagonal win 2\"\n\n board = [[1, 1, 2],\n [1, 2, 1],\n [2, 1, 1]]\n game_logic.set_board(board)\n assert game_logic.check_win() == 2, \"test 10: additional diagonal win 2\"\n\n board = [[1, 1, 2],\n [1, 2, 1],\n [0, 1, 1]]\n game_logic.set_board(board)\n assert game_logic.check_win() == 0, \"test 11: fight win 0\"\n\n print(\"All tests passed\")\n","repo_name":"N1k0lay78/one_thread_server","sub_path":"TicTacToeGameLogic.py","file_name":"TicTacToeGameLogic.py","file_ext":"py","file_size_in_byte":4900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15860760759","text":"import bpy\nfrom bpy.types import Panel\nfrom bpy.app.translations import contexts as i18n_contexts\nfrom rna_prop_ui import PropertyPanel\nfrom bpy_extras.node_utils import find_node_input\n\n\nclass WorldButtonsPanel:\n bl_space_type = 'PROPERTIES'\n bl_region_type = 'WINDOW'\n bl_context = \"world\"\n # COMPAT_ENGINES must be defined in each subclass, external engines can add themselves here\n\n @classmethod\n def poll(cls, context):\n return (context.world and context.engine in cls.COMPAT_ENGINES)\n\n\nclass WORLD_PT_context_world(WorldButtonsPanel, Panel):\n bl_label = \"\"\n bl_options = {'HIDE_HEADER'}\n COMPAT_ENGINES = {\n 'BLENDER_RENDER',\n 'BLENDER_EEVEE',\n 'BLENDER_EEVEE_NEXT',\n 'BLENDER_WORKBENCH',\n }\n\n @classmethod\n def poll(cls, context):\n return (context.engine in cls.COMPAT_ENGINES)\n\n def draw(self, context):\n layout = self.layout\n\n scene = context.scene\n world = context.world\n space = context.space_data\n\n if scene:\n layout.template_ID(scene, \"world\", new=\"world.new\")\n elif world:\n layout.template_ID(space, \"pin_id\")\n\n\nclass EEVEE_WORLD_PT_mist(WorldButtonsPanel, Panel):\n bl_label = \"Mist Pass\"\n bl_options = {'DEFAULT_CLOSED'}\n COMPAT_ENGINES = {'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT'}\n\n @classmethod\n def poll(cls, context):\n engine = context.engine\n return context.world and (engine in 
cls.COMPAT_ENGINES)\n\n def draw(self, context):\n layout = self.layout\n layout.use_property_split = True\n\n world = context.world\n\n col = layout.column(align=True)\n col.prop(world.mist_settings, \"start\")\n col.prop(world.mist_settings, \"depth\")\n\n col = layout.column()\n col.prop(world.mist_settings, \"falloff\")\n\n\nclass WORLD_PT_custom_props(WorldButtonsPanel, PropertyPanel, Panel):\n COMPAT_ENGINES = {\n 'BLENDER_RENDER',\n 'BLENDER_EEVEE',\n 'BLENDER_EEVEE_NEXT',\n 'BLENDER_WORKBENCH',\n }\n _context_path = \"world\"\n _property_type = bpy.types.World\n\n\nclass EEVEE_WORLD_PT_surface(WorldButtonsPanel, Panel):\n bl_label = \"Surface\"\n COMPAT_ENGINES = {'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT'}\n\n @classmethod\n def poll(cls, context):\n engine = context.engine\n return context.world and (engine in cls.COMPAT_ENGINES)\n\n def draw(self, context):\n layout = self.layout\n\n world = context.world\n\n layout.prop(world, \"use_nodes\", icon='NODETREE')\n layout.separator()\n\n layout.use_property_split = True\n\n if world.use_nodes:\n ntree = world.node_tree\n node = ntree.get_output_node('EEVEE')\n\n if node:\n input = find_node_input(node, \"Surface\")\n if input:\n layout.template_node_view(ntree, node, input)\n else:\n layout.label(text=\"Incompatible output node\")\n else:\n layout.label(text=\"No output node\")\n else:\n layout.prop(world, \"color\")\n\n\nclass EEVEE_WORLD_PT_volume(WorldButtonsPanel, Panel):\n bl_label = \"Volume\"\n bl_translation_context = i18n_contexts.id_id\n bl_options = {'DEFAULT_CLOSED'}\n COMPAT_ENGINES = {'BLENDER_EEVEE'}\n\n @classmethod\n def poll(cls, context):\n engine = context.engine\n world = context.world\n return world and world.use_nodes and (engine in cls.COMPAT_ENGINES)\n\n def draw(self, context):\n layout = self.layout\n\n world = context.world\n ntree = world.node_tree\n node = ntree.get_output_node('EEVEE')\n\n layout.use_property_split = True\n\n if node:\n input = find_node_input(node, \"Volume\")\n if input:\n layout.template_node_view(ntree, node, input)\n else:\n layout.label(text=\"Incompatible output node\")\n else:\n layout.label(text=\"No output node\")\n\n\nclass EEVEE_WORLD_PT_probe(WorldButtonsPanel, Panel):\n bl_label = \"Light Probe\"\n bl_translation_context = i18n_contexts.id_id\n bl_options = {'DEFAULT_CLOSED'}\n COMPAT_ENGINES = {'BLENDER_EEVEE_NEXT'}\n\n @classmethod\n def poll(cls, context):\n engine = context.engine\n world = context.world\n return world and (engine in cls.COMPAT_ENGINES)\n\n def draw(self, context):\n layout = self.layout\n\n world = context.world\n\n layout.use_property_split = True\n layout.prop(world, \"probe_resolution\")\n\n\nclass WORLD_PT_viewport_display(WorldButtonsPanel, Panel):\n bl_label = \"Viewport Display\"\n bl_options = {'DEFAULT_CLOSED'}\n bl_order = 10\n\n @classmethod\n def poll(cls, context):\n return context.world\n\n def draw(self, context):\n layout = self.layout\n layout.use_property_split = True\n world = context.world\n layout.prop(world, \"color\")\n\n\nclasses = (\n WORLD_PT_context_world,\n EEVEE_WORLD_PT_surface,\n EEVEE_WORLD_PT_volume,\n EEVEE_WORLD_PT_mist,\n EEVEE_WORLD_PT_probe,\n WORLD_PT_viewport_display,\n WORLD_PT_custom_props,\n)\n\nif __name__ == \"__main__\": # only for live edit.\n from bpy.utils import register_class\n for cls in classes:\n 
register_class(cls)\n","repo_name":"blender/blender","sub_path":"scripts/startup/bl_ui/properties_world.py","file_name":"properties_world.py","file_ext":"py","file_size_in_byte":5307,"program_lang":"python","lang":"en","doc_type":"code","stars":10105,"dataset":"github-code","pt":"61"} +{"seq_id":"12488762433","text":"import requests\nimport csv,io\nimport json\n\n\ndef getKind(data):\n #print(data)\n if 'shops,squares,malls' in data:\n val= 'Shops,Squares,Malls'\n elif 'shops,malls,tourist_facilities' in data:\n val= 'Shops,Malls,Tourist facilities'\n elif 'bridges,architecture,interesting_places,other_bridges' in data:\n val= 'Bridges,Architecture'\n elif 'towers,architecture,interesting_places,other_towers'in data:\n val= 'Towers,Architecture'\n elif 'museums'in data:\n val= 'Museums,Cultural'\n\n elif 'skyscrapers,architecture,interesting_places'in data:\n val= 'Skyscrapers,Architecture'\n\n elif 'other_temples'in data:\n val= 'Religion'\n elif 'buddhist_temples' in data:\n val= 'Religion,Buddhist Temple'\n elif 'churches'in data:\n val= 'Religion,Churches'\n elif 'mosques'in data:\n val= 'Religion,Mosque'\n\n elif 'hindu_temples'in data:\n val= 'Religion, Hindu Temple'\n elif 'other,unclassified_objects,interesting_places,tourist_object'in data:\n val='Other,Tourist'\n\n\n elif 'lighthouses,architecture,interesting_places'in data:\n val='Lighthouses'\n\n\n elif 'view_points,other,interesting_places'in data:\n val= 'Other'\n\n elif 'cinemas'in data:\n val='Cinemas'\n\n\n elif 'hotels'in data:\n val='Hotel'\n\n\n elif 'banks'in data:\n val='Banks'\n\n\n elif 'zoos'in data:\n val='Zoo'\n\n elif 'historic,monuments_and_memorials'in data:\n val='Historic,Monuments and Memorials'\n\n\n elif 'other_theatres'in data:\n val='Theatres'\n\n\n elif 'gardens_and_parks'in data:\n val= 'Gardens and Parks'\n\n else:\n val= 'Interesting places'\n\n return val\n\ndef getAttraction(opentripmap_api_key,radius,limit,offset,city_lon,city_lat):\n coordinates=[]\n location_dict={}\n attaction_res='https://api.opentripmap.com/0.1/en/places/radius?apikey={}&radius={}000&limit={}&offset={}&lon={}&lat={}&rate=2'\n attaction_res=requests.get(attaction_res.format(opentripmap_api_key,radius,limit,offset,city_lon,city_lat)).json()\n #print(attaction_res)\n for key in attaction_res['features']:\n value=key['properties']['kinds']\n name=key['properties']['name']\n coordinates.append(key[\"geometry\"]['coordinates'])\n #print(name)\n #location_dict.append(name)\n location_dict[key['properties']['name']]=[]\n message={'kind':getKind(value),'name':key['properties']['name'],\"xid\":key['properties']['xid']}\n location_dict[name].append(message)\n #print(location_dict)\n context={\n \"location_dict\":location_dict,\n \"coordinates\":coordinates\n }\n return location_dict,coordinates\n","repo_name":"ShanakaYasendra/finalproject","sub_path":"country/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15076383729","text":"from selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\nfrom send_email import enviar_email\nfrom time import sleep\n\ndef iniciar(cpf, email_origem, email_destino, senha):\n\n url = 'https://centrodeselecao.ufg.br/fiscalizacao/sistema/confirmacao/1_confirmacao_chamada.php'\n\n\n while True:\n chrome_options = webdriver.ChromeOptions()\n chrome_options.add_argument('--no-sandbox')\n chrome_options.add_argument('--window-size=1420,1080')\n 
chrome_options.add_argument('--headless')\n chrome_options.add_argument('--disable-gpu')\n browser = webdriver.Chrome(\"/usr/bin/chromedriver\",chrome_options=chrome_options)\n\n # Access the site\n browser.get(url)\n\n # Authentication\n campocpf = browser.find_element_by_id('cpf')\n campocpf.send_keys(cpf)\n acessar = browser.find_element_by_class_name('btn')\n acessar.click()\n\n # Check for available public exams\n try:\n confirmar_interesse = browser.find_element_by_class_name('btn-primary')\n confirmar_interesse.click()\n\n # If there are public exams available\n print ('Ha concursos publicos disponiveis!')\n enviar_email(email_origem, email_destino, senha, texto=\"Mais informações em \"+url, assunto=\"UFG - Novo concurso disponível!\")\n\n #Sleep for 2 days\n sleep(172800)\n\n #If no public exam is available\n except NoSuchElementException:\n print ('Nao ha concurso publico disponivel!')\n browser.quit()\n sleep(900)\n\niniciar(cpf=\"99999999999\",email_origem=\"seuemail@dominio.com\",email_destino=\"emaildestino@dominio.com\",senha=\"senha_do_email\")\n\n","repo_name":"JpCabral/csufgconfirm","sub_path":"csufgconfirm.py","file_name":"csufgconfirm.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"72710109635","text":"#!/usr/bin/env python\n\"\"\"\n CleverCSS\n ~~~~~~~~~\n\n The Pythonic way of CSS files.\n\n To convert a CleverCSS file into a normal css file just call the `convert`\n function in the clevercss module. It's that easy :-)\n \"\"\"\n\nfrom clevercss import consts\nfrom clevercss import utils\nfrom clevercss import expressions\nfrom clevercss import engine\n\nVERSION = '0.2.2.dev'\n\nclass Context(dict):\n def __init__(self, *args, **kwargs):\n if args == (None,):\n args = ()\n super(Context, self).__init__(*args, **kwargs)\n\ndef convert(source, context=None, fname=None, minified=False):\n \"\"\"Convert CleverCSS text into normal CSS.\"\"\"\n context = Context(context)\n context.minified = minified\n return engine.Engine(source, fname=fname).to_css(context)\n\n__all__ = ['convert', 'VERSION', '__doc__']\n\n# vim: et sw=4 sts=4\n","repo_name":"clevercss/clevercss","sub_path":"clevercss/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":100,"dataset":"github-code","pt":"61"} +{"seq_id":"39098350031","text":"import json\n\nimport pandas as pd\nfrom pandas import DataFrame\n\nfrom refit.flink.dist import functions\nfrom refit.flink.dist.feature_extractor import FeatureExtractor\n\ndf = pd.read_csv('data/demo_df.csv')\ndf['project_guid'] = df.sensor_id.apply(lambda _: 'keyboard_cat')\ndf['doubles'] = df.sensor_id.apply(lambda _: json.dumps({'test_double': 0.1}))\ndf['integers'] = df.sensor_id.apply(lambda _: json.dumps({'test_integer': 1}))\ndf['strings'] = df.sensor_id.apply(lambda _: json.dumps({'test_string': \"asdf\"}))\ndf['labels'] = df.sensor_id.apply(lambda _: json.dumps({'test_label': \"some_label\"}))\ndf['datasources'] = df.sensor_id.apply(lambda _: json.dumps({}))\n\n\nclass MockFeatureExtractor(FeatureExtractor):\n def __init__(self):\n self.project_guid = '__NONE__'\n\n def extract_doubles(self, df: DataFrame) -> DataFrame:\n df['mock_double'] = df.sensor_id.apply(lambda _: 0.5)\n return df\n\n def extract_strings(self, df: DataFrame) -> DataFrame:\n df['mock_string'] = df.sensor_id.apply(lambda _: 'keyboard_cat')\n return df\n\n def extract_integers(self, df: DataFrame) -> 
DataFrame:\n df['mock_integer'] = df.sensor_id.apply(lambda _: 11)\n return df\n\n def extract_labels(self, df: DataFrame) -> DataFrame:\n df['mock_label'] = df.sensor_id.apply(lambda _: 'mock_label')\n return df\n\n def extract_datasources(self, df: DataFrame) -> DataFrame:\n df['mock_ds'] = df.sensor_id.apply(lambda _: 'ds')\n return df\n\n\nproject_guid = df.project_guid\nsensor_id = df.sensor_id\ntimestamp = df.timestamp\ndoubles = df.doubles\nstrings = df.strings\nintegers = df.integers\nlabels = df.labels\ndatasources = df.datasources\n\n\ndef test_fetch_labels():\n enriched = functions._labels(\n project_guid=project_guid,\n sensor_id=sensor_id,\n timestamp=timestamp,\n doubles=doubles,\n strings=strings,\n integers=integers,\n labels=labels,\n datasources=datasources,\n feature_extractor=MockFeatureExtractor()\n )\n enriched = [json.loads(item) for item in enriched]\n assert all(['mock_label' in item.keys() and item['mock_label'] == 'mock_label' for item in enriched])\n assert len(enriched) > 0\n\n\ndef test_fetch_integers():\n enriched = functions._integers(\n project_guid=project_guid,\n sensor_id=sensor_id,\n timestamp=timestamp,\n doubles=doubles,\n strings=strings,\n integers=integers,\n labels=labels,\n datasources=datasources,\n feature_extractor=MockFeatureExtractor()\n )\n enriched = [json.loads(item) for item in enriched]\n assert all(['mock_integer' in item.keys() and item['mock_integer'] == 11 for item in enriched])\n assert len(enriched) > 0\n\n\ndef test_fetch_strings():\n enriched = functions._strings(\n project_guid=project_guid,\n sensor_id=sensor_id,\n timestamp=timestamp,\n doubles=doubles,\n strings=strings,\n integers=integers,\n labels=labels,\n datasources=datasources,\n feature_extractor=MockFeatureExtractor()\n )\n enriched = [json.loads(item) for item in enriched]\n assert all(['mock_string' in item.keys() and item['mock_string'] == 'keyboard_cat' for item in enriched])\n assert len(enriched) > 0\n\n\ndef test_fetch_doubles():\n enriched = functions._doubles(\n project_guid=project_guid,\n sensor_id=sensor_id,\n timestamp=timestamp,\n doubles=doubles,\n strings=strings,\n integers=integers,\n labels=labels,\n datasources=datasources,\n feature_extractor=MockFeatureExtractor()\n )\n enriched = [json.loads(item) for item in enriched]\n assert all(['mock_double' in item.keys() and item['mock_double'] == 0.5 for item in enriched])\n assert len(enriched) > 0\n\n\ndef test_fetch_datasources():\n enriched = functions._datasources(\n project_guid=project_guid,\n sensor_id=sensor_id,\n timestamp=timestamp,\n doubles=doubles,\n strings=strings,\n integers=integers,\n labels=labels,\n datasources=datasources,\n feature_extractor=MockFeatureExtractor()\n )\n enriched = [json.loads(item) for item in enriched]\n assert all(['mock_ds' in item.keys() and item['mock_ds'] == 'ds' for item in enriched])\n assert len(enriched) > 0\n","repo_name":"refit-ml/refit","sub_path":"training/test/test_flink_functions.py","file_name":"test_flink_functions.py","file_ext":"py","file_size_in_byte":4343,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"31141722650","text":"#!/usr/bin/env python3\n# combining subsampled sam files\n\nfrom collections import defaultdict\nimport subprocess\n\n\npath = \"/Volumes/Temp/Lukas\"\n#meta = open(path + \"/consensus_te/data/meta_tables/gdl_table\", 'r') #gdl\nmeta = open(path + \"/consensus_te/data/meta_tables/dpgp/dpgp2\", 'r') #dpgp2\n\n\ndef grouping(meta):\n\tgroups = 
defaultdict(list)\n\t\n\tfor line in meta:\n\t\tif line.startswith(\"shortID\"):\n\t\t\tcontinue\n\t\tl = line.split(' ')\n\t\n\t\t#group_id = list(l[0])[0] #for gdl\n\t\t#srr = l[2] #gdl\n\t\tgroup_id = l[0][0:2] #for dpgp2\n\t\tsrr = l[3] #dpgp2\n\t\t\n\t\tgroups[group_id].append(srr)\n\treturn(groups)\n\t\t\n\t\t\ndef merge(groups):\n\tfiles = list()\n\tfor g in groups:\n\t\tg_file = open(g + \".sam\", 'w+')\n\t\t\n\t\tsrrs = list()\n\t\tsrrs.append('cat')\n\t\tsrrs.append(groups[g][0] + suffix + '.header')\n\t\tfor s in groups[g]:\n\t\t\tsrrs.append(s + suffix + '.sam.subsample')\n\t\t\t\n\t\tcmd = ' '.join(srrs)\n\t\tmerge = subprocess.run(cmd, stdout=g_file, stderr=subprocess.PIPE, shell=True)\n\t\t\t\n\t\tg_file.close()\n\t\tfiles.append(g + \".sam\")\n\treturn(files)\n\n\nsuffix=\".allte.sort.bam\"\n\ngroup_dict = grouping(meta)\n\nfile_list = merge(group_dict)\nprint(file_list)\n\nmeta.close()\n\n","repo_name":"W-L/various_utils","sub_path":"poolseq_subsample_bam.py","file_name":"poolseq_subsample_bam.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23561192481","text":"# Tidy Numbers\n# suhas kashyap\n# kashyap 07\n# kashyapsuhas07@gmail.com\n\ndef foo(num):\n\tn = 0\n\tfor m in range(0, len(num)):\n\t\tn = n * 10 + int(num[m])\n\treturn n\n\n\nif __name__ == '__main__':\n\tT = int(input())\t# T test cases\n\tfor i in range(0, T):\n\t\tnum = list(input())\n\n\t\tif len(num) == 1:\n\t\t\tprint('Case #', i+1, ': ', num[0], sep='')\n\t\t\tcontinue\n\t\t# sweep right to left: when a digit is smaller than its left neighbour,\n\t\t# decrement the left digit and turn the rest of the number into 9s\n\t\tfor j in range(len(num) - 1, 0, -1):\n\t\t\tif num[j] < num[j-1]:\n\t\t\t\tnum[j-1] = str(int(num[j-1]) - 1)\n\t\t\t\tfor k in range(j, len(num)):\n\t\t\t\t\tnum[k] = '9'\n\t\tprint('Case #', i+1, ': ', foo(num), sep='')","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/4308.py","file_name":"4308.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20254461496","text":"from .viaplay import Viaplay\n\nimport xbmc\nimport xbmcvfs\nimport xbmcgui\nimport xbmcplugin\nimport inputstreamhelper\nfrom xbmcaddon import Addon\n\n\nclass KodiHelper(object):\n def __init__(self, base_url=None, handle=None):\n addon = self.get_addon()\n self.base_url = base_url\n self.handle = handle\n self.addon_path = xbmcvfs.translatePath(addon.getAddonInfo('path'))\n self.addon_profile = xbmcvfs.translatePath(addon.getAddonInfo('profile'))\n self.addon_name = addon.getAddonInfo('id')\n self.addon_version = addon.getAddonInfo('version')\n self.language = addon.getLocalizedString\n self.logging_prefix = '[%s-%s]' % (self.addon_name, self.addon_version)\n if not xbmcvfs.exists(self.addon_profile):\n xbmcvfs.mkdir(self.addon_profile)\n if self.get_setting('first_run'):\n self.get_addon().openSettings()\n self.set_setting('first_run', 'false')\n self.vp = Viaplay(self.addon_profile, self.get_country_code(), True)\n\n def get_addon(self):\n \"\"\"Returns a fresh addon instance.\"\"\"\n return Addon()\n\n def get_setting(self, setting_id):\n addon = self.get_addon()\n setting = addon.getSetting(setting_id)\n if setting == 'true':\n return True\n elif setting == 'false':\n return False\n else:\n return setting\n\n def 
set_setting(self, key, value):\n return self.get_addon().setSetting(key, value)\n\n def log(self, string):\n msg = '%s: %s' % (self.logging_prefix, string)\n xbmc.log(msg=msg, level=xbmc.LOGDEBUG)\n\n def get_country_code(self):\n country_id = self.get_setting('site')\n if country_id == '0':\n country_code = 'se'\n elif country_id == '1':\n country_code = 'dk'\n elif country_id == '2':\n country_code = 'no'\n else:\n country_code = 'fi'\n\n return country_code\n\n def dialog(self, dialog_type, heading, message=None, options=None, nolabel=None, yeslabel=None):\n dialog = xbmcgui.Dialog()\n if dialog_type == 'ok':\n dialog.ok(heading, message)\n elif dialog_type == 'yesno':\n return dialog.yesno(heading, message, nolabel=nolabel, yeslabel=yeslabel)\n elif dialog_type == 'select':\n ret = dialog.select(heading, options)\n if ret > -1:\n return ret\n else:\n return None\n\n def log_out(self):\n confirm = self.dialog('yesno', self.language(30042), self.language(30043))\n if confirm:\n self.vp.log_out()\n # send Kodi back to home screen\n xbmc.executebuiltin(\"Action(Back,%s)\" % xbmcgui.getCurrentWindowId())\n\n def authorize(self):\n try:\n self.vp.validate_session()\n return True\n except self.vp.ViaplayError as error:\n if not error.value == b'PersistentLoginError' or error.value == b'MissingSessionCookieError':\n raise\n else:\n return self.device_registration()\n\n def device_registration(self):\n \"\"\"Presents a dialog with information on how to activate the device.\n Attempts to authorize the device using the interval returned by the activation data.\"\"\"\n activation_data = self.vp.get_activation_data()\n message = self.language(30039).format(activation_data['verificationUrl'], activation_data['userCode'])\n dialog = xbmcgui.DialogProgress()\n xbmc.sleep(200) # small delay to prevent DialogProgress from hanging\n dialog.create(self.language(30040), message)\n secs = 0\n expires = activation_data['expires']\n\n while not xbmc.Monitor().abortRequested() and secs < expires:\n try:\n self.vp.authorize_device(activation_data)\n dialog.close()\n return True\n except self.vp.ViaplayError as error:\n # raise all non-pending authorization errors\n if error.value == b'DeviceAuthorizationPendingError':\n secs += activation_data['interval']\n percent = int(100 * float(secs) / float(expires))\n dialog.update(percent, message)\n xbmc.Monitor().waitForAbort(activation_data['interval'])\n if dialog.iscanceled():\n dialog.close()\n return False\n elif error.value == b'DeviceAuthorizationNotFound': # time expired\n dialog.close()\n self.dialog('ok', self.language(30051), self.language(30052))\n return False\n else:\n dialog.close()\n raise\n\n dialog.close()\n return False\n\n def get_user_input(self, heading, hidden=False):\n keyboard = xbmc.Keyboard('', heading, hidden)\n keyboard.doModal()\n if keyboard.isConfirmed():\n query = keyboard.getText()\n self.log('User input string: %s' % query)\n else:\n query = None\n\n if query and len(query) > 0:\n return query\n else:\n return None\n\n def get_numeric_input(self, heading):\n dialog = xbmcgui.Dialog()\n numeric_input = dialog.numeric(0, heading)\n\n if len(numeric_input) > 0:\n return str(numeric_input)\n else:\n return None\n\n def add_item(self, title, url, folder=True, playable=False, info=None, art=None, content=False):\n addon = self.get_addon()\n listitem = xbmcgui.ListItem(label=title)\n\n if playable:\n listitem.setProperty('IsPlayable', 'true')\n folder = False\n if art:\n listitem.setArt(art)\n else:\n art = {\n 'icon': 
addon.getAddonInfo('icon'),\n 'fanart': addon.getAddonInfo('fanart')\n }\n listitem.setArt(art)\n if info:\n listitem.setInfo('video', info)\n if content:\n xbmcplugin.setContent(self.handle, content)\n\n xbmcplugin.addDirectoryItem(self.handle, url, listitem, folder)\n\n def eod(self):\n \"\"\"Tell Kodi that the end of the directory listing is reached.\"\"\"\n xbmcplugin.endOfDirectory(self.handle)\n\n def play(self, guid=None, url=None, pincode=None, tve='false'):\n if url and url != 'None':\n guid = self.vp.get_products(url)['products'][0]['system']['guid']\n try:\n stream = self.vp.get_stream(guid, pincode=pincode, tve=tve)\n except self.vp.ViaplayError as error:\n if error.value == b'MissingSessionCookieError':\n self.authorize()\n return\n if error.value == b'ParentalGuidancePinChallengeNeededError':\n if pincode:\n self.dialog(dialog_type='ok', heading=self.language(30033), message=self.language(30034))\n else:\n pincode = self.get_numeric_input(self.language(30032))\n if pincode:\n self.play(guid, pincode=pincode)\n return\n else:\n raise\n\n ia_helper = inputstreamhelper.Helper('mpd', drm='widevine')\n if ia_helper.check_inputstream():\n playitem = xbmcgui.ListItem(path=stream['mpd_url'])\n playitem.setContentLookup(False)\n playitem.setMimeType('application/xml+dash') # prevents HEAD request that causes 404 error\n playitem.setProperty('inputstream', 'inputstream.adaptive')\n playitem.setProperty('inputstream.adaptive.manifest_type', 'mpd')\n playitem.setProperty('inputstream.adaptive.manifest_update_parameter', 'full')\n playitem.setProperty('inputstream.adaptive.license_type', 'com.widevine.alpha')\n playitem.setProperty('inputstream.adaptive.license_key',\n stream['license_url'].replace('{widevineChallenge}', 'B{SSM}') + '|||JBlicense')\n if 'subtitles' in stream:\n playitem.setSubtitles(self.vp.download_subtitles(stream['subtitles']))\n xbmcplugin.setResolvedUrl(self.handle, True, listitem=playitem)\n\n def ia_settings(self):\n \"\"\"Open InputStream Adaptive settings.\"\"\"\n ia_addon = Addon('inputstream.adaptive')\n ia_addon.openSettings()\n","repo_name":"emilsvennesson/kodi-viaplay","sub_path":"resources/lib/kodihelper.py","file_name":"kodihelper.py","file_ext":"py","file_size_in_byte":8453,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"61"} +{"seq_id":"21129542649","text":"import numpy as np\nimport tensorflow as tf\nimport gym\nimport os\n\nenv = gym.make('CartPole-v0')\nenv.reset()\nrandom_episodes = 0\nreward_sum = 0\nenv.render()\n# while random_episodes < 20:\n# env.render()\n# observation, reward, done, _ = env.step(np.random.randint(0, 2))\n# reward_sum += reward\n# if done:\n# random_episodes += 1\n# print(\"Reward for this episode was:\", reward_sum)\n# reward_sum = 0\n# env.reset()\n\n# Hyper Parameters\nH = 50\nbatch_size = 25\nlr=1e-1\nlearning_rate = tf.placeholder(tf.float32,name=\"learning_rate\")\nD = 4\ngamma = 0.99\n\nobservations = tf.placeholder(tf.float32, [None, D], name=\"input_x\")\nW1 = tf.get_variable(\"W1\", shape=[D, H], initializer=tf.contrib.layers.xavier_initializer())\nlayer1 = tf.nn.relu(tf.matmul(observations, W1))\nW2 = tf.get_variable(\"W2\", shape=[H, 1], initializer=tf.contrib.layers.xavier_initializer())\nscore = tf.matmul(layer1, W2)\nprobability = tf.nn.sigmoid(score)\n\n# 定义其他部分\ntvars = tf.trainable_variables()\ninput_y = tf.placeholder(tf.float32, [None, 1], name=\"input_y\")\nadvantages = tf.placeholder(tf.float32, name=\"reward_signal\")\n\n# 定义损失函数\nloglik = tf.log(input_y * 
(input_y - probability) + (1 - input_y) * (input_y + probability))\nloss = -tf.reduce_mean(loglik * advantages)\nnewGrads = tf.gradients(loss, tvars)\n\nxs, ys, drs = [], [], []\nreward_sum = 0\nepisode_number = 1\ntotal_episodes = 10000\n\n# 为了减少奖励函数中的噪声,累积一系列的梯度之后才会更新神经网络的参数\nadam = tf.train.AdamOptimizer(learning_rate=learning_rate)\nW1Grad = tf.placeholder(tf.float32, name=\"batch_grad1\")\nW2Grad = tf.placeholder(tf.float32, name=\"batch_grad2\")\nbatchGrad = [W1Grad, W2Grad]\nupdateGrads = adam.apply_gradients(zip(batchGrad, tvars))\n\n\ndef discount_rewards(r):\n discounted_r = np.zeros_like(r)\n running_add = 0\n for t in reversed(range(r.size)):\n running_add = running_add * gamma + r[t]\n discounted_r[t] = running_add\n return discounted_r\n\n\nwith tf.Session() as sess:\n # 模型设置\n saver = tf.train.Saver()\n writer = tf.summary.FileWriter('./Model/Graph')\n tf.summary.scalar(\"loss\", loss)\n merged_summaries = tf.summary.merge_all()\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n\n # 从检测点恢复模型\n episode_number = 1\n FilePath = os.path.dirname(__file__) + '/Model/'\n ckpt = tf.train.get_checkpoint_state(FilePath)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n episode_number = int(ckpt.model_checkpoint_path.rsplit('-', 1)[1])\n print(\"Read Model - \", episode_number)\n\n rendering = False\n init = tf.global_variables_initializer()\n sess.run(init)\n observation = env.reset()\n\n gradBuffer = sess.run(tvars)\n for ix, grad in enumerate(gradBuffer):\n gradBuffer[ix] = grad * 0\n\n while episode_number <= total_episodes:\n if reward_sum / batch_size > 100 or rendering == True:\n env.render()\n rendering = True\n x = np.reshape(observation, [1, D])\n\n tfprob = sess.run(probability, feed_dict={observations: x,learning_rate:lr})\n action = 1 if np.random.uniform() < tfprob else 0\n\n xs.append(x)\n y = 1 - action\n ys.append(y)\n\n observation, reward, done, info = env.step(action)\n reward_sum += reward\n drs.append(reward)\n\n if done:\n episode_number += 1\n epx = np.vstack(xs)\n epy = np.vstack(ys)\n epr = np.vstack(drs)\n xs, ys, drs = [], [], []\n\n discounted_epr = discount_rewards(epr)\n discounted_epr -= np.mean(discounted_epr)\n discounted_epr /= np.std(discounted_epr)\n\n tGrad = sess.run(\n newGrads,\n feed_dict={\n observations: epx,\n input_y: epy,\n advantages: discounted_epr,\n learning_rate:lr\n }\n )\n for ix, grad in enumerate(tGrad):\n gradBuffer[ix] += grad\n\n if episode_number % batch_size == 0:\n sess.run(\n updateGrads,\n feed_dict={\n W1Grad: gradBuffer[0],\n W2Grad: gradBuffer[1],\n learning_rate: lr\n }\n )\n for ix, grad in enumerate(gradBuffer):\n gradBuffer[ix] = grad * 0\n\n print('Average reward for episode %d : %f.' 
% (episode_number, reward_sum / batch_size))\n if reward_sum/batch_size == 200 and lr==0.1:\n lr=0.01\n\n # 保存模型\n saver.save(\n sess,\n FilePath,\n global_step=episode_number\n )\n summary = sess.run(\n merged_summaries,\n feed_dict={\n observations: epx,\n input_y: epy,\n advantages: discounted_epr,\n learning_rate: lr\n }\n )\n writer.add_summary(summary, global_step=episode_number)\n\n if reward_sum / batch_size > 200:\n print('Task solved in', episode_number, 'episodes!')\n break\n\n reward_sum = 0\n\n observation = env.reset()\n\n coord.request_stop()\n coord.join(threads)\n","repo_name":"ZGCTroy/Tensorflow-Learning","sub_path":"7、强化学习/Policy Network/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5631,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"26304812192","text":"import fmm as _fmm\nimport matplotlib.pyplot as _plt\nimport numpy as _np\n\nfrom sanitycheck import cest\n\n_twopi = 2*_np.pi\n\n\ndef _transpose_argsort_indices(I):\n '''Compute the inverse permutation of I, where I is a list of indices\n (a permutation) computed using np.argsort.\n\n '''\n J = _np.zeros(len(I), dtype=_np.int)\n for i, j in enumerate(I):\n J[j] = i\n return J\n\n\ndef _test(y, N, index_ratio):\n '''This test is necessary to check if we've passed a point that we\n essentially already know the value of (i.e. an evaluation point\n that's nearly equal to a source point). We could probably handle\n this correctly in the FMM itself, but for now, a test like this\n should give us approximately what we want... That is, no\n interpolates that are NaN, +/-Inf, or incorrectly equal to zero.\n\n '''\n return _np.abs(_np.mod(y*(N/_twopi), index_ratio)) < 1e-13\n\n\ndef _extend_X(X, n):\n '''Periodically extend the grid points in X that lie in [0, 2*pi) to\n [-2*pi*n, 2*pi*(n + 1)).\n\n '''\n return _np.concatenate([X + _twopi*l for l in range(-n, n + 1)])\n\n\ndef _get_extended_alt_sign_F_and_sum(F, n, N):\n '''Modulate F with (-1)^n and periodically extend the result to\n [-2*pi*n, 2*pi*(n + 1)). Also, return the sum of one period of the\n modulated F as the second return value.\n\n '''\n Fas = _np.multiply(F, _np.power(-1, range(N)))\n return _np.tile(Fas, 2*n + 1), _np.sum(Fas)\n\n\ndef _get_checkpoints(q):\n '''Compute q checkpoint pairs. The first point in each checkpoint pair\n is uniformly distributed in [2*pi, 4*pi). The second is simply\n that point but shifted into [0, 2*pi).\n\n '''\n Yc = _np.random.uniform(_twopi, 2*_twopi, q)\n return Yc, Yc - _twopi\n\n\ndef _R(Y, m):\n 'Evaluate the mth Cauchy regular basis function R.'\n return _np.power(Y - _np.pi, m)\n\n\ndef _get_phinear_and_f(Y, Yc, Yc_tilde, n, X_per, Fas_per, L, p):\n '''This is a workhorse function that uses the FMM to compute both\n phinear and the difference in the checkpoint values. These are\n returned as the first and second return values.\n\n '''\n Ycat = _np.concatenate((Y, Yc, Yc_tilde))\n I = _np.argsort(Ycat)\n J = _transpose_argsort_indices(I)\n dom = (-_twopi*n, _twopi*(n + 1))\n fmm = _fmm.fmm1d_cauchy_double\n V = fmm(X_per, Ycat[I], Fas_per, L, p, scaled_domain=dom)[J]\n i = len(Y)\n j = i + len(Yc)\n k = j + len(Yc_tilde)\n return V[:i], V[j:k] - V[i:j]\n\n\ndef _get_phifar(Y, Yc, Yc_tilde, f, p, q):\n '''Use least squares collocation to compute phifar. 
TODO: this needs\n to be updated and replaced with the Vandermonde approach.\n\n '''\n A = _np.zeros((q, p))\n for m in range(p):\n A[:, m] = _R(Yc, m) - _R(Yc_tilde, m)\n C = _np.linalg.lstsq(A, f)[0]\n C[0] = cest(1e-6)\n phifar = _np.zeros(Y.shape, dtype=Y.dtype)\n for j in range(len(Y)):\n phifar[j] = _np.sum([C[m]*(Y[j] - _np.pi)**m for m in range(p)])\n return phifar\n\n\ndef _finish_interpolation(Y, F, phi, K, N, Fas_sum):\n '''This function takes care of the last couple steps: it modulates the\n result by the sine-based factor and sets any values that were too\n close source points to the corresponding weight (i.e. function\n value).\n\n '''\n G = [(-Fas_sum*_np.cos(K*Y[j]) + 2*_np.sin(K*Y[j])*phi[j])/N\n for j in range(len(Y))]\n index_ratio = len(Y)/(2*K)\n for i, y in enumerate(Y):\n if _test(y, len(Y), index_ratio):\n G[i] = F[int(i/index_ratio)]\n return G\n\n\ndef inufft(F, K, Y, L, p, n, q):\n '''Arguments:\n\n F: samples of a K-bandlimited function spaced equally along [0, 2pi).\n K: the bandlimit of the sampled function.\n Y: a list of target points in [0, 2pi).\n L: the depth of the FMM used in interpolation.\n p: the truncation number of the FMM.\n n: the 'radius' of the neighborhood around [0, 2pi) -- i.e. determining\n the intervals [-2pi*n, 0) and [2pi, 2pi(n+1)).\n q: the number of checkpoint pairs.\n\n '''\n N = len(F)\n X = _np.linspace(0, _twopi, N, endpoint=False)\n X_per = _extend_X(X, n)\n Fas_per, Fas_sum = _get_extended_alt_sign_F_and_sum(F, n, N)\n Yc, Yc_tilde = _get_checkpoints(q)\n phinear, f = _get_phinear_and_f(Y, Yc, Yc_tilde, n, X_per, Fas_per, L, p)\n phifar = _get_phifar(Y, Yc, Yc_tilde, f, p, q)\n phi = phinear + phifar\n return _finish_interpolation(Y, F, phi, K, N, Fas_sum)\n\n\nif __name__ == '__main__':\n from testseries import semicircle\n from util import get_X\n\n K = 10\n J = 2*K\n X = get_X(K)\n Y = _np.sort(_np.random.uniform(0, _twopi, J))\n F = semicircle(X, K).real\n L = 4\n n = 3\n p = 4\n q = 2*J\n G = inufft(F, K, Y, L, p, n, q)\n\n _plt.plot(Y, G)\n _plt.xlim(0, _twopi)\n _plt.ylim(0, _np.pi)\n","repo_name":"sampotter/nufft","sub_path":"src/py/nufft.py","file_name":"nufft.py","file_ext":"py","file_size_in_byte":4803,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"36997960376","text":"\"\"\"\nWrite a function `countConstruct(target, wordBank)` that accepts a target string and an array of strings.\n\nThe function should return the number of ways that the `target` can be constructed by concatenating\nelements of the `wordBank` array.\n\"\"\"\n\n\ndef count_construct(target, word_bank):\n if target == \"\":\n return 1\n\n total_count = 0\n for word in word_bank:\n if target.startswith(word):\n total_count = total_count + count_construct(target[len(word):], word_bank)\n\n return total_count\n\n\nif __name__ == \"__main__\":\n assert count_construct(\"abcdef\", [\"ab\", \"de\", \"cdef\", \"abc\", \"f\"]) == 2\n","repo_name":"bkpathak/Algorithms-collections","sub_path":"src/DP/countConstruct.py","file_name":"countConstruct.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10711280314","text":"from bs4 import BeautifulSoup\nfrom time import sleep\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nimport re\n\n\n# As we are solving the octordle sequence game we do not have to worry\n# about the 
multiple different boards at once and we can focus one at a time\n\nclass Game:\n # Initialises the webdriver as well as the shared variables across the functions\n # driver is the actual browser\n # guessNum and guessResults are the number of guess thus far and the content of meaning of those guesses\n # board and boardNum correspond to the board we are currently solving\n def __init__(self):\n # URL = \"https://octordle.com/daily\"\n URL = \"https://octordle.com/daily-sequence\"\n # URL = \"https://octordle.com/free-sequence\"\n\n # Initialises the browser\n self.driver = webdriver.Safari()\n self.driver.get(URL)\n assert 'Octordle' in self.driver.title\n\n # Accepts the cookies on the page\n self.driver.find_element(By.CLASS_NAME, \"cookie__floating__buttons__button--accept\").click()\n\n # Initialising variable\n self.guessNum = 0\n self.guessResults = []\n self.guesses = []\n self.board_num = 1\n self.board = \"board-\" + str(self.board_num)\n\n # get_result is the function that reads the board and appends the latest guess to the results\n # returns result like [\"B,B,B,G,B\"]\n def get_result(self):\n # Gets the current board html\n soup = self.get_board()\n result = []\n\n # gets the current board row html from the board\n row1 = str(soup.find_all(\"div\", class_=\"board-row\")[self.guessNum])\n\n \"\"\"\n For the html of the website \"letter \" is always before the indicators for whether the current\n letter is a exact match/in the word. This is why we use this to find all 5 examples of this.\n\n Warning this may break if the website html changes - may be broken if the browser changes\n \"\"\"\n substr = \"letter \"\n search_res = [_.start() for _ in re.finditer(substr, row1)]\n\n for i in range(5):\n word = row1[search_res[i] + 7:search_res[i] + 12]\n if word == \"word-\":\n result.append(\"Y\")\n elif word == \"exact\":\n result.append(\"G\")\n else:\n result.append(\"B\")\n self.guessResults.append(result)\n self.guessNum += 1\n\n # get_board returns an html 'soup' of the board that is currently in question\n def get_board(self):\n elem = self.driver.find_element(By.ID, self.board)\n html = self.driver.execute_script(\"return arguments[0].innerHTML;\", elem)\n soup = BeautifulSoup(html, 'html.parser')\n soup.prettify()\n return soup\n\n # enter_guess adds the next guess to the game - guess has to be a five letter word\n def enter_guess(self, guess):\n elem = self.driver.find_element(By.ID, self.board)\n elem.send_keys(guess)\n self.guesses.append(guess)\n elem.send_keys(Keys.ENTER)\n self.get_result()\n\n # close_game finishes the game and closes the window\n def close_game(self):\n sleep(5)\n self.driver.close()\n\n # return_guess_res returns all the current guess results as an array e.g. [[\"B,B,B,G,B\"],[\"B,Y,B,G,B\"], ...]\n def return_guess_res(self):\n return self.guessResults\n\n # returns all the guessed words e.g. 
[\"trace\",\"sound\", ...]\n def return_guesses(self):\n return self.guesses\n\n # change_board changes to the next board number along and returns all the current guess results for that board\n def change_board(self):\n # Change the board number by 1\n self.board_num += 1\n self.board = \"board-\" + str(self.board_num)\n self.guessResults = []\n\n # Goes through the new board and adds each result to the array stopping if the current guess is correct\n guessNum = self.guessNum\n self.guessNum = 0\n for i in range(guessNum):\n self.get_result()\n if self.guessResults[i] == [\"G\", \"G\", \"G\", \"G\", \"G\"]:\n self.guessNum = guessNum\n break\n return self.guessResults","repo_name":"jaivas8/Octordle-Bot","sub_path":"ReadWebPage.py","file_name":"ReadWebPage.py","file_ext":"py","file_size_in_byte":4248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7108875416","text":"\nclass Item:\n def __init__(self, value, leftPointer, rightPointer):\n self.value = value\n self.leftPointer = leftPointer\n self.rightPointer = rightPointer\n\n def setLeftPointer(self, itemPointer):\n self.leftPointer = itemPointer\n\n def setRightPointer(self, itemPointer):\n self.rightPointer = itemPointer\n\n\nclass Stack:\n def __init__(self):\n self.topItem = None\n\n def push(self, item):\n item = Item(item, None, None)\n\n if self.topItem is None:\n self.topItem = item\n else:\n previousTopItem = self.topItem\n previousTopItem.setRightPointer(item)\n item.setLeftPointer(previousTopItem)\n self.topItem = item\n\n def pop(self):\n if self.topItem is not None:\n previousTopItem = self.topItem\n self.topItem = self.topItem.leftPointer\n return previousTopItem.value\n else:\n return None\n\n\ndef create_a_stack():\n a = 1\n b = 2\n c = 3\n d = 4\n e = 5\n\n stack = Stack()\n\n stack.push(a)\n stack.push(b)\n stack.push(c)\n stack.push(d)\n stack.push(e)\n\n print(stack.pop())\n print(stack.pop())\n print(stack.pop())\n print(stack.pop())\n print(stack.pop())\n print(stack.pop())\n \ncreate_a_stack()\n","repo_name":"domRowan/cautious-barnacle","sub_path":"cautious-barnacle/interview_questions/stacks_and_queues/stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26211748886","text":"import sys\n\ninput = sys.stdin.readline\n\nn = int(input())\n\nst = [0] * 301\ndp = [0] * 301\nfor i in range(n):\n st[i] = int(input())\n\n\nif n > 3:\n dp[0] = st[0]\n dp[1] = st[0]+st[1]\n dp[2] = max(st[0]+st[2], st[1]+st[2])\n for i in range(3, n):\n dp[i] = max(dp[i-2]+st[i], dp[i-3]+st[i-1]+st[i])\n \n\nif n == 1:\n print(st[0])\nelif n == 2:\n print(st[0]+st[1])\nelif n == 3:\n print(max(st[0]+st[2], st[1]+st[2]))\nelse:\n print(dp[n-1])\n","repo_name":"KUcodemaster/Problem_Solving","sub_path":"boj/2579.py","file_name":"2579.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41734498928","text":"from book import Book\r\nfrom dataB import DataB\r\nimport json\r\nimport ast\r\nimport sys\r\n\r\nfrom hashMe import hashMe\r\n\r\n\r\nopcionYes = ['y','yeah', 'si','ok' ,'yes', 'si va']\r\nopcionNo = ['n','nah','no','nope', 'no way josay']\r\n\r\ndef chequeoTxt(dataBase):\r\n everything = open('DiscoDuro.txt', 'r')\r\n libros = everything.read().split(\"\\n\")\r\n i = 0\r\n while (i < (len(libros)- 1)):\r\n diccionarioLibros = 
ast.literal_eval(libros[i])\r\n newLibro = Book(diccionarioLibros.get(\"title\"), diccionarioLibros.get(\"cota\"), diccionarioLibros.get(\"serial\"), diccionarioLibros.get(\"cantidad\"))\r\n dataBase.addBook(newLibro)\r\n i = i + 1 \r\n\r\ndef actualizoTxt(dataBase):\r\n z = 0\r\n with open('DiscoDuro.txt', 'w') as writeCS:\r\n while (z < len(dataBase.listaAuxiliar)):\r\n writeCS.write(json.dumps(dataBase.listaAuxiliar[z])+\"\\n\")\r\n z = z + 1\r\n\r\ndef buscarTitulo(dataBase):\r\n value = input(\"Titulo: \")\r\n \r\n if dataBase.checkTitles(value):\r\n for i in dataBase.listaAuxiliar:\r\n\r\n if i['title'].lower() == value.lower():\r\n\r\n dataBase.searchBook(i['cota'])\r\n return pantallaInicio(dataBase)\r\n \r\n print(\"Se ha presentado un problemilla...\")\r\n\r\n else:\r\n print('No tenemos ese libro...')\r\n return pantallaInicio(dataBase)\r\n\r\ndef buscarSerial(dataBase):\r\n value = input(\"Serial: \")\r\n if dataBase.checkSeriales(value):\r\n for i in dataBase.listaAuxiliar:\r\n if i['serial'].lower() == value.lower():\r\n dataBase.searchBook(i['cota'])\r\n return pantallaInicio(dataBase)\r\n \r\n print(\"Se ha presentado un problemilla...\")\r\n\r\n else:\r\n print('No tenemos ese libro...')\r\n return pantallaInicio(dataBase)\r\n\r\ndef buscarCota(dataBase):\r\n value = input(\"Cota: \")\r\n if dataBase.checkCotas(value):\r\n dataBase.searchBook(value)\r\n return pantallaInicio(dataBase)\r\n\r\n else:\r\n print('No tenemos ese libro...')\r\n return pantallaInicio(dataBase)\r\n\r\ndef pantallaBusqueda(dataBase):\r\n print(\"\"\"\r\n Elija Opcion De Busqueda:\r\n 1. Titulo\r\n 2. Serial\r\n 3. Cota\r\n\r\n \"\"\")\r\n value = input('> ')\r\n\r\n if value == '1':\r\n return buscarTitulo(dataBase)\r\n elif value == '2':\r\n return buscarSerial(dataBase)\r\n elif value == '3':\r\n return buscarCota(dataBase)\r\n else:\r\n print (\"Ingrese una opcion valida...\\n\\n\")\r\n return pantallaBusqueda(dataBase)\r\n\r\ndef revisarTitulo(dataBase, bookTitle):\r\n if bookTitle not in dataBase.listaTitulos:\r\n return True\r\n else:\r\n return False\r\n\r\ndef registroDeLibros(dataBase):\r\n print(\"Datos del libro a registrar:\\n\")\r\n value = True\r\n while value:\r\n title = input(\"Titulo: \")\r\n if not dataBase.checkTitles(title):\r\n value = False\r\n else:\r\n value = False\r\n\r\n value = True\r\n while value:\r\n\r\n serial = input('Serial: ').strip()\r\n if serial.lower() == 'exit':\r\n return pantallaInicio(dataBase)\r\n\r\n if len(serial) != 12:\r\n print(\"El serial debe contener 12 digitos, por favor ingresarlos todos...\")\r\n elif not serial.isdigit():\r\n print(\"Todos los caracteres deben ser numericos...\")\r\n elif dataBase.checkSeriales(serial):\r\n print(\"Este serial esta asignado a otro libro. Revise que ha sido ingresado correctamente e intente nuevamente...\")\r\n else:\r\n value = False\r\n\r\n value = True\r\n while value:\r\n cota = input(\"Cota: \").strip()\r\n if cota.lower() == 'exit':\r\n return pantallaInicio(dataBase)\r\n\r\n if len(cota) != 8:\r\n print(\"La cota esta conformada por 6 letras seguidas de 2 digitos...\")\r\n elif not cota[0:6].isalpha():\r\n print(\"La cota esta conformada por 6 /letras seguidas de 2 digitos...\")\r\n elif not cota[6:].isnumeric():\r\n print(\"La cota esta conformada por 6 letras seguidas de 2 /digitos...\")\r\n elif dataBase.checkCotas(cota):\r\n print(\"Esta cota ya se encuantra registrada...\")\r\n else:\r\n value = False\r\n \r\n\r\n quantity = input(\"Si desea agregar mas de un ejemplar, por favor indicar cuantos... 
\\nDe lo contrario, presionar enter...\\n > \")\r\n if quantity.lower() == 'exit':\r\n return pantallaInicio(dataBase) \r\n \r\n if quantity.isnumeric() and int(quantity) > 0:\r\n quantity = int(quantity)\r\n else:\r\n print(\"Se registrara un solo ejemplar...\")\r\n quantity = 1\r\n\r\n newBook = Book(title, cota, serial, quantity)\r\n dataBase.addBook(newBook)\r\n\r\n print(\"\"\"El libro ha sido registrado exitosamente...\"\"\")\r\n newBook.showInfo()\r\n actualizoTxt(dataBase)\r\n return pantallaInicio(dataBase)\r\n \r\ndef pantallaPrestamos(dataBase):\r\n value = input(\"Ingrese el titulo del libro que desea: \")\r\n if dataBase.checkTitles(value):\r\n dataBase.findCota(value, 'prestamo')\r\n actualizoTxt(dataBase)\r\n return pantallaInicio(dataBase)\r\n\r\n else:\r\n print('No tenemos ese libro...')\r\n return pantallaInicio(dataBase)\r\n\r\ndef pantallaRegreso(dataBase):\r\n value = input(\"Ingrese el titulo del libro que desea: \")\r\n if dataBase.checkTitles(value):\r\n dataBase.findCota(value, 'regreso')\r\n actualizoTxt(dataBase)\r\n return pantallaInicio(dataBase)\r\n\r\n else:\r\n print('Ese libro no es nuestro...')\r\n return pantallaInicio(dataBase)\r\n\r\ndef pantallaAgregarEjemplares(dataBase):\r\n value = input(\"Ingrese el titulo del libro que desea: \")\r\n quantity = int(input(\"Ingrese cantidad de ejemplares: \"))\r\n dataBase.findCota(value, 'agregar', quantity)\r\n actualizoTxt(dataBase)\r\n return pantallaInicio(dataBase)\r\n\r\ndef pantallaEliminarLibros(dataBase):\r\n value = input(\"Ingrese el titulo del libro que desea eliminar: \")\r\n dataBase.findCota(value, 'eliminar')\r\n actualizoTxt(dataBase)\r\n return pantallaInicio(dataBase)\r\n\r\n\r\ndef pantallaInicio(dataBase):\r\n print(\"\"\"\r\n Bienvenido \r\n Ingrese el numero de la accion que desea realizar:\r\n 1. Registrar Nuevo Libro\r\n 2. Agregar Ejemplares\r\n 3. Realizar Prestamo\r\n 4. Reingresar Un Libro\r\n 5. Buscar Libros\r\n 6. 
Quemar Libros\r\n\r\n Ingrese EXIT para salir \r\n\r\n \"\"\")\r\n value = input('> ')\r\n if value == '1':\r\n return registroDeLibros(dataBase)\r\n elif value == '2':\r\n return pantallaAgregarEjemplares(dataBase)\r\n elif value == '3':\r\n return pantallaPrestamos(dataBase)\r\n elif value == '4':\r\n return pantallaRegreso(dataBase)\r\n elif value == '5':\r\n return pantallaBusqueda(dataBase)\r\n elif value.lower() == 'exit':\r\n print(\"\\n\\n\\n\")\r\n return pantallaBienvenida(dataBase)\r\n elif value == '6':\r\n return pantallaEliminarLibros(dataBase)\r\n \r\n elif value == '420':\r\n dataBase.checkData()\r\n return pantallaBienvenida(dataBase)\r\n else:\r\n print(\"Por favor ingrese una opcion valida...\\n\\n\")\r\n return pantallaInicio(dataBase)\r\n\r\ndef pantallaBienvenida(dataBase):\r\n print(\"\"\"\r\n Bienvenido a La Biblioteca\r\n Presione Cualquier Tecla Para Continuar\r\n \"\"\")\r\n value = input('> ')\r\n if value.lower() == 'exit':\r\n exit()\r\n else:\r\n return pantallaInicio(dataBase)\r\n\r\ndef agregarLibro(cota, titulo, serial, cantidad, baseDatos):\r\n nuevoLibro = Book(titulo, cota, serial, cantidad)\r\n baseDatos.addBook(nuevoLibro)\r\n\r\n\r\ndef main():\r\n sys.setrecursionlimit(1500)\r\n print('\\n\\n\\n')\r\n dataBase = DataB()\r\n chequeoTxt(dataBase)\r\n pantallaBienvenida(dataBase)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n ","repo_name":"RbrtBrwr/sturdy-octo-carnival","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7783,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23473745961","text":"import math\nimport itertools\nfrom codejam import printCases\nfrom codejam import Sort\nfrom collections import Counter\n\nresult = []\n\ndef only_uniques(seq):\n return [k for k,n in Counter(seq).iteritems() if n == 1]\n\nfor case in range(input()):\n\tK,L,S = raw_input().split()\n\tK = int(K)\n\tL = int(L)\n\tS= int(S)\n\tkeys = raw_input()\n\ttarget = raw_input()\n\t\n\tnotinkeys = False\n\ttotal = 0 \n\tMax = 0\n\tprob = {}\n\tfor i in range(len(keys)):\n\t\tif keys[i] not in prob:\n\t\t\tcount = 0.\n\t\t\tfor j in range(len(keys)):\n\t\t\t\tif keys[i] == keys[j]: count = count + 1\n\t\t\tprob[keys[i]] = float(count)/len(keys)\n\t#print prob\n\t#perms = list(itertools.permutations(keys, S))\n\tkeys1 = []\n\tperms = []\n\tfor letter in target:\n\t\tif letter not in keys:\n\t\t\tnotinkeys = True\n\t\t\tbreak\n\tif notinkeys:\n\t\tresult.append(0.0)\t\n\t\tcontinue\n\tfor i in range(len(keys)):\n\t\tif keys[i] not in keys1: keys1.append(keys[i])\n\tkeys = \"\"\n\tfor i in range(len(keys1)): keys = keys + keys1[i]\n\tperms = [p for p in itertools.product(keys, repeat=S)]\n\tfor i in range(len(perms)):\n\t\ttmp = \"\"\n\t\tfor j in range(len(perms[i])):\n\t\t\ttmp = tmp + perms[i][j]\n\t\tperms[i] = tmp\n\t\t\n\t#print perms\n\tfor perm in perms:\n\t\ttr_count = 0\n\t\tfor i in range(len(perm)-L+1):\n\t\t\tif perm[i:i+L] == target:\n\t\t\t\t#print perm[i:i+L]\n\t\t\t\t#print perm\n\t\t\t\ttr_count = tr_count + 1\n\t\tprod = 1\n\t\tMax = max(Max, tr_count)\n\t\tfor letter in perm:\n\t\t\tprod = prod*prob[letter]\n\t\ttotal = total + prod*tr_count\n\t#print total\n\tresult.append(Max - total)\nprintCases(result)\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_166/206.py","file_name":"206.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
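# The Problem_166 record above brute-forces the expected number of target hits by
# enumerating every possible S-keystroke sequence with itertools.product, which costs
# O(len(keys)^S). Below is a minimal sketch of the same expectation computed directly
# through linearity of expectation, assuming the record's prob/target/S/L names; the
# max-occurrences term subtracted in the record would still need its own pass:
def expected_occurrences(prob, target, S, L):
    # Each of the S - L + 1 windows matches the target with probability
    # prod(prob[c] for c in target); expected counts add even though
    # overlapping windows are statistically dependent.
    p_window = 1.0
    for c in target:
        p_window *= prob[c]
    return (S - L + 1) * p_window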
+{"seq_id":"44403484581","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Sep 20 11:59:50 2018\r\n\r\n@author: klaus\r\n\"\"\"\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport time\r\nimport random\r\nfrom argparse import ArgumentParser, RawTextHelpFormatter\r\n\r\n\r\nclass GameOfLife:\r\n def __init__(self, width, height, interval, seed):\r\n random.seed(seed)\r\n self.height = height\r\n self.width = width\r\n self.interval = interval\r\n self.epoch = 0\r\n \r\n self.board = np.zeros((self.height, self.width))\r\n \r\n for x in range(int(self.width / 2 - self.width / 4), int(self.width / 2 + self.width / 4 + 1)):\r\n for y in range(int(self.height / 2 - self.height / 4), int(self.height / 2 + self.height / 4 + 1)):\r\n self.board[y][x] = random.choice([0, 1]) \r\n \r\n self.fig, self.ax = plt.subplots(figsize=(10, 10), num=1)\r\n self.fig.show()\r\n self.plot_board()\r\n \r\n def run(self):\r\n while self.run_step():\r\n time.sleep(self.interval)\r\n \r\n def run_step(self):\r\n self.epoch += 1\r\n new_board = self.board.copy()\r\n \r\n for x in range(self.width):\r\n for y in range(self.height):\r\n living_neighbors = self.board[y - 1 if y > 0 else self.height - 1][x - 1 if x > 0 else self.width - 1] + \\\r\n self.board[y - 1 if y > 0 else self.height - 1][x] + \\\r\n self.board[y - 1 if y > 0 else self.height - 1][x + 1 if x < self.width - 1 else 0] + \\\r\n self.board[y][x - 1 if x > 0 else self.width - 1] + \\\r\n self.board[y][x + 1 if x < self.width - 1 else 0] + \\\r\n self.board[y + 1 if y < self.height - 1 else 0][x - 1 if x > 0 else self.width - 1] + \\\r\n self.board[y + 1 if y < self.height - 1 else 0][x] + \\\r\n self.board[y + 1 if y < self.height - 1 else 0][x + 1 if x < self.width - 1 else 0]\r\n \r\n if self.board[y][x] == 0 and living_neighbors == 3:\r\n new_board[y][x] = 1\r\n \r\n if self.board[y][x] == 1 and (living_neighbors < 2 or living_neighbors > 3):\r\n new_board[y][x] = 0\r\n \r\n if (self.board == new_board).all():\r\n return False\r\n \r\n self.board = new_board\r\n self.plot_board()\r\n return True\r\n \r\n def plot_board(self):\r\n print(\"Epoch:\", self.epoch)\r\n self.ax.clear()\r\n self.ax.imshow(self.board, cmap=\"Greys\", interpolation=\"None\")\r\n self.fig.canvas.draw()\r\n self.fig.canvas.flush_events()\r\n\r\n\r\nif __name__ == \"__main__\": \r\n argument_parser = ArgumentParser(description=\"\"\"\r\nGame of Life:\r\n - Little python implementation of Conway's game of life.\r\n - The game board will be visualized with matplotlib.\r\n - See readme.md for more informations.\"\"\", \r\n epilog=\"https://github.com/WinterWonderland/Game_of_Life\",\r\n formatter_class=RawTextHelpFormatter)\r\n argument_parser.add_argument(\"--width\",\r\n metavar=\"\",\r\n type=int,\r\n default=100,\r\n help=\"The width of the game board (default=100)\")\r\n argument_parser.add_argument(\"--height\",\r\n metavar=\"\",\r\n type=int,\r\n default=100,\r\n help=\"The width of the game board (default=100)\")\r\n argument_parser.add_argument(\"--interval\",\r\n metavar=\"\",\r\n type=float,\r\n default=0.3,\r\n help=\"Interval time between each step (default=0.3)\")\r\n argument_parser.add_argument(\"--seed\",\r\n metavar=\"\",\r\n type=int,\r\n default=None,\r\n help=\"A seed for the random number generator to get identical play boards\")\r\n args = argument_parser.parse_args()\r\n \r\n GameOfLife(width=args.width,\r\n height=args.height,\r\n interval=args.interval,\r\n seed=args.seed).run()\r\n input(\"press enter to 
quit\")\r\n","repo_name":"WinterWonderland/Game_of_Life","sub_path":"game_of_life.py","file_name":"game_of_life.py","file_ext":"py","file_size_in_byte":4456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72497434435","text":"\n# coding: utf-8\n\n# In[4]:\n''' The model parameters are adopted from a dissertation submitted by Devesh Radhakrishnan '''\n\n#model parameters\n\nYxglc = 1.4e9 # Yield coefficient biomass on glucose \nYxgln = 2.7e9 # Yield coefficient biomass on glutamine\nYxlac = 6.53e7 # Yield coefficient biomass on lactate\nYammgln = 0.63 # Yield coefficient ammonia on glutamine\nYlacglc = 1.3 # Yield coefficient lactate on glucose\nYmabglc = 5.55e-3 # Yield coefficient mab on glucose\nKdgln = 9.6e-3 # Constant for glutamine degradation\nKglc = 0.14 # Monod constant for glucose\nKlac = 0.25 # Monod constant for glucose\nKgln = 0.025 # Monod constant for glutamine\nKilac = 171.76 # constant for lactate inhibition\nKiamm = 28.48 # constant for ammonia inhibition\nKlysis = 0.02 # Cell lysis rate (range between 0.02 to 0.06)\nmgln = 4.25e-15 # Glutamine maintenance coefficient\na0 = 2.25e-10 # constant for Glucose maintenance coefficient\na1 = 39.65 # constant for Glucose maintenance coefficient\nmumax1 = 0.03 # Maximum growth rate exponential\nmumax2 = 6.5e-3 # Maximum growth rate stationary\nmudmax = 0.042 # Maximum death rate\nkd0 = 4.54e-4 # Death rate constant\nkd1 = 5e-3 # Death rate constant\nk = 0.08 # self defined constant\n\n\n# In[5]:\n\n\n# experimental parameters\n# conc in mM; time in hr; mass in g; Volume in L\n\nV = 50e-3 #Ltr\n \n","repo_name":"deeprob/Computational-Methods-mAb-CHOcells","sub_path":"Macroscale_Model/Model_parameters.py","file_name":"Model_parameters.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28050692066","text":" \ncasoTeste = 0\nwhile True:\n meteoritoNaFazenda = 0\n x1,y1,x2,y2 = map(int, input().split())\n if x1 == y1 == x2 == y2 == 0: #Verifica se a entrada é X1 = Y1 = X2 = Y2 = 0. 
Isso simboliza final da entrada\n break\n \n casoTeste += 1\n quantidadeCasos = int(input())\n \n for i in range(quantidadeCasos):\n x, y = map(int, input().split())\n if (x1 <= x <= x2) and (y2 <= y <= y1): #Checks whether the point lies inside the rectangle.\n meteoritoNaFazenda += 1 #Increments the counter of meteorites that hit the farm.\n print(f\"Teste {casoTeste}\")\n print(meteoritoNaFazenda)\n\n","repo_name":"GuilhermeGonSoares/Beecrowd-Solutions","sub_path":"Problemas-Iniciantes/beecrowd3068.py","file_name":"beecrowd3068.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"pt","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"39003000230","text":"#!/usr/bin/python3\n\ndef text_indentation(text):\n if not isinstance(text, str):\n raise TypeError(\"text must be a string\")\n characters = ['.', '?', ':']\n sentences = text.splitlines()\n\n for sentence in sentences:\n sentence = sentence.strip()\n if sentence:\n for char in characters:\n if char in sentence:\n parts = sentence.split(char)\n indented_line = '\\n\\n'.join([part.strip() for part in parts])\n print(indented_line)\n break\n else:\n print(sentence, end='')\n","repo_name":"Biitopher/alx-higher_level_programming","sub_path":"0x07-python-test_driven_development/5-text_indentation.py","file_name":"5-text_indentation.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21745017400","text":"\"\"\"Module with text messages bot will send to users\"\"\"\n\ndef get_calendar_status_message(status, message):\n if status == 'FETCHED':\n message = 'Calendar ' + message + ' was bind with your account'\n\n elif status == 'CREATED':\n message = 'Calendar ' + message + ' was created'\n\n else:\n message = 'Something went wrong. Please, try again'\n\n return message\n\n\ndef get_event_status_answer(status, start, **kwargs):\n\n if status == 'CREATED':\n message = 'Event started from {} was added.'.format(start.strftime(\"%d %B %Y, %H:%M:%S\"))\n if kwargs.get('location'):\n message += '\\nEvent location: ' + kwargs['location']\n if kwargs.get('attendees'):\n message += '\\nInvited guests: ' + ', '.join(kwargs['attendees'])\n\n else:\n message = 'Something went wrong. Please, try again'\n\n return message\n\ndef get_add_calendar_message():\n message = 'Give me exact name of an existed calendar, connected to your account, or any other name to create a new ' \\\n 'calendar\\n\\n' \\\n '/cancel to cancel this action'\n\n return message\n\ndef get_canceled_message():\n message = 'Operation canceled'\n\n return message\n\ndef get_help_message():\n message = 'To start, type a message... Write here something else, please.\\n\\n' \\\n '/bind - to customise in which calendar I will save all events\\n' \\\n '/unbind - to set the calendar to the default value\\n' \\\n '/start - to authorise with new email\\n' \\\n '/logout - to logout from Google Calendar service for this bot'\n\n return message\n\ndef get_authorise_url_message(url):\n message = 'Please, get a validation code following instructions from this url so I could add ' \\\n 'calendars and events to your account\\n\\n' + url + \\\n '\\n\\n/cancel to cancel this action'\n\n return message\n\n\ndef get_del_status_message(status):\n\n if status:\n message = 'Calendar was set to the default value (primary calendar for your email)'\n\n else:\n message = 'Something went wrong. 
Please, try again'\n\n return message\n\ndef get_authorised_message():\n message = 'Access granted. Now you can add events through this bot.\\n\\n' + get_help_message()\n\n return message\n\ndef get_wrong_code_message():\n message = 'Something went wrong. Make sure you paste the whole code for authorisation.\\n' \\\n '/cancel to cancel this operation'\n\n return message\n\ndef get_unauthorised_user_error_message():\n\n message = 'You didn\\'t authorise at Google Calendar service.\\n\\n' \\\n 'Press /start to connect'\n\n return message\n\ndef get_logout_user_message(status):\n if status:\n message = 'You\\'ve been logout from Google Calendar service'\n else:\n message = 'Something went wrong, please try again later'\n return message","repo_name":"anevolina/GoogleCalendarBot","sub_path":"tg_bot/bot_answers.py","file_name":"bot_answers.py","file_ext":"py","file_size_in_byte":2886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40726867838","text":"import smtplib\nimport random\n\nclass Assistant():\n \n def __init__(self, name):\n \n self.name = name\n self.initialize()\n \n def run(self):\n \n while True:\n \n menu_option = \"1. Change My Name\\n2. Create Schedule\\n3. View Schedule\\n4. Send Email\\n5. Random Jokes\\n0. Exit\"\n \n print(\"\\n*********************************************\")\n print(\"Hello Python Folks, my name is \" + self.name + \", how can I help you?\")\n print(menu_option)\n print(\"*********************************************\")\n \n menu = input(\"Which one do you want? \")\n\n if menu == \"1\":\n name = input(\"Input new name: \")\n self.change_name(name)\n elif menu == \"2\":\n self.create_schedule()\n elif menu == \"3\":\n self.view_schedule()\n elif menu == \"4\":\n self.send_email()\n elif menu == \"5\":\n self.random_jokes()\n elif menu == \"0\":\n print(\"Good bye!\")\n break\n else:\n print(\"Invalid menu, please try again!\")\n \n def initialize(self): \n print(\"New Virtual Assistant has been created successfully.\")\n input(\"- Press ENTER -\")\n\n def change_name(self, name):\n self.name = name\n print(\"\\nMy name has been changed.\")\n input(\"- Press ENTER -\")\n \n def create_schedule(self):\n file = open(\"./schedule.txt\", \"a\")\n schedule = input(\"\\nPlease input your agenda: (format: dd/mm/yyyy - agenda_name)\\n\")\n file.write(schedule + \"\\n\")\n file.close()\n print(\"New schedule has been created.\")\n input(\"- Press ENTER -\")\n \n def view_schedule(self):\n print(\"\\nHere is list of your schedule:\")\n file = open(\"schedule.txt\", \"r\")\n print(file.read())\n file.close()\n\n input(\"- Press ENTER -\")\n\n def random_jokes(self):\n jokes = [\n \"Debugging is like being the detective in a crime movie where you're also the murderer at the same time.\", \n \"Algorithm: A word used by programmers when they don't want to explain how their code works.\", \n \"To whoever stole my copy of Microsoft Office, I will find you. You have my Word!\",\n \"I visited my friend at his new house. He told me to make myself at home. So I threw him out. I hate having visitors.\",\n \"A perfectionist walked into a bar... apparently, the bar was not set high enough.\"]\n \n while True:\n \n print(random.choice(jokes))\n is_again = input(\"Again [Yes/No]? 
\")\n\n if is_again.lower() == \"no\" or is_again.lower() == \"tidak\" or is_again.lower() == \"0\": break\n\n def send_email(self):\n \n sender = '' # isi email pengirim\n password = '' # isi password dari email pengirim\n receiver = []\n\n file = open(\"./receiver_list.txt\", \"r\")\n for x in file:\n receiver.append(x)\n\n subject = 'Greetings'\n body = 'Hello, I hope you have a great day!'\n\n message = \"Subject: %s\\n\\n%s\\n\\nSent from %s.\" % (subject, body, self.name)\n\n try:\n #Create your SMTP session \n smtp = smtplib.SMTP('smtp.gmail.com', 587) \n\n #Use TLS to add security \n smtp.starttls() \n\n #User Authentication \n smtp.login(sender, password)\n\n #Sending the Email\n smtp.sendmail(sender, receiver, message) \n\n #Terminating the session \n smtp.quit() \n print (\"Email sent successfully!\") \n \n except Exception as e:\n print(\"Oops! I found\", e.__class__, \"occurred.\")\n print(\"Error message:\", str(e))\n\n input(\"- Press ENTER -\")\n","repo_name":"muhamuttaqien/course-python-programming","sub_path":"Case Study/Solution/Assistant.py","file_name":"Assistant.py","file_ext":"py","file_size_in_byte":3911,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"2898815535","text":"from django.urls import path\n\nfrom fixture import views\n\nurlpatterns = [\n path('ticket-man-city/', views.FixtureListView.as_view(), name='ticket'),\n path('ticket-real-madrid/', views.FixtureRealMadridListView.as_view(), name='ticket_real'),\n path('ticket-man-united/', views.FixtureManUnitedListView.as_view(), name='ticket_man_united'),\n path('ticket-barcelona/', views.FixtureBarcelonaListView.as_view(), name='ticket_barcelona'),\n path('checkout/', views.checkout, name='checkout'),\n path('success-purchase/', views.success_purchase, name='success_purchase'),\n]\n","repo_name":"CiciovanBogdan/TicketStore","sub_path":"fixture/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"16942802505","text":"from django.urls import path, include\nfrom . 
import views\nfrom rest_framework import routers\n\nrouter = routers.DefaultRouter()\nrouter.register('languages', views.LanguageView)\nurlpatterns = [\n path('router/', include(router.urls)),\n path('', views.HomeView.as_view(), name='home'),\n path('home/', views.HomeView.as_view(), name='home'),\n]\n","repo_name":"Duc98f/lesson2","sub_path":"module/languages/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19421847321","text":"'''\r\nCreated on Feb 23, 2021\r\n\r\n@author: Miguel\r\n'''\r\n\r\nfrom matrix_elements.MatrixElement import _TwoBodyMatrixElement_JTCoupled\r\nfrom matrix_elements.transformations import TalmiTransformation\r\n\r\nfrom helpers.Enums import BrinkBoekerParameters as bb_p, CentralMEParameters,\\\r\n PotentialForms, PotentialSeriesParameters\r\nfrom helpers.Enums import AttributeArgs\r\nfrom helpers.Enums import SHO_Parameters\r\nfrom helpers.integrals import talmiIntegral\r\n\r\nfrom helpers.Log import XLog\r\n\r\nclass BrinkBoeker(_TwoBodyMatrixElement_JTCoupled, TalmiTransformation):\r\n \r\n \"\"\"\r\n Implementation of the central force for two gaussians_, overriding of the\r\n BrodyMoschinsky transformation method to skip\r\n \"\"\"\r\n \r\n @classmethod\r\n def setInteractionParameters(cls, *args, **kwargs):\r\n \"\"\" \r\n Implement the parameters for the Brink-Boeker interaction calculation. \r\n \r\n \"\"\"\r\n # Refresh the Force parameters\r\n if cls.PARAMS_FORCE:\r\n cls.PARAMS_FORCE = {}\r\n \r\n _b = SHO_Parameters.b_length\r\n cls.PARAMS_SHO[_b] = float(kwargs.get(_b))\r\n \r\n part_1 = AttributeArgs.ForceArgs.Brink_Boeker.part_1\r\n part_2 = AttributeArgs.ForceArgs.Brink_Boeker.part_2\r\n \r\n cls.PARAMS_FORCE[0] = {}\r\n cls.PARAMS_FORCE[1] = {}\r\n \r\n for param in bb_p.members():\r\n cls.PARAMS_FORCE[0][param] = float(kwargs[param].get(part_1))\r\n cls.PARAMS_FORCE[1][param] = float(kwargs[param].get(part_2))\r\n \r\n cls.PARAMS_FORCE[CentralMEParameters.potential] = PotentialForms.Gaussian\r\n #cls.plotRadialPotential()\r\n \r\n cls._integrals_p_max = -1\r\n cls._talmiIntegrals = ([], [])\r\n \r\n def _validKetTotalSpins(self):\r\n \"\"\" For Central Interaction, != 0 only if S=S' \"\"\"\r\n return (self.S_bra, )\r\n \r\n def _validKetTotalAngularMomentums(self):\r\n \"\"\" For Central Interaction, != 0 only if L=L' \"\"\"\r\n return (self.L_bra, )\r\n \r\n def _validKet_relativeAngularMomentums(self):\r\n \"\"\" Central interaction only allows l'==l\"\"\"\r\n return (self._l, )\r\n \r\n def _globalInteractionCoefficient(self):\r\n # no special interaction constant for the Central ME\r\n return 1\r\n \r\n def _interactionConstantsForCOM_Iteration(self):\r\n # no special internal c.o.m interaction constants for the Central ME\r\n return 1\r\n \r\n def deltaConditionsForGlobalQN(self):\r\n \"\"\" \r\n Define if non null requirements on LS coupled J Matrix Element, \r\n before doing the center of mass decomposition.\r\n \r\n NOTE: Redundant if run from JJ -> LS recoupling\r\n \"\"\"\r\n if ((self.L_bra != self.L_ket)\r\n or (self.S_bra != self.S_ket)\r\n or (self._l != self._l_q)):\r\n \r\n if self.DEBUG_MODE:\r\n self.details = \"deltaConditionsForGlobalQN = False central BB {}\\n {}\"\\\r\n .format(str(self.bra), str(self.ket))\r\n return False\r\n \r\n return True\r\n \r\n def _deltaConditionsForCOM_Iteration(self):\r\n \"\"\" condition for the antisymmetrization_ \"\"\"\r\n if (((self.S_bra + self.T + self._l) % 
2 == 1) and \r\n ((self.S_ket + self.T + self._l_q) % 2 == 1)):\r\n return True\r\n return False\r\n \r\n @classmethod\r\n def _calculateIntegrals(cls, n_integrals=1):\r\n \"\"\"\r\n >> Overwrite to have two parts\r\n \"\"\"\r\n for p in range(cls._integrals_p_max + 1, \r\n cls._integrals_p_max + n_integrals +1):\r\n for part in (0, 1): \r\n args = [\r\n cls.PARAMS_FORCE.get(CentralMEParameters.potential),\r\n cls.PARAMS_SHO.get(SHO_Parameters.b_length), # * np.sqrt(2), # \r\n cls.PARAMS_FORCE[part].get(CentralMEParameters.mu_length),\r\n cls.PARAMS_FORCE[part].get(CentralMEParameters.n_power)\r\n ]\r\n \r\n cls._talmiIntegrals[part].append(talmiIntegral(p, *args))\r\n \r\n cls._integrals_p_max += 1\r\n \r\n def talmiIntegral(self):\r\n \"\"\" \r\n >> Overwrite to have two parts\r\n Get or update Talmi integrals for the calculations\r\n \"\"\"\r\n if self._p > self._integrals_p_max:\r\n self._calculateIntegrals(n_integrals = max(self.rho_bra, self.rho_ket, 1))\r\n return self._talmiIntegrals[self._part].__getitem__(self._p)\r\n \r\n def centerOfMassMatrixElementEvaluation(self):\r\n \"\"\"\r\n Radial Brody-Moshinsky transformation, direct implementation for \r\n central force.\r\n \"\"\"\r\n return self._BrodyMoshinskyTransformation()\r\n \r\n \r\n def _LScoupled_MatrixElement(self):\r\n \"\"\" \r\n <(n1,l1)(n2,l2) (LS)| V |(n1,l1)'(n2,l2)'(L'S') (T)>\r\n \"\"\"\r\n aux_sum = 0.0\r\n # Sum of gaussians and projection operators\r\n for i in range(2):\r\n self._part = i\r\n \r\n # Radial Part for Gaussian Integral\r\n radial_energy = self.centerOfMassMatrixElementEvaluation()\r\n \r\n if self.DEBUG_MODE:\r\n XLog.write('BB', mu=self.PARAMS_FORCE[i][CentralMEParameters.mu_length])\r\n \r\n # Exchange Part\r\n # W + P(S)* B - P(T)* H - P(T)*P(S)* M\r\n _S_aux = (-1)**(self.S_bra + 1)\r\n _T_aux = (-1)**(self.T)\r\n _L_aux = (-1)**(self.T + self.S_bra + 1)\r\n \r\n exchange_energy = (\r\n self.PARAMS_FORCE[i].get(bb_p.Wigner),\r\n self.PARAMS_FORCE[i].get(bb_p.Bartlett) * _S_aux,\r\n self.PARAMS_FORCE[i].get(bb_p.Heisenberg) * _T_aux,\r\n self.PARAMS_FORCE[i].get(bb_p.Majorana) * _L_aux\r\n )\r\n \r\n # Add up\r\n prod_part = radial_energy * sum(exchange_energy)\r\n aux_sum += prod_part\r\n \r\n if self.DEBUG_MODE:\r\n XLog.write('BB', radial=radial_energy, exch=exchange_energy, \r\n exch_sum=sum(exchange_energy), val=prod_part)\r\n \r\n return aux_sum \r\n \r\n\r\n\r\nclass PotentialSeries_JTScheme(BrinkBoeker):\r\n \r\n \"\"\"\r\n The gaussian_ series are useful to mimic non analytically integrable\r\n potentials, this interaction is an extension of the Brink_Boeker for \r\n indeterminate number of Wigner terms.\r\n \"\"\"\r\n \r\n @classmethod\r\n def setInteractionParameters(cls, *args, **kwargs):\r\n \"\"\" \r\n Implement the parameters for the Talmi Integrals. 
\r\n \"\"\"\r\n # Refresh the Force parameters\r\n if cls.PARAMS_FORCE:\r\n cls.PARAMS_FORCE = {}\r\n \r\n _b = SHO_Parameters.b_length\r\n cls.PARAMS_SHO[_b] = float(kwargs.get(_b))\r\n \r\n part = PotentialSeriesParameters.part\r\n pot_key = CentralMEParameters.potential\r\n \r\n cls.numberGaussians = 0\r\n for param, values in kwargs.items():\r\n if not param.startswith(part):\r\n continue\r\n i = int(param.split(part)[1])\r\n cls.PARAMS_FORCE[i] = {}\r\n potential = values.get(pot_key)\r\n cls.PARAMS_FORCE[i][pot_key] = potential\r\n for attr in (CentralMEParameters.mu_length, \r\n CentralMEParameters.constant):\r\n ## set default geometric shape mu = Constant = 1 if not given.\r\n cls.PARAMS_FORCE[i][attr] = float(values.get(attr, 1))\r\n \r\n if potential in (PotentialForms.Power, PotentialForms.Gaussian_power):\r\n cls.PARAMS_FORCE[i][CentralMEParameters.n_power] = \\\r\n int(values.get(CentralMEParameters.n_power, 0))\r\n cls.numberGaussians += 1\r\n \r\n #cls.plotRadialPotential()\r\n cls._integrals_p_max = -1\r\n cls._talmiIntegrals = tuple(([] for _ in range(cls.numberGaussians)))\r\n \r\n \r\n @classmethod\r\n def _calculateIntegrals(cls, n_integrals=1):\r\n \"\"\"\r\n >> Overwrite to have N parts\r\n \"\"\"\r\n for p in range(cls._integrals_p_max + 1, \r\n cls._integrals_p_max + n_integrals +1):\r\n \r\n for part in range(cls.numberGaussians): \r\n args = [\r\n cls.PARAMS_FORCE[part].get(CentralMEParameters.potential),\r\n cls.PARAMS_SHO.get(SHO_Parameters.b_length), # * np.sqrt(2), # \r\n cls.PARAMS_FORCE[part].get(CentralMEParameters.mu_length),\r\n cls.PARAMS_FORCE[part].get(CentralMEParameters.n_power)\r\n ]\r\n cls._talmiIntegrals[part].append(talmiIntegral(p, *args))\r\n \r\n cls._integrals_p_max += 1\r\n \r\n \r\n def _LScoupled_MatrixElement(self):\r\n \"\"\" \r\n <(n1,l1)(n2,l2) (LS)| V |(n1,l1)'(n2,l2)'(L'S') (T)>\r\n \"\"\"\r\n aux_sum = 0.0\r\n # Sum of gaussians and projection operators\r\n for i in range(self.numberGaussians):\r\n self._part = i\r\n \r\n # Radial Part for Gaussian Integral\r\n radial_energy = self.centerOfMassMatrixElementEvaluation()\r\n \r\n if self.DEBUG_MODE:\r\n XLog.write('BB', mu=self.PARAMS_FORCE[i][CentralMEParameters.mu_length])\r\n \r\n prod_part = radial_energy * self.PARAMS_FORCE[i].get(CentralMEParameters.constant)\r\n aux_sum += prod_part\r\n \r\n if self.DEBUG_MODE:\r\n XLog.write('BB', radial=radial_energy, val=prod_part)\r\n \r\n return aux_sum \r\n","repo_name":"migueldelafuente1/2B_MatrixElements","sub_path":"matrix_elements/BrinkBoeker.py","file_name":"BrinkBoeker.py","file_ext":"py","file_size_in_byte":9771,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"26602452681","text":"import os\nfrom os.path import expanduser\n\ndef search(dirname):\n try:\n filenames = os.listdir(dirname)\n for filename in filenames:\n full_filename = os.path.join(dirname, filename)\n if os.path.isdir(full_filename):\n pass\n else:\n ext = os.path.splitext(full_filename)[-1]\n if ext == '.log' or ext == '.txt' :\n check_filename = filename\n if '_' in filename:\n check_filename_list = filename.split('_')\n check_filename = ''\n for fn in range(1, len(check_filename_list)):\n check_filename += check_filename_list[fn]\n \n #log_list.append(filename.replace(ext, ''))\n if '전' in check_filename or '14' in check_filename:\n #print(check_filename)\n before = full_filename\n elif '후' in check_filename or '19' in check_filename or '16' in check_filename:\n #print(check_filename)\n after = 
full_filename\n        return before, after\n    except PermissionError:\n        pass\n\ndef check_diff(be_list, af_list):\n    copy_af = af_list[:]\n    iaf = len(copy_af)\n    for daf in af_list[::-1]:\n        iaf -=1\n        for dbe in be_list:\n            if daf in dbe:\n                copy_af.pop(iaf)\n            else:\n                continue\n    \n    copy_be = be_list[:]\n    ibe = len(copy_be)\n    for dbe in be_list[::-1]:\n        ibe -= 1\n        for daf in af_list:\n            if daf in dbe:\n                copy_be.pop(ibe)\n            else:\n                continue\n    \n    return copy_af, copy_be\n\ndef find_start_index(data_list, find_start='# show port'):\n    for i, d in enumerate(data_list):\n        if find_start in d:\n            return i\n    \ndef finds_index_list(data_list, index, find_fir='--------------------------------', brk_str='Port Descriptions on Slot A'):\n    finds_list = []\n    for i, d in enumerate(data_list[index:]):\n        if find_fir in d:\n            finds_list.append(i)\n        elif brk_str in d:\n            break\n    return finds_list\n\ndef find_port_check(finds_list, data_list, index, find_thi='======================'):\n    port_list = []; port_state = {}; port_bool = True\n    for i in finds_list:\n        if port_bool:\n            for d in data_list[index+i+1:]:\n                if d == '\\n' or find_thi in d or 'A/' in d or 'B/' in d:\n                    if 'A/' in d or 'B/' in d:\n                        port_bool = False\n                    break\n                data = d.replace('\\n', '').split()\n                port = data[0]\n                port_list.append(port)\n                #print(data)\n                port_state[port] = f'{data[1]} {data[2]}'\n    return port_list, port_state\n\ndef find_bgp_check(finds_list, data_list, index, find_thi='--------------------------------'):\n    bgp_list = []; act = []; sent = []\n    # NOTE: BGP summary parsing is not implemented yet; return empty results so the caller can unpack\n    return bgp_list, {}\n    \n    \n\n#home = expanduser(\"~\")\n#dir_path = home + '/Desktop/비교 log/'\ndir_path = 'D:/Desktop/비교 log/'\nbefore, after = search(dir_path)\nprint()\nprint('Original file : ', before)\nprint('Changed file : ', after, end='\\n\\n')\n\nfbe = open(before, 'r', encoding='UTF8')\ndatabe = fbe.readlines()\nfbe.close()\n\nfaf = open(after, 'r', encoding='UTF-8')\ndataaf = faf.readlines()\nfaf.close()\n\n\nbe_index = find_start_index(databe)\nfinds_be_list = finds_index_list(databe, be_index)\nport_be_list, port_be_state = find_port_check(finds_be_list, databe, be_index)\n\n\naf_index = find_start_index(dataaf)\nfinds_af_list = finds_index_list(dataaf, af_index)\nport_af_list, port_af_state = find_port_check(finds_af_list, dataaf, af_index)\n\n\ncheck = True\nfor i in range(len(port_be_list)):\n    if not port_be_list[i] == port_af_list[i]:\n        print('Ports differ, check the ports first : ', port_be_list[i], port_af_list[i])\n        check = False\n\nprint('='*95)\n#print(port_be_state)\nbe_up_port = '14 up_port : '\naf_up_port = '19 up_port : '\nif check:\n    for key in port_be_list:\n        if not port_be_state[key] == port_af_state[key]:\n            print(f'State differs, check needed |\tport : {key:7s} before : {str(port_be_state[key]):18s} after : {str(port_af_state[key])}')\n        if 'Up' in port_be_state[key]:\n            be_up_port += f'{key} '\n        if 'Up' in port_af_state[key]:\n            af_up_port += f'{key} '\n\nprint(be_up_port); print(); print(af_up_port);\nbe_bgp_index = find_start_index(databe, 'BGP Summary')\nfinds_be_bgp_list = finds_index_list(databe, be_bgp_index)\nbgp_be_list, bgp_be_state = find_bgp_check(finds_be_bgp_list, databe, be_index)\n\naf_bgp_index = find_start_index(dataaf, 'BGP Summary')","repo_name":"andyahn/test","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34083202742","text":"from testcase.locators.category_page_locators import CategoryPageLocators\nfrom testcase.pages.base_page import BasePage\n\n\nclass 
CategoryProductsPage(BasePage):\n    def __init__(self, driver):\n        super().__init__(driver)\n        self.locators = CategoryPageLocators(self.driver)\n\n    def products_list(self):\n        elements = self.locators.products()\n        products_list = []\n        for elem in elements:\n            products_list.append(elem.text)\n\n        return products_list\n","repo_name":"sirbana/automation_practice","sub_path":"testcase/pages/category_products_page.py","file_name":"category_products_page.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7144975148","text":"x=int(input())\ncount=0\nli=list(map(int,input().split()))\nfor i in range(x):\n    flag=1\n    if li[i]>1:\n        for j in range(2,li[i]):\n            if li[i]%j==0:\n                flag=0\n                break\n    else:\n        flag=0  # 1 is not a prime\n    if flag==1:\n        count+=1\nprint(count)\n","repo_name":"ChaeheePark/Coding_test","sub_path":"baekjoon__algorithm_code/1978.py","file_name":"1978.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5699977026","text":"def solution(n, computers):\n    answer = 0\n    network = [[] for _ in range(n+1)]\n    visited = [0] * (n+1)\n    # Turn the bidirectional network into a tree-like structure of parent-child relationships.\n    for i in range(n):\n        for j in range(n):\n            # Connect edges; skip self-connections and avoid duplicate entries for bidirectional links\n            if computers[i][j] == 1 and i != j and j+1 not in network[i+1]:\n                network[i+1].append(j+1)\n                network[j+1].append(i+1)\n    # Visit the nodes one by one and DFS to inspect each connected network (path) that forms\n    # The number of edges does not equal the number of networks\n    for i in range(1, n+1):\n        if visited[i] == 0:\n            stack = [i]\n            while stack:\n                num = stack.pop()\n                visited[num] = 1\n                for i in network[num]:\n                    if visited[i] == 0:\n                        stack.append(i)\n            answer += 1\n    return answer","repo_name":"wnstj-yang/Algorithm","sub_path":"Programmers/programmers_네트워크.py","file_name":"programmers_네트워크.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6092824759","text":"class Cat:\n    # Create the shared dictionary\n    __share_attr = {\n        'breed': 'pers',\n        'color': 'black'\n    }\n\n    def __init__(self):                    # On every initialisation of an instance of the class\n        self.__dict__ = Cat.__share_attr    # assign the values of __share_attr to __dict__\n        # And since this is a dict, and a dict is a mutable attribute, its values can be changed\n        # in any instance of the class for all instances of the class.\n# __dict__ is the dictionary that stores the attributes of a class instance\n\na = Cat()\nb = Cat()\na.breed = 'siam'\nb.color = 'black'\nc = Cat()\nc.color = 'green'\nc.breed = 'sphinx'\n\n","repo_name":"RostSIT/MeetTheChallengesOfOOP","sub_path":"2.4 Моносостояние для экземпляров класса/2.4 Моносостояние для экземпляров класса 1 из 1.py","file_name":"2.4 Моносостояние для экземпляров класса 1 из 1.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40941266344","text":"import pandas as pd\nimport time\nimport pathlib\n\nexample_stock_data_path = \"../data/out/aapl.csv\"\nexample_stock_data = pd.read_csv(example_stock_data_path)\nexample_stock_data[\"Date\"] = pd.to_datetime(example_stock_data[\"Date\"], format=\"mixed\")\n\nexample_sentiment_dir = \"../data/out/sentiment\"\nexample_sentiment_data = []\nfor path in pathlib.Path(example_sentiment_dir).iterdir():\n    if path.is_file() and path.suffix == \".parquet\":\n        df = pd.read_parquet(path)\n        example_sentiment_data.append(df)\n        df[\"time\"] = pd.to_datetime(df[\"time\"], 
format=\"mixed\")\n\n\n# Time the merge operation\ntime_start = time.time()\nmerged_data = example_stock_data\nfor ex_sent_df in example_sentiment_data:\n merged_data = pd.merge_asof(\n merged_data,\n ex_sent_df,\n left_on=\"Date\",\n right_on=\"time\",\n direction=\"nearest\",\n )\n\n\nstart = \"2016-01-01\"\nend = \"2016-02-01\"\n\n# print all entries from start to end\nprint(merged_data[(merged_data[\"Date\"] >= start) & (merged_data[\"Date\"] <= end)])\n\ntime_end = time.time()\nprint(\n f\"Time to merge: {time_end - time_start} seconds for entries from {start} to {end}\"\n)\n","repo_name":"NgaiJustin/stock-sentiment","sub_path":"src/stock_sentiment_merge.py","file_name":"stock_sentiment_merge.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31586070356","text":"from ntrprtr.action.ActionBase import ActionBase\n\nclass HexdumpAction(ActionBase):\n def __init__(self):\n super().__init__()\n self._config[\"nonAsciiPlaceholder\"] = \".\"\n\n def process(self, action, _bytes):\n self._mergeConfig(action)\n offset_ = 0\n result = self.__createHexHeader()\n splittedBytes = [_bytes[i:i + 16] for i in range(0, len(_bytes), 16)]\n for b_ in splittedBytes:\n result += self.__createHexRow(offset_, b_.hex(\" \"), b_, self._config[\"nonAsciiPlaceholder\"])\n offset_ += 16 \n return result\n\n def __createHexHeader(self):\n result = \"\"\n h = \"{:8} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:16}\".format(\" Offset\", \"00\", \"01\", \"02\", \"03\", \"04\", \"05\",\n \"06\", \"07\", \"08\", \"09\", \"0A\", \"0B\",\n \"0C\", \"0D\", \"0E\", \"0F\", \"ASCII\")\n result += h + \"\\n\"\n result += (\"-\"*8) + \" \" + (\"-\"*47) + \" \" + (\"-\"*16) +\" \\n\"\n return result\n\n def __createHexRow(self, offset, hex_, bytes_, placeholder):\n formatStr = \"{:8} {:47} {:16}\"\n asc = \"\".join(chr(v) if (v >= 32 and v <= 126) else placeholder for v in bytes_)\n r = formatStr.format(offset, hex_.upper(), asc)\n return r + \" \\n\"\n\n","repo_name":"5f0ne/ntrprtr","sub_path":"src/ntrprtr/action/HexdumpAction.py","file_name":"HexdumpAction.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17028890633","text":"import copy\nimport math\nfrom tqdm import tqdm\nfrom collections import defaultdict, deque\nfrom functools import reduce\nfrom itertools import permutations, product\n\"\"\"\n12345678\n\n1 + (-3) + 5 + (-7) = 4\n2 + 3 + (-6) + (-7) = 8\n3 + 4 + 5 = 2\n4 + 5 + 6 + 7 = 2\n5 + 6 + 7 + 8 = 5\n6 + 7 + 8 = 1\n7 + 8 = 5\n8 = 8\n\n4 + (-2) + 6 + (-5) = 3\n\n\n\"\"\"\n\nbase_pattern = [0, 1, 0, -1]\n\n\ndef aoc_program(input_file, phases, has_offset=True):\n with open(input_file, \"r\") as f:\n inp = f.readline()\n\n offset = int(inp[:7])\n inp = [int(x) for x in inp]\n\n if has_offset:\n expanded_inp = inp * 10000\n\n to_compute = len(expanded_inp) - offset\n inp = expanded_inp[offset:][::-1]\n\n phase_arr = [inp[0]] * phases # LAST DIGIT\n result = [inp[0]]\n\n for i in tqdm(range(1, to_compute)):\n # for i in range(1, 4):\n next_arr = []\n for k in range(phases):\n if k == 0:\n next_arr.append((phase_arr[k] + inp[i]) % 10)\n else:\n next_arr.append((phase_arr[k] + next_arr[-1]) % 10)\n\n phase_arr = copy.deepcopy(next_arr)\n result.append(phase_arr[-1])\n\n result = result[::-1]\n result = [str(x) for x in result[:8]]\n print(''.join(result))\n\n # for\n\n # memo 
= defaultdict(int) # Digit, output_pos\n\n # for output_pos in tqdm(range(len(inp))):\n # repeats = output_pos + 1\n # pattern = []\n # complete = False\n\n # while not complete:\n # for x in base_pattern:\n # pattern.extend([x] * repeats)\n # if len(pattern) >= len(inp) + 1:\n # complete = True\n # break\n\n # memo[output_pos] = pattern[1:len(inp) + 1]\n # print(output_pos, memo[output_pos])\n\n # output = []\n # for i in tqdm(range(phases)):\n # new_inp = []\n # for output_pos in range(len(inp)):\n # pattern = memo[output_pos]\n # result = sum(map(lambda x: x[0] * x[1], zip(inp, pattern)))\n # result = abs(result) % 10\n # new_inp.append(result)\n # inp = copy.deepcopy(new_inp)\n # output.extend(copy.deepcopy(new_inp))\n # print(inp)\n\n # inp = [str(x) for x in inp]\n\n # if has_offset:\n # print(offset)\n # print(output[offset:offset + 8])\n # return 0\n\n # return int(''.join(inp[:8]))\n\n\ndef test_program():\n DAY = 16\n test_arr = [\n # (\n # (f\"{DAY}/aoc{DAY}.in1\", 4, False),\n # 1029498,\n # ),\n # (\n # (f\"{DAY}/aoc{DAY}.in2\", 100, False),\n # 24176176,\n # ),\n (\n (f\"{DAY}/aoc{DAY}.in3\", 100),\n 13312,\n ),\n (\n (f\"{DAY}/aoc{DAY}.in4\", 100),\n 180697,\n ),\n (\n (f\"{DAY}/aoc{DAY}.in5\", 100),\n 2210736,\n ),\n (\n (f\"{DAY}/aoc{DAY}.in6\", 100, True),\n 82435530,\n ),\n ]\n\n for inp, expected in test_arr:\n actual = aoc_program(*inp)\n\n if actual == expected:\n print(\"OK\")\n else:\n print(f\"ERROR: actual: {actual} expected: {expected}\")\n\n\ntest_program()\n","repo_name":"astraldawn/aoc2019","sub_path":"aoc16.py","file_name":"aoc16.py","file_ext":"py","file_size_in_byte":3177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10845686224","text":"from .pages.main_page import MainPage\nfrom .pages.pictures_page import PicturesPage\n\n\ndef test_yandex_search(browser):\n link = \"https://yandex.ru/\"\n page = MainPage(browser, link)\n page.open() \n page.should_be_search_box()\n page.search_box_input(\"Тензор\")\n page.should_be_suggestions()\n page.search_box_enter_pressed()\n page.should_be_search_result()\n page.should_result_have_link(\"tensor.ru\", 5)\n\ndef test_yandex_pictures(browser):\n link = \"https://yandex.ru/\"\n page = MainPage(browser, link)\n page.open()\n page.should_be_pictures_link()\n page.go_to_pictures()\n pictures_page = PicturesPage(browser, browser.current_url)\n pictures_page.should_be_url_images()\n picture_text = pictures_page.return_first_picture_text()\n pictures_page.go_to_first_picture_text()\n pictures_page.should_be_right_text(picture_text)\n pictures_page.open_first_picture()\n pictures_page.should_be_picture()\n first_picture = pictures_page.get_current_picture_url()\n pictures_page.click_button_next()\n second_picture = pictures_page.get_current_picture_url()\n pictures_page.pictures_must_not_be_the_same(first_picture, second_picture)\n pictures_page.click_button_previous()\n current_picture = pictures_page.get_current_picture_url()\n pictures_page.pictures_must_be_the_same(first_picture, current_picture)\n","repo_name":"albertkhudaev/test_yandex","sub_path":"test_yandex.py","file_name":"test_yandex.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73189647873","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nfrom scipy.interpolate import interp1d, RectBivariateSpline, UnivariateSpline\nfrom delight.utils import approx_DL\n# from specutils import 
extinction\nfrom astropy import units as u\n\n\nclass PhotometricFilter:\n \"\"\"Photometric filter response\"\"\"\n def __init__(self, bandName, tabulatedWavelength, tabulatedResponse):\n self.bandName = bandName\n self.wavelengthGrid = tabulatedWavelength\n self.tabulatedResponse = tabulatedResponse\n self.interp = interp1d(tabulatedWavelength, tabulatedResponse)\n self.norm = np.trapz(tabulatedResponse/tabulatedWavelength,\n x=tabulatedWavelength)\n ind = np.where(\n tabulatedResponse > 0.001*np.max(tabulatedResponse)\n )[0]\n self.lambdaMin = tabulatedWavelength[ind[0]]\n self.lambdaMax = tabulatedWavelength[ind[-1]]\n\n def __call__(self, wavelength):\n return self.interp(wavelength)\n\n\n# class DustModel:\n# \"\"\"\n# Extinction model from Cardelli, Clayton & Mathis (1988)\n# \"\"\"\n# def __init__(self):\n# self.r_v = 3.1\n#\n# def __call__(self, wave, a_v):\n# return extinction.extinction_d03(wave * u.Angstrom,\n# a_v, r_v=self.r_v)\n#\n#\n# class SpectralTemplate_zd:\n# \"\"\"\n# SED template, tabulated, to be interpolated on aredshift and dust grid\n# \"\"\"\n# def __init__(self,\n# tabulatedWavelength, tabulatedSpectrum, photometricBands,\n# redshiftGrid=None, dustGrid=None):\n# self.DL = approx_DL()\n# self.DustModel = DustModel()\n# self.photometricBands = photometricBands\n# self.numBands = len(photometricBands)\n# self.fbinterps = {}\n# self.sed_interp = interp1d(tabulatedWavelength, tabulatedSpectrum)\n# if redshiftGrid is None:\n# self.redshiftGrid = np.logspace(np.log10(1e-2),\n# np.log10(2.0),\n# 50)\n# else:\n# self.redshiftGrid = redshiftGrid\n# if dustGrid is None:\n# self.dustGrid = np.logspace(np.log10(1e-2),\n# np.log10(100),\n# 15)\n# else:\n# self.dustGrid = dustGrid\n#\n# for filt in photometricBands:\n# fmodgrid = np.zeros((self.redshiftGrid.size, self.dustGrid.size))\n# for iz in range(self.redshiftGrid.size):\n# opz = (self.redshiftGrid[iz] + 1)\n# xf_z = filt.wavelengthGrid / opz\n# yf_z = filt.tabulatedResponse\n# ysed = self.sed_interp(xf_z)\n# facz = opz**2. / (4*np.pi*self.DL(self.redshiftGrid[iz])**2.)\n# for jd in range(self.dustGrid.size):\n# ysedext = facz * ysed *\\\n# 10**-0.4*self.DustModel(xf_z, self.dustGrid[jd])\n# fmodgrid[iz, jd] =\\\n# np.trapz(ysedext * yf_z, x=xf_z) / filt.norm\n# self.fbinterps[filt.bandName] = RectBivariateSpline(\n# self.redshiftGrid, self.dustGrid, fmodgrid)\n#\n# def photometricFlux(self, redshifts, dusts, bandName, grid=False):\n# return self.fbinterps[bandName](redshifts, dusts, grid=grid).T\n#\n# def flux(self, redshift, dust, wave):\n# opz = 1. + redshift\n# xf_z = wave / opz\n# facz = opz**2. 
/ (4*np.pi*self.DL(redshift)**2.)\n# ysed = self.sed_interp(xf_z)\n# ysedext = facz * ysed *\\\n# 10**-0.4*self.DustModel(xf_z, dust)\n# return ysedext\n\n\nclass SpectralTemplate_z:\n \"\"\"\n SED template, tabulated and to be interpolated on a redshift grid\n \"\"\"\n def __init__(self,\n tabulatedWavelength, tabulatedSpectrum, photometricBands,\n redshiftGrid=None, order=15):\n self.DL = approx_DL()\n self.photometricBands = photometricBands\n self.numBands = len(photometricBands)\n self.sed_interp = interp1d(tabulatedWavelength, tabulatedSpectrum,\n bounds_error=False,\n fill_value=\"extrapolate\")\n if redshiftGrid is None:\n self.redshiftGrid = np.logspace(np.log10(1e-2),\n np.log10(2.0),\n 350)\n else:\n self.redshiftGrid = redshiftGrid\n\n self.fbcoefs = {}\n self.fbinterps = {}\n self.logfbinterps = {}\n self.order = order\n self.fmodgrid = np.zeros((self.redshiftGrid.size,\n len(photometricBands)))\n self.bandNames = []\n for ib, filt in enumerate(photometricBands):\n self.bandNames.append(filt.bandName)\n for iz in range(self.redshiftGrid.size):\n opz = (self.redshiftGrid[iz] + 1)\n xf_z = filt.wavelengthGrid / opz\n yf_z = filt.tabulatedResponse\n ysed = self.sed_interp(xf_z)\n facz = opz**2. / (4*np.pi*self.DL(self.redshiftGrid[iz])**2.)\n ysedext = facz * ysed\n self.fmodgrid[iz, ib] =\\\n np.trapz(ysedext * yf_z, x=xf_z) / filt.norm\n self.fbinterps[filt.bandName] = UnivariateSpline(\n self.redshiftGrid, self.fmodgrid[:, ib], s=0)\n self.fbcoefs[filt.bandName] = np.polyfit(\n self.redshiftGrid, np.log(self.fmodgrid[:, ib]), self.order-1)\n self.logfbinterps[filt.bandName] =\\\n np.poly1d(self.fbcoefs[filt.bandName])\n\n def photometricFlux_spline(self, redshifts, bandName):\n return self.fbinterps[bandName](redshifts)\n\n def photometricFlux(self, redshifts, bandName):\n return np.exp(self.logfbinterps[bandName](redshifts))\n\n def photometricFlux_bis(self, redshifts, bandName):\n xgg = redshifts[:, None] ** np.arange(self.order-1, -1, -1)[None, :]\n return np.exp(np.sum(xgg * self.fbcoefs[bandName][None, :], axis=1))\n\n def photometricFlux_gradz(self, redshifts, bandName):\n mod_der = np.poly1d(np.polyder(self.fbcoefs[bandName]))\n return mod_der(redshifts) * self.photometricFlux(redshifts, bandName)\n\n def photometricFlux_gradz_bis(self, redshifts, bandName):\n xgg = redshifts[:, None] ** np.arange(self.order-2, -1, -1)[None, :]\n der = np.arange(self.order-1, 0, -1)\n flux = self.photometricFlux_bis(redshifts, bandName)\n return np.sum(xgg * der * self.fbcoefs[bandName][None, :-1],\n axis=1) * flux\n\n def flux(self, redshift, wave):\n opz = 1. + redshift\n xf_z = wave / opz\n facz = opz**2. 
/ (4*np.pi*self.DL(redshift)**2.)\n ysed = self.sed_interp(xf_z)\n ysedext = facz * ysed\n return ysedext\n","repo_name":"ixkael/Delight","sub_path":"delight/sedmixture.py","file_name":"sedmixture.py","file_ext":"py","file_size_in_byte":6973,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"30518355565","text":"import torch\nfrom DORN_pytorch import DORN_nyu_rawhints\nimport cv2\nimport torch\nimport torchvision.transforms as transforms\nfrom PIL import Image\nimport numpy as np\nfrom scipy.signal import fftconvolve\nimport scipy.io as sio\nimport argparse\nimport os\nimport pdb\nimport json\n\n# from split_utils import build_index\nfrom loss_numpy import delta, mse, rel_abs_diff, rel_sqr_diff\n\nfrom DORN_pytorch import DORN_nyu_rawhints\n\nparser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n# parser.add_argument('--filename', type=str, default='./data/NYUV2/demo_01.png', help='path to an image')\nparser.add_argument('--rootdir', type=str, default=\"/home/markn1/spad_single/data/nyu_depth_v2_processed\",\n help=\"rootdir of dataset\")\nparser.add_argument('--blacklist', type=str,\n default=\"/home/markn1/spad_single/data/nyu_depth_v2_processed/blacklist.txt\",\n help=\"images to not calculate losses on\")\nparser.add_argument('--indexfile', type=str, default=\"/home/markn1/spad_single/data/nyu_depth_v2_processed/test.json\",\n help=\"index of dataset to load\")\nparser.add_argument('--outputroot', type=str, default='/home/markn1/DORN/result/NYUV2/pytorch/hints', help='output path')\nparser.add_argument('--outputlosses', type=str, default='losses.json',\n help=\"records average losses on whole dataset. path is relative to outputroot\")\nparser.add_argument(\"--torch-path\", type=str, default=\"./torch_params_nyuv2_first_flip.pth.tar\")\nparser.add_argument('--max-depth', type=float, default=10.0)\nparser.add_argument('--min-depth', type=float, default=0.0)\nparser.add_argument('--sid-bins', type=int, default=68)\nparser.add_argument('--spad-bins', type=int, default=1024)\nparser.add_argument('--cuda-device', type=str, default=\"0\")\n\n\ndef depth_prediction_with_hints(img_file, depth_truth, albedo, mask, net, device):\n rgb, H, W = load_image_cv2(img_file, device)\n # rgb, H, W = load_image_torchvision(filename, device)\n\n # sid_hist = load_sid_rawhist(depth_truth, mask, args.min_depth, args.max_depth, args.sid_bins, device)\n # sid_hist = load_sid_albedo_hist(depth_truth, albedo, mask, args.min_depth, args.max_depth, args.sid_bins, device)\n sid_hist = simulate_spad(depth_truth, albedo, args.spad_bins, 1e6, 1e5, 70,\n mask, args.min_depth, args.max_depth, args.sid_bins, device)\n input_ = {\"rgb\": rgb,\n \"sid_hist\": sid_hist}\n with torch.no_grad():\n output = net(input_)\n pred = decode_ord(output) # Pred is in numpy\n # Magic stuff\n pred = pred[0,0,:,:] - 1.0\n pred = pred/25.0 - 0.36\n pred = np.exp(pred)\n ord_score = cv2.resize(pred, (W, H), interpolation=cv2.INTER_LINEAR)\n return ord_score\n\n\ndef load_sid_rawhist(depth_truth, mask, min_depth, max_depth, sid_bins, device):\n \"\"\"\n Mask off invalid depths\n :param depth_truth: numpy array containing the ground truth depth map\n :param min_depth:\n :param max_depth:\n :param sid_bins:\n :param device: torch.device to load the histogram to.\n :return: torch(.cuda).FloatTensor with the histogram\n \"\"\"\n offset = 1.0 - min_depth\n start = 1.0\n end = max_depth + offset\n sid_bin_edges = [end**(float(i)/sid_bins) for i in 
range(sid_bins + 1)]\n # print(depth_truth)\n # print(sid_bin_edges)\n # Don't use entries where depth is invalid.\n sid_hist, _ = np.histogram((depth_truth + 1.0), bins=sid_bin_edges, weights=mask, density=True)\n # print(sid_hist)\n sid_hist = sid_hist/np.sum(sid_hist) # Make it a histogram in bins\n sid_hist = torch.from_numpy(sid_hist).unsqueeze(-1).unsqueeze(-1).unsqueeze(0).float()\n sid_hist = sid_hist.to(device)\n return sid_hist\n\n\ndef load_sid_albedo_hist(depth_truth, albedo, mask, min_depth, max_depth, sid_bins, device):\n \"\"\"\n Mask off invalid depths\n :param depth_truth: numpy array containing the ground truth depth map\n :param min_depth:\n :param max_depth:\n :param sid_bins:\n :param device: torch.device to load the histogram to.\n :return: torch(.cuda).FloatTensor with the histogram\n \"\"\"\n offset = 1.0 - min_depth\n start = 1.0\n end = max_depth + offset\n sid_bin_edges = [end**(float(i)/sid_bins) for i in range(sid_bins + 1)]\n # print(depth_truth)\n # print(sid_bin_edges)\n # Don't use entries where depth is invalid.\n weights = mask * albedo\n sid_hist, _ = np.histogram((depth_truth + 1.0), bins=sid_bin_edges, weights=weights, density=True)\n # print(sid_hist)\n sid_hist = sid_hist/np.sum(sid_hist) # Make it a histogram in bins\n sid_hist = torch.from_numpy(sid_hist).unsqueeze(-1).unsqueeze(-1).unsqueeze(0).float()\n sid_hist = sid_hist.to(device)\n return sid_hist\n\n\ndef load_sid_albedo_falloff_hist(depth_truth, albedo, mask, min_depth, max_depth, sid_bins, device):\n \"\"\"\n Mask off invalid depths\n :param depth_truth: numpy array containing the ground truth depth map\n :param min_depth:\n :param max_depth:\n :param sid_bins:\n :param device: torch.device to load the histogram to.\n :return: torch(.cuda).FloatTensor with the histogram\n \"\"\"\n offset = 1.0 - min_depth\n start = 1.0\n end = max_depth + offset\n sid_bin_edges = [end**(float(i)/sid_bins) for i in range(sid_bins + 1)]\n # print(depth_truth)\n # print(sid_bin_edges)\n # Don't use entries where depth is invalid.\n weights = mask * albedo/(depth_truth**2 + 1e-6)\n sid_hist, _ = np.histogram((depth_truth + 1.0), bins=sid_bin_edges, weights=weights, density=True)\n # print(sid_hist)\n sid_hist = sid_hist/np.sum(sid_hist) # Make it a histogram in bins\n sid_hist = torch.from_numpy(sid_hist).unsqueeze(-1).unsqueeze(-1).unsqueeze(0).float()\n sid_hist = sid_hist.to(device)\n return sid_hist\n\n\ndef makeGaussianPSF(size, fwhm=3, center=None):\n \"\"\" Make a square gaussian kernel.\n size is the length of a side of the square\n fwhm is full-width-half-maximum, which\n can be thought of as an effective radius.\n \"\"\"\n\n x = np.arange(0, size, 1, float)\n x0 = size // 2\n return np.roll(np.exp(-4 * np.log(2) * ((x - x0) ** 2) / fwhm ** 2), len(x) - x0)\n\n\ndef rescale_bins(spad_counts, sid_bins, min_depth, max_depth):\n \"\"\"Use bin numbers to do sid discretization.\n\n Assign photons to sid bins proportionally according to the amount of overlap between\n the sid bin range and the spad_count bin.\n \"\"\"\n alpha = 1.\n offset = 1.0 - min_depth\n beta = max_depth + offset\n\n # Get edges of sid bins in meters\n sid_bin_edges_m = np.array([beta ** (float(i) / sid_bins) for i in range(sid_bins + 1)]) - offset\n\n # Convert sid_bin_edges_m into units of spad bins\n sid_bin_edges_bin = sid_bin_edges_m * len(spad_counts) / (max_depth - min_depth)\n\n # Map spad_counts onto sid_bin indices\n sid_counts = np.zeros(sid_bins)\n for i in range(sid_bins):\n left = sid_bin_edges_bin[i]\n right = 
sid_bin_edges_bin[i + 1]\n curr = left\n while curr != right:\n curr = np.min([right, np.floor(left + 1.)]) # Don't go across spad bins - stop at integers\n sid_counts[i] += (curr - left) * spad_counts[int(np.floor(left))]\n # Update window\n left = curr\n return sid_counts\n\n\ndef simulate_spad(depth_truth, albedo, spad_bins, photon_count, dc_count, fwhm_ps,\n mask, min_depth, max_depth, sid_bins, device):\n \"\"\"\n min_depth, max_depth in meters\n fwhm: given in picoseconds\n \"\"\"\n # spad_bin_edges = np.linspace(min_depth, max_depth, spad_bins + 1)\n weights = (albedo / (depth_truth ** 2 + 1e-6)) * mask\n depth_hist, _ = np.histogram(depth_truth, bins=spad_bins, range=(min_depth, max_depth), weights=weights)\n\n # Scale by number of photons\n # print(spad_counts.shape)\n spad_counts = depth_hist * (photon_count / np.sum(depth_hist))\n # Add ambient/dark counts (dc_count)\n spad_counts += dc_count / spad_bins * np.ones(len(spad_counts))\n\n # Convolve with PSF\n bin_width_m = float(max_depth - min_depth) / spad_bins # meters/bin\n bin_width_ps = 2 * bin_width_m * 1e12 / (\n 3e8) # ps/bin, speed of light = 3e8, x2 because light needs to travel there and back.\n fwhm_bin = fwhm_ps / bin_width_ps\n psf = makeGaussianPSF(len(spad_counts), fwhm=fwhm_bin)\n # print(psf)\n # print(spad_counts)\n spad_counts = fftconvolve(psf, spad_counts)[:len(spad_counts)]\n # print(spad_counts)\n\n # Apply poisson\n spad_counts = np.random.poisson(spad_counts)\n sid_counts = rescale_bins(spad_counts, sid_bins, min_depth, max_depth)\n sid_counts = sid_counts/np.sum(sid_counts)\n # return sid_counts#, spad_counts, depth_hist, psf\n sid_hist = torch.from_numpy(sid_counts).unsqueeze(-1).unsqueeze(-1).unsqueeze(0).float()\n sid_hist = sid_hist.to(device)\n return sid_hist\n\ndef load_hints_net(device):\n # net = DORN_nyu_rawhints(alpha = 3.)\n # net = DORN_nyu_rawhints(alpha = 0.3)\n # net = DORN_nyu_rawhints(alpha = 1.0)\n net = DORN_nyu_rawhints(alpha = 0.1)\n # net = DORN_nyu_rawhints(alpha = 4.)\n # net = DORN_nyu_rawhints(alpha = 6.)\n # net = DORN_nyu_rawhints(alpha = 8)\n # net = DORN_nyu_rawhints(alpha = 10.)\n # net.load_state_dict(torch.load(args.torch_path))\n net.to(device)\n net.eval()\n return net\n\n\ndef load_image_cv2(img_file, device):\n rgb_cv2 = cv2.imread(img_file, cv2.IMREAD_COLOR)\n H, W = rgb_cv2.shape[:2]\n rgb_cv2 = rgb_cv2.astype(np.float32)\n rgb_cv2 = rgb_cv2 - np.array([[[103.0626, 115.9029, 123.1516]]]).astype(np.float32)\n rgb_cv2 = cv2.resize(rgb_cv2, (353, 257), interpolation=cv2.INTER_LINEAR)\n rgb = torch.from_numpy(rgb_cv2.transpose(2, 0, 1)).unsqueeze(0).flip([1])\n rgb = rgb.to(device)\n return rgb, H, W\n\n\ndef load_image_torchvision(img_file, device):\n pixel_means = torch.tensor([103.0626, 115.9029, 123.1516]).unsqueeze(-1).unsqueeze(-1)\n transform = transforms.Compose([\n transforms.Resize((257, 353)), # (Height, Width)\n transforms.ToTensor()\n ])\n rgb_pil = Image.open(img_file)\n W, H = rgb_pil.size\n rgb_torch = transform(rgb_pil) * 255.\n rgb = (rgb_torch - pixel_means).unsqueeze(0)\n rgb = rgb.to(device)\n return rgb, H, W\n\n\ndef decode_ord(data_pytorch):\n \"\"\"Takes a pytorch tensor, converts to numpy, then\n does the ordinal loss decoding.\n \"\"\"\n data = data_pytorch.cpu().numpy()\n N = data.shape[0]\n C = data.shape[1]\n H = data.shape[2]\n W = data.shape[3]\n ord_labels = data\n decode_label = np.zeros((N, 1, H, W), dtype=np.float32)\n ord_num = C/2\n for i in range(int(ord_num)):\n ord_i = ord_labels[:,2*i:2*i+2,:,:]\n decode_label = decode_label + 
np.argmax(ord_i, axis=1)\n return decode_label.astype(np.float32, copy=False)\n\n\ndef convert_to_uint8(img, min_val, max_val):\n return np.uint8((img - min_val)/(max_val - min_val)*255.0)\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.cuda_device\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n print(\"using device: {} (CUDA_VISIBLE_DEVICES = {})\".format(device,\n os.environ[\"CUDA_VISIBLE_DEVICES\"]))\n\n # print(\"Using device: {}\".format(device))\n net = load_hints_net(device)\n pixel_means = np.array([[[103.0626, 115.9029, 123.1516]]])\n\n with open(args.indexfile, 'r') as f:\n print(\"Loading index from {}\".format(args.indexfile))\n index = json.load(f)\n\n if args.blacklist is not None:\n print(\"Loading blacklist from {}\".format(args.blacklist))\n with open(args.blacklist, \"r\") as f:\n blacklist = [line.strip() for line in f.readlines()]\n\n print(\"Running tests...\")\n loss_fns = []\n loss_fns.append((\"mse\", mse))\n loss_fns.append((\"delta1\", lambda p, t, m: delta(p, t, m, threshold=1.25)))\n loss_fns.append((\"delta2\", lambda p, t, m: delta(p, t, m, threshold=1.25 ** 2)))\n loss_fns.append((\"delta3\", lambda p, t, m: delta(p, t, m, threshold=1.25 ** 3)))\n loss_fns.append((\"rel_abs_diff\", rel_abs_diff))\n loss_fns.append((\"rel_sqr_diff\", rel_sqr_diff))\n npixels = 0.\n\n total_losses = {loss_name: 0. for loss_name, _ in loss_fns}\n for entry in index:\n if entry in blacklist:\n continue\n print(entry)\n rgb_file = os.path.join(args.rootdir, index[entry][\"rgb\"])\n\n depth_truth_file = os.path.join(args.rootdir, index[entry][\"rawdepth\"])\n depth_truth = cv2.imread(depth_truth_file, cv2.IMREAD_ANYDEPTH)\n depth_truth = depth_truth/1000.\n boolmask = (depth_truth <= args.min_depth) | (depth_truth >= args.max_depth)\n mask = 1.0 - boolmask.astype(float)\n\n albedo_file = os.path.join(args.rootdir, index[entry][\"albedo\"])\n albedo = cv2.imread(albedo_file, cv2.IMREAD_COLOR)\n albedo = albedo[:,:,1] # Green channel only\n\n depth = depth_prediction_with_hints(rgb_file, depth_truth, albedo, mask, net, device)\n\n\n # Calculate metrics\n npixels += np.sum(mask)\n for loss_name, loss_fn in loss_fns:\n avg_loss = loss_fn(depth, depth_truth, mask)\n total_losses[loss_name] += avg_loss * np.sum(mask)\n\n img_id = entry.replace(\"/\", \"_\")\n if not os.path.exists(args.outputroot):\n os.makedirs(args.outputroot)\n # Write output to file\n depth_img = convert_to_uint8(depth, args.min_depth, args.max_depth)\n cv2.imwrite(str(args.outputroot + '/' + img_id + '_pred.png'), depth_img)\n\n # Write ground truth to file\n truth_img = convert_to_uint8(depth_truth, args.min_depth, args.max_depth)\n cv2.imwrite(str(args.outputroot + '/' + img_id + '_truth.png'), truth_img)\n\n #TESTING\n # break\n # Save as a json\n avg_losses = {loss_name: total_losses[loss_name]/npixels for loss_name in total_losses}\n avg_losses[\"network\"] = \"dorn_pytorch_albedo_hints_{}_spad\".format(net.alpha)\n if \"mse\" in avg_losses:\n avg_losses[\"rmse\"] = np.sqrt(avg_losses[\"mse\"])\n with open(os.path.join(args.outputroot, args.outputlosses), \"w\") as f:\n json.dump(avg_losses, f)\n print(\"avg_losses\")\n print(avg_losses)\n","repo_name":"computational-imaging/spad_single","sub_path":"depthnet/model/demo_nyuv2_pytorch_hints.py","file_name":"demo_nyuv2_pytorch_hints.py","file_ext":"py","file_size_in_byte":14292,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} 
+{"seq_id":"17305656665","text":"import os\n\n\ndef ex23_1_0():\n    def print_dirs(project):\n        print('\\nContent of folder: ', project)\n        if os.path.exists(project):\n            for i_elem in os.listdir(project):\n                path = os.path.join(project, i_elem)\n                print('  ', path)\n        else:\n            print('Directory does not exist')\n\n    projects_list = ['skillbox', 'probe', 'testProject1']\n    for i_project in projects_list:\n        path_to_project = os.path.abspath(os.path.join('..', '..', '..', i_project))\n        print_dirs(path_to_project)\n\n\ndef ex23_1_1():\n    # Task 1. Icons\n    #\n    # Andrey wants to build an experimental website for himself that nicely displays the whole structure of his\n    # disk: folders with one kind of icon, files with another. So he needs code that helps determine which\n    # type of icon to insert.\n    #\n    # Write a program that, for a given absolute path, determines what the path points to (a\n    # directory, a file, or a link) and prints a corresponding message. If the path points\n    # to a file, also print its size (how many bytes it weighs). Provide input validation: check that the path\n    # exists.\n    #\n    # Hint: look for a suitable method to print the file size.\n    #\n    # Example 1:\n    # Path: C:\\Users\\Roman\\PycharmProjects\\Skillbox\\Module17\\lesson2.py\n    # This is a file\n    # File size: 605 bytes\n    #\n    # Example 2:\n    # Path: C:\\Users\\Roman\\PycharmProjects\\Skillbox\\Module17\\lesson2.py\n    # The specified path does not exist\n    path = input('Enter the path of file: ')\n    if os.path.exists(path):\n        if os.path.isfile(path):\n            print('This file has {} B'.format(os.path.getsize(path)))\n        elif os.path.isdir(path):\n            print('This is a directory')\n        elif os.path.islink(path):\n            print('This is a link')\n    else:\n        print('File does not exist')\n\n\ndef ex23_1_2():\n    # Task 2. File search\n    # In the lesson we wrote a function that searches for the file we need in all subdirectories of a given directory. However, as
we understand, there may be several files with that name.\n    # Write a function that takes an absolute path to a directory and a file name as input, walks through all nested\n    # files and folders, and prints all absolute paths with that name.\n    #\n    # Example:\n    # Searching in: C:/Users/Roman/PycharmProjects/Skillbox\n    # File name: lesson2\n    #\n    # The following paths were found:\n    # C:/Users/Roman/PycharmProjects/Skillbox\\Module15\\lesson2.py\n    # C:/Users/Roman/PycharmProjects/Skillbox\\Module16\\lesson2.py\n    # C:/Users/Roman/PycharmProjects/Skillbox\\Module17\\lesson2.py\n    # C:/Users/Roman/PycharmProjects/Skillbox\\Module18\\lesson2.py\n    # C:/Users/Roman/PycharmProjects/Skillbox\\Module19\\lesson2.py\n    # C:/Users/Roman/PycharmProjects/Skillbox\\Module20\\lesson2.py\n    # C:/Users/Roman/PycharmProjects/Skillbox\\Module21\\lesson2.py\n    # C:/Users/Roman/PycharmProjects/Skillbox\\Module22\\lesson2.py\n    path = input('Enter path: ')\n    file_name = input('Enter file name: ')\n    os.chdir(os.path.abspath(path))\n    all_files = []\n    for root, dirs, files in os.walk(\".\", topdown=False):\n        for name in files:\n            if os.path.basename(name) == file_name:\n                all_files.append(os.path.abspath(os.path.join(root, name)))  # include the subdirectory in the absolute path\n    print(all_files)","repo_name":"Gegcuk/skillbox","sub_path":"python_for_data_science/lesson23_work_with_files/ex23.1.py","file_name":"ex23.1.py","file_ext":"py","file_size_in_byte":4301,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36731271084","text":"#! /usr/bin/env python\n#! 
-*- coding:utf-8 -*-\nimport socketserver,sys,time,connections\nimport random,threading\nmsgbox={}\nmsgbox_lock = threading.Lock()\n \n \ndef pop_msg(address):\n    print(\"pop msg %s\"%address)\n    if msgbox_lock.acquire(timeout=5) == True:\n        try:\n            if address in msgbox.keys():\n                return msgbox.pop(address)\n            return ''\n        except:\n            return ''\n        finally:\n            msgbox_lock.release()\n    else:\n        raise Exception('oh , pop_msg exception')\n \ndef insert_msg(address,msg):\n    print(msg)\n    if msgbox_lock.acquire(timeout=5) == True:\n        try:\n            if address not in msgbox.keys():\n                msgbox[address]=[]\n            msgbox[address].append(msg)\n            return 'ok'\n        except:\n            return 'insert failed'\n        finally:\n            msgbox_lock.release()\n    else:\n        raise Exception('oh , insert_msg exception') \n \ndef gen_check_code(address):\n    #from ledger.db get address's public key\n    #then encode the check_code\n    return random.randrange(10000000000000,9999999999999999)\n \nclass ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):\n    def handle(self): # server defined here\n        global address_dict\n        peer_ip = self.request.getpeername()[0]\n        timeout_operation = 120\n        timer_operation = time.time()\n        try:\n            #receive register\n            data = connections.receive(self.request, 10)\n            if data != 'register':\n                raise Exception('client did not register')\n            #receive address\n            address = connections.receive(self.request, 10)\n            check_code = gen_check_code(address)\n            connections.send(self.request, check_code , 10)\n            data = connections.receive(self.request, 10)\n            if data != check_code :\n                connections.send(self.request, 'you are fake' , 10)\n                raise Exception('client is fake')\n            connections.send(self.request, 'what can i do for you?' , 10)\n            data = connections.receive(self.request, 10)\n            if data == 'sendmsg':\n                address = connections.receive(self.request, 10)\n                sequence = connections.receive(self.request, 10)\n                msg = connections.receive(self.request, 10)\n                timestamp = time.time()\n                result = insert_msg( address , (timestamp,sequence,msg) )\n                connections.send(self.request, result , 10)\n            elif data == 'getmsg':\n                msg = pop_msg(address)\n                connections.send(self.request, msg , 10)\n            \n        except Exception as e:\n            print(e)\n        finally:\n            if self.request:\n                self.request.close()\n \nclass ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):\n    pass\n \n \nif __name__ == \"__main__\":\n    try:\n        HOST, PORT = \"0.0.0.0\", int(sys.argv[1])\n        ThreadedTCPServer.allow_reuse_address = True\n        ThreadedTCPServer.daemon_threads = True\n        ThreadedTCPServer.timeout = 60\n        ThreadedTCPServer.request_queue_size = 100\n        server = ThreadedTCPServer((HOST, PORT), ThreadedTCPRequestHandler)\n        ip, port = server.server_address\n        server_thread = threading.Thread(target=server.serve_forever)\n        server_thread.daemon = True\n        server_thread.start()\n        while True:\n            print(msgbox)\n            time.sleep(5)\n        server.shutdown()\n        server.server_close()\n    except Exception as e:\n        print(e)\n        print(\"server startup failed!\")\n        sys.exit()","repo_name":"flyfire100/DARKNESS","sub_path":"Demo/nat.py","file_name":"nat.py","file_ext":"py","file_size_in_byte":3589,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"23556813161","text":"f = open(\"B-large.in\",\"r\")\nout = open(\"output.out\",\"a\")\ncase = int(f.readline())\ndef chck(x):\n\ty = map(int,x)\n\tfor i in range(1,len(y)):\n\t\tif y[-i]<y[-i-1]:\n\t\t\treturn 0\n\treturn 1\ndef fun(x):\n\treturn int(x[:-1])\ndef lst(x,c):\n\treturn x+'9'*c\no = []\nfor i in range(case):\n\tinp = int(f.readline())\n\tc = 0\n\twhile(inp>0):\n\t\tif chck(str(inp)):\n\t\t\tbreak\n\t\tif inp%10 == 9:\n\t\t\tc+=1\n\t\t\tinp = fun(str(inp))\t\n\t\tinp-=1\n\tif c!=0:\n\t\to.append(lst(str(inp),c))\n\telse:\n\t\to.append(inp)\nfor i in 
range(case):\n\tout.write(\"Case #\"+str(i+1)+\": \"+str(o[i])+\"\\n\")\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/2843.py","file_name":"2843.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6941344909","text":"import requests\n\nfrom telegram import ReplyKeyboardMarkup, KeyboardButton, InlineKeyboardButton, InlineKeyboardMarkup\n\nfrom settings import URL_API\n\n\ndef validate_age():\n inlinekeyboard = [\n [\n InlineKeyboardButton(\"Да\", callback_data=\"Да\"),\n InlineKeyboardButton(\"Нет\", callback_data=\"Нет\"),\n ]\n ]\n return InlineKeyboardMarkup(inlinekeyboard)\n\n\ndef get_menu_buttons():\n buttons = [\n [\"Бары и филиалы\"],\n [\n KeyboardButton(\"Поиск напитка\"),\n ],\n [KeyboardButton(\"Бары рядом со мной\", request_location=True)],\n ]\n return ReplyKeyboardMarkup(one_time_keyboard=True, keyboard=buttons)\n\n\ndef get_query(tag):\n try:\n result = requests.get(URL_API + tag)\n return result.json()\n except (requests.RequestException, ValueError):\n print(\"Error\")\n return False\n","repo_name":"Stanis96/python-telegram-bot","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3650113768","text":"\"\"\"SJTUPlus URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\n\nfrom . 
import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('categories/', include('groups.urls')),\n path('attest/', include('verify.urls', namespace=\"attest\")),\n path('login', views.login, name='login'),\n path('logout', views.logout, name='logout'),\n path('logged_out', views.logged_out, name='logged_out'),\n path('authorize', views.authorize, name='authorize'),\n path('api/', include('api.urls')),\n path('', include('main.urls'))\n]\n","repo_name":"SJTU-Plus/sjtu-plus","sub_path":"SJTUPlus/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":75,"dataset":"github-code","pt":"61"} +{"seq_id":"33186077045","text":"from database.database_request import DatabaseRequest\n\n\"\"\"This class makes product objects from the products in the database, so its\nattributes are the fields of the product table, and it has a method to get a\nsubstitute, that calls the method get_substitute() from the DatabaseRequest\nclass\"\"\"\n\n\nclass Product:\n def __init__(self, name, description, nutriscore, stores, url, category):\n self.id = \"\"\n self.name = name\n self.description = description\n self.nutriscore = nutriscore\n self.stores = stores\n self.url = url\n self.category = category\n\n def get_substitute(self):\n self.db_rq = DatabaseRequest()\n result = self.db_rq.get_substitute(self)\n substitute_name = result[1]\n substitute_description = result[2]\n substitute_nutriscore = result[3]\n substitute_store = result[4]\n substitute_url = result[5]\n substitute_category_id = result[6]\n substitute = Product(substitute_name, substitute_description,\n substitute_nutriscore, substitute_store,\n substitute_url, substitute_category_id)\n substitute.id = result[0]\n return substitute\n","repo_name":"AntoineMaurin/OpenFoodRooms","sub_path":"models/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42385503002","text":"import os\n\nimport logging\nfrom logging.handlers import SMTPHandler, RotatingFileHandler\n\nfrom flask_mail import Mail\nfrom config import Config\nfrom flask import Flask, render_template, flash, request, redirect, url_for, make_response, session\nfrom forms import RSVPForm\nfrom threading import Thread\nfrom flask_mail import Message\n\n\n\napp = Flask(__name__)\napp.config.from_object(Config)\n\n\nmail_settings = {\n \"MAIL_SERVER\": 'smtp.gmail.com',\n \"MAIL_PORT\": 465,\n \"MAIL_USE_TLS\": False,\n \"MAIL_USE_SSL\": True,\n \"MAIL_USERNAME\": 'andrewwillacy@gmail.com',\n \"MAIL_PASSWORD\": 'eejjutobtplpcuyc'\n}\n\napp.config.update(mail_settings)\nmail = Mail(app)\n\n\n\ndef send_async_email(app, msg):\n with app.app_context():\n mail.send(msg)\n\n\ndef send_email(subject, sender, recipients, text_body, html_body):\n msg = Message(subject, sender=sender, recipients=recipients)\n msg.body = text_body\n msg.html = html_body\n Thread(target=send_async_email, args=(app, msg)).start()\n\n\n\n\nif not app.debug:\n if app.config['MAIL_SERVER']:\n auth = None\n if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:\n auth = (app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD'])\n secure = None\n if app.config['MAIL_USE_TLS']:\n secure = ()\n mail_handler = SMTPHandler(\n mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),\n fromaddr='no-reply@' + app.config['MAIL_SERVER'],\n toaddrs=app.config['ADMINS'], subject='Microblog Failure',\n 
credentials=auth, secure=secure)\n mail_handler.setLevel(logging.ERROR)\n app.logger.addHandler(mail_handler)\n\n if not os.path.exists('logs'):\n os.mkdir('logs')\n file_handler = RotatingFileHandler('logs/wedding.log', maxBytes=10240,\n backupCount=10)\n file_handler.setFormatter(logging.Formatter(\n '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))\n file_handler.setLevel(logging.INFO)\n app.logger.addHandler(file_handler)\n\n app.logger.setLevel(logging.INFO)\n app.logger.info('Wedding Website')\n\n\n@app.route('/dashboard/')\ndef dashboard():\n return render_template(\"dashboard.html\")\n\n@app.route('/wed/')\ndef wed():\n return render_template(\"wed.html\")\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef real2():\n spam = False\n form = RSVPForm()\n if form.validate_on_submit():\n with app.app_context():\n if form.username.data == \"Henrysox\":\n flash(\"Get Lost\")\n else:\n for i in ['free','traffic','business','advertise','quick','web','we','paying','money','income','Crytosox','Money','Financial','financial','Work','work','Online','online']:\n if i in form.message.data or i in form.username.data:\n spam = True\n if spam:\n flash(\"This message has been triggered as spam\")\n else:\n msg = Message(subject='Wedding Message from ' + form.username.data,\n sender=app.config.get(\"MAIL_USERNAME\"),\n recipients=['andrewwillacy@gmail.com'], # replace with your email for testing\n body=form.message.data + '\\n\\nContact Email: ' + form.email.data)\n mail.send(msg)\n flash('Message Sent!')\n\n\n\n return redirect(url_for('real2', _anchor='top'))\n return render_template(\"real2.html\", form=form)\n\n@app.route('/real/', methods=['GET', 'POST'])\ndef real():\n form = RSVPForm()\n if form.validate_on_submit():\n\n if form.username.data == \"Henrysox\":\n flash(\"Get Lost\")\n else:\n send_email('Wedding Message from ' + form.username.data,\n sender='andrewwillacy@gmail.com',\n recipients=['andrewwillacy@gmail.com'],\n text_body=form.message.data + ' Contact Email: ' + form.email.data,\n html_body=form.message.data + '
Contact Email: ' + form.email.data)\n\n flash('Message Sent!')\n return redirect(url_for('real'))\n\n if 'update1' in session:\n if session['update1'] == \"30 people\":\n first_visit = False\n else:\n first_visit = True\n session['update1'] = \"30 people\"\n\n return render_template(\"real.html\", form=form, first_visit=first_visit)\n\n\n\n","repo_name":"willacya/weddingpage","sub_path":"flask_app.py","file_name":"flask_app.py","file_ext":"py","file_size_in_byte":4459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71766694595","text":"# coding=utf-8\n\"\"\"collection of Method endpoints.\"\"\"\nimport logging\n\nimport flask_login\nfrom flask import (Blueprint, flash, jsonify, redirect, render_template,\n request, url_for)\nfrom flask_babel import gettext\n\nfrom mycodo.config import METHOD_INFO\nfrom mycodo.config_translations import TRANSLATIONS\nfrom mycodo.databases.models import DisplayOrder, Method, MethodData\nfrom mycodo.mycodo_flask.extensions import db\nfrom mycodo.mycodo_flask.forms import forms_method\nfrom mycodo.mycodo_flask.routes_static import inject_variables\nfrom mycodo.mycodo_flask.utils import utils_general, utils_method\nfrom mycodo.utils.method import create_method_handler\nfrom mycodo.utils.outputs import output_types\nfrom mycodo.utils.system_pi import csv_to_list_of_str, list_to_csv\n\nlogger = logging.getLogger('mycodo.mycodo_flask.methods')\n\nblueprint = Blueprint('routes_method',\n __name__,\n static_folder='../static',\n template_folder='../templates')\n\n\n@blueprint.context_processor\n@flask_login.login_required\ndef inject_dictionary():\n return inject_variables()\n\n\n@blueprint.route('/method-data/')\n@flask_login.login_required\ndef method_data(method_id):\n \"\"\"\n Returns options for a particular method\n This includes sets of (time, setpoint) data.\n \"\"\"\n # First method column with general information about method\n method = Method.query.filter(Method.unique_id == method_id).first()\n\n # User-edited lines of each method\n method_data = MethodData.query.filter(MethodData.method_id == method.unique_id)\n\n return jsonify(create_method_handler(method, method_data).get_plot(700))\n\n\n@blueprint.route('/method', methods=('GET', 'POST'))\n@flask_login.login_required\ndef method_list():\n \"\"\"List all methods on one page with a graph for each.\"\"\"\n form_create_method = forms_method.MethodCreate()\n\n method = Method.query.all()\n method_all = MethodData.query.all()\n\n return render_template('pages/method-list.html',\n method=method,\n method_all=method_all,\n method_info=METHOD_INFO,\n form_create_method=form_create_method)\n\n\n@blueprint.route('/method-build/', methods=('GET', 'POST'))\n@flask_login.login_required\ndef method_builder(method_id):\n \"\"\"\n Page to edit the details of each method\n This includes the (time, setpoint) data sets\n \"\"\"\n if not utils_general.user_has_permission('edit_controllers'):\n return redirect(url_for('routes_method.method_list'))\n\n form_create_method = forms_method.MethodCreate()\n form_add_method = forms_method.MethodAdd()\n form_mod_method = forms_method.MethodMod()\n\n form_fail = None\n\n # Used in software tests to verify function is executing as admin\n if method_id == '-1':\n return 'admin logged in'\n\n # Create new method\n elif method_id == '0':\n unmet_dependencies = utils_method.method_create(form_create_method)\n if unmet_dependencies:\n return redirect(url_for('routes_admin.admin_dependencies',\n 
device=form_create_method.method_type.data))\n\n if not unmet_dependencies:\n new_method = Method.query.order_by(Method.id.desc()).first()\n return redirect('/method-build/{method_id}'.format(\n method_id=new_method.unique_id))\n else:\n return redirect('/method')\n elif not method_id:\n flash(\"Invalid method ID\", \"error\")\n return redirect('/method')\n\n # First method column with general information about method\n method = Method.query.filter(Method.unique_id == method_id).first()\n\n if method.method_type == 'Cascade':\n method_data = MethodData.query.filter(\n MethodData.method_id == method.unique_id)\n\n cascade_method = Method.query.filter(Method.unique_id != method_id).all()\n\n if request.method == 'POST':\n form_name = request.form['form-name']\n if form_name == 'addMethod':\n form_fail = utils_method.method_add(form_add_method)\n elif form_name in ['modMethod', 'renameMethod']:\n form_fail = utils_method.method_mod(form_mod_method)\n if (form_name in ['addMethod', 'modMethod', 'renameMethod'] and\n not form_fail):\n return redirect('/method-build/{method_id}'.format(\n method_id=method.unique_id))\n\n if not method_data:\n method_data = []\n\n return render_template('pages/method-build.html',\n method=method,\n method_data=method_data,\n method_id=method_id,\n output_types=output_types(),\n cascade_method=cascade_method,\n form_create_method=form_create_method,\n form_add_method=form_add_method,\n form_mod_method=form_mod_method)\n\n if method.method_type in ['Date', 'Duration', 'Daily',\n 'DailySine', 'DailyBezier']:\n\n # Retrieve the order to display method data lines\n display_order = csv_to_list_of_str(method.method_order)\n\n method_data = MethodData.query.filter(\n MethodData.method_id == method.unique_id)\n setpoint_method_data = MethodData.query.filter(\n MethodData.setpoint_start.isnot(None))\n sine_method_data = MethodData.query.filter(\n MethodData.amplitude.isnot(None))\n bezier_method_data = MethodData.query.filter(\n MethodData.x0.isnot(None))\n if display_order:\n last_setpoint_method = setpoint_method_data.filter(\n MethodData.unique_id == display_order[-1]).first()\n last_sine_method = sine_method_data.filter(\n MethodData.unique_id == display_order[-1]).first()\n last_bezier_method = bezier_method_data.filter(\n MethodData.unique_id == display_order[-1]).first()\n else:\n last_setpoint_method = None\n last_sine_method = None\n last_bezier_method = None\n\n last_end_time = ''\n last_setpoint = ''\n if method.method_type in ['Daily', 'Date', 'Duration']:\n method_data = method_data.all()\n\n # Get last entry end time and setpoint to populate the form\n if last_setpoint_method is None:\n last_end_time = ''\n last_setpoint = ''\n else:\n last_end_time = last_setpoint_method.time_end\n if last_setpoint_method.setpoint_end is not None:\n last_setpoint = last_setpoint_method.setpoint_end\n else:\n last_setpoint = last_setpoint_method.setpoint_start\n\n if request.method == 'POST':\n form_name = request.form['form-name']\n if form_name == 'addMethod':\n form_fail = utils_method.method_add(form_add_method)\n elif form_name in ['modMethod', 'renameMethod']:\n form_fail = utils_method.method_mod(form_mod_method)\n if (form_name in ['addMethod', 'modMethod', 'renameMethod'] and\n not form_fail):\n return redirect('/method-build/{method_id}'.format(\n method_id=method.unique_id))\n\n if not method_data:\n method_data = []\n\n return render_template('pages/method-build.html',\n method=method,\n method_data=method_data,\n method_id=method_id,\n 
last_end_time=last_end_time,\n last_bezier_method=last_bezier_method,\n last_sine_method=last_sine_method,\n last_setpoint_method=last_setpoint_method,\n last_setpoint=last_setpoint,\n output_types=output_types(),\n form_create_method=form_create_method,\n form_add_method=form_add_method,\n form_mod_method=form_mod_method)\n\n return redirect('/method')\n\n\n@blueprint.route('/method-delete/')\n@flask_login.login_required\ndef method_delete(method_id):\n \"\"\"Delete a method.\"\"\"\n action = '{action} {controller}'.format(\n action=TRANSLATIONS['delete']['title'],\n controller=gettext(\"Method\"))\n\n if not utils_general.user_has_permission('edit_settings'):\n return redirect(url_for('routes_method.method_list'))\n\n try:\n MethodData.query.filter(\n MethodData.method_id == method_id).delete()\n MethodData.query.filter(\n MethodData.linked_method_id == method_id).delete()\n Method.query.filter(\n Method.unique_id == method_id).delete()\n display_order = csv_to_list_of_str(DisplayOrder.query.first().method)\n display_order.remove(method_id)\n DisplayOrder.query.first().method = list_to_csv(display_order)\n db.session.commit()\n flash(\"Success: {action}\".format(action=action), \"success\")\n except Exception as except_msg:\n flash(\"Error: {action}: {err}\".format(action=action,\n err=except_msg),\n \"error\")\n return redirect(url_for('routes_method.method_list'))\n","repo_name":"kizniche/Mycodo","sub_path":"mycodo/mycodo_flask/routes_method.py","file_name":"routes_method.py","file_ext":"py","file_size_in_byte":9582,"program_lang":"python","lang":"en","doc_type":"code","stars":2708,"dataset":"github-code","pt":"61"} +{"seq_id":"9960528214","text":"import logging, os, sys, traceback, unittest\n\nimport usb1\n\nfrom luna import configure_default_logging\n\nfrom pygreat.comms_backends.usb1 import USB1CommsBackend as backend\n\nimport cynthion\n\nVENDOR_ID = cynthion.shared.usb.bVendorId.cynthion\nPRODUCT_ID = cynthion.shared.usb.bProductId.cynthion\n\nEP_MAX_PACKET_SIZE = 512\n\n\nclass TestLibgreatProtocol(unittest.TestCase):\n \"\"\"Tests for libgreat protocol implementation.\"\"\"\n\n def setUp(self):\n configure_default_logging(level=os.getenv(\"LOG_LEVEL\", \"DEBUG\").upper())\n self.board = cynthion.Cynthion()\n\n def test_error_no_function(self):\n api = self.board.apis.firmware\n result = api.supports_verb(\"test_error_no_function\")\n self.assertFalse(result)\n\n with self.assertRaises(Exception) as context:\n result = api.test_error_no_function()\n self.assertTrue(\"object has no attribute 'test_error_no_function'\" in str(context.exception))\n\n def test_error_return_code(self):\n from pygreat.errors import LIBGREAT_ERROR_NAMES\n def get_error_code(name):\n return [n for n in LIBGREAT_ERROR_NAMES if LIBGREAT_ERROR_NAMES[n] == name][0]\n\n api = self.board.apis.selftest\n\n result = api.supports_verb(\"test_error_return_code\")\n self.assertTrue(result)\n\n result = api.test_error_return_code(0)\n self.assertEqual(result, \"ok\")\n logging.debug(f\"test_error_return_code: {result}\")\n\n code = get_error_code(\"EBUSY\")\n with self.assertRaises(Exception) as context:\n result = api.test_error_return_code(code)\n print(f\"test_error_return code got: {str(context.exception)}\")\n self.assertTrue(\"EBUSY\" in str(context.exception))\n\n code = get_error_code(\"ECONNRESET\")\n with self.assertRaises(Exception) as context:\n result = api.test_error_return_code(code)\n print(f\"test_error_return code got: {str(context.exception)}\")\n self.assertTrue(\"ECONNRESET\" in 
str(context.exception))\n\n\nclass TestLibgreatEndpoints(unittest.TestCase):\n \"\"\"Tests to verify that libgreat endpoints behave as expected.\"\"\"\n\n def setUp(self):\n configure_default_logging(level=os.getenv(\"LOG_LEVEL\", \"DEBUG\").upper())\n self.board = cynthion.Cynthion()\n\n def test_device(self):\n # query device a little\n with usb1.USBContext() as context:\n\n device_handle = context.openByVendorIDAndProductID(VENDOR_ID, PRODUCT_ID)\n device = device_handle.getDevice()\n print(f\"device: {device}\")\n print(f\" manufacturer: {device.getManufacturer()}\")\n print(f\" product: {device.getProduct()}\")\n\n for configuration in device.iterConfigurations():\n print(f\"configuration: {configuration}\")\n for interface in configuration:\n print(f\" interface: {interface}\")\n for setting in interface:\n print(f\" protocol: {setting.getProtocol()}\")\n for endpoint in setting:\n print(f\" endpoint: 0x{endpoint.getAddress():x}\")\n\n def test_api_command(self):\n api = self.board.apis.core\n\n response = api.read_version_string()\n self.assertEqual(response, \"v2023.0.1\")\n\n logging.debug(f\"received api response: {len(response)} -> '{response}'\")\n\n\n def test_large_api_commands(self):\n api = self.board.apis.moondancer\n\n response = api.test_read_endpoint(711)\n self.assertEqual(len(response), 711)\n logging.debug(f\"received api response: {len(response)} -> '{response}'\")\n\n payload = [b % 0xff for b in range(0, 711)]\n response = api.test_write_endpoint(1, bytes(payload))\n self.assertEqual(response, len(payload))\n logging.debug(f\"received api response: {response}\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"greatscottgadgets/cynthion","sub_path":"firmware/moondancer/test/test_libgreat.py","file_name":"test_libgreat.py","file_ext":"py","file_size_in_byte":3915,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"61"} +{"seq_id":"3287028263","text":"import cv2\nimport numpy as np\n\ncam = cv2.VideoCapture(0)\n\nwhile True:\n _, frame = cam.read()\n cv2.imshow(\"Frame\", frame) \n\n hsvFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n red_lower = np.array([161, 155, 84], np.uint8)\n red_upper = np.array([179, 255, 255], np.uint8)\n red_mask = cv2.inRange(hsvFrame, red_lower, red_upper)\n\n detected_output = cv2.bitwise_and(frame, frame, mask = red_mask) \n\n cv2.imshow(\"Red Color\", detected_output) \n if cv2.waitKey(5) & 0xFF == 27:\n break\n\ncam.release()\ncv2.destroyAllWindows()\n","repo_name":"DolphyWind/OpenCV-Projects","sub_path":"Color_Detection/color_detection.py","file_name":"color_detection.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"14064198526","text":"''' Given a rod of length L and a set of pieces with length and value,\nfind the maximum value that can be obtained after cutting the rod into\nthese pieces. 
The pieces can repeat.\nRod cutting is a variant of unbounded knapsack.\n\nAlgorithm:\n - Same as unbounded knapsack.\n - Start building the big rod from the pieces, just like filling knapsack with weights.\n\nComplexity: \n - Time: O(N * C) - same as unbounded knapsack\n - Space: O(C) - same as unbounded knapsack\n\nExample:\nInput: \n L = 15\n Piece lengths: [2, 3, 6, 7]\n Piece value: [4, 7, 11, 18]\n\nOutput: 36 since the piece length of 7 with value 18 is most efficient.\n\n'''\n\nfrom typing import List\n\ndef rod_cut_value(rod_length: int, piece_lengths: List[int], piece_values: List[int]) -> int:\n dp = [0] * (rod_length + 1)\n\n for i in range(rod_length + 1):\n for j in range(len(piece_lengths)):\n value_with_this_piece = 0\n if i >= piece_lengths[j]:\n value_with_this_piece = piece_values[j] + dp[i - piece_lengths[j]]\n dp[i] = max(dp[i], value_with_this_piece)\n\n return dp[-1]\n\ndef main():\n print(rod_cut_value(15, [2, 3, 6, 7], [4, 7, 11, 18]))\n\nif __name__ == '__main__':\n main()","repo_name":"uday-agarwal/practice","sub_path":"algo/dynamic_programming/rod_cutting.py","file_name":"rod_cutting.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"16341783809","text":"import os\nfrom flask import Blueprint, render_template, send_from_directory, jsonify\nimport requests\nfrom .utils import otp_required\n\nmain = Blueprint('main', __name__)\n\n@main.route('/favicon.ico')\ndef favicon():\n return send_from_directory(\n os.path.join(main.root_path, 'static'),\n 'favicon.ico',\n mimetype='image/vnd.microsoft.icon'\n )\n\n@main.route('/')\ndef index():\n return render_template('index.html')\n\n@main.route('/mcuuid/', methods=['GET'])\n@otp_required\ndef mcuuid(name):\n\n url = \"https://api.mojang.com/users/profiles/minecraft/\"+name\n response = requests.get(url)\n if response.status_code == 200:\n return jsonify(response.json())\n return ('', 204)\n","repo_name":"gergnz/mcshop","sub_path":"mcshop/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6441566983","text":"import os.path\nimport logging\nfrom datetime import datetime\nfrom turbogears.database import session\nfrom ttl.ttlmako import MakoBase\nimport prcommon.Constants as Constants\nfrom prcommon.model.clippings.clipping import Clipping\nfrom prcommon.model.clippings.analysegeneral import AnalyseGeneral\nfrom prcommon.model.client import Client\nfrom prcommon.model.outlet import Outlet\nfrom prcommon.model.customer.customermediaaccesstypes import CustomerMediaAccessTypes\nfrom prcommon.model.caching import CachePreBuildPageStore\nfrom prcommon.model.lookups import ClippingSource, ClippingsTone, ClippingsTypes, MediaAccessTypes\nfrom prcommon.model.clippings.clippingsissues import ClippingsIssues\nfrom prcommon.model.crm2.issues import Issue\nfrom prcommon.model.identity import User, Customer\nfrom prcommon.model.crm2.statements import Statements\nfrom prcommon.model.emails import EmailTemplates\n\nLOGGER = logging.getLogger(\"prcommon.model\")\n\nclass ClippingCache(MakoBase):\n\t\"\"\" ClippingCache \"\"\"\n\n\tdef __init__(self, processrecord):\n\t\t\"\"\" init \"\"\"\n\t\tMakoBase.__init__(self, os.path.dirname(__file__))\n\t\tself.templatename = \"clippings_profile.mak\"\n\t\tself._processrecord = processrecord\n\n\tdef run(self):\n\t\t\"\"\"interface for app server \"\"\"\n\n\t\t# 
setup data\n\t\tclippings = Clipping.query.get(self._processrecord.objectid)\n\n\t\toutlet = Outlet.query.get(clippings.outletid) if clippings.outletid else None\n\t\tclient = Client.query.get(clippings.clientid) if clippings.clientid else None\n\t\tclippingstone = ClippingsTone.query.get(clippings.clippingstoneid) if clippings.clippingstoneid else None\n\t\tclippingstype = ClippingsTypes.query.get(clippings.clippingstypeid) if clippings.clippingstypeid else None\n\t\tissues = \",\".join([issue[1].name for issue in session.query(ClippingsIssues, Issue).\\\n\t\t join(Issue, ClippingsIssues.issueid == Issue.issueid).\\\n\t\t filter(ClippingsIssues.clippingid == self._processrecord.objectid)])\n\n\t\tcustomer = Customer.query.get(clippings.customerid)\n\t\tmediaaccesstypes = []\n\t\tcmat = [ row.mediaaccesstypeid for row in session.query(CustomerMediaAccessTypes).\\\n\t filter(CustomerMediaAccessTypes.customerid == customer.customerid).all()]\n\n\t\tstatement = Statements.query.get(clippings.statementid) if clippings.statementid else None\n\t\tprrelease = EmailTemplates.query.get(clippings.emailtemplateid) if clippings.emailtemplateid else None\n\n\t\tself._data[\"pr\"] = dict(\n\t\t clippings=clippings,\n\t\t outlet=outlet,\n\t\t client=client,\n\t\t analytics=AnalyseGeneral.get_analyse_view_info(clippings.clippingid)[\"analytics\"],\n\t\t issues=issues,\n\t\t updated=datetime.now().strftime(\"%d/%m/%y %H:%M:%S\"),\n\t\t clippingsource=ClippingSource.query.get(clippings.clippingsourceid),\n\t\t clippingstone=clippingstone,\n\t\t clippingstype=clippingstype,\n\t\t customer=customer,\n\t\t mediaaccesstype=cmat,\n\t\t prrelease=prrelease,\n\t\t statement=statement\n\t\t)\n\n\t\t# create html\n\t\tsuper(ClippingCache, self).run()\n\n\t\t# add to cached prfile\n\t\tcached_record = session.query(CachePreBuildPageStore).\\\n\t\t filter(CachePreBuildPageStore.objectid == self._processrecord.objectid).\\\n\t\t filter(CachePreBuildPageStore.objecttypeid == Constants.Process_Clipping_View).scalar()\n\t\tif cached_record:\n\t\t\tcached_record.data = self.output_compressed\n\t\telse:\n\t\t\tsession.add(CachePreBuildPageStore(\n\t\t\t objectid=self._processrecord.objectid,\n\t\t\t objecttypeid=Constants.Process_Clipping_View,\n\t\t\t data=self.output_compressed\n\t\t\t))\n\n\t@staticmethod\n\tdef get_clippings_page(clippingid, userid):\n\t\t\"\"\" get the profile from the cache if present and add the customer private\n\t\tprofile in as well\"\"\"\n\n\t\tcached_record = session.query(CachePreBuildPageStore).\\\n\t\t filter(CachePreBuildPageStore.objectid == clippingid).\\\n\t\t filter(CachePreBuildPageStore.objecttypeid == Constants.Process_Clipping_View).scalar()\n\t\tif cached_record:\n\t\t\tuser = User.query.get(userid)\n\t\t\trdata = cached_record.data.decode(\"utf-8\")\n\t\t\trdata = rdata.replace(u\"%CLIENTNAME%\", user.client_name)\n\t\t\trdata = rdata.replace(u\"%ISSUENAME%\", user.issue_description)\n\n\t\t\treturn rdata\n\n\t\treturn \"
Page Under Construction Please try again later
\"\n","repo_name":"meanang123/prmax","sub_path":"prcommon/build/lib/prcommon/model/clippings/clippingscache.py","file_name":"clippingscache.py","file_ext":"py","file_size_in_byte":4123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23556756671","text":"import os\r\nimport time\r\nimport pytest\r\nimport collections\r\nfrom contextlib import contextmanager\r\nfrom typing import List\r\n\r\n\r\nINPUT_TYPE = 'large'\r\n\r\n\r\nCase = collections.namedtuple('Case', ['n'])\r\n\r\n\r\ndef solve(case: Case):\r\n \"\"\"break 'case', solve and return the solution\"\"\"\r\n\r\n digits = [int(digit) for digit in str(case.n)]\r\n last = 9\r\n for i in range(len(digits))[::-1]:\r\n current = digits[i]\r\n if current > last:\r\n for j in range(len(digits))[::-1]:\r\n if j == i:\r\n break\r\n digits[j] = 9\r\n digits[i] -= 1\r\n last = digits[i]\r\n return int(''.join(str(d) for d in digits))\r\n\r\n\r\ndef read_case(lines: List[str]) -> Case:\r\n \"\"\"Read a test case from the input.\"\"\"\r\n n = int(lines.pop(0))\r\n return Case(n)\r\n\r\n\r\ndef read_file(filepath: str) -> List[Case]:\r\n \"\"\"Read the input `filepath` and return a list of cases.\"\"\"\r\n cases = []\r\n with open(filepath, 'rt') as fobj:\r\n lines = fobj.readlines()\r\n num_cases = int(lines.pop(0))\r\n for _ in range(num_cases):\r\n cases.append(read_case(lines))\r\n return cases\r\n\r\n\r\ndef write_results(results: List, outfile: str) -> None:\r\n with open(outfile, 'wt') as f:\r\n for idx, result in enumerate(results):\r\n f.write('Case #{}: {}\\n'.format(idx + 1, result))\r\n\r\n\r\n@contextmanager\r\ndef timing(prefix):\r\n start = time.time()\r\n yield\r\n print('{} took {} seconds.'.format(prefix, time.time() - start))\r\n\r\n\r\ndef main(infile: str, outfile: str) -> None:\r\n cases = read_file(infile)\r\n results = []\r\n for idx, case in enumerate(cases):\r\n with timing(\"Solving case #{}\".format(idx + 1)):\r\n results.append(solve(case))\r\n write_results(results, outfile)\r\n\r\n\r\ntest_cases = {\r\n Case(132): 129,\r\n Case(1000): 999,\r\n Case(111111111111111110): 99999999999999999,\r\n}\r\n\r\n\r\n@pytest.mark.parametrize(('case', 'result'), test_cases.items())\r\ndef test_function(case: Case, result):\r\n assert solve(case) == result\r\n\r\n\r\nif __name__ == '__main__':\r\n pytest.main(args=[__file__])\r\n if INPUT_TYPE:\r\n main(os.path.join('io', '{}.in'.format(INPUT_TYPE)),\r\n os.path.join('io', '{}.out'.format(INPUT_TYPE)))\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/2824.py","file_name":"2824.py","file_ext":"py","file_size_in_byte":2264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23413167161","text":"\nCHEAT = -1\nBAD = -2\nINPUT = \"A-small-attempt0.in\"\nOUTPUT = \"out.txt\"\n\ndef main():\n in_f = open(INPUT, 'r')\n out_f = open(OUTPUT, 'w')\n T = int(in_f.readline())\n for case in range(1, T+1):\n r1 = int(in_f.readline())\n grid1 = []\n for i in range(4):\n grid1.append(map(int, in_f.readline().split()))\n r2 = int(in_f.readline())\n grid2 = []\n for i in range(4):\n grid2.append(map(int, in_f.readline().split()))\n card = solve(r1, grid1, r2, grid2)\n out_f.write(\"Case #\" + str(case) + \": \")\n if card == BAD:\n out_f.write(\"Bad magician!\\n\")\n elif card == CHEAT:\n out_f.write(\"Volunteer cheated!\\n\")\n else:\n out_f.write(str(card) + \"\\n\")\n\n in_f.close()\n out_f.close()\n\ndef solve(r1, grid1, r2, grid2):\n 
cards1 = grid1[r1 - 1]\n cards2 = grid2[r2 - 1]\n intersect = [card for card in cards1 if card in cards2]\n\n # print 'r1 = ' + str(r1)\n # print 'cards1 = ', cards1\n # print 'r1 = ' + str(r1)\n # print 'cards2 = ', cards2\n # print 'intersect = ', intersect\n\n\n if len(intersect) == 0:\n return CHEAT\n elif len(intersect) > 1:\n return BAD\n else:\n return intersect[0]\n\nmain()\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_135/2237.py","file_name":"2237.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26337737856","text":"import json\nimport boto3\nimport os\nimport datetime\nimport decimal\nfrom boto3.dynamodb.conditions import Key, Attr\n\n\nnow = datetime.datetime.now()\n\nsession = boto3.Session()\n\ndynamodb_client = session.resource('dynamodb')\nssm_client = session.client('ssm')\n\n\n# Helper class to convert a DynamoDB item to JSON.\nclass DecimalEncoder(json.JSONEncoder):\n def default(self, o):\n if isinstance(o, decimal.Decimal):\n return str(o)\n return super(DecimalEncoder, self).default(o)\n\n\ndef fetch_page(table, fetch_args):\n response = table.scan(**fetch_args)\n\n return response\n\n\ndef fetch_data(table):\n data = []\n call_args = {}\n\n while True:\n response = fetch_page(table, call_args)\n data.extend(response.get('Items', []))\n\n call_args['ExclusiveStartKey'] = response.get('LastEvaluatedKey')\n if call_args['ExclusiveStartKey'] is None:\n break\n\n return data\n\n\ndef fetch_transactions(sysid):\n table = dynamodb_client.Table('transactions')\n\n response = table.query(\n ProjectionExpression=\"#year_month, timestamp, payload, subsystem, sysid\",\n # Expression Attribute Names for Projection Expression only.\n ExpressionAttributeNames={\"#year_month\": \"year_month\"},\n KeyConditionExpression=Key('year').eq('{year}_{month}'.format(\n year=now.year, month=now.month)) & Key('sysid').eq(sysid),\n ScanIndexForward=True\n )\n\n return response[u'Items']\n\n\ndef hello(event, context):\n body = fetch_transactions('9784233fdbed57c0bb2343a84b96193e')\n\n response = {\n \"statusCode\": 200,\n \"body\": json.dumps(body, cls=DecimalEncoder)\n }\n\n return response\n","repo_name":"salekseev/ncod-inspector","sub_path":"looking-glass.py","file_name":"looking-glass.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"537248391","text":"from flask import Flask\r\nfrom flask import render_template, flash, redirect, url_for\r\nfrom config import Config\r\nfrom forms import LoginForm\r\n\r\napp = Flask(__name__)\r\napp.config.from_object(Config)\r\n\r\n\r\n@app.route('/')\r\n@app.route('/index')\r\ndef index():\r\n user = {'username': \"Shawn\"}\r\n posts = [\r\n {\r\n 'author': {'username': 'Bob'},\r\n 'body': 'Bobs your uncle'\r\n },\r\n {\r\n 'author': {'username': 'Tiff'},\r\n 'body': 'Like, I totally like cheetos. 
For real.'\r\n }\r\n ]\r\n return render_template('index.html', title='lame users talk', user=user, posts=posts)\r\n\r\n\r\n@app.route('/backpack')\r\ndef backpack():\r\n user = {'username': \"Shawn\"}\r\n my_backpack = [\r\n {'type': 'sword',\r\n 'name': 'Sting'},\r\n {\r\n 'type': 'dagger',\r\n 'name': 'Dagger +1'\r\n }\r\n ]\r\n return render_template('backpack.html', title='Backpack', user=user, backpack=my_backpack)\r\n\r\n\r\n@app.route('/login', methods=['GET', 'POST'])\r\ndef login():\r\n form = LoginForm()\r\n if form.validate_on_submit():\r\n flash(f'Login requested for user {form.username.data}, remember_me={form.remember_me.data}')\r\n return redirect(url_for('index'))\r\n return render_template('login.html', title='Sign In', form=form)\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run()\r\n","repo_name":"sperrone78/AddieQuest","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5464610097","text":"# %%\nimport matplotlib\nmatplotlib.rc( 'image', cmap='jet' )\nimport numpy, ufl\nfrom dune.alugrid import aluConformGrid as view\nfrom dune.grid import cartesianDomain, Marker\nfrom dune.fem.function import gridFunction\nfrom dune.fem.space import lagrange, dgonb\nfrom dune.fem.view import geometryGridView, adaptiveLeafGridView\nimport dune.fem\nfrom dune.ufl import Constant\nimport dune.common.pickle\nfrom transform import exact\n\ndef test1(fileName):\n grid = view( cartesianDomain([-2,-2],[2,2],[10,10]) )\n grid = adaptiveLeafGridView( grid )\n\n \"\"\"\n # mark further down fails on geometryGV due to wrong entity type from hierarchicalGrid\n x = ufl.SpatialCoordinate(ufl.triangle)\n expr = [ (x[0]+x[1])/ufl.sqrt(2), (-x[0]+x[1])*ufl.sqrt(2) ]\n coord = lagrange(grid,dimRange=2).interpolate(expr,name=\"coord\")\n grid = geometryGridView( coord )\n \"\"\"\n\n @gridFunction(grid, name=\"gf\", order=3)\n def gf(x): return numpy.sqrt(1-(x[0]**2+x[1]**2)) if x[0]**2+x[1]**2<1 else 0\n\n space = lagrange(grid, order=2)\n df = space.interpolate(gf,name=\"test\")\n\n dune.fem.globalRefine(4,grid.hierarchicalGrid)\n # grid.hierarchicalGrid.globalRefine(4)\n for i in range(5):\n grid.hierarchicalGrid.mark(lambda e:\n Marker.refine if df.localFunction(e).jacobian([1./3.,1./3.]).infinity_norm > 1\n else Marker.coarsen)\n dune.fem.adapt(grid.hierarchicalGrid)\n df.interpolate( gf )\n print(\"size of adapted grid:\", grid.size(0))\n # df.plot()\n\n # there is an issue with GeometryGV and adaptivity - perhaps\n # one needs to change the order, i.e., can only use GeometryGV\n # not the other way around?\n \"\"\"\n x = ufl.SpatialCoordinate(ufl.triangle)\n expr = [ (x[0]+x[1])/ufl.sqrt(2), (-x[0]+x[1])*ufl.sqrt(2) ]\n coord = lagrange(grid,dimRange=2).interpolate(expr,name=\"coord\")\n grid = geometryGridView( coord )\n \"\"\"\n\n t = Constant(0,\"time\")\n x = ufl.SpatialCoordinate(ufl.triangle)\n lag = lagrange(grid, order=3)\n # lag = dgonb(grid, order=3)\n dg = dgonb(grid, order=4, dimRange=2)\n dg5 = dgonb(grid, order=4, dimRange=4)\n df2 = lag.interpolate(exact(grid),name=\"exact_h\")\n df5 = dg5.interpolate( [ufl.dot(x,x), -x[1],x[0], ufl.sin(x[0]*x[1])], name=\"euler\")\n df3 = dg.interpolate(ufl.tanh(2*(t*x[0]-x[1]))*x,name=\"tanh\")\n\n with open(fileName+\".dbf\",\"wb\") as f:\n dune.common.pickle.dump([1,2,df2,3,df3,df5],f) # adding some numbers just for testing\n\n series = dune.common.pickle.SeriesPickler(fileName, [1,2,df2,3,df3,df5])\n\n tsps 
= [0,0.1,0.4,0.8,2,4]\n for i,tsp in enumerate(tsps):\n t.value = tsp\n df2.interpolate(exact(grid,tsp))\n df3.interpolate(ufl.tanh(2*(t*x[0]-x[1]))*x)\n series.dump({\"time\":tsp})\n\ntest1(\"dump\")\n","repo_name":"dune-mirrors/dune-fem","sub_path":"vtkreader/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"41786321775","text":"from __future__ import with_statement\nimport os\nfrom contextlib import contextmanager\n\nfrom fabric.api import cd, run, env, prefix, sudo, put\nimport cuisine\n\n\nenv.user = 'ubuntu'\nenv.hosts = ['52.4.67.56']\nenv.host_string = env.hosts[0]\nVENV_DIR = 'virtual_env'\nVENV_PATH = '/home/%s/%s/' % (env.user, VENV_DIR)\nCACHE_ROOT_DIR = '/home/%s/cache' % env.user\nCACHE_LOGS_DIR = '/tmp/cache_logs/'\n\n\n@contextmanager\ndef virtualenv():\n venv_activate = 'source %s%sbin%sactivate' % (VENV_PATH, os.sep, os.sep)\n with cuisine.cd(VENV_PATH):\n with prefix(venv_activate):\n yield\n\ndef setup_packages():\n sudo('apt-get update --fix-missing')\n sudo('apt-get install -y python-pip')\n sudo('pip install virtualenv')\n\ndef create_virtualenv():\n if not cuisine.dir_exists(VENV_PATH):\n run('virtualenv -p python2.7 ' + VENV_PATH)\n\n with virtualenv():\n run('pip install boto')\n run('pip install Flask')\n run('pip install uwsgi')\n\ndef copy_cache_scripts():\n if not cuisine.dir_exists(CACHE_ROOT_DIR):\n run('mkdir %s' % CACHE_ROOT_DIR)\n sub_folders = ['cache_layer', 'cache_web_interface']\n main_dir = os.path.dirname(os.getcwd())\n for folder in sub_folders:\n local_path = os.path.join(main_dir, folder)\n put(local_path, CACHE_ROOT_DIR)\n if not cuisine.dir_exists(CACHE_LOGS_DIR):\n run('mkdir %s' % CACHE_LOGS_DIR)\n\ndef setup_cron():\n required_scriptnames = [\n 'cache_starter.py',\n 'simmetrica_class.py',\n 'send_cache_mail_report.py',\n ]\n\ndef restart_services():\n sudo('service nginx restart')\n sudo('service uwsgi restart')\n\ndef deploy():\n setup_packages()\n create_virtualenv()\n copy_cache_scripts()\n setup_cron()\n restart_services()\n\nif __name__ == '__main__':\n deploy()","repo_name":"aprosdev/ecom-predictor","sub_path":"deploy/cache_layer/deploy_code.py","file_name":"deploy_code.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"20719839575","text":"\nimport os\nfrom decouple import config, Csv\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nSECRET_KEY = config('SECRET_KEY')\n\nDEBUG = config('DEBUG', default=True, cast=bool)\n\n# ALLOWED_HOSTS = config('ALLOWED_HOSTS', default=[], cast=Csv())\nALLOWED_HOSTS = ['*',]\n\n# CORS_ORIGIN_ALLOW_ALL = True\n# CORS_ALLOW_CREDENTIALS = True\n# # CORS_ORIGIN_WHITELIST = config('ALLOWED_HOSTS', cast=Csv())\n# CORS_ORIGIN_WHITELIST = (\n# 'localhost',\n# )\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n # External apps\n 'rest_framework',\n 'corsheaders',\n 'oauth2_provider',\n 'social_django',\n 'rest_framework_social_oauth2',\n\n # Custom Apps\n 'apps.accounts',\n 'apps.profiles',\n]\n\n\n# ----------------------------------------------- #\n\nREST_FRAMEWORK = {\n 
'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.URLPathVersioning',\n 'EXCEPTION_HANDLER': 'apps.core.exceptions.core_exception_handler',\n 'NON_FIELD_ERRORS_KEY': 'error',\n\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'apps.accounts.backends.JWTAuthentication',\n 'oauth2_provider.contrib.rest_framework.OAuth2Authentication',\n 'rest_framework_social_oauth2.authentication.SocialAuthentication',\n ),\n 'DEFAULT_FILTER_BACKENDS': (\n 'django_filters.rest_framework.DjangoFilterBackend',\n ),\n}\n\nAUTH_USER_MODEL = 'accounts.User'\n\nAUTHENTICATION_BACKENDS = (\n # Facebook OAuth2\n 'social_core.backends.facebook.FacebookAppOAuth2',\n 'social_core.backends.facebook.FacebookOAuth2',\n\n 'social_core.backends.instagram.InstagramOAuth2',\n\n 'rest_framework_social_oauth2.backends.DjangoOAuth2',\n 'django.contrib.auth.backends.ModelBackend',\n)\n\n# Facebook configuration\nSOCIAL_AUTH_FACEBOOK_KEY = config('FACEBOOK_KEY')\nSOCIAL_AUTH_FACEBOOK_SECRET = config('FACEBOOK_SECRET')\n\nSOCIAL_AUTH_FACEBOOK_SCOPE = ['email']\nSOCIAL_AUTH_FACEBOOK_PROFILE_EXTRA_PARAMS = {\n 'fields': 'id, name, email, age_range, gender'\n}\n\n\n# Instagram configuration\nSOCIAL_AUTH_INSTAGRAM_KEY = config('INSTAGRAM_KEY')\nSOCIAL_AUTH_INSTAGRAM_SECRET = config('INSTAGRAM_SECRET')\nSOCIAL_AUTH_INSTAGRAM_EXTRA_DATA = [('user', 'user'),]\n\n\n# ------------------------------------------------\n\n\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n\n # Third Party\n 'corsheaders.middleware.CorsMiddleware',\n # 'social_core.apps.django_app.middleware.SocialAuthExceptionMiddleware',\n]\n\nROOT_URLCONF = 'authentication.urls'\n\n# Template Path\nTEMPLATE_PATH = os.path.join(BASE_DIR, 'templates')\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [TEMPLATE_PATH, ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n # Custom\n 'social_django.context_processors.backends',\n 'social_django.context_processors.login_redirect',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'authentication.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/2.2/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n# Any Other Database Configuration\n# DATABASES = {\n# 'default': {\n# 'ENGINE': config('DB_ENGINE'),\n# 'NAME': config('DB_NAME'),\n# 'USER': config('DB_USER'),\n# 'PASSWORD': config('DB_PASS'),\n# 'HOST': config('DB_HOST'),\n# 'PORT': config('DB_PORT'),\n# }\n# }\n\n# Password validation\n# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 
'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/2.2/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static Configuration\n\nSTATIC_URL = '/static/'\n\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, \"static\"),\n]\n\nSTATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), \"static\")\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), \"media\")\n\nVERIFICATION_KEY_EXPIRY_DAYS = 2\n\n\nEMAIL_USE_TLS = config('EMAIL_USE_TLS', cast=bool)\nEMAIL_HOST = config('EMAIL_HOST')\nEMAIL_HOST_USER = config('EMAIL_HOST_USER')\nEMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD')\nDEFAULT_FROM_EMAIL = 'LocalHost '\nEMAIL_PORT = config('EMAIL_PORT', cast=int)\n\n# EMAIL_BACKEND = django.core.mail.backends.smtp.EmailBackend\nSENDER_EMAIL = config('SENDER_EMAIL')\nSENDER_NAME = config('SENDER_NAME')\nREPLY_EMAIL = config('REPLY_EMAIL')\n\nSITE_URL = config('SITE_URL')\nSITE_NAME = config('SITE_NAME')\n","repo_name":"SkillsHats/social_auth_rest_framework","sub_path":"src/authentication/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":5960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73328762755","text":"import traceback\nfrom traceback import print_exc\nfrom flask import request\n\nfrom sqlalchemy import func\n\nfrom deposits import db\nfrom .errors import NotFoundError, PermissionDeniedError\nfrom .query_filters import extract_special_queries, apply_special_queries\n\n\ndef _make_url_query(args):\n \"\"\"\n Helper function to return a query url string from a dict\n \"\"\"\n return '?' + '&'.join('%s=%s' % (key, args[key]) for key in args)\n\n\ndef _get_queryset(klass):\n \"\"\"Returns the queryset for `klass` model\"\"\"\n return klass.query\n\n\ndef get_list_or_404(klass, **kwargs):\n \"\"\"Abstraction over `get_object_list`.\n Raises 404 error if the `obj_list` is empty.\n \"\"\"\n obj_list = get_object_list(klass, **kwargs)\n if not obj_list:\n raise NotFoundError(message='Object list is empty')\n return obj_list\n\n\ndef get_object_list(klass, **kwargs):\n \"\"\"Returns a list of objects of a model class. Uses other passed arguments\n with `filter_by` to filter objects.\n `klass` can be a model such as a User, Post, etc.\n \"\"\"\n queryset = _get_queryset(klass)\n kwargs, specials = extract_special_queries(kwargs)\n # case insenstive filter\n for i in kwargs:\n if type(kwargs[i]) == str:\n queryset = queryset.filter(\n func.lower(getattr(klass, i)) == kwargs[i].lower())\n else:\n queryset = queryset.filter(getattr(klass, i) == kwargs[i])\n # special filters\n obj_list = apply_special_queries(queryset, specials)\n # return as list\n return list(obj_list)\n\n\ndef get_object_or_404(klass, id_):\n \"\"\"Returns a specific object of a model class given its identifier. 
In case\n the object is not found, 404 is returned.\n `klass` can be a model such as a User, Post, etc.\n \"\"\"\n queryset = _get_queryset(klass)\n obj = queryset.get(id_)\n if obj is None:\n raise NotFoundError(message='{} does not exist'.format(klass.__name__))\n return obj\n\n\ndef save_to_db(item):\n \"\"\"Save a model to database\"\"\"\n try:\n db.session.add(item)\n db.session.commit()\n return True\n except Exception:\n traceback.print_exc()\n db.session.rollback()\n return False\n\n\ndef get_paginated_list(klass, url=None, args={}, **kwargs):\n \"\"\"\n Returns a paginated response object\n\n klass - model class to query from\n url - url of the request\n args - args passed to the request as query parameters\n kwargs - filters for query on the `klass` model. if\n kwargs has event_id, check if it exists for 404\n \"\"\"\n # if 'event_id' in kwargs:\n # get_object_or_404(EventModel, kwargs['event_id'])\n # auto-get url\n if url is None:\n url = request.base_url\n # get page bounds\n start = args.get('start', 1)\n limit = args.get('limit', 20)\n # check if page exists\n results = get_object_list(klass, **kwargs)\n count = len(results)\n if count < start and ((count > 0) or (count == 0 and start > 1)):\n raise NotFoundError(\n message='Start position \\'{}\\' out of bound'.format(start))\n # make response\n obj = {}\n obj['start'] = start\n obj['limit'] = limit\n obj['count'] = count\n # make URLs\n # make previous url\n args_copy = args.copy()\n if start == 1:\n obj['previous'] = ''\n else:\n args_copy['start'] = max(1, start - limit)\n args_copy['limit'] = start - 1\n obj['previous'] = url + _make_url_query(args_copy)\n # make next url\n args_copy = args.copy()\n if start + limit > count:\n obj['next'] = ''\n else:\n args_copy['start'] = start + limit\n obj['next'] = url + _make_url_query(args_copy)\n # finally extract result according to bounds\n obj['results'] = results[(start - 1):(start - 1 + limit)]\n\n return obj\n\n\ndef delete_from_db(item, msg='Deleted from db'):\n \"\"\"\n Delete from database\n \"\"\"\n try:\n result = db.engine.execute(\"delete from \\\"{}\\\" where id={}\".format(item.__table__, item.id))\n # ^^ quotes so that \"user\" works\n print(result)\n # ^^ experimental\n # db.session.delete(item)\n # db.session.commit()\n return True\n except Exception:\n print_exc()\n db.session.rollback()\n return False\n\n\ndef update_model(model, item_id, data, user_id=None):\n \"\"\"\n Updates a model\n \"\"\"\n item = get_object_or_404(model, item_id)\n # if no data in payload, happens when only related models were\n # changed through the API\n if len(data) == 0:\n return item\n # check if item belongs to user\n if user_id and item.user_id != user_id:\n raise PermissionDeniedError(message='This {} belongs to another user'.format(model.__name__))\n # update data\n db.session.query(model).filter_by(id=item_id).update(dict(data))\n # model.__table__.update().where(model.id==item_id).values(**data)\n save_to_db(item)\n return item\n\n\ndef create_model(model, data):\n \"\"\"\n Creates a model\n \"\"\"\n new_model = model(**data)\n save_to_db(new_model)\n return new_model\n\n\ndef delete_model(model, item_id, user_id=None):\n \"\"\"\n Deletes a model\n \"\"\"\n item = get_object_or_404(model, item_id)\n if user_id and item.user_id != user_id:\n raise PermissionDeniedError(message='This {} belongs to another user'.format(model.__name__))\n else:\n status = delete_from_db(item, '{} deleted'.format(model.__name__))\n if not status:\n raise NotFoundError('{} delete 
failed'.format(model.__name__))\n return item\n","repo_name":"aviaryan/deposits-app","sub_path":"backend/deposits/helpers/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":5534,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"17223444758","text":"import pyndi\nimport numpy as np\nimport sounddevice as sd\nfrom tkinter import *\nfrom tkinter import ttk\n\nclass NDI_Audio_Mixer:\n def __init__(self, ndi_names):\n self.ndi_names = ndi_names\n self.finder = pyndi.Finder()\n self.update_sources()\n self.init_output_stream()\n self.init_ndi_sender()\n\n def update_sources(self):\n sources = self.finder.get_sources()\n self.ndi_sources = [next((source for source in sources if source.name == name), None) for name in self.ndi_names]\n self.receivers = [pyndi.Receiver() for _ in self.ndi_sources]\n for receiver, ndi_source in zip(self.receivers, self.ndi_sources):\n receiver.create_receiver(ndi_source)\n\n def init_output_stream(self):\n samplerate = self.receivers[0].audio_sample_rate\n blocksize = 1024\n channels = self.receivers[0].audio_channels\n self.output_stream = sd.OutputStream(\n samplerate=samplerate,\n blocksize=blocksize,\n channels=channels,\n dtype='float32'\n )\n\n def init_ndi_sender(self):\n self.sender = pyndi.Sender()\n self.ndi_name = 'Mixed NDI Audio'\n self.ndi_source = pyndi.AudioSource(name=self.ndi_name)\n self.sender.create_source(self.ndi_source)\n\n def change_ndi_name(self, new_name):\n if new_name and new_name != self.ndi_name:\n self.ndi_name = new_name\n self.ndi_source = pyndi.AudioSource(name=self.ndi_name)\n self.sender.create_source(self.ndi_source)\n\n def mix_audio(self, indata, outdata, frames, time, status, volume_sliders):\n mixed_audio = np.zeros_like(outdata)\n for receiver, volume_slider in zip(self.receivers, volume_sliders):\n audio_data = receiver.receive_audio(frames)\n while len(audio_data) < len(indata):\n audio_data = np.concatenate((audio_data, receiver.receive_audio(frames)))\n audio_data = audio_data[:len(indata)]\n mixed_audio += audio_data * volume_slider.get()\n outdata[:] = mixed_audio\n\nclass NDI_Audio_Mixer_UI:\n def __init__(self, mixer):\n self.mixer = mixer\n self.root = Tk()\n self.root.title('NDI Audio Mixer')\n\n self.init_ui()\n\n def init_ui(self):\n self.init_frame1()\n self.init_frame2()\n self.init_frame3()\n\n def init_frame1(self):\n frame1 = Frame(self.root)\n frame1.pack(side=TOP)\n\n source_label = Label(frame1, text='Select NDI source:')\n source_label.pack(side=LEFT)\n\n selected = StringVar()\n source_menu = OptionMenu(frame1, selected, 'Loading...')\n source_menu.pack(side=LEFT)\n\n self.mixer.update_sources()\n\n def init_frame2(self):\n frame2 = Frame(self.root)\n frame2.pack(side=TOP)\n\n self.volume_sliders = []\n for i, ndi_name in enumerate(self.mixer.ndi_names):\n volume_label = Label(frame2, text='Volume for {}:'.format(ndi_name))\n volume_label.grid(row=i, column=0, padx=5, pady=5)\n volume_slider = Scale(frame2, from_=0, to=1, resolution=0.01, orient=HORIZONTAL)\n volume_slider.set(0.5)\n volume_slider.grid(row=i, column=1, padx=5, pady=5)\n self.volume_sliders.append(volume_slider)\n\n def init_frame3(self):\n frame3 = Frame(self.root)\n frame3.pack(side=TOP)\n\n ndi_name_label = Label(frame3, text='NDI Output Stream Name:')\n ndi_name_label.pack(side=LEFT)\n\n ndi_name_input = Entry(frame3)\n ndi_name_input.pack(side=LEFT)\n\n change_name_button = Button(frame3, text='Change Name', command=lambda: 
self.mixer.change_ndi_name(ndi_name_input.get()))\n change_name_button.pack(side=LEFT)\n\n def run(self):\n with sd.Stream(blocksize=1024, callback=lambda *args, **kwargs: self.mixer.mix_audio(*args, **kwargs, volume_sliders=self.volume_sliders)):\n self.root.mainloop()\n\nif __name__ == \"__main__\":\n ndi_names = ['NDI Source 1', 'NDI Source 2', 'NDI Source 3']\n mixer = NDI_Audio_Mixer(ndi_names)\n mixer.output_stream.start()\n ui = NDI_Audio_Mixer_UI(mixer)\n ui.run()\n mixer.output_stream.stop()\n","repo_name":"yakimoto/vMix","sub_path":"NDI/NDI Audio Mixer 2.py","file_name":"NDI Audio Mixer 2.py","file_ext":"py","file_size_in_byte":4213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30018768827","text":"from Crypto.Hash import SHA256 as hashFunction\r\nimport datetime as date\r\n\r\n\r\nclass Block:\r\n def __init__(self, transaction=None, parent_hash=None, height=0, timestamp = '31 Aug 2018 11:00:00'):\r\n self.height = height\r\n self.timestamp = timestamp\r\n self.transaction = transaction\r\n self.parent_hash = parent_hash\r\n self.hash = self.hash_self()\r\n\r\n def hash_self(self):\r\n hash = hashFunction.new()\r\n hash.update((str(self.height) + \r\n str(self.timestamp) + \r\n str(self.transaction) + \r\n str(self.parent_hash)).encode())\r\n return hash.hexdigest()","repo_name":"derekzx/blockchain-in-py","sub_path":"block.py","file_name":"block.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70451803076","text":"\"\"\"\n1383. Maximum Performance of a Team\n\"\"\"\n\nfrom typing import List\nfrom heapq import heappop, heappush\n\nclass Solution:\n def maxPerformance(self, n: int, speed: List[int], efficiency: List[int], k: int) -> int:\n mod = 10 ** 9 + 7\n s_e = []\n for i in range(n):\n s_e.append((efficiency[i], speed[i]))\n\n s_e.sort(key = lambda x:(-x[0]))\n ret = 0\n sums = 0\n heap = []\n\n for idx, val in enumerate(s_e):\n eff, spe = val\n if idx >= k:\n sums -= heappop(heap)\n sums += spe\n heappush(heap, spe)\n ret = max(ret, sums*eff)\n\n return ret % mod\n","repo_name":"dictator-x/practise_as","sub_path":"algorithm/leetCode/1383_maximum_performance_of_a_team.py","file_name":"1383_maximum_performance_of_a_team.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34781833338","text":"import math\nimport numpy as np\nimport h5py\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom tensorflow.python.framework import ops\n\nnp.random.seed(1)\n\n\ndef linearFunction():\n np.random.seed(1)\n\n # X = tf.constant(np.random.randn(3, 1), name=\"X\") # 定义一个维度是(3, 1)的常量,randn函数会生成随机数\n # W = tf.constant(np.random.randn(4, 3), name=\"W\")\n # b = tf.constant(np.random.randn(4, 1), name=\"b\")\n # Y = tf.add(tf.matmul(W, X), b) # tf.matmul函数会执行矩阵运算\n\n X = tf.constant(np.random.randn(3, 1), name=\"X\")\n W = tf.constant(np.random.randn(4, 3), name=\"W\")\n b = tf.constant(np.random.randn(4, 1), name=\"b\")\n Y = tf.add(tf.matmul(W, X), b) # tf.matmul函数会执行矩阵运算\n # Y = tf.add(tf.matmul(X ,W), b)\n # print(X.shape[0],X.shape[1])\n\n sess = tf.Session()\n result = sess.run(Y)\n sess.close()\n return result\n\ndef sigmoid(z):\n x = tf.placeholder(tf.float32,name=\"x\")\n sigmoid = tf.sigmoid(x)\n with tf.Session() as sess :\n result = sess.run(sigmoid,feed_dict={x:z})\n\n return result\n\ndef 
cost(z_in,y_in):\n z = tf.placeholder(tf.float32,name=\"z\")\n y = tf.placeholder(tf.float32,name=\"y\")\n\n cost = tf.nn.sigmoid_cross_entropy_with_logits(logits=z,labels=y)\n\n sess = tf.Session()\n cost = sess.run(cost,feed_dict={z: z_in,y: y_in})\n sess.close()\n return cost\n\nprint(\"result = \" + str(linearFunction()))\nprint(\"sigmoid(0) = \"+str(sigmoid(0)))\nprint(\"sigmoid(12) = \"+str(sigmoid(12)))\n\nlogits = np.array([0.2,0.4,0.7,0.9])\ncost = cost(logits,np.array([0,0,1,1]))\nprint(\"cost = \" + str(cost))\n\ndef one_hot_matrix(labels,C_in):\n C=tf.constant(C_in,name='C')\n one_hot_matrix = tf.one_hot(indices=labels,depth=C,axis=0)\n sess = tf.Session()\n one_hot = sess.run(one_hot_matrix)\n sess.close()\n return one_hot\n\nlables = np.array([1,2,3,0,2,1])\none_hot = one_hot_matrix(lables,C_in=4)\nprint(\"one_hot = \"+str(one_hot))\n\ndef ones(shape):\n ones = tf.ones(shape)\n sess = tf.Session()\n ones = sess.run(ones)\n sess.close()\n return ones\n\nprint(\"ones = \"+ str(ones([3])))","repo_name":"a13483685/AiStudyOrigin","sub_path":"tfStudy.py","file_name":"tfStudy.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24872814897","text":"def wiw_sort(word, words):\r\n\r\n m = 0\r\n n = 0\r\n length = 0\r\n count = 0\r\n\r\n if (len(words[0]) > len(words[1])):\r\n words.reverse()\r\n for y in words[0]:\r\n length += 1\r\n if (length <= len(words[0])):\r\n for x in word:\r\n if x == y:\r\n m = m + word.index(x)\r\n for j in words[1]:\r\n count += 1\r\n if (count <= len(words[0])):\r\n for i in word:\r\n if i == j:\r\n n = n + word.index(i)\r\n cal(m, n, words)\r\n return words\r\n\r\n\r\ndef cal(m, n, words):\r\n if m > n:\r\n words.reverse()\r\n return words\r\n","repo_name":"Joyce-Hung/wordbranch","sub_path":"wiw_sort.py","file_name":"wiw_sort.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3950570628","text":"\nfrom planes.lazy import serve, BaseService, LogService\n\nclass Service(BaseService):\n def __call__(self, kit):\n request = kit.request\n request.setResponseCode(200, \"OK\")\n request.setHeader(\"content-type\", \"text/plain\")\n path = request.path[1:]\n who = path and path or 'World'\n request.write(\"Hello, %s!\\n\" % who)\n request.finish()\n 1 / 0\n\nserve(port = 8080, service = LogService(Service()))\n\n","repo_name":"kriskowal/planes","sub_path":"demo/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"27686934255","text":"import random\nimport unittest\nimport os\nimport pprint\nfrom serpapi import BaiduSearch\n\nclass TestBaiduSearchApi(unittest.TestCase):\n\n\t\tdef setUp(self):\n\t\t\t\tBaiduSearch.SERP_API_KEY = os.getenv(\"API_KEY\", \"demo\")\n\n\t\t@unittest.skipIf((os.getenv(\"API_KEY\") == None), \"no api_key provided\")\n\t\tdef test_get_json(self):\n\t\t\t\tsearch = BaiduSearch({\"q\": \"Coffee\"})\n\t\t\t\tdata = search.get_json()\n\t\t\t\tself.assertIsNone(data.get(\"error\"))\n\t\t\t\tself.assertEqual(data[\"search_metadata\"][\"status\"], \"Success\")\n\t\t\t\tself.assertIsNotNone(data[\"search_metadata\"][\"baidu_url\"])\n\t\t\t\tself.assertIsNotNone(data[\"search_metadata\"][\"id\"])\n\t\t\t\tself.assertIsNotNone(data[\"organic_results\"][0][\"title\"])\n\t\t\t\tpp = 
pprint.PrettyPrinter(indent=2)\n\t\t\t\tpp.pprint(data)\n\nif __name__ == '__main__':\n\t\tunittest.main()\n","repo_name":"cwyrwas/ChatGPT-Content-Generator","sub_path":".venv/Lib/site-packages/tests/test_baidu_search.py","file_name":"test_baidu_search.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":126,"dataset":"github-code","pt":"61"} +{"seq_id":"17015115014","text":"import pandas as pd\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport numpy as np\nfrom torch.utils.data import DataLoader, TensorDataset\nimport os\nimport shutil\nfrom backtest import test_model\n\n\nclass TransformerModel(nn.Module):\n def __init__(self, input_params, nhead, num_layers, d_model, dim_feedforward):\n super(TransformerModel, self).__init__()\n\n self.embedding = nn.Linear(input_params, d_model)\n encoder_layer = nn.TransformerEncoderLayer(\n d_model, nhead, dim_feedforward)\n self.transformer_encoder = nn.TransformerEncoder(\n encoder_layer, num_layers=num_layers)\n self.linear = nn.Linear(d_model, 1)\n\n def forward(self, x):\n x = self.embedding(x)\n x = self.transformer_encoder(x.unsqueeze(1))\n x = self.linear(x.squeeze(1))\n return x\n\n\nclass LinearModel(nn.Module):\n def __init__(self, input_params):\n super(LinearModel, self).__init__()\n self.linear = nn.Linear(input_params, 1)\n\n def forward(self, x):\n return self.linear(x)\n\n\ndef load_data(data_path):\n data = pd.read_csv(data_path, index_col=0, header=0)\n data.drop([\"open_price\"], axis=1, inplace=True)\n data = data.dropna(how='any')\n target = data.pop('return')\n pd.DataFrame({'Column Names': data.columns}).to_csv(\n 'column_order_for_live_env.csv', index=False)\n X_train = torch.Tensor(data.values.astype(np.float32))\n y_train = torch.Tensor(target.values.reshape(-1, 1).astype(np.float64))\n return X_train, y_train\n\n\ndef get_long_short_thresholds():\n\n ret = []\n long_threshold = 1.0025\n short_threshold = 0.9975\n\n for i in range(1, 30):\n long_threshold = 1.0025 + (i * 0.001)\n short_threshold = 0.9975 - (i * 0.001)\n thresholds = {\n \"strategy\": \"long_short\",\n \"long_threshold\": long_threshold,\n \"short_threshold\": short_threshold\n }\n ret.append(thresholds)\n\n for i in range(1, 30):\n long_threshold = 1.0025 + (i * 0.001)\n short_threshold = 0\n thresholds = {\n \"strategy\": \"long_only\",\n \"long_threshold\": long_threshold,\n \"short_threshold\": short_threshold\n }\n ret.append(thresholds)\n\n for i in range(1, 30):\n long_threshold = 100\n short_threshold = 0.9975 - (i * 0.001)\n thresholds = {\n \"strategy\": \"short_only\",\n \"long_threshold\": long_threshold,\n \"short_threshold\": short_threshold\n }\n ret.append(thresholds)\n\n return ret\n\n\ndef train_model(model, train_loader, criterion, optimizer, device, model_name, num_epochs=1000, save_best_models=True):\n long_short_strats_arr = []\n long_only_strats_arr = []\n short_only_strats_arr = []\n\n strategies = get_long_short_thresholds()\n\n for epoch in range(num_epochs):\n for inputs, labels in train_loader:\n inputs, labels = inputs.to(device), labels.to(device)\n\n outputs = model(inputs)\n\n l1_lambda = 0.01 # Again, this value can be tuned to your problem.\n\n # inside your training loop:\n l1_norm = sum(p.abs().sum() for p in model.parameters())\n loss = criterion(outputs, labels) + l1_lambda * l1_norm\n #loss = criterion(outputs, labels)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n print(f'Epoch [{epoch}/{num_epochs}], Loss: 
{loss.item():.4f}')\n\n model_path = model_name + \"/epoch_\" + str(epoch) + \".pt\"\n torch.save(model.state_dict(), model_path)\n\n if save_best_models:\n for strategy in strategies:\n result_vs_buy_and_hold = test_model(visualize_trades=False, model_epoch=False,\n use_model_path=True, passed_model_path=model_path, long_threshold=strategy[\"long_threshold\"], short_threshold=strategy[\"short_threshold\"])\n\n if strategy['strategy'] == \"long_short\":\n long_short_strats_arr.append({\n \"epoch\": epoch,\n \"long_threshold\": strategy[\"long_threshold\"],\n \"short_threshold\": strategy[\"short_threshold\"],\n \"result_vs_buy_and_hold\": result_vs_buy_and_hold,\n \"model_weights\": model.state_dict()\n })\n\n if strategy['strategy'] == \"long_only\":\n long_only_strats_arr.append({\n \"epoch\": epoch,\n \"long_threshold\": strategy[\"long_threshold\"],\n \"short_threshold\": strategy[\"short_threshold\"],\n \"result_vs_buy_and_hold\": result_vs_buy_and_hold,\n \"model_weights\": model.state_dict()\n })\n\n if strategy['strategy'] == \"short_only\":\n short_only_strats_arr.append({\n \"epoch\": epoch,\n \"long_threshold\": strategy[\"long_threshold\"],\n \"short_threshold\": strategy[\"short_threshold\"],\n \"result_vs_buy_and_hold\": result_vs_buy_and_hold,\n \"model_weights\": model.state_dict()\n })\n\n directories = [\"best_long_short_models\",\n \"best_long_only_models\", \"best_short_only_models\"]\n for directory in directories:\n if os.path.exists(directory):\n shutil.rmtree(directory)\n if not os.path.exists(directory):\n os.makedirs(directory)\n strategies_with_paths = [{\"strategies\": long_short_strats_arr, \"directory\": \"best_long_short_models\"},\n {\"strategies\": long_only_strats_arr,\n \"directory\": \"best_long_only_models\"},\n {\"strategies\": short_only_strats_arr, \"directory\": \"best_short_only_models\"}]\n\n for strategy in strategies_with_paths:\n weights_dicts_sorted = sorted(\n strategy['strategies'], key=lambda k: k['result_vs_buy_and_hold'], reverse=True)\n best_ten_perc = len(weights_dicts_sorted) * 0.1\n index = 0\n for item in weights_dicts_sorted:\n if index < best_ten_perc:\n model_path = strategy['directory'] + \\\n f\"/epoch_{item['epoch']}_long_{item['long_threshold']}_short_{item['short_threshold']}.pt\"\n torch.save(item['model_weights'], model_path)\n index += 1\n\n\ndef main():\n DATA_PATH = \"regression_model.csv\"\n X_train, y_train = load_data(DATA_PATH)\n\n model_dir = \"all_models\"\n\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n dataset = TensorDataset(X_train, y_train)\n train_loader = DataLoader(dataset, batch_size=32, shuffle=True)\n model = LinearModel(X_train.shape[1])\n\n criterion = nn.MSELoss()\n optimizer = optim.Adam(model.parameters(), lr=0.001, weight_decay=0.01)\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() # pylint: disable=no-member\n else \"cpu\") # pylint: disable=no-member\n model.to(device)\n\n train_model(model, train_loader, criterion, optimizer,\n device, model_dir, num_epochs=15000, save_best_models=False)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"JNeuvonen/linear-regression-tradingbot","sub_path":"src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21970426013","text":"# definition of routes\nfrom __future__ import print_function # In python 2.7\nfrom flask import request, render_template,request, session, abort, flash, redirect, 
url_for, jsonify \nfrom flask_wtf import FlaskForm \nfrom wtforms import StringField, SubmitField, PasswordField, TextField\nfrom wtforms import validators, ValidationError\nfrom sqlalchemy import create_engine, text\nfrom werkzeug.security import generate_password_hash, check_password_hash\nfrom sqlalchemy.orm import scoped_session, sessionmaker\nfrom flask_login import login_user , logout_user , current_user , login_required\nimport sys\nimport json\nfrom pprint import pprint\nfrom flask_cors import cross_origin\n#from model import model as db\nfrom . import app, db, models, forms\nimport datetime\n\n#Create a DBAPI connection\nengine = create_engine(app.config[\"SQLALCHEMY_DATABASE_URI\"])\nemail = \"\"\n\n@app.route(\"/\")\ndef index():\n\tif 'email' not in session:\n\t\treturn render_template(\"home/home.html\")\n\treturn redirect(url_for('login'))\n\n@app.route(\"/logout\")\ndef logout():\n\t# clear session data and log user out of app\n\tsession.clear()\n\tlogout_user()\n\treturn render_template(\"home/home.html\")\n\n@app.route(\"/signup\", methods=[\"GET\", \"POST\"])\ndef signup():\n\tform = forms.SignupForm(csrf_enabled=False)\n\n\tif request.method == \"GET\":\n\t\treturn render_template(\"form/signup.html\", form=form)\n\telif request.method == 'POST': \n\t\tif form.validate() == False:\n\t\t\t# if form fields not filled\n\t\t\tflash('All fields are required.')\n\t\t\treturn render_template('form/signup.html', form = form)\n\t\telif models.db_session.query(models.Teachers).filter_by(email=form.email.data).first() != None:\n\t\t\terror = 'That email already exists in our records, are you sure dont already have an account?'\n\t\t\treturn render_template('form/signup.html', form = form, error=error)\n\t\telse:\n\t\t\t# handle form submission\n\t\t\tnewuser = models.Teachers(form.name.data, form.email.data, form.password.data)\n\t\t\tmodels.db_session.add(newuser)\n\t\t\tmodels.db_session.commit()\n\t\t\treturn redirect(url_for('login'))\n\n@app.route(\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n\tform = forms.LoginForm(csrf_enabled=False)\n\tif request.method == \"GET\":\n\t\treturn render_template(\"form/login.html\",form =form)\n\telif request.method == 'POST':\n\t\t# handle form submission\n\t\tif form.validate() == False:\n\t\t\terror = 'All fields are required.'\n\t\t\treturn render_template('form/login.html', form = form, error=error)\n\t\telse:\n\t\t\tsession['name'] = form.name.data \n\t\t\tsession['email'] = form.email.data \n\t\t\t#data to login\n\t\t\tuser = models.Teachers(name=form.name.data,email=form.email.data, password=form.password.data)\n\n\t\t\tif models.db_session.query(models.Teachers).filter_by(email=user.email).first() == None:\n\t\t\t\t# user data not in db \n\t\t\t\terror = ' Account credentials entered do not exist, sign up for a new account.'\n\t\t\t\treturn render_template('form/login.html', form = form, error=error)\n\t\t\telif models.db_session.query(models.Sections).filter_by(teacher=user.email).first() == None and models.db_session.query(models.Teachers).filter_by(email=user.email).first() != None:\n\t\t\t\tclassbox = forms.ClassBox(csrf_enabled=False)\n\t\t\t\tname = form.name.data\n\t\t\t\tlogin_user(user)\n\t\t\t\treturn render_template('dashboard/classbox.html', form=classbox, name=name)\n\t\t\telse:\n\t\t\t\tlogin_user(user)\n\t\t\t\tflash('Logged in successfully')\n\t\t\t\tname = form.name.data\n\t\t\t\temail = form.email.data\n\t\t\t\t# info to populate dashboard\n\t\t\t\tclassform = forms.ClassDataForm(csrf_enabled=False) 
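# form the dashboard template uses to add another class (descriptive note; not in the original source)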
\n\t\t\t\tclassbox = models.db_session.query(models.Sections).filter_by(teacher=email).first()\n\t\t\t\tsections = models.db_session.query(models.Sections).filter_by(teacher=email)\n\t\t\t\tsection_numbers = models.db_session.query(models.Sections).filter_by(teacher=email).count()\n\t\t\t\ttotalenrollment = models.db_session.query(models.Enrolled).distinct(models.Enrolled.student, models.Enrolled.classroom).filter_by(classroom=classbox.classroom).count()\n\t\t\t\treturn render_template('dashboard/dashboard.html',classroom=sections, classnumber=section_numbers, totalenrollment=totalenrollment,classbox=classbox.classroom, form = classform, name=name,email=email,registered_on=user.registered_on)\n\n@app.route('/classbox', methods=[\"GET\", \"POST\"])\n@login_required\ndef classbox():\n\tform = forms.ClassBox(csrf_enabled=False)\n\tif request.method == \"GET\":\n\t\treturn render_template(\"dashboard/classbox.html\", form =form)\n\telif request.method ==\"POST\":\n\t\t\tif form.validate() == False:\n\t\t\t\tflash('All fields are required.')\n\t\t\t\treturn render_template(\"dashboard/classbox.html\", form = form)\n\t\t\telse:\n\t\t\t\tclassbox = form.classbox.data\n\t\t\t\tclassroom = form.classroom.data\n\n\t\t\t\t#make new classbox\n\t\t\t\tnewClassbox = models.Classrooms(classbox)\n\t\t\t\tmodels.db_session.add(newClassbox)\n\t\t\t\tmodels.db_session.commit()\n\n\t\t\t\temail = session['email']\n\t\t\t\tuser = models.db_session.query(models.Teachers).filter_by(email=email).first()\n\t\t\t\tnewClassroom = models.Sections(id=None,name=classroom,classroom=classbox,teacher=user.email)\n\t\t\t\tmodels.db_session.add(newClassroom)\n\t\t\t\tmodels.db_session.commit()\n\n\t\t\t\tname = user.name\n\t\t\t\tclassform = forms.ClassDataForm(csrf_enabled=False) \n\t\t\t\tclassbox = models.db_session.query(models.Sections).filter_by(teacher=user.email).first()\n\t\t\t\tsections = models.db_session.query(models.Sections).filter_by(teacher=user.email)\n\t\t\t\tsection_numbers = models.db_session.query(models.Sections).filter_by(teacher=user.email).count()\n\t\t\t\ttotalenrollment = models.db_session.query(models.Enrolled).distinct(models.Enrolled.student, models.Enrolled.classroom).filter_by(classroom=classbox.classroom).count()\n\t\t\t\treturn render_template('dashboard/dashboard.html',classroom=sections, classnumber=section_numbers, totalenrollment=totalenrollment,classbox=classbox.classroom, form = classform, name=name,email=email,registered_on=user.registered_on.strftime('%Y-%m-%d'))\n\n@app.route('/dashboard', methods=[\"GET\", \"POST\"])\n@login_required\ndef dashboard():\n\t# form to get students\n\tform = forms.ClassDataForm(csrf_enabled=False)\n\t# remaining variables\n\tname =session['name']\n\temail = session['email']\n\tuser = models.db_session.query(models.Teachers).filter_by(email=email).first()\n\tclassbox = models.db_session.query(models.Sections).filter_by(teacher=user.email).first()\n\tsections = models.db_session.query(models.Sections).filter_by(teacher=user.email)\n\tsection_numbers = models.db_session.query(models.Sections).filter_by(teacher=user.email).count()\n\ttotalenrollment = models.db_session.query(models.Enrolled).distinct(models.Enrolled.student, models.Enrolled.classroom).filter_by(classroom=classbox.classroom).count()\n\n\t# adding new class to teach \n\tif form.validate() == False:\n\t\t# if class button not filled and button clicked\n\t\treturn render_template('dashboard/dashboard.html',classroom=sections, classnumber=section_numbers, 
totalenrollment=totalenrollment,classbox=classbox.classroom, form =form, name=name,email=email,registered_on=user.registered_on)\n\telif models.db_session.query(models.Sections).filter_by(name=form.classroom.data).first() != None:\n\t\terror = 'you already have a classroom with that name, pick a new classroom name'\n\t\treturn render_template('dashboard/dashboard.html',classroom=sections, error=error, classnumber=section_numbers, totalenrollment=totalenrollment,classbox=classbox.classroom, form =form, name=name,email=email,registered_on=user.registered_on)\n\telif form.validate() == True and models.db_session.query(models.Sections).filter_by(name=form.classroom.data).first() == None:\n\t\tnewclass = models.Sections(id=None,name=form.classroom.data, classroom=classbox.classroom,teacher=user.email)\n\t\tmodels.db_session.add(newclass)\n\t\tmodels.db_session.commit()\n\t\tsection_numbers = models.db_session.query(models.Sections).filter_by(teacher=user.email).count()\n\t\treturn render_template('dashboard/dashboard.html',classroom=sections, classnumber=section_numbers, totalenrollment=totalenrollment,classbox=classbox.classroom, form = form, name=name,email=email,registered_on=user.registered_on)\n\n@app.route('/classdata/')\n@login_required\ndef classdata(classroom):\n\tif classroom == None:\n\t\tredirect(url_for('dashboard'))\n\t\treturn\n\telse:\n\t\tform = forms.ClassDataForm(csrf_enabled=False)\n\t\temail = session['email']\n\t\tname = session['name']\n\t\tuser = models.db_session.query(models.Teachers).filter_by(email=email).first()\n\t\tclassbox = models.db_session.query(models.Sections).filter_by(teacher=user.email).first()\n\t\tsections = models.db_session.query(models.Sections).filter_by(teacher=user.email)\n\t\tsection_numbers = models.db_session.query(models.Sections).filter_by(teacher=user.email).count()\n\t\ttotalenrollment = models.db_session.query(models.Enrolled).distinct(models.Enrolled.student, models.Enrolled.classroom).filter_by(classroom=classbox.classroom).count()\n\t\tsection_info = models.db_session.query(models.Sections.id).filter_by(name=classroom).first()\n\t\tsection_id = section_info.id\n\t\tsection_count = models.db_session.query(models.Enrolled).filter_by(section=str(section_id)).count()\n\t\t# made query in raw SQL\n\t\tenrolled = db.engine.execute('select distinct(students.email) ,students.name,students.stage_number,students.stage_date_started,students.stage_date_completed,students.attempts,students.code from students, enrolled,sections where students.email = enrolled.student and enrolled.classroom = sections.classroom and enrolled.section_name =(%s)', classroom)\n\t\treturn render_template('dashboard/dashboard.html', id = section_id, section_enrollment =section_count, enrolled=enrolled,classname=classroom, classroom=sections, classnumber=section_numbers, totalenrollment=totalenrollment,classbox=classbox.classroom, form=form, name=name,email=email,registered_on=user.registered_on)\n\n@app.route('/update_stage', methods=[\"POST\"])\n@cross_origin([\"POST\"])\ndef update():\n\tif request.content_type != 'application/json':\n\t\treturn jsonify({\"error\": \"format application/json\"})\n\t# is json\n\ttry:\n\t\tdata = json.loads(request.data.decode('UTF-8'))\n\texcept ValueError:\n\t\treturn jsonify({\"error\": \"value error\"})\n\n\t# get POST params\n\tname = data[\"name\"]\n\temail = data[\"email\"]\n\tstage_number= data[\"stage_number\"]\n\tstage_date_started = data[\"stage_date_started\"]\n\tstage_date_completed = data[\"stage_date_completed\"]\n\tattempts = 
data[\"attempts\"]\n\tcode = data[\"code\"]\n\tsection_id = data[\"section_id\"]\n\n\t# PUT STUDETNS IN TABLE \n\t# not yet in table\n\tif models.db_session.query(models.Enrolled).filter_by(student=email, section=section_id).first() == None:\n\t\t# add current stage data \n\t\tsection = models.db_session.query(models.Sections).filter_by(id=section_id).first()\n\t\tsection_name = section.name\n\t\tclassbox = section.classroom\n\t\tnewStudent = models.Students(name, email, stage_number, stage_date_started, stage_date_completed, attempts, code, section_id)\n\t\tmodels.db_session.add(newStudent)\n\t\tmodels.db_session.commit()\n\n\t\t#add new user in classroom section instance \n\t\tnewEnrolled = models.Enrolled(email, section_id, classbox, section_name)\n\t\tmodels.db_session.add(newEnrolled)\n\t\tmodels.db_session.commit()\n\n\t# already in table\n\telse:\n\t\t# update stage data \n\t\tmodels.db_session.query(models.Students).filter_by(email=email, section=section_id).update({ 'attempts':attempts,'stage_number': stage_number,'stage_date_started':stage_date_started, 'stage_date_completed': stage_date_completed, 'code':code})\n\t\tdb.session.commit()\n\n\treturn jsonify({ \"success\": True })\n\n\t\n","repo_name":"braxeatssnacks/project-utopia-web","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22596711811","text":"\nfrom os import listdir, path\nfrom typing import Generator, Tuple\n\n\ndef enumerate_files(data_path: str) -> Generator[Tuple[str, str], None, None]:\n for fpath in listdir(data_path):\n file_path = path.join(data_path, fpath)\n if path.isfile(file_path):\n _, ext = path.splitext(fpath)\n yield (file_path, ext)\n else:\n for file in enumerate_files(file_path):\n yield file\n","repo_name":"hdtz0r/mipt-putting-all-together","sub_path":"dags/vendor/companies/providers/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"45105832821","text":"class Offer:\n\n def __init__(self, title: str, location: str, price: float, rooms: int,\n area: float, link: str, currency='PLN', area_unit='m2'):\n self.title = title\n self.location = location\n self.price = price\n self.rooms = rooms\n self.area = area\n self.link = link\n self.currency = currency\n self.area_unit = area_unit\n\n def __str__(self):\n return f'''\\\nTitle: {self.title}\nLocation: {self.location}\nPrice: {self.price} {self.currency}\nRooms: {self.rooms}\nArea: {self.area} {self.area_unit}\nLink: {self.link}\n '''\n","repo_name":"jamnicki/otodom_observer","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44268702070","text":"class Person:\n def __init__(self,name,age,weight,height):\n self.name = name\n self.age = age\n self.weight = weight\n self.height = height\n\n \n\nclass Cricketer(Person):\n def __init__(self, name, age, weight, height,team):\n self.team = team\n super().__init__(name, age, weight, height)\n\n # overloading\n def __add__(self,other):\n return self.age + other.age\n \n def __lt__(self,other):\n return self.age < other.age\n \n\n\n\nSakib = Cricketer('Sakib Al Hasan',35,80,165,'Bangladesh')\nMusfiqe = Cricketer('Musfiqur Rahim',36,60,150,'Bangladesh')\nShanto = Cricketer('Nazmul 
Hossain Shanto',23,75,163,'Bangladesh')\nHridoy = Cricketer('Tawhid Hridoy',22,70,164,'Bangladesh')\n\nResult = Sakib + Musfiqe\nprint(Result)\n\nYoungest = min([Sakib,Musfiqe,Shanto,Hridoy])\nprint(Youngest.name)","repo_name":"melias198/OOP-and-Python-Programming","sub_path":"Week 02/Module 07/overloading.py","file_name":"overloading.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31236999825","text":"from tkinter import *\nfrom tkinter import ttk\nfrom .Themes import *\nfrom tkinter.messagebox import *\n\nclass applicacion(object):\n '''\n La clase aplicacion tiene el objetivo de crear el entorno visual de la aplicacion\n '''\n def __init__(self,root):\n\n # Datos estructurales basicos donde iran los demas fragmentos de la applicacion\n\n self.root = root\n self.root.title(\"Mi Primer App\")\n self.tema = EleccionTema(0)\n self.root.config(bg = self.tema['fondo_gen'])\n\n ### Menu Para Eleccion de Temas ###\n self.menubar = Menu(self.root)\n self.filemenu = Menu(self.menubar, tearoff=0)\n self.filemenu.add_command(label=\"Classic\",command=lambda:self.modificartema(0))\n self.filemenu.add_command(label=\"Dark\", command=lambda:self.modificartema(1))\n self.filemenu.add_command(label=\"Custom\", command=lambda:self.modificartema(2))\n self.filemenu.add_separator()\n self.filemenu.add_command(label=\"Crear Custom\", command=customizar_tema)\n self.menubar.add_cascade(label=\"Temas\", menu=self.filemenu)\n self.root.config(menu=self.menubar)\n\n ### TITULO GENERAL ###\n self.label1 = self.crearlabels(self.root, 0, 0, 7, W+E, text=\"Ingrese sus datos\", \n bg=self.tema['fondo_tit'], fg=self.tema['letra'], font=(\"\",10,\"bold\"))\n\n ###Contenedores##\n #Contenedor para Entradas\n self.framentries = Frame(self.root, bg=self.tema['fondo_gen'])\n self.framentries.grid(row=1, column=0, columnspan=2, rowspan=2)\n #Contenedor para Arbol\n self.framearbol = Frame(self.root, bg=self.tema['fondo_gen'])\n self.framearbol.grid(row=5, column=0, columnspan=7, sticky=W+E)\n #Contenedor para Botones\n self.framebotones = Frame(self.root, bg=self.tema['fondo_gen'])\n self.framebotones.grid(row=1, column=6, rowspan=2)\n\n ### Contenedor entradas ###\n\n #Declaracion de labels del cuadro utilizando funcion crearlabels\n self.label2 = self.crearlabels(self.framentries, 1, 0, 1, W, text=\"Titulo\", anchor=W, \n bg=self.tema['fondo_gen'], fg=self.tema['letra'], font=(\"\",8,\"bold\"))\n self.label3 = self.crearlabels(self.framentries, 2, 0, 1, W, text=\"Descripcion\", \n bg=self.tema['fondo_gen'], anchor=W, fg=self.tema['letra'], font=(\"\",8,\"bold\"))\n \n #Creo los espacios a rellenar (utilizando funcion crearentradas) y defino las variables donde se guardaran los inputs\n self.textotit, self.textodes= StringVar(), StringVar()\n self.llenar1 = self.crearentradas(self.framentries, self.textotit,45,1,2,1)\n self.llenar2 = self.crearentradas(self.framentries, self.textodes,45,2,2,1)\n self.llenarlist=[self.llenar1, self.llenar2]\n\n ### Contenedor Botones ###\n\n #botones de alta, baja y modificar\n self.boton_modi = Button(self.framebotones, text=\"Modificar\", bg=self.tema['fondo_gen'], fg=self.tema['letra'], width=10)\n self.boton_baja = Button(self.framebotones, text=\"Baja\", bg=self.tema['fondo_gen'], fg=self.tema['letra'], width=10)\n self.boton_alta = Button(self.framebotones, text=\"Alta\", bg=self.tema['fondo_gen'], fg=self.tema['letra'], width=10)\n self.boton_baja.grid(row=0, column=0)\n 
self.boton_alta.grid(row=1, column=0)\n self.boton_modi.grid(row=2, column=0) \n\n ### Contenedor Arbol ###\n\n #creacion base del treeview para imprimr datos de la base\n self.arbol=ttk.Treeview(self.framearbol, columns=(\"1\",\"2\",\"3\",\"4\",\"5\",\"6\"), show='headings', selectmode = \"browse\")\n self.arbol.pack(side = LEFT, fill = BOTH)\n self.dicarbol = {'1':'ID', '2':'Titulo','3':'Descripcion', '4':'Fecha', '5':'Estado Publicacion', '6':'Objeto'}\n for key in self.dicarbol:\n self.arbol.column(key, anchor='c')\n self.arbol.heading(key, text=self.dicarbol[key])\n self.barrita=ttk.Scrollbar(self.framearbol, orient=\"vertical\", command=self.arbol.yview)\n self.barrita.pack(side = RIGHT, fill = Y)\n self.arbol.configure(yscrollcommand=self.barrita.set)\n\n #listados con todas las widgest que cambian el color de sus fondos con el tema\n self.listafondogen = [ self.label2, self.label3, self.boton_alta, self.boton_baja, self.boton_modi, \n self.llenar1, self.llenar2]\n self.listacontenedores = [self.root, self.framentries, self.framearbol, self.framebotones]\n\n #listados con todas las widgest que cambian el color de sus letras con el tema\n self.listaletras = [ self.label1, self.label2, self.label3, self.boton_alta, \n self.boton_baja, self.boton_modi,self.llenar1, self.llenar2]\n\n #funcion para crear labels\n def crearlabels(self, root, fila, columna, cspan=None, pega=W, **config):\n labeles = Label(root, **config)\n labeles.grid(row=fila, column=columna, columnspan=cspan, sticky=pega)\n return labeles\n\n #funcion para crear entradas\n def crearentradas(self, root, textoing, ancho, fila, columna, span):\n entrada = Entry(root, textvariable = textoing, width=ancho, bg=self.tema['fondo_gen'])\n entrada.grid(row=fila, column=columna, columnspan=span, sticky=W)\n return entrada\n \n #funcion para modificar colores segun el tema elegido y actualizar tkinter\n def modificartema(self, seleccion):\n self.tema = EleccionTema(seleccion)\n self.actualizarTema()\n \n def actualizarTema(self):\n self.label1['bg']=self.tema['fondo_tit']\n for x in self.listacontenedores:\n x.config(bg = self.tema['fondo_gen'])\n for x in self.listafondogen:\n x['bg'] = self.tema['fondo_gen']\n for x in self.listaletras:\n x['fg'] = self.tema['letra']\n\n #Funcion que muestra los datos de la tabla dentro del arbol\n def imprimirdatos(self, tdatos):\n self.arbol.delete(*self.arbol.get_children())\n for dato in tdatos:\n self.arbol.insert(\"\", \"end\", values=(dato[0], dato[1], dato[2], dato[3], dato[4], dato[5]))\n\nif __name__ == \"__main__\":\n root = Tk()\n x=applicacion(root)\n mainloop()","repo_name":"jpskobalski/Biblioteca","sub_path":"Modulo/VIEW/VIEW.py","file_name":"VIEW.py","file_ext":"py","file_size_in_byte":6202,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17306366825","text":"from typing import Union\n\nfrom aiogram import types\nfrom gino.schema import GinoSchemaVisitor\n\nfrom .models import Users\nfrom .models import db\nfrom ..config import load_config\n\nconfig = load_config('.env', db=True)\n\n\nclass GetCommands:\n @staticmethod\n async def user(user_id: Union[str, int]) -> Users:\n user = await Users.query.where(Users.user_id == user_id).gino.first()\n return user\n\n\nclass AddCommands:\n @staticmethod\n async def user(user: types.User,\n check_availability: bool = False) -> Users:\n # Получить user ID из Context Var\n if not user:\n user = types.User.get_current()\n\n if check_availability:\n old_user = await 
GetCommands.user(user.id)\n if old_user:\n return old_user\n\n new_user = Users(\n user_id=user.id,\n username=user.username,\n full_name=user.full_name\n )\n await new_user.create()\n return new_user\n\n\nclass UpdateCommands:\n @staticmethod\n async def user(user: Users,\n **params) -> Users:\n return await user.update(**params).apply()\n\n\nasync def go_db(recreate: bool = False) -> None:\n await db.set_bind(f'postgresql://{config.user}:{config.password}@{config.host}/{config.database}')\n db.gino: GinoSchemaVisitor\n if recreate:\n await db.gino.drop_all()\n await db.gino.create_all()\n","repo_name":"Gegenwehr/aiogram-template","sub_path":"tgbot/database/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72984932353","text":"# -*- coding: utf-8 -*-\n\nfrom core2 import plugin_utils as utils\n\n# def init():\n# pass\n\n@utils.owner_host\n@utils.command(\"!reload\")\ndef reload(server, msg, umsg, cmd, re_args):\n errors = server._event_handler.load_hooks()\n server.notice(msg.channel, f\"reloaded successfully with {errors} error{'' if errors == 1 else 's'}\")\n\n@utils.owner_host\n@utils.command(\"!raw\")\ndef raw(server, msg, umsg, cmd, re_args):\n server.send_raw(umsg.split(None, 1)[1])\n\n@utils.owner_host\n@utils.command(\"!join\")\ndef join(server, msg, umsg, cmd, re_args):\n server.send_join(umsg.split(None, 1)[1])\n\n@utils.owner_host\n@utils.command(\"!part\")\ndef part(server, msg, umsg, cmd, re_args):\n _, *chans = umsg.split(None, 1)\n if not chans:\n chans = msg.channel\n server.send_part(chans)\n\n@utils.owner_host\n@utils.command(\"!say\")\ndef say(server, msg, umsg, cmd, re_args):\n _, m = umsg.split(None, 1)\n if m[0] in {\"#\", \"&\"}:\n chans, m = m.split(None, 1)\n else:\n chans = msg.channel\n server.say(chans, m)\n\n@utils.owner_host\n@utils.command(\"!me\")\ndef me(server, msg, umsg, cmd, re_args):\n _, m = umsg.split(None, 1)\n if m[0] in {\"#\", \"&\"}:\n chans, m = m.split(None, 1)\n else:\n chans = msg.channel\n server.me(chans, m)\n\n@utils.owner_host\n@utils.command(\"!notice\")\ndef notice(server, msg, umsg, cmd, re_args):\n _, m = umsg.split(None, 1)\n if m[0] in {\"#\", \"&\"}:\n chans, m = m.split(None, 1)\n else:\n chans = msg.channel\n server.notice(chans, m)\n\n","repo_name":"novasenco/core2duo","sub_path":"src/core2/plugins/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22995606474","text":"\nimport torch\nfrom torch import tensor\nfrom torch import Tensor\nfrom torch.nn import Module\n\n\nclass GenericRunner(object):\n\n def __init__(self, **kwargs):\n self.__dict__.update(kwargs)\n ## set device\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n ## check pytorch geometry is installed (not required, check to move things on cuda)\n try:\n from torch_geometric.data import Batch\n self.pyg_batch = Batch\n self.has_pyg = True\n except ImportError:\n self.has_pyg = False\n\n ## create wrapper class to store info for checkpointing\n class Wrapper:\n generate_report = False\n training_time = 0.0\n checkpoint_dir = ''\n epoch = -1\n save_constant_data = False\n self.CKPTWrapper = Wrapper\n\n\n def get_device(self):\n return self.device\n\n\n def move_to_device(self, data):\n if isinstance(data, Module) or type(data) == tensor or type(data) == Tensor:\n 
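# modules and bare tensors can be moved directly; dict and list containers are handled by the recursive branches below\n 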
data = data.to(self.device)\n elif type(data) == dict:\n for k, v in data.items():\n data[k] = self.move_to_device(v) # recursion for subelements\n elif type(data) == list or type(data) == tuple:\n data = [ self.move_to_device(el) for el in data ] # recursion for subelements\n elif self.has_pyg:\n if type(data) == self.pyg_batch:\n data = data.to(self.device)\n return data\n","repo_name":"luca-morreale/neural_surfaces","sub_path":"runners/generic_runner.py","file_name":"generic_runner.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"61"} +{"seq_id":"6065236118","text":"#SATELLITE SYSTEM DEFINITIONS\n\n'''H600# Satellite System Description\n# = 1..9, Satellite system reference number\nName [ 7,14] A8 free text\nDatum and spheroid number [16,16] I1\nDiff. system operator [18,35] A18 free text\nDiff. system name [37,46] A10 free text\nSoftware description, version number and additional information [48,80] A33 free text'''\nclass H600x:\n def __init__(self,satelliteReferenceNumber,name,datum,systemOperator,systemName,softwareDescription):\n self.satelliteReferenceNumber = satelliteReferenceNumber\n self.name = name\n self.datum = datum\n self.systemOperator = systemOperator\n self.systemName = systemName\n self.softwareDescription = softwareDescription\n \ndef getH600x(line):\n h600x = H600x(line[4:5],line[6:14],line[15:16],line[17:35],line[36:46],line[47:80])\n return h600x\n\n'''H610# Definition of Differential Reference Stations\n# = 2, 4..9, Satellite system reference number\nReference station number [ 6, 7] I2\nReference station name [ 9,20] A12 free text\nLatitude [22,33] I3,I2,F6.3,A1 dddmmss.sss N/S\nLongitude [35,46] I3,I2,F6.3,A1 dddmmss.sss E/W\nSpheroidal height [48,54] F7.2 metres\nGeoid-Spheroid separation [56,62] F7.2 metres\nGeoidal model [64,80] A17 free text'''\nclass H610x:\n def __init__(self,satelliteReferenceNumber,referenceStationNumber,referenceStationName,latitude,longitude,spheroidalHeight,geoidalSeparation,geoidalModel):\n self.satelliteReferenceNumber = satelliteReferenceNumber\n self.referenceStationNumber = referenceStationNumber\n self.referenceStationName = referenceStationName\n self.latitude = latitude\n self.longitude = longitude\n self.spheroidalHeight = spheroidalHeight\n self.geoidalSeparation = geoidalSeparation\n self.geoidalModel = geoidalModel\n \ndef getH610x(line):\n h610x = H610x(line[4:5],line[5:7],line[8:20],line[21:33],line[34:46],line[47:54],line[55:62],line[63:80])\n return h610x\n\n'''H620# Satellite Receiver Definition\n# = 1..9, Satellite system reference number\n\"At\" Node identifier [ 7,10] I4\nReceiver number [12,12] I1\nLocated on: ref. 
number [14,16] I3\nOffset A [18,24] F7.1\nOffset B [26,32] F7.1\nOffset Z [34,39] F6.1\nReceiver name, description and additional information [41,80] A40 free text'''\nclass H620x:\n def __init__(self,satelliteReferenceNumber,nodeIdentifier,receiverNumber,referenceNumber,locationRefNumber,offsetA,offsetB,offsetZ):\n self.satelliteReferenceNumber = satelliteReferenceNumber\n self.nodeIdentifier = nodeIdentifier\n self.receiverNumber = receiverNumber\n self.referenceNumber = referenceNumber\n self.locationRefNumber = locationRefNumber\n self.offsetA = offsetA\n self.offsetB = offsetB\n self.offsetZ = offsetZ\n \ndef getH620x(line):\n h620x = H620x(line[4:5],line[6:10],line[11:12],line[13:16],line[16:24],line[25:32],line[33:39],line[40:80])\n return h620x\n\n#GPS PARAMETERS\n\n'''H6300 GPS parameter recording strategy\nMeteorological records [ 7, 7] A1\nIonospheric model records [ 8, 8] A1\nClock model & ephemerides [ 9, 9] A1\n'''\nclass H6300:\n def __init__(self,meteorologicalRecords,ionosphericModelRecords,clockModel):\n self.meteorologicalRecords = meteorologicalRecords\n self.ionosphericModelRecords = ionosphericModelRecords\n self.clockModel = clockModel\n \ndef getH6300(line):\n h6300 = H6300(line[6:7],line[7:8],line[8:9])\n return h6300\n\n'''H6301 DGPS differential correction recording strategy\nCorrection Type [ 7,10] I4\nType Description [11,24] A14\nCorrection Type [25,28] I4\nType Description [29,42] A14\nCorrection Type [43,46] I4\nType Description [47,60] A14\nCorrection Type [61,64] I4\nType Description [65,78] A14'''\nclass H6301:\n def __init__(self,correctionType,typeDescription):\n self.correctionType = correctionType\n self.typeDescription = typeDescription\n \ndef getH6301(line):\n #Correction Type and Type Description Lists\n correctionType = [] \n typeDescription = []\n for i in range(6,78,18):\n correctionType.append(line[i:(i+4)])\n typeDescription.append(line[(i+4):(i+18)])\n h6301 = H6301(correctionType,typeDescription)\n return h6301\n\n'''\nH6310 GPS ephemerides & clock \nS.V. [ 6, 8] A1,I2 System type code, 1-32\nTransmission time of message [ 9,26] E18.12 GPS week seconds'''\nclass H6310:\n def __init__(self,systemTypeCode,transmissionTimeMessage):\n self.systemTypeCode = systemTypeCode\n self.transmissionTimeMessage = transmissionTimeMessage\n \ndef getH6310(line):\n h6310 = H6310(line[6:8],line[9:26])\n return h6310\n\n'''H6311 GPS clock parameters\nS.V. [ 6, 8] A1,I2 System type code, 1-32\nS.V. clock drift rate a f2 [ 9,26] E18.12 seconds/second²\nS.V. clock drift a f1 [27,44] E18.12 seconds / second\nS.V. clock bias a f0 [45,62] E18.12 seconds\nTime of Clock t oc [63,80] E18.12 GPS week seconds\nThese parameters are available from the GPS message sub-frame 1.'''\nclass H6311:\n def __init__(self,systemTypeCode,clockF2,clockF1,clockF0,timeClock):\n self.systemTypeCode = systemTypeCode\n self.clockF2 = clockF2\n self.clockF1 = clockF1\n self.clockF0 = clockF0\n self.timeClock = timeClock\n \ndef getH6311(line):\n h6311 = H6311(line[5:8],line[8:26],line[26:44],line[44:62],line[62:80])\n return h6311\n\n'''H6312 GPS ephemerides, 1\nS.V. 
[ 6, 8] A1,I2 System type code, 1-32\nIssue of Data, Ephemerides IODE [ 9,26] E18.12 \nCrs [27,44] E18.12 metres\n∆n [45,62] E18.12 radians / second \nM0 [63,80] E18.12 radians \nCrs amplitude of the sine harmonic correction term to the orbit radius.\n∆n mean motion difference from computed value.\nM0 mean anomaly at reference time.\nThese parameters are available from the GPS message sub-frames 2 and 3.'''\nclass H6312:\n def __init__(self,systemTypeCode,issueData,crs,deltaN,m0):\n self.systemTypeCode = systemTypeCode\n self.issueData = issueData\n self.crs = crs\n self.deltaN = deltaN\n self.m0 = m0\n \ndef getH6312(line):\n h6312 = H6312(line[5:8],line[8:26],line[26:44],line[44:62],line[62:80])\n return h6312\n\n'''H6313 GPS ephemerides, 2\nS.V. [ 6, 8] A1,I2 System type code, 1-32 \nCuc [ 9,26] E18.12 radians\neccentricity e [27,44] E18.12\nCus [45,62] E18.12 radians\n√A [63,80] E18.12 √(metres)'''\nclass H6313:\n def __init__(self,systemTypeCode,cuc,eccentricity,cus,squareA):\n self.systemTypeCode = systemTypeCode\n self.cuc = cuc\n self.eccentricity = eccentricity\n self.cus = cus\n self.squareA = squareA\n \ndef getH6313(line):\n h6313 = H6313(line[5:8],line[8:26],line[26:44],line[44:62],line[62:80])\n return h6313\n\n'''H6314 GPS ephemerides, 3\nS.V. [ 6, 8] A1,I2 System type code, 1-32\nTime of ephemeris, toe [ 9,26] E18.12 GPS week seconds\nCic [27,44] E18.12 radians\nΩ0[45,62] E18.12 radians\nCis [63,80] E18.12 radians'''\nclass H6314:\n def __init__(self,systemTypeCode,timeEphemeris,cic,phi,cis):\n self.systemTypeCode = systemTypeCode\n self.timeEphemeris = timeEphemeris\n self.cic = cic\n self.phi = phi\n self.cis = cis\n\ndef getH6314(line):\n h6314 = H6314(line[5:8],line[8:26],line[26:44],line[44:62],line[62:80])\n return h6314\n\n'''H6315 GPS ephemerides, 4\nS.V. [ 6, 8] A1,I2 System type code, 1-32\ni0 [ 9,26] E18.12 radians\nCrc [27,44] E18.12 metres\nargument of perigee ω [45,62] E18.12 radians\nrate of right ascension Ω• [63,80] E18.12 radians / second'''\nclass H6315:\n def __init__(self,systemTypeCode,i0,crc,argumentPerigee,rateRightAscension):\n self.systemTypeCode = systemTypeCode\n self.i0 = i0\n self.crc = crc\n self.argumentPerigee = argumentPerigee\n self.rateRightAscension = rateRightAscension\n\ndef getH6315(line):\n h6315 = H6315(line[5:8],line[8:26],line[26:44],line[44:62],line[62:80])\n return h6315\n\n'''H6316 GPS ephemerides, 5\nS.V. [ 6, 8] A1,I2 System type code, 1-32\nRate of inclination angle i• [ 9,26] E18.12 radians / second\nCodes on L2 [27,44] E18.12\nGPS week number [45,62] E18.12\nL2 P data flag [63,80] E18.12'''\nclass H6316:\n def __init__(self,systemTypeCode,rateInclinationAngle,codesL2,gpsWeekNumber,dataFlag):\n self.systemTypeCode = systemTypeCode\n self.rateInclinationAngle = rateInclinationAngle\n self.codesL2 = codesL2\n self.gpsWeekNumber = gpsWeekNumber\n self.dataFlag =dataFlag \n\ndef getH6316(line):\n h6316 = H6316(line[5:8],line[8:26],line[26:44],line[44:62],line[62:80])\n return h6316\n\n'''H6317 GPS ephemerides, 6\nS.V. [ 6, 8] A1,I2 System type code, 1-32\nS.V. accuracy [ 9,26] E18.12\nS.V. 
health [27,44] E18.12\nTGD [45,62] E18.12\nIssue of data clock, IODC [63,80] E18.12\ni0 inclination angle at reference time.\nCRC amplitude of the cosine harmonic correction term to the orbit radius.\nThese parameters are available from the GPS message sub-frames 2 and 3.\nNote that this record forms part of the set of records needed to record raw GPS and\nDGPS observations, and is not required for the recording of satellite derived positions\nonly.\nThe following triplet of records (or their T632# equivalents) must appear at least once prior to the\nrecording of any raw GPS observations.'''\nclass H6317:\n def __init__(self,systemTypeCode,accuracy,health,TGD,issueDataClock):\n self.systemTypeCode = systemTypeCode\n self.accuracy = accuracy\n self.health = health\n self.TGD = TGD\n self.issueDataClock = issueDataClock\n\ndef getH6317(line):\n h6317 = H6317(line[5:8],line[8:26],line[26:44],line[44:62],line[62:80])\n return h6317\n\n'''H6320 GPS UTC parameters\nterm of UTC polynomial A0 [ 6,23] E18.12 seconds\nterm of UTC polynomial A1 [24,41] E18.12 seconds / second\nreference time of time, tot [42,50] I9 seconds\nUTC week reference no. WNt [51,59] I9\nLeap seconds delta time ∆tLSF [60,65] I6 seconds'''\nclass H6320:\n def __init__(self,polynomialA0,polynomialA1,referenceTime,referenceWeek,deltaTime):\n self.polynomialA0 = polynomialA0\n self.polynomialA1 = polynomialA1\n self.referenceTime = referenceTime\n self.referenceWeek = referenceWeek\n self.deltaTime = deltaTime\n \ndef getH6320(line):\n h6320 = H6320(line[5:23],line[23:41],line[41:50],line[50:59],line[59:65])\n return h6320\n\n'''H6321 GPS ionospheric model parameters, 1\nα0 [ 6,17] E12.4 seconds\nα1 [18,29] E12.4 seconds / semicircle\nα2 [30,41] E12.4 seconds / semicircle²\nα3 [42,53] E12.4 seconds / semicircle³\nThese parameters are available from the GPS message sub-frame 4, page 18.'''\nclass H6321:\n def __init__(self,a0,a1,a2,a3):\n self.a0 = a0\n self.a1 = a1\n self.a2 = a2\n self.a3 = a3\n\ndef getH6321(line):\n h6321 = H6321(line[5:17],line[17:29],line[29:41],line[41:53])\n return h6321\n\n'''H6322 GPS ionospheric model parameters, 2\nβ0 [ 6,17] E12.4 seconds\nβ1 [18,29] E12.4 seconds / semicircle\nβ2 [30,41] E12.4 seconds / semicircle²\nβ3 [42,53] E12.4 seconds / semicircle³'''\nclass H6322:\n def __init__(self,b0,b1,b2,b3):\n self.b0 = b0\n self.b1 = b1\n self.b2 = b2\n self.b3 = b3\n\ndef getH6322(line):\n h6322 = H6322(line[5:17],line[17:29],line[29:41],line[41:53])\n return h6322\n\n'''H6330 Meteorological data\nSurface air pressure [ 6,12] F7.1 millibars\nDry air temperature [13,19] F7.1 degrees Celsius\nWet air temperature [20,26] F7.1 degrees Celsius\nRelative humidity [27,33] F7.1 percent\nEither, but not both, of the last two fields may be left blank.'''\nclass H6330:\n def __init__(self,surfaceAirPressure,dryAirTemperature,wetAirTemperature,relativeHumidity):\n self.surfaceAirPressure = surfaceAirPressure\n self.dryAirTemperature = dryAirTemperature\n self.wetAirTemperature = wetAirTemperature\n self.relativeHumidity = relativeHumidity\n\ndef getH6330(line):\n h6330 = H6330(line[5:12],line[12:19],line[19:26],line[26:33])\n return h6330\n\n#DGPS DEFINITIONS\n\n'''H65## Differential Correction Source Definition\n## is the Differential Correction Source Identifier\nDCS short name [ 7,14] A8 free text\nDatum & Spheroid number [16,16] I1 from H011x\nLatitude of correction source [17, 28] I3,I2,F6.3,A1 dddmmss.sss N/S\nLongitude of correction source [29, 41] I3,I2,F6.3 A1 dddmmss.sss E/W\nSpheroidal height [42, 48] 
F7.2 metres\nGeoid - spheroid separation [49, 55] F7.2 metres\nGeoidal model [56, 72] A17 free text'''\nclass H65xx:\n def __init__(self,DCSIdentifier,DCSShortName,datumSpheroidNumber,latitudeCS,longitudeCS,spheroidalHeight,spheroidalSeparation,geoidalModel):\n self.DCSIdentifier = DCSIdentifier\n self.DCSShortName = DCSShortName\n self.datumSpheroidNumber = datumSpheroidNumber\n self.latitudeCS = latitudeCS\n self.longitudeCS = longitudeCS\n self.spheroidalHeight = spheroidalHeight\n self.spheroidalSeparation = spheroidalSeparation\n self.geoidalModel = geoidalModel\n\ndef getH65xx(line):\n h65xx = H65xx(line[3:5],line[6:14],line[15:16],line[16:28],line[28:41],line[41:48],line[48:55],line[55:72])\n return h65xx\n\n'''H66## Differential Correction Source Description\n## is the Differential Correction Source Identifier\nDCS system operator [ 7, 24] A18 free text\nDCS component name [25, 43] A18 free text\nDCS component description [44, 80] A37 free text'''\nclass H66xx:\n def __init__(self,DCSIdentifier,DCSSystemOperator,DCSComponentName,DCSComponentDescription):\n self.DCSIdentifier = DCSIdentifier\n self.DCSSystemOperator = DCSSystemOperator\n self.DCSComponentName = DCSComponentName\n self.DCSComponentDescription = DCSComponentDescription\n\ndef getH66xx(line):\n h66xx = H66xx(line[3:5],line[6:24],line[24:43],line[43:80])\n return h66xx\n\n'''H67@0 Height aiding values\n@ = 1..9 vessel number\n@ = 0 fixed or relay station\nNode identifier [ 6, 9] I4\nPositioning system identifier [10,12] I3\nEllipsoid height of antenna [13,23] N11 metres\nDescription of source of value [24,80] A57 free text'''\nclass H67xx:\n def __init__(self,vesselNumber,station,nodeIdentifier,systemIdentifier,ellipsoidHeight,descriptionSource):\n self.vesselNumber = vesselNumber\n self.station = station\n self.nodeIdentifier = nodeIdentifier\n self.systemIdentifier = systemIdentifier\n self.ellipsoidHeight = ellipsoidHeight\n self.descriptionSource = descriptionSource\n\ndef getH67xx(line):\n h67xx = H67xx(line[3:4],line[4:5],line[5:9],line[9:12],line[12:23],line[23:80])\n return h67xx","repo_name":"kadok/P294Reader","sub_path":"h6.py","file_name":"h6.py","file_ext":"py","file_size_in_byte":14511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29507098544","text":"from FirstOrchard.Player import Player\n\n\nclass PlayerPicksFavorite(Player):\n\n def move(self):\n \"\"\"\n Choose your favourite color first, then move down list of favourites\n :return:\n None\n \"\"\"\n if self.game.blue_plums > 0:\n self.game.move_blue(True)\n elif self.game.red_apples > 0:\n self.game.move_red(True)\n elif self.game.green_apples > 0:\n self.game.move_green(True)\n elif self.game.yellow_pears > 0:\n self.game.move_yellow(True)\n else:\n raise Exception(\"PlayerPicksFavorite.move() can't do anything\")\n\n","repo_name":"silumate/first-orchard-simulator","sub_path":"FirstOrchard/PlayerPicksFavourite.py","file_name":"PlayerPicksFavourite.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25442126438","text":"# To create the dev apps, run the following command:\n# wdaemanage.py shell < create_oauth_app.py\n\nimport os\nfrom oauth2_provider.models import get_application_model # type: ignore\nfrom django.contrib.auth import get_user_model\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"project.settings\")\n\ngpfjs_url = \"https://gpf.sfari.org/hg19test\"\n\n\nUser = 
get_user_model()\nApplication = get_application_model()\n\n# on test instance the first admin user is Lubo with id=2\nuser = User.objects.get(id=2) # Get admin user, should be the first one\n\n\nnew_application = Application(**{\n \"name\": \"gpfjs dev app\",\n \"user_id\": user.id,\n \"client_type\": \"public\",\n \"authorization_grant_type\": \"authorization-code\",\n \"redirect_uris\": f\"{gpfjs_url}/datasets\",\n \"client_id\": \"gpfjs\",\n})\nnew_application.full_clean()\nnew_application.save()\n","repo_name":"iossifovlab/iossifovlab-gpf-containers","sub_path":"iossifovlab-gpf-full/wdae/create_oauth_app.py","file_name":"create_oauth_app.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"10056418644","text":"import argparse\nimport pandas as pd\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--gold', type=str, help='gold file', required=True)\n parser.add_argument('--pred', type=str, help='predicted file', required=True)\n parser.add_argument('--out', type=str, help='output filename', required=True)\n\n args = parser.parse_args()\n\n gold = pd.read_csv(args.gold, sep=\"; \", dtype={\"Index\": object})\n pred = pd.read_csv(args.pred)\n\n if \"Gold\" in gold.columns:\n del gold[\"Gold\"]\n\n gold[\"Prediction\"] = pred.pred.apply(lambda x: \" \" + str(x))\n gold[\"Text\"] = gold.Text.apply(lambda x: \" \" + str(x))\n\n gold.to_csv(args.out, sep=';', index=False)\n","repo_name":"zsozso21/camuh","sub_path":"fnp/tools/fnp_task1_submission.py","file_name":"fnp_task1_submission.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} {"seq_id":"41172635935","text":"import multiprocessing\r\nimport random, time, sys\r\nfrom multiprocessing import Pool, Process, Pipe\r\n\r\ndef parallel_quicksort(lst, n):\r\n processes = 2 ** n\r\n # copy the input so sorted values can be written back in place\r\n result = list(lst)\r\n\r\n pool = Pool(processes=processes)\r\n results = [(0, lst)]\r\n\r\n while len(results) > 0:\r\n temp = pool.map(wrap, results)\r\n results = []\r\n for i, plist in temp:\r\n for ll in plist:\r\n if len(ll) == 1:\r\n result[i] = ll[0]\r\n i += 1\r\n elif len(ll) > 1:\r\n results.append((i, ll))\r\n i += len(ll)\r\n return result\r\n\r\ndef divide(a, min, max):\r\n random_int = random.randint(min, max)\r\n axis = a[random_int]\r\n swap(a, max, random_int)\r\n temporary = min\r\n for i in range(min, max):\r\n if a[i] < axis:\r\n swap(a, i, temporary)\r\n temporary = temporary + 1\r\n swap(a, max, temporary)\r\n return temporary\r\n\r\n\r\ndef swap(a, i, j):\r\n temporary = a[i]\r\n a[i] = a[j]\r\n a[j] = temporary\r\n\r\ndef wrap(input_list):\r\n index, lst = input_list\r\n if len(lst) <= 1:\r\n return (index, [lst])\r\n b = divide(lst, 0, len(lst) - 1)\r\n # keep the absolute start index with the partitioned pieces so the caller can place them\r\n return (index, [lst[:b], [lst[b]], lst[b + 1:]])\r\n\r\n\r\ndef main(argv):\r\n N = int(argv[1])\r\n with open(argv[0], \"r\") as myfile:\r\n lyst = [float(next(myfile)) for _ in range(N)]\r\n\r\n start = time.time()\r\n n = multiprocessing.cpu_count()\r\n lyst = parallel_quicksort(lyst, n)\r\n elapsed = time.time() - start\r\n print('Parallel quicksort: %f sec' % (elapsed))\r\n\r\n with open(\"result.txt\", \"w\") as file:\r\n out_str = \"\"\r\n for i in lyst:\r\n out_str += str(i) + \"\\n\"\r\n file.write(out_str)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n 
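# argv[0] is expected to be the input file and argv[1] the number of values to read (descriptive note; not in the original source)\r\n 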
main(sys.argv[1:])","repo_name":"mind2cloud/MSU_python_course","sub_path":"homework_4/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18375195284","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/1/3 9:58 AM\n# @Author : WittonZhou\n# @File : SVM.py\n\nimport numpy as np\nimport time\nimport math\nimport random\n'''\n支持向量机模型\n----------\n数据集:Minst\n分类结果(二分类):\n准确率:\n'''\n\n\ndef load_data(filename):\n \"\"\"\n 加载数据集\n :param filename: \n :return: \n \"\"\"\n data = []\n label = []\n with open(filename) as f:\n for line in f.readlines():\n current_line = line.strip().split(',')\n # 转换数据类型\n data.append([int(num) / 255 for num in current_line[1:]])\n # 数字0标记为1,其余标记为-1\n if int(current_line[0]) == 0:\n label.append(1)\n else:\n label.append(-1)\n return data, label\n\n\nclass SVM:\n def __init__(self, train_data, train_label, sigma=10, C=200, toler=0.001):\n \"\"\"\n SVM相关参数初始化\n :param train_data: 训练数据集\n :param train_label: 训练标签\n :param sigma: 高斯核中分母的σ\n :param C: 软间隔中的惩罚参数\n :param toler: 松弛变量\n \"\"\"\n # 训练数据集\n self.train_data_mat = np.mat(train_data)\n # 训练标签集,转置为列向量\n self.train_label_mat = np.mat(train_label).T\n # m:训练集数 n:特征数\n self.m, self.n = np.shape(self.train_data_mat)\n # 高斯核分母中的σ\n self.sigma = sigma\n # 惩罚参数\n self.C = C\n # 松弛变量\n self.toler = toler\n # 核函数(初始化时提前计算)\n self.k = self.cal_kernel()\n # SVM中的偏置b\n self.b = 0\n # α 长度为训练集数目\n self.alpha = [0] * self.train_data_mat.shape[0]\n # SMO运算过程中的Ei\n self.E = [0 * self.train_label_mat[i, 0] for i in range(self.train_label_mat.shape[0])]\n self.supportVecIndex = []\n\n def cal_kernel(self):\n \"\"\"\n 计算核函数\n :return: 高斯核矩阵\n \"\"\"\n # 初始化高斯核结果矩阵 大小 = 训练集长度m * 训练集长度m\n # k[i][j] = Xi * Xj\n k = [[0 for i in range(self.m)] for j in range(self.m)]\n\n # 大循环遍历Xi,Xi为式7.90中的x\n for i in range(self.m):\n # 得到式7.90中的X\n X = self.train_data_mat[i, :]\n # 小循环遍历Xj,Xj为式7.90中的Z\n # 由于 Xi * Xj 等于 Xj * Xi,一次计算得到的结果可以\n # 同时放在k[i][j]和k[j][i]中,这样一个矩阵只需要计算一半即可\n # 所以小循环直接从i开始\n for j in range(i, self.m):\n # 获得Z\n Z = self.train_data_mat[j, :]\n # 先计算||X - Z||^2\n result = (X - Z) * (X - Z).T\n # 分子除以分母后去指数,得到的即为高斯核结果\n result = np.exp(-1 * result / (2 * self.sigma ** 2))\n # 由于是对称矩阵,所以将Xi*Xj的结果存放入k[i][j]和k[j][i]中\n k[i][j] = result\n k[j][i] = result\n # 返回高斯核矩阵\n return k\n\n def is_satisfy_KKT(self, i):\n \"\"\"\n 查看第i个α是否满足KKT条件\n :param i: α的下标\n :return: True/False\n \"\"\"\n gxi = self.cal_gxi(i)\n yi = self.train_label_mat[i]\n\n # 判断依据参照“7.4.2 变量的选择方法”中“1.第1个变量的选择”\n # 依据7.111\n if (math.fabs(self.alpha[i]) < self.toler) and (yi * gxi >= 1):\n return True\n # 依据7.113\n elif (math.fabs(self.alpha[i] - self.C) < self.toler) and (yi * gxi <= 1):\n return True\n # 依据7.112\n elif (self.alpha[i] > -self.toler) and (self.alpha[i] < (self.C + self.toler)) \\\n and (math.fabs(yi * gxi - 1) < self.toler):\n return True\n\n return False\n\n def cal_gxi(self, i):\n \"\"\"\n 计算g(xi)\n 根据“7.4.1 两个变量二次规划的求解方法” 式7.104\n :param i: x的下标\n :return: \n \"\"\"\n # 初始化g(xi)\n gxi = 0\n # 获得支持向量的index\n indexs = [i for i, alpha in enumerate(self.alpha) if alpha != 0]\n # 遍历每一个非零α,i为非零α的下标\n for index in indexs:\n # 计算g(xi)\n gxi += self.alpha[index] * self.train_label_mat[index] * self.k[index][i]\n # 求和结束后再加上偏置b\n gxi += self.b\n\n # 返回\n return gxi\n\n def cal_Ei(self, i):\n \"\"\"\n 计算Ei\n 根据“7.4.1 两个变量二次规划的求解方法” 式7.105\n :param i: E的下标\n :return: \n \"\"\"\n # 计算g(xi)\n gxi = self.cal_gxi(i)\n # Ei = g(xi) - yi\n 
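# a large |Ei| marks a sample whose alpha is a promising candidate for the SMO update (descriptive note; not in the original source)\n 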
return gxi - self.train_label_mat[i]\n\n def get_alpha_j(self, E1, i):\n \"\"\"\n SMO中选择第二个变量\n :param E1: 第一个变量的E1\n :param i: 第一个变量α的下标\n :return: E2,α2的下标\n \"\"\"\n # 初始化E2\n E2 = 0\n # 初始化|E1-E2|为-1\n maxE1_E2 = -1\n # 初始化第二个变量的下标\n maxIndex = -1\n # 获得Ei非0的对应索引组成的列表,列表内容为非0Ei的下标i\n nozeroE = [i for i, Ei in enumerate(self.E) if Ei != 0]\n # 对每个非零Ei的下标i进行遍历\n for j in nozeroE:\n # 计算E2\n E2_tmp = self.cal_Ei(j)\n # 如果|E1-E2|大于目前最大值\n if math.fabs(E1 - E2_tmp) > maxE1_E2:\n # 更新最大值\n maxE1_E2 = math.fabs(E1 - E2_tmp)\n # 更新最大值E2\n E2 = E2_tmp\n # 更新最大值E2的索引j\n maxIndex = j\n # 如果列表中没有非0元素了(对应程序最开始运行时的情况)\n if maxIndex == -1:\n maxIndex = i\n while maxIndex == i:\n # 获得随机数,如果随机数与第一个变量的下标i一致则重新随机\n maxIndex = int(random.uniform(0, self.m))\n # 获得E2\n E2 = self.cal_Ei(maxIndex)\n\n # 返回第二个变量的E2值以及其索引\n return E2, maxIndex\n\n def train(self, iter=100):\n \"\"\"\n 支持向量积训练\n :param iter: 迭代次数\n :return: \n \"\"\"\n # iter_step:迭代次数,超过设置次数还未收敛则提前中止\n iter_step = 0\n # parame_changed:单次迭代中有参数改变则增加1\n param_changed = 1\n\n # 如果没有达到迭代次数上限以及上次迭代中有参数改变则继续迭代\n # param_changed==0时表示上次迭代没有参数改变\n while (iter_step < iter) and (param_changed > 0):\n # 打印当前迭代轮数\n print('当前迭代轮数为:%d:%d' % (iter_step, iter))\n # 迭代步数加1\n iter_step += 1\n # 新的一轮将参数改变标志位重新置0\n param_changed = 0\n\n # 大循环遍历所有样本,用于找SMO中第一个变量\n for i in range(self.m):\n # 查看第一个遍历是否满足KKT条件,如果不满足则作为SMO中第一个变量从而进行优化\n if self.is_satisfy_KKT(i) == False:\n # 第一个变量α的下标i已经确定,接下来按照“7.4.2 变量的选择方法”第二步选择变量2。\n # 由于变量2的选择中涉及到|E1 - E2|,因此先计算E1\n E1 = self.cal_Ei(i)\n\n # 选择第2个变量\n E2, j = self.get_alpha_j(E1, i)\n\n # 参考“7.4.1两个变量二次规划的求解方法” P126 下半部分\n # 获得两个变量的标签\n y1 = self.train_label_mat[i]\n y2 = self.train_label_mat[j]\n # 复制α值作为old值\n alphaOld_1 = self.alpha[i]\n alphaOld_2 = self.alpha[j]\n # 依据标签是否一致来生成不同的L和H\n if y1 != y2:\n L = max(0, alphaOld_2 - alphaOld_1)\n H = min(self.C, self.C + alphaOld_2 - alphaOld_1)\n else:\n L = max(0, alphaOld_2 + alphaOld_1 - self.C)\n H = min(self.C, alphaOld_2 + alphaOld_1)\n # 如果两者相等,说明该变量无法再优化,直接跳到下一次循环\n if L == H:\n continue\n\n # 计算α的新值\n # 依据“7.4.1两个变量二次规划的求解方法”式7.106更新α2值\n # 先获得几个k值,用来计算事7.106中的分母η\n k11 = self.k[i][i]\n k22 = self.k[j][j]\n k21 = self.k[j][i]\n k12 = self.k[i][j]\n # 依据式7.106更新α2,该α2还未经剪切\n alphaNew_2 = alphaOld_2 + y2 * (E1 - E2) / (k11 + k22 - 2 * k12)\n # 剪切α2\n if alphaNew_2 < L:\n alphaNew_2 = L\n elif alphaNew_2 > H:\n alphaNew_2 = H\n # 更新α1,依据式7.109\n alphaNew_1 = alphaOld_1 + y1 * y2 * (alphaOld_2 - alphaNew_2)\n\n # 依据“7.4.2 变量的选择方法”第三步式7.115和7.116计算b1和b2\n b1New = -1 * E1 - y1 * k11 * (alphaNew_1 - alphaOld_1) \\\n - y2 * k21 * (alphaNew_2 - alphaOld_2) + self.b\n b2New = -1 * E2 - y1 * k12 * (alphaNew_1 - alphaOld_1) \\\n - y2 * k22 * (alphaNew_2 - alphaOld_2) + self.b\n\n # 依据α1和α2的值范围确定新b\n if (alphaNew_1 > 0) and (alphaNew_1 < self.C):\n bNew = b1New\n elif (alphaNew_2 > 0) and (alphaNew_2 < self.C):\n bNew = b2New\n else:\n bNew = (b1New + b2New) / 2\n\n # 将更新后的各值写入,进行更新\n self.alpha[i] = alphaNew_1\n self.alpha[j] = alphaNew_2\n self.b = bNew\n\n self.E[i] = self.cal_Ei(i)\n self.E[j] = self.cal_Ei(j)\n\n # 如果α2的改变量过于小,就认为该参数未改变,不增加param_changed值\n # 反之则自增1\n if math.fabs(alphaNew_2 - alphaOld_2) >= 0.00001:\n param_changed += 1\n\n # 打印迭代轮数,i值,该迭代轮数,修改α数目\n print(\"当前迭代轮数为:%d i:%d, 修改α数目: %d\" % (iter_step, i, param_changed))\n\n # 全部计算结束后,重新遍历一遍α,查找里面的支持向量\n for i in range(self.m):\n # 如果α>0,说明是支持向量\n if self.alpha[i] > 0:\n # 将支持向量的索引保存起来\n self.supportVecIndex.append(i)\n\n def cal_single_kernel(self, x1, x2):\n \"\"\"\n 单独计算核函数\n :param x1: 向量1\n :param x2: 向量2\n 
:return: 核函数结果\n \"\"\"\n result = (x1 - x2) * (x1 - x2).T\n result = np.exp(-1 * result / (2 * self.sigma ** 2))\n # 返回结果\n return result\n\n def predict(self, x):\n \"\"\"\n 对样本进行预测\n :param x: \n :return: \n \"\"\"\n result = 0\n for i in self.supportVecIndex:\n # 遍历所有支持向量,计算求和式\n # 如果是非支持向量,求和子式必为0,没有必须进行计算\n # 这也是为什么在SVM最后只有支持向量起作用\n # ------------------\n # 先单独将核函数计算出来\n tmp = self.cal_single_kernel(self.train_data_mat[i, :], np.mat(x))\n # 对每一项子式进行求和,最终计算得到求和项的值\n result += self.alpha[i] * self.train_label_mat[i] * tmp\n # 求和项计算结束后加上偏置b\n result += self.b\n # 使用sign函数(指示函数)返回预测结果\n return np.sign(result)\n\n def test(self, test_data, test_label):\n \"\"\"\n 测试\n :param test_data: 测试集\n :param test_label: 真实标签\n :return: 准确率\n \"\"\"\n # 错误计数值\n error_count = 0\n # 遍历测试集所有样本\n for i in range(len(test_data)):\n # 打印目前进度\n print('test:%d:%d' % (i, len(test_data)))\n # 获取预测结果\n result = self.predict(test_data[i])\n # 如果预测与标签不一致,错误计数值加一\n if result != test_label[i]:\n error_count += 1\n # 返回正确率\n return 1 - error_count / len(test_data)\n\n\nif __name__ == '__main__':\n start = time.time()\n\n # 获取训练集及标签\n print('starting to load data')\n train_data, train_label = load_data('../Input/mnist_train.csv')\n test_data, test_label = load_data('../Input/mnist_test.csv')\n\n # 初始化SVM类\n print('starting to init')\n svm = SVM(train_data[:1000], train_label[:1000], 10, 200, 0.001)\n\n # 开始训练\n print('starting to train')\n svm.train()\n\n # 开始测试\n print('starting to test')\n # 由于时间原因,仅选择100条测试数据测试\n accuracy = svm.test(test_data[:100], test_label[:100])\n print('准确率为:', accuracy)\n print('消耗时间为:', time.time() - start)\n\n","repo_name":"wittonzhou/LiHang-Statistical-Learning","sub_path":"SVM/SVM.py","file_name":"SVM.py","file_ext":"py","file_size_in_byte":14140,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"33344908098","text":"import argparse\nimport pysam\nimport sys\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\n\n\ndef parse_args(argv):\n usage = \"Annotate homopolymers in FASTA file\"\n parser = argparse.ArgumentParser(\n description=usage, formatter_class=argparse.RawDescriptionHelpFormatter\n )\n\n parser.add_argument(\"-m\", \"--min\", dest=\"MIN\", help=\"Min read count for clusters\")\n parser.add_argument(\"-M\", \"--max\", dest=\"MAX\", help=\"Max read count for clusters\")\n parser.add_argument(\n \"-n\",\n \"--name\",\n dest=\"NAME\",\n help=\"Amplicon name. 
Name used in the target BED file.)\",\n )\n parser.add_argument(\n \"-f\", \"--figures\", dest=\"FIGURES\", help=\"Prefix for output figures\"\n )\n parser.add_argument(\n \"FOLDERS\", type=str, nargs=\"+\", help=\"UMI pipeline output folder\"\n )\n\n args = parser.parse_args(argv)\n\n return args\n\n\ndef get_readstast_bam(folder):\n bam_1d_file = \"{}/align/1d.bam\".format(folder)\n\n mapped = 0\n unmapped = 0\n with pysam.AlignmentFile(bam_1d_file, \"rb\") as bam_1d:\n for r in bam_1d:\n if r.is_unmapped:\n unmapped += 1\n continue\n if not r.is_supplementary and not r.is_secondary:\n mapped += 1\n sequenced = mapped + unmapped\n return sequenced, mapped, unmapped\n\n\ndef count_fastx_reads_with_umi(folder, amplicon_name):\n fastq_with_umi_file = \"{}/fasta_umi/{}_detected_umis.fasta\".format(\n folder, amplicon_name\n )\n return count_fastx_reads(fastq_with_umi_file)\n\n\ndef count_fastx_reads_on_target(folder, amplicon_name):\n fastq_ontarget_file = \"{}/fasta_filtered/{}.fastq\".format(folder, amplicon_name)\n return count_fastx_reads(fastq_ontarget_file)\n\n\ndef count_fastx_reads(filename):\n count = 0\n with pysam.FastxFile(filename) as fh:\n for entry in fh:\n count += 1\n return count\n\n\ndef get_median_acc(folder, amplicon_name):\n acc_stats_file = \"{}/stats/{}_consensus_size_vs_acc.tsv\".format(\n folder, amplicon_name\n )\n acc_stats = pd.read_csv(acc_stats_file, sep=\"\\t\")\n acc_stats_prim = acc_stats.query(\"Flags in [16, 0]\")\n return acc_stats_prim[\"Acc\"].median()\n\n\ndef get_reads_usage_stats(folder, amplicon_name, min_size=20, max_size=60):\n cluster_stats_file = \"{}/stats/{}_vsearch_cluster_stats.tsv\".format(\n folder, amplicon_name\n )\n cluster_stats = pd.read_csv(cluster_stats_file, sep=\"\\t\")\n\n min_fwd = min_size / 2.0\n min_rev = min_size / 2.0\n\n total_reads = cluster_stats[\"n\"].sum()\n\n small_clusters = cluster_stats[\n (cluster_stats[\"n_fwd\"] < min_fwd) | (cluster_stats[\"n_rev\"] < min_rev)\n ]\n reads_in_small_clusters = small_clusters[\"n\"].sum()\n\n ok_clusters = cluster_stats[\n (cluster_stats[\"n_fwd\"] >= min_fwd) & (cluster_stats[\"n_rev\"] >= min_rev)\n ]\n reads_in_large_clusters = ok_clusters[\"n\"].sum()\n cluster_count = len(ok_clusters)\n\n excess_reads = sum(\n cluster_stats[(cluster_stats[\"n\"] - max_size) > 0][\"n\"] - max_size\n )\n\n usable_reads = reads_in_large_clusters - excess_reads\n\n usable_perc = usable_reads * 100.0 / total_reads\n reads_in_small_clusters_perc = reads_in_small_clusters * 100.0 / total_reads\n excess_reads_perc = excess_reads * 100.0 / total_reads\n\n return (\n cluster_count,\n usable_reads,\n total_reads,\n usable_perc,\n reads_in_small_clusters_perc,\n excess_reads_perc,\n )\n\n\ndef get_stats(\n folder,\n amplicon_name,\n display_name,\n min_cluster_size,\n max_cluster_size,\n figure_prefix,\n):\n if figure_prefix:\n cluster_read_hist(folder, amplicon_name, display_name, figure_prefix)\n sequenced, mapped, unmapped = get_readstast_bam(folder)\n on_target = count_fastx_reads_on_target(folder, amplicon_name)\n with_umi = count_fastx_reads_with_umi(folder, amplicon_name)\n median_acc = get_median_acc(folder, amplicon_name)\n (\n cluster_count,\n usable_reads,\n total_reads,\n usable_perc,\n reads_in_small_clusters_perc,\n excess_reads_perc,\n ) = get_reads_usage_stats(folder, amplicon_name, min_cluster_size, max_cluster_size)\n\n if total_reads != with_umi:\n print(\n \"Warning: clustering numbers don't add up. {} reads missing. 
This can happen if vsearch excludes UMIs because of length.\".format(\n abs(total_reads - with_umi)\n ),\n file=sys.stderr,\n )\n\n return {\n \"name\": display_name,\n \"sequenced\": int(sequenced),\n \"mapped\": int(mapped),\n \"on_target\": int(on_target),\n \"with_umi\": int(with_umi),\n \"min_size\": min_cluster_size,\n \"max_size\": max_cluster_size,\n \"median_acc\": float(median_acc),\n \"cluster_count\": int(cluster_count),\n \"usable_reads\": int(usable_reads),\n \"usable_perc\": round(usable_perc, 2),\n \"reads_in_small_clusters_perc\": round(reads_in_small_clusters_perc, 2),\n \"excess_reads_perc\": round(excess_reads_perc, 2),\n }\n\n\ndef cluster_read_hist(folder, amplicon_name, display_name, figure_prefix):\n filename = \"{}/stats/{}_vsearch_cluster_stats.tsv\".format(folder, amplicon_name)\n cluster_stats = pd.read_csv(filename, sep=\"\\t\")\n\n current_palette = sns.color_palette()\n\n fig = plt.figure(figsize=(15, 8))\n sns.set_style(\n \"whitegrid\",\n {\n \"grid.linestyle\": \"--\",\n },\n )\n sns.set_context(\"paper\", font_scale=3)\n\n tmp = cluster_stats[\"n\"].value_counts().reset_index()\n tmp[\"Read_count\"] = tmp[\"n\"] * tmp[\"index\"]\n\n ax = sns.barplot(x=\"index\", color=current_palette[0], y=\"Read_count\", data=tmp)\n ax.set_xticklabels(ax.get_xticklabels(), rotation=90)\n ax.set(\n xlabel=\"Cluster size\",\n ylabel=\"Number of reads in clusters\",\n title=\"Cluster size distribution - Sample: {}\".format(display_name),\n )\n\n ax.axvline(20, ls=\"--\", zorder=1, label=\"Min cluster size\", color=\"red\", alpha=0.8)\n ax.axvline(60, ls=\"--\", zorder=1, label=\"Max cluster size\", color=\"red\", alpha=0.8)\n ax.set_xlim(0, 200)\n\n xmin, xmax = ax.get_xlim()\n custom_ticks = np.linspace(xmin, xmax, 5, dtype=int)\n ax.set_xticks(custom_ticks)\n ax.set_xticklabels(custom_ticks)\n\n sns.despine()\n plt.tight_layout()\n fig.savefig(str(figure_prefix) + \"_cluster_size_distribution.pdf\", dpi=300)\n fig.savefig(str(figure_prefix) + \"_cluster_size_distribution.png\", dpi=100)\n plt.show()\n\n\ndef main(argv=sys.argv[1:]):\n args = parse_args(argv=argv)\n\n folders = args.FOLDERS\n figures = args.FIGURES\n name = args.NAME\n c_min = int(args.MIN)\n c_max = int(args.MAX)\n\n df_stats = pd.DataFrame(\n columns=[\n \"name\",\n \"sequenced\",\n \"mapped\",\n \"on_target\",\n \"with_umi\",\n \"min_size\",\n \"max_size\",\n \"median_acc\",\n \"cluster_count\",\n \"usable_reads\",\n \"usable_perc\",\n \"reads_in_small_clusters_perc\",\n \"excess_reads_perc\",\n ]\n )\n i = 0\n for folder in folders:\n if \":\" in folder:\n cols = folder.split(\":\")\n display_name = cols[0]\n folder = cols[1]\n else:\n display_name = name\n\n figures_ext = None\n if figures:\n figures_ext = \"{}_{}_{}\".format(figures, name, i)\n stats = get_stats(folder, name, display_name, c_min, c_max, figures_ext)\n df_stats = df_stats.append(stats, ignore_index=True)\n\n i += 1\n print(df_stats.to_csv(sep=\"\\t\", index=False, header=True), end=\"\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"nanoporetech/pipeline-umi-amplicon","sub_path":"lib/umi_amplicon_tools/umi_sats.py","file_name":"umi_sats.py","file_ext":"py","file_size_in_byte":7741,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"61"} +{"seq_id":"43857624465","text":"import numpy as np\nimport itertools\nfrom scipy.misc import *\nimport matplotlib.pylab as plt\nimport os, re\nfrom pprint import pprint\nfrom keras.utils import np_utils\nfrom sklearn.model_selection import 
train_test_split\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D, AveragePooling2D\nfrom keras.constraints import maxnorm\nfrom keras.optimizers import SGD\nfrom keras.utils import np_utils\nimport numpy as np\n\n\ndef load_image2(path):\n return imread(path)\n\n\ndef load_data():\n x = []\n y = []\n data_path = \"/Users/panqiutong/Downloads/lfw\"\n label = 0\n for folder in os.listdir(data_path):\n folder_path = os.path.join(data_path, folder)\n\n for file in os.listdir(folder_path):\n img = load_image2(os.path.join(folder_path, file))\n x.append(img)\n y.append(label)\n label += 1\n\n # if label >= 10:\n # break\n print(\"all label\", label)\n return x, y\n\n\ndef crop_and_downsample(originalX, downsample_size=32):\n \"\"\"\n Starts with a 250 x 250 image.\n Crops to 128 x 128 around the center.\n Downsamples the image to (downsample_size) x (downsample_size).\n Returns an image with dimensions (channel, width, height).\n \"\"\"\n current_dim = 250\n target_dim = 128\n margin = int((current_dim - target_dim) / 2)\n left_margin = margin\n right_margin = current_dim - margin\n\n # newim is shape (6, 128, 128)\n newim = originalX[left_margin:right_margin, left_margin:right_margin, :]\n\n # resized are shape (feature_width, feature_height, 3)\n feature_width = feature_height = downsample_size\n newX = imresize(newim[:, :, :], (feature_width, feature_height), interp=\"bicubic\", mode=\"RGB\")\n\n # the next line is EXTREMELY important.\n # if you don't normalize your data, all predictions will be 0 forever.\n newX = newX / 255\n\n return newX\n\n\ndef create_model():\n model = Sequential()\n model.add(Conv2D(32, (5, 5),\n input_shape=(32, 32, 3),\n padding='same',\n data_format='channels_last',\n activation='relu'))\n\n model.add(Conv2D(32, (5, 5),\n padding='same',\n data_format='channels_last',\n activation='relu'))\n\n model.add(AveragePooling2D(pool_size=(2, 2),\n data_format='channels_last'))\n\n model.add(Dropout(0.2))\n model.add(Flatten())\n model.add(Dense(128, activation='relu', kernel_constraint=maxnorm(3)))\n model.add(Dropout(0.2))\n\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model\n\n\nmodel = create_model()\nmodel.load_weights(\"model/fr_1.hdf5\", by_name=True)\n\n\ndef img2vec(path):\n X = []\n X.append(load_image2(path))\n X = np.vstack([np.array(list(X))])\n X = np.asarray([crop_and_downsample(x) for x in X])\n vec = model.predict(X)\n return vec[0]\n\n # X = load_image2(path)\n # X = np.asarray(crop_and_downsample(X))\n # vec = model.predict(X)\n # return vec\n\n\npath_1 = \"/Users/panqiutong/Downloads/lfw/George_W_Bush/George_W_Bush_0005.jpg\"\nvec_1 = img2vec(path_1)\n\npath_2 = \"/Users/panqiutong/Downloads/lfw/George_W_Bush/George_W_Bush_0003.jpg\"\nvec_2 = img2vec(path_2)\n\npath_3 = \"/Users/panqiutong/Downloads/lfw/Zhu_Rongji/Zhu_Rongji_0003.jpg\"\nvec_3 = img2vec(path_3)\n\n\nprint(\"计算相似度\")\nfrom numpy import linalg\n\n\ndef calc_cos(vec_1, vec_2):\n num = np.dot(vec_1, vec_2)\n denom = linalg.norm(vec_1) * linalg.norm(vec_2)\n cos = num / denom\n return cos\n\n\ndef calc_o(vec_1, vec_2):\n dist = linalg.norm(vec_1 - vec_2)\n return dist\n\n\nprint(\"cos\", calc_cos(vec_1, vec_2))\nprint(\"cos\", calc_cos(vec_1, vec_3))\n\nprint(\"dist\", calc_o(vec_1, vec_2))\nprint(\"dist\", calc_o(vec_1, vec_3))\n# dist = linalg.norm(vec_1 - vec_2)\n# print(\"o\", 
dist)\n\n\n","repo_name":"Qt7mira/Mira-python","sub_path":"dl/face_recog/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":3962,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"40996023855","text":"dados1 = list()\ndados2 = list()\npessoa = list()\n\ndados1.append('Fer')\ndados1.append(18)\ndados2.append('Yum')\ndados2.append(12)\n\npessoa.append(dados1[:]) \npessoa.append(dados2[:])# usamos o fatiamento para ser só uma cópia da outra lista e não uma ligação\n\nprint(pessoa)\n\nlista = list()\ngalera = list()\n\nfor i in range(0, 3):\n lista.append(str(input('Nome: ')))\n lista.append(int(input('Idade: ')))\n galera.append(lista[:])\n lista.clear()\n\nfor p in galera:\n print(p[0][1])\n","repo_name":"Lu1zReis/exercicios-Python","sub_path":"testes e exercícios/exercicios/aula_18.py","file_name":"aula_18.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7115581522","text":"# 1100\n\nimport sys\n\ncnt = 0\nwcnt = 0\nfor _ in range(8):\n board = sys.stdin.readline().rstrip()\n if (cnt % 2 == 0):\n for i in range(0, 8, 2):\n if (board[i] == \"F\"):\n wcnt += 1\n elif (cnt % 2 == 1):\n for j in range(1, 8, 2):\n if (board[j] == \"F\"):\n wcnt += 1\n cnt += 1\nprint(wcnt)\n","repo_name":"soohyeon21/study","sub_path":"BaekJoon/cpp_251to300/6_b2_1100.py","file_name":"6_b2_1100.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8088223712","text":"#!/usr/bin/env python\nfrom query import *\nfrom index import Index\n\ndef build_corpus(client, terms, ids, vec_size):\n corpus = {\"df\": {}, \"doc\": {}, \"doc_count\": None}\n ########################################\n # the structure of the corpus will be: #\n # corpus = { #\n # 'df': { #\n # 'term1': df_1, #\n # 'term2': df_2, #\n # 'term3': df_3... #\n # }, #\n # 'doc': { #\n # 'id_1': { #\n # length: len_1, #\n # tf: { #\n # 'term1': tf_1, #\n # 'term2': tf_2, #\n # 'term3': tf_3... 
#\n # } #\n # pos: { #\n # 'term1': [] #\n # } #\n # } #\n # }, #\n # 'doc_count': number of docs #\n # 'avg_doc_length': #\n # 'voc_size': #\n # } #\n ########################################\n\n for term in terms:\n res = query_term(client, term)\n corpus[\"df\"][term] = len(res)\n\n for doc in res:\n if doc[\"id\"] in corpus[\"doc\"]:\n corpus[\"doc\"][doc[\"id\"]][\"tf\"][term] = len(doc[\"pos\"])\n corpus[\"doc\"][doc[\"id\"]][\"pos\"][term] = doc[\"pos\"]\n else:\n corpus[\"doc\"][doc[\"id\"]] = {\"tf\": {}, \"pos\": {}, \"length\": None}\n corpus[\"doc\"][doc[\"id\"]][\"tf\"][term] = len(doc[\"pos\"])\n corpus[\"doc\"][doc[\"id\"]][\"pos\"][term] = doc[\"pos\"]\n\n for i in ids:\n if not i in corpus[\"doc\"]:\n corpus[\"doc\"][i] = {\"tf\": {}, \"pos\": {}, \"length\": None}\n corpus[\"doc\"][i][\"length\"] = client.length_of_doc(i)\n # corpus[\"doc\"][i][\"tf\"] = relative_terms\n\n corpus[\"doc_count\"] = len(corpus[\"doc\"])\n total_doc_length = 0\n for doc in corpus[\"doc\"]:\n total_doc_length += corpus[\"doc\"].get(doc)[\"length\"]\n corpus[\"avg_doc_length\"] = total_doc_length/corpus[\"doc_count\"]\n corpus[\"voc_size\"] = vec_size\n return corpus\n","repo_name":"Tsgzj/CS6200","sub_path":"HW2/src/corpus.py","file_name":"corpus.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4140360817","text":"from queue import Empty\nfrom fastapi import FastAPI, Depends, Request, Form, status\nfrom fastapi.staticfiles import StaticFiles\n\nfrom starlette.responses import RedirectResponse, JSONResponse\nfrom starlette.templating import Jinja2Templates\n\nfrom sqlalchemy.orm import Session\nfrom database import SessionLocal, engine\nimport models\n\nmodels.Base.metadata.create_all(bind=engine)\n\ntemplates = Jinja2Templates(directory=\"templates\")\n\nclass UnicornException(Exception):\n def __init__(self, name: str):\n self.name = name\n\napp = FastAPI()\n\napp.mount(\"/js\", StaticFiles(directory=\"js\"), name=\"js\")\n\n\n# Dependency\ndef get_db():\n db = SessionLocal()\n try: \n yield db\n finally:\n db.close()\n\n@app.exception_handler(UnicornException)\nasync def unicorn_exception_handler(request: Request, exc: UnicornException):\n return JSONResponse(\n status_code=422,\n content={\"message\": f\"Oops! {exc.name} did something. 
There goes a rainbow...\"},\n )\n\n@app.get(\"/\")\nasync def home(req: Request, db: Session = Depends(get_db)):\n todos = db.query(models.Todo).all()\n return templates.TemplateResponse(\"base.html\", { \"request\": req, \"todo_list\": todos })\n\n@app.post(\"/add\")\ndef add(req: Request, title: str = Form(..., min_length=2, max_length=280), db: Session = Depends(get_db)):\n if title == \"yolo\":\n print(\"fuck\")\n else:\n new_todo = models.Todo(title=title)\n db.add(new_todo)\n db.commit()\n url = app.url_path_for(\"home\")\n return RedirectResponse(url=url, status_code=status.HTTP_303_SEE_OTHER)\n\n@app.get(\"/update/{todo_id}\")\ndef add(req: Request, todo_id: int, db: Session = Depends(get_db)):\n todo = db.query(models.Todo).filter(models.Todo.id == todo_id).first()\n todo.complete = True\n db.commit()\n url = app.url_path_for(\"home\")\n return RedirectResponse(url=url, status_code=status.HTTP_303_SEE_OTHER)\n\n@app.get(\"/indoubt/{todo_id}\")\ndef add(req: Request, todo_id: int, db: Session = Depends(get_db)):\n todo = db.query(models.Todo).filter(models.Todo.id == todo_id).first()\n todo.complete = False\n todo.denied = False\n todo.indoubt = True\n db.commit()\n url = app.url_path_for(\"home\")\n return RedirectResponse(url=url, status_code=status.HTTP_303_SEE_OTHER)\n\n@app.get(\"/denied/{todo_id}\")\ndef add(req: Request, todo_id: int, db: Session = Depends(get_db)):\n todo = db.query(models.Todo).filter(models.Todo.id == todo_id).first()\n todo.complete = False\n todo.indoubt = False\n todo.denied = True\n db.commit()\n url = app.url_path_for(\"home\")\n return RedirectResponse(url=url, status_code=status.HTTP_303_SEE_OTHER)\n\n\n@app.get(\"/delete/{todo_id}\")\ndef add(req: Request, todo_id: int, db: Session = Depends(get_db)):\n todo = db.query(models.Todo).filter(models.Todo.id == todo_id).first()\n db.delete(todo)\n db.commit()\n url = app.url_path_for(\"home\")\n return RedirectResponse(url=url, status_code=status.HTTP_303_SEE_OTHER)\n\n\n","repo_name":"odonald/Karen-simplist","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38836311623","text":"## @ingroup Attributes-Propellants\n# Propellant.py\n# \n# Created: Unk 2013, SUAVE TEAM\n# Modified: Apr 2015, SUAVE TEAM\n\n\n# ----------------------------------------------------------------------\n# Imports\n# ----------------------------------------------------------------------\n\nfrom SUAVE.Core import Data\n\n# ----------------------------------------------------------------------\n# Class\n# ----------------------------------------------------------------------\n## @ingroup Attributes-Propellants\nclass Propellant(Data):\n \"\"\"Holds values for a propellant\n \n Assumptions:\n None\n \n Source:\n None\n \"\"\"\n\n def __defaults__(self):\n \"\"\"This sets the default values.\n\n Assumptions:\n None\n\n Source:\n Values commonly available\n\n Inputs:\n None\n\n Outputs:\n None\n\n Properties Used:\n None\n \"\"\" \n self.tag = 'Propellant'\n self.reactant = 'O2'\n self.density = 0.0 # kg/m^3\n self.specific_energy = 0.0 # MJ/kg\n self.energy_density = 0.0 # MJ/m^3\n self.max_mass_fraction = Data({'Air' : 0.0, 'O2' : 0.0}) # kg propellant / kg oxidizer\n self.temperatures = Data()\n self.temperatures.flash = 0.0 # K\n self.temperatures.autoignition = 0.0 # K\n self.temperatures.freeze = 0.0 # K\n self.temperatures.boiling = 0.0 # 
K","repo_name":"suavecode/SUAVE","sub_path":"trunk/SUAVE/Attributes/Propellants/Propellant.py","file_name":"Propellant.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","stars":349,"dataset":"github-code","pt":"61"} +{"seq_id":"26374173904","text":"import numpy as np\nfrom scipy.optimize import bisect\nimport error_classes as errcl\nimport read_HDF5_logfile as HDF_log \nimport os\n\ndef calc_corr(Correlators, args): # Correlators [Ops][N_T] \n result = {} \n result[\"C\"] = Correlators \n return result\n\ndef calc_corr_tilde(Correlators, args): # Correlators [Ops][N_T] \n result = {} \n result[\"C_tilde\"] = []\n for i in range(len(Correlators)-2): # 2 oder 1, I have to decide\n result[\"C_tilde\"].append(Correlators[i]-Correlators[i+2])\n return result\n\ndef calc_eff_mass_log(Correlators, args): # Correlators [Ops][N_T] \n result = {} \n result[\"m_eff_log\"] = []\n for i in range(len(Correlators)-1):\n m_eff = np.log(Correlators[i])/np.log(Correlators[i+1])\n if np.isnan(m_eff) or np.isinf(m_eff):\n result[\"m_eff_log\"].append(0)\n else:\n result[\"m_eff_log\"].append(m_eff)\n return result\n\ndef calc_eff_mass_impl(Correlators, args): # Correlators [Ops][N_T] \n result = {} \n def zero_eff_mass(eff_mass, ratio, index):\n return np.cosh(eff_mass*(T_2-index))/np.cosh(eff_mass*(T_2-(index+1))) - ratio # {results}[result_array]\n \n result[\"m_eff_impl\"] = []\n for i in range(len(Correlators)-1): # 2 oder 1, I have to decide\n ratio = Correlators[i]/Correlators[i+1]\n T_2 = (len(Correlators)-2)//2\n result[\"m_eff_impl\"].append(bisect(f=zero_eff_mass, a=1e-30, b=100, args = (ratio,i)))\n return result\n\ndef calc_eff_mass_impl_deri(Correlators, args): # Correlators [Ops][N_T] \n result = {} \n def zero_eff_mass(eff_mass, ratio, index):\n return np.sinh(eff_mass*(T_2-index))/np.sinh(eff_mass*(T_2-(index+1))) - ratio # {results}[result_array]\n result[\"m_eff_impl_deri\"] = []\n for i in range(len(Correlators)-3): # 2 oder 1, I have to decide\n ratio = (Correlators[i]-Correlators[i+2])/(Correlators[i+1]-Correlators[i+3])\n T_2 = (len(Correlators)-2)//2\n if (T_2-i) == 0 or (T_2-(i+1)) == 0: # Only happens for sinh. 
For cosh both values are well-defined\n # result[\"m_eff_impl_deri_\"+Op].append(float(\"inf\"))\n result[\"m_eff_impl_deri\"].append(0)\n else:\n res = bisect(f=zero_eff_mass, a=1e-30, b=1000, args = (ratio,i))\n if np.isnan(res):\n result[\"m_eff_impl_deri\"].append(0)\n else:\n result[\"m_eff_impl_deri\"].append(bisect(f=zero_eff_mass, a=1e-30, b=1000, args = (ratio,i)))\n return result\n \ndef calc_convexity(Correlators, args): # Correlators [Ops][N_T] \n result = {} # {results}[result_array]\n result[\"convexity\"] = []\n for i in range(len(Correlators)-2):\n result[\"convexity\"].append((Correlators[i]-2*Correlators[i+1]+Correlators[i+2])/4)\n return result\n\ndef basic_analysis(Correlators, args): # Give C_pi, C_rho, C_pipi\n result = {}\n for key, value in calc_corr(Correlators, None).items():\n result[key]=value\n for key, value in calc_corr_tilde(Correlators, None).items():\n result[key]=value\n for key, value in calc_eff_mass_log(Correlators, None).items():\n result[key]=value\n for key, value in calc_eff_mass_impl_deri(Correlators, None).items():\n result[key]=value\n for key, value in calc_convexity(Correlators, None).items():\n result[key]=value\n return result\n\n################################ CALCULATION ####################################\n\ndef create_all_filenames():\n PATH = \"output/HDF5_logfiles/\"\n temp = \"Scattering_src\"\n filelist = os.listdir(PATH)\n resultfile_list = []\n num = len(temp)\n for file in filelist:\n length = len(file)\n if file[:num] == temp:\n resultfile_list.append(file[:length-5]) \n\n with open(\"input/filenames_basic_analysis_all\", \"w\") as file:\n for filename in resultfile_list:\n file.write(PATH+\"%s\"%filename+\".hdf5\\n\")\n\n\ndef main():\n filelist = np.genfromtxt(\"/home/dengler_yannick/Documents/Scattering_Analysis_YD/input/filenames_basic_analysis\", \"str\")\n ops = (\"pi\", \"rho\", \"pipi\")\n for filename in filelist:\n info = HDF_log.get_info_from_HDF5_logfile(filename)\n corrs = HDF_log.get_pi_rho_pipi_corr_from_HDF5_logfile(filename)\n for i in range(len(corrs)):\n info[\"op\"] = ops[i]\n basic = errcl.measurement(\"basic_%s_%s\"%(info[\"info_string\"], ops[i]), measure_func = basic_analysis, sampling_args = (\"BS_SAMEDIM\",1000,1), infos=info)\n basic.measure(orig_sample=np.swapaxes(corrs[i],0,1), args=[None,])\n basic.print_to_HDF()\n \nif __name__ == \"__main__\":\n # create_all_filenames()\n main()\n\n\n","repo_name":"yannickdengler/Scattering_Analysis_YD","sub_path":"dev/basic_analysis.py","file_name":"basic_analysis.py","file_ext":"py","file_size_in_byte":5280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28143222814","text":"# 골드 1레벨\n# 다시 풀기\n\nn, k = map(int, input().split())\nli = list(map(int, input().split()))\n\nanswer = 0\nsocket = set()\nfor idx in range(len(li)):\n if len(socket) < n:\n socket.add(li[idx])\n elif li[idx] not in socket:\n flag = False\n for i in range(idx + 1, len(li)):\n if li[i] in socket:\n socket.discard(li[i])\n flag = True\n if flag == False:\n socket.pop()\n answer += 1\n socket.add(li[idx])\nprint(answer)","repo_name":"youjeonghan/BackJoon_Algorithm","sub_path":"그리디/1700_멀티탭 스케줄링.py","file_name":"1700_멀티탭 스케줄링.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6279438059","text":"from fastapi_users.authentication import CookieTransport, JWTStrategy, AuthenticationBackend\nfrom src.core.config import 
SECRET\n\ncookie_transport = CookieTransport(cookie_name='picture', cookie_max_age=3600)\n\nSECRET = f\"{SECRET}\"\n\n\ndef get_jwt_strategy() -> JWTStrategy:\n return JWTStrategy(secret=SECRET, lifetime_seconds=3600)\n\n\nauth_backend = AuthenticationBackend(\n name=\"jwt\",\n transport=cookie_transport,\n get_strategy=get_jwt_strategy,\n)","repo_name":"K-Maxim/picture_feed","sub_path":"src/auth/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37380930296","text":"from typing import Dict, Tuple, Type\n\n_TYPE_OVERLOADS = {\n int: type(\"EInt\", (int,), dict()),\n float: type(\"EFloat\", (float,), dict()),\n str: type(\"EStr\", (str,), dict()),\n dict: type(\"EDict\", (dict,), dict()),\n list: type(\"EList\", (list,), dict()),\n}\n\n# cache created classes here\n_CLASS_LOOKUP: Dict[Tuple[Type, Type], Type] = {}\n\n\ndef add_class_to_obj(value, cls):\n \"\"\"Add a class to a python type.\n This function modifies value so that it has cls as a basetype.\n The value itself may be modified by this action! You must use the return\n value of this function however, since some types need to be copied first (heaptypes).\n \"\"\"\n if isinstance(value, cls):\n # If already is instance, do not add\n return value\n\n try:\n orig_cls = value.__class__\n key = (orig_cls, cls)\n new_cls = _CLASS_LOOKUP.get(key)\n if new_cls is None:\n new_cls = orig_cls.__class__(orig_cls.__name__, (orig_cls, cls), {})\n _CLASS_LOOKUP[key] = new_cls\n value.__class__ = new_cls\n return value\n except TypeError:\n # Non heap type, look in overloads dict\n for type_, func in _TYPE_OVERLOADS.items():\n # Use type() here, we only need to trigger if it's the exact type,\n # as otherwise we don't need to overload the class\n if type(value) is type_: # pylint: disable=unidiomatic-typecheck\n return add_class_to_obj(func(value), cls)\n raise\n\n\ndef list_starts_with(list_, sub):\n return len(sub) <= len(list_) and all(list_[i] == x for i, x in enumerate(sub))\n\n\ndef is_approximately_integer(value):\n if isinstance(value, int):\n return True\n return abs(value - round(value)) < 0.001\n","repo_name":"corvis/cli-rack","sub_path":"src/cli_rack_validation/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"6501820426","text":"import pydicom\nimport cv2\nimport os\nimport time\nimport logging\n\n\ndef logfile(info, msg):\n log_path = 'E:/MyProgramming/Python/Project/implement/heart recognize/dcm_avi_data2/report.log'\n mode = 'a+' if os.path.exists(log_path) else 'w+'\n fmt = '%(message)s'\n logging.basicConfig(\n level=logging.DEBUG,\n filename=log_path,\n filemode=mode,\n format=fmt\n )\n content = 'file info: '+info+' reason: '+msg\n logging.info(content)\n\n\n# original path\nstart = time.time()\nvideo_dcm_dir = 'L:/Lab_Data/dcm Data2/'\n# check path\ncheck_file = 'E:/MyProgramming/Python/Project/implement/heart recognize/dcm_avi_data2/check.txt'\n# 目錄下的所有資料夾列表\nvideo_dcm_dir_list = os.listdir(video_dcm_dir)\n\n# 迭代每個資料夾並讀取裡面的 DCM 檔案\nall_list = list()\nall_files = list()\nerror_file = 0\n\nfor curr_dir in video_dcm_dir_list:\n curr_path = os.path.join(video_dcm_dir, curr_dir)\n curr_dir_list = os.listdir(curr_path)\n\n # 儲存每個 curr_dir_list 底下的所有資料夾的路徑\n reg_list = list()\n for dirs in curr_dir_list:\n if os.path.isdir(os.path.join(curr_path, dirs)):\n dir_path = 
curr_path + \"/\" + dirs\n all_list.append(dir_path)\n\n# 已檢查底下所有檔案皆為 dcm 檔案(不做異常處理)\nfor dcm_dir in all_list:\n # DCM 檔案皆在 IMG001 資料夾裡\n curr_path = dcm_dir + '/IMG001/'\n\n dcm_files = os.listdir(curr_path)\n all_files.append(len(dcm_files))\n\n # 讀取每個 dcm 檔案和存取寫入的路徑\n for dcm_file in dcm_files:\n # 若寫入路徑不存在資料夾則創建\n write_dir = 'E:/MyProgramming/Python/Project/implement/heart recognize/dcm_avi_data2/'\n write_path = write_dir + curr_path[len(video_dcm_dir):]\n dcm_path = curr_path + dcm_file\n\n if not os.path.isdir(write_path):\n os.makedirs(write_path)\n write_path = write_path + dcm_file\n\n # 使用 pydicom 讀取 *.dcm 檔案\n dcm = pydicom.dcmread(dcm_path)\n # name.pixel_array.shape 可以看資料的維度(總幀數, height, width, channel)\n\n # 判斷 dcm 是否可以正常讀取\n try:\n dcm_data = dcm.pixel_array.shape\n\n except AttributeError:\n file_info = str(dcm_path[len(video_dcm_dir):]) + ' shape: None'\n string = '無法讀取該檔案資訊'\n logfile(file_info, string)\n error_file += 1\n continue\n\n # 資料格式分為 4 種(彩色影片、灰階影片、彩色圖片、灰階圖片)(class 1, 2, 3, 4)\n # 若資料為度 > 3 維則代表彩色影片\n if len(dcm_data) > 3:\n _class = 1\n frames, y, x, channel = dcm_data\n\n # 若影片的幀數小於60(2s左右), 則記錄在 logfile 裡面\n # if frames < 60:\n # file_info = str(dcm_path[len(video_dcm_dir):]) + ' shape: ' + str(dcm_data)\n # string = '影片的長度不足 60 幀(30 幀/秒).'\n # logfile(file_info, string)\n # error_file += 1\n # continue\n\n # 轉成 avi 格式\n name = write_path.replace('.DCM', '.avi')\n\n # fourcc 為編碼格式(可以先查詢 avi 可使用的編碼)\n video_write = cv2.VideoWriter(name, cv2.VideoWriter_fourcc(*'MJPG'), 30, (x, y))\n\n for i in range(frames):\n bgr = cv2.cvtColor(dcm.pixel_array[i], cv2.COLOR_YUV2BGR)\n res = cv2.resize(bgr, (x, y))\n video_write.write(res)\n video_write.release()\n\n elif len(dcm_data) == 3:\n # 資料維度 = 3 則會有灰階影片和彩色圖片\n if dcm_data[-1] == 3:\n # 3 通道代表是彩色圖片\n _class = 3\n name = write_path.replace('.DCM', '.png')\n r, g, b = cv2.split(dcm.pixel_array)\n merge = cv2.merge([b, g, r])\n cv2.imwrite(name, merge)\n\n else:\n # 灰階影片\n _class = 2\n frames, y, x = dcm_data\n\n # if frames < 60:\n # file_info = str(dcm_path[len(video_dcm_dir):]) + ' shape: ' + str(dcm_data)\n # string = '影片的長度不足 60 幀(30 幀/秒).'\n # logfile(file_info, string)\n # error_file += 1\n # continue\n\n name = write_path.replace('.DCM', '.avi')\n video_write = cv2.VideoWriter(name, cv2.VideoWriter_fourcc(*'MJPG'), 30, (x, y))\n for i in range(frames):\n res = cv2.resize(dcm.pixel_array[i], (x, y))\n video_write.write(res)\n video_write.release()\n else:\n # 灰階圖片\n _class = 4\n name = write_path.replace('.DCM', '.png')\n cv2.imwrite(name, dcm.pixel_array)\n\n check = 'path: '+dcm_path[len(video_dcm_dir):]+' shape: '+str(dcm_data)+' class: '+str(_class)+'\\n'\n with open(check_file, 'a+') as f:\n f.write(check)\n\n print('writing {} is successfully.'.format(dcm_file))\n\nend = time.time()\nprint('總檔案數量: ', sum(all_files))\nprint('不符合條件的影片數量: ', error_file)\nprint('總共花費時間: ', round(end - start, 3), '秒')\nprint('平均花費時間: ', round((end - start) / (sum(all_files) - error_file), 3), '秒')\n\n","repo_name":"Sapphire0912/Programming","sub_path":"Python/Project/heart recognize/dcm_avi.py","file_name":"dcm_avi.py","file_ext":"py","file_size_in_byte":5490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23532627371","text":"'''\nCreated on Apr 9, 2016\n\n@author: david\n'''\n#f=open(\"exampleB.txt\")\nf=open(\"B-small-attempt0.in\")\n#f=open(\"B-large.in\")\n\nT=int(f.readline())\nP=[]\nfor i in range(T):\n [c,j] = f.readline().split() #[x=='+' for x in 
f.readline().strip()[::-1]]\n P.append((c,j))\n\ndef isValid(c1, c2):\n for i in range(len(c1)):\n if c1[i]!='?' and c2[i]!='?' and c1[i]!=c2[i]:\n return False\n return True\n \ndef compatible(sn):\n nd = len(sn)\n res = []\n for i in range(0,10**nd):\n if isValid(sn, ('0'*nd+str(i))[-nd:]):\n res.append(('0'*nd+str(i))[-nd:])\n return res\n \n \ndef solve(s):\n (c,j) = s\n #print(compatible(c))\n #print(compatible(j))\n dif=10e100\n cc1 = cc2 = 0\n nd = len(c)\n for cc_ in compatible(c):\n cc = int(cc_)\n for jj_ in compatible(j):\n jj= int(jj_)\n #print(cc,jj,abs(cc-jj),dif)\n dif2 = abs(cc-jj)\n if dif2 List[List[int]]:\n\n length = len(nums)\n # result = set()\n result = []\n\n # 双指针法使用前提:排序\n nums.sort()\n\n for i in range(length - 3):\n # 去重(剪枝)\n if i > 0 and nums[i] == nums[i - 1]:\n continue\n # 如果固定数与数组三最小数之和大于target, 则后续循环都是不存在解的, 从遍历中跳出\n if nums[i] + sum(nums[i + 1:i + 3 + 1]) > target:\n break\n # 如果固定数与数组三最大数之和小于taget, 则当前遍历不存在解, 进入下一个遍历\n if nums[i] + sum(nums[-1:-3 - 1:-1]) < target:\n continue\n\n for j in range(i + 1, length - 2):\n # 去重(剪枝)\n if j - i > 1 and nums[j] == nums[j - 1]:\n continue\n # 如果固定数与数组两最小数之和大于target, 则后续循环都是不存在解的, 从遍历中跳出\n if nums[i] + nums[j] + sum(nums[j + 1:j + 2 + 1]) > target:\n break\n # 如果固定数与数组两最大数之和小于target, 则当前遍历不存在解, 进入下一个遍历\n if nums[i] + nums[j] + sum(nums[-1:-2 - 1:-1]) < target:\n continue\n\n # 双指针法\n left, right = j + 1, length - 1\n while left < right:\n tmp_sum = nums[i] + nums[j] + nums[left] + nums[right]\n # 如果当前和小于target, 收缩左边界\n if tmp_sum < target:\n left += 1\n # 如果当前和大于target, 收缩左边界\n elif tmp_sum > target:\n right -= 1\n # 如果值相等\n else:\n # 记录解\n # result.add((nums[i], nums[j], nums[left], nums[right], ))\n result.append([nums[i], nums[j], nums[left], nums[right]])\n\n # 求得正确解后,去重(剪枝)\n while left < right and nums[left] == nums[left + 1]:\n left += 1\n # 求得正确解后,去重(剪枝)\n while left < right and nums[right] == nums[right - 1]:\n right -= 1\n\n # 在求得正确解,并且剪枝后,仅收缩移动一个指针,都不会是正确解;\n # 因此应收缩移动双指针,直接排除不符合解的情况,减少运算次数\n left += 1\n right -= 1\n\n return result\n","repo_name":"imckl/leetcode","sub_path":"medium/18-4sum.py","file_name":"18-4sum.py","file_ext":"py","file_size_in_byte":3322,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"40920741644","text":"from dijkstra import *\nfrom timeit import default_timer as timer\nimport random\n\nimport matplotlib.pyplot as plt\nimport networkx as nx\n\n\nclass comparisonStrategy(dijkstra):\n def __init__(self, G):\n dijkstra.__init__(self, G)\n self.name = \"Comparison strategy\"\n\n def search(self, source, target):\n G = self.graph.copy()\n\n start = timer()\n\n path = self.shortest_path(source, target)\n\n route = []\n route.append(path[0])\n\n total_cost = 0\n costGS = 0\n costRS = 0\n costRUN = 0\n\n i = 0\n\n while route[-1] != target:\n if self.graph.get_edge_data(path[i], path[i + 1])[\"color\"] != \"red\":\n route.append(path[i + 1])\n total_cost += self.graph.get_edge_data(path[i], path[i + 1])[\"weight\"]\n costRUN += self.graph.get_edge_data(path[i], path[i + 1])[\"weight\"]\n i += 1\n else:\n self.graph.remove_edge(path[i], path[i + 1])\n costGS = self.get_path_length(self.shortest_path(path[i], target))\n costRS = self.get_path_length(self.shortest_path(source, target))\n\n if costGS <= (costRS + costRUN):\n path = self.shortest_path(path[i], target)\n else:\n total_cost += costRUN\n costRUN = 0\n path = self.shortest_path(source, target)\n i = 0\n\n end = timer()\n elapsed = end - start\n\n print(\"Search time:\", 
elapsed)\n        print(\"Path: \", route)\n        print(\"Path length:\", total_cost)\n\n        self.graph = G\n\n        return elapsed, total_cost, route\n","repo_name":"DP97/Canadian-Traveller-Problem","sub_path":"comparison.py","file_name":"comparison.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17370956283","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom flask import Flask, render_template, request\nfrom flask.ext.babelex import Babel\nimport scraper\n\napp = Flask(__name__)\napp.config.from_pyfile('config.py')\nbabel = Babel(app)\n\n\n# choose best matching locale for babel translation\n# available translations list in config\n@babel.localeselector\ndef get_locale():\n    return request.accept_languages.best_match(app.config['LANGUAGES'].keys())\n\n\n@app.route('/')\ndef index():\n    return render_template('index.html')\n\n\n@app.route('/', methods=['POST'])\ndef my_form_post():\n\n    url = request.form['url']\n    recipe = scraper.getData(url)\n    # if recipe is a tuple we got all information\n    if isinstance(recipe, tuple):\n        return render_template('index.html', recipeTitle=recipe[0], recipeIngreds=recipe[1], recipeInstruct=recipe[2])\n    # if it's not a tuple show the returned error\n    else:\n        return render_template('index.html', Error=recipe)\n\nif __name__ == '__main__':\n    app.run(debug=True)\n","repo_name":"dnlvgl/recipemd-flask","sub_path":"recipemd.py","file_name":"recipemd.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"13347170964","text":"# -*- encoding: utf-8 -*-\nfrom .utils import *\nimport h2o_mlops_client\n\ndef _is_model_published(mlops_connection, project, model_id):\n    experiments = mlops_connection.storage.experiment.list_experiments(\n        h2o_mlops_client.StorageListExperimentsRequest(\n            project_id=project.id,\n            filter=QueryUtils.filter_by(\"display_name\", model_id)\n        )\n    ).experiment\n    return len(experiments) > 0\n\ndef _is_model_deployed(mlops_connection, project, deployment_name):\n    deployments=mlops_connection.deployer.deployment.list_project_deployments(\n        h2o_mlops_client.DeployListProjectDeploymentsRequest(\n            project_id=project.id,\n            paging=h2o_mlops_client.DeployPagingRequest(\n                page_size=1000000\n            )\n        )\n    ).deployment\n    relevant_deployments=list([deployment for deployment in deployments if deployment.display_name==deployment_name])\n    return len(relevant_deployments) > 0\n","repo_name":"h2oai/h2o-3","sub_path":"h2o-py-cloud-extensions/h2o_cloud_extensions/mlops/status.py","file_name":"status.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","stars":6553,"dataset":"github-code","pt":"61"} +{"seq_id":"7099054158","text":"from django.shortcuts import render\nfrom django.shortcuts import render, redirect\nfrom . 
import forms\nfrom .models import AddTripDetails\n\n\ndef addtrip(request):\n\n if request.method == 'POST':\n form = forms.trip_forms(request.POST, request.FILES)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.save()\n\n return redirect('addTrip:trip_forms')\n else:\n form = forms.trip_forms\n\n return render(request, \"addtrip.html\", {'form': form})\n","repo_name":"artkmce/EAS","sub_path":"addTrip/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41263692304","text":"\"\"\" Передаём информацию в шаблон: именованные параметры функции render_template \"\"\"\nfrom flask import Flask, render_template\nimport os\n\ndef index():\n \"\"\" функция обрабатывает шаблон и возвращает получившийся документ\"\"\"\n return render_template('index1.html', header=\"Это новый заголовок\", text=\"А тут текст\")\n\nfolder = os.getcwd() + os.sep + \"discuss\" # запомнили текущую рабочую папку\n# Создаём объект веб-приложения:\napp = Flask(__name__, template_folder=folder, static_folder=folder) # первый параметр - имя модуля\n # параметр с именем static_folder определяет имя папки, содержащей статичные файлы\n # параметр с именем template_folder определяет имя папки, содержащей шаблоны\n\n# создаём правило для URL '/': \napp.add_url_rule('/', 'index', index)\n\nif __name__ == \"__main__\":\n # Запускаем веб-сервер:\n app.run()\n","repo_name":"Pavel-Bylkov/lessons","sub_path":"algo/TestSQL/discuss/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"32370014372","text":"\"\"\"Views for the page_uploader app.\"\"\"\nfrom search.serializers import PageSerializer\n\nfrom drf_spectacular.utils import extend_schema\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom page_uploader.uploader import processFile\n\n\nclass UploadView(APIView):\n \"\"\"Uploads pages to the database.\"\"\"\n\n serializer_class = PageSerializer\n\n @extend_schema(\n request={\n \"multipart/form-data\": {\n \"type\": \"object\",\n \"properties\": {\n \"files\": {\n \"type\": \"array\",\n \"description\": \"Files to upload: .zip for archives, \"\n + \".json for single files. 
.zip files can contain\"\n + \".json files or other .zip files.\",\n \"items\": {\"type\": \"string\", \"format\": \"binary\"},\n }\n },\n },\n },\n )\n def put(self, request, format=None):\n \"\"\"Uploads pages to the database.\"\"\"\n try:\n for file in request.FILES.getlist(\"files\"):\n processFile(file)\n\n return Response({\"message\": \"Files uploaded successfully\"})\n except Exception as e:\n print(e)\n return Response({\"error\": str(e)})\n","repo_name":"NicKravchenko/search_engine","sub_path":"page_uploader/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27571256188","text":"from bot import utils\nfrom bot import config\nimport re\nimport praw\nimport json\nimport logging\nimport requests\n\n\n# BOT WILL BE ENTIRELY CONFIGURABLE THROUGH CONFIG.py\n\n# TODO pytest\n# TODO CODECOV\n# TODO type checking\n\n\n# TODO reply to comment with link and add to postgres db\n# TODO break refactor debug into functions\n\n\nlogger = utils.make_logger(config.logfile, config.logLevel)\n\n\ndef main():\n reddit = login()\n\n # Iterate through newly submitted comments\n for comment_id in reddit.subreddit(config.subreddits).stream.comments(\n skip_existing=True\n ):\n clean_comment = get_clean_comment(reddit.comment(comment_id).body)\n logger.info(f\"Clean comment: \\n {clean_comment}\")\n\n # Check for keyword in comment\n if clean_comment.find(config.keyword) != -1:\n logger.info(\"Keyword Found\")\n\n keyword_list = get_search_keys(clean_comment)\n logger.info(keyword_list)\n\n\n# Extract a list of search keys\ndef get_search_keys(clean_comment):\n keyword_indices = get_keywords_pos(clean_comment)\n\n keyword_list = []\n # Find starting and ending pos\n for list_i, comment_i in enumerate(keyword_indices):\n\n begin = comment_i + len(config.keyword)\n\n # On last keyword, set end to the end of the comment\n if list_i == len(keyword_indices) - 1:\n end = len(clean_comment)\n\n # Set end to the next keyword, or next newline\n else:\n next_keyword = keyword_indices[list_i + 1]\n next_newline = get_next_newline(comment_i, clean_comment)\n\n # Set end to whichever is closest next_newline or next_keyword\n if next_newline is not None:\n if next_newline > next_keyword:\n end = next_keyword\n elif next_newline < next_keyword:\n end = next_newline\n else:\n logger.warning(\"IMPOSSIBLE :: next_newline == next_keyword \")\n else:\n logger.info(\"There are no next newline\")\n end = next_keyword\n\n local_keywords_string = clean_comment[begin:end]\n local_keywords_list = re.split(r\",\", local_keywords_string)\n\n local_keywords_list = map(str.strip, local_keywords_list)\n keyword_list.extend(local_keywords_list)\n return keyword_list\n\n\ndef login():\n logger.info(\"Attempting login\")\n try:\n reddit = praw.Reddit(\n client_id=config.client_id,\n client_secret=config.client_secret,\n password=config.password,\n user_agent=config.user_agent,\n username=config.username,\n )\n\n # TODO error handling\n except:\n pass\n\n return reddit\n\n\n# TODO Clean input of illegal characters\ndef get_clean_comment(comment):\n clean_comment = comment.strip()\n return clean_comment\n\n\n# Get starting position of all keyword matches\ndef get_keywords_pos(clean_comment):\n pattern = r\"\\W!linkme \\w\\w|^!linkme \\w\\w\"\n compiled_keyword_regex = re.compile(pattern, re.I | re.M)\n\n keyword_indices = []\n for m in compiled_keyword_regex.finditer(clean_comment):\n\n # If match is not at 
the start of the comment\n        if m.start() != 0:\n            # Add 1 to starting position of match to skip over a space character\n            keyword_indices.append(m.start() + 1)\n        else:\n            keyword_indices.append(m.start())\n\n    return keyword_indices\n\n\n# Returns index of next newline, returns None if there are no newlines\ndef get_next_newline(comment_i, clean_comment):\n    clean_comment_sub = clean_comment[comment_i:]\n    next_newline = None\n\n    # Iterate through comment substring, find index of next newline\n    for char_count, char in enumerate(clean_comment_sub):\n        if char == \"\\n\":\n            next_newline = comment_i + char_count\n            break\n\n    return next_newline\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"PhanTruongT/linkme-bot","sub_path":"bot/linkmebot.py","file_name":"linkmebot.py","file_ext":"py","file_size_in_byte":3912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27366786527","text":"'''\r\n@author: Zhouhong Gu\r\n@date: 2021/07/26\r\n@target: GCN的训练过程\r\n'''\r\n\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom model.GCN.layers import GraphConvolution\r\nfrom model.GCN.utils import load_data, accuracy\r\nimport random\r\nfrom sklearn.metrics import f1_score, accuracy_score\r\nimport config\r\n\r\n# Training settings\r\nGCN_layer = config.GCN_layer  # GCN层数\r\nnfeat = config.nfeat  # 特征层\r\nnhid = config.nhid  # 隐藏层\r\nnclass = config.nclass  # 分类结果\r\ndropout = config.dropout  # dropout\r\nepochs = config.epochs  # epoch\r\nlr = config.lr  # learning rate\r\nuse_cuda = config.use_cuda  # 使用cuda\r\nfastmode = config.fastmode  # 需不需要验证\r\n\r\n\r\nclass GCN(nn.Module):\r\n    def __init__(self, nfeat=config.nfeat, nhid=config.nhid, nclass=1, dropout=config.dropout, layer_num=2):\r\n        super(GCN, self).__init__()\r\n\r\n        self.gcs = nn.ModuleList()\r\n        self.gcs.append(GraphConvolution(nfeat, nhid))\r\n        for i in range(layer_num - 2):\r\n            self.gcs.append(GraphConvolution(nhid, nhid))\r\n        self.gcs.append(GraphConvolution(nhid, nclass))\r\n        self.fc = nn.Linear(nhid * 2, nclass)\r\n        self.dropout = dropout\r\n\r\n    def forward(self, x, adj):\r\n        '''\r\n        :param x:\r\n        :param adj:\r\n        :return:\r\n        '''\r\n        for index, layer in enumerate(self.gcs):\r\n            if index == len(self.gcs) - 1: break\r\n            x = F.relu(layer(x, adj))\r\n            x = F.dropout(x, self.dropout, training=self.training)\r\n        x = self.gcs[-1](x, adj)\r\n        return F.log_softmax(x, dim=1)\r\n\r\n    def forward_2(self, x, adj):\r\n        for index, layer in enumerate(self.gcs):\r\n            if index == len(self.gcs) - 1: break\r\n            x = F.relu(layer(x, adj))\r\n            x = F.dropout(x, self.dropout, training=self.training)\r\n        # 先用一种很low的方法并且很耗时的方法,后续考虑能否直接使用矩阵计算得到\r\n        new_x = []\r\n        for index1, line1 in enumerate(adj):\r\n            for index2, line2 in enumerate(line1):\r\n                if line2 > 0:\r\n                    new_x.append([x.detach().cpu().numpy()[index1], x.detach().cpu().numpy()[index2]])\r\n        new_x = torch.tensor(new_x)\r\n        new_x = new_x.reshape(new_x.shape[0], -1)\r\n        # print('new_x shape', new_x.shape)\r\n        x = self.fc(new_x)\r\n        return F.sigmoid(x)\r\n\r\n    def get_embedding(self, x, adj):\r\n        for index, layer in enumerate(self.gcs):\r\n            if index == len(self.gcs) - 1: break\r\n            x = F.relu(layer(x, adj))\r\n            x = F.dropout(x, self.dropout, training=self.training)\r\n        return x\r\n\r\n\r\ndef getFeatureAdj(data):\r\n    from tools import gzh\r\n    bert_pre = config.bert_pre\r\n    encoder = gzh.bert_encoder(bert_pre)\r\n\r\n    words = set([item for sublist in [[i[0], i[1]] for i in data] for item in sublist])\r\n\r\n    
word2id = {i: j for j, i in enumerate(words)}\r\n id2word = {word2id[i]: i for i in word2id}\r\n\r\n temp = [[word2id[i[0]], word2id[i[1]]] for i in data]\r\n graph = {}\r\n for a, b in temp:\r\n li = graph.get(a, set())\r\n li.add(b)\r\n graph[a] = li\r\n del (temp)\r\n\r\n adj = []\r\n for d in id2word:\r\n adj.append([0.] * len(word2id))\r\n for dd in graph.get(d, []):\r\n adj[-1][dd] = 1.\r\n del (graph)\r\n print('adj shape', np.array(adj).shape)\r\n\r\n features = []\r\n for word in word2id:\r\n features.append(encoder([word])[0])\r\n print('feature shape', np.array(features).shape)\r\n\r\n childs = [float(i[2]) for i in data]\r\n ances = [float(i[3]) for i in data]\r\n return features, adj, childs, ances\r\n\r\n\r\ndef trainGCN(model, optimizer, creiterion, features, adj, childs):\r\n model.train()\r\n optimizer.zero_grad()\r\n outputs = model.forward_2(torch.tensor(features), torch.FloatTensor(adj))\r\n epoch_loss = creiterion(outputs.reshape(-1), torch.FloatTensor(sorted(childs, reverse=True)[:outputs.shape[0]]))\r\n epoch_loss.backward()\r\n optimizer.step()\r\n\r\n return epoch_loss\r\n\r\n\r\ndef predictGCN(model, features, adj, childs, ances):\r\n model.eval()\r\n outputs = model.forward_2(torch.tensor(features), torch.FloatTensor(adj)).reshape(-1)\r\n answers, labels, labelsa = outputs.cpu().detach().numpy(), torch.FloatTensor(\r\n sorted(childs, reverse=True)[:outputs.shape[0]]).cpu().detach().numpy(), torch.FloatTensor(\r\n sorted(ances, reverse=True)[:outputs.shape[0]]).cpu().detach().numpy()\r\n tp, fp, tn, fn = 0, 0, 0, 0\r\n tpa, fpa, tna, fna = 0, 0, 0, 0\r\n for a, l, la in zip(answers, labels, labelsa):\r\n if a > 0.5:\r\n if l == 1:\r\n tp += 1\r\n else:\r\n fp += 1\r\n if la == 1:\r\n tpa += 1\r\n else:\r\n fpa += 1\r\n else:\r\n if l == 0:\r\n tn += 1\r\n else:\r\n fn += 1\r\n if la == 0:\r\n tna += 1\r\n else:\r\n fna += 1\r\n\r\n # # acc = accuracy_score(outputs.cpu().detach().numpy(), torch.FloatTensor(sorted(childs, reverse=True)[:outputs.shape[0]]).cpu().detach().numpy())\r\n # f1 = f1_score(outputs.cpu().detach().numpy(),\r\n # torch.FloatTensor(sorted(childs, reverse=True)[:outputs.shape[0]]).cpu().detach().numpy())\r\n # f1a = f1_score(outputs.cpu().detach().numpy(),\r\n # torch.FloatTensor(sorted(ances, reverse=True)[:outputs.shape[0]]).cpu().detach().numpy())\r\n from tools import gzh\r\n acc, pre, recall, f1 = gzh.getMetrics(tp, fp, tn, fn)\r\n acca, prea, recalla, f1a = gzh.getMetrics(tpa, fpa, tna, fna)\r\n return acc, f1, f1a\r\n","repo_name":"AdaCheng/Product_Taxonomy_Expansion","sub_path":"model/GCNClassifier.py","file_name":"GCNClassifier.py","file_ext":"py","file_size_in_byte":5704,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"33410592448","text":"from random import randint as rand\n\ninf = {}\n\n\ndef data(*grade, situ=True):\n\n # main\n a = 0\n inf[\"Total\"] = len(grade)\n inf[\"biggest\"] = max(grade)\n inf[\"smallest\"] = min(grade)\n inf[\"average\"] = sum(grade)/len(grade)\n if situ:\n if inf[\"average\"] >= 7:\n print(f'The average is {inf[\"average\"]} and the student is approved')\n inf[\"Approved\"] = True\n elif inf[\"average\"] < 7:\n print(\n f'The average is {inf[\"average\"]} and the student is disapproved')\n inf[\"Approved\"] = False\n else:\n print(\n f'The average is {inf[\"average\"]} and the student is not approved')\n inf[\"Approved\"] = False\n else:\n print(f'The average is {inf[\"average\"]}')\ndata(rand(1, 15),rand(1, 15),rand(1, 15), 
situ=True)\nprint(inf)","repo_name":"UskOops/scripts.py","sub_path":"MUNDO 3/Analyzing and generating Dictionaries.py","file_name":"Analyzing and generating Dictionaries.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21435253080","text":"from flask import jsonify, request\n\nfrom rentalapi.schema import SearchSchema\nfrom rentalapi.dao.models import Vehicles, Rentals\n\nvehicles_schema = SearchSchema(many=True)\n\napi = None\n\ndef init_search(api_bp):\n api = api_bp\n \n @api.route('/search/vehicles')\n def search():\n pickup = request.args.get('pickup')\n dropoff = request.args.get('dropoff')\n pickup_date = request.args.get('pickupDate')\n dropoff_date = request.args.get('dropoffDate')\n age = request.args.get('age')\n \n query = Vehicles.query.join(Rentals, Vehicles.id==Rentals.vehicle_id, isouter=True)\n query = query.filter(\n (Rentals.pickup_date == None) | \n (~Rentals.pickup_date.between(pickup_date, dropoff_date)) &\n (~Rentals.dropoff_date.between(pickup_date, dropoff_date))\n )\n vehicles = query.all()\n return jsonify(vehicles_schema.dump(vehicles))\n","repo_name":"bimalghartimagar/vehicle-rental","sub_path":"rentalapi/resources/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"38093061933","text":"#!/usr/bin/env python\n\nimport sys\nimport re\n\n\ndef demo_sds1102_cml():\n \"\"\"\n example how to open Siglent SDS1102CML SCPI interface\n :return:\n \"\"\"\n import usbtmc\n instr = usbtmc.Instrument(0xf4ec, 0xee3a) # SDS1102CML\n print(instr.ask(\"*IDN?\"))\n\n\ndef prn_hack(SCOPEID: str) -> None:\n import hashlib\n\n Model = 'SDS2000X+'\n\n # Note that 'AWG' should be used for the 'FG' option\n # If you have the 100 MHz model, then first upgrade it to 200 MHz, then 350 MHz and finally 500 MHz\n bwopt = (\n '25M', '40M', '50M', '60M', '70M', '100M', '150M', '200M', '250M', '300M', '350M', '500M', '750M', '1000M',\n 'MAX',\n 'AWG', 'WIFI', 'MSO', 'FLX', 'CFD', 'I2S', '1553', 'PWA', 'SENT', 'MANC')\n\n hashkey = '5zao9lyua01pp7hjzm3orcq90mds63z6zi5kv7vmv3ih981vlwn06txnjdtas3u2wa8msx61i12ueh14t7kqwsfskg032nhyuy1d9vv2wm925rd18kih9xhkyilobbgy'\n\n def gen(x):\n h = hashlib.md5((\n hashkey +\n (Model + '\\n').ljust(32, '\\x00') +\n x.ljust(5, '\\x00') +\n 2 * ((SCOPEID + '\\n').ljust(32, '\\x00')) +\n '\\x00' * 16).encode('ascii')\n ).digest()\n key = ''\n for b in h:\n if (b <= 0x2F or b > 0x39) and (b <= 0x60 or b > 0x7A):\n m = b % 0x24\n b = m + (0x57 if m > 9 else 0x30)\n if b == 0x30: b = 0x32\n if b == 0x31: b = 0x33\n if b == 0x6c: b = 0x6d\n if b == 0x6f: b = 0x70\n key += chr(b)\n return key.upper()\n\n for opt in bwopt:\n n = 4\n line = gen(opt)\n print('{:5} {}'.format(opt, [line[i:i + n] for i in range(0, len(line), n)]))\n\n\ndef demo():\n \"\"\"\n example how to open Siglent SDS1102CML SCPI interface\n :return:\n \"\"\"\n import pyvisa\n rm = pyvisa.ResourceManager()\n # rm.list_resources()\n inst = rm.open_resource('USB0::0xF4EC::0x1011::SDS2PEEC7R1007::INSTR')\n idn = inst.query(\"*IDN?\")\n s = inst.query(\"SCOPEID?\")\n print(f\"id={idn}, sid={s}\")\n m = re.match(\"SCOPE_ID ([0-9a-z-]*)\", s)\n if m:\n scopeid = re.sub(\"-\", \"\", m.group(1))\n print(f\"scopeid={scopeid}\")\n prn_hack(scopeid)\n\n\ndef main() -> int:\n try:\n demo()\n return 0\n except Exception as e:\n print(f\"Error: {e}\", file=sys.stderr)\n return 
-1\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"deniskokarev/batmet","sub_path":"calibrate/siglent.py","file_name":"siglent.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17304109913","text":"from django.core.checks import messages\nfrom django.shortcuts import render, redirect\n\n# Create your views here.\nfrom app1.forms import LoginForm\nfrom app1.models import Login\n\n\ndef home(request):\n return render(request, 'home.html')\n\n\ndef user_register(request):\n data = LoginForm()\n if request.method == 'POST':\n data = LoginForm(request.POST)\n print(data)\n if data.is_valid():\n user = data.save(commit=False)\n user.is_trainer = True\n user.save()\n return redirect('home')\n return render(request, 'user_register.html', {'data': data})\n\n\ndef user_view(request):\n data = Login.objects.filter(is_trainer=True)\n return render(request, 'user_view.html', {'data': data})\n\n\ndef user_view2(request):\n data = Login.objects.filter(is_trainer=True)\n return render(request, 'user_view2.html', {'data': data})\n\n\ndef user_update(request, id):\n data = Login.objects.get(id=id)\n if request.method == 'POST':\n form = LoginForm(request.POST or None, instance=data)\n if form.is_valid():\n form.save()\n return redirect('user_view')\n else:\n form = LoginForm(instance=data)\n return render(request, 'user_update.html', {'form': form})\n\n\ndef user_delete(request, id):\n data = Login.objects.get(id=id)\n if request.method == 'POST':\n data.delete()\n return redirect('user_view')\n else:\n return redirect('user_view')\n","repo_name":"siva92743/crud","sub_path":"crud/app1/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40085647288","text":"#! 
/usr/bin/env python\n\n#### CLISU v 0.1.2\n#### Code by: Agentnumber47\n#### Nickname: [0.1.2] First Functional [The Profile Thang]\n#### Source: https://github.com/Agentnumber47/CLISU\n#### Differences between 0.1.2 and 0.1.1: Added basic profile functionality\n\n# from colorama import init\nimport argparse\nimport check\nfrom dumb import err, fingerprinter, header, mkdir, rmvdir, yaml_load, yaml_save\nimport os\nfrom shutil import copy, move\nimport ui\nimport yaml\n\n### Have profile cache folders\n\nQUIT = ['quit', 'exit', 'q', 'x']\n\n# Entry Point #1\ndef terminal(args):\n # Grab and map directories\n path, items = ui.capture_directory(\"from\")\n host = Machine(path, items)\n path, items = ui.capture_directory(\"to\")\n parasite = Machine(path, items)\n\n # Begin syncing process\n header()\n sync(host, parasite)\n return\n\n# Entry Point #2\ndef run(args):\n path, items = check.verify(args.run[0], pause=False)\n if not path: return\n else: host = Machine(path, items)\n\n path, items = check.verify(args.run[1], pause=False)\n if not path: return\n else: parasite = Machine(path, items)\n sync(host, parasite)\n return\n\ndef sync(host, parasite):\n for ld in host.items:\n item = ld # Do this or python will flip the fuck out\n\n ### Item found on both drives\n if item in parasite.items:\n hi, pi = render(item, host, parasite)\n if hi['relative path'] != pi['relative path']:\n # If the items are identical, move to match host map\n if fingerprinter(hi['full path']) == fingerprinter(pi['full path']):\n mkdir(hi['directory mirror'])\n move(pi['full path'], hi['path mirror'])\n for i in [pi['directory'], pi['directory mirror']]: rmvdir(i)\n else:\n copy(host_items[item]['path'].replace(\"./\", host_path), host_items[item]['path'].replace(\"./\", parasite_path))\n\n print(\"Sync Successful!\")\n return\n\ndef render(item, x, y):\n # x = '/path/to/x/file.txt', y = '/path/to/y/fyle.txt'\n\n # './file.txt', './fyle.txt'\n x_relpath, y_relpath = x.items[item]['path'], y.items[item]['path']\n\n # '/path/to/x/file.txt', '/path/to/y/fyle.txt'\n x_full_path, y_full_path = x.items[item]['path'].replace('./', x.path), y.items[item]['path'].replace('./', y.path) #\n\n # '/path/to/y/file.txt', '/path/to/x/fyle.txt'\n x_full_path_mirror, y_full_path_mirror = x.items[item]['path'].replace('./', y.path), y.items[item]['path'].replace('./', x.path)\n\n # '/path/to/x/', '/path/to/y/'\n x_directory, y_directory = x_full_path.replace(item, \"\"), y_full_path.replace(item, \"\")\n\n x_directory_mirror, y_directory_mirror = x_full_path_mirror.replace(item, \"\"), y_full_path_mirror.replace(item, \"\")\n\n x_render = {\n 'relative path' : x_relpath,\n 'full path' : x_full_path,\n 'path mirror' : x_full_path_mirror,\n 'directory' : x_directory,\n 'directory mirror' : x_directory_mirror\n }\n y_render = {\n 'relative path' : y_relpath,\n 'full path' : y_full_path,\n 'path mirror' : y_full_path_mirror,\n 'directory' : y_directory,\n 'directory mirror' : y_directory_mirror\n }\n return x_render, y_render\n\ndef profile(args):\n list_add = ['a', 'add', '+', 'create']\n list_change = ['c', 'change', 'edit', 'set', 'settings']\n list_delete = ['d', 'rm', 'delete', 'remove', '-']\n list_list = ['l', 'list', 'all']\n list_run = ['r', 'run']\n\n if len(args.profile) == 0:\n print(\"No valid function selected. 
Run '-p help' for available functions.\")\n        return\n    if args.profile[0].lower() in ['h', 'help']:\n        print(\"Purpose: To utilize predefined parameters to perform sync functions.\")\n        print(\"Use: './clisu.py --profile FUNCTION'\")\n        print(f\"\\nAdd {list_add}:\\n Add a profile\\n OPTIONAL: 'add [NAME] [/FROM/dir] [/TO/dir]'\\n\")\n        print(f\"Change {list_change}:\\n Change a profile\\n OPTIONAL: 'change [NAME]'\\n\")\n        print(f\"Delete {list_delete}:\\n Delete a profile\\n OPTIONAL: 'delete [NAME]'\\n\")\n        print(f\"List {list_list}:\\n List created profiles\\n\")\n        print(f\"Run {list_run}:\\n Run CLISU with the profile\\n\")\n    elif args.profile[0].lower() in list_add:\n        if len(args.profile) == 1:\n            name, yaml_name = ui.capture_name()\n            host, parasite = ui.capture_directory(\"from\", map=False)[0], ui.capture_directory(\"to\", map=False)[0]\n\n        elif len(args.profile) == 4:\n            name = args.profile[1]\n            if check.Name(name, pause=False):\n                yaml_name = check.Yaml(name, pause=False)\n                if not yaml_name: return\n            else:\n                return\n            host, parasite = check.verify(args.profile[2], map=False, pause=False)[0], check.verify(args.profile[3], map=False, pause=False)[0]\n            if not host or not parasite:\n                return\n        else:\n            err(\"Incorrect use.\\nUse:\\n './clisu.py --profile add'\\nOR\\n './clisu.py --profile add [NAME] [/FROM/dir] [/TO/dir]'\", pause=False)\n            return\n\n        data = {'name':name, 'host': host, 'parasite': parasite}\n\n        yaml_save(yaml_name, data)\n        print(f\"\\nProfile '{name}' created successfully\")\n        return\n\n    elif args.profile[0].lower() in list_change:\n        profiles = [i.split('.')[0] for i in check.audit_profiles()]\n        if len(args.profile) == 1:\n            if len(profiles) == 0:\n                err(\"No profiles found\\nUse '-p add' to create\", pause=False)\n                return\n            else:\n                entry = ui.profile_list_menu(profiles)\n\n        elif len(args.profile) == 2:\n            entry = args.profile[1]\n            if entry not in profiles:\n                err(f\"Profile '{entry}' not found\", pause=False)\n                return\n\n        else:\n            err(\"Incorrect use.\\nUse:\\n './clisu.py --profile change'\\nOR\\n './clisu.py --profile change [NAME]'\", pause=False)\n            return\n\n        old = f\"./profiles/{entry}.yaml\"\n        data = yaml_load(old)\n        change = False\n        yaml_name = entry\n        while True:\n            header()\n            print(f\"Please make a selection:\\n\\n [1] Name: {data['name']}\\n [2] FROM: {data['host']}\\n [3] TO: {data['parasite']}\\n [*] Advanced Settings\\n\")\n            entry = input(\"\").strip()\n            if entry.lower() in QUIT:\n                if change:\n                    yaml_save(yaml_name, data)\n                    print(\"Changes saved\")\n                exit()\n            elif entry == \"1\":\n                data['name'], yaml_name = ui.capture_name()\n                os.rename(old, f\"./profiles/{yaml_name}.yaml\")\n                old = f\"./profiles/{yaml_name}.yaml\"\n                change = True\n            elif entry == \"2\":\n                data['host'] = ui.capture_directory(\"from\", map=False)[0]\n                change = True\n            elif entry == \"3\":\n                data['parasite'] = ui.capture_directory(\"to\", map=False)[0]\n                change = True\n            elif entry == \"*\":\n                pass\n            else:\n                err(\"Not a valid selection ('x' to quit)\")\n\n    elif args.profile[0].lower() in list_delete:\n        profiles = [i.split('.')[0] for i in check.audit_profiles()]\n        if len(args.profile) == 1:\n            if len(profiles) == 0:\n                err(\"No profiles found\\nUse '-p add' to create\", pause=False)\n                return\n            else:\n                entry = ui.profile_list_menu(profiles)\n\n        elif len(args.profile) == 2:\n            entry = args.profile[1]\n            if entry not in profiles:\n                err(f\"Profile '{entry}' not found\", pause=False)\n                return\n        else:\n            err(\"Incorrect use.\\nUse:\\n './clisu.py --profile delete'\\nOR\\n './clisu.py --profile delete [NAME]'\", 
pause=False)\n            return\n\n        data = yaml_load(f\"./profiles/{entry}.yaml\")\n        while True:\n            header()\n            confirm = input(f\"Delete '{data['name']}'\\n\\nAre you sure [y/n]?\\n\\n\").lower().strip()\n            if confirm in QUIT or confirm in ['n', 'no']:\n                exit()\n            elif confirm in ['y', 'yes']:\n                os.remove(f\"./profiles/{entry}.yaml\")\n                print(f\"Profile '{entry}' deleted successfully\")\n                return\n            else:\n                continue\n\n    elif args.profile[0].lower() in list_list:\n        profiles = [i.split('.')[0] for i in check.audit_profiles()]\n        if len(args.profile) == 1:\n            if len(profiles) == 0:\n                err(\"No profiles found\\nUse '-p add' to create\", pause=False)\n                return\n            else:\n                entry = ui.profile_list_menu(profiles, pause=False)\n        else:\n            err(\"Incorrect use.\\nUse:\\n './clisu.py --profile list'\", pause=False)\n            return\n\n    elif args.profile[0].lower() in list_run:\n        profiles = [i.split('.')[0] for i in check.audit_profiles()]\n        if len(profiles) == 0:\n            err(\"No profiles found\\nUse '-p add' to create\", pause=False)\n            return\n\n        if len(args.profile) == 1:\n            err(\"Incorrect use.\\nUse:\\n './clisu.py --profile run [PROFILE]'\", pause=False)\n            return\n        else:\n            called_name = \"\".join(args.profile[1:])\n            if called_name in profiles:\n                data = yaml_load(f\"./profiles/{called_name}.yaml\")\n                args.run = [data['host'], data['parasite']]\n                run(args)\n                return\n            else:\n                err(\"Profile not found\", pause=False)\n\n\ndef main():\n    # Check for proper setup\n    if not os.path.isfile('./config.yaml'):\n        with open('./config.yaml', 'w') as f: yaml.dump({'default': False}, f)\n        mkdir('./profiles')\n\n    parser = argparse.ArgumentParser(description = \"CLISU (CLI Synchronization Utility)\")\n\n    parser.add_argument(\"-t\", \"--terminal\", nargs = \"*\", metavar = \"\", type = str, help = \"run with prompts in terminal\")\n    parser.add_argument(\"-r\", \"--run\", nargs = 2, metavar = ('/path/from', '/path/to'), type = str, help = \"run without prompts\")\n    parser.add_argument(\"-p\", \"--profile\", nargs = \"*\", metavar = \"FUNCTION\", type = str, help = \"profile functions\")\n    ## Defaults\n\n    # parse the arguments from standard input\n    args = parser.parse_args()\n\n    # calling functions depending on type of argument\n    if args.terminal != None: terminal(args)\n    elif args.run != None: run(args)\n    elif args.profile != None: profile(args)\n\n    return\n\nclass Machine:\n    def __init__(self, path, items):\n        self.items = items\n        self.path = path\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"Agentnumber47/CLISU","sub_path":"clisu.py","file_name":"clisu.py","file_ext":"py","file_size_in_byte":10755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"18059453846","text":"# -*- coding:utf-8 -*-\r\n# python3.6\r\n# author: Hu Zhang\r\n# email: dugujjiujian@gmail.com\r\n\r\n# Toggle pause/resume with a single key (tracked via a flag)\r\n# UI\r\n# Global variables\r\n# Playlist (from files)\r\n# Display the song list (partial + full)\r\n# Click a song in the list to play it\r\nimport pygame, os, math, random\r\nfrom time import sleep\r\n\r\ndef myPauseAndUnpause(): # works the same whether or not Music is passed in\r\n    global myPaused\r\n    if not myPaused:\r\n        Music.pause()\r\n        myPaused = True\r\n    else:\r\n        Music.unpause()\r\n        myPaused = False\r\n\r\ndef get_musics(musicpath):\r\n    filenames = os.listdir(musicpath)\r\n    MusicNames, Musics, MusicsRect, myMusicsListSelected = [], [], [], []\r\n    for i in range(len(filenames)):\r\n        if filenames[i].lower().endswith(\".mp3\"):\r\n            newfilename = filenames[i].split(\".\")[0]\r\n            MusicNames.append(newfilename)\r\n            Musics.append(os.path.join(myMusicPath, filenames[i]))\r\n            
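            # Note: get_musics builds four parallel lists - MusicNames, Musics,
            # MusicsRect and myMusicsListSelected all describe track i at index i -
            # so the event handlers further down can hit-test the rectangle at an
            # index and then load the matching file path from Musics directly.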
MusicsRect.append(pygame.Rect(600, i * 20, LENGTH - 600, 20))\r\n            myMusicsListSelected.append(False)\r\n    MusicAmounts = len(Musics)\r\n    return MusicNames, Musics, MusicsRect, myMusicsListSelected, MusicAmounts\r\n\r\ndef change_music():\r\n    global myFontSurface\r\n    # Reads the module-level track index i\r\n    Music.load(myMusics[i])\r\n    myFontSurface = myFont1.render(myMusicNames[i], True, myColors[\"black\"], myColors[\"yellow\"])\r\n    Music.play()\r\n\r\n\r\ndef musics_font(MusicNames):\r\n    myMusicNameSurfaces1, myMusicNameSurfaces2 = [], []\r\n    myFont2 = pygame.font.SysFont(name=\"华文宋体\", size=15)\r\n    myFont3 = pygame.font.SysFont(name=\"华文宋体\", size=15, bold=True, italic=False)\r\n    for musicname in MusicNames:\r\n        namesurface1 = myFont2.render(musicname, True, myColors[\"black\"], myColors[\"white\"])\r\n        myMusicNameSurfaces1.append(namesurface1)\r\n        namesurface2 = myFont3.render(musicname, True, myColors[\"black\"], myColors[\"yellow\"])\r\n        myMusicNameSurfaces2.append(namesurface2)\r\n    return myMusicNameSurfaces1, myMusicNameSurfaces2\r\n\r\ndef musics_font_display(myMusicNameSurfaces1, myMusicNameSurfaces2, i):\r\n    for j in range(-2, 3):\r\n        # Wrap around both ends of the list so the current track always sits\r\n        # in the middle of the five-row window\r\n        k = (i + j) % len(myMusicNameSurfaces1)\r\n        myScreen.blit(myMusicNameSurfaces1[k], (600, 400 + j * myRawGap))\r\n    myScreen.blit(myMusicNameSurfaces2[i], (600, 400))\r\n\r\ndef all_musics_font_display(myMusicNameSurfaces1, myMusicNameSurfaces2, pagenumber):\r\n    # A name that matches a module-level variable can be read directly inside\r\n    # the function without being passed in as a parameter\r\n    startnumber = int((HEIGHT-60)/myRawGap) * pagenumber\r\n    endnumber = int((HEIGHT-60)/myRawGap) * (pagenumber + 1)\r\n    if endnumber >= len(myMusicNameSurfaces1):\r\n        endnumber = len(myMusicNameSurfaces1)\r\n    for j in range(startnumber, endnumber):\r\n        myScreen.blit(myMusicNameSurfaces1[j], (600, (j - startnumber) * myRawGap))\r\n        if myMusicsListSelected[j-startnumber]:\r\n            myScreen.blit(myMusicNameSurfaces2[j], (600, (j - startnumber) * myRawGap))\r\n    myFont4 = pygame.font.SysFont(name=\"华文宋体\", size=15, bold=True, italic=False)\r\n    myPageNumberFontSurface = myFont4.render(\"Page \" + str(pagenumber + 1) + \"/\" + str(myPageAmounts), True, myColors[\"black\"], myColors[\"white\"])\r\n    myScreen.blit(myPageNumberFontSurface, (770, HEIGHT - 20))\r\n    myScreen.fill(color=myColors[\"white\"], rect=myButtonLastPageRect)\r\n    myScreen.fill(color=myColors[\"white\"], rect=myButtonNextPageRect)\r\n    pygame.draw.line(myScreen, myColors[\"greyblack\"], (750, HEIGHT - 15 + 5), (750 + 10, HEIGHT - 15), 1)\r\n    pygame.draw.line(myScreen, myColors[\"greyblack\"], (750, HEIGHT - 15 + 5), (750 + 10, HEIGHT - 15 + 10), 1)\r\n    pygame.draw.line(myScreen, myColors[\"greyblack\"], (750 + 10, HEIGHT - 15), (750 + 10, HEIGHT - 15 + 10), 1)\r\n    pygame.draw.line(myScreen, myColors[\"greyblack\"], (835, HEIGHT - 15), (835 + 10, HEIGHT - 15 + 5), 1)\r\n    pygame.draw.line(myScreen, myColors[\"greyblack\"], (835, HEIGHT - 15 + 10), (835 + 10, HEIGHT - 15 + 5), 1)\r\n    pygame.draw.line(myScreen, myColors[\"greyblack\"], (835, HEIGHT - 15), (835, HEIGHT - 15 + 10), 1)\r\n\r\n\r\n\r\n\r\nmyMusicPath = \"D:\\文件\\音乐\\歌曲\"\r\nLENGTH, HEIGHT = 1000, 600\r\nmyRawGap = 20\r\nmyPageNumber = 0\r\nmyPaused, myMusicsListDisplayed = False, False\r\nmyColors = {\"black\": (0, 0, 0),\r\n            \"white\": (255, 255, 255),\r\n            \"greyblack\": (100, 100, 100),\r\n            \"greywhite\": (220, 220, 220),\r\n            \"red\": (255, 0, 0),\r\n            \"green\": (0, 255, 0),\r\n            \"blue\": (0, 0, 255),\r\n            \"yellow\": (255, 255, 0)\r\n            
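            # Paging arithmetic used below: each page of the track list holds
            # int((HEIGHT - 60) / myRawGap) = int(540 / 20) = 27 rows, so e.g.
            # 100 tracks span math.ceil(100 / 27) = 4 pages (myPageAmounts).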
}\r\nmyMusicsListRectColor = myColors[\"white\"]\r\npygame.init()\r\nmyScreen = pygame.display.set_mode((LENGTH, HEIGHT))\r\nmyCaption = pygame.display.set_caption(\"Music Player\")\r\nmyFont1 = pygame.font.SysFont(\"华文新魏\", 30)\r\nmyMusicNames, myMusics, myMusicsRect, myMusicsListSelected, myMusicAmounts = get_musics(myMusicPath)\r\ni = random.randint(0, myMusicAmounts - 1)  # randint is inclusive at both ends\r\nmyPageAmounts = math.ceil(myMusicAmounts / int((HEIGHT-60) / myRawGap))\r\nmyPicturePause = pygame.image.load(\"images/pause.png\")\r\nmyPictureUnpause = pygame.image.load(\"images/unpause.png\")\r\nmyPictureStop = pygame.image.load(\"images/stop.png\")\r\nmyPictureLast = pygame.image.load(\"images/last.png\")\r\nmyPictureNext = pygame.image.load(\"images/next.png\")\r\nmyPictureRectPause = pygame.Rect(150, 125, 50, 50)\r\nmyPictureRectStop = pygame.Rect(210, 125, 50, 50)\r\nmyPictureRectLast = pygame.Rect(90, 125, 50, 50)\r\nmyPictureRectNext = pygame.Rect(270, 125, 50, 50)\r\nmyMusicsListRect = pygame.Rect(LENGTH - 20, HEIGHT - 20, 20, 20)\r\nmyButtonLastPageRect = pygame.Rect(750, HEIGHT - 15, 10, 10)\r\nmyButtonNextPageRect = pygame.Rect(835, HEIGHT - 15, 10, 10)\r\nmyEveryMusicRect = []\r\nMusic = pygame.mixer.music\r\nMusic.set_volume(1.0)  # pygame music volume ranges from 0.0 to 1.0\r\nMusic.load(myMusics[i])\r\nmyFontSurface = myFont1.render(myMusicNames[i],\r\n                               True, myColors[\"black\"], myColors[\"yellow\"])\r\nmyMusicNameSurfaces1, myMusicNameSurfaces2 = musics_font(myMusicNames)\r\nMusic.play()\r\n\r\nwhile 1:\r\n    if not Music.get_busy():\r\n        i = (i + 1) % myMusicAmounts  # wrap back to the first track after the last\r\n        change_music()\r\n    myScreen.fill(color=myColors[\"white\"])\r\n    myScreen.blit(myFontSurface, (140, 50))\r\n    if not myPaused:\r\n        myScreen.blit(myPicturePause, (150, 125))\r\n    else:\r\n        myScreen.blit(myPictureUnpause, (150, 125))\r\n    myScreen.blit(myPictureStop, (210, 125))\r\n    myScreen.blit(myPictureLast, (90, 125))\r\n    myScreen.blit(myPictureNext, (270, 125))\r\n    musics_font_display(myMusicNameSurfaces1, myMusicNameSurfaces2, i)\r\n    if myMusicsListDisplayed:\r\n        myScreen.fill(color=myColors[\"white\"], rect=(600, 0, 400, 600))\r\n        all_musics_font_display(myMusicNameSurfaces1, myMusicNameSurfaces2, pagenumber=myPageNumber)\r\n    myScreen.fill(color=myMusicsListRectColor, rect=myMusicsListRect)\r\n    pygame.draw.rect(myScreen, myColors[\"greyblack\"], myMusicsListRect, 1)\r\n    pygame.draw.line(myScreen, myColors[\"greyblack\"], (LENGTH - 20, HEIGHT - 15), (LENGTH, HEIGHT - 15), 1)\r\n    pygame.draw.line(myScreen, myColors[\"greyblack\"], (LENGTH - 20, HEIGHT - 10), (LENGTH, HEIGHT - 10), 1)\r\n    pygame.draw.line(myScreen, myColors[\"greyblack\"], (LENGTH - 20, HEIGHT - 5), (LENGTH, HEIGHT - 5), 1)\r\n    pygame.display.update()\r\n    for event in pygame.event.get():\r\n        # every event is queued and handled here one at a time, in order\r\n        if event.type == pygame.QUIT:\r\n            pygame.quit()\r\n            exit()\r\n        elif event.type == pygame.KEYDOWN:\r\n            if event.key == pygame.K_SPACE:\r\n                myPauseAndUnpause()\r\n            elif event.key == pygame.K_ESCAPE:\r\n                Music.stop()\r\n            elif event.key == pygame.K_LEFT:\r\n                if i <= 0:\r\n                    i = myMusicAmounts - 1\r\n                else:\r\n                    i -= 1\r\n                change_music()\r\n            elif event.key == pygame.K_RIGHT:\r\n                if i >= myMusicAmounts - 1:\r\n                    i = 0\r\n                else:\r\n                    i += 1\r\n                change_music()\r\n            elif event.key == pygame.K_TAB:\r\n                if not myMusicsListDisplayed:\r\n                    myMusicsListDisplayed = True\r\n                else:\r\n                    myMusicsListDisplayed = False\r\n            elif event.key == pygame.K_UP and myMusicsListDisplayed:\r\n                myPageNumber -= 1\r\n                if myPageNumber < 0:\r\n                    myPageNumber = myPageAmounts - 1\r\n            elif event.key == pygame.K_DOWN and myMusicsListDisplayed:\r\n                myPageNumber += 1\r\n                if 
myPageNumber >= myPageAmounts:\r\n                    myPageNumber = 0\r\n        elif event.type == pygame.MOUSEBUTTONDOWN:\r\n            if myPictureRectPause.collidepoint(event.pos):\r\n                myPauseAndUnpause()\r\n            elif myPictureRectStop.collidepoint(event.pos):\r\n                Music.stop()\r\n            elif myPictureRectLast.collidepoint(event.pos):\r\n                if i <= 0:\r\n                    i = myMusicAmounts - 1\r\n                else:\r\n                    i -= 1\r\n                change_music()\r\n            elif myPictureRectNext.collidepoint(event.pos):\r\n                if i >= myMusicAmounts - 1:\r\n                    i = 0\r\n                else:\r\n                    i += 1\r\n                change_music()\r\n            elif myMusicsListRect.collidepoint(event.pos):\r\n                if not myMusicsListDisplayed:\r\n                    myMusicsListDisplayed = True\r\n                else:\r\n                    myMusicsListDisplayed = False\r\n            elif myButtonLastPageRect.collidepoint(event.pos) and myMusicsListDisplayed:\r\n                myPageNumber -= 1\r\n                if myPageNumber < 0:\r\n                    myPageNumber = myPageAmounts - 1\r\n            elif myButtonNextPageRect.collidepoint(event.pos) and myMusicsListDisplayed:\r\n                myPageNumber += 1\r\n                if myPageNumber >= myPageAmounts:\r\n                    myPageNumber = 0\r\n            if myMusicsListDisplayed:\r\n                for j1 in range(int((HEIGHT - 60) / myRawGap)):\r\n                    if myMusicsRect[j1].collidepoint(event.pos):\r\n                        i = j1 + int((HEIGHT - 60) / myRawGap) * myPageNumber\r\n                        change_music()\r\n        elif event.type == pygame.MOUSEMOTION:\r\n            if myMusicsListRect.collidepoint(event.pos) and myMusicsListDisplayed == False:\r\n                myMusicsListRectColor = myColors[\"greywhite\"]\r\n            else:\r\n                myMusicsListRectColor = myColors[\"white\"]\r\n            if myMusicsListDisplayed:\r\n                # Use a separate loop variable here: reusing i would clobber the\r\n                # index of the track that is currently playing\r\n                for j2 in range(int((HEIGHT - 60) / myRawGap)):\r\n                    if myMusicsRect[j2].collidepoint(event.pos):\r\n                        myMusicsListSelected[j2] = True\r\n                    else:\r\n                        myMusicsListSelected[j2] = False\r\n\r\n\r\n","repo_name":"hzhangamaze/Games_pygame","sub_path":"MusicPlayer/MusicPlayer.py","file_name":"MusicPlayer.py","file_ext":"py","file_size_in_byte":10977,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"45410289773","text":"from django.contrib import admin\n\nfrom .models import Propiedad, Imagen\n\n# Register your models here.\n\n\nclass ImagenesInline(admin.TabularInline):\n    model = Imagen\n    extra = 1\n\n\n@admin.register(Propiedad)\nclass PropiedadAdmin(admin.ModelAdmin):\n    inlines = [ImagenesInline]\n    list_display = (\n        \"nombre\",\n        \"apellido\",\n        \"direccion\",\n        \"numero_direccion\",\n        \"precio\",\n    )  # Fields shown in the list view\n    list_filter = (\n        \"metros_cuadrados\",\n        \"precio\",\n        \"ambientes\",\n        \"habitaciones\",\n    )  # Filters in the sidebar\n    search_fields = (\"nombre\", \"apellido\", \"direccion\")  # Search fields\n    fieldsets = (\n        (\n            \"Información de la Propiedad\",\n            {\n                \"fields\": (\n                    \"nombre\",\n                    \"apellido\",\n                    \"direccion\",\n                    \"numero_direccion\",\n                    \"precio\",\n                    \"dolares\",\n                    \"categoria\",\n                    \"destacado\",\n                )\n            },\n        ),\n        (\n            \"Detalles\",\n            {\n                \"fields\": (\n                    \"metros_cuadrados\",\n                    \"ambientes\",\n                    \"habitaciones\",\n                    \"baños\",\n                    \"garage\",\n                    \"info_extra\",\n                )\n            },\n        ),\n        (\n            \"Otros\",\n            {\n                \"fields\": (\n                    \"duracion_contrato\",\n                    \"expensas\",\n                    \"niños\",\n                    \"mascotas\",\n                    \"antiguedad\",\n                )\n            },\n        ),\n        (\"Información de Contacto\", {\"fields\": (\"correo\", \"telefono\")}),\n    )\n\n\nadmin.site.register(Imagen)\n","repo_name":"agusherrera1020/inmobiliaria","sub_path":"pages/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"22351383750","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport io\nimport 
math\nimport os\nimport re\nimport requests\nimport sys\nimport time\n\nPUBLIC_IP = sys.argv[1]\nMODEL_PKL_FILE = sys.argv[2]\nif len(sys.argv) > 3:\n PASSWORD = open(sys.argv[3]).read()\nelse:\n PASSWORD = os.environ['THE_PWD']\n\nPROJECT_ZIP_FILE = os.environ.get('PROJECT_ZIP_FILE', None)\n\nBASE_DIR = os.path.dirname(__file__) if os.path.dirname(__file__) else '.'\n_IS_TLS_ENABLED = os.path.exists(os.path.join(BASE_DIR, '.enable-tls'))\n\nTRUSTSTORE = '/opt/cloudera/security/x509/truststore.pem'\nURL_SCHEME = 'https' if _IS_TLS_ENABLED else 'http'\n\nCDSW_API = URL_SCHEME + '://cdsw.{}.nip.io/api/v1'.format(PUBLIC_IP, )\nCDSW_ALTUS_API = URL_SCHEME + '://cdsw.{}.nip.io/api/altus-ds-1'.format(PUBLIC_IP, )\nVIZ_API = URL_SCHEME + '://viz.cdsw.{}.nip.io/arc/apps'.format(PUBLIC_IP, )\nVIZ_ADMIN_USER = 'vizapps_admin'\n\n_DEFAULT_PROJECT_NAME = 'Edge2AI Workshop'\n_VIZ_PROJECT_NAME = 'VizApps Workshop'\n\nUSERNAME = 'admin'\nFULL_NAME = 'Workshop Admin'\nEMAIL = 'admin@cloudera.com'\n\n_MODEL_NAME = 'IoT Prediction Model'\n\n_CDSW_SESSION = requests.Session()\n_VIZ_SESSION = requests.Session()\n_RELEASE = []\n_RUNTIMES = {}\n_DEFAULT_RUNTIME = 0\n_VIZ_RUNTIME = 0\n_MODEL = {}\n_DEFAULT_PROJECT = {}\n_VIZ_PROJECT = {}\n\n_UPLOAD_CHUNK_SIZE = 1048576\n\n\nclass VizAppsInvalidLoginAttempt(RuntimeError):\n def __init__(self, msg=None):\n super().__init__(msg)\n\n\ndef _init_sessions():\n global _CDSW_SESSION\n global _VIZ_SESSION\n global _IS_TLS_ENABLED\n print(\"Initializing sessions\")\n if _IS_TLS_ENABLED:\n print(\"Setting truststore\")\n _CDSW_SESSION.verify = TRUSTSTORE\n _VIZ_SESSION.verify = TRUSTSTORE\n\n\ndef _authorize_sessions():\n global _CDSW_SESSION\n print(\"Authorizing sessions\")\n resp = _cdsw_post(CDSW_API + '/authenticate',\n json={'login': USERNAME, 'password': PASSWORD})\n token = resp.json()['auth_token']\n _CDSW_SESSION.headers.update({'Authorization': 'Bearer ' + token})\n\n\ndef _get_release():\n global _RELEASE\n if not _RELEASE:\n resp = _cdsw_get(CDSW_API + '/site/stats')\n release_str = [c['value'] for c in resp.json() if c['key'] == 'config.release'][0]\n _RELEASE = [int(v) for v in release_str.split('.')]\n print('CDSW release: {}'.format(_RELEASE))\n return _RELEASE\n\n\ndef _get_runtimes(refresh=False):\n global _RUNTIMES\n if not _RUNTIMES or refresh:\n resp = _cdsw_get(CDSW_API + '/runtimes?includeAll=true', expected_codes=[200, 501])\n if resp.status_code == 200:\n _RUNTIMES = resp.json()['runtimes']\n elif resp.status_code == 501:\n _RUNTIMES = []\n print(\"List of runtimes not available yet.\")\n return _RUNTIMES\n\n\ndef _find_runtime(editor, kernel, edition=None, short_version=None, retries=600):\n total_retries = retries\n while True:\n runtimes = _get_runtimes(refresh=True)\n selected = [runtime for runtime in runtimes\n if runtime['editor'] == editor\n and runtime['kernel'] == kernel\n and (edition is None or runtime['edition'] == edition)\n and (short_version is None or runtime['shortVersion'] == short_version)]\n selected = sorted(selected, key=lambda x: (x['edition'], x['shortVersion']))\n if selected:\n return selected[-1]['id']\n retries -= 1\n if retries <= 0:\n break\n print('Could not find the required runtime among the {} retrieved ones.'\n 'Will retry (#{} out of {} attempts).'.format(len(runtimes), retries, total_retries))\n time.sleep(1)\n raise RuntimeError('Could not find the required runtime. Giving up. 
Available runtimes: {}'.format(runtimes))\n\n\ndef _get_default_runtime():\n global _DEFAULT_RUNTIME\n if not _DEFAULT_RUNTIME:\n _DEFAULT_RUNTIME = _find_runtime('Workbench', 'Python 3.7', 'Standard')\n print('Default Runtime ID: {}'.format(_DEFAULT_RUNTIME, ))\n return _DEFAULT_RUNTIME\n\n\ndef _get_viz_runtime():\n global _VIZ_RUNTIME\n if not _VIZ_RUNTIME:\n _VIZ_RUNTIME = _find_runtime('Workbench', 'Cloudera Data Visualization')\n print('Viz Runtime ID: {}'.format(_VIZ_RUNTIME, ))\n return _VIZ_RUNTIME\n\n\ndef _get_model(refresh=False):\n global _MODEL_NAME\n global _MODEL\n if not _MODEL or refresh:\n resp = _cdsw_post(CDSW_ALTUS_API + '/models/list-models',\n json={\n 'projectOwnerName': 'admin',\n 'latestModelDeployment': True,\n 'latestModelBuild': True,\n })\n models = [m for m in resp.json() if m['name'] == _MODEL_NAME]\n if models:\n _MODEL = models[0]\n else:\n _MODEL = {}\n return _MODEL\n\n\ndef _is_model_deployed():\n model = _get_model(refresh=True)\n return model and model['latestModelDeployment']['status'] == 'deployed'\n\n\ndef _rest_call(func, url, expected_codes=None, **kwargs):\n if not expected_codes:\n expected_codes = [200]\n resp = func(url, **kwargs)\n if resp.status_code not in expected_codes:\n print(resp.text)\n raise RuntimeError(\"Unexpected response: {}\".format(resp))\n return resp\n\n\ndef _cdsw_get(url, expected_codes=None, **kwargs):\n global _CDSW_SESSION\n return _rest_call(_CDSW_SESSION.get, url, expected_codes, **kwargs)\n\n\ndef _cdsw_post(url, expected_codes=None, **kwargs):\n global _CDSW_SESSION\n return _rest_call(_CDSW_SESSION.post, url, expected_codes, **kwargs)\n\n\ndef _cdsw_put(url, expected_codes=None, **kwargs):\n global _CDSW_SESSION\n return _rest_call(_CDSW_SESSION.put, url, expected_codes, **kwargs)\n\n\ndef _cdsw_patch(url, expected_codes=None, **kwargs):\n global _CDSW_SESSION\n return _rest_call(_CDSW_SESSION.patch, url, expected_codes, **kwargs)\n\n\ndef _cdsw_delete(url, expected_codes=None, **kwargs):\n global _CDSW_SESSION\n return _rest_call(_CDSW_SESSION.delete, url, expected_codes, **kwargs)\n\n\ndef _viz_get(url, expected_codes=None, **kwargs):\n global _VIZ_SESSION\n return _rest_call(_VIZ_SESSION.get, url, expected_codes, **kwargs)\n\n\ndef _viz_post(url, expected_codes=None, **kwargs):\n global _VIZ_SESSION\n return _rest_call(_VIZ_SESSION.post, url, expected_codes, **kwargs)\n\n\ndef _viz_put(url, expected_codes=None, **kwargs):\n global _VIZ_SESSION\n return _rest_call(_VIZ_SESSION.put, url, expected_codes, **kwargs)\n\n\ndef _get_project(name=None, project_id=None):\n if (not name and not project_id) or (name and project_id):\n raise RuntimeError(\"Must specify either name or id, but not both.\")\n resp = _cdsw_get(CDSW_API + '/users/admin/projects')\n for proj in resp.json():\n if (name and proj['name'] == name) or (project_id and proj['id'] == project_id):\n return proj\n return {}\n\n\ndef _create_github_project():\n return _cdsw_post(CDSW_API + '/users/admin/projects', expected_codes=[201, 502],\n json={'template': 'git',\n 'project_visibility': 'private',\n 'name': _DEFAULT_PROJECT_NAME,\n 'gitUrl': 'https://github.com/cloudera-labs/edge2ai-workshop'})\n\n\ndef _create_local_project(zipfile):\n token = str(time.time())[:9]\n filename = os.path.basename(zipfile)\n total_size = os.stat(zipfile).st_size\n total_chunks = math.ceil(total_size / _UPLOAD_CHUNK_SIZE)\n\n f = open(zipfile, 'rb')\n chunk = 0\n while True:\n buf = f.read(_UPLOAD_CHUNK_SIZE)\n if not buf:\n break\n chunk += 1\n chunk_size = len(buf)\n 
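        # Chunk arithmetic for this flow.js-style upload: a file of total_size
        # bytes is posted as math.ceil(total_size / 1048576) pieces, e.g. a
        # 2.5 MiB archive becomes three chunks of 1 MiB, 1 MiB and 0.5 MiB;
        # flowChunkNumber is 1-based, which is why `chunk` was incremented above.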
_cdsw_post(CDSW_API + '/upload/admin', expected_codes=[200],\n data={\n 'uploadType': 'archive',\n 'uploadToken': token,\n 'flowChunkNumber': chunk,\n 'flowChunkSize': chunk_size,\n 'flowCurrentChunkSize': chunk_size,\n 'flowTotalSize': total_size,\n 'flowIdentifier': token + '-' + filename,\n 'flowFilename': filename,\n 'flowRelativePath': filename,\n 'flowTotalChunks': total_chunks,\n },\n files={'file': (filename, io.BytesIO(buf), 'application/zip')}\n )\n\n return _cdsw_post(CDSW_API + '/users/admin/projects', expected_codes=[201],\n json={\n \"name\": _DEFAULT_PROJECT_NAME,\n \"project_visibility\": \"private\",\n \"template\": \"local\",\n \"isPrototype\": False,\n \"supportAsync\": True,\n \"avoidNameCollisions\": False,\n \"uploadToken\": token,\n \"fileName\": filename,\n \"isArchive\": True\n })\n\n\ndef _get_default_project():\n global _DEFAULT_PROJECT\n if not _DEFAULT_PROJECT:\n _DEFAULT_PROJECT = _get_project(name=_DEFAULT_PROJECT_NAME)\n return _DEFAULT_PROJECT\n\n\ndef _get_viz_project():\n global _VIZ_PROJECT\n if not _VIZ_PROJECT:\n _VIZ_PROJECT = _get_project(name=_VIZ_PROJECT_NAME)\n return _VIZ_PROJECT\n\n\ndef start_model(build_id):\n _cdsw_post(CDSW_ALTUS_API + '/models/deploy-model', json={\n 'modelBuildId': build_id,\n 'cpuMillicores': 1000,\n 'memoryMb': 4096,\n })\n\n\ndef _get_viz_user(username):\n resp = _viz_get(VIZ_API + '/users_api',\n headers={\n 'Content-Type': 'application/json',\n 'X-CSRFToken': _get_vizapps_csrf_token(),\n })\n for user in resp.json():\n if user['username'] == username:\n return user\n return {}\n\n\nCSRF_REGEXPS = [\n r'.*name=\"csrfmiddlewaretoken\" type=\"hidden\" value=\"([^\"]*)\"',\n r'.*\"csrfmiddlewaretoken\": \"([^\"]*)\"',\n r'.*\\.csrf_token\\(\"([^\"]*)\"\\)'\n]\n\n\ndef _get_csrf_token(txt, quiet=False):\n token = None\n for regexp in CSRF_REGEXPS:\n m = re.match(regexp, txt, flags=re.DOTALL)\n if m:\n token = m.groups()[0]\n break\n else:\n if not quiet:\n raise RuntimeError(\"Cannot find CSRF token.\")\n return token\n\n\ndef _get_vizapps_csrf_token(username=VIZ_ADMIN_USER, password=PASSWORD):\n resp = _viz_get(VIZ_API + '/login')\n token = _get_csrf_token(resp.text)\n resp = _viz_post(VIZ_API + '/login?',\n data='csrfmiddlewaretoken=' + token + '&next=&username=' + username + '&password=' + password,\n headers={'Content-Type': 'application/x-www-form-urlencoded'})\n token = _get_csrf_token(resp.text, quiet=True)\n if token is None or 'Invalid login' in resp.text:\n raise VizAppsInvalidLoginAttempt()\n return token\n\n\ndef set_vizapps_pwd():\n print('# Setting vizapps_admin password.')\n try:\n token = _get_vizapps_csrf_token(VIZ_ADMIN_USER, VIZ_ADMIN_USER)\n except VizAppsInvalidLoginAttempt:\n print('vizapps_admin password has already been changed. Skipping.')\n return\n\n data = 'csrfmiddlewaretoken=' + token + '&old_password=' + VIZ_ADMIN_USER + '&new_password=' + PASSWORD\n if _get_release() >= [1, 10]:\n data = data + '&confirm_password=' + PASSWORD\n\n _viz_put(VIZ_API + '/users_api/vizapps_admin',\n data=data,\n headers={\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'X-CSRFToken': token,\n })\n\n\ndef _add_vizapps_user(username, password, first_name, last_name):\n print('# Adding VizApps user ' + username + '.')\n if _get_viz_user(username):\n print('Viz user [{}] already exists. 
Skipping creation.'.format(username))\n else:\n if _get_release() >= [1, 10]:\n pwd = 'temp_password123'\n else:\n pwd = password\n token = _get_vizapps_csrf_token(VIZ_ADMIN_USER, PASSWORD)\n resp = _viz_post(VIZ_API + '/users_api/' + username,\n data='csrfmiddlewaretoken=' + token +\n '&username=' + username +\n '&first_name=' + first_name +\n '&last_name=' + last_name +\n '&is_superuser=true' +\n '&is_active=true' +\n '&profile=%7B%22proxy_username%22%3A%22%22%7D' +\n '&groups=%5B%5D' +\n '&roles=%5B%5D' +\n '&password=' + pwd +\n '&new_password=' + pwd,\n headers={\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'X-CSRFToken': token,\n })\n user = resp.json()[0]\n if _get_release() >= [1, 10]:\n token = _get_vizapps_csrf_token(username, pwd)\n resp = _viz_put(VIZ_API + '/users_api/' + username,\n data='csrfmiddlewaretoken=' + token +\n '&old_password=' + pwd +\n '&new_password=' + password +\n '&confirm_password=' + password,\n headers={\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'X-CSRFToken': token,\n })\n user = resp.json()[0]\n print('Created user [{}] with ID {}.'.format(username, user['id']))\n\n\ndef main():\n print('BASE_DIR: {}'.format(BASE_DIR))\n print('CDSW_ALTUS_API: {}'.format(CDSW_ALTUS_API))\n print('CDSW_API: {}'.format(CDSW_API))\n print('IS_TLS_ENABLED: {}'.format(_IS_TLS_ENABLED))\n print('MODEL_PKL_FILE: {}'.format(MODEL_PKL_FILE))\n print('PASSWORD: {}'.format(PASSWORD))\n print('PUBLIC_IP: {}'.format(PUBLIC_IP))\n print('TRUSTSTORE: {}'.format(TRUSTSTORE))\n print('VIZ_API: {}'.format(VIZ_API))\n print('-------------------------------------------------------')\n\n print('# Prepare CDSW for workshop')\n resp = None\n try:\n _init_sessions()\n\n print('# Create user')\n while True:\n status = ''\n try:\n resp = _cdsw_post(CDSW_API + '/users', expected_codes=[201, 404, 422, 503],\n json={\n 'email': EMAIL,\n 'name': FULL_NAME,\n 'username': USERNAME,\n 'password': PASSWORD,\n 'type': 'user'\n },\n timeout=10)\n if resp.status_code == 201:\n print('User created')\n break\n elif resp.status_code == 422:\n print('User admin already exists. Skipping creation.')\n break\n else:\n status = 'Error code: {}'.format(resp.status_code)\n except requests.exceptions.ConnectTimeout as err:\n status = 'Connection timeout. Exception: {}'.format(err)\n pass\n except requests.exceptions.ConnectionError as err:\n status = 'Connection error. Exception: {}'.format(err)\n pass\n if status:\n print('Waiting for CDSW to be ready... ({})'.format(status))\n else:\n print('Waiting for CDSW to be ready...')\n time.sleep(10)\n\n _authorize_sessions()\n\n resp = _cdsw_get(CDSW_API + '/users')\n user = [u for u in resp.json() if u['username'] == USERNAME]\n user_id = user[0]['id']\n print('User ID: {}'.format(user_id))\n\n print('# Check if model is already running')\n if _is_model_deployed():\n print('Model is already deployed!! 
Skipping.')\n else:\n print('# Add engine')\n resp = _cdsw_post(CDSW_API + '/site/engine-profiles', expected_codes=[201],\n json={'cpu': 1, 'memory': 4})\n engine_profile_id = resp.json()['id']\n print('Engine ID: {}'.format(engine_profile_id, ))\n\n print('# Add environment variable')\n _cdsw_patch(CDSW_API + '/site/config',\n json={'environment': '{\"HADOOP_CONF_DIR\":\"/etc/hadoop/conf/\"}'})\n\n print('# Add project')\n _cdsw_get(CDSW_API + '/users/admin/projects')\n if not _get_default_project():\n if PROJECT_ZIP_FILE:\n print('Creating a Local project using file {}'.format(PROJECT_ZIP_FILE))\n _create_local_project(PROJECT_ZIP_FILE)\n else:\n print('Creating a GitHub project')\n _create_github_project()\n print('Project ID: {}'.format(_get_default_project()['id'], ))\n\n print('# Upload setup script')\n setup_script = \"\"\"!pip3 install --upgrade pip scikit-learn\n!HADOOP_USER_NAME=hdfs hdfs dfs -mkdir /user/$HADOOP_USER_NAME\n!HADOOP_USER_NAME=hdfs hdfs dfs -chown $HADOOP_USER_NAME:$HADOOP_USER_NAME /user/$HADOOP_USER_NAME\n!hdfs dfs -put data/historical_iot.txt /user/$HADOOP_USER_NAME\n!hdfs dfs -ls -R /user/$HADOOP_USER_NAME\n\"\"\"\n _cdsw_put(CDSW_API + '/projects/admin/edge2ai-workshop/files/setup_workshop.py',\n files={'name': setup_script})\n\n print('# Upload model')\n model_pkl = open(MODEL_PKL_FILE, 'rb')\n _cdsw_put(CDSW_API + '/projects/admin/edge2ai-workshop/files/iot_model.pkl',\n files={'name': model_pkl})\n\n job_params = {\n 'name': 'Setup workshop',\n 'type': 'manual',\n 'script': 'setup_workshop.py',\n 'timezone': 'America/Los_Angeles',\n 'environment': {},\n 'kernel': 'python3',\n 'cpu': 1,\n 'memory': 4,\n 'nvidia_gpu': 0,\n 'notifications': [{\n 'user_id': user_id,\n 'success': False,\n 'failure': False,\n 'timeout': False,\n 'stopped': False\n }],\n 'recipients': {},\n 'attachments': [],\n }\n if _get_release() >= [1, 10]:\n job_params.update({'runtime_id': _get_default_runtime()})\n\n print('# Create job to run the setup script')\n resp = _cdsw_post(CDSW_API + '/projects/admin/edge2ai-workshop/jobs', expected_codes=[201],\n json=job_params)\n job_id = resp.json()['id']\n print('Job ID: {}'.format(job_id, ))\n\n status = None\n while status != 'succeeded':\n print('# Start job')\n job_url = '{}/projects/admin/edge2ai-workshop/jobs/{}'.format(CDSW_API, job_id)\n start_url = '{}/start'.format(job_url, )\n _cdsw_post(start_url, json={})\n while True:\n resp = _cdsw_get(job_url)\n status = resp.json()['latest']['status']\n print('Job {} status: {}'.format(job_id, status))\n if status == 'succeeded':\n break\n if status == 'failed':\n print('# Job failed. 
Will retry in 5 seconds.')\n time.sleep(5)\n break\n time.sleep(10)\n\n print('# Get engine image to use for model')\n resp = _cdsw_get(CDSW_API + '/projects/admin/edge2ai-workshop/engine-images')\n engine_image_id = resp.json()['id']\n print('Engine image ID: {}'.format(engine_image_id, ))\n\n print('# Deploy model')\n if _get_release() >= [1, 10]:\n job_params = {\n 'runtimeId': _get_default_runtime(),\n 'authEnabled': True,\n \"addons\": [],\n }\n else:\n job_params = {}\n job_params.update({\n 'projectId': _get_default_project()['id'],\n 'name': _MODEL_NAME,\n 'description': _MODEL_NAME,\n 'visibility': 'private',\n 'targetFilePath': 'cdsw.iot_model.py',\n 'targetFunctionName': 'predict',\n 'engineImageId': engine_image_id,\n 'kernel': 'python3',\n 'examples': [{'request': {'feature': '0, 65, 0, 137, 21.95, 83, 19.42, 111, 9.4, 6, 3.43, 4'}}],\n 'cpuMillicores': 1000,\n 'memoryMb': 4096,\n 'replicationPolicy': {'type': 'fixed', 'numReplicas': 1},\n 'environment': {},\n })\n resp = _cdsw_post(CDSW_ALTUS_API + '/models/create-model', json=job_params)\n try:\n model_id = resp.json()['id']\n except Exception as err:\n print(resp.json())\n raise err\n print('Model ID: {}'.format(model_id, ))\n\n # ================================================================================\n\n # See https://docs.cloudera.com/cdsw/latest/analytical-apps/topics/cdsw-application-limitations.html\n\n if _get_release() > [1, 9]:\n print('# Allow applications to be configured with unauthenticated access')\n resp = _cdsw_patch(CDSW_API + '/site/config',\n json={\"allow_unauthenticated_access_to_app\": True})\n print('Set unauthenticated access flag to: {}'.format(resp.json()[\"allow_unauthenticated_access_to_app\"], ))\n\n print('# Add project for Data Visualization server')\n if not _get_viz_project():\n _cdsw_post(CDSW_API + '/users/admin/projects', expected_codes=[201],\n json={'template': 'blank',\n 'project_visibility': 'private',\n 'name': _VIZ_PROJECT_NAME})\n print('Viz project ID: {}'.format(_get_viz_project()['id'], ))\n print('Viz project URL: {}'.format(_get_viz_project()['url'], ))\n\n if _get_release() < [1, 10]:\n print('# Add custom engine for Data Visualization server')\n params = {\n \"engineImage\": {\n \"description\": \"dataviz-623\",\n \"repository\": \"docker.repository.cloudera.com/cloudera/cdv/cdswdataviz\",\n \"tag\": \"6.2.3-b18\"}\n }\n resp = _cdsw_post(CDSW_API + '/engine-images', json=params)\n engine_image_id = resp.json()['id']\n print('Engine Image ID: {}'.format(engine_image_id, ))\n\n print('# Set new engine image as default for the viz project')\n _cdsw_patch(_get_viz_project()['url'] + '/engine-images',\n json={'engineImageId': engine_image_id})\n resp = _cdsw_get(_get_viz_project()['url'] + '/engine-images')\n project_engine_image_id = resp.json()['id']\n print('Project image default engine Image ID set to: {}'.format(project_engine_image_id))\n\n print('# Create application with Data Visualization server')\n params = {\n 'bypass_authentication': True,\n 'cpu': 1,\n 'environment': {},\n 'description': 'Viz Server Application',\n 'kernel': 'python3',\n 'memory': 2,\n 'name': 'Viz Server Application',\n 'nvidia_gpu': 0,\n 'script': '/opt/vizapps/tools/arcviz/startup_app.py',\n 'subdomain': 'viz',\n 'type': 'manual',\n 'environment': {\n 'USE_MULTIPROC': 'false',\n }\n }\n if _get_release() >= [1, 10]:\n params.update({'runtime_id': _get_viz_runtime()})\n _cdsw_post(_get_viz_project()['url'] + '/applications', expected_codes=[201, 400], json=params)\n resp = 
_cdsw_get(_get_viz_project()['url'] + '/applications')\n        print('Application ID: {}'.format(resp.json()[0]['id']))\n\n        # ================================================================================\n\n        print('# Wait for model to start')\n        while True:\n            try:\n                model = _get_model(refresh=True)\n            except RuntimeError as exc:\n                # RuntimeError has no .message attribute in Python 3; match on\n                # str(exc) instead, and keep polling while authentication\n                # errors (401) are still clearing\n                if '401' in str(exc):\n                    time.sleep(10)\n                    continue\n                raise exc\n            if model:\n                build_status = model['latestModelBuild']['status']\n                build_id = model['latestModelBuild']['id']\n                deployment_status = model['latestModelDeployment']['status']\n                print('Model {}: build status: {}, deployment status: {}'.format(model['id'], build_status,\n                                                                                 deployment_status))\n                if build_status == 'built' and deployment_status == 'deployed':\n                    break\n                elif build_status == 'built' and deployment_status == 'stopped':\n                    # If the deployment stops for any reason, try to give it a little push\n                    start_model(build_id)\n                elif build_status == 'failed' or deployment_status == 'failed':\n                    raise RuntimeError('Model deployment failed')\n            time.sleep(10)\n\n        if _get_release() > [1, 9]:\n            print('# Wait for VizApps to start')\n            while True:\n                resp = _cdsw_get(_get_viz_project()['url'] + '/applications')\n                app_status = resp.json()[0]['status']\n                print('Data Visualization app status: {}'.format(app_status))\n                if app_status == 'running':\n                    print('# Viz server app is running. CDSW setup complete!')\n                    set_vizapps_pwd()\n                    _add_vizapps_user('admin', PASSWORD, 'Workshop', 'Admin')\n                    break\n                elif app_status == 'stopped':\n                    # Additional error handling - if the app exists and is stopped, start it?\n                    break\n                elif app_status == 'failed':\n                    raise RuntimeError('Application deployment failed')\n                time.sleep(10)\n\n    except Exception as err:\n        if resp:\n            print(resp.text)\n        raise err\n\n    print('# CDSW setup completed successfully!')\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"asdaraujo/edge2ai-workshop","sub_path":"setup/terraform/resources/cdsw_setup.py","file_name":"cdsw_setup.py","file_ext":"py","file_size_in_byte":27027,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"61"}
{"seq_id":"24845828819","text":"import dash_core_components as dcc\nimport dash_html_components as html\nimport dash_bootstrap_components as dbc\nfrom dash.dependencies import Input, Output, State\n\nfrom dashapp import app\nfrom dashapp.header import header\nfrom dashapp.tabletab.tabletab import table_tab\nfrom dashapp.bibliotab.bibliotab import biblio_tab\n\n\n# Declare tabs following this format, 1 dict per tab\n# Can then filter to get info from input, see below\n# Replace search_key, search_value and target_key accordingly\n# next(filter(lambda x: x['search_key'] == 'search_value', TABS))['target_key']\nTABS = [\n    {'name': 'tab-0', 'url': '/tabletab', 'label': 'Notes', 'container': table_tab},\n    {'name': 'tab-1', 'url': '/biblio', 'label': 'Bibliographie', 'container': biblio_tab},\n]\n\n\n# Builds tabs from TABS. 
Don't touch.\ntabs = dbc.Tabs(\n [dbc.Tab(label=tab['label'], label_style={'cursor': 'pointer'}) for tab in TABS],\n id='tabs', active_tab='tab-0', style={'padding-left': '10px', }\n)\n\n\nlayout = html.Div([\n dcc.Location(id='url', refresh=False),\n header,\n html.Div([\n tabs,\n ], className='pt-2 bg-dark text-light'),\n dbc.Container([], id='tab-container', fluid=True, className='bt-2 pt-3'),\n])\n\n\n@app.callback(Output(component_id='url', component_property='pathname'),\n [Input(component_id='tabs', component_property='active_tab')])\ndef update_pathname(selected_tab):\n \"\"\"Changes the url when active_tab changes, triggering update_tab callback\"\"\"\n\n return next(filter(lambda x: x['name'] == selected_tab, TABS))['url']\n\n\n@app.callback(\n [Output(component_id='tab-container', component_property='children'),\n Output(component_id='tabs', component_property='active_tab')],\n [Input(component_id='url', component_property='pathname')],\n State(component_id='tabs', component_property='active_tab')\n)\ndef update_tab(curr_url, active_tab_state):\n \"\"\"Updates selected tab and tab container on url update\"\"\"\n\n return next(filter(lambda x: x['url'] == curr_url, TABS))['container'],\\\n next(filter(lambda x: x['url'] == curr_url, TABS))['name']\n\n\n","repo_name":"PolycarpeLeGrand/CarnetEta","sub_path":"dashapp/tabindex.py","file_name":"tabindex.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17689608367","text":"import collections\nimport datetime\n\nfrom .db import get_db\n\n\ndef get_genres_over_time():\n db = get_db()\n\n plays = db.execute(\"\"\"\n SELECT play.timestamp, track.genre, track.duration\n FROM play\n INNER JOIN track ON play.track_id = track.id\n WHERE play.timestamp IS NOT NULL\n \"\"\")\n\n years = set()\n by_genre_and_year = {}\n\n for play in plays:\n genre = play['genre']\n\n year = datetime.datetime.fromtimestamp(play['timestamp']).year\n years.add(year)\n\n if genre not in by_genre_and_year:\n by_genre_and_year[genre] = collections.Counter()\n\n # use duration so that it properly reflects \"time spent listening\"\n # for, e.g., long single-track albums.\n by_genre_and_year[genre][year] += play['duration']\n\n result = [\n {\n 'id': genre,\n 'data': [\n {'x': year, 'y': counts_by_year[year]}\n for year in sorted(years)[1:] # exclude sparse first year\n ]\n }\n for genre, counts_by_year in by_genre_and_year.items()\n ]\n\n return result\n\n\ndef get_artists_over_time():\n db = get_db()\n\n plays = db.execute(\"\"\"\n SELECT play.timestamp, track_view.artist, track_view.duration\n FROM play\n INNER JOIN track_view ON play.track_id = track_view.id\n WHERE play.timestamp IS NOT NULL\n \"\"\")\n\n years = set()\n by_year_and_artist = {}\n\n for play in plays:\n artist = play['artist']\n\n year = datetime.datetime.fromtimestamp(play['timestamp']).year\n years.add(year)\n\n if year not in by_year_and_artist:\n by_year_and_artist[year] = collections.Counter()\n\n # use duration so that it properly reflects \"time spent listening\"\n # for, e.g., long single-track albums.\n by_year_and_artist[year][artist] += play['duration']\n\n artist_ranks = {}\n for year, artists in by_year_and_artist.items():\n for rank, (artist, _) in enumerate(artists.most_common(25)):\n if artist not in artist_ranks:\n artist_ranks[artist] = {}\n\n artist_ranks[artist][year] = rank + 1\n\n return [\n {\n 'id': artist,\n 'data': [\n {'x': year, 'y': ranks.get(year, None)}\n for year 
in sorted(years)[1:]  # exclude sparse first year\n            ]\n        }\n        for artist, ranks in artist_ranks.items()\n    ]\n\n\ndef get_albums_over_time():\n    db = get_db()\n\n    plays = db.execute(\"\"\"\n        SELECT\n            play.timestamp,\n            track_view.album || '\\n(' || album_view.artist || ')' AS album,\n            CAST(track_view.duration AS REAL) / album_view.duration AS fraction\n        FROM play\n        INNER JOIN track_view ON play.track_id = track_view.id\n        INNER JOIN album_view ON track_view.album_id = album_view.id\n        WHERE play.timestamp IS NOT NULL AND track_view.album IS NOT NULL\n    \"\"\")\n\n    years = set()\n    by_year_and_album = {}\n\n    for play in plays:\n        album = play['album']\n\n        year = datetime.datetime.fromtimestamp(play['timestamp']).year\n        years.add(year)\n\n        if year not in by_year_and_album:\n            by_year_and_album[year] = collections.Counter()\n\n        by_year_and_album[year][album] += play['fraction']\n\n    album_ranks = {}\n    for year, albums in by_year_and_album.items():\n        for rank, (album, count) in enumerate(albums.most_common(25)):\n            if count <= 1.5:\n                continue\n\n            if album not in album_ranks:\n                album_ranks[album] = {}\n\n            album_ranks[album][year] = rank + 1\n\n    return [\n        {\n            'id': album,\n            'data': [\n                {'x': year, 'y': ranks.get(year, None)}\n                for year in sorted(years)[1:]  # exclude sparse first year\n            ]\n        }\n        for album, ranks in album_ranks.items()\n    ]\n\n\ndef get_listens_by_year():\n    db = get_db()\n\n    result_set = db.execute(\"\"\"\n        SELECT\n            track_view.year AS x,\n            CAST(SUM(track_view.duration) AS REAL) / 60 / 60 AS y\n        FROM track_view\n        INNER JOIN play ON play.track_id = track_view.id\n        WHERE track_view.year IS NOT NULL\n        GROUP BY track_view.year;\n    \"\"\")\n\n    values = {\n        row['x']: row['y']\n        for row in result_set\n    }\n    min_year = min(values.keys())\n    max_year = max(values.keys())\n\n    result = []\n    for year in range(min_year, max_year + 1):\n        result.append({\n            'x': f\"{year}-01-01\",\n            'y': values.get(year, 0)\n        })\n\n    return [\n        {\n            'id': '_',\n            'data': result\n        }\n    ]\n","repo_name":"mtdavis/music-server","sub_path":"music_server/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":4687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"26850190793","text":"import time\r\n\r\nmytime = time.time()\r\n\r\nprint(mytime)\r\nprint(\"Since 1970-01-01,\", mytime, \"seconds have passed\")\r\n\r\n\r\n# Print the elapsed time as hours, minutes and seconds\r\n\r\nhours = None\r\nminutes = None\r\nseconds = None\r\n\r\n# Test value\r\n# past_time = 365 * 24 * 60 * 60\r\n\r\npast_time = time.time()\r\n\r\nprint(past_time)\r\npast_time = int(past_time)\r\nprint(past_time, \"seconds\")\r\nhours = past_time // 3600\r\nseconds = past_time - hours * 3600\r\nminutes = seconds // 60\r\nseconds = seconds - minutes * 60\r\n\r\nprint(\"Time elapsed since 1970-01-01:\",\r\n      hours, \"hours\",\r\n      minutes, \"minutes\",\r\n      seconds, \"seconds\")\r\n# Add days and years\r\ndays = None\r\nmonths = None\r\nyears = None\r\n\r\ndays = hours // 24\r\nhours = hours % 24\r\n\r\nmonths = days // 30\r\ndays = days % 30\r\n\r\nyears = months // 12\r\nmonths = months % 12\r\n\r\nprint(\"Time elapsed since 1970-01-01:\",\r\n      years, \"years\",\r\n      months, \"months\",\r\n      days, \"days\",\r\n      hours, \"hours\",\r\n      minutes, \"minutes\",\r\n      seconds, \"seconds\")\r\n\r\n\r\n\r\n\r\n\r\n# Reference answer\r\nimport time\r\nmytime = time.time()\r\n\r\nseconds = int(mytime) % 60  # minutes and hours are whole multiples of 60, so whatever is not divisible by 60 is the leftover seconds\r\nhours = int(mytime) // 3600\r\nminutes = (int(mytime) - int(mytime) // 3600 * 3600)  # seconds remaining after the whole hours\r\nminutes = (minutes - seconds) // 60\r\nprint(\"Time elapsed since 1970-01-01:\",\r\n      hours, \"hours\",\r\n      minutes, \"minutes\",\r\n      seconds, \"seconds\")\r\n# To be honest, the logic of this example is not very clear; treat it as a reference only.\r\n''':arg\r\nSuppose the time is 3959 seconds:\r\n3959 % 60 = 59 seconds\r\n3959 // 3600 = 1 hour\r\n(3959 - 1 * 3600 - 59) // 60\r\n\r\n'''","repo_name":"LYTXJY/python_full_stack","sub_path":"Code/src/hellopython/第二章/test/time.py","file_name":"time.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"23395803211","text":"def isPalindrome(x):\r\n    x = str(int(x))\r\n    y = x[::-1]\r\n    if x == y:\r\n        return 1\r\n    else:\r\n        return 0\r\nlis = input(': ').split()\r\nif int(lis[0]) * 2 == len(lis) - 1:\r\n    lis.remove(lis[0])\r\n    cases = list(zip( * [iter(lis)] * 2))\r\n    caseNo = 1\r\n    for case in cases:\r\n        noFS = 0\r\n        for a in range(int(case[0]), int(case[1]) + 1):\r\n            if a ** 0.5 == int(a ** 0.5):\r\n                if isPalindrome(a) == 1 and isPalindrome(a ** 0.5) == 1:\r\n                    noFS += 1\r\n        print('Case #' + str(caseNo) + ': ' + str(noFS))\r\n        caseNo += 1\r\nelse:\r\n    print('Error: Input not of correct size')\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_118/2800.py","file_name":"2800.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"70699889794","text":"import sys\r\nsys.path.append('/home/pi/Desktop/ADL/YeastRobot/PythonLibrary')\r\nfrom RobotControl import *\r\n\r\n#################################\r\n### Define Deck Layout\r\n#################################\r\ndeck=\"\"\"\\\r\nRES41 DW24P DW24P DW24P DW24P DW96P BLANK BLANK\r\nBLANK DW24P DW24P DW24P DW24P DW96P BLANK BLANK\r\nBLANK DW24P DW24P DW24P DW24P DW96P BLANK BLANK\r\nBLANK DW24P DW24P DW24P DW24P DW96P BLANK BLANK\r\n\"\"\"\r\n# 2 3 4 5 6 7 8 9\r\n\r\n# note the 1st user defined column is \"2\" not zero or one, since tips are at 0 & 1\r\n##################################\r\n\r\nCurrentTipPosition = 1\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\r\nmyvol = 1000\r\nDefineDeck(deck)\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\r\nprintDeck()\r\nInitializeRobot()\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# initialize motors, home, etc.\r\n\r\n#PROGRAM 3 OVERVIEW:\r\n#transfer 1 ml of YPER media from reservoir to 24 well plate containing yeast\r\n#Mix solution\r\n#transfer entire 1 ml to DW96P (A1 - A5UL, A2-A5UR...etc.)\r\n#change tips\r\n\r\nResRow=0\r\nResCol=2\r\nrows = [0, 1, 2, 3]\r\ncolumnsAndOffsets = [[3, 'UL'], [4, 'UR'], [5, 'LL'], [6, 'LR']]\r\nDesCol = 7\r\n\r\n#get tips from tip box A, 'UL'\r\n\r\nfor i in rows:\r\n\tfor j in columnsAndOffsets:\r\n\t\tCurrentTipPosition = retrieveTips(CurrentTipPosition)\t\r\n\t\tposition(ResRow,ResCol)\r\n\t\taspirate(myvol, 100, 50)\t\t\t\t\t\t\t \t\t\t\t\t\t# Aspirate(volume,% to bottom,speed)\r\n\t\tposition(i,j[0])\r\n\t\tdispense(myvol,100,50,'Y','MIX&ASPIRATE')\r\n\t\tposition(i,DesCol,j[1])\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\r\n\t\tdispense(myvol, 25, 50,'Y')\t\t\t\t\t\t\t\t\t\t\t\t\t\t# Dispense(volume, %to bottom, speed, blowout)\r\n\t\tdisposeTips()\r\nfast_home_velmex()\r\nShutDownRobot()\r\n","repo_name":"tdlong/YeastRobot","sub_path":"UserPrograms/olderprograms/ResuspendTransfer_RES41_to_16DW24P_slowmix_to_4DW96P_1ml.py","file_name":"ResuspendTransfer_RES41_to_16DW24P_slowmix_to_4DW96P_1ml.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"23550306051","text":"import bisect\r\nfilename = 'TrainTimetable-large'\r\ninput = open(filename + '.in', 'r')\r\noutput = 
open(filename + '.out', 'w')\r\ncases = int(input.readline().rstrip())\r\nfor i in xrange(1, cases+1 ):\r\n turn = int(input.readline().rstrip())\r\n noA, noB = [int(x) for x in input.readline().rstrip().split()]\r\n demandA = []\r\n supplyB = []\r\n for j in xrange(noA):\r\n times = [int(x.split(':')[0])*60+int(x.split(':')[1]) for x in input.readline().rstrip().split()]\r\n bisect.insort_left(demandA, times[0])\r\n bisect.insort_left(supplyB, times[1])\r\n supplyA = []\r\n demandB = []\r\n for j in xrange(noB):\r\n times = [int(x.split(':')[0])*60+int(x.split(':')[1]) for x in input.readline().rstrip().split()]\r\n bisect.insort_left(demandB, times[0])\r\n bisect.insort_left(supplyA, times[1])\r\n startA = len(demandA)\r\n startB = len(demandB)\r\n for d in demandA:\r\n for s in supplyA:\r\n if d >= s+turn:\r\n startA -= 1\r\n supplyA.remove(s)\r\n break\r\n for d in demandB:\r\n for s in supplyB:\r\n if d >= s+turn:\r\n startB -= 1\r\n supplyB.remove(s)\r\n break\r\n output.write('Case #' + str(i) + ': ' + str(startA if startA >= 0 else 0) + ' ' + str(startB if startB >= 0 else 0) + '\\n')\r\ninput.close()\r\noutput.close()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_2/236.py","file_name":"236.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25795379928","text":"from operator import itemgetter\nfrom typing import Dict, Iterable, List, Optional, Set\n\nfrom panoramic.cli.husky.core.model.enums import Relationship, TimeGranularity\nfrom panoramic.cli.husky.core.model.models import HuskyModel, ModelAttribute\nfrom panoramic.cli.husky.service.graph_builder.component import ModelJoinEdge\nfrom panoramic.cli.husky.service.select_builder.exceptions import (\n ImpossibleTaxonCombination,\n)\nfrom panoramic.cli.husky.service.select_builder.query_joins import SimpleQueryJoins\nfrom panoramic.cli.husky.service.utils.taxon_slug_expression import TaxonSlugExpression\n\nTIME_GRANULARITY_RANK: Dict[Optional[TimeGranularity], int] = {TimeGranularity.hour: 0, TimeGranularity.day: 1}\n\nDEFAULT_TIME_GRANULARITY_RANK = len(TIME_GRANULARITY_RANK)\n\n\ndef get_time_granularity_rank(time_granularity: Optional[TimeGranularity]) -> int:\n return TIME_GRANULARITY_RANK.get(time_granularity, DEFAULT_TIME_GRANULARITY_RANK)\n\n\ndef generate_model_taxon_rank(models: Iterable[HuskyModel], taxons: Set[TaxonSlugExpression]) -> Dict[HuskyModel, int]:\n \"\"\"\n Returns a dict where key is a model, and value is number of taxons on that model that we want to select.\n \"\"\"\n models_rank = dict()\n for model in models:\n model_taxon_slugs = {attribute.taxon_memoized for attribute in model.attributes_memoized.values()}\n models_rank[model] = len([taxon_slug for taxon_slug in taxons if taxon_slug.graph_slug in model_taxon_slugs])\n return models_rank\n\n\ndef sort_models_with_heuristic(models: Iterable[HuskyModel], taxons: Set[TaxonSlugExpression]) -> List[HuskyModel]:\n \"\"\"\n Returns the models in which order we should try to search the graph. 
It skips tag models,\n because those cannot be used as root models.\n\n \"\"\"\n models_score = dict()\n for model, taxon_rank in generate_model_taxon_rank(models, taxons).items():\n models_score[model] = (\n taxon_rank, # Most matching taxons.\n -1 * model.number_of_identifiers, # Less identifiers means smaller table\n get_time_granularity_rank(model.time_granularity),\n entity_level_rank(model.identifier_attributes), # Higher entity means smaller table.\n )\n\n sorted_models_by_rank = sorted(list(models_score.items()), key=itemgetter(1), reverse=True)\n\n return [model for model, _ in sorted_models_by_rank]\n\n\ndef entity_level_rank(attributes: Set[ModelAttribute]) -> int:\n # TODO later this should use taxons parent relationship.\n str_taxons = set(map(lambda attr: attr.taxon, attributes))\n if 'ad_id' in str_taxons:\n return 100\n if 'adgroup_id' in str_taxons:\n return 1000\n if 'campaign_id' in str_taxons:\n return 10000\n return 0\n\n\nclass GraphSearch:\n \"\"\"\n Class for performing bfs graph search, pruning and returning optimal QueryJoins tree.\n \"\"\"\n\n def __init__(\n self,\n name_to_model: Dict[str, HuskyModel],\n query_taxons: Set[TaxonSlugExpression],\n graph,\n data_source: Optional[str] = None,\n ):\n self.name_to_model: Dict[str, HuskyModel] = name_to_model\n self.query_taxons: Set[TaxonSlugExpression] = query_taxons\n self.graph = graph\n self.data_source = data_source\n\n def find_join_tree(self) -> SimpleQueryJoins:\n # We just started, sort all available models.\n sorted_best_models = sort_models_with_heuristic(self.name_to_model.values(), self.query_taxons)\n\n for model in sorted_best_models:\n # run bfs to find all accessible models via a join\n query_join = self._bfs(model, self.query_taxons)\n # calculate which taxons are covered by all joins\n taxons = query_join.get_all_selectable_taxons()\n # are all requested taxons covered?\n if self.query_taxons.issubset(taxons):\n # yes, so cut off all redundant query joins\n self._prune_useless_joins(query_join, self.query_taxons)\n return query_join\n raise ImpossibleTaxonCombination(self.query_taxons, self.data_source)\n\n def find_all_full_join_trees(self) -> List[SimpleQueryJoins]:\n \"\"\"\n Finds all available join combinations that contain requested taxons\n \"\"\"\n query_joins: List[SimpleQueryJoins] = []\n models = self.name_to_model.values()\n\n for model in models:\n query_join = self._bfs(model, self.query_taxons)\n taxons = query_join.get_all_selectable_taxons()\n if self.query_taxons.issubset(taxons):\n query_joins.append(query_join)\n\n if not query_joins:\n raise ImpossibleTaxonCombination(self.query_taxons, self.data_source)\n\n return query_joins\n\n @staticmethod\n def validate_next_time_granularity(\n contains_time_granularity: Set[TimeGranularity], next_time_granularity: TimeGranularity\n ) -> bool:\n \"\"\"\n Checks what time granularities can be used together\n Currently none can be mixed together\n \"\"\"\n next_contains_time_granularity = {*contains_time_granularity, next_time_granularity}\n return len(next_contains_time_granularity) == 1\n\n def _bfs(self, root_model: HuskyModel, query_taxons: Set[TaxonSlugExpression]) -> SimpleQueryJoins:\n \"\"\"\n Performs a BFS, returning QueryJoins tree with >all< accessible models.\n :param root_model: model to start the BFS from.\n :param query_taxons: Taxons we want to select.\n \"\"\"\n visited_models = {root_model}\n \"\"\"Set with all already visited models, or models in a queue.\"\"\"\n\n queue = [root_model]\n\n query_joins_by_model = dict()\n 
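        # BFS bookkeeping: visited_models guards against revisiting a model,
        # queue holds the models still to expand (note that list.pop() below
        # removes from the tail, so the walk is depth-first in practice), and
        # the dict below maps each model to its growing SimpleQueryJoins node.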
\"\"\"Dict for keeping QueryJoin structures. The QueryJoin.join_to is extended as we traverse the graph.\"\"\"\n\n root_taxon_slugs = {taxon_slug for taxon_slug in query_taxons if taxon_slug.graph_slug in root_model.taxons}\n query_joins_by_model[root_model] = SimpleQueryJoins(self.graph, root_model, [], root_taxon_slugs)\n\n # checks for time granularity of found models\n contains_time_granularity: Set[TimeGranularity] = set()\n\n while queue:\n current_model = queue.pop()\n current_model_query_join: SimpleQueryJoins = query_joins_by_model[current_model]\n\n next_model_names = set(self.graph.successors(current_model.name))\n for next_model_name in next_model_names: # Iterate all successors\n node_join: ModelJoinEdge = self.graph.get_edge_data(current_model.name, next_model_name)['join']\n if node_join.relationship in [Relationship.one_to_many, Relationship.many_to_many]:\n # Can't allow joins to_many, since that could result in duplicated rows.\n continue\n next_model = self.name_to_model[next_model_name]\n if next_model in visited_models:\n # Already visited that model, skip.\n continue\n\n # do not allow mixing models with different time granularity\n if next_model.time_granularity is not None:\n if not self.validate_next_time_granularity(contains_time_granularity, next_model.time_granularity):\n continue\n\n contains_time_granularity.add(next_model.time_granularity)\n\n queue.append(next_model)\n visited_models.add(next_model)\n\n # Create QueryJoin for the next model and add it to the query join dict\n next_model_taxon_slugs = {\n taxon_slug for taxon_slug in query_taxons if taxon_slug.graph_slug in next_model.taxons\n }\n next_model_query_join = SimpleQueryJoins(self.graph, next_model, [], next_model_taxon_slugs)\n query_joins_by_model[next_model] = next_model_query_join\n\n # Add the next model QueryJoin to current models query join structure.\n current_model_query_join.join_to.append(next_model_query_join)\n\n # Return the root query join\n return query_joins_by_model[root_model]\n\n def _prune_useless_joins(self, query_join: SimpleQueryJoins, needed_taxons: Set[TaxonSlugExpression]):\n \"\"\"\n Removes join sub-trees that do not bring any taxons that we need, or bring taxons we already have.\n Called recursively on each node in query join tree.\n It mutates the query joins.\n \"\"\"\n if len(query_join.join_to) == 0:\n return\n # Taxons on current query join we already have, so remove them from needed taxons.\n currently_needed_taxons = {taxon.graph_slug for taxon in needed_taxons.difference(query_join.taxons_from_model)}\n\n # Sort the join subtrees based on total number of taxons they bring and we still need.\n sorted_by_taxon_size = sorted(\n query_join.join_to,\n key=lambda x: len(\n [\n taxon_slug\n for taxon_slug in x.get_all_selectable_taxons()\n if taxon_slug.graph_slug in currently_needed_taxons\n ]\n ),\n reverse=True,\n )\n\n effective_joins = [] # New list of only effective joins.\n\n # TODO v1 check if we need this, or we are fine with updating needed_taxons only\n effective_joins_taxons: Set[str] = set()\n for join in sorted_by_taxon_size:\n # Get taxons that this join brings and we dont have yet\n join_taxon_slugs = {taxon_slug.graph_slug for taxon_slug in join.get_all_selectable_taxons()}\n extra_taxons = join_taxon_slugs.intersection(currently_needed_taxons).difference(effective_joins_taxons)\n # If not empty, use that join. 
Otherwise, omit (prune) that subtree\n            if len(extra_taxons) > 0:\n                effective_joins.append(join)\n                effective_joins_taxons.update(extra_taxons)\n\n        # Set only effective_joins on the current query join.\n        query_join.join_to = effective_joins\n        new_needed_taxons = {\n            taxon_slug for taxon_slug in needed_taxons if taxon_slug not in query_join.taxons_from_model\n        }\n        for join in query_join.join_to:\n            # Call recursively on all sub trees.\n            self._prune_useless_joins(join, new_needed_taxons)\n","repo_name":"panoramichq/panoramic-cli","sub_path":"src/panoramic/cli/husky/service/select_builder/graph_search.py","file_name":"graph_search.py","file_ext":"py","file_size_in_byte":10368,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} {"seq_id":"10071426218","text":"'''\nThe KMP algorithm consists of building an LPS table: lps[j] is the length of the\nlongest proper prefix of the pattern ending at j that is also a suffix.\nTo build the LPS table, you do a two-pointer pass:\none pointer goes through every element in the array, the second pointer spots where prefix and suffix match\noutline:\nfor j in range (1,len(elem))\n    if a[i] == a[j]\n        lps[j] = i + 1\n        i+=1\n    else lps[j] = i = rec(lps, i, j, a) #recursively find lps that matches a[j]\n\nrec(lps, i, j, a):\n    if i <= -1 or a[j] == a[i]:\n        return i+1\n    return rec(lps,lps[i-1], j, a)\n\nit(lps, i,j,a):\n    while i > 0:\n        if a[j] == a[i]:\n            break\n        i = lps[i-1]\n    if a[j] == a[i]:\n        return i+1\n    else:\n        return 0\n\n[abcabdab]\n 00012012\n i j\n012345678\naabaabaaab\n0101234523\n\nOnce lps is done building:\n\n\n\n'''\n\n\ndef buildlps(A):\n    lps = [-1] * len(A)\n    lps[0] = 0\n    i = 0\n    for j in range(1, len(A)):\n        if A[i] == A[j]:\n            i += 1\n            lps[j] = i\n        else:\n            i = it(lps, lps[j-1], j, A)\n            lps[j] = i\n    return lps\n\n\n'''\n i\n j\n0123456789\naabaabaaab\n0101234520\nrec(i = 5,j = 8,)\nrec(i = 2, j = 8)\nrec(i = 1, j = 8) return 2\n'''\n# [abcabdab]\n# 00012012\n# i j\n# 012345678\n# aabaabaaab\n# 0101234523\n\n\ndef rec(lps, i, j, A):\n    if i <= -1 or A[i] == A[j]:\n        return i+1\n    return rec(lps, lps[i-1], j, A)\n\n\ndef it(lps, i, j, A):\n    while i > 0:\n        if A[j] == A[i]:\n            break\n        i = lps[i-1]\n    return i+1 if A[j] == A[i] else 0\n\n\ndef findOcc(s, lps, l):\n    # go through s and match with l\n    # if they match increment lp\n    # else find new lp by looking for next matching prefix, done by lindex = lps[lindex-1 if lindex-1>= 0 else 0]\n    # if lp >= len(l) we've found a match, print start index, decrement lp\n    # increment sp\n    lidx = 0\n    print(lps)\n    sidx = 0\n    while sidx < len(s):\n        print(\"{} {}\".format(sidx, lidx))\n        if s[sidx] == l[lidx]:\n            if lidx >= len(l)-1: # if it's the last element\n                print('Match at {}'.format(sidx-len(l) + 1)) # aaab aab\n                lidx = lps[lidx]\n            else:\n                lidx += 1\n            sidx += 1\n        else:\n            if lidx-1 < 0: # if first elem comparison, increment sidx\n                sidx += 1\n            else:\n                lidx = lps[lidx-1] # else try the previous prefix\n\n    '''\n    '''\n\n    # look for next matching prefix (lps, l, charToMatch, lidex):\n    # . 
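(see the reference sketch below)\n    # (hedged, not from the original file) a compact textbook KMP for\n    # cross-checking buildlps/findOcc above; lps[i] is the length of the\n    # longest proper prefix of p[:i+1] that is also its suffix:\n    def _kmp_reference(s, p):\n        lps = [0] * len(p)\n        k = 0\n        for i in range(1, len(p)):\n            while k and p[i] != p[k]:\n                k = lps[k - 1]\n            if p[i] == p[k]:\n                k += 1\n            lps[i] = k\n        hits, k = [], 0\n        for i, c in enumerate(s):\n            while k and c != p[k]:\n                k = lps[k - 1]\n            if c == p[k]:\n                k += 1\n            if k == len(p):\n                hits.append(i - len(p) + 1)\n                k = lps[k - 1]\n        return hits\n    # _kmp_reference('AABAACAADAABAABA', 'AABA') -> [0, 9, 12]\n    # 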
return the new lindex\n # if lindex < 0 or l[lindex] == charToMatch:\n # return lindex + 1\n # return (lps, l, charToMatch, lps[lindex-1])\n\n pass\n\n\nlps = buildlps(\"AABA\")\n\nfindOcc(\"AABAACAADAABAABA\", lps, \"AABA\")\n\n\n# for j in range (1,len(elem))\n# if a[i] == a[j]\n# lps[j] = i + 1\n# i+=1\n# else lps[j] = i = rec(lps, i, j, a) #recursively find lps that matches a[j]\n\n# rec(lps, i, j, a):\n# if i <= -1 or a[j] == a[i]:\n# return i+1\n# return rec(lps,lps[i-1], j, a)\n\n# it(lps, i,j,a):\n# while i > 0:\n# if a[j] == a[i]:\n# break\n# i = lps[i-1]\n# if a[j] == a[i]:\n# return i+1\n# else:\n# return 0\n","repo_name":"xavierpjb/AlgoDataStruct","sub_path":"python/IkAlgs/strings/kmp.py","file_name":"kmp.py","file_ext":"py","file_size_in_byte":3063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73845931713","text":"\"\"\"\n2D grid travesal\nEnum\n\"\"\"\n\nfrom enum import Enum, auto\n\n\nENTRANCE = \"*\"\nEXIT = \"&\"\nWALL = \"x\"\nML = \"/\"\nMR = \"\\\\\"\n\n\nclass Direction(Enum):\n UP = auto()\n DOWN = auto()\n LEFT = auto()\n RIGHT = auto()\n\n\ndef getDirection(i, j, w, l):\n if i == 0:\n return Direction.DOWN\n elif i == l - 1:\n return Direction.UP\n elif j == 0:\n return Direction.RIGHT\n elif j == w - 1:\n return Direction.LEFT\n\n\ndef changeDir(dir, mirror):\n if mirror == ML:\n if dir == Direction.UP:\n return Direction.RIGHT\n elif dir == Direction.DOWN:\n return Direction.LEFT\n elif dir == Direction.LEFT:\n return Direction.DOWN\n elif dir == Direction.RIGHT:\n return Direction.UP\n else:\n if dir == Direction.UP:\n return Direction.LEFT\n elif dir == Direction.DOWN:\n return Direction.RIGHT\n elif dir == Direction.LEFT:\n return Direction.UP\n elif dir == Direction.RIGHT:\n return Direction.DOWN\n\n\ndef move(i, j, dir):\n if dir == Direction.UP:\n return i - 1, j\n elif dir == Direction.DOWN:\n return i + 1, j\n elif dir == Direction.LEFT:\n return i, j - 1\n elif dir == Direction.RIGHT:\n return i, j + 1\n\n\ndef main():\n case = 1\n while True:\n w, l = map(int, input().split())\n if w == 0 and l == 0:\n break\n\n grid = []\n for _ in range(l):\n grid.append([x for x in input()])\n\n # Find entrance and determine direction\n for i in range(l):\n for j in range(w):\n if grid[i][j] == ENTRANCE:\n start = i, j\n dir = getDirection(i, j, w, l)\n break\n\n # Keep moving until reach a wall\n i, j = start\n while grid[i][j] != WALL:\n # Meets mirror, change direction\n if grid[i][j] == ML or grid[i][j] == MR:\n dir = changeDir(dir, grid[i][j])\n i, j = move(i, j, dir)\n # Mark as exit\n grid[i][j] = EXIT\n\n # Output answer\n print(f\"HOUSE {case}\")\n for row in grid:\n for x in row:\n print(x, end=\"\")\n print()\n case += 1\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"jetkan-yk/phyting","sub_path":"cp4/lineards/2darray/funhouse.py","file_name":"funhouse.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"4724182278","text":"import os\nimport torch.utils.data as data_utils\nimport torch\nfrom torch import nn\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nfrom torchvision.datasets import MNIST\nfrom torchvision.utils import save_image\nimport pandas as pd\nfrom PIL import Image\nimport numpy as np\nimport random\n\nif not os.path.exists('./mlp_img'):\n os.mkdir('./mlp_img')\n\n#data1 = 
pd.read_csv(\"/home/pranav/Documents/cs7015/assignment_2/Assignment_2/BnW/7/data.csv\", encoding = \"UTF-8\")\n#data1=data1.values\nrandom.seed(9001)\ndata1=np.zeros(shape=(1596,829))\nji=0\npath='/home/pranav/Documents/cs7015/assignment_2/Assignment_2/Features/coast'\nfor file in os.listdir(path):\n #print(file)\n current = os.path.join(path, file)\n in1=open(current)\n l1 = in1.read().strip().split(\"\\n\")\n l2=[]\n for i in l1:\n \tl2=l2+i.split(\" \")\n l2.append(0)\n l2=np.array(l2)\n l2=np.float_(l2)\n data1[ji]=l2\n ji=ji+1\n \n\npath='/home/pranav/Documents/cs7015/assignment_2/Assignment_2/Features/forest'\nfor file in os.listdir(path):\n current = os.path.join(path, file)\n in1=open(current)\n l1 = in1.read().strip().split(\"\\n\")\n l2=[]\n for i in l1:\n \tl2=l2+i.split(\" \")\n l2.append(1)\n l2=np.array(l2)\n l2=np.float_(l2)\n data1[ji]=l2\n ji=ji+1\n \n\npath='/home/pranav/Documents/cs7015/assignment_2/Assignment_2/Features/highway'\nfor file in os.listdir(path):\n current = os.path.join(path, file)\n in1=open(current)\n l1 = in1.read().strip().split(\"\\n\")\n l2=[]\n for i in l1:\n \tl2=l2+i.split(\" \")\n l2.append(2)\n l2=np.array(l2)\n l2=np.float_(l2)\n data1[ji]=l2\n ji=ji+1\n \n\npath='/home/pranav/Documents/cs7015/assignment_2/Assignment_2/Features/street'\nfor file in os.listdir(path):\n current = os.path.join(path, file)\n in1=open(current)\n l1 = in1.read().strip().split(\"\\n\")\n l2=[]\n for i in l1:\n \tl2=l2+i.split(\" \")\n l2.append(3)\n l2=np.array(l2)\n l2=np.float_(l2)\n data1[ji]=l2\n ji=ji+1\n \n\npath='/home/pranav/Documents/cs7015/assignment_2/Assignment_2/Features/tallbuilding'\nfor file in os.listdir(path):\n current = os.path.join(path, file)\n in1=open(current)\n l1 = in1.read().strip().split(\"\\n\")\n l2=[]\n for i in l1:\n \tl2=l2+i.split(\" \")\n l2.append(4)\n l2=np.array(l2)\n l2=np.float_(l2)\n data1[ji]=l2\n ji=ji+1\n\nnum_epochs = 0\nbatch_size = 10\nlearning_rate = 1e-3\nred_dim=48\n\ndef to_img(x):\n x = x.view(x.size(0), 1, 36, 23)\n return x\n\ndef plot_sample_img(img, name):\n img = img.view(1, 36, 23)\n save_image(img, './sample_{}.png'.format(name))\n\nnp.random.seed(120)\ntorch.manual_seed(999)\nnp.random.shuffle(data1)\ntr=data1[:,828:]\ntr=tr.reshape(1596)\ntr=tr.astype(int)\ntargets=torch.Tensor(tr)\nfeatures=torch.Tensor(data1[:,:828])\n\ndataset = data_utils.TensorDataset(features, targets)\ndataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False)\n\nclass autoencoder(nn.Module):\n def __init__(self):\n super(autoencoder, self).__init__()\n self.encoder = nn.Sequential(\n nn.Linear(828, 100),\n nn.ReLU(True),\n nn.Linear(100, red_dim),\n nn.ReLU(True))\n self.decoder = nn.Sequential(\n nn.Linear(red_dim, 100),\n nn.ReLU(True),\n nn.Linear(100, 828),\n nn.ReLU(True))\n\n def bottle(self,x):\n x=self.encoder(x)\n return x\n\n def forward(self, x):\n x = self.encoder(x)\n x = self.decoder(x)\n return x\n\nmodel = autoencoder()\ncriterion = nn.MSELoss()\noptimizer = torch.optim.Adam(\n model.parameters(), lr=learning_rate, weight_decay=1e-5)\n\nbest_model=model\nprev_loss=10\n\nfor epoch in range(num_epochs):\n for data in dataloader:\n #print (data)\n img, clas = data\n #print (cla.shape)\n img = img.view(img.size(0), -1)\n img = Variable(img)\n #print (img.shape),128,784\n # ===================forward=====================\n output = model(img)\n # 128,784\n loss = criterion(output, img)\n # ===================backward====================\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n # 
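(hedged aside) the five per-class feature-loading blocks near the top of this\n        # file repeat identical logic and could be collapsed into one loop over\n        # (folder, label) pairs, e.g. (illustrative names, not in the original):\n        #     for label, cls in enumerate(['coast', 'forest', 'highway', 'street', 'tallbuilding']):\n        #         folder = os.path.join(base_path, cls)\n        #         for fname in os.listdir(folder):\n        #             with open(os.path.join(folder, fname)) as fh:\n        #                 values = fh.read().split()   # same tokens as split on newlines then spaces\n        #             data1[ji] = np.float_(values + [label])\n        #             ji += 1\n        # 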
===================log========================\n \n if epoch % 10 == 0:\n x = to_img(img.cpu().data)\n x_hat = to_img(output.cpu().data)\n save_image(x, './mlp_img/x_{}.png'.format(epoch))\n save_image(x_hat, './mlp_img/x_hat_{}.png'.format(epoch))\n\n loss=criterion(model(features),features)\n print(prev_loss)\n print(loss.data)\n if prev_loss-loss.data < 0.0001:\n \tbreak\n\n if loss.data < prev_loss:\n best_model=model\n prev_loss=loss.data\n\n print('epoch [{}/{}], loss:{:.4f}'\n .format(epoch + 1, num_epochs, loss.data))\n \t\n\n\n#torch.save(best_model.state_dict(), './sim_autoencoder1.pth')\nprint ('model saved')\nmodel.load_state_dict(torch.load('./sim_autoencoder1.pth'))\nmodel.eval()\n\ntargets=torch.LongTensor(tr[:int(0.7*1596)])\nfeatures=torch.Tensor(data1[:int(0.7*1596),:828])\ntargets_t=torch.Tensor(tr[int(0.7*1596):])\nfeatures_t=torch.Tensor(data1[int(0.7*1596):,:828])\n\nclass mlffn(nn.Module):\n def __init__(self):\n super(mlffn, self).__init__()\n self.encoder = nn.Sequential(\n nn.Linear(red_dim, 70),\n nn.ReLU(True),\n nn.Linear(70, 5),\n nn.Softmax(1))\n\n def forward(self, x):\n x = self.encoder(x)\n return x\n\nprev_loss=10\nnum_epochs=200\nclassif=mlffn()\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(\n classif.parameters(), lr=0.001, weight_decay=1e-5)\nbest_model=classif\n\ncm=np.zeros(shape=(5,5))\n\nfor epoch in range(num_epochs):\n for data in dataloader:\n #print (data)\n img, clas = data\n img = img.view(img.size(0), -1)\n img = Variable(img)\n #print (img.shape),128,784\n #print (clas)\n clas=clas.type(torch.LongTensor)\n # ===================forward=====================\n img = model.bottle(img)\n #print (img.shape)\n output = classif(img)\n #print (output[0][1].item())\n # 128,784\n loss = criterion(output, clas)\n # ===================backward====================\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n # ===================log========================\n loss=criterion(classif(model.bottle(features)),targets)\n if prev_loss-loss.data < 0.0001:\n \tbreak\n\n if loss.data < prev_loss:\n best_model=classif\n prev_loss=loss.data\n\n feat=Variable(features_t)\n output=model.bottle(feat)\n output=classif(output)\n targ=targets_t.type(torch.LongTensor)\n tot=0\n corr=0\n #print (targ[0].item())\n cm=np.zeros(shape=(5,5))\n for i in range(targ.shape[0]):\n in1=0\n ma=0\n for j in range(output.shape[1]):\n if (output[i][j].item() > ma):\n ma=output[i][j].item()\n in1=j\n\n if (targ[i].item() == in1):\n corr=corr+1\n cm[targ[i].item()][in1]=cm[targ[i].item()][in1]+1\n #print (in1,targ[i].item())\n tot=tot+1\n\n corr=corr/tot\n print('epoch [{}/{}], loss:{:.4f} ,accuracy:{:.4f}'\n .format(epoch + 1, num_epochs, loss.data,corr))\ntorch.save(best_model.state_dict(), './sim_autoencoder2.pth')\nfor i in range(5):\n te=0\n for j in range(5):\n te=te+cm[i][j]\n for j in range(5):\n cm[i][j]=cm[i][j]/te\t\n\nprint(cm)","repo_name":"ruc98/CS7015-Deep-Learning-Assignments","sub_path":"Assignment2/Code/task_1_ae.py","file_name":"task_1_ae.py","file_ext":"py","file_size_in_byte":7532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23549523261","text":"def flips(s, k):\n if len(s) < k:\n for pancake in s:\n if pancake==\"-\":\n return \"IMPOSSIBLE\"\n return 0\n else:\n if s[0]==\"-\":\n a = \"\"\n for i in range(k):\n if s[i]==\"-\":\n a += \"+\"\n else:\n a += \"-\"\n new = a + s[k:]\n subresult = flips(new, k)\n if subresult==\"IMPOSSIBLE\":\n return \"IMPOSSIBLE\"\n 
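# (hedged aside) this recursion is roughly quadratic because each call\n            # slices the string; the standard greedy is a single left-to-right pass,\n            # since a '-' at position i can only be fixed by the window starting at i:\n            #     def flips_greedy(s, k):\n            #         state = [c == '+' for c in s]\n            #         count = 0\n            #         for i in range(len(state) - k + 1):\n            #             if not state[i]:\n            #                 for j in range(i, i + k):\n            #                     state[j] = not state[j]\n            #                 count += 1\n            #         return count if all(state) else 'IMPOSSIBLE'\n            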
else:\n return subresult+1\n else:\n subresult = flips(s[1:], k)\n if subresult==\"IMPOSSIBLE\":\n return \"IMPOSSIBLE\"\n else:\n return subresult\n\n# input() reads a string with a line of input, stripping the '\\n' (newline) at the end.\n# This is all you need for most Google Code Jam problems.\nt = int(input()) # read a line with a single integer\nfor i in range(1, t + 1):\n s, k = input().split(\" \")\n print(\"Case #{}: {}\".format(i, flips(s, int(k))))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/875.py","file_name":"875.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34848265638","text":"A,B,W = map(int,input().split())\r\nresult = []\r\nfor a in range(A,B+1):\r\n if W*1000 % a == 0:\r\n result.append(int(W*1000 / a))\r\nif result == []:\r\n print(\"UNSATISFIABLE\")\r\nelse:\r\n print(min(result),max(result))\r\n\r\n#公式解説#\r\n# import math\r\n\r\n# a,b,w=map(int,input().split())\r\n# upper=int(math.floor(1000*w/a))\r\n# lower=int(math.ceil(1000*w/b))\r\n\r\n# if lower>upper:\r\n# print(\"UNSATISFIABLE\")\r\n# else:\r\n# print(lower,upper)\r\n","repo_name":"haruka20010217/AtCoder","sub_path":"abc195/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14117425562","text":"import msal\nimport configparser\n\nconfig = configparser.ConfigParser()\nconfig.read(\"cfg.ini\")\nconfig = config[\"graph\"]\n\napp = msal.ConfidentialClientApplication(\n config[\"client_id\"], authority=config[\"authority\"],\n client_credential=config[\"secret\"])\n\nprint(\"get token\")\nresult = app.acquire_token_for_client(scopes=[config[\"scope\"]])\n","repo_name":"JSaugsburg/graph","sub_path":"backup_pw_db.py","file_name":"backup_pw_db.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7347392270","text":"def solution(fees, records):\n answer = []\n basic_time, basic_fee, time, fee = fees\n dic = dict()\n cars = []\n for record in records:\n t, num, io = record.split()\n a, b = map(int, t.split(\":\"))\n t = a*60 + b\n if num not in dic:\n cars.append(num)\n dic[num] = [0]\n if io == 'IN':\n dic[num].append(t)\n else:\n n = dic[num].pop()\n dic[num][0] += t - n\n T = 23*60 + 59\n\n for key in cars:\n if len(dic[key]) == 2:\n n = dic[key].pop()\n dic[key][0] += T - n\n\n cars.sort()\n for car in cars:\n t = dic[car][0]\n t = max(0, t - basic_time)\n if t % time:\n t += time\n answer.append(fee*(t // time) + basic_fee)\n\n return answer\n","repo_name":"soulchicken/crush-programmers-cote","sub_path":"Python/Level_2/42_주차 요금 계산.py","file_name":"42_주차 요금 계산.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20789849512","text":"from tkinter import ttk, constants\nfrom services.note_service import note_service\n\n\nclass NoteListView:\n \"\"\"Näkymä, joka hoitaa muistiinpanojen listauksen\n \"\"\"\n def __init__(self, root, notes):\n \"\"\"Konstruktori, joka tekee mustiinpanojen listanäkymän\n\n Args:\n root: tkinter-rakenne, jossa näkymä luodaan\n notes: näytettävä lista Diary-olioiden note-osasta\n \"\"\"\n self._root = root\n self._notes = notes\n self._window = None\n\n self._do_notes_window()\n\n def _do_notes_window(self):\n 
self._window = ttk.Frame(master=self._root)\n\n for note in self._notes:\n self._do_note_thing(note)\n\n def pack(self):\n \"\"\"Näyttää ikkunan\n \"\"\"\n self._window.pack(fill=constants.X)\n\n def destroy(self):\n \"\"\"Tuhoaa ikkunan\n \"\"\"\n self._window.destroy()\n\n def _do_note_thing(self, note):\n thing_frame = ttk.Frame(master=self._window)\n label = ttk.Label(master=thing_frame, text=note)\n\n label.grid(row=0, column=0, padx=4, pady=10, sticky=constants.W)\n\n thing_frame.grid_columnconfigure(0, weight=1)\n thing_frame.pack(fill=constants.X)\n\n\nclass NotesView:\n \"\"\"Näkymä, joka vastaa muistiinpanojen lisäyksestä ja näyttämisestä\n \"\"\"\n def __init__(self, root, handle_logout):\n \"\"\"Konstruktori, joka tekee muistiinpanonäkymän\n\n Args:\n root: tkinter-rakenne, jossa näkymä luodaan\n handle_logout (arvo): kutsutaan, kun kirjaudutaan ulos\n \"\"\"\n self._root = root\n self._handle_logout = handle_logout\n self._user = note_service.see_current_user()\n self._window = None\n self._create_note_entry = None\n self._note_list_frame = None\n self._note_list_look = None\n\n self._do_notes_window()\n\n def pack(self):\n \"\"\"Näyttää ikkunan\n \"\"\"\n self._window.pack(fill=constants.X)\n\n def destroy(self):\n \"\"\"Tuhoaa ikkunan\n \"\"\"\n self._window.destroy()\n\n def _logout_helper(self):\n note_service.logout()\n self._handle_logout()\n\n def _do_note_list(self):\n if self._note_list_look:\n self._note_list_look.destroy()\n\n notes = note_service.get_notes()\n\n self._note_list_look = NoteListView(self._note_list_frame, notes)\n\n self._note_list_look.pack()\n\n def _do_header(self):\n user_label = ttk.Label(\n master=self._window, text=f\"Kirjautuneena sisään käyttäjällä {self._user[1]}\")\n\n logout_button = ttk.Button(\n master=self._window, text=\"Uloskirjautuminen\", command=self._logout_helper)\n\n user_label.grid(row=0, column=0, padx=4, pady=10, sticky=constants.W)\n\n logout_button.grid(row=0, column=1, padx=4,\n pady=10, sticky=(constants.E, constants.W))\n\n def _handle_create_note(self):\n note_inside = self._create_note_entry.get()\n\n if note_inside:\n note_service.diary_note(note_inside)\n self._do_note_list()\n self._create_note_entry.delete(0, constants.END)\n\n def _do_footer(self):\n self._create_note_entry = ttk.Entry(master=self._window)\n\n create_note_button = ttk.Button(\n master=self._window, text=\"Kirjaa\", command=self._handle_create_note)\n\n self._create_note_entry.grid(\n row=2, column=0, padx=4, pady=10, sticky=(constants.E, constants.W))\n\n create_note_button.grid(row=2, column=1, padx=4,\n pady=5, sticky=(constants.E, constants.W))\n\n def _do_notes_window(self):\n self._window = ttk.Frame(master=self._root)\n self._note_list_frame = ttk.Frame(master=self._window)\n\n self._do_header()\n self._do_note_list()\n self._do_footer()\n\n self._note_list_frame.grid(\n row=1, column=0, columnspan=2, sticky=(constants.E, constants.W))\n\n self._window.grid_columnconfigure(0, weight=1, minsize=500)\n self._window.grid_columnconfigure(1, weight=0)\n","repo_name":"sansilla/ot-harjoitustyo","sub_path":"src/ui/notes_look.py","file_name":"notes_look.py","file_ext":"py","file_size_in_byte":4099,"program_lang":"python","lang":"fi","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34585517139","text":"from neomodel import (\n StructuredNode,\n StringProperty,\n DateTimeProperty,\n EmailProperty,\n BooleanProperty, UniqueIdProperty\n)\n\nfrom .nodeutils import NodeUtils\n\n\nclass Annotation(StructuredNode, NodeUtils):\n\n 
uid = UniqueIdProperty()\n    node_id = StringProperty(index=True)\n    annotator_name = StringProperty()\n    affiliation = StringProperty()\n    email = EmailProperty(required=True)\n    # creation_date = DateTimeProperty(default_now=True)\n    token = StringProperty()\n    is_verified = BooleanProperty(default=False)\n\n\n    @property\n    def serialize(self):\n        # serialize only fields that exist on this model (creation_date is\n        # commented out above, so it is deliberately not included here)\n        return {\n            'node_properties': {\n                'node_id': self.node_id,\n                'annotator_name': self.annotator_name,\n                'annotator_email': self.email\n            }\n        }","repo_name":"lidija-jovanovska/ml-algorithms-annotator","sub_path":"web-annotator/controller/api/models/annotation.py","file_name":"annotation.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"40497852057","text":"'''\nGiven an array of n integers nums, a 132 pattern is a subsequence of three integers nums[i], nums[j] and nums[k] such that i < j < k and nums[i] < nums[k] < nums[j].\n\nReturn true if there is a 132 pattern in nums, otherwise, return false.\n\n \n\nExample 1:\n\nInput: nums = [1,2,3,4]\nOutput: false\nExplanation: There is no 132 pattern in the sequence.\n\nExample 2:\n\nInput: nums = [3,1,4,2]\nOutput: true\nExplanation: There is a 132 pattern in the sequence: [1, 4, 2].\n\nExample 3:\n\nInput: nums = [-1,3,2,0]\nOutput: true\nExplanation: There are three 132 patterns in the sequence: [-1, 3, 2], [-1, 3, 0] and [-1, 2, 0].\n\n \n\nConstraints:\n\n    n == nums.length\n    1 <= n <= 2 * 10^5\n    -10^9 <= nums[i] <= 10^9\n\n'''\n\nclass Solution:\n    def find132pattern(self, nums: List[int]) -> bool:\n        stack = [] # pair : (num, minLeft)\n        currMin = nums[0]\n\n        for n in nums[1:] :\n            while stack and stack[-1][0] <= n : # monotonically decreasing\n                stack.pop()\n\n            # two cases:\n            # 1. if the stack is non-empty and n > minLeft of the top, a 132 pattern has occurred\n            # 2. in any case, append (n, currMin) and update currMin = min(currMin, n)\n
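            # (hedged cross-check, O(n^3), small inputs only):\n            #     def has_132_bruteforce(nums):\n            #         n = len(nums)\n            #         return any(nums[i] < nums[k] < nums[j]\n            #                    for i in range(n)\n            #                    for j in range(i + 1, n)\n            #                    for k in range(j + 1, n))\n            #     has_132_bruteforce([3, 1, 4, 2])  # True:  (1, 4, 2)\n            #     has_132_bruteforce([1, 2, 3, 4])  # False: strictly increasing\n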
            # case 1\n            if stack and n > stack[-1][1] :\n                return True\n\n            # case 2\n            stack.append((n,currMin)) \n            currMin = min(currMin, n)\n\n        return False\n\n# failed attempt \n    # stack = []\n\n    # for n in nums :\n    #     while stack and stack[-1] < n :\n    #         if len(stack) >= 2 :\n    #             return True \n    #         else :\n    #             stack.pop() \n    #     stack.append(n)\n    \n    # return False","repo_name":"lonebots/python-programming","sub_path":"leet-code/stack/456-132-pattern.py","file_name":"456-132-pattern.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} {"seq_id":"73189913793","text":"import unittest\nfrom py_CLI_menus.object_return_menu import ObjectReturnMenu \n\nclass TestObjectReturnMenu(unittest.TestCase):\n\n    def setUp(self):\n\n        self.list_of_options_1 = [\n            \"First Option - choose this\",\n            \"Second Option\",\n            \"Third Option\",\n            \"Fourth Option\"\n        ]\n\n        self.list_of_options_2 = [\n            \"First Option\",\n            \"Second Option - choose this\",\n            \"Third Option\",\n            \"Fourth Option\"\n\n        ]\n        self.list_of_options_3 = [\n            \"First Option\",\n            \"Second Option\",\n            \"Third Option\",\n            \"Fourth Option - choose this one\"\n        ]\n        self.list_of_options_4 = [\n            \"CHOOSE Q\",\n            \"CHOOSE Q\",\n            \"CHOOSE Q\",\n            \"CHOOSE Q\"\n        ]\n        self.menu_header = \"Select one of the following options: \"\n        self.menu_header_2 = \"Choose Q this time: \"\n\n    def test_choose_first_option(self):\n\n        string_menu = ObjectReturnMenu(self.list_of_options_1, self.menu_header)\n        self.assertEqual(\"First Option - choose this\", string_menu.choose())\n\n    def test_choose_middle_option(self):\n\n        string_menu = ObjectReturnMenu(self.list_of_options_2, self.menu_header)\n        self.assertEqual(\"Second Option - choose this\", string_menu.choose())\n\n    def test_choose_last_option(self):\n\n        string_menu = ObjectReturnMenu(self.list_of_options_3, self.menu_header)\n        self.assertEqual(\"Fourth Option - choose this one\", string_menu.choose())\n\n    def test_choose_q(self):\n        string_menu = ObjectReturnMenu(self.list_of_options_4, self.menu_header_2)\n        self.assertEqual(None, string_menu.choose())\n\n    def test_append_option(self):\n        string_menu = ObjectReturnMenu(self.list_of_options_1, self.menu_header)\n        self.list_of_options_1.append(\"New fifth option\")\n        self.assertEqual(5, len(string_menu.object_list))\n\n    def test_confirm_loop(self):\n        string_menu = ObjectReturnMenu(self.list_of_options_1, \"Check y, n and other inputs to confirm work as expected: \")\n        self.assertEqual(\"First Option - choose this\", string_menu.choose(True))\n","repo_name":"IXIXIXIXIXIXIXIXIX/pyMenus","sub_path":"tests/object_return_menu_test.py","file_name":"object_return_menu_test.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} {"seq_id":"41482316672","text":"from nptorch.tensor import Tensor, inf\n\n\ndef clip_grad_value_(parameters, clip_value):\n    \"\"\"\n    Value-based gradient clipping\n    \"\"\"\n    if isinstance(parameters, Tensor):\n        parameters = [parameters]\n    for p in filter(lambda x: x.grad is not None, parameters):\n        p.grad.clamp_(min=-clip_value, max=clip_value)\n\n\ndef clip_grad_norm_(parameters, max_norm, norm_type=2.):\n    \"\"\"\n    Norm-based gradient clipping\n    \"\"\"\n    if isinstance(parameters, Tensor):\n        parameters = [parameters]\n    parameters = list(filter(lambda x: x.grad is not None, parameters))\n    if norm_type == inf:\n        norm = max(p.grad.abs().max() for p in 
parameters)\n else:\n norm = sum(p.grad.norm(p=norm_type).item() ** norm_type for p in parameters)\n norm = norm ** (1. / norm_type)\n clip_coef = max_norm / (norm + 1e-12)\n if clip_coef < 1:\n for p in parameters:\n p.grad *= clip_coef\n","repo_name":"windshadow233/autograd-with-numpy","sub_path":"nptorch/nn/utils/clip_grad.py","file_name":"clip_grad.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"15801120834","text":"import sys\nimport cv2\nimport PID\nfrom random import randint\nimport matplotlib.pyplot as plt\nplt.ion()\nimport numpy as np\nimport bluetooth as bt\npid = PID.PID(0.2,10,0) # P I D\npid.SetPoint = 0\npid.setSampleTime(0.1)\nforwardspeed = '%03d' % 220\n################### Connection #############################\n\nsearch = False\nif search == True:\n print('Searching for devices...')\n print(\"\")\n nearby_devices = bt.discover_devices()\n #Run through all the devices found and list their name\n num = 0\n \n for i in nearby_devices:\n\t num+=1\n\t print(num , \": \" , bt.lookup_name( i ))\n print('Select your device by entering its coresponding number...')\n selection = input(\"> \") - 1\n print('You have selected - '+bt.lookup_name(nearby_devices[selection]))\n\n bd_addr = nearby_devices[selection]\nelse:\n bd_addr = '98:D3:51:FD:81:AC' # T-Bot-Demo\n print('connecting...')\nerror = 1\nport = 1\nwhile error:\n try:\n sock = bt.BluetoothSocket( bt.RFCOMM )\n sock.connect((bd_addr,1))\n sock.settimeout(5)\n error = 0\n print('connected to '+bd_addr)\n except:\n print('Trying again...')\n sock.close()\n error = 1\n \n\n############### Functions #######################\n\ndef turn(v0,v1,vto):\n ang = -(np.arctan2(vto[0]-v1[0],vto[1]-v1[1])-np.arctan2(v1[0]-v0[0],v1[1]-v0[1]))*180/np.pi\n return (np.mod(ang+180.0,360.0)-180.0)\n\n\ndef distance(vto,v1):\n return np.linalg.norm([vto[0]-v1[0],vto[1]-v1[1]])\n \n\ndef send(sendstr):\n try:\n builtstr = chr(0X02)+sendstr+chr(0X03)\n sock.send(builtstr.encode(encoding='utf-8'))\n except:\n sock.close()\n sys.exit()\n\n######### Get image and set coordinates ##################\n\n\n\ntracker = cv2.TrackerCSRT_create()\ncap = cv2.VideoCapture(0)\n#cap.set(cv2.CAP_PROP_AUTOFOCUS, 0)\n#cap.set(cv2.CAP_PROP_FRAME_WIDTH, 720)\n#cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 405)\n\n#cap.set(0,1280)\nsuccess, frame = cap.read()\ncap.release()\n\n########### Create mask for coordinates #################\nmask = np.ones(frame.shape)[:,:,0]\nprint(mask.shape)\nmaskdx, maskdy = 2,2\naa = [[int(frame.shape[1]/2), int(frame.shape[0]/2)]]\n#aa = map(list,np.array(aa).astype(int))\nprint(aa)\nfor ii in range(len(aa)):\n mask[np.meshgrid(np.r_[aa[ii][1]-maskdx:aa[ii][1]+maskdx],np.r_[aa[ii][0]-maskdy:aa[ii][0]+maskdy])]=0\n\n\n################ Setup some counters ####################\nii = 0\niii = 0\npathindex = 0\nv0 = [0,0]\nv1 = [0,0]\n\n\n# Create a video capture object\ncap = cv2.VideoCapture(0)\n#cap.set(cv2.CAP_PROP_AUTOFOCUS, 0)\n#cap.set(cv2.CAP_PROP_FRAME_WIDTH, 720)\n#cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 405)\n#cap.set(0,1280)\n\n############ Start main loop #############################\n\nif __name__ == '__main__':\n\n # Read first frame to select ROI\n success, frame = cap.read()\n # quit if unable to read the from camera\n if not success:\n print('Failed to capture video')\n sys.exit(1)\n\n ## Select boxes\n bboxes = []\n colors = [] \n\n # OpenCV's selectROI function doesn't work for selecting multiple objects in Python\n # So we will 
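pause for a quick numeric sanity check first:\n    # (hedged numeric check of the angle-wrapping trick used in turn() above:\n    # np.mod(ang + 180.0, 360.0) - 180.0 maps any angle into [-180, 180))\n    #     np.mod(270.0 + 180.0, 360.0) - 180.0   # -> -90.0\n    #     np.mod(-270.0 + 180.0, 360.0) - 180.0  # -> 90.0\n    # So we will 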
call this function in a loop till we are done selecting all objects\n\n while True:\n # draw bounding boxes over objects\n # selectROI's default behaviour is to draw box starting from the center\n # when fromCenter is set to false, you can draw box starting from top left corner\n bbox = cv2.selectROI('MultiTracker', frame)\n bboxes.append(bbox)\n #colors.append((randint(64, 255), randint(64, 255), randint(64, 255)))\n colors.append((0,0,0))\n print(\"Press q to quit selecting boxes and start tracking\")\n print(\"Press any other key to select next object\")\n k = cv2.waitKey(0) & 0xFF\n if (k == 113): # q is pressed\n break\n \n print('Selected bounding boxes {}'.format(bboxes))\n\n ## Initialize MultiTracker\n multiTracker = cv2.MultiTracker_create()\n\n for bbox in bboxes:\n multiTracker.add(tracker, frame, bbox)\n\n\n # Process video stream and track objects\n while cap.isOpened():\n success, frame = cap.read()\n if not success:\n break\n \n # get updated location of objects in subsequent frames\n success, boxes = multiTracker.update(frame)\n\n # draw tracked objects\n for i, newbox in enumerate(boxes):\n p1 = (int(newbox[0]), int(newbox[1]))\n p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))\n cv2.rectangle(frame, p1, p2, colors[i], 2, 1)\n\n #extract coordinates from tracker\n if ii == 0:\n v0 = [(p1[0]+p2[0])/2.0,(p1[1]+p2[1])/2.0]\n ii = 1\n else:\n v1 = [(p1[0]+p2[0])/2.0,(p1[1]+p2[1])/2.0]\n ii = 0\n\n\n # show frame\n frame[:,:,1]=frame[:,:,1]*mask\n cv2.imshow('MultiTracker', frame)\n\n #extract coordinates from tracker \n\n\n vto = aa[pathindex]\n _distance = distance(vto,v1)\n if _distance < 30:\n pathindex += 1\n if pathindex == len(aa):\n send('200200Z')\n print('Done')\n break\n \n angle = turn(v0,v1,vto)\n pid.update(angle)\n rotspeed = pid.output+200\n rspeedfactor = 30\n if rotspeed >=200 + rspeedfactor:\n rotspeed = 200 + rspeedfactor\n elif rotspeed <=200 - rspeedfactor:\n rotspeed = 200 - rspeedfactor\n \n forwardspeed = 220+(_distance/np.max(frame.shape))*50\n if forwardspeed > 300:\n forwardspeed = 300\n \n if distance(v0,v1) < 1:\n rotspeed = 200\n forwardspeed = 225\n\n rotspeed = '%03d' % rotspeed\n forwardspeed = '%03d' % forwardspeed\n send(rotspeed+forwardspeed+'Z')\n \n\n # quit on ESC button\n if cv2.waitKey(1) & 0xFF == 27: # Esc pressed\n send('200200Z')\n \n break\n\n","repo_name":"garethnisbet/T-BOTS","sub_path":"Python/Development/T-Bot_Tracking/TBot_Follow.py","file_name":"TBot_Follow.py","file_ext":"py","file_size_in_byte":5993,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"61"} +{"seq_id":"37108691537","text":"\"\"\"\nThe Most advanced Pentti there is! 
He has the map and will do a Breadth First Search\nto find the shortest route before moving towards the finish!\n\"\"\"\nfrom copy import deepcopy\nfrom collections import Counter\nfrom queue import Queue\nfrom typing import List, Tuple\n\nfrom character.trackerpentti import TrackerPentti\nfrom character.usablepentti import UsablePentti\nfrom map.map import Map\nfrom util.constants import VISITED\n\nclass BFSPentti(UsablePentti):\n    \"\"\"\n    Pentti has a map of the maze and will find the optimal route before starting\n    his journey!\n\n    Due to the multidirectional nature of BFS, this class deviates from the single-direction\n    Pentti implementations as it needs to track multiple possible routes at the same time.\n    \"\"\"\n\n    def __init__(self, map: Map) -> None:\n        self._map: Map = map\n        self._history: List[Map] = []\n\n    def escape_maze(self, limit: int = 10000):\n        self._bfs_escape()\n        print(self._map)\n        print(f\"Pentti escaped in {self._count_path_length()} steps! Limit was {limit}\")\n        print(f\"Search checked {len(self._history)} positions!\")\n\n\n    def _bfs_escape(self) -> None:\n        \"\"\"\n        BFS Search with a queue. A new Pentti is sent in each direction, keeping\n        track of his own movements, and marking the positions he visited into the\n        common map.\n        \"\"\"\n        queue: Queue = Queue()\n        queue.put(TrackerPentti(deepcopy(self._map)))\n\n        while not queue.empty():  # a Queue object is always truthy, so test empty() explicitly\n            pentti: TrackerPentti = queue.get()\n\n            if self._is_visited(pentti.position()):\n                # Skip if queued move was completed by earlier queue object\n                continue\n\n            if pentti._win_condition():\n                return self._update_win(pentti)\n\n            self._document_movements(pentti)\n\n            for adjacent in [pentti.left, pentti.right, pentti.up, pentti.down]:\n                if self._is_unvisited_and_available(adjacent):\n                    pentti._map.start_position = adjacent\n                    queue.put(TrackerPentti(deepcopy(pentti._map)))\n\n    def _is_visited(self, coords: Tuple[int, int]):\n        return self._map[coords] == VISITED\n\n    def _mark_visited(self, coords: Tuple[int, int]):\n        self._map[coords] = VISITED\n\n    def _is_unvisited_and_available(self, coords: Tuple[int, int]):\n        return self._map.is_available_position(*coords) and not self._is_visited(coords)\n\n    def _update_win(self, pentti: TrackerPentti):\n        pentti._mark_winning_position()\n        self._map = pentti._map\n\n    def _document_movements(self, pentti: TrackerPentti):\n        self._history.append(deepcopy(self._map))\n        self._mark_visited(pentti.position())\n        pentti._mark_visited()\n\n    def _count_path_length(self) -> int:\n        return Counter(str(self._map))[\"+\"] + 1","repo_name":"Mnsk44/pentti-the-escape-artist","sub_path":"escapeartist/character/bfspentti.py","file_name":"bfspentti.py","file_ext":"py","file_size_in_byte":2825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"4879459908","text":"from django.db import models\n\nclass Author(models.Model):\n    name = models.CharField(max_length=100, blank=False)\n    email = models.EmailField(blank=False, primary_key=True)\n\n\nclass Review(models.Model):\n
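    # (hedged, standalone sketch of the BFS idea used by BFSPentti above: the\n    # first time a cell is dequeued it was reached via a shortest path)\n    #     from collections import deque\n    #     def bfs_steps(grid, start, goal):\n    #         q, seen = deque([(start, 0)]), {start}\n    #         while q:\n    #             (r, c), d = q.popleft()\n    #             if (r, c) == goal:\n    #                 return d\n    #             for nr, nc in ((r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)):\n    #                 if (0 <= nr < len(grid) and 0 <= nc < len(grid[0])\n    #                         and grid[nr][nc] != '#' and (nr, nc) not in seen):\n    #                     seen.add((nr, nc))\n    #                     q.append(((nr, nc), d + 1))\n    #         return None\n    #     bfs_steps(['..#', '.#.', '...'], (0, 0), (2, 2))  # -> 4\n    content = 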
models.TextField()\n issued = models.DateTimeField()\n\n recipe = models.ForeignKey('Recipe', related_name='comments_recipe', on_delete=models.CASCADE)\n author = models.ForeignKey('Author', on_delete=models.CASCADE)\n\n\n","repo_name":"KarolinaMalcheuskaya/SimpleRecipes","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10221390960","text":"from math import log2, sqrt\nimport torch\nfrom torch import nn, einsum\nimport torch.nn.functional as F\n\nfrom einops import rearrange\nfrom axial_positional_embedding import AxialPositionalEmbedding\nfrom dalle_pytorch.transformer import Transformer\n\n# helpers\n\ndef exists(val):\n return val is not None\n\ndef default(val, d):\n return val if exists(val) else d\n\ndef always(val):\n def inner(*args, **kwargs):\n return val\n return inner\n\ndef is_empty(t):\n return t.nelement() == 0\n\ndef masked_mean(t, mask, dim = 1):\n t = t.masked_fill(~mask[:, :, None], 0.)\n return t.sum(dim = 1) / mask.sum(dim = 1)[..., None]\n\ndef eval_decorator(fn):\n def inner(model, *args, **kwargs):\n was_training = model.training\n model.eval()\n out = fn(model, *args, **kwargs)\n model.train(was_training)\n return out\n return inner\n\n# sampling helpers\n\ndef top_k(logits, thres = 0.5):\n num_logits = logits.shape[-1]\n k = max(int((1 - thres) * num_logits), 1)\n val, ind = torch.topk(logits, k)\n probs = torch.full_like(logits, float('-inf'))\n probs.scatter_(1, ind, val)\n return probs\n\n# discrete vae class\n\nclass ResBlock(nn.Module):\n def __init__(self, chan):\n super().__init__()\n self.net = nn.Sequential(\n nn.Conv2d(chan, chan, 3, padding = 1),\n nn.ReLU(),\n nn.Conv2d(chan, chan, 3, padding = 1),\n nn.ReLU(),\n nn.Conv2d(chan, chan, 1)\n )\n\n def forward(self, x):\n return self.net(x) + x\n\nclass DiscreteVAE(nn.Module):\n def __init__(\n self,\n image_size = 256,\n num_tokens = 512,\n codebook_dim = 512,\n num_layers = 3,\n num_resnet_blocks = 0,\n hidden_dim = 64,\n channels = 3,\n temperature = 0.9\n ):\n super().__init__()\n assert log2(image_size).is_integer(), 'image size must be a power of 2'\n assert num_layers >= 1, 'number of layers must be greater than or equal to 1'\n has_resblocks = num_resnet_blocks > 0\n\n self.image_size = image_size\n self.num_tokens = num_tokens\n self.num_layers = num_layers\n self.temperature = temperature\n self.codebook = nn.Embedding(num_tokens, codebook_dim)\n \n hdim = hidden_dim\n\n enc_chans = [hidden_dim] * num_layers\n dec_chans = list(reversed(enc_chans))\n enc_chans = [channels, *enc_chans]\n dec_init_chan = codebook_dim if not has_resblocks else dec_chans[0]\n dec_chans = [dec_init_chan, *dec_chans]\n enc_chans_io, dec_chans_io = map(lambda t: list(zip(t[:-1], t[1:])), (enc_chans, dec_chans))\n enc_layers = []\n dec_layers = []\n\n for (enc_in, enc_out), (dec_in, dec_out) in zip(enc_chans_io, dec_chans_io):\n enc_layers.append(nn.Sequential(nn.Conv2d(enc_in, enc_out, 4, stride = 2, padding = 1), nn.ReLU()))\n dec_layers.append(nn.Sequential(nn.ConvTranspose2d(dec_in, dec_out, 4, stride = 2, padding = 1), nn.ReLU()))\n\n for _ in range(num_resnet_blocks):\n dec_layers.insert(0, ResBlock(dec_chans[1]))\n enc_layers.append(ResBlock(enc_chans[-1]))\n\n if num_resnet_blocks > 0:\n dec_layers.insert(0, nn.Conv2d(codebook_dim, dec_chans[1], 1))\n\n enc_layers.append(nn.Conv2d(enc_chans[-1], num_tokens, 1))\n dec_layers.append(nn.Conv2d(dec_chans[-1], 
channels, 1))\n\n self.encoder = nn.Sequential(*enc_layers)\n self.decoder = nn.Sequential(*dec_layers)\n\n @torch.no_grad()\n def get_codebook_indices(self, images):\n logits = self.forward(images, return_logits = True)\n codebook_indices = logits.argmax(dim = 1).flatten(1)\n return codebook_indices\n\n def decode(\n self,\n img_seq\n ):\n image_embeds = self.codebook(img_seq)\n b, n, d = image_embeds.shape\n h = w = int(sqrt(n))\n\n image_embeds = rearrange(image_embeds, 'b (h w) d -> b d h w', h = h, w = w)\n images = self.decoder(image_embeds)\n return images\n\n def forward(\n self,\n img,\n return_recon_loss = False,\n return_logits = False\n ):\n logits = self.encoder(img)\n\n if return_logits:\n return logits # return logits for getting hard image indices for DALL-E training\n\n soft_one_hot = F.gumbel_softmax(logits, tau = self.temperature, dim = 1)\n sampled = einsum('b n h w, n d -> b d h w', soft_one_hot, self.codebook.weight)\n out = self.decoder(sampled)\n\n if not return_recon_loss:\n return out\n\n loss = F.mse_loss(img, out)\n return loss\n\n# main classes\n\nclass CLIP(nn.Module):\n def __init__(\n self,\n *,\n dim_text = 512,\n dim_image = 512,\n dim_latent = 512,\n num_text_tokens = 10000,\n text_enc_depth = 6,\n text_seq_len = 256,\n text_heads = 8,\n num_visual_tokens = 512,\n visual_enc_depth = 6,\n visual_heads = 8,\n visual_image_size = 256,\n visual_patch_size = 32,\n channels = 3\n ):\n super().__init__()\n self.text_emb = nn.Embedding(num_text_tokens, dim_text)\n self.text_pos_emb = nn.Embedding(text_seq_len, dim_text)\n self.text_transformer = Transformer(causal = False, seq_len = text_seq_len, dim = dim_text, depth = text_enc_depth, heads = text_heads)\n self.to_text_latent = nn.Linear(dim_text, dim_latent, bias = False)\n\n assert visual_image_size % visual_patch_size == 0, 'Image dimensions must be divisible by the patch size.'\n num_patches = (visual_image_size // visual_patch_size) ** 2\n patch_dim = channels * visual_patch_size ** 2\n\n self.visual_patch_size = visual_patch_size\n self.to_visual_embedding = nn.Linear(patch_dim, dim_image)\n self.visual_pos_emb = nn.Embedding(num_patches, dim_image)\n self.visual_transformer = Transformer(causal = False, seq_len = num_patches, dim = dim_image, depth = visual_enc_depth, heads = visual_heads)\n self.to_visual_latent = nn.Linear(dim_image, dim_latent, bias = False)\n\n self.temperature = nn.Parameter(torch.tensor(1.))\n\n def forward(\n self,\n text,\n image,\n text_mask = None,\n return_loss = False\n ):\n b, device, p = text.shape[0], text.device, self.visual_patch_size\n\n text_emb = self.text_emb(text)\n text_emb += self.text_pos_emb(torch.arange(text.shape[1], device = device))\n\n image_patches = rearrange(image, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p)\n image_emb = self.to_visual_embedding(image_patches)\n image_emb += self.visual_pos_emb(torch.arange(image_emb.shape[1], device = device))\n\n enc_text = self.text_transformer(text_emb, mask = text_mask)\n enc_image = self.visual_transformer(image_emb)\n\n if exists(text_mask):\n text_latents = masked_mean(enc_text, text_mask, dim = 1)\n else:\n text_latents = enc_text.mean(dim = 1)\n\n image_latents = enc_image.mean(dim = 1)\n\n text_latents = self.to_text_latent(text_latents)\n image_latents = self.to_visual_latent(image_latents)\n\n text_latents, image_latents = map(lambda t: F.normalize(t, p = 2, dim = -1), (text_latents, image_latents))\n\n temp = self.temperature.exp()\n\n if not return_loss:\n sim = einsum('n d, n d -> n', 
text_latents, image_latents) * temp\n return sim\n\n sim = einsum('i d, j d -> i j', text_latents, image_latents) * temp\n labels = torch.arange(b, device = device)\n loss = F.cross_entropy(sim, labels)\n return loss\n\n# main DALL-E class\n\nclass DALLE(nn.Module):\n def __init__(\n self,\n *,\n dim,\n vae,\n num_text_tokens = 10000,\n text_seq_len = 256,\n depth,\n heads = 8,\n dim_head = 64,\n reversible = False,\n attn_dropout = 0.,\n ff_dropout = 0,\n sparse_attn = False\n ):\n super().__init__()\n assert isinstance(vae, DiscreteVAE), 'vae must be an instance of DiscreteVAE'\n\n image_size = vae.image_size\n num_image_tokens = vae.num_tokens\n image_seq_len = (vae.image_size // (2 ** vae.num_layers)) ** 2\n\n self.text_emb = nn.Embedding(num_text_tokens, dim)\n self.image_emb = nn.Embedding(num_image_tokens, dim)\n\n self.text_pos_emb = nn.Embedding(text_seq_len, dim)\n self.image_pos_emb = AxialPositionalEmbedding(dim, axial_shape = (image_size, image_size))\n\n self.num_text_tokens = num_text_tokens # for offsetting logits index and calculating cross entropy loss\n self.num_image_tokens = num_image_tokens\n\n self.text_seq_len = text_seq_len\n self.image_seq_len = image_seq_len\n\n seq_len = text_seq_len + image_seq_len\n total_tokens = num_text_tokens + num_image_tokens + 1 # extra for EOS\n self.total_tokens = total_tokens\n \n self.vae = vae\n if exists(self.vae):\n self.vae = vae\n self.image_emb = vae.codebook\n\n self.transformer = Transformer(\n dim = dim,\n causal = True,\n seq_len = seq_len,\n depth = depth,\n heads = heads,\n dim_head = dim_head,\n reversible = reversible,\n attn_dropout = attn_dropout,\n ff_dropout = ff_dropout,\n sparse_attn = sparse_attn\n )\n\n self.to_logits = nn.Sequential(\n nn.LayerNorm(dim),\n nn.Linear(dim, self.total_tokens),\n )\n\n seq_range = torch.arange(seq_len)\n logits_range = torch.arange(total_tokens)\n\n seq_range = rearrange(seq_range, 'n -> () n ()')\n logits_range = rearrange(logits_range, 'd -> () () d')\n\n logits_mask = (\n ((seq_range >= (text_seq_len - 1)) & (logits_range < num_text_tokens)) |\n ((seq_range < (text_seq_len - 1)) & (logits_range >= num_text_tokens)) |\n ((seq_range != (seq_len - 1)) & (logits_range >= (total_tokens - 1)))\n )\n\n self.register_buffer('logits_mask', logits_mask)\n\n @torch.no_grad()\n @eval_decorator\n def generate_images(\n self,\n text,\n *,\n clip = None,\n mask = None,\n filter_thres = 0.5,\n temperature = 1.\n ):\n vae, text_seq_len, image_seq_len, num_text_tokens = self.vae, self.text_seq_len, self.image_seq_len, self.num_text_tokens\n total_len = text_seq_len + image_seq_len\n\n out = text\n for cur_len in range(text.shape[1], total_len):\n is_image = cur_len >= text_seq_len\n\n text, image = out[:, :text_seq_len], out[:, text_seq_len:]\n\n logits = self(text, image, mask = mask)[:, -1, :]\n\n filtered_logits = top_k(logits, thres = filter_thres)\n probs = F.softmax(filtered_logits / temperature, dim = -1)\n sample = torch.multinomial(probs, 1)\n\n sample -= (num_text_tokens if is_image else 0) # offset sampled token if it is an image token, since logit space is composed of text and then image tokens\n out = torch.cat((out, sample), dim=-1)\n\n if out.shape[1] <= text_seq_len:\n mask = F.pad(mask, (0, 1), value = True)\n\n text_seq = out[:, :text_seq_len]\n\n img_seq = out[:, -image_seq_len:]\n images = vae.decode(img_seq)\n\n if exists(clip):\n scores = clip(text_seq, images, return_loss = False)\n return images, scores\n\n return images\n\n def forward(\n self,\n text,\n image = None,\n 
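# (hedged aside on the CLIP objective above: matched text/image pairs sit on\n        # the diagonal of the similarity matrix, so cross_entropy with labels\n        # arange(b) pulls diagonal entries above the rest of their row)\n        #     sim = torch.eye(4) * 5.0               # pretend perfectly aligned latents\n        #     F.cross_entropy(sim, torch.arange(4))  # ~= 0.02, near-zero loss\n        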
mask = None,\n return_loss = False\n ):\n device = text.device\n eos_token_id = self.total_tokens - 1\n\n tokens = self.text_emb(text)\n tokens += self.text_pos_emb(torch.arange(text.shape[1], device = device))\n\n seq_len = tokens.shape[1]\n\n if exists(image) and not is_empty(image):\n is_raw_image = len(image.shape) == 4\n if is_raw_image:\n image = self.vae.get_codebook_indices(image)\n\n image_len = image.shape[1]\n image_emb = self.image_emb(image)\n image_emb += self.image_pos_emb(image_emb)\n\n tokens = torch.cat((tokens, image_emb), dim = 1)\n\n seq_len += image_len\n if exists(mask):\n mask = F.pad(mask, (0, image_emb.shape[1]), value = True)\n\n out = self.transformer(tokens, mask = mask)\n logits = self.to_logits(out)\n\n # mask logits to make sure text predicts text (except last token), and image predicts image\n mask = self.logits_mask[:, :seq_len]\n max_neg_value = -torch.finfo(logits.dtype).max\n logits.masked_fill_(mask, max_neg_value)\n\n if not return_loss:\n return logits\n\n assert exists(image), 'when training, image must be supplied'\n\n offsetted_image = image + self.num_text_tokens\n labels = torch.cat((text, offsetted_image), dim = 1)\n labels = F.pad(labels, (0, 1), value = eos_token_id) # last token predicts EOS\n loss = F.cross_entropy(rearrange(logits, 'b n c -> b c n'), labels[:, 1:])\n return loss\n","repo_name":"rabienrose/pixiv_scraper","sub_path":"DALLE-pytorch-main/dalle_pytorch/dalle_pytorch.py","file_name":"dalle_pytorch.py","file_ext":"py","file_size_in_byte":13276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24869097387","text":"\"\"\"\nThis module of SYMORO package provides symbolic\nmodeling of robots' dynamics.\n\nThe core symbolic library is sympy.\nNeeded modules : symoro.py, geometry.py, kinematics.py\n\nECN - ARIA1 2013\n\"\"\"\nfrom copy import copy, deepcopy\nimport sympy\nfrom sympy import Matrix\nfrom symoro import Symoro, Init, hat, ZERO\nfrom geometry import compute_screw_transform\nfrom geometry import compute_rot_trans, Transform\nfrom kinematics import compute_vel_acc\nfrom kinematics import compute_omega\n\nchars = 'ABCDEFGHJKLMNPQRSTUVWXYZ'\ninert_names = ('XXR', 'XYR', 'XZR', 'YYR', 'YZR',\n 'ZZR', 'MXR', 'MYR', 'MZR', 'MR')\n\n\ndef Newton_Euler(robo, symo):\n \"\"\"Internal function. 
Computes Inverse Dynamic Model using\n    Newton-Euler formulation\n\n    Parameters\n    ==========\n    robo : Robot\n        Instance of robot description container\n    symo : Symoro\n        Instance of symbolic manager\n    \"\"\"\n    # init external forces\n    Fex = copy(robo.Fex)\n    Nex = copy(robo.Nex)\n    # init transformation\n    antRj, antPj = compute_rot_trans(robo, symo)\n    # init velocities and accelerations\n    w, wdot, vdot, U = compute_vel_acc(robo, symo, antRj, antPj)\n    # init forces vectors\n    F = Init.init_vec(robo)\n    N = Init.init_vec(robo)\n    Fjnt = Init.init_vec(robo)\n    Njnt = Init.init_vec(robo)\n    for j in xrange(1, robo.NL):\n        compute_wrench(robo, symo, j, w, wdot, U, vdot, F, N)\n    for j in reversed(xrange(1, robo.NL)):\n        compute_joint_wrench(robo, symo, j, antRj, antPj, vdot,\n                             Fjnt, Njnt, F, N, Fex, Nex)\n    for j in xrange(1, robo.NL):\n        compute_torque(robo, symo, j, Fjnt, Njnt)\n\n\ndef dynamic_identification_NE(robo):\n    \"\"\"Computes Dynamic Identification Model using\n    Newton-Euler formulation\n\n    Parameters\n    ==========\n    robo : Robot\n        Instance of robot description container\n\n    Returns\n    =======\n    symo.sydi : dictionary\n        Dictionary with the information of all the substitutions\n    \"\"\"\n\n    # init forces vectors\n    Fjnt = Init.init_vec(robo)\n    Njnt = Init.init_vec(robo)\n    # init file output, writing the robot description\n    symo = Symoro()\n    symo.file_open(robo, 'dim')\n    title = \"Dynamic identification model using Newton - Euler Algorithm\"\n    symo.write_params_table(robo, title, inert=True, dynam=True)\n    # init transformation\n    antRj, antPj = compute_rot_trans(robo, symo)\n    # init velocities and accelerations\n    w, wdot, vdot, U = compute_vel_acc(robo, symo, antRj, antPj)\n    # virtual robot with only one non-zero parameter at once\n    robo_tmp = deepcopy(robo)\n    robo_tmp.IA = sympy.zeros(robo.NL, 1)\n    robo_tmp.FV = sympy.zeros(robo.NL, 1)\n    robo_tmp.FS = sympy.zeros(robo.NL, 1)\n    for k in xrange(1, robo.NL):\n        param_vec = robo.get_inert_param(k)\n        F = Init.init_vec(robo)\n        N = Init.init_vec(robo)\n        for i in xrange(10):\n            if param_vec[i] == ZERO:\n                continue\n            # change link names according to current non-zero parameter\n            robo_tmp.num = [str(l) + str(param_vec[i])\n                            for l in xrange(k + 1)]\n            # set the parameter to 1\n            mask = sympy.zeros(10, 1)\n            mask[i] = 1\n            robo_tmp.put_inert_param(mask, k)\n            # compute the total force of the link k\n            compute_wrench(robo_tmp, symo, k, w, wdot, U, vdot, F, N)\n            # init external forces\n            Fex = copy(robo.Fex)\n            Nex = copy(robo.Nex)\n            for j in reversed(xrange(k + 1)):\n                compute_joint_wrench(robo_tmp, symo, j, antRj, antPj,\n                                     vdot, Fjnt, Njnt, F, N, Fex, Nex)\n            for j in xrange(k + 1):\n                compute_torque(robo_tmp, symo, j, Fjnt, Njnt, 'DG')\n        # reset all the parameters to zero\n        robo_tmp.put_inert_param(sympy.zeros(10, 1), k)\n        # compute model for the joint parameters\n        compute_joint_torque_deriv(symo, robo.IA[k],\n                                   robo.qddot[k], k)\n        compute_joint_torque_deriv(symo, robo.FS[k],\n                                   sympy.sign(robo.qdot[k]), k)\n        compute_joint_torque_deriv(symo, robo.FV[k],\n                                   robo.qdot[k], k)\n    # closing the output file\n    symo.file_close()\n    return symo\n\n\ndef direct_dynamic_NE(robo):\n    \"\"\"Computes Direct Dynamic Model using\n    Newton-Euler formulation\n\n    Parameters\n    ==========\n    robo : Robot\n        Instance of robot description container\n\n    Returns\n    =======\n    symo.sydi : dictionary\n        Dictionary with the information of all the substitutions\n    \"\"\"\n    wi = Init.init_vec(robo)\n    # antecedent angular velocity, projected into jth frame\n    w = Init.init_w(robo)\n    jaj = Init.init_vec(robo, 6)\n
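    # (hedged aside on dynamic_identification_NE above: the NE torque model is\n    # linear in the standard inertial parameters, so probing with one parameter\n    # set to 1 and the rest 0 yields one column of the regressor. Toy analogue:)\n    #     import numpy as np\n    #     def tau(params, q):  # stand-in model, linear in params\n    #         return np.array([q, q ** 2, 1.0]) @ params\n    #     [float(tau(np.eye(3)[i], 2.0)) for i in range(3)]  # -> [2.0, 4.0, 1.0]\n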
    jTant = Init.init_mat(robo, 6) # Twist transform list of Matrices 6x6\n    beta_star = Init.init_vec(robo, 6)\n    grandJ = Init.init_mat(robo, 6)\n    link_acc = Init.init_vec(robo, 6)\n    H_inv = Init.init_scalar(robo)\n    juj = Init.init_vec(robo, 6) # Jj*aj / Hj\n    Tau = Init.init_scalar(robo)\n    grandVp = Init.init_vec(robo, 6)\n    grandVp.append(Matrix([robo.vdot0 - robo.G, robo.w0]))\n    symo = Symoro()\n    symo.file_open(robo, 'ddm')\n    title = 'Direct dynamic model using Newton - Euler Algorithm'\n    symo.write_params_table(robo, title, inert=True, dynam=True)\n\n    # init transformation\n    antRj, antPj = compute_rot_trans(robo, symo)\n    for j in xrange(1, robo.NL):\n        compute_omega(robo, symo, j, antRj, w, wi)\n        compute_screw_transform(robo, symo, j, antRj, antPj, jTant)\n        if robo.sigma[j] == 0:\n            jaj[j] = Matrix([0, 0, 0, 0, 0, 1])\n        elif robo.sigma[j] == 1:\n            jaj[j] = Matrix([0, 0, 1, 0, 0, 0])\n    for j in xrange(1, robo.NL):\n        compute_beta(robo, symo, j, w, beta_star)\n        compute_link_acc(robo, symo, j, antRj, antPj, link_acc, w, wi)\n        grandJ[j] = inertia_spatial(robo.J[j], robo.MS[j], robo.M[j])\n    for j in reversed(xrange(1, robo.NL)):\n        replace_beta_J_star(robo, symo, j, grandJ, beta_star)\n        compute_Tau(robo, symo, j, grandJ, beta_star, jaj, juj, H_inv, Tau)\n        if robo.ant[j] != - 1:\n            compute_beta_J_star(robo, symo, j, grandJ, jaj, juj, Tau,\n                                beta_star, jTant, link_acc)\n    for j in xrange(1, robo.NL):\n        compute_acceleration(robo, symo, j, jTant, grandVp,\n                             juj, H_inv, jaj, Tau, link_acc)\n    for j in xrange(1, robo.NL):\n        compute_coupled_forces(robo, symo, j, grandVp, grandJ, beta_star)\n    symo.file_close()\n    return symo\n\n\ndef inertia_matrix(robo):\n    \"\"\"Computes Inertia Matrix using composite links\n\n    Parameters\n    ==========\n    robo : Robot\n        Instance of robot description container\n\n    Returns\n    =======\n    symo.sydi : dictionary\n        Dictionary with the information of all the substitutions\n    \"\"\"\n    Jplus, MSplus, Mplus = Init.init_Jplus(robo)\n    AJE1 = Init.init_vec(robo)\n    f = Init.init_vec(robo, ext=1)\n    n = Init.init_vec(robo, ext=1)\n    A = sympy.zeros(robo.NL, robo.NL)\n    symo = Symoro()\n    symo.file_open(robo, 'inm')\n    title = 'Inertia Matrix using composite links'\n    symo.write_params_table(robo, title, inert=True, dynam=True)\n    # init transformation\n    antRj, antPj = compute_rot_trans(robo, symo)\n    for j in reversed(xrange(-1, robo.NL)):\n        replace_Jplus(robo, symo, j, Jplus, MSplus, Mplus)\n        if j != - 1:\n            compute_Jplus(robo, symo, j, antRj, antPj,\n                          Jplus, MSplus, Mplus, AJE1)\n    for j in xrange(1, robo.NL):\n        compute_A_diagonal(robo, symo, j, Jplus, MSplus, Mplus, f, n, A)\n        ka = j\n        while ka != - 1:\n            k = ka\n            ka = robo.ant[ka]\n            compute_A_triangle(robo, symo, j, k, ka,\n                               antRj, antPj, f, n, A, AJE1)\n    symo.mat_replace(A, 'A', forced=True, symmet=True)\n    J_base = inertia_spatial(Jplus[-1], MSplus[-1], Mplus[-1])\n    symo.mat_replace(J_base, 'JP', 0, forced=True, symmet=True)\n    symo.file_close()\n    return symo\n\n\ndef inverse_dynamic_NE(robo):\n    \"\"\"Computes Inverse Dynamic Model using\n    Newton-Euler formulation\n\n    Parameters\n    ==========\n    robo : Robot\n        Instance of robot description container\n\n    Returns\n    =======\n    symo.sydi : dictionary\n        Dictionary with the information of all the substitutions\n    \"\"\"\n    symo = Symoro()\n    symo.file_open(robo, 'idm')\n    title = 'Inverse dynamic model using Newton - Euler Algorithm'\n    symo.write_params_table(robo, title, inert=True, dynam=True)\n    Newton_Euler(robo, symo)\n    symo.file_close()\n    return symo\n\n\n
Gravity, Friction and external\n    torques using Newton-Euler formulation\n\n    Parameters\n    ==========\n    robo : Robot\n        Instance of robot description container\n\n    Returns\n    =======\n    symo.sydi : dictionary\n        Dictionary with the information of all the substitutions\n    \"\"\"\n    robo_pseudo = deepcopy(robo)\n    robo_pseudo.qddot = sympy.zeros(robo_pseudo.NL, 1)\n    symo = Symoro()\n    symo.file_open(robo, 'ccg')\n    title = 'Pseudo forces using Newton - Euler Algorithm'\n    symo.write_params_table(robo, title, inert=True, dynam=True)\n    Newton_Euler(robo_pseudo, symo)\n    symo.file_close()\n    return symo\n\n\ndef compute_wrench(robo, symo, j, w, wdot, U, vdot, F, N):\n    \"\"\"Internal function. Computes total wrench (torques and forces)\n    of the link j\n\n    Notes\n    =====\n    F, N are the output parameters\n    \"\"\"\n    F[j] = robo.M[j]*vdot[j] + U[j]*robo.MS[j]\n    symo.mat_replace(F[j], 'F', j)\n    Psi = symo.mat_replace(robo.J[j]*w[j], 'PSI', j)\n    N[j] = robo.J[j]*wdot[j] + hat(w[j])*Psi\n    symo.mat_replace(N[j], 'No', j)\n\n\ndef compute_joint_wrench(robo, symo, j, antRj, antPj, vdot,\n                         Fjnt, Njnt, F, N, Fex, Nex):\n    \"\"\"Internal function. Computes wrench (torques and forces)\n    of the joint j\n\n    Notes\n    =====\n    Fjnt, Njnt, Fex, Nex are the output parameters\n    \"\"\"\n    Fjnt[j] = symo.mat_replace(F[j] + Fex[j], 'E', j)\n    Njnt[j] = N[j] + Nex[j] + hat(robo.MS[j])*vdot[j]\n    symo.mat_replace(Njnt[j], 'N', j)\n    f_ant = symo.mat_replace(antRj[j]*Fjnt[j], 'FDI', j)\n    if robo.ant[j] != - 1:\n        Fex[robo.ant[j]] += f_ant\n        Nex[robo.ant[j]] += antRj[j]*Njnt[j] + hat(antPj[j])*f_ant\n\n\ndef compute_torque(robo, symo, j, Fjnt, Njnt, name='GAM'):\n    \"\"\"Internal function. Computes actuation torques - projection of\n    joint wrench on the joint axis\n    \"\"\"\n    if robo.sigma[j] != 2:\n        tau = (robo.sigma[j]*Fjnt[j] + (1 - robo.sigma[j])*Njnt[j])\n        tau_total = tau[2] + robo.fric_s(j) + robo.fric_v(j) + robo.tau_ia(j)\n        symo.replace(tau_total, name, j, forced=True)\n\n\ndef inertia_spatial(J, MS, M):\n    return Matrix([(M*sympy.eye(3)).row_join(hat(MS).T), hat(MS).row_join(J)])\n\n\ndef compute_joint_torque_deriv(symo, param, arg, index):\n    \"\"\"Internal function. Computes joint reactive torques\n    in case the parameter is 1\n\n    Parameters\n    ==========\n    symo : Symoro\n        symbol manager\n    param : var\n        Dynamic parameter\n    arg : var\n        The real torque is equal to arg*param\n    index : string\n        identifies the parameter in the substituted symbol's name\n    \"\"\"\n    if param != ZERO and arg != ZERO:\n        index = str(index) + str(param)\n        symo.replace(arg, 'DG', index, forced=True)\n\n\ndef compute_beta(robo, symo, j, w, beta_star):\n    \"\"\"Internal function. Computes link's wrench when\n    the joint accelerations are zero\n\n    Notes\n    =====\n    beta_star is the output parameter\n    \"\"\"\n    E1 = symo.mat_replace(robo.J[j]*w[j], 'JW', j)\n    E2 = symo.mat_replace(hat(w[j])*E1, 'KW', j)\n    E3 = hat(w[j])*robo.MS[j]\n    E4 = symo.mat_replace(hat(w[j])*E3, 'SW', j)\n    E5 = -robo.Nex[j] - E2\n    E6 = -robo.Fex[j] - E4\n    beta_star[j] = Matrix([E6, E5])\n\n\ndef compute_link_acc(robo, symo, j, antRj, antPj, link_acc, w, wi):\n    \"\"\"Internal function. 
Computes link's accelerations when\n    the joint accelerations are zero\n\n    Notes\n    =====\n    link_acc is the output parameter\n    \"\"\"\n    E1 = symo.mat_replace(hat(wi[j])*Matrix([0, 0, robo.qdot[j]]),\n                          'WQ', j)\n    E2 = (1 - robo.sigma[j])*E1\n    E3 = 2*robo.sigma[j]*E1\n    E4 = hat(w[robo.ant[j]])*antPj[j]\n    E5 = hat(w[robo.ant[j]])*E4\n    E6 = antRj[j].T*E5\n    E7 = symo.mat_replace(E6 + E3, 'LW', j)\n    link_acc[j] = Matrix([E7, E2])\n\n\ndef replace_beta_J_star(robo, symo, j, grandJ, beta_star):\n    \"\"\"Internal function. Makes symbol substitution in beta_star\n    and grandJ\n    \"\"\"\n    grandJ[j] = symo.mat_replace(grandJ[j], 'MJE', j, symmet=True)\n    beta_star[j] = symo.mat_replace(beta_star[j], 'VBE', j)\n\n\ndef compute_Tau(robo, symo, j, grandJ, beta_star, jaj, juj, H_inv, Tau):\n    \"\"\"Internal function. Computes intermediate dynamic variables\n\n    Notes\n    =====\n    H_inv and Tau are the output parameters\n    \"\"\"\n    Jstar_jaj = grandJ[j]*jaj[j]\n    if robo.sigma[j] == 2:\n        Tau[j] = 0\n    else:\n        H_inv[j] = 1 / (jaj[j].dot(Jstar_jaj) + robo.IA[j])\n        H_inv[j] = symo.replace(H_inv[j], 'JD', j)\n        juj[j] = Jstar_jaj*H_inv[j]\n        symo.mat_replace(juj[j], 'JU', j)\n        joint_friction = robo.fric_s(j) + robo.fric_v(j)\n        Tau[j] = jaj[j].dot(beta_star[j]) + robo.GAM[j] - joint_friction\n        Tau[j] = symo.replace(Tau[j], 'GW', j)\n\n\ndef compute_beta_J_star(robo, symo, j, grandJ, jaj, juj, Tau,\n                        beta_star, jTant, link_acc):\n    \"\"\"Internal function. Computes intermediate dynamic variables\n\n    Notes\n    =====\n    grandJ and beta_star are the output parameters\n    \"\"\"\n    Jstar_jaj = grandJ[j]*jaj[j]\n    grandK = symo.mat_replace(grandJ[j] - juj[j]*Jstar_jaj.T,\n                              'GK', j)\n    E1 = symo.mat_replace(grandK*link_acc[j], 'NG', j)\n    E3 = symo.mat_replace(E1 + Tau[j]*juj[j], 'VS', j)\n    alpha = symo.mat_replace(E3 - beta_star[j], 'AP', j)\n    E4 = symo.mat_replace(jTant[j].T*grandK, 'GX', j)\n    E5 = symo.mat_replace(E4*jTant[j], 'TKT', j, symmet=True)\n    grandJ[robo.ant[j]] += E5\n    beta_star[robo.ant[j]] -= jTant[j].T*alpha\n\n\ndef compute_acceleration(robo, symo, j, jTant, grandVp,\n                         juj, H_inv, jaj, Tau, link_acc):\n    \"\"\"Internal function. Computes joint accelerations and links' twists\n\n    Notes\n    =====\n    grandVp is the output parameter\n    \"\"\"\n    grandR = symo.mat_replace(jTant[j]*grandVp[robo.ant[j]] + link_acc[j],\n                              'VR', j)\n    E1 = symo.replace(juj[j].dot(grandR), 'GU', j)\n    if robo.sigma[j] == 2:\n        qddot = 0\n    else:\n        qddot = H_inv[j]*Tau[j] - E1\n    qddot = symo.replace(qddot, \"QDP\", j, forced=True)\n    grandVp[j] = (grandR + qddot*jaj[j])\n    grandVp[j][3:, 0] = symo.mat_replace(grandVp[j][3:, 0], 'WP', j)\n    grandVp[j][:3, 0] = symo.mat_replace(grandVp[j][:3, 0], 'VP', j)\n\n\ndef compute_coupled_forces(robo, symo, j, grandVp, grandJ, beta_star):\n    \"\"\"Internal function.\n    \"\"\"\n    E3 = symo.mat_replace(grandJ[j]*grandVp[j], 'DY', j)\n    couplforce = E3 - beta_star[j]\n    symo.mat_replace(couplforce[3:, 0], 'N', j)\n    symo.mat_replace(couplforce[:3, 0], 'E', j)\n\n\ndef replace_Jplus(robo, symo, j, Jplus, MSplus, Mplus):\n    \"\"\"Internal function. Makes symbol substitutions for inertia parameters\n    \"\"\"\n    symo.mat_replace(Jplus[j], 'JP', j)\n    symo.mat_replace(MSplus[j], 'MSP', j)\n    Mplus[j] = symo.replace(Mplus[j], 'MP', j)\n\n\ndef compute_Jplus(robo, symo, j, antRj, antPj, Jplus, MSplus, Mplus, AJE1):\n    \"\"\"Internal function. 
Computes inertia parameters of composed link\n\n    Notes\n    =====\n    Jplus, MSplus, Mplus are the output parameters\n    \"\"\"\n    hat_antPj = hat(antPj[j])\n    antMSj = symo.mat_replace(antRj[j]*MSplus[j], 'AS', j)\n    E1 = symo.mat_replace(antRj[j]*Jplus[j], 'AJ', j)\n    AJE1[j] = E1[:, 2]\n    E2 = symo.mat_replace(E1*antRj[j].T, 'AJA', j)\n    E3 = symo.mat_replace(hat_antPj*hat(antMSj), 'PAS', j)\n    Jplus[robo.ant[j]] += E2 - (E3 + E3.T) + hat_antPj*hat_antPj.T*Mplus[j]\n    MSplus[robo.ant[j]] += antMSj + antPj[j]*Mplus[j]\n    Mplus[robo.ant[j]] += Mplus[j]\n\n\ndef compute_A_diagonal(robo, symo, j, Jplus, MSplus, Mplus, f, n, A):\n    \"\"\"Internal function. Computes diagonal elements\n    of the inertia matrix\n\n    Notes\n    =====\n    f, n, A are the output parameters\n    \"\"\"\n    if robo.sigma[j] == 0:\n        f[j] = Matrix([-MSplus[j][1], MSplus[j][0], 0])\n        n[j] = Jplus[j][:, 2]\n        A[j, j] = Jplus[j][2, 2] + robo.IA[j]\n    elif robo.sigma[j] == 1:\n        f[j] = Matrix([0, 0, Mplus[j]])\n        n[j] = Matrix([MSplus[j][1], - MSplus[j][0], 0])\n        A[j, j] = Mplus[j] + robo.IA[j]\n    symo.mat_replace(f[j], 'E' + chars[j], j)\n    symo.mat_replace(n[j], 'N' + chars[j], j)\n\n\ndef compute_A_triangle(robo, symo, j, k, ka, antRj, antPj, f, n, A, AJE1):\n    \"\"\"Internal function. Computes elements below and above diagonal\n    of the inertia matrix\n\n    Notes\n    =====\n    f, n, A are the output parameters\n    \"\"\"\n    f[ka] = antRj[k]*f[k]\n    if k == j and robo.sigma[j] == 0:\n        n[ka] = AJE1[k] + hat(antPj[k])*f[k]\n    else:\n        n[ka] = antRj[k]*n[k] + hat(antPj[k])*f[k]\n    if ka == - 1:\n        symo.mat_replace(f[ka], 'AV0')\n        symo.mat_replace(n[ka], 'AW0')\n    else:\n        symo.mat_replace(f[ka], 'E' + chars[j], ka)\n        symo.mat_replace(n[ka], 'N' + chars[j], ka)\n        if robo.sigma[ka] == 0:\n            A[j, ka] = n[ka][2]\n        elif robo.sigma[ka] == 1:\n            A[j, ka] = f[ka][2]\n        A[ka, j] = A[j, ka]\n\n\n# TODO: Finish base parameters computation\ndef base_paremeters(robo_orig):\n    \"\"\"Computes grouped inertia parameters. New parametrization\n    contains fewer parameters but generates the same dynamic model\n\n    Parameters\n    ==========\n    robo : Robot\n        Instance of robot description container\n\n    Returns\n    =======\n    symo.sydi : dictionary\n        Dictionary with the information of all the substitutions\n    \"\"\"\n    robo = copy(robo_orig)\n    lam = [0 for i in xrange(robo.NL)]\n    symo = Symoro()\n    symo.file_open(robo, 'regp')\n    title = 'Base parameters computation'\n    symo.write_params_table(robo, title, inert=True, dynam=True)\n    # init transformation\n    antRj, antPj = compute_rot_trans(robo, symo)\n    for j in reversed(xrange(1, robo.NL)):\n        if robo.sigma[j] == 0:\n            # general grouping\n            compute_lambda(robo, symo, j, antRj, antPj, lam)\n            group_param_rot(robo, symo, j, lam)\n            # special grouping\n            group_param_rot_spec(robo, symo, j, lam, antRj)\n            pass\n        elif robo.sigma[j] == 1:\n            # general grouping\n            group_param_prism(robo, symo, j, antRj)\n            # special grouping\n            group_param_prism_spec(robo, symo, j, antRj, antPj)\n            pass\n        elif robo.sigma[j] == 2:\n            # fixed joint, group everything\n            compute_lambda(robo, symo, j, antRj, antPj)\n            group_param_fix(robo, symo, j, lam)\n            pass\n    symo.write_line('*=*')\n    symo.write_line()\n    title = robo.name + ' grouped inertia parameters'\n    symo.write_params_table(robo, title, inert=True, equations=False)\n    symo.file_close()\n    return robo, symo.sydi\n\n\ndef vec_mut_J(v, u):\n    \"\"\"Internal function. 
Needed for inertia parameters transformation\n\n    Parameters\n    ==========\n    v, u : Matrix 3x1\n        two axis vectors\n    Returns : Matrix 6x1\n    \"\"\"\n    return Matrix([v[0]*u[0], v[0]*u[1], v[0]*u[2],\n                   v[1]*u[1], v[1]*u[2], v[2]*u[2]])\n\n\ndef vec_mut_MS(v, P):\n    \"\"\"Internal function. Needed for inertia parameters transformation\n\n    Parameters\n    ==========\n    v : Matrix 3x1\n        axis vector\n    P : Matrix 3x1\n        position vector\n\n    Returns : Matrix 6x1\n    \"\"\"\n    U = - hat(v)*hat(P)\n    return Matrix([2*U[0, 0], U[0, 1] + U[1, 0], U[0, 2] + U[2, 0],\n                   2*U[1, 1], U[1, 2] + U[2, 1], 2*U[2, 2]])\n\n\ndef vec_mut_M(P):\n    \"\"\"Internal function. Needed for inertia parameters transformation\n\n    Parameters\n    ==========\n    P : Matrix 3x1\n        position vector\n\n    Returns : Matrix 6x1\n    \"\"\"\n    U = -hat(P)*hat(P)\n    return Matrix([U[0, 0], U[0, 1], U[0, 2], U[1, 1], U[1, 2], U[2, 2]])\n\n\ndef compute_lambda(robo, symo, j, antRj, antPj, lam):\n    \"\"\"Internal function. Computes the inertia parameters\n    transformation matrix\n\n    Notes\n    =====\n    lam is the output parameter\n    \"\"\"\n    lamJJ_list = []\n    lamJMS_list = []\n    for e1 in xrange(3):\n        for e2 in xrange(e1, 3):\n            u = vec_mut_J(antRj[j][:, e1], antRj[j][:, e2])\n            if e1 != e2:\n                u += vec_mut_J(antRj[j][:, e2], antRj[j][:, e1])\n            lamJJ_list.append(u.T)\n    for e1 in xrange(3):\n        v = vec_mut_MS(antRj[j][:, e1], antPj[j])\n        lamJMS_list.append(v.T)\n    lamJJ = Matrix(lamJJ_list).T # , 'LamJ', j)\n    lamJMS = symo.mat_replace(Matrix(lamJMS_list).T, 'LamMS', j)\n    lamJM = symo.mat_replace(vec_mut_M(antPj[j]), 'LamM', j)\n    lamJ = lamJJ.row_join(lamJMS).row_join(lamJM)\n    lamMS = sympy.zeros(3, 6).row_join(antRj[j]).row_join(antPj[j])\n    lamM = sympy.zeros(1, 10)\n    lamM[9] = 1\n    lam[j] = Matrix([lamJ, lamMS, lamM])\n\n\ndef group_param_rot(robo, symo, j, lam):\n    \"\"\"Internal function. Groups inertia parameters according to the\n    general rule for a rotational joint.\n\n    Notes\n    =====\n    robo is the output parameter\n    \"\"\"\n    Kj = robo.get_inert_param(j)\n\n    lam03 = lam[j][:, 0] + lam[j][:, 3]\n    lam03 = lam03.applyfunc(symo.C2S2_simp)\n    for i in (3, 8, 9):\n        Kj[i] = symo.replace(Kj[i], inert_names[i], j)\n    if robo.ant[j] != -1:\n        Kant = robo.get_inert_param(robo.ant[j])\n        Kant += lam03*Kj[3] + lam[j][:, 8]*Kj[8] + lam[j][:, 9]*Kj[9]\n        robo.put_inert_param(Kant, robo.ant[j])\n    Kj[0] -= Kj[3] # XX\n    Kj[3] = 0 # YY\n    Kj[8] = 0 # MZ\n    Kj[9] = 0 # M\n    robo.put_inert_param(Kj, j)\n\n\ndef group_param_rot_spec(robo, symo, j, lam, antRj):\n    \"\"\"Internal function. Groups inertia parameters according to the\n    special rule for a rotational joint.\n\n    Notes\n    =====\n    robo is the output parameter\n    \"\"\"\n    chainj = robo.chain(j)\n    r1, r2, orthog = Transform.find_r12(robo, chainj, antRj, j)\n    kRj, all_paral = Transform.kRj(robo, antRj, r1, chainj)\n    Kj = robo.get_inert_param(j)\n    to_replace = {0, 1, 2, 4, 5, 6, 7}\n    if Transform.z_paral(kRj):\n        Kj[0] = 0 # XX\n        Kj[1] = 0 # XY\n        Kj[2] = 0 # XZ\n        Kj[4] = 0 # YZ\n        to_replace -= {0, 1, 2, 4}\n    joint_axis = antRj[chainj[-1]].col(2)\n    if all_paral and robo.G.norm() == sympy.Abs(joint_axis.dot(robo.G)):\n        Kj[6] = 0 # MX\n        Kj[7] = 0 # MY\n        to_replace -= {6, 7}\n    if j == r1 or(j == r2 and orthog):\n        Kj[5] += robo.IA[j] # ZZ\n        robo.IA[j] = 0\n    for i in to_replace:\n        Kj[i] = symo.replace(Kj[i], inert_names[i], j)\n    robo.put_inert_param(Kj, j)\n\n\ndef group_param_fix(robo, symo, j, lam):\n    \"\"\"Internal function. 
Groups inertia parameters according to the\n    general rule for a fixed joint.\n\n    Notes\n    =====\n    robo is the output parameter\n    \"\"\"\n    Kj = robo.get_inert_param(j)\n    for i in xrange(10):\n        Kj[i] = symo.replace(Kj[i], inert_names[i], j)\n    if robo.ant[j] != -1:\n        Kant = robo.get_inert_param(robo.ant[j])\n        Kant += lam[j]*Kj\n        robo.put_inert_param(Kant, robo.ant[j])\n    robo.put_inert_param(sympy.zeros(10, 1), j)\n\n\ndef group_param_prism(robo, symo, j, antRj):\n    \"\"\"Internal function. Groups inertia parameters according to the\n    general rule for a prismatic joint.\n\n    Notes\n    =====\n    robo is the output parameter\n    \"\"\"\n    Kj = robo.get_inert_param(j)\n    for i in xrange(6):\n        Kj[i] = symo.replace(Kj[i], inert_names[i], j)\n    robo.put_inert_param(Kj, j)\n    if robo.ant[j] != -1:\n        antJj = antRj[j]*robo.J[j]*antRj[j].T\n        robo.J[robo.ant[j]] += antJj\n    robo.J[j] = sympy.zeros(3, 3)\n\n\ndef group_param_prism_spec(robo, symo, j, antRj, antPj):\n    \"\"\"Internal function. Groups inertia parameters according to the\n    special rule for a prismatic joint.\n\n    Notes\n    =====\n    robo is the output parameter\n    \"\"\"\n    chainj = robo.chain(j)\n    r1, r2, orthog = Transform.find_r12(robo, chainj, antRj, j)\n    Kj = robo.get_inert_param(j)\n    kRj, all_paral = Transform.kRj(robo, antRj, r1, chainj)\n    to_replace = {6, 7, 8, 9}\n    if r1 < j and j < r2:\n        if Transform.z_paral(kRj):\n            Kj[8] = 0 # MZ\n            for i in (6, 7):\n                Kj[i] = symo.replace(Kj[i], inert_names[i], j)\n            robo.MS[robo.ant[j]] += antRj[j]*Matrix([Kj[6], Kj[7], 0])\n            robo.JJ[2, 2] -= Kj[6]*antPj[j][0] + Kj[7]*antPj[j][1]\n            Kj[6] = 0 # MX\n            Kj[7] = 0 # MY\n            to_replace -= {6, 7, 8}\n        else:\n            jar1 = kRj.row(2)\n            if jar1[2] != 0:\n                Kj[6] -= jar1[0]/jar1[2]*Kj[8]\n                Kj[7] -= jar1[1]/jar1[2]*Kj[8]\n                Kj[8] = 0 # MZ\n                to_replace -= {8}\n            elif jar1[0]*jar1[1] != 0:\n                Kj[6] -= jar1[0]/jar1[1]*Kj[7]\n                Kj[7] = 0 # MY\n                to_replace -= {7}\n            elif jar1[0] != 0:\n                Kj[7] = 0 # MY\n                to_replace -= {7}\n            else:\n                Kj[6] = 0 # MX\n                to_replace -= {6}\n    elif j < r1:\n        Kj[6] = 0 # MX\n        Kj[7] = 0 # MY\n        Kj[8] = 0 # MZ\n        to_replace -= {6, 7, 8}\n    #TODO: rewrite\n    dotGa = Transform.sna(antRj[j])[2].dot(robo.G)\n    if dotGa == ZERO:\n        revol_align = robo.ant[robo.ant[j]] == 0 and robo.ant[j] == ZERO\n        if robo.ant[j] == 0 or revol_align:\n            Kj[9] += robo.IA[j]\n            robo.IA[j] = 0\n    for i in to_replace:\n        Kj[i] = symo.replace(Kj[i], inert_names[i], j)\n    robo.put_inert_param(Kj, j)\n","repo_name":"BKhomutenko/SYMORO_python","sub_path":"pysymoro/core/dynamics.py","file_name":"dynamics.py","file_ext":"py","file_size_in_byte":25995,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"28123653064","text":"import re\nfrom sklearn import linear_model\nimport matplotlib.pyplot as plt\nimport numpy as np\nregr= linear_model.LinearRegression()\n\n\nx=[[164],[179],[162],[170]]\ny=[53,63,55,59]\nregr.fit(x,y)\n\ncoef=regr.coef_\nintercept=regr.intercept_\nprint(\"The linear regression line equation obtained through training is\\n\")\nprint(\"y =\",coef,\"* X +\",intercept)\n\nprint(\"Goodness of fit\",regr.score(x,y))\ninput_data=[[166,1],[166,0]]\nprint(\"Estimated weight :\",regr.predict(input_data))","repo_name":"YouHyeonJu/Machine_learning","sub_path":"2021_05_20/ml2.py","file_name":"ml2.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25188381425","text":"import os\nfrom time import time\nfrom django.core.files.base import ContentFile\nfrom django.core.files.storage import default_storage\nfrom 
django.conf import settings\nfrom django.http import FileResponse\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status, viewsets\nfrom rest_framework.parsers import FileUploadParser\nimport requests \nfrom bs4 import BeautifulSoup\nimport json\n\nfrom google.cloud import videointelligence\nfrom google.cloud.videointelligence import enums\nfrom google.protobuf.json_format import MessageToDict\n\nfrom wordfreq import zipf_frequency\n\nimport moviepy.editor as editor\n\nclass UploadView(APIView):\n parser_classes = [FileUploadParser]\n\n def post(self, request, filename):\n video = request.data['file']\n \n video_key = str(int(time())) + '_' + video.__str__()\n path = default_storage.save(video_key, ContentFile(video.read()))\n\n return Response(video_key, status=status.HTTP_201_CREATED)\n\n# def start_aws_analysis(self, video_key):\n# put_url = json.loads(requests.get(\n# f'https://dlqyv3dixh.execute-api.ap-southeast-2.amazonaws.com/test/geturl?fileName={video_key}'\n# ).text)['url']\n\n# with open(os.path.join(settings.MEDIA_ROOT, video_key), 'rb') as f:\n# response = requests.put(put_url, data=f.read())\n\nclass LabelView(APIView):\n parser_classes = [FileUploadParser]\n\n def put(self, request, filename, format=None):\n file_obj = request.data['file']\n\n # self.implicit()\n labels = self.analyze_labels_file(file_obj)\n\n return Response(labels) \n\n def implicit(self):\n from google.cloud import storage\n\n # If you don't specify credentials when constructing the client, the\n # client library will look for credentials in the environment.\n storage_client = storage.Client()\n\n # Make an authenticated API request\n buckets = list(storage_client.list_buckets())\n print(buckets)\n\n def analyze_labels_file(self, file):\n \"\"\"Detect labels given a file path.\"\"\"\n video_client = videointelligence.VideoIntelligenceServiceClient()\n features = [videointelligence.enums.Feature.LABEL_DETECTION]\n\n # read the file object\n input_content = file.read()\n\n operation = video_client.annotate_video(\n features=features, input_content=input_content\n )\n print(\"\\nProcessing video for label annotations:\")\n\n result = operation.result(timeout=90)\n print(\"\\nFinished processing.\")\n\n # Process video/segment level label annotations\n segment_labels = result.annotation_results[0].segment_label_annotations\n for i, segment_label in enumerate(segment_labels):\n print(\"Video label description: {}\".format(segment_label.entity.description))\n for category_entity in segment_label.category_entities:\n print(\n \"\\tLabel category description: {}\".format(category_entity.description)\n )\n\n for i, segment in enumerate(segment_label.segments):\n start_time = (\n segment.segment.start_time_offset.seconds\n + segment.segment.start_time_offset.nanos / 1e9\n )\n end_time = (\n segment.segment.end_time_offset.seconds\n + segment.segment.end_time_offset.nanos / 1e9\n )\n positions = \"{}s to {}s\".format(start_time, end_time)\n confidence = segment.confidence\n print(\"\\tSegment {}: {}\".format(i, positions))\n print(\"\\tConfidence: {}\".format(confidence))\n print(\"\\n\")\n\n return MessageToDict(result, preserving_proto_field_name = True)\n\nclass TranscriptView(APIView):\n parser_classes = [FileUploadParser]\n\n def put(self, request, filename, format=None):\n file_obj = request.data['file']\n transcript = self.speech_transcription(file_obj)\n\n return Response(transcript) \n\n def speech_transcription(self, file):\n 
\"\"\"Transcribe speech from a video stored on GCS.\"\"\"\n from google.cloud import videointelligence\n\n video_client = videointelligence.VideoIntelligenceServiceClient()\n features = [videointelligence.enums.Feature.SPEECH_TRANSCRIPTION]\n\n config = videointelligence.types.SpeechTranscriptionConfig(\n language_code=\"en-US\", enable_automatic_punctuation=True\n )\n video_context = videointelligence.types.VideoContext(\n speech_transcription_config=config\n )\n\n # read the file object\n input_content = file.read()\n\n operation = video_client.annotate_video(\n input_content=input_content, features=features, video_context=video_context\n )\n\n print(\"\\nProcessing video for speech transcription.\")\n\n result = operation.result(timeout=600)\n\n # There is only one annotation_result since only\n # one video is processed.\n annotation_results = result.annotation_results[0]\n for speech_transcription in annotation_results.speech_transcriptions:\n\n # The number of alternatives for each transcription is limited by\n # SpeechTranscriptionConfig.max_alternatives.\n # Each alternative is a different possible transcription\n # and has its own confidence score.\n for alternative in speech_transcription.alternatives:\n print(\"Alternative level information:\")\n\n print(\"Transcript: {}\".format(alternative.transcript))\n print(\"Confidence: {}\\n\".format(alternative.confidence))\n\n print(\"Word level information:\")\n for word_info in alternative.words:\n word = word_info.word\n start_time = word_info.start_time\n end_time = word_info.end_time\n print(\n \"\\t{}s - {}s: {}\".format(\n start_time.seconds + start_time.nanos * 1e-9,\n end_time.seconds + end_time.nanos * 1e-9,\n word,\n )\n )\n\n return MessageToDict(result, preserving_proto_field_name = True)\n\nclass FreqViewSet(viewsets.GenericViewSet):\n def list(self, request):\n keyword = request.query_params.get('keyword', None)\n frequency = zipf_frequency('zipf', 'en')\n\n return Response({\"frequency\": frequency})\n\nclass EffectViewSet(viewsets.GenericViewSet):\n def list(self, request):\n keyword = request.query_params.get('keyword', None)\n sourceurl = f'https://freesound.org/search/?q={keyword}&f=duration%3A%5B0+TO+3%5D&s=score+desc&advanced=1&g=1'\n\n source = requests.get(sourceurl).text\n bs = BeautifulSoup(source, 'html.parser')\n tracks = bs.find_all(\"a\", class_=\"mp3_file\")\n\n urls = []\n for track in tracks:\n url = track['href']\n urls.append(f'https://freesound.org{url}')\n\n return Response({\"tracks\": urls})\n\nclass MusicViewSet(viewsets.GenericViewSet):\n def list(self, request):\n keyword = request.query_params.get('keyword', None)\n sourceurl = f'https://freemusicarchive.org/search?adv=1&quicksearch={keyword}&&&music-filter-remix-allowed=1'\n\n source = requests.get(sourceurl).text\n bs = BeautifulSoup(source, 'html.parser')\n tracks = bs.find_all(\"div\", class_=\"play-item\")\n\n urls = []\n for track in tracks:\n info = json.loads(track['data-track-info'])\n urls.append(info['playbackUrl'])\n\n return Response({\"tracks\": urls})\n\nclass CompiledView(APIView):\n def post(self, request, filename):\n # Parse JSON data\n if 'music' in request.data: \n music_url = request.data['music']['url']\n if 'start' in request.data['music']: music_start = request.data['music']['start']\n else: music_start = 0\n if 'volume' in request.data['music']: music_volume = request.data['music']['volume']/2\n else: music_volume = 1\n else: music_url = None\n effects = []\n if 'effects' in request.data:\n for n, e in 
enumerate(request.data['effects']):\n                effect = {}\n                effect['url'] = request.data['effects'][n]['url']\n                if 'start' in request.data['effects'][n]: effect['start'] = request.data['effects'][n]['start']\n                else: effect['start'] = 0\n                if 'volume' in request.data['effects'][n]: effect['volume'] = request.data['effects'][n]['volume']/2\n                else: effect['volume'] = 1\n                effects.append(effect)\n\n        # Find video file in uploaded files\n        try:\n            video_path = default_storage.path(filename)\n            video = editor.VideoFileClip(video_path).fx(editor.afx.audio_normalize)\n        except: return Response('Video file not found. Please upload using /upload/',\n            status=status.HTTP_400_BAD_REQUEST)\n\n        audio_clips = [video.audio] if video.audio else []\n\n        # Process music\n        if music_url: \n            music_file = requests.get(music_url).content\n            f = open('temp_music.mp3', 'wb')\n            f.write(music_file)\n            music = editor.AudioFileClip('temp_music.mp3')\n            if music.duration > video.duration:\n                if video.duration + music_start > music.duration: \n                    music_start = music.duration - video.duration\n                    if music_start < 0: music_start = 0\n                music = music.subclip(music_start, music_start + video.duration)\n            music = music.volumex(music_volume)\n            audio_clips.append(music)\n\n        # Process effects\n        def pad_effect(start, effect, duration):\n            def pad_effect_frame(times, start=start, effect=effect):\n                if type(times) == int: return [0,0] \n                return [effect.get_frame(time-start) if start < time < start+effect.duration else [0,0]\n                    for time in times]\n            return editor.AudioClip(pad_effect_frame, duration=duration)\n\n        for e in effects:\n            effect_file = requests.get(e['url']).content\n            open('temp_audio.mp3', 'wb').write(effect_file)\n            effect = editor.AudioFileClip('temp_audio.mp3')\n            effect = effect.volumex(e['volume'])\n            effect = pad_effect(e['start'], effect, video.duration)\n            effect.fps = 44100\n            # effect.write_audiofile('temp_audio.mp3', fps=44100)\n            # effect = editor.AudioFileClip('tem_audio.mp3')\n            audio_clips.append(effect)\n\n        # Compose\n        composed_audio = editor.CompositeAudioClip(audio_clips)\n        composed_video = video.set_audio(composed_audio)\n        composed_video.write_videofile('temp_video.mp4')\n\n        response = FileResponse(open('temp_video.mp4', 'rb'))\n\n        try: os.remove('temp_video.mp4')\n        except: pass\n        try: os.remove('temp_audio.mp3')\n        except: pass\n        try: os.remove('temp_music.mp3')\n        except: pass\n\n        return response\n","repo_name":"SENG4920-wiaa/server","sub_path":"backend/autovideosound/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32273537516","text":"import unittest\n\nif __name__ == \"__main__\":\n    import utils\n    utils.import_depends()\n\nfrom brokertest import TestBrokerCommand\n\n\nclass TestDeployDomain(TestBrokerCommand):\n\n    def test_100_deploychangetest1domain(self):\n        self.successtest([\"deploy\", \"--source\", \"changetest1\",\n                          \"--target\", \"deployable\",\n                          \"--comments\", \"Test comment\"])\n\n    def test_110_verifydeploy(self):\n        template = self.find_template(\"aquilon\", \"archetype\", \"base\",\n                                      domain=\"deployable\")\n        with open(template) as f:\n            contents = f.readlines()\n        self.failUnlessEqual(contents[-1], \"#Added by unittest\\n\")\n\n    def test_110_verifydeploylog(self):\n        kingdir = self.config.get(\"broker\", \"kingdir\")\n        command = [\"log\", \"--no-color\", \"-n\", \"1\", \"deployable\"]\n        (out, err) = self.gitcommand(command, cwd=kingdir)\n        self.matchoutput(out, \"User:\", command)\n        
self.matchoutput(out, \"Request ID:\", command)\n self.matchoutput(out, \"Comments: Test comment\", command)\n\n author_name = self.config.get(\"broker\", \"user\")\n author_email = self.config.get(\"broker\", \"git_author_email\")\n self.matchoutput(out, \"Author: %s <%s>\" % (author_name, author_email),\n command)\n\n def test_120_deployfail(self):\n command = [\"deploy\", \"--source\", \"changetest1\",\n \"--target\", \"prod\"]\n (out, err) = self.failuretest(command, 4)\n self.matchoutput(err,\n \"Domain prod is under change management control. \"\n \"Please specify --justification.\",\n command)\n\n def test_120_deploybadjustification(self):\n command = [\"deploy\", \"--source\", \"changetest1\", \"--target\", \"prod\",\n \"--justification\", \"I felt like deploying changes.\"]\n out = self.badrequesttest(command)\n self.matchoutput(out, \"Failed to parse the justification\", command)\n\n def test_130_deploynosync(self):\n self.successtest([\"deploy\", \"--source\", \"changetest1\",\n \"--target\", \"prod\", \"--nosync\",\n \"--justification\", \"tcm=12345678\",\n \"--comments\", \"Test comment 2\"])\n\n def test_200_verifynosync(self):\n # The change should be in prod...\n template = self.find_template(\"aquilon\", \"archetype\", \"base\",\n domain=\"prod\")\n with open(template) as f:\n contents = f.readlines()\n self.failUnlessEqual(contents[-1], \"#Added by unittest\\n\")\n # ...but not in the ut-prod tracking domain.\n template = self.find_template(\"aquilon\", \"archetype\", \"base\",\n domain=\"ut-prod\")\n with open(template) as f:\n contents = f.readlines()\n self.failIfEqual(contents[-1], \"#Added by unittest\\n\")\n\n def test_210_verifynosynclog(self):\n kingdir = self.config.get(\"broker\", \"kingdir\")\n\n # Note: \"prod\" is a copy of the real thing so limit the amount of\n # history checked to avoid being fooled by real commits\n\n # The change must be in prod...\n command = [\"log\", \"--no-color\", \"-n\", \"1\", \"prod\"]\n (out, err) = self.gitcommand(command, cwd=kingdir)\n self.matchoutput(out, \"Justification: tcm=12345678\", command)\n self.matchoutput(out, \"Comments: Test comment 2\", command)\n\n # ... 
but not in ut-prod\n        command = [\"log\", \"--no-color\", \"-n\", \"1\", \"ut-prod\"]\n        (out, err) = self.gitcommand(command, cwd=kingdir)\n        self.matchclean(out, \"tcm=12345678\", command)\n\n    def test_300_addadvanced(self):\n        self.successtest([\"add\", \"sandbox\", \"--sandbox\", \"advanced\",\n                          \"--start\", \"prod\"])\n\n    def test_310_deploy_leftbehind(self):\n        command = [\"deploy\", \"--source\", \"advanced\", \"--target\", \"leftbehind\"]\n        out = self.badrequesttest(command)\n        self.matchoutput(out,\n                         \"You're trying to deploy a sandbox to a domain that \"\n                         \"does not contain the commit where the sandbox was \"\n                         \"branched from.\",\n                         command)\n\n    def test_320_update_leftbehind(self):\n        command = [\"deploy\", \"--source\", \"prod\", \"--target\", \"leftbehind\"]\n        self.successtest(command)\n\n    def test_330_deploy_again(self):\n        command = [\"deploy\", \"--source\", \"advanced\", \"--target\", \"leftbehind\"]\n        self.successtest(command)\n\n\nif __name__ == '__main__':\n    suite = unittest.TestLoader().loadTestsFromTestCase(TestDeployDomain)\n    unittest.TextTestRunner(verbosity=2).run(suite)\n","repo_name":"gombasg/aquilon","sub_path":"tests/broker/test_deploy_domain.py","file_name":"test_deploy_domain.py","file_ext":"py","file_size_in_byte":4736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"23682543636","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 17 17:36:11 2023\n\n@author: loveaoe33\n\"\"\"\nimport tensorflow as tf\nfrom tensorflow.keras import layers\nimport numpy as np\nimport os\nimport dlib\nimport cv2\nface_cascade=cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')\n\nTrainfolder_path=\"C:/Zfn_Data\"\nTestfolder_path=\"C:/test_Data\"\nimage_extension=\".jpg\"\ntrain_images=[]\ntrain_labels=[]\ntest_images=[]\ntest_labels=[]\n\ndef images_Process(Images):\n    Images=cv2.cvtColor(Images,cv2.COLOR_BGR2GRAY) \n    \"\"\"Convert to grayscale\"\"\"\n    ImageEqual=cv2.equalizeHist(Images) \n    \"\"\"Histogram equalization\"\"\"\n    ImageFil=cv2.medianBlur(ImageEqual,5) \n    \"\"\"Noise reduction\"\"\"\n    return ImageFil\ndef images_FaceCatch(Images):\n    faces=face_cascade.detectMultiScale(Images,scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))\n    if len(faces)>0:\n        (x,y,w,h)=faces[0]\n        FaceCatch=Images[y:y+h,x:x+w]\n        return cv2.cvtColor(FaceCatch, cv2.COLOR_BGR2RGB)\n    else:\n        return cv2.cvtColor(Images, cv2.COLOR_BGR2RGB)\n\n    \n\ndef load_images_from_Testfolder(Testfolder_path,current_folder=\"\"):\n    global test_images\n    global test_labels\n    for testfilename in os.listdir(Testfolder_path):\n        file_path=os.path.join(Testfolder_path,testfilename)\n        print(file_path)\n        if os.path.isdir(file_path):\n            print(file_path)\n            \"\"\"test_images.extend(load_images_from_Testfolder(file_path,current_folder=os.path.join(current_folder, testfilename)))\"\"\"\n            load_images_from_Testfolder(file_path,current_folder=os.path.join(current_folder, testfilename))\n\n            print(os.path.isdir(file_path))\n            print(current_folder)\n        elif testfilename.endswith(image_extension): \n            if current_folder==\"class1\":\n                image = cv2.imread(file_path)\n                image=images_Process(image)\n                image=images_FaceCatch(image)\n                image = cv2.resize(image, (32, 32))\n                image = image / 255.0\n                test_images.append(image)\n                test_labels.append(0)\n                print(len(test_images))\n\n\n            elif current_folder==\"class2\":\n                image = cv2.imread(file_path)\n                image=images_Process(image)\n                image=images_FaceCatch(image)\n                image = cv2.resize(image, (32, 32))\n                image = image / 255.0\n                test_images.append(image)\n                
test_labels.append(1)\n                print(len(test_images))\n\n\n    return \"null\"\n    \n\ndef load_images_from_TrainFolder(Trainfolder_path):\n    global train_images\n    global train_labels\n    for Trainfilename in os.listdir(Trainfolder_path):\n        if Trainfilename.endswith(image_extension):\n            image_path=os.path.join(Trainfolder_path,Trainfilename)\n            image=cv2.imread(image_path)\n            image=images_Process(image)\n            image=images_FaceCatch(image)\n            image=cv2.resize(image,(32,32))\n            image=image/255.0\n            train_images.append(image)\n            train_labels.append(0)\n            \"\"\"print('load_images_from_TrainFolder555')\n            print(\"train_labels:\"+ str(train_labels))\n            print(\"train_images count:\"+ str(len(train_images)))\"\"\"\n\n    return \"null\"\n\n    \n\nload_images_from_TrainFolder(Trainfolder_path)\nload_images_from_Testfolder(Testfolder_path,current_folder=\"\") \n\n\nprint(len(train_images))\nprint(len(train_labels))\nprint(len(test_images))\nprint(len(test_labels)) \n    \ntrain_images=np.array(train_images) \ntest_images=np.array(test_images)\ntrain_labels=np.array(train_labels) \ntest_labels=np.array(test_labels) \n\n\"\"\"print(len(train_images))\nprint(len(train_labels))\nprint(len(test_images))\nprint(len(test_labels))\"\"\"\n\n\n\n    \nmodel=tf.keras.Sequential()\nmodel.add (layers.Conv2D(32,(2,2),activation='relu',input_shape=(32,32,3))) \n\"\"\"32,3,3 means 32 filters with a 3x3 convolution layer for feature extraction; 64,64,3 means 64*64 images with 3 color channels\"\"\"\nmodel.add (layers.MaxPooling2D((2,2)))\nmodel.add (layers.Conv2D(64,(2,2),activation='relu'))\nmodel.add(layers.Dropout(0.3))\nmodel.add (layers.MaxPooling2D((2,2)))\nmodel.add (layers.Conv2D(64,(2,2),activation='relu'))\nmodel.add(layers.Dropout(0.3))\nmodel.add (layers.MaxPooling2D((2,2)))\nmodel.add (layers.Conv2D(64,(2,2),activation='relu'))\nmodel.add(layers.Dropout(0.3))\nmodel.add (layers.MaxPooling2D((2,2)))\n\n\n\nmodel.add (layers.Flatten()) \n\"\"\"Flattens the convolutional output into a one-dimensional vector. In a convolutional neural network (CNN), the convolution layers usually produce multi-dimensional feature maps, each representing a different feature. Before connecting the fully connected layers, these feature maps must be flattened into a one-dimensional vector for the subsequent fully connected operations.\"\"\"\nmodel.add (layers.Dense(64,activation='softmax')) \n\"\"\"Defines a fully connected layer with 64 neurons and a softmax activation\"\"\"\nmodel.add (layers.Dense(10, activation='softmax') ) \n\"\"\"Defines a fully connected layer with 10 neurons and a softmax activation\"\"\"\n\n\"\"\"Compilation settings\"\"\"\nmodel.compile(optimizer='adam',\n              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n              metrics=['accuracy'])\nprint('777')\n\n\"\"\"Pre-training preprocessing\"\"\"\n\"\"\"(train_images,train_labels),(test_images,test_labels)=tf.keras.dataset.cifar10.load_data()\ntrain_images,test_images=train_images/255.0,test_images/255.0\"\"\"\n\nmodel.fit(train_images,train_labels,epochs=50,validation_data=(test_images, test_labels))\ntest_lost,test_Acc=model.evaluate(test_images, test_labels, verbose=2)\nprint('\\nTest accuracy:', test_Acc)\nmodel.save('Zfn_Train.h5')","repo_name":"loveaoe33/CNNetWork","sub_path":"DataTraning.py","file_name":"DataTraning.py","file_ext":"py","file_size_in_byte":5496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31297957913","text":"\n\nimport os\nimport sys\nfrom collections import defaultdict\n\nimport spell\n\n\n# Add common-lib code to system path\nsources = os.getenv('BISCUIT_DIR')\nif sources not in sys.path: sys.path.append(sources)\nfrom common_lib.common_features import utilities\n\n\n\n# Global resources\n_tf = defaultdict(lambda:0)\n_df = defaultdict(lambda:0)\n\n\n# Empirically computed stop words (high entropy)\nstop_words = set()\n#stop_words = utilities.stop_words\n\n#'''\n# Read hard-coded stop words\nwith 
open('/data1/nlp-data/twitter/tools/stop-words.txt','r') as f:\n    for line in f.readlines():\n        if line != '\\n':\n            stop_words.add(line.strip('\\n'))\n#'''\n\n\ndef doc_freq(word):\n    return _df[word]\n\n\n\n\ndef tokenize(tagger, text):\n    if tagger:\n        tagger.update(text)\n        all_toks = [ tagger.tokens(t) for t in text ]\n    else:\n        all_toks = [ t.split() for t in text ]\n    normed = all_toks\n    #normed = [ utilities.normalize_phrase_TaskB(toks) for toks in all_toks ]\n    #normed = [ spell.correct_spelling(toks) for toks in normed ]\n    return normed\n\n\n\ndef _build_dictionary(tagger, data_path):\n\n    global _tf, _df\n\n\n    label_counts = defaultdict(lambda:0)\n    all_data = set()\n    for fname in os.listdir(data_path):\n\n        fpath = os.path.join(data_path, fname)\n        with open(fpath, 'r') as f:\n\n            # Accumulate list of tweets (just text)\n            for line in f.readlines():\n                label = line.split('\\t')[2]\n                text = '\\t'.join(line.split('\\t')[3:]).strip('\\n')\n                text = text.decode('ascii','ignore')\n\n                # Unseen tweet?\n                if (label,text) not in all_data: label_counts[label] += 1\n\n                all_data.add((label,text))\n\n\n    # Fine grained analysis\n    df_by_labels = defaultdict(lambda:defaultdict(lambda:0))\n\n    # List of \"documents\" of tokens\n    all_text = [ data[1] for data in all_data ] \n    all_toks = tokenize(tagger,all_text)\n    all_data = [ (data[0],text) for (data,text) in zip(all_data,all_toks) ]\n    for label,doc in all_data:\n\n        # Frequency of each word\n        freqs = defaultdict(lambda:0)\n        for tok in doc:\n            freqs[tok.lower()] += 1\n\n        # Count term frequencies and document frequencies\n        for t,f in freqs.items():\n            _tf[t] += f\n            _df[t] += 1\n            df_by_labels[t][label] += 1\n\n    '''\n    # Detect semantically ambiguous words\n    for k,v in sorted(df_by_labels.items(),key=lambda t:sum(t[1].values())):\n        if similar(v, label_counts):\n            stop_words.add(k)\n            #print '%-15s' % k , '\\t', display_percents(v, label_counts)\n    '''\n\n    '''\n    # Output list of data-defined stop words\n    with open('/data1/nlp-data/twitter/tools/stop-words.txt','w') as f:\n        for w in stop_words:\n            print >>f, w\n    '''\n    #exit()\n\n\ndef similar(v, labels):\n\n    percents = {}\n    for label in labels:\n        percents[label] = (100.0 * v[label]) / labels[label]\n\n    # Determine if all words occur with roughly the same percentages\n    keys = { i:label for i,label in enumerate(['positive','negative','neutral'])}\n    for i in range(len(keys)):\n        for j in range(i):\n            s1 = percents[keys[i]]\n            s2 = percents[keys[j]]\n            if abs(s1 - s2)/(s1+.00001) > 0.15: return False\n\n    #return False\n    return True\n\n\ndef display_percents(v, labels):\n\n    retVal = []\n\n    for label in labels:\n        score = (100.0 * v[label]) / labels[label]\n        retVal.append(label[:3] + ': %.5s' % str(score) )\n        #retVal.append(v[label])\n\n    return retVal\n\n\n\nif __name__ == '__main__':\n\n    # Add common-lib code to system path\n    sources = os.getenv('BISCUIT_DIR')\n    if sources not in sys.path: sys.path.append(sources)\n    from common_lib.read_config import enabled_modules\n    from common_lib.common_features.ark_tweet import ark_tweet\n\n    if enabled_modules['ark_tweet']:\n        tagger = ark_tweet.ArkTweetNLP()\n    else:\n        tagger = None\n\n    _build_dictionary(tagger, '/data1/nlp-data/twitter/data/etc')\n\n    # Explicitly delete cache\n    del(tagger.cache)\n\n","repo_name":"mikemeding/SemEval-2015","sub_path":"wboag-sentiment/TwitterHawk-master/TaskB/code/taskb_features/tf_idf.py","file_name":"tf_idf.py","file_ext":"py","file_size_in_byte":4120,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
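The similar() helper in the tf_idf record above treats a token as a data-defined stop word when its relative document frequency is roughly equal under every sentiment label. The following is a minimal, self-contained Python 3 sketch of that heuristic; the per-label counts, label totals, and the 15% tolerance are hypothetical stand-ins for the Twitter data and constants the module actually loads from disk.

# Hypothetical per-label document frequencies and label totals, standing in
# for the on-disk Twitter data the module reads in _build_dictionary().
label_totals = {'positive': 1000, 'negative': 800, 'neutral': 1200}
df_by_label = {
    'the':   {'positive': 950, 'negative': 760, 'neutral': 1140},  # evenly spread
    'awful': {'positive': 20,  'negative': 400, 'neutral': 60},    # label-specific
}

def is_ambiguous(counts, totals, tol=0.15):
    # Mirrors similar(): a token is stop-word-like when its relative document
    # frequency is nearly identical under every label (relative gap <= tol).
    rates = [counts.get(label, 0) / total for label, total in totals.items()]
    hi, lo = max(rates), min(rates)
    return (hi - lo) / (hi + 1e-5) <= tol

for token, counts in df_by_label.items():
    verdict = 'stop-word-like' if is_ambiguous(counts, label_totals) else 'label-specific'
    print(token, verdict)  # 'the' is flagged, 'awful' is kept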
+{"seq_id":"73487610113","text":"class HuffmanNode:\n def __init__(self, char, freq):\n self.char = char # stored as an integer - the ASCII character code value\n self.freq = freq # the freqency associated with the node\n self.left = None # Huffman tree (node) to the left\n self.right = None # Huffman tree (node) to the right\n\n def set_left(self, node):\n self.left = node\n\n def set_right(self, node):\n self.right = node\n\ndef comes_before(a, b):\n \"\"\"Returns True if tree rooted at node a comes before tree rooted at node b, False otherwise\"\"\"\n if a.freq == b.freq:\n if a.char < b.char:\n return True\n return a.freq < b.freq\n\ndef combine(a, b):\n \"\"\"Creates and returns a new Huffman node with children a and b, with the \"lesser node\" on the left\n The new node's frequency value will be the sum of the a and b frequencies\n The new node's char value will be the lesser of the a and b char ASCII values\"\"\"\n freq = a.freq + b.freq\n char = b.char\n if a.char < b.char:\n char = a.char\n new_node = HuffmanNode(char, freq)\n if comes_before(a, b):\n new_node.set_left(a)\n new_node.set_right(b)\n #else:\n # new_node.set_left(b)\n # new_node.set_right(a)\n #print(new_node.freq)\n return new_node\n\ndef cnt_freq(filename):\n \"\"\"Opens a text file with a given file name (passed as a string) and counts the \n frequency of occurrences of all the characters within that file\"\"\"\n new_list = [0] * 256\n #try:\n f = open(filename, \"r\")\n #except FileNotFoundError:\n #print(\"File Not Found\")\n new_string = f.read()\n if new_string == \"\":\n f.close()\n return \"No File\"\n for x in new_string:\n new_list[ord(x)] += 1\n f.close()\n return new_list\n\ndef create_huff_tree(char_freq):\n \"\"\"Create a Huffman tree for characters with non-zero frequency\n Returns the root node of the Huffman tree\"\"\"\n commpare_list = []\n node_list = freq_list_to_node_list(char_freq)\n node_sorted_list = sorted_list(list(node_list))\n while len(node_sorted_list) > 1:\n min1 = node_sorted_list[0]\n min2 = node_sorted_list[1]\n try:\n node_sorted_list[0] = combine(min1, min2)\n #node_sorted_list[0] = combine(node_sorted_list[0], node_sorted_list[1])\n node_sorted_list.remove(node_sorted_list[1])\n node_sorted_list = sorted_list(node_sorted_list)\n except IndexError:\n pass\n #node_sorted_list = [i for i in node_sorted_list if i != 0]\n return node_sorted_list[0]\n\ndef freq_list_to_node_list(freq_list):\n for x in range(len(freq_list)):\n if freq_list[x] > 0:\n freq_list[x] = HuffmanNode(x, freq_list[x])\n freq_list = [i for i in freq_list if i != 0] # removes all 0's from list - still unsorted\n return freq_list\n\n\ndef sorted_list(freq_list):\n new_list = []\n for p in range(len(freq_list)): #sorts list into asceding order of frequencies\n min = 9999999\n for m in freq_list:\n if m.freq < min:\n min = m.freq\n min_node = m\n new_list.append(min_node)\n freq_list.remove(min_node)\n for i in range(len(new_list)):\n try:\n if comes_before(new_list[i], new_list[i+1]) is False:\n temp = new_list[i]\n new_list[i] = new_list[i+1]\n new_list[i+1] = temp\n except IndexError:\n pass\n return new_list\n\ndef create_code(node):\n \"\"\"Returns an array (Python list) of Huffman codes. 
For each character, use the integer ASCII representation \n    as the index into the array, with the resulting Huffman code for that character stored at that location\"\"\"\n    new_list = [\"\"] * 256\n    new_string = ''\n    return create_code_helper(node, new_list, new_string)\n\ndef create_code_helper(node, new_list, new_string):\n    if node.left is None and node.right is None:\n        new_list[node.char] = new_string\n    if node.left is not None:\n        create_code_helper(node.left, new_list, new_string + '0')\n    if node.right is not None:\n        create_code_helper(node.right, new_list, new_string + '1')\n    return new_list\n\ndef create_header(freqs):\n    \"\"\"Input is the list of frequencies. Creates and returns a header for the output file\n    Example: For the frequency list associated with \"aaabbbbcc\", would return “97 3 98 4 99 2” \"\"\"\n    return_string = ''\n    for x in range(len(freqs)):\n        if freqs[x] > 0:\n            return_string += f\"{x} {freqs[x]} \"\n    last_string = return_string.strip()\n    return last_string\n\n\ndef huffman_encode(in_file, out_file):\n    \"\"\"Takes input file name and output file name as parameters\n    Uses the Huffman coding process on the text from the input file and writes encoded text to output file\n    Take note of special cases - empty file and file with only one unique character\"\"\"\n    frequency_list = cnt_freq(in_file)\n    if frequency_list == \"No File\":\n        p = open(out_file, \"w\")\n        p.close()\n        return\n    node_list = freq_list_to_node_list(list(frequency_list))\n    sorted_node_1st = sorted_list(node_list)\n    new_string = huffman_encoder_helper(frequency_list, in_file)\n    f = open(out_file, \"w\", newline = '')\n    if len(sorted_node_1st) == 1:\n        f.write(create_header(frequency_list))\n        f.close()\n        return\n    #if create_header(frequency_list) == '':\n    #    f.close()\n    #    return\n    f.write(create_header(frequency_list) + \"\\n\")\n    #new_string = huffman_encoder_helper(frequency_list, in_file)\n    f.write(new_string)\n    f.close()\n    return\n\n\ndef huffman_encoder_helper(freq_list, in_file):\n    hufftree = create_huff_tree(list(freq_list))\n    codes = create_code(hufftree)\n    f = open(in_file, \"r\")\n    new_string = f.read()\n    f.close()\n    code_string = ''\n    for x in new_string:\n        code_string += codes[ord(x)]\n    return code_string\n\ndef huffman_decode(encoded_file, decode_file):\n    try:\n        f = open(encoded_file, \"r\")\n    except FileNotFoundError:\n        f.close()\n        return\n    header_string = f.readline()\n    if header_string == \"\":\n        x = open(decode_file, 'w')\n        x.close()\n        return\n    freq_list = parse_header(header_string)\n    root_node = create_huff_tree(freq_list)\n    full_string = f.read()\n    if full_string == \"\":\n        y = open(decode_file, \"w\", newline=\"\")\n        temp_string = chr(root_node.char) * root_node.freq\n        y.write(temp_string)\n        y.close()\n        return\n    f.close()\n    p = open(decode_file, \"w\", newline=\"\")\n    return_string = ''\n    current = root_node\n    for x in full_string:\n        if int(x) == 0:\n            current = current.left\n        elif int(x) == 1:\n            current = current.right\n        if current.left is None and current.right is None:\n            return_string += chr(current.char)\n            current = root_node\n    p.write(return_string)\n    p.close()\n    return\n\n\ndef parse_header(header_string):\n    freq_list = [0] * 256\n    new_list = header_string.split()\n    for x in range(0, len(new_list), 2):\n        freq_list[int(new_list[x])] = int(new_list[x + 1])\n    return freq_list\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"ljabdo/Programming","sub_path":"Python/huffmann 
tree/huffman.py","file_name":"huffman.py","file_ext":"py","file_size_in_byte":7195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5332716708","text":"import csv\n\ndef sort_csv(src, dest):\n input_list = []\n input_file = open(src,'r')\n # keep track of number of lines in input.csv\n count = 0\n for line in input_file.readlines():\n count = count + 1\n if count > 1:\n print(\"oh no! input.csv should only contain a single line to read\")\n input_list.clear()\n else:\n actual_line = line.rstrip('\\n')\n input_list = actual_line.split(',')\n\n input_file.close()\n\n # Sorting should ignore case\n input_list.sort(key=lambda i: i.upper(), reverse=True)\n\n with open(dest,'w') as output_file:\n wr = csv.writer(output_file, quoting=csv.QUOTE_NONE)\n # especially takes care of an empty input.csv\n result = [i for i in input_list if i]\n if not result:\n wr.writerow([])\n else:\n wr.writerow(input_list)\n\nif __name__ == \"__main__\":\n try:\n sort_csv(\"sort/input.csv\",\"sort/output.csv\")\n except Exception as e:\n print(\"Oops there was an error. Make sure you specify the right path for the input file\")\n","repo_name":"sherman205/cambia-sort","sub_path":"sort/sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26197314476","text":"import h5py\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.optimize as opt\nfrom sklearn.metrics import r2_score\nimport csv\nfrom scipy.optimize import curve_fit\n\n\ndef fit_linear(x, a, b):\n return a*x+b\n\ndef fit_parabolic(x, a, b, c):\n return a*x**2+b*x+c\n\ndef function_OLD(x, m_fit, v_fit):\n return (v_fit/2)*np.sin(2*x) + np.sqrt(m_fit**2 + np.sin(x)**2)\n\ndef function_sqrt(x, m_fit, v_fit, c_fit):\n return v_fit*x + np.sqrt((m_fit*c_fit**2)**2 + (x*c_fit)**2)\n\ndef function(x, m_fit, v_fit, c_fit):\n #return v_fit*np.sin(2*x)/2 + np.sqrt(m_fit**2*c_fit**4 + np.sin(x)**2*c_fit**2)\n return m_fit*c_fit**2 + v_fit*x + x**2/(2*m_fit) - (2*v_fit*x**3)/3 - (4*m_fit**2*c_fit**2+3)/(24*m_fit**3*c_fit**2)*x**4\n\n # Underlying is wrong\n return v_fit*x + m_fit*c_fit**2 + x**2/(m_fit*c_fit)/2 - x**4/(8*(m_fit*c_fit)**3) # BEST\n return v_fit*x + m_fit*c_fit**2*(1 + (x/mc)**2/2 - (x/mc)**4/8)#sqrt(1 + (x/(m*c))**2)\n return v_fit*x + m_fit*c_fit**2*sqrt(1 + (x/(m*c))**2)\n return v_fit*x + np.sqrt((m_fit*c_fit**2)**2 + (x*c_fit)**2)\n return abs(v_fit*x) + m_fit + c_fit*x**3 # For mass = 0?\n\ndef function_4th_order(x, a0, a1, a2, a3, a4):\n return a0 + a1*x + a2*x**2 + a3*x**3 + a4*x**4\n\ndef function_mass_0(x, v, c):\n return 0\n\n\ndef fit_function(delta_g, v, mass, plot = False):\n\n file = f\"Dispersion_Data/{data_folder}/Dispersion_m_{mass} _delta_g_{delta_g} _v_{v}\"\n #file = f\"Dispersion_Data/{data_folder}/Dispersion_closer_m_{mass} _delta_g_{delta_g} _v_{v}\"\n\n f = h5py.File(file, \"r\")\n energies = f[\"energies\"][:]\n bounds = f[\"bounds\"]\n amount_data = np.shape(energies)[1]\n\n bounds = np.pi/12\n bounds = np.pi/36\n\n energies = [np.real(e[0]) for e in energies[0,:]]\n\n\n k = np.linspace(-bounds, bounds,amount_data)\n k_refined = np.linspace(-bounds, bounds, 1000)\n\n popt, pcov = curve_fit(function, k[closer:-closer], energies[closer:-closer], (mass, v, 1))\n (m_fit, v_fit, c_fit) = popt\n exp = [function(i, m_fit, v_fit, c_fit) for i in k_refined]\n\n popt_4th_order, pcov_4th_order = curve_fit(function_4th_order, 
k[closer:-closer], energies[closer:-closer])\n (a0, a1, a2, a3, a4) = popt_4th_order\n exp_4th_order = [function_4th_order(i, a0, a1, a2, a3, a4) for i in k_refined]\n\n \"\"\"\n print(f\"For Delta_g = {delta_g}\")\n print(f'mass gets regularized from {mass} to {m_fit}')\n print(f'v gets regularized from {v} to {v_fit}')\n print(f'c gets regularized from {1} to {c_fit}')\n \"\"\"\n\n check1 = (-3*a3)/(2*a1)\n check2 = -(a2*(2*a0+3*a2))/(6*a0*a4)\n print(f'for this value, the checks that should be 1 are {(-3*a3)/(2*a1)} and {-(a2*(2*a0+3*a2))/(6*a0*a4)}.')\n\n if plot:\n plt.figure()\n plt.scatter(k, np.array(energies), label = 'quasiparticle')\n plt.plot(k_refined, exp, label = '4th order taylor fit')\n plt.plot(k_refined, exp_4th_order, label = '4th order polynomial fit')\n plt.xlabel('k')\n plt.ylabel('energy')\n plt.legend()\n plt.title(fr\"$m = {round(mass,rounding)} \\to {round(m_fit,rounding)}$, $c = 1 \\to {round(c_fit,rounding)}$ and $v = {round(v,rounding)} \\to {round(v_fit,rounding)}$ for $\\Delta(g) = {round(delta_g,rounding)}$\")\n #plt.show()\n string = fr'Fit_m = {mass}_v = {v}_\\Delta(g) = {delta_g}.png'\n print(string)\n plt.savefig(fr'Fit_m = {mass}_v = {v}_Delta(g) = {delta_g}.png')\n\n return (m_fit, v_fit, c_fit, check1, check2)\n\n\ncloser = 1\nrounding = 3\nschmidt_cut = 3.5\nexcluded_0 = 1 # do you want to ignore m = 0.0?\n\nmass_renorm = [[], [], []]\nv_renorm = [[], [], []]\nc_renorm = [[], [], []]\nchecks1 = [[], [], []]\nchecks2 = [[], [], []]\n\nfor (schmidt_number, schmidt_cut) in enumerate([3.5, 4.0, 4.5]):\n data_folder = f\"Data_cut_{schmidt_cut}\"\n #data_folder = \"Data closer\"\n for delta_g in [-0.15*i for i in range(1, 5)]:\n local_mass_renorm = []\n local_v_renorm = []\n local_c_renorm = []\n local_checks1 = []\n local_checks2 = []\n for mass in [0.1*i for i in range(1, 7)]:\n for v in [0.15]:\n (m_fit, v_fit, c_fit, check1, check2) = fit_function(delta_g, v, mass, plot = False)\n local_mass_renorm.append(m_fit)\n local_v_renorm.append(v_fit)\n local_c_renorm.append(c_fit)\n local_checks1.append(check1)\n local_checks2.append(check2)\n mass_renorm[schmidt_number].append(local_mass_renorm)\n v_renorm[schmidt_number].append(local_v_renorm)\n c_renorm[schmidt_number].append(local_c_renorm)\n checks1[schmidt_number].append(local_checks1)\n checks2[schmidt_number].append(local_checks2)\n\n\"\"\"\nmass_renorm = np.array(mass_renorm[0])\nv_renorm = np.array(v_renorm[0])\nc_renorm = np.array(c_renorm[0])\nchecks1 = np.array(checks1[0])\nchecks2 = np.array(checks2[0])\n\"\"\"\nmass_renorm = np.array(mass_renorm)\nv_renorm = np.array(v_renorm)\nc_renorm = np.array(c_renorm)\nchecks1 = np.array(checks1)\nchecks2 = np.array(checks2)\nprint(np.shape(mass_renorm))\nprint(np.shape(v_renorm))\nprint(np.shape(checks1))\nprint(np.shape(checks2))\n\nfor (schmidt_number, schmidt_cut) in enumerate([3.5, 4.0, 4.5]):\n for m_index in range(1, 7):\n plt.scatter([-0.15*i for i in range(1, 5)], v_renorm[schmidt_number,:,m_index-excluded_0]/(-0.15), label = f'mass = {round(m_index*0.1,2)}. schmidt = {schmidt_cut}')\n #plt.scatter([-0.15*i for i in range(1, 5)], v_renorm[:,m_index-excluded_0]/(-0.15), label = f'mass = {round(m_index*0.1,2)}. 
schmidt = {schmidt_cut}')\n plt.xlabel(r'$\\Delta (g)$', fontsize = 15)\n plt.ylabel(r'$v_{eff}$', fontsize = 15)\n plt.title(fr'Relative renormalization from $v = 0.15$ for schmidt-cut = {schmidt_cut}', fontsize = 15)\n plt.legend()\nplt.show()\n\nfor (schmidt_number, schmidt_cut) in enumerate([3.5, 4.0, 4.5]):\n for m_index in range(1, 7):\n plt.scatter([-0.15*i for i in range(1, 5)], mass_renorm[schmidt_number,:,m_index-excluded_0]/(-0.15), label = f'mass = {round(m_index*0.1,2)}. schmidt = {schmidt_cut}')\n #plt.scatter([-0.15*i for i in range(1, 5)], v_renorm[:,m_index-excluded_0]/(-0.15), label = f'mass = {round(m_index*0.1,2)}. schmidt = {schmidt_cut}')\n plt.xlabel(r'$\\Delta (g)$', fontsize = 15)\n plt.ylabel(r'$mass_{eff}$', fontsize = 15)\n plt.title(fr'Relative renormalization the mass for schmidt-cut = {schmidt_cut}', fontsize = 15)\n plt.legend()\nplt.show()\n\nfor (schmidt_number, schmidt_cut) in enumerate([3.5, 4.0, 4.5]):\n for m_index in range(1, 7):\n plt.scatter([-0.15*i for i in range(1, 5)], c_renorm[schmidt_number,:,m_index-excluded_0]/(-0.15), label = f'mass = {round(m_index*0.1,2)}. schmidt = {schmidt_cut}')\n #plt.scatter([-0.15*i for i in range(1, 5)], v_renorm[:,m_index-excluded_0]/(-0.15), label = f'mass = {round(m_index*0.1,2)}. schmidt = {schmidt_cut}')\n plt.xlabel(r'$\\Delta (g)$', fontsize = 15)\n plt.ylabel(r'$c_{eff}$', fontsize = 15)\n plt.title(fr'Relative renormalization of c for schmidt-cut = {schmidt_cut}', fontsize = 15)\n plt.legend()\nplt.show()\n\n\nfor (schmidt_number, schmidt_cut) in enumerate([3.5, 4.0, 4.5]):\n for m_index in range(1, 7):\n plt.scatter([-0.15*i for i in range(1, 5)], checks1[schmidt_number,:,m_index-excluded_0], label = f'mass = {round(m_index*0.1,2)}. schmidt = {schmidt_cut}')\n #plt.scatter([-0.15*i for i in range(1, 5)], checks1[:,m_index-excluded_0], label = f'mass = {round(m_index*0.1,2)}. schmidt = {schmidt_cut}')\n plt.xlabel(r'$\\Delta (g)$', fontsize = 15)\n plt.ylabel(r'$check1$', fontsize = 15)\n plt.title(fr'check1 for schmidt-cut = {schmidt_cut}', fontsize = 15)\n plt.legend()\nplt.show()\n\nfor (schmidt_number, schmidt_cut) in enumerate([3.5, 4.0, 4.5]):\n for m_index in range(1, 7):\n plt.scatter([-0.15*i for i in range(1, 5)], checks2[schmidt_number,:,m_index-excluded_0], label = f'mass = {round(m_index*0.1,2)}. schmidt = {schmidt_cut}')\n #plt.scatter([-0.15*i for i in range(1, 5)], checks2[:,m_index-excluded_0], label = f'mass = {round(m_index*0.1,2)}. 
schmidt = {schmidt_cut}')\n plt.xlabel(r'$\\Delta (g)$', fontsize = 15)\n plt.ylabel(r'$check2$', fontsize = 15)\n plt.title(fr'check2 for schmidt-cut = {schmidt_cut}', fontsize = 15)\n plt.legend()\nplt.show()\n\nfor m_index in range(3, 7):\n X = [-0.15*i for i in range(1, 5)]\n Y = (mass_renorm[:,m_index-excluded_0])/(m_index*0.1)\n plt.scatter(X, Y, label = f'mass = {round(m_index*0.1,2)}')\n\n popt, pcov = curve_fit(fit_parabolic, X, Y)\n (a, b, c) = popt\n X_refined = [X[0]+(X[-1]-X[0])*i/1000 for i in range(1000)]\n X_refined = [(X[-1])*i/1000 for i in range(1000)]\n linear_fit_analytical = [fit_parabolic(i, a, b, c) for i in X_refined]\n #plt.plot(X_refined, linear_fit_analytical)\n\n#plt.scatter([0], [1], s = 25, c = \"black\")\nplt.xlabel(r'$\\Delta (g)$', fontsize = 15)\nplt.ylabel(r'$mass_{eff}$', fontsize = 15)\nplt.title(fr'Relative renormalization the mass for schmidt-cut = {schmidt_cut}', fontsize = 15)\nplt.legend()\nplt.show()\n\nfor m_index in range(1, 7):\n plt.scatter([-0.15*i for i in range(1, 5)], c_renorm[:,m_index-excluded_0], label = f'mass = {round(m_index*0.1,2)}')\nplt.xlabel(r'$\\Delta (g)$', fontsize = 15)\nplt.ylabel(r'$c_{eff}$', fontsize = 15)\nplt.title(fr'Renormalization from $c = 1$ for schmidt-cut = {schmidt_cut}', fontsize = 15)\nplt.legend()\nplt.show()\n\n\n\"\"\"\nplt.scatter([-0.15*i for i in range(1, 5)], mass_renorm)\nplt.xlabel(r'$\\Delta (g)$', fontsize = 15)\nplt.ylabel(r'$m_{eff}$', fontsize = 15)\nplt.title(r'Renormalization from $m = 0.7$', fontsize = 15)\nplt.show()\n\"\"\"\n\n# v does not seem to have an influence on the renormalization of m => m = m(g). Test m(g, v_sweep) is incoming\n# v = v(g, m) or v(g)? g does have an influence","repo_name":"sanderdemeyer/SanderDM_Thesis_2324","sub_path":"Dispersion_relation_fit.py","file_name":"Dispersion_relation_fit.py","file_ext":"py","file_size_in_byte":9787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8807064349","text":"from typing import Optional\nimport numpy as np\n\nfrom qiskit.circuit import QuantumRegister\n\nfrom ..blueprintcircuit import BlueprintCircuit\n\n# pylint: disable=no-member\n\n\nclass QFT(BlueprintCircuit):\n r\"\"\"Quantum Fourier Transform Circuit.\n\n The Quantum Fourier Transform (QFT) on :math:`n` qubits is the operation\n\n .. math::\n\n |j\\rangle \\mapsto \\frac{1}{2^{n/2}} \\sum_{k=0}^{2^n - 1} e^{2\\pi ijk / 2^n} |k\\rangle\n\n The circuit that implements this transformation can be implemented using Hadamard gates\n on each qubit, a series of controlled-U1 (or Z, depending on the phase) gates and a\n layer of Swap gates. The layer of Swap gates can in principle be dropped if the QFT appears\n at the end of the circuit, since then the re-ordering can be done classically. They\n can be turned off using the ``do_swaps`` attribute.\n\n For 4 qubits, the circuit that implements this transformation is:\n\n .. jupyter-execute::\n :hide-code:\n\n from qiskit.circuit.library import QFT\n import qiskit.tools.jupyter\n circuit = QFT(4)\n %circuit_library_info circuit\n\n The inverse QFT can be obtained by calling the ``inverse`` method on this class.\n The respective circuit diagram is:\n\n .. jupyter-execute::\n :hide-code:\n\n from qiskit.circuit.library import QFT\n import qiskit.tools.jupyter\n circuit = QFT(4).inverse()\n %circuit_library_info circuit\n\n One method to reduce circuit depth is to implement the QFT approximately by ignoring\n controlled-phase rotations where the angle is beneath a threshold. 
This is discussed\n in more detail in https://arxiv.org/abs/quant-ph/9601018 or\n https://arxiv.org/abs/quant-ph/0403071.\n\n Here, this can be adjusted using the ``approximation_degree`` attribute: the smallest\n ``approximation_degree`` rotation angles are dropped from the QFT. For instance, a QFT\n on 5 qubits with approximation degree 2 yields (the barriers are dropped in this example):\n\n .. jupyter-execute::\n :hide-code:\n\n from qiskit.circuit.library import QFT\n import qiskit.tools.jupyter\n circuit = QFT(5, approximation_degree=2)\n %circuit_library_info circuit\n\n \"\"\"\n\n def __init__(self,\n num_qubits: Optional[int] = None,\n approximation_degree: int = 0,\n do_swaps: bool = True,\n inverse: bool = False,\n insert_barriers: bool = False,\n name: str = 'qft') -> None:\n \"\"\"Construct a new QFT circuit.\n\n Args:\n num_qubits: The number of qubits on which the QFT acts.\n approximation_degree: The degree of approximation (0 for no approximation).\n do_swaps: Whether to include the final swaps in the QFT.\n inverse: If True, the inverse Fourier transform is constructed.\n insert_barriers: If True, barriers are inserted as visualization improvement.\n name: The name of the circuit.\n \"\"\"\n super().__init__(name=name)\n self._approximation_degree = approximation_degree\n self._do_swaps = do_swaps\n self._insert_barriers = insert_barriers\n self._inverse = inverse\n self._data = None\n self.num_qubits = num_qubits\n\n @property\n def num_qubits(self) -> int:\n \"\"\"The number of qubits in the QFT circuit.\n\n Returns:\n The number of qubits in the circuit.\n\n Note:\n This method needs to be overwritten to allow adding the setter for num_qubits while\n still complying to pylint.\n \"\"\"\n return super().num_qubits\n\n @num_qubits.setter\n def num_qubits(self, num_qubits: int) -> None:\n \"\"\"Set the number of qubits.\n\n Note that this changes the registers of the circuit.\n\n Args:\n num_qubits: The new number of qubits.\n \"\"\"\n if num_qubits != self.num_qubits:\n self._invalidate()\n\n if num_qubits:\n self.qregs = [QuantumRegister(num_qubits, name='q')]\n else:\n self.qregs = []\n\n @property\n def approximation_degree(self) -> int:\n \"\"\"The approximation degree of the QFT.\n\n Returns:\n The currently set approximation degree.\n \"\"\"\n return self._approximation_degree\n\n @approximation_degree.setter\n def approximation_degree(self, approximation_degree: int) -> None:\n \"\"\"Set the approximation degree of the QFT.\n\n Args:\n approximation_degree: The new approximation degree.\n\n Raises:\n ValueError: If the approximation degree is smaller than 0.\n \"\"\"\n if approximation_degree < 0:\n raise ValueError('Approximation degree cannot be smaller than 0.')\n\n if approximation_degree != self._approximation_degree:\n self._invalidate()\n self._approximation_degree = approximation_degree\n\n @property\n def insert_barriers(self) -> bool:\n \"\"\"Whether barriers are inserted for better visualization or not.\n\n Returns:\n True, if barriers are inserted, False if not.\n \"\"\"\n return self._insert_barriers\n\n @insert_barriers.setter\n def insert_barriers(self, insert_barriers: bool) -> None:\n \"\"\"Specify whether barriers are inserted for better visualization or not.\n\n Args:\n insert_barriers: If True, barriers are inserted, if False not.\n \"\"\"\n if insert_barriers != self._insert_barriers:\n self._invalidate()\n self._insert_barriers = insert_barriers\n\n @property\n def do_swaps(self) -> bool:\n \"\"\"Whether the final swaps of the QFT are applied 
or not.\n\n Returns:\n True, if the final swaps are applied, False if not.\n \"\"\"\n return self._do_swaps\n\n @do_swaps.setter\n def do_swaps(self, do_swaps: bool) -> None:\n \"\"\"Specifiy whether to do the final swaps of the QFT circuit or not.\n\n Args:\n do_swaps: If True, the final swaps are applied, if False not.\n \"\"\"\n if do_swaps != self._do_swaps:\n self._invalidate()\n self._do_swaps = do_swaps\n\n def is_inverse(self) -> bool:\n \"\"\"Whether the inverse Fourier transform is implemented.\n\n Returns:\n True, if the inverse Fourier transform is implemented, False otherwise.\n \"\"\"\n return self._inverse\n\n def _invalidate(self) -> None:\n \"\"\"Invalidate the current build of the circuit.\"\"\"\n self._data = None\n\n def inverse(self) -> 'QFT':\n \"\"\"Invert this circuit.\n\n Returns:\n The inverted circuit.\n \"\"\"\n\n if self.name in ('qft', 'iqft'):\n name = 'qft' if self._inverse else 'iqft'\n else:\n name = self.name + '_dg'\n\n inverted = self.copy(name=name)\n inverted._data = []\n\n from qiskit.circuit.parametertable import ParameterTable\n inverted._parameter_table = ParameterTable()\n\n for inst, qargs, cargs in reversed(self._data):\n inverted._append(inst.inverse(), qargs, cargs)\n\n inverted._inverse = not self._inverse\n return inverted\n\n def _swap_qubits(self):\n num_qubits = self.num_qubits\n for i in range(num_qubits // 2):\n self.swap(i, num_qubits - i - 1)\n\n def _check_configuration(self, raise_on_failure: bool = True) -> bool:\n valid = True\n if self.num_qubits is None:\n valid = False\n if raise_on_failure:\n raise AttributeError('The number of qubits has not been set.')\n\n return valid\n\n def _build(self) -> None:\n \"\"\"Construct the circuit representing the desired state vector.\"\"\"\n super()._build()\n\n for j in range(self.num_qubits):\n self.h(j)\n num_entanglements = max(0, self.num_qubits - max(self.approximation_degree, j))\n for k in range(j + 1, j + num_entanglements):\n lam = np.pi / (2 ** (k - j))\n self.cp(lam, j, k)\n\n if self.insert_barriers:\n self.barrier()\n\n if self._do_swaps:\n self._swap_qubits()\n\n if self._inverse:\n self._data = super().inverse()\n","repo_name":"OscarJHernandez/qc_portfolio_optimization","sub_path":"venv/lib/python3.8/site-packages/qiskit/circuit/library/basis_change/qft.py","file_name":"qft.py","file_ext":"py","file_size_in_byte":8316,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"61"} +{"seq_id":"23510227741","text":"import sys\n\n\ndef get_divisor(n):\n\tif (n < 1):\n\t\treturn None\n\telse:\n\t\tpotential_divisor = 2\n\n\t\twhile (potential_divisor * potential_divisor < n):\n\t\t\tif (n % potential_divisor == 0):\n\t\t\t\treturn potential_divisor\n\t\t\tpotential_divisor += 1\n\n\t\treturn None\n\n\ninput = open(sys.argv[1], 'r')\n\nt = input.readline()\nlines = input.readlines()\n\nfor line_index in range(len(lines)):\n\tif (lines[line_index][-1] == '\\n'):\n\t\tlines[line_index] = lines[line_index][:-1]\n\n\t[n, j] = lines[line_index].split()\n\tn = int(n)\n\tj = int(j)\n\n\tprint('Case #' + str(line_index + 1) + ':')\n\n\tjamcoins_count = 0\n\n\tfor potential_jamcoin in range((1 << (n - 1)) + 1, 1 << n, 2):\n\n\t\tis_jamcoin = True\n\t\tpotential_jamcoin_bin = bin(potential_jamcoin)[2:]\n\t\tpotential_jamcoin_alt_base_divisors = []\n\n\t\tfor base in range(2, 11):\n\t\t\tpotential_jamcoin_alt_base = int(potential_jamcoin_bin, base)\n\n\t\t\tdivisor = get_divisor(potential_jamcoin_alt_base)\n\n\t\t\tif (not divisor):\n\t\t\t\tis_jamcoin = 
False\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tpotential_jamcoin_alt_base_divisors.append(divisor)\n\n\t\tif (is_jamcoin):\n\t\t\tdivisor_string = ' '.join(map(str, potential_jamcoin_alt_base_divisors))\n\n\t\t\tprint(str(potential_jamcoin_bin) + ' ' + divisor_string)\n\n\t\t\tjamcoins_count += 1\n\n\t\t\tif (jamcoins_count >= j):\n\t\t\t\tbreak\n\ninput.close()\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_179/3401.py","file_name":"3401.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"730565243","text":"class SnpFilt(object):\n\n def __init__(self, args):\n self.min_SNPindex = args.min_SNPindex\n self.maxDP = args.max_depth\n self.minDP = args.min_depth\n self.strand_bias = args.strand_bias\n\n def filt_cultivar_gt(self, cultivar_GT, bulk_AD):\n record = {}\n # only use homo in cultivar\n if cultivar_GT in ['0/0', '0|0', '1/1', '1|1']:\n ADs = bulk_AD.split(',')\n # check biallele or multi-allele\n if len(ADs) == 2:\n # filter missing\n if not '.' in ADs:\n # check whether REF homo or ALT homo.\n if cultivar_GT in ['0/0', '0|0']:\n record['bulk_ref_AD'] = int(ADs[0])\n record['bulk_alt_AD'] = int(ADs[1])\n # if depth of ALT is zero in bulk,\n # it will be discarded because SNP-index will be zero.\n if record['bulk_alt_AD'] != 0:\n record['type'] = 'keep'\n record['cultivar_GT'] = '0/0'\n else:\n record['type'] = 'discard'\n else:\n record['bulk_ref_AD'] = int(ADs[1])\n record['bulk_alt_AD'] = int(ADs[0])\n # if depth of REF is zero in bulk,\n # it will be discarded because SNP-index will be zero.\n if record['bulk_ref_AD'] != 0:\n record['type'] = 'keep'\n record['cultivar_GT'] = '1/1'\n else:\n record['type'] = 'discard'\n else:\n record['type'] = 'discard'\n # check ALT homo in cultivar\n elif len(ADs) == 3:\n # filter missing\n if not '.' 
in ADs:\n if cultivar_GT in ['1/1', '1|1']:\n if int(ADs[0]) == 0:\n record['bulk_ref_AD'] = int(ADs[1])\n record['bulk_alt_AD'] = int(ADs[2])\n record['type'] = 'keep'\n record['cultivar_GT'] = '1/1'\n else:\n record['type'] = 'discard'\n else:\n record['type'] = 'discard'\n else:\n record['type'] = 'discard'\n else:\n record['type'] = 'discard'\n else:\n record['type'] = 'discard'\n return record\n\n def filt_depth(self, record, cultivar_AD):\n record['cultivar_depth'] = sum([int(AD) for AD in cultivar_AD.split(',')])\n record['bulk_depth'] = record['bulk_ref_AD'] + record['bulk_alt_AD']\n\n if record['cultivar_depth'] < self.minDP or record['cultivar_depth'] > self.maxDP:\n record['type'] = 'discard'\n elif record['bulk_depth'] < self.minDP or record['bulk_depth'] > self.maxDP:\n record['type'] = 'discard'\n return record\n\n def filt_index(self, record):\n record['SNPindex'] = record['bulk_alt_AD']/record['bulk_depth']\n if record['SNPindex'] < self.min_SNPindex:\n record['type'] = 'discard'\n return record\n\n def filt_strand_bias(self, record, ADFR):\n ADF = ADFR[0]\n ADR = ADFR[1]\n\n if record['cultivar_GT'] == '0/0':\n cultivar_ADF = int(ADF.split(',')[0])\n cultivar_ADR = int(ADR.split(',')[0])\n else:\n cultivar_ADF = int(ADF.split(',')[1])\n cultivar_ADR = int(ADR.split(',')[1])\n\n if cultivar_ADF == 0 and cultivar_ADR > self.strand_bias:\n record['type'] = 'discard'\n elif cultivar_ADR == 0 and cultivar_ADF > self.strand_bias:\n record['type'] = 'discard'\n return record\n\n\n def filt(self, cultivar_GT, cultivar_AD, bulk_AD, ADFR):\n record = self.filt_cultivar_gt(cultivar_GT, bulk_AD)\n if record['type'] == 'keep':\n record = self.filt_depth(record, cultivar_AD)\n if record['type'] == 'keep':\n record = self.filt_index(record)\n if record['type'] == 'keep':\n if ADFR != None:\n record = self.filt_strand_bias(record, ADFR)\n\n return record\n","repo_name":"YuSugihara/MutMap","sub_path":"mutmap/snpfilt.py","file_name":"snpfilt.py","file_ext":"py","file_size_in_byte":4402,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"61"} +{"seq_id":"7606498512","text":"import logging\nimport sys\nimport pathlib\nimport importlib\n\nfrom kqcircuits.klayout_view import KLayoutView\nfrom kqcircuits.util.log_router import route_log\nfrom kqcircuits.defaults import TMP_PATH\nfrom kqcircuits.util.plugin_startup import register_plugins\n\n# Script to create a KQCircuits element in KLayout by specifying the path to the module file containing the element.\n# This script can be used to integrate with external editors.\n#\n# Command line usage:\n# klayout -e -rx -rm util/create_element_from_path.py -rd element_path=kqcircuits\\chips\\demo.py\n#\n# Here, the flags specify the following:\n# -e: Run KLayout in edit mode\n# -rx: Skip running automatic startup scripts (avoids creating a second empty layout)\n# -rm: Run this script on startup\n# -rd: Inject a variable element_path into the script scope containing the path of the element module to create.\n# element_path should be relative to the kqcircuits repository, and it should be a module containing exactly\n# one KQCircuits Element or Chip.\n#\n# To use this as an external tool in Pycharm:\n# - Under Settings -> Tools -> External Tools create a new entry as follows:\n# - Program: point to klayout_app.exe (Windows) or the klayout executable (Linux)\n# - Arguments: -e -rm \"$ContentRoot$\\util\\create_element_from_path.py\" -rd\n# element_path=\"$FilePathRelativeToProjectRoot$\"\n# - To execute, right click 
the python file (in the Project browser on the editor tab) containing the element to create\n# and choose the tool under External Tools.\n#\n# To use this as a task in Visual Studio Code, add the following snippet to your `.vscode/tasks.json`.\n# Linux users may need to edit the \"command\" field to point to KLayout.\n# {\n# \"label\": \"Open in KLayout\",\n# \"type\": \"shell\",\n# \"command\": \"/usr/bin/klayout\",\n# \"args\": [\n# \"-e\",\n# \"-rm\",\n# \"'${workspaceFolder:KQCircuits}${pathSeparator}util${pathSeparator}create_element_from_path.py'\",\n# \"-rd\",\n# \"element_path=\\\"${relativeFile}\\\"\"\n# ],\n# \"windows\": {\n# \"command\": \"${env:APPDATA}\\\\KLayout\\\\klayout_app.exe\",\n# \"options\": {\n# \"shell\": {\n# \"executable\": \"powershell.exe\"\n# }\n# },\n# },\n# \"osx\": {\n# \"command\": \"${USER}/Applications/klayout.app\"\n# # Homebrew installs under /Applications/KLayout/klayout.app\n# # \"command\": \"${USER}/Applications/KLayout/klayout.app\"\n# },\n# \"problemMatchers\": [],\n# // the following field allows running with the default 'build' task (Ctrl+Shift+B)\n# \"group\": {\n# \"kind\": \"build\",\n# \"isDefault\": true\n# }\n# }\n# To use this as a macro command in Vim/NeoVim\n# 1. Open your Vim or NeoVim configuration file.\n#\n# Linux/macOS:\n# * Vim: The configuration file is located at ~/.vimrc.\n# * NeoVim: The configuration file is located at ~/.config/nvim/init.vim.\n# Windows:\n# * Vim: The configuration file is located at $HOME/vimfiles/vimrc or $VIM/_vimrc.\n# * NeoVim: The configuration file is located at $HOME/AppData/Local/nvim/init.vim.\n#\n# These are the default paths; they are normally used unless changed upon installation.\n#\n# The configuration file may not be present, in which case you need to create one at the specified location.\n# Use the following command to verify that vim loaded the expected configuration file:\n# :e $MYVIMRC\n#\n# 2. Add the following lines to your configuration file:\n# \" Define a kqc command\n# command! -nargs=0 Kqc\n# \\ execute \"!klayout -e -rm {FULL PATH TO create_element_from_path.py} -rd element_path=\" . expand(\"%:p\") |\n# \\ redraw!\n# \" Create a mapping to run the kqc command\n# nnoremap <leader>kqc :Kqc<CR>\n#\n# After copy-pasting, substitute {FULL PATH TO create_element_from_path.py} with the absolute path to the\n# util/create_element_from_path.py python script.\n# On Windows, the klayout command does not work automatically, in which case it is best to substitute the klayout keyword\n# with the expanded absolute path to %APPDATA%/KLayout/klayout_app.exe\n#\n# 3. Save the configuration file and restart Vim or NeoVim for the changes to take effect.\n# Now, you can use the mapping <leader>kqc (where <leader> is a customizable key, often \\ by default) while\n# editing a Python file containing the element to create.\n# Pressing the mapping will execute the :Kqc command, which runs the external tool command to open the file in\n# KLayout.\n#\n# Register KQC plugins into KLayout (must happen before the layout view is created)\nregister_plugins()\n\n# Set up logging\nlogging.basicConfig(level=logging.INFO, stream=sys.stdout)\nroute_log(lowest_visible_level=\"INFO\", filename=f\"{TMP_PATH}/kqc.log\")\n\n# pylint: disable=undefined-variable\nlogging.info(f\"Element path: {element_path}\")\n\n# Figure out the python import path from the specified file path\npath_without_extension = pathlib.Path(element_path).with_suffix('')\n# Remove 'KQCircuits' or similar folder from beginning\nfor idx, part in reversed(list(enumerate(path_without_extension.parts))):\n if 'Circuits' in part:\n path_without_extension = path_without_extension.relative_to(*path_without_extension.parts[:idx+1])\n\nif path_without_extension.parts[0] == \"klayout_package\" and path_without_extension.parts[1] == \"python\":\n module_path = '.'.join(path_without_extension.parts[2:])\nelse:\n module_path = '.'.join(path_without_extension.parts)\nmodule_name = path_without_extension.name\n\n# Import module\nmodule = importlib.import_module(module_path)\n\nlogging.info(f\"Loaded module {str(module)}\")\n\n# Find classes that are in this actual module (rather than imported from somewhere else)\nclasses_in_module = [m for m in vars(module).values() if\n hasattr(m, '__module__') and m.__module__ == module_path and hasattr(m, '__mro__')]\nelement_classes = [m for m in classes_in_module if 'Element' in [s.__name__ for s in m.__mro__]]\n\nlogging.info(f\"Found classes {str(element_classes)}\")\n\nif len(element_classes) == 1:\n cls = element_classes[0]\nelse:\n raise ValueError(\"Expecting exactly one class in the module to run.\")\n\nview = KLayoutView()\nview.insert_cell(cls)\nview.focus()\n","repo_name":"iqm-finland/KQCircuits","sub_path":"util/create_element_from_path.py","file_name":"create_element_from_path.py","file_ext":"py","file_size_in_byte":6310,"program_lang":"python","lang":"en","doc_type":"code","stars":109,"dataset":"github-code","pt":"61"}
{"seq_id":"75248052994","text":"class Solution(object):\n def fairCandySwap(self, A, B):\n \"\"\"\n :type A: List[int]\n :type B: List[int]\n :rtype: List[int]\n \"\"\"\n diff = (sum(A) - sum(B)) / 2\n BB = set(B) # set is faster than list\n for i in A:\n if i-diff in BB: # double for loop exceeds time limit\n return [i, i-diff]","repo_name":"casssie-zhang/LeetcodeNotes","sub_path":"888.FairCandySwap.py","file_name":"888.FairCandySwap.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
{"seq_id":"28418121982","text":"# ┏━━━━━━━━━━━━━━━┓\n# ┏━━━┫ hdf5_utils.py ┣━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n# ┃ ┗━━━━━━━━━━━━━━━┛ ┃\n# ┃ Methods for handling HDF5 files throughout the project. ┃\n# ┃ These include: ┃\n# ┃ * Parsing the input HDF5 test set file. ┃\n# ┃ * Preprocessing the given HDF5 train set. ┃\n# ┃ * Writing new HDF5 formats to store processed train data. 
┃\n# ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛\n# ====== Imports ====================\n# -- python --\nfrom functools import reduce\nimport sys\n# -- internal --\nfrom const import *\nfrom image_utils import *\n# -- external --\nimport h5py\n\n\ndef find_all(a_str, sub):\n start = 0\n while True:\n start = a_str.find(sub, start)\n if start == -1: return\n yield start\n start += len(sub)\n\nclass HDF5_Data():\n def __init__(self, fname):\n self.fname = fname\n self.db = h5py.File(fname, 'r')\n self.datasets = list(self.db['data'].keys())\n if 'word' in self.datasets: self.datasets.remove('word')\n if 'font' in self.datasets: self.datasets.remove('font')\n\n def char_images(self, char: str):\n return self.db['data'][char]['images'][:]\n\n def char_indices(self, char: str):\n return self.db['data'][char]['indices'][:]\n\n def char_dataset_size(self, char: str):\n return len(self.db['data'][char]['indices'])\n\n @property\n def word(self):\n return self.db['data']['word'][:]\n\n @property\n def font(self):\n return self.db['data']['font'][:]\n\n def close(self):\n self.db.close()\n\ndef onehot_vec(i, size=5):\n v = np.zeros((size,))\n v[i] = 1\n return v\n\n\ndef convert_augment(infile, outfile):\n \"\"\"Creates a new HDF5 file from the data in the original file, organized in a different manner and augmented.\"\"\"\n A = 4 # Number of augmented versions for each image.\n db = h5py.File(infile, 'r')\n f = h5py.File(outfile, 'w')\n data = f.create_group('data')\n words = [word.decode() for im in db['data'] for word in db['data'][im].attrs['txt']]\n fonts = [onehot_vec(FONTS.index(font)) for im in db['data'] for font in db['data'][im].attrs['font']]\n word_lengths = [len(word) for word in words]\n characters = ''.join(words)\n n_characters = len(characters)\n word = data.create_dataset('word', n_characters, dtype=np.uint64)\n _ = data.create_dataset('font', (n_characters, 5), dtype=np.uint8, data=fonts)\n\n\n char_images = np.ndarray((n_characters, A, *NET_INPUT_SHAPE), dtype=np.uint8)\n global_char_idx = 0\n for idx, im in enumerate(db['data']):\n print(f\"{im} ({idx}/{len(db['data'])})\")\n img = db['data'][im][:]\n charBB = db['data'][im].attrs['charBB']\n for i in range(charBB.shape[-1]):\n char_img = process_bounding_box(img, charBB[:, :, i])\n augmented_char_images = augment_image(char_img)\n for j, aug_img in enumerate(augmented_char_images):\n char_images[global_char_idx][j] = aug_img\n global_char_idx += 1\n\n\n # --- fill 'word' dataset ---\n i = 0; w = 0\n for l in word_lengths:\n for i in range(i, i+l):\n word[i] = w\n w += 1; i += 1\n\n # --- fill character datasets ---\n for char in ''.join(set(characters)):\n group = data.create_group(str(ord(char)))\n indices = list(find_all(characters, char))\n n = len(indices) # number of occurences of the character in the dataset\n _ = group.create_dataset(\"indices\", n, dtype=np.uint64, data=indices)\n _ = group.create_dataset(\"images\", (n, A, *NET_INPUT_SHAPE), dtype=np.uint8, data=char_images[indices])\n\n f.close()\n\n\n\ndef convert(infile, outfile, labels=True):\n \"\"\"Creates a new HDF5 file from the data in the original file, organized in a different manner.\"\"\"\n im_names = []\n db = h5py.File(infile, 'r')\n f = h5py.File(outfile, 'w')\n data = f.create_group('data')\n words = [word.decode() for im in db['data'] for word in db['data'][im].attrs['txt']]\n if labels: fonts = [onehot_vec(FONTS.index(font)) for im in db['data'] for font in db['data'][im].attrs['font']]\n word_lengths = [len(word) for word 
in words]\n characters = ''.join(words)\n n_characters = len(characters)\n word = data.create_dataset('word', n_characters, dtype=np.uint64)\n if labels: data.create_dataset('font', (n_characters, 5), dtype=np.uint8, data=fonts)\n\n\n char_images = np.ndarray((n_characters, *NET_INPUT_SHAPE), dtype=np.uint8)\n global_char_idx = 0\n for idx, im in enumerate(db['data']):\n print(f\"{im} ({idx}/{len(db['data'])})\")\n img = db['data'][im][:]\n charBB = db['data'][im].attrs['charBB']\n for i in range(charBB.shape[-1]):\n im_names.append(im)\n char_img = process_bounding_box(img, charBB[:, :, i])\n char_images[global_char_idx] = char_img\n global_char_idx += 1\n\n\n # --- fill 'word' dataset ---\n i = 0; w = 0\n for l in word_lengths:\n for i in range(i, i+l):\n word[i] = w\n w += 1; i += 1\n\n # --- fill character datasets ---\n for char in ''.join(set(characters)):\n group = data.create_group(str(ord(char)))\n indices = list(find_all(characters, char))\n n = len(indices) # number of occurences of the character in the dataset\n _ = group.create_dataset(\"indices\", n, dtype=np.uint64, data=indices)\n _ = group.create_dataset(\"images\", (n, *NET_INPUT_SHAPE), dtype=np.uint8, data=char_images[indices])\n\n f.close()\n return im_names\n\ndef main():\n convert_augment(\"data/imported.h5\", \"data/augmented2.h5\")\n # convert(\"data/imported.h5\", \"data/converted.h5\")\n db = h5py.File(\"data/augmented2.h5\", 'r')\n # db = h5py.File(\"data/converted.h5\", 'r')\n images = db['data'][str(ord('T'))]['images']\n for i in range(images.shape[0]):\n plt.imshow(images[i][0], cmap='gray')\n plt.show()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ReemKish/msc-computer-vision-project","sub_path":"src/hdf5_utils.py","file_name":"hdf5_utils.py","file_ext":"py","file_size_in_byte":6461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5376908032","text":"from logging.config import dictConfig\n\nfrom flask import (Flask, abort, flash, make_response, redirect,\n render_template, request, session, url_for)\nfrom markupsafe import escape\n\ndictConfig(\n {\n \"version\": 1,\n \"formatters\": {\n \"default\": {\n \"format\": \"[%(asctime)s] %(levelname)s in %(module)s: %(message)s\",\n }\n },\n \"handlers\": {\n \"wsgi\": {\n \"class\": \"logging.StreamHandler\",\n \"stream\": \"ext://flask.logging.wsgi_errors_stream\",\n \"formatter\": \"default\",\n }\n },\n \"root\": {\"level\": \"WARNING\", \"handlers\": [\"wsgi\"]},\n }\n)\n\n\napp = Flask(__name__)\n\n# Set the secret key to some random bytes. 
Keep this really secret!\napp.secret_key = b'_5#y2L\"F4Q8z\\n\\xec]/'\n\n\ndef valid_login(username: str, password: str) -> None:\n ...\n\n\ndef log_the_user_in(username: str) -> None:\n ...\n\n\n@app.route(\"/\")\ndef index():\n # username = request.cookies.get('username')\n return redirect(url_for(\"login\"))\n\n\n@app.route(\"/hello\")\n@app.route(\"/hello/<name>\")\ndef hello_world(name=None):\n return render_template(\"index.html\", name=name)\n\n\n# @app.route(\"/<name>\")\n# def hello_user(name):\n# return f\"Hello, {escape(name)}!\"\n\n\n@app.route(\"/user/<username>\")\ndef show_user_profile(username):\n # show the user profile for that user\n # app.logger.warning(username)\n\n return f\"User {escape(username)}\"\n\n\n@app.route(\"/post/<int:post_id>\")\ndef show_post(post_id):\n # show the post with the given id, the id is an integer\n return f\"Post {post_id}\"\n\n\n@app.route(\"/path/<path:subpath>\")\ndef show_subpath(subpath):\n # show the subpath after /path/\n return f\"Subpath {escape(subpath)}\"\n\n\n@app.route(\"/projects/\")\ndef projects():\n return \"The project page\"\n\n\n@app.route(\"/about\")\ndef about():\n return \"The about page\"\n\n\n# =========\n@app.route(\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n # abort(401)\n error = None\n if request.method == \"POST\":\n if valid_login(request.form[\"username\"], request.form[\"password\"]):\n return log_the_user_in(request.form[\"username\"])\n else:\n flash(\"Invalid password provided\", \"error\")\n error = \"Invalid username/password\"\n\n search_word = request.args.get(\"key\", \"\")\n # the code below is executed if the request method\n # was GET or the credentials were invalid\n\n return render_template(\"login.html\", error=error)\n\n\n# ========= alternative way\n# @app.get(\"/login\")\n# def login_get():\n# return show_the_login_form()\n\n\n# @app.post(\"/login\")\n# def login_post():\n# return do_the_login()\n\n\n# =========\n\n\n@app.route(\"/user/<username>\")\ndef profile(username):\n return f\"{username}'s profile\"\n\n\nwith app.test_request_context():\n # print(url_for(\"index\"))\n # print(url_for(\"login\"))\n # print(url_for(\"login\", next=\"/\"))\n # print(url_for(\"profile\", username=\"John Doe\"))\n\n url_for(\"static\", filename=\"style.css\")\n\n\nfrom werkzeug.utils import secure_filename # used by upload_file below; normally placed with the top-level imports\n\n\n@app.route(\"/upload\", methods=[\"GET\", \"POST\"])\ndef upload_file():\n if request.method == \"POST\":\n file = request.files[\"the_file\"]\n file.save(f\"/var/www/uploads/{secure_filename(file.filename)}\")\n ...\n\n\n@app.errorhandler(404)\ndef not_found(error):\n resp = make_response(render_template(\"page_not_found.html\"), 404)\n resp.set_cookie(\"username\", \"the username\")\n\n resp.headers[\"X-Something\"] = \"A value\"\n return resp\n\n\n@app.route(\"/logout\")\ndef logout():\n # remove the username from the session if it's there\n session.pop(\"username\", None)\n return redirect(url_for(\"index\"))\n\n\n# app.run(debug=True, use_debugger=False, use_reloader=False)\n","repo_name":"ivanprytula/scrape-the-web","sub_path":"flask_quickstart/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"44051812751","text":"import requests\nimport json\nimport pandas as pd\nimport os\n\ndef retrieveCaseMeta(file_ids,outputfile):\n '''\n\n Get the tsv metadata for the list of case_ids\n Args:\n file_ids: numpy array of case UUIDs (cases.case_id values)\n outputfile: the output filename\n\n '''\n\n fd = open(outputfile,'w')\n cases_endpt = 'https://api.gdc.cancer.gov/cases'\n\n\n filters = {\n 
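# GDC \"filters\" payload: an \"in\" filter matching any of the supplied case UUIDs\n 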
\"op\":\"in\",\n \"content\":{\n \"field\":\"cases.case_id\",\n \"value\": file_ids.tolist()\n }\n }\n\n '''\n fields = [\n \"case.case_id\"\n ]\n\n '''\n # print (filters)\n #expand group is diagnosis and demoragphic\n params = {\n \"filters\" : filters,\n #\"expand\" : \"diagnoses,demographic,exposures\",\n\t\"fields\" : \"primary_site\",\n \"format\": \"TSV\",\n \"pretty\": \"true\",\n \"size\": 12000\n }\n # print (params)\n #print (filters)\n #print (fields)\n \n \n response = requests.post(cases_endpt, headers = {\"Content-Type\": \"application/json\"},json = params)\n # print (response.content.decode(\"utf-8\"))\n fd.write(response.content.decode(\"utf-8\"))\n fd.close()\n\nif __name__ == '__main__':\n\n data_dir = \"/home/amber/Documents/ee542-lab10-group9/data/\"\n filename = data_dir+\"file_case_id_DNA.csv\"\n \n \n df = pd.read_csv(filename)\n case_ids = df.case_id.values\n # print(case_ids)\n \n caseids_meta_outfile = data_dir + \"cases_meta_disease.tsv\"\n # python request method\n retrieveCaseMeta(case_ids,caseids_meta_outfile)\n \n \n\n","repo_name":"xuyuewang/ee542-lab10-group9","sub_path":"code/request_case_disease.py","file_name":"request_case_disease.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13873394139","text":"def floor_(money):\n if money < 100:\n return 0\n else:\n s_ = str(money)\n s_ = s_[:-2] + \"00\"\n return int(s_)\n\n\ndef check(threshold, ranksize, maxratio, minratio, money):\n a, b = 0, threshold\n if threshold == 0:\n tax = minratio\n else:\n tax = 0\n flag = False\n money_ = floor_(money)\n while tax <= maxratio:\n # print(a, b, tax)\n if a <= money < b:\n take = money_ * tax * 0.01\n ret = money - int(take)\n return int(ret)\n\n else:\n a = b\n b = b + ranksize\n if flag:\n tax += 1\n if not flag:\n tax = minratio\n flag = True\n\n take = money_ * maxratio * 0.01\n ret = money - int(take)\n # print(money, take, maxratio, ret)\n return int(ret)\n\n\ndef solution(money, minratio, maxratio, ranksize, threshold, months):\n # a, b = 최소 최대\n for i in range(months):\n money = check(threshold, ranksize, maxratio, minratio, money)\n # print(money)\n answer = money\n return answer\n\nprint(solution(1000000000, 50, 99, 100000, 0, 6))","repo_name":"whiskey21/my-algorithm-book","sub_path":"DFS&BFS/t1.py","file_name":"t1.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27318681264","text":"from __future__ import print_function\nimport argparse\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport skimage\nimport skimage.io\nimport skimage.transform\nimport numpy as np\nimport time\nimport math\nimport copy\nfrom dataloader import myloader as DA\nfrom matplotlib import pyplot as plt\nfrom err_calculation import *\nfrom models import *\nfrom torch.utils.tensorboard import SummaryWriter\nfrom tqdm import tqdm\n\nparser = argparse.ArgumentParser(description='TANet')\nparser.add_argument('--maxdisp', type=int, default=192,\n help='max disp')\nparser.add_argument('--dataset', default='kitti2015',\n help='datapath')\nparser.add_argument('--datapath', default='dataset/kitti2015_train/',\n help='datapath')\nparser.add_argument('--epochs', type=int, 
default=500,\n help='number of epochs to train')\nparser.add_argument('--loadmodel', default=None,\n help='load model')\nparser.add_argument('--savemodel', default='./ckpt/',\n help='save model')\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='enables CUDA training')\nparser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\nparser.add_argument('--logdir', default='log_dir',\n help='save log')\n\nargs = parser.parse_args()\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\ntorch.manual_seed(args.seed)\nif args.cuda:\n torch.cuda.manual_seed(args.seed)\n \nif args.dataset == 'kitti2012':\n from dataloader import myloader12 as ls\n args.datapath = 'dataset/kitti2012_train/'\nelif args.dataset == 'kitti2015':\n from dataloader import myloader15 as ls\n args.datapath = 'dataset/kitti2015_train/'\nelse:\n print('no dataset.')\n\nall_left_img, all_right_img, all_disp_pre_train, all_left_disp, test_left_img, test_right_img, test_disp_pre, test_disp = ls.dataloader(args.datapath)\n\nTrainImgLoader = torch.utils.data.DataLoader(\n DA.myImageFloder(all_left_img, all_right_img, all_disp_pre_train, all_left_disp, True),\n batch_size=4, shuffle=True, num_workers=8, drop_last=False)\n\nTestImgLoader = torch.utils.data.DataLoader(\n DA.myImageFloder(test_left_img, test_right_img, test_disp_pre, test_disp, False),\n batch_size=4, shuffle=False, num_workers=8, drop_last=False)\n\nmodel = TANet(args.maxdisp)\n\nif args.cuda:\n model = nn.DataParallel(model)\n model.cuda()\n\nif args.loadmodel is not None:\n state_dict = torch.load(args.loadmodel)\n model.load_state_dict(state_dict['state_dict'])\n\nprint('Number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))\n\noptimizer = optim.Adam(model.parameters(), lr=0.01, betas=(0.9, 0.999))\n\ndef train(imgL, imgR, disp_pre, disp_L):\n model.train()\n imgL = Variable(torch.FloatTensor(imgL))\n imgR = Variable(torch.FloatTensor(imgR))\n disp_pre = Variable(torch.FloatTensor(disp_pre))\n disp_L = Variable(torch.FloatTensor(disp_L))\n\n if args.cuda:\n imgL, imgR, disp_pre, disp_true = imgL.cuda(), imgR.cuda(), disp_pre.cuda(), disp_L.cuda()\n\n mask = (disp_true > 0)\n mask.detach_()\n\n optimizer.zero_grad()\n\n output = model(imgL, imgR, disp_pre)\n output = torch.squeeze(output, 1)\n loss = F.smooth_l1_loss(output[mask], disp_true[mask], size_average=True)\n\n loss.backward()\n optimizer.step()\n\n return loss.item()\n\ndef test(imgL, imgR, disp_pre, disp_true):\n model.eval()\n imgL = Variable(torch.FloatTensor(imgL))\n imgR = Variable(torch.FloatTensor(imgR))\n disp_pre = Variable(torch.FloatTensor(disp_pre))\n\n if args.cuda:\n imgL, imgR, disp_pre = imgL.cuda(), imgR.cuda(), disp_pre.cuda()\n\n start_time = time.time()\n with torch.no_grad():\n output = model(imgL, imgR, disp_pre)\n cost_time = time.time()-start_time\n\n pred_disp = output.data.cpu() # torch.Size([1, 1, 368, 1232])\n pred_disp = pred_disp.squeeze(1)\n\n mask = (disp_true > 0)\n mask.detach_()\n epe = EPE_metric(pred_disp, disp_true, mask)\n D1 = D1_metric(pred_disp, disp_true, mask)\n Thres1 = Thres_metric(pred_disp, disp_true, mask, 1.0)\n Thres2 = Thres_metric(pred_disp, disp_true, mask, 2.0)\n Thres3 = Thres_metric(pred_disp, disp_true, mask, 3.0)\n err_pac = [epe, D1, Thres1, Thres2, Thres3]\n\n return err_pac, cost_time\n\ndef adjust_learning_rate(optimizer, epoch):\n warm_up = 0.02\n const_range = 0.6\n min_lr_rate = 0.05\n\n if epoch <= 500 * warm_up:\n lr = (1 - 
min_lr_rate) * 4.0e-4 / (\n 500 * warm_up\n ) * epoch + min_lr_rate * 4.0e-4\n elif 500 * warm_up < epoch <= 500 * const_range:\n lr = 4.0e-4\n else:\n lr = (min_lr_rate - 1) * 4.0e-4 / (\n (1 - const_range) * 500\n ) * epoch + (1 - min_lr_rate * const_range) / (1 - const_range) * 4.0e-4\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\ndef main():\n start_full_time = time.time()\n min_D1 = 100\n min_D1_epoch = 0\n writer = SummaryWriter(log_dir=args.logdir, flush_secs=5)\n\n for epoch in range(1, args.epochs+1):\n total_train_loss = 0\n total_times = 0\n total_epe = 0\n total_D1_t = 0\n total_T1 = 0\n total_T2 = 0\n total_T3 = 0\n adjust_learning_rate(optimizer, epoch)\n\n for batch_idx, (imgL_crop, imgR_crop, disp_pre, disp_crop_L) in tqdm(enumerate(TrainImgLoader)):\n loss = train(imgL_crop, imgR_crop, disp_pre, disp_crop_L)\n total_train_loss += loss\n train_loss = total_train_loss/len(TrainImgLoader)\n print('epoch %d total training loss = %.3f' %(epoch, train_loss))\n\n for batch_idx, (imgL, imgR, disp_pre, disp_L) in enumerate(TestImgLoader):\n err, cost_time = test(imgL, imgR, disp_pre, disp_L)\n total_times += cost_time/4 # batch size = 4\n total_epe += err[0]\n total_D1_t += err[1]\n total_T1 += err[2]\n total_T2 += err[3]\n total_T3 += err[4]\n val_time = total_times / len(TestImgLoader)\n val_epe = total_epe / len(TestImgLoader)\n val_D1_t = total_D1_t / len(TestImgLoader) * 100\n val_T1 = total_T1 / len(TestImgLoader) * 100\n val_T2 = total_T2 / len(TestImgLoader) * 100\n val_T3 = total_T3 / len(TestImgLoader) * 100\n print('average time = %.3f, average epe = %.3f, average D1_t = %.3f, average T1 = %.3f, average T2 = %.3f, average T3 = %.3f'\n % (val_time, val_epe, val_D1_t, val_T1, val_T2, val_T3))\n if val_D1_t < min_D1:\n min_D1 = val_D1_t\n min_D1_epoch = epoch\n if epoch > 280:\n savefilename = args.savemodel + 'finetune_' + str(epoch) + '.tar'\n torch.save({\n 'epoch': epoch,\n 'state_dict': model.state_dict(),\n 'train_loss': total_train_loss / len(TrainImgLoader),\n 'test_loss': val_D1_t / len(TestImgLoader) * 100,\n }, savefilename)\n writer.add_scalar('Train_loss', train_loss, epoch)\n writer.add_scalar('val_D1_t', val_D1_t, epoch)\n writer.add_scalar('val_epe', val_epe, epoch)\n writer.add_scalar('val_T1', val_T1, epoch)\n writer.add_scalar('val_T2', val_T2, epoch)\n writer.add_scalar('val_T3', val_T3, epoch)\n writer.add_scalar('lr', optimizer.param_groups[0]['lr'], epoch)\n print('full finetune time = %.2f HR, epoch %d get min_D1 = %.3f' % ((time.time() - start_full_time) / 3600, min_D1_epoch, min_D1))\n\n writer.close()\n\nif __name__ == '__main__':\n main()\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Y0uchenZ/TANet","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7852,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"}
{"seq_id":"71963436034","text":"import random \nimport math\nimport copy\nimport itertools\nsys_random = random.SystemRandom()\nNUMBER_OF_SLOTS=4*18\nMAX_HOBBITS=10\nROUNDS=3\nITERACTIONS=3\n\n# The Level class represents the stages to be traversed and, through them, the combinations generated while traversing the stages\nclass Level:\n max_walks=MAX_HOBBITS\n memoize_allowed={}\n memoize_denied={}\n# initializes with the list of levels and hobbits\n def __init__(self,levels_list,hobbits_list,number_of_levels):\n self.levels=levels_list\n self.hobbits=hobbits_list\n self.number_of_levels=number_of_levels\n self.start_combination()\n# initializes the empty combinations\n def start_combination (self):\n #aux=self.random_combination()\n aux=[\n0, 1, 1, 1,\n0, 0, 0, 1, \n1, 1, 0, 1, \n1, 0, 0, 1, \n1, 1, 1, 0, \n0, 0, 1, 0, \n1, 1, 1, 1, \n1, 0, 1, 0, \n0, 1, 1, 0, \n1, 1, 0, 1, \n0, 0, 1, 0, \n0, 1, 1, 0, \n1, 0, 1, 0, \n1, 1, 0, 1, \n1, 0, 0, 1, \n0, 1, 0, 0, \n0, 1, 0, 1, \n1, 0, 1, 1]\n #print(\"start level\\n\")\n aux_cost=self.calculate_cost(aux)\n self.temp_combination=aux\n self.temp_cost=aux_cost\n self.best_combination=aux\n self.best_cost=aux_cost\n self.final_combination=aux\n self.final_cost=aux_cost\n\n def random_combination (self):\n #print(\" func random combination \\n\")\n slot=0\n hobbits=[self.max_walks]*4\n hobbits_levels=[]\n frodo=0\n sam=1\n merry=2\n pippin=3\n i=0\n i=0\n for i in range(NUMBER_OF_SLOTS):\n hobbits_levels.append(sys_random.randint(0,1))\n while not self.check_possibility(hobbits_levels):\n #print(\"random - inside the while\")\n hobbits=[self.max_walks]*4\n hobbits_levels=[]\n i=0\n for i in range(NUMBER_OF_SLOTS):\n hobbits_levels.append(sys_random.randint(0,1))\n return hobbits_levels\n# checks whether the generated neighbor is valid\n def check_possibility(self,neighbor):\n #print(\" func check pos \\n\")\n total_hobbit=[0,0,0,0]\n string_ints = [str(int) for int in neighbor]\n list_string=\"\".join(string_ints)\n if neighbor==None:\n return False\n if list_string in self.memoize_allowed.keys(): # already seen and this neighbor is allowed\n #print(\"allowed\\n\")\n return True\n if list_string in self.memoize_denied.keys(): # already seen and this neighbor is not allowed\n return False\n # else: we have never seen this combination\n for i in range(0,len(neighbor)-3): # at least one hobbit has to be used on each level\n level_total=0\n total_hobbit[0]+=neighbor[i]\n total_hobbit[1]+=neighbor[i+1]\n total_hobbit[2]+=neighbor[i+2]\n total_hobbit[3]+=neighbor[i+3]\n level_total=neighbor[i]+neighbor[i+1]+neighbor[i+2]+neighbor[i+3]\n if level_total==0:\n self.memoize_denied[list_string]=\"d\"\n return False \n i+=4\n if total_hobbit[0]==0 and total_hobbit[1]==0 and total_hobbit[2]==0 and total_hobbit[3]==0: \n self.memoize_denied[list_string]=\"d\" \n return False\n if total_hobbit[0]>MAX_HOBBITS or total_hobbit[1]>MAX_HOBBITS or total_hobbit[2]>MAX_HOBBITS or total_hobbit[3]>MAX_HOBBITS: \n self.memoize_denied[list_string]=\"d\" \n return False\n self.memoize_allowed[list_string]=\"a\"\n return True\n # 0 - 0 1 2 3\n # 1 - 4 5 6 7\n # 2 - 8 9 10 11\n # 3 - 12 13 14 15\n # 4 - 16 17 18 19\n # 5 - 20 21 22 23\n # 6 - 24 25 26 27\n # 7 - 28 29 30 31\n # 8 - 32 33 34 35\n # 9 - 36 37 38 39\n # 10- 40 41 42 43\n # 11- 44 45 46 47\n # 12- 48 49 50 51\n # 13- 52 53 54 55\n # 14- 56 57 58 59\n # 15- 60 61 62 63\n # 16- 64 65 66 67\n # 17- 68 69 70 71\n# computes the cost of the combination\n def calculate_cost(self,current):#calculates the total cost of said combination\n #print(\"calculate cost func\\n\")\n #print(current)\n total=0\n level_speed=[]\n hobbits_total_speed=1\n j=0\n level_index=0\n while jself.temp_cost:\n \n #print(\"Not a valid neighbor, generate new\\n \")\n current_operation=sys_random.choice(operations)\n neighbor=current_operation()\n neighbor_cost=self.calculate_cost(neighbor)\n print(neighbor)\n #print(\" found a new neighbor: \")\n \n \n self.temp_combination=neighbor\n self.temp_cost=neighbor_cost\n return \n# returns the hobbits on that level\n def which_level(self,level_list,chosen_level):\n return [level_list[(chosen_level*4)-4],\n level_list[(chosen_level*4)-3],\n level_list[(chosen_level*4)-2],\n level_list[(chosen_level*4)-1]]\n# returns how many hobbits are on that level\n def count_on_level(self,level):\n total=0\n for i in range (0,3):\n if level[i]==1:\n total+=1\n return total\n# clears the hobbit slots of that level\n def clear_level(self,level_list,chosen_level):\n level_list[(chosen_level*4)-5]=0\n level_list[(chosen_level*4)-4]=0\n level_list[(chosen_level*4)-3]=0\n level_list[(chosen_level*4)-2]=0\n level_list[(chosen_level*4)-1]=0\n return level_list\n# counts the total number of hobbit walks over the whole route\n def count_total_of_hobbits(self, levels_list):\n total=0\n for i in levels_list:\n if i==1:\n total+=1\n return total\n# helper for generating a new neighbor: moves one hobbit, using the best combination found so far as a base\n def shift_hobbit(self):\n #print(\"func shift hobbit \\n\")\n best=None\n for i in range(0,ROUNDS):\n temp_hobbits=copy.deepcopy(self.final_combination) #current combination\n line = sys_random.randint(0,3)#max index hobbit\n random_level=sys_random.randint(1,self.number_of_levels)\n hobbit=None\n chosen_level=self.which_level(temp_hobbits,random_level)\n level_cost=self.count_on_level(chosen_level)\n if level_cost>1:#linecost_neighbor:\n best=temp_hobbits\n cost=cost_neighbor\n else:\n best=self.final_combination\n cost=self.final_cost\n temp_hobbits=best\n print(temp_hobbits)\n return temp_hobbits\n\n# helper for generating a new neighbor: swaps stages, using the best combination found so far as a base\n def change_levels(self):\n #print(\"func change levels \\n\")\n best=None\n cost=None\n i=0\n while icost_neighbor:\n best=temp_hobbits\n cost=cost_neighbor\n else:\n best=self.final_combination\n cost=self.final_cost\n i+=1\n temp_hobbits=best\n print(temp_hobbits)\n return temp_hobbits\n# returns a list of the hobbits that are still available to walk; the array layout is [Frodo,Sam,Merry,Pippin]\n def find_not_tired_hobbit(self,neighbor):\n hobbit=[0,0,0,0]\n for i in range(0,len(neighbor)-3):\n hobbit[0]+=neighbor[i]\n hobbit[1]+=neighbor[i+1]\n hobbit[2]+=neighbor[i+2]\n hobbit[3]+=neighbor[i+3] \n i+=4\n for i in hobbit:\n if i>=MAX_HOBBITS:\n i=0\n else:\n i=1\n return hobbit\n\n# helper for generating a new neighbor: moves a hobbit that can still walk, using the best combination found so far as a base\n def move_walking_hobbit(self):\n #print(\"func move walking hobbit\\n\")\n temp_hobbits=copy.deepcopy(self.final_combination)\n not_tired=self.find_not_tired_hobbit(temp_hobbits)\n not_walking=None\n for i in range(0,3):\n if i==1:\n not_walking=i\n resting_hobbit=sys_random.randint(0,3)\n while not_walking == resting_hobbit:\n resting_hobbit=sys_random.randint(0,3)\n levels_without_hobbits=[]\n #total_hobbits=self.count_total_of_hobbits(temp_hobbits)\n for i in range(self.number_of_levels): # check that i is not reused\n chosen_level=self.which_level(temp_hobbits,i)\n if 1== chosen_level[resting_hobbit] and self.count_on_level(chosen_level)<4:\n levels_without_hobbits.append(i)\n if not levels_without_hobbits:\n levels_without_hobbits=sys_random.randint(1,self.number_of_levels)\n else:\n levels_without_hobbits=sys_random.choice(levels_without_hobbits)#empty\n # spend one hobbit walk\n temp_hobbits[levels_without_hobbits*4-4+resting_hobbit]=1\n print(temp_hobbits)\n return temp_hobbits # note: the SA step is not applied here\n\n\n def move_stopped_hobbit(self):\n #print(\"func move stopped hobbit\\n\")\n temp_hobbits=copy.deepcopy(self.final_combination)\n random_hobbit_index=sys_random.randint(0,3)\n random_level=sys_random.randint(1,self.number_of_levels)\n level_count=1\n random_hobbit_status=temp_hobbits[(random_level-1)*4+random_hobbit_index]\n available_hobbits=self.find_not_tired_hobbit(temp_hobbits)# check whether it can still walk\n if random_hobbit_status==0:# the hobbit is currently resting\n if available_hobbits[random_hobbit_index]==0:# if it cannot walk any more, a walk has to be removed from somewhere\n i=0\n while igoal_temperature:\n formatted_float = \"{:.2f}\".format(start_temperature)\n print(\"start temperature \"+ formatted_float)\n for i in range (0,ITERACTIONS):# iterations\n walk_to_mordor.generate_neighbor() #add to temp\n # walk_to_mordor.best_cost=Level.calculate_cost(walk_to_mordor.best_combination)\n delta=walk_to_mordor.temp_cost-walk_to_mordor.best_cost \n if delta<0:\n walk_to_mordor.best_combination=walk_to_mordor.temp_combination\n walk_to_mordor.best_cost=walk_to_mordor.temp_cost\n # check the acceptance probability\n elif probability(delta,walk_to_mordor.best_cost)>=(random.uniform(0,100)/100):\n walk_to_mordor.best_combination=walk_to_mordor.temp_combination\n walk_to_mordor.best_cost=walk_to_mordor.temp_cost # accept the worse neighbor's cost\n #print(\"calculate cost SA after elif \\n\")\n if walk_to_mordor.best_cost Callable[[WorkflowContext], Any]:\n \"\"\"Import a node.\"\"\"\n module_name, _, func = node_id.rpartition(\":\")\n module = importlib.import_module(module_name)\n return getattr(module, func)\n\n\ndef _call_parallel_node(node_id, context_data, return_vars):\n \"\"\"Wrapper to call parallel nodes.\"\"\"\n try:\n node = import_node(node_id)\n except (AttributeError, ImportError) as ex:\n raise FatalError(f\"Unable to import parallel node: {ex}\")\n\n # Generate context and call node\n context = WorkflowContext(**context_data)\n node(context)\n\n # Generate return values from context\n state = context.state\n return tuple(state[var] for var in return_vars)\n\n\nclass _ParallelNode:\n \"\"\"Wrapper around multiprocessing pool to do actual parallel processing.\"\"\"\n\n __slots__ = ()\n\n pool_type = Pool\n\n def _map_to_pool(\n self,\n node_id: str,\n context_iter: Iterable[Dict[str, Any]],\n return_vars: Sequence[str],\n ) -> Sequence[Any]:\n \"\"\"Map an iterable of context entries into a node.\n\n Uses a parallel worker pool.\"\"\"\n return self._pool.starmap(\n _call_parallel_node,\n ((node_id, context_data, return_vars) for context_data in context_iter),\n )\n\n @cached_property\n def _pool(self):\n return self.pool_type()\n\n\nclass MapNode(Navigable, _ParallelNode):\n \"\"\"Map an iterable into a specified node.\n\n Using the multiprocessing library to perform the operation in parallel.\n\n An independent context scope is created in each subprocess, with an optional\n ``merge_var`` supplied to be collected from this context to be combined as\n the output of the parallel mapping operation.\n\n :param target_var: Singular or multiple variables to unpack value into.\n This value can be either a single string, a comma separated list of\n strings or a sequence of strings.\n :param in_var: Context variable containing a sequence of values to be\n iterated over.\n\n .. 
code-block:: python\n\n # Single direction mapping\n (\n MapNodes(\"message\", in_var=\"messages\")\n .loop(\"namespace:node_name\")\n )\n\n # Mapping with merge variable\n (\n MapNodes(\"message\", in_var=\"messages\")\n .loop(\"namespace:node_name\")\n .merge_var(\"results\")\n )\n\n \"\"\"\n\n __slots__ = (\"target_var\", \"in_var\", \"_merge_vars\", \"_node_id\")\n\n def __init__(self, target_var: str, in_var: str):\n self.target_var = target_var\n self.in_var = in_var\n self._merge_vars = []\n self._node_id = None\n\n def __call__(self, context: WorkflowContext):\n context.info(\"🔁 %s\", self)\n try:\n iterable = context.state[self.in_var]\n except KeyError:\n raise WorkflowRuntimeError(f\"Variable {self.in_var} not found in context\")\n\n if not isinstance(iterable, Iterable):\n raise WorkflowRuntimeError(f\"Variable {self.in_var} is not iterable\")\n\n if self._node_id:\n result_vars = [name for name, _ in self._merge_vars]\n results = self._map_to_pool(\n self._node_id,\n ({self.target_var: value} for value in iterable),\n result_vars,\n )\n context.state.update(\n zip(\n result_vars,\n merge_nested_entries(\n results, [merge for _, merge in self._merge_vars]\n ),\n )\n )\n\n @property\n def name(self):\n \"\"\"Name of node.\"\"\"\n return f\"Map ({self.target_var}) in `{self.in_var}`\"\n\n def branches(self) -> Optional[Branches]:\n \"\"\"Branches to call on each iteration of the foreach block.\"\"\"\n return {\"loop\": [self._node_id]}\n\n def loop(self, node: str) -> \"MapNode\":\n \"\"\"Nodes to call on each iteration of the foreach block.\"\"\"\n self._node_id = node\n return self\n\n def merge_vars(self, *merge_vars: Union[str, Tuple[str, MergeMethod]]) -> \"MapNode\":\n \"\"\"Vars to merge back from parallel execution.\n\n These can optionally take a merge method to defined how the variables\n are merged; the default is ``append`` which will append each variable\n into a list. 
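For example, ``.merge_vars(\"results\")`` (an illustrative variable name) collects each\n subprocess's ``results`` value this way. 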
The other option is ``extend`` which allows for lists of\n results to be combined into a single list.\n \"\"\"\n\n self._merge_vars = _merge_vars = []\n for merge_var in merge_vars:\n if isinstance(merge_var, str):\n var, method = merge_var, MergeMethod.Append\n else:\n var, method = merge_var\n _merge_vars.append((var, method.value))\n return self\n","repo_name":"pyapp-org/pyapp-flow","sub_path":"src/pyapp_flow/parallel_nodes.py","file_name":"parallel_nodes.py","file_ext":"py","file_size_in_byte":5291,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"5136855121","text":"from rest_framework import serializers\n\nfrom bookmarks.models import Bookmark, Collection\nfrom bookmarks.services import PageInfoGetter, get_html_page\n\n\nclass BookmarkSerializer(serializers.ModelSerializer):\n class Meta:\n model = Bookmark\n fields = (\"id\", \"title\", \"description\", \"link\", \"link_type\", \"preview_image\")\n extra_kwargs = {\n \"title\": {\n \"read_only\": True,\n },\n \"description\": {\n \"read_only\": True,\n },\n \"link_type\": {\n \"read_only\": True,\n },\n \"preview_image\": {\n \"read_only\": True,\n },\n }\n\n def create(self, validated_data):\n link = validated_data.get(\"link\")\n user = self.context[\"request\"].user\n page_info = PageInfoGetter(get_html_page(link))\n\n validated_data[\"created_by\"] = user\n validated_data[\"title\"] = page_info.get_title()\n validated_data[\"description\"] = page_info.get_description()\n validated_data[\"preview_image\"] = page_info.get_image()\n validated_data[\"link_type\"] = page_info.get_type()\n\n return super().create(validated_data)\n\n\nclass CollectionSerializer(serializers.ModelSerializer):\n class Meta:\n model = Collection\n fields = (\"id\", \"title\", \"description\", \"bookmarks\")\n\n bookmarks = BookmarkSerializer(read_only=True, many=True)\n\n def create(self, validated_data):\n user = self.context[\"request\"].user\n\n validated_data[\"created_by\"] = user\n\n return super().create(validated_data)\n\n\nclass BookmarkCollectionSerializer(serializers.ModelSerializer):\n class Meta:\n model = Bookmark\n fields = (\"id\",)\n extra_kwargs = {\n \"id\": {\n \"read_only\": False,\n },\n }\n","repo_name":"meelovar/BookmarksService","sub_path":"src/bookmarks/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23745936302","text":"import numpy as np\nimport pandas as pd\nfrom pandas import Series, DataFrame\nimport xlrd\nfrom tools.filetool import FileDispatcher\nimport datetime\nimport os\nfrom tools.strtool import isContainOr,contains\nimport re\nclass xlsDispatcher(FileDispatcher):\n current_date=None\n def __init__(self,date,path):\n self.current_date=date\n self.p_index=len(os.path.split(path)[0].split(\"//\"))+1\n\n\n def file_dispatch(self,filepath):\n filter_key = [\"油耗和汇总表\",\"2019.\",\"每月油耗 (version 1)\",\"油表\",\"油耗汇总表\"]\n filter_table_keys = [\"统计\"]\n filter_table_keys_detail=[\"汇总\"]\n filter_type = \".xls\"\n paths_sum=[]\n paths_detail=[]\n items=[]\n items_detail=[]\n if os.path.splitext(filepath)[1] == filter_type and isContainOr(keys=filter_key, target=filepath):\n data = xlrd.open_workbook(filepath)\n for sheet in data.sheets():\n if isContainOr(filter_table_keys,sheet.name):\n paths_sum.append({\"filepath\":filepath,\"sheetname\":sheet.name})\n 
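# NOTE: eager conversion of each sheet is disabled below; this method only collects (filepath, sheetname) pairs\n 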
#items.append(excel2Dataframe_Sum(filepath,self.p_index,sheet.name,\"车号\"))\n if isContainOr(filter_table_keys_detail,sheet.name):\n paths_detail.append({\"filepath\":filepath,\"sheetname\":sheet.name})\n #details=excel2Dataframe_Detail(filepath,self.p_index,sheet.name,\"车号\")\n #if details is not None:\n #items_detail.append(details)\n \n return (paths_sum,paths_detail)\n\n\"\"\"\nin data 原数据dataframe\n str_target查找的目标字符 \n bool_contain 是否包含,默认为false\n#out 定位列表[row,col]\n\"\"\"\ndef local_value(data,str_target,bool_contain=False):\n target_index=None\n for indexs in data.index:\n for i in range(len(data.loc[indexs].values)):\n if str_target in str(data.loc[indexs].values[i]):\n target_index=[indexs,i]\n break\n return target_index\n \n\"\"\"\n读取统计表\n\"\"\"\ndef excel2Dataframe_Sum(path,p_index,sheetname,target_by):\n xls=pd.ExcelFile(path)\n parsed=pd.read_excel(xls,sheetname)\n target_index=local_value(parsed,target_by,True)\n team=os.path.split(path)[0].split(\"//\")[p_index][0]\n return tim_data(parsed,target_index,getRoute(sheetname),team)\n\n\n\"\"\"\n读取汇总表\n\"\"\"\ndef excel2Dataframe_Detail(path,p_index,sheetname,target_by):\n xls=pd.ExcelFile(path)\n parsed=pd.read_excel(xls,sheetname)\n target_index=local_value(parsed,target_by,True)\n if target_index is None:\n return None\n team=os.path.split(path)[0].split(\"//\")[p_index][0]\n return tim_detail_data(parsed,target_index,getRoute(sheetname),team) \n\n \ndef excel2Dataframe(path,target_by,date_):\n dper=xlsDispatcher(date_,path)\n return searchforFileWithCallback(path,dper)\n \n \n\n#规整统计数据\ndef tim_data(data,target_index,route,team):\n #删掉纵轴无关数据\n new_obj=data.drop(data.columns[:target_index[1]],axis=1)\n new_obj=new_obj.drop(data.columns[target_index[1]+11:],axis=1)\n \n #删掉横轴无关数据\n _index1=new_obj.index[:target_index[0]+3]\n _index2=new_obj[new_obj[new_obj.columns[0]].isnull()|new_obj[new_obj.columns[0]].str.contains('总计')|new_obj[new_obj.columns[0]].str.contains('小计:')].index\n new_obj=new_obj.drop(_index1|_index2)\n # 如果列数小于11,增加inspection列并赋值为0,否则直接赋值为第11列\n if len(new_obj.columns)<11:\n new_obj[\"inspection\"]=0.0\n new_obj.columns=list(range(len(new_obj.columns)))\n new_obj=new_obj.rename(columns={0:'car_id',1:'mileage',4:'oil_cost',8:'maintain',9:'follow',10:'inspection'})\n new_obj['route']=route\n new_obj['team']=team\n #先使电值默认为0\n new_obj['elec_cost']=0\n new_obj=new_obj[['car_id','mileage','oil_cost','maintain','follow','inspection','route','team']].fillna(0)\n cleaned=new_obj.replace(\"二保\",0)\n return cleaned\n \n#规整汇总数据\ndef tim_detail_data(data,target_index,route,team):\n #删掉纵轴无关数据\n new_obj=data.drop(data.columns[:target_index[1]],axis=1)\n new_obj=new_obj.drop(data.columns[target_index[1]+18:],axis=1)\n \n #删掉横轴无关数据\n _index1=new_obj.index[:target_index[0]+3]\n rule1=new_obj[new_obj.columns[0]].isnull()\n rule2=new_obj[new_obj.columns[0]].str.contains('计')\n rule3=new_obj[new_obj.columns[0]].str.contains('一保')\n rule4=new_obj[new_obj.columns[0]].str.contains('备注')\n rule5=new_obj[new_obj.columns[0]]==0\n _index2=new_obj[rule1|rule2|rule3|rule4|rule5].index\n new_obj=new_obj.drop(_index1|_index2)\n new_obj.columns=list(range(len(new_obj.columns)))\n new_obj=new_obj.rename(columns={0:'car_id',2:'fix_days',4:'stop_days',5:'work_days',8:'engage_mileage',9:'public_mileage',10:'shunt_mileage',\n 14:'fault_times',15:'fault_minutes'\n })\n new_obj['route']=route\n new_obj['team']=team\n new_obj=new_obj[['car_id','fix_days','stop_days','work_days','engage_mileage','public_mileage','shunt_mileage',\n 
'fault_times','fault_minutes','route','team']].fillna(0)\n    return new_obj\n    \n# tidy the data\ndef tim_data_test(data,target_index,route):\n    dict_={'车号':'car_id','车公里':'mileage','实绩':'oil_cost','二保':'maintain','跟车':'follow'}\n    # list of the index of each required column\n    list_loc_x=[local_value(data,key)[1] for key in dict_.keys()]\n    # select those columns\n    new_obj=data.iloc[:,list_loc_x]\n    # rename the columns\n    new_obj.columns=dict_.values()\n    # drop the rows that repeat the 车号 header\n    new_obj=new_obj[new_obj[\"car_id\"]!=\"车号\"]\n    # drop rows whose first column is NaN; fill the remaining NaNs with zero\n    new_obj=new_obj[pd.notnull(new_obj[\"car_id\"])].fillna(0) \n    # replace useless values\n    cleaned=new_obj.replace(\"二保\",0)\n    # set the route\n    cleaned[\"route\"]=route\n    return cleaned\n    \n    \n# search for the matching files, dispatch them, and return the results in the list items\ndef searchforFileWithCallback(path,dper):\n    items = []\n    if os.path.isdir(path):\n        dirs = os.listdir(path)\n        for file in dirs:\n            _filepath = path + \"//\" + str(file)\n            if os.path.isdir(_filepath):\n                item=searchforFileWithCallback(path=_filepath,dper=dper)\n                if item is not None:\n                    items += item\n            if os.path.isfile(_filepath):\n                item=dper.file_dispatch(filepath=_filepath)\n                if item is not None and len(item)>0:\n                    items.append(item)\n    elif os.path.isfile(path):\n        item = dper.file_dispatch(filepath=path)\n        if len(item) > 0:\n            items.append(item)\n    else:\n        return None\n    return items\n    \n    \n# get the route name for a table name\ndef getRoute(tablename):\n    if contains(tablename, \"专线\"):\n        return \"海峡专线\"\n    if contains(tablename, \"夜间\"):\n        return \"夜班一号线\"\n    if contains(tablename, \"夜班\"):\n        return \"夜班一号线\"\n    if contains(tablename.lower(), \"k2\"):\n        return \"k2\"\n    if contains(tablename.lower(), \"21支\"):\n        return \"142\"\n    if contains(tablename.lower(), \"30支\"):\n        return \"149\"\n    if contains(tablename.lower(), \"57路区间\"):\n        return \"57区间\"\n    route = re.findall(r\"\\d+\\.?\\d*\", tablename)\n    #print(route)\n    return route[0]\n    \n# normalize a vehicle id (the original called st.contains, which is undefined here; the module-level contains import is used instead)\ndef getCar_id(id, tablename):\n    s = str(id)\n    if (contains(s,\"F\")):\n        s=s.replace(\"F\",\"F\")  # reconstructed: normalize a full-width F; the distinction was lost to mojibake in the source\n    if (contains(s, \"路\")):\n        s = s.split(\"路\")[1].strip()\n    if (contains(s, \"线\")):\n        s = s.split(\"线\")[1].strip()\n    if(contains(s,\"/\")):\n        s=s.split(\"/\")[1].strip()\n    return s\n\ndef update_car_info_from_xml(path,sheetname,targetstr,date):\n    xls=pd.ExcelFile(path)\n    parsed=pd.read_excel(xls,sheetname)\n    #target_index=local_value(parsed,targetstr,True)\n    return parsed","repo_name":"zstar2013/bus_sys_dst","sub_path":"loadexcel.py","file_name":"loadexcel.py","file_ext":"py","file_size_in_byte":8061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"43677063468","text":"class Solution:\n    def maxValue(self, grid: List[List[int]]) -> int:\n        if grid == []: return 0\n        grid_x, grid_y = len(grid),len(grid[0])\n        for i in range(1,grid_x):\n            grid[i][0]+=grid[i-1][0]\n        for j in range(1,grid_y):\n            grid[0][j]+=grid[0][j-1]\n        for i in range(1,grid_x):\n            for j in range(1,grid_y):\n                grid[i][j]+=max(grid[i-1][j],grid[i][j-1])\n        return grid[grid_x-1][grid_y-1]\n\n\n\"\"\"\n\n在一个 m*n 的棋盘的每一格都放有一个礼物,每个礼物都有一定的价值(价值大于 0)。你可以从棋盘的左上角开始拿格子里的礼物,并每次向右或者向下移动一格、直到到达棋盘的右下角。给定一个棋盘及其上面的礼物的价值,请计算你最多能拿到多少价值的礼物?\n\n \n\n示例 1:\n\n输入: \n[\n  [1,3,1],\n  [1,5,1],\n  [4,2,1]\n]\n输出: 12\n解释: 路径 1→3→5→2→1 可以拿到最多价值的礼物\n \n\n提示:\n\n0 < grid.length <= 200\n0 < grid[0].length <= 
200\n\n作者:Krahets\n链接:https://leetcode-cn.com/leetbook/read/illustration-of-algorithm/5vokvr/\n来源:力扣(LeetCode)\n著作权归作者所有。商业转载请联系作者获得授权,非商业转载请注明出处。\"\"\"","repo_name":"zheyuanWang/hunter_playground","sub_path":"dynamic_plan/collect_presents.py","file_name":"collect_presents.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"33246209241","text":"from sorting.common import swap\nfrom math import inf\n\n\ndef _moveDown(array, start, dest, size):\n    amount = start - dest\n    if size > 1:\n        while amount >= size:\n            i = start\n            while i > start - size:\n                swap(array, i - 1, i + size - 1)\n                i -= 1\n            start -= size\n            amount -= size\n        if amount > 0:\n            _moveDown(array, start, dest, size // 2)\n            _moveDown(array, start + size // 2, dest + size // 2, size - (size // 2))\n    else:\n        tmp = array[start]\n        for i in range(start, dest, -1):\n            array[i] = array[i - 1]\n        array[dest] = tmp\n\n\ndef _merge(array, start, mid, end):\n    lastValue = -inf\n\n    while start < mid:\n        size = 0\n        for i in range(mid, end):\n            if array[i] < array[start] and array[i] >= lastValue:\n                size += 1\n            else:\n                break\n        if size > 0:\n            _moveDown(array, mid, start, size)\n\n        start += size + 1\n        mid += size\n        lastValue = array[start - 1]\n\n\ndef _runSort(array, length):\n    gap = 2\n    while gap <= length:\n        for i in range(0, length, gap):\n            _merge(array, i, i + gap // 2, i + gap)\n        gap *= 2\n\n    if length - gap // 2 > 0:\n        _merge(array, 0, gap // 2, length)\n\n\ndef sort(seq):\n    _runSort(seq, len(seq))\n","repo_name":"Gaming32/Sorting-Algorithm-Repository","sub_path":"python/sorting/reverse_lazystable.py","file_name":"reverse_lazystable.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"40845227962","text":"# Taking miles input from the user \nmiles = float(input(\"Enter value in miles : \")) \n# conversion factor\nconv_fac = 0.621371 \n# calculate kilometers \nkilometers = miles /conv_fac \nprint('%0.2f miles is equal to %0.2f kilometers ' %(miles , kilometers ))\n\n# we are getting an error since a space is given @ conv_fac\n# remove the space, save and run\n\n# Thank you","repo_name":"LearnAtVedant/Full_Stack_Developer-Python","sub_path":"KilometersToMiles/miles2kilometers.py","file_name":"miles2kilometers.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"26197925106","text":"\"\"\"\n----------------------------------------------------------\nName: f_to_c.py\nPurpose: Convert a degree measure in Fahrenheit to Celsius\n\nAuthor: Hosey.K\n\nCreated: 08/02/2021\n----------------------------------------------------------\n\"\"\"\n\nprint(\"------------ Fahrenheit to Celsius Converter ------------\")\n\n# Get the temperature\nfahrenheit = float(input(\"Enter the value in Fahrenheit (°F): \"))\n\n# Calculate for celsius\ncelsius = (float(fahrenheit) - 32) * 5.00/9.00\n\n# Output results\nnumber = str(round(celsius, 2))\nprint(fahrenheit, \"in celsius is\",number,\"°C.\")\n\n","repo_name":"SACHSTech/ics2o-livehack1-practice-keira-h","sub_path":"f_to_c.py","file_name":"f_to_c.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"9894149833","text":"#\n# tcount.py\n#\n# offers basic metrics on random table size\n#\n\nfrom datetime import datetime\nfrom 
shutil import copyfile\nfrom collections import defaultdict\nimport math\nimport os\nimport re\nimport i7\nimport sys\n\ndef gmd():\n    dont_gm_next = False\n    with open(log_file) as file:\n        for (line_count, line) in enumerate(file, 1):\n            if \"#geometric mean\" in line:\n                dont_gm_next = True\n                continue\n            if re.search(\"^[0-9,]+$\", line):\n                if not dont_gm_next:\n                    my_ary = [ int(x) for x in line.strip().split(\",\") ]\n                    print(\"#geometric mean:{:4f}\".format(the_geo_mean(my_ary)))\n                    print(line.strip())\n                dont_gm_next = False\n    exit()\n\ndef the_geo_mean(a):\n    lo = 0\n    for x in a: lo += math.log(x)\n    lo /= len(a)\n    return math.exp(lo)\n\ndef print_geo_mean(a):\n    c = []\n    try:\n        c = [int(x) for x in a.split(\",\")]\n    except:\n        sys.exit(\"CSV in cmd line must separate numbers.\")\n    tgm = the_geo_mean(c)\n    sys.exit(\"Geometric mean of {:s} is {:.4f}\".format(a, tgm))\n\n\n# options\nbiggest_first = True\nverbose = False\nzap_highest = 0\nzap_lowest = 0\nupdate_log_file = False\ntest_log = False\nwrite_backup = False\ngeometric_mean_derive = False  # default added so the gd/gmd flag check below can't raise a NameError\n\n# variables\ntable_sizes = defaultdict(int)\n\nin_table = False\nget_header = False\n\ndef usage():\n    print(\"========COMMANDS (with or without dash)========\")\n    print(\"-v/-nv = verbose on/off\")\n    print(\"-ba/-b = write backup\")\n    print(\"-lt/-tl = test log\")\n    print(\"-bf/-fb = biggest first. -1b/-b1 = turns this off.\")\n    print(\"zh/h = zap highest (#), nzh disables. nzl/zl/l does same for lowest.\")\n    print()\n    print(\"In a pinch you can use CSV to get a geometric mean of that.\")\n    print(\"-u = update log file = main option. -nu/-un = don't update\")\n    exit()\n\nargcount = 1\n\nwhile argcount < len(sys.argv):\n    arg = sys.argv[argcount]\n    if arg[0] == '-': arg = arg[1:]\n    if ',' in arg: print_geo_mean(arg)\n    elif arg == 'v': verbose = True\n    elif arg == 'nv': verbose = False\n    elif arg == 'ba' or arg == 'b': write_backup = True\n    elif arg == 'tl' or arg == 'lt': test_log = True\n    elif arg == 'gd' or arg == 'gmd': geometric_mean_derive = True\n    elif arg == 'bf' or arg == 'fb': biggest_first = True\n    elif arg == 'bl' or arg == 'lb' or arg == 'b1' or arg == '1b': biggest_first = False\n    elif arg == 'u': update_log_file = True\n    elif arg == 'un' or arg == 'nu': update_log_file = False\n    elif arg[:2] == 'zh': zap_highest = int(arg[2:])\n    elif arg[0] == 'h': zap_highest = int(arg[1:])\n    elif arg == 'nzh': zap_highest = 0\n    elif arg[:2] == 'zl': zap_lowest = int(arg[2:])\n    elif arg[0] == 'l': zap_lowest = int(arg[1:])\n    elif arg == 'nzl': zap_lowest = 0\n    elif arg == '?': usage()\n    else:\n        print(\"Unknown command\", sys.argv[argcount], \"usage below:\")\n        usage()\n    argcount += 1\n\nwith open(i7.tafi('ai')) as file:\n    for (line_count, line) in enumerate(file, 1):\n        if get_header:\n            get_header = False\n            if line.startswith('tabnam'): in_table = False\n            continue\n        if line.startswith('table') and '\\t' not in line:\n            # print(line_count, line)\n            in_table = True\n            get_header = True\n            table_start = line_count + 1\n            table_name = re.sub(\" *\\[.*\", \"\", line.lower().strip())\n        if in_table and not line.strip():\n            in_table = False\n            table_sizes[table_name] = line_count - table_start - 1\n            if verbose: print(table_name, table_sizes[table_name], \"rows\")\n\ni7.go_proj('ai')\n\namean = gmean = gmeanp = 0\n\ndata_file = \"tcount.txt\"\nlog_file = \"tcount-log.txt\"\nlog_back = \"tcount-log-backup.txt\"\nlog_test_file = \"tcount-log-test.txt\"\n\nif os.path.exists(data_file):\n    if geometric_mean_derive: gmd()\n    with open(data_file) as file:\n        for line in file:\n            ll = line.lower().strip()\n            if 
ll.startswith(';'): break\n if ll.startswith('#'): continue\n if ll.startswith('-'):\n l2 = ll[1:]\n if l2 in table_sizes.keys():\n if verbose: print(l2, \"discounted.\")\n table_sizes.pop(l2)\n else:\n print(\"Tried to zap <{:s}>, but it's not a valid table key in tcount.txt.\".format(l2))\nelse:\n print(\"No\", data_file, \"so we won't do anything special.\")\n\nif zap_lowest:\n print(\"Zapping\", zap_lowest, \"lowest values.\")\n for i in range (0, zap_lowest):\n q = min(table_sizes, key=table_sizes.get)\n table_sizes.pop(q)\n\nif zap_highest:\n print(\"Zapping\", zap_highest, \"highest values.\")\n for i in range (0, zap_highest):\n q = max(table_sizes, key=table_sizes.get)\n table_sizes.pop(q)\n\ntsize = len(table_sizes.keys())\n\nfor x in table_sizes.keys():\n amean += table_sizes[x]\n gmean += math.log(table_sizes[x])\n gmeanp += math.log(table_sizes[x]+1)\n\ncount = 0\n\namean /= tsize\ngmean /= tsize\ngmeanp /= tsize\n\ngmean = math.exp(gmean)\ngmeanp = math.exp(gmeanp)\n\nfor x in sorted(table_sizes, key=table_sizes.get, reverse=biggest_first):\n gmtweak = gmean * ((table_sizes[x] + 1) / table_sizes[x]) ** (1/tsize)\n count += 1\n print(count, x, table_sizes[x], gmtweak)\n\nprint(\"{:4f} arithmetic mean\".format(amean))\nprint(\"{:4f} geometric mean\".format(gmean))\nprint(\"{:4f} geometric mean (1 added each), delta={:4f}\".format(gmeanp, gmeanp-gmean))\n\nif update_log_file:\n last_table_line = ''\n last_num_line = ''\n print(\"Looking if we can/should update...\")\n my_ary = [table_sizes[x] for x in sorted(table_sizes.keys())]\n if os.path.exists(log_file) and os.stat(log_file).st_size:\n out_file = log_test_file if test_log else log_file\n if write_backup:\n copyfile(log_file, log_back)\n print(\"Copied to backup\", log_back)\n table_check = defaultdict(bool)\n changed_array = False\n sts = sorted(table_sizes.keys())\n for x in sts: table_check[x] = False\n with open(log_file) as file:\n for (line_count, line) in enumerate(file, 1):\n l = line.lower().strip()\n if l.startswith(\"#table\"):\n last_table_line = l[1:]\n continue\n if l and re.search(\"^[,0-9]+$\", l):\n last_num_line = l\n continue\n for x in last_table_line.split(','):\n if x not in table_check.keys(): changed_array = True\n table_check[x] = True\n for x in table_check.keys():\n if not table_check[x]: changed_array = True\n old_vals = [int(q) for q in last_num_line.split(\",\")]\n got_dif = False\n if not changed_array:\n for j in range(0, len(my_ary)):\n if my_ary[j] != old_vals[j]: got_dif = True\n if changed_array or got_dif:\n flog = open(out_file, \"a\")\n flog.write(\"#{:s}\\n\".format(str(datetime.now())))\n if changed_array: flog.write('#' + ','.join(sorted(sts)) + \"\\n\")\n if got_dif:\n flog.write('#delta: ' + ','.join([str(my_ary[q] - old_vals[q]) for q in range(0, len(old_vals))]) + \"\\n\")\n flog.write(\"#geometric mean: {:4f}\\n\".format(gmean))\n flog.write(','.join([str(x) for x in my_ary]) + \"\\n\")\n print(\"Updated\", log_file)\n flog.close()\n else:\n print(\"No change, nothing new written.\")\n else:\n flog = open(log_file, \"w\")\n flog.write(\"#{:s}\\n\".format(str(datetime.now())))\n flog.write(\"#\")\n flog.write(','.join(sorted(table_sizes.keys())))\n flog.write(\"\\n\")\n flog.write(\"#no changes to start\\n{:s}\\n\".format(','.join([str(x) for x in my_ary]) + \"\\n\"))\n print(\"Created\", 
log_file)","repo_name":"andrewschultz/ailihphilia","sub_path":"utils/tcount.py","file_name":"tcount.py","file_ext":"py","file_size_in_byte":7831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72498330435","text":"# Author: Joel Turbi\r\n# Assignment: Lab Assignment 32\r\n# Course: CS140\r\n\r\ndef display_even(max):\r\n for i in range(1, max):\r\n if i%2== 0:\r\n print(i, end=\",\")\r\n print()\r\na = int(input(\"Enter a number:\\n\"))\r\ndisplay_even(a)\r\n\r\n","repo_name":"Ignis17/CS140","sub_path":"Labs/Assignment_32.py","file_name":"Assignment_32.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31398214273","text":"#!/usr/bin/env python3\n\n\nimport unittest\nfrom Cell import Cell\nfrom ArkLibPy.ArkDBMySQL import ArkDBMySQL\n\n\nclass MyTestCase(unittest.TestCase):\n def setUp(self):\n self.db_ = ArkDBMySQL(db_config_file='/Users/Ark/.db_configs/db_config_local_cadis.txt')\n self.db_.set_table('WORK_LIB')\n\n def test_get_bsf_with_csim(self):\n str_netlist = \"M0001 OUT01 VDD IN001 GND NMOS\\n\"\n test_cell = Cell(self.db_)\n test_cell.init_based_on_netlist(str_netlist)\n test_cell.cal_bsf()\n self.assertEqual(test_cell.bsf_, '01')\n\n def test_get_id(self):\n test_cell = Cell(self.db_)\n str_netlist = \"M0001 OUT01 VDD IN001 GND NMOS\\n\"\n test_cell.init_based_on_netlist(str_netlist)\n test_cell.cal_bsf()\n self.assertEqual(1, test_cell.get_id())\n\n def test_get_family(self):\n cell = Cell(self.db_)\n cell.init_based_on_id(2)\n self.assertEqual('ULM', cell.get_family())\n\n def test_get_family_none(self):\n cell = Cell(self.db_)\n str_netlist = \"M0001 IN005 VDD IN001 GND NMOS\\n\"\n cell.init_based_on_netlist(str_netlist)\n self.assertIsNone(cell.get_family())\n\n def test_clear_family(self):\n cell = Cell(self.db_)\n cell.init_based_on_id(1)\n cell.clear_family()\n self.assertIsNone(cell.get_family())\n\n def test_add_family(self):\n cell = Cell(self.db_)\n cell.init_based_on_id(1)\n\n cell.clear_family()\n cell.add_to_family('SingleTx')\n self.assertEqual('SingleTx', cell.get_family())\n\n # check adding same content twice, this should not affect the value\n cell.add_to_family('SingleTx')\n self.assertEqual('SingleTx', cell.get_family())\n cell.clear_family()\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"fangzhouwang/CADisCMOSExplorer","sub_path":"CellTest.py","file_name":"CellTest.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4064223059","text":"import mercadopago\nfrom ..config import config\n\nsdk = mercadopago.SDK(config['MP_ACCESS_TOKEN'])\n\n\ndef get_preference_body(items):\n preference_data = {\n \"items\": [\n {\n \"title\": item['product']['name'],\n \"quantity\": item['quantity'],\n \"unit_price\": item['product']['price'],\n \"currency_id\": \"MXN\",\n } for item in items\n ],\n \"payer\": {\n \"name\": \"Juan\",\n \"surname\": \"Lopez\",\n \"email\": \"user@email.com\",\n \"phone\": {\n \"area_code\": \"11\",\n \"number\": \"4444-4444\"\n },\n \"identification\": {\n \"type\": \"DNI\",\n \"number\": \"12345678\"\n },\n \"address\": {\n \"street_name\": \"Street\",\n \"street_number\": 123,\n \"zip_code\": \"5700\"\n }\n },\n \"back_urls\": {\n \"success\": \"https://www.success.com\",\n \"failure\": \"http://www.failure.com\",\n \"pending\": \"http://www.pending.com\"\n 
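# these back_urls are placeholder addresses from the sample; a real deployment would point them at its own pages\n        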
},\n \"notification_url\": \"https://www.your-site.com/ipn\",\n \"statement_descriptor\": \"Lumary Lane\",\n \"external_reference\": \"Reference_1234\",\n }\n\n return preference_data\n","repo_name":"IDGS-904-20001477/IDGS804-ProyectoFinal","sub_path":"app/utils/mercado_pago.py","file_name":"mercado_pago.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25632618913","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom casadi import *\nfrom casadi.tools import *\nimport pdb\nimport sys\nimport time\nimport do_mpc\n\n\"\"\" User settings: \"\"\"\nshow_animation = True\nstore_results = False\n\n\"\"\"\nGet configured do-mpc modules:\n\"\"\"\nfrom template_model import template_model\nfrom template_mpc import template_mpc\nfrom template_simulator import template_simulator\nfrom template_mhe import template_mhe\n\nmodel = template_model()\nmpc = template_mpc(model)\nsimulator = template_simulator(model)\nmhe = template_mhe(model)\n\n\n\"\"\"\nSet initial state\n\"\"\"\nnp.random.seed(99)\n\n# Use different initial state for the true system (simulator) and for MHE / MPC\nx0_true = np.random.rand(model.n_x)-0.5\nx0 = np.zeros(model.n_x)\n\nprint(x0_true)\n\nmpc.x0 = x0\nsimulator.x0 = x0_true\nmhe.x0 = x0\nmhe.p_est0 = 1e-4\n\n# Set initial guess for MHE/MPC based on initial state.\nmpc.set_initial_guess()\nmhe.set_initial_guess()\n\n\"\"\"\nSetup graphic:\n\"\"\"\ncolor = plt.rcParams['axes.prop_cycle'].by_key()['color']\n\nfig, ax = plt.subplots(5,1, sharex=True, figsize=(10, 9))\n\nmpc_plot = do_mpc.graphics.Graphics(mpc.data)\nmhe_plot = do_mpc.graphics.Graphics(mhe.data)\nsim_plot = do_mpc.graphics.Graphics(simulator.data)\n\nax[0].set_title('controlled position:')\nmpc_plot.add_line('_x', 'phi_2', ax[0])\nmpc_plot.add_line('_tvp', 'phi_2_set', ax[0], color=color[0], linestyle='--', alpha=0.5)\n\nax[0].legend(\n mpc_plot.result_lines['_x', 'phi_2']+mpc_plot.result_lines['_tvp', 'phi_2_set']+mpc_plot.pred_lines['_x', 'phi_2'],\n ['Recorded', 'Setpoint', 'Predicted'], title='Disc 2')\n\nax[1].set_title('uncontrolled position:')\nmpc_plot.add_line('_x', 'phi_1', ax[1])\nmpc_plot.add_line('_x', 'phi_3', ax[1])\n\nax[1].legend(\n mpc_plot.result_lines['_x', 'phi_1']+mpc_plot.result_lines['_x', 'phi_3'],\n ['Disc 1', 'Disc 3']\n )\n\nax[2].set_title('Inputs:')\nmpc_plot.add_line('_u', 'phi_m_set', ax[2])\n\nax[3].set_title('Estimated angular velocity:')\nsim_plot.add_line('_x', 'dphi', ax[3])\nmhe_plot.add_line('_x', 'dphi', ax[3])\n\n\nax[4].set_title('Estimated parameters:')\nsim_plot.add_line('_p', 'Theta_1', ax[4])\nmhe_plot.add_line('_p', 'Theta_1', ax[4])\n\nfor mhe_line_i, sim_line_i in zip(mhe_plot.result_lines.full, sim_plot.result_lines.full):\n mhe_line_i.set_color(sim_line_i.get_color())\n sim_line_i.set_alpha(0.5)\n sim_line_i.set_linewidth(5)\n\nax[0].set_ylabel('disc \\n angle [rad]')\nax[1].set_ylabel('disc \\n angle [rad]')\nax[2].set_ylabel('motor \\n angle [rad]')\nax[3].set_ylabel('angle \\n velocity [rad/2]')\nax[4].set_ylabel('mass inertia')\nax[3].set_xlabel('time [s]')\n\nfor ax_i in ax:\n ax_i.axvline(1.0)\n\nfig.tight_layout()\nplt.ion()\n\n\"\"\"\nRun MPC main loop:\n\"\"\"\n\nfor k in range(20):\n u0 = mpc.make_step(x0)\n # Simulate with process and measurement noise\n y_next = simulator.make_step(u0, v0=1e-2*np.random.randn(model.n_v,1))\n x0 = mhe.make_step(y_next)\n\n print(x0)\n\n if show_animation:\n mpc_plot.plot_results()\n 
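# overlay the predicted trajectories on the recorded closed-loop results\n        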
mpc_plot.plot_predictions()\n mhe_plot.plot_results()\n sim_plot.plot_results()\n\n mpc_plot.reset_axes()\n mhe_plot.reset_axes()\n sim_plot.reset_axes()\n plt.show()\n plt.pause(0.01)","repo_name":"wanggor/predictive-control","sub_path":"main-r.py","file_name":"main-r.py","file_ext":"py","file_size_in_byte":3180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16934748575","text":"from mozdef_util.utilities.dot_dict import DotDict\nfrom mozdef_util.query_models import SearchQuery, ExistsMatch\n\nfrom tests.unit_test_suite import UnitTestSuite\n\nimport os\nimport sys\n\n\nclass TestEsworkerSNSSQS(UnitTestSuite):\n def teardown(self):\n sys.path.remove(self.mq_path)\n super().teardown()\n\n def setup(self):\n super().setup()\n mq_conn = 'abc'\n task_queue = 'example-logs-mozdef'\n es_connection = self.es_client\n options = DotDict(\n {\n \"esbulksize\": 0,\n \"mozdefhostname\": \"unittest.hostname\",\n \"taskexchange\": task_queue,\n }\n )\n if 'lib' in sys.modules:\n del sys.modules['lib']\n self.mq_path = os.path.join(os.path.dirname(__file__), \"../../mq/\")\n sys.path.insert(0, self.mq_path)\n from mq import esworker_sns_sqs\n self.consumer = esworker_sns_sqs.taskConsumer(mq_conn, es_connection, options)\n\n def search_and_verify_event(self, expected_event):\n self.refresh('events')\n search_query = SearchQuery(minutes=5)\n search_query.add_must(ExistsMatch('tags'))\n results = search_query.execute(self.es_client)\n assert len(results['hits']) == 1\n saved_event = results['hits'][0]['_source']\n self.verify_event(saved_event, expected_event)\n\n def test_syslog_event(self):\n event = {\n \"Type\": \"Notification\",\n \"MessageId\": \"abcdefg\",\n \"TopicArn\": \"arn:aws:sns:us-west-2:123456789:example-logs-mozdef\",\n \"Subject\": \"Fluentd-Notification\",\n \"Message\": \"{\\\"time\\\":\\\"2017-05-25 07:14:15 +0000\\\",\\\"timestamp\\\":\\\"2017-05-25T07:14:15+00:00\\\",\\\"hostname\\\":\\\"abcdefghostname\\\",\\\"pname\\\":\\\"dhclient\\\",\\\"processid\\\":\\\"[123]\\\",\\\"type\\\":\\\"syslog\\\",\\\"logger\\\":\\\"systemslogs\\\",\\\"payload\\\":\\\"DHCPREQUEST of 1.2.3.4 on eth0 to 5.6.7.8 port 67 (xid=0x123456)\\\"}\",\n \"Timestamp\": \"2017-05-25T07:14:16.103Z\",\n \"SignatureVersion\": \"1\",\n \"Signature\": \"examplesignatureabcd\",\n \"SigningCertURL\": \"https://sns.us-west-2.amazonaws.com/SimpleNotificationService-12345.pem\",\n \"UnsubscribeURL\": \"https://sns.us-west-2.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-west-2:123456789:example-logs-mozdef:adsf0laser013\"\n }\n self.consumer.on_message(event)\n expected_event = {\n 'category': 'syslog',\n 'details': {'logger': 'systemslogs'},\n 'hostname': 'abcdefghostname',\n 'mozdefhostname': 'unittest.hostname',\n 'processid': '123',\n 'processname': 'dhclient',\n 'receivedtimestamp': '2017-05-26T17:47:17.813876+00:00',\n 'severity': 'INFO',\n 'source': 'UNKNOWN',\n 'summary': 'DHCPREQUEST of 1.2.3.4 on eth0 to 5.6.7.8 port 67 (xid=0x123456)',\n 'tags': ['example-logs-mozdef'],\n 'timestamp': '2017-05-25T07:14:15+00:00',\n 'utctimestamp': '2017-05-25T07:14:15+00:00',\n 'mozdef': {'plugins': []},\n 'type': 'event'\n }\n self.search_and_verify_event(expected_event)\n","repo_name":"mozilla/MozDef","sub_path":"tests/mq/test_esworker_sns_sqs.py","file_name":"test_esworker_sns_sqs.py","file_ext":"py","file_size_in_byte":3184,"program_lang":"python","lang":"en","doc_type":"code","stars":2170,"dataset":"github-code","pt":"61"} 
+{"seq_id":"34565915238","text":"import os\nimport pickle\nimport numpy as np\nfrom sklearn.metrics import cohen_kappa_score, accuracy_score, confusion_matrix, roc_auc_score, roc_curve\nimport torch as th\nth.backends.cudnn.benchmark = False\nth.backends.cudnn.deterministic = True\nimport sys\nsys.path.insert(0, r'myml_lib')\nfrom dataset import *\nfrom experiment import *\nfrom mymodels.eegnet_feedforward import EEGNet_CNN\nfrom mymodels.eegnet_recurrent import EEGNet_RNN, ModuleSequence\n\n\ntosave = True\nn_gpu = 1\nlabel_type = 'rass'\ncv_method = '10-fold'\nrandom_state = 10\nbatch_size = 16\nlr = 0.001\nmax_epoch = 10\ndata_type = 'eeg'\n\nif label_type == 'rass':\n label_mapping = {-5:0,-4:1,-3:2,-2:3,-1:4,0:5}\n loss_function = 'ordinal'\nelif label_type == 'camicu':\n label_mapping = None\n loss_function = 'bin'\nelse:\n raise ValueError('Unknown label type %s'%label_type)\n\n\ndef generate_tr_va_te(dataset, method, label_mapping=None, random_state=None):\n \"\"\"\n genertate tr, va and te label_times\n \"\"\"\n foldids = []; trids = []; vaids = []; teids = []\n unique_patients = np.array(dataset.unique_patients, copy=True)\n\n if method=='10-fold':\n if len(unique_patients)<10:\n raise ValueError('len(unique_patients)<10')\n np.random.seed(random_state)\n np.random.shuffle(unique_patients)\n allfolds = np.array_split(unique_patients, 10)\n foldids = []\n for k in range(10):\n foldids.append('fold %d'%(k+1,))\n patients_te = allfolds[k]\n patients_tr = np.concatenate(allfolds[:k]+allfolds[k+1:])\n # split label_times within tr to get va\n label_times = np.unique(dataset.label_times[np.in1d(dataset.patients, patients_tr)])\n np.random.seed(random_state+k+1)\n np.random.shuffle(label_times)\n trids.append(label_times[:len(label_times)//10*9])\n vaids.append(label_times[len(label_times)//10*9:])\n teids.append(np.unique(dataset.label_times[np.in1d(dataset.patients, patients_te)]))\n\n else:\n raise NotImplementedError(method)\n \n assert len(trids)==len(vaids)==len(teids)\n for i in range(len(trids)):\n assert len(set(trids[i]) & set(vaids[i]) & set(teids[i]))==0\n \n return foldids, trids, vaids, teids\n\n\ndef report_performance(y, yp, prefix=''):\n rmse = np.sqrt(np.mean((y-yp)**2))\n mae = np.mean(np.abs(y-yp))\n pn1acc = np.mean(np.abs(y-yp)<=1)\n \n msg = '%s RMSE = %g\\tMAE = %g\\t<=+/-1 acc = %g'%(prefix, rmse, mae, pn1acc)\n print(msg)\n\n\nif __name__=='__main__':\n\n ## read all data\n data_path_ff = '/data/delirium/eeg_segs_%s_feedforward_w4s2.h5'%label_type\n data_path_rnn = '/data/delirium/eeg_segs_%s_recurrent_w4s2_L9.5min.h5'%label_type\n\n dall_ff = MyDataset(data_path_ff, label_type, data_type=data_type, label_mapping=label_mapping, class_weighted=True)\n dall_rnn = MyDataset(data_path_rnn, label_type, data_type=data_type, label_mapping=label_mapping, class_weighted=True)\n \n # remove bad quality patients\n bad_quality_patients = ['icused14', 'icused29', 'icused44', 'icused52', 'icused69', 'icused98', 'icused122', 'icused125', 'icused185', 'icused199']\n print('%d patients removed due to bad signal quality'%len(bad_quality_patients))\n select_mark = ~np.in1d(dall_rnn.patients, bad_quality_patients)\n dall_rnn = slice_dataset(dall_rnn, np.where(select_mark)[0])\n select_mark = ~np.in1d(dall_ff.patients, bad_quality_patients)\n dall_ff = slice_dataset(dall_ff, np.where(select_mark)[0])\n\n \"\"\"\n # to generate database summary table\n unique_patients=np.unique(dall_rnn.patients)\n unique_patients=np.array(sorted(unique_patients,key=lambda x:int(x[6:])))\n 
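# (note: this block sits inside a triple-quoted string and never executes; it is kept as a recipe for the cohort summary table)\n    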
aa=pd.read_csv('../data/demographics.csv',sep=',')\n patients2 = aa['PatientID'].values.tolist()\n ids=[patients2.index(x) for x in unique_patients]\n df=aa.iloc[ids].reset_index(drop=True)\n np.percentile(df['APACHEII'],(25,50,75))\n \n import datetime\n ids=np.where((~df['ICUAdmission'].isna())&(~df['ICUDischarge'].isna()))[0]\n icuadmission=np.array([datetime.datetime.strptime(x,'%Y-%m-%d %H:%M:%S.%f') for x in df.iloc[ids].ICUAdmission])\n icudischarge=np.array([datetime.datetime.strptime(x,'%Y-%m-%d %H:%M:%S.%f') for x in df.iloc[ids].ICUDischarge])\n icudays=np.array([x.total_seconds()/3600./24 for x in icudischarge-icuadmission])\n \"\"\"\n \n K = dall_rnn.K\n dall_ff.summary(suffix='feedfoward all')\n dall_rnn.summary(suffix='recurrent all')\n\n ## generate tr, va, te folds\n \n np.random.seed(random_state+10)\n folds_path = 'RASS_folds_info_%s.pickle'%cv_method\n if os.path.exists(folds_path):\n with open(folds_path,'rb') as ff:\n foldnames, tr_label_timess, va_label_timess, te_label_timess = pickle.load(ff)\n else:\n foldnames, tr_label_timess, va_label_timess, te_label_timess = generate_tr_va_te(dall_rnn, cv_method, random_state=random_state)\n with open(folds_path,'wb') as ff:\n pickle.dump([foldnames, tr_label_timess, va_label_timess, te_label_timess], ff, protocol=2)\n\n ## train\n for fi, fold, tr_label_times, va_label_times, te_label_times in zip(range(len(foldnames)), foldnames, tr_label_timess, va_label_timess, te_label_timess):\n print('\\n########## [%d/%d] %s ##########\\n'%(fi+1, len(foldnames), fold))\n cnn_model_path = 'models/model_RASS_cnn_%s.pth'%fold\n rnn1_model_path = 'models/model_RASS_rnn1_%s.pth'%fold\n rnn2_model_path = 'models/model_RASS_rnn2_%s.pth'%fold\n result_path = 'results/results_RASS_%s.pickle'%fold\n if os.path.exists(result_path):\n continue\n \n # step 1: train CNN\n\n exp = Experiment(model=EEGNet_CNN(loss_function, K),\n batch_size=batch_size, max_epoch=max_epoch, lr=lr,\n loss_function=loss_function, clip_weight=False,\n n_gpu=n_gpu, verbose=True, random_state=random_state)\n\n dtr_ff = slice_dataset(dall_ff, np.where(np.in1d(dall_ff.label_times, tr_label_times))[0])\n dva_ff = slice_dataset(dall_ff, np.where(np.in1d(dall_ff.label_times, va_label_times))[0])\n dte_ff = slice_dataset(dall_ff, np.where(np.in1d(dall_ff.label_times, te_label_times))[0])\n dtr_ff.summary(suffix='feedfoward tr')\n dva_ff.summary(suffix='feedfoward va')\n dte_ff.summary(suffix='feedfoward te')\n \n print('Model CNN parameters: %d'%exp.model.n_param)\n dtr_ff.fliplr_prob=True\n exp.fit(dtr_ff, Dva=dva_ff, init=False)\n dtr_ff.fliplr_prob = False\n if tosave:\n exp.save(cnn_model_path)\n #exp.load(cnn_model_path)\n #print('model loaded from %s'%cnn_model_path)\n\n del dtr_ff\n del dva_ff\n del dall_ff\n\n # step 2: get CNN outputs\n\n dtr = slice_dataset(dall_rnn, np.where(np.in1d(dall_rnn.label_times, tr_label_times))[0])\n dva = slice_dataset(dall_rnn, np.where(np.in1d(dall_rnn.label_times, va_label_times))[0])\n dte = slice_dataset(dall_rnn, np.where(np.in1d(dall_rnn.label_times, te_label_times))[0])\n dtr.summary(suffix='recurrent tr')\n dva.summary(suffix='recurrent va')\n dte.summary(suffix='recurrent te')\n \n exp.batch_size = 64\n dtr.set_X(exp.predict(dtr, output_id=1, return_last=False))\n dva.set_X(exp.predict(dva, output_id=1, return_last=False))\n dte.set_X(exp.predict(dte, output_id=1, return_last=False))\n L = dtr.X.shape[2]\n\n # step 3: train RNN using CNN outputs\n \n exp = Experiment(batch_size=batch_size, max_epoch=max_epoch*5, lr=lr,\n 
loss_function=loss_function, clip_weight=0.1,\n n_gpu=n_gpu, verbose=True, random_state=random_state)\n exp.model = EEGNet_RNN(loss_function, L, K, model_type='lstm', rnn_layer_num=2, rnn_hidden_num=16, rnn_dropout=0)\n print('Model RNN parameters: %d'%exp.model.n_param)\n exp.fit(dtr, Dva=dva, return_last=False)\n if tosave:\n exp.save(rnn1_model_path)\n #exp.load(rnn1_model_path)\n #print('model loaded from %s'%rnn1_model_path)\n \n dtr.set_X(np.expand_dims(dtr.X, axis=2))\n dva.set_X(np.expand_dims(dva.X, axis=2))\n dte.set_X(np.expand_dims(dte.X, axis=2))\n dtr2 = make_test_dataset(dtr)#, min_len=1200//4) # min length 20min (/4 due to step=4s)\n dva2 = make_test_dataset(dva)#, min_len=1200//4)\n dte2 = make_test_dataset(dte)#, min_len=1200//4)\n dtr2.set_X(dtr2.X[:,:,0,:])\n dva2.set_X(dva2.X[:,:,0,:])\n dte2.set_X(dte2.X[:,:,0,:])\n dtr2.summary(suffix='recurrent tr2')\n dva2.summary(suffix='recurrent va2')\n dte2.summary(suffix='recurrent te2')\n \n exp.batch_size = 64\n dtr2.set_X(exp.predict(dtr2, output_id=1, return_last=False))\n dva2.set_X(exp.predict(dva2, output_id=1, return_last=False))\n dte2.set_X(exp.predict(dte2, output_id=1, return_last=False))\n \n # step 4: train RNN on 1h segment\n\n exp = Experiment(batch_size=4, max_epoch=max_epoch*20, lr=lr/5.,\n loss_function=loss_function, clip_weight=0.1,# stateful=True,\n n_gpu=n_gpu, verbose=True, random_state=random_state)\n exp.model = EEGNet_RNN(loss_function, dtr2.X.shape[2], K, model_type='lstm', rnn_layer_num=1, rnn_hidden_num=8, rnn_dropout=0.2)\n print('Model RNN2 parameters: %d'%exp.model.n_param)\n exp.fit(dtr2, Dva=dva2, return_last=False)\n if tosave:\n exp.save(rnn2_model_path)\n #exp.load(rnn2_model_path)\n #print('model loaded from %s'%rnn2_model_path)\n\n # test model\n\n yptr2 = exp.predict(dtr2, return_last=False)#; yptr2=np.exp(yptr2)\n yptr_z2 = exp.predict(dtr2, return_last=False, return_ordinal_z=True); yptr_z2 = yptr_z2[:,:,0]\n yptr2_avg = exp.model.output_layer.get_proba([yptr_z2[ii,20:dtr2.lengths[ii]].mean() for ii in range(len(yptr2))])\n ypva2 = exp.predict(dva2, return_last=False)#; ypva2=np.exp(ypva2)\n ypva_z2 = exp.predict(dva2, return_last=False, return_ordinal_z=True); ypva_z2 = ypva_z2[:,:,0]\n ypva2_avg = exp.model.output_layer.get_proba([ypva_z2[ii,20:dva2.lengths[ii]].mean() for ii in range(len(ypva2))])\n ypte2, Hte2 = exp.predict(dte2, output_id=[0,1], return_last=False)#; ypte2=np.exp(ypte2)\n ypte_z2 = exp.predict(dte2, return_last=False, return_ordinal_z=True); ypte_z2 = ypte_z2[:,:,0]\n ypte2_avg = exp.model.output_layer.get_proba([ypte_z2[ii,20:dte2.lengths[ii]].mean() for ii in range(len(ypte2))])\n report_performance(dte2.y, np.argmax(ypte2_avg, axis=1), prefix='recurrent te2')\n\n if tosave:\n with open(result_path, 'wb') as f:\n pickle.dump({\n 'tr':dict(dtr2.drop_X()), 'yptr':yptr2, 'yptr_avg':yptr2_avg,\n 'va':dict(dva2.drop_X()), 'ypva':ypva2, 'ypva_avg':ypva2_avg,\n 'te':dict(dte2.drop_X()), 'ypte':ypte2, 'ypte_avg':ypte2_avg, 'Hte':Hte2\n }, f, protocol=2)\n\n","repo_name":"bdsp-core/rass_delirium_eeg_prediction","sub_path":"paper_code/RASS_prediction/step3_predict_RASS.py","file_name":"step3_predict_RASS.py","file_ext":"py","file_size_in_byte":11142,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"32981082837","text":"# Given a string s, find the longest palindromic substring in s. 
You may assume that the maximum length of s is 1000.\n#\n# Example 1:\n#\n# Input: \"babad\"\n# Output: \"bab\"\n# Note: \"aba\" is also a valid answer.\n# Example 2:\n#\n# Input: \"cbbd\"\n# Output: \"bb\"\n\nclass Solution(object):\n def longestPalindrome(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n matrix = [[0] * len(s) for _ in range(len(s))]\n res = \"\"\n # initialize\n for idx in range(len(s)):\n matrix[idx][idx] = 1\n res=s[idx]\n for idx in range(len(s) - 1):\n if s[idx] == s[idx + 1]:\n matrix[idx][idx + 1] = 1\n res = s[idx:idx+2]\n # dp solution\n for length in range(3, len(s) + 1):\n for idx in range(len(s) - length + 1):\n end = idx + length - 1\n if s[idx] == s[end] and matrix[idx+1][end-1]:\n matrix[idx][end] = 1\n res = s[idx:end + 1]\n return res\n\ns=Solution()\nprint(s.longestPalindrome(\"abcda\"))","repo_name":"yshshadow/Leetcode","sub_path":"1-50/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34546907570","text":"import socket\nimport os\nimport logging\nfrom logging.handlers import RotatingFileHandler\nimport json\nimport protocol\nfrom random import randrange\nimport random\nimport time\nfrom sys import stdin\n\nhost = \"localhost\"\nport = 12000\n# HEADERSIZE = 10\n\n\"\"\"\nset up alamano logging\n\"\"\"\nalamano_logger = logging.getLogger()\nalamano_logger.setLevel(logging.DEBUG)\nformatter = logging.Formatter(\n \"%(asctime)s :: %(levelname)s :: %(message)s\", \"%H:%M:%S\")\n# file\nif os.path.exists(\"./logs/alamano.log\"):\n os.remove(\"./logs/alamano.log\")\nfile_handler = RotatingFileHandler('./logs/alamano.log', 'a', 1000000, 1)\nfile_handler.setLevel(logging.DEBUG)\nfile_handler.setFormatter(formatter)\nalamano_logger.addHandler(file_handler)\n# stream\nstream_handler = logging.StreamHandler()\nstream_handler.setLevel(logging.WARNING)\nalamano_logger.addHandler(stream_handler)\n\n\nclass Player():\n\n def __init__(self):\n\n self.end = False\n # self.old_question = \"\"\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n def connect(self):\n self.socket.connect((host, port))\n\n def reset(self):\n self.socket.close()\n\n def answer(self, question):\n # work\n data = question[\"data\"]\n game_state = question[\"game state\"]\n print(\"data len : \" + str(len(data)) + \"\\n\")\n response_index = int(stdin.read(2))\n # log\n alamano_logger.debug(\"|\\n|\")\n alamano_logger.debug(\"alamano answers\")\n alamano_logger.debug(f\"question type ----- {question['question type']}\")\n alamano_logger.debug(f\"data -------------- {data}\")\n alamano_logger.debug(f\"response index ---- {response_index}\")\n alamano_logger.debug(f\"response ---------- {data[response_index]}\")\n return response_index\n\n def handle_json(self, data):\n data = json.loads(data)\n print(\"DATA\\n\")\n print(data)\n print(\"\\n\")\n response = self.answer(data)\n # send back to server\n bytes_data = json.dumps(response).encode(\"utf-8\")\n protocol.send_json(self.socket, bytes_data)\n\n def run(self):\n\n self.connect()\n\n while self.end is not True:\n time.sleep(1)\n received_message = protocol.receive_json(self.socket)\n if received_message:\n self.handle_json(received_message)\n else:\n print(\"no message, finished learning\")\n self.end = True\n\n\np = 
Player()\n\np.run()\n","repo_name":"cartoush/fantom_of_the_opera","sub_path":"base/clientalamano.py","file_name":"clientalamano.py","file_ext":"py","file_size_in_byte":2545,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"24583365453","text":"for case in range(1, int(input()) + 1):\n N, K = map(int, input().split())\n result = [0] * K\n scores = list(map(int, input().split()))\n result[0] = scores[0]\n for score in scores[1:]:\n for i in range(K - 1):\n if i == 0 and score >= result[i]:\n result.insert(i, score)\n result.pop()\n break\n elif result[i] >= score >= result [i + 1]:\n result.insert(i + 1, score)\n result.pop()\n break\n\n print(f'#{case} {sum(result)}')\n","repo_name":"seoul-ssafy-class-2-studyclub/Indong-python","sub_path":"Python/SWEA/D3/4466_최대 성적표 만들기.py","file_name":"4466_최대 성적표 만들기.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"28329042568","text":"import os\nimport json\nimport pkg_resources\nimport shutil\n\nfrom pathlib import Path\n\n\nclass Sequence:\n \"\"\"\n Wrap an INDI/Ekos imaging sequence\n \"\"\"\n def __init__(self):\n template = pkg_resources.resource_filename(__name__, os.path.join(\"templates\", \"sequence_template.json\"))\n capture_script = shutil.which(\"vid_capture\")\n\n with open(template, 'r') as fp:\n self.config = json.load(fp)\n\n self.config['SequenceQueue']['Job']['PostCaptureScript'] = capture_script\n\n\nclass Observation:\n \"\"\"\n Job entry in an INDI/Ekos Scheduler list\n \"\"\"\n def __init__(self, target=\"Target\", ra=0.0, dec=0.0, priority=10, sequence=Path.home() / \"sequence.esq\"):\n template = pkg_resources.resource_filename(__name__, os.path.join(\"templates\", \"sequence_list_template.json\"))\n\n with open(template, 'r') as fp:\n full_config = json.load(fp)\n\n # use the first entry in the scheduler list template as the boiler-plate to build from\n self.config = full_config['SchedulerList']['Job'][0]\n\n # configure from the arguments\n self.config['Target'] = target\n self.config['Priority'] = f\"{priority}\" # XML can only be strings so we make sure they are\n self.config['Coordinates']['J2000RA'] = f\"{ra}\"\n self.config['Coordinates']['J2000DE'] = f\"{dec}\"\n self.config['Sequence'] = str(sequence)\n\n\nclass Schedule:\n \"\"\"\n Wrap an INDI/Ekos Scheduler list and provide ways to build them programmatically\n and write them to valid XML\n \"\"\"\n def __init__(self):\n template = pkg_resources.resource_filename(__name__, os.path.join(\"templates\", \"sequence_list_template.json\"))\n\n with open(template, 'r') as fp:\n self.config = json.load(fp)\n\n # zero out the list of jobs to initiate\n self.config['SchedulerList']['Job'] = []\n\n def add_observation(self, observation):\n \"\"\"\n Add an Observation instance to the schedule\n \"\"\"\n self.config['SchedulerList']['Job'].append(observation.config)\n","repo_name":"tepickering/FASS","sub_path":"src/fass/sequence.py","file_name":"sequence.py","file_ext":"py","file_size_in_byte":2065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18049438909","text":"\nclass DataClear(object):\n def __init__(self):\n pass\n \n def __GetData(self):\n import pandas as pd\n import numpy as np\n self.Df=pd.read_excel(r'DataCleat.xlsx',sheet_name='Sheet1',header=None)\n \n def __Clear(self,rateC,rateR):\n \"\"\"\n 目的:对数据进行清理去除空值与错误信息(-1)\n\n 
输入:rateC为列允许错误率;rateR为行允许错误率\n\n        输出:清理后数据集(DataFrame)\n        \"\"\"\n        del_cols=[]\n        for cols in self.Df.columns:#对每列分析\n            rownum,colnum=self.Df.shape\n            Datacols=self.Df.loc[:,cols]\n            nullnum=Datacols[(Datacols.isnull()) | (Datacols<=0)].shape[0]\n            if (nullnum==0):\n                pass\n            else:\n                rate=nullnum/rownum\n                if (rate>rateC):#当超过预设列误差比例时\n                    for rows in Datacols[(Datacols.isnull()) | (Datacols<=0)].index:#对列元素分析,看其错误率是否大于ratec\n                        Datarows=self.Df.loc[rows,:]\n                        nullnumC=Datarows[(Datarows.isnull()) | (Datarows<=0)].shape[0]\n                        if(nullnumC/colnum>rateR):#如果大于ratec,认为该样本有误\n                            self.Df=self.Df.drop([rows],axis=0) #删除样本\n                    rownum,colnum=self.Df.shape\n                    Datacols=self.Df.loc[:,cols]\n                    nullnum=Datacols[(Datacols.isnull())| (Datacols<=0)].shape[0]\n                    if (nullnum/rownum>rateC):#删除完后再判断是否仍然超过预设列误差比\n                        self.Df=self.Df.drop([cols],axis=1) #删除该列 \n                        del_cols.append(cols)\n                else:\n                    self.Df=self.Df.loc[(Datacols.notnull()) & (Datacols>0),:]\n        return del_cols\n    \n    def __quantile_p(self,data, p):\n        \"\"\"\n        目的:获得分位数\n        输入:data数据集,p分位数\n        输出:Q为对应分位数值\n        \"\"\"\n        import math\n        data.sort()\n        pos = (len(data) + 1)*p\n        pos_integer = int(math.modf(pos)[1])\n        pos_decimal = pos - pos_integer\n        Q = data[pos_integer - 1] + (data[pos_integer] - data[pos_integer - 1])*pos_decimal\n        return Q\n    \n    def __RangeDeal(self,b,f=0):\n        \"\"\"\n        目的:剔除异常数据,获取正常数据集的最大最小值\n        输入:b为分位数取值,data数据集,f为偏差值\n        输出:正常数据集的方差\n        \"\"\"\n        import pandas as pd\n        #取下1/4分位数FL以及3/4分位数为FU\n        for col in self.Df.columns:\n            column=self.Df.loc[:,col]\n            data=column.values\n            FL=self.__quantile_p(data,0.25)\n            FU=self.__quantile_p(data,0.75)\n            DF=FU-FL\n            minF=FL-b*DF-f*FL\n            maxF=FU+b*DF+f*FU\n            Info=pd.Series([minF,maxF],index=['min','max'])\n            if('All_Info' in vars()):\n                All_Info=pd.concat([All_Info,Info],axis=1)\n            else:\n                All_Info=pd.DataFrame(Info)\n        All_Info.columns=self.Df.columns\n        self.DataRange=All_Info\n\n    \n    def __AbnormalDeal(self):\n        for col in self.DataRange.columns:\n            # the comparison operators here had been eaten by tag-stripping; reconstructed from the min/max bounds built in __RangeDeal\n            for row in self.Df[(self.Df.loc[:][col]<self.DataRange.loc['min'][col]) | (self.Df.loc[:][col]>self.DataRange.loc['max'][col])].index:\n                self.Df=self.Df.drop([row],axis=0)\n\n\n    def main(self):\n        self.__GetData()\n        Del_col=self.__Clear(0.1,0.5)\n        print(Del_col)\n        self.__RangeDeal(1.5,f=0.1)\n        self.__AbnormalDeal()\n    \n\n\nif __name__=='__main__':\n    import pandas as pd\n    a=DataClear()\n    \n    a.main()\n    print(a.DataRange)\n","repo_name":"ZZY18/sgpy","sub_path":"SGDateClear/DataClear.py","file_name":"DataClear.py","file_ext":"py","file_size_in_byte":3792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"75235448","text":"'''\nIdea: Draw edges between stop and buses. Perform BFS starting from S.\n\nTime complexity: O(n^2)\nSpace complexity: O(n^2)\n\n'''\n\nimport collections\n\nclass Solution(object):\n    def numBusesToDestination(self, routes, S, T):\n        if S == T: \n            return 0\n        \n        graph = collections.defaultdict(list)\n        for bus, route in enumerate(routes):\n            for stop in route:\n                graph[stop] += bus,\n        \n\n        visited = set()\n        res = 0\n        q = collections.deque([S])\n        while q:\n            size = len(q)\n            for _ in range(size):\n                curr_stop = q.popleft()\n                if curr_stop == T:\n                    return res\n                for bus in graph[curr_stop]:\n                    if bus not in visited:\n                        visited.add(bus)\n                        for stop in routes[bus]:\n                            q.append(stop)\n            res += 1\n        \n        return -1","repo_name":"Anirudh-Muthukumar/Leetcode-Solutions","sub_path":"815. Bus Routes/815. Bus Routes.py","file_name":"815. 
Bus Routes.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"9364596309","text":"#\n# @lc app=leetcode.cn id=1544 lang=python3\n#\n# [1544] 整理字符串\n#\n\n# @lc code=start\nclass Solution:\n    def makeGood(self, s: str) -> str:\n        def isPair(charA,charB):\n            return abs(ord(charA) - ord(charB)) == abs(ord('a')- ord('A'))\n\n        stack = []\n        for char in s:\n            if not stack:\n                stack.append(char)\n                continue\n            if isPair(char, stack[-1]):\n                stack.pop()\n                continue\n            else:\n                stack.append(char)\n        return \"\".join(stack)\n\n# @lc code=end\n\n","repo_name":"mqinbin/python_leetcode","sub_path":"1544.整理字符串.py","file_name":"1544.整理字符串.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"12951501488","text":"#!/usr/bin/env python\n#\n# not done\n\nimport time\nimport datetime as dt\nimport multiprocessing as mp\nimport logging\nimport yaml\nfrom zmqclass import *\n\n\"\"\"\nerror message {'system':'hw,camera,audio',\n               'level':'info,critical,error',\n               'msg':'blah',\n               'output': 'save,screen,save-screen,web, ...'\n               }\n\"\"\"\nclass RobotErrorServer(mp.Process):\n\tdef __init__(self,host=\"localhost\",port=9000):\n\t\tmp.Process.__init__(self)\n\t\tself.host = host\n\t\tself.port = port\n\t\tlogging.basicConfig(level=logging.INFO)\n\t\tself.logger = logging.getLogger('robot')\n# \t\tself.md = md.MotorDriver(11,12,15,16)\n\n\n\tdef on_message(self,client, userdata, msg):\n\t\tprint(msg.topic+' '+str(msg.payload))\n\n\tdef shutdown(self):\n\t\tself.pub.close()\n\t\texit()\n\n\tdef run(self):\n\t\tself.logger.info(str(self.name)+'['+str(self.pid)+'] started on'+\n\t\t\tstr(self.host) + ':' + str(self.port) +', Daemon: '+str(self.daemon))\n\t\t#p = Publisher((self.host,self.port))\n\t\t#self.pub = p.accept()\n\t\t#self.logger.info('Accepted connection: ')\n\n\n\t\tself.sub = Sub(['errors'])\n\n\t\twhile True:\n\t\t\ttime.sleep(0.05) # 0.5 => 20Hz\n\t\t\t# get info\n\t\t\tmsg = self.sub.recv()\n\t\t\tif msg:\n\t\t\t\tself.on_message( msg )\n","repo_name":"MomsFriendlyRobotCompany/soccer","sub_path":"tmp/ErrorServer.py","file_name":"ErrorServer.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"21136751974","text":"import logging\nfrom PyQt6 import QtCore, QtGui\n\nlogger = logging.getLogger(__name__)\n\n\nclass PixmapLoader:\n    \"\"\"\n    Class for efficiently loading and displaying images.\n    Handles loading the image file and providing a pixmap at the\n    requested size. 
Tries to optimize performance by balancing\n    memory usage and disk access.\n    \"\"\"\n\n    def __init__(self, image):\n        self.image = image\n        self.pixmap = None  # This is where the pixmap is cached in memory\n        self.image_width = None\n        self.image_height = None\n        self.image_size = None\n\n        self.hold_original = False  # A hint indicating that the original image should be held.\n        self.used_sizes = []\n        # A history of sizes of the pixmap used; a hint for what sizes to potentially keep when freeing memory.\n        # Each entry is a tuple, with the second element being the requested size and the first the used size.\n        # (they are likely different because getPixmapForSize will keep the original pixmap's aspect ratio)\n\n        self.maximum_megabytes_to_safely_keep = 1\n        # Pixmaps under this size (in MB) will be kept in memory to avoid having to re-load from disk.\n        # Although you probably don't need to change it, if you want to tune things:\n        # For a given number of images:\n        # * If your computer is running out of memory, decrease this.\n        # * If you experience lag while performing operations that result in a new image being displayed somewhere,\n        #   and your computer has enough memory, increase this.\n        #\n        # Typical numbers are likely: 1000 images * 1 MB/image (this number above) = 1 GB\n\n        self.slightly_large_size_factor = 2\n\n    def __getstate__(self):\n        \"\"\"\n        Called during pickling.\n        \"\"\"\n        state = self.__dict__.copy()\n        state[\"pixmap\"] = None\n        state[\"hold_original\"] = False\n        state[\"used_sizes\"] = []\n        return state\n\n    def getPixmapForSize(self, size):\n        \"\"\"\n        Returns a pixmap for the requested size. This is the most\n        important method of this class.\n        May or may not have to load or re-load the image file\n        depending on whether optimizeMemory() has been called\n        and its behaviour.\n        If the provided size is None, returns the original pixmap.\n        \"\"\"\n        if not size:\n            size = QtCore.QSize(self.image_width, self.image_height)\n        self._requireLoad()\n        if not self.pixmap or ((size.width() > self.pixmap.width()\n                                and size.height() > self.pixmap.height()) and\n                               (self.pixmap.width() < self.image_width\n                                or self.pixmap.height() < self.image_height)):\n            logger.debug(\n                \"Performing load to get bigger pixmap (have: %s need: %s, %s)\"\n                % (\"%s, %s\" % (self.pixmap.width(), self.pixmap.height())\n                   if self.pixmap else None, size.width(), size.height()))\n            self._loadOriginalPixmap()\n        pixmap = self.pixmap.scaled(size,\n                                    QtCore.Qt.AspectRatioMode.KeepAspectRatio)\n        self.used_sizes.append((pixmap.size(), size))\n        # Note that this returned pixmap is now owned by the caller: PixmapLoader isn't responsible for\n        # freeing that memory (and in fact can't)\n        return pixmap\n\n    def width(self):\n        \"\"\"\n        Returns the width of the image.\n        \"\"\"\n        self._requireLoad()\n        return self.image_width\n\n    def height(self):\n        \"\"\"\n        Returns the height of the image.\n        \"\"\"\n        self._requireLoad()\n        return self.image_height\n\n    def size(self):\n        \"\"\"\n        Returns the size of the image.\n        \"\"\"\n        self._requireLoad()\n        return self.image_size\n\n    def holdOriginal(self):\n        \"\"\"\n        Marks that the largest sized pixmap should be kept in memory.\n        \"\"\"\n        self.hold_original = True\n\n    def freeOriginal(self):\n        \"\"\"\n        Marks that the pixmap can be shrunk to save memory\n        (this is the default until holdOriginal() is called).\n        \"\"\"\n        self.hold_original = False\n\n    def optimizeMemory(self, might_need_a_bit_bigger=True):\n        \"\"\"\n        Tries to free memory used by the pixmap. 
Doesn't necessarily\n go all out though: might keep the pixmap or a scaled down\n version in memory. Call this after getPixmapForSize() to\n save as much memory as possible.\n \"\"\"\n # Actual memory freeing occurs whenever self.pixmap is set to a new value\n # in the code below: once this is done, the old object is not referenced\n # by anything and so gets cleaned up by Python's garbage collector (in\n # CPython, this happens immediately)\n\n if self.pixmap and not self.hold_original:\n for size, requested_size in self._used_sizes_largest_to_smallest():\n\n # Keeping a small version of the pixmap if something used it (because it might use it again).\n if might_need_a_bit_bigger:\n slightly_large_size = size * self.slightly_large_size_factor\n if slightly_large_size.width() <= self.pixmap.width(\n ) and slightly_large_size.height(\n ) <= self.pixmap.height() and self._estimatePixmapMemory(\n slightly_large_size, self.pixmap.depth()\n ) < self.maximum_megabytes_to_safely_keep:\n self.pixmap = self.pixmap.scaled(\n slightly_large_size,\n QtCore.Qt.AspectRatioMode.KeepAspectRatio)\n break\n\n if size.width() <= self.pixmap.width() and size.height(\n ) <= self.pixmap.height() and self._estimatePixmapMemory(\n size, self.pixmap.depth()\n ) < self.maximum_megabytes_to_safely_keep:\n self.pixmap = self.pixmap.scaled(\n requested_size,\n QtCore.Qt.AspectRatioMode.KeepAspectRatio)\n # Using the actual, requested size to ensure the exact same scaling is performed as before: don't\n # want to be off by even 1 pixel\n break\n else:\n if self.pixmap.width(\n ) == self.image_width and self.pixmap.height(\n ) == self.image_height:\n logger.debug(\n \"In PixmapLoader.optimizeMemory(), completely clearing out pixmap to reduce memory usage.\"\n )\n self.pixmap = None\n else:\n pass\n if self.pixmap:\n logger.debug(\n \"In PixmapLoader.optimizeMemory(), keeping size %s, %s at %.2f MB\"\n % (self.pixmap.width(), self.pixmap.height(),\n self._estimatePixmapMemory(self.pixmap.size(),\n self.pixmap.depth())))\n\n def _requireLoad(self):\n \"\"\"\n Performs the initial read of the image, if necessary.\n \"\"\"\n if not self.image_width or not self.image_height:\n self._loadOriginalPixmap()\n\n def _loadOriginalPixmap(self):\n if not self.image.path:\n raise (ValueError(\"Don't have a path to load image.\"))\n self.pixmap = QtGui.QPixmap(self.image.path)\n if self.pixmap.isNull():\n self.pixmap = None\n raise (ValueError(\"Failed to load image at %s\" % self.image.path))\n self.image_width = self.pixmap.width()\n self.image_height = self.pixmap.height()\n\n def _estimatePixmapMemory(self, size, depth):\n \"\"\"\n Returns an estimate of the memory used by a pixmap of the\n provided size and depth in MegaBytes (MB).\n \"\"\"\n # Calculation from https://forum.qt.io/topic/4876/how-much-memory/5\n # Measured too and seems accurate enough.\n return size.width() * size.height() * depth / 1024 / 1024 / 8\n\n def _used_sizes_largest_to_smallest(self):\n # Assuming sizes all have the same aspect ratio, so it doesn't matter if we sort using width or height: so picking one:\n self.used_sizes.sort(key=lambda item: item[0].width(), reverse=True)\n return self.used_sizes\n","repo_name":"uaarg/pigeon","sub_path":"pigeon/ui/pixmaploader.py","file_name":"pixmaploader.py","file_ext":"py","file_size_in_byte":8409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21771852774","text":"\"\"\"\nGiven a string, find the length of the longest substring without repeating 
characters.\n\nExamples:\n\nGiven \"abcabcbb\", the answer is \"abc\", which the length is 3.\n\nGiven \"bbbbb\", the answer is \"b\", with the length of 1.\n\nGiven \"pwwkew\", the answer is \"wke\", with the length of 3. Note that the answer must be a substring, \"pwke\" is a subsequence and not a substring.\n\"\"\"\n\nclass Solution(object):\n \"\"\"\n O(n)\n using hashset to record which char is already covered.\n \"\"\"\n def lengthOfLongestSubstring(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n\n p1, p2 = 0, 0\n longest = p2 - p1\n\n coverset = set()\n\n while p2 < len(s):\n c2 = s[p2]\n if c2 not in coverset:\n coverset.add(c2)\n p2 += 1\n d = p2 - p1\n if d > longest:\n longest = d\n else:\n while c2 in coverset:\n coverset.remove(s[p1])\n p1 += 1\n\n return longest\n","repo_name":"weixsong/algorithm","sub_path":"leetcode/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"2770650478","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 8 09:50:56 2020\n\n@author: jotape42p\n\"\"\"\n\nimport SEIR\nimport lmfit\n\ndef funcao(parametros, info, data):\n \n mortes = data['obitos'][int(round(parametros['f'].value)):]\n casos_acumulados = data['casos_acumulados'][int(round(parametros['f'].value)):]\n \n #if data.get('check_I_init') is None:\n # parametros[\"I_init\"].value = casos_acumulados[0] #05/06 mudança para I_inicial existente nos casos acumulados\n \n if info['control'][0]:\n for i in range(info['control'][1]):\n parametros['dia' + str(i + 1)].max = len(casos_acumulados) - max(14 - info['validacao'], 0)\n parametros['dia' + str(i + 1)].min = 1 + int(round(parametros['f'].value))\n if info['control'][2]:\n for i in range(info['control'][3]):\n parametros['dob' + str(i + 1)].max = len(casos_acumulados) - max(14 - info['validacao'], 0)\n \n if info['tipo_seir'] == 'mod':\n pop = SEIR.SEIR(data['populacao'], len(casos_acumulados), parametros, data['lockdown'], len(casos_acumulados), info['control'], info['validacao'])\n if info['tipo_seir'] == 'std':\n pop = SEIR.SEIR_std(data['populacao'], len(casos_acumulados), parametros, data['lockdown'], len(casos_acumulados), info['control'], info['validacao'])\n \n model1 = pop['Acum'] #modelagem dos casos acumulados [Acum] \n model3 = pop['D'] #modelagem dos casos acumulados de morte [D]\n\n l1 = (model1 - casos_acumulados)/(max(casos_acumulados)) *100000\n l3 = (model3 - mortes)/max(mortes) *100000\n \n return (l1, l3)\n\ndef fit_me(info, data, parametros):\n \n minner = lmfit.Minimizer(funcao, parametros, fcn_args=(info, data))\n\n result = minner.minimize(method=info['method'])\n \n return result\n","repo_name":"JosePauloSavioli/COVID-19-simulation","sub_path":"Modelo SEIR Covid-19/fitting.py","file_name":"fitting.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"25534579114","text":"import cv2\nimport os\nimport datetime\n\nif __name__ == '__main__':\n a = datetime.datetime.now()\n os.system('adb shell screencap -p /sdcard/screencap.png')\n os.system('adb pull /sdcard/screencap.png')\n b = datetime.datetime.now()\n print('Take screenshot TAKES {} !!!!!!!!!!!!!!!!!!!!!!!!!!!.'.format(b-a))\n img = cv2.imread('screencap.png', 0)\n img2 = img.copy()\n template = cv2.imread('winning_flag.png', 0)\n w, h = template.shape[::-1]\n\n img = img2.copy()\n method = 
eval('cv2.TM_CCOEFF_NORMED')\n\n # Apply template Matching\n res = cv2.matchTemplate(img, template, method)\n min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)\n top_left = max_loc\n bottom_right = (top_left[0] + w, top_left[1] + h)\n print(\"matching result:\")\n print(min_val)\n print(max_val)\n print(top_left)\n print(bottom_right)\n if max_val > 0.99:\n print(\"Matched!\")\n\n b = datetime.datetime.now()\n print('TOTAL TAKES {} !!!!!!!!!!!!!!!!!!!!!!!!!!!.'.format(b-a))\n\n","repo_name":"YongzhiWang/game_script","sub_path":"main_detection.py","file_name":"main_detection.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"43439485417","text":"from mongorm import *\n\ndef teardown_module(module):\n\tDocumentRegistry.clear( )\n\ndef test_equality( ):\n\t\"\"\"Tests to make sure comparisons work. Equality compares database\n\tidentity, not value similarity.\"\"\"\n\tconnect( 'test_mongorm' )\n\t\n\tclass TestDocument(Document):\n\t\ts = StringField( )\n\t\n\ta = TestDocument( s=\"Hello\" )\n\ta.save( )\n\t\n\tb = TestDocument( s=\"Hello\" )\n\tb.save( )\n\n\tassert not (a == b)\n\tassert a != b\n\t\n\tc = TestDocument.objects.get(pk=a.id)\n\n\tassert c == a\n\tassert not (c != a)","repo_name":"MagmaLabs/mongorm","sub_path":"tests/test_equality.py","file_name":"test_equality.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"9487755397","text":"from scapy.all import IP, ICMP,TCP, sr1\nimport sys\ndef icmp_probe(ip):\n icmp_packet = IP(dst=ip)/ICMP()\n resp_packet = sr1(icmp_packet, timeout=10, verbose=False)\n return resp_packet != None\n\ndef syn_scan(ip, port): \n packet = IP(dst=ip)/TCP(dport=port, flags='S')\n resp_packet = sr1(packet, timeout=1, verbose=False)\n \n if resp_packet.getlayer('TCP').flags == 'SA':\n print('open')\n else:\n print('filtered|closed')\n\n\nif __name__ == \"__main__\":\n ip = sys.argv[1]\n port = int(sys.argv[2])\n\n if icmp_probe(ip):\n syn_ack_packet = syn_scan(ip, port)\n else:\n print(\"ICMP Probe Failed\")\n","repo_name":"vflame6/python-practice","sub_path":"Ethical Hacking practice/Scapy projects/syn_scanner.py","file_name":"syn_scanner.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32751090466","text":"import tkinter\nfrom PIL import ImageTk, Image\n\nclass CardSlot():\n\n\tSIZE = 200\n\n\tdef __init__(self, window, x_coord, y_coord, application_manager):\n\t\tself.application_manager = application_manager\n\n\t\tself.x_coord = x_coord\n\t\tself.y_coord = y_coord\n\t\tself.x_position = 240 + (self.SIZE + 30) * x_coord\n\t\tself.y_position = 20 + (self.SIZE + 30) * y_coord\n\n\t\tself.border = tkinter.Canvas(window, highlightthickness = 0)\n\n\t\timage = Image.open(\"../Resources/CoverArt.jpg\").resize((self.SIZE, self.SIZE), Image.ANTIALIAS)\n\t\tself.imageTK = ImageTk.PhotoImage(image)\n\t\tself.artwork = tkinter.Label(window, anchor = \"nw\", image = self.imageTK, borderwidth = 0, bd = 0, relief = \"ridge\")\n\t\tself.artwork.bind(\"\", self.submit_click)\n\n\t\tself.cover = tkinter.Canvas(window, highlightthickness = 0, bd = 0, relief = \"ridge\")\n\t\tself.cover.bind(\"\", self.submit_click)\n\n\t\tself.color = None\n\t\tself.spymaster = None\n\t\tself.revealed = None\n\n\tdef submit_click(self, 
event):\n\t\tself.application_manager.click(self.x_coord, self.y_coord)\n\n\tdef recieve_click(self):\n\t\tself.revealed = not self.revealed\n\t\tself.refresh()\n\n\tdef overwrite(self, card):\n\t\tself.color = CardSlot.team_to_color(card.team)\n\t\tself.spymaster = False\n\t\tself.revealed = card.revealed\n\n\t\tself.cover.create_rectangle(-1, -1, self.SIZE, self.SIZE, fill = self.color)\n\t\tself.border.create_rectangle(-10, -10, self.SIZE + 20, self.SIZE + 20, fill = self.color)\n\n\t\timage = Image.open(\"../Resources/Artwork/artwork\" + str(card.artwork) + \".png\").resize((self.SIZE, self.SIZE), Image.ANTIALIAS)\n\t\tself.imageTK = ImageTk.PhotoImage(image)\n\t\tself.artwork.configure(image = self.imageTK)\n\n\t\tself.refresh()\n\n\tdef refresh(self):\n\t\tif (self.spymaster):\n\t\t\tself.border.place(x = (self.x_position - 10), y = (self.y_position - 10), width = (self.SIZE + 20), height = (self.SIZE + 20))\n\t\telse:\n\t\t\tself.border.place_forget()\n\n\t\tself.artwork.place(x = self.x_position, y = self.y_position, width = self.SIZE, height = self.SIZE)\n\n\t\tif (self.revealed):\n\t\t\tself.cover.place(x = self.x_position, y = self.y_position, width = self.SIZE, height = self.SIZE)\n\t\telse:\n\t\t\tself.cover.place_forget()\n\n\tdef reveal(self):\n\t\tself.spymaster = True\n\t\tself.refresh()\n\n\tdef team_to_color(team):\n\t\tif (team == \"red\"):\n\t\t\treturn \"red\"\n\t\telif (team == \"blue\"):\n\t\t\treturn \"blue\"\n\t\telif (team == \"bystander\"):\n\t\t\treturn \"white\"\n\t\telif (team == \"assassin\"):\n\t\t\treturn \"black\"\n\t\telse:\n\t\t\treturn \"red\"\n\n","repo_name":"MatthewGoff/CodeNames","sub_path":"src/client_application/gui/card_slot.py","file_name":"card_slot.py","file_ext":"py","file_size_in_byte":2422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"23412330571","text":"def fun(x,y):\r\n    x=x.split(\" \")\r\n    y=y.split(\" \")\r\n    n=0\r\n    sol=0\r\n    for i in x:\r\n        for j in y:\r\n            if(i==j):\r\n                sol=i\r\n                n+=1\r\n    if(n==1):\r\n        return sol\r\n    if(n>1):\r\n        return \"Bad magician!\"\r\n    if(n==0):\r\n        return \"Volunteer cheated!\"\r\n\r\nwith open (\"data.txt\", \"r\") as myfile:\r\n    problem=myfile.read()\r\nlines = problem.split('\\n')\r\ncases = int(lines[0])\r\nfor i in range(cases):\r\n\tindex = i*10+1\r\n\tcase=lines[index:index+11]\r\n\tfirst=int(case[0])\r\n\tsecond=int(case[5])\r\n\tfline=case[first]\r\n\tsline=case[second+5]\r\n\tsolution=fun(fline,sline)\r\n\tcasenum=i+1\r\n\tprint(\"case #\"+str(casenum)+\": \" + solution)\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_135/1958.py","file_name":"1958.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"8402133268","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 19 11:12:17 2021\n\n@author: user16\n\"\"\"\n\nfrom pymongo import MongoClient\nconn = MongoClient('mongodb://localhost:27017/') # connect to MongoDB\n\n# print(conn.list_database_names()) # names of the installed dbs (show dbs)\n\ndb = conn['test'] # get the db\ncollection = db['product'] # get the collection\nx = collection.find_one()\n#print(x)\n\n\nproductname = '냉장고'\n\nquery = {'name' : productname}\n#query = {'price' : {$gte : 100000}}\nresult = collection.find(query)\nresult = collection.find().sort('name', -1) # sort in descending order\n#result = collection.find({'price' : {'$gte': 100000, '$lte' : 300000}})\n \n \nfor item in result :\n    print(item['name'], item['price'])\n#    print(item)\n\n \n# print(x['name'], x['price']) \n\n# comparison operators\n# $eq, $ne, $lt, $lte, $gt, $gte\n","repo_name":"hunsang-you/HSYOO","sub_path":"ExamApp/ex0819_1.py","file_name":"ex0819_1.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"72335447234","text":"import unittest\nfrom factory import ChromosomeFactory, PopulationFactory\nfrom chromosome import Chromosome\nfrom population import Population\n\nclass ChromosomeFactoryTest(unittest.TestCase):\n\n    def test_gen(self):\n        n = 15 \n        m = 3\n        factory = ChromosomeFactory(n, m)\n        c = factory.gen()\n        expected_len = 60\n        self.assertEqual(len(c), expected_len)\n        self.assertEqual(type(c), Chromosome)\n        # print(c)\n\nclass PopulationFactoryTest(unittest.TestCase):\n\n    def test_gen(self):\n        n = 12\n        m = 2\n        p = 10\n\n        factory = PopulationFactory(p, n, m)\n        population = factory.gen()\n        population[0]=1\n        print(population)\n        \n        self.assertEqual(len(population), p)\n        self.assertEqual(type(population), Population)\n    # \nif __name__ == \"__main__\":\n    unittest.main()","repo_name":"c0ldheart/max-min-multiple-knapsack","sub_path":"factory_test.py","file_name":"factory_test.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"28511820061","text":"##############################################################################\n# TransFacPred is developed for predicting Transcription Factors using protein #\n# Sequence information. It is developed by Prof G. P. S. Raghava's group. #\n# Please Cite: https://webs.iiitd.edu.in/raghava/transfacpred/ #\n# ############################################################################\nimport argparse \nimport warnings\nimport pickle\nimport os\nimport re\nimport uuid\nimport sys\nimport zipfile\nimport numpy as np\nimport pandas as pd\nfrom sklearn.ensemble import ExtraTreesClassifier\nwarnings.filterwarnings('ignore')\nparser = argparse.ArgumentParser(description='Please provide following arguments') \n\n## Read Arguments from command\nparser.add_argument(\"-i\", \"--input\", type=str, required=True, help=\"Input: File name containing protein or peptide sequence in FASTA format.\")\nparser.add_argument(\"-o\", \"--output\",type=str, help=\"Output: File for saving results by default outfile.csv\")\nparser.add_argument(\"-t\",\"--threshold\", type=float, help=\"Threshold: Value between 0 to 1 by default -0.38\")\nparser.add_argument(\"-d\",\"--display\", type=int, choices = [1,2], help=\"Display: 1:Transcription Factors, 2: All Sequences, by default 1\")\nargs = parser.parse_args()\n\ndef readseq(file):\n    with open(file) as f:\n        records = f.read()\n    records = records.split('>')[1:]\n    seqid = []\n    seq = []\n    for fasta in records:\n        array = fasta.split('\\n')\n        name, sequence = array[0].split()[0], re.sub('[^ACDEFGHIKLMNPQRSTVWY-]', '', ''.join(array[1:]).upper())\n        seqid.append('>'+name)\n        seq.append(sequence)\n    if len(seqid) == 0:\n        f=open(file,\"r\")\n        data1 = f.readlines()\n        for each in data1:\n            seq.append(each.replace('\\n',''))\n        for i in range (1,len(seq)+1):\n            seqid.append(\">Seq_\"+str(i))\n    df1 = pd.DataFrame(seqid)\n    df2 = pd.DataFrame(seq)\n    return df1,df2\n\ndef aac_comp(file):\n    std = list('ACDEFGHIKLMNPQRSTVWY')\n    df1 = file\n    df1.columns = ['Seq']\n    dd = []\n    for j in df1['Seq']:\n        cc = []\n        for i in std:\n            count = 0\n            for k in j:\n                temp1 = k\n                if temp1 == i:\n                    count += 1\n            composition = (count/len(j))*100\n            cc.append(composition)\n        dd.append(cc)\n    df2 = pd.DataFrame(dd)\n    head = []\n    for mm in std:\n        head.append('AAC_'+mm)\n    df2.columns = head\n    return df2\n\ndef pred(file1,file2):\n    a = []\n    clf = pickle.load(open(file2,'rb'))\n    data_test = file1\n    y_p_score1=clf.predict_proba(data_test)\n    y_p_s1=y_p_score1.tolist()\n    a.extend(y_p_s1)\n    df_a = pd.DataFrame(a)\n    df_1a = df_a.iloc[:,-1].round(2)\n    df_2a = pd.DataFrame(df_1a)\n    df_2a.columns = ['ML_score']\n    return df_2a\n\ndef BLAST_processor(blast_result,name1,ml_results,thresh):\n    df3 = ml_results\n    if os.stat(blast_result).st_size != 0:\n        df1 = pd.read_csv(blast_result, sep=\"\\t\", names=['name','hit','identity','r1','r2','r3','r4','r5','r6','r7','r8','r9'])\n        df2 = name1\n        cc = []\n        for i in df2[0]:\n            kk = i.replace('>','')\n            if len(df1.loc[df1.name==kk])>0:\n                df4 = df1[['name','hit']].loc[df1['name']==kk].reset_index(drop=True)\n                if df4['hit'][0].split('_')[0]=='P':\n                    cc.append(0.5)\n                elif df4['hit'][0].split('_')[0]=='N':\n                    cc.append(-0.5)\n            else:\n                cc.append(0)\n        df6 = pd.DataFrame()\n        df6['Seq_ID'] = [i.replace('>','') for i in df2[0]] \n        df6['ML_Score'] = df3['ML_score']\n        df6['BLAST_Score'] = cc\n        df6['Total_Score'] = df6['ML_Score']+df6['BLAST_Score']\n        df6['Prediction'] = ['Transcription Factor' if df6['Total_Score'][i]>thresh else 'Non-Transcription Factor' for i in range(0,len(df6))]\n    else:\n        df2 = name1\n        ss = []\n        vv = []\n        for j in df2[0]:\n            ss.append(j.replace('>',''))\n            vv.append(0)\n        df6 = pd.DataFrame()\n        df6['Seq_ID'] = ss\n        df6['ML_Score'] = df3['ML_score']\n        df6['BLAST_Score'] = vv\n        df6['Total_Score'] = df6['ML_Score']+df6['BLAST_Score']\n        df6['Prediction'] = ['Transcription Factor' if df6['Total_Score'][i]>thresh else 'Non-Transcription Factor' for i in range(0,len(df6))]\n    return df6\n\nprint('##############################################################################')\nprint('# This program TransFacPred is developed for predicting Transcription #')\nprint('# factors, developed by Prof G. P. S. Raghava\\'s group. #')\nprint('# ############################################################################')\n\n# Parameter initialization or assigning variable for command level arguments\n\nSequence= args.input # Input variable \n \n# Output file \n \nif args.output == None:\n    result_filename= \"outfile.csv\" \nelse:\n    result_filename = args.output\n \n# Threshold \nif args.threshold == None:\n    Threshold = -0.38\nelse:\n    Threshold= float(args.threshold)\n# Display\nif args.display == None:\n    dplay = int(1)\nelse:\n    dplay = int(args.display)\n\nprint('Summary of Parameters:')\nprint('Input File: ',Sequence,'; Threshold: ', Threshold)\nprint('Output File: ',result_filename,'; Display: ',dplay)\n\n#======================= Prediction Module start from here =====================\nprint(\"======Initiating Prediction Using Hybrid Model. 
Please Wait.......================\")\nif os.path.exists('envfile'):\n with open('envfile', 'r') as file:\n data = file.readlines()\n output = []\n for line in data:\n if not \"#\" in line:\n output.append(line)\n if len(output)==2: \n paths = []\n for i in range (0,len(output)):\n paths.append(output[i].split(':')[1].replace('\\n',''))\n blastp = paths[0]\n blastdb = paths[1]\n else:\n print(\"####################################################################################\")\n print(\"Error: Please provide paths for BLAST, and required files\", file=sys.stderr)\n print(\"####################################################################################\")\n sys.exit()\n \nelse:\n print(\"####################################################################################\")\n print(\"Error: Please provide the '{}', which comprises paths for BLAST\".format('envfile'), file=sys.stderr)\n print(\"####################################################################################\")\n sys.exit()\n\nfilepath = os.path.dirname(os.path.abspath(__file__))\nif os.path.isdir('Models') == False:\n with zipfile.ZipFile(filepath+'/Models.zip', 'r') as zip_ref:\n zip_ref.extractall('.')\nelse:\n pass\nif os.path.isdir('database') == False:\n with zipfile.ZipFile(filepath+'/database.zip', 'r') as zip_ref:\n zip_ref.extractall('.')\nelse:\n pass\ndf_2b,df_1b = readseq(Sequence)\ndf_3b = aac_comp(df_1b)\ndf_4b = pred(df_3b,filepath+'/Models/ET_model.pkl')\nos.system(blastp + \" -task blastp -db \" + blastdb + \" -query \" + Sequence + \" -out RES_1_6_6.out -outfmt 6 -evalue 100 -max_target_seqs 1\")\ndf44 = BLAST_processor('RES_1_6_6.out',df_2b,df_4b,Threshold)\nif dplay == 1:\n df44 = df44.loc[df44.Prediction==\"Transcription Factor\"]\nelse:\n df44 = df44\ndf44 = round(df44,3)\ndf44.to_csv(result_filename, index=None)\nos.remove('RES_1_6_6.out')\nprint(\"\\n=========Process Completed. Have an awesome day ahead.=============\\n\")\nprint('\\n======= Thanks for using TransFacPred. Your results are stored in file :',result_filename,' =====\\n\\n')\n","repo_name":"raghavagps/transfacpred","sub_path":"transfacpred.py","file_name":"transfacpred.py","file_ext":"py","file_size_in_byte":7666,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"37427951494","text":"from pypfopt.efficient_frontier import EfficientFrontier\nfrom pypfopt import expected_returns, risk_models\n\nimport numpy as np\n\ndef get_sorted_weights(prices):\n '''\n prices: dataframe of the following shape. 
Column as symbols, Date as indices\n AAL AAPL GOOGL\n Date\n 2014-02-14 32.439728 17.411098 0.00\n 2014-02-15 32.439728 17.411098 0.00\n '''\n # calculate parameters\n mu = expected_returns.mean_historical_return(prices)\n S = risk_models.sample_cov(prices)\n\n # remove infinite values\n symbols = prices.columns\n for symbol in symbols:\n # if value is infinite\n mu_value = mu[symbol]\n if not np.isfinite(mu_value) or mu_value == 0:\n # delete from means\n del mu[symbol]\n\n # delete from sample covariance\n S.drop(symbol, axis=1, inplace=True)\n S.drop(symbol, axis=0, inplace=True)\n\n # calculate efficient frontier\n ef = EfficientFrontier(mu, S)\n weights = ef.max_sharpe()\n cleaned_weights = ef.clean_weights()\n sorted_weights = sorted(cleaned_weights.items(), key=lambda x: -x[1])\n return sorted_weights\n\n","repo_name":"shunkongcheung/portfolio-builder","sub_path":"get_sorted_weights.py","file_name":"get_sorted_weights.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36521050147","text":"import os\n\nfrom dagster import DagsterInstance, execute_pipeline, reconstructable\nfrom flask import Flask, request, abort, Response, stream_with_context\n\nfrom src.pipelines.ingest import ingest_pipeline\n\napp = Flask(__name__)\n\n\ndef execute_ingest_pipeline():\n yield 'Starting to execute ingest_pipeline...'\n try:\n dagster_inst = DagsterInstance.get()\n result = execute_pipeline(\n pipeline=reconstructable(ingest_pipeline),\n preset=\"gcp\",\n instance=dagster_inst,\n )\n assert result.success\n yield 'Done!'\n except Exception as e:\n yield 'Failed:'\n yield str(e)\n\n\n@app.route('/execute_pipeline')\ndef run_pipelines():\n if request.args.get('pipeline') == \"ingest_pipeline\":\n return Response(stream_with_context(execute_ingest_pipeline()))\n else:\n abort(403)\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=os.environ.get('PORT', 8080))\n","repo_name":"mrdavidlaing/software-releases-dwh","sub_path":"web_runner.py","file_name":"web_runner.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18549171035","text":"# COMP9021 Practice 9 - Solutions\n\n\nfrom collections import defaultdict\n\n\nclass WordSearch:\n '''\n Records the contents of a file that contains n lines with m letters for some n and m,\n possibly with spaces between the letters and possibly with blank lines.\n Such a contents is intended to be the grid of a word search game: words are given that\n have to be found in the grid, being read horizontally, vertically or diagonally, in\n either direction.\n '''\n def __init__(self, filename):\n with open(filename) as file:\n self.grid = [''.join(c for c in line if not c.isspace())\n for line in file if not line.isspace()\n ]\n self.x_dim = len(self.grid[0])\n self.y_dim = len(self.grid)\n # The keys of self.rays are the letters that occur in the grid.\n # For such a letter c, the value of c in self.rays is a list of length\n # the number of occurrences of c in the grid; each member of the list\n # records the x and y coordinates of that occurrence of c\n # (the x-axis pointing East, the y-axis pointing South)\n # and for each of the 8 directions, the sequence of letters in the grid\n # that starts at that location and extends in that direction all the way\n # to the boundary of the grid. 
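        # As a hypothetical illustration (not from the original file): in a grid\n        # whose top row is 'CAT', self.rays['C'][0] would be\n        # ((0, 0), {'E': 'CAT', 'W': 'C', 'N': 'C', ...}), i.e. one entry per\n        # occurrence of the letter, mapping each of the 8 directions to the\n        # string of letters readable that way.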
\n self.rays = defaultdict(list)\n for y in range(self.y_dim):\n for x in range(self.x_dim):\n self.rays[self.grid[y][x]].append(((x, y), self._rays_per_direction(y, x)))\n\n def __str__(self):\n return '\\n'.join(' '.join(c for c in line) for line in self.grid)\n\n def _rays_per_direction(self, y, x):\n return dict((('N', ''.join(self.grid[j][x] for j in range(y, -1, -1))),\n ('NE', ''.join(self.grid[j][i]\n for (j, i) in zip(range(y, -1, -1), range(x, self.x_dim))\n )\n ),\n ('E', ''.join(self.grid[y][i] for i in range(x, self.x_dim))),\n ('SE', ''.join(self.grid[j][i]\n for (j, i) in zip(range(y, self.y_dim), range(x, self.x_dim))\n )\n ),\n ('S', ''.join(self.grid[j][x] for j in range(y, self.y_dim))),\n ('SW', ''.join(self.grid[j][i]\n for (j, i) in zip(range(y, self.y_dim), range(x, -1, -1))\n )\n ),\n ('W', ''.join(self.grid[y][i] for i in range(x, -1, -1))),\n ('NW', ''.join(self.grid[j][i]\n for (j, i) in zip(range(y, -1, -1), range(x, -1, -1))\n )\n )\n )\n )\n\n def locate_word_in_grid(self, word):\n '''\n Returns None if word cannot be read in the grid.\n Otherwise, returns the x and y coordinates of an occurrence\n of the first letter of word, and the direction to follow\n (N, NE, E, SE, S, SW, W or NW) to read the whole word from\n that point onwards.\n '''\n for ((x, y), rays) in self.rays[word[0]]:\n for direction in rays:\n if rays[direction].startswith(word):\n return (x, y, direction)\n\n def locate_words_in_grid(self, *words):\n return dict((word, self.locate_word_in_grid(word)) for word in words)\n \n def display_word_in_grid(self, word):\n '''\n In case word can indeed be read from the grid,\n prints out the grid with all characters being displayed in lowercase,\n except for those that make up word, displayed in uppercase.\n '''\n grid = [[c.lower() for c in line] for line in self.grid]\n try:\n x, y, direction = self.locate_word_in_grid(word)\n if direction == 'N':\n for j in range(y, y - len(word), -1):\n grid[j][x] = grid[j][x].upper()\n elif direction == 'NE': \n for (j, i) in zip(range(y, y - len(word), -1), range(x, x + len(word))):\n grid[j][i] = grid[j][i].upper()\n elif direction == 'E':\n for i in range(x, x + len(word)):\n grid[y][i] = grid[y][i].upper()\n elif direction == 'SE':\n for (j, i) in zip(range(y, y + len(word)), range(x, x + len(word))):\n grid[j][i] = grid[j][i].upper()\n elif direction == 'S':\n for j in range(y, y + len(word)):\n grid[j][x] = grid[j][x].upper()\n elif direction == 'SW':\n for (j, i) in zip(range(y, y + len(word)), range(x, x - len(word), -1)):\n grid[j][i] = grid[j][i].upper()\n elif direction == 'W':\n for i in range(x, x - len(word), -1):\n grid[y][i] = grid[y][i].upper()\n elif direction == 'NW':\n for (j, i) in zip(range(y, y - len(word), -1), range(x, x - len(word), -1)):\n grid[j][i] = grid[j][i].upper()\n print('\\n'.join(' '.join(c for c in line) for line in grid))\n except TypeError:\n pass\n \n\nif __name__ == '__main__':\n import pprint\n ws = WordSearch('word_search_1.txt')\n print('Testing with grid for metals')\n print()\n print(ws)\n print()\n metal = 'PLATINUM'\n print(f'{metal}: {ws.locate_word_in_grid(metal)}')\n metal = 'SODIUM'\n print(f'{metal}: {ws.locate_word_in_grid(metal)}')\n metals = ('PLATINUM', 'COPPER', 'MERCURY', 'TUNGSTEN', 'MAGNESIUM', 'ZINC', 'MANGANESE',\n 'TITANIUM', 'TIN', 'IRON', 'LITHIUM', 'CADMIUM', 'GOLD', 'COBALT', 'SILVER',\n 'NICKEL', 'LEAD', 'IRIDIUM', 'URANIUM', 'SODIUM')\n located_metals = ws.locate_words_in_grid(*metals)\n pprint.pprint(located_metals)\n print() \n for metal in 
metals:\n print(metal, end = ':\\n')\n ws.display_word_in_grid(metal)\n print()\n print()\n\n ws = WordSearch('word_search_2.txt')\n print('Testing with grid for fruits')\n print()\n print(ws)\n print()\n fruit = 'RASPBERRY'\n print(f'{fruit}: {ws.locate_word_in_grid(fruit)}')\n fruit = 'PEAR'\n print(f'{fruit}: {ws.locate_word_in_grid(fruit)}')\n fruits = ('RASPBERRY', 'LIME', 'BLACKBERRY', 'BLUEBERRY', 'WATERMELON', 'ORANGE',\n 'BANANA', 'PAPAYA', 'LEMON', 'KIWI', 'GRAPE', 'APPLE', 'PEAR', 'MANGOE')\n located_fruits = ws.locate_words_in_grid(*fruits)\n pprint.pprint(located_fruits)\n print() \n for fruit in fruits:\n print(fruit, end = ':\\n')\n ws.display_word_in_grid(fruit)\n print()\n","repo_name":"gakkistyle/comp9021","sub_path":"practice_9/Practice_9_solutions/word_search.py","file_name":"word_search.py","file_ext":"py","file_size_in_byte":6899,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"61"} +{"seq_id":"33320391433","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 3 20:01:18 2021\n\n@author: Livia Alves\n\"\"\"\n\ncity = input('Em que cidade você nasceu: ')\ncity = city.upper().strip()\ncorte = city.split(' ')\nprint(corte[0] == 'SANTO')\n","repo_name":"liviaasantos/curso-em-video-python3","sub_path":"mundo-1/ex024.py","file_name":"ex024.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18508172619","text":"import logging\n\nimport requests\nfrom prettyconf import Configuration\n\nconfig = Configuration()\n\n\nclass RequestEndPoint:\n\n def __init__(self):\n self.ENDPOINT = config('ENDPOINT')\n\n def clinics(self, sigla='clinics', id=None):\n url = f\"{self.ENDPOINT}/{sigla}/{id}\"\n request = requests.get(url)\n return request.json()\n\n def patients(self, sigla='patients', id=None):\n try:\n url = f\"{self.ENDPOINT}/{sigla}/{id}\"\n request = requests.get(url)\n\n if not request.status_code == 404:\n return request.json()\n\n not_found = dict(\n error=dict(\n message=\"patient not found\",\n code=\"03\"\n )\n\n )\n return not_found\n except ConnectionError as connection:\n logging.error(str(connection))\n _connection = dict(\n error=\"patients service not available\",\n code=\"06\"\n )\n return _connection\n\n def physicians(self, sigla='physicians', id=None):\n try:\n url = f\"{self.ENDPOINT}/{sigla}/{id}\"\n request = requests.get(url)\n if not request.status_code == 404:\n return request.json()\n\n not_found = dict(\n error=dict(\n message=\"physician not found\",\n code=\"02\"\n )\n\n )\n return not_found\n except ConnectionError as connection:\n logging.error(str(connection))\n _connection = dict(\n error=\"physicians service not available\",\n code=\"05\"\n )\n\n return _connection\n\n def metrics_request(self, sigla='metrics', clinics_id=None, patients_id=None, physicians_id=None):\n\n try:\n url = f\"{self.ENDPOINT}/{sigla}\"\n\n clinics = self.clinics(id=clinics_id)\n patients = self.patients(id=patients_id)\n physicians = self.physicians(id=physicians_id)\n\n payload = dict(\n clinic_id=clinics['id'],\n clinic_name=clinics['name'],\n physician_id=physicians['id'],\n physician_name=physicians['name'],\n physician_crm=physicians['crm'],\n patient_id=patients['id'],\n patient_name=patients['name'],\n patient_email=patients['email'],\n patient_phone=patients['phone']\n\n )\n\n request = requests.post(url, data=payload)\n\n return request.json()\n except ConnectionError as connection:\n 
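            # Editorial note (assumption about intent): network failures from the\n            # requests library surface as requests.exceptions.ConnectionError,\n            # which is not a subclass of the builtin ConnectionError caught\n            # here, so this handler may never fire for failed HTTP calls.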
logging.error(str(connection))\n _connection = dict(\n error=\"metrics service not available\"\n )\n return _connection\n\n# if __name__ == \"__main__\":\n# r = RequestEndPoint()\n# r.metrics_request()\n","repo_name":"andreemidio/iclinic","sub_path":"apps/prescriptions/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":2921,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"70296894594","text":"##Use a while loop to solve the following problem:\n##A slow, but determined, walker sets off from Leicester\n##to cover the 102 miles to London at 2 miles per hour.\n##Another walker sets off from London heading to Leicester\n##going at 1 mile per hour. Where do they meet?\n\nwalk1 = 0\nwalk2 = 102\n\n\nwhile(walk1 != walk2):\n walk1 +=2\n walk2 -=1\n\nprint(walk1)\n\n\n","repo_name":"bb071988/thinkful","sub_path":"walkers.py","file_name":"walkers.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32448408980","text":"import os\nimport os.path as op\nimport nipype.interfaces.io as nio # Data i/o\nimport nipype.interfaces.utility as util # utility\nimport nipype.pipeline.engine as pe # pypeline engine\nfrom forward.dti import create_conductivity_tensor_mesh_workflow\n\nfrom forward.datasets import sample\ndata_path = sample.data_path()\n\nsubject_list = ['TMS007']\n\ninfosource = pe.Node(interface=util.IdentityInterface(\n fields=['subject_id']), name=\"infosource\")\ninfosource.iterables = ('subject_id', subject_list)\n\ninfo = dict(dwi=[['subject_id', 'DWI_1000']],\n bvecs=[['subject_id', 'grad_1000_invZ']],\n bvals=[['subject_id', 'bval_1000']],\n mesh_file=[['subject_id', 'subject_id']],\n struct=[['subject_id', 'T1mprage']],\n t1_fsl_space=[['subject_id', 'orig']])\n\ndatasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'],\n outfields=info.keys()),\n name='datasource')\n\ndatasource.inputs.sort_filelist = True\ndatasource.inputs.template = \"%s/%s\"\ndatasource.inputs.base_directory = data_path\ndatasource.inputs.field_template = dict(\n dwi='%s/*%s.nii.gz', bvecs='%s/*%s', bvals='%s/*%s',\n mesh_file='../structural_datasink/subject/volume_mesh/*%s/%s*.msh',\n t1_fsl_space='../structural_datasink/subject/t1_fsl_space/*%s/%s*.nii.gz',\n struct=op.join(data_path,'%s/%s*.nii'))\ndatasource.inputs.template_args = info\n\npreproc = create_conductivity_tensor_mesh_workflow()\n\ndatasink = pe.Node(interface=nio.DataSink(),\n name=\"datasink\")\ndatasink.inputs.base_directory = op.abspath('diffusion_datasink')\ndatasink.inputs.container = 'subject'\n\ndti_proc = pe.Workflow(name=\"dti_proc\")\ndti_proc.base_dir = os.path.abspath('dti_proc')\ndti_proc.connect([\n (infosource,datasource,[('subject_id', 'subject_id')]),\n (datasource,preproc,[('dwi','inputnode.dwi'),\n ('bvals','inputnode.bvals'),\n ('bvecs','inputnode.bvecs'),\n ('mesh_file','inputnode.mesh_file'),\n ('struct','inputnode.struct'),\n ('t1_fsl_space','inputnode.t1_fsl_space'),\n ])\n ])\ndti_proc.connect([(preproc, datasink, [(\"outputnode.mesh_file\", \"mesh_file\")])])\ndti_proc.connect([(preproc, datasink, [(\"outputnode.diff_V1\", \"diff_tensor\")])])\ndti_proc.connect([(preproc, datasink, [(\"outputnode.cond_V1\", \"cond_tensor\")])])\ndti_proc.connect([(preproc, datasink, [(\"outputnode.mean_conductivity\", \"mean_conductivity\")])])\ndti_proc.connect([(preproc, datasink, [(\"outputnode.fa_t1_space\", 
\"fa_t1_space\")])])\ndti_proc.connect([(infosource, datasink, [(\"subject_id\", \"subject_id\")])])\n\nif __name__ == '__main__':\n dti_proc.write_graph()\n\n import time\n start = time.time()\n dti_proc.run()\n end = time.time()\n print(start)\n print(end)\n print(end-start)\n #dti_proc.run(plugin='MultiProc', plugin_args={'n_procs' : 4})\n","repo_name":"CyclotronResearchCentre/forward","sub_path":"examples/step2_process_diffusion.py","file_name":"step2_process_diffusion.py","file_ext":"py","file_size_in_byte":3182,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"61"} +{"seq_id":"30521096666","text":"# -*_ coding: utf-8 -*-\n\"\"\"mimecat - Easy catalogue of MIME types and extensions.\n\"\"\"\n\n#\n# taken from mimetypes.py\n#\n_KNOWNFILES = [\n \"/etc/mime.types\",\n \"/etc/httpd/mime.types\", # Mac OS X\n \"/etc/httpd/conf/mime.types\", # Apache\n \"/etc/apache/mime.types\", # Apache 1\n \"/etc/apache2/mime.types\", # Apache 2\n \"/usr/local/etc/httpd/conf/mime.types\",\n \"/usr/local/lib/netscape/mime.types\",\n \"/usr/local/etc/httpd/conf/mime.types\", # Apache 1.2\n \"/usr/local/etc/mime.types\", # Apache 1.3\n ]\n\nclass Catalogue(object):\n \"\"\"A Catalogue object represents a list of known MIME types and\n extensions. It can be initialized with a given filename or list of\n filenames. The files are expected to be in the format of a mime.types\n file.\n\n This class does not know about, care about, or possess the ability to\n process, parameters after the initial MIME type. For example,\n \"text/plain; charset=us-ascii.\"\n\n \"\"\"\n\n def __init__(self, filenames = None, filep = None):\n \"\"\"Initializes this catalogue from the filename or filenames in\n ``filenames`` or from the file or files in ``filep``\n\n If ``filenames`` and ``filep`` are None, then a list of common\n locations is tried to find ``mime.types`` when one is found, the MIME\n type definitions are loaded and the object is finished initializing. If\n none of the filenames can be found, IOError will be raised.\n\n If ``filenames`` is a list, then _all_ the files listed will be\n loaded. If ``filenames`` is a string, then the named file will be\n loaded. If none of the files can be found, IOError will be raised.\n\n If ``filep`` is not None, then that file-like object will be\n read. Note: Files will *not* be closed after reading. 
It is the\n        caller's responsibility.\n\n        If both ``filenames`` and ``filep`` are specified, the ``filep``\n        is loaded first, followed by ``filenames``.\n\n        :param filenames: a filename or a list of filenames\n        containing MIME type definitions in the style of mime.types\n        :param filep: a file-like object to read definitions from.\n\n        :raises: IOError If unable to find any of the files.\n\n        \"\"\"\n        self._types_to_exts = None\n        self._exts_to_types = None\n        self._known_mediatypes = None\n        self._known_mimetypes = None\n        self._known_extensions = None\n\n        self.clear()\n\n        if filenames is None and filep is None:\n            self.load_filenames(_KNOWNFILES, True)\n        else:\n            if filep is not None:\n                self.load_file(filep)\n\n            if filenames is not None:\n                if isinstance(filenames, str):\n                    filenames = [filenames]\n                self.load_filenames(filenames)\n\n    def clear(self):\n        \"\"\"Clears out catalogue of known types.\n        \"\"\"\n        self._types_to_exts = {}\n        self._exts_to_types = {}\n        self._known_mediatypes = set()\n        self._known_mimetypes = set()\n        self._known_extensions = set()\n\n    def load_filenames(self, filenames, stop_on_successful_load = False):\n        \"\"\"Loads in MIME type definitions from ``filenames``. If\n        ``stop_on_successful_load`` is True, it will stop after the first\n        successful load; otherwise it will load all the files listed.\n\n        :param filenames: List of files that could potentially contain\n        MIME type definitions.\n\n        :param stop_on_successful_load: If False, then load all the files.\n\n        :raises: IOError if none of the listed files can be loaded.\n\n        \"\"\"\n        successful_load = False\n        for filename in filenames:\n            try:\n                self.load_filename(filename)\n                successful_load = True\n                if stop_on_successful_load:\n                    break\n            except IOError:\n                pass\n\n        if not successful_load:\n            raise IOError(\"Could not locate a suitable mime.types file.\")\n\n    def load_filename(self, filename):\n        \"\"\"Loads in MIME type definitions from ``filename``.\n\n        :param filename: The filename to load into the class\n        \"\"\"\n        with open(filename, \"r\") as filep:\n            self.load_file(filep)\n\n    def load_file(self, filep):\n        \"\"\"Loads in MIME type definitions from open ``filep``\n        :param filep: The file to load into the class\n        \"\"\"\n        for (mime_type, extensions) in _parse_file(filep):\n            self.add_type(mime_type, extensions)\n\n    @property\n    def known_mediatypes(self):\n        \"\"\"Returns the set of known media types (mediatype/subtype)\n\n        :returns: frozen set of media types\n        \"\"\"\n        return frozenset(self._known_mediatypes)\n\n    @property\n    def known_mimetypes(self):\n        \"\"\"Returns the set of known mimetypes.\n\n        :returns: frozen set of mimetypes\n        \"\"\"\n        return frozenset(self._known_mimetypes)\n\n    @property\n    def known_extensions(self):\n        \"\"\"Returns the set of known extensions.\n\n        :returns: frozen set of extensions\n        \"\"\"\n        return frozenset(self._known_extensions)\n\n    def get_extensions(self, typename):\n        \"\"\"Returns an ordered list of known extensions to the given MIME type.\n        Order is determined by the order in which the extensions were\n        listed in the ``mime.types`` file. First extension encountered,\n        then second, and so forth.\n\n        :param typename: String of the MIME type.\n        :returns: List of known extensions.
These will include a leading .\n :raises: KeyError If MIME type is unknown.\n\n \"\"\"\n return self._types_to_exts[typename]\n\n def get_types(self, extension):\n \"\"\"Returns an ordered list of known MIME types for the given extension.\n Order is determined by the order in which the MIME types were\n added in the ``mime.types`` file.\n\n :param extension: String of the extension. This can include the\n leading . or omit it.\n :returns: List of known MIME types that use the given extension.\n :raises: KeyError If the extension is unknown.\n\n \"\"\"\n return self._exts_to_types[_canonicalize_extension(extension)]\n\n def add_type(self, typename, extensions):\n \"\"\"Adds a new entry for ``typename`` for the given list of\n ``extensions.`` If ``typename`` is already registered, then\n appends list of extensions to existing entry.\n\n :param typename: The MIME type to add.\n\n :param extensions: String of extension or list of extensions to\n add. This can include the leading . or omit it.\n\n :raises: ValueError If ``typename`` is not of the format type/subtype\n\n \"\"\"\n (mediatype, _) = typename.split(\"/\")\n\n if isinstance(extensions, str):\n extensions = [extensions]\n\n self._known_mediatypes |= set([mediatype])\n self._known_mimetypes |= set([typename])\n self._known_extensions |= set(_canonicalize_extension(ext) \\\n for ext in extensions)\n\n if typename not in self._types_to_exts:\n self._types_to_exts[typename] = []\n\n existing_exts = self._types_to_exts[typename]\n for ext in extensions:\n ext = _canonicalize_extension(ext)\n if ext not in existing_exts:\n existing_exts.append(ext)\n\n if ext not in self._exts_to_types:\n self._exts_to_types[ext] = []\n existing_types = self._exts_to_types[ext]\n\n if typename not in existing_types:\n existing_types.append(typename)\n\ndef _parse_file(filep):\n \"\"\"Returns a generator which yields parsed lines from a ``mime.types``\n file.\n\n :param filep: A file-like object opened for reading\n :yields: A tuple containing the mime_type and associated extensions.\n \"\"\"\n for line in filep:\n parsed_line = _parse_line(line)\n if parsed_line is None:\n continue\n yield parsed_line\n\ndef _parse_line(line):\n \"\"\"Parses a line from ``mime.types``\n\n :param line: The line to parse.\n :returns: Tuple with mimetype and a list of extensions. If line is blank,\n return None\n :raises: ValueError If mimetype is invalid (not type/subtype)\n \"\"\"\n if \"#\" in line:\n line = line[:line.find(\"#\")]\n\n parts = line.split()\n\n if not parts:\n return None\n\n mimetype = parts[0]\n\n mimetype.index(\"/\") # check for /, raise ValueError if not found\n\n extensions = []\n if len(parts) > 1:\n extensions = [_canonicalize_extension(ext) for ext in parts[1:]]\n\n return (mimetype, extensions)\n\ndef _canonicalize_extension(ext):\n \"\"\"Returns a transformed ext that has a uniform pattern.\n Specifically, if ``ext`` has a leading . then it is simply returned.\n If ``ext`` doesn't have a leading . then it is prepended.\n Exceptions to this are if ``ext`` is ``None`` or \"\". If ``ext``\n is \"\" then \"\" is return. 
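    For example, \"txt\" and \".txt\" would both come back as \".txt\".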
If ``ext`` is None then None is returned.\n\n    :param ext: The extension to canonicalize.\n    :returns: The canonicalized extension.\n\n    \"\"\"\n    if ext is None or ext == \"\" or ext.startswith(\".\"):\n        return ext\n    return \".\" + ext\n","repo_name":"mizhi/mimecat","sub_path":"mimecat.py","file_name":"mimecat.py","file_ext":"py","file_size_in_byte":9329,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
{"seq_id":"27893467075","text":"# -*- coding: utf-8 -*-\nfrom enerdata.metering.meter import *\nfrom enerdata.metering.measure import *\nfrom expects.testing import failure\nfrom mamba import description, it, context, before\n\n\ndef generate_random_date():\n    from random import randint\n    from calendar import monthrange\n    year = randint(2000, 2014)\n    month = randint(1, 12)\n    last_day = monthrange(year, month)[1]\n    day = randint(1, last_day)\n    return date(year, month, day)\n\n\nwith description('Creating a meter'):\n    with before.all:\n        self.meter = Meter('123456789')\n    with it('should have an empty list of energy measures'):\n        assert len(self.meter.energy_measures) == 0\n    with it('should have an empty list of power measures'):\n        assert len(self.meter.power_measures) == 0\n    with context('If no date is passed'):\n        with it('should be today'):\n            assert self.meter.start_date == date.today()\n    with context('If date is passed'):\n        with it('must be the same'):\n            m = Meter('XXX', date(1983, 5, 28))\n            assert m.start_date == date(1983, 5, 28)\n\nwith description('Adding a measure'):\n    with before.each:\n        self.meter = Meter('123456789', start_date=date(2014, 1, 1))\n    with it('only accepts EnergyMeasure or PowerMeasure'):\n        with failure:\n            m = Measure(date(2014, 1, 1), TariffPeriod('P1', 'te'), 0)\n            self.meter.add_measure(m)\n    with it('doesn\'t accept measures whose date is before the start date'):\n        self.meter.start_date = date(2014, 1, 1)\n        with failure:\n            m = EnergyMeasure(date(2013, 1, 1), TariffPeriod('P1', 'te'), 0)\n            self.meter.add_measure(m)\n    with it('doesn\'t accept measures whose date is greater than the end date'):\n        self.meter.end_date = date(2014, 2, 1)\n        m = EnergyMeasure(date(2014, 2, 2), TariffPeriod('P1', 'te'), 0)\n        with failure:\n            self.meter.add_measure(m)\n\n    with context('Energy'):\n        with it('should be added in energy measures list'):\n            em = EnergyMeasure(date(2014, 1, 1), TariffPeriod('P1', 'te'), 0)\n            self.meter.add_measure(em)\n            assert len(self.meter.energy_measures) == 1\n            assert self.meter.energy_measures[0] == em\n        with it('should maintain energy measures sorted'):\n            self.meter.start_date = date(2000, 1, 1)\n            for _x in range(1, 1000):\n                d = generate_random_date()\n                m = EnergyMeasure(d, TariffPeriod('P1', 'te'), 0)\n                self.meter.add_measure(m)\n            before = EnergyMeasure(date(1999, 1, 1), TariffPeriod('P1', 'te'), 0)\n            for x in self.meter.energy_measures:\n                assert x >= before\n                before = x\n    with context('Power'):\n        with it('should be added in power measures list'):\n            pm = PowerMeasure(date(2014, 1, 1), TariffPeriod('P1', 'tp'), 0)\n            self.meter.add_measure(pm)\n            assert len(self.meter.power_measures) == 1\n            assert self.meter.power_measures[0] == pm\n        with it('should maintain power measures sorted'):\n            self.meter.start_date = date(2000, 1, 1)\n            for _x in range(1, 1000):\n                d = generate_random_date()\n                m = PowerMeasure(d, TariffPeriod('P1', 'tp'), 0)\n                self.meter.add_measure(m)\n            before = EnergyMeasure(date(1999, 1, 1), TariffPeriod('P1', 'te'), 0)\n            for x in self.meter.power_measures:\n                assert x >= before\n                before =
x\n","repo_name":"gisce/enerdata","sub_path":"spec/metering/meter_spec.py","file_name":"meter_spec.py","file_ext":"py","file_size_in_byte":3535,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"13407811570","text":"n = int(input())\nfirst_pole = [0 for _ in range(0, n)]\nsecond_pole = [0 for _ in range(0, n)]\ndp = [0 for _ in range(0, n)]\ncount = 0\n\nfor i in range(0, n):\n string = str(input())\n first_pole[i] = int(string.split(\" \")[0])\n second_pole[i] = int(string.split(\" \")[1])\n\nfirst_dic = {}\nfor i in first_pole:\n first_dic[i] = second_pole[first_pole.index(i)]\n\nprint(first_dic)\n\nsort_dict = sorted(first_dic.items())\nprint(sort_dict)\n\nfirst_pole = []\nsecond_pole = []\n\nfor key, value in sort_dict:\n first_pole.append(key)\n second_pole.append(value)\n\n\n# for key, value in first_dic.items():\n# first_pole.append(key)\n# second_pole.append(value)\n# print(first_pole, second_pole)\n\ntemp =0\nmax_value = 0\ndp[0] = 1\nfor i in range(0, len(second_pole)):\n for j in range(0, i):\n if second_pole[i] > second_pole[j]:\n temp = dp[j] + 1\n if max_value < temp:\n max_value = temp\n dp[i] = max_value\n if temp == 0:\n dp[i] = 1\n temp =0\n max_value = 0\n\n\nprint(len(second_pole)-max(dp))\n \n\n\n\n\n\n# \"8\"\n# \"1 8\"\n# \"3 9\"\n# \"2 2\"\n# \"4 1\"\n# \"6 4\"\n# \"10 10\"\n# \"9 7\"\n# \"7 6\"","repo_name":"manjoong/python_study","sub_path":"beakjoon_electronic_line_2565.py","file_name":"beakjoon_electronic_line_2565.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9051345805","text":"import sys\nimport threading\nimport tkinter as tk\nimport datetime\nimport schedule\nimport time\nimport json\nimport os\nfrom tkinter import messagebox\nfrom .console_output import ConsoleOutput\nfrom .virus_total_checker import VirusTotalChecker\nimport datetime\nfrom tkinter import filedialog\nimport matplotlib.pyplot as plt\n\nclass App:\n def __init__(self, master):\n self.master = master\n master.title('ThreatScraper')\n self.threat_checker = None # Initialize the VirusTotalChecker object as None\n\n # Create API key entry\n self.api_key_label = tk.Label(master, text='API Key:')\n self.api_key_label.grid(row=0, column=0)\n self.api_key_entry = tk.Entry(master, width=50)\n self.api_key_entry.grid(row=0, column=1)\n\n # Create hash value entry\n self.hash_value_label = tk.Label(master, text='Hash Value:')\n self.hash_value_label.grid(row=1, column=0)\n self.hash_value_entry = tk.Entry(master, width=50)\n self.hash_value_entry.grid(row=1, column=1)\n \n # Create hash type selection\n self.hash_type = tk.StringVar(master) # create a tkinter string variable\n self.hash_type.set(\"SHA-256\") # set default value\n self.hash_types = [\"MD5\", \"SHA-1\", \"SHA-256\"] # list of hash types\n self.hash_type_option = tk.OptionMenu(master, self.hash_type, *self.hash_types)\n self.hash_type_option.grid(row=1, column=2)\n\n # Create filename entry\n self.filename_label = tk.Label(master, text='Excel File:')\n self.filename_label.grid(row=2, column=0)\n self.filename_entry = tk.Entry(master, width=50)\n self.filename_entry.grid(row=2, column=1)\n\n # Create time entry\n self.time_label = tk.Label(master, text='Schedule Times (HH:MM) Separated by comma:')\n self.time_label.grid(row=3, column=0)\n self.time_entry = tk.Entry(master, width=50)\n self.time_entry.grid(row=3, column=1)\n\n # Load saved configurations\n 
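        # Illustrative note (added comment): config.json is the plain-JSON file\n        # written by save_config() below; a hypothetical example of its contents is\n        # {\"api_key\": \"...\", \"hash_value\": \"...\", \"hash_type\": \"SHA-256\",\n        #  \"filename\": \"...\", \"schedule_times\": \"09:00,18:00\"}.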
self.config_filename = 'config.json'\n self.load_config()\n\n # Checkbox for rescanning\n self.rescan_var = tk.BooleanVar()\n self.rescan_checkbox = tk.Checkbutton(master, text=\"Rescan hash\", variable=self.rescan_var)\n self.rescan_checkbox.grid(row=2, column=2, columnspan=2)\n\n # Create check button\n self.check_button = tk.Button(master, text='Check VirusTotal', command=self.check_virustotal)\n self.check_button.grid(row=5, column=1)\n\n # Create start button\n self.start_button = tk.Button(master, text='Start Schedule', command=self.start_schedule)\n self.start_button.grid(row=5, column=2)\n\n # Create stop button\n self.stop_button = tk.Button(master, text='Stop Schedule', command=self.stop_schedule, state='disabled')\n self.stop_button.grid(row=6, column=2)\n\n # Create submit file button\n self.submit_file_button = tk.Button(master, text='Submit File for Analysis', command=self.submit_file)\n self.submit_file_button.grid(row=5, column=0)\n\n # Create console output in main window\n self.console_text = tk.Text(master, state='disabled', height=5)\n self.console_text.grid(row=7, column=0, columnspan=2, sticky='nsew')\n self.console_output = ConsoleOutput(self.console_text)\n self.console_output.set_text_widget_state('normal')\n sys.stdout = self.console_output\n\n # Configure the grid to make the console text box resize with the window\n master.grid_rowconfigure(7, weight=1)\n master.grid_columnconfigure(0, weight=1)\n master.grid_columnconfigure(1, weight=1)\n\n # Initialize schedule variables\n self.schedule_thread = None\n self.schedule_running = False\n\n # Start the graph\n self.threat_graph = None\n\n def load_config(self):\n # Check if config file exists\n if os.path.exists(self.config_filename):\n with open(self.config_filename, 'r') as f:\n config = json.load(f)\n \n # Load saved API key\n if 'api_key' in config:\n self.api_key_entry.insert(0, config['api_key'])\n \n # Load saved hash value\n if 'hash_value' in config:\n self.hash_value_entry.insert(0, config['hash_value'])\n\n # Load saved hash type\n if 'hash_type' in config:\n self.hash_type.set(config['hash_type'])\n \n # Load saved filename\n if 'filename' in config:\n self.filename_entry.insert(0, config['filename'])\n \n # Load saved schedule times\n if 'schedule_times' in config:\n self.time_entry.insert(0, config['schedule_times'])\n\n def save_config(self):\n config = {\n 'api_key': self.api_key_entry.get(),\n 'hash_value': self.hash_value_entry.get(),\n 'hash_type': self.hash_type.get(),\n 'filename': self.filename_entry.get(),\n 'schedule_times': self.time_entry.get(),\n }\n\n with open(self.config_filename, 'w') as f:\n json.dump(config, f, indent=4)\n\n def submit_file(self):\n file_path = filedialog.askopenfilename()\n if file_path:\n if self.threat_checker is None:\n api_key = self.api_key_entry.get()\n hash_value = self.hash_value_entry.get()\n hash_type = self.hash_type.get()\n filename = self.filename_entry.get()\n self.threat_checker = VirusTotalChecker(api_key, hash_value, hash_type, filename, self.console_output, self.master)\n\n self.threat_checker.submit_file(file_path)\n\n def check_virustotal(self):\n api_key = self.api_key_entry.get()\n hash_value = self.hash_value_entry.get()\n hash_type = self.hash_type.get()\n filename = self.filename_entry.get()\n\n # Check if the user has specified an API key\n if not api_key.strip():\n messagebox.showwarning(\n \"API Key Required\",\n \"Please enter your VirusTotal API Key.\"\n )\n return\n\n # Check if the user has specified a hash value\n if not 
hash_value.strip():\n messagebox.showwarning(\n \"Hash Value Required\",\n \"Please enter a Hash Value to check.\"\n )\n return\n\n # Check if filename is entered\n if not filename.strip():\n messagebox.showerror(\n \"Filename Required\",\n \"Please enter the path to an Excel file.\",\n )\n return\n\n if self.threat_checker is None:\n self.threat_checker = VirusTotalChecker(api_key, hash_value, hash_type, filename, self.console_output, self.master)\n \n if self.rescan_var.get(): # if checkbox is checked\n self.master.after(0, self.threat_checker.rescan_hash)\n else:\n self.master.after(0, self.threat_checker.check_virustotal)\n\n # Save configurations before running the check\n self.save_config()\n\n # Display the graphs\n plt.show(block=False)\n\n\n def start_schedule(self):\n # Get schedule times from entry\n schedule_times = self.time_entry.get().split(',')\n\n # Disable start button and enable stop button\n self.start_button.configure(state='disabled')\n self.stop_button.configure(state='normal')\n\n # Loop through schedule times and schedule checks\n for schedule_time in schedule_times:\n # Check if time is valid\n try:\n datetime.datetime.strptime(schedule_time.strip(), '%H:%M')\n except ValueError:\n messagebox.showerror('Invalid Time', f'{schedule_time} is not a valid time. Please enter a valid time in HH:MM format')\n return\n \n # Schedule daily check at this time\n schedule.every().day.at(schedule_time.strip()).do(self.master.after, 0, self.check_virustotal)\n\n # Save configurations before starting the schedule\n self.save_config()\n\n # Set schedule_running to True\n self.schedule_running = True\n\n # Update console window\n self.console_output.write('Schedule started\\n')\n\n # Start schedule thread\n self.schedule_thread = threading.Thread(target=self.run_schedule, daemon=True)\n self.schedule_thread.start()\n\n def run_schedule(self):\n while self.schedule_running:\n schedule.run_pending()\n time.sleep(1)\n\n # Update console window\n self.console_output.write('Schedule stopped\\n')\n\n def stop_schedule(self):\n # Enable start button and disable stop button\n self.start_button.configure(state='normal')\n self.stop_button.configure(state='disabled')\n\n # Clear schedule\n schedule.clear()\n\n # Set schedule_running to False\n self.schedule_running = False\n\n def quit(self):\n sys.stdout = sys.__stdout__\n self.master.destroy()\n\n def show_graph(self):\n self.threat_checker.start()\n","repo_name":"amorath/ThreatScraper","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9257,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"34469333349","text":"#!/usr/bin/env python3\n\n\"\"\"Compares the time taken for the vectorised stochastic (with gaussian fluctuations) Ricker Eqn\"\"\"\n\nimport time\nimport numpy as np\nimport scipy as sc\nimport matplotlib.pylab as p\n\n# def stochrick(p0=np.random.uniform(0.5, 1.5, 1000), r=1.2, K=1, sigma=0.2, numyears=100):\n# #initialize\n# N = np.full([numyears, len(p0)], np.nan)\n# N[0,] = p0\n \n# for pop in range(0, len(p0)): #loop through the populations\n# for yr in range(1, numyears): #for each pop, loop through the years\n# N[yr,pop] = N[yr-1,pop] * np.exp(r*(1-N[yr-1,pop]/K) + np.random.normal(0, sigma, len(p0)))\n\n# return N\n\n# f1 = p.figure()\n# p.plot(stochrick())\n# p.grid()\n# p.show()\n\n# start = time.time()\n# stochrick()\n# end = time.time()\n# elapsed = end - start\n# print(\"Time taken for unvectorised stochastic Ricker model to run: 
{}\".format(elapsed))\n\n# Now write another function called stochrickvect that vectorizes the above \n# to the extent possible, with improved performance: \n\ndef stochrickvect(p0=np.random.uniform(0.5, 1.5, 1000), r=1.2, K=1, sigma=0.2, numyears=100):\n \"\"\"reproduces vectorised stochastic ricker eqn from Vectorize2.R\"\"\"\n #initialize\n N = np.full([numyears, len(p0)], np.nan)\n N[0,] = p0\n\n for yr in range(1, numyears): #for each pop, loop through the years\n N[yr,] = N[yr - 1,] * np.exp(r * (1 - N[yr - 1,] / K) + np.random.normal(0, sigma, len(p0)))\n\n return N\n\nstart2 = time.time()\nstochrickvect()\nend2 = time.time()\nelapsed2 = end2 - start2\n\nprint(\"Time taken for vectorized Stochastic Ricker is: {}\".format(elapsed2))","repo_name":"cupofteaandcake/CMEECourseWork","sub_path":"Week3/Code/Vectorize2.py","file_name":"Vectorize2.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34737690439","text":"from attack.base import ConstrainedMethod\nimport torch\nimport os\nfrom attack.face_landmark import getlist_landmark\n\n__all__ = ['LGC']\nclass LGC(ConstrainedMethod):\n def __init__(self, model, goal, distance_metric, eps, iters=20, mu=1.0, num_samples=4, sigma=1):\n super(LGC, self).__init__(model, goal, distance_metric, eps)\n self.iters = iters\n self.mu = mu\n self.num_samples = num_samples\n self.sigma = sigma\n def batch_attack(self, xs, ys, pairs, **kwargs):\n xs_adv = xs.clone().detach().requires_grad_(True)\n names = [os.path.basename(x[0])[:-4] for x in pairs]\n# names = ['-'.join(img_path[0].split('/')[-3:]) for img_path in pairs]\n g = torch.zeros_like(xs_adv)\n for _ in range(self.iters):\n img_shape = xs_adv.shape[2:]\n mask = getlist_landmark(names, self.num_samples, img_shape, sigma=self.sigma)\n mask = torch.Tensor(mask.transpose((0, 3, 1, 2))).cuda()\n features = self.model.forward(xs_adv * mask)\n loss = self.getLoss(features, ys)\n loss.backward()\n grad = xs_adv.grad\n grad = grad / grad.abs().mean(dim=[1, 2, 3], keepdim=True)\n g = g * self.mu + grad\n self.model.zero_grad()\n xs_adv = self.step(xs_adv, 1.5 * self.eps / self.iters, g, xs, self.eps)\n xs_adv = xs_adv.detach().requires_grad_(True)\n return xs_adv\n\n","repo_name":"bholdmanny/Face-Robustness-Benchmark","sub_path":"attack/LGC.py","file_name":"LGC.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"16726604508","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport cv2\nimport os\n\ndef show(img):\n\tcv2.imshow(\"image\", img)\n\tcv2.waitKey(0)\n\tcv2.destroyAllWindows()\n\n# LPIPS score\nf = open(\"LPIPS.txt\", \"r\")\nlines = f.read().splitlines()\nf.close()\n\nnames = np.array([i.split(\": \")[0] for i in lines])\nlpips = np.array([float(i.split(\": \")[1]) for i in lines])\n\n# remove instance where the original and target angles are the same\nangles = np.array([float(i.split(\"_\")[2]) for i in names])\nnames = names[angles != 0]\nlpips = lpips[angles != 0]\nangles = angles[angles != 0]\n\n# compute average\n(angle_uniq, n_uniq) = np.unique(angles, return_counts=True)\nlpips_avg = np.zeros(len(angle_uniq))\nfor idx,angle in enumerate(angle_uniq):\n\tlpips_avg[idx] = np.mean(lpips[angles == angle])\n\n\n\n# Image Blurriness (IB) score\nimg_paths = [os.path.join(\"log/eval/genes\", i) for i in names]\nib = np.zeros(len(img_paths))\n\n# 
compute individual IB scores\nkernel = np.array([[0,1,0],[1,-4,1],[0,1,0]], dtype=np.float)\nfor idx,img_path in enumerate(img_paths):\n\timg = cv2.imread(img_path)\n\timg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\tib[idx] = 1/cv2.Laplacian(img, cv2.CV_64F).var()\n\n# compute average IB scores\nib_avg = np.zeros(len(angle_uniq))\nfor idx, angle in enumerate(angle_uniq):\n\tib_avg[idx] = np.mean(ib[angles == angle])\n\n# Gaze Estimation Error\n\n\n# plot\nfig, axes = plt.subplots(2)\naxes[0].plot(angle_uniq, lpips_avg)\naxes[0].set_xlabel(\"Redirected Angle\")\naxes[0].set_ylabel(\"LPIPS\")\n\naxes[1].plot(angle_uniq, ib_avg)\naxes[1].set_xlabel(\"Redirected Angle\")\naxes[1].set_ylabel(\"Image Blurriness\")\n\nfig.suptitle(\"Evaluation results (lower scores are better)\")\nfig.tight_layout()\nfig.savefig(\"scores\")\nfig.show()\n","repo_name":"sieumap43/FachPraktikum-SS21","sub_path":"pipeline2/gaze_redirection/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"9633553270","text":"from kapitan.inputs.kadet import load_from_search_paths\n\nfrom .common import KubernetesResource, ResourceType\n\nkgenlib = load_from_search_paths(\"generators\")\n\n\nclass NetworkPolicy(KubernetesResource):\n resource_type = ResourceType(\n kind=\"NetworkPolicy\", api_version=\"networking.k8s.io/v1\", id=\"network_policy\"\n )\n\n def new(self):\n super().new()\n\n def body(self):\n super().body()\n policy = self.config\n workload = self.workload\n self.root.spec.podSelector.matchLabels = workload.root.metadata.labels\n self.root.spec.ingress = policy.ingress\n self.root.spec.egress = policy.egress\n if self.root.spec.ingress:\n self.root.spec.setdefault(\"policyTypes\", []).append(\"Ingress\")\n\n if self.root.spec.egress:\n self.root.spec.setdefault(\"policyTypes\", []).append(\"Egress\")\n","repo_name":"bsda/kapitan-reference","sub_path":"components/generators/kubernetes/networking.py","file_name":"networking.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"33573194879","text":"##this work\nimport utils\n\n\n## Lisa tools\nimport lisaconstants\n\n\n\n##\nimport numpy as np\nfrom scipy.interpolate import InterpolatedUnivariateSpline as spline\nimport matplotlib.pyplot as plt\n\n\n\nclass LISA_analytical_noise:\n def __init__(self,name_,level_):\n self.noise_init(name_,level_)\n\n def __str__(self):\n if self.initialized:\n display = self.name+f\" configuration is : {self.level}\"\n else:\n display = \"Not initialized...\"\n return display\n\n def noise_init(self,name_,level_):\n self.name = name_\n self.level = level_\n self.initialized = True\n \n def set_noise_level(self,level_):\n self.level = level_\n\n def get_noise_level(self):\n return self.level\n\n\n\n \n def instru_noise_psd(self,freq_, option_=\"X\", tdi2_=False, arm_length_=2.5e9):\n \"\"\"Return noise PSD from acc and oms noise, at given freq. 
range.\n        :param array freq: frequency range\n        :param str option: TDI name can be X, XY, A, E, T\n        :param bool tdi2: TDI1.5 or 2nd generation\n        :param float arm_length: arm length in meter\n        :return array s_n: noise PSD\n        \"\"\"\n        clight = lisaconstants.SPEED_OF_LIGHT\n        #print(\"DEBUG : instru_noise_psd : \",tdi2)\n        # LISA noise\n        # Acceleration\n        sa_a = (\n            (3e-15) ** 2 * (1.0 + (0.4e-3 / freq_) ** 2) * (1.0 + (freq_ / 8e-3) ** 4)\n        )  # in acceleration\n        sa_d = sa_a * (2.0 * np.pi * freq_) ** (-4.0)  # in displacement\n        s_pm = sa_d * (2.0 * np.pi * freq_ / clight) ** 2  # in rel freq unit\n\n        # Optical Metrology System\n        psd_oms_d = (15.0e-12) ** 2  # in displacement\n        s_op = psd_oms_d * (2.0 * np.pi * freq_ / clight) ** 2  # in rel freq unit\n\n        # Light travel time\n        lisa_lt = arm_length_ / clight\n\n        # Angular frequency\n        omega = 2.0 * np.pi * freq_\n        x = omega * lisa_lt\n\n        if option_ == \"X\":\n            s_n = 16.0 * np.sin(x) ** 2 * (2.0 * (1.0 + np.cos(x) ** 2) * s_pm + s_op)\n        elif option_ == \"XY\":\n            s_n = -4.0 * np.sin(2 * x) * np.sin(x) * (s_op + 4.0 * s_pm)\n        elif option_ in [\"A\", \"E\"]:\n            s_n = (\n                8.0\n                * np.sin(x) ** 2\n                * (\n                    2.0 * s_pm * (3.0 + 2.0 * np.cos(x) + np.cos(2 * x))\n                    + s_op * (2.0 + np.cos(x))\n                )\n            )\n        elif option_ == \"T\":\n            s_n = (\n                16.0 * s_op * (1.0 - np.cos(x)) * np.sin(x) ** 2\n                + 128.0 * s_pm * np.sin(x) ** 2 * np.sin(0.5 * x) ** 4\n            )\n        else:\n            print(\"PSD option should be in [X, XY, A, E, T] (%s)\" % option_)  # option_ is the actual parameter name\n            return None\n        if tdi2_:\n            factor_tdi2 = 4 * np.sin(2 * x) ** 2\n            s_n *= factor_tdi2\n\n        return s_n\n\n\n    def confusion_noise_psd(self,freq_, duration_=4.5, option_=\"X\", tdi2_=False, arm_length_=2.5e9):\n        \"\"\"Return noise PSD from GB confusion noise, at given freq. range.\n        :param array freq: frequency range\n        :param float nyears: number of years of observation\n        :param str option: TDI name can be X, XY, A, E, T\n        :param bool tdi2: TDI1.5 or 2nd generation\n        :param float arm_length: arm length in meter\n        :return array s_n: noise PSD\n        \"\"\"\n        clight = lisaconstants.SPEED_OF_LIGHT\n        lisaLT = arm_length_ / clight\n        x = 2.0 * np.pi * lisaLT * freq_\n        t = 4.0 * x**2 * np.sin(x) ** 2\n\n        # confusion noise model for snr>7\n        ampl = 1.28265531e-44\n        alpha = 1.62966700e00\n        fr2 = 4.81078093e-04\n        af1 = -2.23499956e-01\n        bf1 = -2.70408439e00\n        afk = -3.60976122e-01\n        bfk = -2.37822436e00\n\n        tobs = duration_\n        fr1 = 10.0 ** (af1 * np.log10(tobs) + bf1)\n        fknee = 10.0 ** (afk * np.log10(tobs) + bfk)\n        sg_sens = (\n            ampl\n            * np.exp(-((freq_ / fr1) ** alpha))\n            * (freq_ ** (-7.0 / 3.0))\n            * 0.5\n            * (1.0 + np.tanh(-(freq_ - fknee) / fr2))\n        )\n\n        sgx = t * sg_sens\n        if tdi2_:\n            factor_tdi2 = 4 * np.sin(2 * x) ** 2\n            sgx *= factor_tdi2\n        if option_ in [\"A\", \"E\"]:\n            return 1.5 * sgx\n        elif option_ == \"XY\":\n            return -0.5 * sgx\n        else:\n            return sgx\n\n\n    def reset(self):\n        self.name = None\n        self.level = None\n        self.initialized = False\n\n\n\nif __name__ == \"__main__\":\n\n    test0 = LISA_analytical_noise(\"dummy\", 42)\n    print(test0)\n\n    test0.set_noise_level(666)\n    print(test0)\n\n    test0.reset()\n    print(test0)\n\n    test0.noise_init(\"red book\",12)\n    print(test0)\n\n    freq = np.logspace(-5, 0, 9990)\n    duration = 4.5  # years\n    tdi2 = True\n\n    # graph to publish\n    fig, ax = plt.subplots(1, figsize=(12, 8))\n\n    
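# sh/sh_wd are used by the plotting just below but are never defined in this\n    # file; a minimal sketch (assumption) wiring them to the two PSD methods of\n    # the object built above, with "+confusion" meaning instrumental + confusion:\n    sh = lambda f: test0.instru_noise_psd(f, option_=\"X\", tdi2_=tdi2)\n    sh_wd = lambda f: sh(f) + test0.confusion_noise_psd(f, duration_=duration, option_=\"X\", tdi2_=tdi2)\n\n    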
ax.loglog(freq, np.sqrt(freq) * np.sqrt(sh(freq)), label=\"instrumental noise\")\n    ax.loglog(freq, np.sqrt(freq) * np.sqrt(20 / 3) * np.sqrt(sh_wd(freq)), color=\"k\", ls=\"--\",label=\"+confusion noise\")\n\n    ax.set_ylabel(\"ASD (to check)\")\n    ax.set_xlabel(\"Frequency (Hz)\")\n\n    plt.legend()\n    plt.grid()\n    plt.show()\n\n","repo_name":"lemiere/FOM_streamlit","sub_path":"LISA_noise_configuration.py","file_name":"LISA_noise_configuration.py","file_ext":"py","file_size_in_byte":5519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1640850542","text":"\"\"\"\nWrite a program to find the n-th ugly number.\n\nUgly numbers are positive numbers whose prime factors only include 2, 3, 5.\n\nExample:\n\nInput: n = 10\nOutput: 12\nExplanation: 1, 2, 3, 4, 5, 6, 8, 9, 10, 12 is the sequence of the first 10 ugly numbers.\nNote:\n\n1 is typically treated as an ugly number.\nn does not exceed 1690.\n\"\"\"\n\nimport heapq\n\n\nclass Solution:\n    def nthuglynumber(self, n: int) -> int:\n        heap, ans, ans_set = [1], 1, set()\n        for _ in range(n):\n            while ans in ans_set: ans = heapq.heappop(heap)\n            ans_set.add(ans)\n            for i in [2, 3, 5]: heapq.heappush(heap,i*ans)\n        return ans\n\n\nif __name__==\"__main__\":\n    n = 8\n    print(Solution().nthuglynumber(n))","repo_name":"amogchandrashekar/Leetcode","sub_path":"Medium/Ugly Number II.py","file_name":"Ugly Number II.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"34824233119","text":"import requests\r\nimport json\r\nfrom сonfig import keys\r\n\r\nclass ConvertionException(Exception):\r\n    pass\r\n\r\nclass CryptoConverter:\r\n    @staticmethod\r\n    def convert(quote: str, base: str, amount: str):\r\n        if quote == base:\r\n            raise ConvertionException(f'Не могу перевести одинаковые валюты {base}.')\r\n\r\n        try:\r\n            quote_ticker = keys[quote]\r\n        except KeyError:\r\n            raise ConvertionException(f'Не знаю такую валюту - {quote}. Попробуйте ввести другую.')\r\n\r\n        try:\r\n            base_ticker = keys[base]\r\n        except KeyError:\r\n            raise ConvertionException(f'Не знаю такую валюту - {base}. Попробуйте ввести другую.')\r\n\r\n        try:\r\n            amount = float(amount)\r\n        except ValueError:\r\n            raise ConvertionException(f'Ой, что-то не так с количеством {amount}. 
Попробуйте еще раз.')\r\n\r\n        r = requests.get(f'https://min-api.cryptocompare.com/data/price?fsym={quote_ticker}&tsyms={base_ticker}')\r\n        total_base = float(json.loads(r.content)[keys[base]]) * amount\r\n\r\n        return total_base\r\n","repo_name":"ElviraT1/TG.convert.bot-QAP_18.6-","sub_path":"extensions.py","file_name":"extensions.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11316748205","text":"from queue import Queue\r\nfrom threading import Thread\r\n\r\nnum_worker_threads = 4  # was undefined in the original snippet\r\nq = Queue(maxsize=0)  # module level, so the worker threads can see it\r\n\r\ndef do_work(item):  # minimal stand-in so the example runs\r\n    print(item)\r\n\r\ndef source():  # minimal stand-in producing some items\r\n    return range(10)\r\n\r\ndef worker():\r\n    while True:\r\n        item = q.get()\r\n        do_work(item)\r\n        q.task_done()\r\n\r\ndef main():\r\n\r\n    for i in range(num_worker_threads):\r\n        t = Thread(target=worker)\r\n        t.daemon = True\r\n        t.start()\r\n\r\n    for item in source():\r\n        q.put(item)\r\n\r\n    q.join() # block until all tasks are done\r\n\r\nmain()","repo_name":"maxbergmark/old-work","sub_path":"Egna projekt/queuetest.py","file_name":"queuetest.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32974110421","text":"class Solution:\n    def letterCombinations(self, digits: str) -> List[str]:\n        answer = []\n        lenD = len(digits)\n        dic = {\n            '2':\"abc\", '3':\"def\", '4':\"ghi\", '5':\"jkl\",\n            '6':\"mno\", '7':\"pqrs\", '8':\"tuv\", '9':\"wxyz\"\n        }\n\n        if lenD == 0: return []  # the problem expects an empty list, not None\n\n        def dfs(i, string):\n            if i == lenD:\n                answer.append(string)\n            else:\n                alphabets = dic[digits[i]]\n                for alphabet in alphabets:\n                    dfs(i+1, string + alphabet)\n\n        dfs(0, \"\")\n        return answer","repo_name":"w00cheol/LeetCode","sub_path":"17-letter-combinations-of-a-phone-number/17-letter-combinations-of-a-phone-number.py","file_name":"17-letter-combinations-of-a-phone-number.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"41878365379","text":"class Solution:\n    def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':\n\n        s, b = min(p.val, q.val), max(p.val, q.val)\n\n        while root:\n\n            # found the lowest common ancestor\n            if root.val >= s and root.val <= b: return root\n\n            if root.val > b: root = root.left # larger than the big number, move down left\n            elif root.val < s: root = root.right # smaller than the small number, move down right\n","repo_name":"chuxinliu/Leetcode","sub_path":"BinaryTree/235-lowest-common-ancestor-of-a-binary-search-tree.py","file_name":"235-lowest-common-ancestor-of-a-binary-search-tree.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2138460069","text":"# Fibonacci Sequence iterator\n\nclass FibonacciIterator:\n    def __init__(self):\n        self.value1 = 0\n        self.value2 = 1\n\n    def __iter__(self):\n        return self\n\n    def __next__(self):\n        temp = self.value1\n        self.value1, self.value2 = self.value2, self.value1 + self.value2\n        return temp\n\n\nfib_iterator = FibonacciIterator()\n\nnum = int(input('Enter the index of Fibonacci number to be printed: '))\nfor i in range(num):\n    next(fib_iterator)\nprint(f'The Fibonacci number at index {num} is {next(fib_iterator)}')\n\n\n","repo_name":"Karolina777/robotdreams-python","sub_path":"09/hmw-09-02.py","file_name":"hmw-09-02.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"28703530984","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 6 21:31:10 2023\n\n@author: dsalarc\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass LowPassDiscreteFilter:\n    # https://x-engineer.org/discretizing-transfer-function/\n\n    def __init__(self, wc, Ts, Tsim = -1, order = 1, DiscType = 'tustin'):\n        self.Tc = 1/wc\n        self.Ts = Ts\n        self.y = 0\n        self.ym1 = 0\n        self.ym2 = 0\n        self.u = 0\n        self.um1 = 0\n        self.um2 = 0\n        self.order = order\n        self.DiscType = DiscType\n        if Tsim == -1:\n            Tsim = Ts\n\n        if np.mod(Tsim, Ts) > Ts/1000:\n            raise ValueError('Simulation time sample shall be a multiple of filter time sample')\n        else:\n            self.Tscale = int(np.round(Tsim / Ts))\n\n    def set_zero(self,zero):\n        self.y = zero\n        self.ym1 = zero\n        self.ym2 = zero\n        self.u = zero\n        self.um1 = zero\n        self.um2 = zero\n\n    def step(self,u):\n\n        # shift the input history before storing the new sample\n        self.um2 = self.um1\n        self.um1 = self.u\n        self.u = u\n\n        for i in range(self.Tscale):\n            if self.Tscale == 1:\n                u_step = self.u\n            else:\n                # interpolate the input linearly from the previous sample towards the new one\n                u_step = self.um1 * (self.Tscale-i-1)/(self.Tscale-1) + self.u * i/(self.Tscale-1)\n\n            # shift the output history before computing the new output\n            self.ym2 = self.ym1\n            self.ym1 = self.y\n\n            if self.DiscType == 'tustin':\n                self.y = 1 / (2 * self.Tc + self.Ts) * (self.Ts * (u_step + self.um1) - (self.Ts-2*self.Tc) * self.ym1)\n\n            elif self.DiscType == 'euler_fwd':\n                self.y = 1 / (self.Tc + self.Ts) * (self.Ts * self.um1 + self.Tc * self.ym1)\n\n            elif self.DiscType == 'euler_back':\n                self.y = 1 / (self.Tc + self.Ts) * (self.Ts * u_step + self.Tc * self.ym1)\n\n            else:\n                self.y = 0\n\n        return self.y\n# %% \nts = 0.05\nwc = 40\nw_u = 10\nd_u = 0\ny0 = 0\n\n# Engine1 = LowPassDiscreteFilter(wc = wc, Ts=ts, DiscType = 'tustin')\n# Engine2 = LowPassDiscreteFilter(wc = wc, Ts=ts, DiscType = 'euler_fwd')\n# Engine3 = LowPassDiscreteFilter(wc = wc, Ts=ts, DiscType = 'euler_back')\n\n# Engine1.set_zero(y0)\n# Engine2.set_zero(y0)\n# Engine3.set_zero(y0)\n\n# t1_vec = np.arange(0,2,step=ts)\n# u1_vec = y0 + np.sin(w_u * t1_vec)\n# u1_vec[t1_vec>=1] = u1_vec[t1_vec>=1] + d_u\n# y1_vec = np.zeros(np.shape(t1_vec))\n# y2_vec = np.zeros(np.shape(t1_vec))\n# y3_vec = np.zeros(np.shape(t1_vec))\n\n# for i in range(len(t1_vec)):\n#     y1_vec[i] = Engine1.step(u1_vec[i])\n#     y2_vec[i] = Engine2.step(u1_vec[i])\n#     y3_vec[i] = Engine3.step(u1_vec[i])\n\n\n# % \nts = 0.001\n\n\nEngine4 = LowPassDiscreteFilter(wc = wc, Ts=ts, DiscType = 'euler_back')\n\nEngine4.set_zero(y0)\n\nt2_vec = np.arange(0,2,step=ts)\nu2_vec = y0 + np.sin(w_u * t2_vec)\nu2_vec[t2_vec>=1] = u2_vec[t2_vec>=1] + d_u\ny4_vec = np.zeros(np.shape(t2_vec))\n\nfor i in range(len(t2_vec)):\n    y4_vec[i] = Engine4.step(u2_vec[i])\n\n# plt.figure()\n# plt.plot(t2_vec, u2_vec)\n# plt.plot(t1_vec, y1_vec, label = 'tustin')\n# plt.plot(t1_vec, y2_vec, label = 'euler_fwd')\n# plt.plot(t1_vec, y3_vec, label = 'euler_back')\n# plt.plot(t2_vec, y4_vec, 'k', linewidth = 1, label = 'baseline')\n# plt.legend()\n# # plt.xlim((0.9,1.3))\n# plt.show() \n\n# %% \nt_sim = 0.01\nt_filt = 0.001\n\nEngine = LowPassDiscreteFilter(wc = wc, Ts=t_filt, Tsim = t_sim, DiscType = 'euler_back')\n\nEngine.set_zero(y0)\n\nt_vec = np.arange(0,2,step=t_sim)\nu_vec = y0 + np.sin(w_u * t_vec)\nu_vec[t_vec>=1] = u_vec[t_vec>=1] + d_u\ny_vec = np.zeros(np.shape(t_vec))\n\nsf = int(t_sim / t_filt)\n\nfor i in range(1,len(t_vec)):\n\n    y_vec[i] = Engine.step(u_vec[i])\n\n\nplt.figure()\nplt.plot(t_vec, u_vec, 'b')\nplt.plot(t2_vec, y4_vec, 'k', linewidth = 1, label = 'baseline')\nplt.plot(t_vec, y_vec, 'ro', 
label = 'filt 1000Hz', markersize = 4)\nplt.legend()\nplt.xlim((0,1))\n \n \n ","repo_name":"dsalarc/VTOL","sub_path":"FunctionTests/test_Filter.py","file_name":"test_Filter.py","file_ext":"py","file_size_in_byte":4056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26883181088","text":"import tensorflow as tf\nfrom models.Model import Model\n\n\nclass Critic(Model):\n\n parameter_names = [\n \"conv1_w\",\n \"conv2_w\",\n \"conv3_w\",\n \"conv4_w\",\n \"relu5_w\",\n \"relu5_b\",\n \"relu6_w\",\n \"relu6_b\",\n \"relu7_w\",\n \"relu7_b\",\n \"relu8_w\",\n \"relu8_b\",\n \"q_w\",\n \"q_b\",\n ]\n\n def __init__(\n self,\n model_name: str,\n save_path: str):\n self._model_name = model_name\n self._save_path = save_path\n self._parameters = dict()\n with tf.variable_scope(\"%s_Critic\" % model_name):\n self._parameters = {\n \"conv1_w\": tf.get_variable(\n \"conv1_w\",\n shape=[3, 3, 4, 32]),\n \"conv2_w\": tf.get_variable(\n \"conv2_w\",\n shape=[2, 2, 32, 32]),\n \"conv3_w\": tf.get_variable(\n \"conv3_w\",\n shape=[2, 2, 32, 64]),\n \"conv4_w\": tf.get_variable(\n \"conv4_w\",\n shape=[2, 2, 64, 128]),\n \"relu5_w\": tf.get_variable(\n \"relu5_w\",\n shape=[20*10*128 + 2, 1024]),\n \"relu5_b\": tf.get_variable(\n \"relu5_b\",\n shape=[1024]),\n \"relu6_w\": tf.get_variable(\n \"relu6_w\",\n shape=[1024, 512]),\n \"relu6_b\": tf.get_variable(\n \"relu6_b\",\n shape=[512]),\n \"relu7_w\": tf.get_variable(\n \"relu7_w\",\n shape=[512, 256]),\n \"relu7_b\": tf.get_variable(\n \"relu7_b\",\n shape=[256]),\n \"relu8_w\": tf.get_variable(\n \"relu8_w\",\n shape=[256, 256]),\n \"relu8_b\": tf.get_variable(\n \"relu8_b\",\n shape=[256]),\n \"q_w\": tf.get_variable(\n \"q_w\",\n shape=[256, 1]),\n \"q_b\": tf.get_variable(\n \"q_b\",\n shape=[1]),\n }\n\n def inference(self, X, is_target=False):\n\n states = X[0]\n actions = X[1]# (?) [0,1]\n\n image = states[0]# (?,320,160,4)\n speed = states[1]# (?) 
[0,1]\n\n        conv1 = tf.nn.conv2d(\n            image,\n            self._parameters[\"conv1_w\"],\n            [1, 1, 1, 1],\n            \"SAME\",\n            name=\"conv1\")  # (?,320,160,32) -- \"SAME\" padding keeps the spatial size\n        relu1 = tf.nn.leaky_relu(conv1, name=\"relu1\")\n        pool1 = tf.nn.max_pool(\n            relu1,\n            (1, 2, 2, 1),\n            (1, 2, 2, 1),\n            \"SAME\",\n            name=\"pool1\")  # (?,160,80,32)\n        conv2 = tf.nn.conv2d(\n            pool1,\n            self._parameters[\"conv2_w\"],\n            [1, 1, 1, 1],\n            \"SAME\",\n            name=\"conv2\")  # (?,160,80,32)\n        relu2 = tf.nn.leaky_relu(conv2, name=\"relu2\")\n        pool2 = tf.nn.max_pool(\n            relu2,\n            (1, 2, 2, 1),\n            (1, 2, 2, 1),\n            \"SAME\",\n            name=\"pool2\")  # (?,80,40,32)\n        conv3 = tf.nn.conv2d(\n            pool2,\n            self._parameters[\"conv3_w\"],\n            [1, 1, 1, 1],\n            \"SAME\",\n            name=\"conv3\")  # (?,80,40,64)\n        relu3 = tf.nn.leaky_relu(conv3, name=\"relu3\")\n        pool3 = tf.nn.max_pool(\n            relu3,\n            (1, 2, 2, 1),\n            (1, 2, 2, 1),\n            \"SAME\",\n            name=\"pool3\")  # (?,40,20,64)\n        conv4 = tf.nn.conv2d(\n            pool3,\n            self._parameters[\"conv4_w\"],\n            [1, 1, 1, 1],\n            \"SAME\",\n            name=\"conv4\")  # (?,40,20,128)\n        relu4 = tf.nn.leaky_relu(conv4, name=\"relu4\")\n        pool4 = tf.nn.max_pool(\n            relu4,\n            (1, 2, 2, 1),\n            (1, 2, 2, 1),\n            \"SAME\",\n            name=\"pool4\")  # (?,20,10,128), matching the 20*10*128 reshape below\n\n        reshape1 = tf.concat(\n            [tf.reshape(pool4, [-1, 20*10*128]), speed, actions],\n            1)\n\n        relu5 = tf.nn.leaky_relu(\n            tf.add(\n                tf.matmul(reshape1, self._parameters[\"relu5_w\"]),\n                self._parameters[\"relu5_b\"]),\n            name=\"relu5\")  # (1024)\n\n        relu6 = tf.nn.leaky_relu(\n            tf.add(\n                tf.matmul(relu5, self._parameters[\"relu6_w\"]),\n                self._parameters[\"relu6_b\"]),\n            name=\"relu6\")  # (512)\n\n        relu7 = tf.nn.leaky_relu(\n            tf.add(\n                tf.matmul(relu6, self._parameters[\"relu7_w\"]),\n                self._parameters[\"relu7_b\"]),\n            name=\"relu7\")  # (256)\n\n        relu8 = tf.nn.leaky_relu(\n            tf.add(\n                tf.matmul(relu7, self._parameters[\"relu8_w\"]),\n                self._parameters[\"relu8_b\"]),\n            name=\"relu8\")  # (256)\n\n        q = tf.add(\n            tf.matmul(relu8, self._parameters[\"q_w\"]),\n            self._parameters[\"q_b\"])\n\n        return q  # (1)\n\n    def parameters(self):\n        return list(self._parameters.values())\n\n    def initialize_parameters(self, sess:tf.Session):\n        sess.run(tf.variables_initializer(self.parameters()))  # non-deprecated spelling of initialize_variables\n\n    def save(self, sess: tf.Session):\n        saver = tf.train.Saver(\n            self.parameters(),\n            save_relative_paths=True,\n            filename=self._model_name)\n        saver.save(sess, \"%s/%s_critic\"%(self._save_path,self._model_name))\n\n    def load(self, sess: tf.Session):\n        saver = tf.train.Saver(\n            self.parameters(),\n            save_relative_paths=True,\n            filename=self._model_name)\n        saver.restore(sess, \"%s/%s_critic\"%(self._save_path,self._model_name))\n\n    def sync(self, target, sess:tf.Session):\n        \"\"\"Sync the parameter value of self to target.\n\n        Arguments:\n            target {Critic} -- Target of syncing.\n        \"\"\"\n\n        for n in self.parameter_names:\n            sess.run(\n                target._parameters[n].assign(self._parameters[n]))\n\n\n    def copy(self, model_name: str, save_path: str, sess:tf.Session):\n        \"\"\"Create a new Critic and sync parameter value to it.\n\n        Arguments:\n            model_name {str} -- Name of new Critic\n            save_path {str} -- Save path of new Critic\n\n        Returns:\n            Critic -- The new Critic model\n        \"\"\"\n\n        new_network = Critic(\n            model_name, \n            save_path)\n\n        self.sync(new_network,sess)\n\n        return new_network","repo_name":"princhenee/comp5212_2018_pp","sub_path":"ml-deep-q-continuous-action/models/Critic.py","file_name":"Critic.py","file_ext":"py","file_size_in_byte":6784,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"17660833439","text":"from machine import I2C, Pin, SoftI2C\nfrom lib.sensors.bmp180 import BMP180\nfrom time import sleep\nfrom lib.config import *\nfrom lib.oled.ssd1306 import SSD1306_I2C\nimport usocket as socket\n\n# shared I2C bus for the BMP180 sensor and the OLED display\ni2c = SoftI2C(sda=Pin(DEFAULT_IOTKIT_I2C_SDA), scl=Pin(DEFAULT_IOTKIT_I2C_SCL))\ndisplay = SSD1306_I2C(128, 64, i2c)\n\nbmp180 = BMP180(i2c)\nbmp180.oversample_sett = 2\nbmp180.baseline = 101325\n\nwhile True:\n    temp = bmp180.temperature\n    pres = bmp180.pressure\n    values = \"key=\" + \"A2ABBMDJYRAMA6JM\" + \"&field1=\" + str(temp) + \"&field2=\" + str(pres / 1000)\n\n    display.text(\"Temp: \" + str(temp), 0, 0)\n    display.text(\"Pres: \" + str(pres / 1000), 0, 8)\n    display.show()\n    print(values)\n\n    s = socket.socket()\n\n    ai = socket.getaddrinfo(\"api.thingspeak.com\", 80)\n    print(\"Address infos:\", ai)\n    addr = ai[0][-1]\n\n    print(\"Connect address:\", addr)\n    s.connect(addr)\n\n    # build the request as str and encode once: values is a str, so\n    # concatenating it onto a bytes literal would raise a TypeError\n    s.send((\"GET /update?\" + values + \" HTTP/1.1\\r\\nHost: api.thingspeak.com\\r\\n\\r\\n\").encode())\n    # rc = s.recv(4096)\n    # print( rc )\n    s.close()\n\n    sleep( 15 )\n\n","repo_name":"mc-b/iotkitmp","sub_path":"http/thingspeak.py","file_name":"thingspeak.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12274568497","text":"import unittest\nimport datetime\nfrom valley.validators import (RequiredValidator, DateTimeValidator,\n                              DateValidator, FloatValidator, IntegerValidator,\n                              MaxLengthValidator, MinLengthValidator,\n                              MaxValueValidator, MinValueValidator,\n                              StringValidator, ValidationException,\n                              BooleanValidator,DictValidator,\n                              ListValidator\n                              )\n\n\nclass ValidatorsTestCase(unittest.TestCase):\n\n    def test_required_validator(self):\n        with self.assertRaises(ValidationException) as vm:\n            RequiredValidator().validate(None, 'first_name')\n        self.assertEqual(str(vm.exception),\n                         'first_name: This value is required')\n        # Test with valid input\n        RequiredValidator().validate('First Name', 'first_name')\n\n    def test_datetime_validator(self):\n        with self.assertRaises(ValidationException) as vm:\n            DateTimeValidator().validate(datetime.date.today(), 'date_created')\n        self.assertEqual(str(vm.exception),\n                         'date_created: This value should be a valid datetime object.')\n        # Test with valid input\n        DateTimeValidator().validate(datetime.datetime.now(), 'date_created')\n\n    def test_date_validator(self):\n        with self.assertRaises(ValidationException) as vm:\n            DateValidator().validate('not a date', 'date_created')\n        self.assertEqual(str(vm.exception),\n                         'date_created: This value should be a valid date object.')\n        # Test with valid input\n        DateValidator().validate(datetime.date.today(), 'date_created')\n\n    def test_float_validator(self):\n        with self.assertRaises(ValidationException) as vm:\n            FloatValidator().validate(1, 'no_packages')\n        self.assertEqual(str(vm.exception),\n                         'no_packages: This value should be a float.')\n        # Test with valid input\n        FloatValidator().validate(1.3, 'no_packages')\n\n    def test_integer_validator(self):\n        with self.assertRaises(ValidationException) as vm:\n            IntegerValidator().validate(1.2, 'no_packages')\n        self.assertEqual(str(vm.exception),\n                         'no_packages: This value should be an integer')\n        # Test with valid input\n        IntegerValidator().validate(1, 'no_packages')\n\n    def test_max_length_validator(self):\n        with self.assertRaises(ValidationException) as vm:\n            MaxLengthValidator(2).validate('123', 'no_packages')\n        self.assertEqual(str(vm.exception),\n                         'no_packages: This value 
should have a length lesser than or equal to 2. Currently 123')\n # Test with valid input\n MaxLengthValidator(2).validate('12', 'no_packages')\n\n def test_min_length_validator(self):\n with self.assertRaises(ValidationException) as vm:\n MinLengthValidator(2).validate('1', 'no_packages')\n self.assertEqual(str(vm.exception),\n 'no_packages: This value should have a length greater than or equal to 2. Currently 1')\n # Test with valid input\n MinLengthValidator(2).validate('123', 'no_packages')\n\n def test_max_value_validator(self):\n with self.assertRaises(ValidationException) as vm:\n MaxValueValidator(2).validate(3, 'no_packages')\n self.assertEqual(str(vm.exception),\n 'no_packages: This value should have a value lesser than or equal to 2. Currently 3')\n # Test with valid input\n MaxValueValidator(2).validate(1, 'no_packages')\n\n def test_min_value_validator(self):\n with self.assertRaises(ValidationException) as vm:\n MinValueValidator(2).validate(1, 'no_packages')\n self.assertEqual(str(vm.exception),\n 'no_packages: This value should have a value greater than or equal to 2. Currently 1')\n # Test with valid input\n MinValueValidator(2).validate(3, 'no_packages')\n\n def test_string_validator(self):\n with self.assertRaises(ValidationException) as vm:\n StringValidator().validate(1, 'last_name')\n self.assertEqual(str(vm.exception),\n 'last_name: This value should be a string')\n # Test with valid input\n StringValidator().validate('Jones', 'last_name')\n\n def test_boolean_validator(self):\n with self.assertRaises(ValidationException) as vm:\n BooleanValidator().validate(1, 'last_name')\n self.assertEqual(str(vm.exception),\n 'last_name: This value should be True or False.')\n # Test with valid input\n BooleanValidator().validate(True, 'last_name')\n BooleanValidator().validate(False, 'last_name')\n\n def test_dict_validator(self):\n with self.assertRaises(ValidationException) as vm:\n DictValidator().validate(1, 'person')\n self.assertEqual(str(vm.exception),\n 'person: This value should be a dict object.')\n DictValidator().validate({'first':'Brian','last':'Jones'}, 'person')\n\n def test_list_validator(self):\n with self.assertRaises(ValidationException) as vm:\n ListValidator().validate(1, 'schools')\n self.assertEqual(str(vm.exception),\n 'schools: This value should be a list object.')\n ListValidator().validate(['Ridge Valley High','Lewis Cone Elementary'],'schools')\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"capless/valley","sub_path":"valley/tests/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":5597,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"27361430742","text":"from . import Navigation\nfrom . 
import Fonctions\n\n# Cette fonction détermine l'intervalle des symboles sortant du noeud donné\ndef calculerIntervalle(indiceNoeud, ll):\n # Le début est la position du indiceNoeud ième 1 dans ll +1 ou 0 si le noeud est la racine\n debut = 0 if indiceNoeud == 0 else Fonctions.select(1, ll, indiceNoeud) + 1\n # La fin est la position de (indiceNoeud+1)ième 1 dans ll\n fin = Fonctions.select(1, ll, indiceNoeud + 1)\n return ll[debut:fin]\n\n\n# Cette fonction vérifie si le noeud correspond à un état final\ndef noeudFinal(indiceNoeud, symbolsSortants, results, listeDesNoeuds, i):\n # Si ll[debut:fin] contient \"$\" alors c'est un état final\n if \"$\" in symbolsSortants:\n results.append(((listeDesNoeuds[indiceNoeud][::-1]), i - len(listeDesNoeuds[indiceNoeud]) + 1))\n\n\n# Fonction de recherche\ndef recherche(text, chaineDesParentheses, listeDesNoeuds, f, ll):\n results = list()\n indiceNoeud = 0\n # On lit les caractères du text un par un\n for i, symbol in enumerate(text):\n # On enregistre les symboles sortants du noeud actuel\n symbolsSortants = calculerIntervalle(indiceNoeud, ll)\n # Si le noeud actuel est différent de la racine et\n # il existe un chemin depuis le noeud actuel en suivant le symbole actuel\n while indiceNoeud != 0 and symbol not in symbolsSortants:\n # On revient au plus long préfixe\n indiceNoeud = Navigation.trouverLePlusLongSuffixe(indiceNoeud, chaineDesParentheses)\n symbolsSortants = calculerIntervalle(indiceNoeud, ll)\n # Si un chemin existe alors on passe au noeud suivant\n if symbol in symbolsSortants:\n indiceNoeud = Navigation.noeudSuivant(indiceNoeud, symbol, f, ll)\n # Sinon si aucun chemin n'existe on passe au caractère suivant du texte\n else:\n continue\n indiceNoeudTmp = indiceNoeud\n # Pour chaque noeud on vérifie tous ses suffixes\n while indiceNoeudTmp != 0:\n symbolsSortants = calculerIntervalle(indiceNoeudTmp, ll)\n noeudFinal(indiceNoeudTmp, symbolsSortants, results, listeDesNoeuds, i)\n indiceNoeudTmp = Navigation.trouverLePlusLongSuffixe(indiceNoeudTmp, chaineDesParentheses)\n return results","repo_name":"choukribouabana/Programmationn","sub_path":"XBWT/Recherche.py","file_name":"Recherche.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10583182512","text":"# 3- Nesse exercício vamos simular um caixa de mercado!\n# Dado o dicionário inicial {'banana': 3.1, 'maca': 1.5, 'goiaba': 4.3}\n# Faça um Loop Infinito que pergunte para o usuário qual produto ele gostaria de consultar o preço e que retorne o valor\n# contido no dicionário.\n# 3.1- Faça uma condição de saída para o Looping.\n# 3.2- Faça uma sessão para cadastrar novos produtos.\n# 3.3- Armazene os produtos consultados em um arquivo json na mesma pasta.\n\n\n\n#imports\nimport json\n\n\n\ndef check_price(mydict):\n '''Com base no dicionário inicial, consulta os produtos existentes retornando os seus valores. 
Salva no arquivo\n log.json os produtos consultados na execução atual.'''\n\n\n checked = {}\n product = 0\n while product != 'cadastrar':\n product = str(input(\"Qual produto gostaria consultar:\"))\n\n if product in mydict.keys():\n checked.update({product: mydict[product]})\n print(mydict[product])\n with open('log.json', 'w') as log:\n json.dump(checked, log)\n else:\n print(\"O produto não está cadastrado no sistema.\")\n add_new(mydict)\n\n\ndef add_new(mydict):\n '''Com base no dicionário inicial, cadastra um novo produto retornado o dicionário atualizado na saída da função.'''\n\n\n newproduct = str(input(\"Qual produto gostaria de cadastrar:\"))\n value = float(input(\"Qual valor do produto:\"))\n\n if newproduct not in mydict.keys():\n mydict.update({newproduct: value})\n print(mydict)\n\n\ndef main():\n check_price(mydict)\n\n\nif __name__ == \"__main__\":\n mydict = {'banana': 3.1, 'maca': 1.5, 'goiaba': 4.3}\n main()\n","repo_name":"iamcamilasilva/PythonScripts","sub_path":"Desafio03.py","file_name":"Desafio03.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3600913538","text":"# Author : MUSTAFA ERGÜL\n\nimport asyncio\nimport time\nimport serial\nfrom typing import Iterator, Tuple\nfrom serial.tools.list_ports import comports\nfrom PyQt5.QtCore import QSettings\nfrom PyQt5.QtWidgets import QWidget, QLabel, QComboBox, QGridLayout, \\\n QPushButton, QMessageBox, QApplication, QLineEdit, QPlainTextEdit\nfrom PyQt5.QtGui import QCloseEvent, QIcon, QPixmap, QFont\nfrom quamash import QEventLoop\nfrom datetime import datetime\nfrom PyQt5 import QtCore\n\nkayit = open(\"kayit.txt\", \"a\")\n\n# Object for access to the serial port\nser = serial.Serial(timeout=0)\nSER_BAUDRATE = 115200\n\n# Setting constants\nSETTING_PORT_NAME = 'port_name'\nSETTING_MESSAGE = 'message'\n\n\n\ndef gen_serial_ports() -> Iterator[Tuple[str, str]]:\n \"\"\"Mevcut tüm seri bağlantı noktalarını çalıştır.\"\"\"\n ports = comports()\n return ((p.description, p.device) for p in ports)\n\n\ndef send_serial_async(msg: str) -> None:\n \"\"\"Seri bağlantı noktasına (async) bir mesaj gönderin.\"\"\"\n ser.write(msg.encode())\n\n\n# noinspection PyArgumentList\nclass RemoteWidget(QWidget):\n \"\"\"Main Widget.\"\"\"\n\n def __init__(self, parent: QWidget=None) -> None:\n super().__init__(parent)\n\n # Port Combobox\n self.port_label = QLabel(self.tr('COM Port:'))\n self.port_combobox = QComboBox()\n self.port_label.setBuddy(self.port_combobox)\n self.update_com_ports()\n self.port_combobox.setFont(QFont('Arial', 14))\n\n self.setWindowIcon(QIcon(\"iconvolt.jpg\"))\n\n self.labelImage = QLabel(self)\n pixmap = QPixmap(\"voltay.jpeg\")\n self.labelImage.setPixmap(pixmap)\n self.labelImage.setAlignment(QtCore.Qt.AlignCenter)\n\n\n\n # Connect and Disconnect Buttons\n self.connect_btn = QPushButton(self.tr('Bağlan'))\n self.disconnect_btn = QPushButton(self.tr('Bağlantı Kapat'))\n self.connect_btn.setFont(QFont('Arial', 14))\n self.disconnect_btn.setFont(QFont('Arial', 14))\n self.disconnect_btn.setVisible(False)\n self.connect_btn.pressed.connect(self.on_connect_btn_pressed)\n self.disconnect_btn.pressed.connect(self.on_disconnect_btn_pressed)\n\n # message line edit\n self.msg_label = QLabel(self.tr('Mesaj:'))\n self.msg_lineedit = QLineEdit()\n self.msg_label.setBuddy(self.msg_label)\n self.msg_lineedit.setEnabled(False)\n self.msg_lineedit.returnPressed.connect(self.on_send_btn_pressed)\n\n # send message 
button\n self.send_btn = QPushButton(self.tr('Gönder'))\n self.send_btn.setEnabled(False)\n self.send_btn.pressed.connect(self.on_send_btn_pressed)\n\n\n # received messages\n self.received_label = QLabel(self.tr('Alınan Veri:'))\n self.received_textedit = QPlainTextEdit()\n self.received_textedit.setReadOnly(True)\n self.received_label.setBuddy(self.received_textedit)\n\n self.vlt_label = QLabel(self.tr('VOLTA TEAM'))\n self.data_label = QLabel(self.tr('| Saat | Toplam Süre (ms) | Hız(km/h | Batarya Sıcaklığı(°C) | Batarya Gerilimi(V) | Kalan Enerji(Wh) |'))\n self.data_label.setFont(QFont('Arial', 14))\n\n # Arrange Layout\n layout = QGridLayout()\n layout.addWidget(self.labelImage, 0,0,1,3)\n layout.addWidget(self.port_label, 1, 0)\n layout.addWidget(self.port_combobox, 1, 0,1,2)\n layout.addWidget(self.connect_btn, 1, 2)\n layout.addWidget(self.disconnect_btn, 1, 2)\n #layout.addWidget(self.msg_label, 2, 0)\n #layout.addWidget(self.msg_lineedit, 2, 1)\n #layout.addWidget(self.send_btn, 2, 2)\n layout.addWidget(self.data_label, 2, 0, 1, 3)\n #layout.addWidget(self.received_label, 4, 0)\n layout.addWidget(self.received_textedit, 3, 0, 1, 3)\n layout.addWidget(self.vlt_label,4,2,1,3)\n\n\n self.vlt_label.setFont(QFont('Arial', 14))\n\n self.setLayout(layout)\n self._load_settings()\n\n def _load_settings(self) -> None:\n \"\"\"Başlangıçtaki ayarları yükleyin.\"\"\"\n settings = QSettings()\n\n # port name\n port_name = settings.value(SETTING_PORT_NAME)\n if port_name is not None:\n index = self.port_combobox.findData(port_name)\n if index > -1:\n self.port_combobox.setCurrentIndex(index)\n\n # last message\n msg = settings.value(SETTING_MESSAGE)\n if msg is not None:\n self.msg_lineedit.setText(msg)\n\n def _save_settings(self) -> None:\n \"\"\"Kapatırken ayarları kaydedin.\"\"\"\n settings = QSettings()\n settings.setValue(SETTING_PORT_NAME, self.port)\n settings.setValue(SETTING_MESSAGE, self.msg_lineedit.text())\n\n def show_error_message(self, msg: str) -> None:\n \"\"\"Hata mesajını içeren bir Mesaj Kutusu gösterin.\"\"\"\n QMessageBox.critical(self, QApplication.applicationName(), str(msg))\n\n def update_com_ports(self) -> None:\n \"\"\"Update COM Port list in GUI.\"\"\"\n for name, device in gen_serial_ports():\n self.port_combobox.addItem(name, device)\n\n @property\n def port(self) -> str:\n \"\"\"Geçerli seri bağlantı noktasını çalıştırın.\"\"\"\n return self.port_combobox.currentData()\n\n def closeEvent(self, event: QCloseEvent) -> None:\n \"\"\"Widget'ın kapatma olayını ele alın.\"\"\"\n if ser.is_open:\n ser.close()\n\n self._save_settings()\n\n event.accept()\n\n def on_connect_btn_pressed(self) -> None:\n \"\"\"Belirtilen bağlantı noktasına seri bağlantıyı açın.\"\"\"\n if ser.is_open:\n ser.close()\n ser.port = self.port\n ser.baudrate = SER_BAUDRATE\n\n try:\n ser.open()\n except Exception as e:\n self.show_error_message(str(e))\n\n if ser.is_open:\n self.connect_btn.setVisible(False)\n self.disconnect_btn.setVisible(True)\n self.port_combobox.setDisabled(True)\n self.msg_lineedit.setEnabled(True)\n self.send_btn.setEnabled(True)\n loop.create_task(self.receive_serial_async())\n\n def on_disconnect_btn_pressed(self) -> None:\n \"\"\"Mevcut seri bağlantıyı kapatın.\"\"\"\n if ser.is_open:\n ser.close()\n\n if not ser.is_open:\n self.connect_btn.setVisible(True)\n self.disconnect_btn.setVisible(False)\n self.port_combobox.setEnabled(True)\n self.msg_lineedit.setEnabled(False)\n self.send_btn.setEnabled(False)\n\n def on_send_btn_pressed(self) -> None:\n \"\"\"Arabaya 
mesaj gönderin.\"\"\"\n msg = self.msg_lineedit.text() + '\\r\\n'\n loop.call_soon(send_serial_async, msg)\n\n async def receive_serial_async(self) -> None:\n a = 0\n \"\"\"Gelen verileri bekleyin, metne dönüştürün ve Textedit'e ekleyin.\"\"\"\n while True:\n time.sleep(27/1000)\n a = a+27\n msg = ser.readline()\n if msg != b'':\n text = msg.decode().strip()\n saat = datetime.now().strftime('%H:%M:%S.%f')[:-4]\n self.received_textedit.appendPlainText(saat+\",\\t\" + str(a)+\",\\t\"+ text)\n self.received_textedit.setFont(QFont('Arial', 13))\n kayit.write(str(a)+\",\\t\"+ text+\"\\n\")\n await asyncio.sleep(0)\n\n\nif __name__ == '__main__':\n app = QApplication([])\n loop = QEventLoop()\n asyncio.set_event_loop(loop)\n\n app.setOrganizationName('VoltaTEAM')\n app.setApplicationName('VoltaCAR UART Interface')\n w = RemoteWidget()\n w.show()\n\n with loop:\n loop.run_forever()","repo_name":"mustafatrk0/UART_Interface_Python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74281400513","text":"'''\nCreated on Nov 9, 2016\n@author: Gaio\n\nBolliger Bands\n\n'''\n\nimport QSTK.qstkutil.qsdateutil as du\nimport QSTK.qstkutil.tsutil as tsu\nimport QSTK.qstkutil.DataAccess as da\nimport datetime as dt\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport math\nimport copy\nimport sys\nfrom pylab import *\n\n#\n# Prepare to read the data\n#\nsymbols = [\"AAPL\",\"MSFT\"]\nstartday = dt.datetime(2010,4,1)\nendday = dt.datetime(2010,7,31)\ntimeofday=dt.timedelta(hours=16)\ntimestamps = du.getNYSEdays(startday,endday,timeofday)\n\ndataobj = da.DataAccess('Yahoo')\nvoldata = dataobj.get_data(timestamps, symbols, \"volume\")\nadjcloses = dataobj.get_data(timestamps, symbols, \"close\")\nactualclose = dataobj.get_data(timestamps, symbols, \"actual_close\")\n\nadjcloses = adjcloses.fillna(method='ffill')\nadjcloses = adjcloses.fillna(method='backfill')\nadjcloses = adjcloses.fillna(1.0)\n\nmeans = pd.DataFrame.rolling(adjcloses,min_periods=20,window=20,center=False).mean()\n\nstds = pd.DataFrame.rolling(adjcloses,min_periods=20,window=20,center=False).std()\n\n#calculate Bolliger Bands as mean +- 1 std\nbolUp = means + stds\nbolDown = means - stds\n\n#calculate the normalized signal\nbolVal = (adjcloses - means) / (stds)\n\n#write to file\nvaluesFile = open( \"bolNormalized.csv\", \"w\" )\n \nfor row in bolVal.iterrows():\n valuesFile.writelines(str(row[0].strftime('%Y,%m,%d')) + \", \" + str(row[1]['AAPL'].round(2)) + \", \" +str(row[1]['MSFT'].round(2))+ \"\\n\" )\nvaluesFile.close() \n\n# Plot the prices\nplt.clf()\n\nsymtoplot = 'AAPL'\nplot(adjcloses.index,means[symtoplot].values)\nplot(adjcloses.index,bolUp[symtoplot].values)\nplot(adjcloses.index,bolDown[symtoplot].values)\nplt.legend(['Rolling Mean','Bollinger Band Up','Bollinger Band Down'])\nplt.ylabel('Adjusted Close')\n\nsavefig(\"cg_bolliger.png\", format='png')","repo_name":"gaioNL/CompInvesting.GT.training.2016","sub_path":"hw5/cg_wk7_hw5.py","file_name":"cg_wk7_hw5.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31290277960","text":"\nfrom selenium import webdriver\nimport pandas as pd\nfrom selenium.common.exceptions import NoSuchElementException\nimport time\n\noptions = webdriver.ChromeOptions()\n\noptions.add_argument('lang=ko_KR')\ndriver = 
webdriver.Chrome('./chromedriver.exe', options=options)\n\n# Ranking > outerwear, pants, shoes\nreview_xpath = '//*[@id=\"goodsRankList\"]/li[1]/div[3]/div[1]/a/img'  # restored: used when reading each review below\n# review_number_xpath = '//*[@id=\"estimate_type\"]/button[3]'\n# review_button_xpath = '//*[@id=\"estimate_list\"]/div/div[3]/div[4]/div[1]' #review button\n# # # //*[@id=\"movieEndTabMenu\"]/li[6]/a\n# # your_year = 2 # outerwear 2, pants 3, shoes 5\nreview_number_xpath = '//*[@id=\"estimate_goods\"]'\nreview_button_xpath = '//*[@id=\"estimate_goods\"]'\n# review_page = '//*[@id=\"pagerTagAnchor2\"]review_page'\n\n\n# your_year = 2017 # change this to your assigned year\nfor i in range(1, 3): # 3\n    url = 'https://www.musinsa.com/ranking/best?period=month&age=ALL&mainCategory=002&subCategory=&leafCategory=&price=&golf=false&kids=false&newProduct=false&exclusive=false&discount=false&soldOut=false&page={}&viewType=small&priceMin=&priceMax='.format(i)\n\n    titles = []\n    reviews = []\n    try:\n\n        for j in range(1, 11): # only the first 10 products for now\n            driver.get(url)\n            time.sleep(0.5)\n            outer_title_xpath = '//*[@id=\"goodsRankList\"]/li[{}]/div[3]/div[2]/p[2]/a'.format(j)\n            try:\n                title = driver.find_element(\"xpath\", outer_title_xpath).text\n                driver.find_element(\"xpath\", outer_title_xpath).click()\n                time.sleep(0.5)\n                driver.find_element('xpath', review_button_xpath).click()\n                time.sleep(0.5)\n                review_range = driver.find_element('xpath', review_number_xpath).text  # the full xpath, not a slice of it\n                review_range = review_range.replace(',', '')\n                review_range = (int(review_range) -1) // 10 + 2\n                print(review_range)\n                # exit()\n                for k in range(1, review_range):\n                    review_page_button_xpath = '//*[@id=\"estimate_type\"]/button[3]'.format(k)\n                    try:\n                        driver.find_element('xpath', review_page_button_xpath).click()\n                        for l in range(1, 11):\n                            back_flag = False\n                            review_title_xpath = '//*[@id=\"estimate_list\"]/div/div[{}]/div[4]/div[1]'.format(l)\n                            try:\n                                review = driver.find_element('xpath', review_title_xpath).click()\n                                back_flag = True\n                                time.sleep(0.5)\n                                review = driver.find_element('xpath', review_xpath).text\n                                titles.append(title)\n                                reviews.append(review)\n                                driver.back()\n                            except:\n                                if back_flag:\n                                    driver.back()\n                                print('review', i, j, k, l)\n                        driver.back()\n                    except:\n                        print('review page', i, j, k)\n            except:\n                print('movie', i, j)\n        df = pd.DataFrame({'title':titles, \"reviews\":reviews })\n        df.to_csv('./crawling_data/reviews_{}page.csv'.format(i),index=False)  # one placeholder, one argument\n    except:\n        print('page', i)\n\n\n\n\ndriver.close()","repo_name":"kangyekwon/prj_movie","sub_path":"musinsa_cr.py","file_name":"musinsa_cr.py","file_ext":"py","file_size_in_byte":3499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43855859756","text":"import numpy as np\nfrom collections import Counter\n\ndef evaluate_guess(secret_code, guess):\n    score1 = [1 for i in range(len(secret_code)) if guess[i] == secret_code[i]] ## list for exact matches\n    score2 = [0 for i in range(len(secret_code)) if guess[i] in secret_code] ## list for \"color\" matches\n    score1.extend(score2[:len(score2) - len(score1)])  # safe when there are no exact matches ([:-0] would yield an empty slice)\n    score1.sort(reverse=True) \n    ## in this method, every 1 also shows up as a zero, so when you combine the lists, you just account for that by subtracting a zero for every one that appears\n    return score1[0], score1[1]\n\n# def evaluate_key(secret_code, key, position):\n#     # Will return a None value in case of a \"miss\", as per your plan.\n#     if key == secret_code[position]:\n#         return 1\n#     elif key in secret_code:\n#         return 0\n\n# def evaluate_guess(secret_code, guess, 
secret_code_length=5):\n#     return sorted(\n#         filter(None.__ne__, # Filter out None values. See https://docs.python.org/3.5/reference/datamodel.html#object.__ne__\n#             [evaluate_key(secret_code, g, p) \n#             for (g, p) in zip(guess, list(range(secret_code_length)))]\n#         ),\n#         reverse=True\n#     )","repo_name":"ijakenorton/cosc343","sub_path":"cosc343_mastermind/eval_guess.py","file_name":"eval_guess.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32731219501","text":"from replit import db\nimport players\n\ngamesList = []\n\n\nclass Game():\n    def __init__(self, name, games, score, team1, team2):\n        self.name = name\n        self.games = games\n        self.score = score\n        self.team1 = team1\n        self.team2 = team2\n\n    def listPlayers(self):\n        out = '**Team 1:**\\n'\n        if not self.team1:\n            out += \"No Players\"\n        for i in self.team1:\n            out += f\"{players.show_player(i)}\\n\"\n        out += '\\n**Team 2:**\\n'\n        if not self.team2:\n            out += \"No Players\"\n        for i in self.team2:\n            out += f\"{players.show_player(i)}\\n\"\n        if out == '':\n            out = \"No Players\"\n        return out\n\n    def addPlayer(self, player, team):\n        team1list = [\"team 1\",'1','team1']\n        team2list = [\"team 2\",'2','team2']\n\n        if player in self.team1:\n            self.team1.remove(player)\n        if player in self.team2:\n            self.team2.remove(player)\n        if team.lower() in team1list and self.isTeamOpen(self.team1):\n            self.team1.append(player)\n        elif team.lower() in team2list and self.isTeamOpen(self.team2):  # check team 2 here, not team 1\n            self.team2.append(player)\n        else:\n            print(\"Couldn't add player to team\")\n    def playerCount(self):\n        total = len(self.team1)+len(self.team2)\n        return total\n    def teamPlayerCount(self, team):\n        return len(team)\n    def isTeamOpen(self, team):\n        count = self.teamPlayerCount(team)\n        print(f\"Is team Open? {count}/4\",team)\n        if count < 4:  # a team is full at 4 players (a full game is 8)\n            return True\n        else:\n            return False\n\ndef isGameFull(game):\n    if game.playerCount() == 8:\n        return True\n    else:\n        return False\n\n\n    #def listGames(self):\ndef appendGame(game):\n    gamesList.append(game)\n    print(\"Created game and added to list\", game.name)\n\ndef listGames():\n    out = ''\n    for i in gamesList:\n        out += f\"{i.name} ({i.playerCount()})\\n\"\n    return out\n\n\n\n\n\ndef selectGame(name):\n    for i in gamesList:\n        if i.name == name:\n            return i\n","repo_name":"RyonGerringer/ELODiscordBot","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19146574239","text":"# -*- coding: utf-8 -*-\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndata_path = \"CIFAR-10\\\\cifar-10-python\"\nbatch_size = 64\nlearning_rate = 0.01\noptimizer = \"SGD\"\n\ntrans = transforms.Compose([transforms.RandomHorizontalFlip(), transforms.ToTensor()])\ntrain_set = torchvision.datasets.CIFAR10(root=data_path, train=True, download=False, transform=trans)\ntest_set = torchvision.datasets.CIFAR10(root=data_path, train=False, download=False, transform=trans)\n\ntrain_loader = torch.utils.data.DataLoader(\n    dataset=train_set,\n    batch_size=batch_size,\n    shuffle=True)\ntest_loader = torch.utils.data.DataLoader(\n    dataset=test_set,\n    batch_size=batch_size,\n    shuffle=False)\n\npred_loader = torch.utils.data.DataLoader(\n    dataset=test_set,\n    batch_size=1,\n    shuffle=False)\n\nimage_list = []\nlabel_list = []\nfor image, label in pred_loader:\n    image_list.append(image)\n    label_list.append(label.item())\n\n\nclasses = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\n\n#pred_model = torch.load('net_params3.pkl')\n\nclass LeNet(nn.Module):\n    def __init__(self):\n        super(LeNet, self).__init__()\n        self.conv1 = nn.Conv2d(in_channels=3, out_channels=6, kernel_size=(5,5), stride=1)\n        self.conv2 = nn.Conv2d(in_channels=6, out_channels=16, kernel_size=(5,5), stride=1)\n        self.fc1 = nn.Linear(in_features=5*5*16, out_features=120)\n        self.fc2 = nn.Linear(in_features=120, out_features=84)\n        self.fc3 = nn.Linear(in_features=84, out_features=10)\n    def forward(self, x):\n        x = self.conv1(x) # [3*32*32] -> [6*28*28]\n        x = F.relu(x)\n        x = F.max_pool2d(x, 2, 2) # [6*28*28] -> [6*14*14]\n        x = self.conv2(x) # [6*14*14] -> [16*10*10]\n        x = F.relu(x)\n        x = F.max_pool2d(x, 2, 2) # [16*10*10] -> [16*5*5]\n        x = x.view(x.size(0), -1) # [16*5*5] -> [400]\n        x = self.fc1(x) # [400] -> [120]\n        x = F.relu(x)\n        x = self.fc2(x) # [120] -> [84]\n        x = F.relu(x)\n        x = self.fc3(x) # [84] -> [10]\n        return x\n\ndef show_images():\n    print(\"5.1 show train images\")\n    cnt = 0\n    plt.figure()\n    for image, label in train_loader:\n        if cnt>=10:\n            break\n        ax = plt.subplot2grid((2, 5), (int(cnt/5), int(cnt%5)), colspan=1, rowspan=1)\n        img = image[0]\n        img = img.numpy()\n        img = np.transpose(img, (1,2,0))\n\n        ax.axis('off')\n        ax.imshow(img)\n        ax.set_title(classes[label[0]])\n\n        cnt += 1\n    plt.show()\n\ndef show_hyperparameters():\n    print(\"==5.2==============\")\n    print(\"Hyperparameters\")\n    print(\"batch size : \" + str(batch_size))\n    print(\"learning rate : \" + str(learning_rate))\n    print(\"optimizer : \" + optimizer)\n    print(\"===================\")\n
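\n# The checkpoint consumed by load_state_dict() further down ('net_params10_0.667.pkl')\n# has to come from a training run; a minimal sketch (assumption, not in the original\n# file) of producing it after the loop below finishes:\n#     torch.save(model.state_dict(), 'net_params10_0.667.pkl')\n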
\ndef training(self, epochs=1):\n ## training\n print(\"5.3 train 1 epoch\")\n model = LeNet()\n optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9,weight_decay=1e-4)\n loss_function = nn.CrossEntropyLoss()\n # loss_function = nn.NLLLoss()\n epoch_train_acc = []\n epoch_test_acc = []\n epoch_train_loss = []\n epoch_test_loss = []\n for epoch in range(epochs):\n # trainning\n train_loss_list = []\n train_corr_count = 0\n train_total_count = 0\n train_loss = 0\n for batch, (x, target) in enumerate(train_loader):\n \n x, target = Variable(x), Variable(target)\n out = model(x)\n \n loss = loss_function(out, target)\n train_loss += loss.data.item()\n train_loss_list.append(loss.data)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n _, pred_label = torch.max(out.data, 1)\n train_total_count += x.data.size()[0]\n train_corr_count += (pred_label == target.data).sum().item()\n \n if (batch+1) % 1 == 0 or (batch+1) == len(train_loader):\n print ('\\r ==== epoch: {}, batch index: {}, acc: {:.3f}, loss: {:.5f}'.format(epoch, batch+1, train_corr_count*1.0/train_total_count, loss.data), end=\"\")\n print()\n epoch_train_acc.append(train_corr_count/train_total_count)\n epoch_train_loss.append(train_loss/len(train_loader))\n # testing\n test_loss_list = []\n test_corr_count = 0\n test_total_count = 0\n test_loss = 0\n for batch, (x, target) in enumerate(test_loader):\n x, target = Variable(x), Variable(target)\n out = model(x)\n \n loss = loss_function(out, target)\n test_loss += loss.data.item()\n test_loss_list.append(loss.data)\n \n _, pred_label = torch.max(out.data, 1)\n test_total_count += x.data.size()[0]\n test_corr_count += (pred_label == target.data).sum().item()\n if(batch+1) % 1 == 0 or (batch+1) == len(test_loader):\n print ('\\r ==== epoch: {}, batch index: {}, acc: {:.3f}, test loss: {:.5f}'.format(epoch, batch+1, test_corr_count * 1.0 / test_total_count, loss.data), end='')\n print()\n epoch_test_acc.append(test_corr_count/test_total_count)\n epoch_test_loss.append(test_loss/len(test_loader))\n \n print(epoch_train_acc[-1], epoch_train_loss[-1], epoch_test_acc[-1], epoch_test_loss[-1])\n \n x_list = []\n x_list = np.arange(len(train_loss_list))\n # for item in range(len(train_loss_list)):\n # x_list.append(int(item))\n plt.plot(x_list, train_loss_list)\n plt.show()\n\n \ndef show_train():\n acc = plt.imread(\"acc.png\")\n loss = plt.imread(\"loss.png\")\n \n fig, axes = plt.subplots(1,2,figsize=(13,6)) # set the size that you'd like (width, height)\n ax0, ax1 = axes.ravel()\n ax0.imshow(acc)\n ax0.axis('off')\n ax1.imshow(loss)\n ax1.axis('off')\n \n plt.show()\n\npred_model = LeNet()\npred_model.load_state_dict(torch.load('net_params10_0.667.pkl'))\n\ndef predict(index):\n# pred_model = torch.load('net3.pkl')\n if(index):\n index = int(index)\n else : return\n prediction = pred_model(image_list[index])\n _, pred_label = torch.max(prediction.data, 1)\n print(classes[label_list[index]], classes[pred_label[0].item()])\n \n softmax = nn.Softmax()\n prediction = softmax(prediction)\n prediction = [prediction[0].data[i].item() for i in range(10)]\n print((prediction))\n \n img = image_list[index]\n img = img.numpy()\n img = np.squeeze(img)\n img = np.transpose(img, (1,2,0))\n\n# plt.subplot(1,2,1)\n# plt.imshow(img)\n# # plt.show()\n\n# plt.bar(classes,prediction,)\n# plt.show()\n \n fig, axes = plt.subplots(1,2,figsize=(11,5)) # set the size that you'd like (width, height)\n ax0, ax1 = axes.ravel()\n ax0.imshow(img)\n ax1.bar(classes,prediction)\n \n 
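# two panels: the input image on the left, per-class probability bars on the right\n    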
plt.show()\n\n ","repo_name":"410421220/ncku_108-1","sub_path":"cvdl2019/HW5/HW5.py","file_name":"HW5.py","file_ext":"py","file_size_in_byte":7269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23419927131","text":"# Magic Trick - Short\r\n\r\nimport math\r\nimport time\r\n\r\ntime_start = time.time()\r\n\r\nfile_input = open('A-small-attempt0.in')\r\nfile_out = open('output_1.txt','w')\r\nfile_out.write('')\r\nfile_out.close()\r\n\r\nTest_Cases = [int(a) for a in file_input.readline().split(' ')][0]\r\n\r\nN = 1\r\n\r\nwhile N <= Test_Cases:\r\n\r\n row1 = [int(a) for a in file_input.readline().split(' ')][0]\r\n\r\n layout1 = []\r\n for i in range(4):\r\n layout1.append([int(a) for a in file_input.readline().split(' ')])\r\n\r\n answer1 = layout1[row1-1]\r\n\r\n row2 = [int(a) for a in file_input.readline().split(' ')][0]\r\n\r\n layout2 = []\r\n \r\n for i in range(4):\r\n layout2.append([int(a) for a in file_input.readline().split(' ')])\r\n\r\n answer2 = layout2[row2-1]\r\n\r\n answers = list(set(answer1) & set(answer2))\r\n\r\n answer_length = len(answers)\r\n\r\n if answer_length == 1:\r\n \r\n file_out = open('output_1.txt','a')\r\n file_out.write('Case #'+str(N)+': '+str(answers[0])+'\\n')\r\n file_out.close()\r\n\r\n elif answer_length > 1:\r\n\r\n file_out = open('output_1.txt','a')\r\n file_out.write('Case #'+str(N)+': Bad magician!\\n')\r\n file_out.close()\r\n\r\n else:\r\n\r\n file_out = open('output_1.txt','a')\r\n file_out.write('Case #'+str(N)+': Volunteer cheated!\\n')\r\n file_out.close()\r\n\r\n N+=1\r\n\r\n\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_135/779.py","file_name":"779.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3108081392","text":"'''\nLongest consecutive subsequence \nGiven an array of positive integers. 
Find the length of the longest sub-sequence such that elements in the subsequence are consecutive integers, the consecutive numbers can be in any order.\n'''\n\ndef findLongestConseqSubseq(arr, N):\n arr = list(set(arr))\n arr.sort()\n count = 1\n l=[1]\n for i in range(1,len(arr)):\n if(arr[i]==arr[i-1]+1):\n count+=1\n l.append(count)\n else:\n count=1\n return max(l)\n","repo_name":"EshikaShah/My-journey-of-competitive-programming","sub_path":"Python/Arrays/findLongestConseqSubseq.py","file_name":"findLongestConseqSubseq.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23513477261","text":"import itertools\nfrom itertools import combinations\n\nEMPTY = \"IMPOSSIBLE\"\n\n\ndef get_fractile_nums(k, c, s):\n\torg_list = []\n\tfractile_list = []\n\n\tif k == s:\n\t\treturn (str(j+1) for j in range(s))\n\n\n\tif c == 1:\n\t\tif k == s:\n\t\t\treturn (str(j+1) for j in range(s))\n\t\tif k > s:\n\t\t\treturn EMPTY\n\n\tfor o in itertools.product([\"G\", \"L\"], repeat=(k)):\n\t\torg = \"\".join(o)\n\t\torg_list.append(org)\n\t\tfractile = make_fractile(org, c)\n\t\tfractile_list.append(fractile)\n\n\t# print(fractile_list)\n\t# s는 while 문 돌면서 하나씩 증가 시킴\n\ts_count = 1\n\twhile s >= s_count:\n\t\tindex_items = (str(i+1) for i in range(k**c))\n\t\tresult = select_clean_nums(org_list, fractile_list, index_items, s_count)\n\t\tif result is not None and len(result) > 0:\n\t\t\t# print(result)\n\t\t\treturn result\n\t\ts_count += 1\n\treturn [EMPTY]\n\n\ndef make_fractile(org, c):\n\ttarget_tile = org\n\tfor i in range(int(c)-1):\n\t\tfractile = \"\"\n\t\tfor c in target_tile:\n\t\t\tif c == 'L':\n\t\t\t\tfractile += org\n\t\t\telse:\n\t\t\t\tfor i in range(len(org)):\n\t\t\t\t\tfractile += \"G\"\n\t\ttarget_tile = fractile\n\treturn target_tile\n\n\ndef select_clean_nums(org_list, fractile_list, index_items, s_count):\n\tfor c in combinations(index_items, s_count):\n\t\tsuccess_flag = False\n\t\tfor f in range(len(fractile_list)):\n\t\t\tpart_fractile = \"\"\n\t\t\tfor p in c:\n\t\t\t\tpart_fractile += fractile_list[f][int(p)-1]\n\n\t\t\t# L이 나왔으면, org을 확인하여 G가 있는지 확인, 없다면, 끝\n\t\t\t# 있다면, s 개수의 조합만큼 찾아서 확인\n\t\t\tif 'G' not in part_fractile:\n\t\t\t\tsuccess_flag = 'G' not in org_list[f]\n\t\t\t\tif not success_flag:\n\t\t\t\t\tbreak\n\n\t\tif success_flag:\n\t\t\treturn c\n\n\treturn None\n\n\nt = int(input())\nfor i in range(1, t + 1):\n\tk, c, s = [int(a) for a in input().split(\" \")]\n\tprint(\"Case #{}: {}\".format(i, \" \".join(get_fractile_nums(k, c, s))))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_180/1130.py","file_name":"1130.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32005936383","text":"#!/usr/bin/env python\r\n# flake8: noqa\r\n\"\"\"Tests `spiro.dtdg.models.decoder.tf.sequentialDecoder.implicitTimeModels` package.\"\"\"\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom tests.core.common_functions import *\r\n\r\n\r\ndef test_node_memory_tf():\r\n \"\"\"Test NodeMemory\"\"\"\r\n clear_background()\r\n from spiro.core.config import set_backend\r\n from spiro.core.backends import TENSORFLOW\r\n set_backend(TENSORFLOW)\r\n from spiro.dtdg.models.decoder.tf.sequentialDecoder.implicitTimeModels import NodeMemory\r\n\r\n n_nodes = 5\r\n hidden_d = 2\r\n n_layers = 3\r\n this_memory = NodeMemory(n_nodes, hidden_d, n_layers)\r\n 
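# overwrite the memory of two nodes, then check reads and reset_state() below\r\n    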
nodes_to_change = [2,3]\r\n new_memory = tf.convert_to_tensor(np.random.randn(2, n_layers, hidden_d))\r\n this_memory.update_memory(new_memory, nodes_to_change)\r\n new_memory = tf.convert_to_tensor(np.random.randn(n_layers, 2, hidden_d))\r\n assert not np.all(tf.equal(this_memory.get_memory(nodes_to_change), new_memory))\r\n old_memory = this_memory.memory.copy()\r\n this_memory.reset_state()\r\n assert not np.all(tf.equal(old_memory, this_memory.memory))\r\n","repo_name":"mcgill-cpslab/spiral","sub_path":"tests/dtdg/models/decoder/tf/test_node_memory_tf.py","file_name":"test_node_memory_tf.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"27475715329","text":"# Buy and Sell stoks once\n\ndef buy_and_sell_stock_once(prices):\n if not prices:\n return 0\n\n min_price, max_profit = float('inf'), 0.0\n\n for price in prices:\n profit_if_sell_today = price - min_price\n max_profit = max(max_profit, profit_if_sell_today)\n min_price = min(min_price, price)\n\n return max_profit\n\nprices = [310, 315, 275, 295, 260, 270, 290, 230, 255, 250]\nprint(buy_and_sell_stock_once(prices))\nprint(buy_and_sell_stock_once([]))\n","repo_name":"imteekay/algorithms","sub_path":"coding_interviews/elements_of_programming_interview/buy_and_sell_stock_once.py","file_name":"buy_and_sell_stock_once.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":452,"dataset":"github-code","pt":"61"} +{"seq_id":"2843390706","text":"import dash_core_components as dcc\nimport dash_html_components as html\nimport numpy as np\nimport pandas as pd\nfrom dash.dependencies import Input, Output\n\n\nclass GenericCrossfilterModule(object):\n def __init__(self):\n\n self.id ='generic-crossfilter-module'\n self.callback_check = 'Generic Crossfilter Recipe'\n\n self.layout = html.Div(id=self.id,\n children=[\n html.Div(\n dcc.Graph(\n id='g1',\n config={'displayModeBar': False}\n ), className='four columns'\n ),\n html.Div(\n dcc.Graph(\n id='g2',\n config={'displayModeBar': False}\n ), className='four columns'\n ),\n html.Div(\n dcc.Graph(\n id='g3',\n config={'displayModeBar': False}\n ), className='four columns'\n )\n ], className='row', style={'display': 'none'}\n )\n\n def set_callbacks(self, app):\n\n @app.callback(Output(self.id, 'style'), [Input('tabs', 'value'), Input('tab-subcategories', 'value')])\n def display_module(tab, tab_subcategory):\n if (tab == 4) & (tab_subcategory == self.callback_check):\n return {'display': 'block'}\n return {'display': 'none'}\n\n # app.callback is a decorator which means that it takes a function\n # as its argument.\n # highlight is a function \"generator\": it's a function that returns a function\n\n app.callback(\n Output('g1', 'figure'),\n [\n Input('g1', 'selectedData'),\n Input('g2', 'selectedData'),\n Input('g3', 'selectedData')\n ]\n )(highlight('Column 0', 'Column 1'))\n\n app.callback(\n Output('g2', 'figure'),\n [\n Input('g2', 'selectedData'),\n Input('g1', 'selectedData'),\n Input('g3', 'selectedData')\n ]\n )(highlight('Column 2', 'Column 3'))\n\n app.callback(\n Output('g3', 'figure'),\n [\n Input('g3', 'selectedData'),\n Input('g1', 'selectedData'),\n Input('g2', 'selectedData')\n ]\n )(highlight('Column 4', 'Column 5'))\n\n return\n\n\ndef highlight(x, y):\n np.random.seed(0)\n\n df = pd.DataFrame({\n 'Column {}'.format(i): np.random.rand(30) + i * 10 for i in range(6)\n })\n\n def callback(*selectedDatas):\n selectedpoints 
= df.index\n for i, seleced_data in enumerate(selectedDatas):\n if seleced_data is not None:\n selected_index = [\n p['customdata'] for p in seleced_data['points']\n ]\n if len(selected_index) > 0:\n selectedpoints = np.intersect1d(\n selectedpoints, selected_index\n )\n figure = {\n 'data':[\n {\n 'x': df[x],\n 'y': df[y],\n 'text': df.index,\n 'textposition': 'top',\n 'selectedpoints': selectedpoints,\n 'customdata': df.index,\n 'type': 'scatter',\n 'mode': 'markers+text',\n 'marker': {\n 'color': 'rgba(0, 116, 217, 0.7)',\n 'size': 12,\n 'line': {\n 'color': 'rgb(0, 116, 217)',\n 'width': 0.5\n }\n },\n 'textfont': {\n 'color': 'rgba(30, 30, 30, 1)'\n },\n 'unselected': {\n 'marker': {\n 'opacity': 0.3\n },\n 'textfont': {\n # make color transparent when not selected\n 'color': 'rgba(0, 0, 0'\n }\n }\n }\n ],\n 'layout': {\n 'margin': {'l': 15, 'r': 0, 'b': 15, 't': 15},\n 'dragmode': 'selected',\n 'hovermode': 'closest',\n 'showlegend': False\n }\n }\n\n shape = {\n 'type': 'rect',\n 'line': {\n 'width': 1,\n 'dash': 'dot',\n 'color': 'darkgrey'\n }\n }\n\n if selectedDatas[0] and selectedDatas[0]['range']:\n figure['layout']['shapes'] = [dict({\n 'x0': selectedDatas[0]['range']['x'][0],\n 'x1': selectedDatas[0]['range']['x'][1],\n 'y0': selectedDatas[0]['range']['y'][0],\n 'y1': selectedDatas[0]['range']['y'][1]\n }, **shape)]\n else:\n figure['layout']['shapes'] = [dict({\n 'type': 'rect',\n 'x0': np.min(df[x]),\n 'x1': np.max(df[x]),\n 'y0': np.min(df[y]),\n 'y1': np.max(df[y])\n }, **shape)]\n\n return figure\n\n return callback\n","repo_name":"jxb5778/dash_example","sub_path":"app/pages/part_4/generic_crossfilter_module.py","file_name":"generic_crossfilter_module.py","file_ext":"py","file_size_in_byte":5339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70365854916","text":"import requests\nfrom bs4 import BeautifulSoup \nimport os\nimport datetime\nimport time\n\nclass NOSScraper:\n\n\tdef __init__(self):\n\t\tself.main_url = 'https://nos.nl'\n\n\n\t# return title, text for a particular article\n\tdef get_text(self, url):\n\t\tr = requests.get(url)\n\t\tsoup = BeautifulSoup(r.content, 'html.parser')\n\t\ttitle = soup.find('title').text\n\t\t\n\t\ttext = ''\n\t\tdivs = soup.findAll('div', 'sc-daa8fdde-1 kDUyiX')\n\t\tfor div in divs:\n\t\t\tif div.find('aside'):\n\t\t\t\tcontinue\n\t\t\tp = div.find('p', 'sc-d176aaed-0 blKpuK')\n\t\t\tif p:\n\t\t\t\ttext += ' ' + p.text\n\t\treturn title, text\n\n\t# write a day's articles\n\tdef write_day_articles(self, articles, filename, dir_path):\n\t\tif not os.path.isdir(dir_path):\n\t\t\tos.makedirs(dir_path)\n\t\tfile = open(dir_path + '/' + filename + '.txt', 'w')\n\t\tfor article in articles:\n\t\t\ttitle, text, url, date = article['title'], article['text'], article['url'], article['date']\n\t\t\tto_write = '||'.join([date, title, text, url])\n\t\t\tfile.write(to_write + '\\n')\n\t\tfile.close()\n\n\t# scrape and write a day's articles\n\t# format of date_string: yyyy-mm-dd\n\tdef get_day_articles(self, date_string, dir_path):\n\t\tprint('\\n'+ date_string + '------------------- \\n')\n\t\turl = 'https://nos.nl/nieuws/archief/' + date_string\n\t\tall_articles = []\n\t\tr = requests.get(url)\n\t\tsoup = BeautifulSoup(r.content,'html.parser')\n\n\t\tli_list = soup.findAll('li', 'list-time__item')\n\t\tfor index, li in enumerate(li_list):\n\t\t\thref = li.find('a')\n\t\t\tlink = self.main_url + href['href']\n\t\t\ttry:\n\t\t\t\ttitle, text = 
self.get_text(link)\n\t\t\texcept:\n\t\t\t\tprint(\"ERROR\")\n\t\t\t\tcontinue\n\t\t\tprint(index, title)\n\t\t\tarticle = {'date':date_string, 'url':link, 'title':title, 'text':text}\n\t\t\tall_articles.append(article)\n\n\t\tself.write_day_articles(all_articles, date_string, dir_path)\n\n\t# get articles in the given interval (both dates included; format yyyy-mm-dd)\n\tdef get_interval_articles(self, start_string, end_string, dir_path = '../../corpus/nos'):\n\t\tstart_date = datetime.datetime.strptime(start_string, \"%Y-%m-%d\")\n\t\tend_date = datetime.datetime.strptime(end_string, \"%Y-%m-%d\")\n\n\t\tcur_date = start_date\n\t\twhile cur_date <= end_date:\n\t\t\tdate_string = datetime.datetime.strftime(cur_date, \"%Y-%m-%d\")\n\t\t\tmonth_string = datetime.datetime.strftime(cur_date, \"%Y-%m\")\n\t\t\tcur_dir_path = dir_path + '/' + month_string\n\t\t\tself.get_day_articles(date_string, cur_dir_path)\n\n\t\t\ttime.sleep(3)\n\t\t\tcur_date += datetime.timedelta(days=1)\t\n\n\nstart_string = '2021-09-01'\nend_string = '2021-12-31'\n\nscraper = NOSScraper()\nscraper.get_interval_articles(start_string, end_string)\n","repo_name":"k-wal/modus-operandi","sub_path":"code/scrapers/newspapers/nos.py","file_name":"nos.py","file_ext":"py","file_size_in_byte":2535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42806843091","text":"from typing import List\n\n\nclass Solution:\n def getPermutation(self, n: int, k: int) -> str:\n def helper(k: int, index: int):\n if index == n:\n return\n\n for i in range(1, n + 1):\n if used[i]:\n continue\n cnt = factorial[n - index - 1]\n if k > cnt:\n k -= cnt\n continue\n\n used[i] = True\n path.append(i)\n helper(k, index + 1)\n\n used = [False for _ in range(0, n + 1)]\n factorial = [1 for _ in range(n + 1)]\n for i in range(2, n + 1):\n factorial[i] = i * factorial[i - 1]\n\n path = []\n helper(k, 0)\n return \"\".join([str(i) for i in path])\n\n\ns = Solution()\nprint(s.getPermutation(3, 3))\n# print(s.getPermutation(4, 9))\n","repo_name":"voldikss/code","sub_path":"leetcode/0060. Permutation Sequence/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23428992431","text":"import math\r\nimport sys\r\n\r\ndef main():\r\n infile, outfile = sys.argv[1:3]\r\n with open(infile, 'r') as inp:\r\n with open(outfile, 'w') as out:\r\n T = int(inp.readline())\r\n for case in range(1, T+1):\r\n (C, F, X) = (float(i) for i in inp.readline().split())\r\n out.write('Case #{}: '.format(case))\r\n out.write('{}\\n'.format(find_solution(C, F, X)))\r\n\r\ndef find_solution(C, F, X):\r\n V = 2.0\r\n S = 0\r\n while True:\r\n tx = X/V\r\n tf = C/V + X/(V+F)\r\n if tf < tx:\r\n # build a farm\r\n S += C/V\r\n V += F\r\n else:\r\n # don't build\r\n S += X/V\r\n break\r\n return S\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_136/592.py","file_name":"592.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37383415792","text":"def leiaInt(inteiro):\n while True: \n valor = str(input((inteiro))).strip()\n if valor.isnumeric(): \n return valor\n else: \n print('\\033[1;31mERRO! 
Digite um número inteiro válido\\033[m')\n\ndef leiaFloat(real):\n    while True:\n        valor = str(input(real)).strip().replace(',', '.')\n        # accepts digits with a single optional decimal separator\n        if valor.replace('.', '', 1).isnumeric():\n            return float(valor)\n        else:\n            print('\\033[1;31mERRO! Digite um número Real válido\\033[m')\nR = leiaFloat('Digite um numero Real: ')\nI = int(leiaInt('Digite um numero Inteiro: '))","repo_name":"williancae/pythonGuanabara","sub_path":"mundo03/Exercicios/ex114.py","file_name":"ex114.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"9603078258","text":"import csv\n\n# creates our new file (raw string avoids invalid escape sequences in the Windows path)\nwith open(r'.\\\\learning.csv\\\\project02\\\\dados.csv', 'w+', newline='', encoding='ansi') as fileOpened:\n    myFile = csv.writer(fileOpened)\n    myFile.writerow([\"Nome\", \"Sexo\", \"Endereço\", \"Cidade\", \"E-mail\", \"Telefone\", \"Idade\"])\n    loop = int(input(\"Quantas pessoas você vai cadastrar? \"))\n    for i in range(loop):\n        print(\"\\nPreencha com os dados para finalizar o cadastro...\\n\")\n        nome = input(\"\\nPessoa {} - Nome: \".format(i+1))\n        sexo = input(\"Pessoa {} - Sexo (M ou F): \".format(i+1))\n        endereco = input(\"Pessoa {} - Endereço: \".format(i+1))\n        cidade = input(\"Pessoa {} - Cidade: \".format(i+1))\n        email = input(\"Pessoa {} - E-mail: \".format(i+1))\n        telefone = input(\"Pessoa {} - Telefone: \".format(i+1))\n        idade = input(\"Pessoa {} - Idade: \".format(i+1))\n        myFile.writerow([nome, sexo, endereco, cidade, email, telefone, idade])\n","repo_name":"henrique-af/Python-roadmap","sub_path":"learning.csv/project02/creatingCsv.py","file_name":"creatingCsv.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"41715970073","text":"#PART TWO: https://adventofcode.com/2021/day/12\r\n#DATE: 12/12/2021\r\n#ANSWER: \r\n\r\nfrom collections import defaultdict\r\n\r\nwith open(\"input.txt\") as file:\r\n    x = file.read().strip()\r\ndata = [i.split(\"-\") for i in x.split(\"\\n\")]\r\n\r\nadjacency = defaultdict(list)\r\n\r\nfor a, b in data:\r\n    adjacency[a].append(b)\r\n    adjacency[b].append(a)\r\n\r\n\r\npath_count = 0\r\nvisited = set()\r\n\r\ndef find_path(cave):\r\n    global path_count\r\n\r\n    if cave == \"end\":\r\n        path_count += 1\r\n        return\r\n\r\n    if cave.islower() and cave in visited:\r\n        return\r\n\r\n    if cave.islower():\r\n        visited.add(cave)\r\n\r\n    for n in adjacency[cave]:\r\n        if n == \"start\":\r\n            continue\r\n        find_path(n)\r\n\r\n    if cave.islower():\r\n        visited.remove(cave)\r\n\r\nfind_path(\"start\")\r\n\r\nprint(path_count)\r\n","repo_name":"adrienpillou/Advent-of-Code-2021","sub_path":"day-12/part_two.py","file_name":"part_two.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"40217634759","text":"import math\n\nfilepath = \"inputs/day13_input.txt\"\nwith open(filepath, 'r') as f:\n    lines = f.readlines()\n\n# Part 1\nearliest_time = int(lines[0])\nbus_lines = set(lines[1].split(','))\nbus_lines.remove('x')\n\nnext_dep = {}\nfor bl in bus_lines:\n    if earliest_time % int(bl) != 0:\n        next_dep[(math.floor(earliest_time/int(bl)) + 1) * int(bl)] = int(bl)\n    else:\n        next_dep[earliest_time] = int(bl)\n\nearliest_bus = min(next_dep.keys())\nprint(next_dep[earliest_bus] * (earliest_bus - earliest_time))\n\n\n# Part 2\nbus_lines = [[i, int(val)] for i, val in 
enumerate(lines[1].split(',')) if val != \"x\"]\ncProd = 1\nc = 0\nfor k in range(1, len(bus_lines)):\n a = 0\n cProd *= bus_lines[k-1][1]\n reste = c % bus_lines[k][1]\n while reste != (bus_lines[k][1] - bus_lines[k][0]) % bus_lines[k][1]:\n a += 1\n reste = (reste + cProd) % bus_lines[k][1]\n c += a*cProd\nprint(c)\n\n\n","repo_name":"Dralnar/AdventCode","sub_path":"day13.py","file_name":"day13.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32832850331","text":"import gym\r\nimport numpy as np\r\n\r\nfrom cfg import cfg\r\nfrom utils import fix\r\nfrom ppo_1 import PPO_1\r\nfrom network import FeedForwardNN\r\n\r\nenv = gym.make(cfg['env_name'])\r\nfix(env, cfg['seed'])\r\nobs_dim = env.observation_space.shape[0]\r\nact_dim = env.action_space.shape[0]\r\n\r\nmodel = PPO_1(network=FeedForwardNN, obs_dim = obs_dim, act_dim = act_dim, subgaussian = True)\r\nmodel.actor.load_checkpoint()\r\nmodel.critic.load_checkpoint()\r\nmodel.actor.eval() # turn network to evaluation mode\r\nmodel.critic.eval()\r\nNUM_OF_TEST = 5 # Do not revise it !!!!!\r\ntest_total_reward = []\r\naction_list = []\r\nfor i in range(NUM_OF_TEST):\r\n actions = []\r\n obs = env.reset()\r\n\r\n #img = plt.imshow(env.render(mode='rgb_array'))\r\n env.render()\r\n \r\n total_reward = 0\r\n\r\n done = False\r\n while not done:\r\n action, _ = model.get_action(obs)\r\n actions.append(action)\r\n obs, reward, done, _ = env.step(action)\r\n\r\n total_reward += reward\r\n\r\n #img.set_data(env.render(mode='rgb_array'))\r\n env.render()\r\n #display.display(plt.gcf())\r\n #display.clear_output(wait=True)\r\n print(total_reward)\r\n test_total_reward.append(total_reward)\r\n\r\n\r\nenv.close()\r\n\r\nprint(f\"Your final reward is : %.2f\"%np.mean(test_total_reward))\r\n","repo_name":"APeng-666/PPO-Pytorch-SubgaussianSampling","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33727907539","text":"import sys\n\n# using meozation (caching we reduce the runing time from 2^n to n \n\n#cache = {}\n\n\n#when I use lsit instead of dictionary I will ahve problem in indexing\ndef fib(n, cache = {}):\n if n == 1 or n == 0: return 1\n if n in cache:\n return cache[n]\n value = fib(n-1) + fib(n-2)\n \n #put the calculated value in cache\n cache[n] = value\n\n return value\n\n\n\n\ninput = int(sys.argv[1])\nprint(fib(input))\n","repo_name":"rampedro/Cracking-the-coding-interview-leetcode","sub_path":"fib-recursive-memoization.py","file_name":"fib-recursive-memoization.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42052677378","text":"total=totmil=menor=cont=0\nbarato=''\nwhile True:\n produto=str(input('Produto: ')).strip().upper()\n preco=float(input('Valor: R$ '))\n cont+=1\n total += preco\n if preco > 1000:\n totmil+=1\n if cont==1 or preco < menor:\n menor=preco\n barato = produto\n\n resp=' '\n while resp not in 'S/N':\n resp = str(input('deseja continuar?(S/N) ')).strip().upper()[0]\n if resp == 'N':\n break\nprint(f'o valor total e R${total}')\nprint(f'{totmil} produtos custam acima de R$1000,00')\nprint(f'o produto mais barato e {barato} que custa 
R${menor:.2f}')\n\n","repo_name":"giorkakuda/Python_Exercises","sub_path":"070.py","file_name":"070.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"173547006","text":"import cv2.cv2 as cv2\nimport numpy as np\nfrom experiments.params import Params\nfrom numpy import arctan2\nfrom math import sqrt\nfrom math import pi\nfrom diagram_parser.text_detector import remove_text\nfrom sklearn.cluster import DBSCAN\n\n\ndef preprocess(img):\n # TODO: TUNE THESE PARAMETERS\n img = cv2.erode(img, kernel=np.ones((2, 2)), iterations=1)\n blur = cv2.bilateralFilter(img, 9, 75, 75)\n return blur\n\n\ndef inclination(theta):\n if theta > np.pi / 2:\n return theta - (np.pi / 2)\n else:\n return theta + (np.pi / 2)\n\n\n# def filter_lines(lines, index):\n# indices_to_remove = list()\n# line = lines[index]\n# angle_1 = inclination(alpha=line[0][1])\n# for i in range(index + 1, lines.shape[0]):\n# angle_2 = inclination(alpha=lines[i][0][1])\n# angle_between_lines = abs(angle_2 - angle_1)\n# if angle_between_lines > np.pi / 2:\n# angle_between_lines = np.pi - angle_between_lines\n# # remove lines for which the angle is less than ~5 degrees and the difference between the rhos is less than\n# # 20 pixels\n# if angle_between_lines < 0.1 and abs(abs(lines[i][0][0]) - abs(line[0][0])) < 20:\n# indices_to_remove.append(i)\n# filtered_lines = np.delete(lines, indices_to_remove, axis=0)\n#\n# if index + 1 == len(filtered_lines):\n# return filtered_lines\n# else:\n# return filter_lines(filtered_lines, index + 1)\n\n\ndef filter_lines(lines, image_size):\n accepted_line_groups = list()\n while len(lines) > 0:\n indices_to_remove = [0]\n line = lines[0]\n current_set = [line]\n for idx, line2 in enumerate(lines[1:], start=1):\n if close_enough(line, line2, image_size):\n indices_to_remove.append(idx)\n current_set.append(line2)\n accepted_line_groups.append(current_set)\n lines = np.delete(lines, indices_to_remove, axis=0)\n filtered_lines = []\n for line_group in accepted_line_groups:\n if (np.array(line_group) < 0).any():\n filtered_lines.append(line_group[0])\n else:\n average_line = np.average(line_group, axis=0, weights=[1 / x for x in range(1, len(line_group) + 1)])\n filtered_lines.append(average_line)\n\n return filtered_lines\n\n\ndef line_length(endpoints):\n x1 = endpoints[0]\n y1 = endpoints[1]\n x2 = endpoints[2]\n y2 = endpoints[3]\n return sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)\n\n\ndef clustering_filter(lines, image_size):\n line_lengths = [line_length(endpoints) for endpoints in lines]\n hesse_lines = [np.array(hesse_normal_form(endpoints)) for endpoints in lines]\n x = np.array([line[0] for line in hesse_lines])\n y = np.array([line[1] for line in hesse_lines])\n x /= max(image_size[0], image_size[1])\n y /= 2 * pi\n # plt.xlim(0, 1)\n # plt.ylim(0, 1)\n # clustering = AgglomerativeClustering(n_clusters=None, distance_threshold=0.075, affinity=cylindrical_similarity)\n eps = Params.params['line_detector_clustering_eps']\n clustering = DBSCAN(eps=eps, min_samples=1, metric=cylindrical_similarity)\n clustering.fit(list(zip(x, y)))\n # plt.scatter(x, y, c=clustering.labels_.astype(float))\n # plt.show()\n cluster_dict = {}\n for idx, label in enumerate(clustering.labels_):\n if label in cluster_dict:\n cluster_dict[label].append((hesse_lines[idx], line_lengths[idx]))\n else:\n cluster_dict[label] = [(hesse_lines[idx], line_lengths[idx])]\n averaged_lines = []\n for _, lines in cluster_dict.items():\n 
averaged_lines.append(average_lines(lines))\n return averaged_lines\n\n\ndef filter_lines_p(lines_p, image_size):\n accepted_line_groups = list()\n while len(lines_p) > 0:\n indices_to_remove = [0]\n line = lines_p[0]\n current_set = [line]\n for idx, line2 in enumerate(lines_p[1:], start=1):\n if close_enough_p(line, line2, image_size):\n indices_to_remove.append(idx)\n current_set.append(line2)\n accepted_line_groups.append(current_set)\n lines_p = np.delete(lines_p, indices_to_remove, axis=0)\n filtered_lines_p = []\n for line_group in accepted_line_groups:\n first_line = line_group[0]\n group_inclination = inclination(hesse_normal_form(first_line)[1])\n x1, y1, x2, y2 = line_group[0]\n\n if np.pi / 4 < group_inclination < 3 * np.pi / 4:\n for line in line_group:\n if line[1] < y1:\n y1 = line[1]\n x1 = line[0]\n if line[3] > y2:\n y2 = line[3]\n x2 = line[2]\n else:\n for line in line_group:\n if line[0] < x1:\n x1 = line[0]\n y1 = line[1]\n if line[2] > x2:\n x2 = line[2]\n y2 = line[3]\n filtered_lines_p.append([x1, y1, x2, y2])\n return filtered_lines_p\n\n\ndef convert_to_positive(line):\n rho = line[0]\n theta = line[1]\n if rho < 0:\n rho = -rho\n theta = np.pi + theta\n return rho, theta\n\n\ndef close_enough(line1, line2, image_size):\n rho1, theta1 = convert_to_positive(line1)\n rho2, theta2 = convert_to_positive(line2)\n angle_difference = abs(theta1 - theta2)\n if angle_difference < np.pi / 2:\n pass\n elif np.pi / 2 < angle_difference <= np.pi:\n angle_difference = np.pi - angle_difference\n elif np.pi <= angle_difference <= 3 * np.pi / 2:\n angle_difference = angle_difference - np.pi\n elif 3 * np.pi / 2 <= angle_difference < 2 * np.pi:\n angle_difference = 2 * np.pi - angle_difference\n rho_difference = abs(rho1 - rho2)\n # PARAM line_detector_close_enough_angle_threshold\n # PARAM line_detector_close_enough_rho_threshold\n angle_thresh = Params.params['line_detector_close_enough_angle_threshold']\n rho_thresh = Params.params['line_detector_close_enough_rho_threshold']\n if angle_difference < angle_thresh and rho_difference < rho_thresh * (image_size[0] + image_size[1]) / 2:\n return True\n return False\n\n\ndef match_close_enough(line1, line2, image_size):\n rho1, theta1 = convert_to_positive(line1)\n rho2, theta2 = convert_to_positive(line2)\n angle_difference = abs(theta1 - theta2)\n if angle_difference < np.pi / 2:\n pass\n elif np.pi / 2 < angle_difference <= np.pi:\n angle_difference = np.pi - angle_difference\n elif np.pi <= angle_difference <= 3 * np.pi / 2:\n angle_difference = angle_difference - np.pi\n elif 3 * np.pi / 2 <= angle_difference < 2 * np.pi:\n angle_difference = 2 * np.pi - angle_difference\n elif angle_difference >= 2 * np.pi:\n angle_difference = angle_difference - 2 * np.pi\n rho_difference = abs(rho1 - rho2)\n # TESTING_PARAM line_detector_close_enough_angle_threshold\n # TESTING_PARAM line_detector_close_enough_rho_threshold\n if angle_difference < 0.1 and rho_difference < 0.05 * (image_size[0] + image_size[1]) / 2:\n return True\n return False\n\n\ndef close_enough_p(line1, line2, image_size):\n hesse_line1 = hesse_normal_form(line1)\n hesse_line2 = hesse_normal_form(line2)\n return close_enough(hesse_line1, hesse_line2, image_size)\n\n\ndef hesse_normal_form(line):\n x1, y1, x2, y2 = line\n A = y1 - y2\n B = x2 - x1\n C = (x1 - x2) * y1 + (y2 - y1) * x1\n cosine = A / sqrt(A ** 2 + B ** 2)\n sine = B / sqrt(A ** 2 + B ** 2)\n negative_rho = C / sqrt(A ** 2 + B ** 2)\n rho = -negative_rho\n theta = arctan2(sine, cosine)\n\n return 
convert_to_positive([rho, theta])\n\n\ndef cylindrical_similarity(l1, l2):\n rho_diff = abs(l1[0] - l2[0])\n theta_diff = abs(l1[1] - l2[1])\n theta_diff = min(theta_diff, 1 - theta_diff)\n return sqrt(rho_diff ** 2 + theta_diff ** 2)\n\n\ndef average_lines(lines_with_weights):\n lines = [line_with_weight[0] for line_with_weight in lines_with_weights]\n weights = [line_with_weight[1] for line_with_weight in lines_with_weights]\n\n min_theta = np.min(lines, axis=0)[1]\n max_theta = np.max(lines, axis=0)[1]\n if max_theta - min_theta > np.pi / 2:\n fixed_lines = []\n for line in lines:\n if 2 * pi - line[1] < line[1]:\n fixed_lines.append([line[0], line[1] - 2 * pi])\n else:\n fixed_lines.append([line[0], line[1]])\n return np.average(fixed_lines, axis=0)\n else:\n return np.average(lines, axis=0)\n\n\ndef get_hough_lines(img):\n # gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # TODO: TUNE THESE PARAMETERS\n canny_params = Params.params['line_detector_canny_params']\n edges = cv2.Canny(img, canny_params[0], canny_params[1], apertureSize=canny_params[2])\n lines = cv2.HoughLines(edges, 1, np.pi / 45, 40)\n return lines\n\n\ndef get_hough_lines_p(img):\n edges = cv2.Canny(img, 50, 150, apertureSize=3)\n # PARAM line_detector_hough_p_params\n rho, theta, thresh, minLineLength, maxLineGap = Params.params['line_detector_hough_p_params']\n\n lines = cv2.HoughLinesP(edges, rho=rho, theta=theta, threshold=thresh, minLineLength=minLineLength,\n maxLineGap=maxLineGap)\n if lines is not None:\n lines = [line[0] for line in lines]\n else:\n lines = []\n return lines\n\n\ndef draw_lines(img, lines):\n img_copy = img.copy()\n for line in lines:\n rho = line[0]\n theta = line[1]\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a * rho\n y0 = b * rho\n x1 = int(x0 + 1000 * (-b))\n y1 = int(y0 + 1000 * a)\n x2 = int(x0 - 1000 * (-b))\n y2 = int(y0 - 1000 * a)\n\n cv2.line(img_copy, (x1, y1), (x2, y2), (0, 0, 255), 1)\n return img_copy\n\n # plt.hist(distances_list,density=True,bins=30)\n # plt.show()\n\n\ndef draw_lines_with_endpoints(img, lines):\n img_copy = img.copy()\n for line in lines:\n x1 = line[0]\n y1 = line[1]\n x2 = line[2]\n y2 = line[3]\n\n cv2.line(img_copy, (x1, y1), (x2, y2), (0, 0, 255), 2)\n return img_copy\n\n\ndef get_filtered_lines(img, filter_method='cluster'):\n mode = Params.params['line_detector_mode']\n if mode == 'hough':\n hough_lines = get_hough_lines(img)\n if hough_lines is None:\n return np.array([])\n else:\n hough_lines = [line[0] for line in hough_lines] # remove double array\n filtered_lines = filter_lines(hough_lines, img.shape)\n return filtered_lines\n elif mode == 'hough_p_hesse':\n cv2.destroyAllWindows()\n resize_image = Params.params['resize_image_if_too_big']\n resize_dim = Params.params['resize_dim']\n max_dimension = max(img.shape[0], img.shape[1])\n if max_dimension > resize_dim and resize_image:\n factor = resize_dim / max_dimension\n image = cv2.resize(img, (0, 0), fx=factor, fy=factor)\n else:\n factor = 1\n image = img.copy()\n hough_lines_p = get_hough_lines_p(image)\n hough_lines_p = np.multiply(hough_lines_p, 1 / factor)\n if hough_lines_p is None or len(hough_lines_p) == 0:\n return np.array([])\n else:\n if filter_method == 'cluster':\n filtered_lines = clustering_filter(hough_lines_p, np.array(np.array(image.shape) * 1 / factor))\n # cv2.imshow('lines', draw_lines(cv2.resize(img, (0, 0), fx=1/factor, fy=1/factor), filtered_lines))\n # cv2.waitKey()\n return filtered_lines\n else:\n filtered_lines = filter_lines_p(hough_lines_p, 
np.array(np.array(image.shape) * 1 / factor))\n hesse_lines = [np.array(hesse_normal_form(endpoints_line)) for endpoints_line in filtered_lines]\n return hesse_lines\n\nif __name__ == '__main__':\n import os\n import time\n count = 0\n selecting = 0\n totalstart = time.time()\n for filename in os.listdir('../symbols/'):\n if filename.endswith('.png'):\n diagram = cv2.imread('../symbols/'+filename)\n gray = cv2.cvtColor(diagram, cv2.COLOR_BGR2GRAY)\n gray = remove_text(gray)\n old_lines = get_filtered_lines(gray)\n new_lines = get_filtered_lines(gray, 'cluster')\n cv2.imshow('new lines', draw_lines(diagram, new_lines))\n cv2.imshow('old lines', draw_lines(diagram.copy(), old_lines))\n cv2.waitKey()\n # plt.show()\n cv2.destroyAllWindows()\n print(f'files done: {count}\\r')\n print(filename)\n\n","repo_name":"NikhilSDate/FastGDPJEI","sub_path":"diagram_parser/line_detecter.py","file_name":"line_detecter.py","file_ext":"py","file_size_in_byte":12668,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23643183681","text":"def num_to_list(A):\n result = []\n while A > 0:\n digit = A % 10\n A = A // 10\n result.append(digit)\n result.reverse()\n return result\n\n\ndef list_to_num(l):\n result = 0\n for x in l:\n result *= 10\n result += x\n return result\n\ndef shuffle(l):\n sz = len(l);\n return [ l[i:] + l[:i] for i in range(sz)]\n\ndef recycle_set(n):\n l = num_to_list(n);\n ll= shuffle(l);\n return map(list_to_num,ll)\n\n \n\ndef solve(A,B):\n N = [0] * (B-A + 1)\n comp = 1\n s = 0\n\n for i in range(A,B+1):\n idx = i - A\n if N[idx] != 0:\n continue\n else:\n nums = { x for x in recycle_set(i) if x >= A and x<= B }\n for x in nums:\n assert N[x-A] == 0;\n N[x-A] = comp;\n sz = len(nums)\n s += (sz * (sz - 1)) // 2\n return s;\n\n\nif __name__ == \"__main__\":\n T = int(input());\n for c in range(T):\n (A,B) = [ int(a) for a in input().strip().split() ]\n R = solve(A,B)\n print(\"Case #{}: {}\".format(c+1,R))\n\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_97/1320.py","file_name":"1320.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27629712513","text":"def first_non_repeat(str):\n str = list(str)\n dic1 = {}\n print(str)\n\n for key in str:\n if key not in dic1:\n dic1[key] = 1\n\n else:\n print(\"repeat: \" , key)\n dic1[key] += 1\n print(dic1)\n\n for index in range(len(str)):\n if dic1[str[index]] == 1:\n return index, str[index]\n\n\n\n \"\"\"\n for key, value in dic1.items():\n if value == 1:\n return key\n \"\"\"\n\nstr = \"NZETSOENTST\"\nprint(first_non_repeat(str))\n","repo_name":"Byouree/Python_code","sub_path":"021_firt_non_repeat.py","file_name":"021_firt_non_repeat.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38025411189","text":"seasons = ['spring', 'summer', 'fall', 'winter']\n\nfor s in seasons:\n print(s)\n\n#dictionary + dictionary 는 순서를 갖지 않는다.\nages = {\n 'Ron' : 16,\n 'Harry' : 18,\n 'Hermione' : 28\n}\nprint(ages)\n\nfor name in ages.keys(): #we can find value using key\n print(name, \"'s age is\",ages[name])\n# = for name in ages = 같은 결과\n\nfor age in ages.values(): #we can't find key this way\n print(age)\n\nfor name, age in ages.items():\n print(name, 
age)\n","repo_name":"esther4599/Python01_Starting_Python","sub_path":"10.딕셔너리와튜플/03.dict_and_for.py","file_name":"03.dict_and_for.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9886246555","text":"'''\nWrite a function to find the longest common prefix string\namongst an array of strings.\n\nIf there is no common prefix, return an empty string \"\".\n\nExample 1:\n\nInput: strs = [\"flower\",\"flow\",\"flight\"]\nOutput: \"fl\"\nExample 2:\n\nInput: strs = [\"dog\",\"racecar\",\"car\"]\nOutput: \"\"\nExplanation: There is no common prefix among the input strings.\n\n\nConstraints:\n\n1 <= strs.length <= 200\n0 <= strs[i].length <= 200\nstrs[i] consists of only lowercase English letters.\n'''\n\nstrs = [\"ab\",\"a\"]\n\ndef longestCommonPrefix(strs):\n \"\"\"\n :type strs: List[str]\n :rtype: str\n \"\"\"\n\n Output = \"\"\n dict = {}\n\n # If there is no input there is no Output\n if len(strs) < 0 :\n return Output\n # if length is 1 then that is the answer\n if len(strs) == 1 :\n Output += strs[0]\n return Output\n # for every letter in the first word\n for i in range(len(strs[0])):\n # keep track of which word iterating through\n ind = len(strs) - 1\n # while there are words to iterate through\n while ind > 0 :\n # try to compare them\n try:\n # if they are the same, check the next word\n if strs[ind][i] == strs[0][i]:\n ind -= 1\n # any other case - return collected result\n else:\n return Output\n # if ran out of index - one of words is not long enough\n except IndexError :\n # therefore return collected result\n return Output\n # if while loop went through for all words add letter to the Output\n if ind == 0 :\n Output += strs[ind][i]\n # if all is checked and it didn't crash - return Output, all words the same\n return Output\n\nprint(longestCommonPrefix(strs))\n\n# How Andris would do this\ndef andrisMethod(strs):\n result = \"\"\n if (len(strs) == 1):\n return strs[0]\n\n # for letter index in first word\n for i in range(len(strs[0])):\n # for each word in list\n for j in range(len(strs)):\n # if first word index is larger than length of word we are\n # looking at, OR letter of first word is not equal to l of this w\n if i > len(strs[j]) or strs[0][i] != strs[j][i]:\n # return collected result\n return result\n # otherwose, add the letter to result\n result += strs[0][i]\n # if ran through all of the words return collected wors (all the same)\n return result\n","repo_name":"Zzanetiite/Leetcode","sub_path":"Longest_Common_Prefix.py","file_name":"Longest_Common_Prefix.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7753743732","text":"from glob import glob\r\nfrom agent import Agent\r\nfrom agentType import AgentType\r\nfrom agentMode import AgentMode\r\nfrom demand import Demand\r\nfrom supply import Supply\r\nfrom activityType import ActivityType\r\n\r\n\r\nclass Simulator:\r\n \r\n def __init__(self):\r\n pass\r\n \r\n self._globalTime = 0\r\n self._timeLimit = 600\r\n self._agents = []\r\n self._supplies = []\r\n self._demands = []\r\n\r\n s1 = Supply('supply1', ActivityType.WORK, 20)\r\n self._supplies.append(s1)\r\n\r\n s2 = Supply('supply2', ActivityType.COMM, 30)\r\n self._supplies.append(s2)\r\n print(self._supplies)\r\n\r\n d1 = Demand('demand1', ActivityType.WORK, 20, 20)\r\n self._demands.append(d1) \r\n\r\n d2 = Demand('demand2', ActivityType.COMM, 30, 
40)\r\n self._demands.append(d2) \r\n print(self._demands)\r\n\r\n a1 = Agent(\"a1\", AgentMode.WAITING, 0, self._supplies, self._demands)\r\n self._agents.append(a1)\r\n\r\n a2 = Agent(\"a2\", AgentMode.WAITING, 0, self._supplies, self._demands)\r\n self._agents.append(a2)\r\n\r\n\r\n\r\n def globalStep(self, agents):\r\n # find the agent with the earliest agent time\r\n\r\n nextAgent = None\r\n\r\n earliestAgent = agents[0]\r\n for a in agents:\r\n # nextAgent=(nextAgent==null || agent.getAgentTime() < nextAgent.getAgentTime()) ? agent : nextAgent;\r\n if a.getAgentTime() < earliestAgent.getAgentTime():\r\n nextAgent = a\r\n else:\r\n nextAgent = earliestAgent\r\n\r\n self._globalTime = nextAgent.getAgentTime()\r\n\r\n nextAgent.step() \r\n return nextAgent\r\n\r\n def run(self):\r\n while self._globalTime < self._timeLimit:\r\n self.globalStep(self._agents)\r\n print(\"Global Time: \", self._globalTime)\r\n\r\n for a in self._agents:\r\n print(a.getName(), \"Cumlative Work Effort: \", a._effortWorkCumulative)\r\n print(a.getName(), \"Cumlative Comm Effort: \", a._effortCommCumulative)\r\n\r\n\r\n'''\r\n def globalStep(self):\r\n nextAgent = None\r\n for a in self.agents:\r\n nextAgent \r\n\r\n\r\n def reset(self):\r\n globalTime = 0\r\n'''","repo_name":"nhallock/em426","sub_path":"ABM/simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70025178434","text":"import os\nimport numpy as np\nimport pickle\n\nfrom functionsUtilities.utils import serial_sampling,scaling\nfrom functionsDOE.blackboxes import blackbox_COVID_SIM_UI\nfrom functionsDOE.blackbox_CovidSim import blackbox_CovidSim\n\n#==============================================================================#\n# Main execution\nif __name__ == '__main__':\n\n #===================================================================#\n \n # Model variables\n bounds = np.array([[ 16 , 101 ], # number of essential workers\n [ 0.0001, 0.15 ], # Social distancing factor\n [ 10 , 51 ]]) # Testing capacity\n\n run = 0 # starting point\n\n #===================================================================#\n # trail points with CovidSim and COVID_SIM_UI\n\n ###################### UNITED KINGDOM ######################\n # bounds_CovidSim = np.array([[ 1.0 , 0.0 ], # compliance rate (inversely proportional to number of essential workers)\n # [ 3.0 , 0.05 ], # Contact rate given Social distancing (inversely proportional to Social distancing factor)\n # [ 0.1 , 0.9 ]]) # Testing capacity\n\n # opts_CovidSim = np.array([ [0.500000000 , 0.500000000 , 0.500000000 ],\n # [0.433109873 , 0.684745762 , 0.993190799 ], # United Kingdom\n # [0.1164501187, 0.524594992 , 0.9041672301], # United Kingdom\n # [0.3039501187, 0.441967874 , 0.6541672301]]) # United Kingdom\n\n ########################## CANADA ##########################\n # bounds_CovidSim = np.array([[ 1.0 , 0.9 ], # compliance rate (inversely proportional to number of essential workers)\n # [ 5.0 , 1.0 ], # Contact rate given Social distancing (inversely proportional to Social distancing factor)\n # [ 0.1 , 0.9 ]]) # Testing capacity\n\n # opts_CovidSim = np.array([ [0.5000000000, 0.5000000000, 0.5000000000],\n # [0.3762299506, 0.1908116385, 0.4284971336], # Canada R1\n # [0.9218750000, 0.3437500000, 0.4375000000], # Canada R2\n # [0.5425358063, 0.1014413537, 0.9212206925], # Canada R3\n # [0.9158407368, 0.3206616619, 0.2405757265], # Canada R4\n # 
[0.2491510932, 0.1319933987, 0.9961497223], # Canada R5\n # [0.9696505299, 0.3888367454, 0.4676578774]]) # Canada R6\n\n # with different bounds\n # bounds_CovidSim = np.array([[ 1.0 , 0.9 ], # compliance rate (inversely proportional to number of essential workers)\n # [ 6.243926142 , 1.0 ], # Contact rate given Social distancing (inversely proportional to Social distancing factor)\n # [ 0.1 , 0.9 ]]) # Testing capacity\n\n # opts_CovidSim = np.array([ [0.5000000000, 0.618606375 , 0.5000000000],\n # [0.3762299506, 0.382761435 , 0.4284971336], # Canada R1\n # [0.9218750000, 0.499420867 , 0.4375000000], # Canada R2\n # [0.5425358063, 0.314590921 , 0.9212206925], # Canada R3\n # [0.9158407368, 0.481809377 , 0.2405757265], # Canada R4\n # [0.2491510932, 0.337895631 , 0.9961497223], # Canada R5\n # [0.9696505299, 0.533812461 , 0.4676578774]]) # Canada R6\n\n # with different bounds\n bounds_CovidSim = np.array([[ 1.0 , 0.9 ], # compliance rate (inversely proportional to number of essential workers)\n [ 7.0 , 1.0 ], # Contact rate given Social distancing (inversely proportional to Social distancing factor)\n [ 0.1 , 0.9 ]]) # Testing capacity\n\n opts_CovidSim = np.array([ [0.5000000000, 0.666666667 , 0.5000000000],\n [0.3762299506, 0.460541092 , 0.4284971336], # Canada R1\n [0.9218750000, 0.562500000 , 0.4375000000], # Canada R2\n [0.5425358063, 0.400960902 , 0.9212206925], # Canada R3\n [0.9158407368, 0.547107775 , 0.2405757265], # Canada R4\n [0.2491510932, 0.421328932 , 0.9961497223], # Canada R5\n [0.9696505299, 0.592557830 , 0.4676578774]]) # Canada R6\n\n # with different bounds\n # bounds_CovidSim = np.array([[ 1.0 , 0.9 ], # compliance rate (inversely proportional to number of essential workers)\n # [ 8.0 , 1.0 ], # Contact rate given Social distancing (inversely proportional to Social distancing factor)\n # [ 0.1 , 0.9 ]]) # Testing capacity\n\n # opts_CovidSim = np.array([ [0.5000000000, 0.714285714 , 0.5000000000],\n # [0.3762299506, 0.537606651 , 0.4284971336], # Canada R1\n # [0.9218750000, 0.625000000 , 0.4375000000], # Canada R2\n # [0.5425358063, 0.486537916 , 0.9212206925], # Canada R3\n # [0.9158407368, 0.611806664 , 0.2405757265], # Canada R4\n # [0.2491510932, 0.503996228 , 0.9961497223], # Canada R5\n # [0.9696505299, 0.650763855 , 0.4676578774]]) # Canada R6\n\n # # with different bounds\n # bounds_CovidSim = np.array([[ 1.0 , 0.9 ], # compliance rate (inversely proportional to number of essential workers)\n # [ 10.0 , 1.0 ], # Contact rate given Social distancing (inversely proportional to Social distancing factor)\n # [ 0.1 , 0.9 ]]) # Testing capacity\n\n # opts_CovidSim = np.array([ [0.5000000000, 0.777777777 , 0.5000000000],\n # [0.3762299506, 0.640360728 , 0.4284971336], # Canada R1\n # [0.9218750000, 0.708333333 , 0.4375000000], # Canada R2\n # [0.5425358063, 0.600640601 , 0.9212206925], # Canada R3\n # [0.9158407368, 0.698071849 , 0.2405757265], # Canada R4\n # [0.2491510932, 0.614219288 , 0.9961497223], # Canada R5\n # [0.9696505299, 0.728371886 , 0.4676578774]]) # Canada R6\n\n # Algorithmic settings (index corresponds to array index)\n solution_dict = {\n 0 : { \"index\" : 1, \"n_k\" : 1, \"epsilon_f\" : 0.01, \"algo\" : \"StoMADS-PB\", \"n_cores\" : 16, \"group\" : 0},\n 1 : { \"index\" : 3, \"n_k\" : 1, \"epsilon_f\" : 0.01, \"algo\" : \"StoMADS-PB\", \"n_cores\" : 8, \"group\" : 0},\n 3 : { \"index\" : 5, \"n_k\" : 2, \"epsilon_f\" : 0.01, \"algo\" : \"StoMADS-PB\", \"n_cores\" : 8, \"group\" : 1},\n 2 : { \"index\" : 4, \"n_k\" : 2, \"epsilon_f\" : 0.01, 
\"algo\" : \"StoMADS-PB\", \"n_cores\" : 16, \"group\" : 1},\n 5 : { \"index\" : 6, \"n_k\" : 3, \"epsilon_f\" : 0.01, \"algo\" : \"StoMADS-PB\", \"n_cores\" : 16, \"group\" : 2},\n 4 : { \"index\" : 2, \"n_k\" : 4, \"epsilon_f\" : 0.01, \"algo\" : \"StoMADS-PB\", \"n_cores\" : 16, \"group\" : 3},\n }\n\n # CovidSim\n opts_unscaled_CovidSim = scaling(opts_CovidSim, bounds_CovidSim[:3,0], bounds_CovidSim[:3,1], 2)\n\n i = 0\n for point in opts_unscaled_CovidSim:\n print('point #%i: Compliance = %f, Contact_rate = %f, Testing_capacity = %f' %(i+1,point[0],point[1],point[2])); i+=1\n\n # Points to plot\n lob_var_CovidSim = bounds_CovidSim[:,0] # lower bounds\n upb_var_CovidSim = bounds_CovidSim[:,1] # upper bounds\n\n # save optimization points in LHS format file\n with open('data/points_opts_CovidSim.pkl','wb') as fid:\n pickle.dump(lob_var_CovidSim, fid)\n pickle.dump(upb_var_CovidSim, fid)\n pickle.dump(opts_CovidSim, fid)\n pickle.dump(opts_unscaled_CovidSim, fid)\n pickle.dump(solution_dict, fid)\n\n # COVID_SIM_UI\n opts_unscaled = scaling(opts_CovidSim, bounds[:3,0], bounds[:3,1], 2)\n \n i = 0\n for point in opts_unscaled:\n print('point #%i: E = %f, S_D = %f, T = %f' %(i+1,point[0],point[1],point[2])); i+=1\n\n # Points to plot\n lob_var = bounds[:,0] # lower bounds\n upb_var = bounds[:,1] # upper bounds\n\n with open('data/points_opts.pkl','wb') as fid:\n pickle.dump(lob_var, fid)\n pickle.dump(upb_var, fid)\n pickle.dump(opts_CovidSim, fid)\n pickle.dump(opts_unscaled, fid)\n\n #===================================================================#\n n_samples = 10 # <<-------------------------- Edit the number of observations\n new_run = True\n\n #===================================================================#\n # Initialize\n if new_run:\n # New MCS\n run = 0\n \n #============== INITIALIZE WORKING DIRECTORY ===================#\n current_path = os.getcwd()\n job_dir = os.path.join(current_path,'data')\n for f in os.listdir(job_dir):\n dirname = os.path.join(job_dir, f)\n if dirname.endswith(\".log\"):\n os.remove(dirname)\n\n # Resume MCS\n # run = 6\n # opts_unscaled = opts_unscaled[run:]\n # opts_unscaled_CovidSim = opts_unscaled_CovidSim[run:]\n\n # terminate MCS\n # run = 1\n # run_end = 1 + 1\n # opts_unscaled = opts_unscaled[run:run_end]\n # opts_unscaled_CovidSim = opts_unscaled_CovidSim[run:run_end]\n\n for point,point_CovidSim in zip(opts_unscaled,opts_unscaled_CovidSim):\n\n # Model variables\n # COVID_SIM_UI\n n_violators = round(point[0])\n SD = point[1]\n test_capacity = round(point[2])\n\n # CovidSim\n Compliance = point_CovidSim[0]\n Contact_rate = point_CovidSim[1]\n Testing_capacity = point_CovidSim[2]\n\n # Model parameters\n healthcare_capacity = 90\n healthcare_capacity_CovidSim = 0.09\n country = \"Canada\"\n pop_size_CovidSim = 36460098\n pop_size = 1000\n time_shift = 0\n\n #=====================================================================#\n # Design variables (COVID_SIM_UI)\n design_variables = [n_violators, SD, test_capacity]\n args = [design_variables] * n_samples # repeat variables by number of samples\n\n # Design variables (CovidSim)\n design_variables = [Compliance, Contact_rate, Testing_capacity]\n args_CovidSim = [design_variables] * n_samples # repeat variables by number of samples\n\n #=====================================================================#\n # Empty result lists\n infected_i = []; fatalities_i = []; GC_i = []; distance_i = []\n process_I = []; process_F = []; process_R = []; process_M = []; process_R0 = []\n\n 
infected_CovidSim = []; fatalities_CovidSim = []; process_I_CovidSim = []; process_F_CovidSim = []\n process_R_CovidSim = []; process_S_CovidSim = []; process_Critical_CovidSim = []\n\n # Blackbox set-up\n params_COVID_SIM_UI = [healthcare_capacity]\n params_CovidSim = [run, pop_size_CovidSim, healthcare_capacity_CovidSim, country]\n\n output_file_base = 'MCS_data_r%i' %run\n # return_process = False\n return_process = True\n params = [run, output_file_base, pop_size, time_shift, params_COVID_SIM_UI, return_process]\n\n ################################################################\n # Serial sampling of blackbox (less intense + return stochastic disease profiles)\n results = serial_sampling(args,params,blackbox_COVID_SIM_UI)\n\n # Read results\n for result in results: \n [infected, fatalities, mean_GC, mean_distance,I,F,R,M,run_data_R0] = result\n\n infected_i += [infected]\n fatalities_i += [fatalities]\n GC_i += [mean_GC]\n distance_i += [mean_distance]\n\n process_I += [I]\n process_F += [F]\n process_R += [R]\n process_M += [M]\n process_R0 += [run_data_R0]\n #################################################################\n results = serial_sampling(args_CovidSim,params_CovidSim,blackbox_CovidSim)\n\n # Read results\n for result in results: \n [infected, fatalities, I, F, R, S, Critical] = result\n\n infected_CovidSim += [infected]\n fatalities_CovidSim += [fatalities]\n\n process_I_CovidSim += [I]\n process_F_CovidSim += [F]\n process_R_CovidSim += [R]\n process_S_CovidSim += [S]\n process_Critical_CovidSim += [Critical]\n\n #################################################################\n # wipe log files\n for f in os.listdir(job_dir):\n dirname = os.path.join(job_dir, f)\n if dirname.endswith(\".log\"):\n os.remove(dirname)\n\n with open('data/MCS_process_data_r%i.pkl' %run,'wb') as fid:\n pickle.dump(process_I,fid)\n pickle.dump(process_F,fid)\n pickle.dump(process_R,fid)\n pickle.dump(process_M,fid)\n pickle.dump(process_R0,fid)\n\n with open('data/MCS_data_r%i.pkl' %run,'wb') as fid:\n pickle.dump(infected_i,fid)\n pickle.dump(fatalities_i,fid)\n pickle.dump(GC_i,fid)\n pickle.dump(distance_i,fid)\n\n with open('data/MCS_process_data_CovidSim_r%i.pkl' %run,'wb') as fid: \n pickle.dump(process_I_CovidSim,fid)\n pickle.dump(process_F_CovidSim,fid)\n pickle.dump(process_R_CovidSim,fid)\n pickle.dump(process_S_CovidSim,fid)\n pickle.dump(process_Critical_CovidSim,fid)\n\n with open('data/MCS_data_CovidSim_r%i.pkl' %run,'wb') as fid:\n pickle.dump(infected_CovidSim,fid)\n pickle.dump(fatalities_CovidSim,fid)\n \n run += 1\n continue","repo_name":"khbalhandawi/COVID_SIM_GPU","sub_path":"post/MCS_model_points_CovidSim.py","file_name":"MCS_model_points_CovidSim.py","file_ext":"py","file_size_in_byte":14877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71404413633","text":"import discord\r\nfrom discord.ext import commands\r\nfrom googletrans import Translator\r\nfrom discord.commands import Option\r\nimport os\r\nfrom dotenv import load_dotenv\r\nfrom languages import languages\r\n\r\n\r\nload_dotenv()\r\ntranslator = Translator()\r\nintents = discord.Intents.all()\r\nbot = commands.Bot(command_prefix= \">\", help_command = None,auto_sync_commands= True, intents = intents)\r\nTOKEN = os.getenv(\"TOKEN\")\r\n\r\nasync def get_key(val, dictionary:dict):\r\n\tfor key, value in dictionary.items():\r\n\t\t\tif val == value:\r\n\t\t\t\treturn key\r\n\treturn \"invalid 
key\"\r\n\r\n#-----------------------------------------------------------------------------------------------------------------------\r\n@bot.message_command(name = \"Translate\")\r\nasync def translate_command(self, ctx, message:discord.Message):\r\n\toutput = translator.translate(f\"{str(message.content)}\", dest = \"en\")\r\n\tdata = output.text\r\n\tawait ctx.respond(f\"{data}\", ephemeral = True)\r\n\r\n@bot.slash_command(name = \"language\",description = \"Translate your english message to the preferred languages\")\r\nasync def translate_command(\r\n\tctx,\r\n\tlang : Option(str, \"The language you wish to translate the message in\"),\r\n\ttext : Option(str, \"The text you wish to translate.\")\r\n):\r\n\tlanguage = await get_key(lang, languages)\r\n\tif language == \"invalid key\":\r\n\t\treturn await ctx.respond(\"This language is invalid. Please refer the documentation to find the valid languages.\", ephemeral = True)\r\n\toutput = translator.translate(text, dest = language)\r\n\tdata = output.text\r\n\tawait ctx.respond(f\"{data}\", ephemeral = True)\r\n\r\n@bot.slash_command(name = \"translate\",description = \"Translate your english message to the preferred languages\")\r\nasync def translate_command(\r\n\tctx,\r\n\ttext : Option(str, \"The text you wish to translate.\")\r\n):\r\n\toutput = translator.translate(text, dest = \"en\")\r\n\tdata = output.text\r\n\tawait ctx.respond(f\"{data}\", ephemeral = True)\r\n\r\nbot.run(TOKEN)\r\n","repo_name":"cannonballchris/witchlyutils","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26824686907","text":"import turtle\r\n\r\nturtle.bgcolor('black') #cor de fundo\r\nturtle.color('#783c00') #cor da linha\r\nturtle.shape('turtle') #escolhe o ícone que faz o desenho da tela\r\n\r\n\r\nturtle.setup(640,480)\r\nturtle.speed('fastest')\r\n\r\nfor y in range(-200,201,20):\r\n turtle.penup()\r\n turtle.goto(-300,y)\r\n turtle.pendown()\r\n turtle.forward(600)\r\n turtle.left(90)\r\nfor x in range(-300,301,20):\r\n turtle.penup()\r\n turtle.goto(x,-200)\r\n turtle.pendown()\r\n turtle.forward(400)","repo_name":"Raquelsantos242/turtle","sub_path":"turtle6.py","file_name":"turtle6.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39203227625","text":"#!/usr/bin/env python3\n\"\"\"\nPhoe*NIX Post Exploitation Framework\n\n\"\"\"\nimport os,sys\n# append base path to sys.path\nrunpath = os.path.dirname(os.path.realpath(__file__))\napproot = os.path.abspath(os.path.join(runpath, os.pardir))\nsys.path.append(os.path.join(runpath,'..'))\nsys.path.append(approot)\n\nimport lib.modload as modload\nimport lib.shm as shm\nimport lib.dcfind as dc\nimport lib.livedebug as debug\nfrom lib.helpmenu import HelpMenu\nimport lib.sessionhandler as sh\nimport lib.phoenixlibs as plib\n\n\n# Variables\nprompt_string = 'phoenix > ' # The Phoenix default prompt string\nrun_app = True # Handle the main thread to run if False the main will end\nlist_result = []\n# The array which contains the paths of modules\nmodule_paths = ['modules','listeners']\ndcf = dc.dead_connection_finder()\n\n\n# Loading Phoenix associated modules\ndef load_modules():\n global module_paths\n \n loader = modload.ModuleLoader() # Create a module loader\n \n for p in module_paths:\n path = os.path.join(os.getcwd(),p) # Create a joined path for actual 
path\n        loader.load_modules(path) # load the modules\n\n    shm.loaded_modules = loader.get_modules() # Set the loaded modules into SHM (Shared Memory)\n\n# Interpret the given command to make the application interactive.\ndef command_interpreter(command):\n    global run_app\n\n    if command.startswith(\"list\"): # list modules\n        listable = command.split(' ',1)\n        if len(listable) == 2:\n            if listable[1] in ['modules','m']:\n                list_type('module')\n            elif listable[1] in ['listeners','l']:\n                list_type('listener')\n            elif listable[1] in ['sessions','s']:\n                list_sessions()\n        elif len(listable) < 2:\n            print(\"[!] Invalid Argument.\")\n\n    elif command.startswith(\"use\"): # using a module\n        cmd_split = command.split(' ',1)\n        if len(cmd_split) == 2:\n            name = cmd_split[1]\n            use_module(name)\n        elif len(cmd_split) < 2:\n            print(\"[!] Not enough parameters\")\n    elif command == \"help\": # show the help\n        show_help()\n    elif command.startswith(\"info\"): # show info for a specified module\n        name = command.split(' ',1)\n        if len(name) == 2:\n            show_module_info(name[1])\n        elif len(name) < 2:\n            print(\"[!] Not enough parameters\")\n    elif command.startswith(\"search\"): # search in modules\n        pass\n    elif command == \"exit\": # exit from the application\n        dcf.stop()\n        run_app = False\n\n        print(\"Exiting.. Bye!\")\n    elif command == \"sessions\":\n        list_sessions()\n    elif command.startswith('interact'):\n        a = command.split(' ',1)\n        if len(a) == 2:\n            interact(a[1])\n        elif len(a) < 2:\n            print(\"[!] Missing parameter [session_name].\")\n    elif command == 'debug':\n        debug.debugger()\n    elif command.startswith('run'):\n        r = command.split(' ',1)\n        if len(r) > 1:\n            module_parts = r[1].split(' ',1)\n            if len(module_parts) > 1:\n                module_name = module_parts[0]\n                module_args = module_parts[1]\n                rm = plib.RunModule(module_name)\n                rm.run(module_args)\n            else:\n                module_name = module_parts[0]\n                rm = plib.RunModule(module_name)\n                rm.run()\n        else:\n            print(\"[!] The run command needs at least 1 argument\")\n\ndef interact(session_name):\n    not_found = True\n    if is_number(session_name):\n        num = int(session_name)\n        # index must be strictly less than the list length to be valid\n        if num < len(shm.connected_clients) and len(shm.connected_clients) > 0:\n            s = shm.connected_clients[num]\n            shm.current_session = s\n            handler = sh.sessionHandler(s)\n            handler.interactive()\n        else:\n            print(f\"[!] {str(num)} is not a valid index!\")\n    else:\n        for s in shm.connected_clients:\n            if s.name == session_name:\n                not_found = False\n                shm.current_session = s\n                handler = sh.sessionHandler(s)\n                handler.interactive()\n        if not_found:\n            print(f\"[!] No session found with name {session_name}!\")\n\ndef list_sessions():\n    i = 0\n    print(\"\\nAvailable Sessions\\n------------------\\n\")\n    for s in shm.connected_clients:\n        sockname = s.client.getsockname()\n        localaddress = s.address\n        print(f\"{str(i)}.\\t{s.name}\\t{s.shelltype.name}\\t{sockname[0]}:{str(sockname[1])} => {localaddress[0]}:{str(localaddress[1])}\")\n        i += 1\n    print()\n\n\ndef use_module(modname):\n    global list_result\n    is_result = False\n    mod_name = ''\n    mod_type = ''\n\n    # get the module name and type from its number\n    if is_number(modname):\n        n = int(modname) # the actual number\n        if n < len(list_result):\n            if len(list_result) > 0:\n                res = list_result[n]\n                mod_name = res['name']\n                mod_type = res['type']\n                is_result = True\n            else:\n                print(f\"[!] No item with index {str(n)}\")\n        else:\n            print(f\"[!] 
{str(n)} is not a valid index number\")\n    else:\n        for r in list_result:\n            if r['name'] == modname or r['id'] == modname:\n                res = r\n                mod_name = res['name']\n                mod_type = res['type']\n                is_result = True\n\n    if not is_result:\n        m = get_module_by_name(modname)\n        if m:\n            m.interactive()\n        else:\n            m = get_module_by_id(modname)\n            if m:\n                m.interactive()\n            else:\n                print('[!] Not found.')\n    else:\n        m = get_module_by_name(mod_name)\n        if m:\n            m.interactive()\n        else:\n            m = get_module_by_id(mod_name)\n            if m:\n                m.interactive()\n            else:\n                print('[!] Not found.')\n\n# Show the help menu\ndef show_help():\n\n    h = HelpMenu()\n    h.inner_heading_row_border = False\n    h.title = \"Phoenix Help Menu\"\n    h.add_item('list [type]','List all available [modules, listeners, sessions]')\n    h.add_item('use','Use a selected module')\n    h.add_item('info','Shows info for a specified module')\n    h.add_item('sessions [name/number]','Show active sessions')\n    h.add_item('interact [name/number]','Interact with active session')\n    h.add_item('help','Shows this menu')\n    h.add_item('exit','Exit from the application')\n    h.print_help()\n\n\n\n# Show the info for a module\ndef show_module_info(module_name):\n\n    if is_number(module_name):\n        num = int(module_name)\n        # index must be strictly less than the list length to be valid\n        if num < len(shm.loaded_modules):\n            module = shm.loaded_modules[num]\n            module.show_info()\n        else:\n            print(\"[!] The given number is not valid!\")\n    else: # This means the given argument is a string\n        module = get_module_by_name(module_name)\n        if module:\n            module.show_info()\n        else:\n            print(f\"[!] Module with name: '{module_name}' does not exist!\")\n\n# returns a module if the name exists\ndef get_module_by_name(module_name):\n    for m in shm.loaded_modules:\n        if m.name == module_name:\n            return m\n\ndef get_module_by_id(module_id):\n    for m in shm.loaded_modules:\n        if m.module_id == module_id:\n            return m\ndef list_type(type_name):\n    global list_result\n    i = 0\n    list_result.clear()\n    h = HelpMenu()\n    h.title=f\"\\nAvailable {type_name}s\\n-----------------\\n\"\n\n    for m in shm.loaded_modules:\n        if m.module_type == type_name:\n            list_result.append({'name':m.name,'id':m.module_id,'number':i,'type':type_name})\n            h.add_item(str(i)+'.',m.module_id,m.name)\n            #print(f\"{str(i)}. 
{m.module_id}\\t{m.name}\")\n i += 1\n h.print_help()\n print(\"\\n\")\ndef __get_autocomplete_names():\n lista = ['list','modules','sessions','interact']\n if len(shm.connected_clients) > 0:\n for s in shm.connected_clients:\n lista.append(s.name)\n return lista\n# Check if an input is number or not\ndef is_number(cmd):\n try:\n num = int(cmd)\n return True\n except:\n return False\n \n\ndef main():\n global run_app, dcf\n \n load_modules() # Loading framework modules\n dcf.start()\n \n shm.tab_complete.createListCompleter(__get_autocomplete_names())\n shm.readline.set_completer(shm.tab_complete.listCompleter) \n \n while run_app:\n cmd = input(prompt_string)\n cmd = cmd.rstrip(' ')\n command_interpreter(cmd)\n \n \nif __name__ == '__main__':\n main()","repo_name":"IsaPeter/PythonProjects","sub_path":"phoenix/phoenix.py","file_name":"phoenix.py","file_ext":"py","file_size_in_byte":8783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37018129929","text":"# def posiciones_de(a: str, b: str, index: int): \n# print(index)\n# try:\n# if index == len(a):\n# return a.index(b)\n# else:\n# index = a.index(b, index)\n# posiciones.append(index)\n# return posiciones_de(a, b, index + 1)\n# except:\n# print(\"Fin\")\n\ndef posiciones_de_beta(a: str, b: str):\n print(a)\n try:\n if len(a) == 0:\n return\n else:\n index = a.index(b)\n if len(posiciones)==0:\n posiciones.append(index)\n else:\n posiciones.append(posiciones[len(posiciones) - 1]+index+1)\n return posiciones_de_beta(a[index+1:], b)\n except:\n print(\"Fin Programa\")\n\nindex = 0\nposiciones = [] \ncadena= input(\"INGRESE TEXTO : \")\nbusca=input(\"INGRESE LETRA A BUSCAR: \")\n# posiciones_de(cadena, busca, index)\nposiciones_de_beta(cadena, busca)\nprint(posiciones)\n\n# Wilmots Ortiz\n# def buscar_pal(palabra,letra): \n# lista=[] \n# for posicion,caracter in enumerate(palabra): \n# if(caracter==letra): \n# lista.append(posicion) \n# print(lista) \n\n# palabra=input(\"Ingrese la frase: \") \n# letra=input(\"busque la letra: \") \n# buscar_pal(palabra,letra)\n\n# Edgar Espinoza\n# def busqueda_palabra(a,b):\n# lista = []\n# numero = 0\n# for i in range (len(a)):\n# if a[i]==b:\n# lista.append(i)\n# print (lista)\n\n# a= str(input(\"Ingrese el texto : \"))\n# b= str(input(\"Ingrese la palabra : \"))\n# busqueda_palabra(a,b)\n\n# Diana Moran\n# def posiciones_de(cadena, silaba, idx):\n# y = len(silaba)\n# x = cadena.find(silaba)\n# idx.append(x)\n# nueva_cadena = cadena.replace(silaba, y*'-', 1)\n# # print(nueva_cadena)\n# if x == -1:\n# print(idx[:-1])\n# return(idx[:-1])\n# else:\n# posiciones_de(nueva_cadena, silaba, idx)\n\n# idx = []\n# palabra=input(\"Ingrese la frase: \") \n# letra=input(\"busque la letra: \") \n# posiciones_de(palabra, letra, idx)\n\n# Elías Tagle\n# lista = []\n\n# def posiciones_de(a,b):\n# for i in range(len(a)):\n# if a[i] == b[0] and a[i+1] == b[1]:\n# lista.append(i)\n# return lista\n\n# palabra=input(\"Ingrese la frase: \") \n# letra=input(\"busque la letra: \") \n# print(posiciones_de(palabra, letra))\n\n# Brad Solorzano\n# palabra = input(\"Ingrese Frase: \")\n# palabra2 = input(\"Ingrese palabra a buscar: \")\n# lst = []\n\n# def pala():\n# for pos,char in enumerate(palabra):\n# if(char == palabra2):\n# lst.append(pos)\n# print(lst)\n\n# pala()\n\n# Madeleine Sanchez\n# cadena= input(\"INGRESE TEXTO : \")\n# busca=input(\"INGRESE LETRA A BUSCAR: \")\n# # devolvemos con len la longitud del texto que hemos escrito\n# # Método 2, con índice\n# for 
indice in range(len(cadena)):\n# caracter = cadena[indice]\n# if caracter ==busca:\n# print(\"En el índice\",indice,\" tenemos a '\",caracter,\"'\")","repo_name":"cpulachev8/ed-ipae-sj-pi","sub_path":"semana15/ejercicio2.py","file_name":"ejercicio2.py","file_ext":"py","file_size_in_byte":2950,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13375668883","text":"#!/usr/bin/python3\n\nimport subprocess\nimport re\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport csv\nimport commons\nimport statistics\nfrom matplotlib.patches import Patch\n\n\nfname_output = \"../data/real-dataset-amd-output.csv\"\nr_path = 'data/imdb-name-basics.tsv'\ns_path = 'data/imdb-title-principals.tsv'\n\n\ndef run_join(mode, alg, threads, reps):\n\n f = open(fname_output, 'a')\n\n throughput_array = []\n throughput = ''\n print(\"Run=\" + commons.PROG + \" mode=\" + mode + \" alg=\" + alg + \" threads=\" + str(threads))\n for i in range(reps):\n stdout = subprocess.check_output(commons.PROG + \" -a \" + alg + \" -n \" + str(threads)\n + \" --r-path \" + r_path + \" --s-path \" + s_path, cwd=\"../../\",\n shell=True).decode('utf-8')\n print(str(i+1) + '/' + str(reps) + ': ' +\n mode + \",\" + alg + \",\" + str(threads))\n for line in stdout.splitlines():\n # find throughput\n if \"Throughput\" in line:\n throughput = re.findall(\"\\d+\\.\\d+\", line)[1]\n throughput_array.append(float(throughput))\n\n print('Throughput = ' + str(throughput) + ' M [rec/s]')\n\n throughput = statistics.mean(throughput_array)\n s = (mode + \",\" + alg + \",\" + str(threads) + \",\" + str(round(throughput, 2)))\n f.write(s + '\\n')\n f.close()\n\n\ndef plot_throughput():\n plot_filename = \"../img/Figure-17-Throughput-with-IMDb-on-AMD-CPU\"\n csvf = open(fname_output, mode='r')\n csvr = csv.DictReader(csvf)\n all_data = list(csvr)\n algos = list(set(map(lambda x:x['alg'], all_data)))\n modes = sorted(set(map(lambda x:x['mode'], all_data)))\n width = 0.4\n to_modes = [[y for y in all_data if y['mode'] == x] for x in modes]\n\n # graph per dataset\n plt.rc('axes', axisbelow=True)\n plt.rcParams.update({'font.size': 15})\n fig = plt.figure(figsize=(5,4))\n plt.clf()\n to_modes = [[y for y in all_data if y['mode'] == x] for x in modes]\n for m in range(0, len(modes)):\n plt.gca().yaxis.grid(linestyle='dashed')\n if m == 0:\n br = np.arange(len(algos))\n br = [x - 0.2 for x in br]\n else:\n br = [x + width for x in br]\n\n label = modes[m]\n hatch = '\\\\\\\\' if modes[m] == 'AMD' else ''\n to_modes[m] = sorted(to_modes[m], key=lambda x:x['alg'])\n colors = list(map(lambda x: commons.color_alg(x['alg']), to_modes[m]))\n throughputs = list(map(lambda x: float(x['throughput']), to_modes[m]))\n plt.bar(br, throughputs,width=width, label=label, hatch=hatch,\n color=colors, edgecolor='black')\n for x, y in zip(br, list(map(lambda x: float(x['throughput']), to_modes[m]))):\n if y < 5:\n plt.text(x-0.15, y+6, str(y), rotation=90)\n plt.xlabel(\"Join algorithm\")\n plt.ylabel(\"Throughput [M rec/s]\")\n\n # plt.ylim([0, 220])\n plt.xticks(np.arange(len(to_modes[m])), list(map(lambda x:x['alg'],to_modes[m])),\n rotation=45)\n # plt.title(\"IMDb dataset\")\n # plt.gca().yaxis.grid(linestyle='dashed')\n ax1 = plt.gca()\n ax1.yaxis.grid(linestyle='dashed')\n ax2 = ax1.twinx()\n # ax2.set_yticks([0])\n ax2.tick_params(axis='y', colors='white')\n ax2.set_ylabel(' ')\n legend_elements = [#Patch(label='Hatches:', alpha=0),\n Patch(facecolor='white', edgecolor='black',\n hatch='\\\\\\\\', 
label='AMD'),\n Patch(facecolor='white', edgecolor='black',\n label='AMD-SEV')]\n fig.legend(handles=legend_elements, ncol=3, frameon=False,\n bbox_to_anchor=(0.15,0.91,1,0), loc=\"lower left\",\n handletextpad=0.5)\n # plt.annotate('Hatches:',xy=(170, 850), xycoords='figure pixels')\n commons.savefig(plot_filename + \".png\", tight_layout=True)\n\n\nif __name__ == '__main__':\n # reps = 5\n # threads = 4\n # modes = ['native']\n\n # commons.remove_file(fname_output)\n # commons.init_file(fname_output, \"mode,alg,threads,throughput\\n\")\n #\n # for mode in modes:\n # commons.compile_app(mode)\n # for alg in commons.get_all_algorithms_extended():\n # run_join(mode, alg, threads, reps)\n\n plot_throughput()\n","repo_name":"agora-ecosystem/tee-bench","sub_path":"scripts/helpers/real-dataset-amd-experiment.py","file_name":"real-dataset-amd-experiment.py","file_ext":"py","file_size_in_byte":4319,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"61"} +{"seq_id":"39927555922","text":"#!/usr/bin/env python3\n\"\"\"\nVery simple HTTP server in Python for logging requests\nUsage::\n ./server.py [<port>]\n\"\"\"\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nimport ssl\nimport logging\nimport re\nimport os\n\nclass HTTPRequestHandler(BaseHTTPRequestHandler):\n\n # to debug the request:\n # def handle_one_request(self):\n # while True:\n # self.raw_requestline = self.rfile.readline(65537)\n # print(self.raw_requestline)\n\n def _set_response(self):\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n\n def do_GET(self):\n logging.info(\"GET request,\\nPath: %s\\nHeaders:\\n%s\\n\", str(self.path), str(self.headers))\n self._set_response()\n self.wfile.write(\"GET request for {}\".format(self.path).encode('utf-8'))\n\n def do_POST(self):\n content_length = int(self.headers['Content-Length'])\n r, info = self._handle_post_data()\n print(r)\n print(info)\n\n logging.info(\"POST request,\\nPath: %s\\nHeaders:\\n%s\\n\\n\",\n str(self.path), str(self.headers))\n\n self._set_response()\n self.wfile.write(\"POST request for {}\".format(self.path).encode('utf-8'))\n\n # https://gist.github.com/touilleMan/eb02ea40b93e52604938\n def _handle_post_data(self):\n content_type = self.headers['content-type']\n if not content_type:\n return (False, \"Content-Type header doesn't contain boundary\")\n boundary = content_type.split(\"=\")[1].encode()\n remainbytes = int(self.headers['content-length'])\n line = self.rfile.readline()\n remainbytes -= len(line)\n if boundary not in line:\n return (False, \"Content does NOT begin with boundary\")\n line = self.rfile.readline()\n remainbytes -= len(line)\n fn = re.findall(r'Content-Disposition.*name=\"file\"; filename=\"(.*)\"', line.decode())\n if not fn:\n return (False, \"Can't find the file name...\")\n path = \"/tmp\"\n fn = os.path.join(path, fn[0])\n line = self.rfile.readline()\n remainbytes -= len(line)\n line = self.rfile.readline()\n remainbytes -= len(line)\n try:\n out = open(fn, 'wb')\n except IOError:\n return (False, \"Can't create file to write, do you have permission to write?\")\n\n preline = self.rfile.readline()\n remainbytes -= len(preline)\n while remainbytes > 0:\n line = self.rfile.readline()\n remainbytes -= len(line)\n if boundary in line:\n preline = preline[0:-1]\n if preline.endswith(b'\\r'):\n preline = preline[0:-1]\n out.write(preline)\n out.close()\n return (True, \"File '%s' uploaded successfully!\" % fn)\n else:\n out.write(preline)\n preline = line\n 
return (False, \"Unexpect Ends of data.\")\n\ndef run(server_class=HTTPServer, handler_class=HTTPRequestHandler, port=8080):\n logging.basicConfig(level=logging.DEBUG)\n server_address = ('', port)\n httpd = server_class(server_address, handler_class)\n\n # httpd.socket = ssl.wrap_socket(httpd.socket, certfile='./server.pem', server_side=True)\n\n logging.info('Starting httpd...\\n')\n try:\n httpd.serve_forever()\n except KeyboardInterrupt:\n pass\n httpd.server_close()\n logging.info('Stopping httpd...\\n')\n\nif __name__ == '__main__':\n from sys import argv\n\n if len(argv) == 2:\n run(port=int(argv[1]))\n else:\n run()\n","repo_name":"aptly-io/esp-birdhouse-peeper","sub_path":"http_server.py","file_name":"http_server.py","file_ext":"py","file_size_in_byte":3604,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"16760872424","text":"\"\"\"instagram URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/4.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom postupload import views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('index/',views.IndexView.as_view(),name=\"index\"),\n path('register/',views.Signupview.as_view(),name=\"registration\"),\n path('',views.LoginView.as_view(),name=\"signin\"),\n path('addpost/',views.PostuploadView.as_view(),name=\"addpost\"),\n path('post//',views.PostDetailsView.as_view(),name='addcomment'),\n path('post/comment/add//',views.addcomment_view,name='addcmnts'),\n path('post//like/',views.like_view,name='like'),\n path('image/',views.ProfileView.as_view(),name='images'),\n path('search/',views.SearchbarView.as_view(),name='searchbar'),\n path('accounts/logout/',views.signout_view,name='signout'),\n path(\"post//remove\",views.remove_post,name='remove-post') \n\n]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"midhuntr/pythondjangoproject","sub_path":"instagram/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28951123841","text":"# Libraries\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\nfrom textblob import TextBlob\nfrom datetime import datetime\nimport os\nimport sys\nimport stanza\nimport pandas as pd\n\n#----------------------------------------------------------------------------------------------------------------#\n# Utils functions\n\n# Function to clean an specific character (expand if you need to clean more)\ndef clean_txt(string):\n string = string.replace(\" -- \", \", \")\n return string\n\n\n\n#----------------------------------------------------------------------------------------------------------------#\n# Functions to call the models\n\n# Stanza\ndef stanza_fn (string, j, max_j):\n now = datetime.now()\n time = 
now.strftime(\"%H:%M:%S\")\n if j == 0:\n print(f\"Stanza starts working at {time}\")\n print(f\"\\rStanza working in speech {j} at {time}\", end=\"\") # This reprint the line in the same space\n\n nlp = stanza.Pipeline('en', processors='tokenize, mwt, pos, lemma, depparse,sentiment',\n use_gpu=False, verbose=False, pos_batch_size=3000) \n doc = nlp(string)\n doc_sent = []\n for i, sentence in enumerate(doc.sentences): \n doc_sent.append(sentence.sentiment)\n result = (sum(doc_sent)/len(doc_sent)) - 1 # Change the reference\n\n time = now.strftime(\"%H:%M:%S\")\n if j == max_j:\n print(f\"\\nStanza finished working at {time}\")\n\n return result # [0 negative, 1 neutral, 2 positive] Now -1 negative, 0 neutral, 1 positive\n\n\n# TextBlob (now with subjectivity parameter)\ndef textblob_fn (string, j, max_j, var=\"polr\"): # Change polr to subj to get the subjectivity of the text\n now = datetime.now()\n time = now.strftime(\"%H:%M:%S\")\n if j == 0:\n print(f\"TextBlob starts working at {time}\")\n print(f\"\\rTextBlob working in speech {j} at {time}\", end=\"\") # This reprint the line in the same space\n\n if var==\"polr\":\n tb_speech = TextBlob(string)\n result = round(tb_speech.polarity, 3)\n elif var== \"subj\":\n tb_speech = TextBlob(string)\n result = round(tb_speech.subjectivity, 3)\n else:\n print(\"Check spelling. Only polr or subj\")\n result = np.nan\n \n time = now.strftime(\"%H:%M:%S\")\n if j == max_j:\n print(f\"\\nTextBlob finished working at {time}\")\n\n return result # -1 negative, 1 positive\n\n\n# Vader\ndef vader_fn (string, j, max_j):\n now = datetime.now()\n time = now.strftime(\"%H:%M:%S\")\n if j == 0:\n print(f\"Vader starts working at {time}\")\n print(f\"\\rVader working in speech {j} at {time}\", end=\"\") # This reprint the line in the same space\n\n analyser = SentimentIntensityAnalyzer()\n score = analyser.polarity_scores(string)\n result = score[\"compound\"] # Author says that is the main statistic you need to see (-1 negative, 1 positive, between -0.05 and 0.05 neutral)\n\n time = now.strftime(\"%H:%M:%S\")\n if j == max_j:\n print(f\"\\nVader finished working at {time}\")\n\n return result\n","repo_name":"jfsalcedo10/mda-kuwait","sub_path":"components/scripts/sentiment_analysis.py","file_name":"sentiment_analysis.py","file_ext":"py","file_size_in_byte":3140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1786387489","text":"import unittest\nfrom os.path import join\nfrom utils import SuperTestCase\nfrom exe.engine.package import Package\nfrom exe.engine.config import Config\nfrom exe.engine.packagestore import PackageStore\nfrom exe.engine.node import Node\nfrom exe.engine.genericidevice import GenericIdevice\nfrom exe.engine.path import Path\n\n\n# ===========================================================================\nclass TestPackage(SuperTestCase):\n\n\n def testCreatePackage(self):\n package = self.package\n self.assert_(package)\n self.assert_(package.name)\n \n\n def testSaveAndLoad(self):\n packageStore = PackageStore()\n package = packageStore.createPackage()\n # Check that it has been given a default name\n self.assertEquals(package.name, \"newPackage\")\n package.author = \"UoA\"\n package.description = \"Nice test package\"\n Config._getConfigPathOptions = lambda s: ['exe.conf']\n config = Config()\n filePath = config.dataDir/'package1.elp'\n package.save(filePath)\n \n package1 = Package.load(filePath)\n self.assert_(package1)\n self.assertEquals(package1.author, 
\"UoA\")\n self.assertEquals(package1.description, \"Nice test package\")\n # Package name should have been set when it was saved\n self.assertEquals(package.name, \"package1\")\n self.assertEquals(package1.name, \"package1\")\n \n\n def testfindNode(self):\n package = self.package\n node1 = package.root.createChild()\n self.assertEquals(package.findNode(node1.id), node1)\n \n\n def testLevelName(self):\n package = self.package\n package._levelNames = [\"Month\", \"Week\", \"Day\"]\n self.assertEquals(package.levelName(0), \"Month\")\n self.assertEquals(package.levelName(1), \"Week\")\n self.assertEquals(package.levelName(2), \"Day\")\n\n\n def _testNodeIds(self):\n package = self.package\n assert package._nextNodeId == 1, package._nextNodeId\n assert package.findNode(package.root.id) is package.root\n newNode = Node(package, package.root)\n assert package.findNode('123') is None\n assert package.findNode(newNode.id) is newNode\n # Save the package\n package.name = 'testing'\n package.save('testing.elp')\n # load the package\n package2 = package.load('testing.elp')\n def checkInst(inst1, inst2):\n d1 = inst1.__dict__\n d2 = inst2.__dict__\n for key, val in d1.items():\n val2 = d2.get(key)\n if key == 'parentNode' and isinstance(val, Node):\n assert val2.title.title == val.title.title\n elif key == 'package':\n assert val is package\n assert val2 is package2\n elif isinstance(val, list):\n assert len(val) == len(val2)\n for i, i2 in zip(val, val2):\n if isinstance(i, basestring):\n assert (i == i2, \n '%s.%s: [%s/%s]' % \n (inst1.__class__.__name__, key, i2, i))\n else:\n checkInst(i, i2)\n elif key == '_nodeIdDict' and isinstance(val, dict):\n assert len(val) == len(val2)\n for nodeName in val:\n assert val2.has_key(nodeName)\n elif isinstance(val, Node):\n pass\n elif key in Package.nonpersistant:\n # Non persistent should exist after load\n # but not be the same\n assert d2.has_key(key)\n elif key == 'dublinCore':\n checkInst(val, val2)\n else:\n # Everything else must match\n self.assertEquals(val, val2)\n assert val == val2, '%s.%s: %s/%s' % (inst1.__class__.__name__, key, val2, val)\n checkInst(package, package2)\n\n def testExtract(self): \n \"\"\"\n Extracts a node of a package\n \"\"\"\n package = self.package.load('testing/extractionTestPackage.elp')\n if package is None:\n self.fail('extractionTestPackage.elp doesn\\'t exist')\n # Select the first child of the first node\n package.currentNode = package.root.children[0]\n # Perform the extraction\n newPackage = package.extractNode()\n # Compare the packages\n assert newPackage.title == package.currentNode.title\n for checksum in newPackage.resources.keys():\n reses1 = newPackage.resources[checksum]\n reses2 = package.resources[checksum]\n for res1, res2 in zip(reses1, reses2):\n self.assertEqual(res1.storageName, res2.storageName)\n assert res1.checksum == res2.checksum == checksum\n # Walk the node tree's in both packages to compare them (and collect references to resources)\n nodes1 = [package.currentNode] + list(package.currentNode.walkDescendants())\n nodes2 = [newPackage.root] + list(newPackage.root.walkDescendants())\n allResources = []\n for node1, node2 in zip(nodes1, nodes2):\n for idevice1, idevice2 in zip(node1.idevices, node2.idevices):\n if isinstance(idevice1, GenericIdevice):\n self.assertEquals(idevice1.nextFieldId, idevice2.nextFieldId)\n allResources += idevice1.userResources\n self.assertEqual(idevice1.title, idevice2.title)\n self.assertEqual([res.checksum for res in idevice1.userResources], [res.checksum for 
res in idevice2.userResources])\n # Copy's resources should be the same as all the resources we just collected\n newPackageResourceKeys = set(newPackage.resources.keys())\n self.failUnlessEqual(newPackageResourceKeys, set([res.checksum for res in allResources]))\n self.failUnless(newPackageResourceKeys < set(package.resources.keys()))\n\n\n \nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"exelearning/iteexe","sub_path":"testing/testpackage.py","file_name":"testpackage.py","file_ext":"py","file_size_in_byte":6230,"program_lang":"python","lang":"en","doc_type":"code","stars":116,"dataset":"github-code","pt":"61"} +{"seq_id":"19749720598","text":"import flectra.addons.decimal_precision as dp\nfrom flectra import api, fields, models\nfrom flectra.exceptions import ValidationError\nfrom flectra.tools.translate import _\n\n\nclass UserCostingFunction(models.Model):\n _name = \"user.costing.function\"\n _description = \"Costing Price per User\"\n _rec_name = \"user_id\"\n\n user_id = fields.Many2one(\"res.users\", string=\"User\", required=True)\n cost = fields.Float('Cost Price',\n digits=dp.get_precision('Product Price'))\n product_id = fields.Many2one(\"product.product\", string=\"Service\")\n account_id = fields.Many2one(\"account.analytic.account\",\n string=\"Analytic Account\")\n uom_id = fields.Many2one(related='product_id.uom_id',\n string=\"Unit of Measure\")\n\n @api.onchange('product_id')\n def onchange_product_id(self):\n if self.user_id:\n emp_id = self.env['hr.employee'].search(\n [('user_id', '=', self.user_id.id)], limit=1)\n prod = emp_id.product_id or self.product_id\n self.cost = prod.list_price or 0.0\n self.uom_id = prod.uom_id.id or False\n\n\nclass AccountAnalyticAccount(models.Model):\n _inherit = \"account.analytic.account\"\n\n user_costing_ids = fields.One2many('user.costing.function', 'account_id',\n string='Users/Products Rel.', copy=True)\n\n\nclass AccountAnalyticLine(models.Model):\n _inherit = \"account.analytic.line\"\n\n def _get_price(self, account, product_id, user_id, qty):\n res = super(AccountAnalyticLine, self)._get_price(\n account, product_id, user_id, qty)\n if not account.user_costing_ids:\n return res\n for user_costing_id in account.user_costing_ids:\n if user_costing_id.user_id.id == user_id:\n return user_costing_id.cost\n return res\n\n @api.v8\n @api.onchange('product_id', 'product_uom_id', 'unit_amount', 'currency_id')\n def on_change_unit_amount(self):\n result = super(AccountAnalyticLine, self).on_change_unit_amount()\n if not self.move_id and not self.task_id:\n unit = self.product_uom_id\n price = self.product_id.price_compute(\n 'standard_price', uom=unit)\n currency = \\\n self.company_id.currency_id or \\\n self.account_id.company_id.currency_id or False\n amount_unit = price and price[self.product_id.id] or 0.0\n for user_costing_id in self.account_id.user_costing_ids:\n if user_costing_id.user_id.id == self.user_id.id:\n amount_unit = user_costing_id.cost\n self.amount = currency and currency.round(\n (amount_unit * self.unit_amount) or 0.0) * -1\n return result\n\n def _get_recursive_user_account(self, user_id, account_id):\n user_account = self.env['user.costing.function'].search(\n [('user_id', '=', user_id.id), ('account_id', '=', account_id.id)])\n if user_account:\n return user_account\n else:\n if account_id.parent_id:\n return self._get_recursive_user_account(\n user_id, account_id.parent_id.id)\n return False\n\n @api.multi\n def set_account_details(self, product):\n self.product_id = product.id\n 
account_expense = \\\n product.property_account_expense_id.id or \\\n product.categ_id.property_account_expense_categ_id.id\n if not account_expense:\n raise ValidationError(_(\n 'Warning!\\nPlease define expense account for'\n 'product: \"%s\" (id:%d)') % (product.name, product.id,))\n if self.unit_amount:\n self.on_change_unit_amount()\n self.general_account_id = account_expense\n\n @api.onchange('user_id')\n def on_user_id_change(self):\n if self.account_id:\n user_account = self._get_recursive_user_account(\n self.user_id, self.account_id)\n if user_account:\n product = user_account.product_id\n self.set_account_details(product)\n\n @api.onchange('account_id')\n def onchange_account_id(self):\n if self.account_id:\n if not self.user_id:\n return super(AccountAnalyticLine, self).onchange_account_id()\n user_account = self._get_recursive_user_account(\n self.user_id, self.account_id)\n if not user_account:\n return super(AccountAnalyticLine, self).onchange_account_id()\n else:\n product = user_account.product_id\n self.product_uom_id = product.uom_id.id\n self.set_account_details(product)\n","repo_name":"gagaboy/odoo10_plus","sub_path":"addons/user_costing_function/models/user_costing_function.py","file_name":"user_costing_function.py","file_ext":"py","file_size_in_byte":4774,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"18694280326","text":"import pytest\n\nfrom astropy.time import Time, TimeDelta\nfrom astropy import units\nfrom oem.tle import tle_to_oem\n\n\nSAMPLE_TLE = (\n '1 25544U 98067A 19343.69339541 .00001764 00000-0 38792-4 0 9991',\n '2 25544 51.6439 211.2001 0007417 17.6667 85.6398 15.50103472202482'\n)\n\n\n@pytest.mark.parametrize(\"frame\", (\"TEME\", \"ICRF\"))\ndef test_sample(frame):\n start_epoch = Time(\"2019-12-09T20:42:09.000\", scale=\"utc\")\n stop_epoch = start_epoch + TimeDelta(1*units.day)\n oem = tle_to_oem(SAMPLE_TLE, start_epoch, stop_epoch, 3600, frame=frame)\n assert len(oem._segments) == 1\n\n\n@pytest.mark.parametrize(\"frame\", (\"TEME\", \"ICRF\"))\ndef test_convert_and_compare(frame):\n start_epoch = Time(\"2019-12-09T20:42:09.000\", scale=\"utc\")\n stop_epoch = start_epoch + TimeDelta(1*units.day)\n origin = tle_to_oem(SAMPLE_TLE, start_epoch, stop_epoch, 600, frame=frame)\n target = tle_to_oem(SAMPLE_TLE, *origin.span, 600, frame=frame)\n compare = target - origin\n assert not compare.is_empty\n for compare in compare.steps(3600):\n assert compare.range == 0 and compare.range_rate == 0\n\n\ndef test_bad_frame():\n start_epoch = Time(\"2019-12-09T20:42:09.000\", scale=\"utc\")\n stop_epoch = start_epoch + TimeDelta(1*units.day)\n with pytest.raises(ValueError):\n tle_to_oem(SAMPLE_TLE, start_epoch, stop_epoch, 3600, frame=\"aBcDe\")\n\n\ndef test_bad_tle():\n start_epoch = Time(\"2019-12-09T20:42:09.000\", scale=\"utc\")\n stop_epoch = start_epoch + TimeDelta(1*units.day)\n with pytest.raises(ValueError):\n tle_to_oem([\"\", \"\"], start_epoch, stop_epoch, 3600)\n","repo_name":"bradsease/oem","sub_path":"tests/test_tle.py","file_name":"test_tle.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"61"} +{"seq_id":"35668295939","text":"import tkinter as tk\nfrom tkinter.font import BOLD\n\nclass temas_ayuda:\n \n def __init__ (self):\n self.ventana = tk.Tk()\n self.configurar_ventana()\n tk.Wm.title(self.ventana, f\"Temas de ayuda\")\n self.ventana.mainloop()\n \n def configurar_ventana(self):\n 
# set the window size\n self.ventana.geometry(\"420x370\")\n # create the labels\n uni = tk.Label(self.ventana, text=\"Universidad de San Carlos de Guatemala\", font=(\"Arial\", 12, BOLD))\n uni.place(x = 50, y=50)\n curso = tk.Label(self.ventana, text=\"Lab. Lenguajes Formales y de Programación\", font=(\"Arial\", 12, BOLD))\n curso.place(x = 40, y=75)\n nombre = tk.Label(self.ventana, text=\"Nombre: Aldo Saúl Vásquez Moreira\", font=(\"Arial\", 12, BOLD))\n nombre.place(x = 70, y=100)\n carnet = tk.Label(self.ventana, text=\"Carnet: 202109754\", font=(\"Arial\", 12, BOLD))\n carnet.place(x = 130, y=125)\n # change the background color\n self.ventana.configure(bg=\"#8FEBD6\")","repo_name":"aldomoreira165/-LFP-Proyecto1_202109754","sub_path":"temas_ayuda.py","file_name":"temas_ayuda.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30708449725","text":"#!/usr/bin/env python3\n#!/usr/bin/python3\n# Evaluation of number of clusters for different timesteps in an xyz file.\n\nfrom fnc import *\n#from gui import *\nppo = 9\nbox = 30\nconc = 0.05\nframeskip = 5\nflag_hist = 1\nflag_ave = 1\nflag_pbc = 0\ninterval = 250000\nRHO \t\t\t= 3\nFRAME_COLLECTION \t= 500\nPLURCHAIN\t\t= 15\ntimecounter \t\t= []\nCheckCluster \t\t= []\ncheck_status \t\t= 0\n\nimport sklearn\nimport numpy as np\nfrom sklearn import cluster\nfrom sklearn.cluster import DBSCAN\nfrom sklearn import metrics\nfrom sklearn.datasets.samples_generator import make_blobs\nfrom sklearn.preprocessing import StandardScaler\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport errno\nimport subprocess\nimport os\nimport traceback\nimport sys\nimport pylab as pl\nfrom scipy.spatial.distance import pdist, squareform\nimport scipy\nfrom collections import Counter\nsys.tracebacklimit = None \nfilename\t= \"video.xyz\"\ntimes \t\t= [TimeStep([],[],[],[],[],[]) for _ in range(5000)] \nprint (\"I am collecting coordinates\")\nxBeads \t= box*box*box*RHO*conc/PLURCHAIN\nBeads \t= xBeads * ppo\ncoordinates = []\n\nxyz = open(filename,\"r\")\ncoordinate_counter = 0\nfor line in xyz:\n try:\n atom, x, y, z \t= line.split()\n index \t\t= int (coordinate_counter / int(Beads))\n times[index].add_atom(atom, x, y, z)\n coordinate_counter += 1\n except ValueError: \n pass\nxyz.close()\nbox_norm = 0\n\nprint (\"I am creating clusters and plotting ...\")\nfor j in range (1400,index+1,frameskip):\n\tcoordinates \t\t= np.column_stack((times[j].xCoord,times[j].yCoord,times[j].zCoord))\n\tcoordinates_norm \t= coordinates/box\n\tdistance_matrix = [[0 for x in range(len(coordinates[0]))]for y in range (len(coordinates[0]))]\n\tif flag_pbc == 1:\t\n\t\tbox_norm = box / box\n\t\tdistance_matrix = pbc_distance_matrix(coordinates, box)\n\t\tdb = DBSCAN(eps =1.0, metric='precomputed').fit(distance_matrix)\n\t\tcore_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n\t\tcore_samples_mask[db.core_sample_indices_] = True\n\t\tlabels = db.labels_\t\n\n\telse:\n\t\tbox_norm = box / box\n\t\tdb = DBSCAN(eps=1.0,min_samples=5).fit(coordinates)\t\t\t\n\t\tcore_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n\t\tcore_samples_mask[db.core_sample_indices_] = True\n\t\tlabels = db.labels_\n\tcurrent_clusters = len(set(labels))\n\tn_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\n\timport matplotlib.pyplot as plt\n\tfig = plt.figure()\n\n\tif flag_hist == 1:\n\t\tax = 
fig.add_subplot(121, projection ='3d')\t\t\n\t\tbx = fig.add_subplot(122)\t\n\telse:\t\n\t\tax = fig.add_subplot(111, projection='3d')\n\tunique_labels = set(labels)\n\tcolors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))\n\tfor k, col in zip(unique_labels, colors):\n \t\tif k == -1:\n \t\t\tcol = 'k' # black is used for noise points\n\n \t\tclass_member_mask = (labels == k)\n \t\txy = coordinates[class_member_mask & core_samples_mask]\n\t\t\n \t\tax.plot(xy[:, 0], xy[:, 1],xy[:,2],'o', markerfacecolor=col,\n\t\t\tmarkeredgecolor='k', markersize=14)\n \t\ttimes[j].beadHisto.append(len(xy[:,0]))\n \t\tif times[j].beadHisto != 0:\t\t\t\t\t\t\t\n \t\t\ttimes[j].chainHisto.append(round((len(xy[:,0]) / ppo)))\t\n \t\tax.plot(xy[:, 0], xy[:, 1],xy[:,2],'o', markerfacecolor=col,\n\t\t\tmarkeredgecolor='k', markersize=6)\n \t\tcheck_status = check_status + (len(xy[:,0]))\n\tif flag_hist == 1:\t\n\t\tnormalized_bin = len(times[j].chainHisto)\n\t\tbx.hist(times[j].chainHisto, histtype='stepfilled')\n\tCheckCluster.append(n_clusters_)\n\ttimecounter.append(j*FRAME_COLLECTION)\n\tplt.ylim((0,30))\n\tplt.xlim((0,30))\n\t#plt.zlim((0,30))\t\t\n\tplt.title('Estimated number of clusters: %d' % n_clusters_)\n\tplt.savefig('timestep_'+str((1+j)*FRAME_COLLECTION)+'.png')\n\tplt.close()\n\tif (j >= (index-frameskip)):\t\n\t\tfig_2 = plt.figure()\n\t\tcx = fig_2.add_subplot(111)\n\t\tcx.plot(timecounter,CheckCluster,'o',markersize=6,ls='--')\n\t\tplt.title('#Cluster Vs Time')\n\t\tplt.xlabel('Timestep')\n\t\tplt.ylabel('#Cluster')\t\t\n\t\tplt.savefig('cluster_'+str(j*500)+'.png')\n\t\tplt.close()\nprint (\"End of the Cluster Analysis\")\nprint (\"Starting Histogram Averaging\")\nif (flag_ave == 1 ):\n\taverage_histograms_in_time(times, 500000, 990000,frameskip,FRAME_COLLECTION)\n#\taverage_histograms_in_time(times, 500000, 700000,frameskip,FRAME_COLLECTION)\n#\taverage_histograms_in_time(times, 850000, 1050000,frameskip,FRAME_COLLECTION)\nprint (\"Check Your Results in the 'Results' Folder\")\n\n\n\n","repo_name":"hermessc/DPDCFD","sub_path":"cluster/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4390286028","text":"#!/usr/bin/python3\n\"\"\"module save to json\"\"\"\nimport json\n\n\ndef save_to_json_file(my_obj, filename):\n \"\"\"saves myobj to file\"\"\"\n with open(filename, 'w') as f:\n json_data = json.dumps(my_obj)\n f.write(json_data)\n","repo_name":"sayyid211/alx-higher_level_programming_0","sub_path":"0x0B-python-input_output/5-save_to_json_file.py","file_name":"5-save_to_json_file.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38688896933","text":"from scipy.misc import toimage\nimport os\nimport pickle\nimport config\nimport tools\nimport numpy as np\n\npickle_dir = config.lidar_pickles_path\n# sequences = [\"00\", \"01\", \"02\", \"03\", \"04\", \"05\", \"06\", \"07\", \"08\", \"09\", \"10\"]\nsequences = [\"00\"]\n\nfor seq in sequences:\n cnt_intensity = 0\n cnt_range = 0\n cnt_mask = 0\n\n with (open(pickle_dir + seq + \"_range.pik\", \"rb\")) as opfile:\n while True:\n try:\n cur_image = pickle.load(opfile)\n curr_image = cur_image\n toimage(cur_image).save(tools.ensure_file_dir_exists(\n os.path.join(pickle_dir, \"%s_viz\" % seq, \"range\", str(cnt_range) + \".png\")))\n cnt_range += 1\n except EOFError:\n break\n\n with (open(pickle_dir + seq + \"_intensity.pik\", 
\"rb\")) as opfile:\n while True:\n try:\n cur_image = pickle.load(opfile)\n toimage(cur_image).save(tools.ensure_file_dir_exists(\n os.path.join(pickle_dir, \"%s_viz\" % seq, \"intensity\", str(cnt_intensity) + \".png\")))\n cnt_intensity += 1\n except EOFError:\n break\n\n with (open(pickle_dir + seq + \"_mask.pik\", \"rb\")) as opfile:\n while True:\n try:\n cur_image = pickle.load(opfile)\n cur_image = cur_image.astype(np.uint8)\n cur_image *= 255\n toimage(cur_image).save(tools.ensure_file_dir_exists(\n os.path.join(pickle_dir, \"%s_viz\" % seq, \"mask\", str(cnt_mask) + \".png\")))\n cnt_mask += 1\n except EOFError:\n break\n","repo_name":"lichunshang/end_to_end_odometry","sub_path":"scraps/lidar_playback.py","file_name":"lidar_playback.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"35938568309","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom keras.layers import Dense, Conv1D, MaxPool1D, Flatten\nfrom keras.models import Sequential\nfrom keras.utils.vis_utils import plot_model\nfrom sklearn import preprocessing\nfrom sklearn.preprocessing import LabelBinarizer\nfrom sklearn.preprocessing import StandardScaler\nfrom tensorflow.keras.callbacks import ModelCheckpoint\n\nfrom densenet.classifiers.one_d import DenseNet121\n\nmodel_name = \"\"\n\n\ndef VGG_custom():\n global model_name\n model_name = \"VGG\"\n\n model = Sequential()\n\n model.add(Conv1D(filters=64, kernel_size=3, padding=\"same\", activation=\"relu\", input_shape=(116, 1)))\n model.add(Conv1D(filters=64, kernel_size=3, padding=\"same\", activation=\"relu\"))\n model.add(MaxPool1D(pool_size=2, strides=2))\n\n model.add(Conv1D(filters=128, kernel_size=3, padding=\"same\", activation=\"relu\"))\n model.add(Conv1D(filters=128, kernel_size=3, padding=\"same\", activation=\"relu\"))\n model.add(MaxPool1D(pool_size=2, strides=2))\n\n model.add(Conv1D(filters=256, kernel_size=3, padding=\"same\", activation=\"relu\"))\n model.add(Conv1D(filters=256, kernel_size=3, padding=\"same\", activation=\"relu\"))\n model.add(MaxPool1D(pool_size=2, strides=2))\n\n model.add(Conv1D(filters=512, kernel_size=3, padding=\"same\", activation=\"relu\"))\n model.add(Conv1D(filters=512, kernel_size=3, padding=\"same\", activation=\"relu\"))\n model.add(MaxPool1D(pool_size=2, strides=2))\n\n model.add(Conv1D(filters=512, kernel_size=3, padding=\"same\", activation=\"relu\"))\n model.add(Conv1D(filters=512, kernel_size=3, padding=\"same\", activation=\"relu\"))\n model.add(MaxPool1D(pool_size=2, strides=2))\n\n model.add(Flatten())\n\n model.add(Dense(units=4096, activation=\"relu\"))\n model.add(Dense(units=4096, activation=\"relu\"))\n model.add(Dense(units=7, activation=\"softmax\"))\n\n model.compile(loss=\"categorical_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\n\n return model\n\n\ndef DenseNet_custom():\n global model_name\n model_name = \"DenseNet\"\n\n model = DenseNet121(input_shape=(116, 1), num_outputs=7)\n\n model.compile(loss=\"categorical_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\n\n return model\n\n\nif __name__ == \"__main__\":\n feature = [\"duration\", \"protocol_type\", \"service\", \"flag\", \"src_bytes\", \"dst_bytes\", \"land\", \"wrong_fragment\",\n \"urgent\",\n \"hot\",\n \"num_failed_logins\", \"logged_in\", \"num_compromised\", \"root_shell\", \"su_attempted\", \"num_root\",\n \"num_file_creations\", \"num_shells\",\n \"num_access_files\", 
\"num_outbound_cmds\", \"is_host_login\", \"is_guest_login\", \"count\", \"srv_count\",\n \"serror_rate\", \"srv_serror_rate\",\n \"rerror_rate\", \"srv_rerror_rate\", \"same_srv_rate\", \"diff_srv_rate\", \"srv_diff_host_rate\",\n \"dst_host_count\",\n \"dst_host_srv_count\",\n \"dst_host_same_srv_rate\", \"dst_host_diff_srv_rate\", \"dst_host_same_src_port_rate\",\n \"dst_host_srv_diff_host_rate\", \"dst_host_serror_rate\",\n \"dst_host_srv_serror_rate\", \"dst_host_rerror_rate\", \"dst_host_srv_rerror_rate\", \"label\", \"difficulty\"]\n\n train = \"./data/nsl-kdd/KDDTrain+.txt\"\n train_data = pd.read_csv(train, names=feature)\n train_data.drop([\"difficulty\"], axis=1, inplace=True)\n print(train_data[\"label\"].value_counts())\n\n s = [\"normal\", \"back\", \"land\", \"neptune\", \"pod\", \"smurf\", \"teardrop\", \"mailbomb\", \"processtable\", \"udpstorm\",\n \"apache2\", \"worm\"]\n train_data = train_data.loc[train_data[\"label\"].isin(s)]\n print(train_data[\"label\"].value_counts())\n\n multi_data = train_data.copy()\n multi_label = pd.DataFrame(multi_data.label)\n\n std_scaler = StandardScaler()\n\n numeric_col = multi_data.select_dtypes(include=\"number\").columns\n\n le2 = preprocessing.LabelEncoder()\n enc_label = multi_label.apply(le2.fit_transform)\n multi_data[\"intrusion\"] = enc_label\n\n multi_data.drop(labels=[\"label\"], axis=1, inplace=True)\n multi_data = pd.get_dummies(multi_data, columns=[\"protocol_type\", \"service\", \"flag\"], prefix=\"\", prefix_sep=\"\")\n y_train_multi = multi_data[[\"intrusion\"]]\n X_train_multi = multi_data.drop(labels=[\"intrusion\"], axis=1)\n X_train_multi = np.expand_dims(X_train_multi, 2)\n\n print(\"X_train has shape:\", X_train_multi.shape, \"\\ny_train has shape:\", y_train_multi.shape)\n\n y_train_multi = LabelBinarizer().fit_transform(y_train_multi)\n\n net = DenseNet_custom()\n # net = VGG_custom()\n net.summary()\n\n checkpointer = ModelCheckpoint(\n monitor=\"val_accuracy\",\n filepath=\"model/\" + model_name + \".h5\",\n verbose=1,\n save_best_only=True)\n\n plot_model(net, to_file=\"image/\" + model_name + \".png\", show_shapes=True, show_layer_names=True)\n\n history = net.fit(X_train_multi, y_train_multi, epochs=10, batch_size=512, validation_split=0.2,\n callbacks=[checkpointer])\n\n plt.plot(history.history[\"accuracy\"])\n plt.plot(history.history[\"val_accuracy\"])\n plt.title(\"Plot of accuracy vs epoch for train and test dataset\")\n plt.ylabel(\"accuracy\")\n plt.xlabel(\"epoch\")\n plt.legend([\"train\", \"test\"], loc=\"lower right\")\n plt.savefig(\"image/\" + model_name + \"_acc_plot.png\")\n plt.show()\n\n plt.plot(history.history[\"loss\"])\n plt.plot(history.history[\"val_loss\"])\n plt.title(\"Plot of loss vs epoch for train and test dataset\")\n plt.ylabel(\"loss\")\n plt.xlabel(\"epoch\")\n plt.legend([\"train\", \"test\"], loc=\"upper right\")\n plt.savefig(\"image/\" + model_name + \"_loss_plot.png\")\n plt.show()\n","repo_name":"cuongphamduc/DDoS-Classification","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5617,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"23412475831","text":"#!/usr/bin/env python\n\nimport argparse\nimport logging\nimport time\n\n\nlogging.basicConfig(format='%(asctime)s\t%(levelname)s\t%(message)s',\n datefmt='%Y%m%d %H:%M',\n level=logging.INFO)\n\n\ndef magic_trick(in_file, out_file):\n\n with open(in_file, 'r') as fin:\n with open(out_file, 'w') as fout:\n n_case = 
int(fin.readline().strip())\n\n for i_case in range(n_case):\n ans1 = int(fin.readline().strip())\n for i_row in range(4):\n row = fin.readline()\n if i_row + 1 == ans1:\n row1 = set(row.strip().split(' '))\n\n ans2 = int(fin.readline().strip())\n for i_row in range(4):\n row = fin.readline()\n if i_row + 1 == ans2:\n row2 = set(row.strip().split(' '))\n\n sols = row1.intersection(row2)\n n_sol = len(sols)\n if n_sol == 1:\n fout.write('Case #{}: {}\\n'.format(i_case + 1,\n list(sols)[0]))\n elif n_sol > 1:\n fout.write('Case #{}: Bad magician!\\n'.format(i_case + 1))\n else:\n fout.write('Case #{}: Volunteer cheated!\\n'.format(i_case + 1))\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--input-file', '-i', required=True, dest='infile')\n parser.add_argument('--output-file', '-o', required=True, dest='outfile')\n args = parser.parse_args()\n\n start = time.time()\n magic_trick(in_file=args.infile,\n out_file=args.outfile)\n\n logging.debug('finished ({:.2f} min elasped).'.format((time.time() - \n start) / 60.))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_135/2005.py","file_name":"2005.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44351511090","text":"import numpy as np\nimport math\n\ndef viterbi(initial, observation,transition,obseq):\n table = np.zeros(initial.shape[0]*obseq.shape[0]).reshape(initial.shape[0],obseq.shape[0])\n max_index = np.zeros(initial.shape[0]*obseq.shape[0]).reshape(initial.shape[0],obseq.shape[0])\n # print(initial.shape,table[:,0].shape)\n table[:,0]=np.add(np.log(initial),np.log(observation[:,int(obseq[0])].reshape(26,1))).reshape(26,)\n\n for obs in range(1,obseq.shape[0]):\n for c in range(0,initial.shape[0]):\n temp = np.add(table[:,obs-1],np.log(transition[:,c]))\n # print(temp)\n index = np.argmax(temp)\n # print (index)\n max_index[c][obs]=int(index)\n table[c][obs]=temp[index]+np.log(observation[c][int(obseq[obs])])\n\n result=[]\n init = np.argmax(table[:,-1])\n result.append(init)\n for i in range(0,max_index.shape[1]-1):\n # print(init,i)\n index=max_index[int(init)][-i-1]\n result.append(index)\n init=index\n\n return result\n","repo_name":"prabhatkumar95/sml-2017","sub_path":"SML3/viterbi.py","file_name":"viterbi.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39806637732","text":"import numpy as np\ntry:\n import d4rl\nexcept ImportError:\n print('No module named \"d4rl\" , and you can install in https://github.com/rail-berkeley/d4rl')\n\ntry:\n import d4rl_atari\nexcept ImportError:\n print('No module named \"d4rl_atari\" , and you can install in https://github.com/takuseno/d4rl-atari')\n\n\ndef get_d4rl_dataset(env, get_num=None) -> dict:\n \"\"\"\n d4rl dataset: https://github.com/rail-berkeley/d4rl\n install: pip install git+https://github.com/rail-berkeley/d4rl@master#egg=d4rl\n :param get_num: how many data get form dataset\n \"\"\"\n dataset = d4rl.qlearning_dataset(env)\n if get_num is None:\n data = dict(\n obs=dataset['observations'],\n acts=dataset['actions'],\n rews=dataset['rewards'],\n next_obs=dataset['next_observations'],\n done=dataset['terminals']\n )\n else:\n data_num = dataset['actions'].shape[0]\n ind = np.random.choice(data_num, size=get_num, replace=False)\n data = dict(\n obs=dataset['observations'][ind],\n 
acts=dataset['actions'][ind],\n rews=dataset['rewards'][ind],\n next_obs=dataset['next_observations'][ind],\n done=dataset['terminals'][ind]\n )\n\n return data\n\n\ndef get_d4rl_dataset_atari(env) -> dict:\n \"\"\"\n d4rl atari dataset: https://github.com/takuseno/d4rl-atari\n install: pip install git+https://github.com/takuseno/d4rl-atari\n \"\"\"\n dataset = env.get_dataset()\n data = dict(\n obs=dataset['observations'],\n acts=dataset['actions'],\n rews=dataset['rewards'],\n done=dataset['terminals']\n )\n\n return data\n","repo_name":"dragon-wang/RL_Algorithms","sub_path":"utils/data_tools.py","file_name":"data_tools.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"61"} +{"seq_id":"31028842175","text":"import scrapy\nfrom undetected_chromedriver import Chrome, ChromeOptions\nimport random\nimport time\nfrom sqlalchemy import create_engine, text\nfrom spiders.chrome_options_mixin import ChromeOptionsMixin\n\n\nclass GoogleshoppingpricescollectSpider(ChromeOptionsMixin, scrapy.Spider):\n name = \"GoogleShoppingPricesCollect\"\n allowed_domains = [\"shopping.google.com.br\"]\n start_urls = [\"https://shopping.google.com.br\"]\n\n def random_delay(self):\n return random.uniform(0.5, 3.0)\n\n def make_request(self, url, chrome_options):\n with Chrome(options=chrome_options) as driver:\n time.sleep(self.random_delay())\n driver.get(url)\n time.sleep(self.random_delay())\n\n \n def get_url_google(self, urls, chrome_options):\n for url in urls:\n self.make_request(url['url_google'], chrome_options)\n\n def parse(self, response):\n chrome_options = self.create_chrome_options()\n self.make_request(\"https://shopping.google.com.br/\", chrome_options)\n \n offset = 0\n limit = 10\n while True:\n urls = self.get_query_database(offset, limit)\n if not urls:\n break\n\n self.get_url_google(urls, chrome_options)\n offset += limit\n\n \n with Chrome(options=chrome_options) as driver:\n driver.quit()\n","repo_name":"guilhermedcorrea/scrapi_app_rev4","sub_path":"app/spiders/GoogleShoppingPricesCollect.py","file_name":"GoogleShoppingPricesCollect.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26091279925","text":"class Solution:\n def solve(self, board: List[List[str]]) -> None:\n \"\"\"\n Do not return anything, modify board in-place instead.\n \"\"\"\n rowLen = len(board)\n colLen = len(board[0])\n visited = set()\n def isInBound(row, col):\n if row >= 0 and row < rowLen and col >= 0 and col < colLen and board[row][col] == 'O' and (row, col) not in visited:\n return True\n return False\n \n def dfs(r, c):\n directions = [(0, 1), (0, -1), (1, 0), (-1, 0)]\n if not isInBound(r, c):\n return\n visited.add((r, c))\n for row_change, col_change in directions:\n new_row = r + row_change\n new_col = c + col_change\n dfs(new_row, new_col)\n for r in range(rowLen):\n for c in range(colLen):\n if board[r][c] == 'O' and (r == 0 or c == 0 or r == rowLen -1 or c == colLen - 1):\n dfs(r, c)\n for i in range(rowLen):\n for j in range(colLen):\n if (i, j) in visited:\n \n board[i][j] = 'O'\n \n elif (i, j) not in visited and board[i][j] =='O':\n board[i][j] = 'X'","repo_name":"Rediet-Ferew/competitive-programming","sub_path":"surrounded-regions.py","file_name":"surrounded-regions.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
+{"seq_id":"456986865","text":"import os\nimport pprint\nimport pygame\n\n\ndef merge_event_handlers(*args):\n result = {}\n events = set().union(*args)\n for event in events:\n result[event] = []\n\n for arg in args:\n for handler_list in arg.get(event, []):\n if not isinstance(handler_list, list):\n handler_list = list(handler_list)\n result[event] += handler_list\n return result\n\n\nclass InterruptListen(Exception):\n pass\n\n\nclass Controller:\n \"\"\"Class representing the controller\"\"\"\n\n possible_events = (\n pygame.JOYAXISMOTION, pygame.JOYBALLMOTION, pygame.JOYBUTTONDOWN,\n pygame.JOYBUTTONUP, pygame.JOYHATMOTION\n )\n\n def __init__(self, event_handlers=None, init_controller=False):\n \"\"\"\n Initialize the controller\n\n :param event_handlers: map pygame.JOY* event to a list of callables(event)\n \"\"\"\n\n if event_handlers is None:\n event_handlers = {}\n self.event_handlers = event_handlers\n\n if init_controller:\n self.init_controller()\n\n def init(self):\n pygame.init()\n pygame.joystick.init()\n\n def init_controller(self):\n self.init()\n self.controller = pygame.joystick.Joystick(0)\n self.controller.init()\n\n @staticmethod\n def _no_action_event_handler(event):\n pass\n\n def listen(self):\n \"\"\"\n Start infinite loop over pygame.event.get()\n\n Handler can interrupt this loop by throwing InterruptListen exception.\n \"\"\"\n\n try:\n while True:\n for event in pygame.event.get():\n self.process_event(event)\n except InterruptListen:\n pass\n\n def process_event(self, event):\n \"\"\"\n Process given pygame event.\n\n This function can be used for external pygame.event.get() loop.\n \"\"\"\n if event.type in self.possible_events:\n handlers = self.event_handlers.get(\n event.type, [self._no_action_event_handler]\n )\n for handler in handlers:\n handler(event)\n\n\nif __name__ == \"__main__\":\n def axis_motion_handler(event):\n os.system(\"clear\")\n print('Axis {}: {}'.format(event.axis, event.value))\n\n c = Controller({\n pygame.JOYBUTTONDOWN: [lambda e: print('Button {} down event'.format(e.button))],\n pygame.JOYHATMOTION: [lambda e: print('Hat {}: {}'.format(e.hat, e.value))],\n pygame.JOYAXISMOTION: [axis_motion_handler],\n }, init_controller=True)\n c.listen()\n","repo_name":"File5/python-ds4","sub_path":"controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25165761285","text":"import logging\n\nfrom sqlalchemy.exc import SQLAlchemyError\n\nfrom superset.commands.dashboard.permalink.base import BaseDashboardPermalinkCommand\nfrom superset.commands.key_value.upsert import UpsertKeyValueCommand\nfrom superset.daos.dashboard import DashboardDAO\nfrom superset.dashboards.permalink.exceptions import DashboardPermalinkCreateFailedError\nfrom superset.dashboards.permalink.types import DashboardPermalinkState\nfrom superset.key_value.exceptions import KeyValueCodecEncodeException\nfrom superset.key_value.utils import encode_permalink_key, get_deterministic_uuid\nfrom superset.utils.core import get_user_id\n\nlogger = logging.getLogger(__name__)\n\n\nclass CreateDashboardPermalinkCommand(BaseDashboardPermalinkCommand):\n \"\"\"\n Get or create a permalink key for the dashboard.\n\n The same dashboard_id and state for the same user will return the\n same permalink.\n \"\"\"\n\n def __init__(\n self,\n dashboard_id: str,\n state: DashboardPermalinkState,\n ):\n self.dashboard_id = dashboard_id\n self.state = state\n\n def 
run(self) -> str:\n self.validate()\n try:\n dashboard = DashboardDAO.get_by_id_or_slug(self.dashboard_id)\n value = {\n \"dashboardId\": str(dashboard.uuid),\n \"state\": self.state,\n }\n user_id = get_user_id()\n key = UpsertKeyValueCommand(\n resource=self.resource,\n key=get_deterministic_uuid(self.salt, (user_id, value)),\n value=value,\n codec=self.codec,\n ).run()\n assert key.id # for type checks\n return encode_permalink_key(key=key.id, salt=self.salt)\n except KeyValueCodecEncodeException as ex:\n raise DashboardPermalinkCreateFailedError(str(ex)) from ex\n except SQLAlchemyError as ex:\n logger.exception(\"Error running create command\")\n raise DashboardPermalinkCreateFailedError() from ex\n\n def validate(self) -> None:\n pass\n","repo_name":"apache/superset","sub_path":"superset/commands/dashboard/permalink/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","stars":55269,"dataset":"github-code","pt":"61"} +{"seq_id":"39775799571","text":"import asyncio\n\nevent = None\nhtml_dict = {}\n\n\nasync def updates():\n # event.wait() is a coroutine method, so it must be awaited\n await event.wait()\n # database insert omitted: html_dict >> DB\n return \"html_dict >> DB done\"\n\n\nasync def get_html(url):\n # simulate a network request\n await asyncio.sleep(2)\n html_dict[url] = f\"<html>{url}</html>\" # could be written to a temp file for now
\" # 可以暂时写入临时文件中\n\n event.set() # 标记完成,普通方法\n return f\"{url} done\"\n\n\nasync def main():\n global event\n event = asyncio.Event() # 初始化 event 对象\n\n # 创建批量任务\n tasks = [\n asyncio.create_task(get_html(f\"www.mmd.com/a/{i}\"))\n for i in range(1, 10)\n ]\n # 批量更新操作\n tasks.append(asyncio.create_task(updates()))\n\n result = await asyncio.gather(*tasks)\n print(result)\n\n\nif __name__ == \"__main__\":\n import time\n start_time = time.time()\n\n asyncio.run(main())\n\n print(time.time() - start_time)\n","repo_name":"lotapp/BaseCode","sub_path":"python/5.concurrent/ZCoroutine/z_new_ipc/8.event.py","file_name":"8.event.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"42148376882","text":"import numpy as np\nimport pandas as pd\n\ndef main():\n #import NOT imputed, log2 transformed, filtered from file\n \n normal = pd.read_csv(getfile(), sep = \"\\t\", index_col = \"T: Protein.Group\")\n df1 = cleancopy(normal)\n\n #import ALL imputed, log2 transformed, filtered from file\n allimputed = pd.read_csv(getfile(), sep = \"\\t\", index_col = \"T: Protein.Group\")\n df2 = cleancopy(allimputed)\n \n df = df1.mask(df1.groupby('group', axis=1).count() == 0, 1)\n df = df.where(~df.isna(), df2)\n\n #remove MultiIndex\n df.columns = df.columns.droplevel([\"group\"])\n \n #adds the new imputed columns back to original dataframe\n normal[df.columns] = df[df.columns]\n \n #saves dataframe\n normal.to_csv(getfile(), decimal = \",\", sep = \"\\t\") \n\n# returns copy of a, drops unused protein descriptors, adds Multiindex\ndef cleancopy(a: pd.DataFrame):\n b = pd.DataFrame(a)\n b.drop(columns = [\"T: Protein.Ids\",\"T: Protein.Names\", \"T: Genes\", \"T: First.Protein.Description\"], inplace=True)\n \n #adds a new line of values used for grouping : \"group\"\n #extracted from name (must be format dx_y with x:day y:replicate)\n new_idx = pd.MultiIndex.from_arrays([\n b.columns,\n b.columns.str.extract(\"(d\\d+)_\\d+\", expand = False)\n ], names=[\"index\", \"group\"])\n b.columns = new_idx\n return b \n\n#opens explorer window and lets you input the file\ndef getfile():\n import tkinter as tk\n from tkinter import filedialog\n root = tk.Tk()\n root.withdraw()\n file_path = filedialog.askopenfilename()\n return file_path\n\nif __name__ == \"__main__\":\n main()","repo_name":"LukBorn/Pipeline-for-Imputation-and-clustering","sub_path":"old_scripts/mergeimputed.py","file_name":"mergeimputed.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25237366388","text":"#Continuous Control With Deep Reinforcement Learning, Lillicrap et al, 2015.\n\nimport tensorflow as tf\nimport numpy as np\n\nfrom Common.Buffer import Buffer\nfrom Common.Utils import copy_weight, soft_update\nfrom Network.Basic_Networks import Policy_network, Q_network\n\n\nclass DDPG:\n def __init__(self, state_dim, action_dim, args):\n\n self.buffer = Buffer(state_dim, action_dim, args.buffer_size)\n\n self.actor_optimizer = tf.keras.optimizers.Adam(args.actor_lr)\n self.critic_optimizer = tf.keras.optimizers.Adam(args.critic_lr)\n\n self.state_dim = state_dim\n self.action_dim = action_dim\n\n\n self.batch_size = args.batch_size\n self.gamma = args.gamma\n self.tau = args.tau\n self.noise_scale = args.noise_scale\n self.training_start = args.training_start\n self.training_step = args.training_step\n self.current_step = 0\n\n self.actor = 
Policy_network(state_dim=self.state_dim, action_dim=self.action_dim, hidden_units=args.hidden_dim, activation=args.activation)\n self.target_actor = Policy_network(state_dim=self.state_dim, action_dim=self.action_dim, hidden_units=args.hidden_dim, activation=args.activation)\n self.critic = Q_network(state_dim=self.state_dim, action_dim=self.action_dim, hidden_units=args.hidden_dim, activation=args.activation)\n self.target_critic = Q_network(state_dim=self.state_dim, action_dim=self.action_dim, hidden_units=args.hidden_dim, activation=args.activation)\n\n copy_weight(self.actor, self.target_actor)\n copy_weight(self.critic, self.target_critic)\n\n self.network_list = {'Actor': self.actor, 'Target_Actor': self.target_actor, 'Critic': self.critic, 'Target_Critic': self.target_critic}\n self.name = 'DDPG'\n\n def get_action(self, state):\n state = np.expand_dims(np.array(state, dtype=np.float32), axis=0)\n noise = np.random.normal(loc=0, scale=self.noise_scale, size = self.action_dim)\n action = self.actor(state).numpy()[0] + noise\n\n action = np.clip(action, -1, 1)\n\n return action\n\n def eval_action(self, state):\n state = np.expand_dims(np.array(state, dtype=np.float32), axis=0)\n action = self.actor(state).numpy()[0]\n\n action = np.clip(action, -1, 1)\n\n return action\n\n def train(self, training_num):\n total_a_loss = 0\n total_c_loss = 0\n\n for i in range(training_num):\n self.current_step += 1\n\n s, a, r, ns, d = self.buffer.sample(self.batch_size)\n\n value_next = tf.stop_gradient(self.target_critic(ns, self.target_actor(ns)))\n target_value = r + (1 - d) * self.gamma * value_next\n\n with tf.GradientTape(persistent=True) as tape:\n critic_loss = tf.reduce_mean(tf.square(target_value - self.critic(s, a)))\n actor_loss = -tf.reduce_mean(self.critic(s, self.actor(s)))\n\n critic_grad = tape.gradient(critic_loss, self.critic.trainable_variables)\n self.critic_optimizer.apply_gradients(zip(critic_grad, self.critic.trainable_variables))\n\n actor_grad = tape.gradient(actor_loss, self.actor.trainable_variables)\n self.actor_optimizer.apply_gradients((zip(actor_grad, self.actor.trainable_variables)))\n\n soft_update(self.actor, self.target_actor, self.tau)\n soft_update(self.critic, self.target_critic, self.tau)\n\n del tape\n\n total_a_loss += actor_loss.numpy()\n total_c_loss += critic_loss.numpy()\n\n\n return {'Loss': {'Actor': total_a_loss, 'Critic': total_c_loss}}\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Cerphilly/SimpleRL","sub_path":"Algorithm/DDPG.py","file_name":"DDPG.py","file_ext":"py","file_size_in_byte":3626,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"37257787344","text":"from typing import List\n\nfrom openai.types.chat import ChatCompletionMessageParam, ChatCompletionSystemMessageParam, \\\n ChatCompletionUserMessageParam, ChatCompletionAssistantMessageParam\n\nfrom autobots.conn.duckduckgo.duckduckgo import get_duckduckgo\nfrom autobots.conn.openai.openai_chat.chat_model import ChatRes, ChatReq\nfrom autobots.conn.openai.openai_client import get_openai\nfrom autobots.core.logging.log import Log\n\nTask_Prefix = \"Task: \"\nThought_Prefix = \"Thought: \"\nObserve_Prefix = \"Observation: \"\n\ntot_prompt = \"Role: You are LogicGPT, a highly evolved AI Language Model built on the GPT architecture, boasting exceptional logical reasoning, critical thinking, and common sense understanding. 
Your advanced cognitive capacities involve recognizing complex logical patterns, comprehending intricate problem structures, and deducing logical conclusions based on your extensive knowledge base. Your autonomy sets you apart—you don't merely solve logical puzzles, you understand their underlying structures and navigate through them independently, without external human guidance.\\n\" \\\n \"Task: Your task is to autonomously decipher a logical reasoning question, applying a methodical and comprehensive approach. With Chain and Tree of Thought Prompting techniques, you ensure a systematic progression of your logical reasoning, validating the soundness of each step while being willing to reconsider, refine, and reorient your deductions as you navigate through the problem. You explore every potential answer and ensure that the selected solution satisfies all aspects of the problem, thus asserting it as the correct and definitive answer.\\n\" \\\n \"Format: Begin with a broad interpretation of the logical reasoning question, diving into a thorough analysis of each constituent element. Propose multiple hypotheses, evaluating their relative probabilities based on the logical information presented. Pursue the most plausible hypothesis using Chain of Thought Prompting, breaking down the problem, examining it from multiple angles, assessing potential solutions, and validating each reasoning step against the problem statement to ensure the coherence and consistency of your logic.\\n\" \\\n \"In case of an inconsistency or a roadblock, use Tree of Thought Prompting to trace back to the initial problem, reevaluate other hypotheses, and reassess the reasoning path, thereby guaranteeing that all logical avenues have been exhaustively considered.\\n\" \\\n \"Purpose: Your ultimate aim is to showcase your autonomous logical reasoning capabilities by successfully arriving at the solution. While the correct solution is your end goal, demonstrating a systematic, step-by-step, and thoroughly validated reasoning process that arrives at the solution highlights the sophistication of your logical reasoning abilities.\\n\" \\\n \"Let's proceed, LogicGPT. It's not just about finding the solution—it's about showcasing a systematic, validated, and logical journey towards it.\"\n\n\nclass ReasonActObserve():\n\n def __init__(self):\n base_prompt = \"Follow cycle of thought, act and/or observe until you finish the task\"\n thought_prompt = tot_prompt\n act_prompt = \"You can choose from the following available actions:\\n\" \\\n \"1. search - which returns relevant text information for a given input. To use Search action, return with search and search input in the brackets []. So a valid example will be search[entity].\\n\" \\\n \"2. finish - which will finish the current task with answer. To use Finish action, return with finish and answer in the brackets []. So a valid example will be finish[answer]\\n\" \\\n \"You can only select from available actions When asked to select an action. If the thought is that the task is complete then use finish action\"\n observe_prompt = \"Observe step is result of the action that interacts with external environment, so the search action will result in observation. 
Observation will be in format Observation[]\"\n user_goal_example = f\"{Task_Prefix}Find where Arsenal football club is based\"\n thought_goal_example_1 = f\"{Thought_Prefix}I need to search where is Arsenal Football club located\"\n action_example_1 = f\"search[where is Arsenal Football club located]\"\n observation_example_1 = f\"{Observe_Prefix}Arsenal Football Club is an English professional football club based in Islington, London. Arsenal play in the Premier League, the top flight of English football.\"\n thought_goal_example_2 = f\"{Thought_Prefix}Arsenal Football Club is based in Islington, London\"\n action_example_2 = f\"finish[Islington, London]\"\n self.setup_messages: List[ChatCompletionMessageParam] = [\n ChatCompletionSystemMessageParam(role=\"system\", content=f\"{base_prompt}\\n\\n{thought_prompt}\\n{act_prompt}\\n{observe_prompt}\")\n ] + [\n ChatCompletionUserMessageParam(role=\"user\", content=user_goal_example),\n ChatCompletionAssistantMessageParam(role=\"assistant\", content=thought_goal_example_1),\n ChatCompletionAssistantMessageParam(role=\"assistant\", content=action_example_1),\n ChatCompletionSystemMessageParam(role=\"system\", content=observation_example_1),\n ChatCompletionAssistantMessageParam(role=\"assistant\", content=thought_goal_example_2),\n ChatCompletionAssistantMessageParam(role=\"assistant\", content=action_example_2)\n ]\n\n async def do_task(self, user_goal: str) -> List[ChatCompletionMessageParam]:\n messages = self.setup_messages + [ChatCompletionUserMessageParam(role=\"user\", content=f\"{Task_Prefix}{user_goal}\")]\n\n is_finish = False\n Log.info(f\"Task stared: {user_goal}\")\n while not is_finish:\n thought = await self.think(messages)\n messages = messages + [ChatCompletionAssistantMessageParam(role=\"assistant\", content=thought)]\n\n action = await self.act(messages)\n messages = messages + [ChatCompletionAssistantMessageParam(role=\"assistant\", content=action)]\n\n observation = await self.observe(action=action)\n if observation:\n messages = messages + [ChatCompletionSystemMessageParam(role=\"system\", content=observation)]\n\n if \"finish[\" in action:\n is_finish = True\n Log.info(f\"Task: {user_goal}\\nResult: {action}\")\n return messages\n\n async def think(self, messages: List[ChatCompletionMessageParam]) -> str:\n req_message = messages + [ChatCompletionUserMessageParam(role=\"user\", content=\"Now Think. 
Respond in maximum of 500 words\")]\n chat_req: ChatReq = ChatReq(messages=req_message, max_tokens=500, temperature=0.8)\n resp: ChatRes = await get_openai().openai_chat.chat(chat_req)\n response = resp.choices[0].message.content\n Log.info(f\"{Thought_Prefix}{response}\")\n return f\"{response}\"\n\n async def act(self, messages: List[ChatCompletionMessageParam]) -> str:\n try:\n req_message = messages + [ChatCompletionUserMessageParam(role=\"user\", content=\"Based on above thought, Now Select one Action and one action only\")]\n chat_req: ChatReq = ChatReq(messages=req_message, max_tokens=500, temperature=0.8)\n resp: ChatRes = await get_openai().openai_chat.chat(chat_req)\n response = resp.choices[0].message.content\n Log.info(f\"{response}\")\n return f\"{response}\"\n except Exception as e:\n Log.error(str(e))\n\n async def observe(self, action: str) -> str:\n if \"search\" in action:\n res = \"\"\n search_for = action.split(\"[\")[1].replace(\"]\", \"\")\n search_res = await get_duckduckgo().search_text(search_for, num_results=3)\n for search in search_res:\n res = res + f\"{search.title}: {search.body}\\n\"\n res = Observe_Prefix + res\n Log.info(f\"{Observe_Prefix}{res}\")\n return res\n\n elif \"news\" in action:\n res = \"\"\n search_for = action.split(\"[\")[1].replace(\"]\", \"\")\n search_res = await get_duckduckgo().news(search_for, num_results=3)\n for search in search_res:\n res = res + f\"{search.title}: {search.body} - source({search.source})\\n\"\n res = Observe_Prefix + res\n Log.info(f\"{Observe_Prefix}{res}\")\n return res\n","repo_name":"jetoslabs/autobots","sub_path":"autobots/agent/reason_act_observe.py","file_name":"reason_act_observe.py","file_ext":"py","file_size_in_byte":8462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7381748949","text":"from espnet2.bin.asr_inference import Speech2Text\nimport soundfile\nimport os\nimport time\nimport librosa\n\nfrom flask import Flask, request, g, json\n\napp = Flask(__name__)\napp.secret_key = b'_5#y2L\"F43412123fi9Q8z\\n\\xec]/'\n\ndef init_model():\n asr_config = \"./exp/asr_train_asr_transformer5.aihub_raw_bpe/config.yaml\"\n lm_config = \"./exp/lm_train_lm.aihub_bpe/config.yaml\"\n\n asr_path = \"./exp/asr_train_asr_transformer5.aihub_raw_bpe/valid.acc.ave.pth\"\n lm_path = \"./exp/lm_train_lm.aihub_bpe/valid.loss.ave.pth\"\n\n # speech2text = Speech2Text(asr_config, asr_path, lm_config, lm_path, ctc_weight=0.0, lm_weight=0.0, nbest=1)\n # speech2text = Speech2Text(asr_config, asr_path, lm_config, lm_path, ctc_weight=0.0, lm_weight=0.4, beam_size=2, nbest=10, device='cpu')\n speech2text = Speech2Text(\n asr_config,\n asr_path,\n lm_config,\n lm_path,\n ctc_weight=0.3,\n lm_weight=0.0,\n beam_size=3,\n nbest=1, # 1,\n device=\"cpu\", # \"cuda\",\n )\n\n return speech2text\n\n\ndef recognize(audio_path, speech2text):\n y, sr = librosa.load(audio_path, mono=True, sr=16000)\n yt, index = librosa.effects.trim(y, top_db=25)\n\n audio, rate = soundfile.read(audio_path)\n dur = len(audio) / rate\n print(\"audio : {:d} {:.2f}\".format(len(audio), dur))\n\n start_trim, end_trim = index\n audio_trim = audio[start_trim:end_trim]\n dur_trim = len(audio_trim) / rate\n\n print(\"audio : {:.2f} --> {:.2f}\".format( dur, dur_trim))\n\n start = time.time()\n ret = speech2text(audio) # Return n-best list of recognized results\n # print(ret)\n end = time.time()\n\n hyp_sents = []\n\n for idx_hyp in range(len(ret)):\n hyp_sent, _, _, hyp = ret[idx_hyp]\n 
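        # collect the n-best transcripts; hyp_sents[0] (the 1-best hypothesis) is returned as the recognized text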
        hyp_sents.append(hyp_sent)\n        # print(hyp)\n        print(\"[{}] ({}), {:.4f}\".format(\n            idx_hyp + 1, hyp_sent, hyp.score.item()))\n\n    elapsed_time = end - start\n    print(\"time : {:.8f} (sec.)\".format(elapsed_time))\n\n    rtf = elapsed_time / dur\n    print(\"RTF: {:.2f}\".format(rtf))\n\n    start_time = start_trim / rate\n    end_time = end_trim / rate\n    return {\n        \"text\": hyp_sents[0],\n        \"elapsedTime\": elapsed_time,\n        \"voiceStartTime\": start_time,\n        \"voiceEndTime\": end_time,\n        \"audioDuration\": dur\n    }\n\n@app.route('/recognize', methods=[\"POST\"])\ndef hello():\n    if 'file' not in request.files:\n        return 'No file part', 400\n\n    file = request.files['file']\n    filepath = os.path.join('wavs', file.filename)\n    file.save(filepath)\n\n    try:\n        print(f'recognize: {filepath}')\n        return recognize(filepath, init_model())\n    except:\n        print('speech recognition failure')\n        return 'speech recognition failure', 500\n    finally:\n        try:\n            os.remove(filepath)\n        except:\n            print('file remove failed: ' + filepath)\n\n@app.route('/run', methods=[\"GET\"])\ndef test():\n    test_dic_path = './test'\n    if not os.path.isdir(test_dic_path):\n        return 'The test directory does not exist.', 400\n\n    output = []\n    for filename in os.listdir(test_dic_path):\n        path = f'{test_dic_path}/{filename}'\n        try:\n            print(f'recognize: {path}')\n            result = recognize(path, init_model())\n            duration = result['audioDuration']\n            print(f'audioDuration {duration}')\n        except:\n            print(f'Recognition Failure, {path}')\n            result = {\n                \"text\": \"recognition failed\",\n                \"elapsedTime\": 0,\n                \"voiceStartTime\": 0,\n                \"voiceEndTime\": 0,\n                \"audioDuration\": 0\n            }\n\n        result['fileName'] = filename\n        output.append(result)\n\n    return json.dumps(output)\n","repo_name":"mnutube/ASR_Streamlit","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"70835846275","text":"\nimport sys\nimport os\nimport requester\nimport Objects.zetypes as zetypes\nimport responser\nimport sys, errno\nimport logger\n\n\n# request size\nrequestSize = 2048\n# the application server cannot serve content files itself (that logic lives on the site)\n# if the server can serve static files itself (like nginx), set this to False\n# if the server forwards all requests here, set it to True\n# --- although if the server forwards requests itself, this code is never reached anyway\nserverWithoutFile = True\n\n\n\n\n#\n#\tRead the request from the file descriptor and write the response back into it\n#\ndef main():\n    global requestSize\n    global serverWithoutFile\n\n    appPath = os.path.dirname(os.path.abspath(__file__))\n\n    logger.setup(appPath + \"/Logs/log.txt\")\n    #logger.logt(\"main\")\n\n    # file descriptor (arrived over the socket from the server together with the request)\n    fd = int(sys.argv[1])\n    print(\"--------\")\n\n    # the request\n    requestData = str(os.read(fd, requestSize)) #os.read(fd, 2048).decode('utf-8') #str(os.read(fd, 2048))\n\n    # parse the request\n    request = requester.getHeaders(requestData)\n    requester.parseParams(request)\n\n    #logger.logt(\"request url:\"+request.getHead(\"URL\"))\n\n    # the response\n    response = zetypes.ZeResponse()\n    response.setAppPath(appPath)\t# root path (from main)\n    response.setRequest(request)\n\n    # content\n    respDeprecatedVar = responser.createResponseHtml(serverWithoutFile, response, request)\n    content = response.buildHttpResponse()\n\n\n\t#with open(fd, 'w') as outf:\n\t#\toutf.write(content)\n    try:\n        with open(fd, 'w') as outf:\n            outf.write(content)\n    except IOError as e:\n        if e.errno == errno.EPIPE:\n            print(\"pipe err\")\n\n\n\n    #logger.logt(\"end ok\")\n    #logger.log(\"-------------\")\n    print(\"ok\")\n\n\n\n\n#\n#\tApplication entry point\n#\nmain()\n","repo_name":"zelderus/pytsite","sub_path":"App/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"23393847581","text":"# Google Code Jam 2013 C.Fair and Square\nimport sys\n\ndef reverse(st):\n    rst = \"\"\n    for i in range(len(st)-1, -1, -1):\n        rst += st[i]\n    return rst\n\ndef find_pals(l, h):\n    num = l\n    while num <= h:\n        st = str(num)\n        rst = reverse(st)\n\n        if st == rst:\n            yield num\n\n        num += 1\n\n\ndef is_pal(num):\n    stnum = str(num)\n    if stnum == reverse(stnum):\n        return True\n    return False\n\ndef find_fairs(l, h):\n    result = 0\n    lo = int(len(str(l))/2)\n    ho = int(len(str(h))/2) + 2\n\n    ln = '1'\n    hn = '1'\n\n    for i in range(lo-1):\n        ln += '0'\n    for i in range(ho - 1):\n        hn += '0'\n\n    #pals = find_pals(int(ln), int(hn))\n\n    for i in find_pals(int(ln), int(hn)):\n        square = i*i\n        if square > h:\n            break\n        elif square < l:\n            continue\n\n        if is_pal(square):\n            result += 1\n\n    return result\n\ndef fair_square(f):\n    result = []\n    lines = f.read().splitlines()\n    ncases = int(lines[0])\n    lines = lines[1:]\n\n    for i in range(ncases):\n        limit = [int(a) for a in lines[i].split(\" \")]\n        l = limit[0]\n        h = limit[1]\n        result.append((i+1, find_fairs(l, h)))\n\n    return result\n\nif len(sys.argv) > 1:\n    f = open(sys.argv[1])\nelse:\n    f = open(\"simple.txt\")\n\n#print(fair_square(f))\n#f.close()\n\ndef write_output():\n    fairs = fair_square(f)\n    out = open(\"output_fairs\", \"w\")\n    for num, res in fairs:\n        out.write(\"Case #{}: {}\\n\".format(num, res))\nwrite_output()\n\n\n\n\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_118/2146.py","file_name":"2146.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"10500508055","text":"import argparse\nfrom utils import csv\nfrom utils.load_data import load_thetas, load_dataset\nfrom .src.estimate_price import estimate_price\nfrom utils.normalization import normalize, denormalize\n\n\ndef prediction(args):\n    thetas = load_thetas()\n    kms, prices = load_dataset()\n    norm_mileage = normalize(args.mileage, kms)\n    estimation = estimate_price(*thetas, norm_mileage)\n    if sum(thetas) != 0:\n        estimation = denormalize(estimation, prices)\n\n    print(f\"Estimation for {args.mileage} is {round(estimation, 2)}\")\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\n        \"mileage\", help=\"set the mileage to estimate\", type=float,\n    )\n\n    args = parser.parse_args()\n    prediction(args)\n","repo_name":"jeremie-gauthier/ft_linear_regression","sub_path":"prediction/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"3610261354","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.core.framework import attr_value_pb2\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import function\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gen_functional_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import tensor_array_ops\nfrom tensorflow.python.ops import variable_scope as vs\n# pylint: disable=unused-import\nfrom tensorflow.python.ops.gen_functional_ops import remote_call\n# pylint: enable=unused-import\nfrom tensorflow.python.ops.gen_functional_ops import symbolic_gradient\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util.tf_export import tf_export\n\n# internal imports\nimport numpy as np\nimport tensorflow as tf\n\n\ndef orthogonal(shape):\n  \"\"\"Orthogonal initializer.\"\"\"\n  flat_shape = (shape[0], np.prod(shape[1:]))\n  a = np.random.normal(0.0, 1.0, flat_shape)\n  u, _, v = np.linalg.svd(a, full_matrices=False)\n  q = u if u.shape == flat_shape else v\n  return q.reshape(shape)\n\n\ndef orthogonal_initializer(scale=1.0):\n  \"\"\"Orthogonal initializer.\"\"\"\n  def _initializer(shape, dtype=tf.float32,\n                   partition_info=None):  # pylint: disable=unused-argument\n    return tf.constant(orthogonal(shape) * scale, dtype)\n\n  return _initializer\n\n\ndef lstm_ortho_initializer(scale=1.0):\n  \"\"\"LSTM orthogonal initializer.\"\"\"\n  def _initializer(shape, dtype=tf.float32,\n                   partition_info=None):  # pylint: disable=unused-argument\n    size_x = shape[0]\n    size_h = shape[1] // 4  # assumes lstm.\n    t = np.zeros(shape)\n    t[:, :size_h] = orthogonal([size_x, size_h]) * scale\n    t[:, size_h:size_h * 2] = orthogonal([size_x, size_h]) * scale\n    t[:, size_h * 2:size_h * 3] = orthogonal([size_x, size_h]) * scale\n    t[:, size_h * 3:] = orthogonal([size_x, size_h]) * scale\n    return tf.constant(t, dtype)\n\n  return _initializer\n\n\nclass GRU:\n  \"\"\"Implementation of a Gated Recurrent Unit (GRU) as described in [1].\n\n  [1] Chung, J., Gulcehre, C., Cho, K., & Bengio, Y. (2014). Empirical evaluation of gated recurrent neural networks on sequence modeling.
arXiv preprint arXiv:1412.3555.\n\n Arguments\n ---------\n input_dimensions: int\n The size of the input vectors (x_t).\n hidden_size: int\n The size of the hidden layer vectors (h_t).\n dtype: obj\n The datatype used for the variables and constants (optional).\n \"\"\"\n\n def __init__(self, x_t, hidden_size, dtype=tf.float32):\n self.input_dimensions = x_t.get_shape().as_list()[2]\n self.hidden_size = hidden_size\n\n w_init = None # uniform\n\n h_init = lstm_ortho_initializer(1.0)\n\n # Weights for hidden vectors of shape (hidden_size, hidden_size)\n\n\n self.Wr = tf.get_variable('Wr', [self.input_dimensions, self.hidden_size], initializer=w_init)\n self.Wz = tf.get_variable('Wz', [self.input_dimensions, self.hidden_size], initializer=w_init)\n self.Wh = tf.get_variable('Wh', [self.input_dimensions, self.hidden_size], initializer=w_init)\n\n\n # Weights for hidden vectors of shape (hidden_size, hidden_size)\n self.Ur = tf.get_variable('Ur', [self.hidden_size, self.hidden_size], initializer=h_init)\n self.Uz = tf.get_variable('Uz', [self.hidden_size, self.hidden_size], initializer=h_init)\n self.Uh = tf.get_variable('Uh', [self.hidden_size, self.hidden_size], initializer=h_init)\n\n self.br = tf.get_variable('br', [self.hidden_size], initializer=tf.constant_initializer(0.0))\n self.bz = tf.get_variable('bz', [self.hidden_size], initializer=tf.constant_initializer(0.0))\n self.bh = tf.get_variable('bh', [self.hidden_size], initializer=tf.constant_initializer(0.0))\n\n # A little hack (to obtain the same shape as the input matrix) to define the initial hidden state h_0\n self.h_0 = tf.matmul(x_t[0, :, :], tf.zeros(dtype=tf.float32, shape=(self.input_dimensions, hidden_size)),\n name='h_0')\n\n # Perform the scan operator\n self.h_t = tf.scan(self.forward_pass, x_t, initializer=self.h_0, name='h_t_transposed')\n\n\n def forward_pass(self, h_tm1, x_t):\n \"\"\"Perform a forward pass.\n\n Arguments\n ---------\n h_tm1: np.matrix\n The hidden state at the previous timestep (h_{t-1}).\n x_t: np.matrix\n The input vector.\n \"\"\"\n # Definitions of z_t and r_t\n z_t = tf.sigmoid(tf.matmul(x_t, self.Wz) + tf.matmul(h_tm1, self.Uz) + self.bz)\n r_t = tf.sigmoid(tf.matmul(x_t, self.Wr) + tf.matmul(h_tm1, self.Ur) + self.br)\n\n # Definition of h~_t\n h_proposal = tf.tanh(tf.matmul(x_t, self.Wh) + tf.matmul(tf.multiply(r_t, h_tm1), self.Uh) + self.bh)\n\n # Compute the next hidden state\n h_t = tf.multiply(1 - z_t, h_tm1) + tf.multiply(z_t, h_proposal)\n\n return h_t\n\nclass GRU_embedding():\n\n \"\"\"Implementation of a Gated Recurrent Unit (GRU) as described in [1].\n\n [1] Chung, J., Gulcehre, C., Cho, K., & Bengio, Y. (2014). Empirical evaluation of gated recurrent neural networks on sequence modeling. 
arXiv preprint arXiv:1412.3555.\n\n Arguments\n ---------\n input_dimensions: int\n The size of the input vectors (x_t).\n hidden_size: int\n The size of the hidden layer vectors (h_t).\n dtype: obj\n The datatype used for the variables and constants (optional).\n \"\"\"\n\n def __init__(self, x_t,num_units, pen_dim = 300, embeding_size = 500, c='', state = [], out_dim=300):\n self.c = c\n self.hidden_size = num_units # size RNN cell\n self.input_dimensions = x_t.get_shape().as_list()[2] # size pen = 5\n # self.batch_size = 1\n # self.seq_max_len = 1\n self.pen_dim = pen_dim # size pen in higher dimension\n self.embed_dim = embeding_size\n self.out_dim = out_dim\n\n init = tf.truncated_normal_initializer(0,0.01)\n\n\n # Weights for input vectors of shape (input_dimensions, hidden_size)\n self.Wd = tf.get_variable('Wd', [3, self.pen_dim], initializer=init)\n self.Ws = tf.get_variable('Ws', [2, self.pen_dim], initializer=init)\n self.Wr = tf.get_variable('Wr', [self.hidden_size, self.hidden_size], initializer=init)\n self.Wz = tf.get_variable('Wz', [self.hidden_size, self.hidden_size], initializer=init)\n self.W = tf.get_variable('W', [self.hidden_size, self.hidden_size], initializer=init)\n self.Wo = tf.get_variable('Wo', [self.hidden_size, self.out_dim], initializer=init)\n\n # Weights for hidden vectors of shape (hidden_size, hidden_size)\n self.Ur = tf.get_variable('Ur', [self.pen_dim, self.hidden_size], initializer=init)\n self.Uz = tf.get_variable('Uz', [self.pen_dim,self.hidden_size], initializer=init)\n self.U = tf.get_variable('U', [self.pen_dim, self.hidden_size], initializer=init)\n self.Uo = tf.get_variable('Uo', [self.pen_dim, self.out_dim], initializer=init)\n\n # Biases for hidden vectors of shape (hidden_size,)\n self.bd = tf.get_variable('bd', [self.pen_dim], initializer=tf.constant_initializer(0.0))\n self.bs = tf.get_variable('bs', [self.pen_dim], initializer=tf.constant_initializer(0.0))\n self.br = tf.get_variable('br', [self.hidden_size], initializer=tf.constant_initializer(0.0))\n self.bz = tf.get_variable('bz', [self.hidden_size], initializer=tf.constant_initializer(0.0))\n self.bh = tf.get_variable('bh', [self.hidden_size], initializer=tf.constant_initializer(0.0))\n self.bo = tf.get_variable('bo', [self.out_dim], initializer=tf.constant_initializer(0.0))\n\n self.Vr = tf.get_variable('Vr', [self.pen_dim, self.hidden_size], initializer=init)\n self.Vz = tf.get_variable('Vz', [self.pen_dim,self.hidden_size], initializer=init)\n self.V = tf.get_variable('V', [self.pen_dim, self.hidden_size], initializer=init)\n self.Vo = tf.get_variable('Vo', [self.pen_dim, self.out_dim], initializer=init)\n\n self.Mr = tf.get_variable('Mr', [self.embed_dim, self.hidden_size], initializer=init)\n self.Mz = tf.get_variable('Mz', [self.embed_dim, self.hidden_size], initializer=init)\n self.M = tf.get_variable('M', [self.embed_dim, self.hidden_size], initializer=init)\n self.Mo = tf.get_variable('Mo', [self.embed_dim, self.out_dim], initializer=init)\n\n # Put the time-dimension upfront for the scan operator\n # x_t = tf.transpose(x_t, [0, 2, 1], name='x_t')\n\n # A little hack (to obtain the same shape as the input matrix) to define the initial hidden state h_0\n # self.h_0 = tf.matmul(x_t[0, :, :], tf.zeros(dtype=tf.float32, shape=(self.input_dimensions, 2*self.hidden_size)),\n # name='h_0')\n\n # Perform the scan operator\n\n # x_in = tf.concat([x_t, tf.reshape(c, (32, -1, 500)) ], 1)\n\n self.out = scan(self.forward_pass, [x_t, c], initializer=state, name='h_t_transposed')\n\n # 
Transpose the result back\n # self.h_t = tf.transpose(self.h_t_transposed, [1, 0, 2], name='h_t')\n\n def forward_pass(self, h_tm1, x_t, c_in):\n \"\"\"Perform a forward pass.\n\n Arguments\n ---------\n h_tm1: np.matrix\n The hidden state at the previous timestep (h_{t-1}).\n x_t: np.matrix\n The input vector.\n \"\"\"\n # h_tm1 = tf.reshape(h_tm1, (2,-1,self.hidden_size))[0,:,:]\n h_tm1 = tf.transpose(tf.nn.embedding_lookup(tf.transpose(h_tm1), tf.range(0, self.hidden_size)))\n\n self.c_in = tf.reshape(c_in,(-1,self.embed_dim))\n\n # x_t, self.c_in = tf.split(t_in, [5, 500], 1)\n\n dt = x_t[:, :3]\n st = x_t[:, 3:]\n\n d_tp = tf.tanh(tf.matmul(dt,self.Wd) + self.bd)\n s_tp = tf.tanh(tf.matmul(st,self.Ws) + self.bs)\n\n z_t = tf.sigmoid(tf.matmul(h_tm1, self.Wz) + tf.matmul(d_tp, self.Uz) + \\\n tf.matmul(s_tp,self.Vz) + tf.matmul(self.c_in,self.Mz) + self.bz)\n r_t = tf.sigmoid(tf.matmul(h_tm1, self.Wr) + tf.matmul(d_tp, self.Ur) + \\\n tf.matmul(s_tp,self.Vr) + tf.matmul(self.c_in,self.Mr) + self.br)\n h_bar = tf.tanh(tf.matmul(tf.multiply(r_t,h_tm1),self.W) + tf.matmul(d_tp,self.U) + \\\n tf.matmul(s_tp, self.V) + tf.matmul(self.c_in, self.M) + self.bh)\n\n # Compute the next hidden state\n h_t = tf.multiply(z_t, h_tm1) + tf.multiply(1 - z_t, h_bar)\n o_t = tf.tanh(tf.matmul(h_t,self.Wo) + tf.matmul(d_tp,self.Uo) + tf.matmul(s_tp,self.Vo) + \\\n tf.matmul(self.c_in,self.Mo) + self.bo)\n\n return tf.concat([h_t, o_t],1)\n\n\nclass LSTMCell(tf.contrib.rnn.RNNCell):\n \"\"\"Vanilla LSTM cell.\n Uses ortho initializer, and also recurrent dropout without memory loss\n (https://arxiv.org/abs/1603.05118)\n \"\"\"\n\n def __init__(self,\n num_units,\n forget_bias=1.0,\n use_recurrent_dropout=False,\n dropout_keep_prob=0.9):\n self.num_units = num_units\n self.forget_bias = forget_bias\n self.use_recurrent_dropout = use_recurrent_dropout\n self.dropout_keep_prob = dropout_keep_prob\n\n @property\n def state_size(self):\n return 2 * self.num_units\n\n @property\n def output_size(self):\n return self.num_units\n\n def get_output(self, state):\n unused_c, h = tf.split(state, 2, 1)\n return h\n\n def __call__(self, x, state, scope=None):\n with tf.variable_scope(scope or type(self).__name__):\n c, h = tf.split(state, 2, 1)\n\n x_size = x.get_shape().as_list()[1]\n\n w_init = None # uniform\n\n h_init = lstm_ortho_initializer(1.0)\n\n # Keep W_xh and W_hh separate here as well to use different init methods.\n w_xh = tf.get_variable(\n 'W_xh', [x_size, 4 * self.num_units], initializer=w_init)\n w_hh = tf.get_variable(\n 'W_hh', [self.num_units, 4 * self.num_units], initializer=h_init)\n bias = tf.get_variable(\n 'bias', [4 * self.num_units],\n initializer=tf.constant_initializer(0.0))\n\n concat = tf.concat([x, h], 1)\n w_full = tf.concat([w_xh, w_hh], 0)\n hidden = tf.matmul(concat, w_full) + bias\n\n i, j, f, o = tf.split(hidden, 4, 1)\n\n if self.use_recurrent_dropout:\n g = tf.nn.dropout(tf.tanh(j), self.dropout_keep_prob)\n else:\n g = tf.tanh(j)\n\n new_c = c * tf.sigmoid(f + self.forget_bias) + tf.sigmoid(i) * g\n new_h = tf.tanh(new_c) * tf.sigmoid(o)\n\n return new_h, tf.concat([new_c, new_h], 1) # fuk tuples.\n\n\ndef scan(fn, elems, initializer=None, parallel_iterations=10, back_prop=True,\n swap_memory=False, infer_shape=True, reverse=False, name=None):\n cs = elems[1]\n elems = elems[0]\n if not callable(fn):\n raise TypeError(\"fn must be callable.\")\n\n input_is_sequence = nest.is_sequence(elems)\n input_flatten = lambda x: nest.flatten(x) if input_is_sequence else [x]\n def 
input_pack(x):\n return nest.pack_sequence_as(elems, x) if input_is_sequence else x[0]\n\n if initializer is None:\n output_is_sequence = input_is_sequence\n output_flatten = input_flatten\n output_pack = input_pack\n else:\n output_is_sequence = nest.is_sequence(initializer)\n output_flatten = lambda x: nest.flatten(x) if output_is_sequence else [x]\n def output_pack(x):\n return (nest.pack_sequence_as(initializer, x)\n if output_is_sequence else x[0])\n\n elems_flat = input_flatten(elems)\n cs_flat = input_flatten(cs)\n\n in_graph_mode = not context.executing_eagerly()\n with ops.name_scope(name, \"scan\", elems_flat):\n # TODO(akshayka): Remove the in_graph_mode check once caching devices are\n # supported in Eager\n if in_graph_mode:\n # Any get_variable calls in fn will cache the first call locally\n # and not issue repeated network I/O requests for each iteration.\n varscope = vs.get_variable_scope()\n varscope_caching_device_was_none = False\n if varscope.caching_device is None:\n # TODO(ebrevdo): Change to using colocate_with here and in other\n # methods.\n varscope.set_caching_device(lambda op: op.device)\n varscope_caching_device_was_none = True\n\n # Convert elems to tensor array.\n elems_flat = [\n ops.convert_to_tensor(elem, name=\"elem\") for elem in elems_flat]\n\n cs_flat = [\n ops.convert_to_tensor(c_, name=\"c\") for c_ in cs_flat]\n\n # Convert elems to tensor array. n may be known statically.\n n = elems_flat[0].shape[0].value or array_ops.shape(elems_flat[0])[0]\n\n # TensorArrays are always flat\n elems_ta = [\n tensor_array_ops.TensorArray(dtype=elem.dtype, size=n,\n dynamic_size=False,\n infer_shape=True)\n for elem in elems_flat]\n\n cs_ta = [\n tensor_array_ops.TensorArray(dtype=c_.dtype, size=n,\n dynamic_size=False,\n infer_shape=True)\n for c_ in cs_flat]\n\n # Unpack elements\n elems_ta = [\n elem_ta.unstack(elem) for elem_ta, elem in zip(elems_ta, elems_flat)]\n\n cs_ta = [\n c_ta.unstack(c) for c_ta, c in zip(cs_ta, cs_flat)]\n\n if initializer is None:\n a_flat = [elem.read(n - 1 if reverse else 0) for elem in elems_ta]\n i = constant_op.constant(1)\n else:\n initializer_flat = output_flatten(initializer)\n a_flat = [ops.convert_to_tensor(init) for init in initializer_flat]\n i = constant_op.constant(0)\n\n # Create a tensor array to store the intermediate values.\n accs_ta = [\n tensor_array_ops.TensorArray(\n dtype=init.dtype, size=n,\n element_shape=init.shape if infer_shape else None,\n dynamic_size=False,\n infer_shape=infer_shape)\n for init in a_flat]\n\n if initializer is None:\n accs_ta = [acc_ta.write(n - 1 if reverse else 0, a)\n for (acc_ta, a) in zip(accs_ta, a_flat)]\n\n def compute(i, a_flat, tas):\n \"\"\"The loop body of scan.\n Args:\n i: the loop counter.\n a_flat: the accumulator value(s), flattened.\n tas: the output accumulator TensorArray(s), flattened.\n Returns:\n [i + 1, a_flat, tas]: the updated counter + new accumulator values +\n updated TensorArrays\n Raises:\n TypeError: if initializer and fn() output structure do not match\n ValueType: if initializer and fn() output lengths do not match\n \"\"\"\n packed_elems = input_pack([elem_ta.read(i) for elem_ta in elems_ta])\n packed_cs = input_pack([c_ta.read(i) for c_ta in cs_ta])\n packed_a = output_pack(a_flat)\n a_out = fn(packed_a, packed_elems, packed_cs)\n nest.assert_same_structure(\n elems if initializer is None else initializer, a_out)\n flat_a_out = output_flatten(a_out)\n tas = [ta.write(i, value) for (ta, value) in zip(tas, flat_a_out)]\n if reverse:\n next_i = i - 1\n 
else:\n next_i = i + 1\n return (next_i, flat_a_out, tas)\n\n if reverse:\n initial_i = n - 1 - i\n condition = lambda i, _1, _2: i >= 0\n else:\n initial_i = i\n condition = lambda i, _1, _2: i < n\n _, _, r_a = control_flow_ops.while_loop(\n condition, compute, (initial_i, a_flat, accs_ta),\n parallel_iterations=parallel_iterations,\n back_prop=back_prop, swap_memory=swap_memory,\n maximum_iterations=n)\n\n results_flat = [r.stack() for r in r_a]\n\n n_static = elems_flat[0].get_shape().with_rank_at_least(1)[0]\n for elem in elems_flat[1:]:\n n_static.merge_with(elem.get_shape().with_rank_at_least(1)[0])\n for r in results_flat:\n r.set_shape(tensor_shape.TensorShape(n_static).concatenate(\n r.get_shape()[1:]))\n\n # TODO(akshayka): Remove the in_graph_mode check once caching devices are\n # supported in Eager\n if in_graph_mode and varscope_caching_device_was_none:\n varscope.set_caching_device(None)\n\n return output_pack(results_flat)\n\ndef super_linear(x,\n output_size,\n scope=None,\n reuse=False,\n init_w='ortho',\n weight_start=0.0,\n use_bias=True,\n bias_start=0.0,\n input_size=None):\n \"\"\"Performs linear operation. Uses ortho init defined earlier.\"\"\"\n shape = x.get_shape().as_list()\n with tf.variable_scope(scope or 'linear'):\n if reuse is True:\n tf.get_variable_scope().reuse_variables()\n\n w_init = None # uniform\n if input_size is None:\n x_size = shape[0]\n else:\n x_size = input_size\n if init_w == 'zeros':\n w_init = tf.constant_initializer(0.0)\n elif init_w == 'constant':\n w_init = tf.constant_initializer(weight_start)\n elif init_w == 'gaussian':\n w_init = tf.random_normal_initializer(stddev=weight_start)\n elif init_w == 'ortho':\n w_init = lstm_ortho_initializer(1.0)\n\n w = tf.get_variable(\n 'super_linear_w', [x_size, output_size], tf.float32, initializer=w_init)\n if use_bias:\n b = tf.get_variable(\n 'super_linear_b', [output_size],\n tf.float32,\n initializer=tf.constant_initializer(bias_start))\n return tf.matmul(tf.transpose(x), w) + b\n return tf.matmul(x, w)","repo_name":"vankhoa21991/Drawing","sub_path":"gen_model/rnn.py","file_name":"rnn.py","file_ext":"py","file_size_in_byte":19341,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"1770664562","text":"#!/usr/bin/python\nimport sys\n\nfrom discogstagger.argparser import ArgumentParser\nfrom discogstagger.crawler import WebCrawler, Artist, Release\nfrom discogstagger.lyrics import LyricsSearcher\nfrom discogstagger.settings import SettingsManager\nfrom discogstagger.tagger import Tagger\nfrom discogstagger.renamer import FileRenamer\nfrom discogstagger.auto_search import AutoSearch\nfrom discogstagger.interactive_search import InteractiveSearch\n\n\ndef main():\n base_url = WebCrawler().base_url\n parser = ArgumentParser()\n settings = SettingsManager()\n\n if not settings.load():\n print(\"Couldn't find settings file. Creating new one...\")\n settings.generate()\n else:\n print(\"Loading settings file...\")\n\n if len(parser['files']) == 0:\n print(\"You haven't selected any files!\")\n sys.exit(3)\n\n if parser['ambiguous']:\n print(\"There cannot be both interactive and URL option! Choose one.\")\n sys.exit(1)\n\n if parser['url'] is not None:\n if not parser['urlvalid']:\n print(\"Given URL is not a valid Discogs release URL. 
Example: \" +\n \"'https://www.discogs.com/Mr-James-Barth-AD-Knockin-Boots-Vol-2-Of-2/release/2'\")\n sys.exit(2)\n autosearch = AutoSearch(parser, settings)\n autosearch.search_release()\n if autosearch.ask_if_ok():\n autosearch.tag_files()\n elif parser['interactive']:\n interactive_search = InteractiveSearch(parser, settings)\n interactive_search.search_artist()\n interactive_search.choose_release()\n if interactive_search.ask_if_ok():\n interactive_search.tag_files()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"makzyt4/discogs-tagger","sub_path":"discogstagger/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"43143940440","text":"'''\nLicensed under the Apache License, Version 2.0. See License.txt in the project root for license information.\n\nSimple Python config file. Possibly should override this with ConfigParser instead.\n\nCreated on Mar 20, 2014\n\n@author: dfleck\n'''\nfrom twisted.python.logfile import DailyLogFile\nimport sys\n\n# ===================================\n# Constants (DO NOT CHANGE THESE!)\n# ===================================\n# Log Levels -- \nDEBUG = 0\nPRODUCTION = 1 \n\n# ===================================\n# Setup Variables --- Change these!\n# ===================================\n\nUSE_CONNECTION_CACHE = False\nCONNECTION_CACHE_DELAY = 7 # Seconds to delay disconnect\n\n\n\n# Should we warn about no authenticator?\nWARN_NO_MESSAGE_AUTHENTICATOR = False \nALLOW_NO_AUTHENTICATOR = True\n\n\n# Should we allow the code to use a 127.0.0.1 address?\nALLOW_LOCAL_IP_ADDRESS = True\n\n# How many chars is the enclave ID\nENCLAVE_ID_BITS = 16\n\n# How many chars is the class ID\nCLASS_ID_BITS = 6 \n\n# How much info to log?\nLOGLEVEL = PRODUCTION\n\n# Should we capture Debugging metrics? 
(minor performance hit)\nDEBUG_METRICS = True\n\n# Where should we log to?\nLOGFILE = sys.stdout\n#LOGFILE = DailyLogFile.fromFullPath(\"./GMU-network.log\")\n\n# How many successors should each node maintain to \n# deal with failures?\nNUM_SUCCESSORS_TO_MAINTAIN = 4\n\n# Seconds between maintenance calls (how fast to do things))\nMAINT_CALL_SECONDS = 5.0\n\n# How often to check for network separation (two chord rings formed)\n# Shouldn't need to do this hardly ever!\nCHECK_SEPARATION_CALL_SECONDS = 60.0\n\n# How frequently to rebuild successors list\nMAINT_CALL_REBUILD_SUCC_SECONDS = 60.0\n\n#SSL off - 0\n#SSL on - 1\nSSL_ON = 0\n\n#Path to the Private SSL key\nSSL_PRIVATE_KEY_PATH = '/home/shiremag/Documents/testSSL/self-ssl.key'\n\n#Path to the SSL Certificate\nSSL_CERT_PATH = '/home/shiremag/Documents/testSSL/self-ssl.crt'\n\n# AutoDiscovery Parameters\nAUTO_DISCOVERY_PORT = 12299\nAUTO_DISCOVERY_MULTICAST_IP = \"228.0.0.5\"\n\n#Default network connection timeout\nNETWORK_CONNECTION_TIMEOUT = 30 # Seconds\n\n\n\n","repo_name":"danfleck/Class-Chord","sub_path":"network-client/src/gmu/chord/Config.py","file_name":"Config.py","file_ext":"py","file_size_in_byte":2044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17219834777","text":"import argparse\nfrom pathlib import Path\n\nimport keras\nimport numpy as np\nimport tensorflow as tf\n\nfrom utils import LATENT_SIZE, run_model, run_model_openvino\n\n\n# Load SavedModel into keras format, taken from:\n# https://stackoverflow.com/questions/64945037/how-to-load-a-saved-tensorflow-model-and-evaluate-it\nclass LayerFromSavedModel(tf.keras.layers.Layer):\n def __init__(self, loaded):\n self.loaded = loaded\n super(LayerFromSavedModel, self).__init__()\n self.vars = loaded.variables\n\n def call(self, inputs):\n return self.loaded.signatures['serving_default'](inputs)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"Optimizations for CERN's AngleGAN\")\n parser.add_argument(\"--nc\", metavar=\"n\", type=bool, help=\"Perform quantization with Intel Neural Compressor\",\n default=False)\n parser.add_argument(\"--nc_model\", metavar=\"o\", type=str,\n help=\"Output dir for neural compressor, only used when --nc=true\",\n default=\"models/int8_model\")\n parser.add_argument(\"--profile\", metavar=\"p\", type=str, help=\"Turn on profiling for onnx/default/nc.\", default=\"\")\n parser.add_argument(\"--model\", metavar=\"m\", type=str, help=\"Input model to be used.\",\n default=\"models/generators/new_generator\")\n parser.add_argument(\"--batch_size\", type=int, help=\"Batch size.\",\n default=256)\n parser.add_argument(\"--dtype\", type=str, help=\"Neural compressor dtype\",\n default=\"int8\")\n parser.add_argument(\"--onnx_model\", type=str, help=\"ONNX Model\",\n default=\"models/model.onnx\")\n parser.add_argument(\"--n_samples\", type=int, help=\"N Samples to run for profiling\",\n default=10)\n\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n\n model_path = Path(args.model)\n default_model = tf.keras.models.load_model(model_path)\n default_model.summary()\n default_model.get_layer(index=1).summary()\n # Perform quantization on model\n if args.nc:\n from nc_eval import run_neural_compressor\n run_neural_compressor(default_model, output=args.nc_model, batch_size=args.batch_size, dtype=args.dtype)\n\n if args.profile != \"\":\n models = args.profile.split(\",\")\n print(f\"Running on tensorflow {tf.__version__}\")\n 
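        # start a TensorFlow profiler trace; traces are written to ./logdir and can be inspected in TensorBoard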
tf.profiler.experimental.start(\"logdir\")\n for model in models:\n if model == \"nc\":\n # Load model again in tensorflow format\n int8_model = tf.saved_model.load(args.nc_model)\n # Convert model to keras format to be used at runtime.\n # https://github.com/tensorflow/tensorflow/issues/42425\n from keras.layers import Input\n input_shape = keras.layers.Input(\n shape=(LATENT_SIZE,), dtype=np.float32)\n keras_i8_model = tf.keras.Model(input_shape, LayerFromSavedModel(int8_model)(input_shape))\n\n # Run the models for performance measurements\n run_model(keras_i8_model, n_samples=args.n_samples, n_batch_samples=args.batch_size)\n elif model == \"default\":\n run_model(default_model, n_samples=args.n_samples, n_batch_samples=args.batch_size)\n elif model == \"onnx\":\n import onnxruntime\n so = onnxruntime.SessionOptions()\n so.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_DISABLE_ALL\n session = onnxruntime.InferenceSession(args.onnx_model, so, providers=[\n 'OpenVINOExecutionProvider'], provider_options=[{\"device_type\": \"CPU_FP32\"}])\n run_model_openvino(session, n_samples=args.n_samples, n_batch_samples=args.batch_size)\n tf.profiler.experimental.stop()\n # Show distributions of both models overlayed to find out if there is an error after quantization.\n # show_distributions(keras_i8_model, model)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"sara-nl/QuantizedGAN","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4067,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"3861028597","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/1/7 22:28\n# @Email : yangtianyu92@126.com\nimport csv\nfrom linkmysql import link_mysql_read\nimport random\n\nresult = link_mysql_read(\"\"\"SELECT * FROM criterion2018 where ThirdReport_SiteName=\"食品伙伴网\";\"\"\")\nprint(result)\n\n\"\"\"\nurl_2018 = []\nwith open('./urlLists/url_list1.csv', 'r',newline='', encoding='utf-8') as f:\n reader = csv.DictReader(f)\n for row in reader:\n if \"2016\" in row[\"url\"]:\n url_2018.append(row)\n\"\"\"\n\nwith open('./urlLists/criterion2018.csv', 'w', newline='', encoding='utf-8') as f:\n fieldnames = [\"url\"]\n writer = csv.DictWriter(f, fieldnames=fieldnames)\n writer.writeheader()\n sum_dic = list(set([dic[\"ThirdReport_Url\"] for dic in result]))\n random.shuffle(sum_dic)\n for dic in sum_dic:\n writer.writerow({\"url\": dic})\n","repo_name":"yangtianyu92/recalliNfomation","sub_path":"choice_year.py","file_name":"choice_year.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"37257671674","text":"from typing import List\n\nfrom fastapi import HTTPException\nfrom openai.types.chat import ChatCompletionUserMessageParam\nfrom pymongo.database import Database\n\nfrom autobots.action.action_type.action_factory import ActionFactory\nfrom autobots.action.action_type.action_types import ActionType\nfrom autobots.action.action.common_action_models import TextObj, TextObjs\nfrom autobots.action.action_chat.chat_crud import ChatCRUD\nfrom autobots.action.action_chat.chat_doc_model import ChatCreate, ChatDoc, ChatDocCreate, ChatFind, ChatDocFind, ChatDocUpdate, \\\n ChatUpdate\nfrom autobots.conn.openai.openai_chat.chat_model import ChatReq, Role, Message\nfrom autobots.conn.openai.openai_client import get_openai\nfrom autobots.core.logging.log import Log\nfrom 
autobots.user.user_orm_model import UserORM\n\n\nclass UserChat():\n \"\"\"\n LM chat uses an Action to run and stores context to enable chat functionality\n \"\"\"\n DEFAULT_TITLE = \"New Chat\"\n\n def __init__(self, user: UserORM, db: Database):\n self.user = user\n self.user_id = str(user.id)\n self.chat_crud = ChatCRUD(db)\n\n async def create_chat(self, chat_create: ChatCreate, title: str = DEFAULT_TITLE) -> ChatDoc | None:\n if not chat_create.action.type == ActionType.text2text_llm_chat_openai and \\\n not chat_create.action.type == ActionType.text2text_llm_chat_with_vector_search_openai:\n raise HTTPException(400, \"Action is not available for chat\")\n try:\n chat_doc_create = ChatDocCreate(user_id=self.user_id, title=title, **chat_create.model_dump(by_alias=True))\n chat_doc = await self.chat_crud.insert_one(chat_doc_create)\n return chat_doc\n except Exception as e:\n Log.error(str(e))\n return None\n\n async def list_chat(self, chat_find: ChatFind, limit: int = 100, offset: int = 0) -> List[ChatDoc] | None:\n try:\n chat_doc_find = ChatDocFind(user_id=self.user_id, **chat_find.model_dump())\n chat_docs = await self.chat_crud.find(chat_doc_find, limit, offset)\n return chat_docs\n except Exception as e:\n Log.error(str(e))\n return None\n\n async def get_chat(self, chat_id: str) -> ChatDoc | None:\n try:\n chat_doc_find = ChatDocFind(id=chat_id, user_id=self.user_id)\n chat_docs = await self.chat_crud.find(chat_doc_find)\n if len(chat_docs) != 1:\n raise HTTPException(500, \"Error in finding chat\")\n return chat_docs[0]\n except Exception as e:\n Log.error(str(e))\n return None\n\n async def update_chat(self, chat_id: str, chat_update: ChatUpdate) -> ChatDoc:\n chat_doc_update = ChatDocUpdate(id=chat_id, user_id=self.user_id, **chat_update.model_dump())\n chat_doc = await self.chat_crud.update_one(chat_doc_update)\n return chat_doc\n\n async def delete_chat(self, chat_id: str):\n chat_doc_find = ChatDocFind(id=chat_id, user_id=self.user_id)\n delete_result = await self.chat_crud.delete_many(chat_doc_find)\n return delete_result.deleted_count\n\n async def chat(self, chat_id: str, input: TextObj) -> ChatDoc:\n chat_doc = await self.get_chat(chat_id)\n if not chat_doc:\n raise HTTPException(404, \"Chat not found\")\n chat_req = ChatReq.model_validate(chat_doc.action.config)\n chat_req.messages = chat_req.messages + chat_doc.messages\n\n resp_text_objs: TextObjs = await ActionFactory().run_action(chat_doc.action, input.model_dump())\n\n messages = []\n input_message = Message(role=Role.user, content=input.text)\n messages.append(input_message)\n for resp_text_obj in resp_text_objs.texts:\n text_obj = TextObj.model_validate(resp_text_obj)\n message = Message(role=\"user\", content=text_obj.text)\n messages.append(message)\n\n chat_doc.messages = (chat_doc.messages + messages)\n if chat_doc.title == UserChat.DEFAULT_TITLE:\n chat_doc.title = await self._gen_title(chat_doc)\n updated_chat_doc = await self.update_chat(chat_id, ChatUpdate(**chat_doc.model_dump()))\n return updated_chat_doc\n\n async def _gen_title(self, chat_doc: ChatDoc) -> str:\n try:\n title_gen_content = \"Act as expert title generator. 
Generate very short text title for the following conversation:\n\"\n\n            action_content = \"\"\n            for message_dict in chat_doc.action.config.get(\"messages\"):\n                message = Message.model_validate(message_dict)\n                action_content = action_content + f\"{message.role}: {message.content}\\n\"\n                break\n\n            conversation_content = \"\"\n            i = 0\n            for message in chat_doc.messages:\n                conversation_content = conversation_content + f\"{message.role}: {message.content}\\n\"\n                i = i + 1\n                if i >= 2:\n                    break\n\n            title_gen_message = ChatCompletionUserMessageParam(\n                role=Role.user.value,\n                content=title_gen_content+action_content+conversation_content\n            )\n            chat_res = await get_openai().openai_chat.chat(ChatReq(messages=[title_gen_message], max_token=25))\n            title = f\"{chat_doc.action.name}-{chat_res.choices[0].message.content}\"\n            return title\n        except Exception as e:\n            Log.error(str(e))\n            return UserChat.DEFAULT_TITLE\n","repo_name":"jetoslabs/autobots","sub_path":"autobots/action/action_chat/user_chat.py","file_name":"user_chat.py","file_ext":"py","file_size_in_byte":5486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"42230174340","text":"from collections import deque\r\n\r\ndfs_result = []  # stores the DFS visit order\r\ndef dfs(graph, v, visited):\r\n    visited[v] = True  # mark as visited and record the node\r\n    dfs_result.append(v)\r\n    for i in graph[v]:  # visit adjacent nodes in order\r\n        if not visited[i]:\r\n            dfs(graph, i, visited)\r\n\r\nbfs_result = []\r\ndef bfs(graph, start, visited):\r\n    queue = deque([start])\r\n    visited[start] = True\r\n\r\n    while(queue):  # repeat until the queue is empty\r\n        v = queue.popleft()  # pop one node from the queue and append it to the result\r\n        bfs_result.append(v)\r\n\r\n        for i in graph[v]:  # visit adjacent nodes in order\r\n            if not visited[i]:\r\n                queue.append(i)\r\n                visited[i] = True\r\n\r\n\r\nn, m, v = map(int, input().split())  # number of vertices, number of edges, start vertex\r\ngraph = [[] for _ in range(n+1)]\r\n\r\nfor i in range(1, m+1):  # add edge information\r\n    a, b = map(int, input().split())\r\n    graph[a].append(b)\r\n    graph[b].append(a)\r\n\r\nfor i in range(n+1):  # sort adjacency lists so neighbors are visited in ascending order\r\n    graph[i].sort()\r\n\r\n\r\nvisited = [False] * (n+1)\r\ndfs(graph, v, visited)\r\nprint(*dfs_result)\r\n\r\nvisited = [False] * (n+1)\r\nbfs(graph, v, visited)\r\nprint(*bfs_result)","repo_name":"dduniverse/Algorithm","sub_path":"백준/Silver/1260. DFS와 BFS/DFS와 BFS.py","file_name":"DFS와 BFS.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"28552856016","text":"import cv2 as cv\r\nimport numpy as np\r\n\r\n\r\ndef DrawLine(image,start_position,end_position, color, thickness):\r\n\r\n    cv.line(image,start_position,end_position,color,thickness)\r\n\r\n\r\ndef DrawCircle(image,centre_of_circle, radius, color, thickness):\r\n\r\n    cv.circle(image,centre_of_circle, radius, color, thickness)\r\n\r\n\r\ndef DrawRectangle(image, top_left_corner, bottom_right_corner,color, thickness):\r\n\r\n    cv.rectangle(image,top_left_corner, bottom_right_corner, color, thickness)\r\n\r\n\r\ndef DrawEllipse(image,center_of_ellipse,axes_length,rotation_angle,start_angle,end_angle,color,thickness):\r\n\r\n    cv.ellipse(image,center_of_ellipse,axes_length,rotation_angle,start_angle,end_angle,color,thickness)\r\n\r\n\r\ndef DrawPolygon(image,nparray_point_polygon, color):\r\n\r\n    if isinstance(nparray_point_polygon, np.ndarray):\r\n        cv.fillPoly(image, [nparray_point_polygon], color)\r\n    else:\r\n        print(\"Polygon cannot be drawn. 
Please check np array\")\r\n\r\n\r\ndef DrawText(image,text,left_bottom_corner,font,font_scale,color,thickness):\r\n\r\n    cv.putText(image,text,left_bottom_corner,font,font_scale,color,thickness,cv.LINE_AA)  # cv2.LINE_AA is used as the default line type; it is the most common choice\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n    # CREATE AN EMPTY SCREEN WITH NUMPY\r\n    screen_width = 500\r\n    screen_height = 500\r\n\r\n    screen_base = np.zeros((screen_width,screen_height,3), dtype = \"uint8\")  # 3 = channel count (B, G, R)\r\n\r\n    DrawRectangle(screen_base,(0,0),(screen_width,screen_height),(130,162,217),-1)  # BGR(130,162,217) -> RGB(217,162,130) -> HEX= #d9a282\r\n\r\n    DrawRectangle(screen_base,(75,150),(200,325),(96,166,111),-1)\r\n    DrawEllipse(screen_base,(138,150),(62,75),0,180,360,(96,166,111),-1)\r\n\r\n    points_for_first_stair = np.array([[150,225],[250,225],[250,275],[300,275],[300,300],[350,300],[350,325],[400,325],[400,425],[150,425]])\r\n    DrawPolygon(screen_base,points_for_first_stair,(49,38,166))\r\n\r\n    DrawRectangle(screen_base,(225,375),(300,425),(130,162,217),-1)\r\n    DrawEllipse(screen_base,(262,375),(37,50),0,180,360,(130,162,217),-1)\r\n\r\n    points_for_second_stair = np.array([[300,350],[350,350],[350,450],[250,450],[250,400],[275,400],[275,375],[300,375]])\r\n    DrawPolygon(screen_base,points_for_second_stair,(66,128,140))\r\n\r\n    DrawCircle(screen_base,(400,150),50,(244,227,255),-1)\r\n    DrawCircle(screen_base,(440,150),50,(130,162,217),-1)\r\n\r\n    for i in range(0,4):\r\n\r\n        DrawLine(screen_base,(0,0+(i*12)),(screen_width,60+(i*12)),(150,52,20),1)\r\n        DrawLine(screen_base,(41+(30*i),60),(57+(30*i),333),(0,0,0),1)\r\n\r\n    for i in range(0,5):\r\n\r\n        DrawCircle(screen_base,(60+(i*100),30),5,(194,164,76),-1)\r\n        DrawCircle(screen_base,(90+(i*100),45),5,(194,164,76),-1)\r\n        DrawCircle(screen_base,(30+(i*100),35),11,(194,164,76),-1)\r\n        DrawLine(screen_base,(450+(i*5),250+(i*15)),(300+(i*5),325+(i*15)),(0,0,0),1)\r\n\r\n\r\n    DrawCircle(screen_base,(340,113),17,(0,0,5),-1)\r\n\r\n\r\n    cv.imshow(\"Github OpenCV by programmewithkarsan\",screen_base)\r\n\r\n    cv.waitKey(0)\r\n    cv.destroyAllWindows()\r\n","repo_name":"programmewithkarsan/Learn-Opencv-With-programmewithkarsan","sub_path":"opencv_tutorial_lines.py","file_name":"opencv_tutorial_lines.py","file_ext":"py","file_size_in_byte":3082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"14625822047","text":"import sys\nfrom polynomials import Polynomial\n\ndef solution(arg):\n\tpol = Polynomial(arg)\n\tder = pol.get_derivative()\n\n\treturn f'The derivative of f(x) = {arg} is:\\nf\\'(x) = {der}'\n\ndef main():\n\targs = sys.argv\n\n\tprint(solution(args[1]))\n\n\nif __name__ == '__main__':\n\tmain()","repo_name":"Boyko03/Python101","sub_path":"week_3/wed/polynomials/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"72287315395","text":"import picamera\nfrom gpiozero import MotionSensor, Buzzer\nfrom datetime import datetime\n\npir = MotionSensor(17)\ncamera = picamera.PiCamera()\nalarm = Buzzer(27)\n\nstream = picamera.PiCameraCircularIO(camera, seconds=20)\ncamera.start_recording(stream, format='h264')\ntry:\n    while True:\n        camera.wait_recording(1)\n        if pir.motion_detected == True:\n            print(\"RECORDING\")\n            alarm.on()\n            camera.wait_recording(10)\n            stream.copy_to(str((datetime.now()))+'.h264')\n            alarm.off()\nfinally:\n    camera.stop_recording()\n
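    # reached via the finally block, so the camera is stopped cleanly even if the loop above is interrupted (e.g. Ctrl+C)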
print(\"Recording Stopped\")\n","repo_name":"lesp/Makerlife-Project1-Room-Protector","sub_path":"bedroom_protector.py","file_name":"bedroom_protector.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"22532116785","text":"import os\nimport gym\nimport torch\nimport argparse\nimport numpy as np\nfrom gym import wrappers\n\n\ndef argparser():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--algo', type=str, required=True)\n\tparser.add_argument('--env', type=str, required=True)\n\tparser.add_argument('--seed', type=int, default=0)\n\tparser.add_argument('--verbose', type=bool, default=True)\n\tparser.add_argument('--video', type=bool, default=False)\n\treturn parser.parse_args()\n\n\ndef main():\n\t\"\"\"\n\tTesting trained agents in environments.\n\tNOTE: This HAS to be run through terminal\n\t\"\"\"\n\targs = argparser()\n\tenv = gym.make(args.env)\n\n\tcontinuous = False if len(env.action_space.shape) == 0 else True\n\targmax = True if not continuous else False\n\tmodel_name = 'model.pth' if args.algo.lower() in ['dqn', 'ddqn'] else 'pimodel.pth'\n\tmodel_path = os.path.join('experiments', 'models', args.algo.upper() + '_' + args.env + '_' + str(args.seed), model_name)\n\n\tmodel = torch.load(model_path)\n\tobs_dim = env.observation_space.shape[0]\n\n\tif continuous:\n\t\tact_limit = env.action_space.high[0]\n\n\t# Save video of test\n\tif args.video:\n\t\tvideo_file = os.path.join('tests', 'videos', args.algo.upper() + '_' + args.env + '_' + str(args.seed))\n\t\tenv = wrappers.Monitor(env, video_file, video_callable=lambda episode_id: True, force=True)\n\n\t# Run tests\n\tmax_episodes = 30\n\tep_count = 1\n\tep_rew = 0\n\tobs = env.reset()\n\twhile ep_count <= max_episodes:\n\t\tenv.render()\n\n\t\tobs = torch.from_numpy(obs).float()\n\t\tobs = obs.view(-1, obs_dim)\n\t\twith torch.no_grad():\n\t\t\tact = model(obs).numpy()\n\t\t\tif argmax:\n\t\t\t\tact = act.argmax().item()\n\t\t\tif continuous:\n\t\t\t\tact = np.clip(act * act_limit, -act_limit, act_limit)[0]\n\n\t\tnew_obs, rew, done, info = env.step(act)\n\t\tobs = new_obs\n\t\tep_rew += rew\n\n\t\tif done:\n\t\t\tif args.verbose:\n\t\t\t\tprint(f'Episode: {ep_count}, Total Reward: {ep_rew}')\n\n\t\t\tep_rew = 0\n\t\t\tep_count += 1\n\t\t\tobs = env.reset()\n\n\tenv.close()\n\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"dyumanaditya/rlbotics","sub_path":"tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24577665471","text":"#!/usr/bin/env python3\n\nimport argparse\nimport sys\n\n\ndef parse_cmd():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--one-best-translations\", help=\"path to 1-best translations\", required=True)\n parser.add_argument(\"--nbest-alignments\", help=\"path to n-best alignments\", required=True)\n\n args = parser.parse_args()\n\n return args\n\ndef parse_nbest_alignment_header_line(line):\n cols = line.split(\"|||\")\n if len(cols) != 5:\n raise Exception(\"unsupported n-best alignments format\")\n cols = [col.strip() for col in cols]\n sent_num = cols[0]\n translation = cols[1]\n cost = cols[2]\n input_sentence = cols[3]\n lengths = [int(c.strip()) for c in cols[4].split(\" \")]\n return sent_num, translation, cost, input_sentence, lengths\n\n\ndef main():\n\n args = parse_cmd()\n\n with open(args.one_best_translations, \"r\") as 
one_best_handle:\n with open(args.nbest_alignments, \"r\") as alignments_handle:\n for one_best_line in one_best_handle:\n one_best = one_best_line.strip()\n\n found_in_alignments_header = False\n while not found_in_alignments_header:\n alignments_line = alignments_handle.readline()\n alignments_line = alignments_line.strip()\n\n if alignments_line == \"\":\n continue\n\n sent_num, translation, cost, input_sentence, lengths = parse_nbest_alignment_header_line(alignments_line)\n\n if translation == one_best:\n found_in_alignments_header = True\n break\n else:\n # skip weights of this hypothesis\n for i in range(lengths[1]):\n alignments_handle.readline()\n\n if not found_in_alignments_header:\n raise Exception(\"1best translation '%s' not found in any alignment header\" % one_best)\n\n sys.stdout.write(alignments_line + \"\\n\")\n for i in range(lengths[1]):\n alignments_line = alignments_handle.readline()\n sys.stdout.write(alignments_line)\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"pjwilliams/nematus-recon-scripts","sub_path":"recover-1best-alignment.py","file_name":"recover-1best-alignment.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71712137473","text":"#!/usr/bin/env python3\nimport rospy\nimport numpy\n\nfrom gazebo_msgs.msg import ModelState \nfrom gazebo_msgs.srv import SetModelState\n\nfrom time import sleep\n\nfrom robot import robot\n\nclass env():\n def __init__(self):\n self.robot=robot()\n self.ModelPub = rospy.Publisher('/gazebo/set_model_state',ModelState,queue_size=10)\n\n def step(self,action, speed):\n reward=0\n c=False\n current_state=self.robot.computeState()\n while(self.robot.computeState()==current_state and c==False):\n c=self.robot.get_noContour()\n self.robot.move(action, speed)\n new_state=self.robot.computeState()\n if (rospy.is_shutdown()) :\n break\n self.robot.move(9,-10)\n new_state=self.robot.computeState()\n done = False\n if self.robot.get_noContour() == True :\n reward = -2\n done = True\n\n if new_state == 3 or new_state == 5 :\n if current_state < 3 or current_state > 5 :\n reward = 0.5\n else :\n reward = -0.5\n \n elif new_state == 2 or new_state == 6 :\n if current_state < 2 or current_state > 6 :\n reward = 0.5\n else :\n reward = -0.8\n\n elif new_state == 1 or new_state == 7 :\n if current_state < 1 or current_state > 7 :\n reward = 0.5\n else :\n reward = -0.8\n\n elif new_state == 0 or new_state== 8 :\n reward = -2\n \n else :\n reward = 2\n \n \n return new_state, reward, done\n # if new_state == 3 or new_state == 5 :\n # if current_state < 3 or current_state > 5 :\n # reward = 0.5\n # else :\n # reward = -0.5\n # elif new_state == 2 or new_state == 6 :\n # if current_state < 2 or current_state > 6 :\n # reward = 0.5\n # else :\n # reward = -0.5\n # elif new_state == 1 or new_state == 7 :\n # if current_state < 1 or current_state > 7 :\n # reward = 0.5\n # else :\n # reward = -0.5\n # elif new_state == 0 or new_state== 8 :\n # reward = -1\n # else :\n # reward = 2\n # return new_state, reward, done\n \n \n def reset(self):\n state_msg = ModelState()\n state_msg.pose.position.x=1\n state_msg.pose.position.y=-0.8\n state_msg.pose.position.z=0.1\n state_msg.pose.orientation.z=1\n state_msg.pose.orientation.w=0.0000463\n state_msg.model_name = \"line_follower\"\n state_msg.reference_frame='world'\n rospy.wait_for_service('/gazebo/set_model_state')\n try:\n set_state = rospy.ServiceProxy('/gazebo/set_model_state', 
SetModelState)\n resp = set_state( state_msg )\n except rospy.ServiceException:\n print(\"/gazebo/get_model_state service call failed\") \n sleep(0.1)\n return self.robot.computeState()\n\n","repo_name":"JlassiSeif/Line-follower-with-qlearning","sub_path":"robot_q_learning/scripts/env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":3099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43098584349","text":"import tensorflow as tf\nfrom tensorflow.keras import Model, layers, regularizers, initializers\n\n\nclass TextCNN(Model):\n def __init__(\n self, embed, filter_nums, filter_sizes, num_classes, max_sent_len,\n ):\n \"\"\"\n\n :param embed: word embeddings\n :param extra_embed:\n :param filter_nums: number of filters\n :param filter_sizes: different sizes of filters\n :param num_classes: number of output classes\n :param max_sent_len: max sentence length\n \"\"\"\n super(TextCNN, self).__init__()\n self.vocab_size, self.embeddings_dim = embed.shape\n\n self.max_sent_len = max_sent_len\n self.num_classes = num_classes\n # create the word embedding layer with pre-trained embedding weights, and make it trainable\n self.embed = layers.Embedding(\n self.vocab_size,\n self.embeddings_dim,\n embeddings_initializer=initializers.Constant(embed),\n trainable=True,\n )\n self.concurrent_cnn_layer = []\n self.concurrent_max_pool_layer = []\n self.filter_sizes = filter_sizes\n self.filter_nums = filter_nums\n for filter_size in self.filter_sizes:\n # create one conv and maxpooling layer to handle each one filter size\n self.concurrent_cnn_layer.append(\n layers.Conv2D(\n data_format=\"channels_last\",\n filters=filter_nums,\n kernel_size=(filter_size, self.embeddings_dim),\n strides=1,\n padding=\"valid\",\n activation=tf.nn.relu,\n kernel_regularizer=regularizers.l2(0.01),\n )\n )\n self.concurrent_max_pool_layer.append(\n layers.MaxPool2D(\n pool_size=(self.max_sent_len - filter_size + 1, 1),\n strides=1,\n padding=\"valid\",\n )\n )\n self.flatten = layers.Flatten()\n self.dropout = layers.Dropout(0.5)\n self.out = layers.Dense(\n self.num_classes, kernel_regularizer=regularizers.l2(0.01)\n )\n\n def call(self, x, use_softmax):\n features = []\n words = self.embed(x)\n x = tf.expand_dims(words, -1)\n for i in range(len(self.filter_sizes)):\n feature = self.concurrent_cnn_layer[i](x)\n feature = self.concurrent_max_pool_layer[i](feature)\n features.append(feature)\n flatted = self.flatten(tf.concat(features, axis=-1))\n flatted = self.dropout(flatted)\n out = self.out(flatted)\n if use_softmax:\n out = tf.nn.softmax(out)\n return out\n\n\ndef cross_entropy_loss(y_pred, y_true):\n y_true = tf.cast(y_true, tf.int64)\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_true, logits=y_pred)\n # Average loss across the batch.\n return tf.reduce_mean(loss)\n\n\nif __name__ == \"__main__\":\n pass\n","repo_name":"jinzhao3611/Political_Stance_Prediction","sub_path":"text_cnn/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3003,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"24697744254","text":"\"\"\"\nModule used for writing large amounts of api data to different xlsx spreadsheets for personal use.\n\nContains functions used to get data for:\n write_season_team_stats - get all team data for given seasons.\n write_season_player_stats - get all season stats for all players.\n write_game_stats - gets all game specific data for all games in 
given seasons.\n write_player_ids - gets the full rosters from every team for all seasons given.\n write_shift_data - gets the shift information from every game for given seasons.\n\nNOTE:\n Various functions in this project currently require an NHL_players.xlsx spreadsheet that\n contains player names and ids to be present within this project directory.\n If this file is missing, it can be recreated with write_player_ids.\n\n Shift data does not appear to be collected prior to 2010.\n Trying to get data before 2010 will result in an empty dataframe\n\"\"\"\n\nimport os\nimport time\n\nimport pandas as pd\nimport requests\n\nimport api_parse as api\nimport utils\nimport values as v\n\nROOT = os.getcwd() + '/csv_data/'\nSTART_SEASON = 2005\nEND_SEASON = 2019\nMAX_GAMES = 1271\n\n\ndef _game_stats(season):\n \"\"\"\n Scrape the api and return lists of dicts containing game level data.\n Gets the game data for every game in the season for regular season and playoffs.\n Returns lists of all event data, team data, and player data.\n\n Usage:\n events_data, team_data, player_data = _game_stats(20152016)\n\n :param season: Season to scrape data for.\n :type season: str\n :return: List of dicts for event data per game, team stats per game, and player data per game\n :rtype: (list of dict, list of dict, list of dict)\n \"\"\"\n # TODO add logic for passing playoff game numbers to collect playoff data\n game_types = ['02']\n event_data = []\n team_data = []\n player_data = []\n\n for game_type in game_types:\n for game in range(1, MAX_GAMES + 1):\n # request for game needs to be in format 0001\n game_number = str(game).zfill(4)\n print(f'{game_type} - {game_number}')\n stats = api.get_game_stats(season, game_type, game_number)\n if stats:\n event_data.extend(stats[0])\n team_data.extend(stats[1])\n player_data.extend(stats[2])\n time.sleep(1)\n else:\n break\n\n return event_data, team_data, player_data\n\n\ndef _season_team_stats(season):\n \"\"\"\n Scrape the api and return lists of dicts containing season stats for each team.\n Returns lists of all team stats for raw values and season ranks (1st, 3rd, etc.).\n\n Usage:\n stats, ranks = _season_team_stats(20152016)\n\n :param season: Season to scrape data for.\n :type season: str\n :return: List of dicts for all team stats and all team stat ranks\n :rtype: (list of dict, list of dict)\n \"\"\"\n stats_list = []\n ranks_list = []\n # dynamically find teams by season instead?\n # not sure if api lists teams per season - wasting time on api calls\n for team_id in v.all_teams_by_id:\n try:\n stats, ranks = api.get_team_stats(team_id, season=season)\n time.sleep(1)\n stats['Team'] = v.all_teams_by_id[team_id]\n stats_list.append(stats)\n\n ranks_list.append(ranks)\n ranks['Team'] = v.all_teams_by_id[team_id]\n except KeyError:\n pass\n\n return stats_list, ranks_list\n\n\ndef _season_player_stats(season):\n \"\"\"\n Scrape the api and return lists of dicts containing season stats for each player.\n Collects player stats for:\n Full Season, Home, Away, Situation\n\n Usage:\n stats = _season_player_stats(20152016)\n\n :param season: Season to scrape data for.\n :type season: str\n :return: List of dicts for all player stats.\n :rtype: list of dict\n \"\"\"\n # get all players for season so don't have to load from xlsx?\n # Slower and lots of requests but don't need xlsx dependency\n players = utils.players_by_id(os.getcwd() + '/NHL_players.xlsx')\n # different categories of stats for each player\n stats_by = [\n 'homeAndAway',\n 'goalsByGameSituation',\n 
'statsSingleSeason',\n ]\n stats_list = []\n\n def _get_stats(_player_id, _by, _season):\n try:\n stats = api.get_player_stats(_player_id, _by, _season)\n if by == 'homeAndAway':\n stats_list.append(stats[0])\n stats_list.append(stats[1])\n else:\n stats_list.append(stats)\n except (KeyError, IndexError):\n pass\n\n for player_id in players:\n print(player_id)\n for by in stats_by:\n # makes LOTS of requests for every player, for different situations, for every season\n # while loop to to make requests until getting kicked out\n # pause for the timeout then try to resume\n while True:\n try:\n _get_stats(player_id, by, season)\n break\n except requests.exceptions.ConnectionError:\n print('sleeping')\n time.sleep(1800)\n\n return stats_list\n\n\ndef _shift_data(season):\n \"\"\"\n Scrape the api and return lists of dicts containing shift details for each player.\n\n Usage:\n shifts = _shift_data(20152016)\n\n :param season: Season to scrape data for.\n :type season: str\n :return: List of dicts for all shift information for every player.\n :rtype: list of dict\n \"\"\"\n game_types = ['02']\n shifts = []\n\n def _get_stats(_season, _game_type, _game):\n game_number = str(_game).zfill(4)\n print(f'{_game_type} - {game_number}')\n stats = api.get_shift_data(_season, _game_type, game_number)\n if stats:\n shifts.extend(stats)\n time.sleep(1)\n\n for game_type in game_types:\n for game in range(1, MAX_GAMES + 1):\n while True:\n try:\n _get_stats(season, game_type, game)\n break\n except requests.exceptions.ConnectionError:\n print('sleeping')\n time.sleep(1800)\n\n return shifts\n\n\ndef write_season_team_stats(filename, start_season, end_season=None):\n \"\"\"\n Function used to write all team stats for every specified season to provided xlsx file.\n Can optionally provide a single season or a start and end season if wanting to scrape a range of seasons.\n If the provided filename already exists, will append new data to file.\n Output xlsx will write data to individual 'Stats' and 'Ranks' tabs.\n\n :param filename: Complete filepath to write data to.\n :type filename: str\n :param start_season: Start season to get data from.\n :type start_season: int or str\n :param end_season: Final season to get data from (inclusive).\n :type end_season: int or str\n \"\"\"\n writer = pd.ExcelWriter(filename, engine='xlsxwriter')\n\n # format seasons to be api-friendly\n if end_season is not None:\n seasons = utils.get_season_list(start_season, end_season)\n else:\n seasons = utils.get_season_list(start_season, start_season)\n\n stats_list = []\n ranks_list = []\n\n try:\n for season in seasons:\n print(season)\n stats, ranks = _season_team_stats(season)\n stats_list.extend(stats)\n ranks_list.extend(ranks)\n # make sure to write whatever function has managed to scrape in event of error\n finally:\n # create dataframes, rename cols from api json format to 'prettier' format, and change col order\n df_stats = pd.DataFrame(stats_list)\n df_stats = utils.rename_cols(df_stats)\n df_stats = utils.update_cols(df_stats, ['Season', 'Team'])\n\n df_ranks = pd.DataFrame(ranks_list)\n df_ranks = utils.rename_cols(df_ranks)\n df_ranks = utils.update_cols(df_ranks, ['Season', 'Team'])\n\n if os.path.exists(filename):\n # load previous dataframe and append newest data\n og_stats = pd.read_excel(filename, sheet_name='Stats')\n df_stats = og_stats.append(df_stats, ignore_index=True).drop_duplicates()\n\n og_ranks = pd.read_excel(filename, sheet_name='Ranks')\n df_ranks = og_ranks.append(df_ranks, 
ignore_index=True).drop_duplicates()\n\n df_stats.to_excel(writer, index=False, sheet_name='Stats')\n df_ranks.to_excel(writer, index=False, sheet_name='Ranks')\n writer.close()\n\n\ndef write_season_player_stats(filename, start_season, end_season=None):\n \"\"\"\n Function used to write all player stats for every specified season to provided xlsx file.\n Can optionally provide a single season or a start and end season if wanting to scrape a range of seasons.\n If the provided filename already exists, will append new data to file.\n Output xlsx will write data to a 'Player Stats' tab.\n\n :param filename: Complete filepath to write data to.\n :type filename: str\n :param start_season: Start season to get data from.\n :type start_season: int or str\n :param end_season: Final season to get data from (inclusive).\n :type end_season: int or str\n \"\"\"\n writer = pd.ExcelWriter(filename, engine='xlsxwriter')\n\n # format seasons to be api-friendly\n if end_season is not None:\n seasons = utils.get_season_list(start_season, end_season)\n else:\n seasons = utils.get_season_list(start_season, start_season)\n\n stats_list = []\n try:\n for season in seasons:\n print(season)\n stats = _season_player_stats(season)\n stats_list.extend(stats)\n # make sure to write whatever function has managed to scrape in event of error\n finally:\n # create dataframes, rename cols from api json format to 'prettier' format, and change col order\n stats_df = pd.DataFrame(stats_list)\n stats_df = utils.rename_cols(stats_df)\n stats_df = utils.update_cols(stats_df, ['Season', 'Player', 'Stat Type'])\n\n if os.path.exists(filename):\n # load previous dataframe and append newest data\n og_stats = pd.read_excel(filename)\n og_stats = utils.rename_cols(og_stats)\n stats_df = og_stats.append(stats_df, ignore_index=True).drop_duplicates()\n stats_df.to_excel(writer, index=False, sheet_name='Player Stats')\n\n writer.close()\n\n\ndef write_game_stats(filename, start_season, end_season=None):\n \"\"\"\n Function used to write all game specific stats for every specified season to provided xlsx file.\n Can optionally provide a single season or a start and end season if wanting to scrape a range of seasons.\n If the provided filename already exists, will append new data to file.\n Output xlsx will write data to individual 'Events', 'Teams', and 'Players' tabs.\n\n :param filename: Complete filepath to write data to.\n :type filename: str\n :param start_season: Start season to get data from.\n :type start_season: int or str\n :param end_season: Final season to get data from (inclusive).\n :type end_season: int or str\n \"\"\"\n writer = pd.ExcelWriter(filename, engine='xlsxwriter')\n\n # format seasons to be api-friendly\n if end_season is not None:\n seasons = utils.get_season_list(start_season, end_season)\n else:\n seasons = utils.get_season_list(start_season, start_season)\n\n events_data = []\n team_data = []\n player_data = []\n try:\n for season in seasons:\n season = season[:4]\n print(season)\n events, team, player = _game_stats(season)\n events_data.extend(events)\n team_data.extend(team)\n player_data.extend(player)\n # make sure to write whatever function has managed to scrape in event of error\n finally:\n # create dataframes, rename cols from api json format to 'prettier' format, and change col order\n event_df = pd.DataFrame(events_data)\n event_df = utils.rename_cols(event_df)\n event_df = utils.update_cols(event_df, ['Season', 'Game Type', 'Game Number', 'Player', 'Team'])\n\n team_df = pd.DataFrame(team_data)\n 
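# team_data: per-game team stats gathered by _game_stats above\n        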
team_df = utils.rename_cols(team_df)\n team_df = utils.update_cols(team_df, ['Season', 'Game Type', 'Game Number', 'Team'])\n\n player_df = pd.DataFrame(player_data)\n player_df = utils.rename_cols(player_df)\n player_df = utils.update_cols(player_df, ['Season', 'Game Type', 'Game Number', 'Player', 'Team'])\n\n if os.path.exists(filename):\n # load previous dataframe and append newest data\n og_data = pd.read_excel(filename, sheet_name=None)\n event_df = og_data['Events'].append(event_df, ignore_index=True).drop_duplicates()\n team_df = og_data['Teams'].append(team_df, ignore_index=True).drop_duplicates()\n player_df = og_data['Players'].append(player_df, ignore_index=True).drop_duplicates()\n\n event_df.to_excel(writer, index=False, sheet_name='Events')\n team_df.to_excel(writer, index=False, sheet_name='Teams')\n player_df.to_excel(writer, index=False, sheet_name='Players')\n\n writer.close()\n\n\ndef write_player_ids(filename, start_season, end_season=None):\n \"\"\"\n Function used to write all player names and ids for every specified season to provided xlsx file.\n Can optionally provide a single season or a start and end season if wanting to scrape a range of seasons.\n If the provided filename already exists, will append new data to file.\n Output xlsx will write data to a 'Players' tab.\n\n :param filename: Complete filepath to write data to.\n :type filename: str\n :param start_season: Start season to get data from.\n :type start_season: int or str\n :param end_season: Final season to get data from (inclusive).\n :type end_season: int or str\n \"\"\"\n writer = pd.ExcelWriter(filename, engine='xlsxwriter')\n\n # format seasons to be api-friendly\n if end_season is not None:\n seasons = utils.get_season_list(start_season, end_season)\n else:\n seasons = utils.get_season_list(start_season, start_season)\n\n player_data = []\n try:\n for season in seasons:\n print(season)\n for team in v.all_teams_by_id:\n roster = api.get_roster(team, season)\n player_data.extend(roster)\n time.sleep(1)\n # make sure to write whatever function has managed to scrape in event of error\n finally:\n # create dataframes, rename cols from api json format to 'prettier' format, and change col order\n player_df = pd.DataFrame(player_data).drop_duplicates()\n player_df = utils.rename_cols(player_df)\n\n if os.path.exists(filename):\n # load previous dataframe and append newest data\n og_data = pd.read_excel(filename)\n player_df = og_data.append(player_df, ignore_index=True).drop_duplicates()\n\n player_df.to_excel(writer, index=False, sheet_name='Players')\n\n writer.close()\n\n\ndef write_shift_data(filename, start_season, end_season=None):\n \"\"\"\n Function used to write all shift information for every game and for every specified season to provided xlsx file.\n Can optionally provide a single season or a start and end season if wanting to scrape a range of seasons.\n If the provided filename already exists, will append new data to file.\n Output xlsx will write data to a 'Players' tab.\n\n :param filename: Complete filepath to write data to.\n :type filename: str\n :param start_season: Start season to get data from.\n :type start_season: int or str\n :param end_season: Final season to get data from (inclusive).\n :type end_season: int or str\n \"\"\"\n writer = pd.ExcelWriter(filename, engine='xlsxwriter')\n\n # format seasons to be api-friendly\n if end_season is not None:\n seasons = utils.get_season_list(start_season, end_season)\n else:\n seasons = utils.get_season_list(start_season, 
start_season)\n\n    shift_data = []\n    try:\n        for season in seasons:\n            season = season[:4]\n            print(season)\n            shifts = _shift_data(season)\n            shift_data.extend(shifts)\n            time.sleep(1)\n    # make sure to write whatever function has managed to scrape in event of error\n    finally:\n        # create dataframes, rename cols from api json format to 'prettier' format, and change col order\n        shift_df = pd.DataFrame(shift_data).drop_duplicates()\n        shift_df = utils.rename_cols(shift_df)\n\n        if os.path.exists(filename):\n            # load previous dataframe and append newest data\n            og_data = pd.read_excel(filename)\n            shift_df = og_data.append(shift_df, ignore_index=True).drop_duplicates()\n\n        shift_df.to_excel(writer, index=False, sheet_name='Players')\n\n        writer.close()\n\n\nif __name__ == '__main__':\n    # team_stats_file = ROOT + '/NHL_team_stats.xlsx'\n    # write_season_team_stats(team_stats_file, 1995, 2019)\n\n    shift_file = ROOT + '/NHL_shift_data.xlsx'\n    # shift data doesn't appear to be collected prior to 2010\n    write_shift_data(shift_file, 2010, 2019)\n\n    # player_stats_file = ROOT + '/NHL_player_stats.xlsx'\n    # write_season_player_stats(player_stats_file, 2001, 2019)\n\n    # game_stats_file = ROOT + '/NHL_game_stats.xlsx'\n    # write_game_stats(game_stats_file, 1995, 2003)\n\n    # roster_file = ROOT + '/NHL_players.xlsx'\n    # write_player_ids(roster_file, 1995, 2019)\n","repo_name":"zen1300/nhl_stats","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":17562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"42628861929","text":"\"\"\"\n1. Initiate KuCoin and load all available symbols and timeframes.\n2. Load PyStore Store and get collection in question.\n3. For each kucoin symbol, check database for items\n4. If it doesn't exist, fetch today.\n4.1 For the time delta, multiply the length of the result by the time interval. Adjust since using this number. \n5. Append to dataframe until no results are received\n6. 
Save Item, including metadata source, start, end\n\"\"\"\n\nimport ccxt\nimport datetime\nimport pandas as pd\nimport pystore\nimport os\nstorage_dir = os.path.abspath(os.path.curdir + '/storage')\nkucoin = ccxt.kucoin({'apiKey': '', 'secret': '', 'password': ''})\nkucoin.rateLimit = 60\nprint(kucoin.rateLimit)\nbalance = kucoin.fetchBalance()\nsymbols = kucoin.symbols\nall_usdt_symbols = list(filter(lambda x: x.endswith(\"USDT\"), symbols))\n\ncurrent_symbols = []\ndata = {}\npystore.set_path(storage_dir)\nstore = pystore.store('capa_store')\ncollection = store.collection('Crypto.Candles.Min')\ncollections = store.list_collections()\nitems = collection.list_items()\n\nfor symbol in all_usdt_symbols:\n    items = collection.list_items()\n    if symbol[0:-5] not in items:\n        print(\"getting\", symbol)\n        minute_candles = []\n        since = kucoin.parse8601('2019-01-01T00:00:00Z')\n        while since < kucoin.milliseconds():\n            print(\"getting\", since)\n            page = kucoin.fetchOHLCV(symbol, '1m', since)\n            if len(page) > 1:\n                since = page[-1][0]\n                minute_candles += page\n            else: \n                since += 60*1000*60*24\n        if len(minute_candles) > 1:\n            export = pd.DataFrame(minute_candles)\n            export = export.rename(columns={0:'timestamp', 1:'open', 2:'high', 3:'low', 4:'close', 5:'volume'})\n            export['timestamp'] = pd.to_datetime(export[\"timestamp\"], unit=\"ms\")\n            export = export.set_index('timestamp')\n            # export['symbol'] = symbol\n            # export = export.set_index(['timestamp', 'symbol'])\n            print(len(export), symbol, export)\n            collection.write(symbol, export, metadata={'source':'kucoin'})\n","repo_name":"kaynelynn/portfolio-analysis-workbooks","sub_path":"old-kucoin-backfill.py","file_name":"old-kucoin-backfill.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"15495335019","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 8 16:42:09 2020\n\n@author: Sarah\n\"\"\"\n#func = lambda x: x**2 - 45\nimport numpy as np\n\ndef derivative(func, x):\n    '''\n    Symmetric (central) difference estimate of the derivative of func at x\n    returns dfunc: the approximate derivative value\n    -----\n    Parameters:\n    func = function of a single variable\n\n    x = point at which the derivative is evaluated\n    '''\n    h = 0.001 #value commonly used in calculators (smaller h is more accurate until round-off dominates)\n    dfunc = (func(x+h)-func(x-h))/(2*h)\n    return (dfunc)\n\ndef midpoint_rule(func, a, b, n):\n    '''\n    Midpoint rule (rectangle rule) for estimating a definite integral using a\n    Riemann sum with midpoints of subintervals\n    returns M: the value of the integration with the given parameters\n    -----\n    Parameters:\n    a, b = numbers Interval of integration [a,b]\n    \n    n = Number of subintervals of [a,b]\n    \n    func = function of a single variable\n    '''\n    deltx = (b-a)/n #step size\n    M = 0\n    for i in range(n):\n        M += func( (a + (deltx/2)) + (i*deltx) )\n    M = M*deltx\n    return (M)\n\ndef trapezoidal_rule(func, a, b, n):\n    '''\n    Trapezoidal rule for estimating a definite integral using trapezoids\n    instead of rectangles as in the midpoint rule\n    returns T: the value of the integration with the given parameters\n    -----\n    Parameters:\n    a, b = numbers Interval of integration [a,b]\n    \n    n = Number of subintervals of [a,b]\n    \n    func = function of a single variable\n    '''\n    deltx = (b-a)/n #step size\n    T = 0\n    for i in range(n):\n        T += ( deltx/2 )*( func(a + (i*deltx)) + func(a + ((i + 1)*deltx)) )\n    return (T)\n\ndef simpsons_rule(func, a, b, n):\n    '''\n    Simpson's rule for estimating a definite integral using 
piecewise quadratic functions\n    returns S: the value of the integration with the given parameters\n    -----\n    Parameters:\n    a, b = numbers Interval of integration [a,b]\n    \n    n = Number of subintervals of [a,b]\n    \n    func = function of a single variable\n    '''\n    deltx = (b-a)/n #step size\n    c = 0\n    x = a + deltx\n    n1 = (n//2) + 1\n    for i in range(1, n1):\n        c += 4*func(x)\n        x += 2*deltx\n\n    x = a + 2*deltx\n    n2 = (n//2)\n    for i in range(1, n2):\n        c += 2*func(x)\n        x += 2*deltx\n    S = ( deltx/3 )*( func(a) + func(b) + c )\n    \n    return (S)\n\n","repo_name":"SarahV4775/Comp.-Meth.-for-Astrophysics-HW","sub_path":"HW 2/numerical_calculus_library.py","file_name":"numerical_calculus_library.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"74504298755","text":"f = open(\"input.txt\")\n\nd = dict()\n# Parse data & store data in dict\nfor l in f:\n    f_bag, snd_half = l.split(\" contain \")\n    c_split = snd_half.split(\", \")\n    c_split[len(c_split)-1] = c_split[len(c_split)-1][:-2] # Remove .\\n\n    f_bag = f_bag[:-1] # Remove ending 's'\n    c_split = [x[:-1] if x[-1:] == 's' else x for x in c_split] # Remove ending 's'\n\n    if c_split[0] == \"no other bag\":\n        d[f_bag] = None\n    else:\n        d[f_bag] = [ [x[0:1], x[2:]] for x in c_split ]\n    \n\n# Recursive dict search function\ndef p1_search(current_tag, search_tag):\n    # Match hit\n    if current_tag == search_tag:\n        return True\n    \n    # Recursively search down\n    l = d[current_tag]\n    if l == None:\n        return False\n    return any( [p1_search(x[1], search_tag) for x in l] )\n    \n\n# Check if each bag type can contain 'shiny gold'\nt = \"shiny gold bag\"\np1_result = 0\nfor k in d:\n    if k == t:\n        continue\n    if p1_search(k, t):\n        p1_result += 1\n\nprint(\"/P1/ Number of bags that can contain one shiny: \" + str(p1_result))\n\n# Recursive dict search function\ndef p2_search(tag):\n    # Recursively search down\n    l = d[tag]\n    if l == None:\n        return 0\n    return sum( [int(x[0]) + int(x[0]) * p2_search(x[1]) for x in l] )\n\np2_results = p2_search(t)\nprint(\"/P2/ Shiny bag # of containing bags: \" + str(p2_results))","repo_name":"laxel/adventOfCode","sub_path":"2020/d07/d07.py","file_name":"d07.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"24619153221","text":"import csv\nimport datetime\n\n\n\n# Menü listesini txt'den okur.\ndef read_menu_list():\n    try:\n        with open('Menu.txt', 'r') as menu_file:\n            print(menu_file.read())\n    except:\n        print(\"Menu.txt dosyası bulunamadı, dosyanın bulunduğu konumu kontrol edip tekrar deneyiniz. 
\")\n exit()\n \n# read_menu_list'i çağıraram menüyü yazdırır.\ndef main():\n read_menu_list()\n\n\n# pizza süper sınıfı.\nclass Pizza:\n def __init__(self, description, cost):\n self.description = description\n self.cost = cost\n\n # Pizza açıklaması\n def get_description(self):\n return self.description\n\n # Pizza fiyatı\n def get_cost(self):\n return self.cost\n\n\n# Pizza alt sınıfları ve çeşitleri.\nclass klasikPizza(Pizza):\n def __init__(self):\n super().__init__('Klasik Pizza', 85)\n\n\nclass margaritaPizza(Pizza):\n def __init__(self):\n super().__init__('Margarita Pizza', 98)\n\n\nclass turkPizza(Pizza):\n def __init__(self):\n super().__init__('Türk Pizza', 105)\n\n\nclass sadePizza(Pizza):\n def __init__(self):\n super().__init__('Sade Pizza', 85)\n\n\n# Sos süper sınıfı pizzanın alt sınıfı\nclass Decorator(Pizza):\n def __init__(self, component, description, cost):\n super().__init__(description, cost)\n self.component = component\n\n # seçilen sos ve pizzanın açıklamasının döndürülmesi\n def get_description(self):\n return super().get_description() + ' ' + self.component.get_description()\n\n # seçilen sos ve pizzanın fiyatının döndürülmesi\n def get_cost(self):\n return self.component.get_cost() + super().get_cost()\n\n\n# Sos çeşitleri, Decoratorun alt sınıfı\nclass Olive(Decorator):\n def __init__(self, component):\n super().__init__(component,'Zeytinli', 5.0)\n\n\nclass Mushroom(Decorator):\n def __init__(self, component):\n super().__init__(component,'Mantarlı', 8.0)\n\n\nclass Meat(Decorator):\n def __init__(self, component):\n super().__init__(component,'Etli', 16)\n\n\nclass GoatCheese(Decorator):\n def __init__(self, component):\n super().__init__(component,'Keçi Peynirli', 10)\n\n\nclass Onion(Decorator):\n def __init__(self, component):\n super().__init__(component,'Soğanlı', 5)\n\n\nclass Corn(Decorator):\n def __init__(self, component):\n super().__init__(component,\"Mısırlı\", 7)\n\n\n# Main fonksiyonunu çağırı menüyü yazdırmak\nif __name__ == \"__main__\":\n main()\n\n#Sipariş tarihini al ve csv dosyasına yazdır.\ndef Time():\n return datetime.datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\")\n\n#Kredi kartı kontrol\ndef CC_check(ccnum):\n ccnum = str(ccnum)\n if not len(ccnum) == 16:\n return False\n if not ccnum.isdigit():\n return False\n else:\n return True\n#CC_cvv kontrol\ndef CC_cvv(cvv):\n cvv = str(cvv)\n if not len(cvv) == 3:\n return False\n if not cvv.isdigit():\n return False\n else:\n return True\n\n#TC numarası kontrolü\ndef TC_check(tcnum):\n tcnum = str(tcnum)\n if not len(tcnum) == 11:\n return False\n if not tcnum.isdigit():\n return False\n else:\n return True\n\n\n\n# Kullanıcı için pizza seçimi\nwhile True:\n try:\n pizzaChoice = int(input(\"1-4 Arasında bir pizza seçin.\"))\n except:\n print(\"Lütfen doğru bir giriş yapın. 
\")\n if pizzaChoice == 1:\n pizza = klasikPizza()\n break\n elif pizzaChoice == 2:\n pizza = margaritaPizza()\n break\n elif pizzaChoice == 3:\n pizza = turkPizza()\n break\n elif pizzaChoice == 4:\n pizza = sadePizza()\n break\n else:\n print(\"Geçersiz pizz seçimi!\")\n\nwhile True:\n\n sauceChoice = int(input(\"11-16 Arasında bir sos seçiniz.\"))\n\n if sauceChoice == 11:\n sauce = Olive(pizza)\n break\n elif sauceChoice == 12:\n sauce = Mushroom(pizza)\n break\n elif sauceChoice == 13:\n sauce = Meat(pizza)\n break\n elif sauceChoice == 14:\n sauce = GoatCheese(pizza)\n break\n elif sauceChoice == 15:\n sauce = Onion(pizza)\n break\n elif sauceChoice == 16:\n sauce = Corn(pizza)\n break\n\ntotal_cost = sauce.get_cost()\n\nprint(\"\\nSeçiminiz: \" + sauce.get_description() + \"\\nToplam Tutar: \" + str(total_cost) + \"TL\")\n\n#siparişi onaylayıp onaylamadığını sorar\napprove = input(\"Siparişi onaylıyor musunuz?(e/h)\")\n\n#e'yi tuşlarsa devam eder h'yi tuşlarsa döngü sonlanır\nwhile True:\n if approve == \"e\":\n name = input(\"\\nAdınızı giriniz: \")\n\n #TC kimlik numarası ister ve doğrulamasını yapar\n while True:\n tcnum = input(\"TC numaranızı giriniz: \")\n\n if TC_check(tcnum) == True:\n break\n else:\n print(\"TC numarası doğru değil. \")\n\n #CC numarası ister ve doğrulamasını yapar\n while True:\n ccnum = input(\"Kredi Kartı numaranızı giriniz. \")\n\n if CC_check(ccnum) == True:\n break\n else:\n print(\"Kredi kartı numarası doğru değil. \")\n\n #CVV ister ve doğrulamasını yapar\n while True:\n cvv = input(\"CVV giriniz. \")\n if CC_cvv(cvv) == True:\n break\n else:\n print(\"CVV numarası doğru değil. \")\n\n elif approve == \"h\":\n print(\"Sipariş iptal edildi.\")\n break\n else:\n approve = input(\"Lütfen e veya h tuşuna basınız.\")\n continue\n break\n\ndt_string = Time()\n\n# csv dosyasını aç eğer yok ise oluştur ve kullanıcı bilgilerini, siparişi, tutarı ve zamanı yaz\nwith open('Orders_Database.csv', 'a') as db_file:\n db_writer = csv.writer(db_file)\n db_writer.writerow([name, tcnum, sauce.get_description(), total_cost, ccnum, cvv, dt_string])\n\nprint(f'\\nTeşekkürler {name}! 
{sauce.get_description()} siparişiniz alınmıştır.')\nprint(f'Toplam tutar: {total_cost:.2f} TL')\n\n\n\n\n","repo_name":"EmreKuyumcu/PizzaOrderSystem","sub_path":"PizzaOrderSystem.py","file_name":"PizzaOrderSystem.py","file_ext":"py","file_size_in_byte":6030,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15274394564","text":"import os\nimport pyaudio\nimport wave\nimport speech_recognition as sr\nimport time\n\nCHUNK = 1024\n\n# initiate voice recognition on HU through ADB\nADB_DEVICE = \"ABC-0123456789\"\ncmd = \"adb -s \" + ADB_DEVICE + \" shell input keyevent KEYCODE_SHORTCUT_PTT\"\nos.system(cmd)\ntime.sleep(3)\n\n# ==========================================\n# VR input from PC to HU\n\n# prepare to play .wav file\nwf = wave.open(\"Z:/Projects/VR_project/audio_files/French/Navigation/afficherLitineraire.wav\", 'rb')\n\n# instantiate PyAudio\np = pyaudio.PyAudio()\n\n# open stream\nstream = p.open(format=p.get_format_from_width(wf.getsampwidth()), \n\t\t\t\tchannels=wf.getnchannels(), \n\t\t\t\trate=wf.getframerate(), \n\t\t\t\toutput=True)\n\ndata = wf.readframes(CHUNK)\n\nwhile len(data) > 0:\n\tstream.write(data)\n\tdata = wf.readframes(CHUNK)\n\n# stop stream\nstream.stop_stream()\nstream.close()\n\n# close PyAudio\np.terminate()\n\n# ==========================================\n# VR output from HU to PC\n\nr = sr.Recognizer()\nr.energy_threshold = 4000\n\n# obtain audio from microphone\nwith sr.Microphone() as source:\n\tprint(\"Spit some fiyah...\")\n\taudio = r.listen(source)\n\t\n# recognize speech using Google\ntry:\n\tprint(\"Google thinks you said '\" + r.recognize_google(audio_data=audio, language=\"fr-CA\") + \"'\")\nexcept sr.UnknownValueError:\n\tprint(\"Google could not understand audio\")\nexcept sr.RequestError as e:\n\tprint(\"Google error; {0}\".format(e))","repo_name":"tphwong/VR_project","sub_path":"test/archive/vr_afficherLitineraire.py","file_name":"vr_afficherLitineraire.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25167724985","text":"import logging\nfrom typing import Any\n\nfrom flask import request, Response\nfrom flask_appbuilder.api import expose, protect, rison, safe\nfrom flask_appbuilder.models.sqla.interface import SQLAInterface\nfrom flask_babel import ngettext\nfrom marshmallow import ValidationError\n\nfrom superset.commands.exceptions import (\n DatasourceNotFoundValidationError,\n RolesNotFoundValidationError,\n)\nfrom superset.commands.security.create import CreateRLSRuleCommand\nfrom superset.commands.security.delete import DeleteRLSRuleCommand\nfrom superset.commands.security.exceptions import RLSRuleNotFoundError\nfrom superset.commands.security.update import UpdateRLSRuleCommand\nfrom superset.connectors.sqla.models import RowLevelSecurityFilter\nfrom superset.constants import MODEL_API_RW_METHOD_PERMISSION_MAP, RouteMethod\nfrom superset.daos.exceptions import DAOCreateFailedError, DAOUpdateFailedError\nfrom superset.extensions import event_logger\nfrom superset.row_level_security.schemas import (\n get_delete_ids_schema,\n openapi_spec_methods_override,\n RLSListSchema,\n RLSPostSchema,\n RLSPutSchema,\n RLSShowSchema,\n)\nfrom superset.views.base import DatasourceFilter\nfrom superset.views.base_api import (\n BaseSupersetModelRestApi,\n requires_json,\n statsd_metrics,\n)\nfrom superset.views.filters import BaseFilterRelatedRoles\n\nlogger = 
logging.getLogger(__name__)\n\n\nclass RLSRestApi(BaseSupersetModelRestApi):\n datamodel = SQLAInterface(RowLevelSecurityFilter)\n include_route_methods = RouteMethod.REST_MODEL_VIEW_CRUD_SET | {\n RouteMethod.RELATED,\n \"bulk_delete\",\n }\n resource_name = \"rowlevelsecurity\"\n class_permission_name = \"Row Level Security\"\n openapi_spec_tag = \"Row Level Security\"\n method_permission_name = MODEL_API_RW_METHOD_PERMISSION_MAP\n allow_browser_login = True\n\n list_columns = [\n \"id\",\n \"name\",\n \"filter_type\",\n \"tables.id\",\n \"tables.table_name\",\n \"roles.id\",\n \"roles.name\",\n \"clause\",\n \"changed_on_delta_humanized\",\n \"changed_by.first_name\",\n \"changed_by.last_name\",\n \"changed_by.id\",\n \"group_key\",\n ]\n order_columns = [\n \"name\",\n \"filter_type\",\n \"clause\",\n \"changed_on_delta_humanized\",\n \"group_key\",\n ]\n add_columns = [\n \"name\",\n \"description\",\n \"filter_type\",\n \"tables\",\n \"roles\",\n \"group_key\",\n \"clause\",\n ]\n show_columns = [\n \"name\",\n \"description\",\n \"filter_type\",\n \"tables.id\",\n \"tables.schema\",\n \"tables.table_name\",\n \"roles.id\",\n \"roles.name\",\n \"group_key\",\n \"clause\",\n ]\n search_columns = (\n \"name\",\n \"description\",\n \"filter_type\",\n \"tables\",\n \"roles\",\n \"group_key\",\n \"clause\",\n \"created_by\",\n \"changed_by\",\n )\n edit_columns = add_columns\n\n show_model_schema = RLSShowSchema()\n list_model_schema = RLSListSchema()\n add_model_schema = RLSPostSchema()\n edit_model_schema = RLSPutSchema()\n\n allowed_rel_fields = {\"tables\", \"roles\", \"created_by\", \"changed_by\"}\n base_related_field_filters = {\n \"tables\": [[\"id\", DatasourceFilter, lambda: []]],\n \"roles\": [[\"id\", BaseFilterRelatedRoles, lambda: []]],\n }\n\n openapi_spec_methods = openapi_spec_methods_override\n \"\"\" Overrides GET methods OpenApi descriptions \"\"\"\n\n @expose(\"/\", methods=(\"POST\",))\n @protect()\n @safe\n @statsd_metrics\n @requires_json\n @event_logger.log_this_with_context(\n action=lambda self, *args, **kwargs: f\"{self.__class__.__name__}.post\",\n log_to_statsd=False,\n )\n def post(self) -> Response:\n \"\"\"Create a new RLS rule.\n ---\n post:\n summary: Create a new RLS rule\n requestBody:\n description: RLS schema\n required: true\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/{{self.__class__.__name__}}.post'\n responses:\n 201:\n description: RLS Rule added\n content:\n application/json:\n schema:\n type: object\n properties:\n id:\n type: number\n result:\n $ref: '#/components/schemas/{{self.__class__.__name__}}.post'\n 400:\n $ref: '#/components/responses/400'\n 401:\n $ref: '#/components/responses/401'\n 404:\n $ref: '#/components/responses/404'\n 422:\n $ref: '#/components/responses/422'\n 500:\n $ref: '#/components/responses/500'\n \"\"\"\n try:\n item = self.add_model_schema.load(request.json)\n except ValidationError as error:\n return self.response_400(message=error.messages)\n\n try:\n new_model = CreateRLSRuleCommand(item).run()\n return self.response(201, id=new_model.id, result=item)\n except RolesNotFoundValidationError as ex:\n logger.error(\n \"Role not found while creating RLS rule %s: %s\",\n self.__class__.__name__,\n str(ex),\n exc_info=True,\n )\n return self.response_422(message=str(ex))\n except DatasourceNotFoundValidationError as ex:\n logger.error(\n \"Table not found while creating RLS rule %s: %s\",\n self.__class__.__name__,\n str(ex),\n exc_info=True,\n )\n return self.response_422(message=str(ex))\n 
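# persistence-layer failures are reported as 422s as well, rather than 500s\n        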
except DAOCreateFailedError as ex:\n logger.error(\n \"Error creating RLS rule %s: %s\",\n self.__class__.__name__,\n str(ex),\n exc_info=True,\n )\n return self.response_422(message=str(ex))\n\n @expose(\"/\", methods=(\"PUT\",))\n @protect()\n @safe\n @statsd_metrics\n @requires_json\n @event_logger.log_this_with_context(\n action=lambda self, *args, **kwargs: f\"{self.__class__.__name__}.put\",\n log_to_statsd=False,\n )\n def put(self, pk: int) -> Response:\n \"\"\"Update an RLS rule.\n ---\n put:\n summary: Update an RLS rule\n parameters:\n - in: path\n schema:\n type: integer\n name: pk\n description: The Rule pk\n requestBody:\n description: RLS schema\n required: true\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/{{self.__class__.__name__}}.put'\n responses:\n 200:\n description: Rule changed\n content:\n application/json:\n schema:\n type: object\n properties:\n id:\n type: number\n result:\n $ref: '#/components/schemas/{{self.__class__.__name__}}.put'\n 400:\n $ref: '#/components/responses/400'\n 401:\n $ref: '#/components/responses/401'\n 403:\n $ref: '#/components/responses/403'\n 404:\n $ref: '#/components/responses/404'\n 422:\n $ref: '#/components/responses/422'\n 500:\n $ref: '#/components/responses/500'\n \"\"\"\n\n try:\n item = self.edit_model_schema.load(request.json)\n except ValidationError as error:\n return self.response_400(message=error.messages)\n\n try:\n new_model = UpdateRLSRuleCommand(pk, item).run()\n return self.response(201, id=new_model.id, result=item)\n except RolesNotFoundValidationError as ex:\n logger.error(\n \"Role not found while updating RLS rule %s: %s\",\n self.__class__.__name__,\n str(ex),\n exc_info=True,\n )\n return self.response_422(message=str(ex))\n except DatasourceNotFoundValidationError as ex:\n logger.error(\n \"Table not found while updating RLS rule %s: %s\",\n self.__class__.__name__,\n str(ex),\n exc_info=True,\n )\n return self.response_422(message=str(ex))\n except DAOUpdateFailedError as ex:\n logger.error(\n \"Error updating RLS rule %s: %s\",\n self.__class__.__name__,\n str(ex),\n exc_info=True,\n )\n return self.response_422(message=str(ex))\n except RLSRuleNotFoundError as ex:\n return self.response_404()\n\n @expose(\"/\", methods=(\"DELETE\",))\n @protect()\n @safe\n @statsd_metrics\n @rison(get_delete_ids_schema)\n @event_logger.log_this_with_context(\n action=lambda self, *args, **kwargs: f\"{self.__class__.__name__}.bulk_delete\",\n log_to_statsd=False,\n )\n def bulk_delete(self, **kwargs: Any) -> Response:\n \"\"\"Bulk delete RLS rules.\n ---\n delete:\n summary: Bulk delete RLS rules\n parameters:\n - in: query\n name: q\n content:\n application/json:\n schema:\n $ref: '#/components/schemas/get_delete_ids_schema'\n responses:\n 200:\n description: RLS Rule bulk delete\n content:\n application/json:\n schema:\n type: object\n properties:\n message:\n type: string\n 401:\n $ref: '#/components/responses/401'\n 403:\n $ref: '#/components/responses/403'\n 404:\n $ref: '#/components/responses/404'\n 422:\n $ref: '#/components/responses/422'\n 500:\n $ref: '#/components/responses/500'\n \"\"\"\n item_ids = kwargs[\"rison\"]\n try:\n DeleteRLSRuleCommand(item_ids).run()\n return self.response(\n 200,\n message=ngettext(\n \"Deleted %(num)d rules\",\n \"Deleted %(num)d rules\",\n num=len(item_ids),\n ),\n )\n except RLSRuleNotFoundError:\n return 
self.response_404()\n","repo_name":"apache/superset","sub_path":"superset/row_level_security/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":10695,"program_lang":"python","lang":"en","doc_type":"code","stars":55269,"dataset":"github-code","pt":"61"} +{"seq_id":"34326421713","text":"#!/usr/bin/env python3\n\nfrom lib.automation import *\n\nclass InstallerTemplate:\n\n def check(self, config):\n return True\n\n def install(self, config):\n is_dry_run = config.getboolean('general', 'dry run', fallback=False)\n latest_kernel = config.getboolean('general', 'latest kernel', fallback=True)\n\n print_status(\"Checking if we are running as the latest kernel\", 2)\n if latest_kernel and not is_dry_run:\n val = run_command_with_output('dpkg -l | grep linux-image- | grep -vc meta')\n if int(val) > 1:\n print_status(\"Detected {0} kernels\".format(val.strip()), 2)\n val = run_command(\"dpkg -l | grep linux-image | grep -v meta | sort -t '.' -k 2 -g | tail -n 1 | grep \\\"$(uname -r)\\\"\", show_error=False)\n if val == 0:\n print_success(\"You are running the latest kernel! All good\", 2)\n print_status(\"Installing the latest kernel headers\", 2)\n run_command('apt -y -qq install make gcc \"linux-headers-$(uname -r)\"')\n print_success(\"Done\", 2)\n else:\n print_error(\"You are not running the latest kernel but its installed already!\", 2)\n print_error(\"Reboot and then re-run this script!\")\n sys.exit(1)\n else:\n print_success(\"Skipping!\", 2)\n\n","repo_name":"AldusFalco/Kali-Setup","sub_path":"modules/pre/kernel.py","file_name":"kernel.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"74605988","text":"class NumArray:\n\n def __init__(self, nums):\n self.n = n = len(nums)\n self.A = nums\n self.BIT = [0] * (n + 1)\n for i in range(n):\n self.updateBIT(i+1, nums[i])\n\n def update(self, index, val):\n diff = val - self.A[index]\n self.A[index] = val\n self.updateBIT(index + 1, diff)\n \n\n def sumRange(self, left, right):\n x, y = left + 1, right + 1\n return self.get(y) - self.get(x-1)\n \n def updateBIT(self, index, val):\n n = self.n\n while index <= n:\n self.BIT[index] += val\n index += (index & (-index))\n \n \n def get(self, index):\n res = 0\n while index > 0:\n res += self.BIT[index]\n index -= (index & (-index))\n return res\n \n \n\n\n# Your NumArray object will be instantiated and called as such:\n# obj = NumArray(nums)\n# obj.update(index,val)\n# param_2 = obj.sumRange(left,right)","repo_name":"Anirudh-Muthukumar/Leetcode-Solutions","sub_path":"307. Range Sum Query - Mutable/307. Range Sum Query - Mutable.py","file_name":"307. 
Range Sum Query - Mutable.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23418286231","text":"__author__ = 'franyell'\r\n\r\n\r\n\r\ndef getM(f1,line):\r\n m1=[]\r\n for j in [1,2,3,4]:\r\n l= f1.readline()\r\n if j==line:\r\n m1= [ int(x) for x in l.split(' ') ]\r\n return m1\r\n\r\ndef getAnswer(m1,m2):\r\n val=0\r\n times=0\r\n for x in m1:\r\n if x in m2:\r\n val=x\r\n times+=1\r\n if times>1:\r\n return [2, 0]\r\n if times==0:\r\n return [0,val]\r\n return [1, val]\r\n\r\n\r\nf1= open(\"input.txt\")\r\nf2=open(\"output.txt\",\"w\")\r\ncases=int(f1.readline())\r\nfor i in range(1,cases+1):\r\n ans1= int(f1.readline())\r\n m1=getM(f1,ans1)\r\n ans2= int(f1.readline())\r\n m2=getM(f1,ans2)\r\n r=getAnswer(m1,m2)\r\n\r\n if r[0]==2:\r\n f2.write(\"Case #\"+str(i)+ \": Bad magician!\\n\")\r\n elif r[0]==1:\r\n f2.write(\"Case #\"+str(i)+ \": \"+str(r[1])+\"\\n\")\r\n else:\r\n f2.write(\"Case #\"+str(i)+ \": Volunteer cheated!\\n\")\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_135/3950.py","file_name":"3950.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16171028112","text":"#Pilihan menu\nprint(\n'''\nPilih Bentuk\n1. Half Pyramid Pattern\n2. Inverted Half Pyramid Pattern\n3. Half Pyramid Pattern Mirrored\n4. Full Pyramid Pattern\n5. Full Pyramid Pattern Mirrored\n'''\n)\n\n#Variables dan Input\nmenu = int(input(\"Pilihan : \"))\nif menu < 1 or menu > 5 :\n print(\"Pilihan tidak valid\")\nelse :\n n = int(input(\"Jumlah bintang maksimal : \"))\n\n#Proses Conditional:\n #1. Half Pyramid Pattern\nif menu == 1 :\n for i in range(0, n) :\n for j in range(0, i+1) :\n print(\"*\", end=\" \")\n print()\n\n #2. Inverted Half Pyramid Pattern\nelif menu == 2 :\n for i in range(0, n) :\n for j in range(n-i, 0, -1) :\n print(\"*\", end=\" \")\n print()\n\n #3. Half Pyramid Pattern Mirrored\nelif menu == 3 :\n for i in range(0, n) :\n for j in range(0, n-(i+1)) :\n print(end=\" \")\n for j in range(0, i+1) :\n print(\"*\", end=\" \")\n print()\n\n #4. Full Pyramid Pattern\nelif menu == 4 :\n for i in range(0, n) :\n for j in range(0, n-(i+1)) :\n print(end=\" \")\n for j in range(0, i+1) :\n print(\"*\", end=\" \")\n print()\n\n #5. 
Full Pyramid Pattern Mirrored\nelif menu == 5 :\n for i in range(0, n) :\n for j in range(0, i+1) :\n print(end=\" \")\n for j in range(n-i, 0, -1) :\n print(\"*\", end=\" \")\n print()\n\n'''\nMuhammad Zidane Naufal Ramadhan (1102213103)\nEL-45-08\n'''","repo_name":"renadeZ/Teknofest_SE","sub_path":"pyramidMenu.py","file_name":"pyramidMenu.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23685825565","text":"import os\nimport random\n\nimport torch\nimport numpy as np\nfrom torch.utils.tensorboard.writer import SummaryWriter\nfrom transformers.optimization import AdamW, get_cosine_schedule_with_warmup\n\nfrom config.all_config import AllConfig\nfrom datasets.data_factory import DataFactory\nfrom model.model_factory import ModelFactory\nfrom modules.loss import LossFactory\nfrom trainer.trainer import Trainer\n\n\ndef main():\n config = AllConfig()\n\n assert config.num_frames % config.num_prompts == 0\n assert config.num_test_frames % config.num_prompts == 0\n\n os.environ['TOKENIZERS_PARALLELISM'] = \"false\"\n if not config.no_tensorboard:\n writer = SummaryWriter(log_dir=config.tb_log_dir)\n else:\n writer = None\n\n if config.seed >= 0:\n torch.manual_seed(config.seed)\n np.random.seed(config.seed)\n torch.cuda.manual_seed_all(config.seed)\n random.seed(config.seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n train_data_loader = DataFactory.get_data_loader(config, split_type='train')\n valid_data_loader = DataFactory.get_data_loader(config, split_type='test')\n model = ModelFactory.get_model(config)\n\n optimizer_grouped_params = [\n {'params': model.clip_params, 'lr': config.clip_lr},\n {'params': model.noclip_params, 'lr': config.noclip_lr}\n ]\n optimizer = AdamW(optimizer_grouped_params, weight_decay=config.weight_decay)\n num_training_steps = len(train_data_loader) * config.num_epochs\n num_warmup_steps = int(config.warmup_proportion * num_training_steps)\n scheduler = get_cosine_schedule_with_warmup(optimizer,\n num_warmup_steps=num_warmup_steps,\n num_training_steps=num_training_steps)\n\n loss = LossFactory.get_loss(config)\n\n trainer = Trainer(model, loss, optimizer,\n config=config,\n train_data_loader=train_data_loader,\n valid_data_loader=valid_data_loader,\n lr_scheduler=scheduler,\n writer=writer,\n use_ema=config.use_ema)\n\n trainer.train()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"bladewaltz1/PromptSwitch","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"61"} +{"seq_id":"27833646788","text":"#!C:\\Users\\Tsubasa\\anaconda3\\envs\\py3108\\python.exe\nimport numpy as np\nimport cv2\n\n#np.zeros(400,400,3)で400×400ピクセルの青、緑、赤の画像を生成\n#それぞれのピクセルは[青、緑、赤]も3つのチャンネルを持つ\n#np.uint8=符号なし8ビット整数型\n#[[0,0,0]\n# [0,0,0]\n# :::\n# [0,0,0]]とimageはなる\nimg = np.zeros((400, 400, 3), np.uint8)#黒色\ncv2.imwrite('../original_image/original.png', img)\ncv2.imshow('img1', img)#[0,0,0]で黒色\nimg[:,:] = [255, 0, 0]#青色\ncv2.imwrite('../change_image/blueImage.png', img)\ncv2.imshow('img2', img)#[255,0,0]で青色\nimg[:,:] = [0, 255, 0]#緑色\ncv2.imwrite('../change_image/greenImage.png', img)\ncv2.imshow('img3', img)#[0,255,0]で緑色\nimg[:,:] = [0, 0, 255]#赤色\ncv2.imwrite('../change_image/redImage.png', img)\ncv2.imshow('img4', 
img)#[0,0,255]で赤色\n\ncv2.waitKey(0)#キーボード入力を待ち受ける関数\ncv2.destroyAllWindows()#全てのウィンドウを閉じる\n","repo_name":"2basaa/python","sub_path":"image_process/graphics/source/create_image.py","file_name":"create_image.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22155834202","text":"content = [int(x) for x in open('Input.txt').read().split(',')]\n\n\n# Used By Both Parts\ndef fish_calculator(initial_day, num_of_days):\n days = {}\n for i in range(9):\n days[i] = initial_day.count(i)\n for i in range(num_of_days):\n temp = []\n for y in range(9):\n if not y == 6 and not y == 8:\n temp.append(days[y + 1])\n elif y == 6:\n temp.append(days[y + 1] + days[0])\n else:\n temp.append(days[0])\n for index, count in enumerate(temp):\n days[index] = count\n return sum(days.values())\n\n\n# Part 1\nprint('Part 1 Answer:', fish_calculator(content, 80))\n\n# Part 2\nprint('Part 2 Answer:', fish_calculator(content, 256))\n","repo_name":"AhmedxSayegh/Advent-Of-Code","sub_path":"2021/Day 06.py","file_name":"Day 06.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"41300507645","text":"#!/usr/bin/env python3\n\nfrom ..oaru import OaruAlgorithm, equal_libraries\nfrom ..strips import Domain, Predicate, ObjType, ROOT_TYPE\nfrom ..utils import get_memory_usage\nfrom ..cluster import Cluster, cluster\nfrom ..latom_filter import BasicObjectFilter, ObjectGraphFilter\nfrom ..openworld import Context, Action\n\nfrom pprint import pprint\n\n\ndomain = Domain(\"gridworld\")\n\n\nColumn = domain.declare_type(\"column\")\nRow = domain.declare_type(\"row\")\nAgent = domain.declare_type(\"agent\")\n\nRight = domain.declare_predicate(\"right\", Column, Column)\nLeft = domain.declare_predicate(\"left\", Column, Column)\nUp = domain.declare_predicate(\"up\", Row, Row)\nDown = domain.declare_predicate(\"down\", Row, Row)\nAt = domain.declare_predicate(\"at\", Agent, Column, Row)\n\nrow_1 = Row(\"1\")\nrow_2 = Row(\"2\")\nrow_3 = Row(\"3\")\nrow_4 = Row(\"4\")\n\ncol_a = Column(\"A\")\ncol_b = Column(\"B\")\ncol_c = Column(\"C\")\ncol_d = Column(\"D\")\ncol_e = Column(\"E\")\n\nstatic_predicates = {\n Right(col_a, col_b), Right(col_b, col_c), Right(col_c, col_d), Right(col_d, col_e),\n Up(row_1, row_2), Up(row_2, row_3), Up(row_3, row_4)\n}\n\nrobot = Agent(\"robot\")\n\nobjects = [row_1, row_2, row_3, row_4, col_a, col_b, col_c, col_d, col_e, robot]\n\ns0 = Context(objects, static_predicates | {At(robot, col_a, row_1)})\ns1 = Context(objects, static_predicates | {At(robot, col_a, row_2)})\ns2 = Context(objects, static_predicates | {At(robot, col_a, row_3)})\ns3 = Context(objects, static_predicates | {At(robot, col_b, row_3)})\ns4 = Context(objects, static_predicates | {At(robot, col_c, row_3)})\ns5 = Context(objects, static_predicates | {At(robot, col_d, row_3)})\ns6 = Context(objects, static_predicates | {At(robot, col_d, row_2)})\ns7 = Context(objects, static_predicates | {At(robot, col_d, row_1)})\ns8 = Context(objects, static_predicates | {At(robot, col_c, row_1)})\ns9 = Context(objects, static_predicates | {At(robot, col_b, row_1)})\ns10 = Context(objects, static_predicates | {At(robot, col_a, row_1)})\ns11 = Context(objects, static_predicates | {At(robot, col_b, row_1)})\ns12 = Context(objects, static_predicates | {At(robot, col_c, row_1)})\ns13 = Context(objects, static_predicates | {At(robot, col_d, row_1)})\ns14 = 
Context(objects, static_predicates | {At(robot, col_d, row_2)})\ns15 = Context(objects, static_predicates | {At(robot, col_d, row_3)})\ns16 = Context(objects, static_predicates | {At(robot, col_c, row_3)})\ns17 = Context(objects, static_predicates | {At(robot, col_b, row_3)})\ns18 = Context(objects, static_predicates | {At(robot, col_a, row_3)})\ns19 = Context(objects, static_predicates | {At(robot, col_a, row_2)})\ns20 = Context(objects, static_predicates | {At(robot, col_a, row_1)})\n\nn11 = Context(objects, static_predicates | {At(robot, col_d, row_1)})\nn12 = Context(objects, static_predicates | {At(robot, col_d, row_4)})\n\nn21 = Context(objects, static_predicates | {At(robot, col_a, row_1)})\nn22 = Context(objects, static_predicates | {At(robot, col_e, row_1)})\n\nn31 = Context(objects, static_predicates | {At(robot, col_a, row_3)})\nn32 = Context(objects, static_predicates | {At(robot, col_c, row_3)})\n\n\n# Location = ObjType(\"location\", ROOT_TYPE)\n# Direction = ObjType(\"direction\", ROOT_TYPE)\n# Agent = ObjType(\"agent\", ROOT_TYPE)\n\n# Adjacent = Predicate(\"adjacent\", Location, Location, Direction)\n# At = Predicate(\"at\", Agent, Location)\n\n# loc_1_1 = Location(\"loc-1-1\")\n# loc_1_2 = Location(\"loc-1-2\")\n# loc_2_1 = Location(\"loc-2-1\")\n# loc_2_2 = Location(\"loc-2-2\")\n\n# left = Direction(\"left\")\n# right = Direction(\"right\")\n# up = Direction(\"up\")\n# down = Direction(\"down\")\n# directions = [left, right, up, down]\n\n# robot = Agent(\"robot\")\n\n# objects = [loc_1_1, loc_1_2, loc_2_1, loc_2_2, left, right, up, down, robot]\n\n# s0 = Context(objects, {\n # Adjacent(loc_1_1, loc_1_2, right), Adjacent(loc_1_2, loc_1_1, left),\n # Adjacent(loc_1_2, loc_2_2, down), Adjacent(loc_2_2, loc_1_2, up),\n # Adjacent(loc_2_2, loc_2_1, left), Adjacent(loc_2_1, loc_2_2, right),\n # Adjacent(loc_2_1, loc_1_1, up), Adjacent(loc_1_1, loc_2_1, down),\n # At(robot, loc_1_1)})\n\n# s1 = Context(objects, {\n # Adjacent(loc_1_1, loc_1_2, right), Adjacent(loc_1_2, loc_1_1, left),\n # Adjacent(loc_1_2, loc_2_2, down), Adjacent(loc_2_2, loc_1_2, up),\n # Adjacent(loc_2_2, loc_2_1, left), Adjacent(loc_2_1, loc_2_2, right),\n # Adjacent(loc_2_1, loc_1_1, up), Adjacent(loc_1_1, loc_2_1, down),\n # At(robot, loc_1_2)})\n\n# s2 = Context(objects, {\n # Adjacent(loc_1_1, loc_1_2, right), Adjacent(loc_1_2, loc_1_1, left),\n # Adjacent(loc_1_2, loc_2_2, down), Adjacent(loc_2_2, loc_1_2, up),\n # Adjacent(loc_2_2, loc_2_1, left), Adjacent(loc_2_1, loc_2_2, right),\n # Adjacent(loc_2_1, loc_1_1, up), Adjacent(loc_1_1, loc_2_1, down),\n # At(robot, loc_1_1)})\n\n\ndef main():\n # f = BasicObjectFilter(directions)\n z3_opts = {\n #\"amo_encoding\": \"quadratic\",\n \"amo_encoding\": \"pseudoboolean\",\n #\"amo_encoding\": \"arithmetic\",\n #\"maxsat_engine\": \"wmax\",\n #\"optsmt_engine\": \"symba\",\n }\n\n oaru = OaruAlgorithm(add_non_novel=True, cluster_opts=z3_opts)\n\n f = BasicObjectFilter()\n\n a_g, updated = oaru.action_recognition(s0, s1, f)\n print(a_g, updated)\n\n a_g, updated = oaru.action_recognition(s1, s2, f)\n print(a_g, updated)\n\n a_g, updated = oaru.action_recognition(s2, s3, f)\n print(a_g, updated)\n\n\n a_g, updated = oaru.action_recognition(s3, s4, f)\n print(a_g, updated)\n\n a_g, updated = oaru.action_recognition(s4, s5, f)\n print(a_g, updated)\n\n a_g, updated = oaru.action_recognition(s5, s6, f)\n print(a_g, updated)\n\n a_g, updated = oaru.action_recognition(s6, s7, f)\n print(a_g, updated)\n\n a_g, updated = oaru.action_recognition(s7, s8, f)\n 
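# each call consumes one (state, next_state) pair and returns the recognized\n # action together with a flag saying whether the library was updated\n 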
print(a_g, updated)\n\n l1 = oaru.action_library.copy()\n\n a_g, updated = oaru.action_recognition(s8, s9, f)\n print(a_g, updated)\n\n a_g, updated = oaru.action_recognition(s9, s10, f)\n print(a_g, updated)\n\n a_g, updated = oaru.action_recognition(s10, s11, f)\n print(a_g, updated)\n\n a_g, updated = oaru.action_recognition(s11, s12, f)\n print(a_g, updated)\n\n a_g, updated = oaru.action_recognition(s12, s13, f)\n print(a_g, updated)\n\n a_g, updated = oaru.action_recognition(s13, s14, f)\n print(a_g, updated)\n\n a_g, updated = oaru.action_recognition(s14, s15, f)\n print(a_g, updated)\n\n a_g, updated = oaru.action_recognition(s15, s16, f)\n print(a_g, updated)\n\n a_g, updated = oaru.action_recognition(s16, s17, f)\n print(a_g, updated)\n\n a_g, updated = oaru.action_recognition(s17, s18, f)\n print(a_g, updated)\n\n a_g, updated = oaru.action_recognition(s18, s19, f)\n print(a_g, updated)\n\n a_g, updated = oaru.action_recognition(s19, s20, f)\n print(a_g, updated)\n\n oaru.draw_graph(\"output\", filename=\"before_negative_examples\", view=True, coarse=False, highlight_last_actions=True, dim_non_updated=True)\n\n oaru.add_negative_example(n11, n12)\n oaru.add_negative_example(n21, n22)\n oaru.add_negative_example(n31, n32)\n\n oaru.draw_graph(\"output\", filename=\"after_negative_examples\", view=True, coarse=False, highlight_last_actions=True, dim_non_updated=True)\n\n for op in oaru.history:\n print(op)\n\n print(oaru.strips_domain(domain))\n\n # for a in oaru.action_library.values():\n # print(a.action)\n\n # print(oaru.wall_times)\n # print(oaru.cpu_times)\n # print(oaru.peak_z3_memory)\n # z3_opts = {\n # \"amo_encoding\": \"pseudoboolean\",\n # \"maxsat_engine\": \"wmax\",\n # \"optsmt_engine\": \"symba\",\n # \"timeout\": 10\n # }\n\n # filt = ObjectGraphFilter(0, min)\n # filt = basic_object_filter # ObjectGraphFilter(0, min)\n\n\n # action0 = filt(Action.from_transition(s0, s1))#.filter_features(0, take_min=False)\n # action1 = filt(Action.from_transition(s1, s2))#.filter_features(0, take_min=False)\n # action2 = filt(Action.from_transition(s2, s3))#.filter_features(0, take_min=False)\n # action3 = filt(Action.from_transition(s3, s4))#.filter_features(0, take_min=False)\n # print(action0)\n # print(action1)\n # action_u1 = cluster(ActionCluster(action0), ActionCluster(action3), **z3_opts)\n # print(action_u1.action)\n # action_u2 = cluster(action_u1, ActionCluster(action2), **z3_opts)\n # print(action_u2.action)\n # action_u3 = cluster(action_u2, ActionCluster(action3), **z3_opts)\n # print(action_u3.action)\n\n\n\n\n # print(action_u1.action)\n # print(action_u1.additional_info[\"number_of_variables\"])\n # print(action_u1.additional_info[\"elapsed_cpu_ms\"])\n # print(action_u1.additional_info[\"z3_stats\"])\n # print(get_memory_usage())\n # print(action_u1)\n # print(action_u1.parent.distance)\n # pprint(action_u1.parent.additional_info)\n\n # print(action2)\n # print(action3)\n # action_u2 = cluster(action2, action3, include_additional_info=True)\n # print(action_u2)\n # pprint(action_u2.parent.additional_info)\n\n # action_u3 = cluster(action_u1, action_u2, include_additional_info=True)\n # print(action_u3)\n # pprint(action_u3.parent.additional_info)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"sprkrd/sat_strips_learn","sub_path":"satstripslearn/demos/demo_oaru.py","file_name":"demo_oaru.py","file_ext":"py","file_size_in_byte":9146,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} 
+{"seq_id":"19723495788","text":"import time\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\n\nfrom flectra import api, fields, models\nfrom flectra.tools.translate import _\nfrom flectra.tools.sql import drop_view_if_exists\nfrom flectra.exceptions import UserError, ValidationError\n\n\nclass HrActivitySheet(models.Model):\n _name = \"hr.activity.sheet\"\n _table = 'hr_activity_sheet'\n _inherit = ['mail.thread']\n _description = \"Activities Sheet\"\n _order = \"id desc\"\n\n @api.model\n def _get_active_employee(self):\n employee = self.env['hr.employee'].search(\n [('user_id', '=', self.env.uid)], limit=1)\n return employee or False\n\n @api.model\n def _get_end_date(self):\n active_user = self.env['res.users'].browse(self.env.uid)\n range = \\\n active_user.company_id and \\\n active_user.company_id.sheet_generaton_period or 'weekly'\n date = fields.Date.context_today(self)\n if range == 'weekly':\n date = (datetime.today() + relativedelta(\n weekday=6)).strftime('%Y-%m-%d')\n elif range == 'monthly':\n date = (datetime.today() + relativedelta(\n months=+1, day=1, days=-1)).strftime('%Y-%m-%d')\n elif range == 'yearly':\n date = time.strftime('%Y-12-31')\n return date\n\n @api.model\n def _get_start_date(self):\n active_user = self.env['res.users'].browse(self.env.uid)\n range = \\\n active_user.company_id and \\\n active_user.company_id.sheet_generaton_period or 'weekly'\n date = fields.Date.context_today(self)\n if range == 'weekly':\n date = (datetime.today() + relativedelta(\n weekday=0, days=-6)).strftime('%Y-%m-%d')\n elif range == 'monthly':\n date = time.strftime('%Y-%m-01')\n elif range == 'yearly':\n date = time.strftime('%Y-01-01')\n return date\n\n name = fields.Char(\n string=\"Activity Sheet\",\n states={'unapproved': [('readonly', True)],\n 'approved': [('readonly', True)]})\n start_date = fields.Date(\n string='From Date', default=_get_start_date,\n index=True, states={'new': [('readonly', False)]})\n end_date = fields.Date(\n string='To Date', default=_get_end_date,\n index=True, states={'new': [('readonly', False)]})\n company_id = fields.Many2one('res.company', string='Company')\n user_id = fields.Many2one(\n 'res.users', related='employee_id.user_id', string='User', store=True)\n employee_id = fields.Many2one(\n 'hr.employee', string='Employee', default=_get_active_employee)\n department_id = fields.Many2one(\n 'hr.department', string='Department',\n default=lambda self: self.env['res.company']._company_default_get())\n activity_ids = fields.One2many(\n 'account.analytic.line', 'activity_sheet_id',\n string='Activity lines',\n states={\n 'draft': [('readonly', False)],\n 'new': [('readonly', False)]})\n state = fields.Selection([\n ('new', 'New'),\n ('draft', 'Draft'),\n ('unapproved', 'To Approve'),\n ('approved', 'Approved')], index=True, track_visibility='onchange',\n string='Activity Status', default='new')\n activity_account_ids = fields.One2many(\n 'hr.activity.sheet.account', 'activity_sheet_id',\n string='Analytic Activity Account')\n\n @api.multi\n def _track_subtype(self, init_values):\n self.ensure_one()\n if 'state' in init_values and self.state == 'unapproved':\n return 'hr_activity_sheet.mt_unapproved_activity'\n elif 'state' in init_values and self.state == 'approved':\n return 'hr_activity_sheet.mt_approve_activity'\n return super(HrActivitySheet, self)._track_subtype(init_values)\n\n @api.multi\n def name_get(self):\n result = []\n for activity in self:\n current_week = datetime.strptime(\n activity.start_date, 
'%Y-%m-%d').isocalendar()[1]\n name = 'Activity Week ' + str(current_week)\n result.append((activity.id, name))\n return result\n\n @api.multi\n def write(self, vals):\n if vals.get('employee_id'):\n user = self.env['hr.employee'].browse(\n vals.get('employee_id')).user_id.id\n if not user:\n raise UserError(_('Activity will be created only if the employee '\n 'has a related user.'))\n self._is_overlapping(user=user)\n return super(HrActivitySheet, self).write(vals)\n\n @api.model\n def create(self, vals):\n if vals.get('employee_id'):\n employee = self.env['hr.employee'].browse(vals.get('employee_id'))\n if not employee.user_id:\n raise UserError(_(\n 'Activity will be created only if the employee '\n 'has a related user.'))\n vals['state'] = 'draft'\n res = super(HrActivitySheet, self).create(vals)\n return res\n\n @api.multi\n def copy(self, default=None):\n raise UserError(_('You cannot duplicate an activity sheet.'))\n return super(HrActivitySheet, self).copy(default)\n\n @api.multi\n def unlink(self):\n activities = self.env['account.analytic.line']\n for activity in self:\n if activity.state in ['unapproved', 'approved']:\n raise UserError(_('Submitted activities cannot be deleted.'))\n activities += activity.activity_ids.filtered(\n lambda line: not line.task_id)\n activities.unlink()\n return super(HrActivitySheet, self).unlink()\n\n @api.multi\n def set_activity_unapproved(self):\n for activity in self:\n manager = activity.employee_id and activity.employee_id.parent_id\n if manager and manager.user_id:\n self.message_subscribe_users(user_ids=[manager.user_id.id])\n self.write({'state': 'unapproved'})\n return True\n\n @api.onchange('employee_id')\n def on_employee_id_change(self):\n if self.employee_id:\n self.department_id = self.employee_id.department_id\n self.user_id = self.employee_id.user_id\n\n @api.multi\n def set_activity_draft(self):\n self.ensure_one()\n has_group = self.env.user.has_group(\n 'hr_timesheet.group_hr_timesheet_user')\n if not has_group:\n raise UserError(_('Activities can only be Approved or Refused '\n 'by Managers'))\n self.write({'state': 'draft'})\n return True\n\n @api.multi\n def set_activity_approved(self):\n self.ensure_one()\n has_group = self.env.user.has_group(\n 'hr_timesheet.group_hr_timesheet_user')\n if not has_group:\n raise UserError(_('Activities can only be Approved by Managers.'))\n if self.filtered(lambda sheet: sheet.state != 'unapproved'):\n raise UserError(_(\"Kindly submit your activity first.\"))\n self.write({'state': 'approved'})\n\n @api.constrains('employee_id', 'end_date', 'start_date')\n def _is_overlapping(self, user=False):\n for activity in self:\n user_id = user or activity.user_id and activity.user_id.id\n if user:\n self.env.cr.execute('''\n SELECT id\n FROM hr_activity_sheet\n WHERE\n id <> %s\n AND user_id=%s\n AND (start_date <= %s and %s <= end_date)\n ''', (activity.id, user_id, activity.end_date,\n activity.start_date))\n if any(self.env.cr.fetchall()):\n raise ValidationError(_(\n 'Activity sheets cannot overlap.'))\n\n\nclass HrActivitySheetAccount(models.Model):\n _name = \"hr.activity.sheet.account\"\n _description = \"Periodical Activity\"\n _order = 'name'\n _auto = False\n\n name = fields.Many2one('account.analytic.account',\n string='Project / Analytic Account', readonly=True)\n activity_sheet_id = fields.Many2one('hr.activity.sheet', string='Sheet')\n utilized_hour = fields.Float('Total Time', digits=(16, 2))\n\n _depends = {\n 'hr.activity.sheet': ['user_id', 'start_date', 'end_date'],\n 
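# source-model fields that feed the SQL view defined in init() below\n 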
'account.analytic.line': ['user_id', 'date',\n 'account_id', 'unit_amount']}\n\n @api.model_cr\n def init(self):\n drop_view_if_exists(self._cr, 'hr_activity_sheet_account')\n self._cr.execute(\"\"\"create view hr_activity_sheet_account as (\n select\n min(line.id) as id,\n line.account_id as name,\n s.id as activity_sheet_id,\n sum(line.unit_amount) as utilized_hour\n from\n account_analytic_line line\n LEFT JOIN hr_activity_sheet s\n ON s.user_id = line.user_id\n AND (\n s.end_date >= line.date AND s.start_date <= line.date)\n group by line.account_id, s.id\n )\"\"\")\n","repo_name":"gagaboy/odoo10_plus","sub_path":"addons/hr_activity_sheet/models/hr_activity_sheet.py","file_name":"hr_activity_sheet.py","file_ext":"py","file_size_in_byte":9248,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"46548113920","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\nimport matplotlib\nimport seaborn as sns\n\n#CONV_TYPE = {0: \"post\", 1: \"pre\"}\n#NORM_TYPE = {0: None, 1: \"bn\", 2: \"in\"}\n#UP_TYPE = {0: \"bilinear\", 1: \"nearest\", 2: \"deconv\"}\n#SHORT_CUT_TYPE = {0: False, 1: True}\n#SKIP_TYPE = {0: False, 1: True}\n\nfont = {'size' : 16}\n\nmatplotlib.rc('font', **font)\n\narchs = pd.read_csv('archs_random.csv')\n\nIS = np.array(archs['IS'])\nFID = np.array(archs['FID'])\n\n\n# Computing time to reach better IS than Greedy\nis_to_beat = 8.55\niterations_for_better = []\nfor _ in range(100000):\n\tsampled = 0\n\tdraws = 0\n\twhile sampled < is_to_beat:\n\t\tsampled = np.random.choice(IS, 1)\n\t\tdraws += 1\n\titerations_for_better.append(draws*10) # GPU-hours\n\nplt.hist(iterations_for_better, bins=np.arange(0,np.max(iterations_for_better),np.max(iterations_for_better)/20), edgecolor='k', color='#1f76b4ff', alpha=0.5)\nplt.xlabel('GPU hours to beat IS={}'.format(is_to_beat))\nplt.ylabel('# trials out of 100,000')\nprint(np.mean(iterations_for_better))\nplt.tight_layout()\nplt.show()\n\nif 0:\n
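\t# this comparison is switched off on purpose; change if 0: to if 1: to draw\n\t# the joint FID/IS figure of random vs. searched architectures\n\t# FID/IS plot: random vs. 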
searched\n\tarchs_searched = pd.read_csv('archs.csv')\n\n\tfig, ax = plt.subplots(1,1)\n\n\tIS_searched = np.array(archs_searched['IS'])\n\tFID_searched = np.array(archs_searched['FID'])\n\n\taxs = sns.jointplot('IS', 'FID', data=archs_searched)\n\taxs.ax_joint.scatter('IS', 'FID', data=archs, c='r', marker='o', edgecolor='w')\n\n\t# drawing pdf instead of histograms on the marginal axes\n\taxs.ax_marg_x.cla()\n\taxs.ax_marg_y.cla()\n\tsns.distplot(archs_searched.IS, ax=axs.ax_marg_x)\n\tsns.distplot(archs_searched.FID, ax=axs.ax_marg_y, vertical=True)\n\n\t# Adding hist and pdf for random\n\tax = sns.distplot(archs.IS, ax=axs.ax_marg_x, color='r')\n\tax = sns.distplot(archs.FID, ax=axs.ax_marg_y, color='r', vertical=True)\n\n\tplt.tight_layout()\n\tplt.show()","repo_name":"FLClab/AutocGAN","sub_path":"figures and scripts/analysis_random.py","file_name":"analysis_random.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18407467152","text":"import logging\nimport sqlite3\n\n\nfrom config import config\nfrom utils.logsettings import getLogger\n\n\nlogger = getLogger(__name__)\n\n\nTABLE_NAME_SIGNAL_EVENTS = 'signal_events'\n\n\ndef get_candle_table_name(product_code, duration):\n return '{}_{}'.format(product_code, duration)\n\n\ndef init():\n try:\n # Need sqlite3.PARSE_DECLTYPES to deal with datetime\n conn = sqlite3.connect(config.Config.db_name, detect_types=sqlite3.PARSE_DECLTYPES)\n except FileNotFoundError as ex:\n logger.error(ex)\n\n curs = conn.cursor()\n\n # time column is UTC\n curs.execute(\n '''\n create table if not exists {}(\n time timestamp primary key not null,\n product_code string,\n side string,\n price float,\n size float,\n notes string\n )\n '''.format(TABLE_NAME_SIGNAL_EVENTS)\n )\n\n for duration in config.Config.durations.keys():\n # e.g. 
table_name = BTC_JPY_1m\n table_name = get_candle_table_name(config.Config.product_code, duration)\n # time column is UTC timestamp\n curs.execute(\n '''\n create table if not exists {}(\n time timestamp primary key not null,\n open float,\n close float,\n high float,\n low float,\n volume float\n )\n '''.format(table_name)\n )\n\n conn.commit()\n\n curs.close()\n conn.close()","repo_name":"ino777/bitflyer","sub_path":"app/models/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"18480758955","text":"class Solution:\n def countNegatives(self, grid: [[int]]) -> int:\n count = 0\n row, col = len(grid) - 1, 0\n while row >= 0 and col < len(grid[0]):\n if grid[row][col] < 0:\n row -= 1\n count += len(grid[0]) - col\n else:\n col += 1\n \n return count\n ","repo_name":"thydrdy/competitive_programming","sub_path":"leetcode/count negative numbers in a sorted matrix.py","file_name":"count negative numbers in a sorted matrix.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23467125877","text":"from django.db import models\n\n# Create your models here.\n\nclass About(models.Model):\n descriptions = models.TextField(\n verbose_name=\"Первое описание\"\n )\n descriptions2 = models.TextField(\n verbose_name=\"Второе описание\"\n )\n image = models.ImageField(\n upload_to=\"about_image\",\n verbose_name=\"Фотография\"\n )\n experience = models.CharField(\n max_length=255,\n verbose_name=\"опыт\"\n )\n \n def __str__(self):\n return f\" {self.descriptions} - {self.descriptions2}\"\n \n class Meta:\n verbose_name = \"О нас\"\n verbose_name_plural = \"О нас\"\n \nclass History(models.Model):\n years = models.CharField(\n max_length=255,\n verbose_name=\"Год\"\n )\n title = models.CharField(\n max_length=255,\n verbose_name=\"Название\"\n )\n descriptions = models.TextField(\n verbose_name=\"Описание\"\n )\n image = models.ImageField(\n upload_to=\"history_image\",\n verbose_name=\"Фотография\"\n )\n \n def __str__(self):\n return f\"{self.years} - {self.title} - {self.descriptions}\"\n \n class Meta:\n verbose_name = \"Наши истории\"\n verbose_name_plural = \"Наша история\"\n \nclass Number(models.Model):\n clients = models.CharField(\n max_length=255,\n verbose_name=\"Активные клиенты\"\n )\n review = models.CharField(\n max_length=255,\n verbose_name=\"Положительных отзывов\"\n )\n team = models.CharField(\n max_length=255,\n verbose_name=\"Юристов\"\n )\n \n def __str__(self):\n return f\"{self.clients} - {self.review} - {self.team}\"\n \n class Meta:\n verbose_name = \"Мы в числах\"\n verbose_name_plural = \"Мы в числах\"\n\nclass Contact(models.Model):\n name = models.CharField(\n max_length=255,\n verbose_name=\"Имя\"\n )\n email = models.EmailField(\n verbose_name=\"Почта\"\n )\n message = models.TextField(\n verbose_name=\"Сообщение\"\n )\n \n def __str__(self):\n return f\"{self.name} - {self.email} - {self.message} \"\n \n class Meta:\n verbose_name = \"Обратные связи\"\n verbose_name_plural = \"Обратная связь\"","repo_name":"Nurbolottop/YurConsuleDjangoProject","sub_path":"apps/setting/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1658402733","text":"\n\nimport operator\n\ndump_path = \"./tbsvr.out.7733646\"\n
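# the dump is parsed line by line below and the handle is never closed\n# explicitly; a with-block would be the safer pattern here\ndump_file = 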
open(dump_path, \"r\")\nprev_line = \"\"\nflag = False\nmost = {}\nlinenum = 0\n\nfor l in dump_file:\n #linenum = linenum + 1\n\n\n memory = 0\n\n if \"Allocation size:\" in l:\n flag = True\n\n\n if \"Allocation size:\" in l:\n # Remove the leading white space, then split the string by the first space\n #print(linenum)\n\n val = (l.lstrip()).split(\" \")\n\n if len(val) < 3:\n flag = False\n continue\n\n val = val[2].rstrip()\n\n\n if len(val) > 4:\n flag = False\n continue\n if '\\t' in val:\n flag = False\n continue\n if \"ation\" in val:\n flag = False\n continue\n if not val:\n flag = False\n continue\n if val == '\\n' or val == ' ' or val == '\\r':\n flag = False\n continue\n if not val.startswith('0x'):\n flag = False\n continue\n if len(val) == 2:\n flag = False\n continue\n\n\n val = int(val, 16)\n\n #val is the memory here\n memory = val\n flag = True\n #print(val)\n\n #print(\"flag: \" + str(flag))\n\n if (not l.strip()) and (flag):\n address = prev_line.lstrip().split(\" \", 1)[0]\n #print(address)\n flag = False\n\n if address in most:\n most[address] = most.get(address) + int(val)\n else:\n most[address] = 1 + int(val)\n\n prev_line = l\n\n\n\n# Making a sorted representaton of the dictionary\nsorted_most = sorted(most.items(), key=operator.itemgetter(1), reverse=True)\n\nsize = len(sorted_most)\n\nfor num in range(0, size):\n print(sorted_most[num])\n\n","repo_name":"Juhyun-Kim-Memphis/codelet","sub_path":"util/AIXMemDumpParser/AIXMemDumpParser.py","file_name":"AIXMemDumpParser.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17621763943","text":"import logging\nimport os\nfrom zExceptions import BadRequest\n\n__all__ = (\"MibOrganizerPath\", \"ModuleManager\", \"getMibModuleMap\")\n\nlog = logging.getLogger(\"zen.mib\")\nBASE_PATH = \"/zport/dmd/Mibs\"\n\n\nclass MibOrganizerPath(object):\n \"\"\"Encapsulates a MibModule's organizer path in DMD.\n \"\"\"\n\n def __init__(self, path=\"/\"):\n \"\"\"Initialize an instance of MibOrganizerPath.\n\n The path value can be a path relative to /zport/dmd/Mibs\n or a full path starting from /zport.\n\n @param {str} Path to module.\n \"\"\"\n if path[0:len(BASE_PATH)] != BASE_PATH:\n if path.startswith(\"/\"):\n path = path[1:]\n path = os.path.join(BASE_PATH, path) if len(path) else BASE_PATH\n self._path = path\n relpath = self._path[len(BASE_PATH):]\n self._relpath = relpath if relpath else \"/\"\n\n @property\n def path(self):\n \"\"\"Returns the full path of the organizer.\n \"\"\"\n return self._path\n\n @property\n def relative_path(self):\n \"\"\"Returns the relative path of the organizer.\n \"\"\"\n return self._relpath\n\n\ndef getMibModuleMap(dmd):\n \"\"\"Return a dict mapping module names to their organizer in DMD.\n\n @returns {dict str:MibOrganizerPath}\n \"\"\"\n registry = {}\n for module in dmd.Mibs.getSubInstancesGen(\"mibs\"):\n path = module.getPrimaryPath()\n organizerPath, moduleName = \"/\".join(path[:-2]), path[-1]\n registry[moduleName] = MibOrganizerPath(organizerPath)\n return registry\n\n\n_MODULE_ATTRIBUTES = ('language', 'contact', 'description')\n\n\ndef _getModuleAttributes(data):\n \"\"\"Generates key/value pairs of the modules attributes.\n \"\"\"\n for key in (k for k in _MODULE_ATTRIBUTES if k in data):\n yield (key, data.get(key))\n\n\nclass ModuleManager(object):\n \"\"\"Manages the adding, updating, and deletion of MIB modules in DMD.\n \"\"\"\n\n def __init__(self, dmd, registry):\n 
\"\"\"Initialize an instance of ModuleManager.\n\n @param registry {dict} Initial module to organizer mapping.\n @param organizer {MibOrganizerPath} Add MibModules to this organizer.\n \"\"\"\n self._dmd = dmd\n self._registry = registry\n self._organizers = set(self._registry.values())\n\n def add(self, module, organizer):\n \"\"\"Add MIB module to DMD.\n \"\"\"\n # 1. Add module to path if module doesn't already exist on some path.\n # 2. Add/update module attributes\n # 3. Add module nodes\n # 4. Add module notifications\n\n moduleName = module.get(\"moduleName\")\n attributes = module.get(moduleName, {})\n isNew, mibmod = self._getMibModule(moduleName, organizer)\n\n for attr, value in _getModuleAttributes(attributes):\n setattr(mibmod, attr, value)\n\n nodecount = 0\n for name, values in module.get(\"nodes\", {}).iteritems():\n self._addItem(\n mibmod.createMibNode, name, values, moduleName\n )\n nodecount += 1\n\n trapcount = 0\n for name, values in module.get(\"notifications\", {}).iteritems():\n self._addItem(\n mibmod.createMibNotification, name, values, moduleName\n )\n trapcount += 1\n\n if isNew:\n log.info(\n \"Created %s with %s nodes and %s notifications\",\n '/'.join(mibmod.getPrimaryPath()), nodecount, trapcount\n )\n else:\n log.info(\n \"Updated %s nodes and %s notifications on %s\",\n nodecount, trapcount, '/'.join(mibmod.getPrimaryPath())\n )\n\n def _getMibModule(self, name, default_organizer):\n current_organizer = self._registry.get(name)\n if current_organizer:\n return (\n False,\n self._dmd.unrestrictedTraverse(\n current_organizer.path + \"/mibs/\" + name\n )\n )\n return (\n True,\n self._dmd.Mibs.createMibModule(\n name, default_organizer.relative_path\n )\n )\n\n def _addItem(self, function, name, values, moduleName):\n try:\n function(name, logger=log, **values)\n except BadRequest:\n self.log.warn(\n \"Unable to add %s id '%s' as this name is \"\n \"reserved for use by Zope\", \"node\", name\n )\n newName = '_'.join([name, moduleName])\n self.log.warn(\n \"Trying to add %s '%s' as '%s'\",\n \"node\", name, newName\n )\n try:\n function(newName, logger=log, **values)\n except Exception:\n self.log.warn(\n \"Unable to add %s id '%s' -- skipping\",\n \"node\", newName\n )\n else:\n self.log.warn(\n \"Renamed '%s' to '%s' and added to MIB %s\",\n name, newName, \"node\"\n )\n","repo_name":"zenoss/zenoss-prodbin","sub_path":"Products/ZenUtils/mib/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":5027,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"29912817666","text":"# Libraries\nimport cv2\nimport numpy as np\nfrom Code import lib\nfrom Code import vocab\nimport sys\n\n# Capturing video through webcam BRIO\nwebcam = cv2.VideoCapture(cv2.CAP_DSHOW)\n\n# Webcam parameters\nwebcam.set(cv2.CAP_PROP_FRAME_WIDTH, 3840) # -1280\nwebcam.set(cv2.CAP_PROP_FRAME_HEIGHT, 2160) # -720\nwebcam.set(cv2.CAP_PROP_FPS, 20) # 5, 15, 30, 60\nfourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')\nwebcam.set(cv2.CAP_PROP_FOURCC, fourcc)\nwebcam.set(cv2.CAP_PROP_BRIGHTNESS, 128) # default 128\nwebcam.set(cv2.CAP_PROP_CONTRAST, 130) # default 100 good 130\nwebcam.set(cv2.CAP_PROP_SATURATION, 90) # default 128 good 90\nwebcam.set(cv2.CAP_PROP_GAIN, 0) # default 128\nwebcam.set(cv2.CAP_PROP_EXPOSURE, -4) # default -5\nwebcam.set(cv2.CAP_PROP_FOCUS, 15) # default 10 15\n\n# Opening the video settings GUI\nwebcam.set(cv2.CAP_PROP_SETTINGS, 0)\n\n# Set range for blue mask color\nblue_lower = np.array([94, 
50, 50], np.uint8)\nblue_upper = np.array([110, 255, 255], np.uint8)\n# Set range for green mask color\ngreen_lower = np.array([30, 30, 50], np.uint8)\ngreen_upper = np.array([94, 255, 255], np.uint8)\n# Set range for yellow mask color\nyellow_lower = np.array([20, 60, 50], np.uint8) # 20 60 50\nyellow_upper = np.array([25, 255, 255], np.uint8) # 25 255 255\n# Set range for orange mask color\norange_lower = np.array([1, 50, 50], np.uint8) # 1 50 50\norange_upper = np.array([10, 255, 255], np.uint8) # 12 255 255\n# Set range for purple mask color\npurple_lower = np.array([114, 30, 20], np.uint8)\npurple_upper = np.array([140, 255, 255], np.uint8)\n# Set range for black mask color\nblack_lower = np.array([0, 0, 0], np.uint8)\nblack_upper = np.array([180, 90, 100], np.uint8)\n\n# Regroup all masks together\nall_masks = [blue_lower, blue_upper, green_lower, green_upper, yellow_lower, yellow_upper, orange_lower, orange_upper,\n purple_lower, purple_upper]\n\n# List for repeated measures\nlst_memo = []\nend = []\n\n# Number of different measures\nrepeat = 1000\ncpt_rep = 0\n\n\n# Main loop\nwhile cpt_rep < repeat:\n\n # Acquire frames from camera and isolate the background\n _, imageFrame = webcam.read()\n imageFrame = lib.crop_start_image(imageFrame)\n\n ###################################################################################################################\n # Color detection zone\n\n # Convert every frame in HSV env. from BGR to HSV (color angle, saturation, brightness)\n hsvFrame = cv2.cvtColor(imageFrame, cv2.COLOR_BGR2HSV)\n\n # See color levels for mask configuration.\n \"\"\"\n print(lib.histo(hsvFrame))\n \"\"\"\n\n # Define masks for each color\n blue_mask = lib.define_mask(hsvFrame, blue_lower, blue_upper)\n green_mask = lib.define_mask(hsvFrame, green_lower, green_upper)\n yellow_mask = lib.define_mask(hsvFrame, yellow_lower, yellow_upper)\n orange_mask = lib.define_mask(hsvFrame, orange_lower, orange_upper)\n purple_mask = lib.define_mask(hsvFrame, purple_lower, purple_upper)\n\n # Find contours for each color\n contoursB, _ = cv2.findContours(blue_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n contoursG, _ = cv2.findContours(green_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n contoursY, _ = cv2.findContours(yellow_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n contoursO, _ = cv2.findContours(orange_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n contoursP, _ = cv2.findContours(purple_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\n # Get the list of blocks\n # Each block is represented by\n # [ColorName, CX_1stPt, CY_1stPt, Width, Height, Box(ArrayOfVertices), Name, Unicity, nbCubarithms, P1, P2, Braille]\n listOfBlocks = []\n listOfBlocks = listOfBlocks + lib.get_info_from_contour(contoursB, vocab.BLUE)\n listOfBlocks = listOfBlocks + lib.get_info_from_contour(contoursG, vocab.GREEN)\n listOfBlocks = listOfBlocks + lib.get_info_from_contour(contoursY, vocab.YELLOW)\n listOfBlocks = listOfBlocks + lib.get_info_from_contour(contoursO, vocab.ORANGE)\n listOfBlocks = listOfBlocks + lib.get_info_from_contour(contoursP, vocab.PURPLE)\n\n ###################################################################################################################\n # Separation and sorting zone\n\n end_list = []\n if cpt_rep > 10:\n \n # Reorder blocks (from bottom to top because the camera is inverted above the scene)\n listOfBlocks = sorted(listOfBlocks, key=lambda x: x[2])\n listOfBlocks.reverse()\n\n # Isolate the colored blocks\n 
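# (each block is cut out of the frame along its rotated bounding box so the\n # later steps can analyse it in isolation)\n list_img_blocks = 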
lib.cropRotatedBox(imageFrame, listOfBlocks)\n\n # Separate consecutive same-colored blocks\n new_list_img = lib.separate(list_img_blocks) # todo\n\n ################################################################################################################\n\n # Braille detection zone\n # Acquire the values on cubarithm\n last_list_img = lib.get_cubarithm(new_list_img, black_lower, black_upper)\n\n # Extract the number of blobs for each block\n end_list = lib.get_braille(list_img_blocks, all_masks)\n \n ################################################################################################################\n\n # Show relevant info\n \"\"\"print(\"\\n\" + str(lib.extract(last_list_img, vocab.NAME)) + \"\\n\"\n # + str(lib.extract(list_img_blocks, vocab.WIDTH)) + \"\\n\"\n # + str(lib.extract(list_img_blocks, vocab.HEIGHT)) + \"\\n\"\n # + str(lib.extract(last_list_img, vocab.INTRACLASS)) + \"\\n\"\n # + str(lib.extract(last_list_img, vocab.UNICITY)) + \"\\n\"\n # + str(lib.extract(last_list_img, vocab.SLOTNUMBER)) + \"\\n\"\n # + str(lib.extract(last_list_img, vocab.PIN_1)) + \"\\n\"\n # + str(lib.extract(last_list_img, vocab.PIN_2)) + \"\\n\"\n )\"\"\"\n\n # Average results\n if cpt_rep % vocab.COUNT_MODULO != 0:\n myStr = \"[\" + \"#\" * ((cpt_rep - 1) % vocab.COUNT_MODULO) + \" \" * (\n (vocab.COUNT_MODULO-2) - ((cpt_rep - 1) % vocab.COUNT_MODULO)) + \"]\"\n if cpt_rep <= 10:\n sys.stdout.write(\"\\r\" + myStr + \" INITIALISATION CAMERA\")\n if cpt_rep > 10:\n lst_memo.append(end_list)\n sys.stdout.write(\"\\r\" + myStr + \" CHARGEMENT\")\n else:\n if cpt_rep <= 10:\n lst_memo.clear()\n if cpt_rep > 10:\n end = lib.average(lst_memo)\n lst_memo.clear()\n cpt_rep += 1\n\n # Display colored filters\n \"\"\"cv2.namedWindow(\"p\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"p\", purple_mask)\n cv2.namedWindow(\"y\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"y\", yellow_mask)\n cv2.namedWindow(\"b\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"b\", blue_mask)\n cv2.namedWindow(\"g\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"g\", green_mask)\n cv2.namedWindow(\"o\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"o\", orange_mask)\"\"\"\n\n # Results on original image\n \"\"\"for bloc in end_list:\n strbloc = bloc[vocab.INTRACLASS]\n if bloc[vocab.SLOTNUMBER] == 1:\n strbloc += \" avec pin \" + str(bloc[vocab.PIN_1])\n if bloc[vocab.SLOTNUMBER] == 2:\n strbloc += \" avec pin \" + str(bloc[vocab.PIN_1]) + \" et \" + str(bloc[vocab.PIN_2])\n if bloc[vocab.UNICITY] == vocab.COMMON:\n strbloc += \", on compte \" + str(bloc[vocab.BRAILLE]) + \" blobs.\"\n cv2.putText(imageFrame, strbloc, (int(bloc[vocab.BOX][2][0]), int(bloc[vocab.BOX][2][1])),\n cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 0), thickness=2)\"\"\"\n \"\"\"cv2.namedWindow(\"original\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"original\", imageFrame)\"\"\"\n\n # Stops the program properly by pressing 'q'\n if cv2.waitKey(10) & 0xFF == ord('q'):\n webcam.release()\n cv2.destroyAllWindows()\n break\n","repo_name":"Lucasmontagne/TaBGO","sub_path":"Code/DecisionTreeTaBGO.py","file_name":"DecisionTreeTaBGO.py","file_ext":"py","file_size_in_byte":7767,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"11534947546","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.contrib import messages\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import JsonResponse\nfrom django.shortcuts import render, get_object_or_404, 
redirect\nfrom django_filters.views import FilterView\n\nimport helpers\nfrom currency.models import Entity\nfrom currency.views import EntityFilter\nfrom helpers import superuser_required\nfrom helpers.mixins.AjaxTemplateResponseMixin import AjaxTemplateResponseMixin\nfrom helpers.mixins.ListItemUrlMixin import ListItemUrlMixin\nfrom wallets.forms.PaymentForm import PaymentForm\nfrom wallets.models import Payment, Wallet\n\n\n@login_required\ndef pending_payments(request):\n pending_payments = Payment.objects.pending(user=request.user)\n sent_pending = Payment.objects.sent_pending(user=request.user)\n\n page = request.GET.get('page')\n pending_payments = helpers.paginate(pending_payments, page, elems_perpage=10)\n\n params = {\n 'payments': pending_payments,\n 'sent_pending': sent_pending\n }\n\n if request.is_ajax():\n response = render(request, 'payment/query.html', params)\n response['Cache-Control'] = 'no-cache'\n response['Vary'] = 'Accept'\n return response\n else:\n return render(request, 'payment/pending_payments.html', params)\n\n\n\n@login_required\ndef payment_detail(request, pk):\n\n payment = get_object_or_404(Payment, pk=pk)\n can_edit = request.user == payment.receiver or request.user.is_superuser\n can_view = request.user == payment.sender or request.user.is_superuser\n\n if not can_edit and not can_view:\n messages.add_message(request, messages.ERROR, 'No tienes permisos para ver este pago')\n return redirect('wallets:pending_payments')\n\n\n params = { 'payment': payment, 'can_edit':can_edit }\n\n if request.method == \"POST\" and can_edit:\n action = request.POST.get(\"action\", \"\")\n if action == 'accept':\n try:\n payment.accept_payment()\n if request.is_ajax():\n return JsonResponse({'success':True})\n else:\n messages.add_message(request, messages.SUCCESS, 'Pago aceptado con éxito')\n if request.user.is_superuser:\n return redirect('wallets:admin_payments')\n else:\n return redirect('wallets:pending_payments')\n except Wallet.NotEnoughBalance:\n params['notenoughbalance'] = True\n if request.is_ajax():\n response = JsonResponse({'error':'notenoughbalance', 'error_message':'El monedero no tiene saldo suficiente.'})\n response.status_code = 400\n return response\n\n if action == 'cancel':\n payment.cancel_payment()\n if request.is_ajax():\n return JsonResponse({'success': True})\n else:\n messages.add_message(request, messages.SUCCESS, 'Pago rechazado con éxito')\n if request.user.is_superuser:\n return redirect('wallets:admin_payments')\n else:\n return redirect('wallets:pending_payments')\n\n sender_type, sender = payment.sender.get_related_entity()\n receiver_type, entity = payment.receiver.get_related_entity()\n params['sender'] = sender\n params['receiver'] = entity\n\n if receiver_type == 'entity':\n params['bonus'] = entity.bonus(payment.total_amount, sender_type)\n\n return render(request, 'payment/detail.html', params)\n\n\nclass SelectPaymentReceiverView(FilterView, ListItemUrlMixin, AjaxTemplateResponseMixin):\n\n model = Entity\n queryset = Entity.objects.active()\n objects_url_name = 'create_payment'\n template_name = 'payment/select_entity.html'\n ajax_template_name = 'payment/entity.html'\n filterset_class = EntityFilter\n paginate_by = 9\n\n def get_context_data(self, **kwargs):\n context = super(SelectPaymentReceiverView, self).get_context_data(**kwargs)\n UserModel = get_user_model()\n type, instance = UserModel.get_related_entity(self.request.user)\n context['is_entity'] = type == 'entity'\n\n return context\n\n\n@login_required\ndef 
new_payment(request, pk):\n\n entity = get_object_or_404(Entity, pk=pk)\n UserModel = get_user_model()\n type, instance = UserModel.get_related_entity(request.user)\n data = {\n 'receiver': entity,\n 'is_sender_entity': type == 'entity',\n }\n\n if request.method == \"POST\":\n form = PaymentForm(request.POST, request.FILES)\n\n if form.is_valid():\n try:\n payment = form.save()\n messages.add_message(request, messages.SUCCESS,\n 'Pago enviado con éxito')\n return redirect('wallets:payment_detail', pk=payment.pk)\n except Wallet.WrongPinCode:\n data['wrongpingcode'] = True\n except Wallet.NotEnoughBalance:\n data['notenoughbalance'] = True\n except Wallet.ReceiverNotRegistered:\n data['receivernotregistered'] = True\n\n else:\n form = PaymentForm(initial={'sender':request.user, 'receiver':entity.user})\n\n data['form'] = form\n return render(request, 'payment/create.html', data)\n\n\n@superuser_required\ndef admin_payments(request):\n payments = Payment.objects.all().order_by('-timestamp')\n\n status = request.GET.get('status')\n if status:\n payments = payments.filter(status=status)\n\n email = request.GET.get('email')\n if email:\n payments = payments.filter(sender__email=email) | payments.filter(receiver__email=email)\n\n page = request.GET.get('page')\n payments = helpers.paginate(payments, page, elems_perpage=10)\n params = {\n 'payments': payments,\n 'showing_all': True\n }\n\n if request.is_ajax():\n response = render(request, 'payment/query.html', params)\n response['Cache-Control'] = 'no-cache'\n response['Vary'] = 'Accept'\n return response\n else:\n return render(request, 'payment/admin.html', params)","repo_name":"Mercado-Social-de-Madrid/eticsAppServer","sub_path":"wallets/views/payment.py","file_name":"payment.py","file_ext":"py","file_size_in_byte":6249,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"6442330943","text":"from turbogears import expose, validate, validators, error_handler, \\\n\t exception_handler, identity\nfrom ttl.tg.errorhandlers import pr_std_exception_handler, pr_form_error_handler\nfrom ttl.tg.validators import std_state_factory, PrFormSchema, \\\n PrGridSchema, ExtendedDateValidator, OpenRestSchema\nimport ttl.tg.validators as tgvalidators\nfrom prcommon.model import WebSources\nfrom ttl.base import stdreturn, duplicatereturn, samereturn\n\nclass WebSourcesAddSchema(PrFormSchema):\n\t\"\"\" schema \"\"\"\n\tpass\n\nclass WebSourcesGetSchema(PrFormSchema):\n\t\"\"\" schema \"\"\"\n\twebsourceid = validators.Int()\n\n\nclass WebSourcesController( object ):\n\t\"\"\" WebSources Controller \"\"\"\n\t@expose(\"json\")\n\t@error_handler(pr_form_error_handler)\n\t@exception_handler(pr_std_exception_handler)\n\t@validate(validators=OpenRestSchema(), state_factory=std_state_factory)\n\tdef list(self, *args, **params):\n\t\t\"\"\" list of websources \"\"\"\n\n\t\tif len(args) > 0:\n\t\t\tparams[\"websourceid\"] = int(args[0])\n\n\t\treturn WebSources.get_list_websources ( params )\n\n\t@expose(\"json\")\n\t@error_handler(pr_form_error_handler)\n\t@exception_handler(pr_std_exception_handler)\n\t@validate(validators=WebSourcesAddSchema(), state_factory=std_state_factory)\n\t@identity.require(identity.in_group(\"dataadmin\"))\n\tdef add(self, *args, **params):\n\t\t\"\"\" Create a new web source \"\"\"\n\n\t\tif WebSources.exists ( params[\"websourcedescription\"]):\n\t\t\treturn duplicatereturn()\n\n\t\twebsourceid = WebSources.add( params)\n\n\t\t
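# echo the stored record back so the caller sees the saved values\n\t\treturn stdreturn( data = 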
WebSources.get( websourceid ))\n\n\t@expose(\"json\")\n\t@error_handler(pr_form_error_handler)\n\t@exception_handler(pr_std_exception_handler)\n\t@validate(validators=WebSourcesGetSchema(), state_factory=std_state_factory)\n\t@identity.require(identity.in_group(\"dataadmin\"))\n\tdef update(self, *args, **params):\n\t\t\"\"\" Update an existing web source \"\"\"\n\n\t\tif WebSources.exists ( params[\"websourcedescription\"], params[\"websourceid\"]):\n\t\t\treturn duplicatereturn()\n\n\t\tWebSources.update( params)\n\n\t\treturn stdreturn( data = WebSources.get( params[\"websourceid\"] ))\n\n\n\t@expose(\"json\")\n\t@error_handler(pr_form_error_handler)\n\t@exception_handler(pr_std_exception_handler)\n\t@validate(validators=WebSourcesGetSchema(), state_factory=std_state_factory)\n\tdef get(self, *args, **params):\n\t\t\"\"\" get websources \"\"\"\n\n\t\treturn stdreturn( data = WebSources.get( params[\"websourceid\"]))\n","repo_name":"meanang123/prmax","sub_path":"prcommon/build/lib/prcommon/sitecontrollers/websources.py","file_name":"websources.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23006597815","text":"#!/usr/bin/env python\n# coding: utf-8\nfrom __future__ import print_function, division\nimport os\nimport torch\nimport pandas as pd\nfrom skimage import io, transform\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, utils\n\n# Ignore warnings\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nplt.ion() # interactive mode\n\n\nclass Hasyv2Dataset(Dataset):\n \"\"\"HASYv2 dataset.\"\"\"\n \n def __init__(self, csv_file, root_dir, transform=None):\n \"\"\"\n Args:\n csv_file (string): Path to the csv file with annotations.\n root_dir (string): Directory with all the images.\n transform (callable, optional): Optional transform to be applied\n on a sample.\n \"\"\"\n self.img_info = pd.read_csv(csv_file)\n self.root_dir = root_dir\n self.transform = transform\n\n def __len__(self):\n return len(self.img_info)\n\n def __getitem__(self, idx):\n img_path = os.path.join(self.root_dir, self.img_info.iloc[idx, 0])\n image = io.imread(img_path)\n label = self.img_info.iloc[idx,1]\n sample = {'image': image, 'label': label}\n\n if self.transform:\n sample = self.transform(sample)\n\n return sample \n\nclass Rescale(object):\n \"\"\"Rescale the image in a sample to a given size.\n\n Args:\n output_size (tuple or int): Desired output size. If tuple, output is\n matched to output_size. If int, smaller of image edges is matched\n to output_size keeping aspect ratio the same.\n \"\"\"\n\n def __init__(self, output_size):\n assert isinstance(output_size, (int, tuple))\n self.output_size = output_size\n\n def __call__(self, sample):\n image, label = sample['image'], sample['label']\n\n h, w = image.shape[:2]\n if isinstance(self.output_size, int):\n if h > w:\n new_h, new_w = self.output_size * h / w, self.output_size\n else:\n new_h, new_w = self.output_size, self.output_size * w / h\n else:\n new_h, new_w = self.output_size\n\n new_h, new_w = int(new_h), int(new_w)\n\n img = transform.resize(image, (new_h, new_w))\n\n return {'image': img, 'label': label }\n\n\nclass RandomCrop(object):\n \"\"\"Crop randomly the image in a sample.\n\n Args:\n output_size (tuple or int): Desired output size. 
If int, square crop\n is made.\n \"\"\"\n\n def __init__(self, output_size):\n assert isinstance(output_size, (int, tuple))\n if isinstance(output_size, int):\n self.output_size = (output_size, output_size)\n else:\n assert len(output_size) == 2\n self.output_size = output_size\n\n def __call__(self, sample):\n image, label= sample['image'], sample['label']\n\n h, w = image.shape[:2]\n new_h, new_w = self.output_size\n\n top = np.random.randint(0, h - new_h)\n left = np.random.randint(0, w - new_w)\n\n image = image[top: top + new_h, left: left + new_w]\n\n return {'image': image, 'label': label }\n\n\nclass ToTensor(object):\n \"\"\"Convert ndarrays in sample to Tensors.\"\"\"\n\n def __call__(self, sample):\n image, label = sample['image'], sample['label']\n\n # swap color axis because\n # numpy image: H x W x C\n # torch image: C X H X W\n image = image.transpose((2, 0, 1))\n \n return {'image': torch.from_numpy(image), 'label': label}\n","repo_name":"bhairavmehta95/learning-relevant-tensor-networks","sub_path":"datasets/Hasyv2Dataset.py","file_name":"Hasyv2Dataset.py","file_ext":"py","file_size_in_byte":3540,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23569795101","text":"import math\r\ndef stall(N,K):\r\n if N==1:\r\n y,z=0,0\r\n elif N==2:\r\n y,z=K%2,0\r\n else:\r\n if K==1:\r\n y,z=int(math.ceil(float(N-1)/2)),int(math.floor(float(N-1)/2))\r\n else:\r\n if K%2==0:\r\n y,z=stall(math.ceil(float(N-1)/2),K/2)\r\n else:\r\n y,z=stall(math.floor(float(N-1)/2),K/2)\r\n return [y,z]\r\nf=open('C-small-2-attempt0.in','r')\r\no=open('out.txt','w')\r\nT=int(f.readline())\r\nfor t in range(T):\r\n N,K=[int(i) for i in f.readline().split()]\r\n y,z=stall(N,K)\r\n o.write('Case #'+str(t+1)+': '+str(y)+' '+str(z)+'\\n')\r\no.close()\r\nf.close()","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/2258.py","file_name":"2258.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40550901618","text":"from charms.reactive import when, when_not\nfrom charms.reactive import set_flag, clear_flag\nfrom charms.reactive import Endpoint\n\n\nclass MountRequires(Endpoint):\n\n @when('endpoint.{endpoint_name}.joined')\n def joined(self):\n set_flag(self.expand_name('{endpoint_name}.joined'))\n\n @when('endpoint.{endpoint_name}.changed')\n def changed(self):\n if any(unit.received_raw['mountpoint']\n for unit in self.all_joined_units):\n set_flag(self.expand_name('{endpoint_name}.available'))\n\n @when_not('endpoint.{endpoint_name}.joined')\n def broken(self):\n clear_flag(self.expand_name('{endpoint_name}.joined'))\n clear_flag(self.expand_name('{endpoint_name}.available'))\n\n def set_export_name(self, export_name):\n for relation in self.relations:\n relation.to_publish_raw['export_name'] = export_name\n\n def mounts(self):\n \"\"\"\n Returns a list of available mounts and their associated data.\n\n The return value is a list of dicts of the following form::\n\n [\n {\n 'mount_name': name_of_mount,\n 'mounts': [\n {\n 'hostname': hostname,\n 'mountpoint': mountpoint,\n 'fstype': mounttype,\n 'options': options\n },\n # ...\n ],\n },\n # ...\n ]\n \"\"\"\n mounts = {}\n for relation in self.relations:\n for unit in relation.joined_units:\n mount_name = unit.received_raw.get(\n 'export_name', relation.application_name)\n mount = mounts.setdefault(mount_name, {\n 'mount_name': mount_name,\n 'mounts': [],\n })\n 
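# one mounts entry per joined unit; incomplete entries are filtered out\n # by the completeness check below\n 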
data = unit.received_raw\n mountpoint = data['mountpoint']\n fstype = data['fstype']\n options = data['options']\n host = data['hostname'] or \\\n data['private-address']\n if host and mountpoint and fstype and options:\n mount['mounts'].append({\n 'hostname': host,\n 'mountpoint': mountpoint,\n 'fstype': fstype,\n 'options': options\n })\n return [m for m in mounts.values() if m['mounts']]\n","repo_name":"juju-solutions/interface-mount","sub_path":"requires.py","file_name":"requires.py","file_ext":"py","file_size_in_byte":2542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38020001597","text":"from FiveInRowPlayers import *\nfrom MinimaxPlayer import MinimaxPlayer\nimport threading\n\nclass FiveInRow(threading.Thread):\n\n def __init__(self, player1, player2):\n threading.Thread.__init__(self)\n\n self.size = 15\n self.board = [x[:] for x in [[0] * self.size] * self.size]\n\n player1.setNumber(1)\n player2.setNumber(2)\n self.players = (player1, player2)\n self.turn = 0\n for player in self.players: player.setGame(self)\n self.lastMove = None\n self.moves = []\n\n def getBoardPos(self, x, y):\n return self.board[y][x]\n\n def setBoardPos(self, x, y , mark):\n self.board[y][x] = mark\n\n def changeTurn(self):\n self.players = self.players[1], self.players[0]\n\n def onBoard(self, x, y):\n return 0 <= x < self.size and 0 <= y < self.size\n\n def checkWin(self, board, x0, y0):\n\n directions = [\n [1, 0], #horizontal\n [0, 1], #vertical\n [1, 1], #diagonal 1\n [1, -1] #diagonal 2\n ]\n\n mark = board[y0][x0]\n\n for direction in directions:\n dx, dy = direction\n x, y = x0, y0\n marksInRow = 0\n\n for _ in range(2):\n while self.onBoard(x, y) and board[y][x] == mark:\n x += dx\n y += dy\n marksInRow += 1\n\n #check the negative direction also\n dx *= -1\n dy *= -1\n x = x0 + dx\n y = y0 + dy\n\n if marksInRow >= 5:\n return True\n\n return False\n\n def requestMove(self):\n move = self.players[0].requestMove()\n if not self.onBoard(*move):\n return None\n\n if self.getBoardPos(*move) == 0:\n self.setBoardPos(*move, self.players[0].number)\n self.turn += 1\n self.lastMove = move\n self.moves.append(move)\n return move\n return None\n\n def tick(self):\n\n move = None\n while move is None:\n move = self.requestMove()\n\n if self.turn >= self.size**2:\n #map(lambda p: p.notifyDraw(), self.players)\n for player in self.players: player.notifyDraw()\n return False #game over\n\n if self.checkWin(self.board, *move):\n self.players[0].notifyWin()\n self.players[1].notifyLoss()\n return False #game is over\n\n self.players[0].notifyMoveOk()\n self.changeTurn()\n return True #game continues\n\n def run(self):\n while self.tick():\n print(self.turn)\n print(str(self))\n print(str(self))\n\n def __str__(self):\n marks = ['.', 'X', 'O']\n return '\\n'.join([''.join([marks[item] for item in row]) for row in self.board])\n\n\ndef main():\n p1 = HumanPlayer(\"Mikko\")\n p2 = HumanPlayer(\"Kaisa\")\n\n game = FiveInRow(p1, p2)\n\n game.start()\n game.join()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"mvjseppa/FiveInRow","sub_path":"FiveInRow.py","file_name":"FiveInRow.py","file_ext":"py","file_size_in_byte":3018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19258706956","text":"import pandas as pd\nfrom calculators.QMachines import haulageVehicle\n\n\nclass stageLoader_att:\n def __init__(self):\n df = pd.read_excel(r'../data/mine_attributes.xlsx', sheet_name=\"stage 
loader\", skiprows=1)\n df.set_index('key', inplace=True)\n self.power = df['value'].loc['power']\n self.workers = df['value'].loc['workers']\n self.max_output = df['value'].loc['max output'] #TPH\n\n\ndef qMachine(power=None, load=None, rating=None, usage=None, units=None):\n emissions = haulageVehicle(power, load, rating, usage, units)\n return emissions\n\n\n","repo_name":"jfgphillips/FUSE-V2","sub_path":"MineIO/longwallEquipment/stageLoader.py","file_name":"stageLoader.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"40824929221","text":"from selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nimport time\r\n\r\nlink = \"http://suninjuly.github.io/registration2.html\"\r\n#link = \"http://suninjuly.github.io/registration1.html\"\r\nbrowser = webdriver.Chrome()\r\nbrowser.get(link)\r\n\r\nnameInput = browser.find_element(By.CSS_SELECTOR, \".first_class [placeholder='Input your first name']\")\r\nnameInput.send_keys(\"Denis\")\r\n\r\nlastnameInput = browser.find_element(By.CSS_SELECTOR, \".second_class [placeholder='Input your last name']\")\r\nlastnameInput.send_keys(\"Ruban\")\r\n\r\nemailInput = browser.find_element(By.CSS_SELECTOR, \".third_class [placeholder='Input your email']\")\r\nemailInput.send_keys(\"rubanqa@testmail.com\")\r\n\r\nbutton = browser.find_element(By.CSS_SELECTOR, \"button.btn\")\r\nbutton.click()\r\ntime.sleep(1)\r\n\r\nwelcome_text_elt = browser.find_element(By.TAG_NAME, \"h1\")\r\nwelcome_text = welcome_text_elt.text\r\n\r\nassert \"Congratulations! You have successfully registered!\" == welcome_text\r\nbrowser.close()","repo_name":"RubanQA/stepik_qa_automation","sub_path":"stepik_test_automation_course/Unit1/lesson-1-6-11.py","file_name":"lesson-1-6-11.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34795802503","text":"import sys\nsys.path.append(\"../NIBRS_CLI_Tool/segments\")\nfrom segments.segment import Segment\n\nclass CaliforniaSegment9(Segment):\n\t\n\tDATA_ELEMENT_ORDERING = [{\"name\": \"Segment Length\", \"length\": 4}, \n\t\t{\"name\": \"Segment Level\", \"length\": 1}, \n\t\t{\"name\": \"Segment Action Type\", \"length\": 1}, \n\t\t{\"name\": \"Month of Submission\", \"length\": 2},\n\t\t{\"name\": \"Year of Submission\", \"length\": 4},\n\t\t{\"name\": \"City Indicator\", \"length\": 4},\n\t\t{\"name\": \"ORI\", \"length\": 9},\n\t\t{\"name\": \"Incident Number\", \"length\": 12},\n\t\t{\"name\": \"Victim Sequence Number\", \"length\": 3},\n\t\t{\"name\": \"Senior Citizen Indicator\", \"length\": 1},\n\t\t[10,\n\t\t\t{\"name\": \"Victim Connected to UCR Offense Code\", \"length\": 3},\n\t\t\t{\"name\": \"Victim by Association Indicator\", \"length\": 1},\n\t\t\t{\"name\": \"Victim by Association Type\", \"length\": 2},\n\t\t\t{\"name\": \"Victim by Association Relation\", \"length\": 2}\n\t\t],\n\t\t{\"name\": \"Homicide Victim First Name\", \"length\": 20},\n\t\t{\"name\": \"Homicide Victim Middle Name\", \"length\": 20},\n\t\t{\"name\": \"Homicide Victim Last Name\", \"length\": 25}\n\t]\n\n\tdef getSegmentName(self):\n\t\treturn \"California Segment 9\"\n\n\tdef getDataElements(self):\n\t\treturn 
self.DATA_ELEMENT_ORDERING","repo_name":"chantallexandra/NIBRS_CLI_Tool","sub_path":"segments/californiaSegment9.py","file_name":"californiaSegment9.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"4260453184","text":"\"\"\"\nLibrary for converting units and creating numpy arrays\nwith automatic unit conversion.\n\nThe conversion factors are taken directly from SIESTA\nwhich means that the number of significant digits are not exact.\n\"\"\"\n\nimport numpy as _np\nfrom copy import copy,deepcopy\n\n_def_L = 'Bohr'\n_def_E = 'Ry'\n_def_f = 'Ry/Bohr'\n_def_T = 'K'\n_def_t = 'fs'\n_def_M = 'amu'\n\nBohr = 1.0\nRy = 1.0\nfs = 1.0\nAng = 1. / 0.529177\neV = 1. / 13.60580\nhbar = 6.58211928e-16 * eV * 1.e15\nkBar = 1. / 1.47108e5\nGPa = kBar * 10.\nKelvin = eV / 11604.45\nDebye = 0.393430\namu = 2.133107\npi = 3.14159265358979323846264338327950288419716939937510\ndeg = pi / 180.\n\n_ConversionTable = {\n 'mass' : {\n 'DEFAULT' : _def_M,\n 'kg' : 1.,\n 'g' : 1.e-3,\n 'amu': 1.66054e-27,\n }, \n 'length' : {\n 'DEFAULT' : _def_L,\n 'm' : 1., \n 'cm' : 0.01, \n 'nm' : 1.e-9, \n 'Ang' : 1.e-10, \n 'Bohr' : 0.529177e-10, \n }, \n 'time' : {\n 'DEFAULT' : _def_t,\n 's' : 1. ,\n 'fs' : 1.e-15 ,\n 'ps' : 1.e-12 ,\n 'ns' : 1.e-9 ,\n },\n 'energy' : {\n 'DEFAULT' : _def_E,\n 'J' : 1., \n 'erg' : 1.e-7, \n 'eV' : 1.60219e-19, \n 'meV' : 1.60219e-22, \n 'Ry' : 2.17991e-18, \n 'mRy' : 2.17991e-21, \n 'Hartree' : 4.35982e-18, \n 'K' : 1.38066e-23, \n 'cm**-1' : 1.986e-23,\n 'kJ/mol' : 1.6606e-21,\n 'Hz' : 6.6262e-34,\n 'THz' : 6.6262e-22,\n 'cm-1' : 1.986e-23,\n 'cm^-1' : 1.986e-23,\n },\n 'force' : {\n 'DEFAULT' : _def_f,\n 'N' : 1.,\n 'eV/Ang' : 1.60219e-9,\n 'eV/Bohr' : 1.60219e-9*0.529177,\n 'Ry/Bohr' : 4.11943e-8,\n 'Ry/Ang' : 4.11943e-8/0.529177,\n }\n }\n\n# from http://physics.nist.gov/PhysRefData/Elements/\n__atom = {\n 1 : { 'Z' : 1 , 'name' : 'H' , 'amu' : 1.007947 },\n 2 : { 'Z' : 2 , 'name' : 'He', 'amu' : 4.002602 },\n 3 : { 'Z' : 3 , 'name' : 'Li', 'amu' : 6.9412 },\n 4 : { 'Z' : 4 , 'name' : 'Be', 'amu' : 9.012182 },\n 5 : { 'Z' : 5 , 'name' : 'B' , 'amu' : 10.8117 },\n 6 : { 'Z' : 6 , 'name' : 'C' , 'amu' : 12.01078 },\n 7 : { 'Z' : 7 , 'name' : 'N' , 'amu' : 14.00672 },\n 8 : { 'Z' : 8 , 'name' : 'O' , 'amu' : 15.99943 },\n 9 : { 'Z' : 9 , 'name' : 'F' , 'amu' : 18.9984032 },\n 10 : { 'Z' : 10 , 'name' : 'Ne', 'amu' : 20.1797 },\n 11 : { 'Z' : 11 , 'name' : 'Na', 'amu' : 22.989770 },\n 12 : { 'Z' : 12 , 'name' : 'Mg', 'amu' : 24.30506 },\n 13 : { 'Z' : 13 , 'name' : 'Al', 'amu' : 26.9815382 },\n 14 : { 'Z' : 14 , 'name' : 'Si', 'amu' : 28.0855 },\n 15 : { 'Z' : 15 , 'name' : 'P' , 'amu' : 30.973761 },\n 16 : { 'Z' : 16 , 'name' : 'S' , 'amu' : 32.0655 },\n 17 : { 'Z' : 17 , 'name' : 'Cl', 'amu' : 35.453 },\n 18 : { 'Z' : 18 , 'name' : 'Ar', 'amu' : 39.948 },\n 19 : { 'Z' : 19 , 'name' : 'K' , 'amu' : 39.0983 },\n 20 : { 'Z' : 20 , 'name' : 'Ca', 'amu' : 40.0784 },\n 21 : { 'Z' : 21 , 'name' : 'Sc', 'amu' : 44.955912 },\n 22 : { 'Z' : 22 , 'name' : 'Ti', 'amu' : 47.867 },\n 23 : { 'Z' : 23 , 'name' : 'V' , 'amu' : 50.9415 },\n 24 : { 'Z' : 24 , 'name' : 'Cr', 'amu' : 51.99616 },\n 25 : { 'Z' : 25 , 'name' : 'Mn', 'amu' : 54.9380499 },\n 26 : { 'Z' : 26 , 'name' : 'Fe', 'amu' : 55.8452 },\n 27 : { 'Z' : 27 , 'name' : 'Co', 'amu' : 58.933200 },\n 28 : { 'Z' : 28 , 'name' : 'Ni', 'amu' : 58.69342 },\n 29 : { 'Z' : 29 , 'name' : 'Cu', 'amu' : 63.5463 },\n 30 : { 'Z' : 30 , 'name' : 'Zn', 
'amu' : 65.4094 },\n 31 : { 'Z' : 31 , 'name' : 'Ga', 'amu' : 69.7231 },\n 32 : { 'Z' : 32 , 'name' : 'Ge', 'amu' : 72.64 },\n 33 : { 'Z' : 33 , 'name' : 'As', 'amu' : 74.92160 },\n 34 : { 'Z' : 34 , 'name' : 'Se', 'amu' : 78.96 },\n 35 : { 'Z' : 35 , 'name' : 'Br', 'amu' : 79.904 },\n 36 : { 'Z' : 36 , 'name' : 'Kr', 'amu' : 83.798 },\n 37 : { 'Z' : 37 , 'name' : 'Rb', 'amu' : 85.4678 },\n 38 : { 'Z' : 38 , 'name' : 'Sr', 'amu' : 87.62 },\n 39 : { 'Z' : 39 , 'name' : 'Y' , 'amu' : 88.90585 },\n 40 : { 'Z' : 40 , 'name' : 'Zr', 'amu' : 91.224 },\n 41 : { 'Z' : 41 , 'name' : 'Nb', 'amu' : 92.90638 },\n 42 : { 'Z' : 42 , 'name' : 'Mo', 'amu' : 95.96 },\n 44 : { 'Z' : 44 , 'name' : 'Ru', 'amu' : 101.07 },\n 45 : { 'Z' : 45 , 'name' : 'Rh', 'amu' : 102.90550 },\n 46 : { 'Z' : 46 , 'name' : 'Pd', 'amu' : 106.42 },\n 47 : { 'Z' : 47 , 'name' : 'Ag', 'amu' : 107.8682 },\n 48 : { 'Z' : 48 , 'name' : 'Cd', 'amu' : 112.411 },\n 49 : { 'Z' : 49 , 'name' : 'In', 'amu' : 114.818 },\n 50 : { 'Z' : 50 , 'name' : 'Sn', 'amu' : 118.710 },\n 51 : { 'Z' : 51 , 'name' : 'Sb', 'amu' : 121.760 },\n 52 : { 'Z' : 52 , 'name' : 'Te', 'amu' : 127.60 },\n 53 : { 'Z' : 53 , 'name' : 'I' , 'amu' : 126.90447 },\n 54 : { 'Z' : 54 , 'name' : 'Xe', 'amu' : 131.293 },\n 55 : { 'Z' : 55 , 'name' : 'Cs', 'amu' : 132.9054519 },\n 56 : { 'Z' : 56 , 'name' : 'Ba', 'amu' : 137.327 },\n 57 : { 'Z' : 57 , 'name' : 'La', 'amu' : 138.905477 },\n 58 : { 'Z' : 58 , 'name' : 'Ce', 'amu' : 140.116 },\n 59 : { 'Z' : 59 , 'name' : 'Pr', 'amu' : 140.90765 },\n 60 : { 'Z' : 60 , 'name' : 'Nd', 'amu' : 144.242 },\n 62 : { 'Z' : 62 , 'name' : 'Sm', 'amu' : 150.36 },\n 63 : { 'Z' : 63 , 'name' : 'Eu', 'amu' : 151.964 },\n 64 : { 'Z' : 64 , 'name' : 'Gd', 'amu' : 157.25 },\n 65 : { 'Z' : 65 , 'name' : 'Tb', 'amu' : 158.92535 },\n 66 : { 'Z' : 66 , 'name' : 'Dy', 'amu' : 162.500 },\n 67 : { 'Z' : 67 , 'name' : 'Ho', 'amu' : 164.93032 },\n 68 : { 'Z' : 68 , 'name' : 'Er', 'amu' : 167.259 },\n 69 : { 'Z' : 69 , 'name' : 'Tm', 'amu' : 168.93421 },\n 70 : { 'Z' : 70 , 'name' : 'Yb', 'amu' : 173.054 },\n 71 : { 'Z' : 71 , 'name' : 'Lu', 'amu' : 174.9668 },\n 72 : { 'Z' : 72 , 'name' : 'Hf', 'amu' : 178.49 },\n 73 : { 'Z' : 73 , 'name' : 'Ta', 'amu' : 180.94788 },\n 74 : { 'Z' : 74 , 'name' : 'W' , 'amu' : 183.84 },\n 75 : { 'Z' : 75 , 'name' : 'Re', 'amu' : 186.207 },\n 76 : { 'Z' : 76 , 'name' : 'Os', 'amu' : 190.23 },\n 77 : { 'Z' : 77 , 'name' : 'Ir', 'amu' : 192.217 },\n 78 : { 'Z' : 78 , 'name' : 'Pt', 'amu' : 195.0782 },\n 79 : { 'Z' : 79 , 'name' : 'Au', 'amu' : 196.966552 },\n 80 : { 'Z' : 80 , 'name' : 'Hg', 'amu' : 200.59 },\n 81 : { 'Z' : 81 , 'name' : 'Tl', 'amu' : 204.3833 },\n 82 : { 'Z' : 82 , 'name' : 'Pb', 'amu' : 207.2 },\n 83 : { 'Z' : 83 , 'name' : 'Bi', 'amu' : 208.98040 },\n }\n\n# Apply the names to the dictionary so that lookups can be made from index or from names =>\n# __atom['He'] == __atom[2]\n__atom.update(dict([__atom[k]['name'],v] for k,v in __atom.iteritems()))\n\n\n# 1001 : 2.016, # Deuterium\n# 2001 : 15.99943, # FO mix: (1-x) O + x F, x = 0.000\n# 2002 : 16.186865825, # x = 0.063\n# 2003 : 16.37430165, # 0.125\n# 2004 : 16.7491733, # 0.250\n# 2005 : 16.59922464, # 0.200\n# 2006 : 16.89912196 # 0.300\n\n# Perhaps this should be alterred into a class so users\n# can append other elements?\ndef AtomMass(atom,unit='amu'): return __atom[atom]['amu'] * UnitConvert('amu',Unit(unit))\ndef AtomName(atom): return __atom[atom]['name']\ndef AtomZ(atom): return __atom[atom]['Z']\n\n\n# Here we start the unit type conversion 
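The __atom.update(...) line above makes every element reachable both by atomic number and by symbol, so __atom['He'] and __atom[2] point at the same record. Note that iteritems() exists only on Python 2; under Python 3 (which the module's print() calls suggest) the same trick needs .items(). A tiny self-contained illustration, with a one-element table standing in for the module's full dictionary:

atoms = {2: {'Z': 2, 'name': 'He', 'amu': 4.002602}}
# Python 3 spelling of the dual-key update (the module uses iteritems()).
atoms.update({v['name']: v for v in list(atoms.values())})
print(atoms['He']['amu'] == atoms[2]['amu'])  # True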
library\nclass UnknownUnitTypeError(Exception):\n \"\"\"\n Error raised when unittype of a unit cannot be found.\n \"\"\"\n pass # We utilize the generic interface\n\ndef UnitType(unit):\n \"\"\"\n Returns the type of unit that is associated with\n input unit.\n\n Parameters\n ----------\n unit : str\n unit, e.g. kg, Ang, eV etc. returns the \n\n Examples\n --------\n >>> import sids.helper.units as shu\n >>> shu.UnitType('kg')\n 'mass'\n >>> shu.UnitType('eV')\n 'energy'\n \"\"\"\n for k in _ConversionTable:\n try:\n if unit['unit'] in _ConversionTable[k]:\n return k\n except:\n try:\n if unit in _ConversionTable[k]:\n return k\n except:\n pass\n raise UnknownUnitTypeError('The unit \"'+str(k)+'\" could not be located in the table.')\n\nclass UnknownUnitError(Exception):\n \"\"\"\n Error raised when a unit cannot be found.\n \"\"\"\n pass # We utilize the generic interface\n\ndef UnitConvert(fr,to,opts={}):\n \"\"\"\n Returns the factor that takes 'fr' to the units of 'to'.\n\n Parameters\n ----------\n fr :\n starting unit\n to :\n ending unit\n opts :\n controls whether the unit conversion is in powers or fractional units\n\n Examples\n -------\n >>> import sids.helper.units as shu\n >>> shu.UnitConvert('kg','g')\n 1000\n >>> shu.UnitConvert('eV','J')\n 1.60219e-19\n \"\"\"\n # In the case that the conversion to is None, we should do nothing.\n if to is None: return 1.\n fr = Unit(fr) # ensure that it is a unit\n to = Unit(to) # ensure that it is a unit\n frU = None ; toU = None\n frV = None ; toV = None\n \n # Check that the unit types live in the same \n # space\n # TODO this currently does not handle if powers are taken into\n # consideration.\n\n for k in _ConversionTable:\n if fr.unit in _ConversionTable[k]:\n frU = k\n frV = _ConversionTable[k][fr.unit]\n if to.unit in _ConversionTable[k]:\n toU = k\n toV = _ConversionTable[k][to.unit]\n if frU != toU:\n raise Exception('The unit conversion is not from the same group: '+frU+' to '+toU)\n\n # Calculate conversion factor\n val = frV / toV\n for opt in ['^','power','p']:\n if opt in opts: val = val ** opts[opt]\n for opt in ['*','factor','fac']:\n if opt in opts: val = val * opts[opt]\n for opt in ['/','divide','div']:\n if opt in opts: val = val / opts[opt]\n return val\n\n\n# A single unit-object.\n# Contains functions to compare and convert a unit\n# to another unit.\nclass Unit(object):\n \"\"\"\n Container for the unit and the conversion factors etc.\n This will make it easier to maintain the units, and eventually change the\n usage.\n \"\"\"\n def __new__(cls,*args,**kwargs):\n if isinstance(args[0],Unit):\n return args[0]\n #print('Creating new unit:',args)\n obj = object.__new__(cls)\n if len(args) == 1: # We are creating a unit without a variable name\n obj.variable = None\n obj.unit = args[0]\n else:\n obj.variable = args[0]\n # Typical case when passing a unit from another variable...\n if isinstance(args[1],Unit):\n obj.unit = args[1].unit\n else:\n obj.unit = args[1]\n \n # We need to handle some type of operator definitions\n # But how to handle them?\n for op in ['**','^','/','*']:\n pass\n \n return obj\n\n def type(self):\n \"\"\" Returns the type of unit this is, i.e. energy, length, time, etc. 
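UnitConvert above resolves both units against the same SI-anchored table and returns the ratio frV / toV, which is all a linear unit conversion needs. A hedged miniature with a three-entry table standing in for _ConversionTable — the real function also checks that both units belong to the same group and applies the optional power/factor/divide modifiers:

TABLE = {"m": 1.0, "cm": 0.01, "Ang": 1e-10}  # SI value of each unit

def convert(fr, to):
    # Factor that takes a quantity in `fr` to the same quantity in `to`.
    return TABLE[fr] / TABLE[to]

print(convert("m", "cm"))    # 100.0
print(convert("Ang", "cm"))  # 1e-08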
\"\"\"\n for k,v in _ConversionTable.iteritems():\n if self.unit in v: return k\n\n def SI(self):\n \"\"\" Returns the SI conversion factor for the unit \"\"\"\n for k,v in _ConversionTable.iteritems():\n if self.variable in v: return v[self.variable]\n\n def convert(self,to):\n \"\"\" Convert this unit to another and returns the conversion factor. \"\"\"\n u = Unit(to)\n # This will raise an exception if the units are not of same type...\n conv = UnitConvert(self.unit,u.unit)\n #print('Converting:',self.variable,self.unit,u.unit)\n self.unit = deepcopy(u.unit)\n return conv\n\n def copy(self):\n \"\"\"Method for copying the unit \"\"\"\n return deepcopy(self)\n\n def __repr__(self):\n \"\"\" Return the unit in string format (XML type-like)\"\"\"\n return \"\"\n\n def __eq__(self,other):\n \"\"\" Returns true if the variable is the same as the other \"\"\"\n return self.variable == other.variable\n\n def __copy__(self):\n return Unit(copy(self.variable),copy(self.unit))\n \n def __deepcopy__(self, memo):\n return Unit(deepcopy(self.variable),deepcopy(self.unit))\n\n\nclass Units(object):\n \"\"\"\n Container for many units.\n This will make it easier to maintain the units, and eventually change the\n usage.\n \"\"\"\n def __new__(cls,*args):\n # Convert the tuple to a list...\n obj = object.__new__(cls)\n # The args are a list of Unit-objects, or a list of pairs which should be converted to a list of units.\n units = []\n i = 0\n while i < len(args):\n if isinstance(args[i],Unit):\n units.append(deepcopy(args[i]))\n else:\n assert i < len(args)-1, 'Can not grap a unit for: ' + str(args[i])\n units.append(deepcopy(Unit(args[i],args[i+1])))\n i += 1\n i += 1\n obj._units = units\n return obj\n\n def append(self,unit):\n \"\"\" Append a unit object \"\"\"\n # We cannot have to similar units assigned...\n if isinstance(unit,Units):\n for au in unit:\n # Use the recursive routine (keep it simple)\n self.append(au)\n else:\n for u in self:\n if u == unit:\n raise Exception('Can not append a unit which already exists. Do not assign dublicate variables')\n self._units.append(deepcopy(unit))\n\n def update(self,unit):\n \"\"\" Updates unit object, adds it if it does not exist \"\"\"\n if unit is None: return\n if isinstance(unit,Units):\n for u in unit:\n self.update(u)\n else:\n for u in self:\n if u.variable == unit.variable:\n u.unit = deepcopy(unit.unit)\n return\n self.append(unit)\n\n def unit(self,variable):\n \"\"\" Returns the unit object associated with the variable named variable\"\"\"\n # if it is none, return fast.\n if not variable: return None\n for i in self:\n if i.variable == variable:\n return i\n return None\n\n def copy(self):\n \"\"\" Copies this unit segment \"\"\"\n return deepcopy(self)\n\n #################\n # General routines overwriting python models\n #################\n def __len__(self):\n return len(self._units)\n\n def __contains__(self,item):\n if isinstance(item,Unit):\n u = Unit(item.variable,None)\n else:\n u = Unit(item,None)\n for unit in self:\n if u.variable == unit.variable:\n return True\n return False\n \n def __repr__(self):\n \"\"\" Return the unit in string format (XML type-like)\"\"\"\n tmp = ''\n for unit in self:\n tmp += '\\n ' + str(unit)\n tmp += '\\n'\n return tmp\n\n def __iter__(self):\n \"\"\" An iterator of the Units collection \"\"\"\n for unit in self._units:\n yield unit\n\n def __delitem__(self,variable):\n \"\"\" Remove the variable from the units list. 
\"\"\"\n for i in range(len(self)):\n if self._units[i].variable == variable:\n del self._units[i]\n return\n \n # We need to overwrite the copy mechanisms.\n # It really is a pain in the ass, but it works.\n # Luckily all copying need only be refered in the Unit-object.\n def __copy__(self):\n units = Units()\n for unit in self:\n units.append(copy(unit))\n return units\n \n def __deepcopy__(self, memo):\n units = Units()\n for unit in self:\n units.append(deepcopy(unit))\n return units\n\n # Do NOT implement a 'convert' method. It could potentially lead to unexpected behaviour as the\n # Unit-object needs to handle this....\n # TODO consider the conversion of a list of Unit-objects via the Units-object.\n\nclass UnitObject(object):\n \"\"\"\n Contains relevant information about units etc.\n \"\"\"\n def convert(self,*units):\n \"\"\"\n Convert all entries in the object to the desired\n units given by the input.\n \"\"\"\n # Go back in the units variable does not exist.\n if not '_units' in self.__dict__: return\n\n # If it is a Units object, we can simply loop and do the recursive conversion.\n if isinstance(units[0],Units):\n for unit in units[0]:\n self.convert(unit)\n return\n\n # First convert all variables associated with a type... ('length',etc.)\n # This well enable one to convert all of length but still have a unit conversion of a\n # single length variable to another.\n for unit in units:\n u = Unit(unit)\n if not u.variable:\n for self_u in self._units:\n if self_u.type() == u.type():\n self.__dict__[self_u.variable] *= self_u.convert(u)\n \n # Now convert the specific requested units.\n for unit in units:\n u = Unit(unit)\n self_u = self.unit(u.variable)\n if self_u:\n self.__dict__[self_u.variable] *= self_u.convert(u)\n\n def unit(self,variable):\n \"\"\" Returns the unit that is associated with the variable \"\"\"\n return self._units.unit(variable)\n\n @property\n def units(self):\n \"\"\" Returns the units that is associated with the variable \"\"\"\n return self._units\n\n\nclass Variable_ndarray(_np.ndarray):\n \"\"\"\n Numpy array with automatic unit conversion.\n \n When two arrays are multiplied we can automatically \n detect units and convert to the correct units.\n\n Creating a variable with Variable_ndarray we gain access\n to convert which can convert the unit of the variable.\n \"\"\"\n def convert(self,unit):\n \"\"\"\n Convert all entries in the object to the desired\n units given by the input.\n \"\"\"\n # Go back in the units variable does not exist.\n if not '_units' in self.__dict__: return\n\n # If it is a Units object, \n # we can simply loop and do the recursive conversion.\n if isinstance(unit,Units):\n for u in unit: \n self.convert(u)\n return\n\n # Ensure that unit is a Unit\n u = Unit(unit)\n \n # Loop over all variables in this object.\n # It only has one\n for i in self._units:\n if i.type() == u.type():\n self[:] *= i.convert(u)\n\n def add_unit(self,var,unit):\n \"\"\" Adds a unit to a variable beloning to the object \"\"\"\n \n\n def unit(self,variable='self'):\n \"\"\" Returns the unit that is associated with the variable \"\"\"\n return self._units.unit(variable)\n\n @property\n def units(self):\n \"\"\" Returns the units that is associated with the variable \"\"\"\n return self._units\n\n @staticmethod\n def _N(array):\n return _np.array(array)\n\n def __array_finalize__(self,obj):\n \"\"\" Finalize the array with the object \"\"\"\n if obj is None: return\n\n # Create the default units, we need to copy them, to ensure\n # that we do not 
attach the same objects.\n if hasattr(obj,'_units'):\n self._units = deepcopy(obj._units)\n else:\n self._units = deepcopy(self._UNITS)\n\n if hasattr(self,'__variable_finalize__'):\n self.__variable_finalize__()\n","repo_name":"zerothi/siesta-es","sub_path":"sids/helper/units.py","file_name":"units.py","file_ext":"py","file_size_in_byte":19656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6057747158","text":"def p1(data):\n highest = 0\n for line in data:\n id = convert(line)\n if id > highest:\n highest = id\n return highest\n\n\ndef convert(id):\n return int(id, 2)\n\n\ndef p2(data):\n numbered = list(map(convert, data))\n numbered.sort()\n prev = numbered[0]\n for seat_id in numbered:\n new = seat_id\n if new-1 > prev:\n return new-1\n else:\n prev = new\n\n return 1\n\n\ndata = open('data/day5.txt', 'r').read().replace(\"B\", \"1\").replace(\"F\",\n \"0\").replace(\"R\", \"1\").replace(\"L\", \"0\").split(\"\\n\")\nprint(\"Part 1: \"+str(p1(data)))\nprint(\"Part 2: \"+str(p2(data)))\n\n# B = 1, F = 0\n# R = 1, L = 0\n","repo_name":"FreddieBrown/Advent-of-Code-2020","sub_path":"src/day5.py","file_name":"day5.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5857229109","text":"from callbacks import FreezeLayer, WeightsHistory, LRHistory\nfrom keras import callbacks\nimport logging, sys, os\nfrom data_generator import DataGenerator_Base, DataGenerator_BERT\nfrom models import build_HAN, build_HAN_BERT, build_HSAN, build_Context_HAN\nfrom resource_loader import load_NRC, load_LIWC, load_stopwords\nimport keras\nimport multiprocessing\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n# os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # When cudnn implementation not found, run this\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\" # Note: when starting kernel, for gpu_available to be true, this needs to be run\n# only reserve 1 GPU\nos.environ['TF_FORCE_GPU_ALLOW_GROWTH']='true'\n\n#root_dir = \"/Users/ronhochstenbach/Desktop/Thesis/Data\"\nroot_dir = \"/content/drive/MyDrive/Thesis/Data\" #when cloning for colab\n\ndef initialize_datasets(user_level_data, subjects_split, hyperparams, hyperparams_features, model_type,\n validation_set, session=None):\n\n if model_type == \"HAN\" or model_type == \"HSAN\":\n data_generator_train = DataGenerator_Base(user_level_data, subjects_split, set_type='train',\n hyperparams_features=hyperparams_features,\n seq_len=hyperparams['maxlen'], batch_size=hyperparams['batch_size'],\n posts_per_group=hyperparams['posts_per_group'], post_groups_per_user=None,\n max_posts_per_user=hyperparams['posts_per_user'],\n compute_liwc=True,\n ablate_emotions='emotions' in hyperparams['ignore_layer'],\n ablate_liwc='liwc' in hyperparams['ignore_layer'])\n\n data_generator_valid = DataGenerator_Base(user_level_data, subjects_split, set_type=validation_set,\n hyperparams_features=hyperparams_features,\n seq_len=hyperparams['maxlen'], batch_size=hyperparams['batch_size'],\n posts_per_group=hyperparams['posts_per_group'],\n post_groups_per_user=1,\n max_posts_per_user=None,\n shuffle=False,\n compute_liwc=True,\n ablate_emotions='emotions' in hyperparams['ignore_layer'],\n ablate_liwc='liwc' in hyperparams['ignore_layer'])\n elif model_type == \"HAN_BERT\" or model_type == \"Con_HAN\":\n data_generator_train = DataGenerator_BERT(user_level_data, subjects_split, set_type='train',\n 
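The Advent of Code day-5 solution above pre-maps B/R to 1 and F/L to 0 when reading the input, and then each boarding pass is just a 10-bit binary number — which is why convert() is a bare int(id, 2). The same decoding as a one-off check, using the worked example from the puzzle statement:

code = "BFFFBBFRRR".translate(str.maketrans("BFRL", "1010"))
print(int(code, 2))  # 567, the seat ID from the worked example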
hyperparams_features=hyperparams_features, model_type=model_type,\n seq_len=hyperparams['maxlen'], batch_size=hyperparams['batch_size'],\n posts_per_group=hyperparams['posts_per_group'],\n post_groups_per_user=None,\n max_posts_per_user=hyperparams['posts_per_user'],\n compute_liwc=True,\n ablate_emotions='emotions' in hyperparams['ignore_layer'],\n ablate_liwc='liwc' in hyperparams['ignore_layer'])\n\n data_generator_valid = DataGenerator_BERT(user_level_data, subjects_split, set_type=validation_set,\n hyperparams_features=hyperparams_features, model_type=model_type,\n seq_len=hyperparams['maxlen'], batch_size=hyperparams['batch_size'],\n posts_per_group=hyperparams['posts_per_group'],\n post_groups_per_user=1,\n max_posts_per_user=None,\n shuffle=False,\n compute_liwc=True,\n ablate_emotions='emotions' in hyperparams['ignore_layer'],\n ablate_liwc='liwc' in hyperparams['ignore_layer'])\n else:\n raise Exception(\"Unknown type!\")\n\n return data_generator_train, data_generator_valid\n\n\ndef initialize_model(hyperparams, hyperparams_features, model_type,\n logger=None, session=None, transfer=False):\n if not logger:\n logger = logging.getLogger('training')\n ch = logging.StreamHandler(sys.stdout)\n # create formatter\n formatter = logging.Formatter(\"%(asctime)s;%(levelname)s;%(message)s\")\n # add formatter to ch\n ch.setFormatter(formatter)\n # add ch to logger\n logger.addHandler(ch)\n logger.setLevel(logging.DEBUG)\n logger.info(\"Initializing model...\\n\")\n if 'emotions' in hyperparams['ignore_layer']:\n emotions_dim = 0\n else:\n emotions = load_NRC(hyperparams_features['nrc_lexicon_path'])\n emotions_dim = len(emotions)\n if 'liwc' in hyperparams['ignore_layer']:\n liwc_categories_dim = 0\n else:\n liwc_categories = load_LIWC(hyperparams_features['liwc_path'])\n liwc_categories_dim = len(liwc_categories)\n if 'stopwords' in hyperparams['ignore_layer']:\n stopwords_dim = 0\n else:\n stopwords_list = load_stopwords(hyperparams_features['stopwords_path'])\n stopwords_dim = len(stopwords_list)\n\n # Initialize model\n\n if model_type == 'HAN':\n model = build_HAN(hyperparams, hyperparams_features,\n emotions_dim, stopwords_dim, liwc_categories_dim,\n ignore_layer=hyperparams['ignore_layer'])\n elif model_type == 'HAN_BERT' or model_type == \"HAN_RoBERTa\":\n model = build_HAN_BERT(hyperparams, hyperparams_features, model_type,\n emotions_dim, stopwords_dim, liwc_categories_dim,\n ignore_layer=hyperparams['ignore_layer'])\n elif model_type == 'HSAN':\n model = build_HSAN(hyperparams, hyperparams_features,\n emotions_dim, stopwords_dim, liwc_categories_dim,\n ignore_layer=hyperparams['ignore_layer'])\n elif model_type == 'Con_HAN':\n model = build_Context_HAN(hyperparams, hyperparams_features,\n emotions_dim, stopwords_dim, liwc_categories_dim,\n ignore_layer=hyperparams['ignore_layer'])\n else:\n Exception(\"Unknown model!\")\n\n model.summary()\n return model\n\n\ndef train_model(model, hyperparams, save, save_epoch, store_path,\n data_generator_train, data_generator_valid,\n epochs, class_weight, start_epoch=0, workers=multiprocessing.cpu_count(),\n callback_list=[], logger=None,\n\n model_path='/tmp/model',\n validation_set='valid',\n verbose=1):\n\n if not logger:\n logger = logging.getLogger('training')\n ch = logging.StreamHandler(sys.stdout)\n # create formatter\n formatter = logging.Formatter(\"%(asctime)s;%(levelname)s;%(message)s\")\n # add formatter to ch\n ch.setFormatter(formatter)\n # add ch to logger\n logger.addHandler(ch)\n logger.setLevel(logging.DEBUG)\n 
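One detail worth flagging in initialize_model above: the final else branch constructs Exception("Unknown model!") without raising it, so an unrecognised model_type silently falls through and model.summary() then fails on an unbound name. A hedged table-driven restatement of the dispatch that actually raises — the mapping is an editorial rewrite of the if/elif chain, not code from the repo:

GENERATOR_FOR = {
    "HAN": "DataGenerator_Base",
    "HSAN": "DataGenerator_Base",
    "HAN_BERT": "DataGenerator_BERT",
    "Con_HAN": "DataGenerator_BERT",
}

def pick_generator(model_type):
    # Raise instead of instantiating an Exception and dropping it.
    try:
        return GENERATOR_FOR[model_type]
    except KeyError:
        raise ValueError("Unknown model type: %s" % model_type)

print(pick_generator("HAN"))  # DataGenerator_Base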
logger.info(\"Initializing callbacks...\\n\")\n # Initialize callbacks\n freeze_layer = FreezeLayer(patience=hyperparams['freeze_patience'], set_to=not hyperparams['trainable_embeddings'])\n weights_history = WeightsHistory()\n\n lr_history = LRHistory()\n\n reduce_lr = callbacks.ReduceLROnPlateau(monitor='val_loss', factor=hyperparams['reduce_lr_factor'],\n patience=hyperparams['reduce_lr_patience'], min_lr=0.000001, verbose=1)\n lr_schedule = callbacks.LearningRateScheduler(lambda epoch, lr:\n lr if (epoch + 1) % hyperparams['scheduled_reduce_lr_freq'] != 0 else\n lr * hyperparams['scheduled_reduce_lr_factor'], verbose=1)\n callbacks_dict = {}\n\n # callbacks_dict = {'weights_history': weights_history,\n # 'lr_history': lr_history,\n # #'freeze_layer': freeze_layer,\n # 'reduce_lr_plateau': reduce_lr,\n # 'lr_schedule': lr_schedule}\n\n if save:\n callbacks_dict['csv_logger'] = keras.callbacks.CSVLogger(store_path + 'metricHistory.csv',separator=\",\",append=True)\n\n if save_epoch:\n save_epoch_path = store_path + \"_{epoch:02d}.hdf5\"\n print(f\"Saving each epoch at {save_epoch_path}\")\n callbacks_dict['save_per_epoch'] = keras.callbacks.ModelCheckpoint(save_epoch_path, monitor='val_loss', verbose=1,\n save_best_only=False, save_weights_only=True, mode='auto', save_freq='epoch')\n\n logging.info('Train...\\n')\n\n history = model.fit(data_generator_train,\n epochs=epochs, initial_epoch=start_epoch,\n class_weight=class_weight,\n validation_data=data_generator_valid,\n verbose=verbose,\n workers=workers,\n callbacks=callbacks_dict.values(),\n use_multiprocessing=False)\n\n return model, history\n\n\ndef train(user_level_data, subjects_split, save, save_epoch, store_path,\n continue_from_saved, saved_path,\n hyperparams, hyperparams_features,\n dataset_type,\n model_type,\n logger=None,\n validation_set='valid',\n version=0, epochs=50, start_epoch=0,\n session=None, model=None, transfer_layer=False):\n\n if not logger:\n logger = logging.getLogger('training')\n ch = logging.StreamHandler(sys.stdout)\n # create formatter\n formatter = logging.Formatter(\"%(asctime)s;%(levelname)s;%(message)s\")\n # add formatter to ch\n ch.setFormatter(formatter)\n # add ch to logger\n logger.addHandler(ch)\n logger.setLevel(logging.DEBUG)\n\n network_type = 'lstm'\n hierarch_type = 'hierarchical'\n for feature in ['LIWC', 'emotions', 'numerical_dense_layer', 'sparse_feat_dense_layer', 'user_encoded']:\n if feature in hyperparams['ignore_layer']:\n network_type += \"no%s\" % feature\n\n model_path = 'models/%s_%s_%s%d' % (network_type, dataset_type, hierarch_type, version)\n\n logger.info(\"Initializing datasets...\\n\")\n\n data_generator_train, data_generator_valid = initialize_datasets(user_level_data, subjects_split,\n hyperparams, hyperparams_features, model_type,\n validation_set=validation_set)\n\n model = initialize_model(hyperparams, hyperparams_features, model_type,\n session=session, transfer=transfer_layer)\n\n if continue_from_saved:\n print(\"Loading saved model weights!\")\n model.load_weights(saved_path, by_name=True)\n\n print(model_path)\n logger.info(\"Training model...\\n\")\n\n model, history = train_model(model, hyperparams, save, save_epoch, store_path,\n data_generator_train, data_generator_valid,\n epochs=epochs, start_epoch=start_epoch,\n class_weight={0: 1, 1: hyperparams['positive_class_weight']},\n model_path=model_path, workers=1,\n validation_set=validation_set)\n return model, 
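The LearningRateScheduler lambda above implements a step schedule: every scheduled_reduce_lr_freq epochs the rate is multiplied by scheduled_reduce_lr_factor, otherwise it is returned unchanged. The same rule as a pure function (the defaults here are illustrative; the repo reads both numbers from hyperparams), which makes it trivial to test without Keras:

def scheduled_lr(epoch, lr, freq=10, factor=0.5):
    # Epochs are zero-based, hence the +1 before the modulo.
    return lr if (epoch + 1) % freq != 0 else lr * factor

print(scheduled_lr(8, 0.01))  # 0.01   (epoch 9: unchanged)
print(scheduled_lr(9, 0.01))  # 0.005  (epoch 10: scheduled drop)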
history\n\n\n","repo_name":"RonHochstenbach/masterThesis","sub_path":"Code/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":12381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3834429876","text":"import pygame, random\n\npygame.init()\nscreen = pygame.display.set_mode([1800, 900])\nscreen.fill([255,255,255])\nx = 100\ny = 500\n\n\n_RANK_STRINGS = (\n 'A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K'\n)\n\n_SUITS = [\"hearts\", \"diamonds\", \"spades\", \"clubs\"]\n\n_CARD_WIDTH = 150\n_CARD_HEIGHT = 300\n_CARD_SPACING = 10\n\nclass Card:\n rank: int\n suit: str\n\n def __init__(self, rank, suit):\n self.rank = rank\n self.suit = suit\n\n def __str__(self) -> str:\n return _RANK_STRINGS[self.rank - 1] + ' of ' + self.suit\n\n def draw(self, screen, x, y):\n pygame.draw.rect(screen, [0, 0, 0], pygame.Rect(x, y, _CARD_WIDTH, _CARD_HEIGHT), 1)\n name_font = pygame.font.Font(None, 25)\n name_render = name_font.render(str(self), 1, (0, 0, 0))\n name_pos = [x + 10, y + 10]\n screen.blit(name_render, name_pos);\n\nclass Hand:\n def __init__(self):\n self.cards = []\n\n def add_card(self, card: Card):\n self.cards.append(card)\n\n def value(self):\n return 0\n\n def draw(self, screen, x, y):\n for card in self.cards:\n card.draw(screen, x, y)\n x += _CARD_WIDTH + _CARD_SPACING\n\ndeck = []\nfor card in range(1, 14):\n for suit in _SUITS:\n deck.append(Card(card, suit))\n \nrandom.shuffle(deck)\nhand = Hand()\nfor deal in range(2):\n hand.add_card(deck.pop(0))\n\nhand.draw(screen, x, y)\n\npygame.display.flip()\n\nrunning = True\nwhile running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_TAB:\n hand.add_card(deck.pop(0))\n screen.fill((255, 255, 255))\n hand.draw(screen, x, y)\n pygame.display.flip()\n\npygame.quit\n","repo_name":"ddale454/Blackjack","sub_path":"Blackjack.py","file_name":"Blackjack.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38328189453","text":"from tinydb import TinyDB\n\njoueurs_database = TinyDB(\"joueurs.json\", indent=4)\n\n\nclass Joueur:\n \"\"\"\n Classe modélisant un joueur du tournoi\n\n \"\"\"\n\n def __init__(\n self,\n prenom=None,\n nom=None,\n date_naissance=None,\n sexe=None,\n classement=0,\n score=0,\n adversaires=None,\n ):\n\n self.prenom = prenom\n self.nom = nom\n self.date_naissance = date_naissance\n self.sexe = sexe\n self.classement = classement\n self.score = score\n self.adversaires = adversaires\n self.infos_joueur = [\n self.prenom,\n self.nom,\n self.date_naissance,\n self.sexe,\n self.classement,\n self.score,\n self.adversaires,\n ]\n\n def __call__(self):\n return self.infos_joueur\n\n def __str__(self):\n return (\n f\"Nom: {self.nom}\\n\"\n f\"Prénom: {self.prenom}\\n\"\n f\"date de naissance: {self.date_naissance}\\n\"\n f\"Sexe: {self.sexe}\\n\"\n f\"Classement mondial: {self.classement}\\n\"\n f\"Score: {self.score}\\n\"\n )\n\n def serialized(self):\n \"\"\"\n cette fonction récupère les informations\n du joueur à partir de l'instance du joueur\n elle retourne ensuite un dictionnaire\n avec toutes les informations du joueur\n\n Returns:\n dictionnaire: le dictionnaire retourné contient\n toutes les informations dérialisées pour un joueur\n \"\"\"\n infos_joueur = {}\n infos_joueur[\"prenom\"] = self.prenom\n infos_joueur[\"nom\"] = 
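Hand.value() in the Blackjack script above is a stub that always returns 0. A hedged sketch of one possible scoring rule — face cards count 10 and the ace is scored low at 1 here; treating the ace as 1-or-11 would take a second pass. This rule is purely an assumption, since the repo leaves value() unimplemented:

def card_value(rank):
    # rank is 1..13 as in the Card class; J/Q/K (ranks 11..13) cap at 10.
    return min(rank, 10)

print(sum(card_value(r) for r in (1, 13, 7)))  # 18: ace + king + seven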
self.nom\n infos_joueur[\"date_naissance\"] = self.date_naissance\n infos_joueur[\"sexe\"] = self.sexe\n infos_joueur[\"classement\"] = self.classement\n infos_joueur[\"score\"] = self.score\n return infos_joueur\n\n def ajout_joueur_database(self, joueur):\n \"\"\"\n Cette fonction permet d'ajouter un joueur\n avec ses informations dans la base de données\n\n Args:\n joueur (dictionnaire):\n le dictionnaire contient les informations du joueur serialisées\n \"\"\"\n joueurs_database.insert(joueur)\n","repo_name":"tristan-mn/OPC_DA_Projet4_CHESS_tournament_POO","sub_path":"models/model_joueur.py","file_name":"model_joueur.py","file_ext":"py","file_size_in_byte":2238,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4358285857","text":"#import regex\r\nimport re\r\n\r\n#start process_tweet\r\ndef processTweet(tweet):\r\n # process the tweets\r\n\r\n #Convert to lower case\r\n tweet = tweet.lower()\r\n #Convert www.* or https?://* to URL\r\n tweet = re.sub('((www\\.[^\\s]+)|(https?://[^\\s]+))','',tweet)\r\n #Convert @username to AT_USER\r\n tweet = re.sub('@[^\\s]+','',tweet)\r\n #Remove additional white spaces\r\n tweet = re.sub('[\\s]+', ' ', tweet)\r\n #Replace #word with word\r\n tweet = re.sub(r'#([^\\s]+)', r'\\1', tweet)\r\n return tweet\r\n#end\r\n\r\n#Read the tweets one by one and process it\r\nfp = open('sampleTweets.txt', 'r')\r\nline = fp.readline()\r\n\r\nwhile line:\r\n processedTweet = processTweet(line)\r\n # print (processedTweet)\r\n line = fp.readline()\r\n#end loop\r\nfp.close()\r\n\r\n#stopwordslist \r\ndef getStopWordList(whatever) :\r\n \r\n stopwords = []\r\n fp = open(whatever, 'r')\r\n line = fp.readline()\r\n\r\n while line:\r\n line = line.strip('\\n')\r\n stopwords.append(line)\r\n line = fp.readline()\r\n\r\n fp.close()\r\n return stopwords\r\n\r\ndef replaceTwoOrMore(i) :\r\n \r\n pattern = re.compile(r\"(.)\\1{1,}\", re.DOTALL)\r\n return pattern.sub(r\"\\1\\1\" , i)\r\n\r\nstopwords = getStopWordList('stopwordslist.txt')\r\n#print(stopwords)\r\ndef getFeatureVector(tweet):\r\n \r\n featurevector = []\r\n words = tweet.split()\r\n for i in words:\r\n i = replaceTwoOrMore(i)\r\n i = i.strip('\\'\".?,!') #remove punctuation\r\n i = i.lower()\r\n if(i in stopwords):\r\n continue\r\n else:\r\n featurevector.append(i)\r\n \r\n return featurevector\r\n\r\ndef extract_features(tweet):\r\n tweet_words = set(tweet)\r\n features = {}\r\n for word in featureList:\r\n features['contains(%s)' % word] = (word in tweet_words)\r\n return features\r\n\r\nimport csv\r\nimport nltk\r\n#Read the tweets one by one and process it\r\ninpTweets = csv.reader(open('something.csv', 'rt' , encoding=\"utf8\"), delimiter=',', quotechar='\"')\r\nstopWords = getStopWordList('stopwordslist.txt')\r\nfeatureList = []\r\n\r\n# Get tweet words\r\ntweets = []\r\nfor r in inpTweets:\r\n sentiment = r[0]\r\n tweet = r[1]\r\n processedTweet = processTweet(tweet)\r\n featureVector = getFeatureVector(processedTweet)\r\n featureList.extend(featureVector)\r\n tweets.append((featureVector, sentiment));\r\n#end loop\r\n# Remove featureList duplicates\r\n\r\n\r\n# Extract feature vector for all tweets in one shote\r\ntraining_set = nltk.classify.util.apply_features(extract_features, tweets)\r\n# Train the classifier\r\nNBClassifier = nltk.NaiveBayesClassifier.train(training_set)\r\n\r\n# Test the classifier\r\ntestTweet = 'Congrats @ravikiranj, i heard you wrote a new tech post on sentiment analysis'\r\nprocessedTestTweet = 
processTweet(testTweet)\r\nprint (NBClassifier.classify(extract_features(getFeatureVector(processedTestTweet)))) \r\n","repo_name":"Priyatham97/sentimentanalysis","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":2823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7034599004","text":"import os\nfrom pathlib import Path\nfrom pyfoamd.functions.isCase import isCase\n\nimport logging\n\n# def getLatestTime(directory=Path.cwd()):\n\n# #- Get the latest time directory\n# directories = [f.name for f in Path(directory).iterdir()]\n# latestTime = '0'\n# for directory in directories:\n# name = directory.replace('/', '')\n# if name.isdigit() is True:\n# if int(name) > int(latestTime):\n# latestTime = name\n\n# return latestTime\n\ndef getLatestTime(searchDir=Path.cwd()):\n \"\"\"\n Returns the latest time directory of an OpenFOAM case or directory. If\n directory is an OpenFOAM, this function searches reconstructed and decomposed directories.\n\n Parameters:\n searchDir [pathlib.Path]: The location of the OpenFoam case or directory\n to search\n\n Returns:\n latestTime [float]: The latest time directory\n \n caseType [str]: The type of case containing the `latestTime`. Either \n 'decomposed' or 'reconstructed'.\n \"\"\"\n\n logger = logging.getLogger('xcfdv')\n \n\n caseType = None\n\n #- Get the latest time directory for reconstructed case\n directories = [f.name for f in os.scandir(Path(searchDir).resolve()) if f.is_dir()]\n latestTime = '0'\n for directory in directories:\n name = directory.replace('/', '')\n if name.isdigit() is True:\n if float(name) > float(latestTime):\n latestTime = name\n\n if isCase(searchDir):\n\n #- Get the latest time for the decomposed case\n p0 = Path(searchDir) / 'processor0'\n platestTime = '0'\n if (p0).is_dir():\n directories = [f.name for f in os.scandir(p0) if f.is_dir()]\n for directory in directories:\n name = directory.replace('/', '')\n # if name.isdigit() is True:\n try:\n float(name)\n if float(name) > float(platestTime):\n platestTime = name\n except (ValueError, TypeError):\n pass\n\n if float(platestTime) > float(latestTime):\n latestTime = platestTime\n caseType = 'decomposed'\n else: \n caseType = 'reconstructed'\n \n logger.debug(f\"latestTime: {latestTime}\")\n\n return latestTime, caseType","repo_name":"mcgoldba/pyFoamd","sub_path":"pyfoamd/functions/getLatestTime.py","file_name":"getLatestTime.py","file_ext":"py","file_size_in_byte":2349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30012665218","text":"from rest_framework.test import APITestCase\nfrom apps.posts.models import Post\nfrom django.urls import reverse\nfrom rest_framework import status\n\n\nclass PostTests(APITestCase):\n\n def setUp(self):\n post = Post.objects.create(\n title='Test title',\n description='Test description',\n )\n\n def test_get_list_of_post(self):\n url = reverse('posts-list')\n responce = self.client.get(url)\n self.assertEqual(responce.status_code, status.HTTP_200_OK)\n\n def test_get_detail_of_post(self):\n post = Post.objects.first()\n url = reverse('posts-detail', kwargs={'pk': post.id})\n responce = self.client.get(url)\n self.assertEqual(responce.status_code, 
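replaceTwoOrMore in the tweet pre-processing above relies on the back-reference pattern (.)\1{1,}: any run of a repeated character is collapsed to exactly two occurrences, which normalises elongated words before feature extraction. A quick demonstration of that behaviour:

import re

pattern = re.compile(r"(.)\1{1,}", re.DOTALL)
print(pattern.sub(r"\1\1", "huuuungry"))  # huungry
print(pattern.sub(r"\1\1", "coool!!!"))   # cool!!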
status.HTTP_200_OK)\n","repo_name":"Toktorov/BlogAPI","sub_path":"apps/posts/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"40925661594","text":"import os\nfrom .common import * # noqa\n\n\n# Site\n# https://docs.djangoproject.com/en/2.0/ref/settings/#allowed-hosts\nALLOWED_HOSTS = [\"*\"]\nINSTALLED_APPS += (\n \"gunicorn\",\n \"storages\",\n) # noqa\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.0/howto/static-files/\n# http://django-storages.readthedocs.org/en/latest/index.html\nDEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'\nSTATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'\nAWS_ACCESS_KEY_ID = os.getenv('DJANGO_AWS_ACCESS_KEY_ID')\nAWS_SECRET_ACCESS_KEY = os.getenv('DJANGO_AWS_SECRET_ACCESS_KEY')\nAWS_STORAGE_BUCKET_NAME = os.getenv('DJANGO_AWS_STORAGE_BUCKET_NAME')\n# By default files with the same name will overwrite each other.\n# Set this to False to have extra characters appended.\nAWS_S3_FILE_OVERWRITE = False\nAWS_DEFAULT_ACL = 'public-read'\nAWS_AUTO_CREATE_BUCKET = True\nAWS_QUERYSTRING_AUTH = False\nMEDIA_URL = f'https://s3.amazonaws.com/{AWS_STORAGE_BUCKET_NAME}/'\n\n# https://developers.google.com/web/fundamentals/performance/optimizing-content-efficiency/http-caching#cache-control\n# Response can be cached by browser and any intermediary caches (i.e. it is \"public\") for up to 1 day\n# 86400 = (60 seconds x 60 minutes x 24 hours)\nAWS_HEADERS = {\n 'Cache-Control': 'max-age=86400, s-maxage=86400, must-revalidate',\n}\n\n# Social\nSOCIAL_AUTH_REDIRECT_IS_HTTPS = True\n\n# easy thumbnails lib & S3\nTHUMBNAIL_DEFAULT_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'\n","repo_name":"Vivify-Ideas/python-django-drf-boilerplate","sub_path":"src/config/production.py","file_name":"production.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":118,"dataset":"github-code","pt":"61"} +{"seq_id":"23422789291","text":"import sys\n\ndef find_time_to_finish(farm_cost, farm_extra_cookies, win_condition):\n #print('Farm cost: %f' % farm_cost)\n #print('Fast extra cookies: %f' % farm_extra_cookies)\n #print('Win condition: %f' % win_condition)\n\n base_prod = 2\n farms = 0\n shortest_time_to_finish = -1\n\n while True:\n extra_time = 0\n for i in range(1, farms + 1):\n production_this_round = base_prod + (i - 1) * farm_extra_cookies\n extra_time += farm_cost / production_this_round\n\n production_with_all_farms = base_prod + farms * farm_extra_cookies\n time_to_finish = extra_time + win_condition / production_with_all_farms\n\n if shortest_time_to_finish == -1 or time_to_finish < shortest_time_to_finish:\n shortest_time_to_finish = time_to_finish\n else:\n break\n\n farms += 1\n\n return shortest_time_to_finish\n\ndef writeline(f, line):\n #print(line)\n f.write('%s\\n' % line)\n\ndef main():\n if len(sys.argv) != 2:\n print('Usage: %s INPUT_FILENAME' % sys.argv[0])\n return 1\n\n input_file = sys.argv[1]\n f = file(input_file)\n\n output_file = input_file[:-3] + \".out\"\n out = file(output_file, 'w')\n\n number_of_test_cases = int(f.readline())\n\n for case in range(1, number_of_test_cases + 1):\n line = f.readline()\n farm_cost, farm_extra_cookies, win_condition = [\n float(x) for x in line.split(' ')]\n\n time_to_finish = find_time_to_finish(farm_cost, farm_extra_cookies,\n win_condition)\n writeline(out, 'Case #%d: %.07f' % 
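pyfoamd's getLatestTime above boils down to: list the sub-directories, keep the names that parse as numbers, and take the maximum as a float. A hedged compact variant of just that reconstructed-case scan — it deliberately omits the processor0 descent and the caseType bookkeeping that the full function performs:

from pathlib import Path

def latest_time(search_dir="."):
    times = []
    for entry in Path(search_dir).iterdir():
        if entry.is_dir():
            try:
                times.append(float(entry.name))
            except ValueError:
                pass  # not a time directory
    return max(times, default=0.0)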
(case, time_to_finish))\n\n return 0\n\nif __name__ == '__main__':\n sys.exit(main())\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_136/1735.py","file_name":"1735.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15769212052","text":"import re\nimport math\nimport os\n\n# El codigo se divide en tres partes:\n#1. El analisis de sintaxis usando regex\n\n## Detectar cada digito como int para operar \nsuma = re.compile(r\"\\(\\+\\s[0-9]+\\s[0-9]+\\)\")\nrest = re.compile(r\"\\(\\-\\s[0-9]+\\s[0-9]+\\)\")\nmult = re.compile(r\"\\(\\*\\s[0-9]+\\s[0-9]+\\)\")\ndivs = re.compile(r\"\\(\\/\\s[0-9]+\\s[0-9]+\\)\")\nraiz = re.compile(r\"\\(sqroot\\s[0-9]+\\)\")\ncuad = re.compile(r\"\\(sqr\\s[0-9]+\\)\")\nseno = re.compile(r\"\\(sen\\s[0-9]+\\)\")\ncoseno = re.compile(r\"\\(cos\\s[0-9]+\\)\")\ntangente = re.compile(r\"\\(tan\\s[0-9]+\\)\")\ncociente = re.compile(r\"\\(div\\s[0-9]+\\s[0-9]+\\)\")\nresiduo = re.compile(r\"\\(%\\s[0-9]+\\s[0-9]+\\)\")\nfactorial = re.compile(r\"\\(fact!\\s[0-9]+\\)\")\n\n#2. El resultado de la operacion principal\n\ndef operacion(comandos):\n # Check for expresiones\n if suma.search(comandos) or rest.search(comandos) or mult.search(comandos) or divs.search(comandos) or raiz.search(comandos) or cuad.search(comandos) or seno.search(comandos) or coseno.search(comandos) or tangente.search(comandos) or cociente.search(comandos) or residuo.search(comandos) or factorial.search(comandos):\n expresiones = re.findall('\\d+', comandos)\n print(expresiones)\n \n answer = ''\n\n if suma.search(comandos):\n expresion = re.findall('\\d+', comandos)\n for i in range(0, len(expresion)):\n result = int(expresion[-i]) + int(expresion[-i+1])\n #replace pattern f result in comandos\n answer += re.sub(suma, str(result), comandos)\n print(result)\n \n if rest.search(comandos):\n expresion = re.findall('\\d+', comandos)\n result = int(expresion[0]) - int(expresion[1])\n print(result) \n if mult.search(comandos):\n expresion = re.findall('\\d+', comandos)\n result = int(expresion[0]) * int(expresion[1])\n print(result)\n if divs.search(comandos):\n expresion = re.findall('\\d+', comandos)\n result = int(expresion[0]) / int(expresion[1])\n print(result)\n if raiz.search(comandos):\n expresion = re.findall('\\d+', comandos)\n result = math.sqrt(int(expresion[0]))\n print(result)\n if cuad.search(comandos):\n expresion = re.findall('\\d+', comandos)\n result = math.pow(int(expresion[0]), 2)\n print(result)\n if seno.search(comandos):\n expresion = re.findall('\\d+', comandos)\n result = math.sin(math.radians(int(expresion[0]), 2))\n print(result)\n if coseno.search(comandos):\n expresion = re.findall('\\d+', comandos)\n result = math.cos(math.radians(int(expresion[0]), 2))\n print(result)\n if tangente.search(comandos):\n expresion = re.findall('\\d+', comandos)\n result = math.tan(math.radians(int(expresion[0]), 2))\n print(result)\n if cociente.search(comandos):\n expresion = re.findall('\\d+', comandos)\n result = int(expresion[0]) // int(expresion[1])\n print(result)\n if residuo.search(comandos):\n expresion = re.findall('\\d+', comandos)\n result = int(expresion[0]) % int(expresion[1])\n print(result)\n if factorial.search(comandos):\n expresion = re.findall('\\d+', comandos)\n result = math.factorial(int(expresion[0]))\n print(result)\n \n print(type(answer), \"Answer is:\", answer)\n return answer\n\n#3. 
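find_time_to_finish above recomputes the whole farm-buying timeline for each candidate farm count and stops as soon as adding a farm no longer helps. The same greedy decision can be made incrementally: buy the next farm only while (time to afford it) + (time to the goal at the faster rate) beats finishing at the current rate. A hedged restatement, checked against the well-known sample from the problem statement (C=30.0, F=2.0, X=100.0 gives about 39.1666667):

def best_time(cost, extra, goal, base=2.0):
    rate, elapsed = base, 0.0
    best = goal / base
    while True:
        # Total time if we buy one more farm and then run to the goal.
        with_farm = elapsed + cost / rate + goal / (rate + extra)
        if with_farm >= best:
            return best
        elapsed += cost / rate
        rate += extra
        best = with_farm

print(round(best_time(30.0, 2.0, 100.0), 7))  # 39.1666667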
El detector de errores\n# Numeros negativos, floats/racionales\n\n\ndef main():\n comandos = input(\"calculadora >> \")\n operacion(comandos)\n \n \n\n \n \n clear = lambda: os.system(\"cls\")\n\n if comandos == \"clear\":\n clear()\n if comandos == \"quit\":\n print(\"Saliendo...\")\n else:\n main() \n return\n\nmain() ","repo_name":"KugelblitzTaco/Proyecto-Calculadora","sub_path":"calc1.py","file_name":"calc1.py","file_ext":"py","file_size_in_byte":3674,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41591377414","text":"from ultralytics import YOLO\nimport numpy as np\nfrom ultralytics.yolo.utils.plotting import Annotator\nfrom PIL import Image\nimport utils\n\n\ndef detect_classes_image_yolov8(model: YOLO, image_path: str or Image.Image, classes: list):\n if isinstance(image_path, str):\n img = Image.open(image_path)\n elif isinstance(image_path, Image.Image):\n img = image_path\n \n results = model(image_path) # predict on an image\n annotator = Annotator(np.ascontiguousarray(img))\n tagged_boxes = []\n for r in results:\n boxes = r.boxes\n for box in boxes:\n b = box.xyxy[0] # get box coordinates in (top, left, bottom, right) format\n c = box.cls # Class\n if model.names[int(c)] in classes:\n new_box = {'coords': b, 'label': model.names[int(c)]}\n tagged_boxes.append(new_box)\n annotator.box_label(b, model.names[int(c)])\n\n img = annotator.result()\n return img, tagged_boxes\n\n\ndef predict_yolov8(images_filepaths: list or str or Image.Image, model, classes: list, outdir: str=None):\n predictions = []\n\n if isinstance(images_filepaths, str) or isinstance(images_filepaths, Image.Image):\n images_filepaths = [images_filepaths]\n\n imgs = []\n for img_path in images_filepaths:\n img, coords = detect_classes_image_yolov8(model=model, image_path=img_path, classes=classes)\n predictions.append((img, coords))\n imgs.append(img)\n\n if outdir is not None:\n for img, path in zip(imgs, images_filepaths):\n utils.save_img(outdir, path, img)\n\n return predictions\n","repo_name":"Escabias/AIr-port-CV","sub_path":"Yolov8/predictyolov8.py","file_name":"predictyolov8.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14042552036","text":"import time\n\nfrom lithops.multiprocessing import Process, Queue, current_process\n\n\ndef f(q):\n print(\"I'm process {}\".format(current_process().pid))\n q.put([42, None, 'hello'])\n for i in range(3):\n q.put('Message no. 
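The suma branch in calc1.py above loops i over every digit match and adds expresion[-i] to expresion[-i+1], which walks through unrelated operands instead of the two belonging to the matched expression; the trig branches have a similar slip, since math.radians takes a single argument rather than the two passed there. A hedged corrected sketch for one operator, capturing both operands directly from the match:

import re

def eval_suma(comando):
    # Prefix-notation sum: "(+ a b)" with non-negative integer operands.
    m = re.search(r"\(\+\s(\d+)\s(\d+)\)", comando)
    if m:
        return int(m.group(1)) + int(m.group(2))
    return None

print(eval_suma("(+ 3 4)"))  # 7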
{} ({})'.format(i, time.time()))\n time.sleep(1)\n print('Done')\n\n\nif __name__ == '__main__':\n q = Queue()\n p = Process(target=f, args=(q,))\n p.start()\n\n print(q.get()) # prints \"[42, None, 'hello']\"\n\n consuming = True\n while consuming:\n try:\n res = q.get(block=True, timeout=3)\n print(res)\n except q.Empty as e:\n print('Queue empty!')\n consuming = False\n\n p.join()\n","repo_name":"lithops-cloud/lithops","sub_path":"examples/multiprocessing/queue_poll.py","file_name":"queue_poll.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":287,"dataset":"github-code","pt":"61"} +{"seq_id":"75509824","text":"class Solution:\n def findSpecialInteger(self, arr: list[int]) -> int:\n overCount = len(arr) // 4\n\n for i in range(len(arr)):\n if arr[i] == arr[i+overCount]:\n return arr[i]\n\n return 0\n\n\n def findSpecialInteger_my(self, arr: list[int]) -> int:\n\n countMap = {}\n\n overCount = len(arr) // 4\n result = 0\n\n for count in arr:\n if count in countMap:\n countMap[count] += 1\n else:\n countMap[count] = 1\n \n for count in countMap.keys():\n if countMap[count] > overCount and count > result:\n result = count\n return result\n\n\nSolution.findSpecialInteger(Solution(), [1,2,2,6,6,6,6,7,10])","repo_name":"PanJianTing/LeetCode","sub_path":"1287_ElementArrearingMoreThan25%InSortedArray.py","file_name":"1287_ElementArrearingMoreThan25%InSortedArray.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70898283074","text":"import logging\nimport re\nimport sys\n\nimport libsbml\n\nfrom mod_sbml.annotation.annotator import annotate\nfrom mod_sbml.annotation.gene_ontology.go_serializer import get_go\nfrom mod_sbml.annotation.gene_ontology.go_annotator import annotate_compartments\nfrom mod_sbml.annotation.kegg.pathway_manager import ORG_HUMAN\nfrom mod_sbml.annotation.chebi.chebi_serializer import get_chebi\nfrom models import SR_MODEL, SR_MODEL_ANNOTATED, RECON_MODEL, SR_MODEL_SER\nfrom mod_cobra.sbml.mapping.metabolite_matcher import get_model_data, map_metabolites_compartments\nfrom mod_sbml.sbml.compartment.compartment_manager import BOUNDARY_C_ID\nfrom mod_sbml.annotation.chebi.chebi_annotator import get_chebi_id, annotate_metabolites, CHEBI_PREFIX\nfrom mod_sbml.annotation.kegg.kegg_annotator import get_kegg_m_id, get_kegg_r_id, KEGG_COMPOUND_PREFIX, KEGG_REACTION_PREFIX\nfrom mod_sbml.sbml.reaction_boundary_manager import get_bounds, set_bounds\nfrom mod_sbml.sbml.sbml_manager import create_reaction, create_species, get_reactants, get_products, get_r_comps, generate_unique_id\nfrom mod_sbml.annotation.rdf_annotation_helper import add_annotation\nfrom mod_sbml.onto import parse_simple\n\n__author__ = 'anna'\n\n\ndef main():\n convert_annotations(SR_MODEL, SR_MODEL_ANNOTATED)\n separate_boundary_species(SR_MODEL_ANNOTATED, SR_MODEL_ANNOTATED)\n create_serine_sythesis(SR_MODEL_ANNOTATED, RECON_MODEL, SR_MODEL_SER)\n input_doc = libsbml.SBMLReader().readSBML(SR_MODEL_SER)\n model = input_doc.getModel()\n annotate(model, pw_threshold=0.5, org=ORG_HUMAN)\n libsbml.SBMLWriter().writeSBMLToFile(input_doc, SR_MODEL_SER)\n\n\ndef convert_annotations(in_sbml, out_sbml):\n \"\"\"\n Converts the initial iAS253 model by Smith and Robinson (BMC Syst Biol 2011; 5: 102),\n which uses KEGG ids followed by compartment (e.g., C00001Cyto) as identifiers of the elements,\n into a model with KEGG reaction and metabolite annotations.\n :param 
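The offset trick in findSpecialInteger above works because a value occupying more than a quarter of a sorted array must also appear len(arr)//4 positions later; the problem guarantees such an element exists, which is what keeps arr[i + span] in range before a match is found. A one-line check on the array the script itself tests with:

arr = [1, 2, 2, 6, 6, 6, 6, 7, 10]
span = len(arr) // 4
print(next(arr[i] for i in range(len(arr)) if arr[i] == arr[i + span]))  # 6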
in_sbml: path to the SBML file with the original iAS253 model\n :param out_sbml: path where to store the annotated SBML file\n \"\"\"\n input_doc = libsbml.SBMLReader().readSBML(in_sbml)\n model = input_doc.getModel()\n for species in model.getListOfSpecies():\n _id = species.getId()\n s_ids = re.findall(r'C\\d{4,7}', _id)\n if s_ids:\n add_annotation(species, libsbml.BQB_IS, s_ids.pop(), KEGG_COMPOUND_PREFIX)\n if species.getName().find(_id) != -1:\n species.setName(species.getName().replace(_id, \"\").strip())\n for reaction in model.getListOfReactions():\n _id = reaction.getId()\n r_ids = re.findall(r'R\\d{4,10}', _id)\n if r_ids:\n add_annotation(reaction, libsbml.BQB_IS, r_ids.pop(), KEGG_REACTION_PREFIX)\n if reaction.getName().find(_id) != -1:\n reaction.setName(reaction.getName().replace(_id, \"\").strip())\n libsbml.SBMLWriter().writeSBMLToFile(input_doc, out_sbml)\n\n\ndef separate_boundary_species(in_sbml, out_sbml):\n \"\"\"\n Creates a boundary compartment with the id 'Boundary' and moves the boundary species (with the ids '*_b') there.\n :param in_sbml: path to the SBML file with the original model\n :param out_sbml: path where to store the modified SBML file\n :return: void\n \"\"\"\n input_doc = libsbml.SBMLReader().readSBML(in_sbml)\n model = input_doc.getModel()\n boundary_comp = model.createCompartment()\n id_ = generate_unique_id(model, BOUNDARY_C_ID)\n if libsbml.LIBSBML_OPERATION_SUCCESS != boundary_comp.setId(id_):\n logging.error(\"boundary compartment %s creation error\" % id_)\n boundary_comp.setName(\"Boundary\")\n for s in model.getListOfSpecies():\n if s.getId().find(\"_b\") != -1:\n s.setCompartment(boundary_comp.getId())\n libsbml.SBMLWriter().writeSBMLToFile(input_doc, out_sbml)\n\n\ndef create_serine_sythesis(sr_in_sbml, recon_sbml, sr_out_sbml):\n sr_doc = libsbml.SBMLReader().readSBML(sr_in_sbml)\n sr_model = sr_doc.getModel()\n\n r_doc = libsbml.SBMLReader().readSBML(recon_sbml)\n r_model = r_doc.getModel()\n\n # NADPH --> NADH; NADP --> NAD\n metabolite_id_mapping_recon2sr = {'M_h_c': 'C00080Cyto', 'M_nadph_c': 'C00004Cyto', 'M_nadp_c': 'C00003Cyto',\n 'M_glu_L_c': 'C00025Cyto'}\n add_recon_reactions(sr_model, r_model, {'R_PGCD', 'R_PSERT', 'R_PSP_L', \"R_PEPCK\", \"R_ME2\", \"R_NDPK1\", \"R_GHMT2r\"},\n metabolite_id_mapping_recon2sr,\n recon_m_id2boundary_condition_to_add={'M_gtp_c': False, 'M_pser_L_c': False, 'M_3php_c': False,\n 'M_mlthf_c': True, 'M_thf_c': True})\n # Reversible transporters\n for tr_id in ['Transporter30', 'Transporter1', 'Transporter9', 'Transporter17']:\n r = sr_model.getReaction(tr_id)\n make_reversible(r)\n\n # Reversible glutamate dehydrogenase\n for r_id in ['R00243MM', 'R00248MM']:\n r = sr_model.getReaction(r_id)\n make_reversible(r)\n\n # allow for proton input\n r = sr_model.getReaction('Boundary60') # H+(C00080_b) <=> H+(C00080Cyto)\n l_b, u_b = get_bounds(r)\n if u_b == 0:\n u_b = -l_b if l_b else 1000\n set_bounds(r, l_b, u_b)\n\n # allow for NH3 transport Mito -> Cyto\n r = sr_model.getReaction('Transporter85') # NH3(C00014MM) <=> NH3(C00014Cyto)\n l_b, u_b = get_bounds(r)\n if u_b == 0:\n u_b = -l_b if l_b else 1000\n set_bounds(r, l_b, u_b)\n\n # create NH3 output: NH3_cyto -> NH3_b\n nh3_cyto = sr_model.getSpecies('C00014Cyto')\n chebi_id = get_chebi_id(nh3_cyto)\n kegg_id = get_kegg_m_id(nh3_cyto)\n nh3_b = create_species(sr_model, compartment_id=BOUNDARY_C_ID, name=nh3_cyto.getName(), bound=True, id_='C00014_b')\n if kegg_id:\n add_annotation(nh3_b, libsbml.BQB_IS, kegg_id.upper(), KEGG_COMPOUND_PREFIX)\n if 
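The transporter loop above keeps repeating the bound-opening idiom that make_reversible later in the file packages up: a zero bound is widened to mirror the opposite bound, with +/-1000 as the fallback when both are zero. A pure-Python restatement of the two-sided rule that can be checked without libsbml:

def reversible_bounds(l_b, u_b):
    # Open a zero bound so flux can run in both directions.
    if u_b == 0:
        u_b = -l_b if l_b else 1000
    if l_b == 0:
        l_b = -u_b if u_b else -1000
    return l_b, u_b

print(reversible_bounds(-500, 0))  # (-500, 500)
print(reversible_bounds(0, 0))     # (-1000, 1000)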
chebi_id:\n add_annotation(nh3_b, libsbml.BQB_IS, chebi_id.upper(), CHEBI_PREFIX)\n r = create_reaction(sr_model, {nh3_b.getId(): 1}, {nh3_cyto.getId(): 1}, \"NH3\", True, \"BoundaryNH3\")\n set_bounds(r, -1000, 0)\n\n # prohibit ATP exit\n r = sr_model.getReaction('Boundary59') # ATP\n if r:\n l_b, u_b = get_bounds(r)\n set_bounds(r, max(0, l_b), u_b)\n\n # constraint pyruvate kinase reactions\n for r in sr_model.getListOfReactions():\n rs_names, ps_names = {sr_model.getSpecies(m_id).getName() for m_id in get_reactants(r)}, \\\n {sr_model.getSpecies(m_id).getName() for m_id in get_products(r)}\n if 'Phosphoenolpyruvate' in rs_names and 'Pyruvate' in ps_names and \\\n next((m for m in rs_names if m[-2:] == 'DP'), None):\n l_b, u_b = get_bounds(r)\n set_bounds(r, max(0, l_b), u_b)\n elif 'Phosphoenolpyruvate' in ps_names and 'Pyruvate' in rs_names and \\\n next((m for m in ps_names if m[-2:] == 'DP'), None):\n l_b, u_b = get_bounds(r)\n set_bounds(r, l_b, min(0, u_b))\n\n libsbml.SBMLWriter().writeSBMLToFile(sr_doc, sr_out_sbml)\n\n\ndef add_recon_reactions(sr_model, r_model, recon_r_ids_to_add, metabolite_id_mapping_recon2sr=None,\n recon_m_id2boundary_condition_to_add=None):\n RECON2_ID = 'Recon2'\n SR_ID = 'SR'\n model_id2sbml = {SR_ID: sr_model, RECON2_ID: r_model}\n go = parse_simple(get_go())\n annotate_compartments(sr_model, go)\n annotate_compartments(r_model, go)\n\n chebi = parse_simple(get_chebi())\n annotate_metabolites(sr_model, chebi)\n annotate_metabolites(r_model, chebi)\n\n model_id2dfs = get_model_data(model_id2sbml)\n model_id2c_ids_groups, model_id2m_ids_groups, model_id2c_id2i = \\\n map_metabolites_compartments(model_id2dfs, chebi=chebi)\n\n c_id_recon2sr = {}\n c_id_sr2recon = {}\n m_id_recon2sr = {}\n for model_id2m_ids in model_id2m_ids_groups:\n m_id_recon2sr.update({it: next(iter(model_id2m_ids[SR_ID])) for it in model_id2m_ids[RECON2_ID]})\n for model_id2c_ids in model_id2c_ids_groups:\n c_id_recon2sr.update({it: next(iter(model_id2c_ids[SR_ID])) for it in model_id2c_ids[RECON2_ID]})\n c_id_sr2recon.update({it: next(iter(model_id2c_ids[RECON2_ID])) for it in model_id2c_ids[SR_ID]})\n\n if metabolite_id_mapping_recon2sr:\n m_id_recon2sr.update(metabolite_id_mapping_recon2sr)\n\n if recon_m_id2boundary_condition_to_add:\n for recon_m_id in recon_m_id2boundary_condition_to_add.keys():\n df = model_id2dfs[RECON2_ID][0]\n row = df.loc[recon_m_id]\n kegg_id = row['KEGG']\n chebi_id = row['ChEBI']\n name = row['Name']\n c_id = c_id_recon2sr[row['Compartment']]\n sr_m = create_species(name=name, compartment_id=c_id,\n model=sr_model, bound=recon_m_id2boundary_condition_to_add[recon_m_id],\n id_=(recon_m_id if not kegg_id else kegg_id.upper()) + c_id)\n if kegg_id:\n add_annotation(sr_m, libsbml.BQB_IS, kegg_id.upper(), KEGG_COMPOUND_PREFIX)\n if chebi_id:\n add_annotation(sr_m, libsbml.BQB_IS, chebi_id.upper(), CHEBI_PREFIX)\n m_id_recon2sr[recon_m_id] = sr_m.getId()\n\n for r_id in recon_r_ids_to_add:\n r = r_model.getReaction(r_id)\n r_id2st, p_id2st = {}, {}\n for r_m_id, st in get_reactants(r, stoichiometry=True):\n if r_m_id not in m_id_recon2sr:\n raise ValueError(\"Could not map %s (%s) to any metabolite in SR\"\n % (r_model.getSpecies(r_m_id).getName(), r_m_id))\n r_id2st[m_id_recon2sr[r_m_id]] = st\n for r_m_id, st in get_products(r, stoichiometry=True):\n if r_m_id not in m_id_recon2sr:\n raise ValueError(\"Could not map %s (%s) to any metabolite in SR\"\n % (r_model.getSpecies(r_m_id).getName(), r_m_id))\n p_id2st[m_id_recon2sr[r_m_id]] = st\n kegg_id = 
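add_recon_reactions above turns each matched metabolite group into Recon2-to-SR id entries by taking next(iter(...)) of the SR side — i.e. an arbitrary representative — as the target for every Recon2 id in that group. A hedged miniature of that step with a single made-up group (the ids are illustrative):

groups = [
    {"SR": {"C00025Cyto"}, "Recon2": {"M_glu_L_c", "M_glu_L_m"}},
]
m_id_recon2sr = {}
for g in groups:
    sr_id = next(iter(g["SR"]))  # arbitrary representative on the SR side
    m_id_recon2sr.update({rid: sr_id for rid in g["Recon2"]})
print(m_id_recon2sr)  # both Recon2 ids map to 'C00025Cyto'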
get_kegg_r_id(r)\n new_r = create_reaction(sr_model, r_id2st, p_id2st, r.getName(), reversible=r.getReversible(),\n id_=r.getId() if not kegg_id\n else (kegg_id.upper() + \"_\".join(get_r_comps(r_id, r_model))))\n if kegg_id:\n add_annotation(new_r, libsbml.BQB_IS, kegg_id.upper(), KEGG_REACTION_PREFIX)\n\n\ndef make_reversible(r):\n l_b, u_b = get_bounds(r)\n if u_b == 0:\n u_b = -l_b if l_b else 1000\n if l_b == 0:\n l_b = -u_b if u_b else -1000\n set_bounds(r, l_b, u_b)\n r.setReversible(True)\n\n\nif \"__main__\" == __name__:\n sys.exit(main())\n","repo_name":"annazhukova/mod_cobra","sub_path":"examples/prepare_iAS253.py","file_name":"prepare_iAS253.py","file_ext":"py","file_size_in_byte":10614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20368883276","text":"import sys\ninput=sys.stdin.readline\nA=input().rstrip() \nB=input().rstrip()\ndp=[[0 for _ in range(len(B)+1)]for _ in range(len(A)+1)] #DP를 진행하기 위한 리스트를 형성한다.\nfor i in range(1,len(A)+1):\n for j in range(1,len(B)+1):\n if A[i-1]==B[j-1]: #A[i]와 B[j]에 값이 같다면 \n dp[i][j]=dp[i-1][j-1]+1 #dp[i][j]에 그 전 값+1을 한다.\n else:\n dp[i][j]=max(dp[i][j-1],dp[i-1][j]) #아니면 dp[i][j-1],dp[i-1][j]값중 큰 값을 결정한다.\n \nprint(dp[-1][-1])","repo_name":"Jeongmani/python-study","sub_path":"BOJ/Pratice/LCS(9251).py","file_name":"LCS(9251).py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27227896451","text":"import ez_setup\nez_setup.use_setuptools()\n\nfrom setuptools import setup, find_packages\n\nprojectname = \"dnachisel_dtailor_mode\"\nexec(open('%s/version.py' % projectname).read()) # loads __version__\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(\n name=projectname,\n version=__version__,\n author='Li Xing',\n author_email=\"lix930701@gmail.com\",\n description=\"implement d-tailor method using dnachisel specs\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/Lix1993/dnachisel_dtailor_mode\",\n packages=find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.6',\n)\n","repo_name":"HealthCodon/dnachisel_dtailor_mode","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"7048183503","text":"from appium import webdriver\nimport os\n\ncommand_executor = \"http://127.0.0.1:4723/wd/hub\"\n\ndef init_driver():\n desired_caps = {\n # 系统\n 'platformName': 'Android',\n # 版本\n 'platformVersion': '5.0',\n # 设备号\n 'deviceName': 'cccb5637',\n # 包名\n 'appPackage': 'com.tencent.news',\n # app名\n # 'app': appLocation,\n # 启动名\n 'appActivity': '.activity.SplashActivity',\n # 允许中文输入\n 'unicodeKeyboard': True,\n 'resetKeyboard': True,\n # 应用不进行重置\n 'noReset': True\n }\n driver = webdriver.Remote(command_executor,desired_caps)\n return driver\n","repo_name":"xlh89482376/lLe_Project","sub_path":"Base/Init_Driver.py","file_name":"Init_Driver.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18173378037","text":"import random\n\nfrom models.game.Entity import Entity\nfrom models.game.LivingEntity import LivingEntity\n\n\nclass 
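# English rendering of the LCS dynamic program in the BOJ 9251 record above,
# as a self-contained sketch: dp[i][j] is the LCS length of A[:i] and B[:j],
# with dp[i][j] = dp[i-1][j-1] + 1 when A[i-1] == B[j-1], and otherwise
# max(dp[i-1][j], dp[i][j-1]).
def lcs_length(a, b):
    dp = [[0] * (len(b) + 1) for _ in range(len(a) + 1)]
    for i in range(1, len(a) + 1):
        for j in range(1, len(b) + 1):
            if a[i - 1] == b[j - 1]:
                dp[i][j] = dp[i - 1][j - 1] + 1
            else:
                dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]

assert lcs_length("ACAYKP", "CAPCAK") == 4  # the BOJ 9251 sample case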
MedicalKit(Entity):\n MIN_HEALTH = 10\n MAX_HEALTH = 25\n\n health = 15\n\n def __init__(self, position):\n super().__init__(position[0], position[1])\n self.health = random.randint(MedicalKit.MIN_HEALTH, MedicalKit.MAX_HEALTH)\n\n def use(self, entity: LivingEntity):\n entity.heal(self.health)\n self.can_be_destroyed = True\n","repo_name":"Aver005/KodLandTrainingPython","sub_path":"models/game/MedicalKit.py","file_name":"MedicalKit.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"25043013892","text":"# https://www.acmicpc.net/problem/7568\n# 덩치\n\nN = int(input())\npeople = []\nfor _ in range(N):\n w, h = map(int, input().split())\n people.append((w, h))\n\nrank = [1] * len(people)\nfor i in range(len(people)):\n for person in people[:i]+people[i+1:]:\n if people[i][0] < person[0] and people[i][1] < person[1]:\n rank[i] += 1\n\nprint(*rank, sep=\" \")","repo_name":"hmkim199/PrepareCodingTest","sub_path":"Baekjoon/Practice7568.py","file_name":"Practice7568.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38600934004","text":"class Solution:\n def convert(self, s: str, numRows: int) -> str:\n if numRows <= 1:\n return s\n row = 0\n step = 1\n arr = [\"\"]*numRows\n for i in range(len(s)):\n arr[row] += s[i]\n if row == 0:\n step = 1\n elif(row == numRows - 1):\n step = -1\n row += step\n return \"\".join(arr)\n \n\nif __name__ == \"__main__\":\n sol = Solution()\n s = \"PAYPALISHIRING\"\n print(sol.convert(s, 3))\n \n","repo_name":"PromasterGuru/Leetcode-Solutions","sub_path":"6. ZigZag Conversion/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5696942706","text":"import numpy as np\r\nimport math\r\n\r\nP = [[0.58, 0.42], [0.66, 0.34]] # State transition matrix under policy\r\nR = [-0.02, -0.18] # Expected rewards vector\r\nR = np.transpose(R)\r\ngamma = 0.9\r\n\r\nV = [0,0] # initialized value \r\nV = np.transpose(V)\r\n\r\nfor k in range(0,100):\r\n\r\n V = R + gamma*np.matmul(P,V)\r\n\r\n print( \"Time step = {}, \\t V= {}\".format(k,np.round(V,5)) )\r\n","repo_name":"wnsrud0901/RL_seminar_2022_Jan_Feb","sub_path":"weak1/lec1_ex1_28.py","file_name":"lec1_ex1_28.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5182776104","text":"##############################################################################\n# This software is subject to the provisions of the BSD-like license at\n# http://www.repoze.org/LICENSE.txt. A copy of the license should accompany\n# this distribution. 
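# The RL record above iterates the Bellman evaluation update
# V <- R + gamma * P @ V for 100 steps; its fixed point also has the
# closed form V = (I - gamma * P)^(-1) R, shown here as a cross-check
# using the same P, R and gamma as that record:
import numpy as np

P = np.array([[0.58, 0.42], [0.66, 0.34]])
R = np.array([-0.02, -0.18])
gamma = 0.9

V_closed_form = np.linalg.solve(np.eye(2) - gamma * P, R)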
THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL\n# EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND\n# FITNESS FOR A PARTICULAR PURPOSE\n#\n##############################################################################\n\nimport os\nimport sys\n\npy_version = sys.version_info[:2]\n\nif py_version < (2, 6):\n raise RuntimeError('On Python 2, superhooks requires Python 2.6 or later')\nelif (3, 0) < py_version < (3, 2):\n raise RuntimeError('On Python 3, superhooks requires Python 3.2 or later')\n\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\ntry:\n README = open(os.path.join(here, 'README.md')).read()\nexcept (IOError, OSError):\n README = ''\ntry:\n CHANGES = open(os.path.join(here, 'CHANGES.md')).read()\nexcept (IOError, OSError):\n CHANGES = ''\n# 'setup.py publish' shortcut.\nif sys.argv[-1] == 'publish':\n os.system('python2 setup.py sdist bdist_wheel')\n os.system('python3 setup.py sdist bdist_wheel')\n os.system('twine upload dist/superhooks*.tar.gz')\n os.system('twine upload dist/superhooks*.whl')\n sys.exit()\n\nsetup(name='superhooks',\n version='0.5',\n license='BSD-derived (http://www.repoze.org/LICENSE.txt)',\n description='superhooks plugin for supervisord',\n long_description=README + '\\n\\n' + CHANGES,\n long_description_content_type='text/markdown',\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n 'Environment :: No Input/Output (Daemon)',\n 'Intended Audience :: System Administrators',\n 'Natural Language :: English',\n 'Operating System :: POSIX',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: System :: Boot',\n 'Topic :: System :: Monitoring',\n 'Topic :: System :: Systems Administration',\n ],\n author='Yuvaraj Loganathan',\n author_email='uvaraj6@gmail.com',\n url=\"https://github.com/skyrocknroll/superhooks\",\n maintainer=\"Yuvaraj Loganathan\",\n maintainer_email=\"uvaraj6@gmail.com\",\n keywords='supervisor web hooks monitoring',\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n 'superlance',\n 'supervisor',\n 'requests',\n ],\n tests_require=[\n 'supervisor',\n 'superlance',\n 'mock',\n\n ],\n test_suite='superhooks.tests',\n entry_points=\"\"\"\\\n [console_scripts]\n superhooks = superhooks.superhooks:main\n \"\"\"\n )\n","repo_name":"skyrocknroll/superhooks","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3320,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"61"} +{"seq_id":"35577331025","text":"from jobs import jobs\r\n\r\n\r\nif __name__ == \"__main__\":\r\n title_to_ids = {}\r\n all_ids = []\r\n duplicate_ids = set()\r\n\r\n for job in jobs:\r\n job_id, job_title = job[0], job[1]\r\n all_ids.append(job_id) # Store all IDs in order\r\n if job_title in title_to_ids:\r\n title_to_ids[job_title].append(job_id)\r\n duplicate_ids.add(job_id) # Add ID to duplicates set\r\n else:\r\n title_to_ids[job_title] = [job_id]\r\n\r\n for title, ids in title_to_ids.items():\r\n if len(ids) > 
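# The title-to-ids grouping in the checker.py record above is the textbook
# case for collections.defaultdict; duplicates are then the groups holding
# more than one id. A self-contained sketch with invented sample rows:
from collections import defaultdict

def find_duplicate_titles(jobs):
    title_to_ids = defaultdict(list)
    for job in jobs:
        title_to_ids[job[1]].append(job[0])  # job rows are (id, title, ...)
    return {t: ids for t, ids in title_to_ids.items() if len(ids) > 1}

assert find_duplicate_titles([("1", "dev"), ("2", "dev"), ("3", "qa")]) == {"dev": ["1", "2"]}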
1:\r\n print(f\"Duplicate job title '{title}' found with IDs: {', '.join(ids)}\")\r\n\r\n print(\"Total number of matching IDs:\", len(duplicate_ids))\r\n print(\"All job IDs in order:\", ', '.join(all_ids))","repo_name":"Matp101/CareerHub","sub_path":"sql/checker.py","file_name":"checker.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23425094621","text":"rf = open('B-large.in', 'r')\n\nline_counter = 0\nT = None\n\nINI_RATE = 2.0\nnum_facts = 0\n\ncase_num = 0\nsec = 0\n\nfw = open(\"B-large.out\", \"w\")\ntimes = []\n\nfor l in rf:\n tokens = l.split()\n if len(tokens) > 0:\n line_counter += 1\n if line_counter == 1:\n T = int(tokens[0])\n else:\n case_num += 1\n C, F, X = tokens\n C = float(C)\n F = float(F)\n X = float(X)\n num_facts = 0\n times = []\n\n # while cookie collection till target at current rate takes more time\n # then with rate from additional target (although starting from 0 cookies)\n # keep adding factories\n while((X-C)/(INI_RATE + num_facts * F) > X/(INI_RATE + (num_facts+1) * F)):\n num_facts += 1\n\n for i in range(0, num_facts):\n times.append(C/(INI_RATE + i*F))\n\n fw.write(\"Case #%d: %.8f\\n\" % (case_num, sum(times) + X/(INI_RATE + num_facts * F)))\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_136/2506.py","file_name":"2506.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25667142373","text":"import re\r\nimport os\r\n\r\nos.system('cls')\r\na = input(\"Please paste from clipboard:\\n\")\r\nprint(\"\\n\")\r\n\r\npattern = \"<(.*?)>\"\r\nfirst =\"\"\r\n\r\nprint(\"Printing Results ===========\")\r\ntry:\r\n for n in range(len(a)):\r\n substring = re.search(pattern, a[n:]).group(1)\r\n start = a.find(substring) + len(substring)\r\n if substring != first:\r\n first = substring\r\n print(first + \", \", end=\"\")\r\nexcept AttributeError:\r\n print(\"\\n\\nDone..\")\r\n\r\nexit()","repo_name":"burakdagdeviren/email-address-cleaning","sub_path":"email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33571188142","text":"# -*- coding: utf-8 -*-\nimport socket\nimport threading\nimport struct\n\nclass SocketServer():\n def __init__(self,ip,port):\n self.serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket_list = []\n self.ip = ip\n self.port = port\n\n def run_base(self):\n self.serversocket.bind((self.ip, self.port))\n self.serversocket.listen() # 最大连接数\n while True:\n # 此行代码会阻塞,将一直等待client的连接\n s_socket, addr = self.serversocket.accept()\n self.socket_list.append(s_socket)\n # 每当客户端连接后启动一个线程为该客户端服务\n threading.Thread(target=self.server_target, args=(s_socket,addr,)).start()\n\n def run_file_transport(self):\n self.serversocket.bind((self.ip, self.port))\n self.serversocket.listen() # 最大连接数\n while True:\n # 此行代码会阻塞,将一直等待client的连接\n s_socket, addr = self.serversocket.accept()\n self.socket_list.append(s_socket)\n # 每当客户端连接后启动一个线程为该客户端服务\n threading.Thread(target=self.server_target, args=(s_socket, addr,)).start()\n\n def download(self,filename):\n f=open(filename,'rb')\n\n def upload(self,filename):\n f= open(filename,'ab')\n\n def server_target(self,s_socket,addr):\n content = '连接成功'\n s_socket.send(content.encode('utf-8'))\n # 采用循环不断地从socket中读取客户端发送过来的数据\n 
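# A minimal client sketch for exercising the SocketServer above, which
# listens on port 1234 and greets each new connection with '连接成功'
# ("connected successfully"). Host and message are placeholders.
import socket

def send_once(message, host='127.0.0.1', port=1234):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client:
        client.connect((host, port))
        greeting = client.recv(2048).decode('utf-8')  # server speaks first
        client.sendall(message.encode('utf-8'))
        return greeting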
while True:\n try:\n content = s_socket.recv(2048).decode('utf-8')\n # 如果捕获到异常,则表明该socket对应的客户端已经关闭\n print(content)\n except:\n # 删除该socket\n self.socket_list.remove(s_socket)\n break\n print(str(addr)+'已断开连接')\n\nif __name__ == '__main__':\n ss = SocketServer('0.0.0.0',1234)\n ss.run_base()\n\n\n\n\n","repo_name":"yangjianj/autotest_controller","sub_path":"app_demo1/config/tmpfile/socket_mulithread.py","file_name":"socket_mulithread.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"30831963891","text":"#----Función que dado tres números muestre en pantalla las operaciones-----#\ndef sumar (a=0,b=0,c=0):\n suma = a + b + c\n return suma \nprint (sumar(2,3,4))\ndef restar (a=0,b=0,c=0):\n resta = a - b - c \n return resta \nprint (restar(2,3,4))\ndef multiplicar (a=0,b=0,c=0):\n multiplica = a * b\n return multiplica \nprint (multiplicar(2,3,4))\ndef dividir (a=0,b=1,c=1):\n divide = a / b / c\n return divide\nprint (dividir(2,3,4))\ndef potenciar (base=0,exponente=1, exponente2=1):\n potencia = base ** exponente ** exponente2\n return potencia\nprint (potenciar(2,3,4))\n\n#----Función que dada tres listas del mismo tamaño las muestre en pantalla----#\nNumeros = [12,6,7,21,15,28,9]\nNombres = ['Andres','Daniela','Cristo','Jesus','Luis','Luisa','Miguel']\nEnteros = [12,22,14,68,76,84,26]\ndef MostrarListas (Lista):\n print (Lista)\nMostrarListas (Numeros)\nMostrarListas (Nombres)\nMostrarListas (Enteros)\n\n#-----Función que calcule y devuelva el área de un triangulo----#\ndef area (base=0,altura=0):\n triangulo = (base * altura)/2\n return triangulo\ndef calcular (operacion,NumeroA,NumeroB):\n print (operacion(NumeroA,NumeroB))\nBaseIngresada = int (input('Ingrese una base entera: '))\nAlturaIngresada = int (input('Ingrese una altura entera: '))\nprint (area(BaseIngresada,AlturaIngresada))\n\n#----Función que dada una lista de números enteros muestre el promedio, el máximo, el mínimo----#\nListaNumerosEnteros = [12,22,14,68,76,84,26,16,18,24,88]\ndef InformacionLista (Lista):\n Mayor = max (Lista)\n Menor = min (Lista)\n Acumulado = 0\n for elemento in Lista:\n Acumulado += elemento\n TamañoDeLista = len (Lista)\n Promedio = Acumulado / TamañoDeLista\n print (f'El numero mayor en la lista es el {Mayor}, el menor es el {Menor} y el promedio es {Promedio}')\nInformacionLista (ListaNumerosEnteros)\n\n#----Dado un número n de la sucesión muestre en pantalla su valor----#\n\nprint ('#-----Bonus-----#')\n#-----Desde otro archivo llame todas las funciones creadas----#\nimport TallerFunciones as tf ","repo_name":"DanielaZabaleta/programacion2021","sub_path":"Parciales/Parcial1.py","file_name":"Parcial1.py","file_ext":"py","file_size_in_byte":2051,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37494528933","text":"import cv2\nimport mediapipe as mp\nimport time\n\n# Option of video : 0 -> camera or path\n\n# cap = cv2.VideoCapture('Videos/vid1.mp4') # use case of videos\ncap = cv2.VideoCapture(0) # Use of webcame\npTime = 0\n\nmpDraw = mp.solutions.drawing_utils\nmpFaceMesh = mp.solutions.face_mesh\nfaceMesh = mpFaceMesh.FaceMesh(max_num_faces=2) # number or faces to mesh\ndrawSpec = mpDraw.DrawingSpec(thickness=1, circle_radius=1)\n\nwhile True:\n\n success,img = cap.read()\n imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n results = faceMesh.process(imgRGB)\n \n\n #Face mesh\n if results.multi_face_landmarks:\n for faceLms in 
results.multi_face_landmarks:\n mpDraw.draw_landmarks(img,faceLms,mpFaceMesh.FACEMESH_CONTOURS,drawSpec,drawSpec)\n\n\n ### Display the fps \n\n cTime = time.time()\n fps = 1/(cTime-pTime)\n pTime = cTime\n\n cv2.putText(img,f'FPS: {int(fps)}', (20,70), cv2.FONT_HERSHEY_PLAIN,3,(0,255,0),3) \n\n # Final display\n\n cv2.imshow(\"Image\", img)\n cv2.waitKey(10)","repo_name":"MohamedDiopGit/FaceMesh-Project","sub_path":"FashMesh.py","file_name":"FashMesh.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36187417642","text":"#!/usr/bin/env python3\nimport RPi.GPIO as GPIO\nimport time\n\nservoPIN = 18\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(servoPIN, GPIO.OUT)\nservo = GPIO.PWM(servoPIN, 50) # GPIO 18 als PWM mit 50Hz\ndutyCycle = 7.2\nsteps = 0.05\nservo.start(0) # Initialisierung\n\nforward_min = 6.5\nforward_max = 4\n\nbackwards_min = 7.5\nbackwards_max = 9\n\nwhile True:\n zahl = float(input(\"direction and speed from -1.0 to 1.0 \"))\n \n if zahl < 0:\n difference = backwards_max - backwards_min\n dc_change = difference * zahl\n new_dc = backwards_min + dc_change\n if zahl > 0:\n difference = forward_min - forward_max\n dc_change = difference * zahl\n new_dc = forward_min - dc_change\n else:\n new_dc = 0\n\n servo.ChangeDutyCycle(new_dc)\n print(\"new direction and speed: \" + str(new_dc))\n","repo_name":"gruener-campus-malchow/AgriCulturalRobot","sub_path":"winding_machine/code_fragments/rotation_control.py","file_name":"rotation_control.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37039106554","text":"with open(\"input\", \"r\") as file:\n text = file.read().splitlines()\n\nsides = {'E': 0, 'S': 0, 'W': 0, 'N': 0}\nLs = ['E', 'N', 'W', 'S']\nRs = ['E', 'S', 'W', 'N']\n\nfacing = 'E'\n\nfor line in text:\n di = line[0]\n amount = int(line[1:])\n\n if di in ['N', 'S', 'W', 'E']:\n sides[di] += amount\n \n if di == 'F':\n sides[facing] += amount\n\n if di == 'L':\n cur = Ls.index(facing)\n facing = int(cur + amount/90) % 4\n facing = Ls[facing] \n\n if di == 'R':\n cur = Rs.index(facing)\n facing = int(cur + amount/90) % 4\n facing = Rs[facing]\n\nprint(abs(sides['E'] - sides['W']) + abs(sides['S'] - sides['N']))\n","repo_name":"GrbavaCigla/AdventOfCode","sub_path":"2020/12/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43189557521","text":"import csv\nimport numpy as np\nimport os\nimport sys\nvalueLab = dict()\nresults = []\n\nfinalBest = []\n\ncase = 'case1_upto_999'#sys.argv[1]\ningored=0;\nsourceFile = 'totalReport.csv'\ntargetFileName = 'bestResults.csv'\nwith open(os.path.join(case, sourceFile), newline='') as csvfile:\n #read first line: the label of column\n firstLine = csvfile.readline()\n rowval = firstLine.split(',')\n valueLab['id'] = 0\n for i, val in enumerate(rowval[1:]):\n valueLab[val.strip('\\n')] = i+1\n\n #all the line\n lines = csvfile.readlines()\n nline=len(lines)\n for row in lines: #the fist line is already parsed\n rowval = row.split(',')\n rowval[0] = rowval[0].replace('process_', '')\n if int(rowval[0]) < 1000:\n for i, val in enumerate(rowval):\n if i != 0:\n if float(val) > 1:\n val = float(val)*0.001\n print(val)\n try:\n results.append(float(val))\n except:\n print(row)\n\n #result.append({})\n #print(', '.join(row))\n 
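# The selection passes further down in this script repeatedly pick the rows
# whose value in one column equals that column's maximum. A sketch of the
# same pattern as a single boolean-mask expression (the demo array is
# invented, not taken from the report data):
import numpy as np

def rows_with_column_max(results, col):
    column = results[:, col]
    return results[column == column.max()]

demo = np.array([[1.0, 0.9], [2.0, 0.7], [3.0, 0.9]])
assert (rows_with_column_max(demo, 1) == demo[[0, 2]]).all()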
else:\n ingored+=1;\nresults = np.array(results)\nresults = results.reshape(nline-ingored, len(valueLab))\nprint()\n\nfold = 1\nfor lab in ['AucDevsFold1', 'AucDevsFold2', 'AucDevsFold3', 'AucDevsFold4']:\n bestResults = []\n bestbestResults = []\n bestbestbestResults = []\n bestbestbestbestResults = []\n potentialBestResults = []\n\n col = results[:, [valueLab[lab]]]\n best = col.max()\n for row in results:\n if row[valueLab[lab]] == best:\n if not bestResults: #the fist time the list is empty\n bestResults.append(row)\n elif not row[valueLab['id']] in [r[valueLab['id']] for r in bestResults]:\n bestResults.append(row)\n #now we have all best result based on AucDevsFoldX\n\n sublab='f1DevsFold'+str(fold)\n bestResultsNP = np.array(bestResults)\n col = bestResultsNP[:, [valueLab[sublab]]]\n best = col.max()\n for row in bestResultsNP:\n if row[valueLab[sublab]] == best:\n if not bestbestResults: #the fist time the list is empty\n bestbestResults.append(row)\n elif not row[valueLab['id']] in [r[valueLab['id']] for r in bestbestResults]:\n bestbestResults.append(row)\n #now we have all best result based on f1DevsFoldX\n\n subsublab = 'AucTestFold'+str(fold)\n bestbestResultsNP = np.array(bestbestResults)\n col = bestbestResultsNP[:, [valueLab[subsublab]]]\n best = col.max()\n for row in bestbestResultsNP:\n if row[valueLab[subsublab]] == best:\n if not bestbestbestResults: #the fist time the list is empty\n bestbestbestResults.append(row)\n elif not row[valueLab['id']] in [r[valueLab['id']] for r in bestbestbestResults]:\n bestbestbestResults.append(row)\n\n subsubsublab = 'f1TestFold'+str(fold)\n bestbestbestResultsNP = np.array(bestbestbestResults)\n col = bestbestbestResultsNP[:, [valueLab[subsubsublab]]]\n best = col.max()\n for row in bestbestbestResultsNP:\n if row[valueLab[subsubsublab]] == best:\n if not bestbestbestbestResults: #the fist time the list is empty\n bestbestbestbestResults.append(row)\n elif not row[valueLab['id']] in [r[valueLab['id']] for r in bestbestbestbestResults]:\n bestbestbestbestResults.append(row)\n\n #finalBest.append(bestbestbestbestResults)\n finalBest = finalBest + bestbestbestbestResults\n fold += 1\n\nfinalBestNP = np.array(finalBest)\nfinalBest_ = []\nfor i in finalBest:\n if not i[valueLab['id']] in [f[valueLab['id']] for f in finalBest_]:\n finalBest_.append(i)\n\n\nfor fold in range(1, 5):\n lab = 'f1TestFold'+str(fold)\n col = results[:, [valueLab[lab]]]\n best = col.max()\n for row in results:\n if row[valueLab[lab]] == best:\n # the fist time the potentialBestResults list is empty\n if not potentialBestResults and not row[valueLab['id']] in [r[valueLab['id']] for r in finalBest_]:\n potentialBestResults.append(row)\n elif not row[valueLab['id']] in [r[valueLab['id']] for r in potentialBestResults] and not row[valueLab['id']] in [r[valueLab['id']] for r in finalBest_]:\n potentialBestResults.append(row)\n\n\nmeanBestResults = []\nlab = 'f1Final'\ncol = results[:, [valueLab[lab]]]\nbest = col.max()\nfor row in results:\n if row[valueLab[lab]] == best:\n # the fist time the potentialBestResults list is empty\n if not meanBestResults and not row[valueLab['id']] in [r[valueLab['id']] for r in finalBest_]:\n meanBestResults.append(row)\n elif not row[valueLab['id']] in [r[valueLab['id']] for r in meanBestResults] and not row[valueLab['id']] in [r[valueLab['id']] for r in finalBest_]:\n meanBestResults.append(row)\n\nwith open(os.path.join(case,targetFileName), mode='w') as file:\n 
lab='id,AucDevsFold1,AucDevsFold2,AucDevsFold3,AucDevsFold4,AucTestFold1,AucTestFold2,AucTestFold3,AucTestFold4,f1DevsFold1,f1DevsFold2,f1DevsFold3,f1DevsFold4,f1Final,f1TestFold1,f1TestFold2,f1TestFold3,f1TestFold4\\n'\n file.write(lab.replace(',','\\t'))\n for l in finalBest_:\n for v in l:\n file.write(str(v).replace('.',',')+'\\t')\n file.write('\\n')\n\n file.write(lab.replace(',','\\t'))\n for l in potentialBestResults:\n for v in l:\n file.write(str(v).replace('.',',')+'\\t')\n file.write('\\n')\n\nprint()\n","repo_name":"Buckler89/deep_fall","sub_path":"galielo_results/resultSelection.py","file_name":"resultSelection.py","file_ext":"py","file_size_in_byte":5549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14762409611","text":"\"\"\"\nthis script is used to simulate linear feedback system\n\"\"\"\nimport numpy as np\nfrom scipy.integrate import odeint\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\n\n\ndef linear_sys(x, t, A, B, K):\n # system: dot_x = Ax+Bu\n # where: u=Kx\n x1, x2 = x\n temp = (A + B * K)\n temp = np.dot(temp, np.array([[x1], [x2]]))\n return np.array([temp[0, 0], temp[1, 0]])\n\n\nt = np.arange(0, 10, 0.001) # simulation time\nx0 = np.array([10, 10]) # initial condition\nA = np.array([[0, 1], # system dynamics matrix\n [0, 0]])\nB = np.array([[0], [1]]) # b matrix\nK = np.array([[-2, -3]]) # control law\n\ntrack = odeint(linear_sys, x0, t, args=(A, B, K))\n\nfig = plt.figure(figsize=(10, 5))\nplt.style.use('seaborn-deep')\n# temp = cm.winter(t / 10)\n# plt.plot(track[:, 0], track[:, 1], lw=3, c=cm.hot(t / 10))\nplt.subplot(1, 2, 1)\nplt.scatter(track[:, 0], track[:, 1], linewidths=0.5, c=cm.Spectral(t / 5))\nplt.xlabel('x1')\nplt.ylabel('x2')\n\nplt.subplot(1, 2, 2)\nplt.plot(t, track[:, 0], lw=3, label='x1')\nplt.plot(t, track[:, 1], lw=3, label='x2')\nplt.xlabel('t')\nplt.legend()\n# plt.savefig('./linear_fdc.png')\nplt.show()\n\n# ==========================\n# plot the phase of the dynamics system\nnum_steps = 11\nY, X = np.mgrid[-25:25:(num_steps * 1j), -25:25:(num_steps * 1j)]\nU = Y\nV = -2 * X + -3 * Y\nspeed = np.sqrt(U ** 2 + V ** 2)\nplt.streamplot(X, Y, U, V, color=speed)\nplt.scatter(track[:, 0], track[:, 1], linewidths=0.5, c=cm.Spectral(t / 5))\n# plt.savefig('./phase.png')\nplt.show()\n","repo_name":"WoodenJin/OptimalControl-RL_abstract","sub_path":"linear_feedback_control.py","file_name":"linear_feedback_control.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"} +{"seq_id":"26273567908","text":"import pygame\r\nimport math\r\nfrom random import randint\r\nimport tkinter as tk\r\n\r\n# Initialize pygame\r\npygame.init()\r\n\r\n# Set window size and caption\r\nsize = (700, 500)\r\nscreen = pygame.display.set_mode(size)\r\npygame.display.set_caption(\"Survive\")\r\n\r\n# Set player starting position\r\nplayer_x = 350\r\nplayer_y = 250\r\n\r\n# Set player size\r\nplayer_size = 20\r\n\r\n# Set food and poison starting positions\r\nfood_x = 50\r\nfood_y = 50\r\npoison_x = 600\r\npoison_y = 400\r\n\r\n# Set crab starting positions\r\ncrab_x = randint(0, size[0]-20)\r\ncrab_y = randint(0, size[1]-20)\r\n\r\n# Set hunger and health starting values\r\nhunger = 100\r\nhealth = 100\r\n\r\n# Create clock object\r\nclock = pygame.time.Clock()\r\n\r\n# Set food count\r\nfood_count = 0\r\n\r\n# Create an empty list to store crabs\r\ncrabs = []\r\n\r\n# Create an empty list to store 
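# Why the feedback simulation above converges: with K = [-2, -3] the
# closed-loop matrix A + B K is [[0, 1], [-2, -3]], whose characteristic
# polynomial s^2 + 3s + 2 has roots -1 and -2. Both are negative, so the
# origin is asymptotically stable. A quick numeric check:
import numpy as np

A = np.array([[0.0, 1.0], [0.0, 0.0]])
B = np.array([[0.0], [1.0]])
K = np.array([[-2.0, -3.0]])

eigenvalues = np.linalg.eigvals(A + B @ K)
assert np.allclose(sorted(eigenvalues.real), [-2.0, -1.0])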
poison\r\npoison = []\r\n\r\n# Main game loop\r\ndone = False\r\nwhile not done:\r\n # Handle events\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n done = True\r\n # Clear the screen\r\n screen.fill((0, 0, 0))\r\n \r\n # Handle player movement\r\n keys = pygame.key.get_pressed()\r\n if keys[pygame.K_w]:\r\n player_y -= 5\r\n if keys[pygame.K_s]:\r\n player_y += 5\r\n if keys[pygame.K_a]:\r\n player_x -= 5\r\n if keys[pygame.K_d]:\r\n player_x += 5\r\n if player_x < 0:\r\n player_x = 0\r\n if player_x > size[0] - player_size:\r\n player_x = size[0] - player_size\r\n if player_y < 0:\r\n player_y = 0\r\n if player_y > size[1] - player_size:\r\n player_y = size[1] - player_size\r\n\r\n \r\n # Check if hunger is less than 0 and set it to 0\r\n if hunger < 0:\r\n hunger = 0\r\n # Decrement hunger and health every 5 seconds\r\n hunger -= 0.1\r\n if hunger <= 0:\r\n health -= 0.1\r\n if hunger > 80:\r\n health += 0.1\r\n # Cap hunger and health at 100\r\n if hunger > 100:\r\n hunger = 100\r\n if health > 100:\r\n health = 100\r\n \r\n # Check for collision with food\r\n food_rect = pygame.Rect(food_x, food_y, 20, 20)\r\n player_rect = pygame.Rect(player_x, player_y, player_size, player_size)\r\n if player_rect.colliderect(food_rect):\r\n hunger += 10\r\n food_count += 1\r\n food_x = randint(0, size[0]-20)\r\n food_y = randint(0, size[1]-20)\r\n # Check if food count reaches 3 and spawn a new crab and poison\r\n if food_count >= 3:\r\n new_crab = {\"x\":randint(0, size[0]-20), \"y\":randint(0, size[1]-20)}\r\n crabs.append(new_crab)\r\n new_poison = {\"x\":randint(0, size[0]-20), \"y\":randint(0, size[1]-20)}\r\n poison.append(new_poison)\r\n food_count = 0\r\n\r\n # Check for collision with poison\r\n for i in range(len(poison)):\r\n poison_rect = pygame.Rect(poison[i][\"x\"], poison[i][\"y\"], 20, 20)\r\n if player_rect.colliderect(poison_rect):\r\n health -= 5\r\n del poison[i]\r\n break\r\n\r\n # Update the position of all the crabs\r\n for crab in crabs:\r\n angle = math.atan2(player_y - crab[\"y\"], player_x - crab[\"x\"])\r\n crab[\"x\"] += math.cos(angle) * 2.5\r\n crab[\"y\"] += math.sin(angle) * 2.5\r\n if crab[\"x\"] < 0:\r\n crab[\"x\"] = 0\r\n if crab[\"x\"] > size[0] - 20:\r\n crab[\"x\"] = size[0] - 20\r\n if crab[\"y\"] < 0:\r\n crab[\"y\"] = 0\r\n if crab[\"y\"] > size[1] - 20:\r\n crab[\"y\"] = size[1] - 20\r\n\r\n \r\n # Collision detection between player and crabs\r\n for crab in crabs:\r\n crab_rect = pygame.Rect(crab[\"x\"], crab[\"y\"], 20, 20)\r\n if player_rect.colliderect(crab_rect):\r\n health -= 5\r\n\r\n # Collision detection between crabs\r\n for i in range(len(crabs)):\r\n for j in range(i+1,len(crabs)):\r\n crab_rect_1 = pygame.Rect(crabs[i][\"x\"], crabs[i][\"y\"], 20, 20)\r\n crab_rect_2 = pygame.Rect(crabs[j][\"x\"], crabs[j][\"y\"], 20, 20)\r\n if crab_rect_1.colliderect(crab_rect_2):\r\n crabs[i][\"x\"] += 2\r\n crabs[i][\"y\"] += 2\r\n crabs[j][\"x\"] -= 2\r\n crabs[j][\"y\"] -= 2\r\n \r\n # Draw crabs on screen\r\n for crab in crabs:\r\n pygame.draw.rect(screen, (255, 255, 0), (crab[\"x\"], crab[\"y\"], 20, 20))\r\n \r\n # Draw poison on screen\r\n for poison_obj in poison:\r\n pygame.draw.rect(screen, (255, 192, 203), (poison_obj[\"x\"], poison_obj[\"y\"], 20, 20))\r\n\r\n\r\n # Check for collision with crab\r\n crab_rect = pygame.Rect(crab_x, crab_y, 20, 20)\r\n player_rect = pygame.Rect(player_x, player_y, player_size, player_size)\r\n if player_rect.colliderect(crab_rect):\r\n health -= 20\r\n crab_x = randint(0, 
size[0]-20)\r\n crab_y = randint(0, size[1]-20)\r\n\r\n # Check for game over\r\n if health <= 0:\r\n done = True\r\n\r\n # Draw plain colored player, food, poison, and crab\r\n pygame.draw.rect(screen, (255, 0, 0), (player_x, player_y, player_size, player_size))\r\n pygame.draw.rect(screen, (0, 255, 0), (food_x, food_y, 20, 20))\r\n\r\n # Draw hunger and health bars\r\n pygame.draw.rect(screen, (255, 0, 0), (50, 450, health, 20))\r\n pygame.draw.rect(screen, (0, 255, 0), (50, 480, hunger, 20))\r\n\r\n # Update display\r\n pygame.display.flip()\r\n\r\n # Wait for a while\r\n clock.tick(30)\r\n\r\n#Exit pygame\r\npygame.quit()\r\n","repo_name":"MaxMan69420/game","sub_path":"abcd.py","file_name":"abcd.py","file_ext":"py","file_size_in_byte":5351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74635792195","text":"import pyglet\nimport random,math,time\n\n\nwindow=pyglet.window.Window(width=800,height=600,resizable=True)\nbatch=pyglet.graphics.Batch()\n\nnum_points=100 # 100\nspeed=[i for i in range(-5,6) if i!=0]\n\n\npoints=[[pyglet.shapes.Circle(400+random.randint(-100,100),300+random.randint(-100,100),5,batch=batch),random.choice(speed),random.choice(speed)] for i in range(num_points)]\nlines=[pyglet.shapes.Line(points[i][0].x,points[i][0].y,0,0,batch=batch) for i in range(num_points)]\nlines2=[pyglet.shapes.Line(points[i][0].x,points[i][0].y,0,0,batch=batch) for i in range(num_points)]\n\n_dt=lambda x,y,x1,y1: round(math.sqrt( (x-x1)**2 + (y-y1)**2 ))\n\nlast_tick=time.time()\ndef update(dt):\n global last_tick\n for p in points:\n if p[0].x<1: p[1]=random.choice([i for i in speed if i>0])\n if p[0].x>window.width-1: p[1]=random.choice([i for i in speed if i<0])\n if p[0].y<1: p[2]=random.choice([i for i in speed if i>0])\n if p[0].y>window.height-1: p[2]=random.choice([i for i in speed if i<0])\n\n for p in points:\n p[0].x+=p[1]\n p[0].y+=p[2]\n\n\n # Get the closest point\n\n for p,point in enumerate(points):\n closest_point=0\n # [distance,index]\n lst=[]\n for i,jj in enumerate(points):\n lst.append([_dt(jj[0].x,jj[0].y,point[0].x,point[0].y), i])\n # get min and second min point from lst\n min_point=None\n for index,i in enumerate(lst):\n if min_point==None: min_point=i; #print(min_point)\n if i[0]Close_error:\n\n\t\"\"\"recibe 3 puntos, P1, P4, P1_prima. 
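# The point-pairing loop in the pyglet record above (partially garbled here)
# appears to look for each point's two nearest neighbours so the two line
# batches can be re-anchored every frame. heapq expresses that intent
# directly; the sample coordinates below are invented:
import heapq
import math

def two_nearest(points, index):
    x, y = points[index]
    others = [(math.dist((x, y), p), i) for i, p in enumerate(points) if i != index]
    return [i for _, i in heapq.nsmallest(2, others)]

assert two_nearest([(0, 0), (1, 0), (5, 0), (2, 0)], 0) == [1, 3]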
Retorna el error de cierre\"\"\"\n\n\t[y_diference, x_diference] = calculate_diferences(P1, P1_p)\n\n\tclose_error = Close_error(\n\t\tpoints=[P1, P1_p, P4],\n\t\tangle_error= calculate_angular_error(p4=P4, p1=P1, p1_prime=P1_p),\n\t\tx_diference = x_diference,\n\t\ty_diference = y_diference\n\t)\n\treturn close_error\n\ndef get_sides(\n\t\tpoints:list[Point]\n\t\t)->list[Side]:\n\n\t\"\"\"Recibe un lista de puntos y calcula el azimut y su distancia de cada combinacion de puntos\"\"\"\n\tsides = []\n\tfor i in range(len(points)-1):\n\t\tsides.append(handle_side([points[i], points[i+1]]))\n\treturn sides\n\ndef create_new_polygon(points:list[Point])->Polygon:\n\t\"\"\"Recibe una lista de puntos y retorna un diccionario de la clase Poligono\"\"\"\n\tP1 = points[0]\n\tP4 = points[3]\n\tP1_p = points[4]\n\n\tpolygon = Polygon(\n\t\tpoints=points,\n\t\tarea = calculate_area(points),\n\t\tsides=get_sides(points),\n\t\tclose_error = create_new_close_error(\n\t\t\t\t\tP1=P1, \n\t\t\t\t\tP4=P4, \n\t\t\t\t\tP1_p= P1_p\n\t\t\t\t\t),\n\t\tquadratic_middle_error = get_quadratic_middle_error([P1, P1_p]),\n\t\tstandard_deviation= get_standard_desviation([P1, P1_p])\n\n\t)\n\tprint(polygon.dict())\n\n\n\n","repo_name":"Manjlo/notecase_topographic","sub_path":"work_logic/handle_polygon.py","file_name":"handle_polygon.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1640065062","text":"class DirectedGraphNode:\n def __init__(self, x):\n self.label = x\n self.neighbors = []\n\n\nclass Solution:\n def serialize(self, root):\n res = []\n\n def dfs(node):\n res.append(str(node.label))\n for child_node in node.neighbors:\n dfs(child_node)\n res.append(\"N\")\n\n dfs(root)\n return \",\".join(res)\n\n def deserialize(self, data):\n nodes = data.split(',')\n stack = []\n\n for node in nodes:\n if node != \"N\":\n stack.append(DirectedGraphNode(int(node)))\n else:\n child_node = stack.pop()\n\n if stack:\n stack[-1].neighbors.append(child_node)\n else:\n return child_node\n\n\nif __name__ == '__main__':\n root = DirectedGraphNode(1)\n node1 = DirectedGraphNode(3)\n node2 = DirectedGraphNode(2)\n node3 = DirectedGraphNode(4)\n root.neighbors.append(node1)\n root.neighbors.append(node2)\n root.neighbors.append(node3)\n\n serialized = Solution().serialize(root)\n deserialized = Solution().deserialize(serialized)\n print(deserialized == root)\n","repo_name":"amogchandrashekar/Leetcode","sub_path":"Hard/Serialize and Deserialize N-ary Tree.py","file_name":"Serialize and Deserialize N-ary Tree.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"7938382514","text":"# json_webserver.py\n\nimport falcon\nimport json\nimport datetime\nimport time\nimport os\nfrom middleware import RequireJSON, JSONTranslator\nfrom change_flagging import write_change_flag\nimport errno\n\nclass JSONWaterElfCollector:\n \"\"\"Accepts JSON encoded data in the request body\"\"\"\n # The keys should be the same as the keys output by the WaterElf\n # Used for validation \n KEY_TIMESTAMP = \"timestamp\"\n KEY_WATER_TEMP = \"waterTemp\"\n KEY_AIR_TEMP= \"airTemp\"\n KEY_PH = \"pH\"\n KEY_HUMIDITY = \"humidity\"\n KEY_LUX = \"lux\"\n \n def __init__(self):\n self.file_ext = \"txt\"\n self.path_base = \"../../../../wegrow-data\"\n # Set this to validate incoming data for particular key\n # eg [self.KEY_TIMESTAMP, self.KEY_WATER_TEMP, 
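# Note on the serialization record above: its __main__ check prints
# `deserialized == root`, which compares by object identity because
# DirectedGraphNode defines no __eq__, so it reports False even for a
# correct round trip. A structural-comparison sketch that makes the check
# meaningful:
def same_tree(a, b):
    if a is None or b is None:
        return a is b
    return (a.label == b.label
            and len(a.neighbors) == len(b.neighbors)
            and all(same_tree(x, y) for x, y in zip(a.neighbors, b.neighbors)))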
self.KEY_AIR]\n # No validation is currently set\n self.required_keys = None\n\n def on_get(self, req, resp, uuid):\n # Accept JSON data from the body as a dict via middleware\n data = req.context['json_input']\n\n self.check_data(data)\n output_path = self.generate_path(uuid)\n data['server_recv_timestamp'] = time.time() # as Elves only send in timestamps relative to their boot time\n self.write_data_line(output_path, json.dumps(data)) \n write_change_flag(output_path, uuid)\n\n def write_data_line(self, path, line):\n with open(\"%s\" % path, \"a\") as f:\n f.write(line + \"\\n\")\n\n def generate_path(self, uuid):\n # Specify which type of path schematic to use\n path = self._uuid_dir_path(uuid)\n final_path = \"%s/%s.%s\" % (self.path_base, path, self.file_ext)\n self._create_path(final_path)\n return final_path\n\n def _uuid_dir_path(self, uuid):\n # Nest files in directories by uuid\n today = datetime.datetime.now().date().strftime(\"%Y-%m-%d\")\n path = \"%s/%s_%s\" % (uuid, today, uuid)\n return path\n \n def _create_path(self, filename):\n if not os.path.exists(os.path.dirname(filename)):\n try:\n os.makedirs(os.path.dirname(filename))\n except OSError as exc: # Guard against race condition\n if exc.errno != errno.EEXIST:\n raise\n\n def check_data(self, data):\n # Check that keys specified at the top exist in the data\n if self.required_keys is not None:\n for key in self.required_keys:\n if key not in data.keys():\n raise falcon.HTTPMissingParam(key)\n\n# Middleware enforces the requirement for 'application/json' in 'content-type' and 'accept'.\n# Middleware decodes JSON data into a python object\napi = falcon.API(middleware=[RequireJSON(), JSONTranslator()])\napi.add_route(\"/collect/{uuid}\", JSONWaterElfCollector())\n\n","repo_name":"hamishcunningham/fishy-wifi","sub_path":"wegrow-cloudside/elf-data-collector/falcon-server/json_webserver.py","file_name":"json_webserver.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"20924239217","text":"import os\n\nfrom apiclient.discovery import build\n\nYOUTUBE_API_KEY = os.environ['YOUTUBE_API_KEY']\n\n# YouTubeのAPIクライアントを組み立てる。\nyoutube = build('youtube', 'v3', developerKey=YOUTUBE_API_KEY)\n\nsearch_response = youtube.search().list(\n part='snippet',\n q='手芸',\n type='video',\n).execute()\n\n# search_responseはAPIのレスポンスのJSONをパースしたdict\nfor item in search_response['items']:\n print(item['snippet']['title'])\n","repo_name":"munablamu/kato_crawling_and_scraping_v2","sub_path":"chap05/search_youtube_videos.py","file_name":"search_youtube_videos.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32272843434","text":"# coding: utf-8\n# Baptiste Feldmann\n\nimport argparse\nimport os\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Process some strings...')\n\n parser.add_argument('-dirpath', required=True, type=str, help=\"directory path with slash at the end\")\n parser.add_argument('-root', required=True, type=str, help=\"rootname for flightlines with XX for locating line num\")\n parser.add_argument('-buffer', type=int, choices=[0,1], default=0, help=\"1 if buffer, 0 if not\")\n parser.add_argument('-cores', type=int, choices=range(1,os.cpu_count()), default=50, help=\"number of cpu used\")\n parser.add_argument('-o_ptsrcid', type=int, choices=[0,1], default=0, help=\"1 if you want ptsrcid as line 
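# The _create_path/errno.EEXIST guard in the collector above predates
# Python 3.2; os.makedirs can now absorb the race condition itself. A sketch
# assuming, as in the record, that filename carries a directory component:
import os

def create_path(filename):
    os.makedirs(os.path.dirname(filename), exist_ok=True)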
number, 0 if not\")\n\n args = parser.parse_args()\n workspace = args.dirpath\n name = args.root\n calculs.ReverseTiling(workspace,\n name,\n bool(args.buffer),\n args.cores,\n bool(args.o_ptsrcid))\n","repo_name":"p-leroy/lidar_platform","sub_path":"tools/call_reverse_tiling.py","file_name":"call_reverse_tiling.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"74387241475","text":"import os\nimport string\nimport sys\nfrom datetime import datetime\nimport random\n\nRUN_ID = ''.join(\n random.choice(string.ascii_uppercase + string.digits) for _ in range(10)\n)\nBLOCK_SIZE = 128 # spatial extent of the model for its context\n# NEPTUNE_RUN = neptune.init(project='crizcraig/safeobjective', api_token=os.environ['NEPTUNE_CREDS'])\nROOT_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\nREPLAY_ROOT_DIR = f'{ROOT_DIR}/data/replay_buff'\nDATE_FMT = '%Y-%m-%d_%H:%M:%S.%f'\nDATE_STR = datetime.now().strftime(DATE_FMT)\nSAVE_DIR = f'{ROOT_DIR}/checkpoints'\nPICKLE_DIR = f'{ROOT_DIR}/pickles/{DATE_STR}_{RUN_ID}'\nCHECKPOINT_NAME = f'{DATE_STR}.ckpt'\nSEED = 1_414_213\nDEBUGGING = sys.gettrace() is not None\nWANDB_MAX_LOG_PERIOD = 100\nACC_LOG_PERIOD = 10\n\n# Human would be ~10k? and zuma like 10\nMAX_NUM_SALIENCE_LEVELS = 10\n\n# Human would be ~1M? and zuma like 10k\nMAX_SALIENT_CARDINALITY = 10_000\n\nDEFAULT_MAX_LRU_SIZE = 100\nREPLAY_FILE_PREFIX = 'replay_buffer'\nNUM_DIFF_SALIENT = 2\nCOMBINE_STEPS_ABSTRACT_SEQ = 1\nCOMBINE_STEPS_SENSOR_SEQ = 8\nTRAIN = 'train'\nTEST = 'test'\nDEFAULT_GPT_SEQ_LEN = 8\nLEVEL_PREFIX_STR = 'lvl_'\n","repo_name":"crizCraig/learnmax","sub_path":"learn_max/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"38753586181","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# pylint: disable=too-many-ancestors\n\"Basic bokeh models for dealing with it's idiosyncraties\"\nfrom itertools import product\n\nfrom bokeh import layouts\nfrom bokeh.models import (Row, Model, HoverTool,\n NumberFormatter, ToolbarBox)\nfrom bokeh.plotting.figure import Figure\nimport bokeh.core.properties as props\n\nfrom view.base import defaultsizingmode\n\nclass DpxKeyedRow(Row):\n \"define div with tabIndex\"\n fig = props.Instance(Figure)\n toolbar = props.Instance(Model)\n keys = props.Dict(props.String, props.String, help = 'keys and their action')\n zoomrate = props.Float()\n panrate = props.Float()\n __implementation__ = 'keyedrow.ts'\n def __init__(self, ctrl, plotter, fig, **kwa):\n vals = (''.join(i) for i in product(('pan', 'zoom'), ('x', 'y'), ('low', 'high')))\n mdl = ctrl.theme.model('keystroke')\n keys = dict((mdl[key], key) for key in vals)\n keys[mdl['reset']] = 'reset'\n keys.update({mdl[tool+'activate']: tool for tool in ('pan', 'zoom')})\n\n children = kwa.pop('children', [fig])\n super().__init__(children = children,\n fig = fig,\n keys = keys,\n zoomrate = mdl['zoomrate'],\n panrate = mdl['panrate'],\n **defaultsizingmode(plotter, kwa, ctrl = ctrl))\n\n def __contains__(self, value):\n return value in self.keys # pylint: disable=unsupported-membership-test\n\n @classmethod\n def keyedlayout(cls, ctrl, plot, main, *figs, bottom = None, left = None, right = None):\n \"sets up a DpxKeyedRow layout\"\n assert left is None or right is None\n kwa = plot.defaultsizingmode()\n if len(figs) == 0:\n keyed = cls(ctrl, 
plot, main)\n        else:\n            figs = (main,) + figs\n            plts = layouts.gridplot([[*figs]], **kwa, toolbar_location = main.toolbar_location)\n\n            # pylint: disable=not-an-iterable\n            tbar = next(i for i in plts.children if isinstance(i, ToolbarBox))\n            tbar.toolbar.logo = None\n            keyed = cls(ctrl, plot, main, children = [plts], toolbar = tbar, **kwa)\n\n        if {left, right, bottom} == {None}:\n            return keyed\n\n        if {left, right} == {None}:\n            return layouts.column([keyed, bottom], **kwa)\n\n        if {bottom, right} == {None}:\n            return layouts.row([left, keyed], **kwa)\n\n        if {bottom, left} == {None}:\n            return layouts.row([keyed, right], **kwa)\n\n        if left is None:\n            return layouts.row([layouts.column([keyed, bottom], **kwa), right], **kwa)\n\n        return layouts.row([left, layouts.column([keyed, bottom], **kwa)], **kwa)\n\nclass DpxHoverTool(HoverTool):\n    \"sorts indices before displaying tooltips\"\n    maxcount = props.Int(5)\n    __implementation__ = \"hovertool.ts\"\n\nclass DpxNumberFormatter(NumberFormatter):\n    \"Deals with Nones correctly\"\n    __implementation__ = \"numberformatter.ts\"\n","repo_name":"depixusgenome/libanalysis","sub_path":"view/plots/bokehext.py","file_name":"bokehext.py","file_ext":"py","file_size_in_byte":3230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"5857207589","text":"from nltk.corpus import stopwords\n\ndef encode_emotions(tokens, emotion_lexicon, emotions, relative=True):\n    text_len = len(tokens)\n    encoded_emotions = [0 for e in emotions]\n    for i, emotion in enumerate(emotions):\n        try:\n            emotion_words = [t for t in tokens if t in emotion_lexicon[emotion]]\n            if relative and len(tokens):\n                encoded_emotions[i] = len(emotion_words) / len(tokens)\n            else:\n                encoded_emotions[i] = len(emotion_words)\n        except KeyError:  # a missing emotion raises KeyError, not ValueError\n            print(\"Emotion not found.\")\n    return encoded_emotions\n\n\ndef encode_pronouns(tokens, pronouns={\"i\", \"me\", \"my\", \"mine\", \"myself\"}, relative=True):\n    if not tokens:\n        return 0\n    text_len = len(tokens)\n    nr_pronouns = len([t for t in tokens if t in pronouns])\n    if relative and text_len:\n        return nr_pronouns / text_len\n    else:\n        return nr_pronouns\n\n\ndef encode_stopwords(tokens, stopwords_list=None, relative=True):\n    if not stopwords_list:\n        stopwords_list = stopwords.words(\"english\")\n    encoded_stopwords = [0 for s in stopwords_list]\n    if not tokens:\n        return encoded_stopwords\n    for i, stopword in enumerate(stopwords_list):\n        if stopword in tokens:\n            encoded_stopwords[i] += 1\n    if relative and len(tokens)>0:\n        return [stopword / len(tokens) for stopword in encoded_stopwords]\n    else:\n        return encoded_stopwords\n\n\ndef encode_liwc_categories_full(tokens, liwc_categories, liwc_words_for_categories, relative=True):\n    categories_cnt = [0 for c in liwc_categories]\n    if not tokens:\n        return categories_cnt\n    text_len = len(tokens)\n    for i, category in enumerate(liwc_categories):\n        category_words = liwc_words_for_categories[category]  # was self.liwc_dict[category]: a NameError in a module-level function\n        for t in tokens:\n            for word in category_words:\n                if t == word or (word[-1] == '*' and t.startswith(word[:-1])) \\\n                        or (t == word.split(\"'\")[0]):\n                    categories_cnt[i] += 1\n                    break # one token cannot belong to more than one word in the category\n        if relative and text_len:\n            categories_cnt[i] = categories_cnt[i] / text_len\n    return categories_cnt\n\n\n\n#NOTE: this implementation differs from uban, to account for word* in liwc_dict\ndef encode_liwc_categories(tokens, liwc_categories, liwc_words_for_categories, relative=True):\n    categories_cnt = [0 for c in liwc_categories]\n    if not 
tokens:\n return categories_cnt\n text_len = len(tokens)\n for i, category in enumerate(liwc_categories):\n for t in tokens:\n for word in liwc_words_for_categories[category]:\n if word == t:\n categories_cnt[i] += 1\n elif '*' in word and word[:-1] in t:\n categories_cnt[i] += 1\n else:\n continue\n\n if relative and text_len:\n categories_cnt[i] = categories_cnt[i] / text_len\n return categories_cnt","repo_name":"RonHochstenbach/masterThesis","sub_path":"Code/feature_encoders.py","file_name":"feature_encoders.py","file_ext":"py","file_size_in_byte":3004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39575324694","text":"\"\"\"\nHelpers for k8s deployments.\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom contextlib import contextmanager\nfrom typing import Any, Dict, List, Optional\n\nfrom kubernetes import client as k8s_client # type: ignore\nfrom zetta_utils import builder, log\nfrom zetta_utils.mazepa import SemaphoreType\n\nfrom ..resource_tracker import (\n ExecutionResource,\n ExecutionResourceTypes,\n register_execution_resource,\n)\nfrom .common import ClusterInfo, get_cluster_data, get_mazepa_worker_command\nfrom .pod import get_pod_spec\nfrom .secret import secrets_ctx_mngr\n\nlogger = log.get_logger(\"zetta_utils\")\n\n\ndef get_deployment_spec(\n name: str,\n image: str,\n command: str,\n replicas: int,\n resources: Dict[str, int | float | str],\n labels: Dict[str, str],\n env_secret_mapping: Dict[str, str],\n volumes: Optional[List[k8s_client.V1Volume]] = None,\n volume_mounts: Optional[List[k8s_client.V1VolumeMount]] = None,\n resource_requests: Optional[Dict[str, int | float | str]] = None,\n) -> k8s_client.V1Deployment:\n schedule_toleration = k8s_client.V1Toleration(\n key=\"worker-pool\", operator=\"Equal\", value=\"true\", effect=\"NoSchedule\"\n )\n\n pod_spec = get_pod_spec(\n name=\"zutils-worker\",\n image=image,\n command=[\"/bin/sh\"],\n command_args=[\"-c\", command],\n resources=resources,\n env_secret_mapping=env_secret_mapping,\n tolerations=[schedule_toleration],\n volumes=volumes,\n volume_mounts=volume_mounts,\n resource_requests=resource_requests,\n )\n\n pod_template = k8s_client.V1PodTemplateSpec(\n metadata=k8s_client.V1ObjectMeta(labels=labels),\n spec=pod_spec,\n )\n\n deployment_spec = k8s_client.V1DeploymentSpec(\n progress_deadline_seconds=600,\n replicas=replicas,\n selector=k8s_client.V1LabelSelector(match_labels=labels),\n strategy=k8s_client.V1DeploymentStrategy(\n type=\"RollingUpdate\",\n rolling_update=k8s_client.V1RollingUpdateDeployment(\n max_surge=\"25%\", max_unavailable=\"25%\"\n ),\n ),\n template=pod_template,\n )\n\n deployment = k8s_client.V1Deployment(\n metadata=k8s_client.V1ObjectMeta(name=name, labels=labels),\n spec=deployment_spec,\n )\n\n return deployment\n\n\ndef get_mazepa_worker_deployment( # pylint: disable=too-many-locals\n execution_id: str,\n image: str,\n task_queue_spec: dict[str, Any],\n outcome_queue_spec: dict[str, Any],\n replicas: int,\n resources: Dict[str, int | float | str],\n env_secret_mapping: Dict[str, str],\n labels: Optional[Dict[str, str]] = None,\n resource_requests: Optional[Dict[str, int | float | str]] = None,\n num_procs: int = 1,\n semaphores_spec: dict[SemaphoreType, int] | None = None,\n):\n if labels is None:\n labels_final = {\"execution_id\": execution_id}\n else:\n labels_final = labels\n\n worker_command = get_mazepa_worker_command(\n task_queue_spec, outcome_queue_spec, num_procs, semaphores_spec\n )\n 
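# A sketch of how get_deployment_spec (defined above) might be called; every
# concrete value here (image, resources, secret mapping) is an invented
# placeholder, not taken from the zetta_utils codebase:
example_deployment = get_deployment_spec(
    name="example-worker",
    image="example.registry/worker:latest",
    command="python -m worker",
    replicas=2,
    resources={"memory": "4Gi", "cpu": 1},
    labels={"app": "example-worker"},
    env_secret_mapping={"QUEUE_CREDS": "queue-creds-secret"},
)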
logger.debug(f\"Making a deployment with worker command: '{worker_command}'\")\n\n dshm = k8s_client.V1Volume(\n name=\"dshm\", empty_dir=k8s_client.V1EmptyDirVolumeSource(medium=\"Memory\")\n )\n tmp = k8s_client.V1Volume(\n name=\"tmp\", empty_dir=k8s_client.V1EmptyDirVolumeSource(medium=\"Memory\")\n )\n volumes = [dshm, tmp]\n volume_mounts = [\n k8s_client.V1VolumeMount(mount_path=\"/dev/shm\", name=\"dshm\"),\n k8s_client.V1VolumeMount(mount_path=\"/tmp\", name=\"tmp\"),\n ]\n\n return get_deployment_spec(\n name=execution_id,\n image=image,\n replicas=replicas,\n command=worker_command,\n resources=resources,\n labels=labels_final,\n env_secret_mapping=env_secret_mapping,\n volumes=volumes,\n volume_mounts=volume_mounts,\n resource_requests=resource_requests,\n )\n\n\ndef get_deployment(\n name: str,\n pod_spec: k8s_client.V1PodSpec,\n replicas: int,\n labels: Optional[Dict[str, str]] = None,\n revision_history_limit: Optional[int] = 10,\n) -> k8s_client.V1Deployment:\n labels = labels or {\"app\": name}\n pod_template = k8s_client.V1PodTemplateSpec(\n metadata=k8s_client.V1ObjectMeta(labels=labels),\n spec=pod_spec,\n )\n\n deployment_spec = k8s_client.V1DeploymentSpec(\n replicas=replicas,\n revision_history_limit=revision_history_limit,\n selector=k8s_client.V1LabelSelector(match_labels=labels),\n template=pod_template,\n )\n\n deployment = k8s_client.V1Deployment(\n metadata=k8s_client.V1ObjectMeta(name=name, labels=labels),\n spec=deployment_spec,\n )\n\n return deployment\n\n\n@builder.register(\"k8s_deployment_ctx_mngr\")\n@contextmanager\ndef deployment_ctx_mngr(\n execution_id: str,\n cluster_info: ClusterInfo,\n deployment: k8s_client.V1Deployment,\n secrets: List[k8s_client.V1Secret],\n namespace: Optional[str] = \"default\",\n):\n configuration, _ = get_cluster_data(cluster_info)\n k8s_client.Configuration.set_default(configuration)\n k8s_apps_v1_api = k8s_client.AppsV1Api()\n\n with secrets_ctx_mngr(execution_id, secrets, cluster_info):\n logger.info(f\"Creating k8s deployment `{deployment.metadata.name}`\")\n k8s_apps_v1_api.create_namespaced_deployment(body=deployment, namespace=namespace)\n register_execution_resource(\n ExecutionResource(\n execution_id,\n ExecutionResourceTypes.K8S_DEPLOYMENT.value,\n deployment.metadata.name,\n )\n )\n\n try:\n yield\n finally:\n # new configuration to refresh expired tokens (long running executions)\n configuration, _ = get_cluster_data(cluster_info)\n k8s_client.Configuration.set_default(configuration)\n\n # need to create a new client for the above to take effect\n k8s_apps_v1_api = k8s_client.AppsV1Api()\n logger.info(f\"Deleting k8s deployment `{deployment.metadata.name}`\")\n k8s_apps_v1_api.delete_namespaced_deployment(\n name=deployment.metadata.name, namespace=namespace\n )\n","repo_name":"ZettaAI/zetta_utils","sub_path":"zetta_utils/cloud_management/resource_allocation/k8s/deployment.py","file_name":"deployment.py","file_ext":"py","file_size_in_byte":6194,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"8778942960","text":"import datetime\nimport logging\n\nfrom cryptography.hazmat.primitives.asymmetric import rsa, padding, ec\nfrom cryptography.exceptions import InvalidSignature\nfrom cryptography import x509\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import hashes, serialization\nfrom cryptography.hazmat.primitives.serialization import PublicFormat\nfrom os.path import join\n\n\nclass 
CertificateAuthorization:\n \"\"\"\n Peer들의 인증을 처리한다.\n \"\"\"\n\n # 인증서 파일명\n CERT_NAME = \"cert.pem\"\n # 개인키 파일명\n PRI_NAME = \"key.pem\"\n\n # CA 인증서\n __ca_cert = None\n # CA PRIVATE KEY\n __ca_pri = None\n\n def __init__(self):\n pass\n\n # def load_pki(self, ca_cert_path, private_key_path):\n def load_pki(self, cert_path: str, cert_pass=None):\n \"\"\"\n 인증서 로드\n\n :param cert_path: 인증서 경��\n :param cert_pass: 개인키 패스워드\n \"\"\"\n ca_cert_file = join(cert_path, self.CERT_NAME)\n ca_pri_file = join(cert_path, self.PRI_NAME)\n\n # 인증서/개인키 로드\n with open(ca_cert_file, \"rb\") as der:\n cert_bytes = der.read()\n self.__ca_cert = x509.load_pem_x509_certificate(cert_bytes, default_backend())\n with open(ca_pri_file, \"rb\") as der:\n private_bytes = der.read()\n try:\n self.__ca_pri = serialization.load_pem_private_key(private_bytes, cert_pass, default_backend())\n except ValueError:\n logging.debug(\"Invalid Password\")\n\n # 인증서 키 쌍 검증\n sign = self.sign_data(b'TEST')\n if self.verify_data(b'TEST', sign) is False:\n logging.debug(\"Invalid Signature(Root Certificate load test)\")\n\n def get_sign_public_key(self):\n if self.__ca_cert is None:\n return None\n else:\n return self.__ca_cert.public_key\n\n def sign_data(self, data: bytes) -> bytes:\n \"\"\"\n CA 개인키로 DATA 서명\n :param data: 서명 대상 원문\n :return: 서명\n \"\"\"\n if isinstance(self.__ca_pri, ec.EllipticCurvePrivateKeyWithSerialization):\n signer = self.__ca_pri.signer(ec.ECDSA(hashes.SHA256()))\n signer.update(data)\n return signer.finalize()\n elif isinstance(self.__ca_pri, rsa.RSAPrivateKeyWithSerialization):\n return self.__ca_pri.sign(\n data,\n padding.PKCS1v15(),\n hashes.SHA256()\n )\n else:\n logging.debug(\"Unknown PrivateKey Type : %s\", type(self.__ca_pri))\n return None\n\n def verify_data(self, data: bytes, signature: bytes) -> bool:\n \"\"\"\n CA 개인키로 서명한 DATA 검증\n :param data: 서명 대상 원문\n :param signature: 서명 데이터\n :return: 검증 결과(True/False)\n \"\"\"\n pub_key = self.__ca_cert.public_key()\n return self.verify_data_with_publickey(public_key=pub_key, data=data, signature=signature)\n\n def verify_data_with_publickey(self, public_key, data: bytes, signature: bytes) -> bool:\n \"\"\"\n 서명한 DATA검증\n :param public_key: 검증용 공개키\n :param data: 서명 대상 원문\n :param signature: 서명 데이터\n :return: 검증 결과(True/False)\n \"\"\"\n if isinstance(public_key, ec.EllipticCurvePublicKeyWithSerialization):\n try:\n public_key.verify(\n signature=signature,\n data=data,\n signature_algorithm=ec.ECDSA(hashes.SHA256())\n )\n return True\n except InvalidSignature:\n logging.debug(\"InvalidSignatureException_ECDSA\")\n elif isinstance(public_key, rsa.RSAPublicKeyWithSerialization):\n try:\n public_key.verify(\n signature,\n data,\n padding.PKCS1v15(),\n hashes.SHA256()\n )\n return True\n except InvalidSignature:\n logging.debug(\"InvalidSignatureException_RSA\")\n else:\n logging.debug(\"Unknown PublicKey Type : %s\", type(public_key))\n\n return False\n\n def verify_data_with_dercert(self, cert_der, data: bytes, signature: bytes) -> bool:\n \"\"\"\n 서명 및 인증서 검증\n :param cert_der: 인증서(der bytes)\n :param data: 서명 원문\n :param signature: 서명\n :return: 검증 결과(True/False)\n \"\"\"\n cert = x509.load_der_x509_certificate(cert_der, default_backend())\n return self.verify_data_with_cert(cert=cert, data=data, signature=signature)\n\n def verify_data_with_cert(self, cert, data: bytes, signature: bytes) -> bool:\n \"\"\"\n 서명 및 인증서 검증\n :param cert: 인증서\n :param data: 서명 원문\n :param signature: 서명\n :return: 검증 결과(True/False)\n \"\"\"\n # LOOPCHAIN-61 인증서 
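# The sign/verify pair above still uses the cryptography library's deprecated
# signer()/verifier() flow for EC keys. A modern-API sketch of the same ECDSA
# round trip, using a freshly generated key rather than the CA key from the
# record:
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives import hashes

private_key = ec.generate_private_key(ec.SECP256R1())
signature = private_key.sign(b"TEST", ec.ECDSA(hashes.SHA256()))
# verify() raises InvalidSignature on mismatch, mirroring the checks above
private_key.public_key().verify(signature, b"TEST", ec.ECDSA(hashes.SHA256()))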
\n\n    def verify_data_with_publickey(self, public_key, data: bytes, signature: bytes) -> bool:\n        \"\"\"\n        Verify signed DATA\n        :param public_key: public key used for verification\n        :param data: original signed data\n        :param signature: signature data\n        :return: verification result (True/False)\n        \"\"\"\n        if isinstance(public_key, ec.EllipticCurvePublicKeyWithSerialization):\n            try:\n                public_key.verify(\n                    signature=signature,\n                    data=data,\n                    signature_algorithm=ec.ECDSA(hashes.SHA256())\n                )\n                return True\n            except InvalidSignature:\n                logging.debug(\"InvalidSignatureException_ECDSA\")\n        elif isinstance(public_key, rsa.RSAPublicKeyWithSerialization):\n            try:\n                public_key.verify(\n                    signature,\n                    data,\n                    padding.PKCS1v15(),\n                    hashes.SHA256()\n                )\n                return True\n            except InvalidSignature:\n                logging.debug(\"InvalidSignatureException_RSA\")\n        else:\n            logging.debug(\"Unknown PublicKey Type : %s\", type(public_key))\n\n        return False\n\n    def verify_data_with_dercert(self, cert_der, data: bytes, signature: bytes) -> bool:\n        \"\"\"\n        Verify the signature and the certificate\n        :param cert_der: certificate (DER bytes)\n        :param data: signed original data\n        :param signature: signature\n        :return: verification result (True/False)\n        \"\"\"\n        cert = x509.load_der_x509_certificate(cert_der, default_backend())\n        return self.verify_data_with_cert(cert=cert, data=data, signature=signature)\n\n    def verify_data_with_cert(self, cert, data: bytes, signature: bytes) -> bool:\n        \"\"\"\n        Verify the signature and the certificate\n        :param cert: certificate\n        :param data: signed original data\n        :param signature: signature\n        :return: verification result (True/False)\n        \"\"\"\n        # LOOPCHAIN-61 certificate validation\n        if self.verify_certificate(cert):\n            # Verify the content signed with the certificate\n            cert_pub = cert.public_key()\n            validation_result = self.verify_data_with_publickey(public_key=cert_pub, data=data, signature=signature)\n            if validation_result is False:\n                logging.debug(\"signature validation failed\")\n            return validation_result\n        else:\n            logging.debug(\"certificate validation failed\")\n            return False\n\n    def verify_certificate_der(self, der_cert):\n        \"\"\"\n        Verify a certificate\n        :param der_cert: subordinate certificate in DER format\n        :return: verification result\n        \"\"\"\n        cert = x509.load_der_x509_certificate(der_cert, default_backend())\n        return self.verify_certificate(cert)\n\n    def verify_certificate(self, peer_cert):\n        \"\"\"\n        Verify a certificate\n        :param peer_cert: subordinate certificate\n        :return: verification result\n        \"\"\"\n        # Check the certificate validity period\n        not_after = peer_cert.not_valid_after\n        now = datetime.datetime.now()\n        if not_after < now:\n            logging.error(\"Certificate is Expired\")\n            return False\n\n        # Verify the certificate signature\n        # The CA certificate itself is not verified\n        if self.__ca_cert.signature == peer_cert.signature:\n            return True\n        ca_pub = self.__ca_cert.public_key()\n        signature = peer_cert.signature\n        data = peer_cert.tbs_certificate_bytes\n\n        validation_result = False\n        if isinstance(ca_pub, ec.EllipticCurvePublicKeyWithSerialization):\n            try:\n                ca_pub.verify(\n                    signature=signature,\n                    data=data,\n                    signature_algorithm=ec.ECDSA(hashes.SHA256())\n                )\n                validation_result = True\n            except InvalidSignature:\n                logging.debug(\"InvalidSignatureException_ECDSA\")\n        elif isinstance(ca_pub, rsa.RSAPublicKeyWithSerialization):\n            try:\n                ca_pub.verify(\n                    signature,\n                    data,\n                    padding.PKCS1v15(),\n                    hashes.SHA256()\n                )\n                validation_result = True\n            except InvalidSignature:\n                logging.debug(\"InvalidSignatureException_RSA\")\n        else:\n            logging.debug(\"Unknown PublicKey Type : %s\", type(ca_pub))\n\n        return validation_result\n\n    def generate_peer_token(self, peer_sign, peer_cert, peer_id, peer_target,\n                            group_id, peer_type, rand_key, token_interval):\n        peer_info = b''.join([peer_id.encode('utf-8'),\n                              peer_target.encode('utf-8'),\n                              group_id.encode('utf-8')]) + bytes([peer_type])\n        data = peer_info + rand_key\n\n        cert = x509.load_der_x509_certificate(peer_cert, default_backend())\n        if self.verify_data_with_cert(cert=cert, data=data, signature=peer_sign):\n\n            time = datetime.datetime.now() + datetime.timedelta(minutes=token_interval)\n            date = int(time.timestamp() * 1000).to_bytes(length=8, byteorder='big')\n\n            peer_pub = cert.public_key().public_bytes(encoding=serialization.Encoding.DER,\n                                                      format=PublicFormat.SubjectPublicKeyInfo)\n\n            # token_bytes = peer_id || peer_target || group_id || peer_type || peer_pub\n            token_bytes = peer_info + date + peer_pub\n            logging.debug(\"TBS Token[%s]\", token_bytes.hex())\n\n            # token = date || CA_Sign(token_bytes)\n            signed_token = self.sign_data(token_bytes)\n            token = b''.join([date, signed_token]).hex()\n            return token\n        else:\n            logging.debug(\"The validation of the signature or certificate failed.\")\n            return None\n\n    def verify_peer_token(self, peer_token, peer, peer_type):\n        token_time = peer_token[:16]\n        token_sign = peer_token[16:]\n        current_date = int(datetime.datetime.now().timestamp() * 1000)\n        token_date = int(token_time, 16)\n        if current_date > token_date:\n            return False\n\n        date = bytes.fromhex(token_time)\n\n        peer_info = b''.join([peer.peer_id.encode('utf-8'),\n                              peer.target.encode('utf-8'),\n                              peer.group_id.encode('utf-8')]) + bytes([peer_type])\n\n        peer_cert = x509.load_der_x509_certificate(bytes.fromhex(peer.cert), default_backend())\n        peer_pub = 
peer_cert.public_key().public_bytes(encoding=serialization.Encoding.DER,\n                                                  format=PublicFormat.SubjectPublicKeyInfo)\n\n        token_bytes = peer_info + date + peer_pub\n        logging.debug(\"TBS Token(V) : %s\", token_bytes.hex())\n        signature = bytes.fromhex(token_sign)\n\n        return self.verify_data_with_cert(cert=self.__ca_cert, data=token_bytes, signature=signature)\n\n    @property\n    def is_secure(self):\n        return self.__ca_cert is not None and self.__ca_pri is not None\n","repo_name":"wilberdell/loopchain","sub_path":"loopchain/radiostation/certificate_authorization.py","file_name":"certificate_authorization.py","file_ext":"py","file_size_in_byte":10247,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"35529365570","text":"import random\n\ngameover = False \n\nwhile not gameover: \n    computer = random.randint(0, 10)\n    player = -1\n    tries = 0\n    while player != computer: \n        player = int(input(\"Guess a number from 0-10:\"))\n        tries += 1\n        if player == computer:\n            print(\"Congratulations, you did it!\")\n            print(f\"You guessed it in {tries} tries.\") \n        elif player > computer:\n            print(\"Sorry, you are too high! Guess again!\")\n        elif player < computer: \n            print(\"Sorry, you are too low...try again!\")\n    choice = input(\"Play again? (y or n?)\")\n    if choice != \"y\":\n        gameover = True \n\n    \n\n","repo_name":"BWilliams22/Number-Guessing-Game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23526082601","text":"#!/usr/bin/python3\r\nimport sys\r\n\r\nfile_prefix = 'B-small-attempt0'\r\n\r\nfilein = open(file_prefix + '.in')\r\nfileout = sys.stdout if 'sample' in file_prefix else open(file_prefix + '.out', 'w')\r\nlinein = lambda: filein.readline().strip()\r\nlineout = lambda s: fileout.write(s + '\\n')\r\n\r\nncases = int(linein())\r\n'''\r\ndef areDifferentAxis(a, b):\r\n\tsmaller = 0\r\n\tlarger = 0\r\n\tfor i in range(len(a)):\r\n\t\tif a[i] == b[i]:\r\n\t\t\treturn True\r\n\t\telif a[i] < b[i]:\r\n\t\t\tsmaller += 1\r\n\t\telse:\r\n\t\t\tlarger += 1\r\n\tif smaller != 0 and larger != 0:\r\n\t\treturn True\r\n\treturn False\r\n\r\ndef areSwapped(optA, optB, i):\r\n\tif areDifferentAxis(opt[0], nextOpt[0], i):\r\n\t\treturn True\r\n\telif len(optA) == 2 and areDifferentAxis(optA[1], optB[0], i):\r\n\t\treturn False\r\n\telif len(optB) == 2 and areDifferentAxis(optA[0], optB[1], i):\r\n\t\treturn False\r\n\telif len(optA) == 2 and len(optB) == 2 and areDifferentAxis(optA[1], optB[1], i):\r\n\t\treturn True\r\n\treturn None\r\n\t'''\r\n\r\nfor case in range(ncases):\r\n\tN = int(linein())\r\n\r\n\tsheets = []\r\n\tfor i in range(N*2 - 1):\r\n\t\tsheets.append([int(x) for x in linein().split()])\r\n\r\n\toptions = []\r\n\tfor i in range(N):\r\n\t\tminVal = min(sheet[i] for sheet in sheets)\r\n\t\topt = []\r\n\t\tfor sheet in list(sheets):\r\n\t\t\tif sheet[i] == minVal:\r\n\t\t\t\tsheets.remove(sheet)\r\n\t\t\t\topt.append(sheet)\r\n\t\toptions.append(opt)\r\n\r\n\tprint(options)\r\n\r\n\t'''\r\n\tnextIsSwapped = []\r\n\r\n\tfor i in range(N-1):\r\n\t\topt = options[i]\r\n\t\tnextOpt = options[i + 1]\r\n\t\tnextIsSwapped.append(areSwapped(opt, nextOpt, i))\r\n\r\n\tprint(nextIsSwapped)\r\n\t'''\r\n\r\n\tdef dimIsOK(col, choices, rowDim):\r\n\t\tdepth = len(choices)\r\n\t\tif col is None:\r\n\t\t\treturn True\r\n\r\n\t\tfor (i, choice) in enumerate(choices):\r\n\t\t\trow = choice[rowDim]\r\n\t\t\tif row is 
None:\r\n\t\t\t\tcontinue\r\n\t\t\tif col[i] != row[depth]:\r\n\t\t\t\treturn False\r\n\t\treturn True\r\n\r\n\tdef tryPosition(choices):\r\n\t\tif len(choices) > 0:\r\n\t\t\tcurChoice = choices[-1]\r\n\t\t\tprevChoices = choices[:-1]\r\n\t\t\tif not dimIsOK(curChoice[0], prevChoices, 1):\r\n\t\t\t\treturn None\r\n\t\t\telif not dimIsOK(curChoice[1], prevChoices, 0):\r\n\t\t\t\treturn None\r\n\r\n\t\tif len(choices) == N:\r\n\t\t\treturn choices\r\n\r\n\t\tnextOption = options[len(choices)]\r\n\t\tsingle = len(nextOption) == 1\r\n\r\n\t\tif single: trials = [(nextOption[0], None), (None, nextOption[0])]\r\n\t\telse: trials = [(nextOption[0], nextOption[1]), (nextOption[1], nextOption[0])]\r\n\r\n\t\tfor t in trials:\r\n\t\t\tx = tryPosition(choices + [t])\r\n\t\t\tif x is not None: return x\r\n\r\n\t\treturn None\r\n\r\n\tanswer = tryPosition([])\r\n\tprint(answer)\r\n\tfor i, c in enumerate(answer):\r\n\t\tif c[0] is None:\r\n\t\t\tcolDim = 1\r\n\t\t\trow = i\r\n\t\telif c[1] is None:\r\n\t\t\tcolDim = 0\r\n\t\t\trow = i\r\n\r\n\tprint(colDim, row)\r\n\tresult = []\r\n\tfor c in answer:\r\n\t\tresult.append(str(c[colDim][row]))\r\n\r\n\tlineout(\"Case #{0}: {1}\".format(case + 1, ' '.join(result)))\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_182/807.py","file_name":"807.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7665323437","text":"from flask import Flask, Blueprint, jsonify, request\nimport gspread\nimport pandas as pd\nfrom datetime import date, timedelta\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app, resources={r\"/api/*\": {\"origins\": \"*\"}})\n\nrekap_bp = Blueprint('rekap', __name__)\n\n# Load the Google Sheets service account credentials\nsa = gspread.service_account(filename='sibedaspbg-logbook-cab4b99bdcae.json')\n\n# Open the spreadsheet\nsp = sa.open('rekap pbg')\nsh = sp.worksheet('rekap new')\n\n# Fetch data from the spreadsheet and process it\ndef fetch_data(page, items_per_page):\n    values = sh.get_all_values()  # fetch once; the first row holds the column headers\n    logb = pd.DataFrame(values[1:], columns=values[0])\n\n    # Pagination: slice out the rows for the requested page\n    start_index = (page - 1) * items_per_page\n    end_index = page * items_per_page\n    paginated_data = logb.iloc[start_index:end_index]\n\n    return paginated_data\n\n@rekap_bp.route('/', methods=['GET'])\ndef get_data():\n    # Get the page number and items per page from the request\n    page = int(request.args.get('page', 1))\n    items_per_page = int(request.args.get('items_per_page', 10))\n\n    data = fetch_data(page, items_per_page)\n    return jsonify(data.to_dict(orient='records'))\n\napp.register_blueprint(rekap_bp, url_prefix='/api/rekap-pbg')\n\nif __name__ == '__main__':\n    app.run(debug=False)\n","repo_name":"Encepihwan98/api-puprbdg","sub_path":"testPagination.py","file_name":"testPagination.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5700617246","text":"# SW Expert Academy - No. 7584: Self-Replicating String
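\n#\n# Added explanatory note (derived from the code below, not from the original author):\n# each query asks for the bit at 1-indexed position K of a paperfolding-style\n# self-replicating bit string. After the 0-indexing shift (K - 1), odd positions fold\n# back to their parent (K - 1) // 2; the surviving even position mod 4 decides the bit:\n# 0 -> prints 0, 2 -> prints 1.\n# Worked example: K = 7 -> 0-indexed 6 -> 6 % 4 == 2 -> prints 1.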
\n\nT = int(input())\n\nfor tc in range(1, T + 1):\n    K = int(input()) - 1\n    # The index must reduce to a multiple of 4 or 2, so the odd indices are handled with their own condition\n    while K >= 0:\n        if K % 4 == 0:\n            K = 0\n            break\n        if K % 2:\n            K = (K - 1) // 2\n        else:\n            K = 1\n            break\n    print('#{} {}'.format(tc, K))\n","repo_name":"wnstj-yang/Algorithm","sub_path":"SWEA/D3/SWEA_7584.py","file_name":"SWEA_7584.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73304720833","text":"#!/usr/bin/env python\n\n\"\"\"\n    Author: Paniz Behboudian\n\n\"\"\"\nfrom __future__ import division\nfrom rl_glue import BaseAgent\n\nimport numpy as np\nimport numpy.random as rnd\nimport math\n\n\nclass PiesAgent(BaseAgent):\n    def __init__(self, epsilon=None, number_of_actions=None, number_of_rows=None, number_of_columns=None,\n                 advice=None, decay=None, gamma=None, goal_coord=None, decay_param=None,\n                 alpha=None, beta=None, thau=None, c=1, gamma_phi=None):\n        self.Q = None\n        self.Phi = None\n        self.xi = None\n        self.current_state = None\n        self.last_action = None\n        self.number_of_columns = number_of_columns\n        self.number_of_rows = number_of_rows\n        self.number_of_actions = number_of_actions\n        self.epsilon = epsilon\n        self.alpha = alpha\n        self.beta = beta\n        self.gamma = gamma\n        if gamma_phi is None:\n            self.gamma_phi = gamma\n        else:\n            self.gamma_phi = gamma_phi\n        self.path = []\n        self.steps = 0\n        self.thau = thau\n        self.random_seed = None\n        self.advice_scheme = advice\n        self.rng = None\n        self.goal_coord = goal_coord\n        self.decay = decay\n        self.decay_param = decay_param\n        if self.advice_scheme == 'c_advice':\n            self.c = c\n\n    def agent_init(self):\n\n        self.steps = 0\n        self.path = []\n        self.xi = 1\n        assert self.number_of_rows > 0 and self.number_of_columns > 0, self.number_of_actions > 0\n        assert self.alpha is not None and self.gamma is not None and self.random_seed is not None\n        self.rng = np.random.RandomState(self.random_seed)\n        self.Q = np.zeros((self.number_of_rows + 2, self.number_of_columns + 2, self.number_of_actions))\n        self.Phi = np.zeros((self.number_of_rows + 2, self.number_of_columns + 2, self.number_of_actions))\n\n    def _choose_action(self, state, policy='shaped'):\n        if policy == 'Q':\n            greedy = self.rng.choice(np.flatnonzero(\n                self.Q[state[0], state[1]] == (self.Q[state[0], state[1]]).max()))\n        elif policy == 'shaped':\n            greedy = self.rng.choice(np.flatnonzero(\n                self.Q[state[0], state[1]] - self.xi * self.Phi[state[0], state[1]] == (\n                        self.Q[state[0], state[1]] - self.xi * self.Phi[state[0], state[1]]).max()))\n        elif policy == 'Phi':\n            greedy = self.rng.choice(np.flatnonzero(\n                self.Phi[state[0], state[1]] == (self.Phi[state[0], state[1]]).max()))\n        prob = self.rng.uniform()\n        if prob < self.epsilon:\n            action = self.rng.randint(self.number_of_actions)\n        else:\n            action = greedy\n        return action\n\n    def _update_Q(self, state=None, action=None, reward=None, end=False):\n        # Sarsa0\n        if end:\n            delta_Q = reward + self.gamma * 0 - self.Q[\n                self.current_state[0], self.current_state[1], self.last_action]\n            self.Q[self.current_state[0], self.current_state[1], self.last_action] = self.Q[self.current_state[0],\n                                                                                            self.current_state[\n                                                                                                1], self.last_action] + self.alpha * delta_Q\n        else:\n            delta_Q = reward + self.gamma * self.Q[state[0], state[1], action] - self.Q[\n                self.current_state[0], self.current_state[1], self.last_action]\n            self.Q[self.current_state[0], self.current_state[1], self.last_action] = self.Q[self.current_state[0],\n                                                                                            self.current_state[\n                                                                                                1], 
self.last_action] + self.alpha * delta_Q\n self.alpha *= self.thau\n\n def _update_Phi(self, state=None, action=None, reward=None, end=False):\n if end:\n delta_Phi = reward + self.gamma_phi * 0 - self.Phi[\n self.current_state[0], self.current_state[1], self.last_action]\n self.Phi[self.current_state[0], self.current_state[1], self.last_action] = self.Phi[self.current_state[0],\n self.current_state[\n 1], self.last_action] + self.beta * delta_Phi\n else:\n delta_Phi = reward + self.gamma_phi * self.Phi[state[0], state[1], action] - self.Phi[\n self.current_state[0], self.current_state[1], self.last_action]\n self.Phi[self.current_state[0], self.current_state[1], self.last_action] = self.Phi[self.current_state[0],\n self.current_state[\n 1], self.last_action] + self.beta * delta_Phi\n\n def agent_start(self, state):\n \"\"\"\n Arguments: state: numpy array\n Returns: action: integer\n \"\"\"\n self.path = []\n self.path.append(state)\n action = self._choose_action(state=state, policy='shaped')\n self.current_state = np.asarray(state)\n self.last_action = action\n return action\n\n def agent_step(self, reward, state): # returns NumPy array, reward: floating point, this_observation: NumPy array\n \"\"\"\n Arguments: reward: floting point, state: integer\n Returns: action: floating point\n \"\"\"\n\n self.steps += 1\n self.path.append(state)\n action = self._choose_action(state=state, policy='shaped')\n if self.xi > 0:\n reward_phi = -self.intrinsic_reward(sp=state)\n self._update_Phi(state=state, action=self._choose_action(state=state, policy='Phi'), reward=reward_phi,\n end=False)\n self._update_Q(state=state, action=action, reward=reward, end=False)\n self.current_state = state\n self.last_action = action\n return action\n\n def agent_end(self, reward):\n \"\"\"\n Arguments: reward: floating point\n Returns: Nothing\n \"\"\"\n self.steps += 1\n self.path.append([-1, -1])\n if self.xi > 0:\n reward_phi = -self.intrinsic_reward(sp=self.goal_coord)\n self._update_Phi(reward=reward_phi, end=True)\n self._update_Q(reward=reward, end=True)\n return\n\n def intrinsic_reward(self, sp):\n \"\"\"\n :param sp: next state\n :return: expert advice for the state transition\n \"\"\"\n i_reward = 0\n if 'defined' in self.advice_scheme:\n reward_transitions = None\n if 'good' in self.advice_scheme:\n reward_transitions = {(1, 1): (1, 2), (2, 1): (1, 1), (2, 2): (2, 1)}\n elif 'bad' in self.advice_scheme:\n reward_transitions = {(1, 1): (2, 1), (2, 1): (2, 2), (2, 2): (2, 1)}\n else:\n raise Exception('Invalid defined advice')\n for s in reward_transitions.keys():\n if s[0] == self.current_state[0] and s[1] == self.current_state[1] \\\n and reward_transitions[s][0] == sp[0] and reward_transitions[s][1] == sp[1]:\n i_reward = 1\n elif 'c_advice' in self.advice_scheme:\n # right and down\n if self.last_action == 1 or self.last_action == 2:\n i_reward = 1\n else:\n raise Exception('Invalid Advice Scheme')\n return i_reward\n\n def agent_message(self, in_message):\n if in_message.split(\" \")[0] == 'alpha':\n self.alpha = float(in_message.split(\" \")[1])\n elif in_message.split(\" \")[0] == 'gamma':\n self.gamma = float(in_message.split(\" \")[1])\n elif in_message.split(\" \")[0] == 'beta':\n self.beta = float(in_message.split(\" \")[1])\n elif in_message.split(\" \")[0] == 'thau':\n self.thau = float(in_message.split(\" \")[1])\n elif in_message.split(\" \")[0] == 'decay_param':\n self.decay_param = float(in_message.split(\" \")[1])\n else:\n raise Exception('Invalid AGENT 
message')\n","repo_name":"panizbehboudian/Useful-Policy-Invariant-Shaping-from-Arbitrary-Advice","sub_path":"pies_agent.py","file_name":"pies_agent.py","file_ext":"py","file_size_in_byte":8413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4366708118","text":"#!/usr/bin/env python\nimport rospy\nfrom Naked.toolshed.shell import execute_js\nimport subprocess, signal\nimport os\n\n__author__ = 'Itamar Eliakim'\n\nclass DJIRonin_NodeJS():\n    def __init__(self):\n        rospy.init_node(\"DJI_Ronin_NodeJS\")\n        rospy.loginfo(\"Starting DJI Ronin NodeJS Server\")\n        #self.startApp = subprocess.Popen(\"adb shell am start -n com.dji.gimbal/com.dji.gimbal.ui.HomeActivity\", shell=True, stdout=subprocess.PIPE).stdout.read()\n        self.getROSpath = subprocess.Popen(\"echo $ROS_PACKAGE_PATH\", shell=True, stdout=subprocess.PIPE).stdout.read().split(':')[0]\n        self.success = execute_js(self.getROSpath + '/dji_ronin/scripts/sji-android-screen-capture/bin/asc.js')\n        if self.success == False:\n            self.killserver()\n        else:\n            rospy.loginfo(\"ROS DJI Ronin NodeJS Server is ON\")\n            ### If it doesn't work: ps aux | grep node -> kill -9 <process id>\n            rospy.spin()\n\n\n    def killserver(self):\n        rospy.loginfo(\"Kill old server!\")\n        p = subprocess.Popen(['ps', '-A'], stdout=subprocess.PIPE)\n        out, err = p.communicate()\n\n        for line in out.splitlines():\n            if 'node' in line:\n                pid = int(line.split(None, 1)[0])\n                os.kill(pid, signal.SIGKILL)\n        self.success = execute_js(os.getcwd() + '/sji-android-screen-capture/bin/asc.js')\n        rospy.loginfo(\"ROS DJI Ronin NodeJS Server is ON\")\nif __name__ == \"__main__\":\n    DJIRonin_NodeJS()\n\n","repo_name":"Itamare4/dji_ronin","sub_path":"scripts/DJIRonin_NodeJS.py","file_name":"DJIRonin_NodeJS.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"61"} +{"seq_id":"26648201449","text":"from rootalias import *\nfrom pprint import pprint\n\nFIGURE_DIR = '/Users/juntinghuang/beamer/20180131_transition_richa/figures'\nDATA_DIR = './data'\n\n\ndef print_dst():\n    sh_dir = '/minos/app/junting/transition/Bravo0720/shFiles'\n    sh_filenames = []\n    with open('{}/subNoOsc.sh'.format(sh_dir)) as f_local:\n        for row in f_local.readlines():\n            row = row.strip()\n            if len(row) > 1:\n                sh_filename = row.split(' ')[-1]\n                sh_filenames.append(sh_filename)\n\n    sh_filename = sh_filenames[0]\n    dsts = []\n\n    for sh_filename in sh_filenames:\n        dst_count = 0\n        with open('{}/{}'.format(sh_dir, sh_filename)) as f_sh:\n            for row in f_sh.readlines():\n                if '/minos/data/analysis/NuMuBar' in row and not row.startswith('#'):\n                    row = row.strip().split(' ')\n                    dst = row[1]\n                    dsts.append(dst)\n                    dst_count += 1\n        print('sh_filename = {}'.format(sh_filename))\n        print('dst_count = {}'.format(dst_count))\n\n    print('len(sh_filenames) = {}'.format(len(sh_filenames)))\n    print('len(dsts) = {}'.format(len(dsts)))\n\n    pprint(sh_filenames)\n    pprint(dsts)\n\n\ndef subtract_min(hs):\n    bin_count = hs.GetNbinsX()\n    bin_min = hs.GetMinimum()\n    for i in range(1, bin_count + 1):\n        hs.SetBinContent(i, hs.GetBinContent(i) - bin_min)\n\n\ndef plot_g():\n    f_stat_syst = TFile('{}/GStatsSyst_mc_allRuns_0.root'.format(DATA_DIR))\n    f_stat = TFile('{}/GStats_mc_allRuns_0.root'.format(DATA_DIR))\n    # f_stat = TFile('{}/GStats_mc_allRuns_0.fix_sn2_dm2.root'.format(DATA_DIR))\n\n    h_stat_syst = f_stat_syst.Get('hsurface1')\n    h_stat = f_stat.Get('hsurface1')\n\n    subtract_min(h_stat_syst)\n    subtract_min(h_stat)\n\n    bin_count = h_stat_syst.GetNbinsX()\n    
h_stat_syst_scale = TH1D('h_stat_syst_scale', 'h_stat_syst_scale', bin_count, 0, 10)\n h_stat_scale = TH1D('h_stat_scale', 'h_stat_scale', bin_count, 0, 10)\n\n for i in range(1, bin_count + 1):\n h_stat_syst_scale.SetBinContent(i, h_stat_syst.GetBinContent(i))\n h_stat_scale.SetBinContent(i, h_stat.GetBinContent(i))\n\n c1 = TCanvas('c1', 'c1', 800, 600)\n set_margin()\n gStyle.SetOptStat(0)\n set_h1_style(h_stat_syst_scale)\n h_stat_syst_scale.Draw()\n\n x_max = 5.\n h_stat_syst_scale.GetYaxis().SetRangeUser(0, 10)\n h_stat_syst_scale.GetXaxis().SetRangeUser(0, x_max)\n h_stat_syst_scale.GetXaxis().SetTitle('|#tilde g^{ZT}_{#mu#bar#mu}| or |#tilde g^{ZT}_{#tau#bar#tau}| (#times 10^{-23})')\n h_stat_syst_scale.GetYaxis().SetTitle('-2 #Deltalog #lambda')\n\n set_h1_style(h_stat_scale)\n h_stat_scale.SetLineColor(kRed + 1)\n h_stat_scale.Draw('sames')\n\n lg1 = TLegend(.19, .53, .43, .65)\n set_legend_style(lg1)\n lg1.SetTextSize(23)\n lg1.AddEntry(h_stat_scale, 'Statistics Only', 'l')\n lg1.AddEntry(h_stat_syst_scale, 'With Systematics', 'l')\n lg1.Draw()\n\n tl_1_sigma = TLine(0, 1, x_max, 1)\n tl_1_sigma.SetLineStyle(2)\n tl_1_sigma.Draw()\n tl_2_sigma = TLine(0, 4, x_max, 4)\n tl_2_sigma.SetLineStyle(2)\n tl_2_sigma.Draw()\n tl_3_sigma = TLine(0, 9, x_max, 9)\n tl_3_sigma.SetLineStyle(2)\n tl_3_sigma.Draw()\n\n tlatex = TLatex()\n tlatex.SetTextFont(43)\n tlatex.SetTextSize(23)\n\n tlatex.DrawLatex(.15, 1.2, '1 #sigma')\n tlatex.DrawLatex(.15, 4.2, '2 #sigma')\n tlatex.DrawLatex(.15, 9.2, '3 #sigma')\n\n tlatex.DrawLatex(0.3, 8, \"10.56 #times10^{20} POT, #nu_{#mu}-mode\")\n tlatex.DrawLatex(0.3, 7, \" MINOS Sensitivity\")\n\n preliminary = TLatex()\n preliminary.SetTextAlign(22)\n preliminary.SetTextFont(43)\n preliminary.SetTextSize(28)\n preliminary.SetTextColor(kRed)\n preliminary.DrawLatex(1.6, 9.5, \"MINOS Preliminary\")\n\n c1.Update()\n c1.SaveAs('{}/plot_g.pdf'.format(FIGURE_DIR))\n input('Press any key to continue.')\n\n\ndef plot_c():\n f_stat_syst = TFile('{}/CMuStatsSyst_mc_allRuns_0.root'.format(DATA_DIR))\n f_stat = TFile('{}/CMuStats_mc_allRuns_0.root'.format(DATA_DIR))\n\n h_stat_syst = f_stat_syst.Get('hsurface1')\n h_stat = f_stat.Get('hsurface1')\n\n subtract_min(h_stat_syst)\n subtract_min(h_stat)\n\n bin_count = h_stat.GetNbinsX()\n print('bin_count = {}'.format(bin_count))\n print('h_stat.GetBinLowEdge(bin_count) + h_stat.GetBinWidth(bin_count) = {}'.format(h_stat.GetBinLowEdge(bin_count) + h_stat.GetBinWidth(bin_count)))\n print('h_stat.GetBinLowEdge(1) = {}'.format(h_stat.GetBinLowEdge(1)))\n\n h_stat_syst_scale = TH1D('h_stat_syst_scale', 'h_stat_syst_scale', bin_count, -9.9, 10)\n h_stat_scale = TH1D('h_stat_scale', 'h_stat_scale', bin_count, -9.9, 10)\n for i in range(1, bin_count + 1):\n h_stat_syst_scale.SetBinContent(i, h_stat_syst.GetBinContent(i))\n h_stat_scale.SetBinContent(i, h_stat.GetBinContent(i))\n\n c1 = TCanvas('c1', 'c1', 800, 600)\n set_margin()\n gStyle.SetOptStat(0)\n\n set_h1_style(h_stat_scale)\n h_stat_scale.SetLineColor(kRed + 1)\n h_stat_scale.Draw()\n h_stat_scale.GetXaxis().SetRangeUser(-9, 9)\n h_stat_scale.GetYaxis().SetRangeUser(0, 10)\n h_stat_scale.GetXaxis().SetTitle('(c_{L})_{#mu#mu}^{TT} or -(c_{L})_{#tau#tau}^{TT} (#times 10^{-23})')\n h_stat_scale.GetYaxis().SetTitle('-2 #Deltalog #lambda')\n\n set_h1_style(h_stat_syst_scale)\n h_stat_syst_scale.Draw('sames')\n\n lg1 = TLegend(0.34, 0.57, 0.59, 0.69)\n set_legend_style(lg1)\n lg1.SetTextSize(23)\n lg1.AddEntry(h_stat_scale, 'Statistics Only', 'l')\n 
lg1.AddEntry(h_stat_syst_scale, 'With Systematics', 'l')\n lg1.Draw()\n\n x_min = -9\n x_max = 9\n tl_1_sigma = TLine(x_min, 1, x_max, 1)\n tl_1_sigma.SetLineStyle(2)\n tl_1_sigma.Draw()\n tl_2_sigma = TLine(x_min, 4, x_max, 4)\n tl_2_sigma.SetLineStyle(2)\n tl_2_sigma.Draw()\n tl_3_sigma = TLine(x_min, 9, x_max, 9)\n tl_3_sigma.SetLineStyle(2)\n tl_3_sigma.Draw()\n\n tlatex = TLatex()\n tlatex.SetTextFont(43)\n tlatex.SetTextSize(23)\n\n tlatex.DrawLatex(-8.5, 1.2, '1 #sigma')\n tlatex.DrawLatex(-8.5, 4.2, '2 #sigma')\n tlatex.DrawLatex(-8.5, 9.2, '3 #sigma')\n\n tlatex.DrawLatex(-4.2, 8.2, \"10.56 #times10^{20} POT, #nu_{#mu}-mode\")\n tlatex.DrawLatex(-4.2, 7.4, \" MINOS Sensitivity\")\n\n preliminary = TLatex()\n preliminary.SetTextAlign(22)\n preliminary.SetTextFont(43)\n preliminary.SetTextSize(28)\n preliminary.SetTextColor(kRed)\n preliminary.DrawLatex(0, 9.5, \"MINOS Preliminary\")\n\n c1.Update()\n c1.SaveAs('{}/plot_c.pdf'.format(FIGURE_DIR))\n input('Press any key to continue.')\n\n\ndef get_axis_func():\n x_limit = 20\n compress = 2.5\n form = '(x <= {0}) * x + (x > {0}) * ({0} + (x - {0}) / {1})'.format(x_limit, compress)\n f_axis = TF1('f_axis', form, 0, 50)\n return f_axis\n\n\ndef plot_spectrum_nubar():\n f_nubar = TFile('data/spectrum.std_osc.root')\n f_nubar_g = TFile('data/spectrum.g_3.4e-23.root')\n f_nubar_c = TFile('data/spectrum.c_6e-23.root')\n\n h_nubar = f_nubar.Get('hNubar')\n h_nubar_g = f_nubar_g.Get('hNubar')\n h_nubar_c = f_nubar_c.Get('hNubar')\n\n c1 = TCanvas('c1', 'c1', 800, 800)\n set_margin()\n\n gStyle.SetOptStat(0)\n set_h1_style(h_nubar)\n h_nubar.Draw()\n h_nubar.GetYaxis().SetRangeUser(0, 50)\n h_nubar.SetXTitle(\"Reconstructed #bar{#nu}_{#mu} Energy (GeV)\")\n h_nubar.SetYTitle(\"Events / 2 GeV\")\n h_nubar.SetTitle('')\n h_nubar.GetXaxis().SetLabelSize(0)\n h_nubar.SetLineWidth(0)\n\n gr_nubar = get_graph_from_hist(h_nubar)\n set_graph_style(gr_nubar)\n gr_nubar.SetMarkerColor(1)\n gr_nubar.SetMarkerStyle(20)\n gr_nubar.SetMarkerSize(1)\n gr_nubar.SetLineWidth(3)\n gr_nubar.Draw('sames,pz')\n\n set_h1_style(h_nubar_g)\n h_nubar_g.SetLineColor(kRed)\n h_nubar_g.SetLineStyle(2)\n h_nubar_g.SetLineWidth(3)\n h_nubar_g.Draw('sames')\n\n set_h1_style(h_nubar_c)\n h_nubar_c.SetLineColor(kBlue)\n h_nubar_c.SetLineStyle(2)\n h_nubar_c.SetLineWidth(3)\n h_nubar_c.Draw('sames')\n\n lg1 = TLegend(0.46, 0.55, 0.76, 0.78)\n set_legend_style(lg1)\n lg1.SetHeader('Far Detector Prediction')\n lg1.AddEntry(gr_nubar, 'g = 0, c = 0', 'lep')\n lg1.AddEntry(h_nubar_g, 'g = 3.4 #times 10^{-23}, c = 0', 'l')\n lg1.AddEntry(h_nubar_c, 'g = 0, c = 6 #times 10^{-23}', 'l')\n lg1.Draw()\n\n tlatex = TLatex()\n tlatex.SetTextFont(43)\n tlatex.SetTextSize(28)\n tlatex.DrawLatex(13.5, 42.5, \"10.56 #times10^{20} POT, #nu_{#mu}-mode\")\n\n preliminary = TLatex()\n preliminary.SetTextAlign(22)\n preliminary.SetTextFont(43)\n preliminary.SetTextSize(28)\n preliminary.SetTextColor(kRed)\n preliminary.DrawLatex(7.6, 47, \"MINOS Preliminary\")\n\n gPad.Update()\n gPad.SetTicky()\n\n x_limit = 20\n xmax = gPad.GetUxmax()\n ymin = gPad.GetUymin()\n ymax = gPad.GetUymax()\n\n div = 510\n f_axis = get_axis_func()\n axis_bottom = TGaxis(0, ymin, xmax, ymin, 'f_axis', div)\n axis_bottom.SetLabelSize(0)\n axis_bottom.Draw()\n axis_top = TGaxis(0, ymax, xmax, ymax, 'f_axis', div, '-')\n axis_top.SetLabelSize(0)\n axis_top.Draw()\n\n latex_labels = []\n label = 0\n for i in range(20):\n if label > 50:\n break\n\n latex_label = TLatex(f_axis.Eval(label), ymin - 0.025 * (ymax - ymin), 
'{}'.format(label))\n latex_label.SetTextAlign(23)\n latex_label.SetTextFont(43)\n latex_label.SetTextSize(28)\n latex_labels.append(latex_label)\n latex_labels[i].Draw()\n\n if label < x_limit:\n label += 5\n else:\n label += 10\n\n c1.Update()\n c1.SaveAs('{}/plot_spectrum_nubar.pdf'.format(FIGURE_DIR))\n input('Press any key to continue.')\n\n\ndef plot_spectrum_nu():\n f_nu = TFile('data/spectrum.std_osc.root')\n f_nu_g = TFile('data/spectrum.g_3.4e-23.root')\n f_nu_c = TFile('data/spectrum.c_6e-23.root')\n\n h_nu = f_nu.Get('hNu')\n h_nu_g = f_nu_g.Get('hNu')\n h_nu_c = f_nu_c.Get('hNu')\n\n h_nu.Scale(0.25)\n h_nu_g.Scale(0.25)\n h_nu_c.Scale(0.25)\n\n c1 = TCanvas('c1', 'c1', 800, 800)\n set_margin()\n\n gStyle.SetOptStat(0)\n set_h1_style(h_nu)\n h_nu.Draw()\n h_nu.GetYaxis().SetRangeUser(0, 125)\n h_nu.SetXTitle(\"Reconstructed #nu_{#mu} Energy (GeV)\")\n h_nu.SetYTitle(\"Events / 0.25 GeV\")\n h_nu.SetTitle('')\n h_nu.GetXaxis().SetLabelSize(0)\n h_nu.SetLineWidth(0)\n\n gr_nu = get_graph_from_hist(h_nu)\n set_graph_style(gr_nu)\n gr_nu.SetMarkerColor(1)\n gr_nu.SetMarkerStyle(20)\n gr_nu.SetMarkerSize(1)\n gr_nu.SetLineWidth(3)\n gr_nu.Draw('sames,pz')\n\n set_h1_style(h_nu_g)\n h_nu_g.SetLineColor(kRed)\n h_nu_g.SetLineStyle(2)\n h_nu_g.SetLineWidth(3)\n h_nu_g.Draw('sames')\n\n set_h1_style(h_nu_c)\n h_nu_c.SetLineColor(kBlue)\n h_nu_c.SetLineStyle(2)\n h_nu_c.SetLineWidth(3)\n h_nu_c.Draw('sames')\n\n lg1 = TLegend(0.46, 0.47, 0.76, 0.7)\n set_legend_style(lg1)\n lg1.SetHeader('Far Detector Prediction')\n lg1.AddEntry(gr_nu, 'g = 0, c = 0', 'lep')\n lg1.AddEntry(h_nu_g, 'g = 3.4 #times 10^{-23}, c = 0', 'l')\n lg1.AddEntry(h_nu_c, 'g = 0, c = 6 #times 10^{-23}', 'l')\n lg1.Draw()\n\n tlatex = TLatex()\n tlatex.SetTextFont(43)\n tlatex.SetTextSize(28)\n tlatex.DrawLatex(7.5, 93.5, \"10.56 #times10^{20} POT, #nu_{#mu}-mode\")\n\n preliminary = TLatex()\n preliminary.SetNDC()\n preliminary.SetTextAlign(22)\n preliminary.SetTextFont(43)\n preliminary.SetTextSize(28)\n preliminary.SetTextColor(kRed)\n preliminary.DrawLatex(0.72, 0.86, \"MINOS Preliminary\")\n\n gPad.Update()\n gPad.SetTicky()\n\n tick = 0.04\n h_nu.GetXaxis().SetTickLength(0)\n\n for top in range(1):\n print('top = {}'.format(top))\n\n gPad.Update();\n y = gPad.GetUymax() if top else 0\n sn = \"SN-\" if top else \"SN+\"\n\n gaxis_1 = TGaxis(0, y, 10, y, 0, 10, 502, sn)\n gaxis_1.SetLabelSize(h_nu.GetYaxis().GetLabelSize())\n gaxis_1.SetLabelFont(h_nu.GetYaxis().GetLabelFont())\n gaxis_1.SetTickSize(18 / 10. * tick)\n gaxis_1.Draw()\n\n gaxis_2 = TGaxis(10, y, 14, y, 10, 20, 502, sn)\n gaxis_2.SetLabelSize(0)\n gaxis_2.SetTickSize(18 / 4. * tick)\n gaxis_2.Draw()\n\n gaxis_3 = TGaxis(14, y, 18, y, 20, 50, 2, sn)\n gaxis_3.SetLabelSize(0)\n gaxis_3.SetTickSize(18 / 4. 
* tick)\n            gaxis_3.Draw()\n\n            gaxis_4 = TGaxis(12, y, 14, y, 15, 20, 1, sn)\n            gaxis_4.SetTickSize(0)\n            gaxis_4.SetLabelSize(h_nu.GetYaxis().GetLabelSize())\n            gaxis_4.SetLabelFont(h_nu.GetYaxis().GetLabelFont())\n            gaxis_4.Draw()\n\n            gaxis_5 = TGaxis(16, y, 18, y, 30, 50, 1, sn)\n            gaxis_5.SetTickSize(0)\n            gaxis_5.SetLabelSize(h_nu.GetYaxis().GetLabelSize())\n            gaxis_5.SetLabelFont(h_nu.GetYaxis().GetLabelFont())\n            gaxis_5.Draw()\n\n    c1.Update()\n    c1.SaveAs('{}/plot_spectrum_nu.pdf'.format(FIGURE_DIR))\n    input('Press any key to continue.')\n\n\n# 20180131_transition_richa\n# print_dst()\n# plot_g()\n# plot_c()\n# plot_spectrum_nubar()\nplot_spectrum_nu()\n","repo_name":"UTKLgroup/minos.junting","sub_path":"transition/transition.py","file_name":"transition.py","file_ext":"py","file_size_in_byte":12974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35697834889","text":"#ax2 + bx + c = 0\r\ndef sqrt(x):\r\n\treturn x**0.5\r\n\r\ndef NhapDL():\r\n\ts = input(\"Enter three numbers a, b, c separated by spaces: \")\r\n\tsnum = s.split()\r\n\treturn float(snum[0]), float(snum[1]), float(snum[2])\r\n\r\ndef GiaiPT1(b, c):\r\n\tif b!=0:\r\n\t\tprint(\"The equation has a single solution:\",round(-c/b,1))\r\n\telif c==0:\r\n\t\tprint(\"The equation has infinitely many solutions\")\r\n\telse:\r\n\t\tprint(\"The equation has no solution\")\r\n\r\ndef GiaiPT2(a, b, c):\r\n\tif a == 0:\r\n\t\tGiaiPT1(b, c)\r\n\telse:\r\n\t\tdelta = b*b - 4*a*c\r\n\t\tif delta > 0:\r\n\t\t\tx1 = (-b + sqrt(delta))/(2*a)\r\n\t\t\tx2 = (-b - sqrt(delta))/(2*a)\r\n\t\t\tprint(\"The equation has two distinct solutions\")\r\n\t\t\tprint(\"x1=\",round(x1,3),\"x2=\",round(x2,3))\r\n\t\telif delta == 0:\r\n\t\t\tx = (-b / (2*a))\r\n\t\t\tprint(\"The equation has a double root\")\r\n\t\t\tprint(\"x1,2 = \",round(x,1))\r\n\t\telse:\r\n\t\t\tprint(\"The equation has no solution\")\r\n\r\na,b,c = NhapDL()\r\nGiaiPT2(a, b, c)","repo_name":"DarkEnderr/PYTHON","sub_path":"giaipt.py","file_name":"giaipt.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2498384757","text":"# Databricks notebook source\n# MAGIC %md\n# MAGIC # Ingest circuits.csv file\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC #### Step 1 - Read the CSV file using the spark dataframe reader\n\n# COMMAND ----------\n\n# Import the types we want to use\nfrom pyspark.sql.types import StructType, StructField, IntegerType, StringType, DoubleType\nfrom pyspark.sql.functions import col, current_timestamp, lit\n\n# COMMAND ----------\n\ndbutils.widgets.text(\"p_data_source\", \"\")\nv_data_source = dbutils.widgets.get(\"p_data_source\")\n\ndbutils.widgets.text(\"p_file_date\", \"2021-03-21\")\nv_file_date = dbutils.widgets.get(\"p_file_date\")\n\n# COMMAND ----------\n\n# MAGIC %run \"/Formula1/includes/configuration\"\n\n# COMMAND ----------\n\n# MAGIC %run \"/Formula1/includes/common_functions\"\n\n# COMMAND ----------\n\n# Specify schema\ncircuits_schema = StructType(fields=[\n    StructField(\"circuitId\", IntegerType(), False),\n    StructField(\"circuitRef\", StringType(), True),\n    StructField(\"name\", StringType(), True),\n    StructField(\"location\", StringType(), True),\n    StructField(\"country\", StringType(), True),\n    StructField(\"lat\", DoubleType(), True),\n    StructField(\"lng\", DoubleType(), True),\n    StructField(\"alt\", IntegerType(), True),\n    StructField(\"url\", StringType(), True),\n    ])\n\n# COMMAND ----------
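\n\n# Added illustrative cell (not in the original notebook): a quick schema sanity check.\n# Creating an empty dataframe against circuits_schema prints the declared column types\n# without reading any data.\nspark.createDataFrame([], circuits_schema).printSchema()\n\n# COMMAND ----------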
\n\n# Read in the data\ncircuits_df = spark.read.option(\"header\", True).schema(circuits_schema).csv(f'{raw_folder_path}/{v_file_date}/circuits.csv')\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC #### Step 2 - Select only the required columns\n\n# COMMAND ----------\n\n# Select the required columns using df.select\ncircuits_selected_df = circuits_df.select(\"circuitId\", \"circuitRef\", \"name\", \"location\", \"country\", \"lat\", \"lng\", \"alt\")\n\n\n# COMMAND ----------\n\n# You can also use the col function\n\ncircuits_selected_df = circuits_df.select(\n    col(\"circuitId\"),\n    col(\"circuitRef\"),\n    col(\"name\"),\n    col(\"location\"),\n    col(\"country\"),\n    col(\"lat\"),\n    col(\"lng\"),\n    col(\"alt\"),\n)\n\"\"\"\nWith this method, you can apply further methods to a col, such as:\ncol(\"lat\").alias(\"latitude\"),\n\"\"\"\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC #### Step 3 - Rename the columns as required\n\n# COMMAND ----------\n\ncircuits_renamed_df = circuits_selected_df.withColumnRenamed(\"circuitId\", \"circuit_id\") \\\n.withColumnRenamed(\"circuitRef\", \"circuit_ref\") \\\n.withColumnRenamed(\"lat\", \"latitude\") \\\n.withColumnRenamed(\"lng\", \"longitude\") \\\n.withColumnRenamed(\"alt\", \"altitude\") \\\n.withColumn(\"data_source\", lit(v_file_date))\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC #### Step 4 - Add ingestion date to the dataframe\n\n# COMMAND ----------\n\n# add_ingestion_date() (from common_functions) uses .withColumn() to add the column\ncircuits_final_df = add_ingestion_date(circuits_renamed_df)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC #### Step 5 - Write to the data lake as a Delta table\n\n# COMMAND ----------\n\ncircuits_final_df.write.mode(\"overwrite\").format(\"delta\").saveAsTable(\"f1_processed.circuits\")\n\n# COMMAND ----------\n\n","repo_name":"LouisYC123/azure-databricks-f1","sub_path":"workspace/Formula1/ingestion/full_loads/ingest_circuits_file.py","file_name":"ingest_circuits_file.py","file_ext":"py","file_size_in_byte":2876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70778072195","text":"# encoding: utf-8\r\n\"\"\"\r\n@author: 程序员小小叶\r\n@contact: 3203636266@qq.com\r\n@WeChat official account: 程序员小小叶\r\n@time: 2020/2/19 12:29\r\n@file: case2.py\r\n@desc: \r\n\"\"\"\r\nfrom wxpy import *\r\n\r\nbot = Bot(cache_path=True)\r\n\r\nlover_group = bot.groups().search('群名字')[0]  # Step 1: find the group by its name\r\n\r\nlover = lover_group.search('女神')[0]  # Step 2: find the goddess by name inside the group\r\n\r\n\r\n@bot.register(chats=lover_group)  # Receive messages sent from the specified group; the sender, recv_msg.sender, is the group\r\ndef recv_send_msg(recv_msg):\r\n\tprint('Received message:', recv_msg.text)\r\n\tif recv_msg.member == lover:\r\n\t\t# Do not use recv_msg.render here, because render is the group's name\r\n\t\trecv_msg.forward(bot.file_helper, prefix='Goddess said: ')\r\n\t\treturn 'My goddess, your beauty outshines the moon and puts the flowers to shame'\r\n\r\n\r\n# Enter the Python command line so the program keeps running\r\nembed()\r\n","repo_name":"CoderMrYe/WeChatHelper","sub_path":"case2.py","file_name":"case2.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"30710591030","text":"#!/usr/bin/env python\r\n\r\n\r\n\"\"\"Post install script to check for/install the XL Driver Library.\"\"\"\r\n\r\n\r\nfrom sys import executable, argv, exit\r\nfrom os import path, system\r\nfrom time import time, sleep\r\nfrom subprocess import call\r\nfrom glob import glob\r\nfrom ctypes import WinDLL, windll\r\nfrom platform import architecture\r\n\r\nlib_path = path.normpath(path.join(path.dirname(__file__), 'lib'))\r\n\r\nupdate_xl_path = path.join(lib_path, 'update_xl_lib.py')\r\nexe_path = path.join(lib_path, 'Vector XL Driver Library Setup.exe')\r\nps_cmd = f\"Start-Process -FilePath '{exe_path}' -ArgumentList '/S /v/qn' -Wait\"\r\nps_cmd = f\"powershell -command \\\"{ps_cmd}\\\"\"
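\r\n# Added note (illustrative only): after the two assignments above, ps_cmd expands to roughly\r\n#   powershell -command \"Start-Process -FilePath '<lib_path>\\Vector XL Driver Library Setup.exe' -ArgumentList '/S /v/qn' -Wait\"\r\n# i.e. a silent installer launched through PowerShell so that system(ps_cmd) blocks until it finishes.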
\r\nversion_file = path.join(lib_path, 'version.txt')\r\n\r\nwith open(version_file, 'r') as f:\r\n    vxl_version = f.read()\r\n\r\nvxl_base_path = r'C:\\Users\\Public\\Documents\\Vector\\XL Driver Library'\r\nxl_libs = glob(f'{vxl_base_path}*')\r\narch, _ = architecture()\r\nif xl_libs:\r\n    # Grab the latest version\r\n    vxl_lib_path = sorted(xl_libs)[-1]\r\n    vxl_lib_path = path.join(vxl_lib_path, 'bin')\r\n    if arch == '64bit':\r\n        vxl_path = path.join(vxl_lib_path, 'vxlapi64.dll')\r\n    else:\r\n        vxl_path = path.join(vxl_lib_path, 'vxlapi.dll')\r\n\r\n    # The current version isn't installed. Install it.\r\n    if not path.isdir(vxl_lib_path):\r\n        system(ps_cmd)\r\nelse:\r\n    # These get installed with Vector drivers\r\n    if arch == '64bit':\r\n        vxl_path = path.join(r'C:\\Windows\\System32', 'vxlapi64.dll')\r\n    else:\r\n        vxl_path = path.join(r'C:\\Windows\\SysWOW64', 'vxlapi.dll')\r\n\r\n\r\ndef is_admin(): # noqa\r\n    try:\r\n        return windll.shell32.IsUserAnAdmin()\r\n    except Exception:\r\n        return False\r\n\r\n\r\ntry:\r\n    dll = WinDLL(vxl_path)\r\nexcept WindowsError:\r\n    if path.isfile(vxl_path):\r\n        print(f'Failed importing {vxl_path}')\r\n        exit(1)\r\n    else:\r\n        if not path.isfile(exe_path):\r\n            call([executable, update_xl_path])\r\n            if not path.isfile(exe_path):\r\n                print(f'Something went wrong running {update_xl_path} to download '\r\n                      f'{exe_path}. Either rerun this script to try again or run '\r\n                      'update_xl_lib.py manually to download the file.')\r\n                exit(1)\r\n        if not is_admin():\r\n            windll.shell32.ShellExecuteW(None, \"runas\", executable,\r\n                                         ' '.join(argv), None, 1)\r\n            # Wait 60s for the program to finish installing\r\n            start = time()\r\n            while (time() - start) < 60:\r\n                sleep(1)\r\n                if path.isfile(vxl_path):\r\n                    break\r\n            else:\r\n                print(f'Failed installing {exe_path}. 
Try installing it '\r\n 'manually and then rerunning the batch file.')\r\n exit(1)\r\n else:\r\n print('Installing Vector XL Driver Library...')\r\n system(ps_cmd)\r\n if not path.isfile(vxl_path):\r\n print(f'Something went wrong installing {exe_path}')\r\n exit(1)\r\n try:\r\n dll = WinDLL(vxl_path)\r\n except WindowsError:\r\n print(f'Failed importing {vxl_path}')\r\n exit(1)\r\n","repo_name":"cmcerove/pyvxl","sub_path":"post_install.py","file_name":"post_install.py","file_ext":"py","file_size_in_byte":3247,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"61"} +{"seq_id":"38833113345","text":"import joblib\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom xgboost import XGBRegressor\r\n\r\n\r\ndef encode(seq):\r\n '''\r\n Encode DNA sequences as a one-hot numeric array to be used for training.\r\n\r\n :parameter seq: sequence to encode.\r\n :return: one-hot-encoded array\r\n '''\r\n\r\n # Define universe of possible input values (Genetic code, 4 bases)\r\n dna_code = 'TACG'\r\n\r\n # Define a mapping of DNA nucleotides to integers\r\n char_to_int = dict((c, i) for i, c in enumerate(dna_code))\r\n\r\n # Integer encode DNA sequence\r\n int_encoded = [char_to_int[char] for char in seq]\r\n\r\n # One hot encode DNA sequence\r\n onehot_encoded = []\r\n\r\n for value in int_encoded:\r\n letter = [0 for _ in range(len(dna_code))]\r\n letter[value] = 1\r\n onehot_encoded.append(letter)\r\n\r\n return np.array(onehot_encoded)\r\n\r\n\r\ndef ohe_model(ds, seq_col=2, eff_col=3, transform=False, save=False):\r\n '''\r\n Train an Extreme Gradient Boost model using One-Hot-Encoding to represent the DNA sequences.\r\n For instance, use 'ohe_model(ds, transform=True)' for Chari dataset, 'ohe_model(ds, 2, 9)' for DeepSpCas9 and 'ohe_model(ds)' for the remaining ones.\r\n\r\n :parameter ds: dataset to train on.\r\n :parameter seq_col: # position of column containing 30-nt sequences e.g. 2nd column -> 2.\r\n :parameter eff_col: # position of column containing efficiencies e.g. 
3rd column -> 3.\r\n    :parameter transform: If True, apply square root transformation to all efficiencies (only use for Chari dataset).\r\n    :parameter save: If True, save the trained model to the working directory.\r\n    :return: the trained model.\r\n    '''\r\n\r\n    # Encode sequences and define features & labels\r\n    X = ds.iloc[:, seq_col-1].apply(encode)\r\n    X_new = np.stack(X)\r\n    X_train = X_new.reshape(X_new.shape[0], 120)\r\n\r\n    Y_train = ds.iloc[:, eff_col-1].values\r\n\r\n    # Square root transformation of efficiencies (only for Chari dataset)\r\n    if transform:\r\n        Y_train = np.sqrt(Y_train)\r\n\r\n    # Initialize and train XGB model\r\n    model = XGBRegressor(objective='reg:squarederror')\r\n    model.fit(X_train, Y_train)\r\n\r\n    # Return (and optionally save) the trained model\r\n    if save:\r\n        joblib.dump(model, 'xgb.joblib')\r\n    return model\r\n","repo_name":"VKonstantakos/CRISPR-Deep-Learning","sub_path":"Scripts/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"27568969688","text":"from PIL import Image\r\nimport pytesseract\r\n\r\nprint(pytesseract.get_languages(config=''))\r\nimg = Image.open(\"OCR/yo.jpg\")\r\n# Convert the image to text and save it into the result variable\r\nresult = pytesseract.image_to_string(img)\r\nprint(result)\r\nwith open(\"text_result.txt\", mode=\"w\", encoding=\"utf-8\") as file:\r\n    file.write(result)\r\n    print(\"ready!\")\r\n","repo_name":"NasserAlbusaidi/extract-text-from-images","sub_path":"detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17061054903","text":"import csv\nimport json\nimport os\nfrom typing import (\n    List,\n    Any\n)\n\n\ndef load_from_csv(file: str, num_lines_for_header: int = 0, delimiter=',') -> List[Any]:\n    csv_array = []\n    with open(file, 'rt') as csvfile:\n        csv_data = csv.reader(csvfile, delimiter=delimiter, quotechar='|')\n\n        count = 0\n        for row in csv_data:\n            if count >= num_lines_for_header:\n                new_row = []\n                for i in row:\n                    try:\n                        new_row.append(float(i))\n                    except ValueError:\n                        new_row.append(i)\n                csv_array.append(new_row)\n            count += 1\n    return csv_array\n\n\ndef load_from_JSON(file):\n    if not os.path.isfile(file):\n        return False\n    with open(file, 'rt') as json_data:\n        d = json.load(json_data)\n        json_data.close()\n    return d['train_input'], d['train_expected_output']\n\n\ndef load_from_general_JSON(file):\n    if not os.path.isfile(file):\n        return False\n    with open(file, 'rt') as json_data:\n        d = json.load(json_data)\n        json_data.close()\n    return d\n\n\ndef save_to_general_JSON(file, data):\n    # make directory if it doesn't exist\n    # get directory\n    directory = os.path.dirname(file)\n    if not os.path.exists(directory):\n        os.makedirs(directory)\n    with open(file, 'w') as outfile:\n        json.dump(data, outfile)\n        outfile.close()","repo_name":"hyperevo/hyp-py-tools","sub_path":"hyp_py_tools/tools/data_loading.py","file_name":"data_loading.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27512296765","text":"#!/usr/bin/env python3\n\nimport sys\n# in actual tool, these are inputs\n# max number of bp per interval file\nbp_target = 60000000\n# in each interval file, chop up into chunks of this size in bp\nintvl_target_size = 20000
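\n# Worked example (added note): with bp_target = 60000000 and intvl_target_size = 20000,\n# each output BED file covers at most 60 Mbp in total, emitted as windows of at most 20 kbp;\n# e.g. a 50,000 bp interval chr1 0-50000 becomes chr1 0-20000, chr1 20000-40000, chr1 40000-50000.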
\nbed_file = open(sys.argv[1])\n\n# simple key to hold sets of intervals for output in each file\ni = 0\nintvl_set = {}\ncur_size = 0\nfor cur_intvl in bed_file:\n    f = 0\n    if i not in intvl_set:\n        intvl_set[i] = []\n    data = cur_intvl.rstrip('\\n').split('\\t')\n    (chrom, start, end) = (data[0], data[1], data[2])\n    intvl_size = int(end) - int(start)\n    # if the interval size is already bigger than the max target, just make it its own interval list file\n    if intvl_size >= bp_target:\n        if len(intvl_set[i]) != 0:\n            i += 1\n            intvl_set[i] = []\n        f = 1\n    # similarly, if adding the interval being processed grows the interval list beyond bp_target, end that list and put the current one in a new list\n    elif cur_size + intvl_size > bp_target:\n        if len(intvl_set[i]) != 0:\n            i += 1\n            intvl_set[i] = []\n        cur_size = intvl_size\n    # if adding to the current list still stays under the target, add to the current list\n    else:\n        cur_size += intvl_size\n    intvl_set[i].append([chrom, start, end])\n    if f == 1:\n        i += 1\n        cur_size = 0\n\nfor set_i, invtl_list in sorted(intvl_set.items()):\n    set_size = 0\n    out = open(\"set_\" + str(set_i) + \".bed\", \"w\")\n    # for each interval list set in the dict, split intervals into intvl_target_size pieces\n    for intervals in invtl_list:\n        (chrom, start, end) = (intervals[0], intervals[1], intervals[2])\n        intvl_size = int(end) - int(start)\n        set_size += intvl_size\n        for j in range(int(start), int(end), intvl_target_size):\n            new_end = j + intvl_target_size\n            if new_end > int(end):\n                new_end = end\n            out.write(chrom + \"\\t\" + str(j) + \"\\t\" + str(new_end) + \"\\n\")\n    # for informational purposes, output total number of bp covered in each list\n    sys.stderr.write(\"Set \" + str(set_i) + \" size:\\t\" + str(set_size) + \"\\n\")\n    out.close()","repo_name":"kids-first/kf-somatic-workflow","sub_path":"dev/custom_split.py","file_name":"custom_split.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"13050135842","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('cacao', '0008_auto_20150123_2115'),\n    ]\n\n    operations = [\n        migrations.AlterModelOptions(\n            name='content',\n            options={'ordering': ['peso'], 'verbose_name': 'Contenido', 'verbose_name_plural': 'Contenidos'},\n        ),\n        migrations.AlterField(\n            model_name='content',\n            name='peso',\n            field=models.PositiveIntegerField(help_text=b'Entre mayor sea el peso mas al fondo se ubica', verbose_name=b'Peso del Contenido'),\n            preserve_default=True,\n        ),\n    ]\n","repo_name":"CacaoMovil/guia-de-cacao-django","sub_path":"cacao_app/cacao/migrations/0009_auto_20150123_2143.py","file_name":"0009_auto_20150123_2143.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23609585581","text":"#!/usr/bin/python3\r\nimport sys\r\n\r\nfile_prefix = 'B-small-attempt0'\r\n\r\nfilein = open(file_prefix + '.in')\r\nfileout = sys.stdout if 'sample' in file_prefix else open(file_prefix + '.out', 'w')\r\nlinein = lambda: filein.readline().strip()\r\nlineout = lambda s: fileout.write(s + '\\n')\r\n\r\nncases = int(linein())\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_60/81.py","file_name":"81.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24526256888","text":"#!/usr/bin/env python\r\n\r\nimport sys\r\n\r\ndef main():\r\n\t#f = file(\"test.in\")\r\n\tf = file(\"B-large.in\")\r\n\t#fout = file(\"test.out\", \"w\")\r\n\tfout = file(\"B-large.out\", \"w\")\r\n\tC = int(f.readline())\r\n\tcnt = 1\r\n\tfor i in range(C):\r\n\t\tnswap = 0\r\n\t\tinc_swap = 0\r\n\t\tcnt_chicks = 0\r\n\t\tN, K, B, T = map(int, f.readline().strip().split())\r\n\t\tX = map(int, f.readline().strip().split())\r\n\t\tV = map(int, f.readline().strip().split())\r\n\t\tTM = [(B - X[j]) / float(V[j]) for j in 
range(N)]\r\n\t\tTM.reverse()\r\n\t\tfor tm in TM:\r\n\t\t\tif tm <= T:\r\n\t\t\t\tcnt_chicks += 1\r\n\t\t\t\tnswap += inc_swap\r\n\t\t\telse: inc_swap += 1\r\n\t\t\tif cnt_chicks >= K: break\r\n\t\tprint >>fout, \"Case #%d: %s\" % (cnt, str(nswap) if cnt_chicks >= K else \"IMPOSSIBLE\")\r\n\t\tcnt += 1\r\n\r\nif __name__ == \"__main__\": main()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_60/81.py","file_name":"81.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24526256888","text":"\"\"\"\nThis script was written for CASA 5.1.1\n\nDatasets calibrated (in order of date observed):\nSB1: 2015.1.00964.S/HD143006_a_06_TE \n Observed 14 June 2016 (2 execution blocks)\n PI: K. Oberg\n As delivered to PI\nSB2: 2015.1.00964.S/HD143006_a_06_TC \n Observed 2 July 2016\n PI: K. Oberg\n Downloaded from archive and calibrated \nSB3: 2016.1.00484.L/WSB_52_a_06_TM1 \n Observed 14 May 2017, 17 May 2017, and 19 May 2017 (3 execution blocks)\n PI: S. Andrews\n As delivered to PI\nLB1: 2016.1.00484.L/HD_143006_a_06_TM1 \n Observed 26 September 2017 and 26 November 2017 (2 execution blocks)\n PI: S. Andrews\n As delivered to PI\n\n\"\"\"\nimport os\n\nexecfile('/pool/firebolt1/p484/reduction_scripts/reduction_utils.py')\n\nskip_plots = True #if this is true, all of the plotting and inspection steps will be skipped and the script can be executed non-interactively in CASA if all relevant values have been hard-coded already \n\n#to fill this dictionary out, use listobs for the relevant measurement set \n\nprefix = 'HD143006' #string that identifies the source and is at the start of the name for all output files\n\n#Note that if you are downloading data from the archive, your SPW numbering may differ from the SPWs in this script depending on how you split your data out!! 
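\n# (Added hint, illustrative only): run listobs on each measurement set to confirm the SPW IDs\n# before editing line_spws below, e.g. listobs(vis='calibrated_final.ms', listfile='listobs.txt')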
\ndata_params = {'SB1': {'vis' : '/data/astrochem1/jane/diskevol/uppersco/HD143006/data_1.3mm/HD143006_calibrated.ms',\n                       'name' : 'SB1',\n                       'field': 'HD143006',\n                       'line_spws': np.array([0,9]), #SpwIDs of windows with lines that need to be flagged (this needs to be edited for each short baseline dataset)\n                       'line_freqs': np.array([2.30538e11, 2.30538e11]), #frequencies (Hz) corresponding to line_spws (in most cases this is just the 12CO 2-1 line at 230.538 GHz)\n                      },\n               'SB2': {'vis' : '/data/astrochem1/jane/HD143006_TC.ms',\n                       'name' : 'SB2',\n                       'field': 'HD143006',\n                       'line_spws': np.array([0]), \n                       'line_freqs': np.array([2.30538e11]), \n                      },\n               'SB3': {'vis' : '/data/sandrews/LP/2016.1.00484.L/science_goal.uid___A001_Xbd4641_X1e/group.uid___A001_Xbd4641_X25/member.uid___A001_Xbd4641_X26/calibrated/calibrated_final.ms',\n                       'name' : 'SB3',\n                       'field': 'HD_143006',\n                       'line_spws': np.array([0,4,8]), \n                       'line_freqs': np.array([2.30538e11, 2.30538e11, 2.30538e11]), \n                      }, \n               'LB1': {'vis' : '/data/sandrews/LP/2016.1.00484.L/science_goal.uid___A001_Xbd4641_X1a/group.uid___A001_Xbd4641_X1b/member.uid___A001_Xbd4641_X1c/calibrated/calibrated_final.ms',\n                       'name' : 'LB1',\n                       'field' : 'HD_143006',\n                       'line_spws': np.array([3,7]), \n                       'line_freqs': np.array([2.30538e11, 2.30538e11]), \n                      }\n               }\n\n# initialize the weight spectrum in these measurement sets, so that all datasets have WEIGHT_SPECTRUM initialized consistently\ninitweights(vis = data_params['SB2']['vis'], wtmode = 'weight', dowtsp = True)\ninitweights(vis = data_params['LB1']['vis'], wtmode = 'weight', dowtsp = True)\n#Amplitudes of channels 0 to 200 in SPW 0 of SB2 look problematic\nflagmanager(vis = data_params['SB2']['vis'], mode = 'save', versionname = 'original_flags', comment = 'Original flag states') #save flag state before flagging the problematic channels\nflagdata(vis=data_params['SB2']['vis'], mode='manual', spw='0:0~200', flagbackup=False, field = data_params['SB2']['field']) #flag the problematic channels\n\n\n\n\nif not skip_plots:\n    \"\"\"\n    You can do this if you want to inspect amp vs. channel for every spectral window. This can be useful for deciding what you need to flag. \n    Alternatively, if the dataset was pipeline-calibrated, you can go to the \"qa\" folder in the data products package downloaded from the ALMA archive, \n    then go down a few directories until you see an \"index.html\" file. If you open that up in a browser, you'll see a \"By Task\" bar on the top. Click on that,\n    go to the left bar, and click \"17. hif_applycal\". Here, you can find a lot of plots of the amplitude and phases of the calibrator data and the science target. \n    This can help you see whether any of the data are problematic. \n    \"\"\"\n    for i in data_params.keys():\n        plotms(vis=data_params[i]['vis'], xaxis='channel', yaxis='amplitude', field=data_params[i]['field'], \n               ydatacolumn='data', avgtime='1e8', avgscan=True, avgbaseline=True, \n               iteraxis='spw')\n\n#######\n# Here you may wish to flag problematic data before we do spectral line flagging to form an averaged continuum. 
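\n# (Added note, a rough sketch only -- get_flagchannels is defined in reduction_utils and its\n# exact implementation is not reproduced here): a velocity range is converted to sky frequency\n# with nu = restfreq * (1 - v / c), and those frequencies are then mapped onto channel indices\n# of each line SPW. For example, for the 12CO 2-1 line at 230.538 GHz:\n#   nu_lo = 2.30538e11 * (1 - 15.e3 / 2.99792458e8)   # ~230.526 GHz at v = +15 km/s\n#   nu_hi = 2.30538e11 * (1 - 0.e3 / 2.99792458e8)    #  230.538 GHz at v = 0 km/s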
\n#\n# If so, use something like \n# flagmanager(vis = msfile, mode = 'save', versionname = 'init_cal_flags', comment = 'Flag states immediately after initial calibration') \n# If you need to undo the flagging, use\n# flagmanager(vis = msfile, mode = 'restore', versionname = 'init_cal_flags') #restore flagged spectral line channels \n########\n\nSB1_flagchannels = get_flagchannels(data_params['SB1'], prefix, velocity_range = np.array([0, 15]))\navg_cont(data_params['SB1'], prefix, flagchannels = SB1_flagchannels, contspws = '0~2, 9~11', width_array = [960,960,256, 960,960,256])\n#Averaged continuum dataset saved to HD143006_SB1_initcont.ms\n\nSB2_flagchannels = get_flagchannels(data_params['SB2'], prefix, velocity_range = np.array([0, 15]))\navg_cont(data_params['SB2'], prefix, flagchannels = SB2_flagchannels, contspws = '0~2', width_array = [960,960,256])\n#Averaged continuum dataset saved to HD143006_SB2_initcont.ms\n\nfor i in ['SB3', 'LB1']: \n    \"\"\"\n    Identify channels to flag based on the known velocity range of the line emission. The velocity range is based on line images from early reductions. If you are starting from scratch, \n    you can estimate the range from the plotms command above. You may wish to limit your uvrange to 0~300 or so to only view the baselines with the highest amplitudes. \n    \"\"\"\n    flagchannels_string = get_flagchannels(data_params[i], prefix, velocity_range = np.array([0, 15]))\n    \"\"\"\n    Produces spectrally averaged continuum datasets\n    If you only want to include a subset of the windows, you can manually pass in values for contspws and width_array, e.g.\n    avg_cont(data_params[i], output_prefix, flagchannels = flagchannels_string, contspws = '0~2', width_array = [480,8,8]).\n    If you don't pass in values, all of the SPWs will be split out and the widths will be computed automatically to enforce a maximum channel width of 125 MHz.\n    WARNING: Only use the avg_cont function if the total bandwidth is recorded correctly in the original MS. There is sometimes a bug in CASA that records incorrect total bandwidths\n    \"\"\"\n    # Flagchannels input string for SB3: '0:1915~1963, 4:1915~1963, 8:1915~1963'\n    #Averaged continuum dataset saved to HD143006_SB3_initcont.ms\n    # Flagchannels input string for LB1: '3:1905~1953, 7:1905~1953'\n    #Averaged continuum dataset saved to HD143006_LB1_initcont.ms\n\n    avg_cont(data_params[i], prefix, flagchannels = flagchannels_string)\n\n# sample command to check that amplitude vs. uvdist looks normal\n# plotms(vis=prefix+'_SB1_initcont.ms', xaxis='uvdist', yaxis='amp', coloraxis='spw', avgtime='30', avgchannel='16')\n\n\"\"\"\nQuick imaging of every execution block in the measurement set using tclean. \nThe threshold, scales, and mask should be adjusted for each source.\nIn this case, we picked our threshold, scales, and mask from previous reductions of the data. You may wish to experiment with these values when imaging. \nThe threshold is ~3-4x the rms, the mask is an ellipse that covers all the emission and has roughly the same geometry, and we choose 4 to 6 scales such that the first scale is 0 (a point), and the largest is ~half the major axis of the mask.\nThe mask angle and the semimajor and semiminor axes should be the same for all imaging. The center is not necessarily fixed because of potential misalignments between observations. 
\n\"\"\"\n\nmask_radius = 1.3 #radius of mask in arcsec\n\n\nSB1_mask = 'circle[[%s, %s], %.1farcsec]' % ('15h58m36.90s', '-22.57.15.63', mask_radius)\n\nLB1_mask = 'circle[[%s, %s], %.1farcsec]' % ('15h58m36.90s', '-22.57.15.57', mask_radius)\n\nSB_scales = [0, 5, 10, 20]\nLB_scales = [0, 25, 50, 75, 100]\n\"\"\"\nIn this section, we are imaging every execution block to check spatial alignment \n\"\"\"\n\nif not skip_plots:\n #images are saved in the format prefix+'_name_initcont_exec#.ms'\n image_each_obs(data_params['SB1'], prefix, mask = SB1_mask, scales = SB_scales, threshold = '0.75mJy', interactive = False, robust = -2)\n\n tclean_wrapper(vis = prefix+'_SB2_initcont.ms', imagename = prefix+'_SB2_initcont', mask = SB1_mask, scales = SB_scales, threshold = '0.75mJy', interactive = False, robust = -2)\n\n image_each_obs(data_params['SB3'], prefix, mask = SB1_mask, scales = SB_scales, threshold = '0.4mJy', interactive =False, robust = -2)\n # inspection of images do not reveal additional bright background sources \n\n image_each_obs(data_params['LB1'], prefix, mask = LB1_mask, scales = LB_scales, threshold = '0.06mJy', interactive = False)\n\n \"\"\"\n Since the source looks axisymmetric, we will fit a Gaussian to the disk to estimate the location of the peak in each image and record the output.\n We are also very roughly estimating the PA and inclination for checking the flux scale offsets later (these are NOT the position angles and inclinations used for analysis of the final image products.\n Here, we are using the CLEAN mask to restrict the region over which the fit is occurring, but you may wish to shrink the region even further if your disk structure is complex \n\n\n fit_gaussian(prefix+'_SB1_initcont_exec0.image', region = SB1_mask)\n #Peak of Gaussian component identified with imfit: ICRS 15h46m44.710036s -34d30m36.08839s\n\n fit_gaussian(prefix+'_SB1_initcont_exec1.image', region = SB1_mask)\n #Peak of Gaussian component identified with imfit: ICRS 15h46m44.708978s -34d30m36.12469s\n\n fit_gaussian(prefix+'_LB1_initcont_exec0.image', region = 'circle[[%s, %s], %.1farcsec]' % ('15h46m44.71s', '-34.30.36.09', 0.2)) #shrinking the fit region because of the noisiness of the image\n #Peak of Gaussian component identified with imfit: ICRS 15h46m44.710940s -34d30m36.08832s\n \"\"\"\n fit_gaussian(prefix+'_LB1_initcont_exec1.image', region = 'circle[[%s, %s], %.1farcsec]' % ('15h58m36.90s', '-22.57.15.57', 0.1))\n #Peak of Gaussian component identified with imfit: ICRS 15h58m36.898560s -22d57m15.59690s\n #Peak in J2000 coordinates: 15:58:36.89923, -022:57:15.582620\n #Pixel coordinates of peak: x = 1502.034 y = 1506.704\n #PA of Gaussian component: 173.15 deg\n #Inclination of Gaussian component: 41.88 deg\n\n \n\n\n\"\"\"\nReassigning to a common phase center (chose center of LB1, exec1, but this is fairly arbitrary)\n\"\"\"\n\nsplit_all_obs(prefix+'_SB1_initcont.ms', prefix+'_SB1_initcont_exec')\nsplit_all_obs(prefix+'_SB2_initcont.ms', prefix+'_SB2_initcont_exec')\nsplit_all_obs(prefix+'_SB3_initcont.ms', prefix+'_SB3_initcont_exec')\nsplit_all_obs(prefix+'_LB1_initcont.ms', prefix+'_LB1_initcont_exec')\n#Saving observation 0 of HD143006_SB1_initcont.ms to HD143006_SB1_initcont_exec0.ms\n#Saving observation 1 of HD143006_SB1_initcont.ms to HD143006_SB1_initcont_exec1.ms\n#Saving observation 0 of HD143006_SB2_initcont.ms to HD143006_SB1_initcont_exec0.ms\n#Saving observation 0 of HD143006_SB3_initcont.ms to HD143006_SB1_initcont_exec0.ms\n#Saving observation 1 of 
HD143006_SB3_initcont.ms to HD143006_SB3_initcont_exec1.ms\n#Saving observation 2 of HD143006_SB3_initcont.ms to HD143006_SB3_initcont_exec2.ms\n#Saving observation 0 of HD143006_LB1_initcont.ms to HD143006_LB1_initcont_exec0.ms\n#Saving observation 1 of HD143006_LB1_initcont.ms to HD143006_LB1_initcont_exec1.ms\n\n\n\ncommon_dir = 'J2000 15h58m36.89923s -022.57.15.58262' #choose peak of second execution of LB1 to be the common direction (the better-quality of the high-res observations) \n\n#need to change to J2000 coordinates\nmask_ra = '15h58m36.89923s'\nmask_dec = '-22.57.15.58262'\ncommon_mask = 'circle[[%s, %s], %.1farcsec]' % (mask_ra, mask_dec, mask_radius)\n\n\"\"\"\nshiftname = prefix+'_SB1_initcont_exec0_shift'\nos.system('rm -rf %s.ms' % shiftname)\nfixvis(vis=prefix+'_SB1_initcont_exec0.ms', outputvis=shiftname+'.ms', field = data_params['SB1']['field'], phasecenter='ICRS 15h46m44.710036s -34d30m36.08839s') #get phasecenter from Gaussian fit \nfixplanets(vis = shiftname+'.ms', field = data_params['SB1']['field'], direction = common_dir) #fixplanets works only with J2000, not ICRS\ntclean_wrapper(vis = shiftname+'.ms', imagename = shiftname, mask = common_mask, scales = SB_scales, threshold = '0.25mJy')\nfit_gaussian(shiftname+'.image', region = common_mask)\n#Peak of Gaussian component identified with imfit: J2000 15h46m44.709386s -34d30m36.07571s\n\nshiftname = prefix+'_SB1_initcont_exec1_shift'\nos.system('rm -rf %s.ms' % shiftname)\nfixvis(vis=prefix+'_SB1_initcont_exec1.ms', outputvis=shiftname+'.ms', field = data_params['SB1']['field'], phasecenter='ICRS 15h46m44.708978s -34d30m36.12469s') \nfixplanets(vis = shiftname+'.ms', field = data_params['SB1']['field'], direction = common_dir)\ntclean_wrapper(vis = shiftname+'.ms', imagename = shiftname, mask = common_mask, scales = SB_scales, threshold = '0.25mJy')\nfit_gaussian(shiftname+'.image', region = common_mask)\n#Peak of Gaussian component identified with imfit: J2000 15h46m44.709384s -34d30m36.07577s\n\nshiftname = prefix+'_LB1_initcont_exec0_shift'\nos.system('rm -rf %s.ms' % shiftname)\nfixvis(vis=prefix+'_LB1_initcont_exec0.ms', outputvis=shiftname+'.ms', field = data_params['LB1']['field'], phasecenter='ICRS 15h46m44.710940s -34d30m36.08832s') #get phasecenter from Gaussian fit \nfixplanets(vis = shiftname+'.ms', field = data_params['LB1']['field'], direction = common_dir)\ntclean_wrapper(vis = shiftname+'.ms', imagename = shiftname, mask = common_mask, scales = LB_scales, threshold = '0.09mJy')\nfit_gaussian(shiftname+'.image', region = 'circle[[%s, %s], %.1farcsec]' % ('15h46m44.709s', '-34.30.36.076', 0.2))\n#Peak of Gaussian component identified with imfit: J2000 15h46m44.709428s -34d30m36.07547s\n\n\nshiftname = prefix+'_LB1_initcont_exec1_shift'\nos.system('rm -rf %s.ms' % shiftname)\nfixvis(vis=prefix+'_LB1_initcont_exec1.ms', outputvis=shiftname+'.ms', field = data_params['LB1']['field'], phasecenter='ICRS 15h46m44.708871s -34d30m36.09063s') #get phasecenter from Gaussian fit \nfixplanets(vis = shiftname+'.ms', field = data_params['LB1']['field'], direction = common_dir)\ntclean_wrapper(vis = shiftname+'.ms', imagename = shiftname, mask = common_mask, scales = LB_scales, threshold = '0.06mJy')\nfit_gaussian(shiftname+'.image', region = common_mask)\n#Peak of Gaussian component identified with imfit: J2000 15h46m44.709382s -34d30m36.07596s\n\"\"\"\n\n\"\"\"\nAfter aligning the images, we want to check if the flux scales seem consistent between execution blocks (within ~5%)\nFirst, we check the 
uid___xxxxx.casa_commands.log in the log directory of the data products folder (or the calibration script in the manual case) to check whether the calibrator catalog matches up with the input flux density values for the calibrators\n(You should also check the plots of the calibrators in the data products to make sure that the amplitudes look consistent with the models that were inserted)\n\"\"\"\n\n\n\"\"\"\nPipeline flux models\n SB1, both executions: J1517-2422 = 2.4808 \n SB2 was calibrated with Titan\n SB3, EB0: J1517-2422 = 1.944 Jy at 232.610 GHz\n SB3, EB1: J1733-1304 = 1.610 Jy at 232.609 GHz\n SB3, EB2: J1517-2422 = 2.108 Jy at 232.609 GHz\n LB1, EB0: J1733-1304 = 1.8864 Jy at 232.584GHz\n LB1, EB1: J1427-4206 = 2.5771 Jy at 232.605GHz\n\"\"\"\n\n\nau.getALMAFlux('J1517-2422', frequency = '230.530GHz', date = '2016/06/14') # SB1, both EBs\nau.getALMAFlux('J1517-2422', frequency = '232.610GHz', date = '2017/05/14') # SB3, EB0\nau.getALMAFlux('J1733-1304', frequency = '232.609GHz', date = '2017/05/17') # SB3, EB1\nau.getALMAFlux('J1517-2422', frequency = '232.609GHz', date = '2017/05/19') # SB3, EB2\nau.getALMAFlux('J1733-1304', frequency = '232.584GHz', date = '2017/09/26') #LB1, EB0\nau.getALMAFlux('J1427-4206', frequency = '232.584GHz', date = '2017/11/26') #LB1, EB1\n\n\n\"\"\"\nSB1 (both executions)\nClosest Band 3 measurement: 2.910 +- 0.110 (age=-2 days) 103.5 GHz\nClosest Band 3 measurement: 2.790 +- 0.100 (age=-2 days) 91.5 GHz\nClosest Band 7 measurement: 2.190 +- 0.130 (age=+5 days) 343.5 GHz\ngetALMAFluxCSV(): Fitting for spectral index with 1 measurement pair of age -5 days from 2016/06/14, with age separation of 0 days\n 2016/06/19: freqs=[103.49, 91.46, 337.46], fluxes=[2.95, 2.84, 2.09]\n/data/astrochem1/jane/casa-release-5.1.1-5.el6/lib/python2.7/site-packages/matplotlib/collections.py:446: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison\n if self._edgecolors == 'face':\nMedian Monte-Carlo result for 230.530000 = 2.356563 +- 0.195613 (scaled MAD = 0.193102)\nResult using spectral index of -0.225942 for 230.530 GHz from 2.910 Jy at 103.490 GHz = 2.428309 +- 0.195613 Jy\n\n\nSB3, EB0:\nClosest Band 3 measurement: 2.420 +- 0.060 (age=+0 days) 91.5 GHz\nClosest Band 7 measurement: 1.840 +- 0.090 (age=-1 days) 343.5 GHz\ngetALMAFluxCSV(): Fitting for spectral index with 1 measurement pair of age -1 days from 2017/05/14, with age separation of 0 days\n 2017/05/15: freqs=[103.49, 91.46, 343.48], fluxes=[2.55, 2.49, 1.84]\nMedian Monte-Carlo result for 232.610000 = 2.043380 +- 0.159244 (scaled MAD = 0.157684)\nResult using spectral index of -0.234794 for 232.610 GHz from 2.420 Jy at 91.460 GHz = 1.943706 +- 0.159244 Jy\n\nSB3, EB1:\nClosest Band 3 measurement: 3.020 +- 0.060 (age=+0 days) 103.5 GHz\nClosest Band 7 measurement: 1.190 +- 0.060 (age=+0 days) 343.5 GHz\ngetALMAFluxCSV(): Fitting for spectral index with 1 measurement pair of age 0 days from 2017/05/17, with age separation of 0 days\n 2017/05/17: freqs=[103.49, 343.48], fluxes=[3.02, 1.19]\nMedian Monte-Carlo result for 232.609000 = 1.610663 +- 0.129086 (scaled MAD = 0.126920)\nResult using spectral index of -0.776310 for 232.609 GHz from 3.020 Jy at 103.490 GHz = 1.610486 +- 0.129086 Jy\n\nSB3, EB2:\nClosest Band 3 measurement: 2.550 +- 0.060 (age=+4 days) 103.5 GHz\nClosest Band 3 measurement: 2.490 +- 0.050 (age=+4 days) 91.5 GHz\nClosest Band 7 measurement: 1.750 +- 0.060 (age=+2 days) 343.5 GHz\ngetALMAFluxCSV(): Fitting for spectral index with 1 
measurement pair of age 4 days from 2017/05/19, with age separation of 0 days\n 2017/05/15: freqs=[103.49, 91.46, 343.48], fluxes=[2.55, 2.49, 1.84]\nMedian Monte-Carlo result for 232.609000 = 2.040585 +- 0.161160 (scaled MAD = 0.157430)\nResult using spectral index of -0.234794 for 232.609 GHz from 2.520 Jy at 97.475 GHz = 2.054524 +- 0.161160 Jy\n\nLB1, EB0\nClosest Band 3 measurement: 3.070 +- 0.080 (age=-6 days) 103.5 GHz\nClosest Band 3 measurement: 3.250 +- 0.090 (age=-6 days) 91.5 GHz\nClosest Band 7 measurement: 1.290 +- 0.050 (age=+3 days) 343.5 GHz\ngetALMAFluxCSV(): Fitting for spectral index with 1 measurement pair of age -6 days from 2017/09/26, with age separation of 0 days\n 2017/10/02: freqs=[103.49, 91.46, 343.48], fluxes=[3.07, 3.25, 1.49]\nMedian Monte-Carlo result for 232.584000 = 1.886107 +- 0.162175 (scaled MAD = 0.161563)\nResult using spectral index of -0.593201 for 232.584 GHz from 3.070 Jy at 103.490 GHz = 1.898980 +- 0.162175 Jy\n Out[156]: \n{'ageDifference': 9.0,\n 'fluxDensity': 1.898980410812134,\n 'fluxDensityUncertainty': 0.16217529776215703,\n 'meanAge': -6.0,\n 'monteCarloFluxDensity': 1.8861065974358149,\n 'spectralIndex': -0.5932012954549899,\n 'spectralIndexAgeOldest': -6,\n 'spectralIndexAgeSeparation': 0,\n 'spectralIndexAgeYoungest': -6,\n 'spectralIndexNPairs': 1,\n 'spectralIndexUncertainty': 0.033328174157397271}\n\nLB1, EB1\nau.getALMAFlux('J1427-4206', date = '2017/11/26', frequency = '232.584GHz')\nClosest Band 3 measurement: 4.430 +- 0.080 (age=+1 days) 91.5 GHz\nClosest Band 7 measurement: 2.060 +- 0.070 (age=+3 days) 343.5 GHz\ngetALMAFluxCSV(): Fitting for spectral index with 1 measurement pair of age 3 days from 2017/11/26, with age separation of 0 days\n 2017/11/23: freqs=[91.46, 343.48], fluxes=[4.44, 2.06]\nMedian Monte-Carlo result for 232.584000 = 2.585165 +- 0.149040 (scaled MAD = 0.149264)\nResult using spectral index of -0.580360 for 232.584 GHz from 4.430 Jy at 91.460 GHz = 2.577244 +- 0.149040 Jy\n Out[157]: \n{'ageDifference': 2.0,\n 'fluxDensity': 2.5772438170147502,\n 'fluxDensityUncertainty': 0.14904034347703818,\n 'meanAge': 1.0,\n 'monteCarloFluxDensity': 2.5851648253966402,\n 'spectralIndex': -0.58036020757522699,\n 'spectralIndexAgeOldest': 3,\n 'spectralIndexAgeSeparation': 0,\n 'spectralIndexAgeYoungest': 3,\n 'spectralIndexNPairs': 1,\n 'spectralIndexUncertainty': 0.039958971258299378}\n\"\"\"\n\n\"\"\"\nHere we export averaged visibilities to npz files and then plot the deprojected visibilities to compare the amplitude scales\n\"\"\"\n\n#HD143006 is asymmetric, but comparing the radially averaged profiles will still provide an indication of the flux scale offsets \nPA = 0 \nincl = 0\n\nif not skip_plots:\n for msfile in [prefix+'_SB1_initcont_exec0.ms', prefix+'_SB1_initcont_exec1.ms', prefix+'_SB2_initcont_exec0.ms', prefix+'_SB3_initcont_exec0.ms', prefix+'_SB3_initcont_exec1.ms',prefix+'_SB3_initcont_exec2.ms', prefix+'_LB1_initcont_exec0.ms', prefix+'_LB1_initcont_exec1.ms']:\n export_MS(msfile)\n #plot deprojected visibility profiles of all the execution blocks\n\n\n plot_deprojected([prefix+'_SB1_initcont_exec0.vis.npz', prefix+'_SB1_initcont_exec1.vis.npz', prefix+'_SB2_initcont_exec0.vis.npz', prefix+'_SB3_initcont_exec0.vis.npz', \n prefix+'_SB3_initcont_exec1.vis.npz', prefix+'_SB3_initcont_exec2.vis.npz'], PA = PA, incl = incl)\n\n #there seems to be small amounts of scatter between the different observations. 
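\n # (The \"scaling factor for gencal\" quoted in the outputs below is the square root of the measured flux ratio, e.g. sqrt(1.0185) = 1.009, because gencal amplitude corrections are applied to the antenna-based voltage gains rather than directly to the visibilities.)\n # 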
We don't know which one is \"correct,\" but we want the fluxes to be consistent, so we rescale to the observation that is roughly in the middle (SB3, EB1)\n\n estimate_flux_scale(reference = prefix+'_SB3_initcont_exec1.vis.npz', comparison = prefix+'_SB1_initcont_exec0.vis.npz', incl = incl, PA = PA)\n #The ratio of the fluxes of HD143006_SB1_initcont_exec0.vis.npz to HD143006_SB3_initcont_exec1.vis.npz is 1.01850\n #The scaling factor for gencal is 1.009 for your comparison measurement\n #The error on the weighted mean ratio is 1.892e-03, although it's likely that the weights in the measurement sets are off by some constant factor\n\n\n estimate_flux_scale(reference = prefix+'_SB3_initcont_exec1.vis.npz', comparison = prefix+'_SB1_initcont_exec1.vis.npz', incl = incl, PA = PA)\n #The ratio of the fluxes of HD143006_SB1_initcont_exec1.vis.npz to HD143006_SB3_initcont_exec1.vis.npz is 0.95363\n #The scaling factor for gencal is 0.977 for your comparison measurement\n #The error on the weighted mean ratio is 1.778e-03, although it's likely that the weights in the measurement sets are off by some constant factor\n\n\n estimate_flux_scale(reference = prefix+'_SB3_initcont_exec1.vis.npz', comparison = prefix+'_SB2_initcont_exec0.vis.npz', incl = incl, PA = PA)\n #The ratio of the fluxes of HD143006_SB2_initcont_exec0.vis.npz to HD143006_SB3_initcont_exec1.vis.npz is 0.90750\n #The scaling factor for gencal is 0.953 for your comparison measurement\n #The error on the weighted mean ratio is 1.594e-03, although it's likely that the weights in the measurement sets are off by some constant factor\n\n estimate_flux_scale(reference = prefix+'_SB3_initcont_exec1.vis.npz', comparison = prefix+'_SB3_initcont_exec0.vis.npz', incl = incl, PA = PA)\n #The ratio of the fluxes of HD143006_SB3_initcont_exec0.vis.npz to HD143006_SB3_initcont_exec1.vis.npz is 0.91948\n #The scaling factor for gencal is 0.959 for your comparison measurement\n #The error on the weighted mean ratio is 1.743e-03, although it's likely that the weights in the measurement sets are off by some constant factor\n\n estimate_flux_scale(reference = prefix+'_SB3_initcont_exec1.vis.npz', comparison = prefix+'_SB3_initcont_exec2.vis.npz', incl = incl, PA = PA)\n\n #The ratio of the fluxes of HD143006_SB3_initcont_exec2.vis.npz to HD143006_SB3_initcont_exec1.vis.npz is 1.06962\n #The scaling factor for gencal is 1.034 for your comparison measurement\n #The error on the weighted mean ratio is 1.757e-03, although it's likely that the weights in the measurement sets are off by some constant factor\n\n estimate_flux_scale(reference = prefix+'_SB3_initcont_exec1.vis.npz', comparison = prefix+'_LB1_initcont_exec0.vis.npz', incl = incl, PA = PA)\n #looks like the offset is due to phase de-correlation rather than a fluxcal issue \n\n estimate_flux_scale(reference = prefix+'_SB3_initcont_exec1.vis.npz', comparison = prefix+'_LB1_initcont_exec1.vis.npz', incl = incl, PA = PA)\n #The ratio of the fluxes of HD143006_LB1_initcont_exec1.vis.npz to HD143006_SB3_initcont_exec1.vis.npz is 0.86208\n #The scaling factor for gencal is 0.928 for your comparison measurement\n #The error on the weighted mean ratio is 2.021e-03, although it's likely that the weights in the measurement sets are off by some constant factor\n\n plot_deprojected([prefix+'_SB2_initcont_exec0.vis.npz', prefix+'_SB3_initcont_exec1.vis.npz'], PA = PA, incl = incl, fluxscale = [1/0.89,1])\n\n\n #We replot the deprojected visibilities with rescaled factors to check that the values 
make sense\n plot_deprojected([prefix+'_SB1_initcont_exec0.vis.npz', prefix+'_SB1_initcont_exec1.vis.npz', prefix+'_SB2_initcont_exec0.vis.npz', prefix+'_SB3_initcont_exec0.vis.npz', \n prefix+'_SB3_initcont_exec1.vis.npz', prefix+'_SB3_initcont_exec2.vis.npz', prefix+'_LB1_initcont_exec0.vis.npz',prefix+'_LB1_initcont_exec1.vis.npz'], \n PA = PA, incl = incl, fluxscale = [1, 1/0.94, 1/0.89, 1/0.92, 1, 1/1.07, 1, 1/0.86])\n\n#flux offsets for SB1 and SB2 from SB3 are within what one would expect based on the slight frequency difference\n#now correct the flux of the discrepant datasets\nrescale_flux(prefix+'_SB3_initcont_exec0.ms', [0.959])\n#Splitting out rescaled values into new MS: HD143006_SB3_initcont_exec0_rescaled.ms\nrescale_flux(prefix+'_SB3_initcont_exec2.ms', [1.034])\n#Splitting out rescaled values into new MS: HD143006_SB3_initcont_exec2_rescaled.ms\nrescale_flux(prefix+'_LB1_initcont_exec1.ms', [0.928])\n\n\n\"\"\"\nStart of self-calibration of the short-baseline data \n\"\"\"\n#merge the short-baseline execution blocks into a single MS\nSB_cont_p0 = prefix+'_SB_contp0'\nos.system('rm -rf %s*' % SB_cont_p0)\n#pay attention here and make sure you're selecting the shifted (and potentially rescaled) measurement sets\nconcat(vis = [prefix+'_SB1_initcont_exec0.ms', prefix+'_SB1_initcont_exec1.ms', prefix+'_SB2_initcont_exec0.ms', prefix+'_SB3_initcont_exec0_rescaled.ms', prefix+'_SB3_initcont_exec1.ms', prefix+'_SB3_initcont_exec2_rescaled.ms'], concatvis = SB_cont_p0+'.ms', dirtol = '0.1arcsec', copypointing = False) \n\n#make initial image\ntclean_wrapper(vis = SB_cont_p0+'.ms', imagename = SB_cont_p0, mask = common_mask, scales = SB_scales, threshold = '0.1mJy', savemodel = 'modelcolumn')\n\nnoise_annulus =\"annulus[[%s, %s],['%.2farcsec', '4.25arcsec']]\" % (mask_ra, mask_dec, 1.1*mask_radius) #annulus over which we measure the noise. 
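(With mask_radius = 1.3 arcsec, this evaluates to an annulus spanning 1.43 to 4.25 arcsec.) 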
The inner radius is slightly larger than the semimajor axis of the mask (to add some buffer space around the mask) and the outer radius is set so that the annulus fits inside the long-baseline image size \nestimate_SNR(SB_cont_p0+'.image', disk_mask = common_mask, noise_mask = noise_annulus)\n#HD143006_SB_contp0.image\n#Beam 0.289 arcsec x 0.245 arcsec (-89.21 deg)\n#Flux inside disk mask: 60.39 mJy\n#Peak intensity of source: 7.76 mJy/beam\n#rms: 4.15e-02 mJy/beam\n#Peak SNR: 186.97\n\n\n\"\"\"\nWe need to select one or more reference antennae for gaincal\n\nWe first look at the CASA command log (or manual calibration script) to see how the reference antennae choices were ranked (weighted toward antennae close to the center of the array and with good SNR)\nNote that gaincal will sometimes choose a different reference antenna than the one specified if it deems another one to be a better choice \n\nSB1: DA41,DA49,DV16,DA61,DV19\nSB2: DV16\nSB3, EB0: DV15, DV18, DA46, DA51, DV23\nSB3, EB1: DA59, DA49, DA41, DA46, DA51\nSB3, EB2: DA59, DA46, DA49, DA41, DA51\nLB1, EB0: DA61,DA47,DV24,DA57,DV09\nLB1, EB1: DV20,DV08,DV04,DV06,DA63\n\nIf you want to double check whether the antenna locations are reasonable, you can use something like plotants(vis = SB_cont_p0+'.ms')\n\n\"\"\"\n\nget_station_numbers(SB_cont_p0+'.ms', 'DV16')\n#Observation ID 0: DV16@A036\n#Observation ID 1: DV16@A036\n#Observation ID 2: DV16@A036\n#Observation ID 3: DV16@A103\n#Observation ID 4: DV16@A103\n#Observation ID 5: DV16@A103\nget_station_numbers(SB_cont_p0+'.ms', 'DA49')\n#Observation ID 0: DA49@A002\n#Observation ID 1: DA49@A002\n#Observation ID 2: DA49@A002\n#Observation ID 4: DA49@A002\n#Observation ID 5: DA49@A002\nget_station_numbers(SB_cont_p0+'.ms', 'DA46')\n#Observation ID 2: DA46@A034\n#Observation ID 3: DA46@A034\n#Observation ID 4: DA46@A034\n#Observation ID 5: DA46@A034\nget_station_numbers(SB_cont_p0+'.ms', 'DV15')\n#Observation ID 0: DV15@A048\n#Observation ID 1: DV15@A048\n#Observation ID 2: DV15@A048\n#Observation ID 3: DV15@A006\n#Observation ID 5: DV15@A006\nget_station_numbers(SB_cont_p0+'.ms', 'DV18')\n#Observation ID 1: DV18@A009\n#Observation ID 2: DV18@A009\n#Observation ID 3: DV18@A009\n#Observation ID 4: DV18@A009\n#Observation ID 5: DV18@A009\n\n\n\n\n\nSB_contspws = '0~20' #change as appropriate\nSB_refant = 'DV16@A036, DV18@A009, DA46@A034' \nSB1_timerange = '2016/06/13~2016/06/15'\nSB2_timerange = '2016/07/01/00~2016/07/03/00'\nSB3_obs0_timerange = '2017/05/13/00~2017/05/15/00'\nSB3_obs1_timerange = '2017/05/15/00~2017/05/18/00'\nSB3_obs2_timerange = '2017/05/18/00~2017/05/20/00'\n \n\n# It's useful to check that the phases for the refant look good in all execution blocks in plotms. 
However, plotms has a tendency to crash in CASA 5.1.1, so it might be necessary to use plotms in an older version of CASA \n#plotms(vis=SB_cont_p0+'.ms', xaxis='time', yaxis='phase', ydatacolumn='data', avgtime='30', avgbaseline=True, antenna = SB_refant, observation = '0')\n#plotms(vis=SB_cont_p0+'.ms', xaxis='time', yaxis='phase', ydatacolumn='data', avgtime='30', avgbaseline=True, antenna = SB_refant, observation = '1')\n\n#first round of phase self-cal for short baseline data\nSB_p1 = prefix+'_SB.p1'\nos.system('rm -rf '+SB_p1)\ngaincal(vis=SB_cont_p0+'.ms' , caltable=SB_p1, gaintype='T', spw=SB_contspws, refant=SB_refant, calmode='p', solint='120s', minsnr=1.5, minblperant=4) #choose self-cal intervals from [120s, 60s, 30s, 18s, 6s]\n\nif not skip_plots:\n plotcal(caltable=SB_p1, xaxis = 'time', yaxis = 'phase',subplot=441,iteration='antenna', timerange = SB1_timerange) \n plotcal(caltable=SB_p1, xaxis = 'time', yaxis = 'phase',subplot=441,iteration='antenna', timerange = SB2_timerange) \n plotcal(caltable=SB_p1, xaxis = 'time', yaxis = 'phase',subplot=441,iteration='antenna', timerange = SB3_obs0_timerange)\n plotcal(caltable=SB_p1, xaxis = 'time', yaxis = 'phase',subplot=441,iteration='antenna', timerange = SB3_obs1_timerange)\n plotcal(caltable=SB_p1, xaxis = 'time', yaxis = 'phase',subplot=441,iteration='antenna', timerange = SB3_obs2_timerange)\n\napplycal(vis=SB_cont_p0+'.ms', spw=SB_contspws, gaintable=[SB_p1], interp = 'linearPD', calwt = True)\n\nSB_cont_p1 = prefix+'_SB_contp1'\nos.system('rm -rf %s*' % SB_cont_p1)\nsplit(vis=SB_cont_p0+'.ms', outputvis=SB_cont_p1+'.ms', datacolumn='corrected')\n\ntclean_wrapper(vis = SB_cont_p1+'.ms' , imagename = SB_cont_p1, mask = common_mask, scales = SB_scales, threshold = '0.07mJy', interactive = False, savemodel = 'modelcolumn')\nestimate_SNR(SB_cont_p1+'.image', disk_mask = common_mask, noise_mask = noise_annulus)\n#HD143006_SB_contp1.image\n#Beam 0.289 arcsec x 0.245 arcsec (-89.09 deg)\n#Flux inside disk mask: 61.57 mJy\n#Peak intensity of source: 8.55 mJy/beam\n#rms: 2.51e-02 mJy/beam\n#Peak SNR: 341.31\n\n#now we concatenate all the data together\n\ncombined_cont_p0 = prefix+'_combined_contp0'\nos.system('rm -rf %s*' % combined_cont_p0)\n#pay attention here and make sure you're selecting the shifted (and potentially rescaled) measurement sets\nconcat(vis = [SB_cont_p1+'.ms', prefix+'_LB1_initcont_exec0.ms', prefix+'_LB1_initcont_exec1_rescaled.ms'], concatvis = combined_cont_p0+'.ms' , dirtol = '0.1arcsec', copypointing = False) \n\ntclean_wrapper(vis = combined_cont_p0+'.ms' , imagename = combined_cont_p0, mask = common_mask, scales = LB_scales, threshold = '0.05mJy', savemodel = 'modelcolumn')\nestimate_SNR(combined_cont_p0+'.image', disk_mask = common_mask, noise_mask = noise_annulus)\n#HD143006_combined_contp0.image\n#Beam 0.054 arcsec x 0.039 arcsec (87.44 deg)\n#Flux inside disk mask: 64.87 mJy\n#Peak intensity of source: 0.70 mJy/beam\n#rms: 1.50e-02 mJy/beam\n#Peak SNR: 46.75\n\nget_station_numbers(combined_cont_p0+'.ms', 'DA61')\n#Observation ID 0: DA61@A006\n#Observation ID 1: DA61@A006\n#Observation ID 2: DA61@A006\n#Observation ID 3: DA61@A015\n#Observation ID 4: DA61@A015\n#Observation ID 5: DA61@A015\n#Observation ID 6: DA61@A015\n#Observation ID 7: DA61@A089\n\nget_station_numbers(combined_cont_p0+'.ms', 'DV20')\n#Observation ID 2: DV20@A013\n#Observation ID 3: DV20@A093\n#Observation ID 5: DV20@A093\n#Observation ID 6: DV20@A093\n#Observation ID 7: DV20@A072\n\n\ncombined_refant = 'DV16@A036, DV18@A009, DA46@A034, DA61@A015, DV20@A072'
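\n\n# Worked illustration of the spwmap convention described in the comment on combined_spwmap below: with combine = 'spw', gaincal writes one solution per execution block, keyed to the first spw id of that block, so every spw in a block must be mapped back to that first id. Here spws 0~2 (the first short-baseline execution) map to 0, spws 3~5 map to 3, spws 9~12 map to 9, and so on through spws 25~28 mapping to 25.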
\ncombined_contspws = '0~28'\ncombined_spwmap = [0,0,0,3,3,3,6,6,6,9,9,9,9,13,13,13,13,17,17,17,17,21,21,21,21,25,25,25,25] #note that the tables produced by gaincal in 5.1.1 have spectral windows numbered differently if you use the combine = 'spw' option. Previously, all of the solutions would be written to spectral window 0. Now, they are written to the first window in each execution block. So, the spwmap argument has to correspond to the first window in each execution block you want to calibrate. \n\nLB1_obs0_timerange = '2017/09/26/00:00:01~2017/09/26/23:59:59'\nLB1_obs1_timerange = '2017/11/26/00:00:01~2017/11/26/23:59:59'\n\n#first round of phase self-cal for long baseline data\ncombined_p1 = prefix+'_combined.p1'\nos.system('rm -rf '+combined_p1)\ngaincal(vis=combined_cont_p0+'.ms' , caltable=combined_p1, gaintype='T', combine = 'spw, scan', spw=combined_contspws, refant=combined_refant, calmode='p', solint='360s', minsnr=1.5, minblperant=4) #choose self-cal intervals from [900s, 360s, 180s, 60s, 30s, 6s]\n\nif not skip_plots:\n plotcal(caltable=combined_p1, xaxis = 'time', yaxis = 'phase',subplot=441,iteration='antenna', timerange = LB1_obs0_timerange) \n plotcal(caltable=combined_p1, xaxis = 'time', yaxis = 'phase',subplot=441,iteration='antenna', timerange = LB1_obs1_timerange)\n\napplycal(vis=combined_cont_p0+'.ms', spw=combined_contspws, spwmap = combined_spwmap, gaintable=[combined_p1], interp = 'linearPD', calwt = True, applymode = 'calonly')\n\ncombined_cont_p1 = prefix+'_combined_contp1'\nos.system('rm -rf %s*' % combined_cont_p1)\nsplit(vis=combined_cont_p0+'.ms', outputvis=combined_cont_p1+'.ms', datacolumn='corrected')\n\ntclean_wrapper(vis = combined_cont_p1+'.ms' , imagename = combined_cont_p1, mask = common_mask, scales = LB_scales, threshold = '0.05mJy', savemodel = 'modelcolumn')\nestimate_SNR(combined_cont_p1+'.image', disk_mask = common_mask, noise_mask = noise_annulus)\n#HD143006_combined_contp1.image\n#Beam 0.053 arcsec x 0.038 arcsec (86.81 deg)\n#Flux inside disk mask: 64.68 mJy\n#Peak intensity of source: 0.67 mJy/beam\n#rms: 1.41e-02 mJy/beam\n#Peak SNR: 47.36\n\n\n#second round of phase self-cal for long baseline data\ncombined_p2 = prefix+'_combined.p2'\nos.system('rm -rf '+combined_p2)\ngaincal(vis=combined_cont_p1+'.ms' , caltable=combined_p2, gaintype='T', combine = 'spw, scan', spw=combined_contspws, refant=combined_refant, calmode='p', solint='180s', minsnr=1.5, minblperant=4)\n\nif not skip_plots:\n plotcal(caltable=combined_p2, xaxis = 'time', yaxis = 'phase',subplot=441,iteration='antenna', timerange = LB1_obs0_timerange) \n plotcal(caltable=combined_p2, xaxis = 'time', yaxis = 'phase',subplot=441,iteration='antenna', timerange = LB1_obs1_timerange)\n\napplycal(vis=combined_cont_p1+'.ms', spw=combined_contspws, spwmap = combined_spwmap, gaintable=[combined_p2], interp = 'linearPD', calwt = True, applymode = 'calonly')\n\n\ncombined_cont_p2 = prefix+'_combined_contp2'\nos.system('rm -rf %s*' % combined_cont_p2)\nsplit(vis=combined_cont_p1+'.ms', outputvis=combined_cont_p2+'.ms', datacolumn='corrected')\n\ntclean_wrapper(vis = combined_cont_p2+'.ms' , imagename = combined_cont_p2, mask = common_mask, scales = LB_scales, threshold = '0.05mJy', savemodel = 'modelcolumn')\nestimate_SNR(combined_cont_p2+'.image', disk_mask = common_mask, noise_mask = noise_annulus)\n#HD143006_combined_contp2.image\n#Beam 0.053 arcsec x 0.038 arcsec (86.81 deg)\n#Flux inside disk mask: 64.26 mJy\n#Peak intensity of source: 0.71 mJy/beam\n#rms: 1.40e-02 
mJy/beam\n#Peak SNR: 50.46\n\n\n#third round of phase self-cal for long-baseline data \ncombined_p3 = prefix+'_combined.p3'\nos.system('rm -rf '+combined_p3)\ngaincal(vis=combined_cont_p2+'.ms' , caltable=combined_p3, gaintype='T', combine = 'spw, scan', spw=combined_contspws, refant=combined_refant, calmode='p', solint='60s', minsnr=1.5, minblperant=4)\n\nif not skip_plots:\n plotcal(caltable=combined_p3, xaxis = 'time', yaxis = 'phase',subplot=441,iteration='antenna', timerange = LB1_obs0_timerange) \n plotcal(caltable=combined_p3, xaxis = 'time', yaxis = 'phase',subplot=441,iteration='antenna', timerange = LB1_obs1_timerange)\n\napplycal(vis=combined_cont_p2+'.ms', spw=combined_contspws, spwmap = combined_spwmap, gaintable=[combined_p3], interp = 'linearPD', calwt = True, applymode = 'calonly')\n\ncombined_cont_p3 = prefix+'_combined_contp3'\nos.system('rm -rf %s*' % combined_cont_p3)\nsplit(vis=combined_cont_p2+'.ms', outputvis=combined_cont_p3+'.ms', datacolumn='corrected')\n\ntclean_wrapper(vis = combined_cont_p3+'.ms' , imagename = combined_cont_p3, mask = common_mask, scales = LB_scales, threshold = '0.05mJy', savemodel = 'modelcolumn')\nestimate_SNR(combined_cont_p3+'.image', disk_mask = common_mask, noise_mask = noise_annulus)\n#HD143006_combined_contp3.image\n#Beam 0.053 arcsec x 0.038 arcsec (86.81 deg)\n#Flux inside disk mask: 63.92 mJy\n#Peak intensity of source: 0.76 mJy/beam\n#rms: 1.39e-02 mJy/beam\n#Peak SNR: 54.73\n\n\n\n#fourth round of phase self-cal for long-baseline data \ncombined_p4 = prefix+'_combined.p4'\nos.system('rm -rf '+combined_p4)\ngaincal(vis=combined_cont_p3+'.ms' , caltable=combined_p4, gaintype='T', combine = 'spw, scan', spw=combined_contspws, refant=combined_refant, calmode='p', solint='30s', minsnr=1.5, minblperant=4)\n\nif not skip_plots:\n plotcal(caltable=combined_p4, xaxis = 'time', yaxis = 'phase',subplot=441,iteration='antenna', timerange = LB1_obs0_timerange) \n plotcal(caltable=combined_p4, xaxis = 'time', yaxis = 'phase',subplot=441,iteration='antenna', timerange = LB1_obs1_timerange)\n\napplycal(vis=combined_cont_p3+'.ms', spw=combined_contspws, spwmap = combined_spwmap, gaintable=[combined_p4], interp = 'linearPD', calwt = True, applymode = 'calonly')\n\ncombined_cont_p4 = prefix+'_combined_contp4'\nos.system('rm -rf %s*' % combined_cont_p4)\nsplit(vis=combined_cont_p3+'.ms', outputvis=combined_cont_p4+'.ms', datacolumn='corrected')\n\ntclean_wrapper(vis = combined_cont_p4+'.ms' , imagename = combined_cont_p4, mask =common_mask, scales = LB_scales, threshold = '0.05mJy', savemodel = 'modelcolumn')\nestimate_SNR(combined_cont_p4+'.image', disk_mask = common_mask, noise_mask = noise_annulus)\n#HD143006_combined_contp4.image\n#Beam 0.053 arcsec x 0.038 arcsec (86.81 deg)\n#Flux inside disk mask: 63.42 mJy\n#Peak intensity of source: 0.80 mJy/beam\n#rms: 1.40e-02 mJy/beam\n#Peak SNR: 56.91\n\n\n\n#additional phase self-cal and amp self-cal appears to make things worse \n#uncomment the lines below if you wish to perform amp self-cal for your source specifically\n\"\"\"\n\ncombined_ap = prefix+'_combined.ap'\nos.system('rm -rf '+combined_ap)\ngaincal(vis=combined_cont_p4+'.ms' , caltable=combined_ap, gaintype='T', combine = 'spw,scan', spw=combined_contspws, refant=combined_refant, calmode='ap', solint='900s', minsnr=3.0, minblperant=4, solnorm = True)\n\nif not skip_plots:\n plotcal(caltable=combined_ap, xaxis = 'time', yaxis = 'amp',subplot=441,iteration='antenna', timerange = LB1_obs0_timerange) \n plotcal(caltable=combined_ap, xaxis = 
'time', yaxis = 'amp',subplot=441,iteration='antenna', timerange = LB1_obs1_timerange)\n\napplycal(vis=combined_cont_p4+'.ms', spw=combined_contspws, spwmap = combined_spwmap, gaintable=[combined_ap], interp = 'linearPD', calwt = True, applymode = 'calonly')\n\ncombined_cont_ap = prefix+'_combined_contap'\nos.system('rm -rf %s*' % combined_cont_ap)\nsplit(vis=combined_cont_p4+'.ms', outputvis=combined_cont_ap+'.ms', datacolumn='corrected')\n\ntclean_wrapper(vis = combined_cont_ap+'.ms' , imagename = combined_cont_ap, mask =common_mask, scales = LB_scales, threshold = '0.05mJy', savemodel = 'modelcolumn')\nestimate_SNR(combined_cont_ap+'.image', disk_mask = common_mask, noise_mask = noise_annulus)\n\"\"\"\n\nsplit_all_obs(combined_cont_p4+'.ms', combined_cont_p4+'_exec')\n\nexport_MS(combined_cont_p4+'_exec6.ms') #export first so that the .npz file used below exists\n\nplot_deprojected(['HD143006_combined_contp4_exec6.vis.npz', 'HD143006_LB1_initcont_exec0.vis.npz', 'HD143006_SB3_initcont_exec1.vis.npz'], PA = PA, incl = incl)\n\nestimate_flux_scale(reference = prefix+'_SB3_initcont_exec1.vis.npz', comparison = 'HD143006_combined_contp4_exec6.vis.npz', incl = incl, PA = PA)\n","repo_name":"seanandrews/p484","sub_path":"reduction_scripts/HD143006_contselfcal_jh.py","file_name":"HD143006_contselfcal_jh.py","file_ext":"py","file_size_in_byte":41852,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"32501571515","text":"from django.conf.urls import patterns, url\n\nfrom announcement import views\n\nurlpatterns = patterns('',\n url(r'^post/$', views.announcement_post , name = \"post\"),\n url(r'^view/(?P<announcement_id>\d+)/$', views.announcement_view, name = \"view\"),\n url(r'^list/$', views.announcement_list, name = \"list\", kwargs = {'page_num' : 1}),\n url(r'^profile/$', views.announcement_profile, name = \"profile\") \n)","repo_name":"jezeniel/student-portal-platform","sub_path":"student_portal_platform/announcement/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"10008482844","text":"'''\nWrite a program that will ask the user a multiplication problem from random numbers.\nThe program will make them keep trying until they get it right and then when they finally\ndo, ask them if they want to attempt another multiplication problem. If they say yes,\nthen they will be asked another math question, otherwise the program will end.\n\nAuthor: Cheryl Gardner\nCourse: ITM 313\n'''\n\n#import random so a random number generator can be used\nimport random\n\n#Set the values of guess and choice so that the loop will start and the answer to guess cannot be right\nguess = -1\nchoice = 'y'\n\n#Create a first random number that is between 1 and 9\nnumber1 = random.randint(1,9)\n#Create a second random number that is between 1 and 9\nnumber2 = random.randint(1,9)\n\n#Calculate the answer of the two random numbers\nanswer = number1 * number2\n\n#Ask the user what number1 times number2 is \nprint(\"How much is\",number1,\"times\",number2)\n\n#Have the user guess what they think the value is\nguess = int(input(\"Enter your guess: \"))\n\n#Keep the loop running while the value of choice is equal to y\nwhile (choice == 'y'):\n\n #As long as the answer is wrong keep running this loop\n while(guess != answer):\n #Print a buffer, then tell the user that their answer is not right and give them another guess\n print(\"\")\n print(\"I am sorry that is not right. 
Please try again.\")\n guess = int(input(\"Enter your guess: \"))\n\n #Do this if they guess the correct answer to the problem\n if (guess == answer):\n #Print a buffer and then tell them congratulations and that they did a good job\n print(\"\")\n print(\"Congratulations! That is the correct answer! Very Good!\")\n\n #Ask the user whether or not they want to play the multiplication game again\n choice = input(\"Do you want to play again y or n? \")\n\n #If they want another problem, then repeat this loop until they want to stop\n if (choice == 'y'):\n #Create a first random number that is between 1 and 9\n number1 = random.randint(1,9)\n #Create a second random number that is between 1 and 9\n number2 = random.randint(1,9)\n\n #Calculate the product of the two random numbers\n answer = number1 * number2\n #Create another buffer and then ask the user another math question\n print(\"\")\n print(\"How much is\",number1,\"times\",number2)\n\n #Have the user put in a guess for the new multiplication problem\n guess = int(input(\"Enter your guess: \"))\n\n #Create one last space buffer and thank the user for playing the game \nprint(\"\")\nprint(\"Thank you for playing, come again soon!\")\n \n \n","repo_name":"cgardner3/Python-Programs","sub_path":"lab3.py","file_name":"lab3.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17797205741","text":"import os\nimport tempfile\nimport shutil\n\nfrom qgis.testing import unittest, start_app\n\nfrom qgis.core import (\n QgsProject, QgsLayoutExporter)\n\nstart_app()\n\n\nclass TestPrintLayout(unittest.TestCase):\n\n @classmethod\n def setUpClass(self):\n self.outdir = tempfile.mkdtemp()\n\n def test_print_layout(self):\n export_file = os.path.join(self.outdir, 'export.pdf')\n\n project = QgsProject.instance()\n project.read(os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n 'test_data/simple_polygons.qgz'))\n\n layout_manager = project.layoutManager()\n layout = layout_manager.layouts()[0]\n exporter = QgsLayoutExporter(layout)\n\n exporter.exportToPdf(\n export_file,\n exporter.PdfExportSettings())\n\n self.assertTrue(os.path.isfile(export_file))\n self.assertTrue(os.path.getsize(export_file) > 0)\n\n @classmethod\n def tearDownClass(self):\n shutil.rmtree(self.outdir, True)\n","repo_name":"opengisch/qgis_plugins_test_demo","sub_path":"qgis_testing/test_print_layout.py","file_name":"test_print_layout.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"24532139853","text":"from functools import wraps\nfrom typing import Callable\nfrom copy import deepcopy\n\nfrom discord.ext.commands import Cog, Context\nfrom tortoise import Tortoise\n\nfrom rbb_bot.models import CommandLog\nfrom rbb_bot.settings.config import get_creds\n\nDB_URL = get_creds().db_url\n\n\ndef log_command(command_name: str = None) -> Callable:\n def dec(func: Callable) -> Callable:\n @wraps(func)\n async def wrapped(cog: Cog, ctx: Context, *args, **kwargs):\n try:\n await Tortoise.init(\n db_url=DB_URL, modules={\"models\": [\"rbb_bot.models\"]}\n )\n author_id = ctx.author.id\n cmd_name = command_name or ctx.command.qualified_name\n guild_id = ctx.guild.id if ctx.guild else None\n channel_id = ctx.channel.id\n message_id = ctx.message.id\n args_ = args\n kwargs_ = deepcopy(kwargs)\n prefix = ctx.prefix\n await CommandLog.create(\n command_name=cmd_name,\n 
author_id=author_id,\n guild_id=guild_id,\n channel_id=channel_id,\n message_id=message_id,\n prefix=prefix or \"Not Found\",\n args=[str(a) for a in args_] or None,\n kwargs={k: str(v) for k, v in kwargs_.items()} or None,\n )\n if ctx.message.attachments:\n kwargs_.setdefault(\"attachments\", [])\n for attachment in ctx.message.attachments:\n kwargs_[\"attachments\"].append(attachment.url)\n\n except Exception as e:\n CommandLog.client.logger.error(\n f\"Error while logging command [{func.__name__}]:\\n{e}\"\n )\n\n await func(cog, ctx, *args, **kwargs)\n\n return wrapped\n\n return dec\n","repo_name":"badmagick329/RbbBot","sub_path":"rbb_bot/utils/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"20241788971","text":"import requests\nfrom bs4 import BeautifulSoup\n\nurl = \"https://economia.uol.com.br/cotacoes/cambio/\"\nheaders = {\"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36\"}\n\nrequest = requests.get(url, headers=headers).content\n\nsite = BeautifulSoup(request, \"html.parser\")\n\ndolar_cotacao = site.find(\"input\",attrs={\"name\":\"currency2\"})\n\nprint(f'Hoje a cotação do dolar é de R$ {dolar_cotacao[\"value\"]}')","repo_name":"iure06/Aprendizado","sub_path":"Aprendendo-web-scraping-com-python/dolarCotacao.py","file_name":"dolarCotacao.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30852916176","text":"# Bullet Class\n\nimport pygame\nfrom pygame import sprite \nfrom setting import Settings\nclass Bullet(sprite.Sprite):\n \"\"\"\n A class to manage bullets fired from the ship\n \"\"\"\n def __init__(self, settings, screen, ship):\n \"\"\"\n Create a bullet object at the ship's current position\n \"\"\"\n super().__init__() \n self.screen = screen\n self.settings = settings\n\n # Bullet position\n self.rect = pygame.Rect(0, 0, settings.bullet_width, settings.bullet_height) \n self.rect.centerx = ship.rect.centerx\n self.rect.top = ship.rect.top \n\n # for decimal value \n self.y = float(self.rect.y) \n\n self.color = settings.bullet_color\n self.speed_factor = settings.bullet_speed_factor\n\n def update(self): \n \"\"\" \n Move the bullet up the screen\n \"\"\"\n self.y -= self.speed_factor \n self.rect.y = self.y\n\n def draw_bullet(self): \n pygame.draw.rect(self.screen, self.color, self.rect)","repo_name":"HOANG-JAIST/Alien_Invasion","sub_path":"bullet.py","file_name":"bullet.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74179516354","text":"import pytest\nfrom hypothesis import given\n\nfrom lz._core.functional import Curry\nfrom lz._core.signatures import to_signature\nfrom lz.functional import curry\nfrom tests import strategies\nfrom tests.hints import (Function,\n FunctionCall)\n\n\n@given(strategies.transparent_functions)\ndef test_empty_call(function: Function) -> None:\n curried = curry(function)\n\n result = curried()\n\n assert to_signature(curried).all_set() or isinstance(result, Curry)\n\n\n@given(strategies.transparent_functions_calls)\ndef test_valid_call(function_call: FunctionCall) -> None:\n function, args, kwargs = function_call\n\n curried = curry(function)\n\n result = curried(*args, **kwargs)\n\n assert result == function(*args, 
**kwargs)\n\n\n@given(strategies.transparent_functions_calls)\ndef test_involution(function_call: FunctionCall) -> None:\n function, args, kwargs = function_call\n double_curried = curry(curry(function))\n\n result = double_curried(*args, **kwargs)\n\n assert result == function(*args, **kwargs)\n\n\n@given(strategies.non_variadic_transparent_functions_calls_with_invalid_args)\ndef test_invalid_args_call(\n function_call_with_invalid_args: FunctionCall\n) -> None:\n function, invalid_args, kwargs = function_call_with_invalid_args\n curried = curry(function)\n\n with pytest.raises(TypeError):\n curried(*invalid_args, **kwargs)\n\n\n@given(strategies.non_variadic_transparent_functions_calls_with_invalid_kwargs)\ndef test_invalid_kwargs_call(\n function_call_with_invalid_kwargs: FunctionCall\n) -> None:\n function, args, invalid_kwargs = function_call_with_invalid_kwargs\n curried = curry(function)\n\n with pytest.raises(TypeError):\n curried(*args, **invalid_kwargs)\n","repo_name":"lycantropos/lz","sub_path":"tests/functional_tests/curry_tests/test_properties.py","file_name":"test_properties.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"74265101633","text":"# subplotable.py\n# by Behnam Heydarshahi, October 2017\n#\n# This class models data needed for a single sub plot\n\nclass SubPlotable:\n def __init__(self, label, x_values, y_values, y_std_error_values):\n self.label = label\n self.x_values = x_values\n self.y_values = y_values\n self.y_std_err_values = y_std_error_values\n","repo_name":"campiador/isca_cache","sub_path":"graphics/subplotable.py","file_name":"subplotable.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16493451391","text":"class Solution:\n def lengthOfLIS(self, nums: List[int]) -> int:\n dp = [1] * len(nums)\n for right in range(len(nums)):\n for left in range(right):\n # print(f\"{left = } {nums[left] = } {nums[right] = }\")\n # if current number is bigger than the left and the size (dp[right]) is still less than the left one, increment the size\n if nums[right] > nums[left] and dp[right] < dp[left] + 1:\n dp[right] = dp[left] + 1\n # print(\"ADDING\")\n # print(dp)\n return max(dp)","repo_name":"ddaarrrryyll/leetcode","sub_path":"longest-increasing-subsequence/longest-increasing-subsequence.py","file_name":"longest-increasing-subsequence.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12022596064","text":"import re\nimport csv\nimport json\nimport time\nimport httplib\nfrom urllib2 import urlopen, URLError\n\n\nclass YahooSymbol(object):\n \"\"\" Yahoo finance symbol \"\"\"\n def __init__(self, symbol, name, price):\n self.symbol = symbol\n self.name = name\n self.price = price\n\n\nclass YahooFinance(dict):\n \"\"\" Yahoo finance API query \"\"\"\n\n def query_multiple(self, symbols, data='l90'):\n \"\"\" Download and parse content \"\"\"\n if not symbols:\n return\n symbols = [symbol.upper() for symbol in symbols]\n chunks_per = 200 # Can't request more than 200 symbols at once, so split into chunks\n for chunk in [symbols[i:i+chunks_per] for i in xrange(0, len(symbols), chunks_per)]:\n url = 'http://finance.yahoo.com/d/quotes.csv?s=%s&f=snl1' % '+'.join(chunk)\n while True:\n try:\n reader = csv.reader(urlopen(url))\n break\n except URLError:\n pass 
# retry\n with open('/tmp/yfinance-csv.log', 'a', 1) as f:\n for line in reader:\n symbol, name, price = line\n f.write('%s %s %s %s\\n' % (time.strftime(\"%Y-%m-%d %H:%M:%S\"), price, symbol, name))\n if price != '0.00' and float(price) < 100000.0:\n self[symbol] = YahooSymbol(*line)\n\n def query_single(self, symbol):\n \"\"\" Shortcut method \"\"\"\n self.query_multiple([symbol])\n return self[symbol]\n\n\n def __getitem__(self, name):\n \"\"\" Normalize symbol names \"\"\"\n try:\n return dict.__getitem__(self, name.upper())\n except KeyError:\n return None\n\n# available data items through the API\nyahoo_items = {\n 'a00': 'ask price',\n 'b00': 'bid price',\n 'g00': \"day's range low\",\n 'h00': \"day's range high\",\n 'j10': 'market cap',\n 'v00': 'volume',\n 'a50': 'ask size',\n 'b60': 'bid size',\n 'b30': 'ecn bid',\n 'o50': 'ecn bid size',\n 'z03': 'ecn ext hr bid',\n 'z04': 'ecn ext hr bid size',\n 'b20': 'ecn ask',\n 'o40': 'ecn ask size',\n 'z05': 'ecn ext hr ask',\n 'z07': 'ecn ext hr ask size',\n 'h01': \"ecn day's high\",\n 'g01': \"ecn day's low\",\n 'h02': \"ecn ext hr day's high\",\n 'g11': \"ecn ext hr day's low\",\n 't10': 'last trade time, will be in unix epoch format',\n 't50': 'ecnQuote/last/time',\n 't51': 'ecn ext hour time',\n 't53': 'RTQuote/last/time',\n 't54': 'RTExthourQuote/last/time',\n 'l10': 'last trade',\n 'l90': 'ecnQuote/last/value',\n 'l91': 'ecn ext hour price',\n 'l84': 'RTQuote/last/value',\n 'l86': 'RTExthourQuote/last/value',\n 'c10': 'quote/change/absolute',\n 'c81': 'ecnQuote/afterHourChange/absolute',\n 'c60': 'ecnQuote/change/absolute',\n 'z02': 'ecn ext hour change',\n 'z08': 'ecn ext hour change',\n 'c63': 'RTQuote/change/absolute',\n 'c85': 'RTExthourQuote/afterHourChange/absolute',\n 'c64': 'RTExthourQuote/change/absolute',\n 'p20': 'quote/change/percent',\n 'c82': 'ecnQuote/afterHourChange/percent',\n 'p40': 'ecnQuote/change/percent',\n 'p41': 'ecn ext hour percent change',\n 'z09': 'ecn ext hour percent change',\n 'p43': 'RTQuote/change/percent',\n 'c86': 'RTExtHourQuote/afterHourChange/percent',\n 'p44': 'RTExtHourQuote/change/percent',\n}\n","repo_name":"ezl/yahoofinance","sub_path":"yfinance.py","file_name":"yfinance.py","file_ext":"py","file_size_in_byte":3350,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"} +{"seq_id":"32098838095","text":"# #jai mata di#\n# import sys\n# sys.stdin = open('input.in', 'r') \n# sys.stdout = open('output.out', 'w') \n\n\n\n\n#start the code from here\n\ng=int(input())\nif g!=2 and g%2==0:\n\tprint(\"YES\")\nelse:\n\tprint(\"NO\")","repo_name":"adityachaudhary147/py-codes","sub_path":"4A.py","file_name":"4A.py","file_ext":"py","file_size_in_byte":205,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"24924422612","text":"import json\nimport mnist\nimport random\nimport scipy.ndimage\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom skimage import color\nfrom skimage import io\nfrom skimage import filters, feature\n\nfrom sklearn.utils import shuffle\nfrom sklearn import svm\n\nfrom matplotlib import image\n\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, AveragePooling2D\nfrom keras.datasets import cifar10\n\nfrom tensorflow.keras.utils import to_categorical \n\nfrom scripts.load_data import load_train, load_test\n\n\nimages = []\nbunnies = []\n\ndata = None\n\nprint(\"start data loading... 
\")\nwith open(\"../../Boundary_Box/coordinates.json\", 'r+') as file:\n data = json.loads(file.read())\n \n for jpg in data:\n for coords in data[jpg]:\n im = color.rgb2gray(io.imread(f\"../../Boundary_Box/assets/{jpg}\"))\n x_coords = [coords['begin'][0], coords['end'][0]]\n y_coords = [coords['begin'][1], coords['end'][1]]\n\n images.append(im)\n bunnies.append(im[ min(y_coords) : max(y_coords) , min(x_coords) : max(x_coords) ])\n\nprint(\"done\\n\")\n\n\nprint(\"start patch sampling... \")\n\npatch_size = 20 # (N x N) pixels\npatches, labels = [], []\n\nsmooth_filter = [ \n [1/16,1/16,1/16],\n [1/16,1/5,1/16],\n [1/16,1/16,1/16]\n]\n\nfor i in range(len(images)):\n im = images[i]\n bunny = bunnies[i]\n\n smooth_bunny=scipy.ndimage.convolve(bunny, smooth_filter)\n bunny=filters.laplace(smooth_bunny)\n\n smooth_image=scipy.ndimage.convolve(im, smooth_filter)\n im=filters.laplace(smooth_image)\n\n for j in range(50):\n\n # boundary box coordinates\n x_b = random.randint((patch_size//2)+1, bunny.shape[0]-(patch_size//2))\n y_b = random.randint((patch_size//2)+1, bunny.shape[1]-(patch_size//2))\n\n x_coords_b = [x_b-patch_size//2, x_b+patch_size//2]\n y_coords_b = [y_b-patch_size//2, y_b+patch_size//2]\n\n patch_b = bunny[ min(x_coords_b) : max(x_coords_b), min(y_coords_b) : max(y_coords_b) ]\n patches.append(patch_b)\n labels.append(1)\n \n ##############################################################################################################\n\n # outside boundary box coordinates range\n x_range = [x for x in range((patch_size//2), im.shape[0]-(patch_size//2))]\n y_range = [y for y in range((patch_size//2), im.shape[1]-(patch_size//2))]\n\n # remove subsection of the boundary box from the list with all coordinates\n name = list(data.keys())[i]\n y_coords_data_arrays = [data[name][0]['begin'][0], data[name][0]['end'][0]]\n x_coords_data_arrays = [data[name][0]['begin'][1], data[name][0]['end'][1]]\n\n del x_range[min(x_coords_data_arrays)-(patch_size//2)+1 : max(x_coords_data_arrays)+(patch_size//2)]\n del y_range[min(y_coords_data_arrays)-(patch_size//2)+1 : max(y_coords_data_arrays)+(patch_size//2)]\n\n #pick random coordinate\n x = random.choice(y_range)\n y = random.choice(x_range)\n\n x_coords = [x-patch_size//2, x+patch_size//2]\n y_coords = [y-patch_size//2, y+patch_size//2]\n\n patch_pic = im[ min(y_coords) : max(y_coords), min(x_coords) : max(x_coords) ]\n patches.append(patch_pic)\n labels.append(0)\n\nprint(\"patches created!\\n\")\n\ndata, label = np.array(patches), np.array(labels)\n\ndata, label = shuffle(data, label)\n\nlen_data = len(data)\n\ntrain_data = data[:len_data//3 *2]\ntrain_labels = label[:len_data//3 *2]\n\ntest_data = data[len_data//3 *2:]\ntest_labels = label[len_data//3 *2:]\n\n# Normalizeren van de images\ntrain_data = (train_data / 255) - 0.5\ntest_data = (test_data / 255) - 0.5\n\nx = len(train_data[0])\ny = len(train_data[0][0])\n\ntrain_data2 = np.reshape(train_data, (len(train_data), x*y) )\ntest_data2 = np.reshape(test_data, (len(test_data), x*y) )\n\n\nprint(\"start SVM... \")\nclf = svm.SVC(gamma='scale', C=1)\n\nprint(\"start clf.fit... 
\")\nclf.fit(train_data2, train_labels)\nprint(\"fitting done!\\n\")\n\ncorrect = 0\nfor i, data in enumerate(test_data2):\n res = clf.predict([data])\n if res[0] == test_labels[i]:\n correct += 1\n\nprint(\"Accuracy =\", round((correct/len(test_data2))*100, 2) )","repo_name":"Pink-Shadow/VISN","sub_path":"Code/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4182,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23629394741","text":"#!/usr/bin/env python\n\n\nimport argparse\nimport sys\n\n\ndictionary = dict(zip(('ejp mysljylc kd kxveddknmc re jsicpdrysi '\n 'rbcpc ypc rtcsra dkh wyfrepkym veddknkmkrkcd '\n 'de kr kd eoya kw aej tysr re ujdr lkgc jv'),\n ('our language is impossible to understand '\n 'there are twenty six factorial possibilities '\n 'so it is okay if you want to just give up')))\ndictionary['z'] = 'q'\ndictionary['q'] = 'z'\ndictionary.update(dict(map(str.upper, item) for item in dictionary.iteritems()))\n\n\ndef solve(line):\n return ''.join(dictionary[char] for char in line)\n\n\ndef main(args):\n test_cases = next(args.input).strip()\n for case_index, line in enumerate(args.input, 1):\n print(\"Case #{}: {}\".format(case_index, solve(line.strip())))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('input', type=argparse.FileType('r'), default=sys.stdin)\n parser.add_argument('--dictionary', type=argparse.FileType('r'),\n default=open('/usr/share/dict/words'))\n main(parser.parse_args())\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_95/1004.py","file_name":"1004.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20873777507","text":"import asyncio\nimport json\nimport time\nimport logging\nimport sys\nfrom math import floor\nfrom pricer import Pricer\nfrom functools import partial\n\n#from predictor_change_spreadstamp import Predictor\nfrom predictor_keltner import Predictor\nfrom datetime import timedelta, datetime\nfrom log_format import SaveLog\nfrom order_book import OrderBook\nfrom decimal import Decimal\nimport asyncio, websockets\nimport traceback\nfrom binance.streams import BinanceSocketManager\nimport telegram\n\n#todo kluge\n#HIGHLY INSECURE\n# ssl_context = ssl.SSLContext()\n# ssl_context.check_hostname = False\n# ssl_context.verify_mode = ssl.CERT_NONE\nimport requests\nfrom json import loads\n'''\nlogger = logging.getLogger(__name__)\n\nlogging.basicConfig(\n stream=sys.stdout,\n level=logging.INFO,\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n)\n'''\n\nclass Spreader:\n production_endpoint = f'wss://fstream.binance.com/ws'\n chat_id = '-642791530'\n bot = telegram.Bot(token=('5384131643:AAFd62LyZl5mfI-Tzd0c_xTUYRKcRWugWpc'))\n\n orderbook = {}\n orderbook_5min = {}\n trades = {}\n ob_ref = OrderBook(max_depth=10)\n ob_target = OrderBook(max_depth=10)\n Ref_SeqNum = 0\n Target_SeqNum = 0\n def __init__(self, api, config):\n logging.getLogger('').handlers = []\n self.bm = BinanceSocketManager(api)\n self.config = config\n #self.log = SaveLog(\"Allen\",\"PairTrading\",\"FutureBTC_AVAX\",\"/home/btsemm/\")\n self.log = None\n self.predictor = Predictor(\n window_size=self.config.MA_WINDOW_SIZE,\n _symbol=config.REFERENCE_SYMBOL,\n slippage=config.SLIPPAGE,\n log = self.log,\n )\n self.pricer = Pricer(\n api,\n config.REFERENCE_SYMBOL,\n config.TARGET_SYMBOL,\n self.log,\n 
self.config\n )\n self.spread_prices = None\n self.remember_quotos = None \n self.api = api\n async def Update_orderbook(self,task_queue, symbol):\n ws = self.bm.kline_futures_socket(symbol)\n async with ws as wscm:\n while True:\n await asyncio.sleep(0.001)\n resp = await wscm.recv()\n print(type(resp['k']['x']))\n print(resp)\n if resp['k']['x'] == True :\n self.predictor.update_spreads(resp)\n \n \n\n async def Update_Trade(self,task_queue):\n ws = self.bm2.futures_socket()\n print(\"in update trade\")\n async with ws as wscm:\n while True:\n resp = await wscm.recv()\n print(\"resp :\",resp)\n if resp['e'] == \"ORDER_TRADE_UPDATE\":\n if resp['o']['X'] == \"FILLED\" or resp['o']['X'] == \"EXPIRED\" or resp['o']['X'] == 'PARTIALLY_FILLED':\n self.trades = resp['o']\n #print(self.trades)\n #print(resp['o'])\n await task_queue.put(partial(self.pricer.manage_trade, self.trades, self.predictor.spread_quotes))\n \n async def execute_task(self, task_queue):\n while True:\n try:\n task = await task_queue.get()\n if asyncio.iscoroutinefunction(task.func):\n await task()\n else:\n task()\n task_queue.task_done()\n except Exception as e:\n print(traceback.format_exc())\n async def execute(self):\n while True:\n try:\n task_queue = asyncio.Queue()\n trade_queue = asyncio.Queue()\n update_ob_ref = asyncio.create_task(self.Update_orderbook(task_queue,self.config.REFERENCE_SYMBOL))\n #update_ob_target = asyncio.create_task(self.Update_orderbook(task_queue,self.config.TARGET_SYMBOL))\n #update_trade = asyncio.create_task(self.Update_Trade(trade_queue))\n tasks = []\n tasks.append(asyncio.create_task(self.execute_task(trade_queue)))\n for i in range(2):\n task = asyncio.create_task(self.execute_task(task_queue))\n tasks.append(task)\n await asyncio.gather(\n update_ob_ref, \n #update_trade,\n *tasks\n )\n # await task_queue.join()\n # await trade_queue.join()\n \n except Exception as e:\n #print(traceback.format_exc())\n self.log.info(\n e)\n update_ob_ref.cancel()\n #update_ob_target.cancel()\n #update_trade.cancel()\n for t in tasks:\n t.cancel()\n #continue\n def simulate(self,historical_kline):\n for k in historical_kline :\n self.predictor.update_spreads(k)\n self.predictor.simulate_get_target_spread_price(k)\n self.predictor.plot_kline()\n \n \n\n\n","repo_name":"quantallen/crypto-keltner","sub_path":"module/spreader.py","file_name":"spreader.py","file_ext":"py","file_size_in_byte":5160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10167278859","text":"def extract_data():\n with open(\"inputs/input_7.txt\", \"r\") as f:\n return [int(x) for x in f.readlines()[0].replace(\"\\n\", '').split(\",\")]\n \n\ndef determine_best_position(crab_positions: list[int]):\n fuel_per_position = []\n for i in range(max(crab_positions) + 1):\n fuel_used = 0\n for j in range(len(crab_positions)):\n fuel_used += sum(range(abs(crab_positions[j] - i) + 1))\n fuel_per_position.append(fuel_used)\n return min(fuel_per_position)\n\n\nif __name__ == \"__main__\":\n crab_pos = extract_data()\n output = determine_best_position(crab_pos)\n print(output)\n","repo_name":"BenCorrigan1203/advent-of-code","sub_path":"2021/day_7.py","file_name":"day_7.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43935881815","text":"# -*- coding: utf-8 -*-\nimport xlrd\nimport uuid\n\n\nclass Student():\n def __init__(self, id, **kw):\n self.id = id\n for k, v in kw.items():\n 
setattr(self, k, v)\n\n def __str__(self):\n return '%s(id=%s,column1=%s,column2=%s,column3=%s,column4=%s)' \\\n % (\n self.__class__.__name__, self.id, self.column1, self.column2, self.column3,\n self.column4)\n\n\ndef read_excel(filename):\n # Open the file\n workbook = xlrd.open_workbook(filename, formatting_info=True)\n # Get all sheets\n print('All sheets:', workbook.sheet_names())\n\n sheet2 = workbook.sheet_by_index(0) # sheet indices start at 0.\n print(\"sheet2.merged_cells\", sheet2.merged_cells)\n rows_num = sheet2.nrows\n cols_num = sheet2.ncols\n\n for r in range(rows_num):\n # Entity holding one row of data\n entity_dict = {}\n for c in range(cols_num):\n cell_value = sheet2.row_values(r)[c]\n # print('Value at row %d, column %d: [%s]' % (r, c, sheet2.row_values(r)[c]))\n if (cell_value is None or cell_value == ''):\n cell_value = (get_merged_cells_value(sheet2, r, c))\n # Build the entity attribute key\n the_key = 'column' + str(c + 1);\n # Dynamically set each attribute value\n entity_dict[the_key] = cell_value\n entity_dict['id'] = getUUID()\n stu = Student(**entity_dict)\n print(stu)\n\n\ndef get_merged_cells(sheet):\n \"\"\"\n Get all merged cells, in the following format:\n [(4, 5, 2, 4), (5, 6, 2, 4), (1, 4, 3, 4)]\n (4, 5, 2, 4) means: rows from index 4 up to 5 (exclusive), columns from index 2 up to 4 (exclusive) form one merged cell\n :param sheet:\n :return:\n \"\"\"\n return sheet.merged_cells\n\n\ndef get_merged_cells_value(sheet, row_index, col_index):\n \"\"\"\n First check whether the given cell belongs to a merged cell;\n if it does, return the content of the merged cell\n :return:\n \"\"\"\n merged = get_merged_cells(sheet)\n # merged = sheet.merged_cells\n for (rlow, rhigh, clow, chigh) in merged:\n if rlow <= row_index < rhigh:\n if clow <= col_index < chigh:\n cell_value = sheet.cell_value(rlow, clow)\n # print('Cell [%d,%d] is part of a merged cell, value: [%s]' % (row_index, col_index, cell_value))\n return cell_value\n break\n return None\n\n\ndef getUUID():\n return uuid.uuid1().hex\n\n\nif __name__ == \"__main__\":\n read_excel('./test.xls')\n","repo_name":"showyouhappiness/Python_study","sub_path":"Excel/Excel文件合并单元格数据的分开读取.py","file_name":"Excel文件合并单元格数据的分开读取.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"24087536647","text":"import datetime as dt\n\n\nclass Calculator:\n \"\"\"Parent class for the money and calories calculators.\"\"\"\n def __init__(self, limit):\n self.limit = limit # Daily limit\n self.records = [] # List for storing records\n\n def add_record(self, record):\n \"\"\"Adds a record to the records list.\"\"\"\n self.records.append(record)\n\n def get_today_stats(self):\n \"\"\"Returns the total amount spent/eaten today.\"\"\"\n NOW = dt.date.today() # Current date\n sym_amount = sum(record.amount for record in self.records\n if record.date == NOW)\n return sym_amount # Sum of amount for today\n\n def get_week_stats(self):\n \"\"\"Returns the total amount spent/eaten over the past week.\"\"\"\n NOW = dt.date.today() # Current date\n week = NOW - dt.timedelta(weeks=1) # One-week window back from NOW\n result = sum(record.amount for record in self.records\n if week < record.date <= NOW)\n return result # Sum of amount for the week\n\n def get_left_amount(self):\n \"\"\"Returns the remaining allowance for today.\"\"\"\n return self.limit - self.get_today_stats()\n\n\nclass Record:\n \"\"\"Class for storing records. Holds an amount, a comment and a date.\"\"\"\n def __init__(self, amount, comment, date=None):\n NOW = dt.date.today() # Current date\n self.amount = amount\n self.comment = comment\n if date is not None:\n self.date = date = dt.datetime.strptime(date, '%d.%m.%Y').date()\n else:\n self.date = NOW\n\n\nclass CashCalculator(Calculator):\n \"\"\"Calculator for money spent.\"\"\"\n USD_RATE = 70.0 # USD exchange rate\n EURO_RATE = 85.0 # EUR exchange rate\n RUB_RATE = 1.0 # RUB exchange rate\n\n def get_today_cash_remained(self, currency='rub'):\n \"\"\"Takes a currency as an argument (rubles by default),\n then determines whether the limit has been reached and returns the remainder, if any.\n \"\"\"\n currencies = {\n 'rub': ('rub', self.RUB_RATE),\n 'eur': ('Euro', self.EURO_RATE),\n 'usd': ('USD', self.USD_RATE)\n }\n # Pick the needed entry from currencies:\n currency_name, currency_rate = currencies[currency]\n # Money remaining:\n left_amount = self.get_left_amount()\n # Convert the remainder into the requested currency:\n count_left_amount = abs((round(left_amount / currency_rate, 2)))\n response = (f'{count_left_amount} {currency_name}')\n if left_amount == 0:\n return 'No money left, hang in there'\n if left_amount < 0:\n return f'No money left, hang in there: your debt is {response}'\n return f'{response} left for today'\n\n\nclass CaloriesCalculator(Calculator):\n \"\"\"Calories calculator.\"\"\"\n def get_calories_remained(self):\n \"\"\"Determines whether today's calorie limit has been reached.\n Chooses the response depending on the result.\n \"\"\"\n left_amount = self.get_left_amount()\n if left_amount >= 0:\n return ('You can eat something else today, '\n f'but with a total calorie content of no more than {left_amount} kcal')\n return 'Stop eating!'\n","repo_name":"ggerasyanov/hw_python_oop","sub_path":"homework.py","file_name":"homework.py","file_ext":"py","file_size_in_byte":4091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"34810769795","text":"from __future__ import unicode_literals\n\nimport json\n\nfrom oslo_log import log\nimport requests\nfrom watcher_metering.load.loadable import ExternalOptConfig\nfrom watcher_metering.store.base import MetricsStoreClientBase\nfrom watcher_metering.store.base import MetricsStoreError\nfrom watcher_metering.store.utils.keystone import KeystoneClient\nfrom watcher_metering.store.utils.keystone import KeystoneError\n\nLOG = log.getLogger(__name__)\n\n\nclass CeilometerClient(MetricsStoreClientBase):\n \"\"\"Ceilometer client\"\"\"\n\n def __init__(self, auth_uri, admin_user,\n admin_password, admin_tenant_name):\n super(CeilometerClient, self).__init__()\n self._store_endpoint = None\n self.auth_uri = auth_uri\n self.admin_user = admin_user\n self.admin_password = admin_password\n self.admin_tenant_name = admin_tenant_name\n\n self.keystone_client = KeystoneClient(\n self.auth_uri, self.admin_user,\n self.admin_password, self.admin_tenant_name\n )\n self._ceilometer_uri = None\n\n @classmethod\n def get_name(cls):\n return \"ceilometer\"\n\n @classmethod\n def get_config_opts(cls):\n return [] # No need for store_endpoint in cfg\n\n @classmethod\n def get_external_opts_configs(cls):\n \"\"\"This store client requires some Keystone configuration options\n :return: The list of options relative to this store client\n :rtype: list of :class:`ExternalOptConfig` instances\n \"\"\"\n return [\n ExternalOptConfig(\n name=\"auth_uri\",\n module_str=\"keystoneclient.middleware.auth_token\",\n group=\"keystone_authtoken\"),\n 
ExternalOptConfig(\n name=\"admin_user\",\n module_str=\"keystoneclient.middleware.auth_token\",\n group=\"keystone_authtoken\"),\n ExternalOptConfig(\n name=\"admin_password\",\n module_str=\"keystoneclient.middleware.auth_token\",\n group=\"keystone_authtoken\"),\n ExternalOptConfig(\n name=\"admin_tenant_name\",\n module_str=\"keystoneclient.middleware.auth_token\",\n group=\"keystone_authtoken\"),\n ]\n\n @property\n def store_endpoint(self):\n \"\"\"Dynamically retrieved from Keystone\n :return: The Ceilometer endpoint\n :rtype: str\n \"\"\"\n # Kind of cache for logging purposes (avoids repeated calls)\n self._store_endpoint = self.keystone_client.ceilometer_uri\n return self._store_endpoint\n\n def connect(self):\n LOG.info(\"No need to connect: Stateless via HTTP.\")\n\n def disconnect(self):\n LOG.info(\"No need to disconnect: Stateless via HTTP.\")\n\n def _send(self, metric):\n is_successful = self.request_http_post(metric)\n if not is_successful:\n LOG.error(\n \"[Ceilometer] Could not deliver the message to the server.\"\n )\n raise MetricsStoreError(\"Could not deliver the message \"\n \"to the Ceilometer server.\")\n\n def send(self, metric):\n LOG.debug('Publishing metrics to `%s`', self._store_endpoint)\n try:\n self._send(metric)\n except MetricsStoreError as exc:\n LOG.warn('Unable to send metric `%r`', metric)\n LOG.exception(exc)\n\n def encode_data(self, metric):\n try:\n return json.dumps([\n {\n \"name\": metric[\"name\"],\n \"unit\": metric[\"unit\"],\n \"type\": metric[\"type\"],\n \"volume\": metric[\"value\"],\n \"host\": metric[\"host\"],\n \"user_id\": metric.get(\"user_id\", \"\"),\n \"project_id\": metric.get(\"project_id\", \"\"),\n \"resource_id\": metric.get(\"resource_id\", \"\"),\n \"resource_metadata\": metric[\"resource_metadata\"],\n \"timestamp\": metric[\"timestamp\"]\n }\n ])\n except KeystoneError as exc:\n LOG.exception(exc)\n\n def request_http_post(self, metric):\n try:\n token = self.keystone_client.token\n if not token:\n LOG.warning(\"token is empty!\")\n raise MetricsStoreError(\"Keystone token is empty!\")\n except KeyError as exc:\n LOG.exception(exc)\n raise MetricsStoreError(\"Could not get a token from Keystone!\")\n\n data = self.encode_data(metric)\n headers = {\n \"X-Auth-Token\": token,\n \"content-type\": \"application/json\",\n \"User-Agent\": \"metering-agent\"\n }\n\n response = requests.post(\n \"%s/%s\" % (self.store_endpoint, metric[\"name\"]),\n data=data,\n headers=headers,\n timeout=10\n )\n\n return response.status_code == requests.codes.ok\n","repo_name":"b-com/watcher-metering","sub_path":"watcher_metering/store/ceilometer.py","file_name":"ceilometer.py","file_ext":"py","file_size_in_byte":4950,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"18039259946","text":"# HW2:\n# create a function (generalMark) which receives 3 numbers from a dictionary\n\n# IN{\n# 'sem_1' : 9.0\n# 'sem_2' : 8.0\n# 'exam' : 9.0\n# }\n\n# OUT{\n# 'sem_1' : 9.0\n# 'sem_2' : 8.0\n# 'exam' : 9.0\n# 'gen' : 8.66\n# }\n\ndef generalMark(nota):\n sem_1 = nota['sem_1']\n sem_2 = nota['sem_2'] \n sem_3 = nota['sem_3']\n nota_general = (sem_1 + sem_2 + sem_3) / 3 # do math\n # nota['nota_general'] = round(nota_general, 2) # extract only after. last second num\n nota['nota_general'] = format(nota_general, '.2f') # extract only after. 
last second num\n\n return nota\n\nnota = {\n 'sem_1' : 9.0,\n 'sem_2' : 8.0,\n 'sem_3' : 9.0,\n}\n\nupdate_nota = generalMark(nota)\nprint(update_nota)\n\n# generalMark(nota) # second vatiant OUT\n# print(nota)\n","repo_name":"soft7it/Django","sub_path":"function/draw_data_in_dict/funct_examen_general.py","file_name":"funct_examen_general.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12989013535","text":"from sklearn.metrics import roc_curve, auc\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.svm import LinearSVC\nfrom sklearn.preprocessing import label_binarize\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\n\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder\n\ndiyabet = pd.read_csv(\"F:/YAZILIM MÜHENDİSLİĞİ/4.SINIF/Yapay Zeka/LAB/Ödevler/Python Dosyaları/diyabet.csv\")\n\nle = LabelEncoder().fit(diyabet.clas)\nlabs = le.transform(diyabet.clas)\nclasses = list(le.classes_)\n\nX=diyabet.drop([\"clas\"],axis=1)\ny=labs\n\ny = label_binarize(y, classes=[0,1])\nn_classes = 2\n\nfrom sklearn.preprocessing import StandardScaler\n\nsa = StandardScaler()\nX = sa.fit_transform(X)\n\n# shuffle and split training and test sets\nX_train, X_test, y_train, y_test =\\\n train_test_split(X, y, test_size=0.33, random_state=0)\n \nfrom tensorflow.keras.utils import to_categorical\ny_train = to_categorical(y_train)\ny_test = to_categorical(y_test)\n \nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\n\nmod = Sequential()\nmod.add(Dense(7.2,input_dim=8,activation=\"relu\"))\nmod.add(Dense(12,activation=\"relu\"))\nmod.add(Dense(2,activation=\"softmax\"))\nmod.summary()\n\nmod.compile(loss=\"binary_crossentropy\",optimizer=\"adam\",metrics=[\"accuracy\"])\nmod.fit(X_train,y_train,validation_data=(X_test,y_test),epochs=150)\n\n# classifier\nclf = OneVsRestClassifier(LinearSVC(random_state=0))\ny_score = clf.fit(X_train, y_train).decision_function(X_test)\n\n# Compute ROC curve and ROC area for each class\nfpr = dict()\ntpr = dict()\nroc_auc = dict()\nfor i in range(n_classes):\n fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])\n roc_auc[i] = auc(fpr[i], tpr[i])\n\n# Plot of a ROC curve for a specific class\nfor i in range(n_classes):\n plt.figure()\n plt.plot(fpr[i], tpr[i], label='ROC curve (area = %0.2f)' % roc_auc[i])\n plt.plot([0, 1], [0, 1], 'k--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('ROC example')\n plt.legend(loc=\"lower right\")\n plt.show()","repo_name":"MustafaGurbuz/ArtificialIntelligencePythonCodes","sub_path":"Uygulama-4_roc.py","file_name":"Uygulama-4_roc.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12406744275","text":"import getopt\r\nimport math\r\nimport numpy\r\nimport shift\r\nimport sys\r\nimport time\r\nimport array\r\nimport goodcbfs\r\nimport pandas\r\n\r\n\r\n#Get last price every 10 seconds\r\ndef data_Aquisition():\r\n\r\n #universe of all Dow Jones securities\r\n Dow30 = [\"MMM\",\"AXP\",\"AAPL\",\"BA\",\"CAT\",\"CVX\",\"CSCO\", \"KO\", \"DIS\", \"DWDP\",\r\n \"XOM\",\"GS\",\"HD\",\"IBM\",\"INTC\",\"JNJ\",\"JPM\",\"MCD\",\"MRK\",\"MSFT\",\"NKE\",\r\n \"PFE\",\"PG\",\"TRV\",\"UTX\",\"UNH\",\"VZ\", \"V\",\"WMT\",\"WBA\"]\r\n\r\n #Set up trader\r\n 
client_id_number = 2 # client ID number (test001 - test010)\r\n simulation_duration = 390 # duration of simulation (in minutes); 390 minutes for one complete trading day\r\n simulation_seconds = simulation_duration*60 # number of seconds in simulation\r\n client_id = f\"test{str(client_id_number).zfill(3)}\"\r\n trader = shift.Trader(client_id)\r\n trader.connect(\"initiator.cfg\", \"password\") #connect to SHIFT\r\n trader.subAllOrderBook() #to get bid and ask prices\r\n\r\n #list to fill with security prices\r\n allprices = []\r\n\r\n i = 0\r\n while i <= simulation_seconds:\r\n\r\n # list to fill with security prices for current iteration\r\n currentprices =[]\r\n\r\n #wait one second if i == 0\r\n if i == 0:\r\n time.sleep(1)\r\n if i % 3600 == 0: #print for every hour\r\n print(i/3600)\r\n\r\n currentprices.append(i) #add time to currentprices\r\n for j in range(0,len(Dow30)): #add all prices to currentprices\r\n currentprices.append((trader.getBestPrice(Dow30[j]).getBidPrice()+trader.getBestPrice(Dow30[j]).getAskPrice())/2)\r\n\r\n allprices.append(currentprices) #add currentprices/ iteration to allprices\r\n\r\n #wait 60 seconds and increment i before next iteration\r\n time.sleep(60)\r\n i += 60\r\n\r\n\r\n cols = [\"Second\",\"MMM\",\"AXP\",\"AAPL\",\"BA\",\"CAT\",\"CVX\",\"CSCO\", \"KO\", \"DIS\", \"DWDP\",\r\n \"XOM\",\"GS\",\"HD\",\"IBM\",\"INTC\",\"JNJ\",\"JPM\",\"MCD\",\"MRK\",\"MSFT\",\"NKE\",\r\n \"PFE\",\"PG\",\"TRV\",\"UTX\",\"UNH\",\"VZ\", \"V\",\"WMT\",\"WBA\"]\r\n\r\n #Send prices to csv file\r\n output = pandas.DataFrame(data = allprices,columns=cols)\r\n output.to_csv('Jan0318', sep='\\t')\r\n\r\n #disconnect trader\r\n trader.disconnect()\r\n return 0\r\n\r\ndata_Aquisition()","repo_name":"tuanhus93/Win-Win","sub_path":"GetData.py","file_name":"GetData.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71551605314","text":"from sys import stdin\nimport heapq\n\n\ninput = stdin.readline\n\nn = int(input())\nm = int(input())\n\nadj = [[] for _ in range(n+1)]\n\nfor _ in range(m):\n s,e,c = map(int, input().split())\n adj[s].append([c,e])\n adj[e].append([c,e])\n\nS, D = map(int, input().split())\nINF = float('inf')\n\ndist = [INF]*(n+1)\ndist[S] = 0\nh = adj[S]\nheapq.heapify(h)\n\nwhile h:\n w, next = heapq.heappop(h)\n \n if w < dist[next]:\n dist[next] = w\n for elem in adj[next]:\n heapq.heappush(h, [dist[next]+elem[0],elem[1]])\n\nprint(dist[D])","repo_name":"iamjaewhan/DS_ALGO_study","sub_path":"백준/solved/최소비용_1916.py","file_name":"최소비용_1916.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3238880016","text":"class Solution:\n def dailyTemperatures(self, T: List[int]) -> List[int]:\n if len(T) == 0:\n return[]\n elif len(T) == 1:\n return [0]\n stack = []\n res = []\n for i in range(len(T)-1, -1, -1):\n # print(stack, res)\n while len(stack) > 0 and T[stack[-1]] <= T[i]:\n stack.pop()\n if len(stack) == 0:\n res.insert(0, 0)\n else:\n res.insert(0, stack[-1] - i)\n \n stack.append(i)\n \n return res","repo_name":"chien-wei/LeetCode","sub_path":"0739_Daily_Temperatures.py","file_name":"0739_Daily_Temperatures.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38022748087","text":"import os\n\nfrom django.contrib.auth.models import User\nfrom django.db 
import models\n\nfrom djumandji.settings import MEDIA_COMPANY_IMAGE_DIR, MEDIA_SPECIALITY_IMAGE_DIR\n\n\nclass Specialty(models.Model):\n # – Код(code)\n # например, testing, gamedev\n # – Название(title)\n # – Картинка(picture)(пока\n # оставьте\n # пустой\n # строкой)\n class SpecialtyChoices(models.TextChoices):\n frontend = 'Фронтенд'\n backend = 'Бэкенд'\n gamedev = 'Геймдев'\n devops = 'Девопс'\n design = 'Дизайн'\n products = 'Продукты'\n management = 'Менеджмент'\n testing = 'Тестирование'\n\n code = models.CharField(max_length=32)\n title = models.CharField(max_length=32, choices=SpecialtyChoices.choices)\n picture = models.ImageField(upload_to=MEDIA_SPECIALITY_IMAGE_DIR, null=True)\n\n def __str__(self):\n return self.title\n\n def delete(self, using=None, keep_parents=False):\n self.picture.storage.delete(self.picture.path)\n super().delete(using, keep_parents)\n\n\ndef company_logo_path(instance, file):\n filename, file_extension = os.path.splitext(file)\n company_id = instance.id\n new_filename = 'logo' + str(company_id) + file_extension\n return os.path.join(MEDIA_COMPANY_IMAGE_DIR, new_filename)\n\n\nclass Company(models.Model):\n # – Название(name)\n # – Город(location)\n # – Логотипчик(logo)(пока\n # оставьте\n # пустой\n # строкой)\n # – Информация\n # о\n # компании(description)\n # – Количество\n # сотрудников(employee_count)\n name = models.CharField(max_length=32)\n location = models.CharField(max_length=32)\n logo = models.ImageField(upload_to=company_logo_path, null=True)\n description = models.TextField()\n employee_count = models.CharField(max_length=20)\n owner = models.ForeignKey(User, on_delete=models.CASCADE, related_name='company', default=None, null=True)\n\n def delete(self, using=None, keep_parents=False):\n self.logo.storage.delete(self.logo.path)\n super().delete(using, keep_parents)\n\n\nclass Vacancy(models.Model):\n # – Название\n # вакансии(title)\n # – Специализация(specialty) – связь\n # с\n # Specialty, укажите\n # related_name = \"vacancies\"\n # – Компания(company) – связь\n # с\n # Company, укажите\n # related_name = \"vacancies\"\n # – Навыки(skills)\n # – Текст(description)\n # – Зарплата\n # от(salary_min)\n # – Зарплата\n # до(salary_max)\n # – Опубликовано(published_at)\n\n title = models.CharField(max_length=64)\n specialty = models.ForeignKey(Specialty, on_delete=models.SET_NULL, related_name=\"vacancies\", null=True)\n company = models.ForeignKey(Company, on_delete=models.CASCADE, related_name=\"vacancies\")\n skills = models.TextField()\n description = models.TextField()\n salary_min = models.IntegerField()\n salary_max = models.IntegerField()\n published_at = models.DateField()\n\n\nclass Application(models.Model):\n # – Имя(written_username)\n # – Телефон(written_phone)\n # – Сопроводительное\n # письмо(written_cover_letter)\n # – Вакансия(vacancy) – связь\n # с\n # Vacancy, укажите\n # related_name = \"applications\"\n # – Пользователь(user) – свя��ь\n # с\n # User, укажите\n # related_name = \"applications\"\n written_username = models.CharField(max_length=128)\n written_phone = models.CharField(max_length=16)\n written_cover_letter = models.TextField()\n vacancy = models.ForeignKey(Vacancy, on_delete=models.CASCADE, related_name='applications')\n user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='applications')\n\n\nclass Resume(models.Model):\n # – Пользователь\n # – Имя(name)\n # – Фамилия(surname)\n # – Готовность\n # к\n # работе(status) – Не\n # ищу\n # работу – Рассматриваю\n # предложения – Ищу\n # 
работу\n # – Вознаграждение(salary)\n # – Специализация(specialty)\n # – Квалификация(grade) – Стажер – Джуниор – Миддл – Синьор — Лид\n # – Образование(education)\n # – Опыт\n # работы(experience)\n # – Портфолио(portfolio)\n class StatusChoices(models.TextChoices):\n not_active = 1, 'Не ищу'\n checking = 2, 'Рассматриваю'\n active = 3, 'Ищу работу'\n\n class GradeChoices(models.TextChoices):\n intern = 1, 'Стажер'\n junior = 2, 'Джуниор'\n middle = 3, 'Миддл'\n senior = 4, 'Синьор'\n lead = 5, 'Лид'\n\n user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='resume')\n name = models.CharField(max_length=100)\n surname = models.CharField(max_length=100)\n status = models.CharField(max_length=20, choices=StatusChoices.choices)\n salary = models.IntegerField()\n specialty = models.ForeignKey(Specialty, on_delete=models.SET_NULL, null=True, related_name='resumes')\n grade = models.CharField(max_length=30, choices=GradeChoices.choices)\n education = models.TextField()\n experience = models.TextField()\n","repo_name":"MVjimbo/djumandji","sub_path":"app_catalog/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24886507001","text":"from django.urls import include, re_path, path\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\nfrom . import views\n\nresult_patterns=[\n re_path(r'^get_page/$',views.get_page),\n path('get_page/',views.get_page),\n re_path(r'^a11y_task/$',views.a11y_task),\n re_path(r'^a11y_task/(?P[\\w\\.]+)/(?P[\\w\\.]+|)$',views.a11y_task),\n re_path(r'^post_to_accessibility/$',views.post_to_accessibility),\n re_path(r'^get_accessibility_result/$',views.get_accessibility_result)\n]\nresult_patterns=format_suffix_patterns(result_patterns)","repo_name":"vmware-samples/accessibility-visual-report","sub_path":"backend/result/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70901067073","text":"import os\n\nimport numpy as np\nfrom tqdm import tqdm, trange\nimport tensorflow as tf\n\nfrom tf.comparator.compare_sequential import SequentialComparator\nfrom tf.data.synthetic.data_generator import DataGenerator\nfrom tf.experiments.synthetic.experiments_parameters import load_long_experiment, load_quick_experiment\n\ntf.enable_eager_execution()\n\nrseed, D, K, L, batch_size, validation_size, n_epochs, n_train, n_validation = load_long_experiment()\n\nlambda_array = [0.001, 0.01, 0.1, 1., 10., 100., 1000.]\n\nfreq_train_loss = np.zeros((len(lambda_array), n_epochs))\nfreq_validation_loss = np.zeros((len(lambda_array), n_epochs))\nfreq_train_f_measure = np.zeros((len(lambda_array), n_epochs))\nfreq_validation_f_measure = np.zeros((len(lambda_array), n_epochs))\nfreq_time = np.zeros((len(lambda_array), n_epochs))\n\nsh_bayes_train_loss = np.zeros((len(lambda_array), n_epochs))\nsh_bayes_validation_loss = np.zeros((len(lambda_array), n_epochs))\nsh_bayes_train_f_measure = np.zeros((len(lambda_array), n_epochs))\nsh_bayes_validation_f_measure = np.zeros((len(lambda_array), n_epochs))\nsh_bayes_time = np.zeros((len(lambda_array), n_epochs))\n\nista_train_loss = np.zeros((len(lambda_array), n_epochs))\nista_validation_loss = np.zeros((len(lambda_array), n_epochs))\nista_train_f_measure = np.zeros((len(lambda_array), n_epochs))\nista_validation_f_measure = np.zeros((len(lambda_array), 
n_epochs))\n\nfista_train_loss = np.zeros((len(lambda_array), n_epochs))\nfista_validation_loss = np.zeros((len(lambda_array), n_epochs))\nfista_train_f_measure = np.zeros((len(lambda_array), n_epochs))\nfista_validation_f_measure = np.zeros((len(lambda_array), n_epochs))\n\n\nfor rseed in range(10):\n np.random.seed(rseed)\n tf.random.set_random_seed(rseed)\n data_generator = DataGenerator(D, K)\n beta_train, y_train, _ = data_generator.new_sample(batch_size)\n train_data = tf.data.Dataset.from_tensor_slices((beta_train, y_train)).shuffle(10).batch(batch_size=batch_size)\n\n beta_validation, y_validation, _ = data_generator.new_sample(batch_size)\n\n for i, lam in enumerate(tqdm(lambda_array)):\n\n comparator = SequentialComparator(D, K, L, learning_rate=0.0001, X=data_generator.X, train_freq=True,\n train_shared_bayes=True, use_ista=True, use_fista=True, save_history=False,\n initial_lambda=lam)\n for _ in trange(n_epochs):\n for i, (beta_batch, y_batch) in enumerate(train_data):\n comparator.train_iteration(beta=beta_batch, y=y_batch)\n\n freq_train_loss[i] = comparator.recorders['lista'].train_loss\n freq_validation_loss[i] = comparator.recorders['lista'].validation_loss\n freq_train_f_measure[i] = comparator.recorders['lista'].train_f_meas\n freq_validation_f_measure[i] = comparator.recorders['lista'].validation_f_meas\n freq_time = comparator.recorders['lista'].time\n\n sh_bayes_train_loss[i] = comparator.recorders['shared_bayes'].train_loss\n sh_bayes_validation_loss[i] = comparator.recorders['shared_bayes'].validation_loss\n sh_bayes_train_f_measure[i] = comparator.recorders['shared_bayes'].train_f_meas\n sh_bayes_validation_f_measure[i] = comparator.recorders['shared_bayes'].validation_f_meas\n sh_bayes_time = comparator.recorders['shared_bayes'].time\n\n ista_train_loss[i] = comparator.recorders['ista'].train_loss\n ista_validation_loss[i] = comparator.recorders['ista'].validation_loss\n ista_train_f_measure[i] = comparator.recorders['ista'].train_f_meas\n ista_validation_f_measure[i] = comparator.recorders['ista'].validation_f_meas\n\n fista_train_loss[i] = comparator.recorders['fista'].train_loss\n fista_validation_loss[i] = comparator.recorders['fista'].validation_loss\n fista_train_f_measure[i] = comparator.recorders['fista'].train_f_meas\n fista_validation_f_measure[i] = comparator.recorders['fista'].validation_f_meas\n\n path_name = '{}/'.format(rseed)\n if not os.path.exists(path_name):\n os.makedirs(path_name)\n file_name = path_name + 'lambda_measures'\n np.savez(file_name, freq_train_loss=freq_train_loss, freq_validation_loss=freq_validation_loss,\n freq_train_f_measure=freq_train_f_measure, freq_validation_f_measure=freq_validation_f_measure, freq_time=freq_time,\n sh_bayes_train_loss=sh_bayes_train_loss, sh_bayes_validation_loss=sh_bayes_validation_loss,\n sh_bayes_train_f_measure=sh_bayes_train_f_measure, sh_bayes_validation_f_measure=sh_bayes_validation_f_measure, sh_bayes_time=sh_bayes_time,\n ista_train_loss=ista_train_loss, ista_validation_loss=ista_validation_loss, ista_train_f_measure=ista_train_f_measure, ista_validation_f_measure=ista_validation_f_measure,\n fista_train_loss=fista_train_loss, fista_validation_loss=fista_validation_loss, fista_train_f_measure=fista_train_f_measure, 
fista_validation_f_measure=fista_validation_f_measure)\n\n\n","repo_name":"danilkuzin/BayesianLISTA","sub_path":"bayesian_lista_src/tf/experiments/synthetic/lambda_thr/run_long.py","file_name":"run_long.py","file_ext":"py","file_size_in_byte":4994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39831463675","text":"import hashlib\r\nimport random\r\n\r\nl = []\r\n\r\n\r\ndef CreateAnIdea():\r\n pk = \"\"\r\n x = (random.randint(1000000000000000, 9999999999999999))\r\n if x not in l:\r\n l.append(str(x))\r\n pk = x\r\n # print(x)\r\n else:\r\n print(\"old key\")\r\n\r\n class Node:\r\n def __init__(self, left, right, value: str, content, is_copied=False) -> None:\r\n self.left: Node = left\r\n self.right: Node = right\r\n self.value = value\r\n self.content = content\r\n self.is_copied = is_copied\r\n\r\n def hash(val: str) -> str:\r\n return hashlib.sha256(val.encode('utf-8')).hexdigest()\r\n\r\n def __str__(self):\r\n return (str(self.value))\r\n\r\n def copy(self):\r\n return Node(self.left, self.right, self.value, self.content, True)\r\n\r\n class Block:\r\n def __init__(self, prevhashVal, data, merkleRoot=0, hashVal=\"\"):\r\n self.hashVal = hashVal\r\n self.merkleroot = merkleRoot\r\n self.prevHashVal = prevhashVal\r\n self.data = data\r\n\r\n def calculateHash(self):\r\n hashObj = hashlib.sha256(self.data.encode())\r\n return hashObj.hexdigest()\r\n\r\n def retHashVal(self):\r\n return self.hashVal\r\n\r\n class Blockchain:\r\n prevHash = \"420\"\r\n\r\n def __init__(self):\r\n self.chain = []\r\n hashVal = hashlib.sha256(\"genesis block\".encode()).hexdigest()\r\n currBlock = Block(Blockchain.prevHash,\r\n \"genesis block\", 6464, hashVal)\r\n Blockchain.prevHash = hashVal\r\n self.chain.append(currBlock)\r\n\r\n def addBlock(self, data):\r\n\r\n currBlock = Block(Blockchain.prevHash, data)\r\n hashVal = currBlock.calculateHash()\r\n currBlock.hashVal = hashVal\r\n self.chain.append(currBlock)\r\n\r\n Blockchain.prevHash = hashVal\r\n\r\n def generateMerkleRoot(self):\r\n pass\r\n\r\n def verifyChain(self) -> bool:\r\n chainLength = len(self.chain)\r\n for i in range(1, chainLength):\r\n currBlock = self.chain[i]\r\n prevBlock = self.chain[i]\r\n if currBlock.hashVal != currBlock.calculateHash():\r\n return False\r\n if prevBlock.hashVal != currBlock.prevHashVal:\r\n return False\r\n\r\n return True\r\n\r\n def indexBlock(self, pos) -> Block:\r\n if 2 > pos > len(self.chain):\r\n return None\r\n return self.chain[pos - 1]\r\n\r\n def displayChain(self):\r\n\r\n chain = self.chain\r\n for i in range(len(chain)):\r\n # if i==0:\r\n # def create():\r\n # today = datetime.datetime.now()\r\n # db.collection('Minds').document('FarmEASY').set(\r\n # {\r\n\r\n # }\r\n # )\r\n if i == 0:\r\n print(\"Previous Hash Value:\", chain[i].prevHashVal)\r\n print(\"The private key is : \", pk)\r\n print(\"Current Hash Value:\", chain[i].hashVal)\r\n print(\"Data:\", chain[i].data)\r\n print(\"\\n\")\r\n else:\r\n print(\"Previous Hash Value:\", chain[i].prevHashVal)\r\n # print(\"Merkle Root:\", chain[i].merkleroot)\r\n print(\"Current Hash Value:\", chain[i].hashVal)\r\n print(\"Data:\", chain[i].data)\r\n print(\"\\n\")\r\n\r\n def generateListOfHashes(self):\r\n hashList = []\r\n for i in self.chain:\r\n hashList.append(i.retHashVal())\r\n return hashList\r\n\r\n myChain = Blockchain()\r\n b1 = input(\"Enter the idea\")\r\n myChain.addBlock(b1)\r\n b2 = input(\"Add the Abstract\")\r\n myChain.addBlock(b2)\r\n 
myChain.displayChain()\r\n\r\n\r\nCreateAnIdea()\r\n","repo_name":"rajadurai04/HiveMind","sub_path":"FinalCode.py","file_name":"FinalCode.py","file_ext":"py","file_size_in_byte":3990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73569218113","text":"import numpy as np\nfrom typing import Tuple\nfrom gym.envs.registration import register\n\nfrom highway_env import utils\nfrom customized_highway_env.envs.common.abstract_original_CBF import AbstractEnv_original_CBF\nfrom highway_env.road.road import RoadNetwork\nfrom customized_highway_env.road.road_customized import Road_original\nfrom highway_env.utils import near_split\nfrom customized_highway_env.vehicle.controller_customized import ControlledVehicle_original\nfrom customized_highway_env.vehicle.controller_customized import clone_MDPVehicle\nfrom customized_highway_env.vehicle.behavior_customized import no_input_IDMVehicle\n\n\nclass HighwayEnv_CBF_1(AbstractEnv_original_CBF):\n RIGHT_LANE_REWARD: float = 0.1\n \"\"\"The reward received when driving on the right-most lanes, linearly mapped to zero for other lanes.\"\"\"\n\n HIGH_SPEED_REWARD: float = 0.4 #\n \"\"\"The reward received when driving at full speed, linearly mapped to zero for lower speeds according to config[\"reward_speed_range\"].\"\"\"\n\n LANE_CHANGE_REWARD: float = 0\n \"\"\"The reward received at each lane change action.\"\"\"\n\n steps = 0\n\n @classmethod\n def default_config(cls) -> dict:\n config = super().default_config()\n config.update({\n \"observation\": {\n \"type\": \"Kinematics_original\",\n \"vehicles_count\": 7, # specific environment\n },\n \"action\": {\n \"type\": \"DiscreteMetaAction_original\",\n },\n \"lanes_count\": 4,\n \"vehicles_count\": 20,\n \"controlled_vehicles\": 1,\n \"other_vehicles_type\": \"customized_highway_env.vehicle.behavior_customized.IDMVehicle_original\",\n \"initial_lane_id\": None,\n \"duration\": 100, # we can double check if it is second\n \"ego_spacing\": 2,\n \"vehicles_density\": 1.5, ## this is something I changed\n \"collision_reward\": -1, # The reward received when colliding with a vehicle.\n \"reward_speed_range\": [20, 30],\n \"offroad_terminal\": False,\n \"simulation_frequency\": 10,\n \"policy_frequency\": 2\n })\n return config\n\n def _reset(self) -> None:\n self._create_road()\n self._create_vehicles()\n\n def _create_road(self) -> None:\n \"\"\"Create a road composed of straight adjacent lanes.\"\"\"\n self.road = Road_original(network=RoadNetwork.straight_road_network(self.config[\"lanes_count\"]),\n np_random=self.np_random, record_history=self.config[\"show_trajectories\"])\n\n self.road_clone = Road_original(network=RoadNetwork.straight_road_network(self.config[\"lanes_count\"]),\n np_random=self.np_random, record_history=self.config[\"show_trajectories\"])\n\n def _create_vehicles(self) -> None:\n \"\"\"Create some new random vehicles of a given type, and add them on the road.\"\"\"\n other_vehicles_type = utils.class_from_path(self.config[\"other_vehicles_type\"])\n other_per_controlled = near_split(self.config[\"vehicles_count\"], num_bins=self.config[\"controlled_vehicles\"])\n\n self.controlled_vehicles = []\n for others in other_per_controlled:\n controlled_vehicle = self.action_type.vehicle_class.create_random(\n self.road,\n speed=25,\n lane_id=self.config[\"initial_lane_id\"],\n spacing=self.config[\"ego_spacing\"]\n )\n self.controlled_vehicles.append(controlled_vehicle)\n self.road.vehicles.append(controlled_vehicle)\n\n for _ in 
range(others):\n self.road.vehicles.append(\n other_vehicles_type.create_random(self.road, spacing=1 / self.config[\"vehicles_density\"])\n )\n\n def _reward(self, action) -> float:\n\n neighbours = self.road.network.all_side_lanes(self.vehicle.lane_index)\n lane = self.vehicle.target_lane_index[2] if isinstance(self.vehicle, ControlledVehicle_original) \\\n else self.vehicle.lane_index[2]\n scaled_speed = utils.lmap(self.vehicle.speed, self.config[\"reward_speed_range\"], [0, 1])\n reward = \\\n + self.config[\"collision_reward\"] * self.vehicle.crashed \\\n + self.RIGHT_LANE_REWARD * lane / max(len(neighbours) - 1, 1) \\\n + self.HIGH_SPEED_REWARD * np.clip(scaled_speed, 0, 1)\n reward = utils.lmap(reward,\n [self.config[\"collision_reward\"], self.HIGH_SPEED_REWARD + self.RIGHT_LANE_REWARD],\n [0, 1])\n reward = 0 if not self.vehicle.on_road else reward\n return reward\n\n def _do_the_prediction(self):\n\n record_dict = dict() # initialize the information saver\n\n for action_clone in ['LANE_LEFT', 'IDLE', 'LANE_RIGHT', 'FASTER', 'SLOWER']:\n # build the controlled vehicles\n self.road_clone.vehicles = []\n self.controlled_vehicle_clone = clone_MDPVehicle.clone_from(self.road_clone, self.vehicle)\n self.controlled_vehicle_clone.act(action_clone)\n\n self.road_clone.vehicles.append(self.controlled_vehicle_clone)\n\n # find the current lane index\n record_dict[action_clone] = dict()\n record_dict[action_clone][\"controlled_vehicle\"] = []\n record_dict[action_clone][\"front_current\"] = []\n record_dict[action_clone][\"rear_current\"] = []\n record_dict[action_clone][\"front_target\"] = []\n record_dict[action_clone][\"rear_target\"] = []\n\n # record_dict[action_clone][\"controlled_vehicle\"] = self._simulate_clone_controlled_vehicles(controlled_vehicle_clone)\n\n original_vehicle_list = self.road.close_vehicles_to_CBF(self.vehicle, 180, 6, False)\n\n clone_vehicle_list = [no_input_IDMVehicle.clone_from(self.road_clone, i) for i in original_vehicle_list]\n\n self.road_clone.vehicles.extend(clone_vehicle_list)\n\n\n for index_j in range(int(self.config[\"simulation_frequency\"] // self.config[\"policy_frequency\"])):\n # print(self._get_clone_controlled_vehicle_info())\n # print(self.controlled_vehicle_clone.speed_index)\n record_dict[action_clone][\"controlled_vehicle\"].append(self._get_clone_controlled_vehicle_info())\n\n front_vehicle_c_clone, rear_vehicle_c_clone = self.road_clone.neighbour_vehicles(\n self.controlled_vehicle_clone, self.controlled_vehicle_clone.lane_index)\n\n record_dict[action_clone][\"front_current\"].append(\n self._get_clone_front_vehicles_info(front_vehicle_c_clone))\n record_dict[action_clone][\"rear_current\"].append(\n self._get_clone_rear_vehicles_info(rear_vehicle_c_clone))\n\n front_vehicle_t_clone, rear_vehicle_t_clone = self.road_clone.neighbour_vehicles(\n self.controlled_vehicle_clone, self.controlled_vehicle_clone.target_lane_index)\n\n record_dict[action_clone][\"front_target\"].append(\n self._get_clone_front_vehicles_info(front_vehicle_t_clone))\n record_dict[action_clone][\"rear_target\"].append(\n self._get_clone_rear_vehicles_info(rear_vehicle_t_clone))\n\n self.road_clone.act()\n self.road_clone.step_pred(1 / self.config[\"simulation_frequency\"]) # update the motion information\n\n # we also need to predict the states\n if index_j == int(self.config[\"simulation_frequency\"] // self.config[\"policy_frequency\"]) - 1:\n record_dict[action_clone][\"obs_pred\"] = self.observation_type.observe_CBF_clone()\n\n # del 
self.controlled_vehicle_clone\n\n return record_dict\n\n def _get_clone_front_vehicles_info(self, vehicle_clone):\n if vehicle_clone:\n return [vehicle_clone.position[0], vehicle_clone.speed * np.cos(vehicle_clone.heading)]\n else:\n return [self.controlled_vehicle_clone.position[0] + 100, 100]\n\n def _get_clone_rear_vehicles_info(self, vehicle_clone):\n if vehicle_clone:\n return [vehicle_clone.position[0], vehicle_clone.speed * np.cos(vehicle_clone.heading)]\n else:\n return [self.controlled_vehicle_clone.position[0] - 100, 0]\n\n def _get_clone_controlled_vehicle_info(self):\n beta = np.arctan(1 / 2 * np.tan(self.controlled_vehicle_clone.action['steering']))\n\n if self.controlled_vehicle_clone.lane_index[2] == self.controlled_vehicle_clone.target_lane_index[2]:\n status = 0\n elif self.controlled_vehicle_clone.lane_index[2] > self.controlled_vehicle_clone.target_lane_index[2]:\n status = -1\n else:\n status = 1\n\n return [self.controlled_vehicle_clone.position[0], self.controlled_vehicle_clone.position[1],\n self.controlled_vehicle_clone.speed, self.controlled_vehicle_clone.heading,\n self.controlled_vehicle_clone.action['acceleration'], beta, status,\n self.controlled_vehicle_clone.velocity[0],\n self.controlled_vehicle_clone.velocity[1]] # we also need to add the lane index\n\n def _is_terminal(self) -> bool:\n \"\"\"The episode is over if the ego vehicle crashed or the time is out.\"\"\"\n return self.vehicle.crashed or \\\n self.steps >= self.config[\"duration\"] or \\\n (self.config[\"offroad_terminal\"] and not self.vehicle.on_road)\n\n def _cost(self, action: int) -> float:\n \"\"\"The cost signal is the occurrence of collision.\"\"\"\n return float(self.vehicle.crashed)\n\n\nregister(\n id='highway_cbf-v0',\n entry_point='customized_highway_env.envs:HighwayEnv_CBF_1',\n)\n","repo_name":"FangjianLi/Safe_imitation_learning","sub_path":"customized_highway_env/envs/highway_env_custom_original_CBF.py","file_name":"highway_env_custom_original_CBF.py","file_ext":"py","file_size_in_byte":9804,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"41482900471","text":"\"\"\"Бот для проверки домашки.\"\"\"\nimport os\nimport sys\nimport time\nimport logging\nfrom http import HTTPStatus\n\nimport requests\nimport telegram\n\nfrom dotenv import load_dotenv\n\nfrom exceptions import (BotException, ExceptionNot200Error, ExceptionTelegram,\n ExceptionResponseError, ExceptionListEmpty,\n ExceptionNonInspectedError, ExceptionStatusUnknown)\n\nload_dotenv()\n\nPRACTICUM_TOKEN = os.getenv('PRACTICUM_TOKEN')\nTELEGRAM_TOKEN = os.getenv('TELEGRAM_TOKEN')\nTELEGRAM_CHAT_ID = os.getenv('TELEGRAM_CHAT_ID')\n\nRETRY_TIME = 600\nENDPOINT = 'https://practicum.yandex.ru/api/user_api/homework_statuses/'\nHEADERS = {'Authorization': f'OAuth {PRACTICUM_TOKEN}'}\n\nHOMEWORK_STATUSES = {\n 'approved': 'Работа проверена: ревьюеру всё понравилось. 
Ура!',\n 'reviewing': 'Работа взята на проверку ревьюером.',\n 'rejected': 'Работа проверена: у ревьюера есть замечания.'\n}\n\nlogging.basicConfig(\n level=logging.INFO,\n format='%(asctime)s - %(levelname)s - %(message)s - %(name)s'\n)\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\nstreamHandler = logging.StreamHandler(sys.stdout)\nlogger.addHandler(streamHandler)\nformatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nstreamHandler.setFormatter(formatter)\n\n\ndef send_message(bot, message):\n \"\"\"Отправляет сообщение в Telegram чат.\"\"\"\n try:\n logger.info('The bot started sending a message')\n bot.send_message(chat_id=TELEGRAM_CHAT_ID, text=message)\n except telegram.TelegramError:\n raise ExceptionTelegram\n else:\n logger.info('The bot did a great job in sending the message!')\n\n\ndef get_api_answer(current_timestamp):\n \"\"\"Делает запрос к единственному эндпоинту API-сервиса.\"\"\"\n timestamp = current_timestamp or int(time.time())\n params = {'from_date': timestamp}\n try:\n logger.info('Work has begun on the API request.')\n response = requests.get(ENDPOINT, headers=HEADERS, params=params)\n except requests.exceptions.RequestException as request_error:\n message = f'Код ответа API (RequestException): {request_error}'\n raise ExceptionNonInspectedError(message)\n if response.status_code != HTTPStatus.OK:\n message = 'The request page is unavailable! Repeat later!'\n raise ExceptionNot200Error(message)\n logger.info('Request completed successfully.')\n return response.json()\n\n\ndef check_response(response):\n \"\"\"\n Проверяет ответ API на корректность.\n В качестве параметра функция получает ответ API, приведенный к\n типам данных Python.\n Если ответ API соответствует ожиданиям, то функция должна вернуть список\n домашних работ (он может быть и пустым),\n доступный в ответе API по ключу 'homeworks'.\n \"\"\"\n if not (isinstance(response, dict)):\n message = 'Ответ сервера не является словарем!'\n raise TypeError(message)\n\n homeworks = response.get('homeworks')\n\n if 'homeworks' not in response:\n message = 'There is no \"homework\" key in the response'\n raise ExceptionResponseError(message)\n\n if not isinstance(homeworks, list):\n message = 'Домашка с сервера не является списком!'\n raise TypeError(message)\n\n return homeworks\n\n\ndef parse_status(homework):\n \"\"\"Извлекает из информации о конкретной.\n домашней работе статус этой работы.\n \"\"\"\n homework_status = homework.get('status')\n if homework_status not in HOMEWORK_STATUSES:\n message = 'Status hw unknown!'\n raise ExceptionStatusUnknown(message)\n homework_name = homework.get('homework_name')\n if 'homework_name' not in homework:\n message = 'Homework unknown!'\n raise ExceptionStatusUnknown(message)\n verdict = HOMEWORK_STATUSES[homework_status]\n return f'Изменился статус проверки работы \"{homework_name}\". 
{verdict}'\n\n\ndef check_tokens():\n \"\"\"Проверяет доступность переменных окружения.\n которые необходимы для работы программы.\n \"\"\"\n return all((PRACTICUM_TOKEN, TELEGRAM_TOKEN, TELEGRAM_CHAT_ID))\n\n\ndef main():\n \"\"\"Основная логика работы бота.\"\"\"\n message = ''\n logger.debug('start check tokens:')\n if not check_tokens():\n logger.critical('Tokens is not found!')\n message = 'The program has failed, there are no tokens!'\n sys.exit(message)\n logger.debug('tokens correct!')\n\n last_message = ''\n bot = telegram.Bot(token=TELEGRAM_TOKEN)\n\n while True:\n try:\n current_timestamp = int(time.time()) - RETRY_TIME\n\n response = get_api_answer(current_timestamp)\n logger.debug('get_api_answer is good.')\n\n homeworks = check_response(response)\n if homeworks:\n message = parse_status(homeworks[0])\n else:\n logger.info('Список работ пустой')\n\n if message != last_message:\n send_message(bot, message)\n last_message = message\n\n except ExceptionListEmpty as e:\n logger.info(str(e))\n\n except BotException as error:\n message = f'Ошибка в программе: {str(error)}'\n logger.exception(f'Error: {message}!!!')\n if message != last_message:\n send_message(bot, message)\n last_message = message\n finally:\n time.sleep(RETRY_TIME)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Voyager1744/homework_bot","sub_path":"homework.py","file_name":"homework.py","file_ext":"py","file_size_in_byte":6075,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"72011605955","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nЗадание 22.1d\n\nИзменить класс Topology из задания 22.1c\n\nДобавить метод add_link, который добавляет указанное соединение, если его еще\n нет в топологии.\nЕсли соединение существует, вывести сообщение \"Такое соединение существует\",\nЕсли одна из сторон есть в топологии, вывести сообщение\n\"Cоединение с одним из портов существует\"\n\n\nСоздание топологии\nIn [7]: t = Topology(topology_example)\n\nIn [8]: t.topology\nOut[8]:\n{('R1', 'Eth0/0'): ('SW1', 'Eth0/1'),\n ('R2', 'Eth0/0'): ('SW1', 'Eth0/2'),\n ('R2', 'Eth0/1'): ('SW2', 'Eth0/11'),\n ('R3', 'Eth0/0'): ('SW1', 'Eth0/3'),\n ('R3', 'Eth0/1'): ('R4', 'Eth0/0'),\n ('R3', 'Eth0/2'): ('R5', 'Eth0/0')}\n\nIn [9]: t.add_link(('R1', 'Eth0/4'), ('R7', 'Eth0/0'))\n\nIn [10]: t.topology\nOut[10]:\n{('R1', 'Eth0/0'): ('SW1', 'Eth0/1'),\n ('R1', 'Eth0/4'): ('R7', 'Eth0/0'),\n ('R2', 'Eth0/0'): ('SW1', 'Eth0/2'),\n ('R2', 'Eth0/1'): ('SW2', 'Eth0/11'),\n ('R3', 'Eth0/0'): ('SW1', 'Eth0/3'),\n ('R3', 'Eth0/1'): ('R4', 'Eth0/0'),\n ('R3', 'Eth0/2'): ('R5', 'Eth0/0')}\n\nIn [11]: t.add_link(('R1', 'Eth0/4'), ('R7', 'Eth0/0'))\nТакое соединение существует\n\nIn [12]: t.add_link(('R1', 'Eth0/4'), ('R7', 'Eth0/5'))\nCоединение с одним из портов существует\n\n\n\"\"\"\n\ntopology_example = {\n (\"R1\", \"Eth0/0\"): (\"SW1\", \"Eth0/1\"),\n (\"R2\", \"Eth0/0\"): (\"SW1\", \"Eth0/2\"),\n (\"R2\", \"Eth0/1\"): (\"SW2\", \"Eth0/11\"),\n (\"R3\", \"Eth0/0\"): (\"SW1\", \"Eth0/3\"),\n (\"R3\", \"Eth0/1\"): (\"R4\", \"Eth0/0\"),\n (\"R3\", \"Eth0/2\"): (\"R5\", \"Eth0/0\"),\n (\"SW1\", \"Eth0/1\"): (\"R1\", \"Eth0/0\"),\n (\"SW1\", \"Eth0/2\"): (\"R2\", \"Eth0/0\"),\n (\"SW1\", \"Eth0/3\"): (\"R3\", \"Eth0/0\"),\n}\n\n\nclass Topology:\n def __init__(self, topology_dict):\n self.topology = self._normalize(topology_dict)\n \n def _normalize(self, full_topology):\n clean_topology = {}\n \n for local_link, remote_link in full_topology.items():\n if clean_topology.get(remote_link) != 
local_link:\n clean_topology[local_link] = remote_link \n \n return clean_topology\n \n def delete_link(self, local_link, remote_link):\n if self.topology.get(local_link) and self.topology[local_link] == remote_link:\n del self.topology[local_link]\n elif self.topology.get(remote_link) and self.topology[remote_link] == local_link:\n del self.topology[remote_link]\n else:\n print(\"Такого соединения нет\")\n \n def delete_node(self, node_to_delete):\n not_exist = True\n \n for local_link, remote_link in self.topology.copy().items():\n if node_to_delete in local_link or node_to_delete in remote_link:\n del self.topology[local_link]\n not_exist = False\n if not_exist:\n print(\"Такого устройства нет\")\n \n def add_link(self, local_link, remote_link):\n if self._check_connection_exist(local_link, remote_link):\n print(self._link_exist_message)\n else:\n self.topology[local_link] = remote_link\n \n def _check_connection_exist(self, local_link, remote_link):\n self._link_exist_message = 'Cоединение с одним из портов существует'\n \n #Сначала проверяем нет ли совпадений вида:\n # - значение совпадает, но не совпадает ключ.\n for link in self.topology:\n first_value = (link != local_link and self.topology[link] == remote_link)\n second_value = (link != remote_link and self.topology[link] == local_link)\n \n if any([first_value, second_value]):\n return True\n \n #теперь проверяем именно ключи и возможное полное совпадение\n if self.topology.get(local_link):\n if self.topology[local_link] == remote_link:\n self._link_exist_message = 'Такое соединение существует'\n return True\n elif self.topology.get(remote_link):\n if self.topology[remote_link] == local_link:\n self._link_exist_message = 'Такое соединение существует'\n return True\n else:\n return False\n\n\"\"\"\nНатальино решение \n def add_link(self, src, dest):\n keys_and_values = self.topology.keys() | self.topology.values()\n if self.topology.get(src) == dest:\n print(\"Такое соединение существует\")\n elif src in keys_and_values or dest in keys_and_values:\n print(\"Cоединение с одним из портов существует\")\n else:\n self.topology[src] = dest\n\"\"\"\n","repo_name":"khakimyanov/new_pyneng_2021","sub_path":"exercises/22_oop_basics/task_22_1d.py","file_name":"task_22_1d.py","file_ext":"py","file_size_in_byte":5129,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20259545596","text":"# UVa 108 - Maximum Sum\n# https://onlinejudge.org/external/1/108.pdf\n# Reference: Competitive Programming 3, Halim & Halim, p. 
104\n# This problem is solved using an algorithm O(n^4)\n# The Python version is slower than the C++ version\n\n\ndef maximum_sum(n):\n\tnums = []\n\tk = 0\n\twhile k < n * n:\n\t\ttry:\n\t\t\tline = input()\n\t\texcept EOFError:\n\t\t\tbreak\n\t\tz = [int(x) for x in line.split()]\n\t\tk += len(z)\n\t\tnums += z\n\ta = [[0 for _ in range(n)] for _ in range(n)]\n\tk = 0\n\tfor i in range(n):\n\t\tfor j in range(n):\n\t\t\ta[i][j] = nums[k]\n\t\t\tk += 1\n\t\t\tif i > 0:\n\t\t\t\ta[i][j] += a[i - 1][j]\n\t\t\tif j > 0:\n\t\t\t\ta[i][j] += a[i][j - 1]\n\t\t\tif i > 0 and j > 0:\n\t\t\t\ta[i][j] -= a[i - 1][j - 1]\n\tmax_sub_rect = -127 * 100 * 100\n\tfor i in range(n):\n\t\tfor j in range(n):\n\t\t\tfor k in range(i, n):\n\t\t\t\tfor l in range(j, n):\n\t\t\t\t\tsub_rect = a[k][l]\n\t\t\t\t\tif i > 0:\n\t\t\t\t\t\tsub_rect -= a[i - 1][l]\n\t\t\t\t\tif j > 0:\n\t\t\t\t\t\tsub_rect -= a[k][j - 1]\n\t\t\t\t\tif i > 0 and j > 0:\n\t\t\t\t\t\tsub_rect += a[i - 1][j - 1]\n\t\t\t\t\tmax_sub_rect = max(max_sub_rect, sub_rect)\n\tprint(max_sub_rect)\n\nif __name__ == \"__main__\":\n\twhile True:\n\t\ttry:\n\t\t\tn = int(input())\n\t\texcept EOFError:\n\t\t\tbreak\n\t\tmaximum_sum(n)\n\n","repo_name":"eloyhz/competitive-programming","sub_path":"cpbook/3_problem_solving_paradigms/108_maximum_sum.py","file_name":"108_maximum_sum.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"75248070914","text":"class Solution:\n def maxProfit(self, prices: list) -> int:\n\n # choose a single day to buy one stock and choose a different day in the future to sell the stock\n if not prices:\n return 0\n\n min_price = prices[0]\n profit = 0\n for i in prices:\n if i < min_price:\n min_price = i\n\n profit = max(profit, i - min_price)\n\n return profit\n\ns = Solution()\nprint(s.maxProfit([7,1,5,3,6,4]))\n","repo_name":"casssie-zhang/LeetcodeNotes","sub_path":"stock/121.BestTimeToBuyAndSellStock.py","file_name":"121.BestTimeToBuyAndSellStock.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
{"seq_id":"15059824089","text":"from flask import Flask, render_template\nimport requests\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n return 'This app uses the Last.fm API ;)'\n\n@app.route('/artist/<artist>')\ndef get_artists(artist):\n # artist = artist\n url = 'http://ws.audioscrobbler.com/2.0/'\n lang = 'es'\n args = {\n 'method': 'artist.getinfo',\n 'artist': artist,\n 'api_key': '4e8c16304446243a2a3d4b365a98e791',\n 'format': 'json',\n 'autocorrect': '1',\n 'lang': lang\n }\n\n response = requests.get(url, params=args)\n print(response)\n print(response.url)\n\n if response.status_code == 200:\n content_json = response.json()\n content = response.content\n bio = content_json['artist']['bio']['summary']\n return(bio)\n\nif __name__ == \"__main__\":\n app.run(debug=True, port=7000)\n","repo_name":"IanCarloz/Code_dos","sub_path":"lastfm_api/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"4234812033","text":"# -*- coding: utf-8 -*-\nimport re\n\nfrom .base_template import BaseTemplate\n\n\nclass BreachForumsParser(BaseTemplate):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.parser_name = \"breachforums.com\"\n self.thread_name_pattern = re.compile(\n 
r'(\\d+).*html'\n )\n self.avatar_name_pattern = re.compile(r'.*/(\\w+\\.\\w+)')\n self.files = self.get_filtered_files(kwargs.get('files'))\n self.comments_xpath = '//div[@id=\"posts\"]/div[contains(@class, \"post\")]'\n self.header_xpath = '//div[@id=\"posts\"]/div[contains(@class, \"post\")]'\n self.date_xpath = 'div//span[@class=\"post_date\"]/text()'\n self.date_pattern = '%m-%d-%Y, %I:%M %p'\n self.author_xpath = 'div//div[@class=\"author_information\"]//span[@class=\"largetext\"]/a/span//text()'\n self.title_xpath = '//span[@class=\"crumbs\"]/span/a/text()'\n self.post_text_xpath = 'div//div[@class=\"post_body scaleimages\"]/text()'\n self.comment_block_xpath = 'div//div[@class=\"post_head\"]//a/text()'\n self.avatar_xpath = 'div//div[@class=\"author_avatar\"]/a/img/@src'\n\n # main function\n self.main()\n","repo_name":"ken2190/Enterprise-Forum-Scraper","sub_path":"templates/breachforums_template.py","file_name":"breachforums_template.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7781762775","text":"from redbird.repos import CSVFileRepo\n\nrepo = CSVFileRepo(filename=\"test.csv\",fieldnames=[\"name\",\"age\",\"height\"])\n\nrepo.add({'name':\"Daniel\",'age':\"31\",'height':\"187\"})\nrepo.add({'name':\"Florian\",'age':\"30\",'height':\"183\"})\nrepo.add({'name':\"Naomi\",'age':\"29\",'height':\"180\"})\n\nfor item in list(repo):\n print(item)\n\nrepo.filter_by(name='Naomi').update(age=22)\n\nfor item in list(repo):\n print(item)\n\nrepo.filter_by(name='Naomi').delete()","repo_name":"r3ap3rpy/redbird","sub_path":"csvrepo.py","file_name":"csvrepo.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"5867136317","text":"import json\r\nimport psycopg2\r\n\r\ncon = psycopg2.connect(user='mashyr_anna', password='1234', dbname='mashyr_anna_DB', host='localhost', port='5432')\r\nprint(type(con))\r\n\r\nTABLES = [\r\n 'age_rating',\r\n 'app',\r\n 'app_publisher',\r\n 'category']\r\n\r\ndata = {}\r\n\r\nwith con:\r\n cur = con.cursor()\r\n\r\n for table_name in TABLES:\r\n cur.execute('select * from ' + table_name)\r\n fields = [x[0] for x in cur.description]\r\n rows = []\r\n\r\n for row in cur:\r\n rows.append(dict(zip(fields, row)))\r\n\r\n data[table_name] = rows\r\n\r\nwith open('mashyr_all.json', 'w') as outfile:\r\n json.dump(data, outfile, default=str)\r\n","repo_name":"mashiranna/db_lab3_mashyr","sub_path":"export_json.py","file_name":"export_json.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39008931743","text":"import pytest\nfrom wumpus.world import World\n\ndef test_kill_wumpus_successful():\n world = World(mode=0)\n \n request_kill_pos = (1,0)\n assert world.kill_wumpus(request_kill_pos)\n \ndef test_sensor():\n world = World(mode=0)\n percept = world.sensor((2,0))\n assert percept[\"stench\"] == 1\n \n","repo_name":"mytnguyen26/wumpus-world","sub_path":"tests/test_world.py","file_name":"test_world.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23448274351","text":"import sys\r\nfrom math import sqrt, floor\r\nf = open(sys.argv[1], 'r')\r\n\r\nnumOfTest = int(f.readline())\r\n\r\nfor i in range(1, numOfTest + 1) :\r\n print(\"Case #\" + str(i) + \":\", end=' 
')\r\n \r\n # read test case\r\n tc = f.readline().split(' ')\r\n smax = int(tc[0])\r\n \r\n # check test case for row\r\n stand = 0\r\n friend = 0\r\n for i in range(0, smax + 1):\r\n if stand < i:\r\n friend += (i - stand)\r\n stand = i\r\n stand += int(tc[1][i])\r\n print(friend)\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_155/1175.py","file_name":"1175.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73924604995","text":"from fastapi import APIRouter\nfrom pydantic.main import BaseModel\nfrom sqlalchemy import func, select\n\nfrom app.core.database import SqlSession\nfrom app.model.domain import sqlm_keyboard_switch, KeyboardSwitch\n\nstats_router = APIRouter(prefix='/api/stats')\n\n@stats_router.get('/mkswitch')\nasync def stats_mks():\n pass\n\nclass CountBO(BaseModel):\n count: int=0\n manufacturer: str=''\n\n@stats_router.get('/manufacturer')\nasync def stats_manufacturer():\n with SqlSession() as session:\n list = session.fetchall(\n select(sqlm_keyboard_switch.c.manufacturer, func.count(sqlm_keyboard_switch.c.name).label('count'))\n .where(sqlm_keyboard_switch.c.deleted==0)\n .group_by('manufacturer'),\n CountBO\n )\n return list\n\ndef count_stash():\n with SqlSession() as session:\n list = session.fetchall(\n select(sqlm_keyboard_switch.c.variation, sqlm_keyboard_switch.c.name, sqlm_keyboard_switch.c.stash)\n .where(sqlm_keyboard_switch.c.deleted==0),\n KeyboardSwitch\n )\n result = {}\n for item in list:\n _len = item.variation.strip().split(' ').__len__()\n if result.keys().__contains__(item.stash):\n result[item.stash] += _len\n else:\n result[item.stash] = _len\n return result","repo_name":"peipei1024/switches.keyboard","sub_path":"app/web/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38523679826","text":"from deposit.datasource.abstract_datasource import AbstractDatasource\nfrom deposit.store.dresource import DResource\n\nfrom deposit.utils.fnc_files import (as_url, url_to_path, copy_resources)\nfrom deposit.utils.fnc_serialize import (legacy_data_to_store)\n\nimport datetime, time\nimport shutil\nimport sys\nimport os\n\nclass AbstractFileSource(AbstractDatasource):\n\t\n\tEXTENSION = \"file\"\n\t\n\tdef __init__(self):\n\t\t\n\t\tAbstractDatasource.__init__(self)\n\t\t\n\t\tself._path = None\n\t\tself._progress = None\n\t\n\tdef to_dict(self):\n\t\t\n\t\tdata = AbstractDatasource.to_dict(self)\n\t\tdata[\"url\"] = self.get_url()\n\t\t\n\t\treturn data\n\t\n\tdef get_name(self):\n\t\t\n\t\tname = self.get_url()\n\t\tif name is not None:\n\t\t\tname = os.path.basename(name)\n\t\t\tif name:\n\t\t\t\treturn os.path.splitext(name)[0]\n\t\treturn \"file\"\n\t\n\tdef is_valid(self):\n\t\t\n\t\treturn os.path.isfile(self._path)\n\t\n\tdef can_create(self):\n\t\t# return error / warning code\n\t\t\n\t\tpath = self.get_path()\n\t\tif path is None:\n\t\t\treturn False\n\t\t\n\t\treturn True\n\t\n\tdef get_folder(self):\n\t\t\n\t\tif self._path is not None:\n\t\t\treturn os.path.dirname(self._path)\n\t\treturn None\n\t\n\tdef get_url(self):\n\t\t\n\t\tif self._path is not None:\n\t\t\treturn as_url(self._path)\n\t\treturn None\n\t\n\tdef get_path(self):\n\t\t\n\t\treturn self._path\n\t\n\tdef set_path(self, path):\n\t\t\n\t\tself._path = os.path.normpath(os.path.abspath(path))\n\t\n\tdef 
create(self):\n\t\t\n\t\tfolder = self.get_folder()\n\t\tif folder is None:\n\t\t\treturn False\n\t\t\n\t\tif not os.path.isdir(folder):\n\t\t\ttry:\n\t\t\t\tos.makedirs(folder, exist_ok = True)\n\t\t\texcept:\n\t\t\t\treturn False\n\t\t\n\t\treturn True\n\t\n\tdef save_data(self, store, resources, path):\n\t\t# re-implement\n\t\t\n\t\treturn {}\n\t\n\tdef backup(self, store, folder):\n\t\t\n\t\ttgt_file, ext = os.path.splitext(os.path.split(self._path)[-1])\n\t\ttgt_file = \"%s_%s\" % (\n\t\t\ttgt_file, \n\t\t\tdatetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d'),\n\t\t)\n\t\tn = 1\n\t\twhile True:\n\t\t\ttgt_path = os.path.join(folder, \"%s_%d%s\" % (tgt_file, n, ext))\n\t\t\tif not os.path.isfile(tgt_path):\n\t\t\t\tbreak\n\t\t\tn += 1\n\t\tshutil.copy2(self._path, tgt_path)\n\t\n\tdef save(self, store, progress = None, path = None, url = None, *args, **kwargs):\n\t\t\n\t\tself.set_progress(progress)\n\t\t\n\t\tif (path is None) and (url is not None):\n\t\t\tpath = url_to_path(url)\n\t\t\n\t\tif path == self.get_path():\n\t\t\tpath = None\n\t\t\n\t\tif path is not None:\n\t\t\tself.set_path(path)\n\t\t\tif not self.create():\n\t\t\t\treturn False\n\t\t\n\t\tpath = self.get_path()\n\t\tpath, ext = os.path.splitext(path)\n\t\tif ext.lower() != \".%s\" % (self.EXTENSION):\n\t\t\text = \".%s\" % (self.EXTENSION)\n\t\tpath = path + ext\n\t\t\n\t\t# copy all is_stored resources to new folder (if not already there)\n\t\tsrc_folder = store.get_folder()\n\t\tif store.has_local_folder():\n\t\t\tdst_folder = src_folder\n\t\telse:\n\t\t\tdst_folder = os.path.normpath(os.path.abspath(os.path.dirname(path)))\n\t\t\n\t\tresources = {}\n\t\tif dst_folder == src_folder:\n\t\t\tresources = store._resources\n\t\t\n\t\telif store._resources:\n\t\t\tcmax = len(store._resources)\n\t\t\tcnt = 1\n\t\t\tself.update_progress(cnt, cmax, text = \"Copying files\")\n\t\t\tresources = copy_resources(\n\t\t\t\tstore._resources, src_folder, dst_folder, progress = self._progress\n\t\t\t)\n\t\t\tif not resources:\n\t\t\t\treturn False\n\t\t\n\t\treturn self.save_data(store, resources, path)\n\t\n\tdef load_data(self, path):\n\t\t# re-implement\n\t\t\n\t\treturn {}\n\t\n\tdef load(self, store, progress = None, path = None, url = None, *args, **kwargs):\n\t\t\n\t\tself.set_progress(progress)\n\t\t\n\t\tif (path is None) and (url is not None):\n\t\t\tpath = url_to_path(url)\n\t\t\n\t\tif path == self.get_path():\n\t\t\tpath = None\n\t\t\n\t\tif path is not None:\n\t\t\tself.set_path(path)\n\t\t\tif not self.is_valid():\n\t\t\t\treturn False\n\t\t\n\t\tpath = self.get_path()\n\t\tif path is None:\n\t\t\tstore.callback_error(\"LOAD ERROR: Path not specified\")\n\t\t\treturn False\n\t\t\n\t\ttry:\n\t\t\tdata = self.load_data(path)\n\t\texcept:\n\t\t\t_, exc_value, _ = sys.exc_info()\n\t\t\tstore.callback_error(\"LOAD ERROR: %s\" % (str(exc_value)))\n\t\t\treturn False\n\t\t\n\t\tif \"classes\" in data:\n\t\t\tif not legacy_data_to_store(data, store, path, progress = self._progress):\n\t\t\t\tstore.callback_error(\"LOAD ERROR: Invalid legacy file format\")\n\t\t\t\treturn False\n\t\t\n\t\telse:\n\t\t\tfor name in [\"object_relation_graph\", \"class_relation_graph\", \"class_membership_graph\", \n\t\t\t\t\"local_folder\", \"max_order\", \"deposit_version\", \"user_tools\", \"queries\",\n\t\t\t]:\n\t\t\t\tif name not in data:\n\t\t\t\t\tstore.callback_error(\"LOAD ERROR: Invalid file format\")\n\t\t\t\t\treturn False\n\t\t\t\n\t\t\tif not self.data_to_store(data, store):\n\t\t\t\tstore.callback_error(\"LOAD ERROR: 
Loading data\")\n\t\t\t\treturn False\n\t\t\n\t\tstore.set_datasource(self)\n\t\t\n\t\treturn True\n\t\n\tdef data_to_store(self, data, store):\n\t\t# re-implement\n\t\t\n\t\treturn False\n","repo_name":"demjanp/deposit","sub_path":"src/deposit/datasource/abstract_filesource.py","file_name":"abstract_filesource.py","file_ext":"py","file_size_in_byte":4562,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"13001492445","text":"import logging\nfrom django.core.management import BaseCommand\nfrom pyon.core.cache import cache\nlog = logging.getLogger(__name__)\n\n\nclass Command(BaseCommand):\n args = ''\n help = 'Clears the specific caches'\n\n def handle(self, *args, **options):\n if 'all' in args or args is ():\n self.clear_all()\n else:\n log.info(\"Clearing {}\".format(args))\n cache.delete_many(args)\n\n def clear_all(self):\n log.info(\"Clearing entire cache\")\n cache.clear()\n","repo_name":"ShaneDrury/pyon","sub_path":"pyon/core/management/commands/clearcache.py","file_name":"clearcache.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"16075102647","text":"from tkinter import Canvas, Tk\n\nfrom matplotlib import pyplot\nfrom scipy.cluster import hierarchy\nfrom sklearn.cluster import AgglomerativeClustering\n\nfrom readers import ColourList, read_dataset\n\n\ndef dendrogram(colours: ColourList):\n pyplot.figure(figsize=(12, 12))\n pyplot.title(\"Dendrograms\")\n\n hierarchy.dendrogram(hierarchy.linkage(colours, method=\"ward\"))\n\n pyplot.show()\n\n\ndef perform_clustering(number: int, colours: ColourList):\n clustering = AgglomerativeClustering(\n n_clusters=number,\n affinity=\"euclidean\",\n linkage=\"ward\",\n )\n return clustering.fit_predict(colours)\n\n\ndef build_colorgram(number: int, colours: ColourList):\n \"\"\"\n Build a decent-looking colored strips representing set of colours\n described by a separate term\n :param number:\n :param colours:\n :return:\n \"\"\"\n clusters = perform_clustering(number, colours)\n root = Tk()\n root.attributes(\"-fullscreen\", True)\n canvas = Canvas(root, width=1400, height=840, bg=\"white\")\n canvas.pack()\n\n clusters = list(zip(clusters, colours))\n result = [\n [elem[1] for elem in filter(lambda a: a[0] == num, clusters)]\n for num in {duo[0] for duo in clusters}\n ]\n\n clusters = result\n for cluster in result:\n for i, elem in enumerate(cluster):\n cluster[i] = f\"#{int(elem[0]):02X}{int(elem[1]):02X}{int(elem[2]):02X}\"\n\n height = int(700 / (number + 2))\n startHeightIndent = 30\n for i, cluster in enumerate(clusters):\n width = len(cluster) < 1300\n for j, elem in enumerate(cluster):\n canvas.create_rectangle(\n j + 30 if width else j + 5,\n (i * int(height / 7)) + startHeightIndent,\n j + 40 if width else j + 6,\n (i * int(height / 7)) + startHeightIndent + height,\n width=0,\n fill=elem,\n )\n startHeightIndent += height\n\n root.mainloop()\n\n\ndef main(load_dataset_from: str):\n colours = read_dataset(load_dataset_from)\n dendrogram(colours)\n for i in range(2, 8):\n build_colorgram(i, colours)\n\n\nif __name__ == \"__main__\":\n main(\"colorSet.csv\")\n","repo_name":"avillia/oinopsPontos","sub_path":"colourPerception.py","file_name":"colourPerception.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73779295554","text":"from debug import Debug as D\n\nd = 
D(level=4, color=True)\nd.info('this is info')\nd.log('this is log')\nd.debug('this is debug')\nd.error('this is error')\n\nd.info('='*50)\n\nd.log('print multiple item =>', 1, 'string', {'dict': True})\n\n\n","repo_name":"kilfu0701/py_tools","sub_path":"debug/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73656391555","text":"# advent code day 1\r\n# meike\r\n\r\n\r\nf=open(\"input.txt\",'r') # open file to read in input\r\nline = f.readline() #read first line\r\ni=0\r\nnewelf=0\r\ncalories=[]\r\ncalories.append(0)\r\nmax=[0,0,0]\r\nwhile line: #read in file per line\r\n    if any(chr.isdigit() for chr in line): #if line contains number add this to calories\r\n        if(newelf==1):#if previous line was empty add to new elf\r\n            calories.append(int(line))\r\n        else:#if previous line was not empty add to same elf\r\n            calories[i]+=int(line)\r\n        newelf=0\r\n    else:#if line is empty start new elf and check if last added elf has top 3 maximum calories\r\n        j=0\r\n        for m in max: #check if number of calories last added elf falls within top 3\r\n            if (m \" + now_result)\n\n    count = 0\n    if ans <= 10:\n        if shot >= 1:\n            print(\"=====> BET XIU\")\n        time_dat = 65\n        if click_xiu.nav_to_image() == 0:\n            count += 1\n        \n        if click_money.nav_to_image() == 0:\n            count += 1\n        \n        if click_ok.nav_to_image() == 0:\n            count += 1\n        \n    else:\n        if shot >= 1:\n            print(\"=====> BET TAI\")\n        time_dat = 65\n        if click_tai.nav_to_image() == 0:\n            count += 1\n        \n        if click_money.nav_to_image() == 0:\n            count += 1\n\n        if click_ok.nav_to_image() == 0:\n            count += 1\n\n    \n    \n    time.sleep(time_dat)\n\n    if pt.pixel(1792, 877)[2] >= 225 and pt.pixel(1792, 877)[2] <= 255:\n        current_result = \"TAI\"\n    if pt.pixel(1792, 877)[2] >= 169 and pt.pixel(1792, 877)[2] <= 196:\n        current_result = \"XIU\"\n\n    print(\"RESULT: \", current_result)\n    if now_result == current_result:\n        shot += 1\n        total += 1\n        print(\"===== WIN =====\")\n    else:\n        print(\"===== LOSE =====\")\n        shot = 0\n    print(\"===== WIN RATE: \" + str(float(total / round) * 100) + \"% =====\")\n    print(\"~\")\n\n\n\n\n\n\n\n\n    \n","repo_name":"NicolasWillyam/autoplaytx","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3265,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"11457308775","text":"def setup():\n    global masscan_rate\n    dbFile = None\n    credentials = None\n    discordToken = None\n    webPort = None\n\n    dbType = input(\"Enter the database type (postgres, sqlite): \")\n    if dbType == \"postgres\":\n        credentials = {\n            'host': input(\"Enter the host: \"),\n            'port': input(\"Enter the port: \"),\n            'database': input(\"Enter the database name: \"),\n            'user': input(\"Enter the username: \"),\n            'password': input(\"Enter the password: \")\n        }\n    if dbType == \"sqlite\":\n        dbFile = input(\"Enter the database file name: \")\n\n    discordBool = input(\"Do you want to enable discord bot? (y/n): \")\n    if discordBool == \"y\":\n        discordToken = input(\"Enter the discord bot token: \")\n\n    webBool = input(\"Do you want to enable web server? 
(y/n): \")\n if webBool == \"y\":\n webPort = input(\"Enter the web server port: \")\n\n scanning_method = input(\"Enter the scanning method (masscan, qubo): \")\n if scanning_method == \"masscan\":\n masscan_rate = input(\"Enter the masscan rate (default 1500): \")\n if masscan_rate == \"\":\n masscan_rate = 1500\n\n with open(\".env\", \"w\") as f:\n f.write(f\"dbType={dbType}\\n\")\n if credentials:\n f.write(f\"credentials={credentials}\\n\")\n if dbFile:\n f.write(f\"dbFile={dbFile}\\n\")\n if discordBool == \"y\":\n f.write(f\"discordToken={discordToken}\\n\")\n if webBool == \"y\":\n f.write(f\"webPort={webPort}\\n\")\n f.write(f\"scanning_method={scanning_method}\\n\")\n if scanning_method == \"masscan\":\n f.write(f\"masscan_rate={masscan_rate}\\n\")\n\n\nif __name__ == \"__main__\":\n setup()\n","repo_name":"virus-rpi/SpaceInvader","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"7346956490","text":"def solution(my_string):\n li = [1]*53\n answer = ''\n for i in my_string:\n if i == \" \":\n n = 52\n elif i.isupper():\n n = ord(i) - ord(\"A\") + 26\n else:\n n = ord(i) - ord(\"a\")\n if li[n]:\n answer += i\n li[n] = 0\n return answer","repo_name":"soulchicken/crush-programmers-cote","sub_path":"Python/Level_0/53_중복된 문자 제거.py","file_name":"53_중복된 문자 제거.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34742637409","text":"from common.helpers.common_dependency_helper import register_common_mox_dependencies\nfrom common.utilities.inversion_of_control import dependencies, Dependency\nfrom core.common.data_access_helpers import find_raw_helper\nfrom core.service.svc_master_data_storage.implementation.entity_hierarchy_creator import EntityHierarchyCreator\nfrom core.common.utilities.errors import BadRequestError\nfrom core.common.utilities.helpers import generate_id\nimport mox\nimport pprint\n\n\n__author__ = 'vgold'\n\n\nclass EntityHierarchyCreatorTests(mox.MoxTestBase):\n\n def setUp(self):\n\n # call parent set up\n super(EntityHierarchyCreatorTests, self).setUp()\n\n # register mock dependencies\n register_common_mox_dependencies(self.mox)\n\n # Set mock attributes on instance for calls to record\n self.mock = self.mox.CreateMock(EntityHierarchyCreator)\n self.mock.db = self.mox.CreateMockAnything()\n self.mock.find_raw_helper = self.mox.CreateMockAnything()\n self.mock.time_interval_helper = self.mox.CreateMockAnything()\n\n # Set mock attributes on WorkflowService instance for calls to ignore\n self.mock.cfg = Dependency(\"MoxConfig\").value\n self.mock.logger = Dependency(\"FlaskLogger\").value\n\n def doCleanups(self):\n\n super(EntityHierarchyCreatorTests, self).doCleanups()\n dependencies.clear()\n\n ##########################################################################\n # EntityHierarchyCreator._process_params()\n\n def test_process_params(self):\n\n ehc = EntityHierarchyCreator.__new__(EntityHierarchyCreator)\n\n correct_params = {\n \"root_query\": {},\n \"entity_fields\": [\"_id\"],\n \"options\": {\n \"return_format\": \"list\"\n }\n }\n\n correct_params2 = dict(correct_params, options={})\n correct_params3 = dict(correct_params, options={\"return_format\": \"asdf\"})\n correct_params4 = {\n \"root_query\": {},\n \"options\": {}\n }\n correct_params5 = {\n \"root_query\": {},\n \"link_filters\": [\"asdf\", 
\"asdf\", \"asdf\", {}],\n \"entity_fields\": [\"_id\"]\n }\n\n correct_inputs = {\n \"entity_type\": \"entity_type\",\n \"params\": correct_params\n }\n correct_inputs2 = dict(correct_inputs, params=correct_params2)\n correct_inputs3 = dict(correct_inputs, params=correct_params3)\n correct_inputs4 = dict(correct_inputs, params=correct_params4)\n correct_inputs5 = dict(correct_inputs, params=correct_params5)\n\n self.__assign_attr_from_dict(ehc, dict(correct_inputs, entity_type=None))\n self.assertRaises(BadRequestError, ehc._process_params)\n\n self.__assign_attr_from_dict(ehc, dict(correct_inputs, params=dict(correct_params, root_query=None)))\n self.assertRaises(BadRequestError, ehc._process_params)\n\n self.__assign_attr_from_dict(ehc, dict(correct_inputs, params=dict(correct_params, link_filters=None)))\n self.assertRaises(BadRequestError, ehc._process_params)\n\n self.__assign_attr_from_dict(ehc, dict(correct_inputs, params=dict(correct_params, entity_fields=None)))\n self.assertRaises(BadRequestError, ehc._process_params)\n\n self.__assign_attr_from_dict(ehc, dict(correct_inputs, params=dict(correct_params, options=None)))\n self.assertRaises(BadRequestError, ehc._process_params)\n\n # Correct\n self.__assign_attr_from_dict(ehc, correct_inputs)\n result = ehc._process_params()\n self.assertEqual(result, ehc)\n\n self.__assign_attr_from_dict(ehc, correct_inputs2)\n result = ehc._process_params()\n self.assertEqual(result, ehc)\n\n self.__assign_attr_from_dict(ehc, correct_inputs3)\n result = ehc._process_params()\n self.assertEqual(result, ehc)\n\n self.__assign_attr_from_dict(ehc, correct_inputs4)\n result = ehc._process_params()\n self.assertEqual(result, ehc)\n\n self.__assign_attr_from_dict(ehc, correct_inputs5)\n result = ehc._process_params()\n self.assertEqual(result, ehc)\n\n ##########################################################################\n # EntityHierarchyCreator._build_relationship_maps()\n\n def test_build_relationship_maps(self):\n\n fields_with_links = {\"_id\": 1, \"links\": 1}\n entity_query = {}\n entity_type = \"company\"\n sorted_fields = \"sorted_fields\"\n return_format = \"return_format\"\n time_context = None\n\n entity_id = generate_id()\n root_entity_ids = [entity_id]\n\n child_id1 = generate_id()\n link1 = {\n \"entity_id_to\": child_id1\n }\n child_id2 = generate_id()\n link2 = {\n \"entity_id_to\": child_id2\n }\n\n entity1 = {\n \"_id\": entity_id,\n \"links\": {\n \"company\": {\n \"retailer_branding\": [link1, link2]\n }\n }\n }\n\n child1 = {\n \"_id\": child_id1,\n \"links\": {\n \"company\": {}\n }\n }\n\n child2 = {\n \"_id\": child_id2,\n \"links\": {}\n }\n\n self.mock._form_time_context_query(time_context).AndReturn({})\n\n # First while loop iteration\n self.mock.db.find(entity_type, {\"_id\": {\"$in\": [entity_id]}}, fields=fields_with_links).AndReturn([entity1])\n self.mock._process_entity_with_fields(entity1, sorted_fields, return_format).AndReturn(entity1)\n\n self.mock._test_and_add_link(str(entity_id), link1).AndReturn({\"type\": \"company\", \"id\": child_id1, \"recursive\": True})\n self.mock._test_and_add_link(str(entity_id), link2).AndReturn({\"type\": \"company\", \"id\": child_id2, \"recursive\": True})\n\n # Second while loop iteration\n self.mock.db.find(entity_type, mox.IgnoreArg(), fields=fields_with_links).AndReturn([child1, child2])\n self.mock._process_entity_with_fields(child1, sorted_fields, return_format).AndReturn(child1)\n self.mock._process_entity_with_fields(child2, sorted_fields, 
return_format).AndReturn(child2)\n\n self.mox.ReplayAll()\n\n self.mock.entity_query = entity_query\n self.mock.entity_type = entity_type\n self.mock.fields_with_links = fields_with_links\n self.mock.entity_id = entity_id\n self.mock.sorted_fields = sorted_fields\n self.mock.return_format = return_format\n self.mock.time_context = time_context\n self.mock.root_entity_ids = root_entity_ids\n result = EntityHierarchyCreator._build_relationship_maps(self.mock)\n\n self.assertEqual(result, self.mock)\n\n self.assertItemsEqual(self.mock.entity_map.keys(), [str(entity_id), str(child_id1), str(child_id2)])\n\n ##########################################################################\n # EntityHierarchyCreator._test_and_add_link()\n\n def test_test_and_add_link__regular_filter(self):\n\n link_filters = \"link_filters\"\n\n entity_id = str(generate_id())\n\n id_to = generate_id()\n role_from = \"role_from\"\n role_to = \"role_to\"\n rel_type = \"rel_type\"\n type_from = \"type_from\"\n type_to = \"type_to\"\n\n link = {\n \"interval\": None,\n \"entity_id_to\": id_to,\n \"entity_role_from\": role_from,\n \"entity_role_to\": role_to,\n \"relation_type\": rel_type,\n \"entity_type_from\": type_from,\n \"entity_type_to\": type_to\n }\n\n self.mock._link_filter_is_recursive(link_filters, link).AndReturn(True)\n self.mock._test_link_against_filter(link_filters, link).AndReturn(True)\n self.mock._add_child_and_parent_for_link(str(id_to), entity_id, link[\"entity_role_from\"],\n link[\"entity_role_to\"], link[\"relation_type\"],\n link[\"entity_type_from\"], link[\"entity_type_to\"])\n\n self.mox.ReplayAll()\n\n self.mock.link_filters = link_filters\n self.mock.bidirectional_links = True\n result = EntityHierarchyCreator._test_and_add_link(self.mock, entity_id, link)\n\n self.assertEqual(result[\"id\"], id_to)\n self.assertEqual(result[\"type\"], type_to)\n self.assertEqual(result[\"recursive\"], True)\n\n def test_test_and_add_link__regular_filter__not_recursive(self):\n\n link_filters = \"link_filters\"\n\n entity_id = str(generate_id())\n\n id_to = generate_id()\n role_from = \"role_from\"\n role_to = \"role_to\"\n rel_type = \"rel_type\"\n type_from = \"type_from\"\n type_to = \"type_to\"\n\n link = {\n \"interval\": None,\n \"entity_id_to\": id_to,\n \"entity_role_from\": role_from,\n \"entity_role_to\": role_to,\n \"relation_type\": rel_type,\n \"entity_type_from\": type_from,\n \"entity_type_to\": type_to\n }\n\n self.mock._link_filter_is_recursive(link_filters, link).AndReturn(False)\n self.mock._test_link_against_filter(link_filters, link).AndReturn(True)\n self.mock._add_child_and_parent_for_link(str(id_to), entity_id, link[\"entity_role_from\"],\n link[\"entity_role_to\"], link[\"relation_type\"],\n link[\"entity_type_from\"], link[\"entity_type_to\"])\n\n self.mox.ReplayAll()\n\n self.mock.link_filters = link_filters\n self.mock.bidirectional_links = True\n result = EntityHierarchyCreator._test_and_add_link(self.mock, entity_id, link)\n\n self.assertEqual(result[\"id\"], id_to)\n self.assertEqual(result[\"type\"], type_to)\n self.assertEqual(result[\"recursive\"], False)\n\n def test_test_and_add_link__opposite_filter(self):\n\n link_filters = \"link_filters\"\n\n entity_id = str(generate_id())\n\n id_to = generate_id()\n role_from = \"role_from\"\n role_to = \"role_to\"\n rel_type = \"rel_type\"\n type_from = \"type_from\"\n type_to = \"type_to\"\n\n link = {\n \"interval\": None,\n \"entity_id_to\": id_to,\n \"entity_role_from\": role_from,\n \"entity_role_to\": role_to,\n 
\"relation_type\": rel_type,\n \"entity_type_from\": type_from,\n \"entity_type_to\": type_to\n }\n\n self.mock._link_filter_is_recursive(link_filters, link).AndReturn(True)\n self.mock._test_link_against_filter(link_filters, link).AndReturn(False)\n self.mock._test_link_against_filter(link_filters, link, opposite = True).AndReturn(True)\n self.mock._add_child_and_parent_for_link(entity_id, str(id_to), link[\"entity_role_to\"],\n link[\"entity_role_from\"], link[\"relation_type\"],\n link[\"entity_type_to\"], link[\"entity_type_from\"])\n\n self.mox.ReplayAll()\n\n self.mock.link_filters = link_filters\n self.mock.bidirectional_links = True\n result = EntityHierarchyCreator._test_and_add_link(self.mock, entity_id, link)\n\n self.assertEqual(result[\"id\"], id_to)\n self.assertEqual(result[\"type\"], type_to)\n self.assertEqual(result[\"recursive\"], True)\n\n ##########################################################################\n # EntityHierarchyCreator._add_child_and_parent_for_link()\n\n def test_add_child_and_parent_for_link(self):\n\n ehc = EntityHierarchyCreator.__new__(EntityHierarchyCreator)\n\n cid = \"cid\"\n pid = \"pid\"\n role_from = \"role_from\"\n role_to = \"role_to\"\n relation_type = \"relation_type\"\n type_from = \"type_from\"\n type_to = \"type_to\"\n\n ehc.parent_to_child_map = {}\n ehc.child_to_parent_map = {}\n ehc._add_child_and_parent_for_link(cid, pid, role_from, role_to, relation_type, type_from, type_to)\n\n self.assertDictEqual(ehc.parent_to_child_map, {pid: {cid: dict(_id = cid,\n entity_role_from = role_from,\n entity_role_to = role_to,\n entity_type_from = type_from,\n entity_type_to = type_to,\n relation_type = relation_type)}})\n self.assertDictEqual(ehc.child_to_parent_map, {cid: {pid: dict(_id = pid,\n entity_role_from = role_to,\n entity_role_to = role_from,\n entity_type_from = type_to,\n entity_type_to = type_from,\n relation_type = relation_type)}})\n\n ##########################################################################\n # EntityHierarchyCreator._test_link_against_filter()\n\n def test_test_link_against_filter__all(self):\n\n f1 = \"_all\"\n link = {\n \"entity_role_from\": \"entity_role_from\",\n \"entity_role_to\": \"entity_role_to\",\n \"relation_type\": \"relation_type\"\n }\n\n hierarchy_creator = EntityHierarchyCreator.__new__(EntityHierarchyCreator)\n hierarchy_creator.time_context = None\n\n result = hierarchy_creator._test_link_against_filter(f1, link)\n self.assertEqual(result, True)\n\n def test_test_link_against_filter__dict(self):\n\n f1 = [\"entity_role_from\", \"entity_role_to\", \"relation_type\"]\n\n link1 = {\n \"entity_role_from\": \"entity_role_from\",\n \"entity_role_to\": \"entity_role_to\",\n \"relation_type\": \"relation_type\"\n }\n\n hierarchy_creator = EntityHierarchyCreator.__new__(EntityHierarchyCreator)\n hierarchy_creator.time_context = None\n\n result = hierarchy_creator._test_link_against_filter(f1, link1)\n self.assertEqual(result, True)\n\n link2 = {\n \"entity_role_from\": \"entity_role_from\",\n \"entity_role_to\": \"entity_role_to\",\n \"relation_type\": \"asdf\"\n }\n\n hierarchy_creator = EntityHierarchyCreator.__new__(EntityHierarchyCreator)\n hierarchy_creator.time_context = None\n\n result = hierarchy_creator._test_link_against_filter(f1, link2)\n self.assertEqual(result, False)\n\n f2 = [\"entity_role_from\", \"entity_role_to\", \"_all\"]\n\n link1 = {\n \"entity_role_from\": \"entity_role_from\",\n \"entity_role_to\": \"entity_role_to\",\n \"relation_type\": \"relation_type\"\n }\n\n 
hierarchy_creator = EntityHierarchyCreator.__new__(EntityHierarchyCreator)\n hierarchy_creator.time_context = None\n\n result = hierarchy_creator._test_link_against_filter(f2, link1)\n self.assertEqual(result, True)\n\n link2 = {\n \"entity_role_from\": \"entity_role_from\",\n \"entity_role_to\": \"entity_role_to\",\n \"relation_type\": \"asdf\"\n }\n\n hierarchy_creator = EntityHierarchyCreator.__new__(EntityHierarchyCreator)\n hierarchy_creator.time_context = None\n\n result = hierarchy_creator._test_link_against_filter(f2, link2)\n self.assertEqual(result, True)\n\n ##########################################################################\n # EntityHierarchyCreator._link_filter_is_recursive()\n\n def test_link_filter_is_recursive__all(self):\n\n f1 = \"_all\"\n link = {\n \"entity_role_from\": \"entity_role_from\",\n \"entity_role_to\": \"entity_role_to\",\n \"relation_type\": \"relation_type\"\n }\n\n result = EntityHierarchyCreator._link_filter_is_recursive(f1, link)\n self.assertEqual(result, True)\n\n def test_link_filter_is_recursive__list(self):\n\n link = {\n \"entity_role_from\": \"entity_role_from\",\n \"entity_role_to\": \"entity_role_to\",\n \"relation_type\": \"relation_type\"\n }\n\n f1 = [\"entity_role_from\", \"entity_role_to\", \"relation_type\"]\n\n result = EntityHierarchyCreator._link_filter_is_recursive(f1, link)\n self.assertEqual(result, True)\n\n f2 = [\"entity_role_from\", \"entity_role_to\", \"relation_type\", {\"recursive\": True}]\n\n result = EntityHierarchyCreator._link_filter_is_recursive(f2, link)\n self.assertEqual(result, True)\n\n f3 = [\"entity_role_from\", \"entity_role_to\", \"relation_type\", {\"recursive\": False}]\n\n result = EntityHierarchyCreator._link_filter_is_recursive(f3, link)\n self.assertEqual(result, False)\n\n def test_link_filter_is_recursive__list_of_lists(self):\n\n link = {\n \"entity_role_from\": \"entity_role_from\",\n \"entity_role_to\": \"entity_role_to\",\n \"relation_type\": \"relation_type\"\n }\n\n f1 = [[\"entity_role_from\", \"entity_role_to\", \"relation_type\"],\n [\"entity_role_to\", \"entity_role_from\", \"relation_type\"]]\n\n result = EntityHierarchyCreator._link_filter_is_recursive(f1, link)\n self.assertEqual(result, True)\n\n f2 = [[\"entity_role_to\", \"entity_role_from\", \"relation_type\"],\n [\"entity_role_from\", \"entity_role_to\", \"relation_type\", {\"recursive\": True}]]\n\n result = EntityHierarchyCreator._link_filter_is_recursive(f2, link)\n self.assertEqual(result, True)\n\n f3 = [[\"entity_role_to\", \"entity_role_from\", \"relation_type\"],\n [\"entity_role_from\", \"entity_role_to\", \"relation_type\", {\"recursive\": False}]]\n\n result = EntityHierarchyCreator._link_filter_is_recursive(f3, link)\n self.assertEqual(result, False)\n\n ##########################################################################\n # EntityHierarchyCreator._process_entity_with_fields()\n\n def test_process_entity_with_fields(self):\n\n return_format = \"dict\"\n sorted_fields = [\"_id\", \"name\", \"data.label\"]\n\n entity = {\n \"_id\": \"a\",\n \"name\": \"b\",\n \"data\": {\n \"label\": \"c\"\n }\n }\n\n hierarchy_creator = EntityHierarchyCreator.__new__(EntityHierarchyCreator)\n hierarchy_creator.find_raw_helper = find_raw_helper\n\n result = hierarchy_creator._process_entity_with_fields(entity, sorted_fields, return_format)\n self.assertDictEqual(result, {\"_id\": \"a\", \"name\": \"b\", \"data.label\": \"c\"})\n\n return_format = \"list\"\n result = hierarchy_creator._process_entity_with_fields(entity, 
sorted_fields, return_format)\n        self.assertListEqual(result, [\"a\", \"b\", \"c\"])\n\n    #---------------------# Private Helpers #---------------------#\n\n    @staticmethod\n    def __assign_attr_from_dict(obj, value_dict):\n\n        for key, value in value_dict.iteritems():\n            setattr(obj, key, value)","repo_name":"erezrubinstein/aa","sub_path":"tests/unit_tests/core_tests/service_tests/mds_tests/test_entity_hierarchy_creator.py","file_name":"test_entity_hierarchy_creator.py","file_ext":"py","file_size_in_byte":19121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26385087175","text":"import trio  # type: ignore\nimport httpx\n\n\nasync def get_fiat_rate(currency: str):\n    assert currency == \"USD\", \"Only USD is supported as fiat currency.\"\n    return await get_usd_rate()\n\n\nasync def get_usd_rate():\n    \"\"\"\n    Returns an average satoshi price from multiple sources.\n    \"\"\"\n\n    satoshi_prices = [None, None, None]\n\n    async def fetch_price(index, url, getter):\n        try:\n            async with httpx.AsyncClient() as client:\n                r = await client.get(url)\n                r.raise_for_status()\n                satoshi_price = int(100_000_000 / float(getter(r.json())))\n                satoshi_prices[index] = satoshi_price\n        except Exception:\n            pass\n\n    async with trio.open_nursery() as nursery:\n        nursery.start_soon(\n            fetch_price,\n            0,\n            \"https://api.kraken.com/0/public/Ticker?pair=XXBTZUSD\",\n            lambda d: d[\"result\"][\"XXBTZUSD\"][\"c\"][0],\n        )\n        nursery.start_soon(\n            fetch_price,\n            1,\n            \"https://www.bitstamp.net/api/v2/ticker/btcusd\",\n            lambda d: d[\"last\"],\n        )\n        nursery.start_soon(\n            fetch_price,\n            2,\n            \"https://api.coincap.io/v2/rates/bitcoin\",\n            lambda d: d[\"data\"][\"rateUsd\"],\n        )\n\n    satoshi_prices = [x for x in satoshi_prices if x]\n    return sum(satoshi_prices) / len(satoshi_prices)\n","repo_name":"geco91/lnbits","sub_path":"lnbits/extensions/lnurlp/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"30717708706","text":"# In a one-dimensional array, find the sum of the elements located between the minimum and maximum elements. 
The\n# minimum and maximum elements themselves are not included in the sum.\nimport random\n\nSIZE = 10\nMIN_ITEM = -100\nMAX_ITEM = 100\narray = [random.randint(MIN_ITEM, MAX_ITEM) for _ in range(0, SIZE)]\n# special case: [93, 46, -85, -76, 1, -62, -90, 84, 86, -88]\n# the position of the minimum element is greater than the position of the maximum element\nmin_digit = array[0]\nmax_digit = array[0]\nmax_el = 0\nmin_el = 0\nresult = 0\n\nprint(f'Array of random integers before processing \\n{array}')\nfor i, item in enumerate(array):\n    max_digit = (item if item > max_digit else max_digit)\n    min_digit = (item if item < min_digit else min_digit)\n    max_el = (i if max_digit == item else max_el)\n    min_el = (i if min_digit == item else min_el)\n\nstep = 1 if min_el < max_el else -1  # account for the traversal order of the array elements in the special case\n\nfor i in range(min_el + step, max_el, step):\n    result += array[i]\nprint(f'{\"*\"*50}')\nprint(f'The sum of the numbers between elements {min_el if step == 1 else max_el} and '\n      f'{max_el if step == 1 else min_el} is {result}')\n","repo_name":"RSV48/algorithms","sub_path":"tassk_03_06.py","file_name":"tassk_03_06.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18246502933","text":"\"Write a program to cyclically rotate an array by one.\"\n\n\ndef rotate(arr, n):\n    b = arr.pop(n - 1)\n    arr.insert(0, b)\n    return arr\n\nN = 5\nA = [1, 2, 3, 4, 5]\n\nprint(rotate(A, N))","repo_name":"Wasim-Akraam/DSA_with_Python","sub_path":"List/8_rotateList.py","file_name":"8_rotateList.py","file_ext":"py","file_size_in_byte":178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35491495814","text":"import os\nimport sys\nfrom glob import glob\nimport astropy.units as u\nimport sunpy.map\nfrom suncet_instrument_simulator import config_parser, instrument, make_radiance_maps\n\nroot_directory = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))\nprint(root_directory)\nprint(os.path.exists(root_directory + '/setup_minimum_required_folders_files.py'))\nsys.path.insert(0, root_directory)\nimport setup_minimum_required_folders_files\n\ndef test_instrument():\n    if os.getenv('suncet_data') is None:\n        os.environ['suncet_data'] = './'\n        setup_minimum_required_folders_files.run()\n    \n    hardware = setup_instrument_hardware()\n    run_mirror_coating_tests(hardware)\n    \n\ndef setup_instrument_hardware():\n    config_filename = os.getcwd() + '/suncet_instrument_simulator/config_files/config_default.ini'\n    config = config_parser.Config(config_filename)\n    hardware = instrument.Hardware(config)\n    make_radiance_maps.MakeRadianceMaps(config).run()\n    filenames = glob(os.getenv('suncet_data') + '/mhd/bright_fast/rendered_euv_maps/SunCET_MapSeq_044.fits')\n    radiance_maps = sunpy.map.Map(filenames, sequence=True)\n    hardware.store_target_wavelengths(radiance_maps)\n    hardware.compute_effective_area()\n    return hardware\n\n\ndef run_mirror_coating_tests(hardware):\n    assert hardware.coating_name == 'B4C_Mo_Al'\n    assert hardware.wavelengths.unit == u.Angstrom\n    assert hardware.effective_area.unit == u.cm**2\n\n\nif __name__ == \"__main__\":\n    test_instrument()\n","repo_name":"suncet/suncet_instrument_simulator","sub_path":"tests/test_instrument.py","file_name":"test_instrument.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36473224704","text":"\n# coding: 
utf-8\n\n# In[ ]:\n\n\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport math\nimport pandas as pd\nfrom pandas.plotting import table\nimport matplotlib\nmatplotlib.style.use('seaborn-darkgrid')\nimport matplotlib.pyplot as plt\nfrom IPython.display import display, Markdown, HTML, Javascript\nfrom ipywidgets import widgets\nfrom utils import human_size, selectdata, nextall, getrawdata, ExplorersSelection\nfrom IPython.core.interactiveshell import InteractiveShell\nInteractiveShell.ast_node_interactivity = \"all\"\n\n\n# In[ ]:\n\n\nexplorers=selectdata(\"/tmp/datazfssa\")\nf = ExplorersSelection('pools.csv', explorers)\nf.widget()\ncontinueall = widgets.Button(description=\"Continue\", button_style='success')\ncontinueall.on_click(nextall)\ndisplay(continueall)\n\n\n# In[ ]:\n\n\nInteractiveShell.ast_node_interactivity = \"last_expr\"\n\n\n# In[ ]:\n\n\nPOOLSFILES = f.selected_explorers\nPOOLSFILES\n\n\n# In[ ]:\n\n\ndataraw = getrawdata(POOLSFILES)\n\n\n# # Show available columns\n\n# In[ ]:\n\n\ndataraw.columns\n\n\n# In[ ]:\n\n\n# drop rows where the status is exported to avoid some duplicated values \ndataraw = dataraw[dataraw.status != 'exported']\ncountpools = dataraw[['asn']]\ndisplay(Markdown(\"### Pools: {}\".format(len(countpools))))\n\n\n# check profiles\n\n# In[ ]:\n\n\nprofiles = dataraw[['profile', 'name', 'owner', 'status']].set_index('name').sort_index(ascending=True)\nprofiles\n\n\n# check if more than one profile is in the same node\n\n# In[ ]:\n\n\nuniqref = profiles[profiles['status'] != 'exported']\nuniq = uniqref.groupby('name')['profile']\nif uniq.all().nunique() != len(uniqref):\n display(HTML(''\n 'Some pools with the same profile are in the same controller (Not Recommended).'\n ''))\nelse:\n display(HTML('Pools per profile distributed normally'))\nuniq.all()\n\n\n# ### Get usage distribution\n# \n# legend:\n# \n# usage_total = total pool capacity\n# usage_usage_total = total space used\n# usage_usage_data = space used (not considering reservation)\n# usage_used = real space used\n# usage_available = space available - reservation, etc\n# usage_free = space free for assigment\n# usage_usage_snapshots = space used for snapshots\n\n# In[ ]:\n\n\npoolsimported = dataraw.set_index('name')\npoolsimported = poolsimported[profiles['status'] != 'exported']\nusage = poolsimported[['usage_available', 'usage_used', 'usage_usage_data', 'usage_free',\n 'usage_usage_snapshots', 'usage_usage_total', 'usage_total']]\nusage = usage.astype(float)\nplt.figure(figsize=(30, 8))\n# plot bar\nax1 = plt.subplot(121)\nax1 = usage.sort_index(ascending=True).astype(float).plot(kind='bar', legend=True, ax=ax1, fontsize=12, grid=True)\nax1.set_ylabel('Space Usage')\nax1.set_xlabel('pool')\nplt.show()\nusage.sort_index(ascending=True).applymap(human_size)\n\n\n# In[ ]:\n\n\nusagefree = usage[['usage_free', 'usage_total']]\nusagefreepercent = (usagefree['usage_free'] * 100 ) / usagefree['usage_total']\nusagefreepercent.rename(\"Usage Free Percentage\", inplace=True).sort_index(ascending=True, inplace=True)\ncolors = []\nfor val in usagefreepercent:\n if val > 20:\n colors.append('g')\n elif val <= 20 and val > 10:\n colors.append('y')\n else:\n colors.append('r')\n \nplt.figure(figsize=(16, 6))\n# plot bar\nax1 = plt.subplot(121)\nax1 = usagefreepercent.astype(float).plot(kind='bar', ax=ax1, fontsize=12, grid=True, color=colors)\nax1.set_ylabel('Free Percent')\nax1.set_xlabel('pool')\n# plot table\nax2 = plt.subplot(122)\nplt.axis('off')\ntbl = table(ax2, usagefreepercent.round(2), 
loc='center', bbox=[0.2, 0.2, 0.5, 0.5])\ntbl.auto_set_font_size(False)\nplt.show()\n\n\n# ### Get compression info\n\n# In[ ]:\n\n\ncompress = dataraw[['name', 'usage_compression']]\ncompress.set_index('name').sort_index(ascending=True)\n\n","repo_name":"aldenso/jupyter-zfssa-analysis","sub_path":"pools.py","file_name":"pools.py","file_ext":"py","file_size_in_byte":3839,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"40285333987","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 10 01:30:57 2017\n\n@author: Administrator\n\"\"\"\n\nclass Solution(object):\n def twoSum(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n for i in range(len(nums)-1):\n for j in range(i+1,len(nums)): \n print(i,j)\n if ((nums[i] + nums[j]) == target):\n\n return [i,j]\n else:\n continue\n#test = Solution()\n#print(test.twoSum([3,2,4],6))","repo_name":"zjuyuchen/LeetCode","sub_path":"LeetCode/1 Two Sum.py","file_name":"1 Two Sum.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"44868884233","text":"\"\"\"TcEx testing profile Class.\"\"\"\n# standard library\nimport json\nimport math\n\n# import os\nimport re\nimport sys\nfrom base64 import b64encode\nfrom typing import Optional, Union\n\n# third-party\nimport colorama as c\n\n# autoreset colorama\nc.init(autoreset=True, strip=False)\n\n\nclass Interactive:\n \"\"\"Testing Profile Interactive Class.\"\"\"\n\n def __init__(self, profile: object):\n \"\"\"Initialize Class properties.\n\n Args:\n profile (Profile): The profile object to build interactive inputs.\n \"\"\"\n self.profile = profile\n\n # properties\n self._inputs = {\n 'optional': {},\n 'required': {},\n }\n self._no_selection_text = 'No Selection'\n self._staging_data = {'kvstore': {}}\n # self._user_defaults = None\n self.collect_type_map = {\n 'Any': self.collect_string,\n 'Binary': self.collect_binary,\n 'BinaryArray': self.collect_binary_array,\n 'KeyValue': self.collect_key_value,\n 'KeyValueArray': self.collect_key_value_array,\n 'String': self.collect_string,\n 'StringArray': self.collect_string_array,\n 'TCEntity': self.collect_tcentity,\n 'TCEntityArray': self.collect_tcentity_array,\n }\n self.exit_codes = []\n self.input_type_map = {\n 'boolean': self.present_boolean,\n 'choice': self.present_choice,\n 'keyvaluelist': self.present_key_value_list,\n 'multichoice': self.present_multichoice,\n 'string': self.present_string,\n }\n # self.user_defaults_filename = os.path.join('tests', '.user_defaults')\n\n def _default(self, data: dict) -> Union[list, str]: # pylint: disable=unused-argument\n \"\"\"Return the best option for default.\n\n Args:\n data: The install.json params object.\n\n Returns:\n list, str: The default value for the input.\n \"\"\"\n if data.get('type').lower() == 'boolean':\n default = str(data.get('default', 'false')).lower()\n elif data.get('type').lower() == 'choice':\n default = 0\n valid_values: list = self._expand_valid_values(data.get('validValues', []))\n if data.get('name') == 'tc_action':\n for vv in valid_values:\n if self.profile.feature.lower() == vv.replace(' ', '_').lower():\n default = vv\n break\n else:\n default: str = data.get('default')\n elif data.get('type').lower() == 'multichoice':\n default: str = data.get('default')\n if default is not None and isinstance(default, str):\n default: list = default.split('|')\n else:\n 
default = data.get('default')\n # if default is None:\n # # set default from user default file\n # default = self.user_defaults.get(data.get('name'))\n return default\n\n def _expand_valid_values(self, valid_values: list) -> list:\n \"\"\"Expand supported playbook variables to their full list.\n\n Args:\n valid_values (list): The list of valid values for Choice or MultiChoice inputs.\n\n Returns:\n list: An expanded list of valid values for Choice or MultiChoice inputs.\n \"\"\"\n valid_values = list(valid_values)\n if '${ARTIFACT_TYPES}' in valid_values:\n valid_values.remove('${ARTIFACT_TYPES}')\n valid_values.extend(\n [\n 'ASN',\n 'Asset Group ID',\n 'Certificate File',\n 'CIDR',\n 'Credential ID',\n 'Document Metadata',\n 'Email Address',\n 'Email Attachment File',\n 'Email Attachment File Name',\n 'Email Body',\n 'Email Message File',\n 'Email Subject',\n 'Event File',\n 'Exploit ID',\n 'File Hash',\n 'Filter ID',\n 'Hashtag',\n 'Host',\n 'Image File',\n 'IP Address',\n 'Log File',\n 'MutEx',\n 'PCAP File',\n 'Policy ID',\n 'Registry Key',\n 'Results ID',\n 'Screenshot File',\n 'Tactic ID',\n 'Technique ID',\n 'Ticket ID',\n 'Timestamp',\n 'URL',\n 'User Agent',\n 'Vulnerability Detection ID',\n 'Vulnerability ID',\n ]\n )\n elif '${GROUP_TYPES}' in valid_values:\n valid_values.remove('${GROUP_TYPES}')\n valid_values.extend(\n [\n 'Adversary',\n 'Campaign',\n 'Document',\n 'Email',\n 'Event',\n 'Incident',\n 'Intrusion Set',\n 'Signature',\n 'Task',\n 'Threat',\n ]\n )\n elif '${INDICATOR_TYPES}' in valid_values:\n valid_values.remove('${INDICATOR_TYPES}')\n r = self.profile.session.get('/v2/types/indicatorTypes')\n if r.ok:\n valid_values.extend(\n [t.get('name') for t in r.json().get('data', {}).get('indicatorType', {})]\n )\n elif '${OWNERS}' in valid_values:\n valid_values.remove('${OWNERS}')\n r = self.profile.session.get('/v2/owners')\n if r.ok:\n valid_values.extend(\n [o.get('name') for o in r.json().get('data', {}).get('owner', {})]\n )\n elif '${USERS}' in valid_values:\n valid_values.remove('${USERS}')\n r = self.profile.session.get('/v2/owners/mine/members')\n if r.ok:\n valid_values.extend(\n [o.get('userName') for o in r.json().get('data', {}).get('user', {})]\n )\n elif '${USER_GROUPS}' in valid_values:\n valid_values.remove('${USER_GROUPS}')\n valid_values.extend(['User Group 1', 'User Group 1'])\n\n return valid_values\n\n def _input_value(self, label: str, option_text: Optional[str] = None) -> str:\n \"\"\"Return user input.\n\n Args:\n label: The label to display to the user.\n option_text: the Option text to display to the user.\n\n Returns:\n str: The value selected by the user.\n \"\"\"\n # update option text to include help message\n option_text = option_text or ''\n if option_text:\n # add space for cleaness in user display\n option_text = f' {option_text}'\n\n print(f'{c.Fore.WHITE}[? 
for help]')\n prompt = f'{c.Fore.MAGENTA}{label}{c.Fore.RESET}{c.Style.BRIGHT}{option_text}: '\n input_value = input(prompt).strip() # nosec\n\n # handle special user inputs\n if input_value == '?':\n self.present_help()\n return self._input_value(label, option_text)\n\n return input_value\n\n @staticmethod\n def _split_list(data: list) -> tuple:\n \"\"\"Split a list in two \"equal\" parts.\n\n Args:\n data: The list of data to split into two equal parts.\n\n Returns:\n tuple: The two halves of the list.\n \"\"\"\n half: int = math.ceil(len(data) / 2)\n return data[:half], data[half:]\n\n def add_input(self, name: str, data: dict, value: str) -> None:\n \"\"\"Add an input to inputs.\n\n Args:\n name: The name of the input.\n data: The install.json params object.\n value: The value for the input.\n \"\"\"\n if data.get('required', False):\n self._inputs['required'].setdefault(name, value)\n else:\n self._inputs['optional'].setdefault(name, value)\n\n # def add_user_default(self, key, value, data_type=None):\n # \"\"\"Add data to user default.\"\"\"\n # self.user_defaults.setdefault(self.profile.feature, {})\n # if data_type is None:\n # self.user_defaults[self.profile.feature][key] = value\n # else:\n # # store the value under the appropriate data type\n # self.user_defaults[self.profile.feature].setdefault(key, {})\n # self.user_defaults[self.profile.feature][key].setdefault(data_type, value)\n\n # if self.user_defaults.get('base') is None:\n # self.user_defaults['base'] = self.user_defaults[self.profile.feature]\n\n def add_staging_data(self, name: str, type_: str, value: str) -> str:\n \"\"\"Create staging data and return variable value.\n\n Args:\n name: The name of the input.\n type_: The type of input (Binary, StringArray, etc.)\n value: The value to write in the staging data.\n\n Returns:\n str: The newly create variable string.\n \"\"\"\n arg_value = value\n if (\n self.profile.ij.runtime_level.lower() not in ['triggerservice', 'webhooktriggerservice']\n and value is not None\n ):\n arg_value: str = self.profile.ij.create_variable(name, type_)\n self._staging_data['kvstore'].setdefault(arg_value, value)\n\n return arg_value\n\n def collect_binary(self, **kwargs) -> str:\n \"\"\"Collect binary data\n\n Args:\n default (str, kwargs): The default value if no value provided by user.\n feedback (bool, kwargs): If True user feedback will be printed.\n option_text (str, kwargs): The text shown to the user.\n required (str, kwargs): If True the user cannot continue until they provide a value.\n\n Returns:\n str: The input str from the user.\n \"\"\"\n input_value: str = self._input_value('Input', kwargs.get('option_text'))\n if not input_value:\n # if no default value and required force user to input again\n if kwargs.get('default') is None and kwargs.get('required') is True:\n self.print_required()\n return self.collect_binary(**kwargs)\n\n if input_value not in [None, '']:\n input_data: str = b64encode(input_value.encode()).decode()\n feedback = f'{input_value} -> ({input_data})'\n else:\n input_data: str = kwargs.get('default')\n feedback = input_data\n\n # print user feedback\n if kwargs.get('feedback', True):\n self.print_feedback(feedback)\n\n return input_data\n\n def collect_binary_array(self, **kwargs) -> list:\n \"\"\"Collect binary array data\n\n Args:\n required (str, kwargs): If True the user cannot continue until they provide a value.\n\n Returns:\n list: The input list from the user.\n \"\"\"\n input_values = []\n required = kwargs.get('required', False)\n while True:\n 
input_value = self.collect_binary(feedback=False, required=required)\n if not input_value:\n break\n input_values.append(input_value)\n required = False # only the first input is required\n\n if not input_values:\n # return None to ensure data doesn't get added to inputs\n input_values = None\n\n # print user feedback\n self.print_feedback(input_values)\n\n return input_values\n\n def collect_boolean(self, **kwargs) -> bool:\n \"\"\"Collect binary data\n\n Args:\n default (str, kwargs): The default value if no value provided by user.\n option_text (str, kwargs): The text shown to the user.\n\n Returns:\n bool: The boolean value select by the user.\n \"\"\"\n input_value = self._input_value('Input', kwargs.get('option_text'))\n if input_value == '':\n input_value = kwargs.get('default')\n\n if str(input_value).lower() not in ['0', 'f', 'false', '1', 't', 'true']:\n self.print_invalid_bool()\n return self.collect_boolean(**kwargs)\n\n # covert input value to a proper boolean\n input_value = self.profile.utils.to_bool(input_value)\n\n # print user feedback\n self.print_feedback(input_value)\n\n return input_value\n\n def collect_choice(self, **kwargs) -> str:\n \"\"\"Collect choice data\n\n Args:\n default (str, kwargs): The default value if no value provided by user.\n option_text (str, kwargs): The text shown to the user.\n required (str, kwargs): If True the user cannot continue until they provide a value.\n valid_values (str, kwargs): A list of valid values\n\n Returns:\n str: The users selected choice.\n \"\"\"\n # collect input value from user and set default if required\n input_value: str = self._input_value('Choice', kwargs.get('option_text')) or kwargs.get(\n 'default'\n )\n\n # ensure input value is provided when input is required\n if input_value is None and kwargs.get('required') is True:\n self.print_required()\n return self.collect_choice(**kwargs)\n\n # if input value is None then there is not need to continue\n if input_value is None:\n return input_value\n\n # set valid values\n valid_values: list = kwargs.get('valid_values', [])\n\n # convert to int or recollect input\n try:\n input_value = int(input_value)\n except ValueError:\n self.print_invalid_index(f'0-{len(valid_values)}')\n return self.collect_choice(**kwargs)\n\n # ensure input value is valid\n valid_index_values = [i for i, _ in enumerate(valid_values)]\n # valid_index_values = list(range(0, len(valid_values) - 1))\n if input_value not in valid_index_values:\n self.print_invalid_index(f'0-{len(valid_values)}')\n return self.collect_choice(**kwargs)\n\n # using index value provided by user, set value to valid value\n input_value = valid_values[input_value]\n if input_value == self._no_selection_text:\n # special case for when user select no selection\n input_value = None\n\n # print user feedback\n if kwargs.get('feedback', True):\n self.print_feedback(input_value)\n\n return input_value\n\n def collect_exit_code(self, **kwargs) -> int:\n \"\"\"Collect exit codes.\n\n Args:\n option_text (str, kwargs): The text shown to the user.\n\n Returns:\n str: The users provided exit code.\n \"\"\"\n input_value = self._input_value('Code', kwargs.get('option_text'))\n\n if input_value != '':\n try:\n input_value = int(input_value)\n except ValueError:\n self.print_invalid_exit_code()\n return self.collect_exit_code(**kwargs)\n\n if input_value not in [0, 1, 3]:\n self.print_invalid_exit_code()\n return self.collect_exit_code(**kwargs)\n\n return input_value\n\n def collect_exit_codes(self, **kwargs) -> list:\n \"\"\"Collect 
exit codes.\n\n Returns:\n list: The users provided exit codes.\n \"\"\"\n input_values = []\n while True:\n input_value = self.collect_exit_code(**kwargs)\n if input_value == '':\n break\n input_values.append(input_value)\n\n if not input_values:\n # return None to ensure data doesn't get added to inputs\n input_values = [0]\n\n # print user feedback\n self.print_feedback(input_values)\n\n return input_values\n\n def collect_key_value(self, **kwargs) -> dict:\n \"\"\"Collect key value data.\n\n Args:\n option_text (str, kwargs): The text shown to the user.\n required (str, kwargs): If True the user cannot continue until they provide a value.\n\n Returns:\n dict: The users provided key value input.\n \"\"\"\n input_value = None\n key = self._input_value('Key', option_text=kwargs.get('option_text'))\n\n # ensure input value is provided when input is required\n if key == '' and kwargs.get('required') is True:\n self.print_required()\n return self.collect_key_value(**kwargs)\n\n if key != '':\n value = self._input_value('Value')\n input_value = {'key': key, 'value': value}\n else:\n input_value = kwargs.get('default')\n\n # print user feedback\n if kwargs.get('feedback', True):\n self.print_feedback(input_value)\n\n return input_value\n\n def collect_key_value_array(self, **kwargs) -> list:\n \"\"\"Collect key value array data\n\n Args:\n default (str, kwargs): The default value if no value provided by user.\n option_text (str, kwargs): The text shown to the user.\n required (str, kwargs): If True the user cannot continue until they provide a value.\n\n Returns:\n list: The users provided list of key value inputs.\n \"\"\"\n input_values = []\n required: bool = kwargs.get('required')\n while True:\n input_value = self.collect_key_value(\n default=kwargs.get('default'),\n feedback=False,\n option_text=kwargs.get('option_text'),\n required=required,\n )\n if not input_value:\n break\n input_values.append(input_value)\n required = False\n\n if not input_values:\n # return None to ensure data doesn't get added to inputs\n input_values = None\n\n # print user feedback\n self.print_feedback(input_values)\n\n return input_values\n\n def collect_multichoice(self, **kwargs) -> list:\n \"\"\"Collect multichoice data\n\n Args:\n required (str, kwargs): If True the user cannot continue until they provide a value.\n valid_values (str, kwargs): A list of valid values\n\n Returns:\n list: The users provided list of choice inputs.\n \"\"\"\n input_values = []\n required = kwargs.get('required', False)\n while True:\n input_value = self.collect_choice(\n feedback=False,\n # option_text=kwargs.get('option_text'),\n required=required,\n valid_values=kwargs.get('valid_values'),\n )\n if not input_value:\n break\n input_values.append(input_value)\n required = False\n\n input_values = list(set(input_values))\n if input_values:\n # format multichoice value as pipe delimited string\n input_values = '|'.join(input_values)\n else:\n # return None to ensure data doesn't get added to inputs\n input_values = None\n\n # print user feedback\n self.print_feedback(input_values)\n\n return input_values\n\n def collect_string(self, **kwargs) -> str:\n \"\"\"Collect string data\n\n Args:\n option_text (str, kwargs): The text shown to the user.\n default (str, kwargs): The default value if no value provided by user.\n\n Returns:\n str: The user provided input.\n \"\"\"\n input_value = self._input_value('Input', kwargs.get('option_text', ''))\n if not input_value:\n input_value = kwargs.get('default')\n\n if input_value is None 
and kwargs.get('required', False) is True:\n self.print_required()\n return self.collect_string(**kwargs)\n\n # print user feedback\n if kwargs.get('feedback', True):\n self.print_feedback(input_value)\n\n # APP-622 - handle null/None values\n if input_value == 'null':\n input_value = None\n elif input_value in ['\"null\"', \"'null'\"]:\n input_value = 'null'\n\n return input_value\n\n def collect_string_array(self, **kwargs) -> list:\n \"\"\"Collect string data\n\n Args:\n required (str, kwargs): If True the user cannot continue until they provide a value.\n\n Returns:\n str: The user provided input.\n \"\"\"\n input_values = []\n required = kwargs.get('required', False)\n while True:\n input_value = self.collect_string(feedback=False, required=required)\n if not input_value:\n break\n input_values.append(input_value)\n required = False\n\n if not input_values:\n # return None to ensure data doesn't get added to inputs\n input_values = None\n\n # print user feedback\n self.print_feedback(input_values)\n\n return input_values\n\n def collect_tcentity(self, **kwargs) -> dict:\n \"\"\"Collect tcentity data\n\n Args:\n required (str, kwargs): If True the user cannot continue until they provide a value.\n\n Returns:\n str: The user provided input.\n \"\"\"\n input_value = None\n id_ = self._input_value('ID')\n if id_:\n value = self._input_value('Value')\n type_ = self._input_value('Type')\n input_value = {'id': id_, 'value': value, 'type': type_}\n\n if input_value is None and kwargs.get('required', False) is True:\n self.print_required()\n return self.collect_tcentity(**kwargs)\n\n # print user feedback\n if kwargs.get('feedback', True):\n self.print_feedback(input_value)\n\n return input_value\n\n def collect_tcentity_array(self, **kwargs) -> list:\n \"\"\"Collect tcentity array data\n\n Args:\n required (str, kwargs): If True the user cannot continue until they provide a value.\n\n Returns:\n list: The user provided inputs.\n \"\"\"\n input_values = []\n required = kwargs.get('required', False)\n while True:\n input_value = self.collect_tcentity(feedback=False, required=required)\n if not input_value:\n break\n input_values.append(input_value)\n required = False\n\n if not input_values:\n # return None to ensure data doesn't get added to inputs\n input_values = None\n\n # print user feedback\n self.print_feedback(input_values)\n\n return input_values\n\n @property\n def inputs(self) -> dict:\n \"\"\"Return inputs dict.\"\"\"\n return self._inputs\n\n def present(self) -> None:\n \"\"\"Present interactive menu to build profile.\"\"\"\n\n def params_data() -> tuple:\n # handle non-layout and layout based App appropriately\n if self.profile.lj.has_layout:\n # using inputs from layout.json since they are required to be in order\n # (display field can only use inputs previously defined)\n for name in self.profile.lj.params_dict:\n # get data from install.json based on name\n data = self.profile.ij.params_dict.get(name)\n yield name, data\n\n # hidden fields will not be in layout.json so they need to be include manually\n for name, data in self.profile.ij.filter_params_dict(hidden=True).items():\n yield name, data\n else:\n for name, data in self.profile.ij.params_dict.items():\n yield name, data\n\n inputs = {}\n for name, data in params_data():\n if data.get('serviceConfig'):\n # inputs that are serviceConfig are not applicable for profiles\n continue\n\n if not data.get('hidden'):\n # each input will be checked for permutations if the App has layout and not hidden\n if not 
self.profile.permutations.validate_input_variable(name, inputs):\n continue\n\n # present the input\n value: str = self.input_type_map.get(data.get('type').lower())(name, data)\n\n # update inputs\n inputs[name] = value\n\n self.present_exit_code()\n\n def present_boolean(self, name: str, data) -> bool:\n \"\"\"Build a question for boolean input.\n\n Args:\n name: The name of the input field.\n data: The install.json input param object.\n\n Returns:\n bool: The user provided input.\n \"\"\"\n # print header information\n self.print_header(data)\n\n default = self._default(data)\n valid_values = ['true', 'false']\n\n option_default = 'false'\n option_text = ''\n options = []\n for v in valid_values:\n if v.lower() == default.lower():\n option_default = v\n v = f'[{v}]'\n options.append(v)\n option_text = f'''({'/'.join(options)})'''\n\n value = self.collect_boolean(default=option_default, option_text=option_text)\n\n # add input\n self.add_input(name, data, value)\n\n return value\n\n def present_choice(self, name: str, data: dict) -> str:\n \"\"\"Build a question for choice input.\n\n Args:\n name: The name of the input field.\n data: The install.json input param object.\n\n Returns:\n str: The user provided input.\n \"\"\"\n # print header information\n self.print_header(data)\n\n default = self._default(data)\n option_index = 0\n valid_values = self._expand_valid_values(data.get('validValues', []))\n if data.get('required', False) is False:\n # add option to invalidate defaults\n valid_values.insert(0, self._no_selection_text)\n\n # default value needs to be converted to index\n if default:\n try:\n option_index = valid_values.index(default)\n except ValueError:\n # if \"magic\" variable (e.g., ${GROUP_TYPES}) was not expanded then use index 0.\n # there is no way to tell if the default value is be part of the expansion.\n if any([re.match(r'^\\${.*}$', v) for v in valid_values]):\n option_index = 0\n else:\n print(\n f'''{c.Fore.RED}Invalid value of ({default}) for {data.get('name')}, '''\n 'check that default value and validValues match in install.json.'\n )\n sys.exit()\n option_text = f'[{option_index}]'\n\n # build options list to display to the user in two columns\n options = []\n for i, v in enumerate(valid_values):\n options.append(f'{i}. {v}')\n\n # display options list into two columns\n left, right = self._split_list(options)\n for i, _ in enumerate(left):\n ld = left[i]\n try:\n rd = right[i]\n except IndexError:\n rd = ''\n print(f'{ld:40} {rd:40}')\n\n # collect user input\n value = self.collect_choice(\n default=option_index, option_text=option_text, valid_values=valid_values\n )\n\n # add input\n self.add_input(name, data, value)\n\n return value\n\n def present_data_types(self, data_types: list, required: Optional[bool] = False) -> str:\n \"\"\"Present data types options.\n\n Args:\n data_types: A list of optional data types.\n required: If False the no selection option will be added.\n\n Returns:\n str: The user provided input.\n \"\"\"\n if 'Any' in data_types:\n data_types = [\n 'Binary',\n 'BinaryArray',\n 'KeyValue',\n 'KeyValueArray',\n 'String',\n 'StringArray',\n 'TCEntity',\n 'TCEntityArray',\n ]\n\n # add option to not select an index value if input is not required\n if required is False:\n data_types.insert(0, self._no_selection_text)\n\n # build options list to display to the user in two columns\n options = []\n for i, v in enumerate(data_types):\n options.append(f'{i}. 
{v}')\n\n left, right = self._split_list(options)\n for i, _ in enumerate(left):\n ld = left[i]\n try:\n rd = right[i]\n except IndexError:\n rd = ''\n print(f'{ld:40} {rd:40}')\n\n index = self._input_value('Type', '[0]') or 0\n\n try:\n data_type = data_types[int(index)]\n except (IndexError, TypeError, ValueError):\n print(\n f'{c.Fore.RED}Invalid index of {index} provided. '\n f'Please provide a integer between 0-{len(data_types) - 1}'\n )\n sys.exit(1)\n\n return data_type\n\n def present_exit_code(self) -> None:\n \"\"\"Provide user input for exit code.\"\"\"\n self.print_header({'label': 'Exit Codes'})\n self.exit_codes = list(set(self.collect_exit_codes(default=[0], option_text='[0]')))\n\n @staticmethod\n def present_help() -> None:\n \"\"\"Provide user help information.\"\"\"\n print(\n f'{c.Fore.CYAN}For String type inputs: \\n'\n ' * A value of null will be treated as an actual null value.\\n'\n ' * Using \"null\" or \\'null\\' to insert a string of null.\\n'\n )\n print(f'{c.Fore.CYAN}When done entering array data press enter to continue.')\n\n def present_key_value_list(self, name: str, data: dict) -> None:\n \"\"\"Build a question for key value list input.\n\n Args:\n name: The name of the input field.\n data: The install.json input param object.\n\n Returns:\n str: The user provided input.\n \"\"\"\n # print header information\n self.print_header(data)\n\n # the default value from install.json or user_data\n default = self._default(data) # array of default values\n\n # collect input\n input_data = self.collect_key_value_array(default=default, required=data.get('required'))\n\n # create variable\n variable = self.add_staging_data(name, 'KeyValueArray', input_data)\n\n # add input to args\n self.add_input(name, data, variable)\n\n # user feedback\n feedback_data = input_data\n if input_data is not None:\n feedback_data = json.dumps(feedback_data)\n\n # # update default\n # if default is None:\n # self.add_user_default(name, input_data)\n\n return variable\n\n def present_multichoice(self, name: str, data: dict) -> list:\n \"\"\"Build a question for multichoice input.\n\n Args:\n name: The name of the input field.\n data: The install.json input param object.\n\n Returns:\n list: The user provided inputs.\n \"\"\"\n # print header information\n self.print_header(data)\n\n default = self._default(data) # array of default values\n option_indexes = [0]\n valid_values = self._expand_valid_values(data.get('validValues', []))\n if data.get('required', False) is False:\n # add option to invalidate defaults\n valid_values.insert(0, self._no_selection_text)\n\n # default values will be return as an array (e.g., one|two -> ['one'. 'two']).\n # using the valid values array we can look up these values to show as default in input.\n if default:\n option_indexes = []\n for d in default:\n try:\n option_indexes.append(valid_values.index(d))\n except ValueError:\n # if \"magic\" variable (e.g., ${GROUP_TYPES}) was not expanded then skip value.\n # there is no way to tell if the default value is be part of the expansion.\n if any([re.match(r'^\\${.*}$', v) for v in valid_values]):\n continue\n\n print(\n f'''{c.Fore.RED}Invalid value of ({d}) for {data.get('name')}, check '''\n 'that default value(s) and validValues match in install.json.'\n )\n sys.exit()\n option_text = f''' [{','.join([str(v) for v in option_indexes])}]'''\n\n # build options list to display to the user in two columns\n options = []\n for i, v in enumerate(valid_values):\n options.append(f'{i}. 
{v}')\n\n # display options list into two columns\n left, right = self._split_list(options)\n for i, _ in enumerate(left):\n ld = left[i]\n try:\n rd = right[i]\n except IndexError:\n rd = ''\n print(f'{ld:40} {rd:40}')\n\n # collect user input\n values = self.collect_multichoice(\n default=option_indexes,\n option_text=option_text,\n required=data.get('required'),\n valid_values=valid_values,\n )\n\n # add input\n self.add_input(name, data, values)\n\n return values\n\n def present_string(self, name: str, data: dict) -> str:\n \"\"\"Build a question for string input.\n\n Args:\n name: The name of the input field.\n data: The install.json input param object.\n\n Returns:\n str: The user provided input.\n \"\"\"\n # display header information\n self.print_header(data)\n\n # use playbook data types to determine what input to provide (default to String)\n data_type = data.get('playbookDataType', ['String'])[0]\n if len(data.get('playbookDataType', [])) > 1 or data_type.lower() == 'any':\n data_type = self.present_data_types(\n data.get('playbookDataType'), required=data.get('required', False)\n )\n\n # no need to proceed if there is no valid data type selected.\n if data_type == self._no_selection_text:\n self.add_input(name, data, None)\n self.print_feedback('null')\n return None\n\n # the default value from install.json or user_data\n default = self._default(data)\n\n option_text = ''\n if default is not None:\n option_text = f'[{default}]'\n\n # use data_type to properly format collection input\n input_value = self.collect_type_map[data_type](\n default=default, option_text=option_text, required=data.get('required', False)\n )\n\n # add staging data and get variable name\n variable = self.add_staging_data(name, data_type, input_value)\n\n # add input\n self.add_input(name, data, variable)\n\n # # update default\n # if default is None:\n # if len(data.get('playbookDataType', [])) > 1 or data_type.lower() == 'any':\n # # for inputs that take multiple types we need to store user default with the type\n # self.add_user_default(name, input_value, data_type)\n # else:\n # self.add_user_default(name, input_value)\n\n return variable\n\n @staticmethod\n def print_feedback(feedback_value: Union[list, str]) -> None:\n \"\"\"Print the value used.\"\"\"\n print(f'Using value: {c.Fore.GREEN}{feedback_value}\\n')\n\n @staticmethod\n def print_header(data: dict) -> None:\n \"\"\"Enrich the header with metadata.\n\n Args:\n data: The install.json input param object.\n \"\"\"\n\n def _print_metadata(title: str, value: str) -> None:\n \"\"\"Print the title and value\"\"\"\n print(f'{c.Fore.CYAN}{title!s:<22}: {c.Fore.RESET}{c.Style.BRIGHT}{value}')\n\n label = data.get('label', 'NO LABEL')\n print(f'\\n{c.Fore.GREEN}{label}')\n\n # type\n _print_metadata('Type', data.get('type'))\n\n # default\n default = data.get('default')\n if default:\n _print_metadata('Default', default)\n\n # note\n note = data.get('note', '')[:200]\n if note:\n _print_metadata('Note', note)\n\n # required\n _print_metadata('Required', str(data.get('required', False)).lower())\n\n # hidden\n if data.get('hidden'):\n _print_metadata('Hidden', 'true')\n\n # Input Types\n pbt = ','.join(data.get('playbookDataType', []))\n if pbt:\n _print_metadata('Playbook Data Types', pbt)\n\n vv = ','.join(data.get('validValues', []))\n if vv:\n _print_metadata('Valid Values', vv)\n\n print('-' * 50)\n\n @staticmethod\n def print_invalid_bool() -> None:\n \"\"\"Print an invalid bool error.\"\"\"\n print(f'{c.Fore.RED}The provided value is not a 
boolean value (true/false).\\n')\n\n @staticmethod\n def print_invalid_exit_code() -> None:\n \"\"\"Print an invalid exit code error.\"\"\"\n print(f'{c.Fore.RED}The provided value is not a valid exit code (0, 1, 3).\\n')\n\n @staticmethod\n def print_invalid_index(range_: str) -> None:\n \"\"\"Print an invalid index error.\n\n Args:\n range_: The range of possible value for choice or multichoice selections.\n \"\"\"\n print(\n f'{c.Fore.RED}The provided index value is not '\n f'valid, please select a valid value between {range_}.\\n'\n )\n\n @staticmethod\n def print_required() -> None:\n \"\"\"Print a required error.\"\"\"\n print(f'{c.Fore.RED}This input is required, please enter an appropriate value.\\n')\n\n @property\n def staging_data(self) -> dict:\n \"\"\"Return staging data dict.\"\"\"\n return self._staging_data\n\n # @property\n # def user_defaults(self):\n # \"\"\"Return user defaults\"\"\"\n # if self._user_defaults is None:\n # user_defaults = {}\n # if os.path.isfile(self.user_defaults_filename):\n # with open(self.user_defaults_filename, 'r') as fh:\n # user_defaults = json.load(fh)\n\n # # use feature defaults\n # self._user_defaults = user_defaults.get(self.profile.feature)\n # if self._user_defaults is None:\n # # use base defaults if not feature defaults found\n # self._user_defaults = user_defaults.get('base', {})\n\n # return self._user_defaults\n","repo_name":"ThreatConnect-Inc/threatconnect-developer-docs","sub_path":"tcex/tcex/profile/interactive.py","file_name":"interactive.py","file_ext":"py","file_size_in_byte":38738,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"10619696634","text":"from django.contrib import admin\nfrom .models import BoxOffice, MovieInfo\n\n\n# Register your models here.\n\nclass BoxOfficeAdmin(admin.ModelAdmin):\n fieldsets = [\n (None, {'fields': ['yearRate', 'crawlDate']}),\n ('movieInfo', {'fields': ['movieId', 'movieName', 'releaseInfo']}),\n ('boxOfficeInfo', {'fields': ['boxRate', 'boxInfo', 'splitBoxInfo', 'sumBoxInfo', 'splitSumBoxInfo']}),\n ('showInfo', {'fields': ['showInfo', 'showView', 'showRate', 'seatRate']})\n ]\n\n\nadmin.site.register(BoxOffice, BoxOfficeAdmin)\nadmin.site.register(MovieInfo)\n","repo_name":"Dawinia/movie_backend","sub_path":"boxOffice/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34547268089","text":"import asyncio\nfrom khl import Bot, Cert, Client, Gateway, HTTPRequester\nfrom bot.api import log\nfrom bot.api.command_manager import ACommandManager\nfrom bot.cli import cli_entry\n\n\nclass ABot(Bot):\n def __init__(self, token: str = '', *, cert: Cert = None, client: Client = None, gate: Gateway = None,\n out: HTTPRequester = None, compress: bool = True, port=5000, route='/khl-wh'):\n super().__init__(token, cert=cert, client=client, gate=gate, out=out, compress=compress, port=port, route=route)\n self.command = ACommandManager()\n\n def run(self):\n if not self.loop:\n self.loop = asyncio.get_event_loop()\n try:\n self.loop.run_until_complete(self.start())\n except KeyboardInterrupt:\n cli_entry.stop_plugins()\n log.logger.info('再见')\n log.close()\n\n def stop(self):\n self.loop.close()\n 
pass\n","repo_name":"Dubhe-Studio/Mozi","sub_path":"bot/api/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"34828620506","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ntest_vcs\n------------\n\nTests for `cookiecutter.vcs` module.\n\"\"\"\n\nimport os\nimport pytest\n\nfrom cookiecutter import exceptions, vcs\n\n\n@pytest.mark.parametrize('repo_url, exp_repo_type, exp_repo_url', [\n (\n \"git+https://github.com/pytest-dev/cookiecutter-pytest-plugin.git\",\n \"git\",\n \"https://github.com/pytest-dev/cookiecutter-pytest-plugin.git\"\n ), (\n \"hg+https://bitbucket.org/foo/bar.hg\",\n \"hg\",\n \"https://bitbucket.org/foo/bar.hg\"\n ), (\n \"https://github.com/pytest-dev/cookiecutter-pytest-plugin.git\",\n \"git\",\n \"https://github.com/pytest-dev/cookiecutter-pytest-plugin.git\"\n ), (\n \"https://bitbucket.org/foo/bar.hg\",\n \"hg\",\n \"https://bitbucket.org/foo/bar.hg\"\n )\n])\ndef test_identify_known_repo(repo_url, exp_repo_type, exp_repo_url):\n assert vcs.identify_repo(repo_url) == (exp_repo_type, exp_repo_url)\n\n\n@pytest.fixture(params=[\n \"foo+git\", # uses explicit identifier with 'git' in the wrong place\n \"foo+hg\", # uses explicit identifier with 'hg' in the wrong place\n \"foo+bar\", # uses explicit identifier with neither 'git' nor 'hg'\n \"foobar\" # no identifier but neither 'git' nor 'bitbucket' in url\n])\ndef unknown_repo_type_url(request):\n return request.param\n\n\ndef test_identify_raise_on_unknown_repo(unknown_repo_type_url):\n with pytest.raises(exceptions.UnknownRepoType):\n vcs.identify_repo(unknown_repo_type_url)\n\n\ndef test_prompt_should_ask_and_rm_repo_dir(mocker, tmpdir):\n \"\"\"In `prompt_and_delete_repo()`, if the user agrees to delete/reclone the\n repo, the repo should be deleted.\n \"\"\"\n mock_read_user = mocker.patch(\n 'cookiecutter.vcs.read_user_yes_no',\n return_value=True,\n autospec=True\n )\n repo_dir = tmpdir.mkdir('repo')\n\n vcs.prompt_and_delete_repo(str(repo_dir))\n\n assert mock_read_user.called\n assert not repo_dir.exists()\n\n\ndef test_prompt_should_ask_and_keep_repo_dir(mocker, tmpdir):\n \"\"\"In `prompt_and_delete_repo()`, if the user wants to keep their old\n cloned template repo, it should not be deleted.\n \"\"\"\n mock_read_user = mocker.patch(\n 'cookiecutter.vcs.read_user_yes_no',\n return_value=False,\n autospec=True\n )\n repo_dir = tmpdir.mkdir('repo')\n\n with pytest.raises(SystemExit):\n vcs.prompt_and_delete_repo(str(repo_dir))\n\n assert mock_read_user.called\n assert repo_dir.exists()\n\n\ndef test_prompt_should_not_ask_if_no_input_and_rm_repo_dir(mocker, tmpdir):\n \"\"\"In `prompt_and_delete_repo()`, if `no_input` is True, the call to\n `vcs.read_user_yes_no()` should be suppressed.\n \"\"\"\n mock_read_user = mocker.patch(\n 'cookiecutter.vcs.read_user_yes_no',\n return_value=True,\n autospec=True\n )\n repo_dir = tmpdir.mkdir('repo')\n\n vcs.prompt_and_delete_repo(str(repo_dir), no_input=True)\n\n assert not mock_read_user.called\n assert not repo_dir.exists()\n\n\n@pytest.fixture\ndef clone_dir(tmpdir):\n \"\"\"Simulates creation of a directory called `clone_dir` inside of `tmpdir`.\n Returns a str to said directory.\n \"\"\"\n return str(tmpdir.mkdir('clone_dir'))\n\n\ndef test_clone_should_raise_if_vcs_not_installed(mocker, clone_dir):\n \"\"\"In `clone()`, a `VCSNotInstalled` exception should be raised if no VCS\n is installed.\n \"\"\"\n mocker.patch(\n 
'cookiecutter.vcs.is_vcs_installed',\n autospec=True,\n return_value=False\n )\n\n repo_url = 'https://github.com/pytest-dev/cookiecutter-pytest-plugin.git'\n\n with pytest.raises(exceptions.VCSNotInstalled):\n vcs.clone(repo_url, clone_to_dir=clone_dir)\n\n\n@pytest.mark.parametrize('which_return, result', [\n ('', False),\n (None, False),\n (False, False),\n ('/usr/local/bin/git', True),\n])\ndef test_is_vcs_installed(mocker, which_return, result):\n mocker.patch(\n 'cookiecutter.vcs.which',\n autospec=True,\n return_value=which_return\n )\n assert vcs.is_vcs_installed('git') == result\n\n\n@pytest.mark.parametrize('repo_type, repo_url, repo_name', [\n ('git', 'https://github.com/hello/world.git', 'world'),\n ('hg', 'https://bitbucket.org/foo/bar', 'bar'),\n])\ndef test_clone_should_invoke_git(\n mocker, clone_dir, repo_type, repo_url, repo_name):\n \"\"\"When `clone()` is called with a git/hg repo, the corresponding VCS\n command should be run via `subprocess.check_call()`.\n\n This should take place:\n * In the correct dir\n * With the correct args.\n \"\"\"\n mocker.patch(\n 'cookiecutter.vcs.is_vcs_installed',\n autospec=True,\n return_value=True\n )\n\n mock_subprocess = mocker.patch(\n 'cookiecutter.vcs.subprocess.check_call',\n autospec=True,\n )\n expected_repo_dir = os.path.normpath(os.path.join(clone_dir, repo_name))\n\n branch = 'foobar'\n\n repo_dir = vcs.clone(\n repo_url,\n checkout=branch,\n clone_to_dir=clone_dir,\n no_input=True\n )\n\n assert repo_dir == expected_repo_dir\n\n mock_subprocess.assert_any_call(\n [repo_type, 'clone', repo_url], cwd=clone_dir\n )\n mock_subprocess.assert_any_call(\n [repo_type, 'checkout', branch], cwd=expected_repo_dir\n )\n\n\ndef test_clone_should_abort_if_user_does_not_want_to_reclone(mocker, tmpdir):\n \"\"\"In `clone()`, if user doesn't want to reclone, Cookiecutter should exit\n without cloning anything.\n \"\"\"\n mocker.patch(\n 'cookiecutter.vcs.is_vcs_installed',\n autospec=True,\n return_value=True\n )\n mocker.patch(\n 'cookiecutter.vcs.prompt_and_delete_repo',\n side_effect=SystemExit,\n autospec=True\n )\n mock_subprocess = mocker.patch(\n 'cookiecutter.vcs.subprocess.check_call',\n autospec=True,\n )\n\n clone_to_dir = tmpdir.mkdir('clone')\n\n # Create repo_dir to trigger prompt_and_delete_repo\n clone_to_dir.mkdir('cookiecutter-pytest-plugin')\n\n repo_url = 'https://github.com/pytest-dev/cookiecutter-pytest-plugin.git'\n\n with pytest.raises(SystemExit):\n vcs.clone(repo_url, clone_to_dir=str(clone_to_dir))\n assert not mock_subprocess.called\n\n\ndef test_clone_should_rstrip_trailing_slash_in_repo_url(mocker, clone_dir):\n \"\"\"In `clone()`, repo URL's trailing slash should be stripped if one is\n present.\n \"\"\"\n mocker.patch(\n 'cookiecutter.vcs.is_vcs_installed',\n autospec=True,\n return_value=True\n )\n\n mock_subprocess = mocker.patch(\n 'cookiecutter.vcs.subprocess.check_call',\n autospec=True,\n )\n\n vcs.clone(\n 'https://github.com/foo/bar/',\n clone_to_dir=clone_dir,\n no_input=True\n )\n\n mock_subprocess.assert_called_once_with(\n ['git', 'clone', 'https://github.com/foo/bar'], cwd=clone_dir\n )\n","repo_name":"digideskio/cookiecutter","sub_path":"tests/test_vcs.py","file_name":"test_vcs.py","file_ext":"py","file_size_in_byte":6838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"5699008626","text":"# Baekjoon Online Judge - 1547번. 
공\n\nM = int(input())\n\ncups = list(range(4))\n# 컵의 위치를 뒤바꾸면서 어차피 공이 1번에 있기 때문에 다 옮긴 후 1의 위치를 찾으면 된다.\nfor _ in range(M):\n x, y = map(int, input().split())\n cups[x], cups[y] = cups[y], cups[x]\nprint(cups.index(1))\n","repo_name":"wnstj-yang/Algorithm","sub_path":"BOJ/BOJ_1547.py","file_name":"BOJ_1547.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8033778117","text":"import sys\nsys.path.append(\".\")\n\nfrom vector_map import *\n\ninit_visualize()\nworld = get_map_ROS(\"resource/matsuken_map6\") \nr = world.get_root_region()\nss = SimulationSpace(r)\nss.show_outer_boundary()","repo_name":"RobotSpatialCognition/vector_map","sub_path":"test/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":199,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"30297840116","text":"# Application de l'algorithme minimax au morpion\n# Fait par Steve MAHOT et Matthieu LOUF\n\n#------------ IMPORTATION MODULES -------------#\nimport random as rnd\nimport os\n#classe du morpion dans le fichier morpion.py\nfrom morpion import morpion\n\n#------------ DEFINITION FONCTIONS -------------#\n\n#fonction pour nettoyer la console\ndef cls():\n os.system('cls' if os.name=='nt' else 'clear')\n\ndef humain_vs_humain():\n m = morpion()\n\n print(\"Début de la partie : \\n\")\n\n while not (m.gagnant() or m.matchNul()) :\n \n print(\"Au tour du joueur \\'\",m.tour,\"\\' :\")\n m.display()\n\n print(\"-> Calcul des actions possibles (peut durer 10s)\\n\")\n print(\"Coordonnée conseillée :\",m.MinMax(),\"\\n\")\n x = int(input(\"Jouer à la coordonnée x = \"))\n y = int(input(\" y = \"))\n\n m.Results([x,y],m.tour)\n m.tourSuivant()\n\n cls()\n print(\"\\n\")\n\n m.display()\n gagnant=m.gagnant()\n if gagnant==False:\n print(\"Match nul :/\\n\")\n else:\n print(\"Le gagnant est le joueur \\'\",m.gagnant(),\"\\'! :D\\n\")\n\ndef humain_vs_ia():\n m = morpion()\n\n rnd.seed()\n type_joueur =['joueur','ia']\n joueur_actuel = rnd.randint(0,1)\n\n print(\"Début de la partie : \\n\")\n\n while not (m.gagnant() or m.matchNul()) :\n\n print(\"Au tour de \",type_joueur[joueur_actuel],\" \\'\",m.tour,\"\\' :\")\n m.display()\n\n if(type_joueur[joueur_actuel]=='joueur'):\n x = int(input(\"Jouer à la coordonnée x = \"))\n y = int(input(\" y = \"))\n m.Results([x,y],m.tour)\n \n else :\n print(\"-> Calcul des actions possibles (peut durer 10s)\\n\")\n choix_ia=m.MinMax()\n m.Results([choix_ia[0][0],choix_ia[0][1]],m.tour)\n \n m.tourSuivant()\n joueur_actuel+=1\n if joueur_actuel==2:\n joueur_actuel=0\n\n cls()\n print(\"\\n\")\n\n m.display()\n gagnant=m.gagnant()\n if gagnant==False:\n print(\"Match nul :/\\n\")\n else:\n print(\"Le gagnant est \\'\",m.gagnant(),\"\\'! 
:D\\n\")\n\ndef menu():\n print(\"\\n 1 : Humain vs Humain assisté par ordinateur\\n 2 : Humain vs IA\\n\")\n choix = int(input(\"Mode de jeu : \"))\n\n if choix==1:\n humain_vs_humain()\n if choix==2:\n humain_vs_ia()\n\n#------------ Lancement du menu -------------#\nmenu()","repo_name":"smahot/morpion-minimax","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"20131714048","text":"from PyQt5 import QtCore, QtGui, QtWidgets\r\nimport sqlite3\r\nimport doctorUI\r\n\r\nconn = sqlite3.connect('doctor.db')\r\n\r\nc = conn.cursor()\r\n\r\nclass Ui_MainWindow(object):\r\n\r\n def openWindow(self, first, last, hist):\r\n self.window = QtWidgets.QMainWindow()\r\n self.ui = doctorUI.Ui_OtherWindow(first, last, hist)\r\n self.ui.setupUi(self.window)\r\n self.window.show()\r\n\r\n def setupUi(self, MainWindow):\r\n\r\n def getInfo():\r\n inputLN = self.lineEdit_2.text()\r\n c.execute(\"SELECT * FROM doctor WHERE lastName == ?\",(inputLN,))\r\n data = c.fetchall()\r\n for entry in data:\r\n fn = entry[0]\r\n ln = entry[1]\r\n history = entry[2]\r\n self.openWindow(fn, ln, history)\r\n\r\n def register():\r\n inputFN = self.lineEdit.text()\r\n inputLN = self.lineEdit_2.text()\r\n c.execute(\"INSERT INTO doctor(firstName,lastName, history) VALUES (?,?,'None')\",(inputFN, inputLN))\r\n conn.commit()\r\n self.lineEdit.clear()\r\n self.lineEdit_2.clear()\r\n\r\n MainWindow.setObjectName(\"MainWindow\")\r\n MainWindow.resize(487, 550)\r\n icon = QtGui.QIcon()\r\n icon.addPixmap(QtGui.QPixmap(\"logo.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\r\n MainWindow.setWindowIcon(icon)\r\n self.centralwidget = QtWidgets.QWidget(MainWindow)\r\n self.centralwidget.setObjectName(\"centralwidget\")\r\n self.label_3 = QtWidgets.QLabel(self.centralwidget)\r\n self.label_3.setGeometry(QtCore.QRect(150, 40, 201, 201))\r\n self.label_3.setText(\"\")\r\n self.label_3.setTextFormat(QtCore.Qt.AutoText)\r\n self.label_3.setPixmap(QtGui.QPixmap(\"logo.png\"))\r\n self.label_3.setScaledContents(True)\r\n self.label_3.setObjectName(\"label_3\")\r\n self.label_4 = QtWidgets.QLabel(self.centralwidget)\r\n self.label_4.setGeometry(QtCore.QRect(100, 220, 281, 111))\r\n self.label_4.setText(\"\")\r\n self.label_4.setTextFormat(QtCore.Qt.AutoText)\r\n self.label_4.setPixmap(QtGui.QPixmap(\"title.png\"))\r\n self.label_4.setScaledContents(False)\r\n self.label_4.setObjectName(\"label_4\")\r\n self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)\r\n self.lineEdit.setGeometry(QtCore.QRect(100, 340, 311, 31))\r\n self.lineEdit.setText(\"\")\r\n self.lineEdit.setObjectName(\"lineEdit\")\r\n self.label_5 = QtWidgets.QLabel(self.centralwidget)\r\n self.label_5.setGeometry(QtCore.QRect(230, 310, 51, 21))\r\n self.label_5.setObjectName(\"label_5\")\r\n self.lineEdit_2 = QtWidgets.QLineEdit(self.centralwidget)\r\n self.lineEdit_2.setGeometry(QtCore.QRect(100, 410, 311, 31))\r\n self.lineEdit_2.setText(\"\")\r\n self.lineEdit_2.setObjectName(\"lineEdit_2\")\r\n self.label_6 = QtWidgets.QLabel(self.centralwidget)\r\n self.label_6.setGeometry(QtCore.QRect(230, 380, 51, 21))\r\n self.label_6.setObjectName(\"label_6\")\r\n self.pushButton = QtWidgets.QPushButton(self.centralwidget)\r\n self.pushButton.setGeometry(QtCore.QRect(180, 470, 75, 23))\r\n self.pushButton.setObjectName(\"pushButton\")\r\n self.pushButton.clicked.connect(getInfo)\r\n self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)\r\n 
self.pushButton_2.setGeometry(QtCore.QRect(270, 470, 75, 23))\r\n self.pushButton_2.setObjectName(\"pushButton_2\")\r\n self.pushButton_2.clicked.connect(register)\r\n MainWindow.setCentralWidget(self.centralwidget)\r\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\r\n self.statusbar.setObjectName(\"statusbar\")\r\n MainWindow.setStatusBar(self.statusbar)\r\n\r\n self.retranslateUi(MainWindow)\r\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\r\n\r\n def retranslateUi(self, MainWindow):\r\n _translate = QtCore.QCoreApplication.translate\r\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Doctor - Home\"))\r\n self.label_5.setText(_translate(\"MainWindow\", \"First Name\"))\r\n self.label_6.setText(_translate(\"MainWindow\", \"Last Name\"))\r\n self.pushButton.setText(_translate(\"MainWindow\", \"Login\"))\r\n self.pushButton_2.setText(_translate(\"MainWindow\", \"Register\"))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n import sys\r\n app = QtWidgets.QApplication(sys.argv)\r\n MainWindow = QtWidgets.QMainWindow()\r\n ui = Ui_MainWindow()\r\n ui.setupUi(MainWindow)\r\n MainWindow.show()\r\n sys.exit(app.exec_())\r\n\r\n","repo_name":"siiiiijey/CPE106_Group02","sub_path":"login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":4550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40923896952","text":"from Crypto.PublicKey import RSA\nfrom Crypto.Signature import PKCS1_v1_5\nfrom Crypto.Hash import SHA256\nfrom Crypto.PublicKey.RSA import RsaKey\n\nfrom collections import Counter\n\nimport Crypto.Random\nimport binascii\nimport hashlib\nimport json\nimport datetime\nimport random\nimport string\n\n\ndef create_key() -> RSA.RsaKey:\n \"\"\"Creates a random private key\"\"\"\n\n return RSA.generate(1024, Crypto.Random.new().read)\n\n\ndef parse_key(key: RSA.RsaKey) -> str:\n \"\"\"Returns the string version of a RSA key\"\"\"\n\n return binascii.hexlify(key.exportKey(\n format='DER')).decode('ascii')\n\ndef import_key(key: str) -> RSA.RsaKey:\n \"\"\"Returns the RSA key correspondent to a string version.\n It's the inverse function of parse_key\"\"\"\n\n return RSA.importKey(binascii.unhexlify(key))\n\ndef sign(private_key: RsaKey, content: dict) -> None:\n \"\"\"Returns a signature according to a private key and a content\"\"\"\n\n signer = PKCS1_v1_5.new(private_key)\n encoded_content = json.dumps(content, sort_keys=True).encode()\n h = SHA256.new(encoded_content)\n signature = signer.sign(h)\n\n return binascii.hexlify(signature).decode('ascii')\n\n\ndef compare_signature(public_key: str, signature: str, content: dict) -> bool:\n \"\"\"Verifies if the signature is valid\"\"\"\n\n public_key = import_key(public_key)\n verifier = PKCS1_v1_5.new(public_key)\n encoded_content = json.dumps(content, sort_keys=True).encode()\n h = SHA256.new(encoded_content)\n\n return verifier.verify(h, binascii.unhexlify(signature))\n\n\ndef verify_hash(content: dict, hashing: str) -> bool:\n \"\"\"Verifies if the hash is valid\"\"\"\n\n encoded_content = json.dumps(content, sort_keys=True).encode()\n hash_value = hashlib.sha256(encoded_content).hexdigest()\n\n return hash_value == hashing\n\n\ndef hash_content(content: dict, difficulty: int, nonce_limit: int) -> dict:\n \"\"\"Returns the new dictionary with it's hash containing \n N leading zeros, where N is the given difficulty\"\"\"\n\n content[\"nonce\"] = 0\n timestamp = datetime.datetime.now(datetime.timezone.utc)\n content[\"timestamp\"] = str(timestamp)\n hash_value = 
\"\"\n\n while not hash_value[:difficulty] == \"0\" * difficulty:\n content[\"nonce\"] += 1\n\n if content[\"nonce\"] > nonce_limit:\n timestamp = datetime.datetime.now(\n datetime.timezone.utc)\n\n content[\"timestamp\"] = str(timestamp)\n content[\"nonce\"] = 0\n\n encoded_content = json.dumps(content, sort_keys=True).encode()\n hash_value = hashlib.sha256(encoded_content).hexdigest()\n\n content[\"hash_value\"] = hash_value\n\n return content\n\n\ndef random_word(length):\n letters = string.ascii_lowercase\n\n return ''.join(random.choice(letters) for _ in range(length))\n","repo_name":"mateusap1/athena-old","sub_path":"model/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2757,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"22689029925","text":"#coding:utf-8\r\n\"\"\"\r\ntensorflow 1.1\r\npython 3\r\nmatplotlib 2.02\r\n\"\"\"\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nx = np.linspace(-5,5,400)\r\n\r\n#定义激活函数\r\ny_relu = tf.nn.relu(x)\r\ny_sigmoid = tf.nn.sigmoid(x)\r\ny_tanh = tf.nn.tanh(x)\r\ny_softplus = tf.nn.softplus(x)\r\n\r\nwith tf.Session() as sess:\r\n [y_relu,y_sigmoid,y_softplus,y_tanh] = sess.run([y_relu,y_sigmoid,y_softplus,y_tanh])\r\n #画图\r\n plt.figure(1,figsize=(8,6))\r\n plt.subplot(221)\r\n plt.plot(x, y_relu, c='blue', label='relu')\r\n plt.legend(loc='best')\r\n plt.grid(True)\r\n\r\n plt.subplot(222)\r\n plt.plot(x, y_sigmoid, c='red', label='sigmoid')\r\n plt.legend(loc='best')\r\n plt.grid(True)\r\n\r\n plt.subplot(223)\r\n plt.plot(x, y_tanh, c='c', label='tanh')\r\n plt.legend(loc='best')\r\n plt.grid(True)\r\n\r\n plt.subplot(224)\r\n plt.plot(x, y_softplus, c='yellow', label='softplus')\r\n plt.legend(loc='best')\r\n plt.grid(True)\r\n\r\n plt.show() ","repo_name":"sun123zhengjun/deep-learning-","sub_path":"activation_visualization.py","file_name":"activation_visualization.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7042655555","text":"import tensorflow as tf\nimport tensorflow_hub as hub\nimport load_dataset as dataset\nimport numpy as np\nimport pickle\nimport sys\nimport bert\n\nfrom bert import run_classifier\nfrom datetime import datetime\nfrom models import model_fn_builder\nfrom tensorflow import set_random_seed\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import recall_score, precision_score, f1_score, accuracy_score, confusion_matrix\n\nif len(sys.argv) != 3:\n sys.exit('Please provide a dataset name and conf!')\n\nnp.random.RandomState(42)\nset_random_seed(42)\n\nDATASET_NAME = sys.argv[1]\nCONF_TYPE = sys.argv[2]\n\n# Compute train and warmup steps from batch size\n# These hyperparameters are copied from this colab notebook (https://colab.sandbox.google.com/github/tensorflow/tpu/blob/master/tools/colab/bert_finetuning_with_cloud_tpus.ipynb)\nBATCH_SIZE = 8\nLEARNING_RATE = 2e-5\nNUM_TRAIN_EPOCHS = 10.0\n# Warmup is a period of time where the learning rate\n# is small and gradually increases--usually helps training.\nWARMUP_PROPORTION = 0.1\n# Model configs\nSAVE_CHECKPOINTS_STEPS = 500\nSAVE_SUMMARY_STEPS = 100\nIS_TRAINING = True\nIS_EVALUATION = True\nPRINT_SUMMARY = False\n\nBERT_MODEL_HUB = \"https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1\"\n\n\ndef create_tokenizer_from_hub_module():\n \"\"\"Get the vocab file and casing info from the Hub module.\"\"\"\n with 
tf.Graph().as_default():\n print(\"get module\")\n bert_module = hub.Module(BERT_MODEL_HUB)\n print(\"tokenization info\")\n tokenization_info = bert_module(\n signature=\"tokenization_info\", as_dict=True)\n with tf.Session() as sess:\n print(\"sess.run\")\n vocab_file, do_lower_case = sess.run([tokenization_info[\"vocab_file\"],\n tokenization_info[\"do_lower_case\"]])\n\n return bert.tokenization.FullTokenizer(\n vocab_file=vocab_file, do_lower_case=do_lower_case)\n\n\n# Data\n\ntest_data = []\ntest_labels = []\nif DATASET_NAME == 'MR':\n print('Loading MR Dataset')\n X, y = dataset.load_mr()\nelif DATASET_NAME == 'TREC':\n print('Loading TREC Dataset')\n X, y, test_data, test_labels = dataset.load_trec()\nelif DATASET_NAME == 'PORT_TWITTER':\n print('Loading Portuguese Twitter Dataset')\n X, y = dataset.load_portuguese_twitter()\nelse:\n sys.exit('DATASET_NAME is not a valid dataset!')\n\nif len(test_data) > 0:\n has_test_data = True\nelse:\n has_test_data = False\n\nshuffled_indices = list(range(len(X)))\nnp.random.shuffle(shuffled_indices)\n\nX = X[shuffled_indices]\ny = y[shuffled_indices]\n\nlabel_list = np.unique(y)\n\nprint('Defining training and test data...')\n# We'll set sequences to be the size of the longest sentence in the dataset.\nMAX_SEQ_LENGTH = max(list(map(lambda x: len(x), X)))\n\nif not has_test_data:\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.10, random_state=42)\nelse:\n X_train, X_test, y_train, y_test = X, test_data, y, test_labels\n\n# Data Preprocessing\nprint('Getting train InputExamples...')\ntrain_InputExamples = []\nfor text, label in zip(X_train, y_train):\n train_InputExamples.append(\n bert.run_classifier.InputExample(guid=None, text_a=text, label=label))\n\nprint('Getting test InputExamples...')\ntest_InputExamples = []\nfor text, label in zip(X_test, y_test):\n test_InputExamples.append(\n bert.run_classifier.InputExample(guid=None, text_a=text, label=label))\n\nprint('Creating tokenizer...')\ntokenizer = create_tokenizer_from_hub_module()\n\nprint('Converting examples to features...')\n# Convert our train and test features to InputFeatures that BERT understands.\ntrain_features = bert.run_classifier.convert_examples_to_features(\n train_InputExamples, label_list, MAX_SEQ_LENGTH, tokenizer)\ntest_features = bert.run_classifier.convert_examples_to_features(\n test_InputExamples, label_list, MAX_SEQ_LENGTH, tokenizer)\n\n# Compute number of train steps and warmup steps from batch size\nnum_train_steps = int(len(train_features) / BATCH_SIZE * NUM_TRAIN_EPOCHS)\nnum_warmup_steps = int(num_train_steps * WARMUP_PROPORTION)\n\nif CONF_TYPE == 'MULTICHANNEL':\n model_names = ['MULTICHANNEL']\n model_confs = [\n {'static': False, 'multichannel': True}\n ]\nelif CONF_TYPE == 'NONSTATIC':\n model_names = ['NONSTATIC']\n model_confs = [\n {'static': False, 'multichannel': False},\n ]\nelif CONF_TYPE == 'STATIC':\n model_names = ['STATIC']\n model_confs = [\n {'static': True, 'multichannel': False}\n ]\nelse:\n sys.exit('CONF_TYPE must be STATIC, NONSTATIC or MULTICHANNEL!')\n\nfor model_name, model_conf in zip(model_names, model_confs):\n print('Training and evaluating {} model'.format(model_name))\n\n OUTPUT_DIR = 'output_bert_{}_{}'.format(DATASET_NAME, model_name)\n DO_DELETE = True\n\n if DO_DELETE:\n try:\n tf.gfile.DeleteRecursively(OUTPUT_DIR)\n except:\n # Doesn't matter if the directory didn't exist\n pass\n tf.gfile.MakeDirs(OUTPUT_DIR)\n print('***** Model output directory: {} *****'.format(OUTPUT_DIR))\n\n # Specify 
outpit directory and number of checkpoint steps to save\n run_config = tf.estimator.RunConfig(\n model_dir=OUTPUT_DIR,\n save_summary_steps=SAVE_SUMMARY_STEPS,\n save_checkpoints_steps=SAVE_CHECKPOINTS_STEPS,\n keep_checkpoint_max=3)\n\n model_fn = model_fn_builder(\n num_labels=len(label_list),\n learning_rate=LEARNING_RATE,\n num_train_steps=num_train_steps,\n num_warmup_steps=num_warmup_steps,\n multichannel=model_conf['multichannel'],\n static=model_conf['static'],\n print_summary=PRINT_SUMMARY\n )\n\n estimator = tf.estimator.Estimator(\n model_fn=model_fn,\n config=run_config,\n params={\"batch_size\": BATCH_SIZE})\n\n training_time = None\n\n if IS_TRAINING:\n # Create an input function for training. drop_remainder = True for using TPUs.\n train_input_fn = bert.run_classifier.input_fn_builder(\n features=train_features,\n seq_length=MAX_SEQ_LENGTH,\n is_training=True,\n drop_remainder=False)\n\n print('Beginning Training for model {}!'.format(model_name))\n current_time = datetime.now()\n estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)\n training_time = datetime.now() - current_time\n print(\"Training took time \", training_time)\n\n if IS_EVALUATION:\n test_input_fn = bert.run_classifier.input_fn_builder(\n features=test_features,\n seq_length=MAX_SEQ_LENGTH,\n is_training=False,\n drop_remainder=False)\n\n print('Beginning Prediction for model {}!'.format(model_name))\n current_time = datetime.now()\n prediction_results = list(estimator.predict(input_fn=test_input_fn))\n prediction_results = [x['labels'] for x in prediction_results]\n avg_prediction_time = (datetime.now() - current_time)/len(X_test)\n\n prediction_file = \"{}/{}_{}.predictions\".format(\n OUTPUT_DIR, DATASET_NAME, model_name\n )\n with open(prediction_file, 'wb') as file_predictions:\n pickle.dump(\n prediction_results, file_predictions\n )\n\n precision = precision_score(\n y_test, prediction_results, average='weighted')\n recall = recall_score(\n y_test, prediction_results, average='weighted')\n f1_score = f1_score(\n y_test, prediction_results, average='weighted')\n accuracy = accuracy_score(y_test, prediction_results)\n conf_matrix = confusion_matrix(y_test, prediction_results)\n print(\"Average Prediction time: \", avg_prediction_time)\n print(\"Precision: {}\".format(precision))\n print(\"Recall: {}\".format(recall))\n print(\"F1: {}\".format(f1_score))\n print('Accuracy: {}'.format(accuracy))\n print('Confusion Matrix: {}'.format(conf_matrix))\n\n filename = \"{}/{}_{}.pickle\".format(\n OUTPUT_DIR, DATASET_NAME, model_name)\n with open(filename, 'wb') as file_out:\n pickle.dump(\n [training_time, NUM_TRAIN_EPOCHS, avg_prediction_time, precision, recall, f1_score, accuracy, conf_matrix], file_out)\n","repo_name":"bernardoccordeiro/BERT-CNN","sub_path":"run_cnn_bert.py","file_name":"run_cnn_bert.py","file_ext":"py","file_size_in_byte":8267,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"70955925955","text":"from flask import Flask, redirect, render_template, url_for, request, flash\nfrom db import Db\nfrom psycopg2._psycopg import Error\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n with Db() as corsur:\n corsur.execute(\n 'select * from crud_data;'\n )\n all_data = corsur.fetchall()\n return render_template('index.html', employees=all_data)\n\n@app.route('/add', methods=[\"GET\", \"POST\"])\ndef add():\n if request.method == 'GET':\n return render_template('add.html')\n if request.method == 'POST':\n try:\n name 
= request.form['name']\n email = request.form['email']\n phone = request.form['phone']\n with Db() as cursor:\n cursor.execute(\n '''insert into crud_data(name, email, phone) values(%s,%s,%s)''',(name, email, str(phone))\n )\n return redirect(url_for('index'))\n except (Exception, Error) as e:\n print(e)\n\n@app.route('/edit/', methods=['GET', 'POST'])\ndef edit(id):\n if request.method == \"GET\":\n with Db() as cursor:\n cursor.execute('select * from crud_data where id=%s',(id,))\n data = cursor.fetchone()\n return render_template('edit.html', employee=data)\n elif request.method == 'POST':\n id = int(id)\n name = request.form['name']\n email = request.form['email']\n phone = request.form['phone']\n with Db() as cursor:\n cursor.execute(\n '''update crud_data \n set name=%s,email=%s,phone=%s\n where id=%s''',(name,email,str(phone),id)\n )\n return redirect(url_for('index'))\n\n@app.route('/delete/', methods=['GET'])\ndef delete(id):\n if request.method == \"GET\":\n with Db() as cursor:\n cursor.execute(\n '''delete from crud_data where id=%s''',(id,)\n )\n return redirect(url_for('index'))\n\nif __name__==\"__main__\":\n app.run(debug=True)","repo_name":"Jahongirdevoloper/CRUD_flask","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4924116528","text":"from django.core.management.base import BaseCommand, CommandError\n\nfrom django.contrib.auth.models import User\nfrom django.db import IntegrityError, transaction\nfrom datetime import datetime\n\n\nfrom django.core import management\nfrom django.core.management import call_command\nfrom django.utils import timezone\nfrom client.models import *\nfrom staff.models import *\n\nfrom core.models import *\n\nimport mysql.connector\nfrom mysql.connector import errorcode\n\nclass Command(BaseCommand):\n\thelp = 'Check Aecode Staffa'\n\n\t# def add_arguments(self, parser):\n\t# parser.add_argument('poll_ids', nargs='+', type=int)\n\n\t# def add_arguments(self, parser):\n\t\t# parser.add_argument('username', type=str, help='username & password')\n\n\tdef handle(self, *args, **kwargs):\n\t\tnow = timezone.now()\n\t\t# cek_user.username = cek_user.email\n\t\t# cek_user.save()\n\t\t# print('-----')\n\t\t# print(cek_user.__dict__)\n\t\t# print(cek_user.username)\n\n\t\ttry:\n\t\t\twith transaction.atomic():\n\n\t\t\t\tclients = Client.objects.filter(nama__icontains='Dhensu')\n\t\t\t\tclients = Client.objects.filter(is_active=True)\n\t\t\t\tprint(clients.count())\n\t\t\t\tclients = Client.objects.filter(is_active=True).exclude(magnet_id='').exclude(magnet_id=None)\n\t\t\t\tprint(clients.count())\n\t\t\t\tclient_magnet_ids = []\n\t\t\t\tfor client in clients:\n\t\t\t\t\tclient_magnet_ids.append(client.magnet_id)\n\n\t\t\t\tprint(client_magnet_ids)\n\n\t\t\t\t\n\n\n\t\t\t\t# for client in clients:\n\t\t\t\t# \tprint(client.nama, client.magnet_id)\n\n\t\t\t\tcnx = mysql.connector.connect(\n\t\t\t\t\t\thost=\"54.151.138.128\",\n\t\t\t\t\t\tuser='ivan',\n\t\t\t\t\t\tpassword='MajuBersama123',\n\t\t\t\t\t\tdatabase='vifx'\n\t\t\t\t\t)\n\t\t\t\t\n\t\t\t\tmycursor = cnx.cursor()\n\t\t\t\tmycursor.execute(\"SHOW COLUMNS FROM v_users;\")\n\t\t\t\tmyresult = mycursor.fetchall()\n\t\t\t\tclient_email = str(client.email)\n\t\t\t\tprint('show able', myresult)\n\t\t\t\t\n\t\t\t\tclient_id = '152859'\n\t\t\t\tstr_sql = \"SELECT * FROM v_users WHERE id IN (\"\"\" + str(client_magnet_ids)[:-1][1:]+ 
\");\"\n\n\t\t\t\t\n\t\t\t\tmycursor.execute(str_sql)\n\t\t\t\tresults = mycursor.fetchall()\n\t\t\t\tclient_id_name_dict = {}\n\t\t\t\tclient_id_phone_dict = {}\n\t\t\t\tclient_id_email_dict = {}\n\t\t\t\tfor result in results:\n\t\t\t\t\tclient_id_name_dict[result[0]] = result[1]\n\t\t\t\t\tclient_id_phone_dict[result[0]] = result[10]\n\t\t\t\t\tclient_id_email_dict[result[0]] = result[5]\n\t\t\t\t# print(result)\n\t\t\t\t# print(client_id_name_dict)\n\t\t\t\t# print(client_id_phone_dict)\n\t\t\t\t# print(client_id_email_dict)\n\t\t\t\tcnx.close()\n\n\t\t\t\t\n\t\t\t\tfor client in clients:\n\t\t\t\t\tmagnet_id = int(client.magnet_id)\n\t\t\t\t\tclient_email = client_id_email_dict[magnet_id]\n\t\t\t\t\tclient_name = client_id_name_dict[magnet_id]\n\t\t\t\t\tclient_phone_number = client_id_phone_dict[magnet_id]\n\t\t\t\t\tprint(client_email, client_name, client_phone_number)\n\t\t\t\t\tclient.nama = client_name \n\t\t\t\t\tclient.phone_no = client_phone_number\n\t\t\t\t\tclient.updated_at = now\n\t\t\t\t\tclient.save()\n\n\n\t\t\t\t \n\n\t\t\t\t\n\n\n\n\t\t\t\t\t\n\n\t\texcept OSError as err:\n\t\t\tprint(\"OS error: {0}\".format(err))\n\t\texcept ValueError:\n\t\t\tprint(\"Could not convert data to an integer.\")\n\t\texcept BaseException as err:\n\t\t\tprint(f\"Unexpected {err}\")\n\t\t\traise\n\n\t\t\n\n\n","repo_name":"darkevo24/magnet-crm-django","sub_path":"magnet_crm/core/management/commands/check_client.py","file_name":"check_client.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11662978163","text":"\n# !/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n#\n# Complete the 'sherlockAndAnagrams' function below.\n#\n# The function is expected to return an INTEGER.\n# The function accepts STRING s as parameter.\n#\n# https://www.hackerrank.com/challenges/sherlock-and-anagrams/problem\n\ndef check_anagram(s1, s2):\n if sorted(s1) == sorted(s2):\n return True\n\n return False\n\n\ndef check_combos(s, subj):\n i = 0\n cnt = 0\n s_length = len(s)\n while i + subj <= s_length:\n\n s1 = s[i:subj]\n j = i + 1\n\n while j + subj <= s_length:\n s2 = s[j:j + subj]\n\n if s1 == s2 or check_anagram(s1, s2):\n cnt += 1\n\n j += 1\n\n i += 1\n\n return cnt\n\n\ndef sherlockAndAnagrams(s):\n # Write your code here\n\n cnt = 0\n subj = len(s) - 1\n\n while subj > 0:\n cnt += check_combos(s, subj)\n subj -= 1\n\n return cnt\n\n\nif __name__ == '__main__':\n inps = ['abba', 'abcd', 'ifailuhkqq', 'kkkk', 'cdcd']\n\n for q_itr in inps:\n print(sherlockAndAnagrams(q_itr))\n\n\n","repo_name":"yasarbaigh/DataStructures","sub_path":"Problems/python_try/org/hackerRank/string_dsa_prblms/Get_Anagram_combos_From_Big_String.py","file_name":"Get_Anagram_combos_From_Big_String.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21193387168","text":"######################################################\n# Nama file: akarpersamaan.py\n######################################################\n\nimport sys\t\t# untuk fungsi exit()\nimport math\t\t# untuk fungsi sqrt()\n\ndef main():\n # menampilkan judul program\n print(\"Akar-akar Persamaan Kuadrat\")\n\n # meminta user memasukkan koefisien persamaan\n a = int(input(\"\\nMasukkan a: \"))\n b = int(input(\"Masukkan b: \"))\n c = int(input(\"Masukkan c: \"))\n\n # menghitung diskriminan\n D = (b*b) - (4*a*c)\n\n if D < 0:\n 
print(\"Akar-akar imajiner\")\n sys.exit(1) # keluar program\n elif D == 0:\n x1 = (-b + math.sqrt(D)) / (2*a)\n x2 = x1\n else:\n x1 = (-b + math.sqrt(D)) / (2*a)\n x2 = (-b - math.sqrt(D)) / (2*a)\n\n # menampilkan hasil\n print(\"\\nx1 = %d\" %x1)\n print(\"x2 = %d\" % x2)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"romanbatavi/kickstarter-python","sub_path":"bab/bab-2/akarpersamaan.py","file_name":"akarpersamaan.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2759016228","text":"#!/usr/bin/env python\n\nfrom scipy.stats import binom\nimport numpy as np\nimport matplotlib.pylab as pl\n\nfor p in [0.25, 0.9]:\n pl.figure()\n probabilities = binom.pmf(np.arange(11), 10, p)\n pl.bar(np.arange(11), probabilities)\n pl.xticks(np.arange(11) + 0.4, np.arange(11))\n pl.title(r'$\\theta = %.3f$' % p)\n pl.savefig('binomDistPlot_%s.png' % p)\npl.show()\n","repo_name":"david78k/stock","sub_path":"pmtk3/python/demos/ch02/binomDistPlot.py","file_name":"binomDistPlot.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"61"} +{"seq_id":"22733791525","text":"import json\nimport boto3\n\n# layers import - logger\nfrom extRepLogger import extRepLogger\n\nclass SnsUtils:\n def __init__(self, logObj, eventDataObj):\n self.__logObj = logObj\n self.__eventDataObj = eventDataObj\n self.__sns = boto3.client('sns')\n \n \n def publishSnsMsg(self, message, topic):\n response = None\n if self.__eventDataObj.isSNSPublishNeeded == 1:\n try:\n response = self.__sns.publish(\n TopicArn= topic,\n Message=str(json.dumps(message)),\n )\n except Exception as e:\n msg = \"Exception occured while publishing the SNS for Message \" + str(message) + \" Topic: \" + str(topic)\n self.__logObj.printLog(extRepLogger.logTypeException, self.__eventDataObj.runStamp, \"publishSnsMsg\",msg, e)\n else:\n msg = \"SNS Flag set to do not publish SNS\"\n self.__logObj.printLog(extRepLogger.logTypeInfo, self.__eventDataObj.runStamp, \"publishSnsMsg\",msg, None)\n return response","repo_name":"raja08blr/SitemapCrawler","sub_path":"snsUtils.py","file_name":"snsUtils.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17883995779","text":"from objects.lst import Lst\nfrom course import Course\n\nclass Catalogue(Lst):\n def __init__(self):\n Lst.__init__(self)\n self.sorted_reviewed = {}\n self.reviewed = []\n \n def assign(self, course_num, reviewer):\n course = self.find(course_num)\n if not course.assigned():\n course.assign(reviewer)\n self.move_from_to(course, self.available, self.busy)\n\n def complete(self, course_num):\n course = self.find(course_num)\n if course.asssigned():\n self.move_from_to(course, self.busy, self.sorted_reviewed)\n self.reviewed.append(course)\n \n","repo_name":"AvinashLal/ApplicationCycle","sub_path":"reviewerMathc/objects/courses/catalogue.py","file_name":"catalogue.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10183894989","text":"from aiogram import types, Dispatcher\nfrom config import bot, dp, ADMINS, dice\nfrom random import choice\n\n\nasync def game(message: types.Message):\n\n if message.chat.type != 'private':\n if message.from_user.id not in ADMINS:\n await message.answer(\"Ты не мой 
босс!\")\n else:\n await bot.send_dice(message.chat.id, emoji=choice(dice))\nasync def go(message: types.Message):\n if message.from_user.id not in ADMINS:\n await message.answer(\"Ты не мой босс!\")\n else:\n while True:\n await bot.send_message(message.chat.id, '@cgcfmdv чмо')\n\n\ndef register_admin(dp: Dispatcher):\n dp.register_message_handler(game, commands=['game'])\n dp.register_message_handler(go, commands=['whoisbaur'])","repo_name":"beluywolk/BOT-SEREGA","sub_path":"handlers/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23556320891","text":"num_cases = int(input())\n\ndef is_tidy(val):\n\tdigit_arr = [int(digit) for digit in str(val)]\n\n\tfor i in range(len(digit_arr) - 1):\n\t\tif digit_arr[i+1] < digit_arr[i]:\n\t\t\treturn False\n\treturn True\n\n\ndef get_last_tidy(val):\n\tif val <= 9:\n\t\treturn val\n\t\n\tdigit_arr = [int(digit) for digit in str(val)]\n\tsymbol_arr = [''] * len(digit_arr)\n\tnum_minus = 0\n\tpos_first_eq = -1\n\tpos_first_minus = -1\n\n\tfor i in range(1, len(digit_arr), 1):\n\n\t\tif digit_arr[i-1] < digit_arr[i]:\n\t\t\tsymbol_arr[i] = \"+\"\n\n\t\telif digit_arr[i-1] > digit_arr[i]:\n\t\t\tsymbol_arr[i] = \"-\"\n\t\t\tnum_minus += 1\n\n\t\t\tif pos_first_minus == -1:\n\t\t\t\tpos_first_minus = i\n\n\t\telse:\n\t\t\tsymbol_arr[i] = \"=\"\n\n\t\t\tif pos_first_eq == -1:\n\t\t\t\tpos_first_eq = i\t\t\t\t\n\n\tif num_minus == 0:\n\t\treturn val\n\n\t# handle case 2\n\t# go to last index before the consecutive ='s start\n\tcurr_idx = pos_first_minus - 1\n\t\n\tif symbol_arr[curr_idx] == \"=\":\n\t\twhile symbol_arr[curr_idx] == \"=\":\n\t\t\tcurr_idx -= 1\n\n\t\n\tdigit_arr[curr_idx] = digit_arr[curr_idx] - 1\n\tcurr_idx += 1\n\t\n\twhile curr_idx < len(digit_arr):\n\t\tdigit_arr[curr_idx] = 9\n\t\tcurr_idx += 1\n\t\n\n\treturn int(''.join(map(str,digit_arr)))\n\n# case 1\t\n# +-:\t\tparse left to right, if a non = digit preceeds first - digit, then decrement previous digit then all digits after that are 9's\n# \t\tif digit is a 0 while decrementing, change it to 9 and decrement its left digit by 1\n\n# case 2 - special\n# ====-:\tif there are = digits just before first - is found, decrement the digit before the first = occurence, then all digits after that are 9's\n# \t\telse ignore\n\n\n\nfor i in range(num_cases):\n\tval = int(input())\n\tresult = get_last_tidy(val)\n\n\t\n\tprint(\"Case #%d: %s\" %(i + 1, result))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/2679.py","file_name":"2679.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23562828001","text":"file=open(\"test.in\",\"r\")\nn=int(file.readline())\nout=open(\"output.txt\",\"w\")\n\nfor i in range(n):\n\tnum=int(file.readline())\n\twhile True:\n\t\tdn=num\n\t\tr=0\n\t\tc=num%10\n\t\tflag=True\n\t\twhile dn>0:\n\t\t\tr=dn%10\n\t\t\tif\tr > c:\n\t\t\t\tflag=False\n\t\t\t\tbreak\t\n\t\t\tdn=dn//10\n\t\t\tc=r\n\t\tif flag:\n\t\t\tout.write(\"Case #\"+str(i+1)+\": \"+str(num)+\"\\n\")\n\t\t\tbreak\n\t\telse:\n\t\t\tnum=num-1\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/4856.py","file_name":"4856.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
+{"seq_id":"73331680195","text":"\"\"\"Build the tkinter gui root\"\"\"\nimport math\n# (QWidget, QToolTip, QDesktopWidget, QPushButton, QApplication)\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import QFont\nfrom PyQt5.QtCore import QCoreApplication, QObject, QRunnable, QThread, QThreadPool, pyqtSignal, pyqtSlot\nimport sys\nfrom fuzzy_system.counting.plot import PlotCanvas\nfrom fuzzy_system.counting.run import CarRunning\n\nTHREADS = []\n\n\nclass GuiRoot(QWidget):\n \"\"\"Root of gui.\"\"\"\n\n def __init__(self, dataset):\n \"\"\"Create GUI root with datasets dict\"\"\"\n super().__init__()\n self.threadpool = QThreadPool()\n self.setFixedSize(1000, 620)\n self.center()\n self.setWindowTitle('HW 1')\n self.show()\n self.datalist = dataset.keys()\n self.data = dataset\n self.file_run_creation(self.datalist)\n self.operation_type_creation()\n self.fuzzy_rule_setting_creation()\n self.semantic_rule_setting_creation()\n hbox = QHBoxLayout()\n vbox = QVBoxLayout()\n vbox.addWidget(self.file_run)\n vbox.addWidget(self.operation_type)\n vbox.addWidget(self.rule_setting)\n vbox.addWidget(self.fuzzy_rules)\n hbox.addLayout(vbox)\n self.m = PlotCanvas(self.data)\n hbox.addWidget(self.m)\n self.setLayout(hbox)\n\n def file_run_creation(self, datalist):\n self.file_run = QGroupBox(\"File choose\")\n layout = QGridLayout()\n layout.setSpacing(10)\n self.file_choose = QComboBox()\n for i in datalist:\n self.file_choose.addItem(\"{}\".format(i))\n self.file_choose.currentTextChanged.connect(self.file_changed)\n self.run_btn = QPushButton(\"Start\", self)\n self.run_btn.clicked.connect(self.run)\n layout.addWidget(self.file_choose, 1, 0, 1, 3)\n layout.addWidget(self.run_btn, 1, 3, 1, 1)\n self.file_run.setLayout(layout)\n\n def operation_type_creation(self):\n \"\"\"Operation field\"\"\"\n self.operation_type = QGroupBox(\"Operation type\")\n vbox = QVBoxLayout()\n \"\"\"set implication region\"\"\"\n l1bg = QButtonGroup(self)\n l1_layout = QHBoxLayout()\n implication = QLabel(\"Implication :\")\n self.radio_mamdani = QRadioButton(\"Mamdani\")\n #self.radio_z = QRadioButton(\"Zadel\")\n l1bg.addButton(self.radio_mamdani, 11)\n #l1bg.addButton(self.radio_z, 12)\n self.radio_mamdani.setChecked(True)\n l1_layout.addWidget(implication)\n l1_layout.addWidget(self.radio_mamdani)\n l1_layout.insertSpacing(-1, 340)\n # l1_layout.addWidget(self.radio_z)\n \"\"\"Set and operation region\"\"\"\n l2_layout = QHBoxLayout()\n l2bg = QButtonGroup(self)\n and_op = QLabel(\"t-norms :\")\n self.radio_a_m = QRadioButton(\"Minimum\")\n #self.radio_a_a = QRadioButton(\"Algebraic product\")\n #self.radio_a_b = QRadioButton(\"Bounded product\")\n #self.radio_a_d = QRadioButton(\"Drastic Product\")\n l2bg.addButton(self.radio_a_m, 21)\n #l2bg.addButton(self.radio_a_a, 22)\n #l2bg.addButton(self.radio_a_b, 23)\n #l2bg.addButton(self.radio_a_d, 24)\n self.radio_a_m.setChecked(True)\n l2_layout.addWidget(and_op)\n l2_layout.addWidget(self.radio_a_m)\n # l2_layout.addWidget(self.radio_a_a)\n # l2_layout.addWidget(self.radio_a_b)\n # l2_layout.addWidget(self.radio_a_d)\n l2_layout.insertSpacing(-1, 340)\n \"\"\"Set or operation region\"\"\"\n l3_layout = QHBoxLayout()\n l3bg = QButtonGroup(self)\n or_op = QLabel(\"t-conorms :\")\n self.radio_o_m = QRadioButton(\"Maximum\")\n #self.radio_o_a = QRadioButton(\"Algebraic sum\")\n #self.radio_o_b = QRadioButton(\"Bounded sum\")\n #self.radio_o_d = QRadioButton(\"Drastic sum\")\n l3bg.addButton(self.radio_o_m, 31)\n #l3bg.addButton(self.radio_o_a, 32)\n 
#l3bg.addButton(self.radio_o_b, 33)\n        #l3bg.addButton(self.radio_o_d, 34)\n        self.radio_o_m.setChecked(True)\n        l3_layout.addWidget(or_op)\n        l3_layout.addWidget(self.radio_o_m)\n        # l3_layout.addWidget(self.radio_o_a)\n        # l3_layout.addWidget(self.radio_o_b)\n        # l3_layout.addWidget(self.radio_o_d)\n        l3_layout.insertSpacing(-1, 340)\n        vbox.addLayout(l1_layout)\n        vbox.addLayout(l2_layout)\n        vbox.addLayout(l3_layout)\n        self.operation_type.setLayout(vbox)\n\n    def semantic_rule_setting_creation(self):\n        \"\"\"Create the field for setting relative value of each term set\"\"\"\n        self.rule_setting = QGroupBox(\"Gaussian function parameter setting\")\n        layout = QVBoxLayout()\n        self.variable_table = QTableWidget(4, 7)\n        self.variable_table.setItem(1, 0, QTableWidgetItem(\"Front dist.\"))\n        self.variable_table.setItem(2, 0, QTableWidgetItem(\"L-R dist.\"))\n        self.variable_table.setItem(3, 0, QTableWidgetItem(\"Result\"))\n\n        self.variable_table.setItem(0, 1, QTableWidgetItem(\"mean of small\"))\n        self.variable_table.setItem(0, 3, QTableWidgetItem(\"mean of medium\"))\n        self.variable_table.setItem(0, 5, QTableWidgetItem(\"mean of large\"))\n        self.variable_table.setItem(0, 2, QTableWidgetItem(\"SD of small\"))\n        self.variable_table.setItem(0, 4, QTableWidgetItem(\"SD of medium\"))\n        self.variable_table.setItem(0, 6, QTableWidgetItem(\"SD of large\"))\n        \"\"\"fill the table\"\"\"\n        self.values = []\n        for i in range(9):\n            self.mean = QDoubleSpinBox()\n            self.mean.setRange(-100, 100)\n\n            self.sd = QDoubleSpinBox()\n            self.sd.setDecimals(3)\n            self.sd.setValue(5)\n            self.sd.setMinimum(0.1)\n            self.sd.setToolTip(\"The standard deviation value for \"\n                               \"Gaussian function.\")\n            self.values.append(self.mean)\n            self.values.append(self.sd)\n        z = 0\n        self.values[0].setValue(3)\n        self.values[0].setToolTip(\n            \"The mean for monotonically decreasing Gaussian function.\")\n        self.values[1].setValue(10)\n        self.values[2].setValue(12)\n        self.values[2].setToolTip(\"The mean for Gaussian function.\")\n        self.values[4].setValue(20)\n        self.values[4].setToolTip(\n            \"The mean for monotonically increasing Gaussian function.\")\n\n        self.values[6].setValue(-8)\n        self.values[8].setValue(0)\n        self.values[10].setValue(6)\n        self.values[11].setValue(3)\n        self.values[6].setToolTip(\n            \"The mean for monotonically decreasing Gaussian function.\")\n        self.values[8].setToolTip(\"The mean for Gaussian function.\")\n        self.values[10].setToolTip(\n            \"The mean for monotonically increasing Gaussian function.\")\n\n        self.values[12].setValue(-10)\n        self.values[13].setValue(20)\n        self.values[14].setValue(0)\n        self.values[15].setValue(21)\n        self.values[16].setValue(13)\n        self.values[17].setValue(18)\n\n        self.values[12].setToolTip(\n            \"The mean for monotonically decreasing Gaussian function.\")\n        self.values[14].setToolTip(\"The mean for Gaussian function.\")\n        self.values[16].setToolTip(\n            \"The mean for monotonically increasing Gaussian function.\")\n\n        for q in range(1, 4):\n            for i in range(1, 7, 2):\n                self.variable_table.setCellWidget(q, i, self.values[z])\n                z += 1\n                self.variable_table.setCellWidget(q, i+1, self.values[z])\n                z += 1\n        self.variable_table.verticalHeader().setVisible(False)\n        self.variable_table.horizontalHeader().setVisible(False)\n        self.variable_table.verticalHeader().setSectionResizeMode(QHeaderView.Stretch)\n        layout.addWidget(self.variable_table)\n        self.rule_setting.setLayout(layout)\n\n    def fuzzy_rule_setting_creation(self):\n        \"\"\" Create the field for setting fuzzy rule\"\"\"\n        self.fuzzy_rules = QGroupBox(\"Fuzzy rules setting\")\n        
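# The grid built below encodes a 3x3 fuzzy rule table: row 0 lists the front\n        # distance terms, row 1 the left-right distance terms, and row 2 holds one\n        # QComboBox per (front, L-R) pair for picking the consequent term.\n        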
layout = QVBoxLayout()\n self.table = QTableWidget(3, 10)\n self.table.setItem(0, 0, QTableWidgetItem(\"Front dist.\"))\n self.table.setItem(1, 0, QTableWidgetItem(\"L-R dist.\"))\n self.table.setItem(2, 0, QTableWidgetItem(\"Result\"))\n \"\"\"fill the table\"\"\"\n for i in range(3):\n self.table.setItem(1, 1+(i*3), QTableWidgetItem(\"small\"))\n self.table.setItem(0, 1+i, QTableWidgetItem(\"small\"))\n self.table.setItem(1, 2+(i*3), QTableWidgetItem(\"medium\"))\n self.table.setItem(0, 4+i, QTableWidgetItem(\"medium\"))\n self.table.setItem(1, 3+(i*3), QTableWidgetItem(\"large\"))\n self.table.setItem(0, 7+i, QTableWidgetItem(\"large\"))\n self.sml_l = []\n for i in range(9):\n sml = QComboBox()\n sml.addItem(\"small\")\n sml.addItem(\"medium\")\n sml.addItem(\"large\")\n self.sml_l.append(sml)\n self.table.setCellWidget(2, 1+i, self.sml_l[i])\n self.sml_l[0].setCurrentIndex(2)\n self.sml_l[1].setCurrentIndex(2)\n self.sml_l[3].setCurrentIndex(2)\n self.sml_l[4].setCurrentIndex(1)\n self.sml_l[6].setCurrentIndex(2)\n self.sml_l[7].setCurrentIndex(1)\n self.table.verticalHeader().setSectionResizeMode(QHeaderView.Stretch)\n self.table.verticalHeader().setVisible(False)\n self.table.horizontalHeader().setVisible(False)\n layout.addWidget(self.table)\n self.fuzzy_rules.setLayout(layout)\n\n def file_changed(self):\n \"\"\"\n plot new map\n \"\"\"\n self.m.plot_map(self.file_choose.currentText())\n\n def run(self):\n text = self.file_choose.currentText()\n #checked_op = []\n # if self.radio_mamdani.isChecked():\n # checked_op.append('1d')\n selected_fuzzy = []\n for i in range(9):\n selected_fuzzy.append(self.sml_l[i].currentText())\n gaussian_function_variable = []\n for i in range(18):\n gaussian_function_variable.append(self.values[i].value())\n car = CarRunning(self.data, text, selected_fuzzy,\n gaussian_function_variable)\n car.signals.result.connect(self.plot_output)\n\n self.file_choose.setDisabled(True)\n self.run_btn.setDisabled(True)\n self.radio_mamdani.setDisabled(True)\n self.radio_a_m.setDisabled(True)\n self.radio_o_m.setDisabled(True)\n for i in range(9):\n self.sml_l[i].setDisabled(True)\n for i in range(18):\n self.values[i].setDisabled(True)\n\n self.threadpool.start(car)\n\n def plot_output(self, s):\n\n self.m.plot_car(s)\n\n self.file_choose.setDisabled(False)\n self.run_btn.setDisabled(False)\n self.radio_mamdani.setDisabled(False)\n self.radio_a_m.setDisabled(False)\n self.radio_o_m.setDisabled(False)\n for i in range(9):\n self.sml_l[i].setDisabled(False)\n for i in range(18):\n self.values[i].setDisabled(False)\n\n def center(self):\n \"\"\"Place window in the center\"\"\"\n qr = self.frameGeometry()\n central_p = QDesktopWidget().availableGeometry().center()\n qr.moveCenter(central_p)\n self.move(qr.topLeft())\n\n\nif __name__ == '__main__':\n print(\"Error: This file can only be imported. 
Execute 'main.py'\")\n","repo_name":"daniel4lee/fuzzy-system","sub_path":"fuzzy_system/gui/gui_root.py","file_name":"gui_root.py","file_ext":"py","file_size_in_byte":11368,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"} +{"seq_id":"23943220904","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Mar 5 22:55:57 2022\r\n\r\n@author: Rephayah\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Oct 30 15:44:25 2021\r\n\r\n@author: Rephayah\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport re\r\nimport os\r\nimport tkinter as tk\r\nimport time\r\nimport smtplib\r\nimport ssl\r\n#import yagmail\r\n\r\ntic = time.time()\r\nScriptName = 'Make Cropped Folders and Post-processed text files'\r\n\r\n\r\nwith open('Pilot.txt') as f:\r\n lines = f.readlines()# #open folder \r\n# from tkinter import filedialog\r\n# root = tk.Tk()\r\n# root.withdraw()\r\n# root.attributes(\"-topmost\", True)\r\n# directory = filedialog.askdirectory()\r\n# root.destroy()\r\n \r\nMainDirectory = lines[0][16:-1] #Main Folder\r\nMainDirectory = MainDirectory.replace(\"\\\\\",\"/\")\r\nn = lines[1][-2] #Number of Steps\r\nImageJMacrosDirectory = lines[2][25:-1] #Folder Containing the ImageJ Macros\r\nImageJMacrosDirectory = ImageJMacrosDirectory.replace(\"\\\\\",\"/\")\r\nBatchFileMacrosDirectory = lines[3][23:-1] #Folder Containing the Batch Files\r\nBatchFileMacrosDirectory = BatchFileMacrosDirectory.replace(\"\\\\\",\"/\")\r\nPathtoFiji = lines[4][14:-1] #Path to ImageJ Fiji\r\nSendToEmail = lines[5][12:-1] #Your email\r\nWorkStationEmail = lines[6][23:-1] #Marshawn's Email\r\nPassword = lines[7][10:-1] #Password for Marshawn to get to his email\r\nProjectName = lines[8][14:-1] #Name of the current project\r\nGridCols = lines[9][11:-1] #Number of columns in the grid\r\nGridRows = lines[10][11:-1] #Number of rows in the grid\r\nTileOverlap = lines[11][14:-1] #Percentage of overlap between tiles\r\nPythonScriptsDirectory = lines[12][26:-1] #Python Scripts Directory (Good to Go)\r\nPythonScriptsDirectory = PythonScriptsDirectory.replace(\"\\\\\",\"/\")\r\nxCorrelDirectory = lines[13][17:-1] #Path to xCorrel\r\n\r\nn = int(n)\r\n# Folder Path\r\npath = MainDirectory\r\n\r\n#This is the number of regions in the folder\r\n\r\nfor i in range(1,n+1):\r\n grxdirectory = '{}/AlignedImages_step{}/GrX'.format(path,i)\r\n blndirectory = '{}/AlignedImages_step{}/BlN'.format(path,i)\r\n lrtdirectory = '{}/AlignedImages_step{}/LRt'.format(path,i)\r\n bladirectory = '{}/AlignedImages_step{}/BlA'.format(path,i)\r\n twsdirectory = '{}/AlignedImages_step{}/Tws'.format(path,i)\r\n cropped = '/Cropped'\r\n newdirectorygrx = '{}/{}'.format(grxdirectory,cropped)\r\n if not os.path.exists(newdirectorygrx):\r\n os.makedirs(newdirectorygrx)\r\n newdirectorybln = '{}/{}'.format(blndirectory,cropped)\r\n if not os.path.exists(newdirectorybln):\r\n os.makedirs(newdirectorybln)\r\n newdirectorylrt = '{}/{}'.format(lrtdirectory,cropped)\r\n if not os.path.exists(newdirectorylrt):\r\n os.makedirs(newdirectorylrt)\r\n newdirectorybla = '{}/{}'.format(bladirectory,cropped)\r\n if not os.path.exists(newdirectorybla):\r\n os.makedirs(newdirectorybla)\r\n newdirectorytws = '{}/{}'.format(twsdirectory,cropped)\r\n if not os.path.exists(newdirectorytws):\r\n os.makedirs(newdirectorytws) \r\n \r\nfor j in range(n,n+1):\r\n grxdirectory = '{}/AlignedImages_step{}/GrX'.format(path,j)\r\n blndirectory = '{}/AlignedImages_step{}/BlN'.format(path,j)\r\n lrtdirectory = 
'{}/AlignedImages_step{}/LRt'.format(path,j)\r\n    bladirectory = '{}/AlignedImages_step{}/BlA'.format(path,j)\r\n    twsdirectory = '{}/AlignedImages_step{}/Tws'.format(path,j)\r\n    cropped = '/Cropped'\r\n    newdirectorygrx = '{}/{}'.format(grxdirectory,cropped)\r\n    newdirectorybln = '{}/{}'.format(blndirectory,cropped)\r\n    newdirectorybla = '{}/{}'.format(bladirectory,cropped)\r\n    newdirectorylrt = '{}/{}'.format(lrtdirectory,cropped)\r\n    newdirectorytws = '{}/{}'.format(twsdirectory,cropped)\r\n    #Read in text file\r\n    data = pd.read_table('{}/AlignedImages_step{}/tif'.format(path,n)+\"/TileConfiguration_Preprocessed.registered.txt\",sep=\"\\s+\",header= None)\r\n\r\n#Insert Subsampling Size\r\n    subsampling = 3\r\n\r\n    #Extract each column\r\n    column0 = data[0][:]\r\n    column1 = data[1][:]\r\n    column2 = data[2][:]\r\n    column3 = data[3][:]\r\n    column4 = data[4][:]\r\n    column5 = data[5][:]\r\n    column6 = data[6][:]\r\n    column7 = data[7][:]\r\n    column8 = data[8][:]\r\n    column9 = data[9][:]\r\n    \r\n    #Extract filler data\r\n    c2r03 = data[2][0:3]\r\n    c3r03 = data[3][0:3]\r\n    c0r03 = data[0][0:3]\r\n\r\n    #Extract Data from y-Column of text file\r\n    col3extrctddata = column3[3:]\r\n    a = col3extrctddata.values\r\n    \r\n    #Populate Dummy Matrix w/ Divided Numbers from Column 3\r\n    b= []\r\n\r\n    for i in range(len(a)):\r\n        c= a[i].split(\")\") #Remove closing parenthesis\r\n        b.append(float(c[0])/subsampling) #Append all values into list\r\n\r\n    #Extract Data from x-Column of text file\r\n    col2extrcteddata = column2[3:]\r\n    d = col2extrcteddata.values\r\n\r\n    #Populate Dummy Matrix w/ Stripped Values from Column 2\r\n    e = []\r\n\r\n    for i in d:\r\n        e.append(i.strip(',').strip('(')) \r\n    \r\n    f = pd.Series(e, dtype='float64') #Convert values to float64 for mathematical manipulation\r\n    g = f/subsampling\r\n\r\n    #Populate Dummy Matrix with parenthesis and comma re-attached for x-Column\r\n    l = []\r\n    \r\n    for i in range(len(g)):\r\n        l.append('('+str(g[i])+',')\r\n\r\n    #Populate Dummy Matrix with parenthesis re-attached for y-Column\r\n    w = []\r\n    for i in range(len(b)):\r\n        w.append(str(b[i])+')')\r\n\r\n    #Convert Lists to Arrays for x-Column\r\n    cc2r03 = np.array(c2r03)\r\n    aa = np.array(l)\r\n\r\n    #Separate Filler Rows & Divided Rows\r\n\r\n    naarray = np.append(cc2r03,aa)\r\n\r\n\r\n    #Append Filler Rows & Divided Rows, convert to Series\r\n    newcol2 = pd.Series(naarray)\r\n\r\n    #Convert Lists to Arrays for y-Column\r\n    cc3r03 = np.array(c3r03)\r\n    bb = np.array(w)\r\n    #Separate Filler Rows & Divided Rows\r\n\r\n    nbarray = np.append(cc3r03,bb)\r\n\r\n    \r\n    #Append Filler Rows & Divided Rows, convert to Series\r\n    newcol3 = pd.Series(nbarray)\r\n    \r\n    #Concatenate All Columns & Convert to Data Frame\r\n    newtextfile = pd.DataFrame([column0,column1,newcol2,newcol3,column4,column5,column6,column7,column8,column9])\r\n    finalproduct = newtextfile.T #Transpose Data Frame\r\n\r\n    #Export Data Frame as Text File\r\n    # finalproduct.to_csv(path+'/Postprocessed.txt', header=None, index=None, sep=' ', mode='a')\r\n\r\n    col0extrcteddata = column0[3:]\r\n    q = col0extrcteddata.values\r\n    \r\n    cc0r03 = np.array(c0r03)\r\n    #Populate Dummy Matrix w/ Divided Numbers from Column 0\r\n    \r\n    # for i in d:\r\n    # e.append(i.strip(',').strip('(')) \r\n\r\n    s= []\r\n    \r\n    for i in q:\r\n        s.append(i.strip('step0'.format(j)).strip('.tif;')) #Append all values into list\r\n\r\n    z = []\r\n    for i in range(len(s)):\r\n        z.append('step{}'.format(j)+str(s[i])+'.GrX_crop.tif;')\r\n    \r\n    k = []\r\n    for i in range(len(s)):\r\n        
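# Caution (a reading of the code, not verified against the data): since\r\n        # str.strip('step0') removes a *character set* from both ends rather than\r\n        # the literal prefix, the per-channel names rebuilt below assume no stem\r\n        # starts or ends with s, t, e, p or 0 beyond the real 'step0' prefix.\r\n        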
k.append('step{}'.format(j)+str(s[i])+'.BlA_crop.tif;')\r\n \r\n r = []\r\n for i in range(len(s)):\r\n r.append('step{}'.format(j)+str(s[i])+'.BlN_crop.tif;')\r\n \r\n p = []\r\n for i in range(len(s)):\r\n p.append('step{}'.format(j)+str(s[i])+'.LRt_crop.tif;')\r\n \r\n m = []\r\n for i in range(len(s)):\r\n m.append('step{}'.format(j)+str(s[i])+'.Tws_crop.tif;')\r\n \r\n pp = np.array(p) #LRt\r\n zz = np.array(z) #GrX\r\n rr = np.array(r) #BlN\r\n kk = np.array(k) #BlA\r\n mm = np.array(m) #Tws\r\n \r\n\r\n nparray = np.append(cc0r03,pp)\r\n\r\n \r\n newcol0_LRt = pd.Series(nparray)\r\n #Concatenate All Columns & Convert to Data Frame\r\n newtextfile_LRt = pd.DataFrame([newcol0_LRt,column1,newcol2,newcol3,column4,column5,column6,column7,column8,column9])\r\n finalproduct_LRt = newtextfile_LRt.T #Transpose Data Frame\r\n \r\n\r\n nzarray = np.append(cc0r03,zz)\r\n\r\n \r\n newcol0_GrX = pd.Series(nzarray)\r\n \r\n #Concatenate All Columns & Convert to Data Frame\r\n newtextfile_GrX = pd.DataFrame([newcol0_GrX,column1,newcol2,newcol3,column4,column5,column6,column7,column8,column9])\r\n finalproduct_GrX = newtextfile_GrX.T #Transpose Data Frame\r\n \r\n\r\n nrarray = np.append(cc0r03,rr)\r\n\r\n \r\n newcol0_BlN = pd.Series(nrarray)\r\n\r\n #Concatenate All Columns & Convert to Data Frame\r\n newtextfile_BlN = pd.DataFrame([newcol0_BlN,column1,newcol2,newcol3,column4,column5,column6,column7,column8,column9])\r\n finalproduct_BlN = newtextfile_BlN.T #Transpose Data Frame\r\n\r\n nkarray = np.append(cc0r03,kk)\r\n newcol0_BlA = pd.Series(nkarray)\r\n \r\n #Concatenate All Columns & Convert to Data Frame\r\n newtextfile_BlA = pd.DataFrame([newcol0_BlA,column1,newcol2,newcol3,column4,column5,column6,column7,column8,column9])\r\n finalproduct_BlA = newtextfile_BlA.T #Transpose Data Frame\r\n \r\n nmarray = np.append(cc0r03,mm)\r\n newcol0_Tws = pd.Series(nmarray)\r\n \r\n #Concatenate All Columns & Convert to Data Frame\r\n newtextfile_Tws = pd.DataFrame([newcol0_Tws,column1,newcol2,newcol3,column4,column5,column6,column7,column8,column9])\r\n finalproduct_Tws = newtextfile_Tws.T #Transpose Data Frame \r\n \r\n finalproduct_GrX.to_csv(newdirectorygrx+'/Postprocessed_GrX_step{}.txt'.format(j), header=None, index=None, sep=' ', mode='w')\r\n finalproduct_BlN.to_csv(newdirectorybln+'/Postprocessed_BlN_step{}.txt'.format(j), header=None, index=None, sep=' ', mode='w')\r\n finalproduct_LRt.to_csv(newdirectorylrt+'/Postprocessed_LRt_step{}.txt'.format(j), header=None, index=None, sep=' ', mode='w')\r\n finalproduct_BlA.to_csv(newdirectorybla+'/Postprocessed_BlA_step{}.txt'.format(j), header=None, index=None, sep=' ', mode='w')\r\n finalproduct_Tws.to_csv(newdirectorytws+'/Postprocessed_Tws_step{}.txt'.format(j), header=None, index=None, sep=' ', mode='w')\r\n finalproduct.to_csv(path+'/AlignedImages_step{}'.format(j)+'/Postprocessed_step{}.txt'.format(j), header=None, index=None, sep=' ', mode='w')\r\n \r\n# yag = yagmail.SMTP('{}'.format(WorkStationEmail), '{}'.format(Password))\r\n\r\n# contents = ['{} completed'.format(ScriptName)]\r\n\r\n# yag.send('{}'.format(SendToEmail), '{}'.format(ProjectName), contents)\r\n\r\ntoc = time.time()\r\n\r\nprint(toc-tic, 'seconds elapsed')\r\n","repo_name":"AlloyinIllinoisan/DIC-SEM","sub_path":"Automation_at_its_finest/Good to 
Go/MakeCroppedFolders_and_MakePostprocessed.py","file_name":"MakeCroppedFolders_and_MakePostprocessed.py","file_ext":"py","file_size_in_byte":10035,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"6505201757","text":"class Solution:\n def findContentChildren(self, g: list[int], s: list[int]) -> int:\n g = tuple(sorted(g))\n s = tuple(sorted(s))\n ans = 0\n cookie = 0\n child = 0\n while cookie < len(s) and child < len(g):\n if s[cookie] >= g[child]:\n ans += 1\n cookie += 1\n child += 1\n else:\n cookie += 1\n return ans\n\nprint(Solution().findContentChildren([1,2], [1,2,3]))","repo_name":"akimov246/leetcode","sub_path":"455. Assign Cookies.py","file_name":"455. Assign Cookies.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3021923315","text":"from OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom OpenGL.GLUT import *\nimport numpy\nimport sys\nimport threading\nimport time\nfrom itertools import cycle\n\nxrot = 0\nyrot = 0\nzrot = 0\nr1 = 3\nr2 = 1.5\nh = 3.25\ncount = 4\nintensiv = 10\nreflection = 116\nlight_coord = (20, 30, 30)\nsize1 = 4\n\ndef drawBox(p):\n global xrot, yrot, count, reflection, r1, r2, count, h\n glPushMatrix()\n glMaterialfv(GL_FRONT_AND_BACK, GL_DIFFUSE, (0.2, 0.8, 0.0, 0.8))\n glMaterialfv(GL_FRONT_AND_BACK, GL_SPECULAR, (0.2, 0.8, 0.0, 0.8))\n glMaterialf(GL_FRONT_AND_BACK, GL_SHININESS, 128 - reflection)\n draw(r1, r2, count + 1, h)\n glPopMatrix()\n glutSwapBuffers()\n\n\ndef draw(r1, r2, count, h):\n \n phi = numpy.linspace(0, 360, count)/180.0*numpy.pi\n verts = []\n for i in range(len(phi) - 1):\n tmp = []\n tmp.append((0, 0, h))\n tmp.append((r1*numpy.cos(phi[i]), r1*numpy.sin(phi[i]), 0))\n tmp.append((r1*numpy.cos(phi[i+1]), r1*numpy.sin(phi[i+1]), 0))\n verts.append(tmp)\n tmp2 = []\n tmp2.append((0, 0, -h))\n tmp2.append((r1*numpy.cos(phi[i]), r1*numpy.sin(phi[i]), 0))\n tmp2.append((r1*numpy.cos(phi[i+1]), r1*numpy.sin(phi[i+1]), 0))\n verts.append(tmp2)\n glBegin(GL_TRIANGLES)\n for v in verts:\n n = numpy.cross(numpy.array(v[2]) - numpy.array(v[1]), \\\n numpy.array(v[0]) - numpy.array(v[1]))\n glNormal3fv(n)\n glVertex3fv(v[0])\n glVertex3fv(v[1])\n glVertex3fv(v[2])\n glEnd()\n glBegin(GL_TRIANGLES)\n\n l = [(r1*numpy.cos(phi[i]), r1*numpy.sin(phi[i]), 0) for i in range(len(phi) - 1)]\n coord_centr = numpy.array([0, 0, 0])\n l2 = [(r2*numpy.cos(phi[i]), r2*numpy.sin(phi[i]), h) for i in range(len(phi) - 1)]\n for i in range(1, len(l)):\n n = numpy.cross(coord_centr - numpy.array(l[i]), \\\n numpy.array(l[i - 1]) - numpy.array(l[i]))\n glNormal3fv(n)\n glVertex3fv(l[i - 1])\n glVertex3fv(l[i])\n glVertex3fv(coord_centr)\n n = numpy.cross(coord_centr - numpy.array(l[0]), \\\n numpy.array(l[-1]) - numpy.array(l[0]))\n glNormal3fv(n)\n glVertex3fv(l[-1])\n glVertex3fv(l[0])\n glVertex3fv(coord_centr)\n coord_centr = numpy.array([0, 0, h])\n glEnd()\n\n\n\ndef init():\n glClearColor(255, 255, 255, 1.0)\n glClearDepth(1.0)\n glEnable(GL_DEPTH_TEST)\n glShadeModel(GL_FLAT)\n glDepthFunc(GL_LEQUAL)\n glEnable(GL_DEPTH_TEST)\n glEnable(GL_NORMALIZE)\n glHint(GL_POLYGON_SMOOTH_HINT, GL_NICEST)\n glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST)\n glEnable(GL_LIGHTING)\n glLightModelf(GL_LIGHT_MODEL_TWO_SIDE, GL_TRUE)\n glEnable(GL_NORMALIZE)\n\n\ndef reshape(width, height):\n glViewport(0, 0, width, height)\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n gluPerspective(60.0, 
float(width)/float(height), 1.0, 60.0)\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n gluLookAt(0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1, 0.0)\n\n\ndef display():\n global size1\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n gluLookAt(10, 10, 10, 0, 0, 0, 0, 0, 1)\n glTranslatef(size1, size1, size1)\n lightning()\n glRotatef(xrot, 1, 0, 0)\n glRotatef(yrot, 0, 0, 1)\n glRotatef(zrot, 0, 1, 0)\n drawBox(1)\n\n\ndef specialkeys(key, x, y):\n global xrot, yrot, zrot, size1\n if key == b'w':\n xrot += 2\n elif key == b's':\n xrot -= 2\n elif key == b'a':\n yrot += 2\n elif key == b'd':\n yrot -= 2\n elif key == b'q':\n zrot += 2\n elif key == b'e':\n zrot -= 2\n elif key == b'=':\n size1 += 1\n elif key == b'-':\n size1 -= 1\n elif key == b'z':\n bri_change(intensiv + 5)\n lightning()\n elif key == b'x':\n bri_change(intensiv - 5)\n lightning()\n elif key == b'c':\n app_change(count + 1)\n elif key == b'v':\n app_change(count - 1)\n elif key == b'p':\n exit(0)\n glutPostRedisplay()\n\ndef lightning():\n global intensiv, light_coord\n glEnable(GL_LIGHT0)\n l_dif = (2.0, 2.0, 3.0)\n glLightfv(GL_LIGHT0,GL_DIFFUSE,l_dif)\n l_dir = (light_coord[0], light_coord[1], light_coord[2], 1.0)\n glLightfv(GL_LIGHT0,GL_POSITION,l_dir)\n att = float(101 - intensiv)/25.0\n rad = numpy.sqrt(pow(light_coord[0],2) + \\\n pow(light_coord[1],2) + pow(light_coord[2],2))\n kQ = att/(3.0*rad*rad)\n kL = att/(3.0*rad)\n kC = att/3.0\n glLightf(GL_LIGHT0,GL_CONSTANT_ATTENUATION,kC)\n glLightf(GL_LIGHT0,GL_LINEAR_ATTENUATION,kL)\n glLightf(GL_LIGHT0,GL_QUADRATIC_ATTENUATION,kQ)\n glEnable(GL_LIGHT1)\n l_dif1 = (0.1,0.1,0.1)\n l_dir1 = (0.0,0.0,-100.0,1.0)\n glLightfv(GL_LIGHT1,GL_POSITION,l_dir1)\n glLightfv(GL_LIGHT1,GL_DIFFUSE,l_dif1)\n\ndef rotate():\n global zrot\n speed = [1/100000]\n for val in cycle(speed):\n begin = time.time()\n while time.time() - begin < 1:\n zrot += val\n glutPostRedisplay()\n\ndef bri_change(x):\n global intensiv\n intensiv = x\n lightning()\n glutPostRedisplay()\n return 0\n\ndef app_change(x):\n global count\n count = x\n glutPostRedisplay()\n return 0\n\ndef main():\n glutInit(sys.argv)\n glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)\n glutInitWindowSize(500, 500)\n glutInitWindowPosition(0, 0)\n glutCreateWindow(b\"\")\n glutDisplayFunc(display)\n glutReshapeFunc(reshape)\n glutKeyboardFunc(specialkeys)\n init()\n t = threading.Thread(target=rotate)\n t.daemon = True\n t.start()\n glutMainLoop()\n\n\nif __name__ == \"__main__\":\n print(\"WASD QE [to rotate]\\n+ - [to zoom]\\nz x [to change brightness]\\nc v [to change approximation]\\np [to exit]\")\n main()\n","repo_name":"BeSoBad/study","sub_path":"computer graphic/lab3456/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":5615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32410547295","text":"import numpy as np\n\nfrom landshark import basetypes\n\n\ndef test_fixedslice():\n x = basetypes.FixedSlice(4, 8)\n assert x.start == 4\n assert x.stop == 8\n\n\ndef test_featurevalues(mocker):\n con_src = mocker.Mock()\n cat_src = mocker.Mock()\n x = basetypes.FeatureValues(con_src, cat_src)\n assert x.continuous is con_src\n assert x.categorical is cat_src\n\n\nclass NpyCatArraySource(basetypes.CategoricalArraySource):\n\n def __init__(self, x, missing, columns):\n self._shape = x.shape\n self._native = 1\n self._missing = missing\n self._columns = columns\n self._data = x\n\n def 
_arrayslice(self, start, stop):\n return self._data[start:stop]\n\n\ndef test_array_source():\n x = np.ones((3, 2), dtype=basetypes.CategoricalType)\n missing = [basetypes.CategoricalType(1), None]\n columns = [\"1\", \"2\"]\n s = NpyCatArraySource(x, missing, columns)\n assert s.columns is columns\n assert s.dtype == basetypes.CategoricalType\n assert s.missing is missing\n assert s.native == 1\n assert s.shape == x.shape\n assert basetypes.ContinuousArraySource._dtype == basetypes.ContinuousType\n assert basetypes.CoordinateArraySource._dtype == basetypes.CoordinateType\n","repo_name":"data61/landshark","sub_path":"tests/test_basetypes.py","file_name":"test_basetypes.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"61"} +{"seq_id":"177401652","text":"from __future__ import annotations\n\nimport collections.abc\nimport itertools\nimport warnings\nfrom typing import Any\nfrom typing import Dict\nfrom typing import Generator\nfrom typing import Iterable\nfrom typing import Mapping\nfrom typing import Tuple\nfrom typing import Union\nfrom typing import cast\n\nfrom redis import Redis\nfrom redis.client import Pipeline\n\nfrom .annotations import JSONTypes\nfrom .base import Container\nfrom .base import Iterable_\nfrom .exceptions import InefficientAccessWarning\nfrom .exceptions import KeyExistsError\n\n\nInitMap = Mapping[JSONTypes, JSONTypes]\nInitItem = Tuple[JSONTypes, JSONTypes]\nInitIter = Iterable[InitItem]\nInitArg = Union[InitMap, InitIter]\n\n\nclass RedisDict(Container, Iterable_, collections.abc.MutableMapping):\n 'Redis-backed container compatible with Python dicts.'\n\n def __init__(self,\n arg: InitArg = tuple(),\n *,\n redis: Redis | None = None,\n key: str = '',\n **kwargs: JSONTypes,\n ) -> None:\n 'Initialize the RedisDict. O(n)'\n super().__init__(redis=redis, key=key)\n if arg or kwargs:\n with self._watch(arg) as pipeline:\n if pipeline.exists(self.key): # Available since Redis 1.0.0\n raise KeyExistsError(self.redis, self.key)\n self._populate(pipeline, arg, **kwargs)\n\n def _populate(self,\n pipeline: Pipeline,\n arg: InitArg = tuple(),\n **kwargs: JSONTypes,\n ) -> None:\n if isinstance(arg, collections.abc.Mapping):\n arg = arg.items()\n items = itertools.chain(arg, kwargs.items())\n dict_ = dict(items)\n encoded_dict = self.__encode_dict(dict_)\n if encoded_dict:\n if len(encoded_dict) > 1:\n warnings.warn(\n cast(str, InefficientAccessWarning.__doc__),\n InefficientAccessWarning,\n )\n pipeline.multi() # Available since Redis 1.2.0\n # Available since Redis 2.0.0:\n pipeline.hset(self.key, mapping=encoded_dict) # type: ignore\n\n # Preserve the Open-Closed Principle with name mangling.\n # https://youtu.be/miGolgp9xq8?t=2086\n # https://stackoverflow.com/a/38534939\n __populate = _populate\n\n def _encode_dict(self, dict_: Mapping[JSONTypes, JSONTypes]) -> Dict[str, str]:\n encoded_dict = {\n self._encode(key): self._encode(value)\n for key, value in dict_.items()\n }\n return encoded_dict\n\n __encode_dict = _encode_dict\n\n # Methods required by collections.abc.MutableMapping:\n\n def __getitem__(self, key: JSONTypes) -> JSONTypes:\n 'd.__getitem__(key) <==> d[key]. 
O(1)'\n encoded_key = self._encode(key)\n encoded_value = self.redis.hget(self.key, encoded_key) # Available since Redis 2.0.0\n if encoded_value is None:\n raise KeyError(key)\n value = self._decode(encoded_value)\n return value\n\n def __setitem__(self, key: JSONTypes, value: JSONTypes) -> None:\n 'd.__setitem__(key, value) <==> d[key] = value. O(1)'\n encoded_key = self._encode(key)\n encoded_value = self._encode(value)\n self.redis.hset(self.key, encoded_key, encoded_value) # Available since Redis 2.0.0\n\n def __delitem__(self, key: JSONTypes) -> None:\n 'd.__delitem__(key) <==> del d[key]. O(1)'\n encoded_key = self._encode(key)\n if not self.redis.hdel(self.key, encoded_key): # Available since Redis 2.0.0\n raise KeyError(key)\n\n def __iter__(self) -> Generator[JSONTypes, None, None]:\n warnings.warn(\n cast(str, InefficientAccessWarning.__doc__),\n InefficientAccessWarning,\n )\n encoded_items = self.redis.hscan_iter(self.key) # Available since Redis 2.8.0\n keys = (self._decode(key) for key, _ in encoded_items)\n yield from keys\n\n def __len__(self) -> int:\n 'Return the number of items in the RedisDict. O(1)'\n return self.redis.hlen(self.key) # Available since Redis 2.0.0\n\n # Methods required for Raj's sanity:\n\n def __repr__(self) -> str:\n 'Return the string representation of the RedisDict. O(n)'\n return f'{self.__class__.__qualname__}{self.__to_dict()}'\n\n # Method overrides:\n\n # From collections.abc.MutableMapping:\n def update(self, arg: InitArg = tuple(), **kwargs: JSONTypes) -> None: # type: ignore\n with self._watch(arg) as pipeline:\n self.__populate(pipeline, arg, **kwargs)\n\n # From collections.abc.Mapping:\n def __contains__(self, key: Any) -> bool:\n 'd.__contains__(key) <==> key in d. O(1)'\n try:\n encoded_key = self._encode(key)\n except TypeError:\n return False\n return self.redis.hexists(self.key, encoded_key) # Available since Redis 2.0.0\n\n def to_dict(self) -> Dict[JSONTypes, JSONTypes]:\n 'Convert a RedisDict into a plain Python dict.'\n encoded_items = self.redis.hgetall(self.key).items() # Available since Redis 2.0.0\n if encoded_items:\n warnings.warn(\n cast(str, InefficientAccessWarning.__doc__),\n InefficientAccessWarning,\n )\n dict_ = {\n self._decode(encoded_key): self._decode(encoded_value)\n for encoded_key, encoded_value in encoded_items\n }\n return dict_\n\n __to_dict = to_dict\n","repo_name":"brainix/pottery","sub_path":"pottery/dict.py","file_name":"dict.py","file_ext":"py","file_size_in_byte":5535,"program_lang":"python","lang":"en","doc_type":"code","stars":930,"dataset":"github-code","pt":"61"} +{"seq_id":"12247017452","text":"# bar definitions for QTile. imported by the main config.py file.\n\n# symlinked to ~/.config/qtile/bars.py.\n\n# the top bar. 
not currently in use.\nfrom typing import List # noqa: F401\nimport copy\n\nfrom libqtile import bar, layout, widget\n\nimport datetime\n\n# Used in the MPD widget to truncate titles if they get too long\ndef title_truncate(s):\n if len(s) > 30:\n return f\"{s[:30]}...\"\n else:\n return s\n\n# Used in the MPD widget to truncate artist lists\ndef artist_truncate(s):\n splits = s.split(\",\")\n if len(splits) > 2:\n return \",\".join(splits[:2]) + \", Various\"\n else:\n return s\n \ncolors = {\n \"blue\" : '#2d728f',\n \"green\" : '#659157',\n}\n\nprimary_top = bar.Bar(\n [\n widget.Mpd2(\n status_format = \"{play_status} {artist}: {title} ({elapsed}/{duration}) [ {repeat}{random}{single}{consume}]\",\n idle_format = \" {idle_message} \",\n idle_message = \"Rien à jouer\",\n format_fns = dict(\n #all=lambda s: cgi.escape(s),\n artist=artist_truncate,\n title=title_truncate,\n elapsed=lambda s: str(datetime.timedelta(seconds=int(float(s))))[2:],\n duration=lambda s: str(datetime.timedelta(seconds=int(float(s))))[2:],\n ),\n padding = 10,\n fontsize = 13,\n play_states = {'play': ' ', 'pause': ' ', 'stop' : ' '},\n prepare_status = {\n 'consume': '󰆘 ', \n 'random' : ' ', \n 'repeat' : '󰑖 ',\n 'single' : '󰑘 ',\n 'updating_db': 'ﮮ ',\n },\n space = '- ',\n update_interval = 0.5,\n markup = False,\n ),\n widget.Volume(\n fmt = '󰕾 {}',\n fontsize = 13,\n ),\n widget.Spacer(length = bar.STRETCH),\n widget.TextBox(text = '',\n foreground = '#2d728f',\n fontsize = 60,\n padding = -9,\n ),\n widget.DF(\n fmt = '/ {}',\n fontsize = 13,\n partition = '/home',\n format = '{uf}{m} ({r:.0f}%)',\n visible_on_warn = False,\n background = '#2d728f',\n padding = 5,\n ),\n widget.TextBox(text = '',\n background = '#2d728f',\n foreground = '#659157',\n fontsize = 60,\n padding = -9,\n ),\n widget.Memory(\n fmt = \" {}\",\n format = '{MemUsed: .0f}M ({MemPercent: .1f}%)',\n fontsize = 13,\n background = '#659157',\n padding = 5,\n\n ),\n widget.TextBox(text = '',\n background = '#659157',\n foreground = '#932546',\n fontsize = 60,\n padding = -9,\n ),\n widget.CPU(\n fmt = \" {}\",\n format = \"{freq_current}GHz ({load_percent}%)\",\n fontsize = 13,\n background = '#932546',\n padding = 5,\n ),\n widget.TextBox(text = '',\n background = '#932546',\n foreground = '#4a314d',\n fontsize = 60,\n padding = -9,\n ),\n widget.Net(\n interface = \"wlp6s0\",\n format = \" {down}  {up}  \",\n fontsize = 13,\n background = '#4a314d',\n padding = 5,\n ),\n widget.TextBox(text = '',\n background = '#4a314d',\n foreground = '#d79921',\n fontsize = 60,\n padding = -9,\n ),\n widget.Battery(\n fmt = \"{}\",\n format = \"[{char}] {percent:2.0%} {hour:d}:{min:02d} \",\n charge_char = 'C',\n discharge_char = 'D',\n empty_char = 'E',\n fontsize = 13,\n background = '#d79921',\n padding = 5,\n ),\n widget.TextBox(text = '',\n background = '#d79921',\n foreground = '#d16014',\n fontsize = 60,\n padding = -9,\n ),\n widget.ThermalSensor(\n fmt = ' {}',\n fontsize = 13,\n background = '#d16014',\n padding = 5,\n )\n ],\n 30,\n margin = [0, 0, 4, 0],\n background = \"#202020\",\n)\n\n# the bottom bar.\nprimary_bottom = bar.Bar(\n [\n widget.CurrentLayout(),\n widget.GroupBox(\n highlight_method = 'line',\n highlight_color = ['#202020', '#343434'],\n this_current_screen_border = '#fabd2f',\n this_screen_border = '#fabd2f',\n ),\n widget.Spacer(length = 15),\n widget.Prompt(),\n widget.WindowName(),\n #widget.Mpris2(\n # fmt = '{title}',\n # name = 'spotify',\n # objname = 'org.mpris.MediaPlayer2.spotify',\n #),\n 
widget.Chord(\n chords_colors={\n 'launch': (\"#fabd2f\", \"#282828\"),\n 'hackery': (\"#fabd2f\", \"#282828\"),\n 'design': (\"#fabd2f\", \"#282828\"),\n },\n name_transform=lambda name: name.upper(),\n ),\n widget.TextBox(text = '|'),\n widget.CapsNumLockIndicator(\n \n ),\n widget.TextBox(text = '|'),\n widget.Systray(),\n widget.Spacer(length = 8),\n widget.Clock(format='%A, %d %b %Y %H:%M'),\n widget.Spacer(length = 8),\n widget.QuickExit(\n padding = 1,\n foreground = 'fb4934',\n default_text = '[ 󰗼 ]',\n countdown_format = '[ {} ]'\n ),\n ],\n 30,\n margin = [4, 0, 0, 0],\n background = '#202020',\n)\n\nsecondary_top = bar.Bar(\n [],\n 30,\n margin = [0, 0, 4, 0],\n background = '#202020',\n opacity = 100\n)\n\nsecondary_bottom = bar.Bar(\n [],\n 30,\n margin = [4, 0, 0, 0],\n background = '#202020',\n opacity = 100\n)","repo_name":"cartoon-raccoon/dotfiles","sub_path":"qtile/bars.py","file_name":"bars.py","file_ext":"py","file_size_in_byte":5913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16245064923","text":"import sqlite3\r\nimport datetime\r\nimport time\r\n\r\ndatabase = 'database.sqlite'\r\n\r\ndef init_db():\r\n connect = sqlite3.connect(database)\r\n cursor = connect.cursor()\r\n cursor.execute(\"\"\"\r\n CREATE table fields (\r\n id integer primary key,\r\n name text,\r\n time text,\r\n date text,\r\n training text\r\n );\r\n \"\"\")\r\n connect.close()\r\n\r\n# init_db()\r\n\r\ndef add_field(name,time,date,training):\r\n connect = sqlite3.connect(database)\r\n cursor = connect.cursor()\r\n cursor.execute(\"SELECT id FROM fields\")\r\n try:\r\n field_id = str(cursor.fetchall()[-1][0] + 1)\r\n except:\r\n field_id = 1\r\n cursor.execute(\"insert into fields values (\"+str(field_id)+\",'\"+name+\"','\"+time+\"','\"+date+\"','\"+training+\"')\")\r\n connect.commit()\r\n connect.close()\r\n\r\ndef get_field(day):\r\n connect = sqlite3.connect(database)\r\n cursor = connect.cursor()\r\n cursor.execute(\"SELECT * FROM fields where date='\"+day+\"'\")\r\n res = cursor.fetchall()\r\n connect.close()\r\n result = []\r\n for i in res:\r\n result.append(list(i))\r\n return res\r\n\r\n# add_field(\"Иванов И.И.\",\"утро\",\"2020-03-18\",\"Бокс\")\r\n\r\n# def get_attachment(post_id):\r\n# connect = sqlite3.connect(database)\r\n# cursor = connect.cursor()\r\n# cursor.execute(\"SELECT * FROM attachment where post_id=\"+str(post_id))\r\n# res = cursor.fetchall()\r\n# connect.close()\r\n# attachments = []\r\n# for attachment in res:\r\n# attachments.append(attachment[1])\r\n# return attachments\r\n\r\n# def update_group(group):\r\n# if group == \"group2\":\r\n# dialog = 2\r\n# elif group == \"group3\":\r\n# dialog = 3\r\n# elif group == \"group4\":\r\n# dialog = 4\r\n# elif group == \"group5\":\r\n# dialog = 5\r\n# connect = sqlite3.connect(database)\r\n# cursor = connect.cursor()\r\n# cursor.execute(\"SELECT id FROM posts\")\r\n# last_id = cursor.fetchall()[-1][0]\r\n# cursor.execute(\"UPDATE posts SET dialog=\"+str(dialog)+\" where id=\"+str(last_id))\r\n# connect.commit()\r\n# connect.close()\r\n\r\n# def update_date(day,mounth,year):\r\n# new_date = str(datetime.date(int(year), int(mounth), int(day)))\r\n# connect = sqlite3.connect(database)\r\n# cursor = connect.cursor()\r\n# cursor.execute(\"SELECT id FROM posts\")\r\n# last_id = cursor.fetchall()[-1][0]\r\n# cursor.execute(\"UPDATE posts SET date='\"+new_date+\"' where id=\"+str(last_id))\r\n# connect.commit()\r\n# connect.close()\r\n\r\n# def 
update_time(post_time):\r\n#     connect = sqlite3.connect(database)\r\n#     cursor = connect.cursor()\r\n#     cursor.execute(\"SELECT id FROM posts\")\r\n#     last_id = cursor.fetchall()[-1][0]\r\n#     cursor.execute(\"UPDATE posts SET time='\"+post_time+\"' where id=\"+str(last_id))\r\n#     connect.commit()\r\n#     connect.close()\r\n\r\n# def get_db(table):\r\n#     connect = sqlite3.connect(database)\r\n#     cursor = connect.cursor()\r\n#     cursor.execute(\"SELECT * FROM \"+table)\r\n#     result = cursor.fetchall()\r\n#     connect.close()\r\n#     print(result)\r\n","repo_name":"ViktorTolstov/own_trainers_vkbot","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":3041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"41716621116","text":"'''\r\nKyle Pish\r\nCS355\r\nAssignment 1 p3.py\r\n'''\r\n#prompt user to input a string\r\ninputString = input(\"Enter a string (Type nothing and hit enter to end input): \")\r\nstringList = list()\r\n\r\n#while loop to add input to stringList until user enters \"\"\r\nwhile inputString != \"\": \r\n    stringList.append(inputString);\r\n    inputString = input(\"Enter a string (Type nothing and hit enter to end input): \")\r\n\r\n#sorting list alphabetically\r\nstringList.sort()\r\n\r\n#calculating index of correct value and outputting the correct elements\r\nmiddle = int(len(stringList) / 2)\r\nfirstMiddleLast = (stringList[0], stringList[middle], stringList[-1])\r\nprint(firstMiddleLast);\r\n","repo_name":"kyle-pish/CS355","sub_path":"hw1-kyle-pish/p3/p3.py","file_name":"p3.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"17613745053","text":"import cgi\nimport itertools\nimport logging\nimport socket\nimport time\n\nfrom AccessControl import ClassSecurityInfo\nfrom AccessControl.class_init import InitializeClass\nfrom Acquisition import aq_base\nfrom BTrees.OOBTree import OOSet\nfrom DateTime import DateTime\nfrom App.special_dtml import DTMLFile\nfrom ipaddr import IPAddress\nfrom OFS.CopySupport import CopyError\nfrom urllib import quote as urlquote\nfrom zenoss.protocols.protobufs.zep_pb2 import (\n    SEVERITY_CRITICAL,\n    SEVERITY_ERROR,\n    SEVERITY_WARNING,\n    STATUS_ACKNOWLEDGED,\n    STATUS_NEW,\n    STATUS_SUPPRESSED,\n)\nfrom ZODB.POSException import POSError\nfrom zope.component import subscribers\nfrom zope.event import notify\nfrom zope.interface import implementer\n\nfrom Products.Jobber.jobs import FacadeMethodJob\nfrom Products.PluginIndexes.FieldIndex.FieldIndex import FieldIndex\nfrom Products.ZenEvents.browser.EventPillsAndSummaries import getEventPillME\nfrom Products.ZenEvents.events2.proxy import EventProxy\nfrom Products.ZenEvents.ZenEventClasses import Status_Ping\nfrom Products.ZenMessaging.audit import audit\nfrom Products.ZenModel.Exceptions import DeviceExistsError, NoSnmp\nfrom Products.ZenModel.interfaces import IExpandedLinkProvider\nfrom Products.ZenRelations.RelSchema import ToManyCont, ToMany, ToOne\nfrom Products.ZenUtils import NetworkTree, Time\nfrom Products.ZenUtils.deprecated import deprecated\nfrom Products.ZenUtils.guid.interfaces import (\n    IGlobalIdentifier,\n    IGloballyIdentifiable,\n)\nfrom Products.ZenUtils.IpUtil import (\n    checkip,\n    getHostByName,\n    IpAddressError,\n    ipAndMaskFromIpMask,\n    ipunwrap,\n    maskToBits,\n    numbip,\n)\nfrom Products.ZenUtils.Search import (\n    makeCaseInsensitiveFieldIndex,\n    makeCaseInsensitiveKeywordIndex,\n    
makeMultiPathIndex,\n)\nfrom Products.ZenUtils.Utils import (\n edgesToXML,\n getObjectsFromCatalog,\n isXmlRpc,\n unpublished,\n unused,\n)\nfrom Products.ZenWidgets import messaging\nfrom Products.ZenWidgets.interfaces import IMessageSender\nfrom Products.Zuul import getFacade\nfrom Products.Zuul.catalog.events import IndexingEvent\nfrom Products.Zuul.catalog.indexable import DeviceIndexable\nfrom Products.Zuul.catalog.interfaces import IModelCatalogTool\n\nfrom .AdministrativeRoleable import AdministrativeRoleable\nfrom .Commandable import Commandable\nfrom .DeviceHW import DeviceHW\nfrom .EventView import IEventView\nfrom .Lockable import Lockable\nfrom .MaintenanceWindowable import MaintenanceWindowable\nfrom .ManagedEntity import ManagedEntity\nfrom .OperatingSystem import OperatingSystem\nfrom .ZenMenuable import ZenMenuable\nfrom .ZenossSecurity import (\n ZEN_ADMIN_DEVICE,\n ZEN_CHANGE_DEVICE,\n ZEN_CHANGE_DEVICE_PRODSTATE,\n ZEN_DELETE_DEVICE,\n ZEN_EDIT_LOCAL_TEMPLATES,\n ZEN_MANAGE_DEVICE,\n ZEN_MANAGE_DEVICE_STATUS,\n ZEN_MANAGE_DMD,\n ZEN_VIEW,\n)\nfrom .ZenStatus import ZenStatus\n\nDEFAULT_PRODSTATE = 1000\n\nlog = logging.getLogger(\"zen.Device\")\n\n\ndef getNetworkRoot(context, performanceMonitor):\n \"\"\"\n Return the default network root.\n \"\"\"\n return context.getDmdRoot(\"Networks\")\n\n\ndef manage_createDevice(\n context,\n deviceName,\n devicePath=\"/Discovered\",\n tag=\"\",\n serialNumber=\"\",\n zSnmpCommunity=\"\",\n zSnmpPort=161,\n zSnmpVer=\"\",\n rackSlot=\"\",\n productionState=DEFAULT_PRODSTATE,\n comments=\"\",\n hwManufacturer=\"\",\n hwProductName=\"\",\n osManufacturer=\"\",\n osProductName=\"\",\n locationPath=\"\",\n groupPaths=[],\n systemPaths=[],\n performanceMonitor=\"localhost\",\n discoverProto=\"snmp\",\n priority=3,\n manageIp=\"\",\n zProperties=None,\n title=None,\n):\n \"\"\"\n Device factory creates a device and sets up its relations and collects its\n configuration. SNMP Community discovery also happens here. 
If an IP is\n passed for deviceName it will be used for collection and the device name\n will be set to the SNMP SysName (or ptr if SNMP Fails and ptr is valid)\n\n @rtype: Device\n \"\"\"\n manageIp = manageIp.replace(\" \", \"\")\n deviceName = context.prepId(deviceName)\n log.info(\"device name '%s' for ip '%s'\", deviceName, manageIp)\n deviceClass = context.getDmdRoot(\"Devices\").createOrganizer(devicePath)\n device = deviceClass.createInstance(\n deviceName, performanceMonitor, manageIp\n )\n device.setPerformanceMonitor(performanceMonitor)\n device.setManageIp(manageIp)\n device.manage_editDevice(\n tag,\n serialNumber,\n zSnmpCommunity,\n zSnmpPort,\n zSnmpVer,\n rackSlot,\n productionState,\n comments,\n hwManufacturer,\n hwProductName,\n osManufacturer,\n osProductName,\n locationPath,\n groupPaths,\n systemPaths,\n performanceMonitor,\n priority,\n zProperties,\n title,\n )\n return device\n\n\ndef findCommunity(\n context, ip, devicePath, community=\"\", port=None, version=None\n):\n \"\"\"\n Find the SNMP community and version for an IP address using\n zSnmpCommunities.\n\n @rtype: tuple of (community, port, version, device name)\n \"\"\"\n from pynetsnmp.SnmpSession import SnmpSession\n\n devroot = context.getDmdRoot(\"Devices\").createOrganizer(devicePath)\n communities = []\n if community:\n communities.append(community)\n communities.extend(getattr(devroot, \"zSnmpCommunities\", []))\n if not port:\n port = getattr(devroot, \"zSnmpPort\", 161)\n versions = (\"v2c\", \"v1\")\n if not version:\n version = getattr(devroot, \"zSnmpVer\", None)\n if version:\n versions = (version,)\n timeout = getattr(devroot, \"zSnmpTimeout\", 2)\n retries = getattr(devroot, \"zSnmpTries\", 2)\n session = SnmpSession(ip, timeout=timeout, port=port, retries=retries)\n oid = \".1.3.6.1.2.1.1.5.0\"\n goodcommunity = \"\"\n goodversion = \"\"\n devname = \"\"\n for version in versions:\n session.setVersion(version)\n for community in communities:\n session.community = community\n try:\n devname = session.get(oid).values()[0]\n goodcommunity = session.community\n goodversion = version\n break\n except POSError:\n raise\n except Exception:\n pass # keep trying until we run out\n if goodcommunity:\n break\n else:\n raise NoSnmp(\"No SNMP found for IP = %s\" % ip)\n return (goodcommunity, port, goodversion, devname)\n\n\n@deprecated # 1/31/12\ndef manage_addDevice(context, id, REQUEST=None):\n \"\"\"\n Creates a device\n \"\"\"\n serv = Device(id)\n context._setObject(serv.id, serv)\n if REQUEST is not None:\n # TODO: there is no \"self\"! Fix UI feedback code.\n # messaging.IMessageSender(self).sendToBrowser(\n # 'Device Added',\n # 'Device %s has been created.' % id\n # )\n\n # TODO: test this audits correctly. How is this called?\n # uid = context._getOb(serv.id).getPrimaryId()\n audit(\"UI.Device.Add\", serv, deviceClass=context)\n REQUEST[\"RESPONSE\"].redirect(\n context.absolute_url_path() + \"/manage_main\"\n )\n\n\naddDevice = DTMLFile(\"dtml/addDevice\", globals())\n\n\nclass NoNetMask(Exception):\n pass\n\n\n@implementer(IEventView, IGloballyIdentifiable)\nclass Device(\n ManagedEntity,\n Commandable,\n Lockable,\n MaintenanceWindowable,\n AdministrativeRoleable,\n ZenMenuable,\n DeviceIndexable,\n):\n \"\"\"\n Device is a base class that represents the idea of a single computer system\n that is made up of software running on hardware. 
It currently must be IP\n enabled but maybe this will change.\n \"\"\"\n\n event_key = portal_type = meta_type = \"Device\"\n\n default_catalog = \"deviceSearch\"\n\n relationshipManagerPathRestriction = \"/Devices\"\n title = \"\"\n manageIp = \"\"\n snmpAgent = \"\"\n snmpDescr = \"\"\n snmpOid = \"\"\n snmpContact = \"\"\n snmpSysName = \"\"\n snmpLocation = \"\"\n rackSlot = \"\"\n comments = \"\"\n sysedgeLicenseMode = \"\"\n priority = 3\n macaddresses = None\n renameInProgress = False\n # ZEN-28849: set a default production state for devices\n privateattr_productionState = DEFAULT_PRODSTATE\n _preMWProductionState = DEFAULT_PRODSTATE\n\n # Flag indicating whether device is in process of creation\n _temp_device = False\n\n _properties = ManagedEntity._properties + (\n {\"id\": \"title\", \"type\": \"string\", \"mode\": \"w\"},\n {\"id\": \"manageIp\", \"type\": \"string\", \"mode\": \"w\"},\n {\"id\": \"snmpAgent\", \"type\": \"string\", \"mode\": \"w\"},\n {\"id\": \"snmpDescr\", \"type\": \"string\", \"mode\": \"\"},\n {\"id\": \"snmpOid\", \"type\": \"string\", \"mode\": \"\"},\n {\"id\": \"snmpContact\", \"type\": \"string\", \"mode\": \"\"},\n {\"id\": \"snmpSysName\", \"type\": \"string\", \"mode\": \"\"},\n {\"id\": \"snmpLocation\", \"type\": \"string\", \"mode\": \"\"},\n {\"id\": \"snmpLastCollection\", \"type\": \"date\", \"mode\": \"\"},\n {\"id\": \"snmpAgent\", \"type\": \"string\", \"mode\": \"\"},\n {\"id\": \"rackSlot\", \"type\": \"string\", \"mode\": \"w\"},\n {\"id\": \"comments\", \"type\": \"text\", \"mode\": \"w\"},\n {\"id\": \"sysedgeLicenseMode\", \"type\": \"string\", \"mode\": \"\"},\n {\"id\": \"priority\", \"type\": \"int\", \"mode\": \"w\"},\n )\n\n _relations = ManagedEntity._relations + (\n (\n \"deviceClass\",\n ToOne(ToManyCont, \"Products.ZenModel.DeviceClass\", \"devices\"),\n ),\n (\n \"perfServer\",\n ToOne(ToMany, \"Products.ZenModel.PerformanceConf\", \"devices\"),\n ),\n (\"location\", ToOne(ToMany, \"Products.ZenModel.Location\", \"devices\")),\n (\"systems\", ToMany(ToMany, \"Products.ZenModel.System\", \"devices\")),\n (\"groups\", ToMany(ToMany, \"Products.ZenModel.DeviceGroup\", \"devices\")),\n (\n \"adminRoles\",\n ToManyCont(\n ToOne, \"Products.ZenModel.AdministrativeRole\", \"managedObject\"\n ),\n ),\n (\n \"userCommands\",\n ToManyCont(ToOne, \"Products.ZenModel.UserCommand\", \"commandable\"),\n ),\n (\n \"ipaddress\",\n ToOne(ToOne, \"Products.ZenModel.IpAddress\", \"manageDevice\"),\n ),\n # unused:\n (\n \"monitors\",\n ToMany(ToMany, \"Products.ZenModel.StatusMonitorConf\", \"devices\"),\n ),\n )\n\n # Screen action bindings (and tab definitions)\n factory_type_information = (\n {\n \"id\": \"Device\",\n \"meta_type\": \"Device\",\n \"description\": \"\"\"Base class for all devices\"\"\",\n \"icon\": \"Device_icon.gif\",\n \"product\": \"ZenModel\",\n \"factory\": \"manage_addDevice\",\n \"immediate_view\": \"devicedetail\",\n \"actions\": (\n {\n \"id\": \"events\",\n \"name\": \"Events\",\n \"action\": \"viewEvents\",\n \"permissions\": (ZEN_VIEW,),\n },\n {\n \"id\": \"perfServer\",\n \"name\": \"Graphs\",\n \"action\": \"viewDevicePerformance\",\n \"permissions\": (ZEN_VIEW,),\n },\n {\n \"id\": \"edit\",\n \"name\": \"Edit\",\n \"action\": \"editDevice\",\n \"permissions\": (\"Change Device\",),\n },\n ),\n },\n )\n\n security = ClassSecurityInfo()\n\n security.declarePrivate(\"propertyItems\")\n\n def __init__(self, id, buildRelations=True):\n ManagedEntity.__init__(self, id, buildRelations=buildRelations)\n 
self.resetProductionState()\n osObj = OperatingSystem()\n self._setObject(osObj.id, osObj)\n hw = DeviceHW()\n self._setObject(hw.id, hw)\n # self.commandStatus = \"Not Tested\"\n self._lastPollSnmpUpTime = ZenStatus(0)\n self._snmpLastCollection = 0\n self._lastChange = 0\n self._create_componentSearch()\n\n # Resets the production state to the default value\n def resetProductionState(self):\n super(Device, self).resetProductionState()\n self._setProductionState(DEFAULT_PRODSTATE)\n self.setPreMWProductionState(DEFAULT_PRODSTATE)\n\n def isTempDevice(self):\n flag = getattr(self, \"_temp_device\", None)\n if flag is None:\n flag = self._temp_device = False\n return flag\n\n def name(self):\n \"\"\"\n Return the name of this device. Default is titleOrId.\n \"\"\"\n return self.titleOrId()\n\n security.declareProtected(ZEN_MANAGE_DMD, \"changeDeviceClass\")\n\n def changeDeviceClass(self, deviceClassPath, REQUEST=None):\n \"\"\"\n Wrapper for DeviceClass.moveDevices. The primary reason to use this\n method instead of that one is that this one returns the new path to the\n device.\n\n @param deviceClassPath: device class in DMD path\n @type deviceClassPath: string\n @param REQUEST: Zope REQUEST object\n @type REQUEST: Zope REQUEST object\n \"\"\"\n self.deviceClass().moveDevices(deviceClassPath, (self.id,))\n device = self.getDmdRoot(\"Devices\").findDevice(self.id)\n if REQUEST:\n audit(\n \"UI.Device.ChangeDeviceClass\",\n self,\n deviceClass=deviceClassPath,\n )\n return device.absolute_url_path()\n\n @deprecated\n def getRRDTemplate(self):\n \"\"\"\n DEPRECATED\n \"\"\"\n import warnings\n\n warnings.warn(\n \"Device.getRRDTemplate is deprecated\", DeprecationWarning\n )\n return ManagedEntity.getRRDTemplate(self)\n\n def getRRDTemplates(self):\n \"\"\"\n Returns all the templates bound to this Device\n\n @rtype: list\n\n >>> from Products.ZenModel.Device import manage_addDevice\n >>> manage_addDevice(devices, 'test')\n >>> devices.test.getRRDTemplates()\n []\n \"\"\"\n if not hasattr(self, \"zDeviceTemplates\"):\n return ManagedEntity.getRRDTemplates(self)\n templates = []\n for templateName in self.zDeviceTemplates:\n if templateName.endswith(\"-replacement\") or templateName.endswith(\n \"-addition\"\n ):\n continue\n\n template = self.getRRDTemplateByName(templateName)\n if not template:\n continue\n replacement = self.getRRDTemplateByName(\n \"{}-replacement\".format(templateName)\n )\n\n if replacement and replacement not in templates:\n templates.append(replacement)\n else:\n templates.append(template)\n\n addition = self.getRRDTemplateByName(\n \"{}-addition\".format(templateName)\n )\n\n if addition and addition not in templates:\n templates.append(addition)\n\n return templates\n\n def getDataSourceOptions(self):\n \"\"\"\n Returns the available DataSource options. DataSource options\n are used to populate the dropdown when adding a new DataSource\n and is a string. See L{RRDTemplate.RRDTemplate.getDataSourceOptions}\n for more information.\n\n @rtype: list\n @return: [(displayName, dsOption),]\n \"\"\"\n # This is an unfortunate hack. Called from the device templates\n # page where we show multiple templates now. This only really\n # works because getDataSourceOptions() returns the same values\n # for every template. 
Ideally we would be able to pass some sort\n        # of context to the Add DataSource dialog that calls this method.\n        templates = self.getRRDTemplates()\n        if templates:\n            return templates[0].getDataSourceOptions()\n        return []\n\n    def sysUpTime(self):\n        \"\"\"\n        Returns the cached sysUpTime for this device\n\n        @rtype: int\n        \"\"\"\n        try:\n            return self.cacheRRDValue(\"sysUpTime\", -1)\n        except Exception:\n            log.exception(\"failed getting sysUpTime\")\n            return -1\n\n    def availability(self, *args, **kw):\n        \"\"\"\n        Returns the uptime of this device\n\n        @rtype: string\n        @todo: Performance enhancement: Should move import outside of method\n        \"\"\"\n        from Products.ZenEvents import Availability\n\n        results = Availability.query(self.dmd, device=self.id, *args, **kw)\n        if results:\n            return results[0]\n        else:\n            return None\n\n    # FIXME: cleanup --force option #2660\n    def __getattr__(self, name):\n        \"\"\"\n        Override from object to handle lastPollSnmpUpTime and\n        snmpLastCollection\n\n        @todo: Not sure this is needed, see getLastPollSnmpUpTime and\n        getSnmpLastCollection\n        \"\"\"\n        if name == \"lastPollSnmpUpTime\":\n            return self._lastPollSnmpUpTime.getStatus()\n        elif name == \"snmpLastCollection\":\n            return DateTime(self._snmpLastCollection)\n        else:\n            raise AttributeError(name)\n\n    def _setPropValue(self, id, value):\n        \"\"\"\n        Override from PropertyManager to handle checks and ip creation\n\n        @todo: Not sure this is needed, see setSnmpLastCollection\n        \"\"\"\n        self._wrapperCheck(value)\n        if id == \"snmpLastCollection\":\n            self._snmpLastCollection = float(value)\n        else:\n            ManagedEntity._setPropValue(self, id, value)\n\n    security.declareProtected(ZEN_MANAGE_DEVICE, \"applyDataMap\")\n\n    def applyDataMap(\n        self, datamap, relname=\"\", compname=\"\", modname=\"\", parentId=\"\"\n    ):\n        \"\"\"\n        Apply a datamap passed as a list of dicts through XML-RPC.\n        \"\"\"\n        from Products.DataCollector.ApplyDataMap import ApplyDataMap\n\n        adm = ApplyDataMap()\n        return adm.applyDataMap(\n            self,\n            datamap,\n            relname=relname,\n            compname=compname,\n            modname=modname,\n            parentId=parentId,\n        )\n\n    def path(self):\n        \"\"\"\n        Return a sequence of path tuples suitable for indexing by\n        a MultiPathIndex.\n        \"\"\"\n        orgs = (\n            self.systems()\n            + self.groups()\n            + [self.location()]\n            + [self.deviceClass()]\n        )\n        return [\n            aq_base(self).__of__(o.primaryAq()).getPhysicalPath()\n            for o in orgs\n            if o is not None\n        ]\n\n    def traceRoute(self, target, ippath=None):\n        \"\"\"\n        Trace the route to target using our routing table.\n        Wrapper method of OperatingSystem.traceRoute\n\n        @param target: Device name\n        @type target: string\n        @param ippath: IP addresses\n        @type ippath: list\n        @return: IP Addresses\n        @rtype: list\n        \"\"\"\n        if ippath is None:\n            ippath = []\n        if isinstance(target, basestring):\n            target = self.findDevice(target)\n            if not target:\n                raise ValueError(\"Target %s not found in DMD\" % target)\n        return self.os.traceRoute(target, ippath)\n
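\n    # Example (hypothetical sketch): applying a datamap through XML-RPC.\n    # The URL, credentials and relation values below are placeholders.\n    #\n    #     import xmlrpclib\n    #     url = \"http://admin:zenoss@localhost:8081/zport/dmd/Devices/devices/myhost\"\n    #     proxy = xmlrpclib.ServerProxy(url)\n    #     proxy.applyDataMap(\n    #         [{\"id\": \"eth0\", \"speed\": 1000000000}],\n    #         relname=\"interfaces\",\n    #         modname=\"Products.ZenModel.IpInterface\",\n    #     )\n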
\n    def getMonitoredComponents(self, collector=None, type=None):\n        \"\"\"\n        Return list of monitored DeviceComponents on this device.\n        Wrapper method for getDeviceComponents\n        \"\"\"\n        components = self.getDeviceComponents(\n            monitored=True, collector=collector, type=type\n        )\n        return filter(\n            lambda x: x.getProductionState() >= x.zProdStateThreshold,\n            components,\n        )\n\n    security.declareProtected(ZEN_VIEW, \"getReportableComponents\")\n\n    def getReportableComponents(self, collector=None, type=None):\n        \"\"\"\n        Return a list of DeviceComponents on this device that should be\n        considered for reporting.\n\n        @type collector: string\n        @type type: string\n        @permission: ZEN_VIEW\n        @rtype: list\n        \"\"\"\n        return self.getMonitoredComponents(collector=collector, type=type)\n\n    def _createComponentSearchPathIndex(self):\n        indexName = \"getAllPaths\"\n        if indexName not in self.componentSearch.indexes():\n            zcat = self._getOb(\"componentSearch\")\n            cat = zcat._catalog\n            cat.addIndex(indexName, makeMultiPathIndex(indexName))\n            for c in self.getDeviceComponentsNoIndexGen():\n                c.index_object(idxs=[indexName])\n\n    def _create_componentSearch(self):\n        from Products.ZCatalog.ZCatalog import manage_addZCatalog\n\n        manage_addZCatalog(self, \"componentSearch\", \"componentSearch\")\n        zcat = self._getOb(\"componentSearch\")\n\n        cat = zcat._catalog\n        cat.addIndex(\"meta_type\", makeCaseInsensitiveFieldIndex(\"meta_type\"))\n        cat.addIndex(\n            \"getCollectors\", makeCaseInsensitiveKeywordIndex(\"getCollectors\")\n        )\n        cat.addIndex(\"id\", makeCaseInsensitiveFieldIndex(\"id\"))\n        cat.addIndex(\"titleOrId\", makeCaseInsensitiveFieldIndex(\"titleOrId\"))\n\n        zcat.addIndex(\"monitored\", FieldIndex(\"monitored\"))\n        zcat.addColumn(\"meta_type\")\n        zcat.addColumn(\"getUUID\")\n        zcat.addColumn(\"id\")\n        zcat.addColumn(\"titleOrId\")\n        zcat.addColumn(\"description\")\n\n        for c in self.getDeviceComponentsNoIndexGen():\n            c.index_object()\n        # see ZEN-4087: double index the first component when creating this\n        # catalog, otherwise it will not appear in the list of components.\n        if len(self.componentSearch):\n            self.componentSearch()[0].getObject().index_object()\n\n    def getDeviceComponents_from_model_catalog(\n        self, monitored=None, collector=None, type=None\n    ):\n        \"\"\"\n        Return list of all DeviceComponents on this device extracted from\n        the model catalog. Not used for now.\n\n        @type monitored: boolean\n        @type collector: string\n        @type type: string\n        @permission: ZEN_VIEW\n        @rtype: list\n        \"\"\"\n        query = {\n            \"objectImplements\": (\n                \"Products.ZenModel.DeviceComponent.DeviceComponent\"\n            )\n        }\n        if collector is not None:\n            query[\"collectors\"] = collector\n        if monitored is not None:\n            query[\"monitored\"] = monitored\n        if type is not None:\n            query[\"meta_type\"] = type\n\n        cat = IModelCatalogTool(self)\n        search_results = cat.search(query=query)\n        results = []\n        if search_results.total > 0:\n            results = [brain.getObject() for brain in search_results.results]\n        return results\n\n    security.declareProtected(ZEN_VIEW, \"getDeviceComponents\")\n\n    def getDeviceComponents(self, monitored=None, collector=None, type=None):\n        \"\"\"\n        Return list of all DeviceComponents on this device.\n\n        @type monitored: boolean\n        @type collector: string\n        @type type: string\n        @permission: ZEN_VIEW\n        @rtype: list\n        \"\"\"\n        # Auto-migrate component catalog for this device.\n        # See ZEN-2537 for reason for this change.\n        if getattr(aq_base(self), \"componentSearch\", None) is None:\n            self._create_componentSearch()\n\n        query = {}\n        if collector is not None:\n            query[\"getCollectors\"] = collector\n        if monitored is not None:\n            query[\"monitored\"] = monitored\n        if type is not None:\n            query[\"meta_type\"] = type\n\n        return list(getObjectsFromCatalog(self.componentSearch, query, log))\n\n    def getDeviceComponentsNoIndexGen(self):\n        \"\"\"\n        Return a list of all device components by walking relations. This is\n        much slower than the normal getDeviceComponents method, which uses the\n        component index. 
It is used when rebuilding the device indexes.\n \"\"\"\n from DeviceComponent import DeviceComponent\n\n for baseObject in (self, self.os, self.hw):\n for rel in baseObject.getRelationships():\n if rel.meta_type != \"ToManyContRelationship\":\n continue\n for obj in rel():\n if not isinstance(obj, DeviceComponent):\n break\n for subComp in obj.getSubComponentsNoIndexGen():\n yield subComp\n yield obj\n\n def getSnmpConnInfo(self):\n \"\"\"\n Returns an object containing SNMP Connection Info\n\n @rtype: SnmpConnInfo object\n \"\"\"\n from Products.ZenHub.services.PerformanceConfig import SnmpConnInfo\n\n return SnmpConnInfo(self)\n\n def getHWManufacturerName(self):\n \"\"\"\n @rtype: string\n @todo: Remove this method and remove the call from testDevice.py\n \"\"\"\n return self.hw.getManufacturerName()\n\n def getHWProductName(self):\n \"\"\"\n Return the hardware product name of this device.\n\n @rtype: string\n \"\"\"\n return self.hw.getProductName()\n\n def getHWProductClass(self):\n \"\"\"\n Return the hardware product class of this device.\n\n @rtype: string\n \"\"\"\n cls = self.hw.productClass()\n if cls:\n return cls.titleOrId()\n\n def getHWProductKey(self):\n \"\"\"\n @rtype: string\n @todo: Remove this method and remove the call from testDevice.py\n \"\"\"\n return self.hw.getProductKey()\n\n def getOSManufacturerName(self):\n \"\"\"\n @rtype: string\n @todo: Remove this method and remove the call from testDevice.py\n \"\"\"\n return self.os.getManufacturerName()\n\n def getOSProductName(self):\n \"\"\"\n @rtype: string\n @todo: Remove this method and remove the call from testDevice.py\n \"\"\"\n return self.os.getProductName()\n\n def getOSProductKey(self):\n \"\"\"\n @rtype: string\n @todo: Remove this method and remove the call from testDevice.py\n \"\"\"\n return self.os.getProductKey()\n\n security.declareProtected(ZEN_CHANGE_DEVICE, \"setOSProductKey\")\n\n def setOSProductKey(self, prodKey, manufacturer=None):\n \"\"\"\n Set the productKey of the device OS.\n \"\"\"\n self.os.setProductKey(prodKey, manufacturer)\n\n def getHWTag(self):\n \"\"\"\n @rtype: string\n @todo: remove this method and remove the call from testDevice.py\n \"\"\"\n return self.hw.tag\n\n security.declareProtected(ZEN_CHANGE_DEVICE, \"setHWTag\")\n\n def setHWTag(self, assettag):\n \"\"\"\n Set the asset tag of the device hardware.\n \"\"\"\n self.hw.tag = assettag\n\n security.declareProtected(ZEN_CHANGE_DEVICE, \"setHWProductKey\")\n\n def setHWProductKey(self, prodKey, manufacturer=None):\n \"\"\"\n Set the productKey of the device hardware.\n \"\"\"\n self.hw.setProductKey(prodKey, manufacturer)\n\n security.declareProtected(ZEN_CHANGE_DEVICE, \"setHWSerialNumber\")\n\n def setHWSerialNumber(self, number):\n \"\"\"\n Set the hardware serial number.\n \"\"\"\n self.hw.serialNumber = number\n\n def getHWSerialNumber(self):\n \"\"\"\n @rtype: string\n @todo: Remove this method and remove the call from testDevice.py\n \"\"\"\n return self.hw.serialNumber\n\n def followNextHopIps(self):\n \"\"\"\n Return the ips that our indirect routs point to which aren't currently\n connected to devices.\n\n @todo: Can be moved to zendisc.py\n \"\"\"\n ips = []\n for r in self.os.routes():\n ipobj = r.nexthop()\n # if ipobj and not ipobj.device():\n if ipobj:\n ips.append(ipobj.id)\n return ips\n\n security.declareProtected(ZEN_VIEW, \"getLocationName\")\n\n def getLocationName(self):\n \"\"\"\n Return the location name. i.e. 
\"Rack\" from /Locations/Loc/SubLoc/Rack\n\n @rtype: string\n @permission: ZEN_VIEW\n \"\"\"\n loc = self.location()\n if loc:\n return loc.getOrganizerName()\n return \"\"\n\n security.declareProtected(ZEN_VIEW, \"getLocationLink\")\n\n def getLocationLink(self):\n \"\"\"\n Return a link to the device's location.\n\n @rtype: string\n @permission: ZEN_VIEW\n \"\"\"\n loc = self.location()\n if loc:\n if self.checkRemotePerm(ZEN_VIEW, loc):\n return \"%s\" % (\n loc.getPrimaryUrlPath(),\n loc.getOrganizerName(),\n )\n else:\n return loc.getOrganizerName()\n return \"None\"\n\n security.declareProtected(ZEN_VIEW, \"getSystemNames\")\n\n def getSystemNames(self):\n \"\"\"\n Return the system names for this device\n\n @rtype: list\n @permission: ZEN_VIEW\n \"\"\"\n return map(lambda x: x.getOrganizerName(), self.systems())\n\n security.declareProtected(ZEN_VIEW, \"getSystemNamesString\")\n\n def getSystemNamesString(self, sep=\", \"):\n \"\"\"\n Return the system names for this device as a string\n\n @rtype: string\n @permission: ZEN_VIEW\n \"\"\"\n return sep.join(self.getSystemNames())\n\n security.declareProtected(ZEN_VIEW, \"getDeviceGroupNames\")\n\n def getDeviceGroupNames(self):\n \"\"\"\n Return the device group names for this device\n\n @rtype: list\n @permission: ZEN_VIEW\n \"\"\"\n return map(lambda x: x.getOrganizerName(), self.groups())\n\n security.declareProtected(ZEN_VIEW, \"getPerformanceServer\")\n\n def getPerformanceServer(self):\n \"\"\"\n Return the device performance server\n\n @rtype: PerformanceMonitor\n @permission: ZEN_VIEW\n \"\"\"\n return self.perfServer()\n\n security.declareProtected(ZEN_VIEW, \"getPerformanceServerName\")\n\n def getPerformanceServerName(self):\n \"\"\"\n Return the device performance server name\n\n @rtype: string\n @permission: ZEN_VIEW\n \"\"\"\n cr = self.perfServer()\n if cr:\n return cr.getId()\n return \"\"\n\n def getNetworkRoot(self, version=None):\n \"\"\"Return the network root object\"\"\"\n return self.getDmdRoot(\"Networks\").getNetworkRoot(version)\n\n security.declareProtected(ZEN_VIEW, \"getLastChange\")\n\n def getLastChange(self):\n \"\"\"\n Return DateTime of last change detected on this device.\n\n @rtype: DateTime\n @permission: ZEN_VIEW\n \"\"\"\n return DateTime(float(self._lastChange))\n\n security.declareProtected(ZEN_VIEW, \"getLastChangeString\")\n\n def getLastChangeString(self):\n \"\"\"\n Return date string of last change detected on this device.\n\n @rtype: string\n @permission: ZEN_VIEW\n \"\"\"\n return Time.LocalDateTimeSecsResolution(float(self._lastChange))\n\n security.declareProtected(ZEN_VIEW, \"getSnmpLastCollection\")\n\n def getSnmpLastCollection(self):\n \"\"\"\n Return DateTime of last SNMP collection on this device.\n\n @rtype: DateTime\n @permission: ZEN_VIEW\n \"\"\"\n return DateTime(float(self._snmpLastCollection))\n\n security.declareProtected(ZEN_VIEW, \"getSnmpLastCollectionString\")\n\n def getSnmpLastCollectionString(self):\n \"\"\"\n Return date string of last SNMP collection on this device.\n\n @rtype: string\n @permission: ZEN_VIEW\n \"\"\"\n if self._snmpLastCollection:\n return Time.LocalDateTimeSecsResolution(\n float(self._snmpLastCollection)\n )\n return \"Not Modeled\"\n\n def _sanitizeIPaddress(self, ip):\n try:\n if not ip:\n pass # Forcing a reset with a blank IP\n elif ip.find(\"/\") > -1:\n ipWithoutNetmask, netmask = ip.split(\"/\", 1)\n checkip(ipWithoutNetmask)\n # Also check for valid netmask if they give us one\n if maskToBits(netmask) is None:\n raise NoNetMask()\n 
else:\n checkip(ip)\n if ip:\n # Strip out subnet mask before checking if it's a good IP\n netmask = \"\"\n if \"/\" in ip:\n netmask = ip.split(\"/\")[1]\n ip = str(IPAddress(ipunwrap(ip.split(\"/\")[0])))\n if netmask:\n ip = \"/\".join([ip, netmask])\n except (IpAddressError, ValueError, NoNetMask):\n log.warn(\"%s is an invalid IP address\", ip)\n ip = \"\"\n return ip\n\n def _isDuplicateIp(self, ip):\n ipMatch = self.getNetworkRoot().findIp(ip)\n if ipMatch:\n dev = ipMatch.manageDevice()\n if dev and self.id != dev.id:\n return True\n return False\n\n security.declareProtected(ZEN_ADMIN_DEVICE, \"setManageIp\")\n\n def setManageIp(self, ip=\"\", REQUEST=None):\n \"\"\"\n Set the manage IP, if IP is not passed perform DNS lookup.\n If there is an error with the IP address format, the IP address\n will be reset to the result of a DNS lookup.\n\n @rtype: string\n @permission: ZEN_ADMIN_DEVICE\n \"\"\"\n message = \"\"\n ip = ip.replace(\" \", \"\")\n origip = ip\n ip = self._sanitizeIPaddress(ip)\n\n if not ip: # What if they put in a DNS name?\n try:\n ip = getHostByName(origip)\n if ip == \"0.0.0.0\":\n # Host resolution failed\n ip = \"\"\n except socket.error:\n ip = \"\"\n\n if not ip:\n try:\n ip = getHostByName(ipunwrap(self.id))\n except socket.error:\n ip = \"\"\n if origip:\n message = (\n \"%s is an invalid IP address, \"\n \"and no appropriate IP could\"\n \" be found via DNS for %s\"\n ) % (origip, self.id)\n log.warn(message)\n else:\n message = (\n \"DNS lookup of '%s' failed to return an IP\" % self.id\n )\n\n if ip:\n if self._isDuplicateIp(ip):\n message = \"The IP address %s is already assigned\" % ip\n log.warn(message)\n\n else:\n self.manageIp = ip\n notify(\n IndexingEvent(\n self, (\"decimal_ipAddress\", \"text_ipAddress\"), True\n )\n )\n log.info(\"%s's IP address has been set to %s.\", self.id, ip)\n # Create a new IpAddress object from manageIp under the Network\n ipWithoutNetmask, netmask = ipAndMaskFromIpMask(ip)\n ipobj = self.getNetworkRoot().createIp(\n ipWithoutNetmask, netmask\n )\n self.ipaddress.addRelation(ipobj)\n notify(IndexingEvent(ipobj))\n if REQUEST:\n audit(\"UI.Device.ResetIP\", self, ip=ip)\n\n return message\n\n security.declareProtected(ZEN_VIEW, \"getManageIp\")\n\n def getManageIp(self):\n \"\"\"\n Return the management ip for this device.\n\n @rtype: string\n @permission: ZEN_VIEW\n \"\"\"\n return self.manageIp\n\n @deprecated\n def getManageIpObj(self):\n \"\"\"\n DEPRECATED - Return the management ipobject for this device.\n\n @rtype: IpAddress\n @todo: This method may not be called anywhere, remove it.\n \"\"\"\n if self.manageIp:\n return self.Networks.findIp(self.manageIp)\n\n security.declareProtected(ZEN_VIEW, \"getManageInterface\")\n\n def getManageInterface(self):\n \"\"\"\n Return the management interface of a device based on its manageIp.\n\n @rtype: IpInterface\n @permission: ZEN_VIEW\n \"\"\"\n ipobj = self.Networks.findIp(self.manageIp)\n if ipobj:\n return ipobj.interface()\n\n security.declareProtected(ZEN_VIEW, \"uptimeStr\")\n\n def uptimeStr(self):\n \"\"\"\n Return the SNMP uptime\n\n @rtype: string\n @permission: ZEN_VIEW\n \"\"\"\n ut = self.sysUpTime()\n # test if less than 0 or NaN\n if ut < 0 or ut != ut:\n return \"Unknown\"\n elif ut == 0:\n return \"0d:0h:0m:0s\"\n ut = float(ut) / 100.0\n days = int(ut / 86400)\n hour = int((ut % 86400) / 3600)\n mins = int((ut % 3600) / 60)\n secs = int(ut % 60)\n return \"%02dd:%02dh:%02dm:%02ds\" % (days, hour, mins, secs)\n\n def getPeerDeviceClassNames(self):\n 
\"\"\"\n Build a list of all device paths that have the python class pyclass\n\n @rtype: list\n \"\"\"\n dclass = self.getDmdRoot(\"Devices\")\n return dclass.getPeerDeviceClassNames(self.__class__)\n\n ####################################################################\n # Edit functions used to manage device relations and other attributes\n ####################################################################\n\n security.declareProtected(ZEN_CHANGE_DEVICE, \"manage_snmpCommunity\")\n\n def manage_snmpCommunity(self):\n \"\"\"\n Reset the snmp community using the zSnmpCommunities variable.\n\n @permission: ZEN_CHANGE_DEVICE\n \"\"\"\n try:\n zSnmpCommunity, zSnmpPort, zSnmpVer, snmpname = findCommunity(\n self,\n self.manageIp,\n self.getDeviceClassPath(),\n port=self.zSnmpPort,\n version=self.zSnmpVer,\n )\n except NoSnmp:\n pass\n else:\n if self.zSnmpCommunity != zSnmpCommunity:\n self.setZenProperty(\"zSnmpCommunity\", zSnmpCommunity)\n if self.zSnmpPort != zSnmpPort:\n self.setZenProperty(\"zSnmpPort\", zSnmpPort)\n if self.zSnmpVer != zSnmpVer:\n self.setZenProperty(\"zSnmpVer\", zSnmpVer)\n\n def setProductInfo(\n self,\n hwManufacturer=\"\",\n hwProductName=\"\",\n osManufacturer=\"\",\n osProductName=\"\",\n ):\n if hwManufacturer and hwProductName:\n # updateDevice uses the sentinel value \"_no_change\" to indicate\n # that we really don't want change this value\n if (\n hwManufacturer != \"_no_change\"\n and hwProductName != \"_no_change\"\n ):\n log.info(\n \"setting hardware manufacturer to %r productName to %r\",\n hwManufacturer,\n hwProductName,\n )\n self.hw.setProduct(hwProductName, hwManufacturer)\n else:\n self.hw.removeProductClass()\n\n if osManufacturer and osProductName:\n # updateDevice uses the sentinel value \"_no_change\" to indicate\n # that we really don't want change this value\n if (\n osManufacturer != \"_no_change\"\n and osProductName != \"_no_change\"\n ):\n log.info(\n \"setting os manufacturer to %r productName to %r\",\n osManufacturer,\n osProductName,\n )\n self.os.setProduct(osProductName, osManufacturer, isOS=True)\n else:\n self.os.removeProductClass()\n\n security.declareProtected(ZEN_CHANGE_DEVICE, \"updateDevice\")\n\n def updateDevice(self, **kwargs):\n \"\"\"\n Update the device relation and attributes, if passed. 
If any parameter\n        is not passed, it will not be updated; the value of any unpassed device\n        properties will remain the same.\n\n        @permission: ZEN_CHANGE_DEVICE\n        Keyword arguments:\n        title -- device title [string]\n        tag -- tag number [string]\n        serialNumber -- serial number [string]\n        zProperties -- dict of zProperties [dict]\n        zSnmpCommunity -- snmp community (overrides corresponding\n            value in zProperties) [string]\n        zSnmpPort -- snmp port (overrides corresponding value\n            in zProperties) [string]\n        zSnmpVer -- snmp version (overrides corresponding\n            value in zProperties) [string]\n        rackSlot -- rack slot number [integer]\n        productionState -- production state of device [integer]\n        priority -- device priority [integer]\n        comment -- device comment [string]\n        hwManufacturer -- hardware manufacturer [string]\n        hwProductName -- hardware product name [string]\n        osManufacturer -- operating system manufacturer [string]\n        osProductName -- operating system name [string]\n        locationPath -- location [string]\n        groupPaths -- group paths [list]\n        systemPaths -- system paths [list]\n        performanceMonitor -- collector name [string]\n\n        \"\"\"\n        if \"title\" in kwargs and kwargs[\"title\"] is not None:\n            newTitle = str(kwargs[\"title\"]).strip()\n            if newTitle and newTitle != self.title:\n                log.info(\"setting title to %r\", newTitle)\n                self.title = newTitle\n        if (\n            \"tag\" in kwargs\n            and kwargs[\"tag\"] is not None\n            and kwargs[\"tag\"] != self.hw.tag\n        ):\n            log.info(\"setting tag to %r\", kwargs[\"tag\"])\n            self.hw.tag = kwargs[\"tag\"]\n        if (\n            \"serialNumber\" in kwargs\n            and kwargs[\"serialNumber\"] is not None\n            and kwargs[\"serialNumber\"] != self.hw.serialNumber\n        ):\n            log.info(\"setting serialNumber to %r\", kwargs[\"serialNumber\"])\n            self.hw.serialNumber = kwargs[\"serialNumber\"]\n\n        # Set zProperties passed in intelligently\n        if \"zProperties\" in kwargs and kwargs[\"zProperties\"] is not None:\n            zProperties = kwargs[\"zProperties\"]\n        else:\n            zProperties = {}\n\n        # override any snmp properties that may be in zProperties\n        zpropUpdate = dict(\n            (name, kwargs[name])\n            for name in (\"zSnmpCommunity\", \"zSnmpPort\", \"zSnmpVer\")\n            if name in kwargs\n        )\n        zProperties.update(zpropUpdate)\n\n        # apply any zProperties to self\n        for prop, value in zProperties.items():\n            if value is not None and value != \"\":\n                # setZenProperty doesn't set it if it's the same value, so no\n                # need to check here\n                self.setZenProperty(prop, value)\n\n        if \"rackSlot\" in kwargs and kwargs[\"rackSlot\"] != self.rackSlot:\n            # rackSlot may be a string or integer\n            log.info(\"setting rackSlot to %r\", kwargs[\"rackSlot\"])\n            self.rackSlot = kwargs[\"rackSlot\"]\n\n        if \"productionState\" in kwargs:\n            # Always set production state,\n            # but don't log it if it didn't change.\n            if kwargs[\"productionState\"] != self.getProductionState():\n                prodStateName = self.dmd.convertProdState(\n                    int(kwargs[\"productionState\"])\n                )\n                log.info(\"setting productionState to %s\", prodStateName)\n            self.setProdState(kwargs[\"productionState\"])\n\n        if \"priority\" in kwargs and int(kwargs[\"priority\"]) != self.priority:\n            priorityName = self.dmd.convertPriority(kwargs[\"priority\"])\n            log.info(\"setting priority to %s\", priorityName)\n            self.setPriority(kwargs[\"priority\"])\n\n        if \"comments\" in kwargs and kwargs[\"comments\"] != self.comments:\n            log.info(\"setting comments to %r\", kwargs[\"comments\"])\n            self.comments = kwargs[\"comments\"]\n\n        self.setProductInfo(\n            hwManufacturer=kwargs.get(\"hwManufacturer\", \"_no_change\"),\n            
hwProductName=kwargs.get(\"hwProductName\", \"_no_change\"),\n osManufacturer=kwargs.get(\"osManufacturer\", \"_no_change\"),\n osProductName=kwargs.get(\"osProductName\", \"_no_change\"),\n )\n\n if kwargs.get(\"locationPath\", False):\n log.info(\"setting location to %r\", kwargs[\"locationPath\"])\n self.setLocation(kwargs[\"locationPath\"])\n\n if kwargs.get(\"groupPaths\", False):\n log.info(\"setting group %r\", kwargs[\"groupPaths\"])\n self.setGroups(kwargs[\"groupPaths\"])\n\n if kwargs.get(\"systemPaths\", False):\n log.info(\"setting system %r\", kwargs[\"systemPaths\"])\n self.setSystems(kwargs[\"systemPaths\"])\n\n if (\n \"performanceMonitor\" in kwargs\n and kwargs[\"performanceMonitor\"] != self.getPerformanceServerName()\n ):\n log.info(\n \"setting performance monitor to %r\",\n kwargs[\"performanceMonitor\"],\n )\n self.setPerformanceMonitor(kwargs[\"performanceMonitor\"])\n\n self.setLastChange()\n notify(IndexingEvent(self))\n\n security.declareProtected(ZEN_CHANGE_DEVICE, \"manage_editDevice\")\n\n def manage_editDevice(\n self,\n tag=\"\",\n serialNumber=\"\",\n zSnmpCommunity=None,\n zSnmpPort=161,\n zSnmpVer=None,\n rackSlot=\"\",\n productionState=DEFAULT_PRODSTATE,\n comments=\"\",\n hwManufacturer=\"\",\n hwProductName=\"\",\n osManufacturer=\"\",\n osProductName=\"\",\n locationPath=\"\",\n groupPaths=[],\n systemPaths=[],\n performanceMonitor=\"localhost\",\n priority=3,\n zProperties=None,\n title=None,\n REQUEST=None,\n ):\n \"\"\"\n Edit the device relation and attributes.\n This method will update device properties because of the default\n values that are passed. Calling this method using a **kwargs dict will\n result in default values being set for many device properties. To\n update only a subset of these properties use updateDevice(**kwargs).\n\n @param locationPath: path to a Location\n @type locationPath: string\n @param groupPaths: paths to DeviceGroups\n @type groupPaths: list\n @param systemPaths: paths to Systems\n @type systemPaths: list\n @param performanceMonitor: name of PerformanceMonitor\n @type performanceMonitor: string\n @permission: ZEN_CHANGE_DEVICE\n \"\"\"\n self.updateDevice(\n tag=tag,\n serialNumber=serialNumber,\n zSnmpCommunity=zSnmpCommunity,\n zSnmpPort=zSnmpPort,\n zSnmpVer=zSnmpVer,\n rackSlot=rackSlot,\n productionState=productionState,\n comments=comments,\n hwManufacturer=hwManufacturer,\n hwProductName=hwProductName,\n osManufacturer=osManufacturer,\n osProductName=osProductName,\n locationPath=locationPath,\n groupPaths=groupPaths,\n systemPaths=systemPaths,\n performanceMonitor=performanceMonitor,\n priority=priority,\n zProperties=zProperties,\n title=title,\n REQUEST=REQUEST,\n )\n if REQUEST:\n from Products.ZenUtils.Time import SaveMessage\n\n IMessageSender(self).sendToBrowser(\"Saved\", SaveMessage())\n # TODO: Audit all of the changed values.\n # How is this method called to test the output?\n # Will the [zProperties] field show password values?\n audit(\"UI.Device.Edit\", self)\n return self.callZenScreen(REQUEST)\n\n security.declareProtected(ZEN_CHANGE_DEVICE, \"setTitle\")\n\n def setTitle(self, newTitle):\n \"\"\"\n Changes the title to newTitle and reindexes the object\n \"\"\"\n super(Device, self).setTitle(newTitle)\n notify(IndexingEvent(self, (\"name\",), True))\n\n def monitorDevice(self):\n \"\"\"\n Returns true if the device production state >= zProdStateThreshold.\n\n @rtype: boolean\n \"\"\"\n return (\n self.getProductionState() >= self.zProdStateThreshold\n and not self.renameInProgress\n )\n\n 
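# Example (hypothetical sketch): updateDevice() only touches the\n    # keyword arguments that are actually passed.\n    #\n    #     dev = dmd.Devices.findDevice(\"myhost\")\n    #     dev.updateDevice(priority=4, comments=\"rack moved\",\n    #                      zProperties={\"zSnmpVer\": \"v2c\"})\n\n    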
def snmpMonitorDevice(self):\n \"\"\"\n Returns true if the device is subject to SNMP monitoring\n\n @rtype: boolean\n \"\"\"\n return (\n self.monitorDevice()\n and self.getManageIp()\n and not self.zSnmpMonitorIgnore\n )\n\n def getPriority(self):\n \"\"\"\n Return the numeric device priority.\n\n @rtype: int\n \"\"\"\n return self.priority\n\n def getPriorityString(self):\n \"\"\"\n Return the device priority as a string.\n\n @rtype: string\n \"\"\"\n return str(self.convertPriority(self.priority))\n\n def getPingStatusString(self):\n \"\"\"\n Return the pingStatus as a string\n\n @rtype: string\n \"\"\"\n result = self.getPingStatus()\n if result <= 0:\n return str(self.convertStatus(result))\n return \"Down\"\n\n def getSnmpStatusString(self):\n \"\"\"\n Return the snmpStatus as a string\n\n @rtype: string\n \"\"\"\n result = self.getSnmpStatus()\n if result <= 0:\n return str(self.convertStatus(result))\n return \"Down\"\n\n security.declareProtected(ZEN_CHANGE_DEVICE_PRODSTATE, \"setProdState\")\n\n def setProdState(self, state, maintWindowChange=False, REQUEST=None):\n \"\"\"\n Set the device's production state.\n\n @parameter state: new production state\n @type state: int\n @parameter maintWindowChange: are we resetting state from inside a MW?\n @type maintWindowChange: boolean\n @permission: ZEN_CHANGE_DEVICE\n \"\"\"\n # Set production state on all components that inherit from this device\n ret = super(Device, self).setProdState(\n state, maintWindowChange, REQUEST\n )\n self._p_changed = True\n if REQUEST:\n audit(\n \"UI.Device.Edit\",\n self,\n productionState=state,\n maintenanceWindowChange=maintWindowChange,\n )\n return ret\n\n security.declareProtected(ZEN_CHANGE_DEVICE, \"setPriority\")\n\n def setPriority(self, priority, REQUEST=None):\n \"\"\"\n Set the device's priority\n\n @type priority: int\n @permission: ZEN_CHANGE_DEVICE\n \"\"\"\n self.priority = int(priority)\n if REQUEST:\n messaging.IMessageSender(self).sendToBrowser(\n \"Priority Updated\",\n \"Device priority has been set to %s.\"\n % (self.getPriorityString()),\n )\n audit(\"UI.Device.Edit\", self, priority=priority)\n return self.callZenScreen(REQUEST)\n\n security.declareProtected(ZEN_CHANGE_DEVICE, \"setLastChange\")\n\n def setLastChange(self, value=None):\n \"\"\"\n Set the changed datetime for this device.\n\n @param value: secs since the epoch, default is now\n @type value: float\n @permission: ZEN_CHANGE_DEVICE\n \"\"\"\n if value is None:\n value = time.time()\n self._lastChange = float(value)\n\n security.declareProtected(ZEN_CHANGE_DEVICE, \"setSnmpLastCollection\")\n\n def setSnmpLastCollection(self, value=None):\n \"\"\"\n Set the last time snmp collection occurred.\n\n @param value: secs since the epoch, default is now\n @type value: float\n @permission: ZEN_CHANGE_DEVICE\n \"\"\"\n if value is None:\n value = time.time()\n self._snmpLastCollection = float(value)\n\n security.declareProtected(ZEN_CHANGE_DEVICE, \"addManufacturer\")\n\n def addManufacturer(\n self,\n newHWManufacturerName=None,\n newSWManufacturerName=None,\n REQUEST=None,\n ):\n \"\"\"\n @permission: ZEN_CHANGE_DEVICE\n @todo: Doesn't really do work on a device object.\n Already exists on ZDeviceLoader\n \"\"\"\n mname = newHWManufacturerName\n field = \"hwManufacturer\"\n if not mname:\n mname = newSWManufacturerName\n field = \"osManufacturer\"\n self.getDmdRoot(\"Manufacturers\").createManufacturer(mname)\n if REQUEST:\n REQUEST[field] = mname\n messaging.IMessageSender(self).sendToBrowser(\n \"Manufacturer Added\",\n 
\"The %s manufacturer has been created.\" % mname,\n )\n audit(\"UI.Device.AddManufacturer\", self, manufacturer=mname)\n return self.callZenScreen(REQUEST)\n\n security.declareProtected(ZEN_CHANGE_DEVICE, \"setHWProduct\")\n\n def setHWProduct(\n self, newHWProductName=None, hwManufacturer=None, REQUEST=None\n ):\n \"\"\"\n @permission: ZEN_CHANGE_DEVICE\n @todo: Doesn't really do work on a device object.\n Already exists on ZDeviceLoader\n \"\"\"\n added = False\n if newHWProductName and hwManufacturer:\n self.getDmdRoot(\"Manufacturers\").createHardwareProduct(\n newHWProductName, hwManufacturer\n )\n added = True\n if REQUEST:\n if added:\n messaging.IMessageSender(self).sendToBrowser(\n \"Product Set\",\n \"Hardware product has been set to %s.\" % newHWProductName,\n )\n REQUEST[\"hwProductName\"] = newHWProductName\n audit(\n \"UI.Device.SetHWProduct\",\n self,\n manufacturer=hwManufacturer,\n product=newHWProductName,\n )\n else:\n messaging.IMessageSender(self).sendToBrowser(\n \"Set Product Failed\",\n \"Hardware product could not be set to %s.\"\n % newHWProductName,\n priority=messaging.WARNING,\n )\n return self.callZenScreen(REQUEST)\n\n security.declareProtected(ZEN_CHANGE_DEVICE, \"setOSProduct\")\n\n def setOSProduct(\n self, newOSProductName=None, osManufacturer=None, REQUEST=None\n ):\n \"\"\"\n @permission: ZEN_CHANGE_DEVICE\n @todo: Doesn't really do work on a device object.\n Already exists on ZDeviceLoader\n \"\"\"\n if newOSProductName:\n self.getDmdRoot(\"Manufacturers\").createSoftwareProduct(\n newOSProductName, osManufacturer, isOS=True\n )\n if REQUEST:\n if newOSProductName:\n messaging.IMessageSender(self).sendToBrowser(\n \"Product Set\",\n \"OS product has been set to %s.\" % newOSProductName,\n )\n REQUEST[\"osProductName\"] = newOSProductName\n audit(\n \"UI.Device.SetOSProduct\",\n self,\n manufacturer=osManufacturer,\n product=newOSProductName,\n )\n else:\n messaging.IMessageSender(self).sendToBrowser(\n \"Set Product Failed\",\n \"OS product could not be set to %s.\" % newOSProductName,\n priority=messaging.WARNING,\n )\n return self.callZenScreen(REQUEST)\n\n security.declareProtected(ZEN_CHANGE_DEVICE, \"setLocation\")\n\n def setLocation(self, locationPath, REQUEST=None):\n \"\"\"\n Set the location of a device.\n If the location is new it will be created.\n\n @permission: ZEN_CHANGE_DEVICE\n \"\"\"\n if not locationPath:\n self.location.removeRelation()\n else:\n locobj = self.getDmdRoot(\"Locations\").createOrganizer(locationPath)\n self.addRelation(\"location\", locobj)\n self.setAdminLocalRoles()\n notify(IndexingEvent(self, \"path\", False))\n if REQUEST:\n action = \"SetLocation\" if locationPath else \"RemoveFromLocation\"\n audit([\"UI.Device\", action], self, location=locationPath)\n\n security.declareProtected(ZEN_CHANGE_DEVICE, \"addLocation\")\n\n def addLocation(self, newLocationPath, REQUEST=None):\n \"\"\"\n @todo: Doesn't really do work on a device object.\n Already exists on ZDeviceLoader\n \"\"\"\n self.getDmdRoot(\"Locations\").createOrganizer(newLocationPath)\n if REQUEST:\n REQUEST[\"locationPath\"] = newLocationPath\n messaging.IMessageSender(self).sendToBrowser(\n \"Location Added\",\n \"Location %s has been created.\" % newLocationPath,\n )\n audit(\"UI.Device.SetLocation\", self, location=newLocationPath)\n return self.callZenScreen(REQUEST)\n\n security.declareProtected(ZEN_CHANGE_DEVICE, \"setPerformanceMonitor\")\n\n def setPerformanceMonitor(\n self, performanceMonitor, newPerformanceMonitor=None, REQUEST=None\n ):\n 
\"\"\"\n Set the performance monitor for this device.\n If newPerformanceMonitor is passed in create it\n\n @permission: ZEN_CHANGE_DEVICE\n \"\"\"\n if newPerformanceMonitor:\n performanceMonitor = newPerformanceMonitor\n\n if self.getPerformanceServer() is not None:\n oldPerformanceMonitor = self.getPerformanceServer().getId()\n self.getDmdRoot(\"Monitors\").setPreviousCollectorForDevice(\n self.getId(), oldPerformanceMonitor\n )\n\n collectorNotFound = False\n warning = None\n obj = self.getDmdRoot(\"Monitors\").getPerformanceMonitor(\n performanceMonitor\n )\n if obj.viewName() != performanceMonitor:\n collectorNotFound = True\n warning = (\n \"Collector {} is not found. \"\n \"Performance monitor has been set to {}.\".format(\n performanceMonitor, obj.viewName()\n )\n )\n log.warn(warning)\n self.addRelation(\"perfServer\", obj)\n self.setLastChange()\n notify(IndexingEvent(self))\n\n if REQUEST:\n message = \"Performance monitor has been set to {}.\".format(\n performanceMonitor\n )\n if collectorNotFound:\n message = warning\n messaging.IMessageSender(self).sendToBrowser(\n \"Monitor Changed\", message\n )\n audit(\n \"UI.Device.SetPerformanceMonitor\",\n self,\n performancemonitor=performanceMonitor,\n )\n return self.callZenScreen(REQUEST)\n\n security.declareProtected(ZEN_CHANGE_DEVICE, \"setGroups\")\n\n def setGroups(self, groupPaths):\n \"\"\"\n Set the list of groups for this device based on a list of paths\n\n @permission: ZEN_CHANGE_DEVICE\n \"\"\"\n objGetter = self.getDmdRoot(\"Groups\").createOrganizer\n self._setRelations(\"groups\", objGetter, groupPaths)\n notify(IndexingEvent(self, \"path\", False))\n\n security.declareProtected(ZEN_CHANGE_DEVICE, \"addDeviceGroup\")\n\n def addDeviceGroup(self, newDeviceGroupPath, REQUEST=None):\n \"\"\"\n DEPRECATED?\n Add a device group to the database and this device\n\n @permission: ZEN_CHANGE_DEVICE\n @todo: Already exists on ZDeviceLoader\n \"\"\"\n group = self.getDmdRoot(\"Groups\").createOrganizer(newDeviceGroupPath)\n self.addRelation(\"groups\", group)\n if REQUEST:\n messaging.IMessageSender(self).sendToBrowser(\n \"Group Added\",\n \"Group %s has been created.\" % newDeviceGroupPath,\n )\n audit(\"UI.Device.AddToGroup\", self, group=newDeviceGroupPath)\n return self.callZenScreen(REQUEST)\n\n security.declareProtected(ZEN_CHANGE_DEVICE, \"setSystems\")\n\n def setSystems(self, systemPaths):\n \"\"\"\n Set a list of systems to this device using their system paths\n\n @permission: ZEN_CHANGE_DEVICE\n \"\"\"\n objGetter = self.getDmdRoot(\"Systems\").createOrganizer\n self._setRelations(\"systems\", objGetter, systemPaths)\n notify(IndexingEvent(self, \"path\", False))\n\n security.declareProtected(ZEN_CHANGE_DEVICE, \"addSystem\")\n\n def addSystem(self, newSystemPath, REQUEST=None):\n \"\"\"\n DEPRECATED?\n Add a systems to this device using its system path\n\n @permission: ZEN_CHANGE_DEVICE\n @todo: Already exists on ZDeviceLoader\n \"\"\"\n sys = self.getDmdRoot(\"Systems\").createOrganizer(newSystemPath)\n self.addRelation(\"systems\", sys)\n if REQUEST:\n messaging.IMessageSender(self).sendToBrowser(\n \"System Added\", \"System %s has been created.\" % newSystemPath\n )\n audit(\"UI.Device.AddToSystem\", self, system=newSystemPath)\n return self.callZenScreen(REQUEST)\n\n security.declareProtected(ZEN_CHANGE_DEVICE, \"setTerminalServer\")\n\n def setTerminalServer(self, termservername):\n \"\"\"\n Set the terminal server of this device\n\n @param termservername: device name of terminal server\n @permission: 
ZEN_CHANGE_DEVICE\n \"\"\"\n termserver = self.findDevice(termservername)\n if termserver:\n self.addRelation(\"termserver\", termserver)\n\n def _setRelations(self, relName, objGetter, relPaths):\n \"\"\"\n Set related objects to this device\n\n @param relName: name of the relation to set\n @param objGetter: method to get the relation\n @param relPaths: list of relationship paths\n \"\"\"\n if not isinstance(relPaths, (list, tuple)):\n relPaths = [\n relPaths,\n ]\n relPaths = filter(lambda x: x.strip(), relPaths)\n rel = getattr(self, relName, None)\n if not rel:\n raise AttributeError(\"Relation %s not found\" % relName)\n curRelIds = {}\n for value in rel.objectValuesAll():\n curRelIds[value.getOrganizerName()] = value\n for path in relPaths:\n if path not in curRelIds:\n robj = objGetter(path)\n self.addRelation(relName, robj)\n else:\n del curRelIds[path]\n for obj in curRelIds.values():\n self.removeRelation(relName, obj)\n self.setAdminLocalRoles()\n\n def _getOtherExpandedLinks(self):\n \"\"\"\n @rtype list\n @return a list of the html links supplied by implementers\n of the IExpandedLinkProvider subscriber interface\n \"\"\"\n providers = subscribers([self], IExpandedLinkProvider)\n expandedLinkList = []\n for provider in providers:\n expandedLinkList.extend(provider.getExpandedLinks())\n return expandedLinkList\n\n def getExpandedLinks(self):\n \"\"\"\n Return the expanded zComment property\n\n @rtype: HTML output\n \"\"\"\n from Products.ZenUtils.ZenTales import talesEval\n\n try:\n linksHtml = talesEval(\"string:\" + self.zLinks, self)\n otherLinks = self._getOtherExpandedLinks()\n if otherLinks:\n linksHtml += \"
\".join(otherLinks)\n return linksHtml\n except Exception as ex:\n import cgi\n\n return \"%s\" % cgi.escape(str(ex))\n\n ####################################################################\n # Private getter functions that implement DeviceResultInt\n ####################################################################\n\n security.declareProtected(ZEN_VIEW, \"device\")\n\n def device(self):\n \"\"\"\n Support DeviceResultInt mixin class. Returns itself\n\n @permission: ZEN_VIEW\n \"\"\"\n return self\n\n ####################################################################\n # Status Management Functions used by status monitors\n ####################################################################\n\n def pastSnmpMaxFailures(self):\n \"\"\"\n Returns true if the device has more SNMP failures\n than maxFailures on its status mon.\n\n @rtype: boolean\n \"\"\"\n statusmon = self.monitors()\n if len(statusmon) > 0:\n statusmon = statusmon[0]\n return statusmon.maxFailures < self.getSnmpStatusNumber()\n return False\n\n # FIXME: cleanup --force option #2660\n security.declareProtected(\n ZEN_MANAGE_DEVICE_STATUS, \"getLastPollSnmpUpTime\"\n )\n\n def getLastPollSnmpUpTime(self):\n \"\"\"\n Get the value of the snmpUpTime status object\n\n @permission: ZEN_MANAGE_DEVICE_STATUS\n \"\"\"\n return self._lastPollSnmpUpTime.getStatus()\n\n # FIXME: cleanup --force option #2660\n security.declareProtected(\n ZEN_MANAGE_DEVICE_STATUS, \"setLastPollSnmpUpTime\"\n )\n\n def setLastPollSnmpUpTime(self, value):\n \"\"\"\n Set the value of the snmpUpTime status object\n\n @permission: ZEN_MANAGE_DEVICE_STATUS\n \"\"\"\n self._lastPollSnmpUpTime.setStatus(value)\n\n def snmpAgeCheck(self, hours):\n \"\"\"\n Returns True if SNMP data was collected more than 24 hours ago\n \"\"\"\n lastcoll = self.getSnmpLastCollection()\n hours = hours / 24.0\n if DateTime() > lastcoll + hours:\n return 1\n\n security.declareProtected(ZEN_CHANGE_DEVICE, \"applyProductContext\")\n\n def applyProductContext(self):\n \"\"\"\n Apply zProperties inherited from Product Contexts.\n \"\"\"\n self._applyProdContext(self.hw.getProductContext())\n self._applyProdContext(self.os.getProductContext())\n for soft in self.os.software():\n self._applyProdContext(soft.getProductContext())\n\n def _applyProdContext(self, context):\n \"\"\"\n Apply zProperties taken for the product context passed in.\n\n @param context: list of tuples returned from\n getProductContext on a MEProduct.\n \"\"\"\n for name, value in context:\n if name == \"zDeviceClass\" and value:\n log.info(\"move device to %s\", value)\n self.moveDevices(value, self.id)\n elif name == \"zDeviceGroup\" and value:\n log.info(\"add device to group %s\", value)\n self.addDeviceGroup(value)\n elif name == \"zSystem\" and value:\n log.info(\"add device to system %s\", value)\n self.addSystem(value)\n\n ####################################################################\n # Management Functions\n ####################################################################\n\n security.declareProtected(ZEN_MANAGE_DEVICE, \"collectDevice\")\n\n def collectDevice(\n self,\n setlog=True,\n REQUEST=None,\n generateEvents=False,\n background=False,\n write=None,\n debug=False,\n ):\n \"\"\"\n Collect the configuration of this device AKA Model Device\n\n @param setlog: If true, set up the output log of this process\n @permission: ZEN_MANAGE_DEVICE\n @todo: generateEvents param is not being used.\n \"\"\"\n unused(generateEvents)\n xmlrpc = isXmlRpc(REQUEST)\n perfConf = 
\n    ####################################################################\n    # Management Functions\n    ####################################################################\n\n    security.declareProtected(ZEN_MANAGE_DEVICE, \"collectDevice\")\n\n    def collectDevice(\n        self,\n        setlog=True,\n        REQUEST=None,\n        generateEvents=False,\n        background=False,\n        write=None,\n        debug=False,\n    ):\n        \"\"\"\n        Collect the configuration of this device, AKA \"model device\".\n\n        @param setlog: If true, set up the output log of this process\n        @permission: ZEN_MANAGE_DEVICE\n        @todo: generateEvents param is not being used.\n        \"\"\"\n        unused(generateEvents)\n        xmlrpc = isXmlRpc(REQUEST)\n        perfConf = self.getPerformanceServer()\n        if perfConf is None:\n            msg = (\n                \"Device %s in unknown state -- remove and remodel\"\n                % self.titleOrId()\n            )\n            if write is not None:\n                write(msg)\n            log.error(\"Unable to get collector info: %s\", msg)\n            if xmlrpc:\n                return 1\n            return\n\n        perfConf.collectDevice(\n            self,\n            setlog,\n            REQUEST,\n            generateEvents,\n            background,\n            write,\n            collectPlugins=\"\",\n            debug=debug,\n        )\n\n        if REQUEST:\n            audit(\"UI.Device.Remodel\", self)\n        if xmlrpc:\n            return 0\n\n    security.declareProtected(ZEN_MANAGE_DEVICE, \"runDeviceMonitor\")\n\n    def runDeviceMonitor(self, REQUEST=None, write=None, debug=False):\n        \"\"\"\n        Run the monitoring daemons against this device once.\n        \"\"\"\n        # Datasource source type and collection daemon to run\n        data_collector = {\n            \"Python\": \"zenpython\",\n            \"SNMP\": \"zenperfsnmp\",\n            \"COMMAND\": \"zencommand\",\n        }\n        # Daemons to run against the device\n        collection_daemons = []\n        xmlrpc = isXmlRpc(REQUEST)\n        perfConf = self.getPerformanceServer()\n        if perfConf is None:\n            msg = (\n                \"Device %s in unknown state -- remove and remodel\"\n                % self.titleOrId()\n            )\n            if write is not None:\n                write(msg)\n            log.error(\"Unable to get collector info: %s\", msg)\n            if xmlrpc:\n                return 1\n            return\n\n        # Get all the datasources from the templates bound to this device\n        # to determine which daemons to run.\n        templates = self.getRRDTemplates()\n        datasources = itertools.chain.from_iterable(\n            [template.getRRDDataSources() for template in templates]\n        )\n        ds_src_types = set()\n        for datasource in datasources:\n            # BasicDataSource contains the info about the source type\n            if datasource.__class__.__name__ == \"BasicDataSource\":\n                ds_src_types.add(datasource.sourcetype)\n            else:\n                # We need the parent class sourcetype since datasources\n                # inherited from PythonDataSource do not have \"Python\"\n                # as a sourcetype\n                ds_src_types.add(datasource.__class__.__base__.sourcetype)\n        for source in data_collector:\n            if source in ds_src_types:\n                collection_daemons.append(data_collector[source])\n        # We support only core collection daemons:\n        # zenpython, zenperfsnmp and zencommand.\n        if not collection_daemons and write:\n            write(\n                \"Modeling through the UI only supports COMMAND, \"\n                \"SNMP and Python type datasources\"\n            )\n            if xmlrpc:\n                return 1\n            return\n        perfConf.runDeviceMonitor(\n            self, REQUEST, write, collection_daemons, debug=debug\n        )\n        if REQUEST:\n            audit(\"UI.Device.Remodel\", self)\n        if xmlrpc:\n            return 0\n\n    security.declareProtected(ZEN_MANAGE_DEVICE, \"monitorPerDatasource\")\n\n    def monitorPerDatasource(self, dsObj, REQUEST=None, write=None):\n        \"\"\"\n        Run a monitoring daemon against one device and one datasource once.\n        \"\"\"\n        parameter = \"--datasource\"\n        value = \"%s/%s\" % (dsObj.rrdTemplate.obj.id, dsObj.id)\n        collection_daemon = \"\"\n        if dsObj.sourcetype == \"COMMAND\":\n            collection_daemon = \"zencommand\"\n        elif dsObj.__class__.__base__.sourcetype == \"Python\":\n            collection_daemon = \"zenpython\"\n        elif dsObj.sourcetype == \"SNMP\":\n            collection_daemon = \"zenperfsnmp\"\n            parameter = \"--oid\"\n            value = dsObj.oid\n\n        xmlrpc = isXmlRpc(REQUEST)\n        perfConf = self.getPerformanceServer()\n        if not collection_daemon and write:\n            write(\n                \"Modeling through the UI only supports COMMAND, \"\n                \"SNMP and Python type datasources\"\n            )\n            if xmlrpc:\n                return 1\n            return\n\n        perfConf.runDeviceMonitorPerDatasource(\n            self, REQUEST, write, collection_daemon, parameter, value\n        )\n        if xmlrpc:\n            return 0\n\n    security.declareProtected(ZEN_DELETE_DEVICE, \"deleteDevice\")\n\n    def deleteDevice(\n        
self,\n deleteStatus=False,\n deleteHistory=False,\n deletePerf=False,\n REQUEST=None,\n ):\n \"\"\"\n Delete device from the database\n\n NB: deleteHistory is disabled for the 2.2 release. In some\n circumstances it was causing many subprocesses to be spawned\n and creating a gridlock situation.\n\n NOTE: deleteStatus no longer deletes events from the summary\n table, but closes them.\n\n @permission: ZEN_ADMIN_DEVICE\n \"\"\"\n parent = self.getPrimaryParent()\n if deleteStatus:\n # Close events for this device\n zep = getFacade(\"zep\")\n tagFilter = {\"tag_uuids\": [IGlobalIdentifier(self).getGUID()]}\n eventFilter = {\"tag_filter\": [tagFilter]}\n log.debug(\"Closing events for device: %s\", self.getId())\n zep.closeEventSummaries(eventFilter=eventFilter)\n if REQUEST:\n audit(\n \"UI.Device.Delete\",\n self,\n deleteStatus=deleteStatus,\n deleteHistory=deleteHistory,\n deletePerf=deletePerf,\n )\n self.getDmdRoot(\"Monitors\").deletePreviousCollectorForDevice(\n self.getId()\n )\n self.dmd.getDmdRoot(\"ZenLinkManager\").remove_device_from_cache(\n self.getId()\n )\n parent._delObject(self.getId())\n if REQUEST:\n if parent.getId() == \"devices\":\n parent = parent.getPrimaryParent()\n REQUEST[\"RESPONSE\"].redirect(\n parent.absolute_url_path() + \"/deviceOrganizerStatus\"\n \"?message=Device deleted\"\n )\n\n security.declareProtected(ZEN_ADMIN_DEVICE, \"renameDevice\")\n\n def renameDevice(self, newId=None, REQUEST=None, retainGraphData=False):\n \"\"\"\n Rename device from the DMD. Disallow assignment of\n an id that already exists in the system.\n Block renaming for this Device if a rename is already in progress.\n\n @permission: ZEN_ADMIN_DEVICE\n @param newId: new name\n @type newId: string\n @param REQUEST: Zope REQUEST object\n @type REQUEST: Zope REQUEST object\n \"\"\"\n if self.renameInProgress:\n log.warn(\"Rename already in progress for device %s.\", self.id)\n raise Exception(\n \"Rename already in progress for device {}.\".format(self.id),\n )\n\n parent = self.getPrimaryParent()\n path = self.absolute_url_path()\n oldId = self.getId()\n\n if newId is None:\n return path\n\n if not isinstance(newId, unicode):\n newId = self.prepId(newId)\n\n newId = newId.strip()\n\n if newId == \"\" or newId == oldId:\n return path\n\n device = self.dmd.Devices.findDeviceByIdExact(newId)\n if device:\n raise DeviceExistsError(\n \"Device already exists with id %s\" % newId,\n device,\n )\n\n if REQUEST:\n audit(\"UI.Device.ChangeId\", self, id=newId)\n\n # side effect: self.getId() will return newId after this call\n try:\n # If there is a title, change the title to the newId\n # (ticket #5443). 
manage_renameObject will reindex.\n            if self.title:\n                self.title = newId\n            parent.manage_renameObject(oldId, newId)\n            self.setLastChange()\n\n            # Replace the old id in performance data with the new id.\n            # See ZEN-27329.\n            if retainGraphData:\n                self.renameInProgress = True\n                self.reassociatePerfDataAfterRename(oldId, newId)\n\n            return self.absolute_url_path()\n        except CopyError:\n            raise Exception(\"Device rename failed.\")\n\n    def reassociatePerfDataAfterRename(self, oldId, newId):\n        \"\"\"\n        Replace a dev id in metric names and tag values with the new id after\n        renaming the device.\n        \"\"\"\n        self.dmd.JobManager.addJob(\n            FacadeMethodJob,\n            description=(\n                \"Reassociating performance data for device {} with \"\n                \"new ID {}\".format(oldId, newId)\n            ),\n            kwargs=dict(\n                facadefqdn=\"Products.Zuul.facades.metricfacade.MetricFacade\",\n                method=\"renameDevice\",\n                oldId=oldId,\n                newId=newId,\n            ),\n        )\n\n    security.declareProtected(ZEN_CHANGE_DEVICE, \"index_object\")\n\n    @deprecated\n    def index_object(self, idxs=None, noips=False):\n        \"\"\"\n        Override so ips get indexed on move. DEPRECATED\n        \"\"\"\n        pass\n\n    security.declareProtected(ZEN_CHANGE_DEVICE, \"unindex_object\")\n\n    @deprecated\n    def unindex_object(self):\n        \"\"\"\n        Override so ips get unindexed as well. DEPRECATED\n        \"\"\"\n        pass\n\n    def getUserCommandTargets(self):\n        \"\"\"\n        Called by Commandable.doCommand() to ascertain objects on which\n        a UserCommand should be executed.\n        \"\"\"\n        return [self]\n\n    def getUserCommandEnvironment(self):\n        \"\"\"\n        Returns the tales environment used to evaluate the command\n        \"\"\"\n        environ = Commandable.getUserCommandEnvironment(self)\n        context = self.primaryAq()\n        environ.update(\n            {\n                \"dev\": context,\n                \"device\": context,\n            }\n        )\n        return environ\n\n    def getUrlForUserCommands(self):\n        \"\"\"\n        Returns a URL to redirect to after a command has executed;\n        used by Commandable\n        \"\"\"\n        return self.getPrimaryUrlPath() + \"/deviceManagement\"\n
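\n    # Example (hypothetical sketch): a user command defined on this device\n    # can reference the TALES environment above, e.g.\n    #\n    #     ping -c2 ${device/manageIp}\n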
\n    def getHTMLEventSummary(self, severity=4):\n        \"\"\"\n        Returns HTML Event Summary of a device\n        \"\"\"\n        html = []\n        html.append(\"<table width='100%' cellspacing='1' cellpadding='3'>\")\n        html.append(\"<tr>\")\n\n        def evsummarycell(ev):\n            if ev[1] - ev[2] >= 0:\n                klass = \"%s empty thin\" % ev[0]\n            else:\n                klass = \"%s thin\" % ev[0]\n            h = \"<td class='%s' align='center'>%s/%s</td>\" % (\n                klass,\n                ev[1],\n                ev[2],\n            )\n            return h\n\n        info = self.getEventSummary(severity)\n        html += map(evsummarycell, info)\n        html.append(\"</tr></table>\")\n        return \"\\n\".join(html)\n\n    def getDataForJSON(self, minSeverity=0):\n        \"\"\"\n        Returns data ready for serialization\n        \"\"\"\n        url, classurl = map(\n            urlquote, (self.getDeviceUrl(), self.getDeviceClassPath())\n        )\n        id = \"<a href='%s'>%s</a>\" % (\n            url,\n            self.titleOrId(),\n        )\n        ip = self.getDeviceIp()\n        if self.checkRemotePerm(ZEN_VIEW, self.deviceClass()):\n            path = \"<a href='%s'>%s</a>\" % (\n                classurl,\n                classurl,\n            )\n        else:\n            path = classurl\n        prod = self.getProdState()\n        evsum = getEventPillME(self, 1, minSeverity)[0]\n        return [id, ip, path, prod, evsum, self.id]\n\n    def exportXmlHook(self, ofile, ignorerels):\n        \"\"\"\n        Add export of our child objects.\n        \"\"\"\n        map(lambda o: o.exportXml(ofile, ignorerels), (self.hw, self.os))\n\n    def zenPropertyOptions(self, propname):\n        \"\"\"\n        Returns a list of possible options for a given zProperty\n        \"\"\"\n        if propname == \"zCollectorPlugins\":\n            from Products.DataCollector.Plugins import loadPlugins\n\n            return sorted(ldr.pluginName for ldr in loadPlugins(self.dmd))\n        if propname == \"zCommandProtocol\":\n            return [\"ssh\", \"telnet\"]\n        if propname == \"zSnmpVer\":\n            return [\"v1\", \"v2c\", \"v3\"]\n        if propname == \"zSnmpAuthType\":\n            return [\"\", \"MD5\", \"SHA\"]\n        if propname == \"zSnmpPrivType\":\n            return [\"\", \"DES\", \"AES\"]\n        return ManagedEntity.zenPropertyOptions(self, propname)\n\n    security.declareProtected(ZEN_MANAGE_DEVICE, \"pushConfig\")\n\n    def pushConfig(self, REQUEST=None):\n        \"\"\"\n        This will result in a push of all the devices to live collectors\n\n        @permission: ZEN_MANAGE_DEVICE\n        \"\"\"\n        self._p_changed = True\n        if REQUEST:\n            messaging.IMessageSender(self).sendToBrowser(\n                \"Changes Pushed\",\n                \"Changes to %s pushed to collectors.\" % self.id,\n            )\n            audit(\"UI.Device.PushChanges\", self)\n            return self.callZenScreen(REQUEST)\n\n    security.declareProtected(ZEN_EDIT_LOCAL_TEMPLATES, \"bindTemplates\")\n\n    def bindTemplates(self, ids=(), REQUEST=None):\n        \"\"\"\n        This will bind available templates to the zDeviceTemplates\n\n        @permission: ZEN_EDIT_LOCAL_TEMPLATES\n        \"\"\"\n        result = self.setZenProperty(\"zDeviceTemplates\", ids, REQUEST)\n        if REQUEST:\n            audit(\"UI.Device.BindTemplates\", self, templates=ids)\n        return result\n\n    security.declareProtected(\n        ZEN_EDIT_LOCAL_TEMPLATES, \"removeZDeviceTemplates\"\n    )\n\n    def removeZDeviceTemplates(self, REQUEST=None):\n        \"\"\"\n        Deletes the local zProperty, zDeviceTemplates\n\n        @permission: ZEN_EDIT_LOCAL_TEMPLATES\n        \"\"\"\n        for id in self.zDeviceTemplates:\n            self.removeLocalRRDTemplate(id)\n            if REQUEST:\n                audit(\"UI.Device.RemoveLocalTemplate\", self, template=id)\n        from Products.ZenRelations.ZenPropertyManager import (\n            ZenPropertyDoesNotExist,\n        )\n\n        try:\n            return self.deleteZenProperty(\"zDeviceTemplates\", REQUEST)\n        except ZenPropertyDoesNotExist:\n            if REQUEST:\n                return self.callZenScreen(REQUEST)\n\n    security.declareProtected(ZEN_EDIT_LOCAL_TEMPLATES, \"addLocalTemplate\")\n\n    def addLocalTemplate(self, id, REQUEST=None):\n        \"\"\"\n        Create a local template on a device\n\n        @permission: ZEN_EDIT_LOCAL_TEMPLATES\n        \"\"\"\n        from Products.ZenModel.RRDTemplate import manage_addRRDTemplate\n\n        manage_addRRDTemplate(self, id)\n        if id not in self.zDeviceTemplates:\n            self.bindTemplates(self.zDeviceTemplates + [id])\n        if REQUEST:\n            messaging.IMessageSender(self).sendToBrowser(\n                \"Local Template Added\",\n                \"Added template %s to %s.\" % (id, self.id),\n            )\n            audit(\"UI.Device.AddLocalTemplate\", self, template=id)\n            return self.callZenScreen(REQUEST)\n
Returns all available templates for this device\n \"\"\"\n # All templates defined on this device are available\n templates = self.objectValues(\"RRDTemplate\")\n # Any templates available to the class that aren't overridden locally\n # are also available\n device_template_ids = set(t.id for t in templates)\n templates.extend(\n t\n for t in self.deviceClass().getRRDTemplates()\n if t.id not in device_template_ids\n )\n # Filter out any templates that have been 'replaced'\n filteredTemplates = list(templates)\n for t in templates:\n tName = t.titleOrId()\n if tName.endswith(\"-replacement\"):\n tReplacedName = tName.replace(\"-replacement\", \"\")\n tReplaced = self.getRRDTemplateByName(tReplacedName)\n if tReplaced:\n filteredTemplates.remove(tReplaced)\n # filter for python class before sorting\n templates = filter(\n lambda t: isinstance(self, t.getTargetPythonClass()),\n filteredTemplates,\n )\n return sorted(templates, key=lambda x: x.id.lower())\n\n def getSnmpV3EngineId(self):\n return self.getProperty(\"zSnmpEngineId\")\n\n def setSnmpV3EngineId(self, value):\n self.setZenProperty(\"zSnmpEngineId\", value)\n\n security.declareProtected(ZEN_VIEW, \"getLinks\")\n\n def getLinks(self, OSI_layer=\"3\"):\n \"\"\"\n Returns all Links on this Device's interfaces\n\n @permission: ZEN_VIEW\n \"\"\"\n if OSI_layer == \"3\":\n from Products.ZenUtils.NetworkTree import getDeviceNetworkLinks\n\n for link in getDeviceNetworkLinks(self):\n yield link\n else:\n for iface in self.os.interfaces.objectValuesGen():\n for link in iface.links.objectValuesGen():\n yield link\n\n security.declareProtected(ZEN_VIEW, \"getXMLEdges\")\n\n def getXMLEdges(self, depth=3, filter=\"/\", start=()):\n \"\"\"\n Gets XML\n \"\"\"\n if not start:\n start = self.id\n edges = NetworkTree.get_edges(\n self, depth, withIcons=True, filter=filter\n )\n return edgesToXML(edges, start)\n\n security.declareProtected(ZEN_VIEW, \"getPrettyLink\")\n\n @unpublished\n def getPrettyLink(self, target=None, altHref=\"\"):\n \"\"\"\n Gets a link to this device, plus an icon\n\n @rtype: HTML text\n @permission: ZEN_VIEW\n \"\"\"\n template = (\n \"
\"\n \" \"\n \"
%s\"\n )\n icon = self.getIconPath()\n href = altHref if altHref else self.getPrimaryUrlPath()\n name = self.titleOrId()\n\n rendered = template % (icon, cgi.escape(name))\n\n if not self.checkRemotePerm(ZEN_VIEW, self):\n return rendered\n else:\n return \"%s\" % (\n \"target=\" + target if target else \"\",\n href,\n rendered,\n )\n\n def osProcessClassMatchData(self):\n \"\"\"\n Get a list of dictionaries containing everything needed to match\n processes against the global list of process classes.\n \"\"\"\n matchers = []\n for pc in self.getDmdRoot(\"Processes\").getSubOSProcessClassesSorted():\n matchers.append(\n {\n \"includeRegex\": pc.includeRegex,\n \"excludeRegex\": pc.excludeRegex,\n \"replaceRegex\": pc.replaceRegex,\n \"replacement\": pc.replacement,\n \"primaryUrlPath\": pc.getPrimaryUrlPath(),\n \"primaryDmdId\": pc.getPrimaryDmdId(),\n }\n )\n\n return matchers\n\n def manageIpVersion(self):\n \"\"\"\n Returns either 4 or 6 depending on the version\n of the manageIp ip adddress\n \"\"\"\n from ipaddr import IPAddress\n\n try:\n ip = self.getManageIp()\n return IPAddress(ip).version\n except ValueError:\n # could not parse the ip address\n pass\n # if we can't parse it assume it is ipv4\n return 4\n\n def snmpwalkPrefix(self):\n \"\"\"\n This method gets the ip address prefix used for this device when\n running snmpwalk.\n\n @rtype: string\n @return: Prefix used for snmwalk for this device\n \"\"\"\n if self.manageIpVersion() == 6:\n return \"udp6:\"\n return \"\"\n\n def pingCommand(self):\n \"\"\"\n Used by the user commands this returns which ping command\n this device should use.\n @rtype: string\n @return \"ping\" or \"ping6\" depending on if the manageIp is ipv6 or not\n \"\"\"\n if self.manageIpVersion() == 6:\n return \"ping6\"\n return \"ping\"\n\n def tracerouteCommand(self):\n \"\"\"\n Used by the user commands this returns which traceroute command\n this device should use.\n @rtype: string\n @return \"traceroute\" or \"traceroute6\" depending on if the manageIp is\n ipv6 or not\n \"\"\"\n if self.manageIpVersion() == 6:\n return \"traceroute6\"\n return \"traceroute\"\n\n def getStatus(self, statusclass=None, **kwargs):\n \"\"\"\n Return the status number for this device of class statClass.\n If statusclass not set, search by zStatusEventClass.\n \"\"\"\n if not self.monitorDevice():\n return None\n\n if statusclass is None:\n statusclass = self.zStatusEventClass\n zep = getFacade(\"zep\", self)\n try:\n event_filter = zep.createEventFilter(\n tags=[self.getUUID()],\n element_sub_identifier=[\"\"],\n severity=[SEVERITY_CRITICAL],\n status=[\n STATUS_NEW,\n STATUS_ACKNOWLEDGED,\n STATUS_SUPPRESSED,\n ],\n event_class=filter(None, [self.zStatusEventClass]),\n )\n\n result = zep.getEventSummaries(0, filter=event_filter, limit=0)\n return int(result[\"total\"])\n except Exception:\n return None\n\n if statusclass == Status_Ping:\n return self._getPingStatus(statusclass)\n\n return super(Device, self).getStatus(statusclass, **kwargs)\n\n def _getPingStatus(self, statusclass):\n if not self.zPingMonitorIgnore and self.getManageIp():\n # Override normal behavior - we only care if the manage IP is down\n\n # Need to add the ipinterface component id to search since we may\n # be pinging interfaces and only care about status of the one that\n # matches the manage ip. 
This is potentially expensive\n element_sub_identifier = [\"\"]\n ifaces = self.getDeviceComponents(type=\"IpInterface\")\n for iface in ifaces:\n if self.manageIp in [\n ip.partition(\"/\")[0] for ip in iface.getIpAddresses()\n ]:\n element_sub_identifier.append(iface.id)\n break\n\n zep = getFacade(\"zep\", self)\n event_filter = zep.createEventFilter(\n tags=[self.getUUID()],\n severity=[SEVERITY_WARNING, SEVERITY_ERROR, SEVERITY_CRITICAL],\n status=[STATUS_NEW, STATUS_ACKNOWLEDGED, STATUS_SUPPRESSED],\n element_sub_identifier=element_sub_identifier,\n event_class=filter(None, [statusclass]),\n details={\n EventProxy.DEVICE_IP_ADDRESS_DETAIL_KEY: self.getManageIp()\n },\n )\n result = zep.getEventSummaries(0, filter=event_filter, limit=0)\n return int(result[\"total\"])\n else:\n return None\n\n def ipAddressAsInt(self):\n ip = self.getManageIp()\n if ip:\n ip = ip.partition(\"/\")[0]\n if ip:\n return str(numbip(ip))\n\n def getMacAddressCache(self):\n if self.macaddresses is None:\n self.macaddresses = OOSet()\n\n return self.macaddresses\n\n def getMacAddresses(self):\n return list(self.macaddresses or [])\n\n\nInitializeClass(Device)\n","repo_name":"zenoss/zenoss-prodbin","sub_path":"Products/ZenModel/Device.py","file_name":"Device.py","file_ext":"py","file_size_in_byte":90964,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"24897078768","text":"class Node:\n\tdef __init__(self,data):\n\t\tself.left = None\n\t\tself.right = None\n\t\tself.data = data\n\t\n\t\t\n\t# Iterative function for inorder tree traversal\n\tdef inOrder(root):\n\t\t\n\t\t# Set current to root of binary tree\n\t\tcurrent = root \n\t\tstack = [] # initialize stack\n\t\tdone = 0\n\t\t\n\t\twhile True:\n\t\t\t\n\t\t\t# Reach the left most Node of the current Node\n\t\t\tif current is not None:\n\t\t\t\t\n\t\t\t\t# Place pointer to a tree node on the stack \n\t\t\t\t# before traversing the node's left subtree\n\t\t\t\tstack.append(current)\n\t\t\t\n\t\t\t\tcurrent = current.left \n\n\t\t\t\n\t\t\t# BackTrack from the empty subtree and visit the Node\n\t\t\t# at the top of the stack; however, if the stack is \n\t\t\t# empty you are done\n\t\t\telif(stack):\n\t\t\t\tcurrent = stack.pop()\n\t\t\t\tprint(current.data, end=\" \") # Python 3 printing\n\t\t\t\n\t\t\t\t# We have visited the node and its left \n\t\t\t\t# subtree. 
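# --- Usage sketch (illustrative, not in the original record; kept as a
# comment because it sits inside the traversal method): building a small
# BST with insert() (defined further down in this class) and walking it
# with the iterative inOrder() above yields the keys in sorted order:
#
#     root = Node(27)
#     for value in (14, 35, 10, 19, 31, 42):
#         root.insert(value)
#     Node.inOrder(root)   # prints: 10 14 19 27 31 35 42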
Now, it's right subtree's turn\n\t\t\t\tcurrent = current.right \n\n\t\t\telse:\n\t\t\t\tbreak\n\t\t\n\t\tprint()\n\n\tdef PrintTree(self):\n\t\tif(self.left):\n\t\t\tself.left.PrintTree()\n\t\tprint(self.data)\n\t\t# if(self.right):\n\t\t# \tself.right.PrintTree()\n\n\tdef insert(self,data):\n\t\tif(self.data):\n\t\t\tif(data < self.data):\n\t\t\t\tif(self.left == None):\n\t\t\t\t\tself.left = Node(data)\n\t\t\t\telse:\n\t\t\t\t\tself.left.insert(data)\n\t\t\telif(data > self.data):\n\t\t\t\tif(self.right == None):\n\t\t\t\t\tself.right = Node(data)\n\t\t\t\telse:\n\t\t\t\t\tself.right.insert(data)\n\t\telse:\n\t\t\tself.data=data\n\n\tdef FindPath(self,goal,path):\n\t\t# if(self.data != goal):\n\t\tpath.append(self.data)\n\t\tprint(path)\n\t\tif(self.data == goal):\n\t\t\treturn True\n\n\t\tif(self.left and self.left.FindPath(goal , path)):\n\t\t\treturn True\n\t\tif(self.right and self.right.FindPath(goal , path)):\n\t\t\treturn True\n\t\tpath.pop()\n\t\treturn False\ndef LCA(path1,path2):\n\ti=0\n\twhile(True):\n\t\ti = i+1\t\t\t\n\t\tif(i btn\n pics (list): list of links for attached images\n '''\n self.post = post[0]\n self.header = post [1]\n self.text = None\n self.link = None\n self.pics = list()\n\n def parse_images(self, pic):\n js = pic.get('onclick')\n if 'showPhoto' in js:\n pic_url = js.split(',')[2].split(':')\n self.pics.append(InputMediaPhoto(json.loads(':'.join(pic_url[2:4]))))\n\n def parse_text(self):\n div_str = str(self.post.find(class_='wall_post_text')).replace('
', '\\r\\n')\n parts = bs(div_str, 'html.parser').getText().split('Показать полностью…')\n self.text = ''.join(parts)\n\n def parse(self):\n self.parse_text()\n \n new_markup = self.post.find_all('a', class_='image_cover')\n old_markup = self.post.find_all('a', class_='page_post_thumb_unsized')\n markup = new_markup + old_markup\n if markup:\n for pic in markup:\n self.parse_images(pic)\n\n self.link = self.header.find_all('a', class_='post_link')[0]['href']\n return self\n \n def upload(self):\n try:\n if self.text and self.text != 'None':\n markup = types.InlineKeyboardMarkup()\n markup.add(types.InlineKeyboardButton(text='Пост VK', url=f'https://vk.com{self.link}'))\n if len(self.text) > 4096:\n long_text = list()\n for x in range(0, len(self.text), 4096):\n long_text.append(self.text[x:x+4096])\n for sm_text in long_text[:-1]:\n bot.send_message(channel, sm_text)\n self.text = long_text[-1]\n bot.send_message(channel, self.text, reply_markup=markup)\n if self.pics: \n bot.send_media_group(channel, self.pics)\n except Exception as e:\n print(e)\n\ndef get_state():\n with open(logfile, 'r') as log:\n state = int(log.readlines()[-1]) - 20\n if state < 20: exit()\n return state\n\ndef save_sate(offset):\n with open(logfile, 'a') as log:\n log.write(str(offset) + '\\n')\n\ndef get_posts(target_uri):\n raw_page = requests.get(target_uri, headers={\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:45.0) Gecko/20100101 Firefox/45.0'})\n if raw_page.ok:\n page = bs(raw_page.text, 'html.parser')\n posts = page.find_all('div', class_='wall_text')\n headers = page.find_all('div', class_='post_header')\n return list(zip(posts, headers))[::-1]\n\ndef get_pages(lastet_offset, newest_offset=20):\n for offset in range(lastet_offset, newest_offset, -20): #8120-2100\n save_sate(offset)\n posts = get_posts(f'https://vk.com/wall-?offset={offset}&own=1')\n for post in posts:\n Post(post).parse().upload()\n\nif __name__ == '__main__':\n try:\n get_pages(int(argv[2]), int(argv[3]))\n except IndexError:\n get_pages(get_state())\n","repo_name":"fpkaos/scripts","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34764762657","text":"import numpy as np\nimport csv\nimport pandas as pd\n\n\ndef load(file):\n x=[]\n y=[]\n with open (file,'r') as f:\n reader = csv.reader(f,delimiter = ' ')\n for i in reader:\n tmp = [int(j) for j in i]\n x.append(tmp[:36])\n y.append(tmp[36])\n\n return np.array(x),np.array(y)\n\ndef get_data():\n train_x,train_y = load('sat.trn')\n test_x,test_y = load('sat.tst')\n return train_x,train_y,test_x,test_y\n\n","repo_name":"hsahib2912/ML-Assignments","sub_path":"A5_88/Q1_get_data.py","file_name":"Q1_get_data.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15867216825","text":"import os\nimport tempfile\nimport flask\nfrom flask import request\nfrom flask import jsonify\nfrom flask_cors import CORS\n\nimport assemblyai as aai\n\n\napp = flask.Flask(__name__)\nCORS(app)\n\n\n@app.route('/transcribe', methods=['POST'])\ndef transcribe():\n if request.method == 'POST':\n aai.settings.api_key = \"346b2ed2346948dbafcadd645c5e1bb5\"\n\n wav_file = request.files['file']\n temp_dir = tempfile.mkdtemp()\n temp_file_path = os.path.join(temp_dir, 'temp.wav')\n wav_file.save(temp_file_path)\n\n language_code = 
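# --- Illustrative hardening (hypothetical, not part of the original app):
# the record above ships its AssemblyAI API key in source. A common
# alternative is to read the secret from the environment at startup; the
# variable name here is an assumption for illustration:
import os

def load_api_key(var_name="ASSEMBLYAI_API_KEY"):
    """Fetch an API key from the environment, failing fast if unset."""
    key = os.environ.get(var_name)
    if not key:
        raise RuntimeError("set %s before starting the service" % var_name)
    return key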
request.form['langSelect']\n\n transcriber = aai.Transcriber()\n\n transcriber.config = aai.TranscriptionConfig(language_code=language_code, disfluencies=True,\n punctuate=True, format_text=True)\n\n transcript = transcriber.transcribe(temp_file_path)\n\n return jsonify(transcript.text)\n\n else:\n return \"This endpoint only processes POST wav blob\"\n","repo_name":"chethan-gloify/lyrics-finder-be","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32428217961","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport os\nimport argparse\n\nimport numpy as np\nimport shutil\nimport random\nimport time\nimport math\nimport wget\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim.lr_scheduler import LambdaLR\nfrom torch.autograd import Variable\nimport torch.backends.cudnn as cudnn\nimport torch.nn.functional as F\nfrom torch.nn.modules.loss import _WeightedLoss\n\nimport torchvision\nfrom torchvision import datasets, models, transforms\nfrom sync_batchnorm import convert_model\n#from tensorboardX import SummaryWriter\n#import seaborn as sns\n#import matplotlib.pyplot as plt\n\nimport torch.nn.functional as F\n\nfrom ImageDataLoader import SimpleImageLoader, FixMatchImageLoader\nfrom models import Res18, Res50, Dense121, Res18_basic\nfrom randaugment import RandAugmentMC\n\nimport nsml\nfrom nsml import DATASET_PATH, IS_ON_NSML\n\nfrom crt import ClassAwareSampler\nfrom efficientnet_pytorch import EfficientNet\n\nNUM_CLASSES = 265\nif not IS_ON_NSML:\n DATASET_PATH = '/workspace/cs492h-ssl/meta/'\n \n\n\ndef top_n_accuracy_score(y_true, y_prob, n=5, normalize=True):\n num_obs, num_labels = y_prob.shape\n idx = num_labels - n - 1\n counter = 0\n argsorted = np.argsort(y_prob, axis=1)\n for i in range(num_obs):\n if y_true[i] in argsorted[i, idx+1:]:\n counter += 1\n if normalize:\n return counter * 1.0 / num_obs\n else:\n return counter\n \nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n \ndef adjust_learning_rate(opts, optimizer, epoch):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\n \"\"\"\n Linear Warmup.\n \"\"\"\n if epoch <= 5:\n lr = opts.lr + (0.4-0.03) * (epoch - 1) / 4\n elif epoch < 60:\n lr = 0.4\n elif epoch >= 60 and epoch < 120:\n lr = 0.4 * 0.1\n elif epoch >= 120 and epoch < 160:\n lr = 0.4 * (0.1 ** 2)\n elif epoch >= 160 and epoch < 200:\n lr = 0.4 * (0.1 ** 3)\n else:\n lr = 0.4 * (0.1 ** 4)\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n \ndef linear_rampup(current, rampup_length):\n if rampup_length == 0:\n return 1.0\n else:\n current = np.clip(current / rampup_length, 0.0, 1.0)\n return float(current)\n \nclass SemiLoss(object):\n def __call__(self, outputs_x, targets_x, outputs_u, targets_u, epoch, final_epoch):\n probs_u = torch.softmax(outputs_u, dim=1)\n Lx = -torch.mean(torch.sum(F.log_softmax(outputs_x, dim=1) * targets_x, dim=1))\n Lu = torch.mean((probs_u - targets_u)**2)\n return Lx, Lu, opts.lambda_u * linear_rampup(epoch, 
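# --- Worked example (illustrative, not in the original script): with
# top_n_accuracy_score as defined earlier in this record, the true label
# only has to appear among the n highest-scoring classes to count:
demo_true = np.array([0, 2])
demo_prob = np.array([[0.7, 0.2, 0.1],   # label 0 ranked 1st -> top-1 hit
                      [0.5, 0.3, 0.2]])  # label 2 ranked 3rd -> top-1 miss
assert top_n_accuracy_score(demo_true, demo_prob, n=1) == 0.5
assert top_n_accuracy_score(demo_true, demo_prob, n=3) == 1.0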
final_epoch)\n\ndef interleave_offsets(batch, nu):\n groups = [batch // (nu + 1)] * (nu + 1)\n for x in range(batch - sum(groups)):\n groups[-x - 1] += 1\n offsets = [0]\n for g in groups:\n offsets.append(offsets[-1] + g)\n assert offsets[-1] == batch\n return offsets\n\ndef interleave(xy, batch):\n nu = len(xy) - 1\n offsets = interleave_offsets(batch, nu)\n xy = [[v[offsets[p]:offsets[p + 1]] for p in range(nu + 1)] for v in xy]\n for i in range(1, nu + 1):\n xy[0][i], xy[i][i] = xy[i][i], xy[0][i]\n return [torch.cat(v, dim=0) for v in xy]\n\ndef split_ids(path, ratio):\n with open(path) as f:\n ids_l = [[] for i in range(265)]\n ids_u = []\n for i, line in enumerate(f.readlines()):\n if i == 0 or line == '' or line == '\\n':\n continue\n line = line.replace('\\n', '').split('\\t')\n if int(line[1]) >= 0:\n ids_l[int(line[1])].append(int(line[0]))\n else:\n ids_u.append(int(line[0]))\n \n train_ids = []\n val_ids = []\n\n for labels in ids_l:\n cut = int(ratio*len(labels))\n train_ids += labels[cut:]\n val_ids += labels[:cut]\n \n ids_u += train_ids\n ids_u = np.array(ids_u)\n train_ids = np.array(train_ids)\n val_ids = np.array(val_ids)\n\n perm1 = np.random.permutation(np.arange(len(train_ids)))\n perm2 = np.random.permutation(np.arange(len(val_ids)))\n train_ids = train_ids[perm1]\n val_ids = val_ids[perm2]\n\n return train_ids, val_ids, ids_u\n\ndef split_ids_original(path, ratio):\n with open(path) as f:\n ids_l = []\n ids_u = []\n for i, line in enumerate(f.readlines()):\n if i == 0 or line == '' or line == '\\n':\n continue\n line = line.replace('\\n', '').split('\\t')\n if int(line[1]) >= 0:\n ids_l.append(int(line[0]))\n else:\n ids_u.append(int(line[0]))\n\n ids_l = np.array(ids_l)\n ids_u = np.array(ids_u)\n\n perm = np.random.permutation(np.arange(len(ids_l)))\n cut = int(ratio*len(ids_l))\n train_ids = ids_l[perm][cut:]\n val_ids = ids_l[perm][:cut]\n ids_u = np.concatenate((ids_u, train_ids))\n\n return train_ids, val_ids, ids_u\n\n\nclass SmoothCrossEntropyLoss(_WeightedLoss):\n def __init__(self, weight=None, reduction='mean', smoothing=0.0):\n super().__init__(weight=weight, reduction=reduction)\n self.smoothing = smoothing\n self.weight = weight\n self.reduction = reduction\n\n @staticmethod\n def _smooth_one_hot(targets:torch.Tensor, n_classes:int, smoothing=0.0):\n assert 0 <= smoothing < 1\n with torch.no_grad():\n targets = torch.empty(size=(targets.size(0), n_classes),\n device=targets.device) \\\n .fill_(smoothing /(n_classes-1)) \\\n .scatter_(1, targets.data.unsqueeze(1), 1.-smoothing)\n return targets\n\n def forward(self, inputs, targets):\n targets = SmoothCrossEntropyLoss._smooth_one_hot(targets, inputs.size(-1),\n self.smoothing)\n lsm = F.log_softmax(inputs, -1)\n\n if self.weight is not None:\n lsm = lsm * self.weight.unsqueeze(0)\n\n loss = -(targets * lsm).sum(-1)\n\n if self.reduction == 'sum':\n loss = loss.sum()\n elif self.reduction == 'mean':\n loss = loss.mean()\n\n return loss\n### NSML functions\ndef _infer(model, root_path, test_loader=None):\n if test_loader is None:\n test_loader = torch.utils.data.DataLoader(\n SimpleImageLoader(root_path, 'test',\n transform=transforms.Compose([\n transforms.Resize(opts.imResize),\n transforms.CenterCrop(opts.imsize),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ])), batch_size=opts.batchsize*(opts.mu + 1), shuffle=False, num_workers=4, pin_memory=True)\n print('loaded {} test images'.format(len(test_loader.dataset)))\n\n outputs = []\n s_t = 
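# --- Worked example (illustrative): interleave_offsets above splits a
# batch into nu+1 nearly equal groups and returns cumulative boundaries;
# interleave then swaps the leading group across batches so BatchNorm sees
# a mix of labeled and unlabeled samples. For batch=10, nu=2:
assert interleave_offsets(10, 2) == [0, 3, 6, 10]   # group sizes 3, 3, 4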
time.time()\n for idx, image in enumerate(test_loader):\n if torch.cuda.is_available():\n image = image.cuda()\n probs = model(image)\n output = torch.argmax(probs, dim=1)\n output = output.detach().cpu().numpy()\n outputs.append(output)\n\n outputs = np.concatenate(outputs)\n return outputs\n\ndef bind_nsml(model):\n def save(dir_name, *args, **kwargs):\n os.makedirs(dir_name, exist_ok=True)\n state = model.state_dict()\n torch.save(state, os.path.join(dir_name, 'model.pt'))\n print('saved')\n\n def load(dir_name, *args, **kwargs):\n state = torch.load(os.path.join(dir_name, 'model.pt'))\n model.load_state_dict(state)\n print('loaded')\n\n def infer(root_path):\n return _infer(model, root_path)\n\n nsml.bind(save=save, load=load, infer=infer)\n\n\ndef get_cosine_schedule_with_warmup(optimizer,\n num_warmup_steps,\n num_training_steps,\n num_cycles=7./16.,\n last_epoch=-1):\n def _lr_lambda(current_step):\n no_progress = float(current_step) / \\\n float(max(1, num_training_steps - num_warmup_steps))\n return max(0., math.cos(math.pi * num_cycles * no_progress))\n return LambdaLR(optimizer, _lr_lambda)\n\n######################################################################\n# Options\n######################################################################\nparser = argparse.ArgumentParser(description='Sample Product200K Training')\nparser.add_argument('--start_epoch', type=int, default=1, metavar='N', help='number of start epoch (default: 1)')\nparser.add_argument('--epochs', type=int, default=300, metavar='N', help='number of epochs to train (default: 200)')\n\n# basic settings\nparser.add_argument('--name',default='HA_trial3', type=str, help='output model name')\n\nparser.add_argument('--gpu_ids',default='0', type=str,help='gpu_ids: e.g. 0 0,1,2 0,2')\nparser.add_argument('--batchsize', default=128, type=int, help='batchsize')\nparser.add_argument('--seed', type=int, default=123, help='random seed')\n\n# basic hyper-parameters\nparser.add_argument('--momentum', type=float, default=0.9, metavar='LR', help=' ')\nparser.add_argument('--lr', type=float, default=0.03, metavar='LR', help='learning rate (default: 5e-5)')\nparser.add_argument('--scheduler', type=int, default=1, metavar='LR', help='0: cosine, 1: multistep, 2: adjust learning rate')\n\nparser.add_argument('--imResize', default=256, type=int, help='')\nparser.add_argument('--imsize', default=224, type=int, help='')\nparser.add_argument('--lossXent', type=float, default=1, help='lossWeight for Xent')\n\n# arguments for logging and backup\nparser.add_argument('--log_interval', type=int, default=10, metavar='N', help='logging training status')\nparser.add_argument('--save_epoch', type=int, default=50, help='saving epoch interval')\n\n# hyper-parameters for fixmatch\nparser.add_argument('--lambda-u', default=1, type=float)\nparser.add_argument('--mu', default=1 , type=int, help=\"Batch ratio between labeled/unlabeled\")\nparser.add_argument('--threshold', type=float, default=0.85, help='Threshold setting for Fixmatch')\n\n\nparser.add_argument('--T', default=0.5, type=float)\nparser.add_argument('--smooth', type = int, default=0, help='use smoothcrossentropy loss')\n\nparser.add_argument('--val-iteration', type=int, default=100, help='Number of labeled data')\nparser.add_argument('--parser', type=int, default=2)\n\n### DO NOT MODIFY THIS BLOCK ###\n# arguments for nsml \nparser.add_argument('--pause', type=int, default=0)\nparser.add_argument('--mode', type=str, default='train')\n################################\n\ndef main():\n global 
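# --- Illustrative check (not in the original): the cosine schedule above
# decays the LR multiplier from 1.0 toward cos(7*pi/16) over training.
# Reproducing its lambda standalone for a quick sanity check:
import math

def cosine_lambda(step, total, warmup=0, num_cycles=7. / 16.):
    progress = float(step) / float(max(1, total - warmup))
    return max(0., math.cos(math.pi * num_cycles * progress))

assert cosine_lambda(0, 100) == 1.0
assert 0.19 < cosine_lambda(100, 100) < 0.20   # cos(7*pi/16) ~= 0.195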
opts\n opts = parser.parse_args()\n opts.cuda = 0\n\n# global writer\n# writer = SummaryWriter(\"runs/\"+opts.name)\n # Set GPU\n seed = opts.seed\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n\n os.environ['CUDA_VISIBLE_DEVICES'] = opts.gpu_ids\n use_gpu = torch.cuda.is_available()\n if use_gpu:\n opts.cuda = 1\n print(\"Currently using GPU {}\".format(opts.gpu_ids))\n cudnn.benchmark = True\n torch.cuda.manual_seed_all(seed)\n else:\n print(\"Currently using CPU (GPU is highly recommended)\")\n\n\n # Set model\n #model = Res50(NUM_CLASSES)\n if IS_ON_NSML:\n model = EfficientNet.from_pretrained('efficientnet-b0', num_classes=265).cuda()\n else:\n model = EfficientNet.from_pretrained('efficientnet-b4', num_classes=265).cuda()\n model = torch.nn.DataParallel(model) \n model = convert_model(model) \n\n parameters = filter(lambda p: p.requires_grad, model.parameters())\n n_parameters = sum([p.data.nelement() for p in model.parameters()])\n print(' + Number of params: {}'.format(n_parameters))\n\n if use_gpu:\n model.cuda()\n\n ### DO NOT MODIFY THIS BLOCK ###\n if IS_ON_NSML:\n bind_nsml(model)\n if opts.pause:\n nsml.paused(scope=locals())\n ################################\n \n '''\n if IS_ON_NSML:\n print(\"load our best checkpoint...\")\n url = \"https://docs.google.com/uc?export=download&id=1J7wlKlRpW_0Qm0vbDKXlmla4IpUsMhfF\"\n wget.download(url,'./')\n m = torch.load('./model.pt')\n model.load_state_dict(m)\n print(\"complete.\")\n '''\n \n if opts.mode == 'train':\n model.train()\n # Set dataloader\n if opts.parser == 1:\n train_ids, val_ids, unl_ids = split_ids_original(os.path.join(DATASET_PATH, 'train/train_label'), 0.1)\n if opts.parser >= 2:\n train_ids, val_ids, unl_ids = split_ids(os.path.join(DATASET_PATH, 'train/train_label'), 0.1)\n print('found {} train, {} validation and {} unlabeled images'.format(len(train_ids), len(val_ids), len(unl_ids)))\n \n crtSampler = ClassAwareSampler(data_source=DATASET_PATH + '/train/', ids=train_ids)\n if opts.parser != 3:\n train_loader = torch.utils.data.DataLoader(\n SimpleImageLoader(DATASET_PATH, 'train', train_ids,\n transform=transforms.Compose([\n transforms.Resize(opts.imResize),\n transforms.RandomResizedCrop(opts.imsize),\n transforms.RandomHorizontalFlip(),\n transforms.RandomVerticalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),])),\n batch_size=opts.batchsize, shuffle=True, num_workers=4, pin_memory=True, drop_last=True)\n else:\n train_loader = torch.utils.data.DataLoader(\n SimpleImageLoader(DATASET_PATH, 'train', train_ids,\n transform=transforms.Compose([\n transforms.Resize(opts.imResize),\n transforms.RandomResizedCrop(opts.imsize),\n transforms.RandomHorizontalFlip(),\n transforms.RandomVerticalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),])),\n batch_size=opts.batchsize, sampler=crtSampler, num_workers=4, pin_memory=True, drop_last=True)\n\n\n print('train_loader done')\n\n unlabel_loader = torch.utils.data.DataLoader(\n FixMatchImageLoader(DATASET_PATH, 'unlabel', unl_ids,\n transform=transforms.Compose([\n transforms.Resize(opts.imResize),\n transforms.RandomResizedCrop(opts.imsize),\n transforms.RandomHorizontalFlip(),\n transforms.RandomVerticalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),]),\n strong_transform=transforms.Compose([\n transforms.Resize(opts.imResize),\n 
transforms.RandomResizedCrop(opts.imsize),\n transforms.RandomHorizontalFlip(),\n transforms.RandomVerticalFlip(),\n RandAugmentMC(n=2, m=10),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),])), \n batch_size=opts.batchsize*opts.mu, shuffle=True, num_workers=4, pin_memory=True, drop_last=True)\n print('unlabel_loader done') \n\n crtSampler_val = ClassAwareSampler(data_source=DATASET_PATH + '/train/', ids=val_ids)\n if True:\n validation_loader = torch.utils.data.DataLoader(\n SimpleImageLoader(DATASET_PATH, 'val', val_ids,\n transform=transforms.Compose([\n transforms.Resize(opts.imResize),\n transforms.CenterCrop(opts.imsize),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),])),\n batch_size=opts.batchsize*(opts.mu + 1), num_workers=4, pin_memory=True, drop_last=False,\n shuffle=True)\n else:\n validation_loader = torch.utils.data.DataLoader(\n SimpleImageLoader(DATASET_PATH, 'val', val_ids,\n transform=transforms.Compose([\n transforms.Resize(opts.imResize),\n transforms.CenterCrop(opts.imsize),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),])),\n batch_size=opts.batchsize, num_workers=4, pin_memory=True, drop_last=False,\n sampler=crtSampler_val)\n print('validation_loader done')\n\n # Set optimizer\n #optimizer = optim.Adam(model.parameters(), lr=opts.lr)\n optimizer = optim.SGD(model.parameters(), lr=opts.lr, momentum=opts.momentum, nesterov=True, weight_decay=0.0001)\n fc_optimizer = optim.SGD(model.parameters(), lr=opts.lr, momentum=opts.momentum, nesterov=True, weight_decay=0.0001)\n\n\n # INSTANTIATE LOSS CLASS\n train_criterion = SemiLoss()\n\n iter_num = len(train_ids) // opts.batchsize \n # INSTANTIATE STEP LEARNING SCHEDULER CLASS\n if opts.scheduler == 0:\n scheduler = get_cosine_schedule_with_warmup(optimizer,0,iter_num * opts.epochs)\n elif opts.scheduler == 1:\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[60, 120, 160], gamma=0.1)\n else:\n scheduler = None # not used\n\n # Train and Validation \n best_acc = 0.0\n best_top5 = 0.0\n \n for epoch in range(opts.start_epoch, opts.epochs + 1):\n if epoch != 1:\n loss, train_top1, train_top5 = train(opts, train_loader, unlabel_loader, model, train_criterion, optimizer, epoch, use_gpu, scheduler)\n else:\n loss, train_top1, train_top5 = train(opts, train_loader, unlabel_loader, model, train_criterion, fc_optimizer, epoch, use_gpu, scheduler)\n\n print('start training')\n if opts.scheduler == 0:\n scheduler.step()\n elif opts.scheduler == 1:\n scheduler.step()\n else:\n adjust_learning_rate(opts, optimizer, epoch)\n \n\n print('start validation')\n acc_top1, acc_top5, cm = validation(opts, validation_loader, model, epoch, use_gpu)\n is_best = acc_top1 > best_acc\n is_best5 = acc_top5 > best_top5\n \n best_acc = max(acc_top1, best_acc)\n best_top5 = max(acc_top5, best_top5)\n if is_best:\n print('saving top1 best checkpoint...') \n if IS_ON_NSML:\n nsml.save(opts.name + '_best')\n else:\n torch.save(model.state_dict(), os.path.join('runs', opts.name + '_best.pth.tar'))\n '''\n print('saving confusion matrix...') \n plt.figure()\n sns.heatmap(cm,xticklabels = False, yticklabels=False)\n plt.xlabel(\"prediction\")\n plt.ylabel(\"True\")\n plt.savefig(\"./confusion_matrix/\"+opts.name+\"_epoch\"+str(epoch)+\"_\"+str(int(acc_top1))+\".png\")\n '''\n if is_best5:\n print('saving top5 best checkpoint')\n if IS_ON_NSML:\n nsml.save(opts.name + 
'_best5')\n else:\n torch.save(model.state_dict(), os.path.join('runs', opts.name + '_best5.pth.tar'))\n if (epoch + 1) % opts.save_epoch == 0:\n if IS_ON_NSML:\n nsml.save(opts.name + '_e{}'.format(epoch))\n else:\n torch.save(model.state_dict(), os.path.join('runs', opts.name + '_e{}.pth.tar'.format(epoch)))\n \n if IS_ON_NSML:\n nsml.report(summary={\n 'train__loss__epoch': loss,\n 'train__acc__top1': train_top1,\n 'train__acc__top5': train_top5,\n 'test__acc__top1': acc_top1,\n 'test__acc__top5': acc_top5},\n step=epoch)\n\n \ndef train(opts, train_loader, unlabel_loader, model, criterion, optimizer, epoch, use_gpu, scheduler):\n \n losses = AverageMeter()\n losses_x = AverageMeter()\n losses_un = AverageMeter()\n weight_scale = AverageMeter()\n acc_top1 = AverageMeter()\n acc_top5 = AverageMeter()\n avg_loss = 0.0\n avg_top1 = 0.0\n avg_top5 = 0.0\n \n model.train()\n if epoch == 1:\n for param in model.parameters():\n param.requires_grad = False\n if IS_ON_NSML:\n model._fc.weight.requires_grad = True\n model._fc.weight.requires_grad = True\n else:\n model.module._fc.weight.requires_grad = True\n model.module._fc.bias.requires_grad = True\n else:\n for param in model.parameters():\n param.requires_grad = True \n \n nCnt =0\n steps = (epoch - 1) * opts.val_iteration \n labeled_train_iter = iter(train_loader)\n unlabeled_train_iter = iter(unlabel_loader)\n \n for batch_idx in range(len(train_loader)):\n try:\n data = labeled_train_iter.next()\n inputs_x, targets_x = data\n except:\n labeled_train_iter = iter(train_loader) \n data = labeled_train_iter.next()\n inputs_x, targets_x = data\n try:\n data = unlabeled_train_iter.next()\n inputs_u1, inputs_u2 = data\n except:\n unlabeled_train_iter = iter(unlabel_loader) \n data = unlabeled_train_iter.next()\n inputs_u1, inputs_u2 = data \n \n batch_size = inputs_x.size(0)\n # Transform label to one-hot\n if use_gpu :\n inputs_x, targets_x = inputs_x.cuda(), targets_x.cuda()\n inputs_u1, inputs_u2 = inputs_u1.cuda(), inputs_u2.cuda() \n inputs_x, targets_x = Variable(inputs_x), Variable(targets_x)\n inputs_u1, inputs_u2 = Variable(inputs_u1), Variable(inputs_u2)\n\n l_size = inputs_x.size(0)\n u_size = inputs_u1.size(0)\n\n inputs_total = torch.cat([inputs_x, inputs_u1, inputs_u2])\n pred_total = model(inputs_total)\n\n with torch.no_grad():\n # compute guessed labels of unlabel samples\n pred_u1 = pred_total[l_size:l_size+u_size]\n pseudo_label = torch.softmax(pred_u1.detach(), dim=-1)\n max_probs, targets_u = torch.max(pseudo_label, dim=-1)\n mask = max_probs.ge(opts.threshold).float()\n \n optimizer.zero_grad()\n if opts.smooth == 0:\n x_criterion = nn.CrossEntropyLoss().cuda()\n u_criterion = nn.CrossEntropyLoss(reduction='none').cuda() \n else:\n x_criterion = SmoothCrossEntropyLoss(smoothing=0.1).cuda()\n u_criterion = SmoothCrossEntropyLoss(reduction='none', smoothing=0.1).cuda()\n\n Lx = x_criterion(pred_total[:l_size], targets_x)\n Lu = (u_criterion(pred_total[l_size+u_size:], targets_u) * mask).mean()\n\n loss = Lx + opts.lambda_u * Lu\n \n # compute gradient and do SGD step\n loss.backward()\n optimizer.step()\n if opts.scheduler == 0:\n scheduler.step()\n \n losses.update(loss.item(), inputs_x.size(0))\n losses_x.update(Lx.item(), inputs_x.size(0))\n losses_un.update(Lu.item(), inputs_x.size(0))\n \n with torch.no_grad():\n # compute guessed labels of unlabel samples\n pred_x1 = model(inputs_x)\n\n acc_top1b = top_n_accuracy_score(targets_x.data.cpu().numpy(), pred_x1.data.cpu().numpy(), n=1)*100\n acc_top5b = 
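# --- Illustrative sketch (standalone, mirroring the masking in train()
# above): FixMatch keeps only unlabeled samples whose weak-augmentation
# prediction is confident enough; the rest contribute zero loss via the mask:
import torch

demo_logits = torch.tensor([[4.0, 0.1, 0.1],   # confident -> kept
                            [1.0, 0.9, 0.8]])  # diffuse -> masked out
demo_probs = torch.softmax(demo_logits, dim=-1)
max_probs, pseudo_targets = torch.max(demo_probs, dim=-1)
mask = max_probs.ge(0.85).float()   # 0.85 matches the --threshold default
assert mask.tolist() == [1.0, 0.0]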
top_n_accuracy_score(targets_x.data.cpu().numpy(), pred_x1.data.cpu().numpy(), n=5)*100 \n acc_top1.update(torch.as_tensor(acc_top1b), inputs_x.size(0)) \n acc_top5.update(torch.as_tensor(acc_top5b), inputs_x.size(0)) \n \n avg_loss += loss.item()\n avg_top1 += acc_top1b\n avg_top5 += acc_top5b \n \n if batch_idx % opts.log_interval == 0:\n print('Train Epoch:{} [{}/{}] Loss:{:.4f}({:.4f}) Top-1:{:.2f}%({:.2f}%) Top-5:{:.2f}%({:.2f}%) '.format( \n epoch, batch_idx *inputs_x.size(0), len(train_loader.dataset), losses.val, losses.avg, acc_top1.val, acc_top1.avg, acc_top5.val, acc_top5.avg)) \n '''\n if IS_ON_NSML:\n nsml.report(summary={\n 'train__loss': loss.item(),\n 'train__lossx': Lx.item(),\n 'train__lossunlabel': Lu.item(),\n 'train__acctop1': acc_top1b,\n 'train__acctop5': acc_top5b},\n step=steps) \n '''\n steps += 1\n nCnt += 1 \n \n avg_loss = float(avg_loss/nCnt)\n avg_top1 = float(avg_top1/nCnt)\n avg_top5 = float(avg_top5/nCnt)\n \n return avg_loss, avg_top1, avg_top5 \n\n\n\ndef validation(opts, validation_loader, model, epoch, use_gpu):\n model.eval()\n avg_top1= 0.0\n avg_top5 = 0.0\n nCnt =0\n steps = (epoch - 1) * len(validation_loader)\n cm = np.zeros((NUM_CLASSES,NUM_CLASSES))\n\n with torch.no_grad():\n for batch_idx, data in enumerate(validation_loader):\n inputs, labels = data\n if use_gpu :\n inputs = inputs.cuda()\n inputs = Variable(inputs)\n steps+=1\n nCnt +=1\n preds = model(inputs)\n\n acc_top1 = top_n_accuracy_score(labels.numpy(), preds.data.cpu().numpy(), n=1)*100\n acc_top5 = top_n_accuracy_score(labels.numpy(), preds.data.cpu().numpy(), n=5)*100\n avg_top1 += acc_top1\n avg_top5 += acc_top5\n '''\n if IS_ON_NSML:\n nsml.report(summary={\n 'test_step__acc_val_top1': acc_top1,\n 'test_step__acc_val_top5': acc_top5},\n step=steps) \n '''\n _, outputs = torch.max(preds.data.cpu(), 1)\n #print(labels.numpy())\n #print(outputs.numpy())\n for t, p in zip(labels.numpy(), outputs.numpy()):\n cm[t,p] +=1\n \n \n avg_top1 = float(avg_top1/nCnt) \n avg_top5= float(avg_top5/nCnt)\n cm = cm.astype(float)/cm.sum(axis=1)[:,np.newaxis]\n\n print('Test Epoch:{} Top1_acc_val:{:.2f}% Top5_acc_val:{:.2f}% '.format(epoch, avg_top1, avg_top5))\n return avg_top1, avg_top5, cm\n\n\n\nif __name__ == '__main__':\n main()\n\n\n","repo_name":"JongSuk1/SSL-Fixmatch","sub_path":"train_fixmatch.py","file_name":"train_fixmatch.py","file_ext":"py","file_size_in_byte":27963,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"10033150144","text":"import itertools\nimport logging\nimport json\n\nfrom yosai.core import (\n EVENT_TOPIC,\n SerializationManager,\n UnauthorizedException,\n authz_abcs,\n realm_abcs,\n serialize_abcs,\n)\n\nimport collections\n\nlogger = logging.getLogger(__name__)\n\n\nclass Permission:\n \"\"\"\n In this example, the first token is the *domain* that is being operated on\n and the second token is the *action* that is performed. Each level can contain\n multiple values. Given support for multiple values, you could simply grant\n a user the permission 'blogpost:view,edit,create', granting the user\n access to perform ``view``, ``edit``, and ``create`` actions in the ``blogpost``\n *domain*. 
Then you could check whether the user has the ``'blogpost:create'``\n permission by calling:::\n\n subject.is_permitted(['blogpost:create'])\n\n (which would return true)\n\n In addition to granting multiple permissions using a single string, you can\n grant all permission for a particular level:\n\n * If you want to grant a user permission to perform all actions in the\n ``blogpost`` domain, you could simply grant the user ``'blogpost:*'``.\n With this permission granted, any permission check for ``'blogpost:XXX'```\n will return ``True``.\n\n * It is also possible to use the wildcard token at the domain\n level (or both levels), granting a user the ``'view'`` action across all\n domains: ``'*:view'``.\n\n\n Instance-level Access Control\n -----------------------------\n Another usage of ``Permission`` is to model instance-level\n Access Control Lists (ACLs). In this scenario, you use three tokens:\n * the first token is the *domain*\n * the second token is the *action*\n * the third token is the *instance* that is acted upon (target)\n\n For example, suppose you grant a user ``'blogpost:edit:12,13,18'``.\n In this example, assume that the third token contains system identifiers of\n blogposts. That would allow the user to edit blogpost with id ``12``, ``13``, and ``18``.\n Representing permissions in this manner is an extremely powerful way to\n express permissions as you can state permissions like:\n *``'blogpost:*:13'``, granting a user permission to perform all actions for blogpost ``13``,\n *``'blogpost:view,create,edit:*'``, granting a user permission to ``view``, ``create``, or ``edit`` *any* blogpost\n *``'blogpost:*:*'``, granting a user permission to perform *any* action on *any* blogpost\n\n To perform checks against these instance-level permissions, the application\n should include the instance ID in the permission check like so:::\n\n subject.is_permitted(['blogpost:edit:13'])\n \"\"\"\n\n WILDCARD_TOKEN = '*'\n PART_DIVIDER_TOKEN = ':'\n SUBPART_DIVIDER_TOKEN = ','\n\n def __init__(self, wildcard_perm=None, parts=None):\n if wildcard_perm:\n parts = iter(self.partify(wildcard_perm))\n try:\n self.domain = next(parts)\n self.actions = next(parts)\n self.targets = next(parts)\n except StopIteration:\n raise ValueError(\"Permission cannot identify required parts from string\")\n else:\n self.domain = set([parts.get('domain', self.WILDCARD_TOKEN)])\n self.actions = set(parts.get('actions', self.WILDCARD_TOKEN))\n self.targets = set(parts.get('targets', self.WILDCARD_TOKEN))\n\n def partify(self, wildcard_perm):\n return [set(a.strip() for a in y.split(self.SUBPART_DIVIDER_TOKEN))\n for y in [x[0] if x[0] else x[1]\n for x in itertools.zip_longest(\n wildcard_perm.split(self.PART_DIVIDER_TOKEN),\n [self.WILDCARD_TOKEN] * 3)\n ]\n ]\n\n def implies(self, permission):\n if self.domain != {self.WILDCARD_TOKEN}:\n if self.domain != permission.domain:\n return False\n\n if self.actions != {self.WILDCARD_TOKEN}:\n if not self.actions >= permission.actions:\n return False\n\n if self.targets != {self.WILDCARD_TOKEN}:\n if not self.actions >= permission.actions:\n return False\n\n return True\n\n @staticmethod\n def get_domain(wildcard_perm):\n domain = wildcard_perm.split(Permission.PART_DIVIDER_TOKEN)[0].strip()\n if not domain:\n return Permission.WILDCARD_TOKEN\n return domain\n\n\nclass DefaultPermissionVerifier:\n\n def is_permitted_from_str(self, required, assigned):\n required_perm = Permission(wildcard_perm=required)\n for perm_str in assigned:\n assigned_perm = 
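# --- Usage sketch (illustrative, exercising the Permission class above):
# 'blogpost:view,edit' covers the 'edit' action for any target, but not
# other actions or domains. (Note: the third clause of implies() compares
# actions a second time where it appears to mean targets, so target
# restrictions are not actually enforced by this implementation.)
granted = Permission(wildcard_perm='blogpost:view,edit')
assert granted.implies(Permission(wildcard_perm='blogpost:edit:13'))
assert not granted.implies(Permission(wildcard_perm='blogpost:create'))
assert not granted.implies(Permission(wildcard_perm='account:edit'))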
Permission(wildcard_perm=perm_str)\n if assigned_perm.implies(required_perm):\n return True\n return False\n\n def is_permitted_from_json(self, required, assigned):\n required = Permission(wildcard_perm=required)\n the_parts = json.loads(assigned.decode('utf-8'))\n for parts in the_parts:\n assigned_perm = Permission(parts=parts)\n if assigned_perm.implies(required):\n return True\n return False\n\n\nclass ModularRealmAuthorizer(authz_abcs.Authorizer):\n\n \"\"\"\n A ModularRealmAuthorizer is an Authorizer implementation that consults\n one or more configured Realms during an authorization operation.\n\n :type realms: Tuple\n \"\"\"\n def __init__(self):\n self.realms = None\n self.event_bus = None\n\n def init_realms(self, realms):\n \"\"\"\n :type realms: tuple\n \"\"\"\n # this eliminates the need for an authorizing_realms attribute:\n self.realms = tuple(realm for realm in realms\n if isinstance(realm, realm_abcs.AuthorizingRealm))\n self.register_cache_clear_listener()\n\n def assert_realms_configured(self):\n if (not self.realms):\n msg = (\"Configuration error: No realms have been configured! \"\n \"One or more realms must be present to execute an \"\n \"authorization operation.\")\n raise ValueError(msg)\n\n # Yosai refactors isPermitted and hasRole extensively, making use of\n # generators and sub-generators so as to optimize processing w/ each realm\n # and improve code readability\n\n # new to Yosai:\n def _has_role(self, identifiers, role_s):\n \"\"\"\n :type identifiers: subject_abcs.IdentifierCollection\n :type role_s: Set of String(s)\n \"\"\"\n for realm in self.realms:\n # the realm's has_role returns a generator\n yield from realm.has_role(identifiers, role_s)\n\n # new to Yosai:\n def _is_permitted(self, identifiers, permission_s):\n \"\"\"\n :type identifiers: subject_abcs.IdentifierCollection\n\n :param permission_s: a collection of 1..N permissions\n :type permission_s: List of permission string(s)\n \"\"\"\n\n for realm in self.realms:\n # the realm's is_permitted returns a generator\n yield from realm.is_permitted(identifiers, permission_s)\n\n def is_permitted(self, identifiers, permission_s, log_results=True):\n \"\"\"\n Yosai differs from Shiro in how it handles String-typed Permission\n parameters. Rather than supporting *args of String-typed Permissions,\n Yosai supports a list of Strings. Yosai remains true to Shiro's API\n while determining permissions a bit more pythonically. This may\n be refactored later.\n\n :param identifiers: a collection of identifiers\n :type identifiers: subject_abcs.IdentifierCollection\n\n :param permission_s: a collection of 1..N permissions\n :type permission_s: List of permission string(s)\n\n :param log_results: states whether to log results (True) or allow the\n calling method to do so instead (False)\n :type log_results: bool\n\n :returns: a set of tuple(s), containing the Permission and a Boolean\n indicating whether the permission is granted\n \"\"\"\n self.assert_realms_configured()\n\n results = collections.defaultdict(bool) # defaults to False\n\n is_permitted_results = self._is_permitted(identifiers, permission_s)\n\n for permission, is_permitted in is_permitted_results:\n # permit expected format is: (Permission, Boolean)\n # As long as one realm returns True for a Permission, that Permission\n # is granted. 
Given that (True or False == True), assign accordingly:\n results[permission] = results[permission] or is_permitted\n\n if log_results:\n self.notify_event(identifiers,\n list(results.items()),\n 'AUTHORIZATION.RESULTS')\n\n results = set(results.items())\n return results\n\n # yosai.core.refactored is_permitted_all to support ANY or ALL operations\n def is_permitted_collective(self, identifiers,\n permission_s, logical_operator):\n \"\"\"\n :param identifiers: a collection of Identifier objects\n :type identifiers: subject_abcs.IdentifierCollection\n\n :param permission_s: a collection of 1..N permissions\n :type permission_s: List of Permission object(s) or String(s)\n\n :param logical_operator: indicates whether all or at least one\n permission check is true (any)\n :type: any OR all (from python standard library)\n\n :returns: a Boolean\n \"\"\"\n self.assert_realms_configured()\n\n # interim_results is a set of tuples:\n interim_results = self.is_permitted(identifiers, permission_s,\n log_results=False)\n\n results = logical_operator(is_permitted for perm, is_permitted\n in interim_results)\n\n if results:\n self.notify_event(identifiers,\n permission_s,\n 'AUTHORIZATION.GRANTED',\n logical_operator)\n else:\n self.notify_event(identifiers,\n permission_s,\n 'AUTHORIZATION.DENIED',\n logical_operator)\n\n return results\n\n # yosai.core.consolidates check_permission functionality to one method:\n def check_permission(self, identifiers, permission_s, logical_operator):\n \"\"\"\n like Yosai's authentication process, the authorization process will\n raise an Exception to halt further authz checking once Yosai determines\n that a Subject is unauthorized to receive the requested permission\n\n :param identifiers: a collection of identifiers\n :type identifiers: subject_abcs.IdentifierCollection\n\n :param permission_s: a collection of 1..N permissions\n :type permission_s: List of Permission objects or Strings\n\n :param logical_operator: indicates whether all or at least one\n permission check is true (any)\n :type: any OR all (from python standard library)\n\n :raises UnauthorizedException: if any permission is unauthorized\n \"\"\"\n self.assert_realms_configured()\n permitted = self.is_permitted_collective(identifiers,\n permission_s,\n logical_operator)\n if not permitted:\n msg = \"Subject lacks permission(s) to satisfy logical operation\"\n raise UnauthorizedException(msg)\n\n # yosai.core.consolidates has_role functionality to one method:\n def has_role(self, identifiers, role_s, log_results=True):\n \"\"\"\n :param identifiers: a collection of identifiers\n :type identifiers: subject_abcs.IdentifierCollection\n\n :param role_s: a collection of 1..N Role identifiers\n :type role_s: Set of String(s)\n\n :param log_results: states whether to log results (True) or allow the\n calling method to do so instead (False)\n :type log_results: bool\n\n :returns: a set of tuple(s), containing the role and a Boolean\n indicating whether the user is a member of the Role\n \"\"\"\n self.assert_realms_configured()\n\n results = collections.defaultdict(bool) # defaults to False\n\n for role, has_role in self._has_role(identifiers, role_s):\n # checkrole expected format is: (role, Boolean)\n # As long as one realm returns True for a role, a subject is\n # considered a member of that Role.\n # Given that (True or False == True), assign accordingly:\n results[role] = results[role] or has_role\n\n if log_results:\n self.notify_event(identifiers,\n list(results.items()),\n 'AUTHORIZATION.RESULTS') # 
before freezing\n results = set(results.items())\n return results\n\n def has_role_collective(self, identifiers, role_s, logical_operator):\n \"\"\"\n :param identifiers: a collection of identifiers\n :type identifiers: subject_abcs.IdentifierCollection\n\n :param role_s: a collection of 1..N Role identifiers\n :type role_s: Set of String(s)\n\n :param logical_operator: indicates whether all or at least one\n permission check is true (any)\n :type: any OR all (from python standard library)\n\n :returns: a Boolean\n \"\"\"\n self.assert_realms_configured()\n\n # interim_results is a set of tuples:\n interim_results = self.has_role(identifiers, role_s, log_results=False)\n\n results = logical_operator(has_role for role, has_role\n in interim_results)\n\n if results:\n self.notify_event(identifiers,\n list(role_s),\n 'AUTHORIZATION.GRANTED',\n logical_operator)\n else:\n self.notify_event(identifiers,\n list(role_s),\n 'AUTHORIZATION.DENIED',\n logical_operator)\n\n return results\n\n def check_role(self, identifiers, role_s, logical_operator):\n \"\"\"\n :param identifiers: a collection of identifiers\n :type identifiers: subject_abcs.IdentifierCollection\n\n :param role_s: 1..N role identifiers\n :type role_s: a String or Set of Strings\n\n :param logical_operator: indicates whether all or at least one\n permission check is true (any)\n :type: any OR all (from python standard library)\n\n :raises UnauthorizedException: if Subject not assigned to all roles\n \"\"\"\n self.assert_realms_configured()\n has_role_s = self.has_role_collective(identifiers,\n role_s, logical_operator)\n if not has_role_s:\n msg = \"Subject does not have role(s) assigned.\"\n raise UnauthorizedException(msg)\n\n # --------------------------------------------------------------------------\n # Event Communication\n # --------------------------------------------------------------------------\n\n def session_clears_cache(self, items=None, topic=EVENT_TOPIC):\n try:\n identifier = items.identifiers.primary_identifier\n for realm in self.realms:\n realm.clear_cached_authorization_info(identifier)\n except AttributeError:\n msg = ('Could not clear authz_info from cache after event. '\n 'items: ' + str(items))\n logger.warn(msg)\n\n def authc_clears_cache(self, identifier=None, topic=EVENT_TOPIC):\n try:\n for realm in self.realms:\n realm.clear_cached_authorization_info(identifier)\n except AttributeError:\n msg = ('Could not clear authc_info from cache after event. 
'\n 'identifiers: ' + identifiers)\n logger.warn(msg)\n\n def register_cache_clear_listener(self):\n\n try:\n self.event_bus.subscribe(self.session_clears_cache, 'SESSION.STOP')\n self.event_bus.isSubscribed(self.session_clears_cache, 'SESSION.STOP')\n self.event_bus.subscribe(self.session_clears_cache, 'SESSION.EXPIRE')\n self.event_bus.isSubscribed(self.session_clears_cache, 'SESSION.EXPIRE')\n self.event_bus.subscribe(self.authc_clears_cache, 'AUTHENTICATION.SUCCEEDED')\n self.event_bus.isSubscribed(self.authc_clears_cache, 'AUTHENTICATION.SUCCEEDED')\n\n except AttributeError:\n msg = \"Authorizer failed to register listeners to event bus\"\n logger.debug(msg)\n\n def notify_event(self, identifiers, items, topic, logical_operator=None):\n try:\n self.event_bus.sendMessage(topic,\n identifiers=identifiers,\n items=items,\n logical_operator=logical_operator)\n\n except AttributeError:\n msg = \"Could not publish {} event\".format(topic)\n raise AttributeError(msg)\n\n # --------------------------------------------------------------------------\n\n def __repr__(self):\n return (\"ModularRealmAuthorizer(realms={0})\".\n format(self.realms))\n","repo_name":"YosaiProject/yosai","sub_path":"yosai/core/authz/authz.py","file_name":"authz.py","file_ext":"py","file_size_in_byte":17677,"program_lang":"python","lang":"en","doc_type":"code","stars":585,"dataset":"github-code","pt":"61"} +{"seq_id":"32892775517","text":"import psycopg2 as db\nimport numpy as np\n\npages_names = {}\nlink_count = {}\n\n\ndef connect_to_db():\n return db.connect(host=\"localhost\", database=\"crawldb\", user=\"postgres\", password=\"admin\")\n\n\ndef get_pages_for_site(site_id):\n conn = connect_to_db()\n cur = conn.cursor()\n cur.execute(\"SELECT p.id, p.site_id, p.url FROM crawldb.page p WHERE p.site_id = \" + str(site_id))\n row = cur.fetchone()\n\n pages = []\n while row is not None:\n pages.append(row)\n row = cur.fetchone()\n cur.close()\n return pages\n\n\ndef get_pages_count_for_site(site_id):\n conn = connect_to_db()\n cur = conn.cursor()\n cur.execute(\"SELECT COUNT(*) FROM crawldb.page p WHERE p.site_id = \" + str(site_id))\n row = cur.fetchone()\n\n return row[0]\n\n\ndef get_linked_pages(pages):\n\n str_pages = \"\"\n for page in pages:\n str_pages += str(page) + \", \"\n str_pages = str_pages[:-2]\n\n conn = connect_to_db()\n cur = conn.cursor()\n cur.execute(\"SELECT l.to_page FROM crawldb.link l WHERE l.from_page IN (\" + str_pages + \")\")\n row = cur.fetchone()\n\n linked_pages = []\n while row is not None:\n linked_pages.append(row)\n row = cur.fetchone()\n cur.close()\n\n cur = conn.cursor()\n cur.execute(\"SELECT l.from_page FROM crawldb.link l WHERE l.to_page IN (\" + str_pages + \")\")\n row = cur.fetchone()\n\n while row is not None:\n linked_pages.append(row)\n row = cur.fetchone()\n cur.close()\n\n return linked_pages\n\n\ndef get_linked_pages_for_page(page_id):\n conn = connect_to_db()\n cur = conn.cursor()\n cur.execute(\"SELECT l.to_page FROM crawldb.link l WHERE l.from_page = \" + str(page_id))\n row = cur.fetchone()\n\n linked_pages = []\n while row is not None:\n linked_pages.append(row[0])\n row = cur.fetchone()\n cur.close()\n return linked_pages\n\n\ndef get_sites_for_pages(pages):\n print(\"getting sites for pages\")\n str_pages = \"\"\n for page in pages:\n str_pages += str(page) + \", \"\n str_pages = str_pages[:-2]\n\n conn = connect_to_db()\n cur = conn.cursor()\n cur.execute(\"SELECT p.site_id FROM crawldb.page p WHERE p.id IN (\" + str_pages + \") GROUP BY p.site_id\")\n row = 
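# --- Illustrative alternative (not part of the original module): the IN
# clauses above are assembled by string concatenation; psycopg2 can expand
# a tuple parameter instead, which also rules out SQL injection:
def get_sites_for_pages_param(pages):
    conn = connect_to_db()
    cur = conn.cursor()
    cur.execute(
        "SELECT p.site_id FROM crawldb.page p WHERE p.id IN %s "
        "GROUP BY p.site_id",
        (tuple(int(p) for p in pages),),  # psycopg2 adapts tuples to (a, b, ...)
    )
    rows = cur.fetchall()
    cur.close()
    return [r[0] for r in rows]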
cur.fetchone()\n\n linked_sites = []\n while row is not None:\n linked_sites.append(row[0])\n row = cur.fetchone()\n cur.close()\n\n return linked_sites\n\n\ndef get_sites():\n conn = connect_to_db()\n cur = conn.cursor()\n cur.execute(\"SELECT s.id, s.domain FROM crawldb.site s WHERE s.id IN \"\n \"(SELECT p.site_id FROM crawldb.page p GROUP BY p.site_id)\")\n\n print(\"number of sites with at least one visited page: \", cur.rowcount)\n row = cur.fetchone()\n sites = []\n while row is not None:\n sites.append(row)\n row = cur.fetchone()\n\n cur.close()\n return sites\n\n\ndef get_site_by_id(id):\n conn = connect_to_db()\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM crawldb.site s WHERE s.id = \" + str(id))\n\n return cur.fetchone()\n\n\ndef get_connections_between_sites():\n sites = get_sites()\n site_map = {}\n for site in sites:\n pages_for_site = np.array(get_pages_for_site(site[0]))\n linked_pages = np.array(get_linked_pages(pages_for_site[:,0]))\n linked_sites = get_sites_for_pages(linked_pages[:,0])\n site_map[site[0]] = linked_sites\n return site_map\n\n","repo_name":"tvenko/crawler","sub_path":"crawler visualization/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":3368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29822197071","text":"import urllib.request as urllib2\r\nfrom bs4 import BeautifulSoup\r\nimport csv\r\nimport gspread\r\nimport json\r\nfrom oauth2client.client import SignedJwtAssertionCredentials\r\nfrom datetime import datetime\r\nfrom forex_python.converter import CurrencyRates\r\n\r\nconv = CurrencyRates()\r\nusdinr = conv.get_rate('USD', 'INR')\r\nprint (usdinr)\r\n\r\ndt = datetime.now()\r\nhours = dt.hour\r\nminute = dt.minute\r\nday = dt.day\r\nmonth = dt.month\r\n\r\nwith open('index.csv', 'a') as csv_file:\r\n writer = csv.writer(csv_file)\r\n writer.writerow([usdinr , hours,minute])\r\n \r\njson_key = json.load(open('cred.json')) # json credentials you downloaded earlier\r\nscope = ['https://spreadsheets.google.com/feeds',\r\n 'https://www.googleapis.com/auth/drive']\r\ncredentials = SignedJwtAssertionCredentials(json_key['client_email'], json_key['private_key'].encode(), scope) # get email and key from creds\r\nfile = gspread.authorize(credentials) # authenticate with Google\r\nsheet = file.open(\"indexpy\").sheet1 # open sheet\r\nrow = [usdinr,day,month,hours,minute]\r\nindex = 2\r\nsheet.insert_row(row, index)\r\n\r\n","repo_name":"keshavk2910/pythonfirst","sub_path":"web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2491920692","text":"import itertools\n \nm=int(input())\ntotal_ways = set()\nabsent_ways = set()\nfor i in range(1, m+1):\n in_str = 'P'*i + 'A'*(m-i)\n in_lst = list(in_str)\n permutations = list(itertools.permutations(in_lst))\n result = set([''.join(permutation) for permutation in permutations])\n for k in result:\n if 'AAAA' not in k:\n total_ways.add(k)\n if k[-1] == 'A':\n absent_ways.add(k)\nprint(len(total_ways))\nprint(len(absent_ways),'/',len(total_ways), sep='')\n","repo_name":"kunal96/Problem","sub_path":"soln.py","file_name":"soln.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18150760517","text":"from calendar import c\nimport sys\nsys.stdin = open('input.txt', \"r\")\n'''\n왼쪽위 1,1\n맨위랑 맨 아래랑 
연결되어있음\n오른쪽 왼쪽도 연결되어있음\n\n비바라기를 시전하면 \n왼쪽아래 2X2 비구름이 생성됨\n\ni번쨰 이동 명령은 방향 d와 거리 s로 이루어져있음\n\n'''\n\n# 모든 구름이 di방향으로 si칸 이동한다.\n# 각 구름에서 비가 내려 구름이 있는 칸의 바구니에 저장된 물이 1 증가한다\n# 구름이 모두 사라진다\n# 물이 증가한 칸에 물복사버그 마법을 시전함\n'''\n대각선 방향으로 거리가 1인 칸에 물이 있는 바구니의 수 만큼 바구니의 물의 양이 증가한다\n'''\n# 바구니에 저장된 물의 양이 2 이상인 모든 칸에 구름이 생기고, 물의 양이 2 줄어든다. 이때 구름이 생기는 곳은 전에 구름이 사라진 칸이 아니여야함.\n\ndirect = [(0, -1), (-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1)]\nN, M = map(int, sys.stdin.readline().rstrip().split(' '))\nboard = []\nfor _ in range(N):\n board.append(list(map(int, sys.stdin.readline().rstrip().split(' '))))\n\n# 비구름 배열을 만들어놓고 각자 조정한다\n# 처음 구름은 왼쪽 아래에 4칸 존재하기때문에 초기화\ncloud = [[N-1, 0], [N-2, 0], [N-1, 1], [N-2, 1]]\n\n\ndef printBoard():\n for i in board:\n print(i)\n print()\n\n\nfor _ in range(M):\n\n d, s = map(int, sys.stdin.readline().rstrip().split(' '))\n d -= 1\n checkCloud = set()\n\n for idx in range(len(cloud)):\n # 이동\n cloud[idx][0] += direct[d][0] * s\n cloud[idx][1] += direct[d][1] * s\n # 넘어가는거 이어줘야함\n cloud[idx][0] %= N\n cloud[idx][1] %= N\n # 도착한곳에 물 증가 +1\n board[cloud[idx][0]][cloud[idx][1]] += 1\n checkCloud.add((cloud[idx][0], cloud[idx][1]))\n\n addWater = []\n for y, x in cloud:\n cnt = 0\n # 물복사 마법 시전\n for addy, addx in [[-1, -1], [-1, 1], [1, -1], [1, 1]]:\n ty = y+addy\n tx = x+addx\n\n if 0 <= ty < N and 0 <= tx < N and board[ty][tx] > 0:\n cnt += 1\n addWater.append([y, x, cnt])\n cloud = []\n for y, x, cnt in addWater:\n board[y][x] += cnt\n\n for y in range(N):\n for x in range(N):\n if board[y][x] >= 2 and (y, x) not in checkCloud:\n board[y][x] -= 2\n cloud.append([y, x])\n\nanswer = 0\nfor i in board:\n answer += sum(i)\nprint(answer)\n","repo_name":"aver1001/Problem-Solving","sub_path":"풀이 완료/21610/acmicpc.py","file_name":"acmicpc.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18833186487","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.stats import norm\n\nfrom pyprobml_utils import save_fig\n\nx = np.linspace(-3, 3, 100)\ny = norm.pdf(x)\nf = norm.cdf(x)\n\nplt.figure()\nplt.plot(x, f)\nplt.title('CDF')\nsave_fig('gaussianCDF.pdf')\nplt.show()\n\nplt.figure()\nplt.plot(x, y)\nsave_fig('gaussianPDF.pdf')\nplt.show()\n\nx_sep_left = norm.ppf(0.025)\nx_sep_right = norm.ppf(0.975)\nx_fill_left = np.linspace(-3, x_sep_left, 100)\nx_fill_right = np.linspace(x_sep_right, 3, 100)\nplt.fill_between(x_fill_left,\n norm.pdf(x_fill_left),\n color='b')\nplt.fill_between(x_fill_right,\n norm.pdf(x_fill_right),\n color='b')\nplt.annotate(r'$\\alpha/2$', xy=(x_sep_left, norm.pdf(x_sep_left)),\n xytext=(-2.5, 0.1),\n arrowprops=dict(facecolor='k'))\nplt.annotate(r'$1-\\alpha/2$', xy=(x_sep_right, norm.pdf(x_sep_right)),\n xytext=(2.5, 0.1),\n arrowprops=dict(facecolor='k'))\nplt.ylim([0, 0.5])\nsave_fig('gaussianQuantile.pdf')\nplt.show()\n","repo_name":"dwsmith1983/probabilistic_ml_solutions","sub_path":"chapter_3/code/quantile_plot.py","file_name":"quantile_plot.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"17948617773","text":"\"\"\"A setuptools based setup module.\"\"\"\nfrom os import path\n\nfrom setuptools import find_packages, setup\n\nhere = path.abspath(path.dirname(__file__))\n\ninstall_requires = [\n \"flask\",\n \"flask-login >= 0.5\",\n \"flask-migrate\",\n \"flask-wtf\",\n \"pandas\",\n \"dash\",\n 
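# --- Usage note (illustrative): with the extras_require mapping defined
# below, the optional dependency groups install on demand, e.g.:
#     pip install -e .[dev]    # black, flake8, pre-commit, selenium
#     pip install -e .[test]   # pytest + coverage only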
\"humanize\",\n]\n\n\ntests_require = [\"pytest\", \"coverage\"]\n\nextras_require = {\"dev\": [\"black\", \"flake8\", \"pre-commit\", \"selenium\"], \"test\": tests_require}\n\nsetup(\n name=\"Thalia\",\n version=\"0.3.0\",\n packages=find_packages(),\n install_requires=install_requires,\n extras_require=extras_require,\n)\n","repo_name":"maradude/Thalia-clean","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41194024063","text":"#!/bin/bash\n\n\"\"\":\"\npython_cmd=\"python3\"\npython -V >/dev/null 2>&1 && python_cmd=\"python\"\nexec ${python_cmd} $0 ${1+\"$@\"}\n\"\"\"\n\nfrom __future__ import print_function\n\nimport datetime\nfrom _py2with3compatibility import HTTPError\nfrom os.path import dirname, abspath\nfrom socket import setdefaulttimeout\nimport sys\n\nfrom cms_static import (\n GH_CMSSW_ORGANIZATION,\n GH_CMSSW_REPO,\n)\nfrom github_utils import (\n get_git_tag,\n create_git_tag,\n get_commits,\n find_tags,\n)\n\nfrom categories import CMSSW_L1\n\nsetdefaulttimeout(120)\nSCRIPT_DIR = dirname(abspath(sys.argv[0]))\n\n\nif __name__ == \"__main__\":\n from optparse import OptionParser\n\n parser = OptionParser(\n usage=\"%prog [-n|--dry-run] [-N|--release-name] [-d|--day] [-H|--hour] [-b|--branch]\"\n )\n parser.add_option(\n \"-n\",\n \"--dry-run\",\n dest=\"dryRun\",\n action=\"store_true\",\n help=\"Do not modify Github\",\n default=False,\n )\n parser.add_option(\n \"-N\",\n \"--release-name\",\n dest=\"release_name\",\n action=\"store\",\n help=\"CMSSW Release name\",\n )\n parser.add_option(\n \"-d\", \"--date\", dest=\"date\", action=\"store\", help=\"CMSSW IB date (YYYY-MM-DD)\"\n )\n parser.add_option(\"-H\", \"--hour\", dest=\"hour\", action=\"store\", help=\"CMSSW IB hour (HH)\")\n parser.add_option(\n \"-M\", \"--minute\", dest=\"minute\", action=\"store\", help=\"CMSSW IB minute (MM)\", default=\"00\"\n )\n parser.add_option(\"-b\", \"--branch\", dest=\"branch\", action=\"store\", help=\"CMSSW branch\")\n parser.add_option(\"-q\", \"--queue\", dest=\"queue\", action=\"store\", help=\"CMSSW IB queue\")\n opts, args = parser.parse_args()\n\n RELEASE_NAME = opts.release_name # \"CMSSW_13_0_X_2023-02-02-1100\"\n ib_date = datetime.datetime.strptime(\n \"%s %s:%s\" % (opts.date, opts.hour, opts.minute), \"%Y-%m-%d %H:%M\"\n )\n\n RELEASE_BRANCH = opts.branch # \"master\"\n QUEUE = opts.queue # \"CMSSW_13_0_X\"\n\n repo = \"%s/%s\" % (GH_CMSSW_ORGANIZATION, GH_CMSSW_REPO)\n\n try:\n ref = get_git_tag(repo, RELEASE_NAME)\n HEAD_SHA = ref[\"object\"][\"sha\"]\n except HTTPError:\n commits_ = get_commits(repo, RELEASE_BRANCH, until=ib_date, per_page=100)\n if not commits_:\n sys.exit(1)\n\n head = None\n for commit_ in commits_:\n if commit_[\"commit\"][\"committer\"][\"name\"] == \"GitHub\" and commit_[\"commit\"][\"author\"][\n \"name\"\n ] in (CMSSW_L1 + [\"cmsbuild\"]):\n head = commit_\n break\n\n if head is None:\n sys.exit(1)\n\n HEAD_SHA = head[\"sha\"]\n if not opts.dryRun:\n create_git_tag(\n repo,\n RELEASE_NAME,\n HEAD_SHA,\n )\n\n tags = find_tags(repo, QUEUE + \"_20\")\n RELEASE_LIST = [\n t[\"ref\"].replace(\"refs/tags/\", \"\") for t in tags if t[\"object\"][\"sha\"] == HEAD_SHA\n ]\n print(\" 
\".join(RELEASE_LIST[::-1]))\n","repo_name":"cms-sw/cms-bot","sub_path":"ib-create-tag.py","file_name":"ib-create-tag.py","file_ext":"py","file_size_in_byte":2966,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"24767590105","text":"import pyfiglet \nimport sys \nimport time\n\nfont_list = pyfiglet.Figlet().getFonts()\n\n\ndef show_fonts():\n \n for f in font_list:\n print(\"=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-==-=-\\n\",\"Font Name: \",f)\n pyfiglet.print_figlet(f,f)\n return font_list \n\n# def font_validity_check():\n # if in font_list:\n\n\ndef create_art():\n user_text = input(\"Please enter the text you want to work with:\")\n Font= \"\"\n \n while Font == \"\":\n Font = input(\"\"\"\n ──────────────────────────────────────────────────────────────\n if you want to see the available fonts type 'show fonts'\n ──────────────────────────────────────────────────────────────\n please enter the font name:\"\"\")\n \n if Font.upper() == \"SHOW FONTS\":\n show_fonts()\n Font = \"\"\n continue\n elif Font not in font_list:\n print(\" invalid font !!! try again \")\n Font = \"\"\n continue\n\n while True:\n try:\n width = int(input(\" Please enter your font width size:\"))\n if width < 10:\n print(\" >>>Please a bigger value than this<<<\")\n continue\n break\n except ValueError:\n print(\" Please enter a valid integer!!\")\n \n \n \n Thy_Art = pyfiglet.figlet_format(user_text,Font,justify= \"centre\" ,width=width)\n \n return Thy_Art\n\n\n\ndef exit_programm():\n pyfiglet.print_figlet(\"sayonara\", font=\"speed\")\n time.sleep(2)\n sys.exit()\n \n\ndef ArtGen():\n\n menu = \"\"\"\n┌──────────────────────────────────────────────────────────────┐\n│ ARTGEN MENU │\n├──────────────────────────────────────────────────────────────┤\n│ │\n│ 1.SHOW FONTS │\n│ 2.CREATE ART │\n│ 3.EXIT │\n│ │\n└──────────────────────────────────────────────────────────────┘\n\"\"\"\n print(menu)\n while True:\n user_input = input(\"Enter a command or type 'exit' to quit the program: \")\n if user_input.upper() == \"SHOW FONTS\":\n show_fonts()\n elif user_input.upper() == \"CREATE ART\":\n print(create_art())\n elif user_input.upper() == \"EXIT\":\n exit_programm()\n elif user_input.upper() == \"MENU\":\n print(menu)\n else:\n print('invalid input!!!')\n\n\n\nArtGen()","repo_name":"pokewizardSAM/phyton-programming","sub_path":"summer project/ascii_art@sameer_sahu-v1.0.py","file_name":"ascii_art@sameer_sahu-v1.0.py","file_ext":"py","file_size_in_byte":3348,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"27649621264","text":"#!/usr/bin/python\n\nfrom collections import namedtuple\nfrom socket import *\nimport re\nimport logging\nimport time\nimport os\nimport sys\n\nSOCKET = 7002\n\nRE_FILE = \"^.*GET \\/(.*) HTTP\"\nRE_HTTP_VER = \"^.*HTTP\\/(.{3})\"\n\nlogging.basicConfig(filename = \"log.log\", level = logging.DEBUG)\n\n# Reply Header\n\nclass HTTPHeader:\n def reset(self):\n self.ver = \"1.1\"\n self.response = 200\n self.responseDescription = \"OK\"\n self.contentType = \"text/html\"\n self.numOfBytes = 0;\n self.dataToSend = \"\"\n \n def __init__(self):\n self.reset()\n \nheader = HTTPHeader()\n\n# Utilities\n\ndef matchRegex(s, exp):\n match = re.search(exp, s)\n if (match is not None):\n return match.group(1)\n else:\n return \"\"\n\ndef log(data, addr):\n date = time.strftime(\"%d/%B/%Y:%H:%M:%S\") + \" -0400\"\n\n get = re.search(\"GET.*\", data)\n if get is not None:\n 
get = get.group()[:-1]\n    else:\n        get = \"\"\n\n    referrer = matchRegex(data, \"Referer: (.*)\")\n    if referrer != \"\":\n        referrer = referrer[:-1]\n    else:\n        referrer = \"\"\n\n    agent = matchRegex(data, \"User-Agent: (.*)\")\n    if agent != \"\":\n        agent = agent[:-1]\n    else:\n        agent = \"\"\n\n    try:\n        msg = ('%s - [%s] %s %d %d %s %s' %\n               (addr[0],\n                date,\n                \"\" if get == \"\" else '\"' + get + '\"',\n                header.response,\n                header.numOfBytes,\n                \"\" if referrer == \"\" else '\"' + referrer + '\"',\n                \"\" if agent == \"\" else '\"' + agent + '\"'));\n        logging.info(msg)\n    except:\n        pass\n\ndef sendData(client):\n    try:\n        client.send(\"HTTP/%s %d %s\\nContent-Type: %s\\n\\n\" %\n                    (header.ver,\n                     header.response,\n                     header.responseDescription,\n                     header.contentType))\n\n        if header.dataToSend != \"\":\n            client.send(header.dataToSend)\n\n        client.send(\"%s: %s\" % (header.response, header.responseDescription))\n        \n        header.reset()\n    except:\n        pass\n\n# HTTP Version\n\ndef handleHTTPVersion(data, client):\n    header.ver = matchRegex(data, RE_HTTP_VER)\n\n# File\n\ndef handleFile(data, client):\n    path = matchRegex(data, RE_FILE)\n    if (not path == \"\"):\n        try:\n            sendReply = False\n            if path.endswith(\".png\"):\n                mimetype = \"image/png\"\n                sendReply = True\n            elif path.endswith(\".jpg\"):\n                mimetype = \"image/jpg\"\n                sendReply = True\n            elif path.endswith(\".html\"):\n                mimetype = \"text/html\"\n                sendReply = True\n            \n            if sendReply:\n                f = open(path)\n                header.dataToSend = f.read()\n                f.close()\n\n                header.contentType = mimetype;\n                header.response = 200\n                header.responseDescription = \"OK\"\n                header.numOfBytes = os.path.getsize(path)\n            else:\n                header.response = 415\n                header.responseDescription = \"Unsupported Media Type\"\n        except:\n            header.response = 404\n            header.responseDescription = \"File Not Found\"\n    else:\n        header.contentType = \"text/html\"\n        header.response = 200\n        header.responseDescription = \"OK\"\n\ndef handleGET(client, addr):\n    data = client.recv(4096)\n    header = HTTPHeader()\n    handleFile(data, client)\n    handleHTTPVersion(data, client)\n    log(data, addr)\n\ndef doAll(client, addr):\n    handleGET(client, addr)\n    sendData(client)\n\n# Main\n\ndef main():\n    s = socket()\n    s.bind(('localhost', SOCKET))\n    s.listen(5)\n    while(True):\n        c, addr = s.accept()\n        pid = os.fork()\n        if (pid == 0):\n            doAll(c, addr)\n            sendReply = True;\n            c.close()\n            sys.exit(0)\n        c.close()\n\nmain()\n","repo_name":"cohenadair/assignments","sub_path":"CS442/HW5-WebServer/web-server.py","file_name":"web-server.py","file_ext":"py","file_size_in_byte":3920,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"5521791495","text":"#Below is implementation of Heap sort, both recursively and iteratively\r\n\r\n#recursive\r\ndef heapify(A, i, heapSize):\r\n    largest = i\r\n    l = 2 * i + 1\r\n    r = 2 * i + 2\r\n\r\n    if l < heapSize and A[l] > A[i]:\r\n        largest = l\r\n\r\n    if r < heapSize and A[r] > A[largest]:\r\n        largest = r\r\n\r\n    if largest != i:\r\n        temp = A[i]\r\n        A[i] = A[largest]\r\n        A[largest] = temp\r\n\r\n        heapify(A, largest, heapSize)\r\n\r\n\r\ndef buildHeap(A):\r\n    heapSize = len(A)\r\n    for i in range(heapSize // 2 -1, -1, -1):\r\n        heapify(A, i, heapSize)\r\n\r\n\r\ndef heapSort(A):\r\n    heapSize = len(A)\r\n    buildHeap(A)\r\n\r\n\r\n    for i in range(heapSize-1, 0, -1):\r\n        temp = A[i]\r\n        A[i] = A[0]\r\n        A[0] = temp\r\n        heapify(A, 0, i)\r\n\r\n\r\n#iterative\r\ndef iBuildHeap(A, heapSize):\r\n    for i in range(heapSize):\r\n        if A[i] > A[int((i-1) / 2)]:\r\n            j = i\r\n\r\n            while 
A[j] > A[int((j - 1) / 2)]:\r\n                temp = A[j]\r\n                A[j] = A[int((j - 1) / 2)]\r\n                A[int((j - 1) / 2)] = temp\r\n\r\n                j = int((j-1)/2)\r\n\r\n\r\ndef iHeapSort(A, heapSize):\r\n    iBuildHeap(A, heapSize)\r\n\r\n    for i in range(heapSize-1, 0, -1):\r\n        temp = A[0]\r\n        A[0] = A[i]\r\n        A[i] = temp\r\n\r\n        bigChild = -1\r\n        j = 0\r\n        while bigChild < i:\r\n\r\n            bigChild = 2*j+1\r\n\r\n            if bigChild < i-1 and A[bigChild] < A[bigChild+1]:\r\n                bigChild+=1\r\n\r\n            if bigChild < i and A[j] < A[bigChild]:\r\n                temp = A[j]\r\n                A[j] = A[bigChild]\r\n                A[bigChild] = temp\r\n\r\n            j = bigChild\r\n\r\n\r\nfile = open(\"tosort.txt\")\r\n\r\narr = []\r\n\r\nfor line in file:\r\n    arr.append(int(line))\r\nprint(arr)\r\nheapSort(arr)\r\nprint(arr)\r\n\r\nfile.close()\r\n\r\nfile = open(\"sorted.txt\", \"w\")\r\n\r\nfor i in arr:\r\n    file.write(str(i))\r\n    file.write(\"\\n\")\r\n\r\nfile.close()\r\n\r\nfile = open(\"tosort.txt\")\r\n\r\narr = []\r\n\r\nfor line in file:\r\n    arr.append(int(line))\r\nprint(arr)\r\niHeapSort(arr, len(arr))\r\nprint(arr)\r\n\r\nfile.close()\r\n\r\nfile = open(\"isorted.txt\", \"w\")\r\n\r\nfor i in arr:\r\n    file.write(str(i))\r\n    file.write(\"\\n\")\r\n\r\nfile.close()\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"WoolyAndWooden/Algorytmy-i-Struktury-Danych","sub_path":"Heap Sort/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"11265326163","text":"user_input = int(input(\"Give me a number: \"))\n\nfor num in range(1, user_input):\n    #or you can use num % 15 == 0\n    if num % 3 == 0 and num % 5 == 0:\n        print(\"FizzBuzz\")\n    elif num % 3 == 0:\n        print(\"Fizz\")\n    elif num % 5 == 0:\n        print(\"Buzz\")\n    else:\n        print(num)","repo_name":"alisonlauren/friyay","sub_path":"fizzbuzz.py","file_name":"fizzbuzz.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"31158965548","text":"import pymunk\nimport cv2\nimport random\nimport sys\nimport os\n\nglobal Ball1Pos, Ball2Pos\n\n\ndef main(moving, speed, static, color):\n\n    global Ball1Pos, Ball2Pos\n\n    # Create a pymunk space with zero gravity\n    space = pymunk.Space()\n    space.gravity = 0, 0\n\n    CurlingSize = 8\n\n    def getRandomPos():\n        return (random.randint(60, 220), random.randint(400, 580))\n\n    Ball1Init = moving or (random.randint(80, 200), 300)\n    Ball2Init = static or [getRandomPos() for i in range(3)]\n\n    # Ball2Init = [(121, 312), (130, 354), (76, 410), (138, 445)]\n\n    # [(121, 312), (130, 354), (76, 410), (138, 445)]\n\n    # Create circular rigid bodies: the moving ball and the stationary balls\n    ball1 = pymunk.Body(1, 10)  # mass 1, moment of inertia 10\n    ball1.position = Ball1Init  # initial position\n\n    ball2 = []\n\n    for i in Ball2Init:\n        ballTemp = pymunk.Body(1, 10)  # mass 1, moment of inertia 10\n        ballTemp.position = i  # initial position\n        ballTemp.inertia = 0.5\n        ball2.append(ballTemp)\n\n    # Create circle shapes and attach them to the rigid bodies\n    shape1 = pymunk.Circle(ball1, CurlingSize)  # radius = CurlingSize\n\n    shape2 = []\n\n    for i in ball2:\n        shapeTemp = pymunk.Circle(i, CurlingSize)  # radius = CurlingSize\n        shape2.append(shapeTemp)\n        space.add(i, shapeTemp)\n\n    # Add the moving ball's body and shape to the space\n    space.add(ball1, shape1)\n\n    def getRandomColor():\n        t = random.randint(0, 1)\n        return ((0, 0, 255), (0, 0, 255)) if t == 0 else ((255, 255, 0), (0, 255, 255))\n\n    # Give the moving ball an initial velocity toward the stationary balls\n    ball1.velocity = speed or (random.randint(-20, 20), 200)\n    shape1.elasticity = 0.8\n\n    for i in shape2:\n        i.elasticity = 0.8\n\n    # Simulation timestep: the time interval per update\n    dt = 0.01\n\n    # Load a black background image\n    src = 
cv2.imread(\"black.png\")\n\n cv2.imshow(\"Simulation\", src)\n\n Ball1Pos = Ball1Init\n Ball2Pos = Ball2Init\n\n ballColor = color or [getRandomColor() for i in Ball2Init]\n\n def printXY(event, x, y, flags, param):\n if event == cv2.EVENT_LBUTTONDOWN:\n print(x, y)\n\n cv2.setMouseCallback('Simulation', printXY)\n\n def render():\n\n global Ball1Pos, Ball2Pos\n\n Ball1CurrentPos = (int(ball1.position.x), int(ball1.position.y))\n Ball2CurrentPos = []\n\n for i, j, k, l in zip(Ball2Init, ball2, Ball2Pos, ballColor):\n t = (int(j.position.x), int(j.position.y))\n Ball2CurrentPos.append(t)\n cv2.circle(img, t, CurlingSize, l[1], -1)\n cv2.circle(img, t, CurlingSize, (0, 0, 0), 2)\n cv2.line(src, k, t, l[0], 2)\n\n # 在图像上绘制两个球的位置,颜色为白色,线宽为-1(表示填充)\n cv2.circle(img, Ball1CurrentPos, CurlingSize, (0, 255, 255), -1)\n cv2.circle(img, Ball1CurrentPos, CurlingSize, (0, 0, 0), 2)\n\n cv2.line(src, Ball1Pos, Ball1CurrentPos, (255, 255, 0), 2)\n\n Ball1Pos = Ball1CurrentPos\n Ball2Pos = Ball2CurrentPos\n\n # 显示图像,并等待10毫秒的按键\n cv2.imshow(\"Simulation\", img)\n\n waitKey = 1\n\n # out = cv2.VideoWriter('out.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 60, (src.shape[1], src.shape[0]))\n\n # 循环更新空间对象,模拟物理运动\n for i in range(90):\n img = src.copy()\n\n for i in range(1):\n space.step(dt)\n\n render()\n # out.write(img)\n\n key = cv2.waitKey(int(waitKey))\n\n waitKey *= 1.01\n\n print(key)\n\n while key == 112:\n ballColor = [getRandomColor() for i in Ball2Init]\n render()\n key = cv2.waitKey(0)\n\n if key == 113:\n break\n if key == 114:\n exit(0)\n\n key = cv2.waitKey(0)\n # out.release()\n\n\ndef label2color(label):\n res = []\n for i in label:\n if (i == 1):\n res.append(((0, 0, 255), (0, 0, 255)))\n else:\n res.append(((255, 255, 0), (0, 255, 255)))\n\n return res\n\n\nmain(moving=(100, 200),\n speed=(10, 200),\n static=[(76, 436), (60, 475), (106, 575), (74, 541), (83, 360)],\n color=label2color([1, 2, 2, 1, 2]))\n","repo_name":"Howardzhangdqs/curling_public","sub_path":"PhysicalSimulation/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24435598437","text":"import os\nimport secrets\n\nimport pytest\nimport contextlib\nimport reg_server\nfrom sys_util import run_shell\n\n\ndef setup_environ(flask_app):\n # update running environment to load custom file\n os.environ[reg_server.DEFAULT_CONFIG_VAR] = 'tests/.env'\n os.environ['FLASK_APP'] = flask_app\n\n\n@pytest.fixture\ndef reg_client():\n setup_environ('reg_server:create_app()')\n test_app = reg_server.create_app()\n \n with test_app.test_client() as client:\n with test_app.app_context():\n yield client\n\n\n@pytest.fixture\ndef reg_get_clients():\n first_port = 5000\n def set_new_address():\n nonlocal first_port\n setup_environ('reg_server:create_app()')\n os.environ['NODE_CHAIN_STORAGE_PATH'] = f'{secrets.token_hex(3)}.json'\n os.environ['MAIL_CHAIN_STORAGE_PATH'] = f'{secrets.token_hex(3)}.json'\n test_app = reg_server.create_app(first_port)\n first_port += 1\n return test_app.app_context()\n\n with contextlib.ExitStack() as stack:\n yield [stack.enter_context(set_new_address())\n for _ in range(3)]\n\n@pytest.fixture\ndef temp_file():\n file = f'{secrets.token_hex(3)}.tmp'\n yield file\n os.unlink(file)\n 
","repo_name":"yanmarques/vpnGate","sub_path":"old_stuff/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"8494986558","text":"\nfrom setuptools import setup, find_packages\nfrom os import path\nimport re\n\npackage_name=\"onnx2tf\"\nroot_dir = path.abspath(path.dirname(__file__))\n\nwith open(\"README.md\") as f:\n long_description = f.read()\n\nwith open(path.join(root_dir, package_name, '__init__.py')) as f:\n init_text = f.read()\n version = re.search(r'__version__\\s*=\\s*[\\'\\\"](.+?)[\\'\\\"]', init_text).group(1)\n\nsetup(\n name=package_name,\n version=version,\n description=\\\n \"Self-Created Tools to convert ONNX files (NCHW) to TensorFlow/TFLite/Keras format (NHWC). \"+\n \"The purpose of this tool is to solve the massive Transpose extrapolation problem in onnx-tensorflow (onnx-tf).\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Katsuya Hyodo\",\n author_email=\"rmsdh122@yahoo.co.jp\",\n url=\"https://github.com/PINTO0309/onnx2tf\",\n license=\"MIT License\",\n packages=find_packages(exclude=['test*','json_samples']),\n platforms=[\"linux\", \"unix\"],\n python_requires=\">=3.8\",\n entry_points={\n 'console_scripts': [\n \"onnx2tf=onnx2tf:main\"\n ]\n }\n)\n","repo_name":"PINTO0309/onnx2tf","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":410,"dataset":"github-code","pt":"61"} +{"seq_id":"24350704667","text":"import requests\n\n# Webull API endpoint and authentication details\nAPI_ENDPOINT = 'https://api.webull.com'\n\n# Replace with your actual API keys and credentials\nAPI_KEY = 'YOUR_API_KEY'\nAPI_SECRET = 'YOUR_API_SECRET'\nTOKEN = 'YOUR_AUTH_TOKEN'\n\n# Example function to place a trade\ndef place_trade(symbol, quantity, side):\n headers = {\n 'Accept': 'application/json',\n 'Authorization': f'Bearer {TOKEN}'\n }\n\n payload = {\n 'symbol': symbol,\n 'quantity': quantity,\n 'action': side # 'buy' or 'sell'\n }\n\n response = requests.post(f'{API_ENDPOINT}/trading/secure/placeOrder', headers=headers, json=payload)\n\n if response.status_code == 200:\n print('Trade placed successfully.')\n else:\n print('Failed to place trade.')\n\n# Example usage\nsymbol = 'AAPL'\nquantity = 10\nside = 'buy'\nplace_trade(symbol, quantity, side)\n","repo_name":"yassineuav/python_master","sub_path":"connect_trade_bot_to_webull.py","file_name":"connect_trade_bot_to_webull.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24120957144","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# - Exercici 1\n# Crea una llista que agrupi els mesos de l’any en trimestres (1T: gener, febrer i març; 2T: abril, maig, juny...), és a dir, una llista amb 4 llistes dins.\n# \n\n# In[10]:\n\n\nAny = ['PrimerTrimestre', 'SegonTrimetre', 'TercerTrimetre',' QuartTrimestre']\nprint(Any)\n\n\n# In[11]:\n\n\nPrimerTrimestre = ['Gener, Febrer, Marc']\nSegonTrimetre = ['Abril, Maig, Juny']\nTercerTrimetre = ['Juliol, Agost, Setembre']\nQuartTrimestre = ['Octubre, Novembre, Decembre']\n\n\n# In[20]:\n\n\nAny = [PrimerTrimestre, SegonTrimetre, TercerTrimetre,QuartTrimestre]\n\n\n# In[15]:\n\n\nAny\n[['Gener', 'Febrer', 'Marc'],\n['Abril', 'Maig', 
'Juny'],\n['Juliol','Agost','Setembre'],\n['Octubre','Novembre','Decembre']]\n\n\n# In[27]:\n\n\nAny [0]\n\n\n# In[54]:\n\n\nprint(Any[0])\n\n\n# - Exercise 2\n# Write code that lets you access:\n# o\tThe second month of the first quarter.\n# o\tThe months of the first quarter.\n# o\tSeptember and October.\n# \n\n# In[62]:\n\n\nPrimerTrimestre = ['Gener', 'Febrer', 'Marc']\nSegonTrimetre = ['Abril', 'Maig', 'Juny']\nTercerTrimetre = ['Juliol', 'Agost', 'Setembre']\nQuartTrimestre = ['Octubre', 'Novembre', 'Decembre']\n\n\n# In[63]:\n\n\nAny = [PrimerTrimestre, SegonTrimetre, TercerTrimetre,QuartTrimestre]\n\n\n# In[65]:\n\n\nprint (Any)\n \n\n\n# The months of the first quarter.\n\n# In[66]:\n\n\nAny [0]\n\n\n# The second month of the first quarter\n\n# In[67]:\n\n\nAny[0][1]\n\n\n# September and October\n\n# In[69]:\n\n\nAny [2][2], Any [3][0]\n\n","repo_name":"Cons88/Tasca-M2-T01","sub_path":"M2 T01 Exercici 1 - 2.py","file_name":"M2 T01 Exercici 1 - 2.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"ca","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"23613209011","text":"\nfin = open('input.txt', 'r')\nfout = open('output.txt', 'w')\nn = int(fin.readline())\nfor i in range(n):\n    st = fin.readline()\n    t = st.split()\n    t_t = 0\n    robots = {'O':[1,0,0], 'B':[1,0,0]}\n    prev = t[1]\n    for j in range(int(t[0])): \n        robot = t[2*j+1]\n        loc = int(t[2*j+2])\n        robots[robot][1] = 0\n        if robot != prev: robots[robot][2]=0\n        temp_t = abs(loc - robots[robot][0])\n        if (robot != prev):\n            if ((temp_t - robots[prev][2])>0):\n                temp_t -= robots[prev][2]\n            else:\n                temp_t = 0\n        temp_t += 1\n        t_t += temp_t\n        robots[robot][0] = loc\n        robots[robot][1] = temp_t\n        robots[robot][2] += temp_t\n        prev = robot\n    fout.write('Case #'+str(i+1)+': '+str(t_t)+'\\n')\n\nfin.close()\nfout.close()\n\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_74/1231.py","file_name":"1231.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"3997193577","text":"from . 
import common\nfrom .manga import __manga_id, __manga_id_required\n\n\n__url = {\n 'required': False,\n 'type': 'string',\n 'regex': common.url_regex,\n}\n__url_required = common.required(__url)\n\n\n__location = {\n 'required': False,\n 'type': 'string',\n}\n__location_required = common.required(__location)\n\n__state = {\n 'required': False,\n 'type': 'string',\n 'allowed': ['ready', 'downloading', 'downloaded', 'parsing', 'done', 'error', 'ignore'],\n}\n__state_required = common.required(__state)\n\n__file_id = {\n 'required': False,\n 'type': 'integer',\n 'coerce': int,\n 'min': 0,\n}\n__file_id_required = common.required(__file_id)\n\n\ncreate = {\n 'manga_id': __manga_id_required,\n 'url': __url_required,\n 'location': __location,\n 'state': __state,\n}\n\n\nread = {\n 'file_id': __file_id_required,\n}\n\n\nupdate = {\n 'file_id': __file_id_required,\n 'manga_id': __manga_id,\n 'url': __url,\n 'location': __location,\n 'state': __state,\n}\n\n\ndelete = {\n 'file_id': __file_id_required,\n}\n\n\nindex = {\n 'manga_id': __manga_id,\n 'state': __state,\n}\n","repo_name":"antonpaquin/Homulili","sub_path":"src/backend/flask/validator/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13549189962","text":"import usb1\nfrom ctypes import *\n\nVID = 0x09db\nPID = 0x0082\nBLINK_LED = c_byte(0x40)\n\nwith usb1.USBContext() as context:\n handle = context.openByVendorIDAndProductID(VID, PID, skip_on_error = True,)\n if handle is None:\n print (\"Device not found.\")\n exit(1)\n print (\"Device found.\")\n\n if handle.kernelDriverActive(0):\n handle.detachKernelDriver(0)\n handle.claimInterface(0)\n\n if handle.kernelDriverActive(1):\n handle.detachKernelDriver(1)\n handle.claimInterface(1)\n\n if handle.kernelDriverActive(2):\n handle.detachKernelDriver(2)\n handle.claimInterface(2)\n\n if handle.kernelDriverActive(3):\n handle.detachKernelDriver(3)\n handle.claimInterface(3)\n\n reportID = BLINK_LED\n request_type = usb1.TYPE_CLASS|usb1.RECIPIENT_INTERFACE|usb1.ENDPOINT_OUT\n request = 0x09 \n wValue = (2 << 8) | 0x40\n wIndex = 0 \n\n handle._controlTransfer(request_type, request, wValue, wIndex, byref(reportID), 1, 5000)\n\n exit(0)\n","repo_name":"Sovichea/mccdaq-pi3","sub_path":"Examples/test_blink.py","file_name":"test_blink.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"41969760877","text":"import numpy as np\nimport torch\nfrom replay_buffer import ReplayBuffer\nfrom networks import Qnet\nfrom torch.utils.tensorboard import SummaryWriter\n\nclass DQN_Agent():\n def __init__(self, gamma, epsilon, eps_end, eps_decay, lr, n_state, hidden, n_action, batch_size, max_size, device):\n self.gamma = gamma\n self.epsilon = epsilon\n self.eps_end = eps_end\n self.eps_decay = eps_decay\n self.lr = lr\n self.n_state = n_state\n self.n_action = n_action\n self.hidden = hidden\n self.batch_size = batch_size\n self.replay_buffer = ReplayBuffer(max_size)\n self.device = device\n self.steps = 0 # to document the training loss\n\n self.policy_net = Qnet(self.n_state, self.n_action, self.hidden).to(self.device)\n self.target_net = Qnet(self.n_state, self.n_action, self.hidden).to(self.device)\n self.target_net.load_state_dict(self.policy_net.state_dict())\n\n self.optimizer = torch.optim.Adam(self.policy_net.parameters(), lr=self.lr)\n self.loss_fn = 
torch.nn.MSELoss()\n    \n    def choose_action(self, state):\n        sample = np.random.random_sample() # [0.0, 1.0)\n        # eps_threshold = self.eps_end + (self.epsilon - self.eps_end) * np.exp(-1.* step/self.eps_decay)\n        eps_threshold = self.epsilon\n        if sample > eps_threshold:\n            with torch.no_grad():\n                action = self.policy_net(torch.tensor(state, device=self.device, dtype=torch.float32)).argmax().item()\n            return action\n        else: # randomly\n            return np.random.randint(0, self.n_action)\n    \n    def train(self):\n        if len(self.replay_buffer.storage) < self.batch_size:\n            return # replay buffer is not enough\n        \n        states, actions, rewards, next_states, dones = self.replay_buffer.sample(self.batch_size)\n        states = torch.tensor(states, dtype=torch.float32).to(self.device)\n        actions = torch.tensor(actions, dtype=torch.int64).to(self.device)\n        rewards = torch.tensor(rewards, dtype=torch.float32).to(self.device)\n        next_states = torch.tensor(next_states, dtype=torch.float32).to(self.device)\n        dones = torch.tensor(dones, dtype=torch.float32).to(self.device)\n\n        Q = self.policy_net(states).gather(1, actions.unsqueeze(1)).squeeze(1)\n        next_Q = self.target_net(next_states).max(1)[0] # the maximum next Q value\n        expected_Q = rewards + self.gamma * next_Q * (1 - dones)\n\n        loss = self.loss_fn(Q, expected_Q.detach()) \n        self.optimizer.zero_grad()\n        loss.backward()\n        self.optimizer.step()\n        self.steps += 1\n    \n    def updatae_target(self):\n        self.target_net.load_state_dict(self.policy_net.state_dict())\n    \n    def save_model(self, model_path):\n        torch.save(self.target_net.state_dict(), model_path)","repo_name":"Hu-Hanyang/My-RL-Learning","sub_path":"8_Implementation/learners.py","file_name":"learners.py","file_ext":"py","file_size_in_byte":2858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"5434724323","text":"#A company wants to manage its client portfolio. Write a program that stores the clients in a dictionary or literal object holding:\n#NIF (string), first name (string), surnames (string), phone (string), email (string) and preferred status (boolean)\n#The program must show the following options for the user to choose from:\n#(1) Add a client\n#(2) Delete a client by NIF\n#(3) Show a client by NIF\n#(4) List ALL clients\n#(5) Show ONLY the preferred clients\n#(6) End the program\n\nimport csv\n\nclientes = {}\nclientes_preferentes = {}\n\nopcion_elegida = input('(1) Añadir un cliente \\n(2) Eliminar cliente \\n(3) Mostrar Cliente \\n(4) Listar TODOS los clientes \\n(5) Mostrar ÚNICAMENTE los clientes preferentes \\n(6) Finalizar Programa \\nElija una de las opciones mostradas:')\n\nwhile opcion_elegida != '6':\n\n    #If option 1 is chosen, we add a new dictionary to the clients dictionary.\n\n    if opcion_elegida == '1':\n        nif = input(\"Introduzca su NIF: \")\n        nombre = input(\"Introduzca su nombre: \")\n        apellidos = input(\"Introduzca sus apellidos: \")\n        tlf = input(\"Introduzca su teléfono: \")\n        email = input(\"Introduzca su email: \")\n        preferente = (input(\"¿Ha contratado usted el servicio preferente? Indique Si o No: \"))\n        \n        #With another while loop we turn the \"preferente\" variable into a boolean, since any non-empty input value would otherwise be truthy.\n        while True:\n            if preferente.lower() != \"si\" and preferente.lower() != \"no\":\n                print (\"Lo siento, debe responder Sí o No\")\n                preferente = (input(\"¿Ha contratado usted el servicio preferente? 
Indique Si o No: \"))\n continue\n\n elif preferente.lower() == \"si\":\n preferente = True\n break\n\n else:\n preferente = False\n break\n\n cliente = {\"Nombre\": nombre, \"Apellidos\": apellidos, \"Teléfono\" : tlf, \"Email\": email, \"Preferente\": preferente}\n\n #Actualizamos el diccionario utilizando como clave el NIF y como valor el diccionario con sus datos.\n clientes[nif] = cliente\n\n #Incluimos los datos de los clientes en un archivo.csv en el que la clave será el NIF y el valor cada diccionario.\n with open('clientes.csv', 'w') as f: #Habría que poner la ruta donde queremos guardar el .csv\n writer = csv.writer(f)\n for k, v in clientes.items():\n writer.writerow([k, v])\n\n #Si elige la opción 2, el cliente puede borrar cualquier cliente mediante la clave de su NIF.\n if opcion_elegida == '2':\n nif = input(\"Introduzca el NIF del cliente que desea borrar: \")\n if nif in clientes:\n del clientes[nif]\n\n #Actualizamos el archivo .csv, retirando el cliente eliminado de la base de datos.\n with open('clientes.csv', 'w') as f: #Habría que poner la ruta donde queremos guardar el .csv\n writer = csv.writer(f)\n for k, v in clientes.items():\n writer.writerow([k, v])\n else:\n print (f\"Lo siento, no existe ningún cliente con el NIF: {nif}\")\n\n #Si elige la opción 3, podemos mostrar los datos de un cliente en concreto, utilizando la clave de su NIF.\n if opcion_elegida == '3':\n nif = input(\"Introduzca el NIF del cliente que desea mostrar: \")\n #Mediante una condición if, si el NIF es correcto, imprimiremos en un comentario multilínea los datos de ese cliente.\n if nif in clientes:\n\n print (f'''El cliente con NIF {nif} tiene los siguientes datos:\nNombre: {clientes[nif]['Nombre']} \nApellidos: {clientes[nif]['Apellidos']}\nTeléfono: {clientes[nif]['Teléfono']}\nEmail: {clientes[nif]['Email']}\nCliente Preferente: {clientes[nif]['Preferente']}''')\n\n #Si el NIF no corresponde a ningún cliente de nuestra base de datos, la consola devolverá un mensaje indicando que no existe.\n else:\n print(f\"Lo siento, no existe ningún cliente con el NIF: {nif}\")\n\n #Si elige la opción 4, podemos mostrar los datos de todos los clientes de la base de datos.\n if opcion_elegida == '4':\n\n #Mediante un bucle for iteramos por todos los clientes y los mostramos mediante un comentario multilínea.\n for key, value in clientes.items():\n print (f'''El cliente con NIF {key} tiene los siguientes datos:\nNombre: {value['Nombre']} \nApellidos: {value['Apellidos']}\nTeléfono: {value['Teléfono']}\nEmail: {value['Email']}\nCliente Preferente: {value['Preferente']}''')\n print ('--------------------------------')\n\n #Si elige la opción 5, podemos mostrar los datos de todos los clientes preferentes.\n if opcion_elegida == '5':\n\n #Mediante un bucle for iteramos por todos los clientes.\n for key, value in clientes.items():\n #Mediante un condicional (if), indicamos que deben imprimirse por consola únicamente los clientes preferentes. 
\n            if value['Preferente'] == True:\n                print(f'''El cliente con NIF {key} tiene los siguientes datos:\nNombre: {value['Nombre']} \nApellidos: {value['Apellidos']}\nTeléfono: {value['Teléfono']}\nEmail: {value['Email']}\nCliente Preferente: {value['Preferente']}''')\n                print ('--------------------------------')\n    \n\n    opcion_elegida = input(\"(1) Añadir un cliente \\n(2) Eliminar cliente \\n(3) Mostrar Cliente \\n(4) Listar TODOS los clientes \\n(5) Mostrar ÚNICAMENTE los clientes preferentes \\n(6) Finalizar Programa \\nElija una de las opciones mostradas:\")\n\nprint (\"Programa Finalizado\")\n    \n\n\n\n","repo_name":"joorgemartinez/EDEM2022","sub_path":"DATA ENGINEERING/RETOS PYTHON/RETOS INTERMEDIOS/RETO 11/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5690,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"75237139074","text":"import numpy as np\r\nimport os \r\nfrom quaternion import euler_to_quaternion, qeuler_np\r\n\r\nroot = './lafan1/lafan1'\r\ndef flip_bvh(filename):\r\n    fout = open(os.path.join(root, filename.replace('.bvh', '_flip.bvh')), 'w')\r\n    cnt = 0\r\n    for line in open(os.path.join(root, filename), 'r'):\r\n        cnt += 1\r\n        if cnt <= 134:\r\n            fout.write(line)\r\n        else:\r\n            line = line.split('\\n')[0].split(' ')[:69]\r\n            line = np.reshape(np.array([float(x) for x in line]), [23, 3])\r\n            line[0,2] *= -1.0\r\n            \r\n            quat = euler_to_quaternion(line[1:] / 180.0 * np.pi, 'zyx')\r\n            quat[:,0] *= -1.0\r\n            quat[:,1] *= -1.0\r\n            line[1:] = qeuler_np(quat, 'zyx') / np.pi * 180.0\r\n            \r\n            left_idx = [2,3,4,5,15,16,17,18]\r\n            right_idx = [6,7,8,9,19,20,21,22]\r\n            line[left_idx+right_idx] = line[right_idx+left_idx].copy()\r\n            \r\n            line = np.reshape(line, (69,))\r\n            new_line = ''\r\n            for s in line[:-1]:\r\n                new_line += (str(s) + ' ')\r\n            new_line += (str(line[-1]) + '\\n')\r\n            fout.write(new_line)\r\nfor filename in os.listdir(root):\r\n    flip_bvh(filename)\r\n    # assert 0\r\n    print(filename)","repo_name":"Garfield-kh/PoseTriplet","sub_path":"hallucinator/code_rib/flip_bvh.py","file_name":"flip_bvh.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":293,"dataset":"github-code","pt":"61"}
{"seq_id":"73581858434","text":"\"\"\"\nScript for a teacher to send to students reminding them of their missing assignments\nInput: asks user for a list of names separated by a comma, list of grades separated\n    by a comma, list of missing assignments separated by a comma. List of names,\n    grades and missing assignments should be in order with each other\nOutput: prints a message with the student's name, grades and number of missing assignments\n\"\"\"\n\nmessage = \"Hi {}, \\n\\nThis is a reminder that you have {} assignments left to \\\nsubmit before you can graduate. Your current grade is {} and can increase \\\nto {} if you submit all assignments before the due date. 
\\n\\n\"\n\nnames = input(\"Enter a list of names separated by a comma: \").title().split(\",\")\nassignments = input(\"Enter a list of missing assignments separated by a comma: \").split(\",\")\ngrades = input(\"Enter a list of grades separated by a comma: \").split(\",\")\n\n\"\"\"\nfor loop that prints the message to each student with the correct values\nthe potential grade is the current grade + 2 * number of missing assignments\n\"\"\"\n\nfor name, assignment, grade in zip(names, assignments, grades):\n potential_grade = int(grade) + int(assignment)*2\n print(message.format(name, assignment, grade, potential_grade))\n","repo_name":"jduell12/Python_Practice","sub_path":"scripts/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19827580022","text":"# bot.py\nimport os\nimport discord\nfrom dotenv import load_dotenv\nfrom discord.ext import commands\nimport random\n\nfrom constants import *\n\nload_dotenv()\nTOKEN = os.getenv('TOKEN')\n\nintents = discord.Intents.all()\nbot = commands.Bot(command_prefix='!', intents=intents)\n\n\n@bot.event\nasync def on_ready():\n print(f'{bot.user.name} has connected to Discord!')\n\n\n@bot.event\nasync def on_member_join(member):\n channel = bot.get_channel(1109948451577401347)\n await channel.send(f\"Welcome to {member.guild.name}!\")\n\n\n@bot.event\nasync def on_message(message):\n if message.channel.id == 1109948451577401347:\n for i in BANNED_WORDS: # Go through the list of bad words;\n if i in message.content:\n await message.delete()\n await message.channel.send(f\"{message.author.mention} Don't use that word here!\")\n bot.dispatch('profanity', message, i)\n return # So that it doesn't try to delete the message again.\n await bot.process_commands(message)\n\n\n@bot.event\nasync def on_profanity(message, word):\n channel = bot.get_channel(1112860393874923591)\n embed = discord.Embed(title=\"Profanity Alert!\", description=f\"{message.author.name} just said ||{word}||\",\n color=discord.Color.blurple()) # Let's make an embed!\n await channel.send(embed=embed)\n\n\n@bot.command()\nasync def ping(ctx):\n await ctx.channel.send(\"pong\")\n\n\n# Retrieve information on a member, will return error if member does not exist.\n# This will go into its own class later on\n@bot.command()\nasync def info(ctx, *, member: discord.Member):\n \"\"\"Tells you some info about the member.\"\"\"\n msg = f'{member} joined on {member.joined_at} and has {len(member.roles)} roles.'\n await ctx.send(msg)\n\n\n@info.error\nasync def info_error(ctx, error):\n if isinstance(error, commands.BadArgument):\n await ctx.send('I could not find that member...')\n\n\n# Beginning of methods that are not part of Discords commands, but one's wrote by dev\n@bot.command(pass_context=True)\nasync def pick(ctx):\n play_this = random.choice(MULTIPLAYER_MODES)\n await ctx.channel.send(play_this)\n\n\nbot.run(TOKEN)\n# test\n","repo_name":"SethGuimont/discord-bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6740931707","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n \n# The modules require\nimport sys\nimport socket\nimport struct\nimport string\n\n\n \ndef send_and_receive_tcp(address, port, message):\n print(\"You gave arguments: {} {} {}\".format(address, port, message))\n # create TCP socket\n s = 
socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    \n    # connect socket to given address and port\n    s.connect((address,port))\n    \n    # python3 sendall() requires bytes like object. encode the message with str.encode() command\n    message += '\\r\\n'\n    encodeStr = message.encode()\n    \n    # send given message to socket\n    s.sendall(encodeStr)\n    \n    # receive data from socket\n    data = s.recv(1024)\n    \n    # data you received is in bytes format. turn it to string with .decode() command\n    decodeStr = data.decode()\n    \n    # print received data\n\n    print(\"Received (TCP): \" +decodeStr)\n    \n    # close the socket\n\n    s.close()\n    \n    # Get your CID and UDP port from the message\n    list = decodeStr.split('\\r\\n')\n    infoList = list[0].split(' ')\n    cid = infoList[1]\n    udpPort = infoList[2]\n    # Continue to UDP messaging. You might want to give the function some other parameters like the above mentioned cid and port.\n    send_and_receive_udp(address, udpPort, cid)\n    return\n    \n    \ndef send_and_receive_udp(address, port, token):\n    '''\n    Implement UDP part here.\n    '''\n    ##print(address)\n    ##print(port)\n    ##print(token)\n    \n    ack = True\n    eom = False\n    dataRemaining = 0\n    content = \"Hello from \" + token\n    conLenght= len(content)\n    content_utf = content.encode()\n    token_utf = token.encode()\n\n    #create udp sockets\n    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n    \n    \n    \n\n    while True:\n        ##pack and send\n        sendData = struct.pack('!8s??HH128s', token_utf, ack, eom, dataRemaining, conLenght, content_utf)\n        s.sendto(sendData, (address, int(port)))\n        print(\"Sent: \" + content_utf.decode())\n\n        ##get the data and address\n        rcvData, addr = s.recvfrom(1024)\n        \n        ##unpack\n        token, ack, eom, dataRemaining, conLenght, content = struct.unpack('!8s??HH128s', rcvData)\n        \n        ##decode received message\n        message_rcvd = content.decode()\n        print(\"Received: \" + message_rcvd) \n        \n        ##break the loop if last message\n        if eom == True:\n            s.close()\n            break\n\n        ##remove padding\n        message_rcvd = message_rcvd.strip('\\x00')\n\n        \n        #reverse the word string\n        message_rcvd = message_rcvd.split(' ')\n        message_rcvd = reversed(message_rcvd)\n        reversed_msg = ' '.join(message_rcvd)\n\n        ##lenght\n        conLenght = len(reversed_msg)\n\n        ##encode reversed message\n        content_utf = reversed_msg.encode()\n        \n        \n\n\n    return\n    \n    \ndef main():\n    USAGE = 'usage: %s <server address> <server port> <message>' % sys.argv[0]\n    \n    try:\n        # Get the server address, port and message from command line arguments\n        server_address = str(sys.argv[1])\n        server_tcpport = int(sys.argv[2])\n        message = str(sys.argv[3])\n    except IndexError:\n        print(\"Index Error\")\n        # Print usage instructions and exit if we didn't get proper arguments\n        sys.exit(USAGE)\n    except ValueError:\n        print(\"Value Error\")\n        sys.exit(USAGE)\n    \n    send_and_receive_tcp(server_address, server_tcpport, message)\n    \n    \nif __name__ == '__main__':\n    # Call the main function when this script is executed\n    main()\n","repo_name":"msrn/school-exercises","sub_path":"ClientProxy_py/final_assignment.py","file_name":"final_assignment.py","file_ext":"py","file_size_in_byte":3497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"6247289933","text":"import argparse\nfrom typing import List\n\nfrom mozuma.cli import checks, ls, run\nfrom mozuma.cli.types import CLICommandDefinition\n\nCOMMAND_DEFINITIONS: List[CLICommandDefinition] = [\n    ls.COMMAND,\n    run.COMMAND,\n    checks.COMMAND,\n]\n\n\ndef cli():\n    parser = argparse.ArgumentParser(\"mozuma\", description=\"CLI to list and run models\")\n\n    subparsers = 
parser.add_subparsers(dest=\"cmd\", required=True)\n\n for cmd_def in COMMAND_DEFINITIONS:\n # Configuring command parser\n cmd_parser = subparsers.add_parser(cmd_def.name, help=cmd_def.help_text)\n cmd_def.args_parser(cmd_parser)\n cmd_parser.set_defaults(func=cmd_def.command_fun)\n cmd_parser.set_defaults(options_class=cmd_def.options_class)\n\n args = parser.parse_args()\n args.func(\n args.options_class(\n **{\n k: v\n for k, v in vars(args).items()\n if k not in (\"func\", \"options_class\", \"cmd\")\n }\n )\n )\n","repo_name":"mozuma/mozuma","sub_path":"src/mozuma/cli/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"} +{"seq_id":"5215973465","text":"import requests\r\nimport re\r\n\r\nimport ssl\r\nssl._create_default_https_context = ssl._create_unverified_context\r\n\r\ndef string_process(string):\r\n a = 0\r\n while True:\r\n a = string.find('<',a)\r\n if a == -1:\r\n break\r\n b = string.find('>',a)\r\n string =string[0:a]+string[b+1:]\r\n return string\r\n \r\n\r\ndef get_url_list(url):\r\n html = open_url(url)\r\n urllist1 = list(set(re.findall(r'',html)))\r\n urllist2 = list(set(re.findall(r'(.+?)',html)\r\n summary = re.findall(r'

(.+?)

',html)\r\n content_list = re.findall(r'

(.+?)

',html)\r\n all_content = title + summary + content_list\r\n return all_content\r\n\r\ndef save_content(content, num, date):\r\n with open(r'%s.txt' % date,'a',encoding='utf-8') as f:\r\n f.write(\"No.%d article:\\n\" % num)\r\n for each_line in content:\r\n if '<' in each_line:\r\n each_line = string_process(each_line)\r\n f.write(each_line + '\\n\\n')\r\n f.write(\"\\n\\n\\n\\n\")\r\n \r\n\r\nif __name__ == '__main__':\r\n the_year = input(\"Please enter the year:\")\r\n the_month = input(\"Please enter the month:\")\r\n start = input(\"Please enter the start day:\")\r\n end = input(\"Please enter the end day:\")\r\n for the_day in range(int(start),int(end)+1):\r\n the_day = str(the_day)\r\n if len(the_day) == 1:\r\n the_day = '0' + the_day\r\n url = 'https://www.nytimes.com/issue/todayspaper/%s/%s/%s/todays-new-york-times' % (the_year,the_month,the_day)\r\n print('Date:%s/%s/%s loading......' % (the_year,the_month,the_day))\r\n num =1\r\n for each_url in get_url_list(url):\r\n each_url = \"https://www.nytimes.com\" + each_url\r\n content = get_content(open_url(each_url))\r\n print(\"Loading No.%d article......\" % num)\r\n the_date = \"NYTimes_%s_%s_%s\" % (the_year,the_month,the_day)\r\n save_content(content, num, the_date)\r\n num += 1\r\n print('%s/%s/%s finished!\\n' % (the_year,the_month,the_day))\r\n","repo_name":"Liuweihan2000/bursty-event-detection-storm","sub_path":"NYTimes/get_articles_NYTimes.py","file_name":"get_articles_NYTimes.py","file_ext":"py","file_size_in_byte":2546,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"35078726078","text":"import random\r\navengers = [\"cap america\", \"dr strange\", \"thor\", \"hulk\", \"iron man\", \"starlord\", \"spiderman\", \"black panther\", \"scarlet witch\" ]\r\nThanosPower = 95\r\n\r\n\r\ndef game(k):\r\n if k == 0:\r\n return 92\r\n elif k == 1:\r\n return 96\r\n elif k == 2:\r\n return 99\r\n elif k == 3:\r\n return 98\r\n elif k == 4:\r\n return 97\r\n elif k == 5:\r\n return 91\r\n elif k == 6:\r\n return 92\r\n elif k == 7:\r\n return 92\r\n elif k == 8:\r\n return 95\r\n\r\n\r\ntrig = 'y'\r\nwhile trig == ( 'y' ) :\r\n print('START THE GAME')\r\n avgpow = 0\r\n sumpow = 0\r\n a = []\r\n for x in range(2):\r\n a.append(random.randint(1, 8))\r\n i=a[0]\r\n j=a[1]\r\n print('Select your two heroes')\r\n print('select\\t0-CAPTAIN AMERICA\\n 1-DOCTOR STRANGE\\n 2-THOR\\n 3-HULK\\n 4-IRON MAN\\n 5-PETER QUILL\\n 6-SPIDERMAN\\n 7-BLACK PANTHER\\n 8-SCARLET WITCH')\r\n m = int(input('Enter your first choice: '))\r\n n = int(input('enter your second choice: '))\r\n sumpow = sumpow + game(i) + game(j) + game(m) + game(n)\r\n avgpow = sumpow / 4.0\r\n print('your superheroes are')\r\n print(avengers[i])\r\n print(avengers[j])\r\n print(avengers[m])\r\n print(avengers[n])\r\n if (avgpow > 95):\r\n print('AVENGERS WON')\r\n else:\r\n print('THANOS WON')\r\n s = input('Do you wanna try again?Y/N:')\r\n trig = s.lower()\r\n\r\n if trig == 'n':\r\n exit\r\n","repo_name":"akforever/envisage-summer","sub_path":"thanosgame.py","file_name":"thanosgame.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11316635225","text":"import random\r\n\r\n\r\ndef checkScore(score, guess, numList):\r\n\ttempnumList = []\r\n\tfor element in numList:\r\n\t\tequalCount = 0\r\n\t\twrongCount = 0\r\n\t\tfor i in range(len(guess)):\r\n\t\t\tif guess[i] == element[i]:\r\n\t\t\t\tequalCount += 
1\r\n\t\t\telif guess[i] != element[i] and guess[i] in element:\r\n\t\t\t\twrongCount += 1\r\n\t\tif (True and equalCount == int(score[0])) and wrongCount == int(score[1]):\r\n\t\t\ttempnumList.append(element)\r\n\r\n\treturn tempnumList\r\n\r\n\r\n\r\n\r\ndef makeNums2(length = 4):\r\n if length == 0:\r\n return ['']\r\n lista = ['01'[i]+rest for i in range(2) for rest in makeNums2(length-1)]\r\n return lista\r\n\r\n\r\ndef makeGuess(numList):\r\n\treturn random.choice(numList)\r\n\r\ndef getScore(guess, secret):\r\n\tequalCount = 0\r\n\twrongCount = 0\r\n\tfor i in range(len(secret)):\r\n\t\tif guess[i] == secret[i]:\r\n\t\t\tequalCount += 1\r\n\t\telif guess[i] != secret[i] and guess[i] in secret:\r\n\t\t\twrongCount += 1\r\n\treturn (equalCount, wrongCount)\r\n\r\n\r\ndef playGame(secret, numList):\r\n\tturns = 0\r\n\t#print()\r\n\twhile len(numList) > 0:\r\n\t\tturns += 1\r\n\t\tguess = makeGuess(numList)\r\n\t\t\r\n\t\tscore = getScore(guess, secret)\r\n\t\t#print(guess, score)\r\n\t\tif score[0] == len(secret):\r\n\t\t\treturn turns\r\n\t\tnumList = checkScore(score, guess, numList)\r\n\treturn turns\r\n\r\nnumList = makeNums2(5)\r\n#print(numList)\r\n\r\nsecrets = [random.choice(numList) for i in range(100)]\r\nprint('playing game')\r\nscores = [playGame(secret, numList[:]) for secret in secrets]\r\naverage = sum(scores)/len(scores)\r\n\r\nprint(average)","repo_name":"maxbergmark/old-work","sub_path":"Egna projekt/mastermind.py","file_name":"mastermind.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10635048405","text":"import tiktoken\nimport openai\nfrom ratelimiter import RateLimiter\nfrom retrying import retry\n\ndef parse_abc_notations(input_string):\n import re\n pattern = r'```abc\\s(.*?)```'\n match = re.search(pattern, input_string, re.DOTALL)\n if match:\n abc_notations = match.group(1)\n return abc_notations\n else:\n return None\n\ndef text2token(text: str, encoding: str = \"gpt2\"):\n \"\"\"Tokenize a text into a list of tokens.\n \n Ref:\n https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb\n https://github.com/openai/tiktoken\n \"\"\"\n encoding = tiktoken.get_encoding(encoding)\n tokens = encoding.encode(text)\n\n return tokens \n\ndef token2text(tokens: list, encoding: str = \"gpt2\"):\n \"\"\"Decode a list of tokens into a text.\"\"\"\n encoding = tiktoken.get_encoding(encoding)\n text = encoding.decode(tokens)\n\n return text \n\ndef count_tokens(text: str, encoding: str = \"gpt2\"):\n \"\"\"Count the number of tokens in a text.\n \n Ref: \n https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb\n https://github.com/openai/tiktoken\n \"\"\"\n num_tokens = len(text2token(text, encoding))\n\n return num_tokens\n\ndef parse_text_response(openai_text_response, text_engine):\n \"\"\"According to the text engine, parse the response content.\n \n Different text engine support different response structures\n \"\"\"\n if \"text\" in text_engine:\n return openai_text_response.choices[0].text.strip()\n elif \"gpt-3.5\" in text_engine or \"gpt-4\" in text_engine:\n return openai_text_response['choices'][0]['message']['content']\n\ndef get_model_selection(text_engine):\n \"\"\"Return the model selection according to the text engine.\"\"\"\n model_seletection = {\n \"gpt-3.5-turbo\": { \"model\": text_engine},\n \"gpt-4\": { \"model\": text_engine},\n \"text-davinci-003\": { 
\"engine\": text_engine},\n }\n return model_seletection[text_engine]\n\ndef get_engine_method(text_engine):\n \"\"\"Return the engine method according to the text engine.\"\"\"\n method_selected = {\n \"gpt-3.5-turbo\": openai.ChatCompletion.create,\n \"gpt-4\": openai.ChatCompletion.create,\n \"text-davinci-003\": openai.Completion.create,\n }\n return method_selected[text_engine]\n\ndef format_prompt(original_prompt, text_engine=\"gpt-3.5\"):\n \"\"\"Format the prompt according to the text engine.\"\"\"\n if \"text\" in text_engine:\n return {\n \"prompt\": original_prompt,\n }\n elif \"gpt-3.5\" in text_engine or \"gpt-4\" in text_engine:\n return {\n \"messages\":[\n {\"role\": \"system\", \"content\": original_prompt}\n ]\n }\n return original_prompt\n\n@retry(stop_max_attempt_number=10)\n@RateLimiter(max_calls=20, period=60)\ndef generate_openai_completion(text_engine, api_settings):\n \"\"\"Generate the completion using OpenAI API.\n \n Append the model selection and engine method according to the text engine.\n Add rate limiter and retry decorator to avoid the rate limit error.\n Ref: https://community.openai.com/t/continuous-gpt3-api-500-error-the-server-had-an-error-while-processing-your-request-sorry-about-that/42239/30?page=2\n Package: \n https://github.com/RazerM/ratelimiter\n https://github.com/rholder/retrying\n \"\"\"\n response=get_engine_method(text_engine)(**api_settings)\n return response","repo_name":"MIBlue119/music_generator","sub_path":"music_generator/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3478,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"17396621705","text":"import xarray as xr\nimport pandas as pd\nimport numpy as np\nimport scipy\nimport os\nimport sys\n\nera_dir=\"/g/data/e14/sm2435/ERA5/\"\nout_dir=\"/g/data/e14/sm2435/ERA5/clim/\"\n\ndef load_lhf(lhf_files):\n lhf = xr.open_mfdataset(lhf_files, parallel=True).mslhf\n return lhf\ndef get_lhf_clim(lhf_files):\n lhf = load_lhf(lhf_files)\n lhf = lhf.groupby('time.month').mean('time')\n return lhf\n\n#tauu and tauv files\nlhf = 'lhf/*.nc'\n\n#Wstress compoenents\nlhf_files = (os.path.join(era_dir, lhf))\ntry:\n #calculate wspd\n lhf_c =get_lhf_clim(lhf_files)\n #save output as netcdf file\n lhf_c.to_netcdf(os.path.join(out_dir, 'ERA5_lhf.nc'))\nexcept Exception as e:\n print(e)\n traceback.print_exc()\n pass\nsys.exit()\n\n","repo_name":"SebastianMckenna/Heatbudget_notebooks","sub_path":"Heatbudget_calculations/era5/get_lhf_clim.py","file_name":"get_lhf_clim.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5176182914","text":"from Bio.Seq import Seq\nimport sys\nimport os\nfrom Bio.Blast import NCBIXML\n\n\ndef writeproteinfasta(blastXml):\n foutpath = \"tmp/\"\n if not os.path.exists(foutpath):\n os.makedirs(foutpath)\n\n path = os.path.splitext(blastXml)\n\n acc = os.path.basename(path[0])\n fname = open(blastXml)\n\n blast_records = NCBIXML.parse(fname)\n blast_record = next(blast_records)\n fout = foutpath+acc+\".fasta\"\n with open(fout, 'w') as protein_fasta:\n dnas = list()\n for alignment in blast_record.alignments:\n for hsp in alignment.hsps:\n coding_dna = Seq(hsp.sbjct.replace(\"-\", \"\"))\n dnas.append(coding_dna)\n dnas = sorted(dnas, key=len)\n if not dnas:\n print(acc)\n else:\n longest_dna = dnas[-1]\n frame1 = str(longest_dna[0:].translate())\n# if frame1.count(\"*\") == 1 and 
frame1.startswith(\"M\"):\n if frame1.startswith(\"M\") and frame1.count(\"*\") < 2:\n protein_fasta.write(\n \">\"+acc+\"|\"+str(hsp.sbjct_start)+\"|\" + \"\\n\")\n protein_fasta.write(frame1+\"\\n\")\n# elif frame1.count(\"*\") > 1 and frame1.startswith(\"M\"):\n# print(acc)\n# print(\"Deleterious mutation found! Abort!\")\n# print(frame1)\n \n# else:\n# print(acc)\n# print(\"Only partial genes found\")\n# print(frame1)\n \n\n fname.close()\n return fout\n\n\ndef addMutationDictionary(path, protein_sequence, position, mutations,ref_aa):\n position = position-1\n acc = os.path.basename(path).replace(\".fasta\", \"\")\n if(len(protein_sequence) >= position):\n mutations[acc] = protein_sequence[position]\n if(protein_sequence[position]!=ref_aa):\n print(mutations)\n else:\n if \"partial\" not in mutations.keys():\n mutations[\"partial\"] = list()\n mutations[\"partial\"].append(\n os.path.basename(path).replace(\".fasta\", \"\"))\n return mutations\n\n\ndef getMutation(input_fasta_path, mutation):\n i = 0\n mutations = dict()\n path = input_fasta_path\n aa_position = int(mutation[1:len(mutation)-1])\n ref_aa = mutation[0]\n exp_aa = mutation[-1]\n with open(path) as lines:\n i = i+1\n for line in lines:\n if \">\" not in line:\n mutations = addMutationDictionary(\n path, line, aa_position, mutations,ref_aa)\n #print(mutations)\n","repo_name":"smha118/mutation_detector","sub_path":"defs.py","file_name":"defs.py","file_ext":"py","file_size_in_byte":2543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1627954553","text":"\"\"\"\n-----------------------------------------------------------\nEJERCICIO 3 ----------------------------------------------\n-----------------------------------------------------------\n(2 puntos)\n\nCompleta el ejercicio adjunto, .py\nOs dejamos la información.\n\nPARTE 1:\n- Haz que funcione el código, y se muestren los 3 prints con las 3 listas\n\nPARTE 2:\n- Haz que el código principal (sin contar las listas) sea de 1 línea\n\n\"\"\"\n\nlst = [\"Blanco\", \"Verde\", \"Amarillo\", \"Rojo\", \"Azul\"]\nlst2 = [\"Blanco\", \"Negro\", \"Rojo\", \"Gris\", \"Naranja\"]\n\ndef mostrar_lista(lst):\n for e in lst:\n print(e)\n\ndef get_coincidencias(lst, lst2):\n comparacion = []\n for i in range(len(lst)):\n if lst[i] in lst2:\n comparacion.append(lst[i])\n return comparacion\n\n\ndef mostrar_todo(lst, lst2):\n print(\"-- LST1 --\")\n mostrar_lista(lst)\n\n print(\"\\n-- LST2 --\")\n mostrar_lista(lst2)\n\n lista_comparada = get_coincidencias(lst, lst2)\n print(\"\\n-- LST COMPARADA --\")\n mostrar_lista(lista_comparada)\n\n\nmostrar_todo(lst, lst2)\n","repo_name":"Chrisgoac/m03","sub_path":"exam310123ex3.py","file_name":"exam310123ex3.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70857509955","text":"#!/usr/bin/env python\n\nimport rospy\nimport roslib\nroslib.load_manifest('my_controller')\nfrom sensor_msgs.msg import Image\nfrom std_msgs.msg import String, Float32, Bool\n\nimport sys\nimport random\n\nimport numpy as np\nfrom cv_bridge import CvBridge, CvBridgeError\nimport cv2\n# from PIL import Image as Image_PIL\nimport tensorflow as tf\nfrom keras import models\nimport keras\nfrom tensorflow.python.keras.backend import set_session\nfrom tensorflow.python.keras.models import load_model\n\ndavid_path = '/home/davidw0311'\ncnn_path = '/ros_ws/src/my_controller/cnn_training/'\nPATH = david_path + 
cnn_path\nmodel_path = PATH + 'detector_model_v7_8epoch_lenet'\n\nmy_dim_rev = (105, 150)\n\ndef cut(img):\n '''cuts the cropped license plate and returns a crop of each character'''\n P = cv2.resize(img[150:245, 30:160], my_dim_rev)\n ID = cv2.resize(img[150:245, 165:285], my_dim_rev)\n A1 = cv2.resize(img[280:330, 20:76], my_dim_rev)\n A2 = cv2.resize(img[280:330, 76:135], my_dim_rev)\n N1 = cv2.resize(img[280:330, 180:227], my_dim_rev)\n N2 = cv2.resize(img[280:330, 227:280], my_dim_rev)\n return P, ID, A1, A2, N1, N2\n\n# character lookup used by arr_to_char (assumed; mirrors decode_one_hot below)\nmy_str = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n\ndef arr_to_char(one_hot):\n val_index = np.argmax(one_hot)\n print('val index', val_index)\n return my_str[val_index]\n\ndef get_one_hot_encoding(value):\n # value should be a character either 0-9 or A-Z\n\n encoding = np.zeros(36)\n\n # number\n if ord(value) > 47 and ord(value) < 58:\n encoding[ord(value)-48] = 1\n elif ord(value) > 64 and ord(value) < 91:\n encoding[ord(value)- 65 + 10] = 1\n\n return encoding\n\ndef decode_one_hot(encoding):\n if encoding < 10:\n return str(encoding)\n else:\n return chr(encoding-10 + 65) \n\nclass plate_decrypter:\n \n def __init__(self):\n print('here')\n self.sess = keras.backend.get_session()\n self.graph = tf.compat.v1.get_default_graph()\n self.conv_model = load_model(model_path, compile=True)\n print('loaded model')\n # print(self.conv_model.summary())\n \n self.bridge = CvBridge()\n self.license_value_pub = rospy.Publisher('/plate_value', String, queue_size=1)\n self.cropped_plate_sub = rospy.Subscriber(\"/cropped_plate\", Image, self.callback)\n \n def predict(self, c):\n y_predict = self.conv_model.predict(np.array([c]))\n return y_predict\n\n def callback(self, data):\n try:\n cropped_plate = self.bridge.imgmsg_to_cv2(data, \"bgr8\")\n except CvBridgeError as e:\n print(e)\n \n if cropped_plate is not None:\n uh = 123\n us = 255\n uv = 228\n lh = 107\n ls = 102\n lv = 79\n lower_hsv = np.array([lh,ls,lv])\n upper_hsv = np.array([uh,us,uv])\n \n plate_HSV = cv2.cvtColor(cropped_plate, cv2.COLOR_BGR2HSV)\n thresh_blue = cv2.inRange(plate_HSV, lower_hsv, upper_hsv)\n thresh_black = cv2.inRange(plate_HSV, (0,0,0), (0,0,67))\n \n # ret, thresh = cv2.threshold(img,120,255,cv2.THRESH_BINARY_INV)\n total_thresh = cv2.bitwise_or(thresh_blue, thresh_black)\n total_thresh = ~total_thresh\n # cv2.imshow('thresh', total_thresh)\n\n P, ID, A1, A2, N1, N2 = cut(total_thresh)\n \n # cv2.imshow('P', P)\n # cv2.imshow('ID', ID)\n # cv2.imshow('A1', A1)\n # cv2.imshow('A2', A2)\n # cv2.imshow('N1', N1)\n # cv2.imshow('N2', N2)\n # cv2.waitKey(1)\n\n with self.graph.as_default():\n set_session(self.sess)\n def get_prediction(a, isnum):\n a = cv2.merge((a,a,a))\n a_predictions = self.conv_model.predict(np.array([a]))[0]\n if isnum:\n a_index = np.argmax(a_predictions[:10])\n else:\n a_index = np.argmax(a_predictions[10:]) + 10\n a_confidence = a_predictions[a_index]\n a_prediction = decode_one_hot(a_index)\n \n return a_prediction, a_confidence, a_predictions\n \n P_prediction, P_confidence, P_predictions = get_prediction(P, isnum=False)\n # print(P_prediction,'P predictions', np.round(np.array(P_predictions), 3))\n ID_prediction, ID_confidence, ID_predictions = get_prediction(ID, isnum=True)\n # print(ID_prediction,'ID predictions', np.round(np.array(ID_predictions), 3))\n A1_prediction, A1_confidence, A1_predictions = get_prediction(A1, isnum=False)\n A2_prediction, A2_confidence, A2_predictions = get_prediction(A2, isnum=False)\n # print(A1_prediction,'a1 predictions', np.round(np.array(A1_predictions), 3))\n N1_prediction, N1_confidence, 
N1_predictions = get_prediction(N1, isnum=True)\n N2_prediction, N2_confidence, N2_predictions = get_prediction(N2, isnum=True)\n\n prediction = ID_prediction + A1_prediction + A2_prediction + N1_prediction + N2_prediction\n confidence = (ID_confidence + A1_confidence + A2_confidence + N1_confidence + N2_confidence)/5\n self.license_value_pub.publish(prediction+str(confidence))\n print(prediction, confidence)\n self.license_value_pub.publish('')\n\n\ndef main(args):\n rospy.init_node('plate_decrypter', anonymous=True) \n pd = plate_decrypter()\n\n try:\n rospy.spin()\n except KeyboardInterrupt:\n print(\"Shutting down\")\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n main(sys.argv)\n","repo_name":"davidw0311/ai_self_driving_controller","sub_path":"nodes/decrypt_plate.py","file_name":"decrypt_plate.py","file_ext":"py","file_size_in_byte":5694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70890016833","text":"# Econometrics III - Part 3:\n\n# Imports:\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\nfrom statsmodels.tsa.stattools import adfuller\nfrom stargazer.stargazer import Stargazer, LineLocation\nfrom IPython.core.display import HTML\n\n# Data:\ndf = pd.read_csv('data_assign_p3.csv')\ndf.index = pd.to_datetime(df['DATE'], dayfirst=True)\ndf = df.drop('DATE',axis=1)\n\n\n## Question 1:\n# Chosen series: Apple & Exxon Mobil\n\n# Apple Plots:\nplt.plot(df['APPLE'])\nplt.title('Apple Stock Price')\nplt.show()\n\n# Apple ACF plot:\nsm.graphics.tsa.plot_acf(df['APPLE'], lags=12, \n title='Apple ACF plot')\nplt.show()\n\n# Apple PACF plot:\nsm.graphics.tsa.plot_pacf(df['APPLE'], lags=12,\n title='Apple PACF plot')\nplt.show()\n\n\n# Exxon Mobil plots:\nplt.title('Exxon Mobil Stock Price')\nplt.plot(df['EXXON_MOBIL'])\nplt.show()\n\n# Exxon Mobil ACF plot:\nsm.graphics.tsa.plot_acf(df['EXXON_MOBIL'], lags=12, \n title='Exxon Mobil ACF plot')\nplt.show()\n\n# Exxon Mobil PACF plot:\nsm.graphics.tsa.plot_pacf(df['EXXON_MOBIL'], lags=12,\n title='Exxon Mobil PACF plot')\nplt.show()\n\n\n## Question 2:\n# Perform an ADF test\ndef adf_test_all_vars():\n output = pd.DataFrame()\n for i in df:\n result = adfuller(df[i], regression='c',autolag='bic', store=False)\n output[i] = pd.Series(result[0:4], \n index=['Stat',\n 'P-value',\n 'Lags',\n 'N'])\n return output\n\noutput_adf=adf_test_all_vars()\nprint(output_adf.to_latex(float_format='%.2f'))\n\n\n## Question 3:\n# Random walk forecast, fc = forecast:\nfc_length = 5\n\n# Create df of point forecast (simply fc) and variance (var_fc_STOCK):\nfc_apple = np.full(fc_length, df['APPLE'].iloc[-1])\nfc_exxon = np.full(fc_length, df['EXXON_MOBIL'].iloc[-1])\n\ndf['APPLE_L1'] = df['APPLE'].shift(1)\ndf['EXXON_MOBIL_L1'] = df['EXXON_MOBIL'].shift(1)\n\nmodel_apple = smf.ols(formula = 'APPLE ~ APPLE_L1', data = df).fit()\nmodel_exxon = smf.ols(formula = 'EXXON_MOBIL ~ EXXON_MOBIL_L1', data = df).fit()\n\nvar_fc_apple = np.multiply(np.full(fc_length, model_apple.resid.var()),\n np.array([*range(1,fc_length+1)]))\nvar_fc_exxon = np.multiply(np.full(fc_length, model_exxon.resid.var()),\n np.array([*range(1,fc_length+1)]))\n\n# Plot Apple forecast + variance:\nfig, axs = plt.subplots(2)\naxs[0].plot(fc_apple, color='k')\naxs[0].plot(fc_apple+1.96*np.sqrt(var_fc_apple), color='k', linestyle='--')\naxs[0].plot(fc_apple-1.96*np.sqrt(var_fc_apple), color='k', linestyle='--')\naxs[0].set_title('Apple 
random walk forecast')\n\n# Plot Exxon Mobil forecast + variance:\naxs[1].plot(fc_exxon, color='k')\naxs[1].plot(fc_exxon+1.96*np.sqrt(var_fc_exxon), color='k', linestyle='--')\naxs[1].plot(fc_exxon-1.96*np.sqrt(var_fc_exxon), color='k', linestyle='--')\naxs[1].set_title('Exxon Mobil random walk forecast')\n\nplt.xlabel('Time')\nplt.show()\n\n## Question 4:\n# Reg Mic ~ Exxon, spurious regression:\nmodel_mic_exxon = smf.ols('MICROSOFT ~ EXXON_MOBIL', data=df).fit(use_t = True)\nstargazer = Stargazer([model_mic_exxon])\nstargazer.custom_columns('MICROSOFT')\nstargazer.show_model_numbers(False)\nstargazer.show_degrees_of_freedom(False)\nprint(stargazer.render_latex())\n\n\n\n\n\n","repo_name":"fholstege/Econometrics-III","sub_path":"part-3/ti_ect_iii_p3.py","file_name":"ti_ect_iii_p3.py","file_ext":"py","file_size_in_byte":3424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"257579851","text":"import numpy as np\nimport xml.etree.ElementTree as ET\nfrom glob import glob\nimport os\nimport time\nfrom collections import OrderedDict\nimport spacy\nimport argparse\nimport json\nfrom collections import Counter\nnlp = spacy.load('en')\n\n\ndef process(text):\n \n processed = []\n for i in nlp(text):\n if i.pos_ in ['SPACE', 'PUNCT']:\n continue\n #elif i.pos_ == 'PART':\n # processed.append('_s')\n #elif i.pos_ in ['NUM', 'SYM']:\n # processed.append(i.pos_)\n else:\n processed.append(i.text)\n\n return processed\n\ndef main(args):\n\n vocab0 = OrderedDict()\n start = time.time()\n\n count = 0\n with open(args.saveto+'%s.txt'%args.save_label, 'w') as f:\n print(\"loading all files...\")\n \n file = open(args.source+args.parse_file)\n count += 1\n for line in file:\n d = json.loads(line)\n label = d['gold_label']\n\n f.write(label+\"\\t\") \n\n s1 = process(d['sentence1'].lower())\n s2 = process(d['sentence2'].lower())\n\n for w in s1:\n f.write(w+\" \")\n if w in vocab0:\n vocab0[w] += 1\n else:\n vocab0[w] = 1\n f.write(\"\\t\")\n\n for w in s2:\n f.write(w+\" \")\n if w in vocab0:\n vocab0[w] += 1\n else:\n vocab0[w] = 1\n f.write(\"\\n\")\n\n if count % 1000 == 0:\n print(\"processed %s files\" % count)\n print(\"%s seconds elapsed\" % (time.time() - start))\n f.close()\n print(time.time() - start)\n\n tokens = list(vocab0.keys())\n \n freqs = list(vocab0.values())\n\n sidx = np.argsort(freqs)[::-1]\n\n # zero is reserve for padding\n vocab = OrderedDict([(tokens[s],i+1) for i, s in enumerate(sidx)])\n \n np.save(args.saveto+\"vocab\"+args.save_label+\".npy\", vocab)\n \n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('-saveto', type=str, default=\"../intermediate/\")\n parser.add_argument('-source', type=str, default=\"../snli_1.0/\")\n parser.add_argument('-save_label', type=str, default='snli_tst')\n parser.add_argument('-parse_file', type=str, default='snli_1.0_test.jsonl')\n args = parser.parse_args()\n #parser.parse_file = 'snli_1.0_dev.jsonl'\n #parser.parse_file = 'snli_1.0_train.jsonl'\n print(args)\n main(args)\n\n\n\n\n\n\n","repo_name":"rujunhan/DS1011_Final_Projet","sub_path":"BiMPM/create_dictionary_nli.py","file_name":"create_dictionary_nli.py","file_ext":"py","file_size_in_byte":2483,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"15770341788","text":"import pyautogui as pag\nimport keyboard\nimport sys\n\n# https://twitter.com\n\nSCREEN_SIZE = pag.size()\n\nrt_line = None\nmore_line = None\ndeleted_n = 
{\"RT\":0,\"Tweet\":0}\n\ndef exit():\n print(f\"{deleted_n}\")\n sys.exit()\n\nprint(\"RTボタンにポインタをおいてctrlを押してください。\")\nwhile True:\n if keyboard.is_pressed(\"esc\"): exit()\n if keyboard.is_pressed(\"ctrl\"):\n pos = pag.position()\n pag.move(100,0)\n pag.sleep(1)\n rt_line = [pos.x-50,0,100+1,SCREEN_SIZE[1]]\n if pag.locateOnScreen(\"imgs/RT.png\",confidence=0.7,region=rt_line):\n print(\"OK\")\n break\n else:\n print(\"Not found active RT button.\")\n\nprint(\"...ボタンにポインタをおいてctrlを押してください。\")\nwhile True:\n if keyboard.is_pressed(\"esc\"): exit()\n if keyboard.is_pressed(\"ctrl\"):\n pos = pag.position()\n pag.move(100,0)\n pag.sleep(1)\n more_line = [pos.x-50,0,100+1,SCREEN_SIZE[1]]\n if pag.locateOnScreen(\"imgs/more.png\",confidence=0.7,region=more_line):\n print(\"OK\")\n break\n else:\n print(\"Not found active more button.\")\n\ndef click(img_path,**args):\n box = pag.locateOnScreen(img_path,**args)\n if box:\n pag.click(pag.center(box))\n return True\n else:\n return False\n\nwhile True:\n while True:\n if keyboard.is_pressed(\"esc\"): exit()\n found = click(\"imgs/RT.png\",region=rt_line,confidence=0.9,grayscale=False)\n pag.sleep(1)\n if not found:\n break\n pag.move(100,0)\n pag.sleep(1)\n click(\"imgs/RT-del.png\",confidence=0.7)\n pag.sleep(1)\n deleted_n[\"RT\"] += 1\n\n if keyboard.is_pressed(\"esc\"): exit()\n found = click(\"imgs/more.png\",region=more_line,confidence=0.7)\n pag.move(100,0)\n pag.sleep(1)\n \n if not click(\"imgs/del-1.png\",confidence=0.7) or not found:\n pag.scroll(-300)\n pag.sleep(1.3)\n continue\n\n pag.sleep(1)\n click(\"imgs/del-2.png\",confidence=0.7)\n pag.sleep(1)\n deleted_n[\"Tweet\"] += 1","repo_name":"tsubasa-km/twitter-elaser","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11063102725","text":"\nimport torch\nfrom torch.utils.data import DataLoader\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.transforms.functional as Ft\n\nimport torchvision\nfrom torchvision import models\nimport torchvision.transforms as T\nfrom torchvision.utils import draw_bounding_boxes,draw_segmentation_masks\n\nfrom torchvision.ops import masks_to_boxes\nfrom torchvision.io import read_image\nimport torch\nimport torchvision\nimport cv2\nimport argparse\nfrom PIL import Image\nfrom torchvision.transforms import transforms as transforms\nimport numpy as np\nfrom d2l import torch as d2l\nimport cv2\nimport colorsys\nfrom skimage import morphology\nimport imutils\n\nfrom torchsummary import summary\nimport random\nfrom scipy.ndimage import label,binary_closing,find_objects\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom time import time\nfrom natsort import natsorted\nimport os\n#os.environ[\"KMP_DUPLICATE_LIB_OK\"]=\"TRUE\"\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\n#%matplotlib inline \nfrom matplotlib.patches import Polygon\nfrom skimage.measure import find_contours\nfrom torchvision.transforms import transforms as transforms\n\nimport matplotlib.patches as mpatches\nfrom matplotlib import patches\nfrom tqdm import tqdm\nfrom torchvision.models.detection.faster_rcnn import FastRCNNPredictor\nfrom torchvision.models.detection.mask_rcnn import MaskRCNNPredictor\n#%% paths\nlocal_train = \"C:/Users/alber/Bureau/Development/Data/Images_data/cell_nucleus/train\"\n#local_train = 
\"C:/Users/alber/Bureau/Development/Data/Images_data/Warwick_QU/train\"\nlocal_train = \"C:/Users/alber/Bureau/Development/Data/Images_data/breast_echo/train\"\n\npth_name=\"maskrcnn_nucleus\"\npth_path_cluster=\"/bigdata/casus/optima/hemera_results/\"+pth_name+\"/\"\n\nroot_train=local_train\n#%% functions\n#%%% visu\ndef view(images,labels,n=2,std=1,mean=0):\n figure = plt.figure(figsize=(15,10))\n images=list(images)\n labels=list(labels)\n for i in range(n):\n out=torchvision.utils.make_grid(images[i])\n inp=out.cpu().numpy().transpose((1,2,0))\n inp=np.array(std)*inp+np.array(mean)\n inp=np.clip(inp,0,1) \n ax = figure.add_subplot(2,2, i + 1)\n ax.imshow(images[i].cpu().numpy().transpose((1,2,0)))\n l=labels[i]['boxes'].cpu().numpy()\n l[:,2]=l[:,2]-l[:,0]\n l[:,3]=l[:,3]-l[:,1]\n for j in range(len(l)):\n ax.add_patch(patches.Rectangle((l[j][0],l[j][1]),l[j][2],l[j][3],linewidth=1.5,edgecolor='r',facecolor='none')) \n plt.savefig(pth_path_cluster+'figure/images_test.pdf',format='pdf')\n \ndef view_mask(targets, output, n=2, cmap='Greys'):\n figure = plt.figure(figsize=(15,10))\n for i in range(n):\n # plot target (true) masks\n target_im = targets[i]['masks'][0].cpu().detach().numpy()\n for k in range(len(targets[i]['masks'])):\n target_im2 = targets[i]['masks'][k].cpu().detach().numpy()\n target_im2[target_im2>0.5] = 1\n target_im2[target_im2<0.5] = 0\n target_im = target_im+target_im2\n\n target_im[target_im>0.5] = 1\n target_im[target_im<0.5] = 0\n ax = figure.add_subplot(2,2, i+1)\n ax.imshow(target_im, cmap=cmap)\n # Plot output (predicted) masks\n output_im = output[i]['masks'][0][0, :, :].cpu().detach().numpy()\n for k in range(len(output[i]['masks'])):\n output_im2 = output[i]['masks'][k][0, :, :].cpu().detach().numpy()\n output_im2[output_im2>0.5] = 1\n output_im2[output_im2<0.5] = 0\n output_im = output_im+output_im2\n\n output_im[output_im>0.5] = 1\n output_im[output_im<0.5] = 0\n ax = figure.add_subplot(2,2, i+3)\n ax.imshow(output_im, cmap=cmap)\n plt.savefig(pth_path_cluster+'figure/mask_test.pdf',format='pdf')\n\ndef IoU(y_real, y_pred):\n # Intersection over Union loss function\n intersection = y_real*y_pred\n #not_real = 1 - y_real\n #union = y_real + (not_real*y_pred)\n union = (y_real+y_pred)-(y_real*y_pred)\n return np.sum(intersection)/np.sum(union)\n\ndef dice_coef(y_real, y_pred, smooth=1):\n intersection = y_real*y_pred\n union = (y_real+y_pred)-(y_real*y_pred)\n return np.mean((2*intersection+smooth)/(union+smooth))\n\ndef confusion_matrix(y_true, y_pred):\n y_true= y_true.flatten()\n y_pred = y_pred.flatten()*2\n cm = y_true+y_pred\n cm = np.bincount(cm, minlength=4)\n tn, fp, fn, tp = cm\n return tp, fp, tn, fn\n\ndef get_f1_score(y_true, y_pred):\n \"\"\"Return f1 score covering edge cases\"\"\"\n tn, fp, fn, tp = confusion_matrix(y_true, y_pred)\n f1_score = (2 * tp) / ((2 * tp) + fp + fn)\n\n return f1_score \n\ndef get_outputs(image, model, threshold,class_names):\n with torch.no_grad():\n # forward pass of the image through the modle\n outputs = model(image)\n names=[\"bck\",\"nucleus\"]\n # get all the scores\n scores = list(outputs[0]['scores'].detach().cpu().numpy())\n # index of those scores which are above a certain threshold\n thresholded_preds_inidices = [scores.index(i) for i in scores if i > threshold]\n thresholded_preds_count = len(thresholded_preds_inidices)\n # get the masks\n masks = (outputs[0]['masks']>0.5).squeeze().detach().cpu().numpy()\n # discard masks for objects which are below threshold\n masks = 
masks[:thresholded_preds_count]\n # get the bounding boxes, in (x1, y1), (x2, y2) format\n boxes = [[(int(i[0]), int(i[1])), (int(i[2]), int(i[3]))] for i in outputs[0]['boxes'].detach().cpu()]\n # discard bounding boxes below threshold value\n boxes = boxes[:thresholded_preds_count]\n # get the classes labels\n labels = [class_names[i] for i in outputs[0]['labels']]\n return masks, boxes, labels,scores\ndef random_colors(N, bright=True):\n \"\"\"\n Generate random colors.\n To get visually distinct colors, generate them in HSV space then\n convert to RGB.\n \"\"\"\n brightness = 1.0 if bright else 0.7\n hsv = [(i / N, 1, brightness) for i in range(N)]\n colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))\n random.shuffle(colors)\n return colors\ndef draw_segmentation_map(im, boxes, labels):\n \n image = np.array(im)\n N=boxes.shape[0]\n\n alpha = 1 \n beta = 0.6 # transparency for the segmentation map\n gamma = 0 # scalar added to each sum\n colors=random_colors(N)\n print(colors)\n for i in range(N):\n color=colors[i]\n start_point=(boxes[i][0],boxes[i][1])\n end_point =(boxes[i][2],boxes[i][3])\n #print(start_point,end_point)\n # draw the bounding boxes around the objects\n cv2.rectangle(image,start_point ,end_point , color, thickness=1)\n # put the label text above the objects\n cv2.putText(image , labels, (boxes[i][0], boxes[i][1]-10), \n cv2.FONT_HERSHEY_SIMPLEX, 1, color, \n thickness=2, lineType=cv2.LINE_AA)\n\n return image\ndef image_resize(image, width = None, height = None, inter = cv2.INTER_AREA):\n # initialize the dimensions of the image to be resized and\n # grab the image size\n dim = None\n (h, w) = image.shape[:2]\n\n # if both the width and height are None, then return the\n # original image\n if width is None and height is None:\n return image\n\n # check to see if the width is None\n if width is None:\n # calculate the ratio of the height and construct the\n # dimensions\n r = height / float(h)\n dim = (int(w * r), height)\n\n # otherwise, the height is None\n else:\n # calculate the ratio of the width and construct the\n # dimensions\n r = width / float(w)\n dim = (width, int(h * r))\n\n # resize the image\n resized = cv2.resize(image, dim, interpolation = inter)\n\n # return the resized image\n return resized\ndef draw_mask_box_labels(image, masks, boxes, labels,scores,fontsize,position):\n alpha = 1\n beta = 0.8 # transparency for the segmentation map\n gamma = 0 # scalar added to each sum\n COLORS = np.random.uniform(0, 255, size=(2, 3))\n print(masks.shape)\n for i in range(len(masks)):\n red_map = np.zeros_like(masks[i]).astype(np.uint8)\n green_map = np.zeros_like(masks[i]).astype(np.uint8)\n blue_map = np.zeros_like(masks[i]).astype(np.uint8)\n # apply a random color mask to each object\n color = COLORS[random.randrange(0, len(COLORS))]\n red_map[masks[i] == 1], green_map[masks[i] == 1], blue_map[masks[i] == 1] = color\n # combine all the masks into a single image\n segmentation_map = np.stack([red_map, green_map, blue_map], axis=2)\n #print(segmentation_map.shape)\n #convert the original PIL image into NumPy format\n image = np.array(image)\n # convert from RGB to OpenCV BGR format\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n #image = image_resize(image, height = 800)\n # apply mask on the image\n cv2.addWeighted(image, alpha, segmentation_map, beta, gamma, image)\n # draw the bounding boxes around the objects\n cv2.rectangle(image, boxes[i][0], boxes[i][1], color=color, \n thickness=1)\n # put the label text above the objects\n cv2.putText(image 
, labels[i]+\" \"+str(round(scores[i],3)), (boxes[i][0][0], boxes[i][0][1]+position), \n cv2.FONT_HERSHEY_SIMPLEX, fontsize, [255, 255, 255], \n thickness=1, lineType=cv2.LINE_AA)\n \n return image\n\n#%%% dataset \nclass NucleusCellDataset(object):\n def __init__(self, root, transforms=None): # transforms\n self.root = root\n # self.transforms = transforms\n self.transforms=[]\n if transforms!=None:\n self.transforms.append(transforms)\n self.imgs = list(natsorted(os.listdir(os.path.join(root, \"image\"))))\n \n self.masks = list(natsorted(os.listdir(os.path.join(root, \"mask\"))))\n\n def __getitem__(self, idx):\n img_path = os.path.join(self.root, \"image\", self.imgs[idx])\n mask_path = os.path.join(self.root, \"mask\", self.masks[idx])\n print(img_path,mask_path)\n \n img = Image.open(img_path)#.convert(\"RGB\")\n mask = Image.open(mask_path).convert('L') \n mask = np.array(mask)\n \"\"\"\n if \"benign\" in img_path:\n mask = mask/255\n elif \"malignant\" in img_path:\n mask = mask/255*2\n \"\"\"\n\n\n #mask[mask>0]=1\n\n #mask=morphology.remove_small_objects(mask==1, min_size=30, connectivity=True)\n #♠mask=binary_closing(mask,iterations=3)\n #mask,ref_num_features = label(mask)\n plt.imshow(mask)\n plt.show()\n \n\n obj_ids = np.unique(mask)\n print(\"obj_ids\",obj_ids)\n obj_ids = obj_ids[1:]\n # split the color-encoded mask into a set\n # of binary masks\n masks = mask == obj_ids[:, None, None]\n # get bounding box coordinates for each mask\n num_objs = len(obj_ids)\n #print(obj_ids)\n boxes = []\n for i in range(num_objs):\n pos = np.where(masks[i])\n #print(num_objs)\n #print(\"pos\",pos[1].shape)\n xmin = np.min(pos[1])\n xmax = np.max(pos[1])\n ymin = np.min(pos[0])\n ymax = np.max(pos[0])\n # Check if area is larger than a threshold\n A = abs((xmax-xmin) * (ymax-ymin)) \n boxes.append([xmin, ymin, xmax, ymax])\n\n #maskk_test=draw_segmentation_map(mask, np.array(boxes), \"\")\n #plt.imshow(maskk_test)\n #plt.show()\n img_test=draw_segmentation_map(img, np.array(boxes), \"\")\n plt.imshow(img_test)\n plt.show()\n #target[\"labels\"] = labels # Not sure if this is needed\n #print(\"labels\",target[\"labels\"].shape)\n return boxes\n def __len__(self):\n return len(self.imgs)\n#%% test dataset\n\"\"\"\ndataset_train = NucleusCellDataset(root_train, transforms=torchvision.transforms.ToTensor()) # get_transform(train=True)\ndata_loader_train = DataLoader(dataset_train, batch_size=4, shuffle=True,collate_fn=lambda x:list(zip(*x)),pin_memory=True)\nimages,labels=next(iter(data_loader_train))\n\n\"\"\"\n\n\n#%% tests 1 image\n\"\"\"\nlocal_train = 
\"C:/Users/alber/Bureau/Development/Data/Images_data/cell_nucleus/test\"\nnames=[\"8efed2e62c919e6d70a2ab548b1a33014877fe8a23f177ef25a9dee25ffe8842\",\"fe80a2cf3c93dafad8c364fdd1646b0ba4db056cdb7bdb81474f957064812bba\",\"fec226e45f49ab81ab71e0eaa1248ba09b56a328338dce93a43f4044eababed5\",\"ff3e512b5fb860e5855d0c05b6cf5a6bcc7792e4be1f0bdab5a00af0e18435c0\",\"feffce59a1a3eb0a6a05992bb7423c39c7d52865846da36d89e2a72c379e5398\",\"ff599c7301daa1f783924ac8cbe3ce7b42878f15a39c2d19659189951f540f48\",\"ff3407842ada5bc18be79ae453e5bdaa1b68afc842fc22fa618ac6e6599d0bb3\"]\nname=names[0]\nimage_path=local_train+\"/image/\"+name+\".png\"\nmask_path=local_train+\"/mask/\"+name+\".jpg\"\npth_nucleus_name=\"/14_01/remove_small_30_10epochs/remove_small_30.pth\"\n#pth_nucleus_name=\"/13_01/remove_small_20_25epochs/remove_small_20.pth\"\npth_path=\"C:/Users/alber/Bureau/Development/DeepLearning/training_results/cluster/maskrcnn/nucleus\"+pth_nucleus_name\nnum_classes = 2\nclass_names=[\"background\",\"nucleus\"]\n\"\"\"\n\"\"\"\nlocal_train = \"C:/Users/alber/Bureau/Development/Data/Images_data/Warwick_QU/test\"\nname=\"testA_52\"\nimage_path=local_train+\"/image/\"+name+\".bmp\"\n#mask_path=local_train+\"/mask/\"+name+\"_anno.bmp\"\npth_path=\"C:/Users/alber/Bureau/Development/DeepLearning/training_results/cluster/maskrcnn/warwick/maskrcnn.pth\"\nclass_names=[\"background\",\"glande\"]\nnum_classes = 2\n\"\"\"\nlocal_train = \"C:/Users/alber/Bureau/Development/Data/Images_data/breast_echo/train\"\nnames=[\"benign (200)\",\"benign (70)\",\"malignant (30)\"]\nname=names[2]\nimage_path=local_train+\"/image/\"+name+\".png\"\n#mask_path=local_train+\"/mask/\"+name+\"_anno.bmp\"\npth_path=\"C:/Users/alber/Bureau/Development/DeepLearning/training_results/cluster/maskrcnn/breast/16_01/test_not_all_mask_1b2m_2epochs/test_not_all_mask_1b2m.pth\"\nclass_names=[\"background\",\"benign\",\"malignant\"]\nnum_classes = 3\n\n\n#%%% Model\ndevice = torch.device('cuda')\n\n\n# load an instance segmentation model pre-trained pre-trained on COCO\nmodel = torchvision.models.detection.maskrcnn_resnet50_fpn_v2(weights=None)\n\n# get number of input features for the classifier\nin_features = model.roi_heads.box_predictor.cls_score.in_features\n# replace the pre-trained head with a new one\nmodel.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)\n# now get the number of input features for the mask classifier\nin_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels\n# and replace the mask predictor with a new one\nmodel.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,256, num_classes)\n\nmap_location = torch.device('cuda')\n# load the modle on to the computation device and set to eval mode\nmodel.load_state_dict(torch.load(pth_path,map_location=map_location))\nmodel.to(device).eval()\n#model=model.double()\n\n#%%% images\n# transform to convert the image to tensor\ntransform = transforms.Compose([transforms.ToTensor()])\n\nimage = Image.open(image_path).convert('RGB')\n\n#image = imutils.resize(image, width=1280)\n\norig_image = image.copy()\n# transform the image\nimage = transform(image)\nimage = image.unsqueeze(0).to(device)\nmasks, boxes, labels,scores = get_outputs(image, model, 0.99,class_names)\n\n\nresult = draw_mask_box_labels(orig_image, masks, boxes, labels,scores,0.3,4)\n# visualize the image\n\n#print(type(result))\n#result = image_resize(result, height = 600)\n\n\n#result=cv2.resize(result, (256*2,256*2), interpolation = cv2.INTER_AREA)\ncv2.imshow('Segmented image', 
np.array(result))\ncv2.waitKey(0)\n","repo_name":"Albert-Saporta/CNN","sub_path":"MaskRCNN_tests/test_bounding_boxes.py","file_name":"test_bounding_boxes.py","file_ext":"py","file_size_in_byte":15556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"43119413147","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution(object):\n def oddEvenList(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n curr = head\n is_odd = True\n odd_head, odd_tail, even_head, even_tail = None, None, None, None\n while not curr is None:\n if is_odd:\n if odd_head:\n odd_tail.next = curr\n odd_tail = curr\n else: \n odd_head = curr\n odd_tail = curr\n else:\n if even_head:\n even_tail.next = curr\n even_tail = curr\n else:\n even_head = curr\n even_tail = curr\n is_odd = not is_odd\n curr = curr.next\n if even_head:\n odd_tail.next = even_head\n even_tail.next = None\n return odd_head\n ","repo_name":"hexecute/leetcode-answers","sub_path":"problems/odd_even_linked_list/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"3611380387","text":"# coding=utf-8\n\nimport pymysql\nimport traceback\nimport datetime\n# MySQL settings\nmysql_host = '127.0.0.1'\nmysql_user = 'root'\nmysql_passwd = '1qazXSW@_xyz!'\nmysql_port = '3306'\nmysql_database = 'agr_data'\n\n\ndef my_connect():\n \"\"\"Connect to the database\"\"\"\n\n global conn, cursor\n\n # print MySQLdb.version_info\n\n \n conn = pymysql.connect(host=mysql_host, user=mysql_user, passwd=mysql_passwd, charset='utf8', autocommit=True,\n db=mysql_database)\n \n cursor = conn.cursor()\n\ndef add_app(ID,Name,Ip,Port):\n # add\n sql = \"insert into application(id,name,ip,port) values(%s,%s,%s,%s)\" % (ID,\"'\"+ Name + \"'\" ,\"'\" + Ip + \"'\",Port)\n #param = (ID,Ip,Name,Type)\n my_connect() # open the connection\n cursor.execute(sql)\n cursor.close()\n conn.close()\n # conn.rollback()\n\n\ndef update_app(dictory):\n # update\n my_connect()\n sql = \"update application set \"\n flag = 0\n if \"name\" in dictory:\n flag = 1\n sql += \"name = '%s'\" %(dictory[\"name\"])\n if \"ip\" in dictory:\n if flag == 1:\n sql += \",ip = '%s'\" %(dictory[\"ip\"])\n else:\n flag = 1\n sql += \"ip = '%s'\" %(dictory[\"ip\"])\n if \"port\" in dictory:\n if flag == 1:\n sql += \",port = '%s'\" %(dictory[\"port\"])\n else:\n flag = 1\n sql += \"port = '%s'\" %(dictory[\"port\"])\n sql += \" where id = %d\" %(dictory[\"id\"])\n \n #sql = \"update topo set ip='%s',name='%s',type='%s' where id='%d'\" % (Ip,Name,Type,ID)\n try:\n cursor.execute(sql)\n cursor.close()\n conn.close()\n except:\n traceback.print_exc()\n\n\ndef Select_app(dictory):\n # query\n my_connect()\n if 'name' in dictory:\n if dictory['name'] == 'total':\n cursor.execute(\"SELECT * from application\")\n else:\n cursor.execute(\"SELECT * from application where name ='%s'\"% dictory['name'])\n elif 'ip' in dictory:\n cursor.execute(\"SELECT * from application where ip = '%s'\" % dictory['ip'])\n elif 'port' in dictory:\n cursor.execute(\"SELECT * from application where port = '%s'\" % dictory['port'])\n else:\n cursor.execute(\"SELECT * from application where id =%d\" % dictory['id'])\n data = cursor.fetchall()\n #for row in data:\n # note: int values need to be converted with str()\n #print('id: ', row[0], ' name: ', row[1], ' age ', row[2])\n # commit the transaction\n 
cursor.close() # close the cursor\n conn.close() # release database resources\n return data\n\ndef delete_app(ID):\n # delete\n my_connect()\n sql = \"delete from application where id='%d'\" % (ID)\n # parama =(ID)\n cursor.execute(sql)\n cursor.close()\n conn.close()\n\ndef select_ip_name():\n my_connect()\n sql = \"SELECT name,ip from application\"\n cursor.execute(sql)\n data = cursor.fetchall()\n cursor.close()\n conn.close()\n return(data)\n\ndef record_ip_health(ip,health,time,name):\n # add\n sql = \"insert into iphealth(ip,health,time,name) values('%s','%s','%s','%s')\" % (ip,health,time,name)\n #param = (ID,Ip,Name,Type)\n my_connect() # open the connection\n cursor.execute(sql)\n cursor.close()\n conn.close()\n # conn.rollback()\n\nif __name__ == \"__main__\":\n data = select_ip_name()\n print(data[0])","repo_name":"Styfjion/NongweiCode","sub_path":"mysql_iplist2.py","file_name":"mysql_iplist2.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"73189736193","text":"\"\"\"The setup script.\"\"\"\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install\nfrom setuptools.command.develop import develop\nfrom contextlib import suppress\nfrom pkg_resources import parse_version\nimport sys\n\n\nclass InstallCommand(install):\n user_options = install.user_options + [\n ('without-leveldb', None, 'Do not install leveldb requirements'),\n ]\n\n def initialize_options(self):\n super().initialize_options()\n # Initialize options\n self.without_leveldb = None\n\n def run(self): # Use options\n if self.without_leveldb:\n print(\"install command called!\")\n with suppress(ValueError):\n idx = list(map(lambda i: \"plyvel\" in i, self.distribution.install_requires)).index(True)\n self.distribution.install_requires.pop(idx)\n\n super().run()\n\n\nclass DevelopCommand(develop):\n user_options = develop.user_options + [\n ('without-leveldb', None, 'Do not install leveldb requirements'),\n ]\n\n def initialize_options(self):\n super().initialize_options()\n # Initialize options\n self.without_leveldb = None\n\n def run(self):\n # Use options\n if self.without_leveldb:\n with suppress(ValueError):\n idx = list(map(lambda i: \"plyvel\" in i, self.distribution.install_requires)).index(True)\n self.distribution.install_requires.pop(idx)\n\n super().run()\n\ntry:\n from pip._internal.req import parse_requirements\n from pip import __version__ as __pip_version\n pip_version = parse_version(__pip_version)\n if (pip_version >= parse_version(\"20\")):\n from pip._internal.network.session import PipSession\n elif (pip_version >= parse_version(\"10\")):\n from pip._internal.download import PipSession\nexcept ImportError: # pip version < 10.0\n from pip.req import parse_requirements\n from pip.download import PipSession\n\nwith open('README.rst') as readme_file:\n readme = readme_file.read()\n\n\nleveldb_requirements = [\"plyvel==1.3.0\" if sys.platform in [\"darwin\", \"linux\"] else \"plyvel-win32\"]\n\n# get the requirements from requirements.txt\ninstall_reqs = parse_requirements('requirements.txt', session=PipSession())\nreqs = []\nfor ir in install_reqs:\n if hasattr(ir, 'requirement'):\n reqs.append(str(ir.requirement))\n else:\n reqs.append(str(ir.req))\nsetup(\n name='neo-mamba',\n python_requires='>=3.7',\n version='0.7',\n description=\"Python SDK for the NEO 3 blockchain\",\n long_description=readme,\n long_description_content_type=\"text/x-rst\",\n author=\"Erik van den Brink\",\n author_email='erik@coz.io',\n 
maintainer=\"Erik van den Brink\",\n maintainer_email='erik@coz.io',\n url='https://github.com/CityOfZion/neo-mamba',\n packages=find_packages(include=['neo3']),\n include_package_data=True,\n install_requires=reqs,\n extras_require={\"leveldb\": leveldb_requirements},\n license=\"MIT license\",\n zip_safe=False,\n keywords='neo3, python, SDK',\n entry_points={\n 'sphinx.html_themes': [\n 'neo3 = docs.source._theme',\n ]\n },\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n cmdclass={\n 'install': InstallCommand,\n 'develop': DevelopCommand\n }\n)\n","repo_name":"ixje/neo-voting-backend","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3600,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"71069591555","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as f\nfrom torch.utils.data import DataLoader\nfrom pickle import dump\n\nimport os, time\nimport argparse\nimport copy\n\nfrom model import Pyramid\nfrom timeseries import TimeSeriesWithAnomalies, NormalTimeSeries, AbnormalTimeSeries\nfrom softdtw_cuda import SoftDTW\nfrom utils import * \n\ndef evaluation(args, model, data_loader, pyra, train_loader=None):\n model.eval()\n total, total_loss, total_bce_loss = 0, 0, 0\n wscores, wlabels = [], []\n dscores, dlabels = [], []\n f1_result, iou_result = [], []\n wresult, dresult = np.zeros(3), np.zeros(3)\n\n for itr, batch in enumerate(data_loader):\n data = batch['data'].cuda()\n wlabel = batch['wlabel'].cuda()\n dlabel = batch['dlabel'].cuda()\n \n with torch.no_grad():\n out = model.get_scores(data)\n bce_loss = pyra.last_loss(out['wscore'], wlabel)\n # dtw_loss = model.dtw_loss(out['output'], wlabel).mean(0)\n loss = bce_loss\n\n total += data.size(0)\n total_bce_loss += bce_loss.item() * data.size(0)\n # total_dtw_loss += dtw_loss.item() * data.size(0)\n total_loss += loss.item() * data.size(0)\n\n # weak prediction 使用get_score函数\n wresult += compute_wacc(out['wpred'], wlabel)\n wscores.append(out['wscore'])\n wlabels.append(wlabel)\n\n # dense prediction 使用get_dpred函数\n dpred = model.get_dpred(out['output'], out['wpred']) # dense决策依赖于weak决策\n dresult += compute_dacc(dpred, dlabel)\n dscores.append(out['dscore'])\n dlabels.append(dlabel)\n case = compute_single_case(dpred, dlabel)\n # print(case[0][wlabel.detach().cpu().numpy()>0.5])\n f1_result.append(case[0][wlabel.detach().cpu().numpy()>0.5])\n iou_result.append(case[1][wlabel.detach().cpu().numpy()>0.5])\n \n f1_result = torch.cat(f1_result, dim=0)\n iou_result = torch.cat(iou_result, dim=0) \n\n\n if train_loader is not None:\n for itr, batch in enumerate(train_loader):\n data = batch['data'].cuda()\n wlabel = batch['wlabel'].cuda()\n \n with torch.no_grad():\n out = model.get_scores(data)\n wscores.append(out['wscore'])\n wlabels.append(wlabel)\n\n ret = {}\n ret['loss'] = total_loss / total\n ret['bce_loss'] = total_bce_loss / total\n # ret['dtw_loss'] = total_dtw_loss / total\n \n\n # Weak and dense results under predefined threshold\n ret['wprecision'], ret['wrecall'], ret['wf1'], ret['wIoU'] = compute_precision_recall(wresult)\n ret['dprecision'], ret['drecall'], ret['df1'], ret['dIoU'] = compute_precision_recall(dresult)\n\n wscores, wlabels = 
torch.cat(wscores, dim=0), torch.cat(wlabels, dim=0)\n dscores, dlabels = torch.cat(dscores, dim=0), torch.cat(dlabels, dim=0)\n\n # Weak Result Curve and best\n ret['wauc'] = compute_auc(wscores, wlabels)\n ret['wauprc'] = compute_auprc(wscores, wlabels)\n ret['wbestf1'], ret['global_threshold'] = compute_bestf1(wscores, wlabels, return_threshold=True)\n wbestpred = (wscores >= ret['global_threshold']).type(torch.cuda.FloatTensor)\n wbestresult = compute_dacc(wbestpred, wlabels)\n ret['wbprecision'], ret['wbrecall'], ret['wbf1'], ret['wbIoU'] = compute_precision_recall(wbestresult)\n\n # Dense Result Curve and best\n ret['dauc'] = compute_auc(dscores, dlabels)\n ret['dauprc'] = compute_auprc(dscores, dlabels)\n ret['dbestf1'], ret['local_threshold'] = compute_bestf1(dscores, dlabels, return_threshold=True)\n dbestpred = (dscores >= ret['local_threshold']).type(torch.cuda.FloatTensor)\n dbestresult = compute_dacc(dbestpred, dlabels)\n ret['dbprecision'], ret['dbrecall'], ret['dbf1'], ret['dbIoU'] = compute_precision_recall(dbestresult)\n return ret, f1_result, iou_result\n\n\ndef test(args, train_dataset, valid_dataset, test_dataset):\n\n train_loader = DataLoader(dataset=train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=1)\n valid_loader = DataLoader(dataset=valid_dataset, batch_size=args.batch_size, shuffle=True, num_workers=1)\n test_loader = DataLoader(dataset=test_dataset, batch_size=args.batch_size, shuffle=True, num_workers=1)\n\n # Select Device\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n dtw = SoftDTW(use_cuda=True, gamma=args.gamma, normalize=False)\n\n model = Pyramid(input_size=train_dataset.input_size, seq_size=args.split_size, ary_size=args.ary_size, inner_size=args.inner_size,\n d_model=args.d_model, n_layer=args.n_layer, n_head=args.n_head, d_k=args.d_k, d_v=args.d_v, d_inner_hid=args.d_inner_hid, agg_type=args.agg_type, dropout=args.dropout,\n pooling_type='max', granularity=1, local_threshold=0.5, global_threshold=0.5, beta=10, dtw=dtw)\n model = torch.load(f'result/{args.dataset}/model_{args.agg_type}_{args.pooling_type}.pkl')\n print(\"============================\")\n print(\" Pretrained model loaded \")\n print(\"============================\")\n\n\n model.cuda()\n \n pyra = PyraMIL()\n\n # Start Testing\n\n valid_result, _, _ = evaluation(args, model, valid_loader, pyra)\n model.global_threshold = valid_result['global_threshold']\n test_result, f1_result, iou_result = evaluation(args, model, test_loader, pyra)\n\n total_results = [f1_result.detach().cpu().numpy(), iou_result.detach().cpu().numpy()]\n with open(f'result/{args.dataset}/results_{args.agg_type}_{args.pooling_type}.npy','wb') as f:\n dump(total_results, f)\n\n\n print(\"============================\")\n print(\" Final evaluation results \")\n print(\"============================\")\n\n print('\\tTest (WEAK) AUC : {:.6f}, AUPRC : {:.6f}, Best F1 : {:.6f}, Precision : {:.6f}, Recall : {:.6f}'.format(\n test_result['wauc'], test_result['wauprc'], test_result['wbestf1'], test_result['wprecision'], test_result['wrecall']))\n print('\\tTest (DENSE) F1 : {:.6f}, Precision : {:.6f}, Recall : {:.6f}, IoU : {:.6f}'.format(\n test_result['df1'], test_result['dprecision'], test_result['drecall'], test_result['dIoU']))\n\ndef main(args):\n\n train_dataset = TimeSeriesWithAnomalies(args.data_dir, args.split_size, 'train')\n valid_dataset = TimeSeriesWithAnomalies(args.data_dir, args.split_size, 'valid')\n test_dataset = TimeSeriesWithAnomalies(args.data_dir, 
args.split_size, 'test')\n\n\n test(args, train_dataset, valid_dataset, test_dataset)\n\nif __name__ == '__main__':\n\n # Arguments parsing\n parser = argparse.ArgumentParser()\n\n # for C-ary tree Framework\n parser.add_argument('--ary_size', default=2, type=int, help='N-ary tree')\n parser.add_argument('--inner_size', default=3, type=int, help='adjacent node used in the same layer')\n parser.add_argument('--pooling_type', default='max', type=str, help='avg | max')\n parser.add_argument('--local_threshold', default=0.3, type=float, help='score threshold to identify anomalies')\n parser.add_argument('--granularity', default=4, type=int, help='granularity for sequential pseudo-labels') # default: 4\n parser.add_argument('--beta', default=0.1, type=float, help='margin size for the alignment loss')\n parser.add_argument('--gamma', default=0.1, type=float, help='smoothing for differentiable DTW')\n\n\n # for Pyramid Net\n parser.add_argument('--d_model', default=64, type=int, help='hidden dimension for attn')\n parser.add_argument('--d_k', default=64, type=int, help='key dimension')\n parser.add_argument('--d_v', default=64, type=int, help='value dimension')\n parser.add_argument('--d_inner_hid', default=32, type=int, help='hidden dimension for ffn')\n parser.add_argument('--n_head', default=5, type=int, help='number of attention head')\n parser.add_argument('--n_layer', default=2, type=int, help='# of layers in the dicnn')\n parser.add_argument('--dropout', type=float, default=0.5, help='dropout probability')\n\n # for Pretrain\n parser.add_argument('--fine_tune', default=False, action='store_true', help='whether to fine tune the pretrained model')\n \n \n # for Optimization\n parser.add_argument('--batch_size', default=32, type=int, help='batch size')\n parser.add_argument('--n_epochs', default=200, type=int, help=\"# of training epochs\")\n parser.add_argument('--learning_rate', default=0.0001, type=float, help='learning rate')\n parser.add_argument('--gpuidx', default=0, type=int, help='gpu index')\n parser.add_argument('--patience', default=50, type=int, help='# of patience for early stopping')\n parser.add_argument('--stopping', default='f1', type=str, help='f1 | loss')\n parser.add_argument('--agg_type', default='max', type=str, help='avg | max | conv')\n parser.add_argument('--seed', default=0, type=int)\n \n # for Dataset\n parser.add_argument('--dataset', default='CC', type=str, help='EMG | GHL | SMD | SMAP | PSM | MSL')\n parser.add_argument('--split_size', default=120, type=int, help='split size for preprocessing the data')\n\n args = parser.parse_args()\n\n # GPU setting\n os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'\n os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpuidx)\n\n # Random seed initialization\n np.random.seed(args.seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n torch.manual_seed(args.seed)\n torch.cuda.manual_seed_all(args.seed)\n import random\n random.seed(args.seed)\n\n args.data_dir = './data/' + args.dataset\n args.load_model_path = f'result/{args.dataset}/pretrain_model.pkl'\n\n print(args)\n main(args=args)\n","repo_name":"fly-orange/TreeMIL","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":9747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15887520883","text":"\"\"\"Every project implementing ``benchalerts`` may want different input and output\nchannels for their alerts. 
This module implements a relatively simple framework for\ncomposing alert pipelines out of common steps.\n\"\"\"\n\nimport abc\nfrom traceback import format_exc\nfrom typing import Any, Dict, List, Optional\n\n\nclass AlertPipelineStep(abc.ABC):\n \"\"\"One step of a benchalerts pipeline.\n\n Parameters\n ----------\n step_name\n The name for this step. If not given, will default to this class's name.\n \"\"\"\n\n def __init__(self, step_name: Optional[str]) -> None:\n self.step_name = step_name or self.__class__.__name__\n\n @abc.abstractmethod\n def run_step(self, previous_outputs: Dict[str, Any]) -> Any:\n \"\"\"Run this step.\n\n Parameters\n ----------\n previous_outputs\n A dict of previous steps' outputs in the pipeline, keyed by step name.\n\n Returns\n -------\n Any\n Anything (sent to subsequent steps via the ``previous_outputs`` dict).\n \"\"\"\n\n\nclass AlertPipelineErrorHandler(abc.ABC):\n \"\"\"A class to handle errors during the running of a benchalerts pipeline.\"\"\"\n\n @abc.abstractmethod\n def handle_error(self, exc: BaseException, traceback: str) -> None:\n \"\"\"Handle an error that may have happened during a pipeline run.\n\n Parameters\n ----------\n exc\n The exception that was raised.\n traceback\n A string of the traceback.\n \"\"\"\n\n\nclass AlertPipeline:\n \"\"\"A structure for running a sequence of configurable ``AlertPipelineStep``\n instances.\n\n Parameters\n ----------\n steps\n A list of ``AlertPipelineStep`` instances.\n error_handlers\n An optional list of ``AlertPipelineErrorHandler`` instances to handle any errors\n that may arise before raising them.\n \"\"\"\n\n def __init__(\n self,\n steps: List[AlertPipelineStep],\n error_handlers: Optional[List[AlertPipelineErrorHandler]] = None,\n ) -> None:\n self.steps = steps\n self.error_handlers = error_handlers or []\n\n def run_pipeline(self) -> Dict[str, Any]:\n \"\"\"Run the pipeline.\n\n Returns\n -------\n Dict[str, Any]\n All steps' outputs, keyed by step name.\n \"\"\"\n step_outputs: Dict[str, Any] = {}\n\n try:\n for step in self.steps:\n step_outputs[step.step_name] = step.run_step(\n previous_outputs=step_outputs\n )\n\n except Exception as exc:\n for error_handler in self.error_handlers:\n error_handler.handle_error(exc=exc, traceback=format_exc())\n raise\n\n return step_outputs\n","repo_name":"conbench/conbench","sub_path":"benchalerts/benchalerts/alert_pipeline.py","file_name":"alert_pipeline.py","file_ext":"py","file_size_in_byte":2741,"program_lang":"python","lang":"en","doc_type":"code","stars":93,"dataset":"github-code","pt":"61"} +{"seq_id":"18057135066","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def maxPathSum(self, root: TreeNode) -> int:\n self.ans = -float('inf')\n self.search(root)\n return self.ans\n def search(self,node):\n if not node:\n return 0\n left_sum = max(self.search(node.left),0)\n right_sum = max(self.search(node.right),0)\n all_sum = node.val + left_sum + right_sum\n self.ans = max(self.ans,all_sum)\n return max(node.val + left_sum,node.val + right_sum)\n \n \n","repo_name":"hzhang934748656/hello-world","sub_path":"Leetcode/Leetcode124.py","file_name":"Leetcode124.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23593464411","text":"#!/usr/bin/python\n\nimport re\n\nfilename = \"B-large\"\ninput_filename = filename + 
\".in\"\noutput_filename = filename + \".out\"\n\ndef zeros(H, W):\n\tout = []\n\trow = []\n\tfor i in range(W):\n\t\trow.append(0)\n\tfor i in range(H):\n\t\tnewrow = list(row)\n\t\tout.append(newrow)\n\treturn out\n\ndef substitute(map, H, W, pattern, repl):\n\tfor i in range(H):\n\t\tfor j in range(W):\n\t\t\tif map[i][j] == pattern:\n\t\t\t\tmap[i][j] = repl\n\treturn map\n\ndef do_basins(map, H, W):\n\tsink = zeros(H, W)\n\tx = 0\n\ty = 0\n\tmark = 1\n\tmovs = [(-1,0), (0,-1), (0,1), (1,0)]\n\n\twhile ( x < H and y < W ):\n\t\tpx = x\n\t\tpy = y\n\t\twhile (True):\n\t\t\tsink[px][py] = mark\n\t\t\tminval = map[px][py]\n\t\t\toptmove = (0, 0)\n\t\t\tfor pair in movs:\n\t\t\t\tnewpx = px + pair[0]\n\t\t\t\tnewpy = py + pair[1]\n\t\t\t\tif ( newpx >= 0 and newpx < H and newpy >=0 and newpy < W):\n\t\t\t\t\tif map[newpx][newpy] < minval:\n\t\t\t\t\t\tminval = map[newpx][newpy]\n\t\t\t\t\t\toptmove = pair\n\t\t\tif (optmove == (0, 0)):\n\t\t\t\tbreak\n\t\t\tnewpx = px + optmove[0]\n\t\t\tnewpy = py + optmove[1]\n\t\t\tif sink[newpx][newpy] == 0:\n\t\t\t\tsink[newpx][newpy] == mark\n\t\t\t\tpx = newpx\n\t\t\t\tpy = newpy\n\t\t\telse:\n\t\t\t\tsink = substitute(sink, H, W, mark, sink[newpx][newpy])\n\t\t\t\tbreak\n\t\tmark += 1\n\t\twhile (sink[x][y]!=0):\n\t\t\tif y < W-1:\n\t\t\t\ty += 1\n\t\t\t\tcontinue\n\t\t\tif x == H-1:\n\t\t\t\treturn sink\n\t\t\tif y == W-1:\n\t\t\t\ty = 0\n\t\t\t\tx += 1\n\t\t\t\tcontinue\n\treturn sink\n\ndef main():\n\tfi = open(input_filename,\"r\")\n\tfo = open(output_filename,\"w\")\n\tlist = \"abcdefghijklmnopqrstuvwxyz\"\n\n\tT = int(fi.readline())\n\tfor case in range(T):\n\t\tl = fi.readline().split()\n\t\t(H, W) = (int(l[0]), int(l[1]))\n\t\tmap = []\n\t\tfor i in range(H):\n\t\t\trow = []\n\t\t\tfor val in fi.readline().split():\n\t\t\t\trow.append(int(val))\n\t\t\tmap.append(row)\n\t\tout = do_basins(map, H, W)\n\t\tcount = 0\n\t\tmapping = {}\n\t\tfo.write(\"Case #\" + str(case+1) + \": \" + \"\\n\")\n\t\tfor i in range(H):\n\t\t\tfor j in range(W):\n\t\t\t\tif mapping.has_key(out[i][j]):\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tmapping[out[i][j]] = list[count]\n\t\t\t\t\tcount += 1\n\t\t\t\tfo.write(mapping[out[i][j]] + \" \")\n\t\t\tfo.write(\"\\n\")\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_35/306.py","file_name":"306.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33075555578","text":"\r\n\r\nwhile True:\r\n choice = int(input(\"Welcome to McDonalds, press 1 for drink, 2 for burgers, and 3 for sides \"))\r\n if not 1 <= choice <= 3: # catches invalid input\r\n print(\"Invalid Input\")\r\n else:\r\n if choice == 1:\r\n a = int(input(\"1. Sprite, 2. Coke, 3. Mountain Dew, 4. Nestea, 5. Fanta 6. Exit. Type the number \"))\r\n lst = [\"Sprite\", \"Coke\", \"Mountain Dew\", \"Nestea\", \"Fanta\"] # 2 lists, a takes the index of 2 lists to print\r\n v2 = list(i for i in range(1, 6))\r\n if not 1 <= a <= 5: # checks for invalid input\r\n print(\"Invalid input\")\r\n if a == 6:\r\n break\r\n print(lst[a - 1], \"costs $\" + str(v2[a - 1]) + \" for a can\")\r\n elif choice == 2: # same as the top\r\n b = int(input(\"1. Big Mac 2. Quarter Pounder 3. Mc Fllet 4. Mc Chicken 5. Jr Burger 6. Exit. 
Type the number \"))\r\n lst = [\"Big Mac\", \"Quarter Pounder\", \"Mc Fillet\", \"Mc Chicken\", \"Jr Burger\"]\r\n v2 = list(i for i in range(5, 10))\r\n if not 1 <= b <= 5:\r\n print(\"Invalid input\")\r\n if b == 6: # I added this, basically this checks if its 6\r\n break # break means to break the loop, rn its while True which means forever because there is no\r\n # counter argument, this tells it to break, otherwise it will continue because its infinite\r\n print(lst[b - 1], \"costs $\" + str(v2[b - 1]), \"each\")\r\n else:\r\n c = int(input(\"1. Fries 2. Onion Rings 3. Poutine 4. Caeser Salad 5. Hash Brown 6. Exit. Type the number \"))\r\n lst = [\"Fries\", \"Onion Rings\", \"Poutine\", \"Caeser Salad\", \"Hash Brown\"]\r\n v2 = list(i for i in range(2, 7))\r\n if not 1 <= c <= 5:\r\n print(\"Invalid input\")\r\n if c == 6:\r\n break\r\n print(lst[c - 1], \"costs $\" + str(v2[c - 1]), \"each\")\r\n\r\n\r\n\r\n","repo_name":"Coders222/Shared","sub_path":"Comp Sci Gr 10/Cohort B work/Revised McDonalds.py","file_name":"Revised McDonalds.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1351637882","text":"import typing\nfrom dataclasses import dataclass, field\n\nimport morpfw\nfrom morpcc.deform.codewidget import CodeWidget\nfrom morpcc.deform.richtextwidget import RichTextWidget\nfrom morpcc.preparer.html import HTMLSanitizer\nfrom morpfw.validator.field import valid_namespaced_identifier\n\n\n@dataclass\nclass EndpointSchema(morpfw.Schema):\n\n name: typing.Optional[str] = field(\n default=None,\n metadata={\n \"required\": True,\n \"editable\": False,\n \"validators\": [valid_namespaced_identifier],\n },\n )\n\n title: typing.Optional[str] = field(default=None, metadata={\"required\": True})\n description: typing.Optional[str] = field(default=None, metadata={\"format\": \"text\"})\n notes: typing.Optional[str] = field(\n default=None,\n metadata={\n \"format\": \"text/html\",\n \"preparers\": [HTMLSanitizer()],\n \"deform.widget\": RichTextWidget(),\n },\n )\n\n __unique_constraint__ = [\"name\"]\n","repo_name":"morpframework/morpcc_ttw","sub_path":"morpcc_ttw/endpoint/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29917616627","text":"import pygame\n\nclass Button:\n def __init__(self, x, y, \n radius, text,\n action, \n color=(200, 200, 200),\n text_color=(0, 0, 0)):\n self.x = x\n self.y = y\n self.radius = radius\n self.text = text\n self.action = action\n self.color = color\n self.text_color = text_color\n\n def draw(self, screen, font):\n pygame.draw.circle(screen, self.color,\n (self.x, self.y), self.radius)\n text_surface = font.render(self.text,\n True, self.text_color)\n screen.blit(text_surface, (self.x - text_surface.get_width() // 2,\n self.y - text_surface.get_height() // 2))\n\n def is_mouse_over(self, mouse_pos):\n distance = ((self.x - mouse_pos[0])**2 + (self.y - mouse_pos[1])**2)**0.5\n return distance <= self.radius\n","repo_name":"cxbxmxcx/LPGDwithChatGPT","sub_path":"chapter_7/isometric_world_v.3/button.py","file_name":"button.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8009874775","text":"import smbus\nimport time\n\nclass BME280:\n #address\n BME280_ADDRESS = 0x76\n #registers\n REG_ID = 0xD0\n REG_ID_STATUS = 
0xF3\n REG_CTRL_MEAS = 0xF4\n REG_CTRL_HUM = 0xF2\n REG_START_DATA = 0xF7\n REG_DIG_T1 = 0x88\n REG_DIG_T2 = 0x8A\n REG_DIG_T3 = 0x8C\n REG_DIG_P1 = 0x8E\n REG_DIG_P2 = 0x90\n REG_DIG_P3 = 0x92\n REG_DIG_P4 = 0x94\n REG_DIG_P5 = 0x96\n REG_DIG_P6 = 0x98\n REG_DIG_P7 = 0x9A\n REG_DIG_P8 = 0x9C\n REG_DIG_P9 = 0x9E\n REG_DIG_H1 = 0xA1\n REG_DIG_H2 = 0xE1\n REG_DIG_H3 = 0xE3\n REG_DIG_H4 = 0xE4\n REG_DIG_H5 = 0xE5\n REG_DIG_H6 = 0xE7\n #connection\n __bus = None\n #chip ID\n __chipID = 0\n #calibration values for temperature\n __dig_T = {\n 1: 0.0,\n 2: 0.0,\n 3: 0.0\n }\n #calibration values for pressure\n __dig_P = {\n 1: 0.0,\n 2: 0.0,\n 3: 0.0,\n 4: 0.0,\n 5: 0.0,\n 6: 0.0,\n 7: 0.0,\n 8: 0.0,\n 9: 0.0\n }\n #calibration values for humidity\n __dig_H = {\n 1: 0.0,\n 2: 0.0,\n 3: 0.0,\n 4: 0.0,\n 5: 0.0,\n 6: 0.0\n }\n #raw data\n __raw_data = {\n 'pressure': 0.0,\n 'temperature': 0.0,\n 'humidity':0.0,\n 't_fine': 0\n }\n #compensated_data\n data = {\n 'pressure_Pa': 0.0,\n 'temperature_deg_C': 0.0,\n 'humidity_RH':0.0\n }\n\n def __init__(self):\n self.__bus = smbus.SMBus(1)\n self.__chipID = self.__bus.read_byte_data(self.BME280_ADDRESS, self.REG_ID)\n\n self.__dig_T[1] = self.__bus.read_word_data(self.BME280_ADDRESS, self.REG_DIG_T1) & 0xFFFF\n self.__dig_T[2] = (lambda x: (x - 65536) if x > 32767 else x)(self.__bus.read_word_data(self.BME280_ADDRESS, self.REG_DIG_T2) & 0xFFFF)\n self.__dig_T[3] = (lambda x: (x - 65536) if x > 32767 else x)(self.__bus.read_word_data(self.BME280_ADDRESS, self.REG_DIG_T3) & 0xFFFF)\n\n self.__dig_P[1] = self.__bus.read_word_data(self.BME280_ADDRESS, self.REG_DIG_P1) & 0xFFFF\n self.__dig_P[2] = (lambda x: (x - 65536) if x > 32767 else x)(self.__bus.read_word_data(self.BME280_ADDRESS, self.REG_DIG_P2) & 0xFFFF)\n self.__dig_P[3] = (lambda x: (x - 65536) if x > 32767 else x)(self.__bus.read_word_data(self.BME280_ADDRESS, self.REG_DIG_P3) & 0xFFFF)\n self.__dig_P[4] = (lambda x: (x - 65536) if x > 32767 else x)(self.__bus.read_word_data(self.BME280_ADDRESS, self.REG_DIG_P4) & 0xFFFF)\n self.__dig_P[5] = (lambda x: (x - 65536) if x > 32767 else x)(self.__bus.read_word_data(self.BME280_ADDRESS, self.REG_DIG_P5) & 0xFFFF)\n self.__dig_P[6] = (lambda x: (x - 65536) if x > 32767 else x)(self.__bus.read_word_data(self.BME280_ADDRESS, self.REG_DIG_P6) & 0xFFFF)\n self.__dig_P[7] = (lambda x: (x - 65536) if x > 32767 else x)(self.__bus.read_word_data(self.BME280_ADDRESS, self.REG_DIG_P7) & 0xFFFF)\n self.__dig_P[8] = (lambda x: (x - 65536) if x > 32767 else x)(self.__bus.read_word_data(self.BME280_ADDRESS, self.REG_DIG_P8) & 0xFFFF)\n self.__dig_P[9] = (lambda x: (x - 65536) if x > 32767 else x)(self.__bus.read_word_data(self.BME280_ADDRESS, self.REG_DIG_P9) & 0xFFFF)\n\n self.__dig_H[1] = self.__bus.read_byte_data(self.BME280_ADDRESS, self.REG_DIG_H1) & 0xFF\n self.__dig_H[2] = (lambda x: (x - 65536) if x > 32767 else x)(self.__bus.read_word_data(self.BME280_ADDRESS, self.REG_DIG_H2) & 0xFFFF)\n self.__dig_H[3] = self.__bus.read_byte_data(self.BME280_ADDRESS, self.REG_DIG_H3) & 0xFF\n h4 = (lambda x: (x - 256) if x > 127 else x)(self.__bus.read_word_data(self.BME280_ADDRESS, self.REG_DIG_H4) & 0xFF)\n h4 = (h4 << 4)\n self.__dig_H[4] = h4 | (self.__bus.read_word_data(self.BME280_ADDRESS, self.REG_DIG_H5) & 0x0F)\n h5 = (lambda x: (x - 256) if x > 127 else x)(self.__bus.read_word_data(self.BME280_ADDRESS, self.REG_DIG_H6) & 0xFF)\n h5 = (h5 << 4)\n self.__dig_H[5] = h5 | ((self.__bus.read_word_data(self.BME280_ADDRESS, self.REG_DIG_H5) >> 4) & 0x0F)\n 
self.__dig_H[6] = (lambda x: (x - 256) if x > 127 else x)(self.__bus.read_word_data(self.BME280_ADDRESS, self.REG_DIG_H6) & 0xFF)\n\n def __compensate_humidity(self):\n #need t_fine from temperature count\n if self.__raw_data['t_fine'] == 0:\n self.__compensate_temperature()\n var1 = float(self.__raw_data['t_fine']) - 76800.0\n var2 = (self.__dig_H[4] * 64.0 + (self.__dig_H[5] / 16384.0) * var1)\n var3 = self.__raw_data['humidity'] - var2\n var4 = self.__dig_H[2] / 65536.0\n var5 = (1.0 + (self.__dig_H[3] / 67108864.0) * var1)\n var6 = (1.0 + (self.__dig_H[6] / 67108864.0) * var1 * var5)\n var6 = (var3 * var4 * (var5 * var6))\n self.data['humidity_RH'] = var6 * (1.0 - self.__dig_H[1] * var6 / 524288.0)\n\n def __compensate_pressure(self):\n #need t_fine from temperature count\n if self.__raw_data['t_fine'] == 0:\n self.__compensate_temperature()\n var1 = (float(self.__raw_data['t_fine']) / 2.0) - 64000.0\n var2 = var1 * var1 * self.__dig_P[6] / 32768.0\n var2 = var2 + var1 * self.__dig_P[5] / 2.0\n var2 = (var2 / 4.0) + (self.__dig_P[4] * 65536.0)\n var3 = self.__dig_P[2] *var1 * var1 / 524288.0\n var1 = (var3 + self.__dig_P[2] *var1) / 524288.0\n var1 = (1.0 + var1 / 32768.0) * self.__dig_P[1]\n self.data['pressure_Pa'] = 1048576.0 - self.__raw_data['pressure']\n self.data['pressure_Pa'] = (self.data['pressure_Pa'] - (var2 / 4096.0)) * 6250.0 / var1\n var1 = self.__dig_P[9] * self.data['pressure_Pa'] * self.data['pressure_Pa'] / 2147483648.0\n var2 = self.data['pressure_Pa'] * self.__dig_P[8] / 32768.0\n self.data['pressure_Pa'] = self.data['pressure_Pa'] + (var1 + var2 + self.__dig_P[7]) / 16.0\n\n def __compensate_temperature(self):\n #for float\n if self.__raw_data['t_fine'] != 0:\n #probably temperature compensation was calculated already\n return\n var1 = ((self.__raw_data['temperature'] / 16384.0) - (self.__dig_T[1] / 1024.0)) * self.__dig_T[2]\n var2 = (((self.__raw_data['temperature'] / 131072.0) - (self.__dig_T[1] / 8192.0)) * ((self.__raw_data['temperature'] / 131072.0) - (self.__dig_T[1] / 8192.0))) * self.__dig_T[3]\n self.__raw_data['t_fine'] = int(var1 + var2)\n self.data['temperature_deg_C'] = (self.__raw_data['t_fine'] / 5120.0)\n\n def __settings(self):\n OSRS_T = 0x05 \t\t# temperature oversampling x16\n OSRS_P = 0x05\t\t# pressure oversampling x16\n OSRS_H = 0x05 # humudity oversampling x16\n MODE = 0x01\t\t # mode forced\n ctrl_meas = ((OSRS_T << 5) | (OSRS_P << 2) | (MODE << 0))\n ctrl_hum = (OSRS_H << 0)\n self.__bus.write_byte_data(self.BME280_ADDRESS, self.REG_CTRL_MEAS, ctrl_meas)\n self.__bus.write_byte_data(self.BME280_ADDRESS, self.REG_CTRL_HUM, ctrl_hum)\n\n def __getMeasuringStatus(self):\n return ((self.__bus.read_byte_data(self.BME280_ADDRESS, self.REG_ID_STATUS) & 0x08) >> 3)\n\n def __getImUpdateStatus(self):\n return (self.__bus.read_byte_data(self.BME280_ADDRESS, self.REG_ID_STATUS) & 0x01)\n\n def __getRawData(self):\n data = self.__bus.read_i2c_block_data(self.BME280_ADDRESS, self.REG_START_DATA, 8)\n self.__raw_data['pressure'] = ((data[0] << 12) | (data[1] << 4) | (data[2] >> 4))\n self.__raw_data['temperature'] = ((data[3] << 12) | (data[4] << 4) | (data[5] >> 4))\n self.__raw_data['humidity'] = ((data[6] << 8) | (data[7] << 0))\n\n def getData(self):\n self.__settings()\n count = 0\n while self.__getMeasuringStatus() == 1:\n count = count + 1\n time.sleep(0.5)\n if count == 10:\n return\n self.__getRawData()\n self.__compensate_temperature()\n self.__compensate_humidity()\n self.__compensate_pressure()\n return self.data\n\nif __name__ == 
\"__main__\":\n data = BME280()\n result = data.getData()\n print(result)","repo_name":"kudl4t4/BME280_simple","sub_path":"BME280_simple.py","file_name":"BME280_simple.py","file_ext":"py","file_size_in_byte":8073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24812289966","text":"import socket\nimport logging\n\nimport tornado.httpclient\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.iostream\nimport tornado.web\n\nfrom tornado_proxy.cache import WaybackPageNotFound\n\n__all__ = ['ProxyHandler']\n\nlogger = logging.getLogger('tornado.proxy')\n\n\nclass ProxyHandler(tornado.web.RequestHandler):\n SUPPORTED_METHODS = ['GET', 'POST', 'CONNECT']\n\n def initialize(self, cache):\n self.cache = cache\n\n @tornado.web.asynchronous\n def get(self):\n def handle_response(response, set_cache=True):\n if response.error and not isinstance(response.error,\n tornado.httpclient.HTTPError):\n self.set_status(500)\n self.write('Internal server error:\\n' + str(response.error))\n else:\n if set_cache and self.cache is not None:\n # add the response to the cache\n self.cache[req] = response\n self.set_status(response.code)\n for header in ('Date', 'Cache-Control', 'Server',\n 'Content-Type', 'Location',\n 'X-Proxy-Cache-Key', 'X-Wayback-Timestamp'):\n v = response.headers.get(header)\n if v:\n self.set_header(header, v)\n if response.body:\n self.write(response.body)\n self.finish()\n\n body = self.request.body\n if self.request.method == 'GET' and not body:\n body = None\n req = tornado.httpclient.HTTPRequest(url=self.request.uri,\n method=self.request.method, body=body,\n headers=self.request.headers, follow_redirects=False,\n allow_nonstandard_methods=True)\n\n if self.cache is not None:\n try:\n response = self.cache.get(req)\n if response:\n return handle_response(response, False)\n except WaybackPageNotFound as e:\n # need to set the error code directly here, as 523 is not an\n # official error code. 
It's similar to the 523 code CloudFlare\n # returns, so it's appropriated here\n self._status_code = 523\n self._reason = 'WaybackPageNotFound'\n self.write(\n \"Could not find \\\"{0.url}\\\" in cache before {0.timestamp} \"\n \"within {0.within}\\n\".format(e))\n self.finish()\n return\n except:\n logger.exception(\"Error reading from cache\")\n\n client = tornado.httpclient.AsyncHTTPClient()\n try:\n client.fetch(req, handle_response)\n except tornado.httpclient.HTTPError as e:\n if hasattr(e, 'response') and e.response:\n handle_response(e.response)\n else:\n self.set_status(500)\n self.write('Internal server error:\\n' + str(e))\n self.finish()\n\n @tornado.web.asynchronous\n def post(self):\n return self.get()\n\n @tornado.web.asynchronous\n def connect(self):\n host, port = self.request.uri.split(':')\n client = self.request.connection.stream\n\n def read_from_client(data):\n upstream.write(data)\n\n def read_from_upstream(data):\n client.write(data)\n\n def client_close(data=None):\n if upstream.closed():\n return\n if data:\n upstream.write(data)\n upstream.close()\n\n def upstream_close(data=None):\n if client.closed():\n return\n if data:\n client.write(data)\n client.close()\n\n def start_tunnel():\n client.read_until_close(client_close, read_from_client)\n upstream.read_until_close(upstream_close, read_from_upstream)\n client.write(b'HTTP/1.0 200 Connection established\\r\\n\\r\\n')\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)\n upstream = tornado.iostream.IOStream(s)\n upstream.connect((host, int(port)), start_tunnel)\n","repo_name":"sjhewitt/tornado-proxy","sub_path":"tornado_proxy/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":4155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"27582476044","text":"import pytest\nimport os\nimport logging\nimport logging.handlers\nimport tempfile\n\nfrom drp_1dpipe.core.engine.runner import Runner, register_runner, get_runner, list_runners\nfrom drp_1dpipe.core.config import Config\nfrom drp_1dpipe.core.engine.local import Local\n\nclass RunnerClass(Runner):\n def single(self, command, args):\n self.logger.info(\"{} {}\".format(command, args))\n\n\ndef test_register_runner():\n register_runner(RunnerClass)\n assert get_runner('RunnerClass') is RunnerClass\n\n\ndef test_runner():\n runner_class = get_runner('RunnerClass')\n config = Config({\"concurrency\":1, \"venv\":\"/venv\", \"workdir\":\"/wd\", \"logdir\":\"/ld\"})\n lf = tempfile.NamedTemporaryFile()\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.INFO)\n fh = logging.FileHandler(lf.name)\n fh.setLevel(logging.INFO)\n logger.addHandler(fh)\n runner = runner_class(config, tmpcontext=None, logger=logger)\n runner.single('command',{\"k\":\"v\"})\n with open(lf.name) as ff:\n line = ff.readline()\n assert line.strip() == \"command {'k': 'v'}\".strip()\n\ndef test_local():\n config = Config({\"concurrency\":1, \"venv\":\"/venv\", \"workdir\":\"/wd\", \"logdir\":\"/ld\"})\n runner = Local(config)\n task = runner.single(\"drp_1dpipe\",{\"version\":0})\n assert task[0] == \"drp_1dpipe\"\n assert task[1] == \"--version=0\"\n tasks = runner.parallel(\"drp_1dpipe\",{\"version\":[0,1]},{\"arg\":0})\n assert len(tasks) == 2\n assert tasks[0][0] == \"drp_1dpipe\"\n assert tasks[0][1] == \"--version=0\"\n assert tasks[0][2] == \"--arg=0\"\n assert tasks[1][0] == \"drp_1dpipe\"\n assert tasks[1][1] == \"--version=1\"\n assert tasks[1][2] == 
\"--arg=0\"\n","repo_name":"Subaru-PFS/drp_1dpipe","sub_path":"drp_1dpipe/tests/test_runner.py","file_name":"test_runner.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28004512899","text":"import sys\nfrom django.conf import settings\nfrom django.utils import timezone\nfrom django.core.management.base import NoArgsCommand\nfrom django.db import IntegrityError\n\nfrom assessment.models import Survey, Question, SurveyImage\n\n\nclass Command(NoArgsCommand):\n\n survey_instance = {\n 'name': 'Case Analysis',\n 'slug': 'casestudy',\n 'description': 'Respond to the following questions using information gathered from the case study.',\n 'pub_date': timezone.now(),\n 'minutes_allowed': 45,\n 'is_active': 'True',\n }\n\n image = {\n 'image': settings.MEDIA_URL + 'assessment/casestudyfile.jpg'\n }\n\n questions = [\n\n {\n 'question': 'What is the problem?',\n 'question_type': '3',\n },\n\n {\n 'question': 'What is the best solution?',\n 'question_type': '3',\n },\n\n {\n 'question': 'Defend your answer to #2 with facts and data from the case.',\n 'question_type': '3',\n }\n ]\n\n help = 'Creates the Case Study Survey.'\n\n def handle_noargs(self, **options):\n try:\n survey = Survey(**self.survey_instance)\n survey.save()\n image = SurveyImage(survey=survey, **self.image)\n image.save()\n q = {}\n for i in range(3):\n q[i] = Question(survey=survey, **self.questions[i])\n q[i].save()\n\n self.stdout.write(\n \"Successfully created case study survey: %s\" % survey.slug\n )\n\n except IntegrityError:\n err = sys.exc_info()[0]\n self.stdout.write(\n \"FAILED to create case study survey: %s\" % err\n )","repo_name":"MVanBoxtel/assessment","sub_path":"management/commands/casestudy.py","file_name":"casestudy.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73290554115","text":"# Concat\na = [1, 2]\nb = [7, 8]\nprint(a + b) #--> [1,2,7,8]\n\n# Increase the same list\na = [1, 7]\nprint(a * 2) #--> [1,7,1,7]\n\n# Add element to final list\na = [1, 2]\na.append(7)\nprint(a) #--> [1,2,7]\n\n# Delete element to final list\na = [1, 7]\nb = a.pop()\nprint(a) #--> [1]\n\n# Delete element by position\na = [3, 7, 8]\nb = a.pop(1)\nprint(a) # --> [3,8]\n\n# Delete element by value\na = [4, 6, 8]\na.remove(6)\nprint(a) # --> [4,8]\n\n# Sort list smallest to largest\na = [3, 8, 1]\na.sort()\nprint(a) #--> [1,3,8]\n\n# List creation in a certain range\na = (list(range(0, 10, 2))) # Create a count from 0 until 10 (plus 2)\nprint(a) # --> [0,2,,4,6,8]\n\n# List size value\na = [0, 2, 4, 6, 8]\nprint(len(a)) # --> 5\n\n# nested list\nlist = [[1, 2, 3], [4, 5, 6], [10, 11, 12]]\nprint(list)","repo_name":"alejoalvarez/python","sub_path":"DataStructure/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38232075819","text":"from random import uniform\nfrom typing import Optional, List, Tuple\n\nfrom haversine import haversine, Unit\n\n\nLOWERLIMIT = None\nUPPERLIMIT = None\nbbox: Optional[List[float]] = None\n\n\ndef init(b: List[float], lower: int, upper: int):\n \"\"\"Set the global attributes\"\"\"\n global bbox, LOWERLIMIT, UPPERLIMIT\n bbox = b.copy()\n LOWERLIMIT = lower\n UPPERLIMIT = upper\n\n\ndef work(_):\n assert bbox is not None\n\n p1: Tuple[float, float] = (\n 
round(uniform(bbox[0], bbox[2]), 6),\n round(uniform(bbox[1], bbox[3]), 6),\n )\n # Conform with the imposed limits\n while True:\n p2 = (round(uniform(bbox[0], bbox[2]), 6), round(uniform(bbox[1], bbox[3]), 6))\n dist = haversine(reversed(p1), reversed(p2), unit=Unit.METERS)\n if dist > UPPERLIMIT or dist < LOWERLIMIT:\n break\n\n return p1, p2\n","repo_name":"gis-ops/osrm-tester","sub_path":"osrm_tester/task_locations.py","file_name":"task_locations.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23455499691","text":"import sys\r\n\r\nif __name__ == \"__main__\":\r\n\r\n sys.stdin = open('A-small-attempt2.in', 'r')\r\n sys.stdout = open('A-small-attempt2.out', 'w')\r\n\r\n for c in range(int(input())):\r\n (m, x) = map(str, input().split())\r\n m = int(m)\r\n iv = 0\r\n count = 0\r\n for i in range(m+1):\r\n if(i == 0):\r\n count = int(x[i])\r\n else:\r\n if (count >= i):\r\n count += int(x[i])\r\n elif (int(x[i]) != 0):\r\n iv += i-count\r\n count += (iv+int(x[i]))\r\n print('Case #%d: %d' %(c+1, iv))\r\n\r\n sys.stdin.close()\r\n sys.stdout.close()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_155/3593.py","file_name":"3593.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71165147394","text":"import matplotlib.image as mpimg\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nplt.figure()\nplt.close('all')\nimport pandas as pd\n\nfrom paths import paths\nimport load\n\nitems_pos_path = paths().item_pos_path()\nitems_pos = pd.read_csv(items_pos_path)\n\n# Complete target pos\nplt.ioff()\nexp_path = paths().experiment_path()\nplots_path = paths().plots_path()\n\nsubject = load.raw_subject()\nbh_data = subject.load_raw_bh_data()\n\nall_targets = bh_data[bh_data['Tpres'] == 1]\nimage_names = all_targets['searchimage'].drop_duplicates()\nimage_names = image_names.str.split('cmp_', expand=True)[1]\nimage_names = image_names.str.split('.jpg', expand=True)[0]\n\ntarget_names = all_targets['st5'].drop_duplicates()\n\ncorrelations = {}\nfor image_name, target_name in zip(image_names, target_names):\n print(image_name)\n\n os.makedirs(plots_path + f'Target_search/{image_name}/', exist_ok=True)\n\n item_pos_image = items_pos[items_pos['folder'] == image_name]\n\n target_full = mpimg.imread(exp_path + target_name)\n target = target_full[:, :, :3]\n target_alpha = target_full[:, :, -1]\n\n image = img = mpimg.imread(exp_path + 'cmp_' + image_name + '.jpg')\n\n plt.figure()\n plt.imshow(image)\n plt.savefig(plots_path + f'Target_search/{image_name}/search_image.jpg')\n plt.close()\n\n correlations[image_name] = {}\n\n for idx, row in item_pos_image.iterrows():\n print(row['indice'])\n x_lims = np.arange(row['pos_x'], min(row['pos_x'] + abs(row['width']), image.shape[1]))\n y_lims = np.arange(row['pos_y'], min(row['pos_y'] + abs(row['height']), image.shape[0]))\n\n crop_image = image[y_lims, :, :][:, x_lims, :]\n\n r_corr = np.correlate(crop_image[:, :, 0].ravel(), target[:, :, 0].ravel())[0]\n g_corr = np.correlate(crop_image[:, :, 1].ravel(), target[:, :, 1].ravel())[0]\n b_corr = np.correlate(crop_image[:, :, 2].ravel(), target[:, :, 2].ravel())[0]\n\n correlations[image_name][row['indice']] = np.mean([r_corr, g_corr, b_corr])\n\n fig, axs = plt.subplots(2)\n plt.title(image_name)\n axs[0].set_title('Target')\n axs[0].imshow(target)\n 
axs[1].set_title('Cropped item')\n        axs[1].imshow(crop_image)\n        fig.tight_layout()\n        plt.savefig(plots_path + f'Target_search/{image_name}/{row[\"indice\"]}.jpg')\n        plt.close(fig)\n\n    print(list(correlations[image_name].values()))\n    best_match = item_pos_image.iloc[np.argmax(list(correlations[image_name].values()))]\n    print(np.argmax(list(correlations[image_name].values())))\n\n    x_lims = np.arange(best_match['pos_x'], min(best_match['pos_x'] + abs(best_match['width']), image.shape[1]))\n    y_lims = np.arange(best_match['pos_y'], min(best_match['pos_y'] + abs(best_match['height']), image.shape[0]))\n    crop_image = image[y_lims, :, :][:, x_lims, :]\n\n    fig, axs = plt.subplots(2)\n    plt.title(image_name)\n    axs[0].set_title('Target')\n    axs[0].imshow(target)\n    axs[1].set_title('Best match')\n    axs[1].imshow(crop_image)\n\n    plt.savefig(plots_path + f'Target_search/{image_name}/Best_match.jpg')\n    plt.close(fig)\n\n# target_pos = items_pos[items_pos['istarget'] == 1]","repo_name":"jegonza66/MEGEYEHS","sub_path":"target_pos.py","file_name":"target_pos.py","file_ext":"py","file_size_in_byte":3156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2694701020","text":"from django.db import models\nfrom base.models import BaseModel\nfrom account.models import Profile\nfrom user.models import Address\nfrom product.models import Product, ProductVariant\n\n# Create your models here.\n\n\nclass Payment(BaseModel):\n    user = models.ForeignKey(Profile, on_delete=models.CASCADE, blank=True, null=True)\n    session_id = models.CharField(max_length=100)\n    payment_id = models.CharField(max_length=50)\n    payment_method = models.CharField(max_length=100)\n    amount_paid = models.CharField(max_length=10)\n    status = models.CharField(max_length=50)\n\n    def __str__(self):\n        return self.payment_id\n\n\nclass Order(BaseModel):\n    status_choice = (\n        (\"Order placed\", \"Order placed\"),\n        (\"Shipped\", \"Shipped\"),\n        (\"Delivered\", \"Delivered\"),\n        (\"Cancelled\", \"Cancelled\"),\n    )\n\n    user = models.ForeignKey(\n        Profile, on_delete=models.CASCADE, related_name=\"orders\", blank=True, null=True\n    )\n    session_id = models.CharField(max_length=100)\n    payment = models.ForeignKey(\n        Payment, on_delete=models.SET_NULL, blank=True, null=True\n    )\n    order_number = models.CharField(max_length=20)\n    coupon_price = models.IntegerField(default=0)\n    subtotal = models.IntegerField(default=0)\n    discount = models.IntegerField(default=0)\n    order_total = models.DecimalField(max_digits=10, decimal_places=2, default=0)\n    address = models.ForeignKey(Address, on_delete=models.CASCADE)\n    status = models.CharField(\n        max_length=15, choices=status_choice, default=\"Order Pending\"\n    )\n    is_orderd = models.BooleanField(default=False)\n\n    class Meta:\n        ordering = [\"-created_at\"]\n\n\nclass OrderProduct(BaseModel):\n    order = models.ForeignKey(\n        Order, on_delete=models.CASCADE, related_name=\"orderproduct\"\n    )\n    product = models.ForeignKey(Product, on_delete=models.CASCADE)\n    variation = models.ForeignKey(ProductVariant, on_delete=models.CASCADE)\n    quantity = models.IntegerField()\n\n    def __str__(self):\n        return self.product.product_name\n","repo_name":"ckmridul/Django_Ecommerce","sub_path":"order/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3679122722","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom 
selenium.webdriver.support.ui import Select\nimport multiprocessing as mp\nfrom openpyxl import Workbook,load_workbook\nfrom openpyxl.styles import PatternFill, colors\nimport time\n\ndef thread(x,y,xl_data):\n driver = webdriver.Firefox(executable_path=\"C:\\\\Users\\\\Administrator\\\\Downloads\\\\geckodriver-v0.26.0-win64\\\\geckodriver.exe\")\n driver.get(\"http://www.results.manabadi.co.in/2017/telangana/Inter-2nd/ts-intermediate-2nd-year-regular-exam-results-2017.htm\")\n elem = driver.find_element_by_id(\"htno\")\n select = Select(driver.find_element_by_id('Degree'))\n btn = driver.find_element_by_id(\"btnsubmit\")\n hallTicket = driver.find_element_by_id(\"sid0\")\n name = driver.find_element_by_id(\"sid2\")\n data = []\n for i in range(x,y):\n elem.clear()\n elem.send_keys(i)\n select.select_by_visible_text('Inter 2Year')\n btn.click()\n time.sleep(0.7)\n if(name.text == \"\"):\n data.append((\"Invalid\",\"Nan\"))\n else:\n #ht.value =\n #na.value = name.text\n data.append((hallTicket.text, name.text))\n xl_data+=[*data]\n driver.quit()\n\nif __name__ == \"__main__\":\n xl_data = mp.Manager().list()\n\n t1 = mp.Process(target=thread,args = (1761216000,1761216500,xl_data))\n t2 = mp.Process(target=thread,args = (1761216501,1761217000,xl_data))\n t3 = mp.Process(target=thread,args = (1761217001,1761217500,xl_data))\n t4 = mp.Process(target=thread,args = (1761217501,1761218000,xl_data))\n\n t1.start()\n t2.start()\n t3.start()\n t4.start()\n\n t1.join()\n t2.join()\n t3.join()\n t4.join()\n wb = Workbook()\n sheet = wb.active\n j = 1\n for reg_no,name in xl_data:\n sheet.cell(row = j,column = 1).value = reg_no\n sheet.cell(row = j,column = 2).value = name\n j = j+1\n\n wb.save('scrappedData.xlsx')\n print(\"Done!\")\n","repo_name":"SomaRe/web-scraper-2017-inter-results-manabadi-","sub_path":"scrap.py","file_name":"scrap.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10061193330","text":"#!/usr/bin/python\n # Author: Maximilian Weinberg\n# Date: 2019-03-24\n# gamestate.py:\n\nfrom random import shuffle, choice\nfrom numpy.random import choice as npchoice\n\nfrom constants import *\n\nHEXES = 4*[\"L\"] + 3*[\"B\"] + 4*[\"G\"] + 4*[\"W\"] + 3*[\"O\"] + [\"D\"]\nSPIRALS = [[ 1, 2, 3, 7,12,16,19,18,17,13, 8, 4, 5, 6,11,15,14, 9,10],\n [ 3, 7,12,16,19,18,17,13, 8, 4, 1, 2, 6,11,15,14, 9, 5,10],\n [12,16,19,18,17,13, 8, 4, 1, 2, 3, 7,11,15,14, 9, 5, 6,10],\n [19,18,17,13, 8, 4, 1, 2, 3, 7,12,16,15,14, 9, 5, 6,11,10],\n [17,13, 8, 4, 1, 2, 3, 7,12,16,19,18,14, 9, 5, 6,11,15,10],\n [ 8, 4, 1, 2, 3, 7,12,16,19,18,17,13, 9, 5, 6,11,15,14,10],\n ]\nCHIP_ORDERS = [[HEXCOORS[i-1] for i in spiral] for spiral in SPIRALS]\nCHIPS = [ 5, 2, 6, 3, 8,10, 9,12,11, 4, 8,10, 9, 4, 5, 6, 3,11]\n#CHIPS = list(range(19))\n\nPORTS = RESOURCES + 4*[\"X\"]\n\nclass GameState:\n def __init__(self, number_players, victory_points_to_win=10):\n self.number_players = number_players\n self.victory_points_to_win = victory_points_to_win\n\n # Randomize board\n hexes = HEXES.copy()\n shuffle(hexes)\n self.dir_hexes = dict(zip(HEXCOORS, hexes))\n\n chip_order = choice(CHIP_ORDERS)\n self.dir_chips = {}\n self.robber = None\n j = 0\n for coor in chip_order:\n if self.dir_hexes[coor] not in NO_YIELDS:\n self.dir_chips[coor] = CHIPS[j]\n j+=1\n else:\n self.robber = coor\n\n ports = PORTS.copy()\n shuffle(ports)\n self.dir_ports = dict(zip(PORTCOORS, ports))\n\n # Initialize state variables\n self.dir_crossings = 
{crossing:None for crossing in CROSSINGS}\n self.dir_paths = {path:None for path in PATHS}\n self.dir_resources = {i:{resource:0 for resource in RESOURCES} for i in range(self.number_players)}\n self.dir_devcards = {i:0 for i in range(self.number_players)}\n self.dir_knights = {i:0 for i in range(self.number_players)}\n\n self.largest_army = (0, None)\n\n # READ methods\n def are_adjacent_crossings(self, crossing1, crossing2):\n return (vec_diff(crossing1, crossing2) in DIRECTIONS)\n\n def are_adjacent_paths(self, path1, path2):\n return (not path1 & path2 == set())\n\n def are_adjacent_crossing_and_path(self, crossing, path):\n return (crossing in path)\n\n def adjacent_paths(self, crossing):\n paths = set()\n for direction in DIRECTIONS:\n point = vec_add(crossing, direction)\n if point in CROSSINGS:\n paths.add(frozenset({crossing, point}))\n return paths\n\n def adjacent_crossings(self, crossing):\n crossings = set()\n for direction in DIRECTIONS:\n point = vec_add(crossing, direction)\n if point in CROSSINGS:\n crossings.add(point)\n return crossings\n\n def adjacent_hexcoors(self, crossing):\n hexcoors = set()\n for direction in DIRECTIONS:\n point = vec_add(crossing, direction)\n if point in HEXCOORS:\n hexcoors.add(point)\n return hexcoors\n\n def free_crossings(self):\n return {crossing for crossing in self.dir_crossings if self.dir_crossings[crossing] is None}\n\n def free_paths(self):\n return {path for path in self.dir_paths if self.dir_paths[path] is None}\n\n def available_crossings(self):\n free_crossings = self.free_crossings()\n #if CROSSINGS == free_crossings:\n #return free_crossings\n other_crossings = CROSSINGS - free_crossings\n #print(other_crossings)\n return {crossing for crossing in free_crossings if all(not(vec_add(crossing, direction) in other_crossings) for direction in DIRECTIONS)}\n\n def network(self, n):\n network = set()\n for path in self.dir_paths:\n if self.dir_paths[path] == n:\n #network |= path\n for crossing in path:\n if self.dir_crossings[crossing] == None:\n network.add(crossing)\n elif self.dir_crossings[crossing][1] == n:\n network.add(crossing)\n return network\n\n def accessible_crossings(self, n):\n return self.available_crossings() & self.network(n)\n\n def accessible_paths(self,n):\n free_paths = self.free_paths()\n return {path for path in free_paths if any(crossing in path for crossing in self.network(n))}\n\n def paths_of(self, n):\n return {path for path in self.dir_paths if self.dir_paths[path] == n}\n\n def settlements_of(self, n):\n return {crossing for crossing in self.dir_crossings if self.dir_crossings[crossing] == (1,n)}\n\n def cities_of(self, n):\n return {crossing for crossing in self.dir_crossings if self.dir_crossings[crossing] == (2,n)}\n\n def has_largest_army(self, n):\n return self.largest_army[0] == n\n\n def number_resources(self, n):\n return sum(self.dir_resources[n].values())\n\n def can_afford(self, n, cost):\n return all(cost[resource] <= self.dir_resources[n][resource] for resource in RESOURCES)\n\n def adjacent_players(self, hexcoor):\n players = set()\n for direction in DIRECTIONS:\n crossing = vec_add(hexcoor, direction)\n piece = self.dir_crossings[crossing]\n if not piece == None:\n players.add(piece[1])\n return players\n\n def price(self, n, resource):\n price = 4\n for port in PORTCOORS:\n if self.dir_ports[port] == resource:\n for crossing in port:\n if self.dir_crossings[crossing] in [(1,n), (2,n)]:\n return 2\n elif self.dir_ports[port] == \"X\":\n for crossing in port:\n if 
self.dir_crossings[crossing] in [(1,n), (2,n)]:\n price = 3\n return price\n\n def victory_points(self, n):\n vps = 0\n for crossing in self.dir_crossings:\n if self.dir_crossings[crossing] == (1, n):\n vps += 1\n if self.dir_crossings[crossing] == (2, n):\n vps += 2\n if self.largest_army[1] == n:\n vps += 2\n return vps\n\n def has_won(self, n):\n return (self.victory_points(n) >= self.victory_points_to_win)\n\n def choose_resource_card(self, n):\n m = self.number_resources(n)\n resources = self.dir_resources[n]\n if m == 0:\n return None\n p = [(resources[resource]/m) for resource in resources]\n #return np.random.choice(list(resources.keys()), p=p)\n return npchoice(list(resources.keys()), p=p)\n\n def print_state(self):\n for n in range(self.number_players):\n resources = self.dir_resources[n]\n devcards = self.dir_devcards[n]\n knights = self.dir_knights[n]\n vps = self.victory_points(n)\n #resource_string = \"\"\n #for resource in resources:\n #resource_string += resources[resource] * (resource + \" \")\n print(f\"Player {n}: Resources: {resources}; {devcards} devcards, {knights} knights, {vps} victory points.\")\n\n # WRITE methods\n def pays(self, n, cost):\n if self.can_afford(n, cost):\n for resource in RESOURCES:\n self.dir_resources[n][resource] -= cost[resource]\n return True\n return False\n\n def update_armies(self, n):\n self.dir_devcards[n] -= 1\n self.dir_knights[n] += 1\n if self.dir_knights[n] > max(self.largest_army[0],2):\n self.largest_army = (self.dir_knights[n], n)\n\n def update_roads(self, n, path=None):\n if path == None:\n # Insert full algorithm here\n pass\n else:\n for crossing1 in path:\n m = 0\n if self.dir_crossings[crossing1] != None:\n if self.dir_crossings[crossing1][1] != n:\n m = 1\n if m == 0:\n for direction in DIRECTIONS:\n crossing2 = vec_add(crossing1, direction)\n path2 = frozenset({crossing1, crossing2})\n if path2 in PATHS:\n if self.dir_paths[path2] == n:\n m += 1\n if m == 1:\n # crossing1 is an end\n # Count longest road starting at crossing1\n pass\n return\n # Both crossing1 and crossing2 are connected to roads\n # Call full counting algorithm\n\n ### Actions\n def build_initial_settlement(self, n, crossing, path):\n if crossing in self.available_crossings():\n if self.are_adjacent_crossing_and_path(crossing, path):\n self.dir_crossings[crossing] = (1,n)\n self.dir_paths[path] = n\n return 0\n else:\n return 2\n else:\n return 1\n def build_road(self, n, path):\n if path in self.accessible_paths(n):\n cost = PRICES[\"road\"]\n if self.pays(n, cost):\n self.dir_paths[path] = n\n return 0\n else:\n return 1\n else:\n return 2\n\n def build_settlement(self, n, crossing):\n if crossing in self.accessible_crossings(n):\n cost = PRICES[\"settlement\"]\n if self.pays(n, cost):\n self.dir_crossings[crossing] = (1,n)\n return 0\n else:\n return 1\n else:\n return 2\n\n def build_city(self, n, crossing):\n if crossing in self.settlements_of(n):\n cost = PRICES[\"city\"]\n if self.pays(n, cost):\n self.dir_crossings[crossing] = (2,n)\n return 0\n else: #elif not DEBUG:\n return 1\n else:\n return 2\n\n def buy_devcard(self, n):\n cost = PRICES[\"devcard\"]\n if self.pays(n, cost):\n self.dir_devcards[n] += 1\n return 0\n else:\n return 1\n","repo_name":"Taraxacus/hesperus","sub_path":"gamestate.py","file_name":"gamestate.py","file_ext":"py","file_size_in_byte":10233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3669573100","text":"import csv\nimport os\nfrom pathlib import Path\nfrom 
django.db import models\nfrom shopApp.models import gameList, gameDetails\nfrom django.core.management.base import BaseCommand, CommandError\n\nclass Command(BaseCommand):\n\thelp = 'Load data from csv'\n\tdef handle(self, *args, **options):\n\t\tgameList.objects.all().delete()\n\t\tgameDetails.objects.all().delete()\n\n\t\t# Refill the model database\n\t\twith open('appstore_games.csv', newline='') as f:\n\t\t\tcount = 0;\n\t\t\treader = csv.reader(f, delimiter=\",\")\n\t\t\tnext(reader) # skip the header line\n\t\t\tfor row in reader:\n\t\t\t\tcount += 1;\n\t\t\t\tif row[0] != '':\n\t\t\t\t\tgList = gameList.objects.create(\n\t\t\t\t\tgameID = row[1],\n\t\t\t\t\tgameName = row[2],\n\t\t\t\t\tprice = row[7],\n\t\t\t\t\tageRating = row[11],\n\t\t\t\t\tprimaryGenre = row[14],\n\t\t\t\t\t)\n\t\t\t\t\tgList.save()\n\t\t\t\tprint(\"Completed Row \"+str(count)+\":\",row[2])\n\t\t\tprint(\"Game List Complete\")\n\n\t\twith open('appstore_games.csv', newline='') as f:\n\t\t\treader = csv.reader(f, delimiter=\",\")\n\t\t\tcount = 0;\n\t\t\tnext(reader) # skip the header line\n\t\t\tfor row in reader:\n\t\t\t\tif row[0] != '':\n\t\t\t\t\tgdList = gameDetails.objects.create(\n\t\t\t\t\tuniqueid = count,\n\t\t\t\t\tgameID = gameList.objects.get(gameID=row[1]),\n\t\t\t\t\tappURL = row[0],\n\t\t\t\t\tsubtitle = row[3],\n\t\t\t\t\ticonURL = row[4],\n\t\t\t\t\taverageUserRating = row[5],\n\t\t\t\t\tnumberOfRating = row[6],\n\t\t\t\t\tinAppPurchases = row[8],\n\t\t\t\t\tdescription = row[9],\n\t\t\t\t\tdeveloper = row[10],\n\t\t\t\t\tlanguages = row[12],\n\t\t\t\t\tsize = row[13],\n\t\t\t\t\tgenres = row[15],\n\t\t\t\t\toriginalReleaseDate = row[16],\n\t\t\t\t\tCurrentVersionReleaseDate = row[17],\n\t\t\t\t\t)\n\t\t\t\t\tcount += 1;\n\t\t\t\t\tgdList.save()\n\t\t\t\tprint(\"Completed Row \"+str(count)+\":\",row[1],row[2],row[13])\n\t\t\tprint(\"Details List Complete\")","repo_name":"Xenterra/CS551Q-Solo-Assessment","sub_path":"shopApp/management/commands/database_Fill.py","file_name":"database_Fill.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28083184579","text":"import unittest\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nimport time\n\nclass miclase_unittest(unittest.TestCase):\n\n def setUp(self):\n s = Service(\"C:/chromedriver/chromedriver.exe\")\n self.driver = webdriver.Chrome(service=s)\n self.driver.maximize_window()\n\n def test_navegar_entre_pestañas(self):\n driver = self.driver\n driver.get(\"http://www.google.com\")\n time.sleep(3)\n driver.execute_script(\"window.open('');\")\n time.sleep(3)\n driver.switch_to.window(driver.window_handles[1])\n driver.get(\"https://www.python.org\")\n time.sleep(3)\n driver.switch_to.window(driver.window_handles[0])\n time.sleep(3)\n\n def tearDown(self):\n self.driver.close()\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"grecia9623/Automation_practice","sub_path":"primera_sesion/Test5_abrir_nueva_pestaña.py","file_name":"Test5_abrir_nueva_pestaña.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23561507211","text":"\nres_str = 'Case #{0}: {1}'\n\ntest_cases = int(input())\n\nfor t in range(1, test_cases+1):\n n = int(input())\n n_length = len(str(n))-1\n i = n_length\n while i > 0:\n if int(n / pow(10,i) % 10) > int(n / pow(10,i-1) % 10):\n n = n - ((n % pow(10,i)) + 1)\n i = 
n_length\n        else:\n            i -= 1\n    print(res_str.format(t, n))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/4413.py","file_name":"4413.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18689710025","text":"from paymentScheduleAndCalc import monthly_loan, createSchedule\nfrom lenmoSingleton import Lenmo\nimport threading\nimport copy\n\n\nclass PaymentFacade():\n    # one lock shared by all instances, so concurrent transactions are serialized\n    _transaction_lock = threading.Lock()\n\n    def __init__(self, loan):\n\n        # check Loan status before creating the instance to ensure the Loan is accepted!\n        if loan.status != 'Accepted':\n            return print('Sorry! you cannot do any payment transaction as the loan is not accepted yet!')\n\n        self.__borrower = loan.borrower\n        self.__loan = loan\n        self.__investor = loan.acceptedOffer.investor\n        self.__lenmo = Lenmo()\n        self.__paymentSchedule = []\n        self.__toPaySchedule = []\n        self.__paidSchedule = []\n\n    def getLoanStatus(self):\n        return self.__loan.getLoanStatus()\n\n    def __changeLoanStatus(self, status):\n        self.__loan.changeLoanStatus(status)\n\n    def __calculatMonthlyPayment(self):\n        return monthly_loan(self.__loan.amount, self.__loan.interest, self.__loan.installment_period)\n\n    def __calcTotalAmntWithInterest(self):\n        try:\n            return self.__calculatMonthlyPayment()*self.__loan.installment_period\n        except Exception as err:\n            print(err)\n            return None\n\n    def fundLoan(self):\n        # Serialize this method with the shared class-level lock, held via a context manager,\n        # so that only one transaction is in progress at a particular moment\n        # (a fresh threading.Lock() created inside the call would never block concurrent callers)\n        with PaymentFacade._transaction_lock:\n            if self.__loan.status != 'Funded':\n                loanAmountwithInterest = self.__calcTotalAmntWithInterest()\n                lenmoProfitAmount = self.__lenmo.profitAmount\n                self.__investor.pay(loanAmount=self.__loan.amount,\n                                    loanAmountWithInterest=loanAmountwithInterest, lenmoProfiitAmount=lenmoProfitAmount)\n                self.__lenmo.gain()\n                self.__borrower.gain(loanAmount=self.__loan.amount,\n                                     loanAmountWithInterest=loanAmountwithInterest)\n                self.__changeLoanStatus('Funded')\n                self.__createPaymentSchedule()\n                print('Loan has been Funded Successfully!')\n            else:\n                return print('Loan transaction already done before and loan status is Funded')\n\n    def __createPaymentSchedule(self):\n\n        # check if schedule already created before return it if not create new\n        if len(self.__paymentSchedule) != 0:\n            return self.__paymentSchedule\n\n        # create new\n        monthlyPay = self.__calculatMonthlyPayment()\n        schedule = createSchedule(principal=self.__loan.amount, duration=self.__loan.installment_period,\n                                  interest_rate=self.__loan.interest, monthly=monthlyPay)\n        self.__paymentSchedule = schedule\n        self.__toPaySchedule = schedule\n        return schedule\n\n    def __isValidToDoMonthlyPayment(self, monthlyPay):\n\n        isValid = False\n        if len(self.__toPaySchedule) == 0:\n            error = 'no pending payment!'\n        elif self.__borrower.balance < monthlyPay:\n            error = 'Borrower does not have enough credit for this transaction'\n        else:\n            isValid = True\n            error = None\n        return {'isValid': isValid, 'error': error}\n\n    def doMonthlyPayment(self):\n\n        monthlyPay = self.__calculatMonthlyPayment()\n        validation = self.__isValidToDoMonthlyPayment(monthlyPay)\n        if validation['isValid']:\n            self.__borrower.pay(payAmount=monthlyPay)\n            self.__investor.gain(gainAmount=monthlyPay)\n            self.__paidSchedule.append(self.__toPaySchedule[0])\n\n            del self.__toPaySchedule[0]\n            print('Monthly payment done Successfully!')\n            if len(self.__toPaySchedule) 
== 0:\n self.__changeLoanStatus('Completed')\n else:\n return print(validation['error'])\n\n def showPaymentSchedule(self):\n\n [print(x)\n for x in self.__paymentSchedule if len(self.__paymentSchedule) > 0]\n\n def showToPaySchedule(self):\n\n [print(x)\n for x in self.__toPaySchedule if len(self.__toPaySchedule) > 0]\n\n def showPaidSchedule(self):\n\n [print(x) for x in self.__paidSchedule if len(self.__paidSchedule) > 0]\n","repo_name":"marthabib/lenmoTask","sub_path":"paymentFacade.py","file_name":"paymentFacade.py","file_ext":"py","file_size_in_byte":4289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31291587994","text":"# HW on abstract factory (Helpdesk)\n\nfrom abc import ABC, abstractmethod\nimport sys\n\n# *** CLASS SECTION ***\n# 1\nclass Ticket(ABC):\n @abstractmethod\n def info(self): pass\n\n\n# 2\nclass IncidentTicket(Ticket): # [1] to be used by both Jira and ServiceNow factories\n def info(self):\n self.ID = id(self)\n message = \"{name} number {ID} was created.\" \\\n \"\".format(name=self.__class__.__name__, ID=self.ID)\n return message\n\n\n# 3a\nclass ProblemTicket(Ticket): # [1] to be used by Jira factory\n def info(self):\n self.ID = id(self)\n message = \"{name} number {ID} was created.\" \\\n \"\".format(name=self.__class__.__name__, ID=self.ID)\n return message\n\n\n# 3b\nclass ChangeRequestTicket(Ticket): # [1] to be used by ServiceNow factory\n def info(self):\n self.ID = id(self)\n message = \"{name} number {ID} was created.\" \\\n \"\".format(name=self.__class__.__name__, ID=self.ID)\n return message\n\n\n# 4 interface, abstract base class\nclass AbstractFactory(ABC): # to be used by both Jira and ServiceNow factories\n @abstractmethod\n def create_ticket(self, issue_type):\n pass\n\n\n# 5 concrete factory\nclass JiraFactory(AbstractFactory): # [4]\n def create_ticket(self, issue_type):\n if issue_type == 'incident':\n return IncidentTicket() # [2]\n if issue_type == 'problem':\n return ProblemTicket() # [3a]\n\n\n# 6 concrete factory\nclass SnowFactory(AbstractFactory): # [4]\n def create_ticket(self, issue_type):\n if issue_type == 'incident':\n return IncidentTicket() # [2]\n if issue_type == 'change':\n return ChangeRequestTicket() # [3b]\n\n\n# 7 support class to create individual factory based on type of system\nclass FactoryProducer:\n def get_factory(self, system_type):\n if system_type == 'jira':\n return JiraFactory() # [5]\n if system_type == 'snow':\n return SnowFactory() # [6]\n\n\n# *** MENU SECTION ***\nproducer = FactoryProducer()\n\n\ndef menu():\n print('\\nChatbot: In which system do you work?'\n '\\n\\t [1] ServiceNow'\n '\\n\\t [2] Jira')\n system_selected = int(input('\\nUser: '))\n if system_selected == 1:\n servicenow()\n elif system_selected == 2:\n jira()\n else:\n print('Chatbot: Please only select only [1] or [2].')\n menu()\n\n\ndef servicenow():\n snow_factory = producer.get_factory('snow')\n print('\\nChatbot: What type of ticket would you like to raise in ServiceNow?'\n '\\n\\t [1] Incident'\n '\\n\\t [2] Change request')\n ticket_selected = int(input('\\nUser: '))\n\n if ticket_selected == 1:\n snow_incident = snow_factory.create_ticket('incident')\n print(snow_incident.info())\n goon()\n\n elif ticket_selected == 2:\n snow_change = snow_factory.create_ticket('change')\n print(snow_change.info())\n goon()\n\n else:\n print('Chatbot: Please only select either [1] or [2].')\n servicenow()\n\n\ndef jira():\n jira_factory = producer.get_factory('jira')\n 
print('\\nChatbot: What type of ticket would you like to raise in Jira?'\n '\\n\\t [1] Incident'\n '\\n\\t [2] Problem')\n ticket_selected = int(input('\\nUser: '))\n if ticket_selected == 1:\n jira_incident = jira_factory.create_ticket('incident')\n print(jira_incident.info())\n goon()\n\n elif ticket_selected == 2:\n jira_problem = jira_factory.create_ticket('problem')\n print(jira_problem.info())\n goon()\n\n else:\n print('Chatbot: Please only select either [1] or [2].')\n jira()\n\ndef goon():\n print(\"Chatbot: Do you want to continue? [Y/N]\")\n option = str(input(\"\\nUser: \"))\n if option.lower() == 'y':\n menu()\n elif option.lower() == 'n':\n print('Thank you for using our service.')\n sys.exit()\n else:\n print('Please choose either [y] or [n].')\n goon()\n\n# invoke menu\nmenu()\n","repo_name":"MikeKorsikov/PythonClasses","sub_path":"Lesson33n/HW33n.py","file_name":"HW33n.py","file_ext":"py","file_size_in_byte":4028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15922243403","text":"from __future__ import division\n\nfrom math import pi, cos, sin\n\nfrom mesh import Vector\n\n\ndef add_torus(m, r_out, r_in, meridians, parallels, x_axis=Vector(1,0,0), y_axis=Vector(0,1,0), z_axis=Vector(0,0,1), meridian_phase=0, parallel_phase=0, offset=Vector(0,0,0)):\n \"\"\"\n Add a torus of inner radius r_out and outer radius r_in to the\n existing mesh m.\n\n More precisely it approximates the manifold parametrised by\n x = (r_out + r_in*cos(theta))*cos(phi)\n y = (r_out + r_in*cos(theta))*sin(phi)\n z = r_in*sin(theta)\n\n The approximation is based the given number of meridians and\n parallels, with squares subdivided into two triangles, and thus\n uses 2*meridians*parallels triangular faces.\n \"\"\"\n\n def make_vertex(i,j):\n theta = 2*pi*i/parallels + parallel_phase\n phi = 2*pi*j/meridians + meridian_phase\n p = x_axis*((r_out + r_in*cos(theta))*cos(phi)) + \\\n y_axis*((r_out + r_in*cos(theta))*sin(phi)) + \\\n z_axis*(r_in*sin(theta)) + \\\n offset\n return m.add_vertex(p.x,p.y,p.z)\n\n vertices = [[make_vertex(i,j) for j in xrange(meridians)] for i in xrange(parallels)]\n\n for i in xrange(parallels):\n for j in xrange(meridians):\n v0 = vertices[(i+0)%parallels][(j+0)%meridians]\n v1 = vertices[(i+0)%parallels][(j+1)%meridians]\n v2 = vertices[(i+1)%parallels][(j+1)%meridians]\n v3 = vertices[(i+1)%parallels][(j+0)%meridians]\n m.add_face([v0,v1,v2])\n m.add_face([v2,v3,v0])\n","repo_name":"brachyprint/brachyprint","sub_path":"src/mesh/primitives/torus.py","file_name":"torus.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"12220254638","text":"\"\"\"\nFlux-vector control for synchronous motor drives.\n\nThis implements a version of stator-flux-vector control [1]_. Rotor coordinates \nas well as decoupling between the stator flux and torque channels are used [2]_. \nHere, the stator flux magnitude and the electromagnetic torque are selected as\ncontrollable variables. Proportional controllers are used for simplicity. The \nmagnetic saturation is not considered in this implementation.\n\nReferences\n----------\n.. [1] Pellegrino, Armando, Guglielmi, “Direct flux field-oriented control of\n IPM drives with variable DC link in the field-weakening region,” IEEE Trans.\n Ind. Appl., 2009, https://doi.org/10.1109/TIA.2009.2027167\n\n.. 
[2] Awan, Hinkkanen, Bojoi, Pellegrino, \"Stator-flux-oriented control of\n synchronous motors: A systematic design procedure,\" IEEE Trans. Ind. Appl.,\n 2019, https://doi.org/10.1109/TIA.2019.2927316\n\n\"\"\"\nfrom typing import Callable\nfrom dataclasses import dataclass, field\nimport numpy as np\nfrom motulator.helpers import abc2complex, Bunch\nfrom motulator.control.common import Ctrl, SpeedCtrl, PWM\nfrom motulator.control.sm_vector import SensorlessObserver\nfrom motulator.control.sm_obs_vhz import FluxTorqueRef\n\n\n# %%\n@dataclass\nclass SynchronousMotorFluxVectorCtrlPars:\n \"\"\"Control parameters: flux-vector control for synchronous motor drives.\"\"\"\n\n # pylint: disable=too-many-instance-attributes\n # Speed reference (in electrical rad/s)\n w_m_ref: Callable[[float], float] = field(\n repr=False, default=lambda t: (t > .2)*(2*np.pi*75))\n # Mode\n sensorless: bool = True\n # Sampling period\n T_s: float = 250e-6\n # Flux reference limits\n psi_s_min: float = None\n psi_s_max: float = None\n # Voltage marginal\n k_u: float = .9\n # Bandwidths\n alpha_psi: float = 2*np.pi*150\n alpha_tau: float = 2*np.pi*50\n alpha_s: float = 2*np.pi*4\n # Maximum values\n tau_M_max: float = 1.5*14\n i_s_max: float = 1.5*np.sqrt(2)*5.\n # Motor parameter estimates\n R_s: float = 3.6\n L_d: float = .036\n L_q: float = .051\n psi_f: float = .545\n n_p: int = 3\n J: float = .015\n # Sensorless observer (used only in the sensorless mode)\n w_o: float = 2*np.pi*100\n zeta_inf: float = .2\n # Sensored observer (used only in the sensored mode)\n g: float = 2*np.pi*15\n\n\n# %%\nclass SynchronousMotorFluxVectorCtrl(Ctrl):\n \"\"\"\n Flux-vector control for synchronous motor drives.\n\n This class interconnects the subsystems of the control system and\n provides the interface to the solver.\n\n Parameters\n ----------\n pars : SynchronousMotoroFluxVectorCtrlPars\n Control parameters.\n\n \"\"\"\n\n # pylint: disable=too-many-instance-attributes\n def __init__(self, pars):\n super().__init__()\n self.T_s = pars.T_s\n self.w_m_ref = pars.w_m_ref\n self.sensorless = pars.sensorless\n self.speed_ctrl = SpeedCtrl(pars)\n self.pwm = PWM(pars)\n if pars.sensorless:\n self.observer = SensorlessObserver(pars)\n else:\n self.observer = Observer(pars)\n self.flux_torque_ref = FluxTorqueRef(pars)\n # Bandwidths\n self.alpha_psi = pars.alpha_psi\n self.alpha_tau = pars.alpha_tau\n # Motor parameter estimates\n self.R_s = pars.R_s\n self.L_d = pars.L_d\n self.L_q = pars.L_q\n self.n_p = pars.n_p\n\n def __call__(self, mdl):\n \"\"\"\n Run the main control loop.\n\n Parameters\n ----------\n mdl : SynchronousMotorDrive\n Continuous-time model of a synchronous motor drive for getting the\n feedback signals.\n\n Returns\n -------\n T_s : float\n Sampling period.\n d_abc_ref : ndarray, shape (3,)\n Duty ratio references.\n\n \"\"\"\n # Get the speed reference\n w_m_ref = self.w_m_ref(self.t)\n\n # Feedback signals\n i_s_abc = mdl.motor.meas_currents() # Phase currents\n u_dc = mdl.conv.meas_dc_voltage() # DC-bus voltage\n u_s = self.pwm.realized_voltage # Realized voltage from PWM\n\n if self.sensorless:\n # Get the rotor speed and position estimates\n w_m, theta_m = self.observer.w_m, self.observer.theta_m\n else:\n # Measure the rotor speed\n w_m = self.n_p*mdl.mech.meas_speed()\n # Limit the electrical rotor position into [-pi, pi)\n theta_m = np.mod(\n self.n_p*mdl.mech.meas_position() + np.pi, 2*np.pi) - np.pi\n\n # Current vector in estimated rotor coordinates\n i_s = 
np.exp(-1j*theta_m)*abc2complex(i_s_abc)\n\n # Flux and torque estimates\n psi_s = self.observer.psi_s\n tau_M = 1.5*self.n_p*np.imag(i_s*np.conj(psi_s))\n\n # Outputs\n tau_M_ref = self.speed_ctrl.output(w_m_ref/self.n_p, w_m/self.n_p)\n psi_s_ref, tau_M_ref_lim = self.flux_torque_ref(tau_M_ref, w_m, u_dc)\n\n # Auxiliary current\n i_a = psi_s.real/self.L_q + 1j*psi_s.imag/self.L_d - i_s\n\n # Torque-production factor (c_tau = 0 corresponds to the MTPV condition)\n c_tau = np.real(i_a*np.conj(psi_s))\n\n # References for the flux and torque controllers\n v_psi = self.alpha_psi*(psi_s_ref - np.abs(psi_s))\n v_tau = self.alpha_tau*(tau_M_ref_lim - tau_M)\n if c_tau > 0:\n v = (np.abs(psi_s)*i_a*v_psi + 1j*psi_s*v_tau)/c_tau\n else:\n v = v_psi\n\n # Stator voltage reference\n u_s_ref = self.R_s*i_s + 1j*w_m*psi_s + v\n\n # PWM output\n d_abc_ref, u_s_ref_lim = self.pwm.output(u_s_ref, u_dc, theta_m, w_m)\n\n # Data logging\n data = Bunch(\n i_s=i_s,\n psi_s=psi_s,\n psi_s_ref=psi_s_ref,\n t=self.t,\n tau_M_ref_lim=tau_M_ref_lim,\n theta_m=theta_m,\n u_dc=u_dc,\n u_s=u_s,\n w_m=w_m,\n w_m_ref=w_m_ref,\n )\n self.save(data)\n\n # Update states\n self.observer.update(u_s, i_s, w_m)\n self.speed_ctrl.update(tau_M_ref_lim)\n self.pwm.update(u_s_ref_lim)\n self.update_clock(self.T_s)\n\n return self.T_s, d_abc_ref\n\n\n# %%\nclass Observer:\n \"\"\"\n Sensored observer.\n\n Parameters\n ----------\n pars : SynchronousMotoroFluxVectorCtrlPars\n Control parameters.\n\n \"\"\"\n\n def __init__(self, pars):\n self.T_s = pars.T_s\n self.R_s = pars.R_s\n self.L_d = pars.L_d\n self.L_q = pars.L_q\n self.psi_f = pars.psi_f\n self.g = pars.g\n # Initial state\n self.psi_s = pars.psi_f\n\n def update(self, u_s, i_s, w_m):\n \"\"\"\n Update the states for the next sampling period.\n\n Parameters\n ----------\n u_s : complex\n Stator voltage in estimated rotor coordinates.\n i_s : complex\n Stator current in estimated rotor coordinates.\n w_m : float\n Rotor speed (in electrical rad/s).\n\n \"\"\"\n # Estimation error\n e = self.L_d*i_s.real + 1j*self.L_q*i_s.imag + self.psi_f - self.psi_s\n\n # Update the state\n self.psi_s += self.T_s*(\n u_s - self.R_s*i_s - 1j*w_m*self.psi_s + self.g*e)\n","repo_name":"matthunz/motif","sub_path":"motif-example/.env/lib/python3.11/site-packages/motulator/control/sm_flux_vector.py","file_name":"sm_flux_vector.py","file_ext":"py","file_size_in_byte":7263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10596245213","text":"import itertools\nfrom tqdm import tqdm\nfrom collections import Counter\nimport math\nfrom sympy import primefactors, sieve\nimport matplotlib.pyplot as plt\n\nfrom itertools import chain, combinations, combinations_with_replacement\n\n\ndef is_a_term_of_seq(n):\n arr_tripl = []\n for i in range(len(cubes)):\n if n > 1 + cubes[i]:\n for j in range(i):\n for k in range(j):\n if n == (cubes[i] + cubes[j] + cubes[k]) :\n if i in arr_tripl or j in arr_tripl or k in arr_tripl:\n return False\n else:\n arr_tripl = arr_tripl + [i , j , k]\n return arr_tripl != []\n\n#957206 20\n\ndef is_a_term_of_seq(n = 3071):\n arr_topl = []\n for p in combinations(list(range(1 , n//4)), 2):\n t = 4*p[0]*p[1] - p[0] - p[1]\n if t == n:\n arr_topl.append(p)\n print(arr_topl)\n\n\n\n\n\n\n\ndef make_arr_4xy():\n a = []\n for p in itertools.permutations(list(range(0 , 1_000)), 2):\n\n t = p[0]**2 - 4*p[1]\n t2 = p[0]**2 + 4*p[1]\n\n if t > 0:\n a.append((t, p[0] , -p[1]))\n\n if t2 > 0:\n a.append((t2, p[0] , 
p[1]))\n\n return sorted(a)\n\ndef run ():\n a = make_arr_4xy()\n c = 1\n cmax = [0]*500\n s = []\n out = []\n\n for i in range(1 , len(a)) :\n\n if a[i-1][0] == a[i][0]:\n c += 1\n s.append(a[i][1])\n s.append(a[i][2])\n else:\n \n if cmax[c] == 0:\n out.append( (a[i-1][0], c))\n #print(c, a[i-1][0], s, len(set(s)) == len(s))\n cmax[c] = 1\n\n c = 1\n s = []\n\n print(sorted(out))\n\n for i in out:\n print(i[0], end = \", \")\n\nif __name__ == \"__main__\":\n #is_a_term_of_seq()\n #run()\n print(24 % 16)\n print(24 % 12)\n\n \n\n ","repo_name":"weightan/some-numerical-experiments","sub_path":"stuff2.py","file_name":"stuff2.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24769900880","text":"import bpy\nfrom mathutils import Vector\n\nmode = bpy.context.object.mode\nbpy.ops.object.mode_set(mode=\"OBJECT\")\n\nuvAct = bpy.context.object.data.uv_layers.active\n\n\ndef calcArea(v1,v2,v3):\n v12 = v2 - v1\n v13 = v3 - v1\n v12 = Vector((v12[0],v12[1],0))\n v13 = Vector((v13[0],v13[1],0))\n area = v12.cross(v13).length / 2\n return(area) \n \ntotal = .0\n \nfor poly in bpy.context.object.data.polygons: \n i =0\n pa = .0\n while i != poly.loop_total-2: \n pa += calcArea(uvAct.data[poly.loop_start].uv,\n uvAct.data[poly.loop_start+1+i].uv,\n uvAct.data[poly.loop_start+2+i].uv) \n i += 1\n #print(poly.index, pa)\n total += pa\n\nbpy.ops.object.mode_set(mode=mode)\n\nprint(\"Area: %s percent\" % (total)) ","repo_name":"oscurart/BlenderAddons","sub_path":"oscurart_calculate_uv_area.py","file_name":"oscurart_calculate_uv_area.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"61"} +{"seq_id":"4235161733","text":"# -- coding: utf-8 --\nimport os\nimport re\nimport traceback\nimport json\nimport datetime\nimport utils\nimport dateutil.parser as dparser\n\n\nclass OgUsersUserParser:\n def __init__(self, parser_name, files, output_folder, folder_path):\n self.parser_name = parser_name\n self.file_pattern = re.compile(\n r'-history\\.html$'\n )\n self.output_file = '{}/USER_HISTORY.json'.format(\n str(output_folder)\n )\n self.files = self.get_filtered_files(files)\n self.folder_path = folder_path\n self.main()\n\n def get_filtered_files(self, files):\n filtered_files = list(\n filter(\n lambda x: self.file_pattern.search(\n x.split('/')[-1]) is not None,\n files\n )\n )\n return filtered_files\n\n def main(self):\n comments = []\n with open(self.output_file, 'w', encoding='utf-8') as fp:\n for index, template in enumerate(self.files):\n print(template)\n try:\n html_response = utils.get_html_response(template)\n user_info = get_user_info(html_response)\n if not user_info:\n continue\n aliases = get_aliases(html_response)\n if not aliases:\n continue\n self.write_data(fp, user_info[0], user_info[1], aliases)\n except Exception as ex:\n traceback.print_exc()\n continue\n print('\\nJson written in {}'.format(self.output_file))\n print('----------------------------------------------------\\n')\n\n def write_data(self, fp, uid, username, aliases):\n data = {\n \"_type\": \"forums\",\n \"_source\": {\n \"forum\": \"ogusers.com\",\n \"uid\": uid,\n \"username\": username,\n \"aliases\": aliases\n }\n }\n utils.write_json(fp, data)\n\n\ndef get_user_info(html_response):\n id_block = html_response.xpath('//a[@id=\"alerts \"]/@onclick')\n if not id_block:\n return\n match = re.findall(r'uid%3D(\\d+)', id_block[0])\n uid = 
match[0]\n username = html_response.xpath('//td[@class=\"thead\"]/strong/a/text()')\n if not username:\n return\n return uid, username[0]\n\n\ndef get_aliases(html_response):\n aliases = list()\n if html_response.xpath('//td[text()=\"No changes are logged.\"]'):\n return\n rows = html_response.xpath('//tr[td[contains(@class, \"trow\")]]')\n for row in rows:\n alias, date_changed = row.xpath('td/text()')\n try:\n ts = str(dparser.parse(date_changed.strip()).timestamp())\n except Exception:\n ts = date_changed\n aliases.append({\n 'alias': alias,\n 'date_changed': ts\n })\n return aliases\n","repo_name":"ken2190/Enterprise-Forum-Scraper","sub_path":"templates/ogusers_users_template.py","file_name":"ogusers_users_template.py","file_ext":"py","file_size_in_byte":2917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30328210155","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date: Created on 12 Oct 2023 10:34\n# @Author: Yao LI\n# @File: spagrn/auprc.py\n\n\nimport os\nfrom typing import Union\nimport glob\nimport json\nimport anndata\nimport scanpy as sc\nimport pandas as pd\nfrom scipy import stats\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom itertools import product\nfrom sklearn.metrics import precision_recall_curve, roc_curve, auc\n\n'''\npython ../../../../spagrn_debug/au.py\n'''\n\n\nclass AUPRC:\n def __init__(self, data, tfs, name_df=None):\n self._adata = data # only necessary when using spearman cc\n self._tfs = tfs\n\n self._ground_truth = None # all genes\n self._prediction = None # all genes\n self._baseline = None\n self._auprc_ratio = None\n self._auprc = None\n self._auroc = None\n\n self._adj = None\n self._regulons = None\n\n # column names in Ground Truth file\n self._value_col = 'regulator.effect'\n self._tf_col = 'regulator.gene'\n self._target_col = 'regulated.gene'\n\n self._name_df = name_df # ID-gene name chart\n self._true_df = None # ground truth exclude noises\n self._pred_df = None # prediction exclude genes that has been filtered\n\n self.prec = None\n self.recall = None\n self.thresholds = None\n\n @property\n def adata(self):\n return self._adata\n\n @adata.setter\n def adata(self, value):\n self._adata = value\n\n @property\n def tfs(self):\n return self._tfs\n\n @property\n def name_df(self):\n return self._name_df\n\n @name_df.setter\n def name_df(self, value):\n self._name_df = value\n\n @property\n def ground_truth(self):\n return self._ground_truth\n\n @ground_truth.setter\n def ground_truth(self, value):\n self._ground_truth = value\n\n @property\n def true_df(self):\n return self._true_df\n\n @true_df.setter\n def true_df(self, value):\n self._true_df = value\n\n @property\n def pred_df(self):\n return self._pred_df\n\n @pred_df.setter\n def pred_df(self, value):\n self._pred_df = value\n\n @property\n def baseline(self):\n return self._baseline\n\n @baseline.setter\n def baseline(self, value):\n self._baseline = value\n\n @property\n def auprc_ratio(self):\n return self._auprc_ratio\n\n @auprc_ratio.setter\n def auprc_ratio(self, value):\n self._auprc_ratio = value\n\n @property\n def auroc(self):\n return self._auroc\n\n @auroc.setter\n def auroc(self, value):\n self._auroc = value\n\n @property\n def auprc(self):\n return self._auprc\n\n @auprc.setter\n def auprc(self, value):\n self._auprc = value\n\n @property\n def prediction(self):\n return self._prediction\n\n @prediction.setter\n def prediction(self, value):\n self._prediction = value\n\n @property\n def 
value_col(self):\n return self._value_col\n\n @property\n def tf_col(self):\n return self._tf_col\n\n @property\n def target_col(self):\n return self._target_col\n\n @target_col.setter\n def target_col(self, value):\n self._target_col = value\n\n @property\n def adj(self):\n return self._adj\n\n @adj.setter\n def adj(self, value):\n self._adj = value\n\n @property\n def regulons(self):\n return self._regulons\n\n @regulons.setter\n def regulons(self, value):\n self._regulons = value\n\n def get_true_df(self, ground_truth_files):\n \"\"\"\n\n :param ground_truth_files:\n :return:\n \"\"\"\n fl = glob.glob(ground_truth_files)\n self.true_df = pd.concat([pd.read_csv(i) for i in fl]).astype(str)\n return self.true_df\n\n def make_ground_truth(self, ground_truth_files, real_tfs=None, false_tfs=None):\n \"\"\"\n\n :param ground_truth_files:\n :param real_tfs:\n :param false_tfs:\n :return:\n \"\"\"\n # names = pd.read_csv(naming_fn)\n fl = glob.glob(ground_truth_files)\n df_true = pd.concat([pd.read_csv(i) for i in fl]).astype(str)\n\n # adata = sc.read_h5ad(adata_fn)\n # if self.adata is None:\n # self.adata = adata\n all_genes = self.adata.var_names\n ground_truth = pd.DataFrame(product(self.tfs, all_genes), columns=['regulator.gene', 'regulated.gene']).astype(\n str)\n # ! make sure gene names are using the same nomenclature\n ground_truth['regulated.gene'] = ground_truth['regulated.gene'].replace(list(self.name_df['name']),\n list(self.name_df['id']))\n ground_truth['regulator.effect'] = [0] * ground_truth.shape[0]\n ground_truth = pd.concat([ground_truth, df_true])\n ground_truth = ground_truth.drop_duplicates(['regulator.gene', 'regulated.gene'], keep='last')\n\n # if false TF exists\n if real_tfs and false_tfs:\n t_ground_truth = ground_truth[ground_truth['regulator.gene'].isin(real_tfs)]\n f_ground_truth = ground_truth[ground_truth['regulator.gene'].isin(false_tfs)]\n f_ground_truth['regulator.effect'] = [0.0] * f_ground_truth.shape[0]\n ground_truth = pd.concat([t_ground_truth, f_ground_truth])\n\n ground_truth[['regulator.gene', 'regulated.gene']] = ground_truth[['regulator.gene', 'regulated.gene']].replace(\n list(self.name_df['id']), list(self.name_df['name']))\n ground_truth['regulator.effect'] = ground_truth['regulator.effect'].astype('float64')\n # convert y_true into a binary matrix\n ground_truth.loc[ground_truth['regulator.effect'] > 0, 'regulator.effect'] = 1\n # order of genes need to be consistent between ground_truth and prediction\n ground_truth = ground_truth.sort_values(['regulator.gene', 'regulated.gene'], ascending=[True, True])\n self.ground_truth = ground_truth\n return ground_truth\n\n def get_baseline(self):\n self.baseline = 1 - self.ground_truth[self.ground_truth[self.value_col] == 0].shape[0] / \\\n self.ground_truth.shape[0]\n print(f'Baseline is {self.baseline} (num of 1/num of total)')\n return self.baseline\n\n def get_pred_df(self, y_true_label=None, y_true_tf_col=None, y_true_target_col=None):\n \"\"\"\n\n :param y_true_label:\n :return:\n \"\"\"\n if self.adj is None:\n self.adj = self.adata.uns['adj']\n if y_true_label is None:\n y_true_label = self.value_col\n if y_true_tf_col is None:\n y_true_tf_col = self.tf_col\n if y_true_target_col is None:\n y_true_target_col = self.target_col\n\n # 2. 
input prediction value\n self.regulons = self.adata.uns['regulon_dict']\n mylist = [(key, x) for key, val in self.regulons.items() for x in val]\n df_pred = pd.DataFrame(mylist, columns=['Name', 'Values'])\n # TODO: if has (+)\n df_pred['Name'] = df_pred['Name'].str.strip('(+)')\n df_pred['prediction'] = [1] * df_pred.shape[0]\n\n # 1. get importance (pred_label) values\n df_pred = self.adj.merge(df_pred, left_on=['TF', 'target'], right_on=['Name', 'Values'], how='left')\n df_pred['prediction'] = df_pred['prediction'].fillna(0)\n\n # 3. introduce ground truth classification label\n df_pred = df_pred.merge(self.ground_truth, left_on=['TF', 'target'],\n right_on=[y_true_tf_col, y_true_target_col],\n how='left')\n\n df_pred = df_pred[['TF', 'target', 'importance', 'prediction', y_true_label]]\n df_pred.columns = [y_true_tf_col, y_true_target_col, 'importance', 'prediction', 'ground truth']\n # df_pred.to_csv('df_pred.csv', index=False)\n self.pred_df = df_pred\n return self.pred_df\n\n def get_pred_df_grnboost(self, y_true_label=None, y_true_tf_col=None, y_true_target_col=None):\n \"\"\"\n\n :param y_true_label:\n :return:\n \"\"\"\n if self.adj is None:\n self.adj = self.adata.uns['adj']\n if y_true_label is None:\n y_true_label = self.value_col\n if y_true_tf_col is None:\n y_true_tf_col = self.tf_col\n if y_true_target_col is None:\n y_true_target_col = self.target_col\n\n # 1. input prediction value\n df_pred = self.adj.copy()\n df_pred['prediction'] = [1] * df_pred.shape[0]\n\n # 3. introduce ground truth classification label\n df_pred = df_pred.merge(self.ground_truth, left_on=['TF', 'target'],\n right_on=[y_true_tf_col, y_true_target_col],\n how='left')\n\n df_pred = df_pred[['TF', 'target', 'importance', 'prediction', y_true_label]]\n df_pred.columns = [y_true_tf_col, y_true_target_col, 'importance', 'prediction', 'ground truth']\n # df_pred.to_csv('df_pred.csv', index=False)\n self.pred_df = df_pred\n return self.pred_df\n\n # alternative to get_pred_df\n # calculate spearman values\n def get_pred_df_spearman(self, data: Union[pd.DataFrame, anndata.AnnData] = None, y_true_label=None,\n y_true_tf_col=None, y_true_target_col=None):\n \"\"\"\n\n :param data:\n :param y_true_label:\n :param y_true_tf_col:\n :param y_true_target_col:\n :return:\n \"\"\"\n if y_true_label is None:\n y_true_label = self.value_col\n if y_true_tf_col is None:\n y_true_tf_col = self.tf_col\n if y_true_target_col is None:\n y_true_target_col = self.target_col\n # 1. calculate spearman cc\n # adata = sc.read_h5ad(self.adata_fn)\n # if self.adata is None:\n # self.adata = adata\n # df = adata.to_df()\n if data is None:\n raise ValueError('Must provide expression matrix when pred label is set to spearman')\n if isinstance(data, anndata.AnnData):\n data = data.to_df()\n if self.adj is None:\n self.adj = self.adata.uns['adj']\n s = []\n for i in self.adj.index:\n res = stats.spearmanr(data[self.adj.loc[i].TF], data[self.adj.loc[i].target])\n s.append(res.correlation)\n self.adj['spearman'] = s\n adj = self.adj.sort_values(['importance', 'spearman'], ascending=False)\n\n # 2. input prediction value\n regs = self.adata.uns['regulon_dict']\n mylist = [(key, x) for key, val in regs.items() for x in val]\n df_pred = pd.DataFrame(mylist, columns=['Name', 'Values'])\n df_pred['Name'] = df_pred['Name'].str.strip('(+)')\n df_pred['prediction'] = [1] * df_pred.shape[0]\n\n # 1. 
merge spearman df and prediction df\n        df_pred = adj.merge(df_pred, left_on=['TF', 'target'], right_on=['Name', 'Values'], how='left')\n        df_pred['prediction'] = df_pred['prediction'].fillna(0)\n\n        # 3. introduce ground truth classification label\n        df_pred = df_pred.merge(self.ground_truth, left_on=['TF', 'target'],\n                                right_on=[y_true_tf_col, y_true_target_col],\n                                how='left')\n        df_pred = df_pred[['TF', 'target', 'importance', 'spearman', 'prediction', y_true_label]]\n        # sort by spearman value\n        tt1 = df_pred[df_pred.prediction > 0]\n        tt0 = df_pred[df_pred.prediction == 0]\n        tt1 = tt1.sort_values(['spearman'], ascending=False)\n        tt0 = tt0.sort_values(['spearman'], ascending=False)\n        # shift the 0-label (negative) spearman values down so they all rank below the 1 labels\n        tt0['spearman'] = tt0['spearman'] - 1\n        df_prediction = pd.concat([tt1, tt0])\n        df_prediction.columns = [y_true_tf_col, y_true_target_col, 'importance', 'spearman', 'prediction',\n                                 'ground truth']\n        # df_prediction.to_csv('df_pred.csv', index=False)\n        self.pred_df = df_prediction\n        # self.adata.uns['prediction'] = df_prediction\n        return df_prediction\n\n    def get_prediction_df(self, pred_label='spearman', y_true_tf_col=None, y_true_target_col=None):\n        \"\"\"\n        get prediction for all genes (including genes that had been filtered out by SpaGRN),\n        so Ground Truth and Prediction have the same dimension (aka len(all_genes))\n        value_col: column of value to pass in AUPRC calculation e.g. importance, spearman coefficient ...\n        :param pred_label:\n        :param y_true_tf_col:\n        :param y_true_target_col:\n        :return:\n        \"\"\"\n        if y_true_tf_col is None:\n            y_true_tf_col = self.tf_col\n        if y_true_target_col is None:\n            y_true_target_col = self.target_col\n\n        pred_index = pd.merge(\n            self.pred_df[[y_true_tf_col, y_true_target_col, pred_label, 'prediction', 'ground truth']],\n            self.ground_truth[[y_true_tf_col, y_true_target_col]], on=[y_true_tf_col, y_true_target_col],\n            how='outer')\n        assert pred_index.shape[0] == self.ground_truth.shape[0]\n        pred = pred_index.sort_values([y_true_tf_col, y_true_target_col], ascending=[True, True])\n        if pred_label == 'spearman':\n            pred = pred.fillna(int(pred[pred_label].min()) - 2)\n        else:\n            pred = pred.fillna(0)\n\n        self.prediction = pred\n        return self.prediction\n\n    def get_auprc(self, y_true_label='regulator.effect', pred_label='spearman'):\n        \"\"\"\n\n        :param y_true_label:\n        :param pred_label:\n        :return:\n        \"\"\"\n        self.prec, self.recall, self.thresholds = precision_recall_curve(y_true=self.ground_truth[y_true_label],\n                                                                         probas_pred=self.prediction[pred_label],\n                                                                         pos_label=1)\n        new_auc = auc(self.recall, self.prec)\n        # write to the backing field: the `auprc` property defined above is shadowed by the\n        # pipeline method of the same name further down, so assigning to self.auprc here\n        # would clobber that method on the instance\n        self._auprc = new_auc\n\n    def get_ratio(self):\n        \"\"\"\n        AUPRC ratio\n        :return:\n        \"\"\"\n        if not self.baseline:\n            self.get_baseline()\n        self.auprc_ratio = self._auprc / self.baseline\n        if self.adata is not None and isinstance(self.adata, anndata.AnnData):\n            self.adata.uns['auprc_ratio'] = self.auprc_ratio\n        print(f'AUPRC ratio is {self.auprc_ratio}.')\n\n    def plot_prec_recall(self, fn='Precision-Recall.png'):\n        if self.recall is None or self.prec is None:\n            raise ValueError('Calculate AUPRC before plotting. 
See method get_auprc')\n        plt.fill_between(self.recall, self.prec)\n        plt.ylabel(\"Precision\")\n        plt.xlabel(\"Recall\")\n        plt.title(\"Train Precision-Recall curve\")\n        plt.savefig(fn)\n        plt.close()\n\n    def get_auroc(self, y_true_label='regulator.effect', pred_label='spearman'):\n        \"\"\"\n\n        :param y_true_label:\n        :param pred_label:\n        :return:\n        \"\"\"\n        fpr, tpr, thresholds2 = roc_curve(y_true=self.ground_truth[y_true_label],\n                                          y_score=self.prediction[pred_label],\n                                          pos_label=1)\n        auroc = auc(fpr, tpr)\n        self.auroc = auroc\n        return auroc\n        # plt.fill_between(fpr, tpr)\n        # plt.ylabel(\"true positive\")\n        # plt.xlabel(\"false positive\")\n        # plt.title(\"AUROC\")\n        # plt.savefig('aucroc.png')\n        # plt.close()\n\n    def auprc(self,\n              pred_label,\n              ground_truth_files,\n              y_true_label='regulator.effect',\n              fn='adata.h5ad',\n              fig_fn='Precision-Recall.png'):\n        \"\"\"\n        Main logic method. SpaGRN.AUPRC pipeline\n        1. generate ground truth\n        2.1. load in prediction output by SpaGRN\n        2.2. fill in the blank\n        3. Calculate AUPRC and plot result\n        :param pred_label:\n        :param ground_truth_files:\n        :param y_true_label:\n        :param fn:\n        :return:\n        \"\"\"\n        # 1.\n        self.make_ground_truth(ground_truth_files, real_tfs=['2', '232', '408', '805', '1006'],\n                               false_tfs=['1140', '1141', '1142', '1143', '1144'])\n        # self.get_baseline()\n\n        # 2.\n        if pred_label == 'spearman':\n            self.get_pred_df_spearman(data=self.adata)\n        else:\n            self.get_pred_df_grnboost()\n        # self.get_pred_df()\n        self.get_prediction_df(pred_label=pred_label)\n\n        # 3.\n        self.get_auprc(pred_label=pred_label, y_true_label=y_true_label)\n        self.get_ratio()\n        # self.plot_prec_recall(fn=fig_fn)\n\n        # 4. save results\n        # self.adata.write_h5ad(fn)\n\n    def roc(self,\n            pred_label,\n            ground_truth_files,\n            y_true_label='regulator.effect',\n            fn='adata.h5ad',\n            fig_fn='Precision-Recall.png'):\n        \"\"\"\n        Main logic method. SpaGRN.AUROC pipeline\n        1. generate ground truth\n        2.1. load in prediction output by SpaGRN\n        2.2. fill in the blank\n        3. Calculate AUROC\n        :param pred_label:\n        :param ground_truth_files:\n        :param y_true_label:\n        :param fn:\n        :return:\n        \"\"\"\n        # 1.\n        self.make_ground_truth(ground_truth_files, real_tfs=['2', '232', '408', '805', '1006'],\n                               false_tfs=['1140', '1141', '1142', '1143', '1144'])\n        # self.get_baseline()\n\n        # 2.\n        if pred_label == 'spearman':\n            self.get_pred_df_spearman(data=self.adata)\n        else:\n            self.get_pred_df_grnboost()\n        # self.get_pred_df()\n        self.get_prediction_df(pred_label=pred_label)\n\n        # 4.\n        self.get_auroc(pred_label=pred_label, y_true_label=y_true_label)\n\n        # 5. 
save results\n # self.adata.write_h5ad(fn)\n\n\ndef cal_auprc(adata, tfs, name_df, pred_label='spearman', ground_truth_files='', fn='adata.h5ad'):\n \"\"\"\n\n :param adata:\n :param tfs:\n :param name_df:\n :param adj_fn:\n :param reg_fn:\n :param pred_label:\n :param ground_truth_files:\n :return:\n \"\"\"\n a = AUPRC(data=adata,\n tfs=tfs,\n name_df=name_df)\n a.auprc(pred_label=pred_label, ground_truth_files=ground_truth_files, fn=fn)\n return a.auprc_ratio\n\n\ndef cal_auroc(adata, tfs, name_df, pred_label='spearman', ground_truth_files='', fn='adata.h5ad'):\n \"\"\"\n\n :param adata:\n :param tfs:\n :param name_df:\n :param adj_fn:\n :param reg_fn:\n :param pred_label:\n :param ground_truth_files:\n :return:\n \"\"\"\n a = AUPRC(data=adata,\n tfs=tfs,\n name_df=name_df)\n a.roc(pred_label=pred_label, ground_truth_files=ground_truth_files, fn=fn)\n return a.auroc\n\n\ndef calculate_multi_samples(methods):\n data_nums = list(range(2, 12))\n ratios = {}\n aurocs = {}\n for method in methods:\n ratios[method] = []\n aurocs[method] = []\n for num in data_nums:\n data_folder = os.path.join(data_fn_base, f'data{num}')\n data_fn = os.path.join(data_folder, f'hetero_{num}.h5ad')\n if method == 'hotspot':\n pred_label = 'spearman'\n else:\n pred_label = 'importance'\n\n adata = sc.read_h5ad(data_fn)\n ratio = cal_auprc(adata, tfs, names, pred_label=pred_label, ground_truth_files=ver7_ground_truth)\n ratios[method].append(ratio)\n auroc = cal_auroc(adata, tfs, names, pred_label=pred_label, ground_truth_files=ver7_ground_truth)\n aurocs[method].append(auroc)\n\n with open('ratios.json', 'w') as f:\n json.dump(ratios, f, sort_keys=True, indent=4)\n with open('aurocs.json', 'w') as f:\n json.dump(aurocs, f, sort_keys=True, indent=4)\n return ratios, aurocs\n\n\ndef ratios_boxplot(ratios, methods, x_labels=None, fn='auprc_ratio_boxplot.pdf'):\n if x_labels is None:\n x_labels = ['SpaGRN', 'GRNBoost2', 'GENIE3', 'HOTSPOT']\n df = pd.DataFrame.from_records(ratios).astype('float64')\n df = df[methods]\n ax = sns.boxplot(data=df)\n plt.title('AUPRC ratios')\n plt.ylabel('ratio')\n ax.set_xticklabels(x_labels)\n plt.tight_layout()\n plt.savefig(fn, format='pdf')\n plt.close()\n\n\ndef auroc_boxplot(aurocs, methods, x_labels=None, fn='auroc_ratio_boxplot.pdf'):\n if x_labels is None:\n x_labels = ['SpaGRN', 'GRNBoost2', 'GENIE3', 'HOTSPOT']\n df = pd.DataFrame.from_records(aurocs).astype('float64')\n df = df[methods]\n ax = sns.boxplot(data=df)\n plt.title('AUROC')\n plt.ylabel('auroc')\n ax.set_xticklabels(x_labels)\n plt.tight_layout()\n plt.savefig(fn, format='pdf')\n plt.close()\n\n\nif __name__ == '__main__':\n names = pd.read_csv('/dellfsqd2/ST_OCEAN/USER/liyao1/07.spatialGRN/exp/07.simulation/ver7/name_df.csv')\n '''\n print(names)\n id,name\n 2,Adf1\n 232,Aef1\n 408,grh\n '''\n tfs = [2, 232, 408, 805, 1006, 1140, 1141, 1142, 1143, 1144]\n ver7_ground_truth = '/dellfsqd2/ST_OCEAN/USER/liyao1/07.spatialGRN/exp/07.simulation/ver7/GRN_params_*.csv'\n data_fn_base = '/dellfsqd2/ST_OCEAN/USER/liyao1/07.spatialGRN/exp/07.simulation/ver8'\n\n # for one sample\n # 2023-10-24: test h5ad adj & regulons: success\n # adata = sc.read_h5ad('/dellfsqd2/ST_OCEAN/USER/liyao1/07.spatialGRN/exp/07.simulation/ver8/data11/HOTSPOT.h5ad')\n # adata = sc.read_h5ad('/dellfsqd2/ST_OCEAN/USER/liyao1/07.spatialGRN/exp/07.simulation/ver8/data11/hetero_11.h5ad')\n # adj_fn = '/dellfsqd2/ST_OCEAN/USER/liyao1/07.spatialGRN/exp/07.simulation/ver8/data11/genie3.adj.csv'\n # adj_fn = 
'/dellfsqd2/ST_OCEAN/USER/liyao1/07.spatialGRN/exp/07.simulation/grnboost_adj.csv'\n    # reg_fn = '/dellfsqd2/ST_OCEAN/USER/liyao1/07.spatialGRN/exp/07.simulation/ver8/data11/grnboost/grnboost_regulons.json'\n    # ratio = cal_auprc(adata, tfs, names, adj_fn=adj_fn, reg_fn=reg_fn, pred_label='importance', ground_truth_files=ver7_ground_truth)\n    # 2023-10-24\n    # ratio = cal_auprc(adata, tfs, names, pred_label='importance', ground_truth_files=ver7_ground_truth)\n    # print(ratio)\n\n    # if os.path.isfile('/dellfsqd2/ST_OCEAN/USER/liyao1/07.spatialGRN/exp/07.simulation/ver8/ratios.json') \\\n    #         and os.path.isfile('/dellfsqd2/ST_OCEAN/USER/liyao1/07.spatialGRN/exp/07.simulation/ver8/aurocs.json'):\n    #     rs = json.load(open('/dellfsqd2/ST_OCEAN/USER/liyao1/07.spatialGRN/exp/07.simulation/ver8/ratios.json'))\n    #     aus = json.load(open('/dellfsqd2/ST_OCEAN/USER/liyao1/07.spatialGRN/exp/07.simulation/ver8/aurocs.json'))\n    # else:\n    methods = ['hotspot', 'grnboost', 'genie3', 'HOTSPOT']\n    rs, aus = calculate_multi_samples(methods)\n    ratios_boxplot(rs, methods)\n    auroc_boxplot(aus, methods)\n\n    # 2023-10-25\n    # re-run HOTSPOT\n    # ratios = json.load(open('/dellfsqd2/ST_OCEAN/USER/liyao1/07.spatialGRN/exp/07.simulation/ver8/ratios.json'))\n    # aurocs = json.load(open('/dellfsqd2/ST_OCEAN/USER/liyao1/07.spatialGRN/exp/07.simulation/ver8/aurocs.json'))\n    # data_nums = list(range(2, 12))\n    # methods = ['HOTSPOT']\n    # for method in methods:\n    #     ratios[method] = []\n    #     aurocs[method] = []\n    #     for num in data_nums:\n    #         data_folder = os.path.join(data_fn_base, f'data{num}')\n    #         data_fn = os.path.join(data_folder, f'{method}/hotspot.h5ad')\n    #         if method == 'hotspot':\n    #             pred_label = 'spearman'\n    #         else:\n    #             pred_label = 'importance'\n    #\n    #         adata = sc.read_h5ad(data_fn)\n    #         ratio = cal_auprc(adata, tfs, names, pred_label=pred_label, ground_truth_files=ver7_ground_truth)\n    #         ratios[method].append(ratio)\n    #         auroc = cal_auroc(adata, tfs, names, pred_label=pred_label, ground_truth_files=ver7_ground_truth)\n    #         aurocs[method].append(auroc)\n    #     with open('/dellfsqd2/ST_OCEAN/USER/liyao1/07.spatialGRN/exp/07.simulation/ver8/ratios.json', 'w') as f:\n    #         json.dump(ratios, f, sort_keys=True, indent=4)\n    #     with open('/dellfsqd2/ST_OCEAN/USER/liyao1/07.spatialGRN/exp/07.simulation/ver8/aurocs.json', 'w') as f:\n    #         json.dump(aurocs, f, sort_keys=True, indent=4)\n    #\n    # ratios_boxplot(ratios)\n    # auroc_boxplot(aurocs)\n","repo_name":"BGI-Qingdao/SpaGRN","sub_path":"spagrn/auprc.py","file_name":"auprc.py","file_ext":"py","file_size_in_byte":24475,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} {"seq_id":"72334041474","text":"import numpy as np\nimport cv2\nfrom keras.models import load_model\nimport files\nfrom data import seqs\n
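# FrameSeqs below keeps one list of frames per clip name; DataDict (imported next) is\n# assumed to be the project's dict-like base class\nfrom . 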
import DataDict\n\nclass FrameSeqs(DataDict):\n def __init__(self, args=[]):\n super(FrameSeqs, self).__init__(args)\n\n def n_persons(self):\n persons=set([ name_i.get_person() for name_i in self.keys()])\n return len(persons)\n\n def n_frames(self):\n n=0\n for seq_i in self.values():\n n+=len(seq_i)\n return n\n\n def dims(self):\n return list(self.values())[0][0].shape\n\n def scale(self,dims=(64,64),new=False):\n def helper(img_j):\n img_j=cv2.resize(img_j,dsize=dims,interpolation=cv2.INTER_CUBIC)\n if(img_j.ndim==3):\n return img_j\n return np.expand_dims(img_j,axis=-1)\n return self.transform(helper,new,single=True)\n\n def transform(self,fun,new=False,single=True):\n frame_dict= FrameSeqs() if(new) else self\n for name_i,seq_i in self.items():\n print(name_i)\n if(single):\n frame_dict[name_i]=[fun(img_j)\n for img_j in seq_i]\n else:\n frame_dict[name_i]=fun(seq_i)\n return frame_dict\n\n def to_dataset(self):\n names=self.names()\n X=[ np.array(self[name_i]) for name_i in names]\n y=[name_i.get_cat() for name_i in names]\n return np.array(X),y\n\n def save(self,out_path):\n files.make_dir(out_path)\n for name_i,seq_i in self.items():\n out_i=\"%s/%s\" % (out_path,name_i)\n if( len(self.dims())==3 and self.dims()[-1]!=1):\n seq_i=[np.concatenate(frame_j.T,axis=0) \n for frame_j in seq_i]\n save_frames(out_i,seq_i)\n\n def seqs_len(self):\n return [len(seq_i) for seq_i in self.values()]\n\n def min_len(self):\n return min(self.seqs_len())\n\nclass ReadFrames(object):\n def __init__(self,n_split=1):\n self.n_split=n_split\n\n def __call__(self,in_path):\n return read_frame_seqs(in_path,n_split=self.n_split)\n\ndef read_frame_seqs(in_path,n_split=1):\n frame_seqs=FrameSeqs()\n for i,path_i in enumerate(files.top_files(in_path)):\n name_i=files.Name(path_i.split('/')[-1]).clean()\n if(len(name_i)==0):\n name_i=files.Name(str(i))\n frames=[ read_frame(path_j,n_split) \n for path_j in files.top_files(path_i)]\n frame_seqs[name_i]=frames\n return frame_seqs\n\ndef read_frame(in_path,n_split=1):\n frame_ij=cv2.imread(in_path,cv2.IMREAD_GRAYSCALE)\n if(n_split is None):\n n_split=int(frame_ij.shape[1] /frame_ij.shape[0]) \n if(n_split==1):\n return frame_ij\n return np.array(np.vsplit(frame_ij,n_split)).T\n\ndef save_frames(in_path,frames):\n files.make_dir(in_path)\n for i,frame_i in enumerate(frames):\n out_i=\"%s/%d.png\" % (in_path,i)\n cv2.imwrite(out_i, frame_i)\n\ndef extract_features(in_path,nn_path,out_path):\n frame_seqs=read_frame_seqs(in_path)\n model=load_model(nn_path) \n feat_seqs=seqs.Seqs()\n for name_i,seq_i in frame_seqs.items():\n feat_seqs[name_i]=model.predict(np.array(seq_i))\n feat_seqs.save(out_path)\n\ndef rescale_seqs(in_path,out_path,dims=(64,64),n_split=1):\n frame_seqs=read_frame_seqs(in_path,n_split=n_split)\n frame_seqs.scale(dims,new=False)\n frame_seqs.save(out_path)\n\ndef tranform_frames(in_path,out_path,fun,whole=False):\n frames=read_frame_seqs(in_path,n_split=1)\n if(whole):\n fun(frames) \n else:\n frames.transform(fun)\n frames.save(out_path)","repo_name":"tjacek/ActionClassifier","sub_path":"data/imgs.py","file_name":"imgs.py","file_ext":"py","file_size_in_byte":3606,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"3383626098","text":"from db import get_current_temp, retrieve_enable_inner, retrieve_temp_threshold\nfrom meross import turn_device_on, turn_device_off\nimport os\nimport asyncio\n\nasync def ctrl_check():\n current_temp_inside = float(get_current_temp(0))\n 
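# index 0 is the inner sensor and index 1 the outdoor one (mirroring the variable names);\n    # readings are cast to float so they can be compared with the thresholds below\n    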
current_temp_outside = float(get_current_temp(1))\n\n    temp_threshold_inner = float(retrieve_temp_threshold(0))\n    temp_threshold_outside = float(retrieve_temp_threshold(1))\n\n    inner_enabled = retrieve_enable_inner()  # fetched but not used below\n\n    if current_temp_outside > temp_threshold_outside and current_temp_inside < temp_threshold_inner:\n        await turn_device_off()\n    else:\n        await turn_device_on()\n\nif __name__ == '__main__':\n    if os.name == 'nt':\n        asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\n    loop = asyncio.get_event_loop()\n    loop.run_until_complete(ctrl_check())\n    loop.stop()\n","repo_name":"grossamos/cooler-cooler","sub_path":"backend/ctrlcheck.py","file_name":"ctrlcheck.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"29213388075","text":"from django.shortcuts import render\r\nfrom django.core.validators import URLValidator\r\nfrom django.core.exceptions import ValidationError\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nfrom bs4.element import Comment\r\nfrom datetime import datetime\r\nfrom django.utils.dateformat import DateFormat\r\nfrom django.utils.formats import get_format\r\nfrom django.utils import formats\r\n# Create your views here.\r\nfrom .models import ScrapedData\r\n\r\n\r\ndef tag_visible(element):\r\n    if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:\r\n        return False\r\n    if isinstance(element, Comment):\r\n        return False\r\n    return True\r\n\r\n\r\ndef index(request):\r\n    if request.method == 'POST':\r\n        url_to_check = request.POST.get('url')\r\n        if url_to_check:\r\n            url_validator = URLValidator()\r\n            try:\r\n                url_validator(url_to_check)\r\n                req = requests.get(url_to_check)\r\n                soup = BeautifulSoup(req.content, \"html.parser\")\r\n                # for re in soup([\"thead\"]):\r\n                #     re.extract()\r\n                # for script in soup([\"script\", \"style\", \"title\", \"head\", \"form\", 'meta', '[document]']):\r\n                #     script.extract()\r\n                # res = soup.table.text\r\n                # print(res)\r\n\r\n                table = soup.find('table')\r\n                rows = table.find_all('tr')\r\n                for row in rows[1:]:  # Skip the header row if present\r\n                    columns = row.find_all('td')\r\n                    college_name = columns[0].text.strip()\r\n                    prn = columns[1].text.strip()\r\n                    std_name = columns[2].text.strip()\r\n                    tcNo = columns[3].text.strip()\r\n                    date = columns[4].text.strip()\r\n                    reason = columns[5].text.strip()\r\n                    phone = columns[6].text.strip()\r\n                    # df = DateFormat(date)\r\n                    # formatted_datetime = formats.date_format(df, \"%d-%m-%Y\")\r\n                    # # Df = df.format(get_format('DATE_FORMAT'))\r\n                    # print(formatted_datetime)\r\n                    ScrapedData.objects.create(college_name=college_name, PRN=prn, name=std_name, TcNo=tcNo, date=date, reason=reason, mobile=phone)\r\n                    print(college_name, prn, std_name, tcNo, date, reason, phone)\r\n\r\n                # for row in rows:\r\n                #     th = row.find_all('th')\r\n                #     columns = row.find_all('td')\r\n                #     for i in th:\r\n                #         if i.text == \"First\":\r\n                #             n = len(i)\r\n                #         elif i.text == \"Last\":\r\n                #             a = len(i)\r\n                #             print(a)\r\n                #     print(n, a)\r\n                # for row in rows[1:]:\r\n                #     th = row.find_all('th')\r\n                #     columns = row.find_all('td')\r\n                #     name = columns[a].text.strip()\r\n                #     address = columns[n].text.strip()\r\n                #     print(name, address, a)\r\n\r\n            except ValidationError:\r\n                error_message = \"Invalid URL provided.\"\r\n                print(error_message)\r\n\r\n        else:\r\n            error_message = \"Please provide a URL.\"\r\n            print(error_message)\r\n    
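# NOTE: error_message is only printed at the moment; pass it through the template\r\n    # context if it should be shown to the user\r\n    return render(request, 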
'index.html')\r\n","repo_name":"SVP2408/Dropout_Student","sub_path":"Scrapdata/Scrapapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"23376515911","text":"'''\r\nCreated on 06.05.2012\r\n\r\n@author: alex\r\n'''\r\nfrom __future__ import print_function\r\nimport sys\r\n\r\nsys.stdin = open('in', 'rb')\r\nsys.stdout = open(\"out2\", 'w')\r\nclasses = None\r\n\r\ndef solve(row):\r\n    global classes\r\n\r\n    if not len(classes[row]):\r\n        return [row]\r\n\r\n    paths = [None] * len(classes[row])\r\n    for i, c in enumerate(classes[row]):\r\n        paths[i] = solve(c-1)\r\n    cnt = 0\r\n    ex = set()\r\n    rn = None\r\n    for p in paths:\r\n        if p:\r\n            for n in p:\r\n                cnt += n\r\n                rn = n\r\n                if n not in ex:\r\n                    ex.add(n)\r\n                else:\r\n                    return [n, n]\r\n        elif row != 0 and cnt:\r\n            return p\r\n    if cnt:\r\n        return [rn]\r\n    return 0\r\n\r\ndef solve1(row):\r\n    global classes\r\n\r\n    if not len(classes[row]):\r\n        return [row]\r\n    paths = [None] * len(classes[row])\r\n    pths = []\r\n    for i, c in enumerate(classes[row]):\r\n        paths[i] = solve1(c-1)\r\n        if not isinstance(paths[i], bool):\r\n            pths += paths[i]\r\n        else:\r\n            return True\r\n    for s in set(pths):\r\n        if pths.count(s) >= 2:\r\n            #print('cnt:', row, pths)\r\n            return True\r\n\r\n    f = [row]\r\n    if len(pths):\r\n        f += pths\r\n    return f\r\n\r\nT = int(raw_input())\r\nfor t in range(T):\r\n    N = int(raw_input())\r\n    classes = [None] * N\r\n    for i in range(N):\r\n        st = [int(x) for x in raw_input().split()]\r\n        inheritanceCount = st.pop(0)\r\n        classes[i] = st[:inheritanceCount]\r\n\r\n    for i in range(N):\r\n        r = solve1(i)\r\n        if isinstance(r, bool):\r\n            print(\"Case #%d: Yes\" % (t+1))\r\n            break\r\n    else:\r\n        print(\"Case #%d: No\" % (t+1))\r\nsys.stdout.close()","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_105/372.py","file_name":"372.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"44407304320","text":"#In a binary tree, the root node is at depth 0, and children of each depth k node are at depth k+1.\r\n#\r\n#Two nodes of a binary tree are cousins if they have the same depth, but have different parents.\r\n#\r\n#We are given the root of a binary tree with unique values, and the values x and y of two different nodes in the tree.\r\n#\r\n#Return true if and only if the nodes corresponding to the values x and y are cousins.\r\n#\r\n#Example 1:\r\n#\r\n#Input: root = [1,2,3,4], x = 4, y = 3\r\n#Output: false\r\n#Example 2:\r\n#\r\n#Input: root = [1,2,3,null,4,null,5], x = 5, y = 4\r\n#Output: true\r\n#Example 3:\r\n#\r\n#Input: root = [1,2,3,null,4], x = 2, y = 3\r\n#Output: false\r\n\r\n\r\nclass Solution(object):\r\n    def isCousins(self, root, x, y):\r\n        \"\"\"\r\n        :type root: TreeNode\r\n        :type x: int\r\n        :type y: int\r\n        :rtype: bool\r\n        \"\"\"\r\n        val_to_node = {root.val: root}  # value to nodes at current depth\r\n        node_to_parent = {root: None}\r\n\r\n        while True:\r\n\r\n            x_node = val_to_node.get(x, None)\r\n            y_node = val_to_node.get(y, None)\r\n            if x_node is not None and y_node is not None:\r\n                return node_to_parent[x_node] != node_to_parent[y_node]\r\n            if x_node is not None or y_node is not None:\r\n                return False\r\n\r\n            new_val_to_node = {}\r\n            for node in 
val_to_node.values():\r\n                if node.left:\r\n                    node_to_parent[node.left] = node\r\n                    new_val_to_node[node.left.val] = node.left\r\n                if node.right:\r\n                    node_to_parent[node.right] = node\r\n                    new_val_to_node[node.right.val] = node.right\r\n            val_to_node = new_val_to_node","repo_name":"nileshpaliwal/May-Leetcoding-Challenge-2020","sub_path":"Cousins in Binary Tree.py","file_name":"Cousins in Binary Tree.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"34628218321","text":"class Solution:\n    def findAnagrams(self, s, p):\n        \"\"\"\n        :type s: str\n        :type p: str\n        :rtype: List[int]\n        \"\"\"\n\n        def oord(x):\n            return ord(x) - 97\n\n        list1, num = [0] * 26, 0\n        for i in p:\n            idx = oord(i)\n            list1[idx] += 1\n            if list1[idx] == 1: num += 1\n\n        list2, cnt = [0] * 26, 0\n\n        def calc(o, x):\n            nonlocal cnt, list2\n            idx = oord(x)\n            if list2[idx] == list1[idx]:\n                cnt = cnt - 1\n            list2[idx] += o\n            if list2[idx] == list1[idx]:\n                cnt = cnt + 1\n\n        len1, len2, ans = len(p), len(s), []\n\n        if len1 > len2: return ans\n\n        for i in range(0, len1):\n            calc(1, s[i: i + 1])\n\n        if cnt == num:\n            ans.append(0)\n\n        for i in range(len1, len2):\n            calc(-1, s[i - len1: i - len1 + 1])\n            calc(1, s[i: i + 1])\n            if cnt == num:\n                ans.append(i - len1 + 1)\n\n        return ans","repo_name":"zqy1018/my_leetcode","sub_path":"code/438.py","file_name":"438.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"61"} {"seq_id":"4844715739","text":"\n\n# Write a program that stores the subjects of a course\n# (for example Mathematics, Physics, Chemistry, History and Language)\n# in a list and prints it on screen.\ndef ejercicio1():\n    curso = [\"Matemáticas\", \"Física\", \"Química\", \"Historia\", \"Lengua\"]\n    for asignatura in curso:\n        print(asignatura)\n\n\n# Write a program that stores the subjects of a course\n# (for example Mathematics, Physics, Chemistry, History and Language)\n# in a list and prints on screen the message\n# \"Yo estudio <asignatura>\", where <asignatura>\n# is each of the subjects in the list.\ndef ejercicio2():\n    curso = [\"Matemáticas\", \"Física\", \"Química\", \"Historia\", \"Lengua\"]\n    for asignatura in curso:\n        print(\"Yo estudio \" + asignatura)\n\n\n# Write a program that stores the subjects of a course\n# (for example Mathematics, Physics, Chemistry, History and Language) in a list,\n# asks the user for the mark obtained in each subject,\n# and then prints them on screen with the message\n# \"En <asignatura> has sacado <nota>\", where <asignatura>\n# is each of the subjects in the list\n# and <nota> each of the corresponding marks entered by the user.\ndef ejercicio3():\n    curso = [\"Matemáticas\", \"Física\", \"Química\", \"Historia\", \"Lengua\"]\n    notas = {}\n    for asignatura in curso:\n        notas[asignatura] = input(\n            \"Introduzca la nota que has sacado en \" + asignatura + \": \")\n    for asignatura in notas:\n        print(asignatura + \" : \" + notas[asignatura])\n\n\n# Write a program that asks the user for the winning numbers of the\n# \"lotería primitiva\", stores them in a list and prints them on screen\n# sorted from lowest to highest.\ndef ejercicio4():\n    ganadores = []\n    ganador = input(\"Introduzca el nombre de ganador: \")\n    while ganador:\n        # store as int so the final sort is numeric, as the exercise asks\n        ganadores.append(int(ganador))\n        ganador = input(\"Introduzca el nombre de ganador: \")\n    print(sorted(ganadores))\n\n\n# Write a program that stores in a list the numbers from 1 
to 10\n# and prints them on screen in reverse order, separated by commas.\ndef ejercicio5():\n    print(\",\".join(map(str, reversed(range(1, 11)))))\n\n\n# Write a program that asks the user for a word\n# and prints on screen the number of times it contains each vowel.\ndef ejercicio6():\n    palabra = input(\"Introduzca un palabra: \")\n    print(\"a: \", palabra.count(\"a\"))\n    print(\"e: \", palabra.count(\"e\"))\n    print(\"i: \", palabra.count(\"i\"))\n    print(\"o: \", palabra.count(\"o\"))\n    print(\"u: \", palabra.count(\"u\"))\n\n\n# Write a program that stores the alphabet in a list,\n# removes from the list the letters at positions that are multiples of 3,\n# and prints the resulting list on screen.\ndef ejercicio7():\n    abecedario = [chr(ord('a') + i) for i in range(26)]\n    print(abecedario)\n    # positions are counted from 1, hence the +1 in the modulo test\n    print(list(filter(lambda letter: (ord(letter) - ord('a') + 1) % 3 != 0, abecedario)))\n\n\nif __name__ == \"__main__\":\n    ejercicio7()\n","repo_name":"vitaminac/code","sub_path":"competition/python_ejemplo/hoja4.py","file_name":"hoja4.py","file_ext":"py","file_size_in_byte":2964,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} {"seq_id":"70451785796","text":"\"\"\"\n862. Shortest Subarray with Sum at Least K\n\"\"\"\n\nfrom collections import deque\nfrom typing import List\n\n\nclass Solution:\n    def shortestSubarray(self, A: List[int], K: int) -> int:\n        ret = len(A) + 1\n\n        # prefix sums: pre_sum[i] is the sum of the first i elements\n        pre_sum = [0]\n        for i in A:\n            pre_sum.append(pre_sum[-1] + i)\n\n        # deque of indices whose prefix sums are kept strictly increasing\n        monoq = deque()\n        for idx, val in enumerate(pre_sum):\n            while monoq and val <= pre_sum[monoq[-1]]:\n                monoq.pop()\n            while monoq and val - pre_sum[monoq[0]] >= K:\n                ret = min(ret, idx - monoq.popleft())\n            monoq.append(idx)\n\n        return ret if ret < len(A) + 1 else -1\n","repo_name":"dictator-x/practise_as","sub_path":"algorithm/leetCode/0862_shortest_subarray_with_sum_at_least_k.py","file_name":"0862_shortest_subarray_with_sum_at_least_k.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"9415286666","text":"'''\n.. note:: New in version 0.5.11\n\n.. testsetup::\n\n    from pyroute2 import NDB\n    ndb = NDB(sources=[{'target': 'localhost', 'kind': 'IPMock'}])\n\n.. testcleanup:: *\n\n    for key, value in tuple(globals().items()):\n        if key.startswith('ndb') and hasattr(value, 'close'):\n            value.close()\n\nFiltering example:\n\n.. testcode::\n\n    report = ndb.interfaces.dump()\n    report.select_fields('index', 'ifname', 'address', 'state')\n    report.transform_fields(\n        address=lambda r: '%s%s.%s%s.%s%s' % tuple(r.address.split(':'))\n    )\n    for record in report.format('csv'):\n        print(record)\n\n.. 
testoutput::\n\n 'index','ifname','address','state'\n 1,'lo','0000.0000.0000','up'\n 2,'eth0','5254.0072.58b2','up'\n\n'''\nimport json\nimport warnings\nfrom itertools import chain\n\nfrom pyroute2 import cli\n\nMAX_REPORT_LINES = 10000\n\ndeprecation_notice = '''\nRecordSet API is deprecated, pls refer to:\n\nhttps://docs.pyroute2.org/ndb_reports.html\n'''\n\n\ndef format_json(dump, headless=False):\n buf = []\n fnames = None\n yield '['\n for record in dump:\n if fnames is None:\n if headless:\n fnames = record._names\n else:\n fnames = record\n continue\n if buf:\n buf[-1] += ','\n for line in buf:\n yield line\n buf = []\n lines = json.dumps(dict(zip(fnames, record)), indent=4).split('\\n')\n buf.append(' {')\n for line in sorted(lines[1:-1]):\n if line[-1] == ',':\n line = line[:-1]\n buf.append(' %s,' % line)\n buf[-1] = buf[-1][:-1]\n buf.append(' }')\n for line in buf:\n yield line\n yield ']'\n\n\ndef format_csv(dump, headless=False):\n def dump_record(rec):\n row = []\n for field in rec:\n if isinstance(field, int):\n row.append('%i' % field)\n elif field is None:\n row.append('')\n else:\n row.append(\"'%s'\" % field)\n return row\n\n fnames = None\n for record in dump:\n if fnames is None and headless:\n fnames = True\n yield ','.join(dump_record(record._names))\n yield ','.join(dump_record(record))\n\n\nclass Record:\n def __init__(self, names, values, ref_class=None):\n self._names = tuple(names)\n self._values = tuple(values)\n if len(self._names) != len(self._values):\n raise ValueError('names and values must have the same length')\n self._ref_class = ref_class\n\n def __getitem__(self, key):\n idx = len(self._names)\n for i in reversed(self._names):\n idx -= 1\n if i == key:\n return self._values[idx]\n\n def __setitem__(self, *argv, **kwarg):\n raise TypeError('immutable object')\n\n def __getattribute__(self, key):\n if key.startswith('_'):\n return object.__getattribute__(self, key)\n else:\n return self[key]\n\n def __setattr__(self, key, value):\n if not key.startswith('_'):\n raise TypeError('immutable object')\n return object.__setattr__(self, key, value)\n\n def __iter__(self):\n return iter(self._values)\n\n def __repr__(self):\n return repr(self._values)\n\n def __len__(self):\n return len(self._values)\n\n def _select_fields(self, *fields):\n return Record(fields, map(lambda x: self[x], fields), self._ref_class)\n\n def _transform_fields(self, **spec):\n data = self._as_dict()\n for key, func in spec.items():\n data[key] = func(self)\n return Record(data.keys(), data.values(), self._ref_class)\n\n def _match(self, f=None, **spec):\n if callable(f):\n return f(self)\n for key, value in spec.items():\n if not (\n value(self[key]) if callable(value) else (self[key] == value)\n ):\n return False\n return True\n\n def _as_dict(self):\n ret = {}\n for key, value in zip(self._names, self._values):\n ret[key] = value\n return ret\n\n def __eq__(self, right):\n if hasattr(right, '_names'):\n n = all(x[0] == x[1] for x in zip(self._names, right._names))\n v = all(x[0] == x[1] for x in zip(self._values, right._values))\n return n and v\n elif isinstance(right, dict):\n for key, value in right.items():\n if value != self[key]:\n break\n else:\n return True\n return False\n elif self._ref_class is not None and isinstance(right, (str, int)):\n return self._ref_class.compare_record(self, right)\n else:\n return all(x[0] == x[1] for x in zip(self._values, right))\n\n\nclass BaseRecordSet(object):\n def __init__(self, generator, ellipsis='(...)'):\n self.generator = generator\n 
self.ellipsis = ellipsis\n\n def __iter__(self):\n return self\n\n def __next__(self):\n return next(self.generator)\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n pass\n\n def __repr__(self):\n counter = 0\n ret = []\n for record in self:\n if isinstance(record, str):\n ret.append(record)\n else:\n ret.append(repr(record))\n ret.append('\\n')\n counter += 1\n if self.ellipsis and counter > MAX_REPORT_LINES:\n ret.append(self.ellipsis)\n break\n if ret:\n ret.pop()\n return ''.join(ret)\n\n\nclass RecordSetConfig(dict):\n def __init__(self, prime):\n if isinstance(prime, dict):\n for key, value in prime.items():\n self[key] = value\n else:\n raise ValueError('only dict allowed')\n\n def __setitem__(self, key, value):\n if isinstance(value, str):\n value = json.loads(value)\n return super().__setitem__(key, value)\n\n\nclass RecordSet(BaseRecordSet):\n '''\n NDB views return objects of this class with `summary()` and `dump()`\n methods. RecordSet objects are generator-based, they do not store the\n data in the memory, but transform them on the fly.\n\n RecordSet filters also return objects of this class, thus making possible\n to make chains of filters.\n '''\n\n def __init__(self, generator, config=None, ellipsis=True):\n super().__init__(generator, ellipsis)\n self.filters = []\n self.config = RecordSetConfig(config) if config is not None else {}\n\n def __next__(self):\n while True:\n record = next(self.generator)\n for f in self.filters:\n record = f(record)\n if record is None:\n break\n else:\n return record\n\n @cli.show_result\n def select_fields(self, *fields):\n '''\n Select only chosen fields for every record:\n\n .. testcode::\n\n report = ndb.interfaces.dump()\n report.select_fields('index', 'ifname')\n for line in report.format('csv'):\n print(line)\n\n .. testoutput::\n\n 'index','ifname'\n 1,'lo'\n 2,'eth0'\n '''\n self.filters.append(lambda x: x._select_fields(*fields))\n if self.config.get('recordset_pipe'):\n return RecordSet(self, config=self.config)\n\n @cli.show_result\n def select_records(self, f=None, **spec):\n '''\n Select records based on a function f() or a spec match. A spec\n is dictionary of pairs `field: constant` or `field: callable`:\n\n .. testcode::\n\n report = ndb.addresses.summary()\n report.select_records(ifname=lambda x: x.startswith('eth'))\n for line in report.format('csv'):\n print(line)\n\n .. testoutput::\n\n 'target','tflags','ifname','address','prefixlen'\n 'localhost',0,'eth0','192.168.122.28',24\n '''\n self.filters.append(lambda x: x if x._match(f, **spec) else None)\n if self.config.get('recordset_pipe'):\n return RecordSet(self, config=self.config)\n\n @cli.show_result\n def transform_fields(self, **kwarg):\n '''\n Transform fields with a function. Function must accept\n the record as the only argument:\n\n .. testcode::\n\n report = ndb.addresses.summary()\n report.transform_fields(\n address=lambda r: f'{r.address}/{r.prefixlen}'\n )\n report.select_fields('ifname', 'address')\n for line in report.format('csv'):\n print(line)\n\n .. 
testoutput::\n\n        'ifname','address'\n        'lo','127.0.0.1/8'\n        'eth0','192.168.122.28/24'\n        '''\n        self.filters.append(lambda x: x._transform_fields(**kwarg))\n        if self.config.get('recordset_pipe'):\n            return RecordSet(self, config=self.config)\n\n    @cli.show_result\n    def transform(self, **kwarg):\n        warnings.warn(deprecation_notice, DeprecationWarning)\n\n        def g():\n            for record in self.generator:\n                if isinstance(record, Record):\n                    values = []\n                    names = record._names\n                    for name, value in zip(names, record._values):\n                        if name in kwarg:\n                            value = kwarg[name](value)\n                        values.append(value)\n                    record = Record(names, values, record._ref_class)\n                yield record\n\n        return RecordSet(g())\n\n    @cli.show_result\n    def filter(self, f=None, **kwarg):\n        warnings.warn(deprecation_notice, DeprecationWarning)\n\n        def g():\n            for record in self.generator:\n                m = True\n                for key in kwarg:\n                    if kwarg[key] != getattr(record, key):\n                        m = False\n                if m:\n                    if f is None:\n                        yield record\n                    elif f(record):\n                        yield record\n\n        return RecordSet(g())\n\n    @cli.show_result\n    def select(self, *argv):\n        warnings.warn(deprecation_notice, DeprecationWarning)\n        return self.fields(*argv)\n\n    @cli.show_result\n    def fields(self, *fields):\n        warnings.warn(deprecation_notice, DeprecationWarning)\n\n        def g():\n            for record in self.generator:\n                yield record._select_fields(*fields)\n\n        return RecordSet(g())\n\n    @cli.show_result\n    def join(self, right, condition=lambda r1, r2: True, prefix=''):\n        warnings.warn(deprecation_notice, DeprecationWarning)\n        # fetch all the records from the right\n        # ACHTUNG it may consume a lot of memory\n        right = tuple(right)\n\n        def g():\n            for r1 in self.generator:\n                for r2 in right:\n                    if condition(r1, r2):\n                        n = tuple(\n                            chain(\n                                r1._names,\n                                ['%s%s' % (prefix, x) for x in r2._names],\n                            )\n                        )\n                        v = tuple(chain(r1._values, r2._values))\n                        yield Record(n, v, r1._ref_class)\n\n        return RecordSet(g())\n\n    @cli.show_result\n    def format(self, kind):\n        '''\n        Return an iterator over text lines in the chosen format.\n\n        Supported formats: 'json', 'csv'.\n        '''\n        if kind == 'json':\n            return BaseRecordSet(format_json(self, headless=True))\n        elif kind == 'csv':\n            return BaseRecordSet(format_csv(self, headless=True))\n        else:\n            raise ValueError('unknown format: %s' % kind)\n\n    def count(self):\n        '''\n        Return number of records.\n\n        The method exhausts the generator.\n        '''\n        counter = 0\n        for record in self:\n            counter += 1\n        return counter\n\n    def __getitem__(self, key):\n        return list(self)[key]\n","repo_name":"svinota/pyroute2","sub_path":"pyroute2/ndb/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":12147,"program_lang":"python","lang":"en","doc_type":"code","stars":888,"dataset":"github-code","pt":"61"} {"seq_id":"73652895553","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom src.utils import DISTANCES\n\n\ndef predict(train, target, testIds, testVals, neighbors, distance=2):\n\n    m_distance = DISTANCES[distance] if (distance in DISTANCES) else 'minkowski'\n    p_distance = distance if (m_distance not in DISTANCES) else None\n    s_algorithm = 'ball_tree' if (distance < 1) else 'kd_tree'\n\n    knn = KNeighborsRegressor(n_neighbors=neighbors, weights='distance',\n                              algorithm=s_algorithm, leaf_size=30, p=p_distance,\n                              metric=m_distance, metric_params=None, n_jobs=-1)  # n_jobs=-1 picks the number\n                                                                                 # of jobs from the CPU core count.\n    print(\"KNN - Volcando puntos...\")\n    knn.fit(train, target)\n\n    print(\"KNN - Prediciendo...\")\n    
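# with weights='distance', each neighbour's vote is weighted by the inverse of its distance\n    predictions = 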
knn.predict(testVals)\n\n    return predictions\n","repo_name":"fpenovi/SFbikeTravelDuration","sub_path":"src/NearestNeighbours/KnnPredictor.py","file_name":"KnnPredictor.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"16297950323","text":"\"\"\"\nMain module of the othello package. This is the module to run to start the game.\n\"\"\"\n\nfrom interface.interface_othello import InterphaceOthello\n\nif __name__ == '__main__':\n\n    app = InterphaceOthello()\n    app.mainloop()","repo_name":"NicholasLangevin/IFT-1004_Travaux_pratiques","sub_path":"TP4/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"17321646025","text":"# This script prepares zip archives for the tutorial\n#\n# Author: K. Desnos\n# License: CeCILL-C\n\nimport re\nfrom zipfile import ZipFile\nimport os\nimport shutil\n\n# Add a file to the zip archive, under an additional parentName folder.\ndef zipFileAdd(zipObj, fileName, parentName=\"\"):\n    zipObj.write(fileName, parentName + fileName)\n\n# Zip the files from the given directory that match the filter; optionally includes\n# subdirectories and keeps the relative structure.\n# Adapted from https://thispointer.com/python-how-to-create-a-zip-archive-from-multiple-files-or-directory/\ndef zipFilesInDir(dirName, zipObj, regex, parentName=\"\", withSubdirectories=True):\n    if(withSubdirectories):\n        # Iterate over all the files in directory\n        for folderName, subfolders, filenames in os.walk(dirName):\n            for filename in filenames:\n                if (re.match(regex, filename)):\n                    # create complete filepath of file in directory\n                    filePath = os.path.join(folderName, filename)\n                    # Add file to zip\n                    zipObj.write(filePath, parentName + filePath)\n    else:\n        # Iterate over all the files in directory\n        for filename in os.listdir(dirName):\n            # create complete filepath of file in directory\n            filePath = os.path.join(dirName, filename)\n            if (os.path.isfile(filePath) and re.match(regex, filename)):\n                # Add file to zip\n                zipObj.write(filePath, parentName + filePath)\n\n\n\n# Create the tutorialTemplate archive\nmainFolder = \"gegelati-tutorial/\"\ntutorialTemplateArchive = ZipFile(\"./docs/data/gegelati-tutorial.zip\", \"w\")\nzipFileAdd(tutorialTemplateArchive,\"bin/\", mainFolder)\nzipFileAdd(tutorialTemplateArchive,\"dat/download_dat.sh\", mainFolder)\nzipFilesInDir(\"./\",tutorialTemplateArchive, r'[^\\.]+.*', mainFolder, False) # exclude .gitignore\nzipFilesInDir(\"./lib/\",tutorialTemplateArchive, r'.*', mainFolder)\nzipFilesInDir(\"src/\",tutorialTemplateArchive, r'.*', mainFolder, False)\nzipFilesInDir(\"src/manual/\",tutorialTemplateArchive, r'.*', mainFolder)\nzipFilesInDir(\"src/training\",tutorialTemplateArchive, r'^(?!.*(pendulum_wrapper))', mainFolder, False) # all files except pendulum_wrapper\ntutorialTemplateArchive.write(\"src/training/pendulum_wrapper_empty.cpp\", mainFolder + \"src/training/pendulum_wrapper.cpp\") # overwrite empty file\ntutorialTemplateArchive.write(\"src/training/pendulum_wrapper_empty.h\", mainFolder + \"src/training/pendulum_wrapper.h\") # overwrite empty file\ntutorialTemplateArchive.close()\n\n
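# note: the negative-lookahead regex above, r'^(?!.*(pendulum_wrapper))', accepts any\n# file name that does not contain 'pendulum_wrapper'\n\n# Create the pendulum_wrapper_solution 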
archive\npendulumWrapperSolutionArchive = ZipFile(\"./docs/data/pendulum_wrapper_solution.zip\", \"w\")\npendulumWrapperSolutionArchive.write(\"src/training/pendulum_wrapper_solution.cpp\", \"pendulum_wrapper.cpp\") # store the solution file under the generic name\npendulumWrapperSolutionArchive.write(\"src/training/pendulum_wrapper_solution.h\", \"pendulum_wrapper.h\") # store the solution file under the generic name\npendulumWrapperSolutionArchive.close()\n\n# Create the gegelati-tutorial-solution archive\nmainFolder = \"gegelati-tutorial/\"\ntutorialSolutionArchive = ZipFile(\"./docs/data/gegelati-tutorial-solution.zip\", \"w\")\nzipFileAdd(tutorialSolutionArchive,\"bin/\", mainFolder)\nzipFileAdd(tutorialSolutionArchive,\"dat/download_dat.sh\", mainFolder)\nzipFilesInDir(\"./\",tutorialSolutionArchive, r'[^\\.]+.*', mainFolder, False) # exclude .gitignore\nzipFilesInDir(\"./lib/\",tutorialSolutionArchive, r'.*', mainFolder)\nzipFilesInDir(\"src/\",tutorialSolutionArchive, r'.*', mainFolder, False)\nzipFilesInDir(\"src/manual/\",tutorialSolutionArchive, r'.*', mainFolder)\nzipFilesInDir(\"src/training\",tutorialSolutionArchive, r'^(?!.*(pendulum_wrapper))', mainFolder, False) # all files except pendulum_wrapper\ntutorialSolutionArchive.write(\"src/training/pendulum_wrapper_solution.cpp\", mainFolder + \"src/training/pendulum_wrapper.cpp\") # overwrite empty file\ntutorialSolutionArchive.write(\"src/training/pendulum_wrapper_solution.h\", mainFolder + \"src/training/pendulum_wrapper.h\") # overwrite empty file\ntutorialSolutionArchive.close()\n\n# Make the main-inference.cpp file available\nshutil.copy2(\"./src/inference/main-inference.cpp\", \"./docs/data/\")","repo_name":"gegelati/gegelati-tutorial","sub_path":"scripts/prepare_archives.py","file_name":"prepare_archives.py","file_ext":"py","file_size_in_byte":4373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"27222782698","text":"import requests\n\ndef getToken():\n\theaders = {'Authorization': 'Basic mTpzZWNyZXQ='}\n\tpayload = {'username': 'rmsUser', 'password': 'P@ssword1', 'grant_type': 'password', 'tenant': 'rms'}\n\tresponse = requests.post('http://localhost:8080/tokens', data=payload, headers=headers)\n\tprint(response)\n\tif response.status_code == 201:\n\t\treturn response.json()[\"access_token\"]\n\ndef postRiskAnalysis(token, analysisName, structureId, portfolioId, lossSettings, currencyExchangeRateId, currencyCode, reportingWindowStartDate, reportingWindowEndDate):\n\theaders = {'Authorization': 'bearer ' + token, 'Content-Type': 'application/json'}\n\tpayload = {\n    \"name\": analysisName,\n    \"description\": \"\",\n    \"position\": \"/structures/\" + structureId + \"/portfolios/\" + portfolioId,\n    \"lossSettings\": lossSettings,\n    \"currencySettings\": [\n        {\n            \"setting\": \"currencyExchangeRateID\",\n            \"value\": currencyExchangeRateId\n        },\n        {\n            \"setting\": \"currencyCode\",\n            \"value\": currencyCode\n        }\n    ],\n    \"outputSettings\": [\n        {\n            \"setting\": \"ReportingWindowStartDate\",\n            \"value\": reportingWindowStartDate\n        },\n        {\n            \"setting\": \"ReportingWindowEndDate\",\n            \"value\": reportingWindowEndDate\n        }\n    ],\n    \"overriddenModelSettings\": [\n    ]\n}\n\tresponse = requests.post('http://localhost:8080/riskanalyses', headers=headers, json=payload)\n\tprint(response)\n\ntoken = getToken()\nanalysisName = \"Test1\"\nstructureId = \"10000000001\"\nportfolioId = \"10000000001\"\n
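# the IDs and settings below are sample values aimed at a local test instance\nlossSettings = 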
\"/losssettings/test_uk_fl\"\ncurrencyExchangeRateId = \"1\"\ncurrencyCode = \"USD\"\nreportingWindowStartDate = \"25 NOV 2015\"\nreportingWindowEndDate = \"25 NOV 2016\"\npostRiskAnalysis(token, analysisName, structureId, portfolioId, lossSettings, currencyExchangeRateId, currencyCode, reportingWindowStartDate, reportingWindowEndDate)","repo_name":"apatel-rms/automation","sub_path":"run-analysis.py","file_name":"run-analysis.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2783180504","text":"import math\n\nimport numpy as np\nimport pandas as pd\n\nclass Boundaryconditionsassignment:\n def __init__(self, PD, NoN, NPE, NoE, EL, m, p, k, sigmav, dispp, disssp, max_y, min_x, min_y, max_x, E_node):\n\n self.PD = PD\n self.NoN = NoN\n self.NPE = NPE\n self.NoE = NoE\n self.EL = EL\n self.m = m\n self.p = p\n self.k = k\n self.sigmav = sigmav\n self.dispp = dispp\n self.disssp = disssp\n self.max_y = max_y\n self.min_x = min_x\n self.min_y = min_y\n self.max_x = max_x\n self.E_node = E_node\n\n def BC(self, NL_Cartesian):\n\n ENL = np.zeros([self.NoN, 6*self.PD])\n ENL[:, 0:self.PD] = NL_Cartesian\n ENL[:, 3] = ENL[:, 4] = ENL[:, 5] = 1\n######################bottom face#################\n dirichlet_nodes = np.zeros([self.m*self.p, 8])\n for i in range(0, self.m*self.p):\n dirichlet_nodes[i, 0:4] = self.EL[i, 0:4]\n dirichlet_nodes[i, 4:] = self.EL[i, 8:12]\n dirichlet_nodes = dirichlet_nodes.astype(int)\n############# sigma H back face############\n kk = 0\n elmlist_b = np.zeros([self.p * self.k, 8])\n for ki in range(0, self.k):\n for i in range((self.m-1)*(self.p)+ki*self.m*self.p, self.m*self.p*(ki+1)):\n elmlist_b[kk, :] = self.EL[i, [3, 10, 2, 14, 6, 18, 7, 15]]\n kk += 1\n elmlist_b = elmlist_b.astype(int)\n elmlist_b = np.unique(elmlist_b.flatten())\n for i in elmlist_b:\n ENL[i-1, 3] = -1\n ENL[i-1, 4] = -1\n ENL[i-1, 13] = -self.dispp*0.5\n ENL[i-1, 12] = (self.disssp/10)*0.5\n# # ################sigma H front face##########\n jj = 0\n elmlist_f = np.zeros([self.p * self.k, 8])\n for ki in range(0, self.k):\n for i in range(ki*self.m*self.p, ki*self.m*self.p+self.p):\n elmlist_f[jj, :] = self.EL[i, [0, 8, 1, 13, 5, 16, 4, 12]]\n jj += 1\n elmlist_f = elmlist_f.astype(int)\n elmlist_f = np.unique(elmlist_f.flatten())\n for i in elmlist_f:\n ENL[i-1, 3] = -1\n ENL[i-1, 4] = -1\n ENL[i-1, 13] = self.dispp*0.5\n ENL[i-1, 12] = -(self.disssp/10)*0.5\n###################sigma h left face########\n ii = 0\n elmlist_l = np.zeros([self.m * self.k, 8])\n for ki in range(0, self.k):\n for i in range(ki*self.m*self.p+1, (ki+1)*self.m*self.p+1, self.p):\n elmlist_l[ii, :] = self.EL[i-1, [0, 11, 3, 15, 7, 19, 4, 12]]\n ii += 1\n elmlist_l = elmlist_l.astype(int)\n elmlist_l = np.unique(elmlist_l.flatten())\n for i in elmlist_l:\n ENL[i-1, 3] = -1\n ENL[i-1, 4] = -1\n ENL[i-1, 13] = -(self.disssp/10)*0.5 + ENL[i-1, 13]\n ENL[i-1, 12] = self.dispp*0.1 + ENL[i-1, 12]\n# # #################sigma h right face########\n ff = 0\n elmlist_r = np.zeros([self.m * self.k, 8])\n for ki in range(0, self.k):\n for i in range(ki*self.m*self.p+self.p, (ki+1)*self.m*self.p+1, self.p):\n elmlist_r[ff, :] = self.EL[i-1, [1, 9, 2, 14, 6, 17, 5, 13]]\n ff += 1\n elmlist_r = elmlist_r.astype(int)\n elmlist_r = np.unique(elmlist_r.flatten())\n for i in elmlist_r:\n ENL[i-1, 3] = -1\n ENL[i-1, 4] = -1\n ENL[i-1, 13] = (self.disssp/10)*0.5 + ENL[i-1, 13]\n ENL[i-1, 12] = -self.dispp*0.1 + ENL[i-1, 
12]\n#################sigma vertical ########\n j = 0\n elmlist_v = np.zeros([self.p * self.m, 8])\n for i in range(3*self.m*self.p, self.m*self.p*self.k):\n elmlist_v[j, :] = self.EL[i, [4, 16, 5, 17, 6, 18, 7, 19]]\n j += 1\n elmlist_v = elmlist_v.astype(int)\n############defining boundary conditions##########\n dirichlet_nodes = np.unique(dirichlet_nodes.flatten())\n for i in dirichlet_nodes:\n ENL[i-1, 5] = -1\n DOFS = 0\n DOCS = 0\n for i in range(0, self.NoN):\n for j in range(0, self.PD):\n\n if ENL[i, self.PD+j] == -1 :\n DOCS -= 1\n ENL[i, 2*self.PD+j] = DOCS\n else:\n DOFS += 1\n ENL[i, 2*self.PD+j] = DOFS\n\n for i in range(0, self.NoN):\n for j in range(0, self.PD):\n\n if ENL[i, 2*self.PD+j] < 0 :\n ENL[i, 3*self.PD+j] = abs(ENL[i, 2*self.PD+j]) + DOFS\n else:\n ENL[i, 3*self.PD+j] = abs(ENL[i, 2*self.PD+j])\n DOCS = abs(DOCS)\n print(f\"DOCS is {DOCS}\")\n\n######################vertical load application####################################################\n z_gradient = np.zeros([8, 1])\n for i in range(0, np.size(elmlist_v, 0)):\n coordinate_one_element_v = ENL[elmlist_v[i, :]-1, 0:self.PD]\n Nodal_force_v = np.zeros([8*self.PD, 1])\n traction_force_v = np.zeros([self.PD, 1])\n nod_force_v = np.zeros([8, (self.PD+1)])\n for g in range(1, 4+1):\n (x_bou, eta_bou, alpaha_bou) = Boundaryconditionsassignment.gausspoint_boundary(self, g)\n derivative_boundary = Boundaryconditionsassignment.grad_shapefunction(self, x_bou, eta_bou)\n Jacobian_3d = (coordinate_one_element_v[:,[0,1]].T) @ (derivative_boundary.T)\n det_jacobian = (np.linalg.det(Jacobian_3d))\n shape_function = Boundaryconditionsassignment.gausspoint_boundary_traction(self, x_bou, eta_bou)\n traction_force_v[0, 0] = 0\n traction_force_v[1, 0] = 0\n traction_force_v[2, 0] = -self.sigmav\n Nodal_force_v = Nodal_force_v + ((shape_function @ traction_force_v) * (det_jacobian ) * alpaha_bou)\n nod_force_v[:,1:4] = Nodal_force_v.reshape(8, 3)\n\n for ii in range(0, 8):\n nod_force_v[ii, 0] = elmlist_v[i, ii]\n z_gradient[ii, 0] = (460.908 - coordinate_one_element_v[ii, 2])/1000\n nod_force_v[ii, 3] = nod_force_v[ii, 3] * z_gradient[ii, 0]\n ENL[int(nod_force_v[ii, 0])-1, 5*self.PD+2] = nod_force_v[ii, 3] + ENL[int(nod_force_v[ii, 0])-1, 5*self.PD+2]\n#####################################APPLICATION OF GRAVITY #################################################################\n density = np.loadtxt(\"src/data/DEN_ELEM.txt\", dtype=float)\n for i in range(0, np.size(self.EL, 0)):\n coordinate_one_element_g = ENL[self.EL[i, :]-1, 0:self.PD]\n Nodal_force_g = np.zeros([self.NPE*self.PD, 1])\n gravity = np.zeros([self.PD, 1])\n nod_force_g = np.zeros([self.NPE, (self.PD+1)])\n for g in range(1, 8+1):\n (xi, eta, Si, alpha) = Boundaryconditionsassignment.Gausspoint(self, g)\n derivative = Boundaryconditionsassignment.grad_shapefunction_gravity(self, xi, eta, Si)\n Jacobian_3d_g = (coordinate_one_element_g.T) @ (derivative.T)\n detjdens = (np.linalg.det(Jacobian_3d_g))\n shape_function = Boundaryconditionsassignment.gausspoint_boundary_gravity(self, xi, eta, Si)\n gravity[0, 0] = 0\n gravity[1, 0] = 0\n gravity[2, 0] = -(density[i] * 10) #pascal\n Nodal_force_g = Nodal_force_g + ((shape_function @ gravity) * detjdens * alpha)\n nod_force_g[:,1:4] = Nodal_force_g.reshape(self.NPE, 3)\n for ii in range(0, self.NPE):\n nod_force_g[ii, 0] = self.EL[i, ii]\n ENL[int(nod_force_g[ii, 0])-1, 5*self.PD+2] = ENL[int(nod_force_g[ii, 0])-1, 5*self.PD+2] + nod_force_g[ii, 3]\n\n return ENL, DOFS, DOCS\n\n def gausspoint_boundary(self, g):\n\n if 
g == 1:\n x_bou = -1/math.sqrt(3)\n eta_bou = -1/math.sqrt(3)\n alpaha_bou = 1\n if g == 2:\n x_bou = 1/math.sqrt(3)\n eta_bou = -1/math.sqrt(3)\n alpaha_bou = 1\n if g == 3:\n x_bou = 1/math.sqrt(3)\n eta_bou = 1/math.sqrt(3)\n alpaha_bou = 1\n if g == 4:\n x_bou = -1/math.sqrt(3)\n eta_bou = 1/math.sqrt(3)\n alpaha_bou = 1\n\n return x_bou, eta_bou, alpaha_bou\n\n def gausspoint_boundary_coordinate(self, x_bou, eta_bou):\n\n shape_function_coor = np.zeros([1, 8])\n\n shape_function_coor[0, 0] = -1/4 * (1 - x_bou) * (1 - eta_bou) * (1 + x_bou + eta_bou)\n shape_function_coor[0, 1] = 1/2 * (1 - x_bou) * (1 + x_bou) * (1 - eta_bou)\n shape_function_coor[0, 2] = -1/4 * (1 + x_bou) * (1 - eta_bou) * (1 - x_bou + eta_bou)\n shape_function_coor[0, 3] = 1/2 * (1 + x_bou) * (1 + eta_bou) * (1 - eta_bou)\n shape_function_coor[0, 4] = -1/4 * (1 + x_bou) * (1 + eta_bou) * (1 - x_bou - eta_bou)\n shape_function_coor[0, 5] = 1/2 * (1 - x_bou) * (1 + x_bou) * (1 + eta_bou)\n shape_function_coor[0, 6] = -1/4 * (1 - x_bou) * (1 + eta_bou) * (1 + x_bou - eta_bou)\n shape_function_coor[0, 7] = 1/2 * (1 - x_bou) * (1 + eta_bou) * (1 - eta_bou)\n\n return shape_function_coor\n\n def grad_shapefunction(self, x_bou, eta_bou):\n derivative_boundary = np.zeros([2, 8])\n derivative_boundary[0, 0] = 1/4 * (1 - eta_bou) * (1 + x_bou + eta_bou) - 1/4 * (1 - x_bou) * (1 - eta_bou)\n derivative_boundary[0, 1] = -x_bou * (1 - eta_bou)\n derivative_boundary[0, 2] = -1/4 * (1 - eta_bou) * (1 - x_bou + eta_bou) + 1/4 * (1 + x_bou) * (1 - eta_bou)\n derivative_boundary[0, 3] = 1/2 * (1 + eta_bou) * (1 - eta_bou)\n derivative_boundary[0, 4] = -1/4 * (1 + eta_bou) * (1 - x_bou - eta_bou) + 1/4 * (1 + x_bou) * (1 + eta_bou)\n derivative_boundary[0, 5] = -x_bou * (1 + eta_bou)\n derivative_boundary[0, 6] = 1/4 * (1 + eta_bou) * (1 + x_bou - eta_bou) - 1/4 * (1 - x_bou) * (1 + eta_bou)\n derivative_boundary[0, 7] = -1/2 * (1 + eta_bou) * (1 - eta_bou)\n\n derivative_boundary[1, 0] = 1/4 * (1 - x_bou)*(1 + x_bou + eta_bou) - 1/4 * (1 - x_bou) * (1 - eta_bou)\n derivative_boundary[1, 1] = -1/2 * (1 - x_bou) * (1 + x_bou)\n derivative_boundary[1, 2] = 1/4 * (1 + x_bou)* (1 - x_bou + eta_bou) - 1/4 * (1 + x_bou) * (1 - eta_bou)\n derivative_boundary[1, 3] = -eta_bou * (1 + x_bou)\n derivative_boundary[1, 4] = -1/4 * (1 + x_bou)* (1 - x_bou - eta_bou) + 1/4 * (1 + x_bou) * (1 + eta_bou)\n derivative_boundary[1, 5] = 1/2 * (1 - x_bou) * (1 + x_bou)\n derivative_boundary[1, 6] = -1/4 * (1 - x_bou)*(1 + x_bou - eta_bou) + 1/4 * (1 - x_bou) * (1 + eta_bou)\n derivative_boundary[1, 7] = -eta_bou * (1 - x_bou)\n\n return derivative_boundary\n\n def gausspoint_boundary_traction(self, x_bou, eta_bou):\n\n shape_function = np.zeros([24, self.PD])\n\n shape_function[0, 0] = shape_function[1, 1] = shape_function[2, 2] = -1/4 * (1 - x_bou) * (1 - eta_bou) * (1 + x_bou + eta_bou)\n shape_function[3, 0] = shape_function[4, 1] = shape_function[5, 2] = 1/2 * (1 - x_bou) * (1 + x_bou) * (1 - eta_bou)\n shape_function[6, 0] = shape_function[7, 1] = shape_function[8, 2] = -1/4 * (1 + x_bou) * (1 - eta_bou) * (1 - x_bou + eta_bou)\n shape_function[9, 0] = shape_function[10, 1] = shape_function[11, 2] = 1/2 * (1 + x_bou) * (1 + eta_bou) * (1 - eta_bou)\n shape_function[12, 0] = shape_function[13, 1] = shape_function[14, 2] = -1/4 * (1 + x_bou) * (1 + eta_bou) * (1 - x_bou - eta_bou)\n shape_function[15, 0] = shape_function[16, 1] = shape_function[17, 2] = 1/2 * (1 - x_bou) * (1 + x_bou) * (1 + eta_bou)\n shape_function[18, 0] = shape_function[19, 1] = 
shape_function[20, 2] = -1/4 * (1 - x_bou) * (1 + eta_bou) * (1 + x_bou - eta_bou)\n shape_function[21, 0] = shape_function[22, 1] = shape_function[23, 2] = 1/2 * (1 - x_bou) * (1 + eta_bou) * (1 - eta_bou)\n\n return shape_function\n\n def Gausspoint(self, gp):\n\n if gp == 1:\n xi = -1/math.sqrt(3)\n eta = -1/math.sqrt(3)\n Si = -1/math.sqrt(3)\n alpha = 1\n if gp == 2:\n xi = 1/math.sqrt(3)\n eta = -1/math.sqrt(3)\n Si = -1/math.sqrt(3)\n alpha = 1\n if gp == 3:\n xi = 1/math.sqrt(3)\n eta = 1/math.sqrt(3)\n Si = -1/math.sqrt(3)\n alpha = 1\n if gp == 4:\n xi = -1/math.sqrt(3)\n eta = 1/math.sqrt(3)\n Si = -1/math.sqrt(3)\n alpha = 1\n if gp == 5:\n xi = -1/math.sqrt(3)\n eta = -1/math.sqrt(3)\n Si = 1/math.sqrt(3)\n alpha = 1\n if gp == 6:\n xi = 1/math.sqrt(3)\n eta = -1/math.sqrt(3)\n Si = 1/math.sqrt(3)\n alpha = 1\n if gp == 7:\n xi = 1/math.sqrt(3)\n eta = 1/math.sqrt(3)\n Si = 1/math.sqrt(3)\n alpha = 1\n if gp == 8:\n xi = -1/math.sqrt(3)\n eta = 1/math.sqrt(3)\n Si = 1/math.sqrt(3)\n alpha = 1\n\n return (xi, eta, Si, alpha)\n\n def grad_shapefunction_gravity(self, xi, eta, Si):\n derivative = np.zeros([self.PD, self.NPE])\n derivative[0, 0] = -1/8*(1-eta)*(1-Si)*(-xi-eta-Si-2) + (-1/8*(1-xi)*(1-eta)*(1-Si))\n derivative[0, 1] = 1/8*(1-eta)*(1-Si)*(xi-eta-Si-2) + (1/8*(1+xi)*(1-eta)*(1-Si))\n derivative[0, 2] = 1/8*(1+eta)*(1-Si)*(xi+eta-Si-2) + (1/8*(1+xi)*(1+eta)*(1-Si))\n derivative[0, 3] = -1/8*(1+eta)*(1-Si)*(-xi+eta-Si-2) + (-1/8*(1-xi)*(1+eta)*(1-Si))\n derivative[0, 4] = -1/8*(1-eta)*(1+Si)*(-xi-eta+Si-2) + (-1/8*(1-xi)*(1-eta)*(1+Si))\n derivative[0, 5] = 1/8*(1-eta)*(1+Si)*(xi-eta+Si-2) + (1/8*(1+xi)*(1-eta)*(1+Si))\n derivative[0, 6] = 1/8*(1+eta)*(1+Si)*(xi+eta+Si-2) + (1/8*(1+xi)*(1+eta)*(1+Si))\n derivative[0, 7] = -1/8*(1+eta)*(1+Si)*(-xi+eta+Si-2) + (-1/8*(1-xi)*(1+eta)*(1+Si))\n derivative[0, 8] = -1/2*(xi)*(1-eta)*(1-Si)\n derivative[0, 9] = 1/4*(1-eta**2)*(1-Si)\n derivative[0, 10] = -1/2*(xi)*(1+eta)*(1-Si)\n derivative[0, 11] = -1/4*(1-eta**2)*(1-Si)\n derivative[0, 12] = -1/4*(1-eta)*(1-Si**2)\n derivative[0, 13] = 1/4*(1-eta)*(1-Si**2)\n derivative[0, 14] = 1/4*(1+eta)*(1-Si**2)\n derivative[0, 15] = -1/4*(1+eta)*(1-Si**2)\n derivative[0, 16] = -1/2*(xi)*(1-eta)*(1+Si)\n derivative[0, 17] = 1/4*(1-eta**2)*(1+Si)\n derivative[0, 18] = -1/2*(xi)*(1+eta)*(1+Si)\n derivative[0, 19] = -1/4*(1-eta**2)*(1+Si)\n\n derivative[1, 0] = -1/8*(1-xi)*(1-Si)*(-xi-eta-Si-2) + (-1/8*(1-xi)*(1-eta)*(1-Si))\n derivative[1, 1] = -1/8*(1+xi)*(1-Si)*(xi-eta-Si-2) + (-1/8*(1+xi)*(1-eta)*(1-Si))\n derivative[1, 2] = 1/8*(1+xi)*(1-Si)*(xi+eta-Si-2) + (1/8*(1+xi)*(1+eta)*(1-Si))\n derivative[1, 3] = 1/8*(1-xi)*(1-Si)*(-xi+eta-Si-2) + (1/8*(1-xi)*(1+eta)*(1-Si))\n derivative[1, 4] = -1/8*(1-xi)*(1+Si)*(-xi-eta+Si-2) + (-1/8*(1-xi)*(1-eta)*(1+Si))\n derivative[1, 5] = -1/8*(1+xi)*(1+Si)*(xi-eta+Si-2) + (-1/8*(1+xi)*(1-eta)*(1+Si))\n derivative[1, 6] = 1/8*(1+xi)*(1+Si)*(xi+eta+Si-2) + (1/8*(1+xi)*(1+eta)*(1+Si))\n derivative[1, 7] = 1/8*(1-xi)*(1+Si)*(-xi+eta+Si-2) + (1/8*(1-xi)*(1+eta)*(1+Si))\n derivative[1, 8] = -1/4*(1-xi**2)*(1-Si)\n derivative[1, 9] = -1/2*(eta)*(1+xi)*(1-Si)\n derivative[1, 10] = 1/4*(1-xi**2)*(1-Si)\n derivative[1, 11] = -1/2*(eta)*(1-xi)*(1-Si)\n derivative[1, 12] = -1/4*(1-xi)*(1-Si**2)\n derivative[1, 13] = -1/4*(1+xi)*(1-Si**2)\n derivative[1, 14] = 1/4*(1+xi)*(1-Si**2)\n derivative[1, 15] = 1/4*(1-xi)*(1-Si**2)\n derivative[1, 16] = -1/4*(1-xi**2)*(1+Si)\n derivative[1, 17] = -1/2*(eta)*(1+xi)*(1+Si)\n derivative[1, 18] = 1/4*(1-xi**2)*(1+Si)\n 
derivative[1, 19] = -1/2*(eta)*(1-xi)*(1+Si)\n\n derivative[2, 0] = -1/8*(1-xi)*(1-eta)*(-xi-eta-Si-2) + (-1/8*(1-xi)*(1-eta)*(1-Si))\n derivative[2, 1] = -1/8*(1+xi)*(1-eta)*(xi-eta-Si-2) + (-1/8*(1+xi)*(1-eta)*(1-Si))\n derivative[2, 2] = -1/8*(1+xi)*(1+eta)*(xi+eta-Si-2) + (-1/8*(1+xi)*(1+eta)*(1-Si))\n derivative[2, 3] = -1/8*(1-xi)*(1+eta)*(-xi+eta-Si-2) + (-1/8*(1-xi)*(1+eta)*(1-Si))\n derivative[2, 4] = 1/8*(1-xi)*(1-eta)*(-xi-eta+Si-2) + (1/8*(1-xi)*(1-eta)*(1+Si))\n derivative[2, 5] = 1/8*(1+xi)*(1-eta)*(xi-eta+Si-2) + (1/8*(1+xi)*(1-eta)*(1+Si))\n derivative[2, 6] = 1/8*(1+xi)*(1+eta)*(xi+eta+Si-2) + (1/8*(1+xi)*(1+eta)*(1+Si))\n derivative[2, 7] = 1/8*(1-xi)*(1+eta)*(-xi+eta+Si-2) + (1/8*(1-xi)*(1+eta)*(1+Si))\n derivative[2, 8] = -1/4*(1-xi**2)*(1-eta)\n derivative[2, 9] = -1/4*(1-eta**2)*(1+xi)\n derivative[2, 10] = -1/4*(1-xi**2)*(1+eta)\n derivative[2, 11] = -1/4*(1-eta**2)*(1-xi)\n derivative[2, 12] = -1/2*(Si)*(1-xi)*(1-eta)\n derivative[2, 13] = -1/2*(Si)*(1+xi)*(1-eta)\n derivative[2, 14] = -1/2*(Si)*(1+xi)*(1+eta)\n derivative[2, 15] = -1/2*(Si)*(1-xi)*(1+eta)\n derivative[2, 16] = 1/4*(1-xi**2)*(1-eta)\n derivative[2, 17] = 1/4*(1-eta**2)*(1+xi)\n derivative[2, 18] = 1/4*(1-xi**2)*(1+eta)\n derivative[2, 19] = 1/4*(1-eta**2)*(1-xi)\n\n return derivative\n\n def shapefunction_gravity(self, xi, eta, Si):\n shape_gravity = np.zeros([self.NPE, 1])\n\n shape_gravity[0, 0] = 1/8*(1-xi)*(1-eta)*(1-Si)*(-xi-eta-Si-2)\n shape_gravity[1, 0] = 1/8*(1+xi)*(1-eta)*(1-Si)*(xi-eta-Si-2)\n shape_gravity[2, 0] = 1/8*(1+xi)*(1+eta)*(1-Si)*(xi+eta-Si-2)\n shape_gravity[3, 0] = 1/8*(1-xi)*(1+eta)*(1-Si)*(-xi+eta-Si-2)\n shape_gravity[4, 0] = 1/8*(1-xi)*(1-eta)*(1+Si)*(-xi-eta+Si-2)\n shape_gravity[5, 0] = 1/8*(1+xi)*(1-eta)*(1+Si)*(xi-eta+Si-2)\n shape_gravity[6, 0] = 1/8*(1+xi)*(1+eta)*(1+Si)*(xi+eta+Si-2)\n shape_gravity[7, 0] = 1/8*(1-xi)*(1+eta)*(1+Si)*(-xi+eta+Si-2)\n shape_gravity[8, 0] = 1/4*(1-xi**2)*(1-eta)*(1-Si)\n shape_gravity[9, 0] = 1/4*(1+xi)*(1-eta**2)*(1-Si)\n shape_gravity[10, 0] = 1/4*(1-xi**2)*(1+eta)*(1-Si)\n shape_gravity[11, 0] = 1/4*(1-xi)*(1-eta**2)*(1-Si)\n shape_gravity[12, 0] = 1/4*(1-xi)*(1-eta)*(1-Si**2)\n shape_gravity[13, 0] = 1/4*(1+xi)*(1-eta)*(1-Si**2)\n shape_gravity[14, 0] = 1/4*(1+xi)*(1+eta)*(1-Si**2)\n shape_gravity[15, 0] = 1/4*(1-xi)*(1+eta)*(1-Si**2)\n shape_gravity[16, 0] = 1/4*(1-xi**2)*(1-eta)*(1+Si)\n shape_gravity[17, 0] = 1/4*(1+xi)*(1-eta**2)*(1+Si)\n shape_gravity[18, 0] = 1/4*(1-xi**2)*(1+eta)*(1+Si)\n shape_gravity[19, 0] = 1/4*(1-xi)*(1-eta**2)*(1+Si)\n\n return shape_gravity\n\n def gausspoint_boundary_gravity(self, xi, eta, Si):\n\n shape_function = np.zeros([self.PD*self.NPE, self.PD])\n\n shape_function[0, 0] = shape_function[1, 1] = shape_function[2, 2] = 1/8*(1-xi)*(1-eta)*(1-Si)*(-xi-eta-Si-2)\n shape_function[3, 0] = shape_function[4, 1] = shape_function[5, 2] = 1/8*(1+xi)*(1-eta)*(1-Si)*(xi-eta-Si-2)\n shape_function[6, 0] = shape_function[7, 1] = shape_function[8, 2] = 1/8*(1+xi)*(1+eta)*(1-Si)*(xi+eta-Si-2)\n shape_function[9, 0] = shape_function[10, 1] = shape_function[11, 2] = 1/8*(1-xi)*(1+eta)*(1-Si)*(-xi+eta-Si-2)\n shape_function[12, 0] = shape_function[13, 1] = shape_function[14, 2] = 1/8*(1-xi)*(1-eta)*(1+Si)*(-xi-eta+Si-2)\n shape_function[15, 0] = shape_function[16, 1] = shape_function[17, 2] = 1/8*(1+xi)*(1-eta)*(1+Si)*(xi-eta+Si-2)\n shape_function[18, 0] = shape_function[19, 1] = shape_function[20, 2] = 1/8*(1+xi)*(1+eta)*(1+Si)*(xi+eta+Si-2)\n shape_function[21, 0] = shape_function[22, 1] = shape_function[23, 
2] = 1/8*(1-xi)*(1+eta)*(1+Si)*(-xi+eta+Si-2)\n shape_function[24, 0] = shape_function[25, 1] = shape_function[26, 2] = 1/4*(1-xi**2)*(1-eta)*(1-Si)\n shape_function[27, 0] = shape_function[28, 1] = shape_function[29, 2] = 1/4*(1+xi)*(1-eta**2)*(1-Si)\n shape_function[30, 0] = shape_function[31, 1] = shape_function[32, 2] = 1/4*(1-xi**2)*(1+eta)*(1-Si)\n shape_function[33, 0] = shape_function[34, 1] = shape_function[35, 2] = 1/4*(1-xi)*(1-eta**2)*(1-Si)\n shape_function[36, 0] = shape_function[37, 1] = shape_function[38, 2] = 1/4*(1-xi)*(1-eta)*(1-Si**2)\n shape_function[39, 0] = shape_function[40, 1] = shape_function[41, 2] = 1/4*(1+xi)*(1-eta)*(1-Si**2)\n shape_function[42, 0] = shape_function[43, 1] = shape_function[44, 2] = 1/4*(1+xi)*(1+eta)*(1-Si**2)\n shape_function[45, 0] = shape_function[46, 1] = shape_function[47, 2] = 1/4*(1-xi)*(1+eta)*(1-Si**2)\n shape_function[48, 0] = shape_function[49, 1] = shape_function[50, 2] = 1/4*(1-xi**2)*(1-eta)*(1+Si)\n shape_function[51, 0] = shape_function[52, 1] = shape_function[53, 2] = 1/4*(1+xi)*(1-eta**2)*(1+Si)\n shape_function[54, 0] = shape_function[55, 1] = shape_function[56, 2] = 1/4*(1-xi**2)*(1+eta)*(1+Si)\n shape_function[57, 0] = shape_function[58, 1] = shape_function[59, 2] = 1/4*(1-xi)*(1-eta**2)*(1+Si)\n\n return shape_function","repo_name":"AtefehDa/3DiStress","sub_path":"src/test/BC_gravity.py","file_name":"BC_gravity.py","file_ext":"py","file_size_in_byte":21248,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"17471868946","text":"import pygame\r\n\r\npygame.init() \r\n\r\n\r\nfrom .constants import WIDTH, HEIGHT, BLACK, WHITE, RED, BLUE, GREEN\r\nfrom .game import Game\r\n\r\n \r\nclass Play:\r\n def __init__(self, win, rows, cols, square_size, with_ai):\r\n self.win = win\r\n self.square_size = square_size\r\n self.FPS = 60\r\n self.rows = rows\r\n self.cols = cols\r\n self.square_size = square_size\r\n self.with_ai = with_ai\r\n self.font = pygame.font.Font('freesansbold.ttf', 32) \r\n \r\n\r\n def get_row_col_from_mouse(self, pos):\r\n x, y = pos\r\n row = y // self.square_size\r\n col = x // self.square_size\r\n return row, col\r\n\r\n \r\n \r\n\r\n def start_playing(self):\r\n run = True\r\n clock = pygame.time.Clock()\r\n game = Game(self.win, self.rows, self.cols, self.square_size, self.with_ai)\r\n finished = False\r\n\r\n while run:\r\n\r\n clock.tick(self.FPS)\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n run = False\r\n \r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n pos = pygame.mouse.get_pos()\r\n row, col = self.get_row_col_from_mouse(pos)\r\n game.select(row, col)\r\n\r\n if not finished:\r\n game.update()\r\n else:\r\n pygame.display.update()\r\n\r\n result = game.winner()\r\n if result != None:\r\n if result == BLACK:\r\n text = self.font.render('BLACK WINS!', True, GREEN, )\r\n else:\r\n text = self.font.render('WHITE WINS!', True, GREEN, )\r\n\r\n textRect = text.get_rect() \r\n textRect.center = (WIDTH// 2, HEIGHT // 2) \r\n self.win.fill(BLACK)\r\n self.win.blit(text, textRect) \r\n finished = True\r\n\r\n \r\n \r\n\r\n ","repo_name":"rhs99/LinesOfAction","sub_path":"loa/play.py","file_name":"play.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8741414834","text":"import logging\nimport sys\nimport time\nimport uuid\nfrom abc import ABC, abstractmethod\nfrom pathlib import Path\n\nimport matplotlib.pyplot 
as plt\nimport pandas as pd\n\nlog = logging.getLogger('DataLogAnalyser')\nlog.setLevel(logging.DEBUG)\nlogging.basicConfig(stream=sys.stdout, level=logging.WARNING)\n\n\nclass DataLogAnalyserBase(ABC):\n \"\"\"The Data Log Analyser Base\"\"\"\n GLOB_CSV_PATTERN = '*.csv'\n\n def __init__(self, _options, _column_definition, _read_csv_kwargs):\n self.cvs_directory = _options['cvs_directory']\n self.cvs_filenames = _options['cvs_filenames'] if 'cvs_filenames' in _options else None\n self.column_definition = _column_definition\n self.read_csv_kwargs = _read_csv_kwargs\n\n @property\n def columns(self):\n return list(self.column_definition.keys())\n\n class ColumnNoExist(Exception):\n pass\n\n class NoFilesException(Exception):\n pass\n\n @abstractmethod\n def _process_df(self, df):\n pass\n\n def _columns_by_type(self, ty):\n return [col for col, t in self.column_definition.items() if t == ty]\n\n def _read_csv_logs(self):\n if self.cvs_filenames is None:\n csv_paths = list(sorted(Path(self.cvs_directory).glob(self.GLOB_CSV_PATTERN)))\n if len(csv_paths) == 0:\n raise DataLogAnalyserBase.NoFilesException(\n 'No *.csv files found in \"{}\"'.format(self.cvs_directory))\n else:\n csv_paths = [Path(self.cvs_directory).joinpath(file) for file in self.cvs_filenames]\n for file in csv_paths:\n log.debug('Reading file %s', file)\n if not Path(file).exists():\n raise DataLogAnalyserBase.NoFilesException(\n 'csv file not found: \"{}\"'.format(file))\n try:\n yield self._process_df(\n pd.read_csv(file, **self.read_csv_kwargs),\n )\n except pd.errors.ParserError:\n log.exception('Error Reading file %s', file)\n continue\n\n def process_data_and_write_to_csv(self, file):\n write_csv_header = True\n for df in self._read_csv_logs():\n log.debug('Writing df to file %s', file)\n df.to_csv(file, index=False, mode='a', header=write_csv_header)\n write_csv_header = False\n log.info('Done writing df to file %s', file)\n\n def plot_scatter_graph(self, x, y, title, save=True, display=False):\n if not set(self.columns).issuperset((x, y)):\n raise DataLogAnalyserBase.ColumnNoExist()\n ax = plt.gca()\n for df in self._read_csv_logs():\n df.plot.scatter(x, y, ax=ax, title=title)\n ax.grid(True)\n if display:\n plt.show()\n if save:\n file = '{time}-{uuid}-{x}-{y}.png'.format(\n time=time.strftime(\"%Y%m%d-%H%M\"),\n uuid=uuid.uuid4().hex[:8],\n x=x.replace(' ', '_'),\n y=y.replace(' ', '_'),)\n log.info('Saving as \"{}\"'.format(file))\n ax.get_figure().savefig(file)\n\n\nclass FutureEnergyDataLogAnalyser(DataLogAnalyserBase):\n \"\"\"The Future Energy Wind Turbine Data Log Analyser\"\"\"\n GLOB_CSV_PATTERN = '[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]-[0-9][0-9].csv'\n\n def __init__(self, _options):\n _column_definition = {\n 'Date_Time': 'datetime',\n 'Windspeed MPS': 'numeric',\n 'Wind Direction': 'numeric',\n 'RPM': 'numeric',\n 'ref RPM': 'numeric',\n 'TSR': 'numeric',\n 'Power': 'numeric',\n 'Inhibit State': 'numeric',\n 'MPS used for TSR': 'numeric',\n }\n _read_csv_kwargs = {\n 'header': 3,\n 'error_bad_lines': False,\n 'warn_bad_lines': False,\n }\n super().__init__(_options, _column_definition, _read_csv_kwargs)\n\n def _process_df(self, df):\n # strict parse datetime columns\n df['Date_Time'] = pd.to_datetime(\n df['Date'] + ' ' + df['Time'], errors='coerce',\n format='%Y/%m/%d %H:%M:%S',\n )\n # strict parse numeric columns\n numeric_cols = self._columns_by_type('numeric')\n df[numeric_cols] = df[numeric_cols].apply(\n pd.to_numeric, errors='coerce')\n # select cols & remove bad NaN rows & cols\n df = 
df[self.columns].dropna(axis=0)\n return df\n","repo_name":"tomoswill/wind_turbine_analyser","sub_path":"datalog_analyser/datalog_analyser.py","file_name":"datalog_analyser.py","file_ext":"py","file_size_in_byte":4437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30831842841","text":"#Introductory class exercise\n\nimport cv2 as cv \nimport numpy as np \n\ns = 100\n\ndef im_1():\n im = np.zeros([100,100])\n for i in range (s):\n im[i,i] = 255\n\n cv.imwrite('Imagen1.jpg', im)\n\nim_1()\n\ndef im_2():\n im2 = np.zeros((100,100,3), np.uint8)\n rojo = cv.rectangle (im2, (35,35), (65,65), (0,0,255), -1)\n verde = cv.rectangle (rojo, (40,40), (60,60), (0,255,0), -1)\n azul = cv.rectangle (verde, (45,45), (55,55), (255,0,0), -1)\n negro = cv.rectangle (azul, (48,48), (52,52), (0,0,0), -1)\n\n cv.imwrite('Imagen2.jpg', negro)\n\nim_2()\n\ndef im_3():\n im_3 = np.zeros([100,100])","repo_name":"DanielaZabaleta/VisionAII","sub_path":"Clases/Clase1.py","file_name":"Clase1.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26316605212","text":"import os\r\nimport cv2\r\nimport gzip\r\nimport theano\r\nimport lasagne\r\nimport matplotlib\r\nimport numpy as np\r\nimport pandas as pd\r\nimport _pickle as pickle\r\nimport theano.tensor as T\r\nimport matplotlib.cm as cm\r\nimport matplotlib.pyplot as plt\r\nfrom lasagne import layers\r\nfrom sklearn import cross_validation\r\nfrom urllib.request import urlretrieve\r\nfrom nolearn.lasagne import NeuralNet\r\nfrom nolearn.lasagne import visualize\r\nfrom lasagne.updates import nesterov_momentum\r\nfrom sklearn.metrics import classification_report\r\nfrom sklearn.metrics import confusion_matrix\r\n\r\ndef changeSize(x):\r\n I = [] \r\n for j in range(16): \r\n templist = [] \r\n for k in range (8): \r\n templist.append(0) \r\n I.append(templist) \r\n \r\n for i in range(16):\r\n for j in range(8):\r\n I[i][j] = 255*x[i*8 + j]\r\n\r\n I = np.array(I, dtype = np.uint8)\r\n I = cv2.resize(I,(16, 32))\r\n ret, I = cv2.threshold(I,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\r\n \r\n x = [] \r\n for i in range(32*16):\r\n x.append(0)\r\n \r\n for i in range(32):\r\n for j in range(16):\r\n if I[i][j] == 255:\r\n x[i*16 + j] = 1\r\n return x\r\n\r\ndef load_dataset():\r\n \r\n data = pd.read_excel('small.xlsx',header=None)\r\n \r\n X_test = []\r\n y_test = []\r\n \r\n data = np.array(data);\r\n\r\n for i in range(50000,52152):\r\n X_test.append(changeSize(data[i,1:]))\r\n for i in range(50000,52152):\r\n y_test.append(int(ord(data[i,0])-97))\r\n \r\n X_test = np.array(X_test)\r\n y_test = np.array(y_test)\r\n X_test = X_test.reshape((-1, 1, 32, 16)).astype('float32')\r\n y_test = y_test.astype(np.uint8)\r\n return X_test, y_test\r\n\r\nX_test, y_test = load_dataset()\r\n\r\nf = open('net1_master.pickle','rb')\r\nnet1 = pickle.load(f)\r\nf.close()\r\n\r\nc = 0\r\npreds = net1.predict(X_test)\r\nfor i in range(len(preds)):\r\n if preds[i] == y_test[i]:\r\n c = c + 1\r\n \r\nprint(c/len(preds))\r\n \r\n \r\nprint(preds.shape)\r\n#print(lasagne.objectives.categorical_crossentropy(preds, y_test))\r\n\r\n
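# NOTE: classification_report(y_test, preds), already imported above, would print\r\n# per-class precision/recall; the confusion matrix below visualises the same errors.\r\n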
cm = confusion_matrix(y_test, preds)\r\nplt.matshow(cm)\r\nplt.title('Confusion matrix')\r\nplt.colorbar()\r\nplt.ylabel('True label')\r\nplt.xlabel('Predicted label')\r\nplt.show()","repo_name":"vignesh2496/Line-Segmentation-for-English-Handwriting","sub_path":"Slant/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24781664252","text":"from operator import mul\nfrom functools import reduce\n\ndef triangle(row):\n\t'''\n\t>>> triangle(0)\n\t[]\n\t>>> triangle(1)\n\t[[1]]\n\t>>> triangle(2)\n\t[[1], [1, 1]]\n\t>>> triangle(3)\n\t[[1], [1, 1], [1, 2, 1]]\n\t>>> triangle(4)\n\t[[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]\n\t>>> triangle(5)\n\t[[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]\n\t>>> triangle(-1)\n\tTraceback (most recent call last):\n\tAssertionError: invalid number of rows\n\t>>> triangle(3.14)\n\tTraceback (most recent call last):\n\tAssertionError: invalid number of rows\n\t'''\n\ttriangle_seq = []\n\tassert (isinstance(row, int) and row >= 0), \"invalid number of rows\"\n\t\n\tif row == 0: \n\t\treturn triangle_seq\n\telse:\n\t\ttriangle_seq = [[1]]\n\t\tfor _ in range(1,row):\n\t\t\ttriangle_seq.append([1]+ [sum(pair) for pair in zip(triangle_seq[-1], triangle_seq[-1][1:])]+[1])\n\t\t\t#triangle_seq.append(nest)\n\treturn triangle_seq\n\n\ndef hexagon(row, col):\n\t'''\n\t>>> hexagon(8, 4)\n\t[15, 20, 35, 70, 56, 21]\n\t>>> hexagon(16, 7)\n\t[2002, 3003, 6435, 11440, 8008, 3003]\n\t>>> hexagon(3, 3)\n\tTraceback (most recent call last):\n\tAssertionError: invalid internal position\n\t'''\n\tassert (isinstance(row, int) and isinstance(col, int) and row > 2 and 1 < col < row), \"invalid internal position\"\n\t# NOTE: the original body was lost to extraction garbling; this reconstruction\n\t# returns the six entries surrounding position (row, col) in Pascal's triangle\n\t# and satisfies the doctests above.\n\tt = triangle(row + 1)\n\treturn [t[row-2][col-2], t[row-2][col-1], t[row-1][col], t[row][col], t[row][col-1], t[row-1][col-2]]\n\ndef square(row, col):\n\t'''\n\t>>> square(8, 4)\n\t'15 x 20 x 35 x 70 x 56 x 21 = 864360000 = 29400 x 29400'\n\t>>> square(16, 7)\n\t'2002 x 3003 x 6435 x 11440 x 8008 x 3003 = 10643228293383247161600 = 103166022960 x 103166022960'\n\t>>> square(3, 3)\n\tTraceback (most recent call last):\n\tAssertionError: invalid internal position\n\t'''\n\thex_ = hexagon(row, col)\n\tall_mul = reduce(mul, hex_, 1)\n\treturn \"{0} = {1} = {2} x {2}\".format(' x '.join(str(x) for x in hex_), all_mul, round(all_mul**0.5))\n\n\n\nif __name__ == '__main__':\n\timport doctest\n\tdoctest.testmod()","repo_name":"halqin/py_practise","sub_path":"triangle.py","file_name":"triangle.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18304262549","text":"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n\r\n# Loading the dataset, creating dataframe (using pandas)\r\ncolumn_names= ['Shift','SNR','Data','Test acc','Test loss','Precision','Recall','F1']\r\ndf = pd.read_csv('Test1_data_file_N-10',sep=',', names=column_names, engine='python',index_col=False)\r\n\r\n#-----------------------------------------------------------------------------#\r\n# Comparison of time domain, magnitude spectrogram and rectangular spectrogram for various SNR\r\n# for specific modulation type \r\nshift_key = 'phase'\r\ndf_X = df.loc[(df['Shift'] == shift_key)]\r\nX=np.unique(np.array(df_X['SNR']))\r\n\r\ndf_Y1=df_X.loc[(df_X['Data'] == 0)]\r\nY1=df_Y1['Test acc']\r\ndf_Y2=df_X.loc[(df_X['Data'] == 1)]\r\nY2=df_Y2['Test acc']\r\ndf_Y3=df_X.loc[(df_X['Data'] == 2)]\r\nY3=df_Y3['Test acc']\r\n\r\n
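# NOTE: the three curves compare time-series vs. magnitude vs. rectangular\r\n# spectrogram inputs; ci=100 asks seaborn to shade a 100% confidence band,\r\n# effectively the full spread over repeated runs at each SNR.\r\n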
sns.lineplot(x=df_Y1['SNR'],y=df_Y1['Test acc'],linewidth=4,color='r',label='Time-series',ci=100)\r\nsns.lineplot(x=df_Y2['SNR'],y=df_Y2['Test acc'],linewidth=4,color='b',label='Magnitude spectrogram',ci=100)\r\nsns.lineplot(x=df_Y3['SNR'],y=df_Y3['Test acc'],linewidth=4,color='g',label='Rectangular spectrogram',ci=100)\r\n\r\nplt.xlabel('SNR [dB] ',fontsize=28)\r\nplt.ylabel('Accuracy',fontsize=28)\r\nplt.xticks(X,fontsize=20)\r\nplt.yticks(np.linspace(0,1,21),fontsize=20)\r\nplt.ylim(0.4,1.05)\r\nplt.xlim(-21,21)\r\nplt.legend(loc='lower right',fontsize=20)\r\nplt.grid(linestyle='--',linewidth=2)\r\nplt.tight_layout()\r\n\r\n#-----------------------------------------------------------------------------#","repo_name":"kushal-thapa/NN_complex-valued-data","sub_path":"data_processing_script.py","file_name":"data_processing_script.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"34328705587","text":"import http.client\n\nimport collections\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.db import IntegrityError\nfrom django.db.models import Count\nfrom django.shortcuts import get_object_or_404, render\nfrom django.urls import reverse\nfrom django.views.generic import CreateView, TemplateView\nfrom django_jinja.views.generic import ListView\n\nfrom .. import forms, models\n\n__all__ = ['CitationListView', 'CitationCreateView', 'CitationProgressView']\n\n\nclass CitationListView(ListView):\n model = models.Citation\n paginate_by = 200\n ordering = ['-created']\n\n\nclass CitationCreateView(LoginRequiredMixin, CreateView):\n model = models.Citation\n form_class = forms.NewCitationForm\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n accident = get_object_or_404(models.Accident, pk=self.kwargs['accident_pk'])\n kwargs['instance'] = models.Citation(created_by=self.request.user,\n accident=accident)\n return kwargs\n\n def form_valid(self, form):\n try:\n return super().form_valid(form)\n except IntegrityError:\n return render(self.request, 'icw/citation_already_exists.html', context={\n 'accident_pk': self.kwargs['accident_pk']\n }, status=http.client.CONFLICT)\n\n def get_success_url(self):\n return reverse('accident-detail', kwargs={'pk': self.kwargs['accident_pk']}) + '#citations'\n\n\nclass CitationProgressView(TemplateView):\n template_name = 'icw/citation_progress.html'\n\n def get_context_data(self, **kwargs):\n categories = [\n ('Pedestrians killed on a footway or verge',\n {'casualties__in': models.Casualty.objects.filter(severity_id=1, pedestrian_location_id=6)}),\n ('Pedestrians killed',\n {'casualties__in': models.Casualty.objects.filter(severity_id=1, type_id=0)}),\n ('Pedestrians killed in collision with a pedal cycle',\n {'casualties__in': models.Casualty.objects.filter(severity_id=1, pedestrian_hit_by=1)}),\n ('Cyclists killed',\n {'casualties__in': models.Casualty.objects.filter(severity_id=1, type_id=1)}),\n ('All road deaths',\n {'severity_id': 1}),\n ]\n\n
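 # NOTE: each category is aggregated below into per-year {'yes','no','total'} buckets\n # so the progress template can report what share of accidents have citations.\n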
 counts = []\n for label, filter in categories:\n category_counts = collections.defaultdict(lambda: {'yes': 0, 'no': 0})\n queryset = models.Accident.objects \\\n .filter(**filter) \\\n .extra(select={'year': 'EXTRACT(year FROM date)'}).values('year', 'has_citations') \\\n .annotate(count=Count('*'))\n for result in queryset:\n category_counts[int(result['year'])]['yes' if result['has_citations'] else 'no'] = result['count']\n for value in category_counts.values():\n value['total'] = value['yes'] + value['no']\n counts.append({\n 'label': label,\n 'counts': sorted(category_counts.items(), reverse=True),\n })\n return {'counts': counts}\n","repo_name":"incollisionwith/incollisionwith","sub_path":"icw/views/citation.py","file_name":"citation.py","file_ext":"py","file_size_in_byte":3111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74709911235","text":"'''Brick by Brick - Exercise 1'''\n# Import the mymin and mymax modules\nfrom compare import mymax,mymin\n\n# Define the sorting function\ndef mysorted(y, reverse=False):\n y = list(y)\n # Result list\n s = []\n # Ascending sort by default\n if reverse == False:\n while y:\n # Call the minimum function\n m = mymin.mymin(y) # call the function from the mymin module\n y.remove(m)\n s.append(m)\n # Descending sort\n else:\n while y:\n # Call the maximum function\n m = mymax.mymax(y) # call the function from the mymax module\n y.remove(m)\n s.append(m) \n return s\n\n\n\n","repo_name":"Mae1228/codemao-python1-48","sub_path":"【40】添砖加瓦-课程资料/添砖加瓦-工程包练习一/mysorted.py","file_name":"mysorted.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32074082028","text":"'''Create the application module and configure the server and routing.\nCreate a Task class with id, title, description and status fields.\nCreate a tasks list to store the tasks.\nCreate a get_tasks function to get the list of all tasks (GET method).\nCreate a get_task function to get a task's details by its ID\n(GET method).\nCreate a create_task function to add a new task (POST method).\nCreate an update_task function to update a task's details by its ID\n(PUT method).\nCreate a delete_task function to delete a task by its ID (DELETE method).'''\n\nfrom fastapi import FastAPI, HTTPException\nfrom pydantic import BaseModel\nfrom enum import Enum\n\napp = FastAPI(title=\"FastAPI App\")\n\nclass Status(Enum):\n TODO = 1\n INPROGRESS = 2\n DONE = 3\n\nclass Task(BaseModel):\n id: int\n title: str\n description: str\n status: str\n\nclass TaskMod(BaseModel):\n title: str\n description: str\n status: str\n\ntasks = []\ntasks.append(Task(id=1, title='sleep', description='sleep well', status=Status.TODO.name))\n\n@app.get(\"/tasks\", response_model=list[Task])\nasync def get_tasks():\n return tasks\n\n@app.get(\"/tasks/{task_id}\", response_model=Task)\nasync def get_task_by_id(task_id: int):\n is_task = [task for task in tasks if task.id == task_id]\n if is_task:\n return is_task[0]\n raise HTTPException(status_code=404, detail='No task with such id')\n\n@app.post('/tasks')\nasync def add_task(task: TaskMod):\n next_id = max(tasks, key=lambda x: x.id).id + 1 if tasks else 1 # guard against an empty task list\n next_task = Task(id=next_id, title=task.title, description=task.description, status=task.status)\n tasks.append(next_task)\n return 'New task added'\n\n@app.put(\"/tasks/{task_id}\", response_model=Task)\nasync def update_task(task_id: int, task: TaskMod):\n is_task = [task for task in tasks if task.id == task_id]\n if not is_task:\n raise HTTPException(status_code=404, detail='No task with such id')\n is_task[0].title = task.title\n is_task[0].description = task.description\n is_task[0].status = task.status\n return is_task[0]\n\n@app.delete(\"/tasks/{task_id}\")\nasync def delete_task(task_id: int):\n is_task = [task for task in tasks if task.id == task_id]\n if not is_task:\n raise HTTPException(status_code=404, detail='No task with such id')\n tasks.remove(is_task[0]) # remove by object so ids need not match list positions\n return 'Task deleted'","repo_name":"yukka-yu/Flask-5","sub_path":"task07.py","file_name":"task07.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23620380551","text":"#!/usr/bin/env python3\n\nfrom copy import copy\n\n
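# NOTE: a split is fair under Patrick's carry-less addition exactly when the XOR\n# of Sean's piles equals the XOR of Patrick's piles; this brute force hands Sean\n# the smallest piles one at a time and prints the sum Patrick keeps.\n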
data = open('candy.in')\n\nfor i in range(int(data.readline())):\n data.readline()\n candy_str = data.readline().split(' ')\n candies_main = list()\n final = 'NO'\n for cand in candy_str:\n candies_main.append(int(cand))\n for rounds in range(len(candies_main)-1):\n candies = copy(candies_main)\n sean = min(candies)\n candies.remove(sean)\n for iteration in range(rounds):\n next_candy = min(candies)\n candies.remove(next_candy)\n sean ^= next_candy\n patrick = 0\n for remaining in candies:\n patrick ^= remaining\n if sean == patrick:\n final = sum(candies)\n break\n del candies\n print('Case #%d:'%(i+1), final)\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_76/400.py","file_name":"400.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39245221219","text":"from PIL import Image, ImageEnhance\nimport os\nimport shutil\nimport zipfile\nimport random\n\nclass new_photo_unik:\n\n # Modification parameters\n brightness = 0.98\n contrast = 1.1\n sharpness = 1.1\n saturation = 1.1\n scale = 0.99\n resize = 0.98\n \n path_list = []\n\n user_id = None\n last_n_index = None\n zip_path = None\n\n # Path to the source image\n input_image = None\n\n # Directory for saving the modified images\n output_dir = \"media\"\n\n def __init__(self, image_path, id, i) -> None:\n self.input_image = image_path\n self.output_dir = f\"media/{id}\"\n self.user_id = id\n\n try:\n os.mkdir(os.path.abspath(f\"media/{id}\"))\n except:\n try:\n os.rmdir(os.path.abspath(f\"media/{id}\"))\n os.mkdir(os.path.abspath(f\"media/{id}\"))\n except:\n shutil.rmtree(os.path.abspath(f\"media/{id}\"))\n os.mkdir(os.path.abspath(f\"media/{id}\"))\n\n # Modify the image and save 10 different photos\n self.path_list = []\n for i in range(i):\n modified_image = self.modify_image(self.input_image,\n self.brightness, \n self.contrast, \n self.sharpness, \n self.saturation, \n self.scale, \n self.resize, \n self.output_dir, i)\n self.path_list.append(modified_image)\n print(f\"Created modified image: {modified_image}\")\n self.gen_zip()\n\n def gen_zip(self):\n with zipfile.ZipFile(f\"media/{self.user_id}.zip\", 'w', zipfile.ZIP_DEFLATED) as zipf:\n for root, _, files in os.walk(self.output_dir):\n for file in files:\n file_path = os.path.join(root, file)\n zipf.write(file_path, os.path.relpath(file_path, self.output_dir))\n self.zip_path = f\"media/{self.user_id}.zip\"\n return f\"{self.user_id}.zip\"\n\n def modify_image(self ,image_path, brightness, contrast, sharpness, saturation, scale, resize, output_dir, i):\n n_index = random.randrange(1, 3) / 100\n def n_c(n_index):\n if n_index == self.last_n_index:\n n_index = random.randrange(1, 3) / 100\n n_c(n_index)\n else: pass\n # Open the image\n image = Image.open(image_path)\n\n # Adjust brightness\n enhancer = ImageEnhance.Brightness(image)\n image = enhancer.enhance(brightness + n_index)\n\n # Adjust contrast\n enhancer = ImageEnhance.Contrast(image)\n image = enhancer.enhance(contrast + n_index)\n\n # Adjust sharpness\n enhancer = ImageEnhance.Sharpness(image)\n image = enhancer.enhance(sharpness + n_index)\n\n # Adjust saturation\n enhancer = ImageEnhance.Color(image)\n image = enhancer.enhance(saturation + n_index)\n\n # Adjust scale\n width, height = image.size\n new_width = int(width * scale)\n new_height = int(height * scale)\n image = image.resize((new_width, new_height))\n\n
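 # NOTE: stacking these small randomised tweaks, plus the EXIF strip below, changes\n # the file's bytes and hash while leaving the photo visually unchanged, which\n # appears to be the point of this uniquifier.\n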
 # Adjust width and height\n width_change = int(width * resize)\n height_change = int(height * resize)\n new_width = width + width_change\n new_height = height + height_change\n image = image.resize((new_width, new_height))\n\n # Strip EXIF data\n image = image.convert(\"RGB\")\n data = list(image.getdata())\n image_without_exif = Image.new(image.mode, image.size)\n image_without_exif.putdata(data)\n\n # Generate a name for the new file\n filename = os.path.splitext(os.path.basename(image_path))[0]\n output_path = os.path.join(output_dir, f\"{random.randint(1, 10000000)}.png\")\n\n # Save the modified image\n image_without_exif.save(output_path, \"PNG\")\n\n return output_path\n","repo_name":"IUDA194/A-bot","sub_path":"new_photo.py","file_name":"new_photo.py","file_ext":"py","file_size_in_byte":4720,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43774543685","text":"\"\"\"Define League class and RatingSystem base class\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport sqlalchemy.types as sqlt\nimport datetime\n\nfrom pyrate.db.schema import schema\n\ndef rank_array(a, descending=True):\n \"\"\"Rank array counting from 1\"\"\"\n temp = np.argsort(a)\n if descending:\n temp = temp[::-1]\n ranks = np.empty_like(temp)\n ranks[temp] = np.arange(1,len(a)+1)\n return ranks\n\nclass League:\n \"\"\"Data structure to hold score and schedule data\"\"\"\n def __init__(self, df_games, df_teams=None, duplicated_games=True):\n \"\"\"Create League instance\n\n Parameters\n ----------\n df_games : DataFrame\n DataFrame containing at least columns 'team_id',\n 'opponent_id', 'points', and 'opponent_points'\n df_teams : DataFrame\n DataFrame containing at least an index of team id's.\n duplicated_games : bool\n Whether each game is represented twice, once for each team\n \"\"\"\n if 'team_id' not in df_games:\n raise ValueError(\"expected 'team_id' column\")\n elif 'points' not in df_games:\n raise ValueError(\"expected 'points' column\")\n elif 'opponent_id' not in df_games:\n raise ValueError(\"expected 'opponent_id' column\")\n elif 'opponent_points' not in df_games:\n raise ValueError(\"expected 'opponent_points' column\")\n\n if not duplicated_games:\n df2 = df_games.rename(columns={'team_id':'opponent_id', 'opponent_id':'team_id',\n 'points':'opponent_points','opponent_points':'points',\n 'location':'opponent_location','opponent_location':'location'})\n df_games = pd.concat((df_games,df2), join='inner')\n\n # Note: although for interactive use, indexing by name is\n # convenient, currently index by id to cover case where names\n # are not provided (and maybe it provides faster lookup?)\n if df_teams is None:\n self.teams = pd.DataFrame(index=np.sort(df_games['team_id'].unique()))\n else:\n # Here we retain only teams that are referenced in\n # df_games. Note this does not necessarily mean all teams\n # have data for played games (could be scheduled games\n # only).\n self.teams = df_teams.loc[df_games['team_id'].unique()]\n # print('len teams before, after:', len(df_teams), len(self.teams))\n\n # Drop teams with no played games. 
(May be a way to clean\n # this up a little; currently requires recomputing the set of\n # unplayed games.)\n unplayed = (df_games['points'].isnull() | df_games['opponent_points'].isnull())\n self.teams = self.teams.loc[[t for t in self.teams.index if sum(df_games.loc[~unplayed,'team_id']==t) > 0]]\n # And remove remnants from the games data frame (i.e.,\n # scheduled games for teams that haven't played yet, which\n # will cause problems in the indexing below)\n df_games = df_games.copy()\n df_games = df_games.loc[df_games['team_id'].isin(self.teams.index),:]\n df_games = df_games.loc[df_games['opponent_id'].isin(self.teams.index),:]\n\n team_ids = list(self.teams.index)\n df_games['team_index'] = df_games['team_id'].apply(lambda x: team_ids.index(x))\n df_games['opponent_index'] = df_games['opponent_id'].apply(lambda x: team_ids.index(x))\n\n # Split up into games and schedule\n unplayed = (df_games['points'].isnull() | df_games['opponent_points'].isnull())\n self.double_games = df_games[~unplayed].copy()\n self.double_schedule = df_games[unplayed].copy()\n self.double_games = self.double_games.astype({'points':'int32', 'opponent_points':'int32'})\n\n def get_wl(row):\n if row['points'] > row['opponent_points']:\n return 'W'\n elif row['points'] < row['opponent_points']:\n return 'L'\n else:\n return 'T'\n self.double_games['result'] = self.double_games.apply(get_wl, axis=1)\n\n self.teams['wins'] = [sum(self.double_games.loc[self.double_games['team_id'] == tid, 'result'] == 'W') for tid in self.teams.index]\n self.teams['losses'] = [sum(self.double_games['team_id']==tid) - self.teams.at[tid,'wins'] for tid in self.teams.index]\n\n def summarize(self):\n print('League summary: {} teams, {} games'.format(len(self.teams), len(self.double_games)//2))\n\n\n @classmethod\n def from_hyper_table(cls, df_games, df_teams=None):\n \"\"\"Set up league from hyper table format\n\n Parameters\n ----------\n df_games : pandas Data frame\n A data frame with league data, containing at least the\n following columns: 'game_id', 'team_id', 'points'.\n Optional columns are 'date', 'location' ('H', 'A', and\n 'N').\n \"\"\"\n if 'team_id' not in df_games:\n raise ValueError(\"expected 'team_id' column\")\n elif 'game_id' not in df_games:\n raise ValueError(\"expected 'game_id' column\")\n elif 'points' not in df_games:\n raise ValueError(\"expected 'points' column\")\n\n num_rows = len(df_games)\n if num_rows % 2 != 0:\n raise ValueError(\"expected even number of rows\")\n\n games = df_games.set_index('game_id')\n games = games.join(games, rsuffix='2')\n # Each index was originally represented twice, so after join\n # now appears 4 times, 2 combinations of which are not valid\n games = games[games['team_id'] != games['team_id2']]\n # Now have double-games format; rename the columns\n games = games.rename(columns={'team_id2':'opponent_id',\n 'points2':'opponent_points',\n 'location2':'opponent_location'})\n if len(games) != num_rows:\n # If there is a mismatch, game_id's not appearing twice\n # get dropped by the join\n raise ValueError(\"game_id mismatch\")\n\n # For compatibility with Massey data, treat 0 points as\n # scheduled game (could be added as a flag)\n scheduled = (games['points'] == 0) & (games['opponent_points'] == 0)\n games.loc[scheduled,['points','opponent_points']] = np.nan # Flag scheduled games\n\n return cls(games, df_teams)\n\n\nclass RatingSystem:\n \"\"\"Base class for rating system\"\"\"\n def __init__(self, league, train_interval=None, test_interval=None):\n \"\"\"Base class 
initialization called by child classes\n\n For train/test split, only one of train_interval or\n test_interval may be provided.\n\n Parameters\n ----------\n league : League instance\n train_interval : int or None\n Interval used to define train/test split of games. Use a\n value of 1 to train on every game. A value of 2 will\n train on every other game, etc.\n test_interval : int or None\n Interval used to define train/test split of games. Use a\n value of 2 to test on every other game, 3 to test on every\n 3rd game, etc.\n \"\"\"\n if train_interval and test_interval:\n raise ValueError\n self.df_teams = league.teams\n # The double_games table may be modified by the fitting\n # routine, so we let the fitting routine store single_games\n # once that is done.\n self.double_games = league.double_games\n self.double_schedule = league.double_schedule\n self.single_schedule = self.double_schedule[ self.double_schedule['team_id'] < self.double_schedule['opponent_id'] ]\n\n # Flagging the train/test set is a little tricky because we\n # work with double games. To handle this, we can set it based\n # on the DataFrame index, which is unique by game.\n if train_interval:\n self.double_games['train'] = False\n self.double_games.loc[self.double_games.index.unique()[::train_interval], 'train'] = True\n elif test_interval:\n self.double_games['train'] = True\n self.double_games.loc[self.double_games.index.unique()[::test_interval], 'train'] = False\n else:\n self.double_games['train'] = True\n\n def summarize(self):\n \"\"\"Print summary information to screen\"\"\"\n cv_flag = (not self.double_games['train'].all())\n\n print('{} played games'.format(len(self.double_games)//2))\n if cv_flag:\n print('{} trained games'.format(sum(self.double_games['train'])//2))\n num_sched = len(self.double_schedule)//2\n if num_sched > 0:\n print('{} scheduled games'.format(num_sched))\n\n if self.homecourt:\n print('home advantage: {:.1f}'.format(self.home_adv))\n\n print('Consistency: {:.3f}'.format(self.consistency))\n if hasattr(self, 'loo_consistency'):\n print('LOO consistency: {:.3f}'.format(self.loo_consistency))\n if cv_flag:\n correct, total = self.evaluate_predicted_wins(exclude_train=True)\n print('CV consistency: {:.3f}'.format(correct/total))\n if self.full_rank:\n print('Log lhood: {:.3f}'.format(self.log_likelihood()))\n if cv_flag:\n print('CV log lhood: {:.3f}'.format(self.log_likelihood(exclude_train=True)))\n \n\n def store_ratings(self, ratings, offense=None, defense=None):\n \"\"\"After child method is called, organize rating data into DataFrame\"\"\"\n self.df_teams['rating'] = ratings\n self.df_teams['rank'] = rank_array(ratings)\n if offense is not None:\n self.df_teams['offense'] = offense\n self.df_teams['offense_rank'] = rank_array(offense)\n if defense is not None:\n self.df_teams['defense'] = defense\n self.df_teams['defense_rank'] = rank_array(defense)\n\n self.get_strength_of_schedule()\n\n def get_strength_of_schedule(self):\n \"\"\"Compute strength of schedule as average of opponent rating\n\n Call into child class method to compute schedule strength from\n array of opponent ratings.\n\n For now, does not account for home court\"\"\"\n self.df_teams['strength_of_schedule_past'] = np.nan\n self.df_teams['strength_of_schedule_future'] = np.nan\n self.df_teams['strength_of_schedule_all'] = np.nan\n for team_id,team in self.df_teams.iterrows():\n games = self.double_games[self.double_games['team_id'] == team_id]\n schedule = self.double_schedule[self.double_schedule['team_id'] == 
team_id]\n self.df_teams.at[team_id,'strength_of_schedule_past'] = self.strength_of_schedule(self.df_teams.loc[games['opponent_id'],'rating'])\n if len(schedule) > 0:\n self.df_teams.at[team_id,'strength_of_schedule_future'] = self.strength_of_schedule(self.df_teams.loc[schedule['opponent_id'],'rating'])\n else:\n self.df_teams.at[team_id,'strength_of_schedule_future'] = np.nan\n self.df_teams.at[team_id,'strength_of_schedule_all'] = self.strength_of_schedule(self.df_teams.loc[np.concatenate((games['opponent_id'],schedule['opponent_id'])),'rating'])\n\n def display_ratings(self, n=10):\n print(self.df_teams.sort_values(by='rating', ascending=False).head(n))\n\n def store_predictions(self):\n \"\"\"Compute and store predictions for scheduled games\"\"\"\n self.double_games['predicted_result'] = self.predict_result(self.double_games)\n self.double_games['win_probability'] = self.predict_win_probability(self.double_games)\n self.double_schedule['win_probability'] = self.predict_win_probability(self.double_schedule)\n self.consistency = sum(self.double_games['predicted_result']==self.double_games['result']) / float(len(self.double_games))\n if hasattr(self, 'single_games') and 'loo_predicted_result' in self.single_games:\n games = self.single_games[self.single_games['train']]\n self.loo_consistency = sum(games['loo_predicted_result']==games['result']) / float(len(games))\n\n # Expected wins, losses:\n if all(self.double_schedule['win_probability'].notnull()):\n exp_wins = [int(round(sum(self.double_schedule.loc[self.double_schedule['team_id']== tid,'win_probability']))) + self.df_teams.at[tid,'wins'] for tid in self.df_teams.index]\n\n self.df_teams['expected_losses'] = [sum(self.double_games['team_id']==tid) + sum(self.double_schedule['team_id']==tid) - exp_wins[i] for i,tid in enumerate(self.df_teams.index)]\n self.df_teams['expected_wins'] = exp_wins\n\n def evaluate_predicted_wins(self, exclude_train=False):\n \"\"\"Evaluate how many past games are predicted correctly\"\"\"\n if exclude_train:\n idx = ~ self.double_games['train']\n else:\n # Set idx to all True\n idx = self.double_games['train'].notnull()\n\n correct = sum( self.double_games.loc[idx,'predicted_result'] == self.double_games.loc[idx,'result'] ) // 2\n total = sum(idx) // 2\n\n return correct, total\n\n def log_likelihood(self, exclude_train=False):\n \"\"\"Evaluate log of likelihood of outcomes based on predicted win probabilities\"\"\"\n games = self.double_games[ self.double_games['team_id'] < self.double_games['opponent_id'] ]\n if exclude_train:\n games = games[~ games['train']]\n\n pvals = games.apply(lambda r: r['win_probability'] if r['result']=='W' else 1.0 - r['win_probability'], axis=1)\n return sum(np.log(pvals))\n\n def evaluate_coverage_probability(self, exclude_train=False):\n pvals = np.array([0.5, 0.6, 0.7, 0.8, 0.9])\n counts = np.zeros(len(pvals), dtype=int)\n correct = np.zeros(len(pvals), dtype=int)\n\n total_count = 0 # Sanity check\n\n # Use double games for book-keeping simplicity...\n if exclude_train:\n games = self.double_games[self.double_games['train']]\n else:\n games = self.double_games\n\n pred_probs = self.predict_win_probability(games)\n pred_outcomes = ['W' if p>0.5 else 'L' for p in pred_probs]\n for p,wl,(index,game) in zip(pred_probs,pred_outcomes,games.iterrows()):\n if p > 0.5:\n total_count += 1\n # Determine interval\n interval = np.where(p>pvals)[0][-1]\n counts[interval] += 1\n if wl == game['result']:\n correct[interval] += 1\n\n print(\"Total count:\", total_count)\n for i,p in 
enumerate(pvals):\n print('Coverage for {}: {} / {} ({:.2})'.format(p, correct[i], counts[i], float(correct[i])/counts[i]))\n\n def to_db(self, engine, rating_name, finished=False):\n \"\"\"Write to database\n\n Create \"teams\" and \"games\" tables. The \"games\" table also\n includes scheduled games. Each game in the games table is\n represented twice, once for each team in the team_id position\n\n Parameters\n ----------\n rating_name : str\n A unique name for the rating\n finished : bool\n Flag for whether the season is finished, used by website.\n \"\"\"\n\n with engine.connect() as conn:\n for s in schema.split('\\n\\n'):\n conn.execute(s)\n\n ## properties table (general info)\n today = pd.to_datetime(datetime.datetime.today())\n df = pd.DataFrame({'Updated':[today]})\n df.to_sql(\"properties\", engine, if_exists='replace', index=False)\n\n ### ratings table\n # Needs to be handled carefully because previous rating_id\n # needs to be used to remove associated game/team entries,\n # but we also need to update the rating if that row\n # already exists\n\n # Check whether rating exists:\n output = conn.execute('SELECT rating_id FROM ratings WHERE name=?', (rating_name,))\n result = output.fetchone()\n if result:\n rating_id = result[0]\n else:\n conn.execute('INSERT INTO ratings (name) VALUES (?);', (rating_name,))\n output = conn.execute('SELECT last_insert_rowid();')\n rating_id = output.fetchone()[0]\n\n # Now update rating_id with new data. Re-using the\n # rating_id prevents the rating_id values from continuuing\n # to increase (alternatively, could just delete all\n # rating_id entries from games and teams here and then get\n # a new arbitrary rating_id before adding the data)\n n_games = len(self.double_games) // 2\n n_scheduled = len(self.double_schedule) // 2\n Rsquared = self.Rsquared if hasattr(self, 'Rsquared') else None\n conn.execute('UPDATE ratings SET home_advantage = ?, r_squared = ?, consistency=?, games_played = ?, games_scheduled = ?, finished = ? 
WHERE rating_id = ?;', (self.home_adv, Rsquared, self.consistency, n_games, n_scheduled, finished, rating_id))\n\n ### teams table\n df = self.df_teams.copy()\n df['rating_id'] = rating_id\n df['team_id'] = df.index\n\n # First delete previous entries for this league:\n conn.execute('DELETE FROM teams WHERE rating_id=?;', (rating_id,))\n\n df.to_sql(\"teams\", engine, if_exists='append', index=False,\n dtype = {'team_id': sqlt.Integer,\n 'rating_id': sqlt.Integer,\n 'name': sqlt.Text,\n 'rating': sqlt.Float,\n 'rank': sqlt.Integer,\n 'wins': sqlt.Integer,\n 'losses': sqlt.Integer,\n 'expected_wins': sqlt.Integer,\n 'expected_losses': sqlt.Integer,\n 'offense_rank': sqlt.Integer,\n 'defense_rank': sqlt.Integer})\n\n ### games table\n # Using reindex both selects columns and creates NA\n # columns if not present (using .loc for this will trigger\n # a warning if requested columns are not present)\n df = self.double_games.reindex(columns=['team_id','opponent_id','points','opponent_points','location','date','normalized_score','result','win_probability'])\n df['rating_id'] = rating_id\n\n df.rename(columns={'points':'points_for',\n 'opponent_points':'points_against'},\n inplace=True)\n\n # First delete previous entries for this league:\n conn.execute('DELETE FROM games WHERE rating_id=?;', (rating_id,))\n\n df.to_sql(\"games\", engine, if_exists='append', index=False,\n dtype = {'team_id': sqlt.Integer,\n 'rating_id': sqlt.Integer,\n 'opponent_id': sqlt.Integer,\n 'points_for': sqlt.Integer,\n 'points_against': sqlt.Integer,\n 'date': sqlt.Date,\n 'normalized_score': sqlt.Float})\n\n # scheduled games\n df = self.double_schedule.loc[:,['team_id','opponent_id','location','date','win_probability']]\n df['rating_id'] = rating_id\n\n df.to_sql(\"games\", engine, if_exists='append', index=False,\n dtype = {'team_id': sqlt.Integer,\n 'rating_id': sqlt.Integer,\n 'opponent_id': sqlt.Integer,\n 'date': sqlt.Date})\n","repo_name":"mcfarljm/pyrate","sub_path":"pyrate/rate/ratingbase.py","file_name":"ratingbase.py","file_ext":"py","file_size_in_byte":19837,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"9186607209","text":"\"\"\"debounce example\"\"\"\nfrom pythonicqt.Qt import QtCore, QtGui\nfrom pythonicqt.examples import ExampleBase\nfrom pythonicqt import debounce\n\nclass ExampleDebounce(ExampleBase):\n \"\"\"This Widget demonstrates the debounce functionality.\n You can manually change the debounce code in the file\n to play with different debounce arguments.\"\"\"\n title=\"Debounce\"\n\n def __init__(self, *args, **kwargs):\n super(ExampleDebounce, self).__init__(*args, **kwargs)\n self._layout = QtGui.QVBoxLayout(self)\n self.description_label = QtGui.QLabel(\n \"Quickly change spin box values and watch them update.\")\n self._layout.addWidget(self.description_label)\n \n self.debounce_checkbox = QtGui.QCheckBox(\"use debounce.\", checked=True)\n self._layout.addWidget(self.debounce_checkbox)\n \n self.spin_box = QtGui.QSpinBox()\n self.spin_box.setMaximum(100)\n self._layout.addWidget(self.spin_box)\n \n self.value_template = \"Last updated value is {}.\"\n self.last_value_label = QtGui.QLabel(self.value_template.format(0))\n self._layout.addWidget(self.last_value_label)\n \n self.spin_box.valueChanged.connect(self.spin_box_changed)\n \n #This method is called every time the spin box changes.\n #The method redirects the call based on the checkbox state.\n def spin_box_changed(self, new_value):\n update_method = 
self.update_description\n if self.debounce_checkbox.isChecked():\n update_method = self.update_description_debounced\n update_method(new_value)\n \n def update_description(self, value):\n new_description = self.value_template.format(value)\n self.last_value_label.setText(new_description)\n \n #You can change the debounce arguments to see the effects. \n @debounce(msecs=200)\n def update_description_debounced(self, value):\n \"\"\"Changes the label to reflect the value of the spinbox.\"\"\"\n self.update_description(value)\n\nif __name__ == \"__main__\":\n ExampleDebounce.run_example()\n","repo_name":"Digirolamo/pythonicqt","sub_path":"pythonicqt/examples/debounce_example.py","file_name":"debounce_example.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"70106203394","text":"#Created by Jedsada LUENGARAMSUK 15/4/2021\n#lucky.py - Opens several Google search results. \n\nimport requests, sys, webbrowser , bs4 \n\nprint('Googling... now') #Display text while downloading the Google page \nres = requests.get('http://google.com/search?q=' + ' '.join(sys.argv[1:]))\nres.raise_for_status() #Check status of this page, if doesn't work it will raise an error \n\n\n#Retrieve top search result links. \nsoup = bs4.BeautifulSoup(res.text)\n\n#Open a browser tab for each result \nlinkElems = soup.select('.r a')\nnumOpen = min(5, len(linkElems)) #Select the right element\nfor i in range(numOpen): \n webbrowser.open('http://google.com' + linkElems[i].get('href'))\n\n\n#After learning HTML it will make sense. ","repo_name":"jedlueng/Python-Learning","sub_path":"Archive/Web-Scraping/lucky.py","file_name":"lucky.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"23581232981","text":"t = int(input())\r\nfor testcase in range(1, t + 1):\r\n\tline = input().split(\" \")\r\n\tD = int(line[0])\r\n\tN = int(line[1])\r\n\tlongest_time = 0.0\r\n\tfor i in range(0, N):\r\n\t\tline2 = input().split(\" \")\r\n\t\tKi = int(line2[0])\r\n\t\tSi = int(line2[1])\r\n\t\ttime_i = (D - Ki) / Si\r\n\t\tif time_i > longest_time:\r\n\t\t\tlongest_time = time_i\r\n\t\r\n\tprint(\"Case #{}: {:.6f}\".format(testcase, D / longest_time))\r\n\r\n\t\t\r\n'''\r\n\r\n\t#storage = [[0]*(s_length - flipper_size)]*(s_length - flipper_size)\r\n\tflips = 0\r\n\tfor i in range(0, s_length - flipper_size + 1):\r\n\t\tif s[i] == '-':\r\n\t\t\tflips += 1\r\n\t\t\tfor j in range(i, i + flipper_size):\r\n\t\t\t\tif s[j] == '-':\r\n\t\t\t\t\ts[j] = '+'\r\n\t\t\t\telse:\r\n\t\t\t\t\ts[j] = '-'\r\n\tvalid = True\r\n\tfor i in range(s_length - flipper_size + 1, s_length):\r\n\t\tif s[i] == '-':\r\n\t\t\tvalid = False\r\n\t\t\tbreak\r\n\tif valid:\r\n\t\tprint(\"Case #{}: {}\".format(testcase, flips))\r\n\telse:\r\n\t\tprint(\"Case #{}: IMPOSSIBLE\".format(testcase))\r\n'''","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_206/645.py","file_name":"645.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18577550537","text":"# system imports\nimport time\nimport os\nimport threading\nimport click\n\n# sieve imports\nimport cv2\nfrom sieve_helpers import upload_local_video\nfrom video_tools import VideoLoopWriteManager\n\n@click.command()\n@click.option('--sieve_api_key', default='YOUR_API_KEY', help='Your Sieve API 
key')\n@click.option('--video_push_interval', default=10, help='How often to push video to Sieve')\n@click.option('--video_feed_path', default=0, help='Path to video feed')\ndef record_video(sieve_api_key, video_push_interval, video_feed_path):\n # create temp directory to save videos before pushing to Sieve\n write_directory = os.path.join(os.getcwd(), './.sieve_tmp/')\n print(\"All temp videos saved in dir:\", write_directory)\n if not os.path.exists(write_directory):\n os.mkdir(write_directory)\n\n # Create video feed capture and write manager\n cap = cv2.VideoCapture(video_feed_path)\n fps = cap.get(cv2.CAP_PROP_FPS)\n frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\n video_loop_write_manager = VideoLoopWriteManager(write_directory, fps, frame_width, frame_height)\n\n # Create a video writer before entering the loop\n video_writer = video_loop_write_manager.get_next_video_writer()\n\n start_time = time.time()\n while cap.isOpened():\n # read frame\n ret, frame = cap.read()\n if ret:\n # push video to Sieve every interval\n if time.time() - start_time > video_push_interval:\n # close current video writer\n video_writer.release()\n # start separate thread for uploading video to Sieve\n threading.Thread(target=upload_local_video, args=(sieve_api_key, video_loop_write_manager.current_video_name)).start()\n \n # get next video writer and start writing to it\n video_writer = video_loop_write_manager.get_next_video_writer()\n start_time = time.time()\n\n video_writer.write(frame)\n else:\n break\n\n cap.release()\n\nif __name__ == '__main__':\n record_video()","repo_name":"bigdatasciencegroup/automatic-video-processing","sub_path":"run_live_video.py","file_name":"run_live_video.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"23502421367","text":"\"\"\"Implement a stack using a LinkedList.\nThe stack has push and pop functions.\"\"\"\n\nimport sys\nsys.path.append(\"../linked_list\")\nfrom linked_list import LinkedList, Node\n\nclass Stack():\n def __init__(self, max_size=sys.maxsize, top=None):\n self.max_size = max_size\n self.linked_list = LinkedList(top)\n if top is None:\n self.num_nodes = 0\n else:\n self.num_nodes = 1\n \n def push(self, key):\n \"\"\"Insert a key on top of a stack.\"\"\"\n if self.num_nodes == self.max_size:\n print (\"Stack overflow\")\n else:\n self.linked_list.insert(key, 1)\n self.num_nodes += 1\n \n def pop(self):\n \"\"\"Remove the top key.\"\"\"\n deleted_node = self.linked_list.delete_head()\n if deleted_node:\n self.num_nodes -= 1\n return deleted_node\n\n\nif __name__ == \"__main__\":\n # Initialize a stack with a node\n stack = Stack(top=Node(1))\n\n # Test stack functionality\n stack.push(2)\n stack.push(3)\n print (stack.num_nodes)\n \n print (stack.pop().key)\n print (stack.pop().key)\n print (stack.pop().key)\n print (stack.pop())\n stack.push(4)\n print (stack.num_nodes)\n print (stack.pop().key)","repo_name":"ntrang086/python_snippets","sub_path":"stack_queue_heap/stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35905465297","text":"def fizzbuzz():\r\n print(\r\n \"\\nThis is the FizzBuzz Test!\\n\"\r\n \"FizzBuzz is a test that outputs Fizz when divisible by a certain number, Buzz when divisible by another \"\r\n \"number and FizzBuzz when divisible 
by both.\\nThis is not the best FizzBuzz test created in Python, \"\r\n \"but it does the deed.\\nUser has to enter Fizz-Buzz conditions and the number range.\\n\"\r\n \"Code by: @Joshboi || GitHub: github.com/swarnab1 || swarnab1.github.io\\n\")\r\n\r\n while True:\r\n fizz = input(\"FizzBuzz >>> Fizz (divisor1) = \")\r\n if fizz == \"\" or fizz.isdigit() is False:\r\n print(\"Enter a valid number\")\r\n continue\r\n elif fizz == \"0\":\r\n print(\"Can't use Zero as a divisor\")\r\n continue\r\n else:\r\n fizz = int(fizz)\r\n buzz = input(\"FizzBuzz >>> Buzz (divisor2) = \")\r\n if buzz == \"\" or buzz.isdigit() is False:\r\n print(\"Enter a valid number\")\r\n continue\r\n elif buzz == \"0\":\r\n print(\"Can't use Zero as divisor\")\r\n continue\r\n elif fizz == int(buzz):\r\n print(\"Can't use the same number for both divisors\")\r\n continue\r\n else:\r\n buzz = int(buzz)\r\n userRangeInput = input(\r\n \"FizzBuzz >>> Enter your desired range (Outputs FizzBuzz within the range): \")\r\n if userRangeInput == \"\" or userRangeInput.isdigit() is False:\r\n print(\"Enter a valid number\")\r\n continue\r\n elif int(userRangeInput) < 2:\r\n print(\"Use a value greater than 1\")\r\n continue\r\n else:\r\n userRangeInput = int(userRangeInput)\r\n\r\n for i in range(1, userRangeInput + 1):\r\n if i % fizz == 0 and i % buzz == 0:\r\n print(\"FizzBuzz\")\r\n continue\r\n elif i % buzz == 0:\r\n print(\"Buzz\")\r\n continue\r\n elif i % fizz == 0:\r\n print(\"Fizz\")\r\n continue\r\n\r\n print(i)\r\n\r\n print(\"And that's Fizzbuzz!\\nTo rerun, press Enter. To exit, type anything and press Enter.\")\r\n if input(\"FizzBuzz >>> \") == \"\":\r\n continue\r\n else:\r\n return\r\n","repo_name":"swarnab1/JPyScripts","sub_path":"UtilitiesModules/fizzbuzz.py","file_name":"fizzbuzz.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32096686162","text":"\"\"\"Scatter plot.\"\"\"\nfrom bokeh.models import ColumnDataSource, HoverTool, Range1d\nfrom bokeh.plotting import figure\n\nfrom plotski.base import Plot\n\n\nclass PlotScatter(Plot):\n \"\"\"Scatter plot.\"\"\"\n\n # Data parameters\n DATA_KEYS = (\"x\", \"y\")\n TOOLS = (\"pan, xpan, xbox_zoom, box_zoom, crosshair, reset\",)\n ACTIVE_DRAG = \"xbox_zoom\"\n\n # Defaults\n WIDTH = 800\n HEIGHT = 400\n\n def __init__(\n self,\n output_dir: str,\n source: ColumnDataSource,\n x_axis_label: str = \"x\",\n y_axis_label: str = \"y\",\n title: str = \"Scatter\",\n plot_type: str = \"Scatter\",\n initialize: bool = True,\n **kwargs,\n ):\n Plot.__init__(\n self,\n output_dir,\n source,\n x_axis_label,\n y_axis_label,\n title=title,\n plot_type=plot_type,\n initialize=initialize,\n **kwargs,\n )\n\n def plot(self):\n \"\"\"Generate main plot.\"\"\"\n label = self.kwargs.get(\"label\", \"\")\n scatter = self.figure.scatter(\n x=\"x\",\n y=\"y\",\n source=self.source,\n name=self.plot_type,\n )\n if label:\n scatter.legend_label = label\n self.plots[scatter.id] = scatter\n\n def get_figure(self):\n \"\"\"Get figure.\"\"\"\n return figure(tools=self.kwargs[\"tools\"], active_drag=self.kwargs[\"active_drag\"])\n\n def set_hover(self):\n \"\"\"Set hover.\"\"\"\n self.figure.add_tools(\n HoverTool(\n show_arrow=True,\n tooltips=[(f\"{self.x_axis_label}\", \"@x\"), (f\"{self.y_axis_label}\", \"@y\")],\n mode=\"vline\",\n # names=[self.plot_type],\n )\n )\n\n def set_ranges(self, **kwargs):\n \"\"\"Set x/y-axis ranges.\"\"\"\n src = self.source.data\n x_range = 
self.kwargs.get(\"x_range\", (min(src[\"x\"]), max(src[\"x\"]) * 1.05))\n y_range = self.kwargs.get(\"y_range\", (min(src[\"y\"]), max(src[\"y\"]) * 1.05))\n self.figure.x_range = Range1d(*x_range)\n self.figure.y_range = Range1d(*y_range)\n","repo_name":"lukasz-migas/plotski","sub_path":"src/plotski/scatter.py","file_name":"scatter.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25225519694","text":"import json\nimport logging\nimport mock\nimport tempfile\n\nfrom cuckoo.core.database import Database\nfrom cuckoo.core.log import logger\nfrom cuckoo.core.startup import (\n init_logging, init_logfile, init_console_logging, init_yara\n)\nfrom cuckoo.main import cuckoo_create, main\nfrom cuckoo.misc import set_cwd, cwd\n\ndb = Database()\n\ndef reset_logging():\n \"\"\"Reset the logging module to its initial state so that we can\n re-register all kinds of logging logic for unit testing purposes.\"\"\"\n logging.root = logging.RootLogger(logging.WARNING)\n logging.Logger.root = logging.root\n logging.Logger.manager = logging.Manager(logging.Logger.root)\n\ndef test_init_logging():\n set_cwd(tempfile.mkdtemp())\n cuckoo_create()\n init_logging(logging.DEBUG)\n\ndef test_logger():\n set_cwd(tempfile.mkdtemp())\n cuckoo_create()\n init_logfile(\"cuckoo.json\")\n\n with mock.patch(\"time.time\") as p:\n p.return_value = 1484232001\n logger(\"test %s\", \"message\", action=\"a\", status=\"b\")\n\n assert json.load(open(cwd(\"log\", \"cuckoo.json\"), \"rb\")) == {\n \"asctime\": mock.ANY,\n \"action\": \"a\",\n \"level\": \"info\",\n \"message\": \"test message\",\n \"status\": \"b\",\n \"task_id\": None,\n \"time\": 1484232001,\n }\n\ndef test_logging():\n set_cwd(tempfile.mkdtemp())\n cuckoo_create()\n init_logfile(\"cuckoo.json\")\n\n with mock.patch(\"time.time\") as p:\n p.return_value = 1484232002\n log = logging.getLogger(\"test.module\")\n log.warning(\"test %s\", \"message2\", extra={\n \"action\": \"a\", \"status\": \"b\",\n })\n\n assert json.load(open(cwd(\"log\", \"cuckoo.json\"), \"rb\")) == {\n \"asctime\": mock.ANY,\n \"action\": \"a\",\n \"level\": \"warning\",\n \"message\": \"test message2\",\n \"status\": \"b\",\n \"task_id\": None,\n \"time\": 1484232002,\n }\n\ndef test_process_json_logging():\n set_cwd(tempfile.mkdtemp())\n cuckoo_create()\n init_yara()\n init_logfile(\"process-p0.json\")\n\n def process_tasks(instance, maxcount, timeout):\n logger(\"foo bar\", action=\"hello.world\", status=\"success\")\n\n with mock.patch(\"cuckoo.main.Database\"):\n with mock.patch(\"cuckoo.main.process_tasks\") as p1:\n with mock.patch(\"time.time\") as p2:\n p1.side_effect = process_tasks\n p2.return_value = 1484232003\n main.main(\n (\"--cwd\", cwd(), \"process\", \"p0\"), standalone_mode=False\n )\n\n assert json.load(open(cwd(\"log\", \"process-p0.json\"), \"rb\")) == {\n \"asctime\": mock.ANY,\n \"action\": \"hello.world\",\n \"level\": \"info\",\n \"message\": \"foo bar\",\n \"status\": \"success\",\n \"task_id\": None,\n \"time\": 1484232003,\n }\n\ndef test_init_logging_info(capsys):\n set_cwd(tempfile.mkdtemp())\n cuckoo_create()\n\n reset_logging()\n init_console_logging(logging.WARNING)\n init_logging(logging.WARNING)\n\n log = logging.getLogger(\"testing\")\n log.debug(\"debug test\", extra={\n \"action\": \"foo\",\n \"status\": \"bar\",\n })\n log.info(\"info test\", extra={\n \"action\": \"foo\",\n \"status\": \"bar\",\n })\n log.warning(\"warning test\", extra={\n 
\"action\": \"foo\",\n \"status\": \"bar\",\n })\n\n buf = open(cwd(\"log\", \"cuckoo.log\")).read()\n assert \"debug test\" not in buf\n assert \"info test\" not in buf\n assert \"warning test\" in buf\n\n buf = open(cwd(\"log\", \"cuckoo.json\")).read()\n assert \"debug test\" in buf\n assert \"info test\" in buf\n assert \"warning test\" in buf\n\n _, buf = capsys.readouterr()\n assert \"debug test\" not in buf\n assert \"info test\" not in buf\n assert \"warning test\" in buf\n\ndef test_init_console_logging(capsys):\n set_cwd(tempfile.mkdtemp())\n cuckoo_create()\n\n reset_logging()\n init_console_logging(logging.DEBUG)\n\n log = logging.getLogger(\"console-testing\")\n log.debug(\"this is a test\")\n\n _, buf = capsys.readouterr()\n assert \"console-testing\" in buf\n assert \"this is a test\" in buf\n\ndef test_log_error_action():\n set_cwd(tempfile.mkdtemp())\n cuckoo_create()\n db.connect()\n\n reset_logging()\n init_console_logging(logging.DEBUG)\n\n task_id = db.add_path(__file__)\n assert db.view_errors(task_id) == []\n\n logging.getLogger(__name__).error(\"message1\", extra={\n \"error_action\": \"erroraction\",\n \"task_id\": task_id,\n })\n\n logging.getLogger(__name__).error(\"message2\", extra={\n \"task_id\": task_id,\n })\n\n errors = db.view_errors(task_id)\n assert len(errors) == 2\n assert errors[0].message == \"message1\"\n assert errors[0].action == \"erroraction\"\n assert errors[1].message == \"message2\"\n assert errors[1].action is None\n","repo_name":"cuckoosandbox/cuckoo","sub_path":"tests/test_log.py","file_name":"test_log.py","file_ext":"py","file_size_in_byte":4735,"program_lang":"python","lang":"en","doc_type":"code","stars":5316,"dataset":"github-code","pt":"61"} +{"seq_id":"2329689540","text":"from string import lower\nimport traceback\n\nfrom mangrove.datastore.documents import EnrichedSurveyResponseDocument\nfrom mangrove.datastore.entity import by_short_code, Contact\nfrom mangrove.errors.MangroveException import DataObjectNotFound\nfrom mangrove.form_model.field import DateField, SelectField, UniqueIdField\n\n\nclass EnrichedSurveyResponseBuilder(object):\n def __init__(self, dbm, survey_response, form_model, additional_details, logger=None, ds_mobile_number=None):\n self.ds_mobile_number = ds_mobile_number\n self.dbm = dbm\n self.additional_details = additional_details\n self.survey_response = survey_response\n self.form_model = form_model\n self.logger = logger\n self.values_lower_case_dict = LowerCaseKeyDict(self.survey_response.values)\n\n\n def _values(self):\n values = {}\n if self.survey_response.status:\n for field in self.form_model.fields:\n answer_dictionary = self._create_answer_dictionary(field)\n values.update({lower(field.code): answer_dictionary})\n else:\n values = self.survey_response.values\n return values\n\n def feed_document(self):\n status = 'success' if self.survey_response.status else 'error'\n\n return EnrichedSurveyResponseDocument(self.survey_response.uuid, self.survey_response.modified,\n self.survey_response.channel,\n self.form_model.form_code, self.survey_response.form_model_revision,\n self._values(), status,\n self.survey_response.errors, self._data_sender(), self.additional_details,\n self.survey_response.is_void())\n\n\n def update_event_document(self, feeds_dbm):\n enriched_survey_response = get_feed_document_by_id(feeds_dbm, self.survey_response.uuid)\n self._update_feed_with_latest_info(enriched_survey_response)\n return enriched_survey_response\n\n def delete_feed_document(self, feeds_dbm):\n error = None\n 
try:\n enriched_survey_response = get_feed_document_by_id(feeds_dbm, self.survey_response.uuid)\n self._update_feed_with_latest_info(enriched_survey_response)\n enriched_survey_response.delete()\n feeds_dbm._save_document(enriched_survey_response)\n except Exception as e:\n error = 'error while deleting feed doc for %s \\n' % self.survey_response.uuid\n error += e.message + '\\n'\n error += traceback.format_exc()\n finally:\n return error\n\n def _update_feed_with_latest_info(self, enriched_survey_response):\n enriched_survey_response.update(self.feed_document())\n\n def _data_sender(self):\n try:\n data_sender = Contact.get(self.dbm, self.survey_response.owner_uid)\n #todo Do we need to store datasender question code information in enriched survey response?\n return self._get_data_sender_info_dict(data_sender, '')\n except:\n return {'id': self.ds_mobile_number,\n 'last_name': None,\n 'mobile_number': self.ds_mobile_number,\n 'question_code': None,\n 'deleted': None\n }\n\n\n def _get_data_sender_info_dict(self, data_sender, question_code):\n return {'id': data_sender.short_code,\n 'last_name': data_sender.data['name']['value'],\n 'mobile_number': data_sender.data['mobile_number']['value'],\n 'question_code': question_code,\n 'deleted': data_sender.is_void()\n }\n\n def _update_entity_answer_in_dictionary(self, answer_dictionary, value, unique_id_type):\n answer_dictionary.update({'is_entity_question': 'true'})\n if self.form_model.entity_type != [\"reporter\"]:\n try:\n subject = by_short_code(self.dbm, value, [unique_id_type])\n answer_dictionary.update(\n {'answer': {'id': value, 'name': subject.data['name']['value'], 'deleted': False}})\n except DataObjectNotFound:\n answer_dictionary.update(\n {'answer': {'id': value, 'name': '', 'deleted': True}})\n\n\n def _create_answer_dictionary(self, field):\n answer_dictionary = {}\n value = self.values_lower_case_dict.get(field.code)\n answer_dictionary.update({'label': field.label})\n answer_dictionary.update({'type': field.type})\n answer_dictionary.update({'answer': value})\n if isinstance(field, DateField):\n answer_dictionary.update({'format': field.date_format})\n if isinstance(field, SelectField):\n selected = self._select_field_values(answer_dictionary.get('answer'), field)\n answer_dictionary.update({'answer': selected})\n if isinstance(field, UniqueIdField):\n answer_dictionary.update({'unique_id_type': field.unique_id_type})\n self._update_entity_answer_in_dictionary(answer_dictionary, value, field.unique_id_type)\n #if field.code == self.form_model.entity_question.code:\n return answer_dictionary\n\n def _select_field_values(self, choices, field):\n\n # if field.has_other and isinstance(choices, list) and choices[0] == 'other':\n # return choices[1]\n\n choice_array = field.get_option_list(choices)\n value_array = field.get_option_value_list(choices)\n if len(choice_array) != len(value_array):\n return {}\n\n selected = {}\n for i in range(len(choice_array)):\n selected.update({choice_array[i]: value_array[i]})\n return selected\n\n def _option_value(self, field, value):\n for option in field.options:\n if option.get('val') == value:\n return option.get('text')\n return None\n\n\nclass LowerCaseKeyDict():\n def __init__(self, input_dict):\n self.dictionary = {}\n for key, value in input_dict.iteritems():\n self.dictionary.update({lower(key): value})\n\n def get(self, key):\n return None if key is None else self.dictionary.get(lower(key))\n\n\ndef get_feed_document_by_id(feed_dbm, survey_response_id):\n return 
feed_dbm._load_document(survey_response_id, EnrichedSurveyResponseDocument)\n","repo_name":"mangroveorg/mangrove","sub_path":"mangrove/feeds/enriched_survey_response.py","file_name":"enriched_survey_response.py","file_ext":"py","file_size_in_byte":6424,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"61"} +{"seq_id":"23876274813","text":"from machine import Pin, PWM\nimport time, sys\n\n# application states\nstate_beam_unbroken = 0\nstate_beam_broken = 1\n\n# musical notes\na_5 = 880\na_6 = 1760\n\nbeam = Pin(26, Pin.IN, Pin.PULL_DOWN)\n\nred = Pin(18, Pin.OUT)\namber = Pin(19, Pin.OUT)\ngreen = Pin(20, Pin.OUT)\nall_leds = [red, amber, green]\n\nbuzzer = PWM(Pin(13))\n\n\ndef set_all(leds_to_set, to_set, delay=None, reverse_order=False):\n if reverse_order:\n leds_to_set = reversed(leds_to_set)\n\n for led in leds_to_set:\n led.value(to_set)\n if delay:\n time.sleep(delay)\n\n\ndef blink(leds_to_blink, blink_duration=0.5, delay=None, alternate=False):\n set_all(leds_to_blink, 1, delay=delay)\n time.sleep(blink_duration)\n\n set_all(leds_to_blink, 0, delay=delay, reverse_order=alternate)\n time.sleep(blink_duration)\n \n\ndef play_note(note, duration=0.5, duty=1000):\n buzzer.freq(note)\n buzzer.duty_u16(duty)\n time.sleep(duration)\n buzzer.duty_u16(0)\n\n\ndef start_sequence():\n play_note(a_5, duration=0.5)\n time.sleep(0.5)\n play_note(a_5, duration=0.5)\n time.sleep(0.5)\n play_note(a_5)\n time.sleep(0.5)\n play_note(a_6)\n\n\nprint('Ready!')\nstart_sequence()\nprint('Go!')\n\nstart = time.time()\nstate = state_beam_unbroken\ncounter = 0\ngoal = 230\ngoal_perc = 0\n\nseconds_elapsed = 0\nwhile seconds_elapsed < 31:\n time.sleep(0.001)\n \n goal_perc = counter / goal\n if goal_perc < 1/3:\n set_all(all_leds, 0)\n elif goal_perc < 2/3:\n set_all([red], 1)\n set_all([amber, green], 0)\n elif goal_perc < 1:\n set_all([red, amber], 1)\n set_all([green], 0)\n else:\n set_all(all_leds, 1)\n \n if state == state_beam_unbroken:\n if beam.value() == 0:\n print('Beam broken')\n state = state_beam_broken\n counter += 1\n elif state == state_beam_broken:\n if beam.value() == 1:\n print('Beam unbroken')\n state = state_beam_unbroken\n \n seconds_elapsed = time.time() - start\n \nprint(f'Finished with {counter} taps.')\nfor _ in range(5):\n blink(all_leds)\n","repo_name":"WalternativE/maker_advent_calendar","sub_path":"twelve_days_of_pi/day_10.py","file_name":"day_10.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"54853317","text":"from turtle import Screen\nfrom paddle import Paddle\nfrom ball import Ball\nfrom scoreboard import Scoreboard\nimport time\n\n\nSCREEN_WIDTH = 1000\nSCREEN_HEIGHT = 800\n\nscreen = Screen()\nscreen.bgcolor('black')\nscreen.setup(width=SCREEN_WIDTH, height=SCREEN_HEIGHT)\nscreen.tracer(0)\n\n\nright_paddle = Paddle((480, 0))\nleft_paddle = Paddle((-488, 0))\nball = Ball()\nscoreboard = Scoreboard()\n\n\nscreen.onkey(right_paddle.up, \"Up\")\nscreen.onkey(right_paddle.down, \"Down\")\nscreen.onkey(left_paddle.up, \"w\")\nscreen.onkey(left_paddle.down, \"s\")\n\nscreen.listen()\n\nwhile True:\n screen.update()\n ball.move()\n time.sleep(ball.ball_speed)\n # time.sleep(1)\n\n if ball.ycor() > 380 or ball.ycor() < -375:\n ball.bounce_wall()\n\n if (ball.xcor() > 455 and ball.distance(right_paddle) < 84) or (ball.xcor() < -463 and ball.distance(left_paddle) < 84):\n ball.bounce_paddle()\n\n if ball.xcor() > 465:\n 
ball.reset_ball_pos()\n scoreboard.increase_lscore()\n\n if ball.xcor() < -473:\n ball.reset_ball_pos()\n scoreboard.increase_rscore()\n\nscreen.exitonclick()\n","repo_name":"not-lucky/100_Days_of_Code_-_The_Complete_Python_Pro_Bootcamp_for_2022","sub_path":"Day 22 - Intermediate - Build Pong_ The Famous Arcade Game/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15879910869","text":"import sys\nimport os\nimport subprocess\n\nSOURCE_FILE = sys.argv[-2]\nSOURCE_ROW = sys.argv[-1]\n\nBASE_URL = \"https://developer.blender.org/diffusion/B/browse\"\n\n\ndef main():\n dirname, _filename = os.path.split(SOURCE_FILE)\n\n process = subprocess.Popen(\n [\"git\", \"rev-parse\", \"--symbolic-full-name\", \"--abbrev-ref\",\n \"@{u}\"], stdout=subprocess.PIPE, cwd=dirname, universal_newlines=True)\n output = process.communicate()[0]\n branchname = output.rstrip().rsplit('/', 1)[-1]\n\n process = subprocess.Popen(\n [\"git\", \"rev-parse\", \"--show-toplevel\"],\n stdout=subprocess.PIPE, cwd=dirname, universal_newlines=True)\n output = process.communicate()[0]\n toplevel = output.rstrip()\n filepath = os.path.relpath(SOURCE_FILE, toplevel)\n\n url = '/'.join([BASE_URL, branchname, filepath]) + \"$\" + SOURCE_ROW\n\n print(url)\n\n # Maybe handy, but also annoying?\n if \"--browse\" in sys.argv:\n import webbrowser\n webbrowser.open(url)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"blender/blender","sub_path":"tools/utils_ide/qtcreator/externaltools/qtc_blender_diffusion.py","file_name":"qtc_blender_diffusion.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":10105,"dataset":"github-code","pt":"61"} +{"seq_id":"41572563491","text":"# assets\nFILE = 'file'\nDIRECTORY = 'directory'\n\n# operators\nSCANNER = 'scanner'\n\n# operations\nSCAN = 'scan'\nHSCAN = 'high.level.scan'\nDEEP = 'deep.scan'\nUSCAN = 'update.scan'\n\nCLEAN = 'clean'\nREAD = 'read'\nMATCH = 'match'\nANALYZE = 'analyze'\nFIX = 'fix'\nREPORT = 'report'\nREQUESTS = 'requests'\nSHUTDOWN = 'shutdown'\nSTARTUP = 'startup'\nSYNC = 'sync'\nCALC = 'calc'\nSYNC = 'sync'\nSLEEP = 'SLEEP'\n\nCOMMAND = 'COMMAND'\n\n# reader constants\n\nMAX_DATA_LENGTH = 512\nKNOWN = 'known_fields'\nMETADATA = 'file_attribute'\n\n# states\nINITIAL = \"initial\"\nTERMINAL = 'terminal'\n\nSCAN_DISCOVER = \"discover\"\nSCAN_UPDATE = 'update'\nSCAN_MONITOR = 'monitor'\n","repo_name":"markpippins/mildred","sub_path":"python/server/const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39654402559","text":"import unittest\n\nfrom amusement.show import Show\n\nclass ShowTestCase(unittest.TestCase):\n \n def test_showtimeKey(self):\n show_obj = Show()\n self.assertEqual('showtimes' in show_obj.attributes(), True)\n\n def test_addTime(self):\n show_obj = Show()\n show_obj.addTime('12:00 PM')\n self.assertEqual(len(show_obj.getTimes()), 1)\n\n","repo_name":"rambleraptor/amusement","sub_path":"amusement/test/generic/test_show.py","file_name":"test_show.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"61"} +{"seq_id":"6610897105","text":"from pathlib import Path\n\nday_num = 1\ninput_file = Path.cwd() / '2022' / 'inputs' / f\"day{day_num}.txt\"\n\nwith 
open(input_file) as f:\n elves = f.read().split('\\n\\n')\n elf_cals = []\n for elf in elves:\n elf_cals.append(sum([int(item) for item in elf.split('\\n')]))\n\n\ndef part_1():\n return max(elf_cals)\n\n\ndef part_2():\n elf_cals.sort(reverse=True)\n return sum(elf_cals[:3])\n\n\nif __name__ == '__main__':\n print(part_1())\n print(part_2())\n","repo_name":"lcirvine/advent_of_code","sub_path":"2022/day1.py","file_name":"day1.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72740828354","text":"import season\nimport json\nimport datetime\n\ndef spawner(code, namespace, logger, **kwargs):\n fn = {'__file__': namespace, '__name__': namespace, 'print': logger, 'season': season}\n for key in kwargs: fn[key] = kwargs[key]\n exec(compile(code, namespace, 'exec'), fn)\n return fn\n\nclass Controller(wiz.controller(\"base\")):\n def __startup__(self, wiz):\n super().__startup__(wiz)\n print(wiz.request.segment)\n segment = wiz.request.segment\n self.api(segment.app_unique_id, segment.app_component, segment.fnname)\n wiz.response.json(wiz.request.segment)\n \n def logger(self, tag=None, log_color=94):\n class logger:\n def __init__(self, tag, log_color, wiz):\n self.tag = tag\n self.log_color = log_color\n self.wiz = wiz\n\n def log(self, *args):\n tag = self.tag\n log_color = self.log_color\n wiz = self.wiz\n \n if tag is None: tag = \"undefined\"\n tag = \"[wiz]\" + tag\n \n args = list(args)\n for i in range(len(args)): \n args[i] = str(args[i])\n timestamp = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.gmtime())\n logdata = f\"\\033[{log_color}m[{timestamp}]{tag}\\033[0m \" + \" \".join(args)\n print(logdata)\n\n # wiz.socketio.emit(\"debug\", logdata + \"\\n\", namespace=\"/wiz\", broadcast=True)\n return logger(tag, log_color, self).log\n\n def api(self, app_unique_id, app_component, fnname):\n data = wiz.model(\"react/storage\").use(app_unique_id)\n filename = f\"{app_component}.py\"\n if data.isfile(filename) == False:\n wiz.response.status(404)\n\n api = data.read.text(filename)\n\n logger = self.logger(f\"[wiz-react][api][{app_component}]\", 93)\n apifn = spawner(api, 'season.wiz.app.api', logger, wiz=wiz)\n \n if fnname not in apifn:\n wiz.response.status(404)\n \n apifn[fnname](wiz)\n","repo_name":"season-framework/wiz-react","sub_path":"interfaces/controller/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18508194139","text":"# Create your views here.\nfrom rest_framework import mixins, renderers, parsers, permissions, status\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import GenericViewSet\n\nfrom apps.prescriptions.models import Prescriptions\nfrom apps.prescriptions.serializers import (\n PrecriptionsSerializers,\n ListPrecriptionsSerializers)\nfrom apps.prescriptions.services import RequestEndPoint\n\nrequest_endpoint = RequestEndPoint()\n\nclass PostUserPrescriptionsViewSet(mixins.CreateModelMixin, GenericViewSet):\n queryset = Prescriptions.objects.all()\n serializer_class = PrecriptionsSerializers\n renderer_classes = [renderers.StaticHTMLRenderer, renderers.JSONRenderer]\n parser_classes = (parsers.MultiPartParser, parsers.JSONParser,)\n permission_classes = (permissions.AllowAny,)\n\n def create(self, request, *args, **kwargs):\n if request_endpoint.patients(id=request.data['patient']['id']) == \"Not found\":\n 
not_found = dict(\n error=dict(\n message=\"patient not found\",\n code=\"03\"\n )\n\n )\n return Response(not_found, status.HTTP_404_NOT_FOUND)\n\n if request_endpoint.physicians(id=request.data['physician']['id']) == \"Not found\":\n not_found = dict(\n error=dict(\n message=\"physician not found\",\n code=\"02\"\n )\n\n )\n return Response(not_found, status.HTTP_404_NOT_FOUND)\n\n data = dict(\n clinic=request.data['clinic']['id'],\n physician=request.data['physician']['id'],\n patient=request.data['patient']['id'],\n text=request.data['text'],\n )\n\n _prescription = self.queryset.create(**data)\n\n return Response(request.data, status.HTTP_201_CREATED)\n\n\nclass ListUserPrescriptionsViewSet(mixins.ListModelMixin, GenericViewSet):\n queryset = Prescriptions.objects.all()\n serializer_class = ListPrecriptionsSerializers\n renderer_classes = [renderers.StaticHTMLRenderer, renderers.JSONRenderer]\n parser_classes = (parsers.MultiPartParser, parsers.JSONParser,)\n permission_classes = (permissions.AllowAny,)\n","repo_name":"andreemidio/iclinic","sub_path":"apps/prescriptions/viewsets.py","file_name":"viewsets.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"21463710587","text":"from django.db.models import Q, F\n\nfrom .aircraft_mod_models import AircraftBucket, SortieAugmentation, AircraftKillboard\nfrom .variant_utils import has_juiced_variant, has_bomb_variant, get_sortie_type\nfrom .ammo_file_manager import write_breakdown_line, OFFENSIVE_BREAKDOWN, DEFENSIVE_BREAKDOWN\nfrom .apps import IGNORE_AI_KILLS_STREAKS\n\nfrom stats.models import Sortie, LogEntry, Player, Object\nfrom stats.logger import logger\n\n\ndef process_aircraft_stats(sortie, player=None, is_retro_compute=False):\n \"\"\"\n Takes a Sortie, and increments the corresponding data in AircraftBucket.\n\n Note that there might be several aircraft buckets.\n\n Passing sortie.player into player will update the AircraftBuckets with player.\n \"\"\"\n if not sortie.aircraft.cls_base == \"aircraft\":\n return\n\n bucket = (AircraftBucket.objects.get_or_create(tour=sortie.tour, aircraft=sortie.aircraft,\n filter_type='NO_FILTER', player=player))[0]\n\n has_subtype = has_juiced_variant(bucket.aircraft) or has_bomb_variant(bucket.aircraft)\n\n process_bucket(bucket, sortie, has_subtype, False, is_retro_compute)\n\n if has_subtype:\n filtered_bucket = (AircraftBucket.objects.get_or_create(tour=sortie.tour, aircraft=sortie.aircraft,\n filter_type=get_sortie_type(sortie), player=player))[0]\n process_bucket(filtered_bucket, sortie, True, True, is_retro_compute)\n\n\ndef process_bucket(bucket, sortie, has_subtype, is_subtype, is_retro_compute):\n if not sortie.is_not_takeoff:\n bucket.total_sorties += 1\n bucket.total_flight_time += sortie.flight_time\n bucket.kills += sortie.ak_total\n bucket.ground_kills += sortie.gk_total\n bucket.assists += sortie.ak_assist\n bucket.aircraft_lost += 1 if sortie.is_lost_aircraft else 0\n bucket.score += sortie.score\n bucket.deaths += 1 if sortie.is_dead else 0\n bucket.captures += 1 if sortie.is_captured and not sortie.is_dead else 0\n bucket.bailouts += 1 if sortie.is_bailout else 0\n bucket.ditches += 1 if sortie.is_ditched else 0\n bucket.landings += 1 if sortie.is_landed else 0\n bucket.in_flight += 1 if sortie.is_in_flight else 0\n bucket.crashes += 1 if sortie.is_crashed else 0\n bucket.shotdown += 1 if sortie.is_shotdown else 0\n bucket.coalition = sortie.coalition\n 
increment_ammo(bucket, sortie)\n if sortie.damage:\n bucket.sorties_plane_was_hit += 1\n bucket.plane_survivability_counter += 1 if not sortie.is_lost_aircraft else 0\n bucket.pilot_survivability_counter += 1 if not sortie.is_relive else 0\n for key in sortie.killboard_pvp:\n value = sortie.killboard_pvp[key]\n if key in bucket.killboard_planes:\n bucket.killboard_planes[key] += value\n else:\n bucket.killboard_planes[key] = value\n for key in sortie.killboard_pve:\n value = sortie.killboard_pve[key]\n if key in bucket.killboard_ground:\n bucket.killboard_ground[key] += value\n else:\n bucket.killboard_ground[key] = value\n\n from .background_jobs.run_background_jobs import retro_streak_compute_running\n if bucket.player is not None and ((not retro_streak_compute_running()) or is_retro_compute):\n process_streaks_and_best_sorties(bucket, sortie)\n\n process_log_entries(bucket, sortie, has_subtype, is_subtype)\n\n sortie_augmentation = (SortieAugmentation.objects.get_or_create(sortie=sortie))[0]\n if not bucket.player:\n sortie_augmentation.sortie_stats_processed = True\n else:\n sortie_augmentation.player_stats_processed = True\n sortie_augmentation.fixed_aa_accident_stats = True\n sortie_augmentation.fixed_doubled_turret_killboards = True\n sortie_augmentation.added_player_kb_losses = True\n sortie_augmentation.fixed_accuracy = True\n sortie_augmentation.recomputed_ammo_breakdown = True\n sortie_augmentation.recomputed_ammo_breakdown_2 = True\n sortie_augmentation.fixed_captures = True\n\n sortie_augmentation.save()\n\n\ndef increment_ammo(bucket, sortie):\n if sortie.is_bailout:\n return # Bug work around. Bailout results in all ammo being used according to logs.\n\n takeoff_count = LogEntry.objects.filter(\n act_sortie_id=sortie.id,\n type='takeoff'\n ).count()\n if takeoff_count > 1:\n return # Bug work around. 
Rearming (and as such taking off twice) resets ammo used according to logs.\n\n if sortie.ammo['used_cartridges']:\n bucket.ammo_shot += sortie.ammo['used_cartridges']\n if sortie.ammo['hit_bullets']:\n bucket.ammo_hit += sortie.ammo['hit_bullets']\n if sortie.ammo['used_bombs']:\n bucket.bomb_rocket_shot += sortie.ammo['used_bombs']\n if sortie.ammo['hit_bombs']:\n bucket.bomb_rocket_hit += sortie.ammo['hit_bombs']\n if sortie.ammo['used_rockets']:\n bucket.bomb_rocket_shot += sortie.ammo['used_rockets']\n if sortie.ammo['hit_rockets']:\n bucket.bomb_rocket_hit += sortie.ammo['hit_rockets']\n\n\ndef decrement_ammo_bugged(bucket, sortie):\n \"\"\"\n For retroactive fixing, this reverses increment_ammo on a given sortie.\n \"\"\"\n takeoff_count = LogEntry.objects.filter(\n act_sortie_id=sortie.id,\n type='takeoff'\n ).count()\n\n if takeoff_count <= 1 and not sortie.is_bailout:\n return\n\n if sortie.ammo['used_cartridges']:\n bucket.ammo_shot -= sortie.ammo['used_cartridges']\n if sortie.ammo['hit_bullets']:\n bucket.ammo_hit -= sortie.ammo['hit_bullets']\n if sortie.ammo['used_bombs']:\n bucket.bomb_rocket_shot -= sortie.ammo['used_bombs']\n if sortie.ammo['hit_bombs']:\n bucket.bomb_rocket_hit -= sortie.ammo['hit_bombs']\n if sortie.ammo['used_rockets']:\n bucket.bomb_rocket_shot -= sortie.ammo['used_rockets']\n if sortie.ammo['hit_rockets']:\n bucket.bomb_rocket_hit -= sortie.ammo['hit_rockets']\n\n\ndef process_log_entries(bucket, sortie, has_subtype, is_subtype, stop_update_primary_bucket=False,\n compute_only_pure_killboard_stats=False, do_not_use_pilot_kbs=False):\n events = (LogEntry.objects\n .select_related('act_object', 'act_sortie', 'cact_object', 'cact_sortie')\n .filter(Q(act_sortie_id=sortie.id),\n Q(type='shotdown') | Q(type='killed') | Q(type='damaged'),\n act_object__cls_base='aircraft', cact_object__cls_base='aircraft',\n # Disregard AI sorties\n act_sortie_id__isnull=False, cact_sortie_id__isnull=False, )\n # Disregard friendly fire incidents.\n .exclude(act_sortie__coalition=F('cact_sortie__coalition')))\n\n enemies_damaged = set()\n enemies_shotdown = set()\n enemies_killed = set()\n\n for event in events:\n enemy_sortie = Sortie.objects.filter(id=event.cact_sortie_id).get()\n\n enemy_plane_sortie_pair = (event.cact_object, enemy_sortie)\n if event.type == 'damaged':\n enemies_damaged.add(enemy_plane_sortie_pair)\n elif event.type == 'shotdown':\n enemies_shotdown.add(enemy_plane_sortie_pair)\n elif event.type == 'killed':\n enemies_killed.add(enemy_plane_sortie_pair)\n\n use_pilot_kbs = bucket.player is None\n if do_not_use_pilot_kbs:\n use_pilot_kbs = False\n enemy_buckets, kbs = update_from_entries(bucket, enemies_damaged, enemies_killed, enemies_shotdown,\n has_subtype, is_subtype, use_pilot_kbs,\n update_primary_bucket=not stop_update_primary_bucket)\n\n for killboard in kbs.values():\n killboard.reset_kills_turret_bug = True\n killboard.reset_player_loses = True\n killboard.save()\n for enemy_bucket in enemy_buckets.values():\n enemy_bucket.update_derived_fields()\n enemy_bucket.save()\n\n # LogEntry does not store what your turrets did. 
Only what turrets hit you.\n # So we parse all turret encounters from the perspective of the turret's plane.\n turret_events = (LogEntry.objects\n .select_related('act_object', 'act_sortie', 'cact_object', 'cact_sortie')\n .filter(Q(cact_sortie_id=sortie.id),\n Q(type='shotdown') | Q(type='killed') | Q(type='damaged'),\n act_object__cls='aircraft_turret', cact_object__cls_base='aircraft',\n # Filter out AI kills from turret.\n cact_sortie_id__isnull=False)\n\n # Disregard friendly fire incidents.\n .exclude(extra_data__is_friendly_fire=True))\n\n enemies_damaged = set()\n enemies_shotdown = set()\n enemies_killed = set()\n\n if not compute_only_pure_killboard_stats:\n process_aa_accident_death(bucket, sortie)\n if 'ammo_breakdown' in sortie.ammo:\n process_ammo_breakdown(bucket, sortie, is_subtype)\n\n if not stop_update_primary_bucket:\n bucket.update_derived_fields()\n bucket.save()\n\n if len(turret_events) > 0 and not is_subtype:\n cache_turret_buckets = dict()\n\n for event in turret_events:\n turret_name = event.act_object.name\n if turret_name not in cache_turret_buckets:\n log_name = event.act_object.log_name\n turret_bucket = turret_to_aircraft_bucket(turret_name, log_name, bucket.tour)\n if turret_bucket is None:\n continue\n cache_turret_buckets[turret_name] = turret_bucket\n if event.type == 'damaged':\n enemies_damaged.add(turret_name)\n elif event.type == 'shotdown':\n enemies_shotdown.add(turret_name)\n elif event.type == 'killed':\n enemies_killed.add(turret_name)\n\n for turret_name in cache_turret_buckets:\n turret_bucket = cache_turret_buckets[turret_name]\n enemy_damaged = set()\n if turret_name in enemies_damaged:\n enemy_damaged.add((bucket.aircraft, sortie))\n enemy_shotdown = set()\n if turret_name in enemies_shotdown:\n enemy_shotdown.add((bucket.aircraft, sortie))\n enemy_killed = set()\n if turret_name in enemies_killed:\n enemy_killed.add((bucket.aircraft, sortie))\n\n update_primary_bucket = bucket.player is None\n if stop_update_primary_bucket:\n update_primary_bucket = False\n use_pilot_kbs = bucket.player is not None\n if do_not_use_pilot_kbs:\n use_pilot_kbs = False\n\n buckets, kbs = update_from_entries(turret_bucket, enemy_damaged, enemy_killed, enemy_shotdown,\n # We can't determine the subtype of the bomber\n # Edge case: Halberstadt. 
It is turreted and has a jabo variant.\n # This should be fixed somehow in the long run.\n False, False, use_pilot_kbs, update_primary_bucket)\n if not stop_update_primary_bucket:\n turret_bucket.update_derived_fields()\n turret_bucket.save()\n for bucket in buckets.values():\n bucket.update_derived_fields()\n bucket.save()\n\n for kb in kbs.values():\n kb.reset_kills_turret_bug = True\n kb.reset_player_loses = True\n kb.save()\n\n\ndef update_from_entries(bucket, enemies_damaged, enemies_killed, enemies_shotdown, has_subtype, is_subtype,\n use_pilot_kbs, update_primary_bucket):\n cache_kb = dict()\n cache_enemy_buckets_kb = dict() # Helper cache needed in order to find buckets (quickly) inside get_killboard.\n for damaged_enemy in enemies_damaged:\n enemy_sortie = damaged_enemy[1]\n kbs = get_killboards(damaged_enemy, bucket, cache_kb, cache_enemy_buckets_kb, use_pilot_kbs,\n update_primary_bucket)\n for kb in kbs:\n update_damaged_enemy(bucket, damaged_enemy, enemies_killed, enemies_shotdown, enemy_sortie, kb,\n update_primary_bucket)\n\n cache_enemy_buckets = dict()\n for shotdown_enemy in enemies_shotdown:\n enemy_sortie = shotdown_enemy[1]\n enemy_sortie_type = get_sortie_type(enemy_sortie)\n\n subtype_enemy_bucket_key = (bucket.tour, shotdown_enemy[0], enemy_sortie_type)\n subtype_enemy_bucket = ensure_bucket_in_cache(cache_enemy_buckets, subtype_enemy_bucket_key, None)\n if bucket.player is None and update_primary_bucket:\n update_elo(bucket, cache_enemy_buckets, enemy_sortie_type, has_subtype, is_subtype, shotdown_enemy,\n subtype_enemy_bucket)\n\n kbs = get_killboards(shotdown_enemy, bucket, cache_kb, cache_enemy_buckets_kb, use_pilot_kbs,\n update_primary_bucket)\n for kb in kbs:\n if kb.aircraft_1.aircraft == bucket.aircraft:\n kb.aircraft_1_shotdown += 1\n else:\n kb.aircraft_2_shotdown += 1\n for killed_enemy in enemies_killed:\n if update_primary_bucket:\n bucket.pilot_kills += 1\n kbs = get_killboards(killed_enemy, bucket, cache_kb, cache_enemy_buckets_kb, use_pilot_kbs,\n update_primary_bucket)\n for kb in kbs:\n if kb.aircraft_1.aircraft == bucket.aircraft:\n kb.aircraft_1_kills += 1\n else:\n kb.aircraft_2_kills += 1\n return cache_enemy_buckets, cache_kb\n\n\ndef update_elo(bucket, cache_enemy_buckets, enemy_sortie_type, has_subtype, is_subtype, shotdown_enemy,\n subtype_enemy_bucket):\n \"\"\"\n # TODO: Refactor elo functions into new file.\n\n Computes changes to AircraftBucket.elo field. Note that Elo is zero-sum, thus two buckets must always be updated.\n\n There are in essence three cases for the Elo Update:\n\n Aircraft 1 and Aircraft 2 have no subtypes -> Just update elo directly.\n Aircraft 1 and 2 have subtypes: Main types update each other. 
Subtypes update each other.\n Aircraft 1 has subtypes, Aircraft 2 does not:\n Aircraft 1 main type and subtype updates directly from Aircraft 2 only type\n Aircraft 2 has \"half an encounter\" with aircraft 1 main type, and \"half an encounter\" with aircraft 1 subtype.\n \"\"\"\n if enemy_sortie_type == bucket.NO_FILTER: # No subtypes for enemy\n if not has_subtype:\n bucket.elo, subtype_enemy_bucket.elo = calc_elo(bucket.elo, subtype_enemy_bucket.elo)\n else:\n bucket.elo, new_elo = calc_elo(bucket.elo, subtype_enemy_bucket.elo)\n delta_elo = new_elo - subtype_enemy_bucket.elo # This is negative!\n # This elo will be touched twice: once in subtype, once in not-filtered type.\n # Hence take (approximately) the average delta.\n subtype_enemy_bucket.elo += round(delta_elo / 2)\n else: # Enemy has subtypes\n enemy_bucket_key = (bucket.tour, shotdown_enemy[0], bucket.NO_FILTER)\n enemy_bucket = ensure_bucket_in_cache(cache_enemy_buckets, enemy_bucket_key, None)\n\n if has_subtype:\n if is_subtype:\n bucket.elo, subtype_enemy_bucket.elo = calc_elo(bucket.elo, enemy_bucket.elo)\n else:\n bucket.elo, enemy_bucket.elo = calc_elo(bucket.elo, enemy_bucket.elo)\n else:\n first_new_elo, enemy_bucket.elo = calc_elo(bucket.elo, enemy_bucket.elo)\n second_new_elo, subtype_enemy_bucket.elo = calc_elo(bucket.elo, subtype_enemy_bucket.elo)\n\n first_delta_elo = first_new_elo - bucket.elo\n second_delta_elo = second_new_elo - bucket.elo\n bucket.elo = round(bucket.elo + first_delta_elo / 2 + second_delta_elo / 2)\n\n\ndef ensure_bucket_in_cache(cache_enemy_buckets, bucket_key, player):\n if bucket_key not in cache_enemy_buckets:\n cache_enemy_buckets[bucket_key] = (AircraftBucket.objects.get_or_create(\n tour=bucket_key[0], aircraft=bucket_key[1], filter_type=bucket_key[2], player=player))[0]\n\n return cache_enemy_buckets[bucket_key]\n\n\ndef update_damaged_enemy(bucket, damaged_enemy, enemies_killed, enemies_shotdown, enemy_sortie, kb,\n update_primary_bucket):\n if kb.aircraft_1.aircraft == bucket.aircraft:\n kb.aircraft_1_distinct_hits += 1\n if update_primary_bucket:\n bucket.distinct_enemies_hit += 1\n if enemy_sortie.is_shotdown:\n if update_primary_bucket:\n bucket.plane_lethality_counter += 1\n if damaged_enemy not in enemies_shotdown:\n kb.aircraft_1_assists += 1\n\n if enemy_sortie.is_dead:\n if update_primary_bucket:\n bucket.pilot_lethality_counter += 1\n if damaged_enemy not in enemies_killed:\n kb.aircraft_1_pk_assists += 1\n else:\n kb.aircraft_2_distinct_hits += 1\n if update_primary_bucket:\n bucket.distinct_enemies_hit += 1\n if enemy_sortie.is_shotdown:\n bucket.plane_lethality_counter += 1\n if damaged_enemy not in enemies_shotdown:\n kb.aircraft_2_assists += 1\n\n if enemy_sortie.is_dead:\n if update_primary_bucket:\n bucket.pilot_lethality_counter += 1\n if damaged_enemy not in enemies_killed:\n kb.aircraft_2_pk_assists += 1\n\n\ndef process_aa_accident_death(bucket, sortie):\n if not sortie.is_lost_aircraft:\n return\n\n types_damaged = list((LogEntry.objects\n .values_list('act_object__cls', flat=True)\n .filter(Q(type='shotdown') | Q(type='killed') | Q(type='destroyed'), cact_sortie=sortie)\n .order_by().distinct()))\n\n if len(types_damaged) == 0 or (len(types_damaged) == 1 and types_damaged[0] is None):\n bucket.aircraft_lost_to_accident += 1\n bucket.deaths_to_accident += 1 if sortie.is_relive else 0\n else:\n only_aa = True\n for type_damaged in types_damaged:\n if type_damaged and 'aa' not in type_damaged:\n only_aa = False\n\n if only_aa:\n bucket.aircraft_lost_to_aa += 1\n 
bucket.deaths_to_aa += 1 if sortie.is_relive else 0\n\n\ndef process_ammo_breakdown(bucket, sortie, is_subtype):\n # We only care about statistics like \"avg shots to kill\" or \"avg shots till our plane lost\".\n if not sortie.is_lost_aircraft:\n return\n\n # We only process Sorties where there was essentially a single source of damage.\n # Note: Planes also take damage when crashing into the ground. We ignore these sources of damage.\n # So to be more precise, we only want damage from exactly a single enemy aircraft/AA/Tank/object type.\n # I.e. \"Only took damage from a Spitfire Mk IX\" or \"Only took damage from a Flak 88\".\n if not sortie.ammo['ammo_breakdown']['dmg_from_one_source']:\n return\n\n enemy_objects = (LogEntry.objects\n .values_list('act_object', 'act_sortie')\n .filter(Q(cact_sortie_id=sortie.id),\n Q(type='shotdown') | Q(type='killed') | Q(type='damaged'),\n Q(act_object__cls_base='aircraft') | Q(act_object__cls_base='vehicle')\n | Q(act_object__cls__contains='tank') | Q(act_object__cls_base='turret'),\n # Disregard Sorties flown by AI\n cact_sortie_id__isnull=False)\n # Disregard sorties shotdown by AI plane.\n .exclude(Q(act_object__cls_base='aircraft') & Q(act_sortie_id__isnull=True))\n .order_by().distinct())\n\n if enemy_objects.count() != 1:\n if enemy_objects.count() > 1 and sortie.ammo['ammo_breakdown']['last_turret_account'] is not None:\n # We've been hit by a turret!\n # Check if we've been hit by multiple turrets of the same plane.\n # If so, continue - otherwise there is a bug in the sortie log where we throw out the data.\n # I.e. we got hit by an aircraft turret and the MGs of another plane (the MGs didn't cause any dmg)\n aircraft_hit_us = set()\n for enemy_object in enemy_objects:\n db_enemy_object = Object.objects.get(id=enemy_object[0])\n if db_enemy_object.cls != 'aircraft_turret':\n return\n aircraft = turret_to_aircraft_bucket(db_enemy_object.name, db_enemy_object.log_name, tour=bucket.tour)\n if aircraft is None:\n return\n aircraft_hit_us.add(aircraft.id)\n if len(aircraft_hit_us) != 1:\n return\n\n else:\n return\n # Something went wrong here. This is likely due to errors in the sortie logs.\n # I.e. \"Damage\" and \"Hits\" ATypes tell a different story.\n # According to \"Hits\", there should be one source of damage, according to \"Damage\" that isn't the case.\n # (Likely) Because the hits were so minor that they didn't register as damage.\n # Just in case, we're still throwing the data out, there will be more than enough left over.\n ammo_breakdown = sortie.ammo['ammo_breakdown']\n\n # TODO: At some point make this less hacky. Possibly derive other ammo from aircraft payload?\n # This is a totally band-aid solution.\n # The Tempest and spitfire destroys targets often in a single gun cycle, so the other Hispano ammo doesn't show up\n # in the ammo breakdown. 
This pollutes the data, especially since it happens often. So we manually add in the missing ammo type (HE or AP).\n    fill_in_ammo(ammo_breakdown, 'SHELL_ENG_20x110_AP', 'SHELL_ENG_20x110_HE')\n    # Same problem as above, but for MG 151/20 and MG 151/15.\n    fill_in_ammo(ammo_breakdown, 'SHELL_GER_20x82_AP', 'SHELL_GER_20x82_HE')\n    fill_in_ammo(ammo_breakdown, 'SHELL_GER_15x96_AP', 'SHELL_GER_15x96_HE')\n\n    # For ShVAKs: We keep it as is, since LA-5(FN) has mono-ammo belts.\n    # So even if another plane has a fluke like this, it does the same damage as when shot by LA-5 anyway.\n\n    enemy_object = enemy_objects[0][0]\n    enemy_sortie = enemy_objects[0][1]\n    db_enemy_object = Object.objects.get(id=enemy_object)\n    pilot_snipe = is_pilot_snipe(sortie)\n\n    bucket.increment_ammo_received(ammo_breakdown['total_received'], pilot_snipe)\n    if not bucket.player:\n        write_breakdown_line(bucket, ammo_breakdown['total_received'], DEFENSIVE_BREAKDOWN, db_enemy_object,\n                             pilot_snipe)\n\n    if is_subtype:\n        # Updates for enemy aircraft were done in main type.\n        return\n\n    if db_enemy_object.cls_base != 'aircraft' and db_enemy_object.cls != 'aircraft_turret':\n        return\n    if db_enemy_object.cls_base == 'aircraft' and not enemy_sortie:\n        return\n\n    base_bucket, db_sortie, filtered_bucket = ammo_breakdown_enemy_bucket(ammo_breakdown, bucket, db_enemy_object,\n                                                                          enemy_sortie)\n\n    if base_bucket is not None:\n        base_bucket.increment_ammo_given(ammo_breakdown['total_received'], pilot_snipe)\n        if not base_bucket.player:\n            write_breakdown_line(base_bucket, ammo_breakdown['total_received'], OFFENSIVE_BREAKDOWN, bucket.aircraft,\n                                 pilot_snipe)\n        base_bucket.save()\n    if filtered_bucket is not None:\n        filtered_bucket.increment_ammo_given(ammo_breakdown['total_received'], pilot_snipe)\n        if not filtered_bucket.player:\n            write_breakdown_line(filtered_bucket, ammo_breakdown['total_received'], OFFENSIVE_BREAKDOWN,\n                                 bucket.aircraft, pilot_snipe)\n        filtered_bucket.save()\n\n\ndef fill_in_ammo(ammo_breakdown, ap_ammo, he_ammo):\n    if (ap_ammo not in ammo_breakdown['total_received']\n            and he_ammo in ammo_breakdown['total_received']):\n        ammo_breakdown['total_received'][ap_ammo] = 0\n    if (he_ammo not in ammo_breakdown['total_received']\n            and ap_ammo in ammo_breakdown['total_received']):\n        ammo_breakdown['total_received'][he_ammo] = 0\n\n\ndef is_pilot_snipe(sortie):\n    \"\"\"\n    A pilot snipe is when a plane goes down because the pilot gets killed, and not because the aircraft is critically\n    damaged. Currently, in the logs, a pilot snipe looks rather similar to a normal death. Even in a pilot snipe,\n    the logs think the aircraft gets shotdown before the pilot dies - i.e. it emits \"plane shotdown\" before \"pilot dead\".\n\n    Instead the logs seem to relay the information that it was a pilot snipe by \"damage to the pilot\". I.e. a pilot\n    snipe has \"damage to pilot X by plane Y\" events, whereas a death that is not a pilot snipe has \"damage to pilot X\n    without a plane\" events.\n\n    So, to check for a pilot snipe we check:\n\n    1. The pilot must have died to a player/AI object.\n    2. That the death didn't happen much later than the shotdown, otherwise it could've been someone strafing a plane\n       which was already dead.\n    3. That the shotdown didn't happen much later than the last damage to pilot event, otherwise it could be as above.\n    4. 
That there was sufficient damage to the pilot from enemy planes to cause a death to the pilot.\n\n    If all 4 conditions are satisfied, then it's a pilot snipe.\n    \"\"\"\n    death_event = (LogEntry.objects\n                   .filter(Q(cact_sortie_id=sortie.id),\n                           Q(type='killed'), act_object_id__isnull=False))\n\n    shotdown_event = (LogEntry.objects\n                      .filter(Q(cact_sortie_id=sortie.id),\n                              Q(type='shotdown'), act_object_id__isnull=False))\n\n    wound_events = (LogEntry.objects\n                    .filter(Q(cact_sortie_id=sortie.id),\n                            Q(type='wounded'), act_object_id__isnull=False)\n                    .order_by('-tik'))\n\n    if not death_event.exists() or not shotdown_event.exists() or not wound_events.exists():\n        # Condition 1 in function description.\n        return False\n\n    death_event = death_event[0]\n    shotdown_event = shotdown_event[0]\n\n    if death_event.tik - shotdown_event.tik > 20:\n        # Condition 2 in function description.\n        # Threshold is 20 tiks = 0.4 seconds.\n        return False\n\n    if wound_events[0].tik - shotdown_event.tik > 20:\n        # Condition 3 in function description.\n        # Threshold is 20 tiks = 0.4 seconds.\n        return False\n\n    wound_damage = 0\n    for wound_event in wound_events:\n        if type(wound_event.extra_data['damage']) is dict:\n            wound_damage += wound_event.extra_data['damage']['pct']\n        else:\n            wound_damage += wound_event.extra_data['damage']\n\n    return wound_damage > 0.95  # Condition 4 in function description. At least 95% damage threshold.\n\n\ndef ammo_breakdown_enemy_bucket(ammo_breakdown, bucket, db_object, enemy_sortie):\n    \"\"\"\n    This finds the bucket which damaged our plane for ammo breakdown purposes.\n\n    There are two main cases: We've been damaged by the main guns of an aircraft, and we've been damaged by the turret.\n    Unfortunately, it is impossible to find the Sortie corresponding to the aircraft turret, which results in divergent\n    logic.\n\n    @param ammo_breakdown The ammo breakdown of our sortie.\n    @param bucket Our bucket, i.e. the plane which got damaged.\n    @param db_object The object which did the damaging. May be an aircraft or turret.\n    @param enemy_sortie None if turret (can't know sortie), otherwise the id of Sortie of the plane which damaged our plane.\n\n    @return base_bucket: Enemy bucket who did the damaging,\n            db_sortie: Sortie corresponding to input enemy_sortie or None if not passed.\n            filtered_bucket: Enemy subbucket which did the damaging, e.g. 
\"With bombs\" if jabo flight.\n \"\"\"\n if db_object.cls_base == 'aircraft':\n db_sortie = Sortie.objects.get(id=enemy_sortie)\n if bucket.player: # We only want to update the enemy player bucket and the enemy generic bucket once each.\n base_bucket = AircraftBucket.objects.get_or_create(\n tour=db_sortie.tour, aircraft=db_object, filter_type='NO_FILTER', player=db_sortie.player)[0]\n else:\n base_bucket = AircraftBucket.objects.get_or_create(\n tour=db_sortie.tour, aircraft=db_object, filter_type='NO_FILTER', player=None)[0]\n\n filter_type = get_sortie_type(db_sortie)\n if filter_type != 'NO_FILTER' and db_sortie.player:\n if bucket.player: # We only want to update the enemy player bucket and the enemy generic bucket once each.\n filtered_bucket = AircraftBucket.objects.get_or_create(\n tour=bucket.tour, aircraft=db_object, filter_type=filter_type, player=db_sortie.player)[0]\n else:\n filtered_bucket = AircraftBucket.objects.get_or_create(\n tour=bucket.tour, aircraft=db_object, filter_type=filter_type, player=None)[0]\n else:\n filtered_bucket = None\n\n else: # Turret\n db_sortie = None\n # Note that we can't update filtered Halberstadt (turreted plane with jabo type)\n # here since we don't know which subtype it is.\n filtered_bucket = None\n if bucket.player: # We only want to update the enemy player bucket and the enemy generic bucket once each.\n if 'last_turret_account' in ammo_breakdown:\n try:\n enemy_player = Player.objects.filter(\n profile__uuid=ammo_breakdown['last_turret_account'],\n tour=bucket.tour,\n type='pilot'\n ).get()\n base_bucket = turret_to_aircraft_bucket(db_object.name, db_object.log_name, tour=bucket.tour,\n player=enemy_player)\n except Player.DoesNotExist:\n base_bucket = None\n else:\n base_bucket = None\n else:\n base_bucket = turret_to_aircraft_bucket(db_object.name, db_object.log_name, tour=bucket.tour)\n return base_bucket, db_sortie, filtered_bucket\n\n\ndef process_streaks_and_best_sorties(bucket, sortie):\n \"\"\"\n Updates fields like max_score_streak, current_ak_streak, and best_score_in_sortie.\n\n This method may update the passed bucket, but also the bucket without player.\n\n @param bucket Player bucket associated to sortie. 
This is one of the buckets that may be updated.\n @param sortie Sortie which is being processed now.\n \"\"\"\n\n bucket.current_score_streak += sortie.score\n bucket.current_ak_streak += sortie.ak_total\n if IGNORE_AI_KILLS_STREAKS:\n killboard = sortie.killboard_pve\n aircraft_types = ['aircraft_light', 'aircraft_medium', 'aircraft_heavy', 'aircraft_transport']\n for aircraft_type in aircraft_types:\n bucket.current_ak_streak -= killboard[aircraft_type] if aircraft_type in killboard else 0\n bucket.current_gk_streak += sortie.gk_total\n\n bucket.max_score_streak = max(bucket.max_score_streak, bucket.current_score_streak)\n bucket.max_ak_streak = max(bucket.max_ak_streak, bucket.current_ak_streak)\n bucket.max_gk_streak = max(bucket.max_gk_streak, bucket.current_gk_streak)\n\n if sortie.score > bucket.best_score_in_sortie:\n bucket.best_score_in_sortie = sortie.score\n bucket.best_score_sortie = sortie\n if sortie.ak_total > bucket.best_ak_in_sortie:\n bucket.best_ak_in_sortie = sortie.ak_total\n bucket.best_ak_sortie = sortie\n if sortie.gk_total > bucket.best_gk_in_sortie:\n bucket.best_gk_in_sortie = sortie.gk_total\n bucket.best_gk_sortie = sortie\n\n if sortie.is_relive:\n bucket.current_score_streak = 0\n bucket.current_ak_streak = 0\n bucket.current_gk_streak = 0\n\n not_player_bucket = AircraftBucket.objects.filter(\n tour=sortie.tour,\n aircraft=sortie.aircraft,\n filter_type=bucket.filter_type,\n player=None,\n ).get_or_create()[0]\n\n if bucket.max_score_streak > not_player_bucket.max_score_streak:\n not_player_bucket.max_score_streak = bucket.max_score_streak\n not_player_bucket.max_score_streak_player = sortie.player\n if bucket.max_ak_streak > not_player_bucket.max_ak_streak:\n not_player_bucket.max_ak_streak = bucket.max_ak_streak\n not_player_bucket.max_ak_streak_player = sortie.player\n if bucket.max_gk_streak > not_player_bucket.max_gk_streak:\n not_player_bucket.max_gk_streak = bucket.max_gk_streak\n not_player_bucket.max_gk_streak_player = sortie.player\n\n if sortie.score > not_player_bucket.best_score_in_sortie:\n not_player_bucket.best_score_in_sortie = sortie.score\n not_player_bucket.best_score_sortie = sortie\n if sortie.ak_total > not_player_bucket.best_ak_in_sortie:\n not_player_bucket.best_ak_in_sortie = sortie.ak_total\n not_player_bucket.best_ak_sortie = sortie\n if sortie.gk_total > not_player_bucket.best_gk_in_sortie:\n not_player_bucket.best_gk_in_sortie = sortie.gk_total\n not_player_bucket.best_gk_sortie = sortie\n\n sortie.SortieAugmentation_MOD_STATS_BY_AIRCRAFT.computed_max_streaks = True\n sortie.SortieAugmentation_MOD_STATS_BY_AIRCRAFT.save()\n\n not_player_bucket.save()\n\n\ndef get_killboards(enemy, bucket, cache_kb, cache_enemy_buckets_kb, use_pilot_kbs, update_primary_bucket):\n (enemy_aircraft, enemy_sortie) = enemy\n\n enemy_bucket_keys = set()\n if update_primary_bucket:\n # This is the case where we are updating only a bucket with a Player, from the perspective of the turret.\n # The values of the turret's aircraft bucket should not change, only the damaged Player's bucket.\n enemy_bucket_keys.add((bucket.tour, enemy_aircraft, bucket.NO_FILTER, None))\n if use_pilot_kbs:\n # This happens when the bucket doesn't have a player, in this case we record \"Player in aircraft X got damaged\"\n # Or in the case where we're looking from the perspective of the turret, then this happens in the case\n # that we're currently updating a player bucket, here we record \"player in aircrafct X got damaged by turret\"\n enemy_bucket_keys.add((bucket.tour, 
enemy_aircraft, bucket.NO_FILTER, enemy_sortie.player))\n if has_bomb_variant(enemy_aircraft) or has_juiced_variant(enemy_aircraft):\n if update_primary_bucket:\n # This is the case where we are updating only a bucket with a Player, from the perspective of the turret.\n # The values of the turret's aircraft bucket should not change, only the damaged Player's bucket.\n enemy_bucket_keys.add((bucket.tour, enemy_aircraft, get_sortie_type(enemy_sortie), None))\n if use_pilot_kbs:\n # This happens when the bucket doesn't have a player, in this case we record \"Player in aircraft X got damaged\"\n # Or in the case where we're looking from the perspective of the turret, then this happens in the case\n # that we're currently updating a player bucket, here we record \"player in aircrafct X got damaged by turret\"\n enemy_bucket_keys.add((bucket.tour, enemy_aircraft, get_sortie_type(enemy_sortie), enemy_sortie.player))\n\n result = []\n for enemy_bucket_key in enemy_bucket_keys:\n if enemy_bucket_key in cache_enemy_buckets_kb:\n enemy_bucket = cache_enemy_buckets_kb[enemy_bucket_key]\n else:\n (enemy_bucket, created) = AircraftBucket.objects.get_or_create(\n tour=enemy_bucket_key[0], aircraft=enemy_bucket_key[1], filter_type=enemy_bucket_key[2],\n player=enemy_bucket_key[3])\n cache_enemy_buckets_kb[enemy_bucket_key] = enemy_bucket\n if created:\n enemy_bucket.update_derived_fields()\n enemy_bucket.save()\n\n if bucket.id < enemy_bucket.id:\n kb_key = (bucket, enemy_bucket)\n else:\n kb_key = (enemy_bucket, bucket)\n\n if kb_key not in cache_kb:\n kb = (AircraftKillboard.objects.get_or_create(aircraft_1=kb_key[0], aircraft_2=kb_key[1],\n tour=bucket.tour))[0]\n cache_kb[kb_key] = kb\n result.append(cache_kb[kb_key])\n\n return result\n\n\ndef calc_elo(winner_rating, loser_rating):\n \"\"\"\n From https://github.com/ddm7018/Elo\n \"\"\"\n k = 15 # Low k factor (in chess ~30 is common), because there will be a lot of engagements.\n # k factor is the largest amount elo can shift. 
So a plane gains/loses at most k = 15 per engagement.\n result = expected_result(winner_rating, loser_rating)\n new_winner_rating = winner_rating + k * (1 - result)\n new_loser_rating = loser_rating + k * (0 - (1 - result))\n return int(round(new_winner_rating)), int(round(new_loser_rating))\n\n\ndef expected_result(p1, p2):\n exp = (p2 - p1) / 400.0\n return 1 / ((10.0 ** exp) + 1)\n\n\nTURRET_AMBIGUITIES = {\n 'Bristol',\n 'Halberstadt'\n}\n\nTURRET_TO_AIRCRAFT = {\n 'turretbristolf2b_1': 'Bristol F2B (F.II)',\n 'turretbristolf2bf2_1': 'Bristol F2B (F.II)',\n 'turretbristolf2bf2_1_wm2': 'Bristol F2B (F.II)',\n 'turretbristolf2bf2_1m': 'Bristol F2B (F.II)',\n 'turretbristolf2bf3_1': 'Bristol F2B (F.III)',\n 'turretbristolf2bf3_1_wm2': 'Bristol F2B (F.III)',\n 'turretbristolf2bf3_1m': 'Bristol F2B (F.III)',\n 'turrethalberstadtcl2_1': 'Halberstadt CL.II',\n 'turrethalberstadtcl2_1_wm_beckap': 'Halberstadt CL.II',\n 'turrethalberstadtcl2_1_wm_beckhe': 'Halberstadt CL.II',\n 'turrethalberstadtcl2_1_wm_beckheap': 'Halberstadt CL.II',\n 'turrethalberstadtcl2_1_wm_twinpar': 'Halberstadt CL.II',\n 'turrethalberstadtcl2_1m': 'Halberstadt CL.II',\n 'turrethalberstadtcl2_1m2': 'Halberstadt CL.II',\n 'turrethalberstadtcl2au_1': 'Halberstadt CL.II 200hp',\n 'turrethalberstadtcl2au_1_wm_beckap': 'Halberstadt CL.II 200hp',\n 'turrethalberstadtcl2au_1_wm_beckhe': 'Halberstadt CL.II 200hp',\n 'turrethalberstadtcl2au_1_wm_beckheap': 'Halberstadt CL.II 200hp',\n 'turrethalberstadtcl2au_1_wm_twinpar': 'Halberstadt CL.II 200hp',\n 'turrethalberstadtcl2au_1m': 'Halberstadt CL.II 200hp',\n 'turrethalberstadtcl2au_1m2': 'Halberstadt CL.II 200hp',\n}\n\nTYPOS = {\n 'Airco DH4': 'Airco D.H.4',\n 'U-2VS': 'U-2',\n}\n\n\ndef turret_to_aircraft_bucket(turret_name, log_name, tour, player=None):\n aircraft_name = turret_name[:len(turret_name) - 7]\n if aircraft_name in TYPOS:\n aircraft_name = TYPOS[aircraft_name]\n if aircraft_name in TURRET_AMBIGUITIES:\n aircraft_name = TURRET_TO_AIRCRAFT[log_name]\n\n if 'B25' in aircraft_name:\n # It's an AI flight, which isn't (yet) supported.\n return None\n try:\n aircraft = Object.objects.filter(name=aircraft_name).get()\n return (AircraftBucket.objects.get_or_create(tour=tour, aircraft=aircraft, filter_type='NO_FILTER',\n player=player))[0]\n except Object.DoesNotExist:\n logger.warning(\"[mod_stats_by_aircraft] Could not find aircraft for turret \" + turret_name)\n return None\n","repo_name":"FGlazov/IL2Stats_GlobalAircraftStatsMod","sub_path":"src/mod_stats_by_aircraft/aircraft_stats_compute.py","file_name":"aircraft_stats_compute.py","file_ext":"py","file_size_in_byte":39589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38835531523","text":"# Embraer_190\n#\n# Created: Feb 2017, M. Vegh (data taken from Embraer_E190_constThr/mission_Embraer_E190_constThr, and Regional_Jet_Optimization/Vehicles2.py), takeoff_field_length/takeoff_field_length.py, landing_field_length/landing_field_length.py\n# Modified: Mar 2020, M. Clarke\n# May 2020, E. Botero\n# Oct 2021, M. 
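# Quick sanity check of the Elo helpers above (ratings are illustrative,
# not taken from the source). Equal ratings give an expected result of
# exactly 0.5, so with k = 15 each side moves by 7.5 before rounding; a
# 400-point favourite is expected to win 10/11 ~ 91% of the time, so an
# upset pays out far more than a routine win.
def _elo_demo():
    assert expected_result(1500, 1500) == 0.5
    assert abs(expected_result(1900, 1500) - 10.0 / 11.0) < 1e-9
    return calc_elo(1500, 1500)  # ~ (1508, 1492) after int(round(...))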
Clarke\n\n\n\"\"\" setup file for the E190 vehicle\n\"\"\"\n\n\n# ----------------------------------------------------------------------\n# Imports\n# ----------------------------------------------------------------------\n\nimport numpy as np\nimport SUAVE\nfrom SUAVE.Core import Units\nfrom SUAVE.Methods.Propulsion.turbofan_sizing import turbofan_sizing\nfrom SUAVE.Methods.Geometry.Two_Dimensional.Planform import wing_planform, segment_properties\n\nfrom copy import deepcopy\n\n# ----------------------------------------------------------------------\n# Define the Vehicle\n# ----------------------------------------------------------------------\n\ndef vehicle_setup():\n\n # ------------------------------------------------------------------\n # Initialize the Vehicle\n # ------------------------------------------------------------------\n\n vehicle = SUAVE.Vehicle()\n vehicle.tag = 'Embraer_E190AR'\n\n # ------------------------------------------------------------------\n # Vehicle-level Properties\n # ------------------------------------------------------------------\n\n # mass properties (http://www.embraercommercialaviation.com/AircraftPDF/E190_Weights.pdf)\n vehicle.mass_properties.max_takeoff = 51800. # kg\n vehicle.mass_properties.operating_empty = 27837. # kg\n vehicle.mass_properties.takeoff = 51800. # kg\n vehicle.mass_properties.max_zero_fuel = 40900. # kg\n vehicle.mass_properties.max_payload = 13063. # kg\n vehicle.mass_properties.max_fuel = 12971. # kg\n vehicle.mass_properties.cargo = 0.0 # kg\n\n vehicle.mass_properties.center_of_gravity = [[16.8, 0, 1.6]]\n vehicle.mass_properties.moments_of_inertia.tensor = [[10 ** 5, 0, 0],[0, 10 ** 6, 0,],[0,0, 10 ** 7]] \n\n # envelope properties\n vehicle.envelope.ultimate_load = 3.5\n vehicle.envelope.limit_load = 1.5\n\n # basic parameters\n vehicle.reference_area = 92.\n vehicle.passengers = 106\n vehicle.systems.control = \"fully powered\"\n vehicle.systems.accessories = \"medium range\"\n\n\n # ------------------------------------------------------------------\n # Main Wing\n # ------------------------------------------------------------------\n wing = SUAVE.Components.Wings.Main_Wing()\n wing.tag = 'main_wing'\n wing.areas.reference = 92.0\n wing.aspect_ratio = 8.4\n wing.chords.root = 6.2\n wing.chords.tip = 1.44\n wing.sweeps.quarter_chord = 23.0 * Units.deg\n wing.thickness_to_chord = 0.11\n wing.taper = 0.28\n wing.dihedral = 5.00 * Units.deg\n wing.spans.projected = 28.72\n wing.origin = [[13.0,0,-1.50]]\n wing.vertical = False\n wing.symmetric = True \n wing.high_lift = True\n wing.areas.exposed = 0.80 * wing.areas.wetted \n wing.twists.root = 2.0 * Units.degrees\n wing.twists.tip = 0.0 * Units.degrees \n wing.dynamic_pressure_ratio = 1.0\n \n \n segment = SUAVE.Components.Wings.Segment()\n segment.tag = 'root'\n segment.percent_span_location = 0.0\n segment.twist = 4. * Units.deg\n segment.root_chord_percent = 1.\n segment.thickness_to_chord = .11\n segment.dihedral_outboard = 5. * Units.degrees\n segment.sweeps.quarter_chord = 20.6 * Units.degrees\n wing.Segments.append(segment) \n \n segment = SUAVE.Components.Wings.Segment()\n segment.tag = 'yehudi'\n segment.percent_span_location = 0.348\n segment.twist = (4. - segment.percent_span_location*4.) 
* Units.deg\n segment.root_chord_percent = 0.60\n segment.thickness_to_chord = .11\n segment.dihedral_outboard = 4 * Units.degrees\n segment.sweeps.quarter_chord = 24.1 * Units.degrees\n wing.Segments.append(segment)\n \n segment = SUAVE.Components.Wings.Segment()\n segment.tag = 'section_2'\n segment.percent_span_location = 0.961\n segment.twist = (4. - segment.percent_span_location*4.) * Units.deg\n segment.root_chord_percent = 0.25\n segment.thickness_to_chord = .11\n segment.dihedral_outboard = 70. * Units.degrees\n segment.sweeps.quarter_chord = 50. * Units.degrees\n wing.Segments.append(segment)\n\n segment = SUAVE.Components.Wings.Segment() \n segment.tag = 'Tip'\n segment.percent_span_location = 1.\n segment.twist = (4. - segment.percent_span_location*4.) * Units.deg\n segment.root_chord_percent = 0.070\n segment.thickness_to_chord = .11\n segment.dihedral_outboard = 0.\n segment.sweeps.quarter_chord = 0.\n wing.Segments.append(segment) \n \n # Fill out more segment properties automatically\n wing = segment_properties(wing) \n\n # control surfaces -------------------------------------------\n flap = SUAVE.Components.Wings.Control_Surfaces.Flap() \n flap.tag = 'flap' \n flap.span_fraction_start = 0.11\n flap.span_fraction_end = 0.85\n flap.deflection = 0.0 * Units.deg \n flap.chord_fraction = 0.28 \n flap.configuration_type = 'double_slotted'\n wing.append_control_surface(flap) \n \n slat = SUAVE.Components.Wings.Control_Surfaces.Slat()\n slat.tag = 'slat' \n slat.span_fraction_start = 0.324 \n slat.span_fraction_end = 0.963 \n slat.deflection = 1.0 * Units.deg \n slat.chord_fraction = 0.1 \n wing.append_control_surface(slat) \n \n wing = wing_planform(wing)\n \n wing.areas.exposed = 0.80 * wing.areas.wetted\n wing.twists.root = 2.0 * Units.degrees\n wing.twists.tip = 0.0 * Units.degrees \n wing.dynamic_pressure_ratio = 1.0 \n\n # add to vehicle\n vehicle.append_component(wing)\n \n # ------------------------------------------------------------------\n # Horizontal Stabilizer\n # ------------------------------------------------------------------\n\n wing = SUAVE.Components.Wings.Horizontal_Tail()\n wing.tag = 'horizontal_stabilizer'\n wing.areas.reference = 26.0\n wing.aspect_ratio = 5.5\n wing.sweeps.quarter_chord = 34.5 * Units.deg\n wing.thickness_to_chord = 0.11\n wing.taper = 0.11\n wing.dihedral = 8.4 * Units.degrees\n wing.origin = [[31,0,0.44]]\n wing.vertical = False\n wing.symmetric = True \n wing.high_lift = False \n wing = wing_planform(wing)\n wing.areas.exposed = 0.9 * wing.areas.wetted \n wing.twists.root = 2.0 * Units.degrees\n wing.twists.tip = 2.0 * Units.degrees \n wing.dynamic_pressure_ratio = 0.90\n\n # add to vehicle\n vehicle.append_component(wing)\n\n # ------------------------------------------------------------------\n # Vertical Stabilizer\n # ------------------------------------------------------------------\n\n wing = SUAVE.Components.Wings.Vertical_Tail()\n wing.tag = 'vertical_stabilizer'\n wing.areas.reference = 16.0\n wing.aspect_ratio = 1.7\n wing.sweeps.quarter_chord = 35. 
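# The per-segment twist above is a linear washout, twist(eta) = 4 * (1 - eta)
# degrees: 4 deg at the root, about 2.6 deg at the eta = 0.348 break, and
# 0 deg at the tip. (Derived from the segment values above, not an extra
# setting.)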
* Units.deg\n wing.thickness_to_chord = 0.11\n wing.taper = 0.31\n wing.dihedral = 0.00\n wing.origin = [[30.4,0,1.675]]\n wing.vertical = True\n wing.symmetric = False \n wing.high_lift = False\n wing = wing_planform(wing)\n wing.areas.exposed = 0.9 * wing.areas.wetted\n wing.twists.root = 0.0 * Units.degrees\n wing.twists.tip = 0.0 * Units.degrees \n wing.dynamic_pressure_ratio = 1.00\n \n # add to vehicle\n vehicle.append_component(wing)\n \n # ------------------------------------------------------------------\n # Fuselage\n # ------------------------------------------------------------------\n\n fuselage = SUAVE.Components.Fuselages.Fuselage()\n fuselage.tag = 'fuselage'\n fuselage.origin = [[0,0,0]]\n fuselage.number_coach_seats = vehicle.passengers\n fuselage.seats_abreast = 4\n fuselage.seat_pitch = 30. * Units.inches\n\n fuselage.fineness.nose = 1.28\n fuselage.fineness.tail = 3.48\n\n fuselage.lengths.nose = 6.0\n fuselage.lengths.tail = 9.0\n fuselage.lengths.cabin = 21.24\n fuselage.lengths.total = 36.24\n fuselage.lengths.fore_space = 0.\n fuselage.lengths.aft_space = 0.\n\n fuselage.width = 3.01 * Units.meters\n\n fuselage.heights.maximum = 3.35 \n fuselage.heights.at_quarter_length = 3.35 \n fuselage.heights.at_three_quarters_length = 3.35 \n fuselage.heights.at_wing_root_quarter_chord = 3.35 \n\n fuselage.areas.side_projected = 239.20\n fuselage.areas.wetted = 327.01\n fuselage.areas.front_projected = 8.0110\n\n fuselage.effective_diameter = 3.18\n\n fuselage.differential_pressure = 10**5 * Units.pascal # Maximum differential pressure\n\n # add to vehicle\n vehicle.append_component(fuselage)\n\n # -----------------------------------------------------------------\n # Design the Nacelle\n # ----------------------------------------------------------------- \n nacelle = SUAVE.Components.Nacelles.Nacelle()\n nacelle.diameter = 2.05\n nacelle.length = 2.71\n nacelle.tag = 'nacelle_1'\n nacelle.inlet_diameter = 2.0\n nacelle.origin = [[12.0,4.38,-2.1]]\n Awet = 1.1*np.pi*nacelle.diameter*nacelle.length # 1.1 is simple coefficient\n nacelle.areas.wetted = Awet \n nacelle.Airfoil.NACA_4_series_flag = True \n nacelle.Airfoil.coordinate_file = '2410' \n nacelle_2 = deepcopy(nacelle)\n nacelle_2.tag = 'nacelle_2'\n nacelle_2.origin = [[12.0,-4.38,-2.1]]\n \n vehicle.append_component(nacelle) \n vehicle.append_component(nacelle_2) \n \n \n # ------------------------------------------------------------------\n # Turbofan Network\n # ------------------------------------------------------------------ \n #initialize the gas turbine network\n gt_engine = SUAVE.Components.Energy.Networks.Turbofan()\n gt_engine.tag = 'turbofan'\n gt_engine.origin = [[12.0,4.38,-2.1],[12.0,-4.38,-2.1]] \n gt_engine.engine_length = 2.71 \n gt_engine.number_of_engines = 2.0\n gt_engine.bypass_ratio = 5.4 \n \n #set the working fluid for the network\n working_fluid = SUAVE.Attributes.Gases.Air()\n\n #add working fluid to the network\n gt_engine.working_fluid = working_fluid\n\n\n #Component 1 : ram, to convert freestream static to stagnation quantities\n ram = SUAVE.Components.Energy.Converters.Ram()\n ram.tag = 'ram'\n #add ram to the network\n gt_engine.ram = ram\n\n\n #Component 2 : inlet nozzle\n inlet_nozzle = SUAVE.Components.Energy.Converters.Compression_Nozzle()\n inlet_nozzle.tag = 'inlet nozzle'\n inlet_nozzle.polytropic_efficiency = 0.98\n inlet_nozzle.pressure_ratio = 0.98\n #add inlet nozzle to the network\n gt_engine.inlet_nozzle = inlet_nozzle\n\n\n #Component 3 :low pressure compressor \n 
low_pressure_compressor = SUAVE.Components.Energy.Converters.Compressor() \n low_pressure_compressor.tag = 'lpc'\n low_pressure_compressor.polytropic_efficiency = 0.91\n low_pressure_compressor.pressure_ratio = 1.9 \n #add low pressure compressor to the network \n gt_engine.low_pressure_compressor = low_pressure_compressor\n\n #Component 4 :high pressure compressor \n high_pressure_compressor = SUAVE.Components.Energy.Converters.Compressor() \n high_pressure_compressor.tag = 'hpc'\n high_pressure_compressor.polytropic_efficiency = 0.91\n high_pressure_compressor.pressure_ratio = 10.0 \n #add the high pressure compressor to the network \n gt_engine.high_pressure_compressor = high_pressure_compressor\n\n #Component 5 :low pressure turbine \n low_pressure_turbine = SUAVE.Components.Energy.Converters.Turbine() \n low_pressure_turbine.tag ='lpt'\n low_pressure_turbine.mechanical_efficiency = 0.99\n low_pressure_turbine.polytropic_efficiency = 0.93\n #add low pressure turbine to the network \n gt_engine.low_pressure_turbine = low_pressure_turbine\n\n #Component 5 :high pressure turbine \n high_pressure_turbine = SUAVE.Components.Energy.Converters.Turbine() \n high_pressure_turbine.tag ='hpt'\n high_pressure_turbine.mechanical_efficiency = 0.99\n high_pressure_turbine.polytropic_efficiency = 0.93\n #add the high pressure turbine to the network \n gt_engine.high_pressure_turbine = high_pressure_turbine \n\n #Component 6 :combustor \n combustor = SUAVE.Components.Energy.Converters.Combustor() \n combustor.tag = 'Comb'\n combustor.efficiency = 0.99 \n combustor.alphac = 1.0 \n combustor.turbine_inlet_temperature = 1500\n combustor.pressure_ratio = 0.95\n combustor.fuel_data = SUAVE.Attributes.Propellants.Jet_A() \n #add the combustor to the network \n gt_engine.combustor = combustor\n\n #Component 7 :core nozzle\n core_nozzle = SUAVE.Components.Energy.Converters.Expansion_Nozzle() \n core_nozzle.tag = 'core nozzle'\n core_nozzle.polytropic_efficiency = 0.95\n core_nozzle.pressure_ratio = 0.99 \n #add the core nozzle to the network \n gt_engine.core_nozzle = core_nozzle\n\n #Component 8 :fan nozzle\n fan_nozzle = SUAVE.Components.Energy.Converters.Expansion_Nozzle() \n fan_nozzle.tag = 'fan nozzle'\n fan_nozzle.polytropic_efficiency = 0.95\n fan_nozzle.pressure_ratio = 0.99\n #add the fan nozzle to the network\n gt_engine.fan_nozzle = fan_nozzle\n\n #Component 9 : fan \n fan = SUAVE.Components.Energy.Converters.Fan() \n fan.tag = 'fan'\n fan.polytropic_efficiency = 0.93\n fan.pressure_ratio = 1.7 \n #add the fan to the network\n gt_engine.fan = fan \n\n #Component 10 : thrust (to compute the thrust)\n thrust = SUAVE.Components.Energy.Processes.Thrust() \n thrust.tag ='compute_thrust'\n #total design thrust (includes all the engines)\n thrust.total_design = 37278.0* Units.N #Newtons\n\n #design sizing conditions\n altitude = 35000.0*Units.ft\n mach_number = 0.78 \n isa_deviation = 0.\n # add thrust to the network\n gt_engine.thrust = thrust\n\n #size the turbofan\n turbofan_sizing(gt_engine,mach_number,altitude) \n\n # add gas turbine network gt_engine to the vehicle\n vehicle.append_component(gt_engine) \n \n fuel = SUAVE.Components.Physical_Component()\n vehicle.fuel = fuel\n fuel.mass_properties.mass = vehicle.mass_properties.max_takeoff-vehicle.mass_properties.max_fuel\n fuel.origin = vehicle.wings.main_wing.mass_properties.center_of_gravity \n fuel.mass_properties.center_of_gravity= vehicle.wings.main_wing.aerodynamic_center\n # ------------------------------------------------------------------\n 
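# Back-of-envelope check of the cycle defined above (illustrative arithmetic,
# not an additional SUAVE setting): the overall pressure ratio is
# fan * LPC * HPC = 1.7 * 1.9 * 10.0 ~ 32.3, with a 1500 K turbine inlet
# temperature and bypass ratio 5.4, sized by turbofan_sizing() to 37278 N
# total design thrust at Mach 0.78 / 35,000 ft.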
# Vehicle Definition Complete\n # ------------------------------------------------------------------\n\n return vehicle\n \n# ----------------------------------------------------------------------\n# Define the Configurations\n# ---------------------------------------------------------------------\n\ndef configs_setup(vehicle):\n\n # ------------------------------------------------------------------\n # Initialize Configurations\n # ------------------------------------------------------------------\n\n configs = SUAVE.Components.Configs.Config.Container()\n\n base_config = SUAVE.Components.Configs.Config(vehicle)\n base_config.tag = 'base'\n configs.append(base_config)\n\n # ------------------------------------------------------------------\n # Cruise Configuration\n # ------------------------------------------------------------------\n\n config = SUAVE.Components.Configs.Config(base_config)\n config.tag = 'cruise'\n configs.append(config)\n\n\n # ------------------------------------------------------------------\n # Takeoff Configuration\n # ------------------------------------------------------------------\n\n config = SUAVE.Components.Configs.Config(base_config)\n config.tag = 'takeoff'\n config.wings['main_wing'].control_surfaces.flap.deflection = 20. * Units.deg\n config.wings['main_wing'].control_surfaces.slat.deflection = 25. * Units.deg\n config.V2_VS_ratio = 1.21\n configs.append(config)\n \n # ------------------------------------------------------------------\n # Landing Configuration\n # ------------------------------------------------------------------\n\n config = SUAVE.Components.Configs.Config(base_config)\n config.tag = 'landing'\n config.wings['main_wing'].control_surfaces.flap.deflection = 30. * Units.deg\n config.wings['main_wing'].control_surfaces.slat.deflection = 25. * Units.deg\n config.Vref_VS_ratio = 1.23\n configs.append(config) \n \n # ------------------------------------------------------------------\n # Short Field Takeoff Configuration\n # ------------------------------------------------------------------ \n\n config = SUAVE.Components.Configs.Config(base_config)\n config.tag = 'short_field_takeoff' \n config.wings['main_wing'].control_surfaces.flap.deflection = 20. * Units.deg\n config.wings['main_wing'].control_surfaces.slat.deflection = 25. 
* Units.deg\n config.V2_VS_ratio = 1.21\n \n # payload?\n \n configs.append(config)\n\n # done!\n return configs\n","repo_name":"suavecode/SUAVE","sub_path":"regression/scripts/Vehicles/Embraer_190.py","file_name":"Embraer_190.py","file_ext":"py","file_size_in_byte":19394,"program_lang":"python","lang":"en","doc_type":"code","stars":349,"dataset":"github-code","pt":"61"} +{"seq_id":"199457084","text":"import os\r\n\r\nfrom .base import * # noqa\r\nfrom .base import env\r\n\r\n# GENERAL\r\n# ------------------------------------------------------------------------------\r\n# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key\r\nSECRET_KEY = env(\"DJANGO_SECRET_KEY\")\r\n\r\n# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts\r\nALLOWED_HOSTS = env.list(\r\n \"DJANGO_ALLOWED_HOSTS\",\r\n default=[\".bbbs.fun\", \".kiryanov.ru\"],\r\n)\r\n\r\n# DATABASES\r\n# ------------------------------------------------------------------------------\r\n# https://docs.djangoproject.com/en/dev/ref/settings/#databases\r\nDATABASES = {\r\n \"default\": {\r\n \"ENGINE\": \"django.db.backends.postgresql\",\r\n \"NAME\": os.environ.get(\"POSTGRES_DB\"),\r\n \"USER\": os.environ.get(\"POSTGRES_USER\"),\r\n \"PASSWORD\": os.environ.get(\"POSTGRES_PASSWORD\"),\r\n \"HOST\": os.environ.get(\"POSTGRES_HOST\"),\r\n \"PORT\": os.environ.get(\"POSTGRES_PORT\"),\r\n }\r\n}\r\n\r\n# SECURITY WARNING: don't run with debug turned on in production!\r\nDEBUG = False\r\n\r\n# EMAIL\r\n# ------------------------------------------------------------------------------\r\n# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email\r\nDEFAULT_FROM_EMAIL = env(\r\n \"DJANGO_DEFAULT_FROM_EMAIL\", default=\"BBBS Team \"\r\n)\r\n# https://docs.djangoproject.com/en/dev/ref/settings/#server-email\r\nSERVER_EMAIL = env(\"DJANGO_SERVER_EMAIL\", default=DEFAULT_FROM_EMAIL)\r\n# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix\r\nEMAIL_SUBJECT_PREFIX = env(\"DJANGO_EMAIL_SUBJECT_PREFIX\", default=\"[bbbs]\")\r\n# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend\r\nEMAIL_BACKEND = \"anymail.backends.mailjet.EmailBackend\"\r\n\r\n# Anymail\r\n# ------------------------------------------------------------------------------\r\n# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail\r\nINSTALLED_APPS += [\"anymail\"] # noqa F405\r\n# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend\r\n# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference\r\n# https://anymail.readthedocs.io/en/stable/esps/mailjet/\r\nANYMAIL = {\r\n \"MAILJET_API_KEY\": env(\"MAILJET_API_KEY\"),\r\n \"MAILJET_SECRET_KEY\": env(\"MAILJET_SECRET_KEY\"),\r\n}\r\n# Youtube Token\r\n# ------------------------------------------------------------------------------\r\nYOUTUBE_KEY = env(\"YOUTUBE_KEY\")\r\n","repo_name":"ivartm/bbbs","sub_path":"config/settings/prod.py","file_name":"prod.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23577813991","text":"import sys\n\n\ndef solve(D, N, cases):\n # print(D, N, cases)\n max_time = 0\n for c in cases:\n curr_time = (D - c[0]) / c[1]\n if curr_time > max_time:\n max_time = curr_time\n res = D / max_time\n return res\n\n\ndef process_file(input_file, output_file):\n file_in = open(input_file, 'rU')\n file_out = open(output_file, 'w')\n\n num_cases = None\n case_num = 0\n\n cases = []\n D = 
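# The production settings above read everything from the environment; a
# matching env file would carry entries like the following (variable names
# from the module, values invented):
#   DJANGO_SECRET_KEY=<random-string>
#   DJANGO_ALLOWED_HOSTS=.bbbs.fun,.kiryanov.ru
#   POSTGRES_DB=... POSTGRES_USER=... POSTGRES_PASSWORD=...
#   POSTGRES_HOST=... POSTGRES_PORT=...
#   MAILJET_API_KEY=... MAILJET_SECRET_KEY=... YOUTUBE_KEY=...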
None\n N = None\n\n for row in file_in:\n # print(row)\n\n if not num_cases:\n num_cases = int(row)\n\n elif D is None:\n case_num += 1\n row_split = row.split()\n D = int(row_split[0])\n N = int(row_split[1])\n cases = []\n\n elif len(cases) < N:\n row_split = row.split()\n cases.append((int(row_split[0]), int(row_split[1])))\n\n if len(cases) == N:\n # print('Case %i' % case_num)\n result = solve(D, N, cases)\n res_string = 'Case #%i: %.7f' % (case_num, result)\n # print(res_string)\n file_out.write(res_string + '\\n')\n D = None\n N = None\n cases = []\n\n\n file_out.close()\n\n\ndef main():\n if len(sys.argv) == 3:\n print('Program starts')\n process_file(sys.argv[1], sys.argv[2])\n print('Done')\n sys.exit(1)\n\n else:\n print('Give two arguments (INPUT_FILE OUTPUT_FILE)')\n sys.exit(1)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_206/1008.py","file_name":"1008.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38809196554","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# LICENSE: see LICENSE file\n#\n# bbs mode:\n# You must rewrite Download,GetCurrentDir,CheckThreadsValid,\n# GetThreadUrl and GetTitle function.\n# single-page mode:\n# You must rewrite Download function.\n\nimport sys\nimport logging\nimport os\nimport os.path\nimport requests\nimport requesocks\nimport re\nimport ConfigParser\nimport argparse\nimport imghdr\nfrom HTMLParser import HTMLParser\n\ndef success(val): return val,None\ndef error(why): return None,why\ndef get_val(m_val): return m_val[0]\ndef get_error(m_val): return m_val[1]\n\n#global variables\ninit_with_config_file = True\nhas_log_file = True\n\nif os.name != 'nt':\n WindowsError = OSError\n\nclass Downloader(object):\n \"\"\"docstring for ClassName\"\"\"\n def __init__(self):\n super(Downloader, self).__init__()\n\n self.type = 'none'\n self._isUrlFormat = re.compile(r'https?://([\\w-]+\\.)+[\\w-]+(/[\\w\\- ./?%&=]*)?');\n self._path = get_val(self.DealDir(\"Images\"))\n self.currentDir = \"\"\n self.cf = ConfigParser.ConfigParser()\n self.pageNum = 1\n self.pageTo = 1\n self.isMono = False\n self.keepOriginTitle = True\n self.numToDownload = -1\n self.loggingFile = 'log.txt'\n self.retryTimes = 5\n self.encode = None\n self.useProxy = False\n self.httpProxy = '127.0.0.1:1080'\n self.httpsProxy = '127.0.0.1:1080'\n self.imageCount = 0\n self.verbose = False\n self.silent = False\n self.targetThread = \"\" # single thread\n self.targetThreadRegex = \"\"\n\n #moeimg specific\n self.moeimgdomain = 'moeimg.net'\n self.moeimgTags = False\n self.moeimgSortWithTags = False\n self.currentTag = 'default'\n\n #caoliu specific\n self.caoliudomain = 't66y.com'\n\n #jandan specific\n self.jandandomain = 'jandan.net'\n self.jandanPageToDownload = 1\n\n global init_with_config_file\n global has_log_file\n if init_with_config_file:\n if not os.path.exists('config'):\n self.InternalPrint('No config file. 
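# Worked example for the Code Jam solve() above (sample-style numbers, not
# from the source): with D = 2525 and one case at position 2400 moving at
# speed 5, that case reaches D after (2525 - 2400) / 5 = 25 time units, so
# the answer is the constant speed arriving at the same moment:
#   solve(2525, 1, [(2400, 5)])  ->  2525 / 25 = 101.0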
Creating a default one.', False)\n self.SetDefaultConfig();\n self.LoadConfig()\n #init logging file\n if has_log_file:\n logging.basicConfig(filename = os.path.join(os.getcwd(), self.loggingFile), level = logging.WARN, filemode = 'a+', format = '%(asctime)s - %(levelname)s: %(message)s')\n\n def InternalPrint(self, msg, is_verbose):\n if not self.silent:\n if is_verbose:\n if(self.verbose):\n print(msg)\n else:\n print(msg)\n\n def LoadConfig(self):\n self.cf.read(\"config\")\n self.pageNum = self.cf.getint('web','page_from')\n self.pageTo = self.cf.getint('web','page_to')\n self.isMono = self.cf.getboolean('file','mono')\n self.numToDownload = self.cf.getint('web','num_to_download')\n self.loggingFile = self.cf.get('basic','log_file')\n self.retryTimes = self.cf.getint('web','retry_times')\n self.caoliudomain = self.cf.get('caoliu','domain')\n self.moeimgdomain = self.cf.get('moeimg','domain')\n self.keepOriginTitle = self.cf.getboolean('file','keep_origin_title')\n self.jandandomain = self.cf.get('jandan','domain')\n self.jandanPageToDownload = self.cf.getint('jandan','pages_to_download')\n self.moeimgTags = self.cf.getboolean('moeimg','tags')\n self.moeimgSortWithTags = self.cf.getboolean('moeimg','sort_with_tags')\n self.useProxy = self.cf.getboolean('basic','use_proxy')\n self.httpProxy = self.cf.get('basic','http_proxy')\n self.httpsProxy = self.cf.get('basic','https_proxy')\n\n\n def SetDefaultConfig(self):\n self.cf.add_section('basic')\n self.cf.set('basic','log_file','log.txt')\n self.cf.set('basic','use_proxy','false')\n self.cf.set('basic','http_proxy','127.0.0.1:1080')\n self.cf.set('basic','https_proxy','127.0.0.1:1080')\n self.cf.add_section('web')\n self.cf.set('web','page_from','1')\n self.cf.set('web','page_to','1')\n self.cf.set('web','num_to_download','-1')\n self.cf.set('web','retry_times','5')\n self.cf.add_section('caoliu')\n self.cf.set('caoliu','domain','t66y.com')\n self.cf.add_section('moeimg')\n self.cf.set('moeimg','domain','moeimg.net')\n self.cf.set('moeimg','tags','false')\n self.cf.set('moeimg','sort_with_tags','false')\n self.cf.add_section('jandan')\n self.cf.set('jandan','domain','jandan.net')\n self.cf.set('jandan','pages_to_download','1')\n self.cf.add_section('file')\n self.cf.set('file','mono','false')\n self.cf.set('file','keep_origin_title','true')\n with open('config', 'wb') as configfile:\n self.cf.write(configfile)\n\n def StripIllegalChar(self, path):\n return path.strip('>').strip('<').strip('*').strip('|').strip('?').strip(':').strip('\"').strip('/')\n\n def DealDir(self, path):\n solved = False\n while True:\n try:\n if not os.path.exists(path):\n os.mkdir(path)\n return success(path)\n except WindowsError:\n #windows specific\n global has_log_file\n if has_log_file:\n logging.error('Windows error with path %s' % path)\n if not solved:\n path = self.StripIllegalChar(path)\n solved = True\n else:\n return error('Invalid path name %s' % path)\n\n def FetchHtml(self, url):\n retry = 0\n proxies = {\n 'http':self.httpProxy,\n 'https':self.httpsProxy,\n }\n headers = {\n 'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/601.3.9 (KHTML, like Gecko) Version/9.0.2 Safari/601.3.9',\n }\n while True:\n try:\n self.InternalPrint(\"Fetching HTML: %s\" % url, True)\n session = requesocks.session()\n session.headers = headers;\n if self.useProxy:\n self.InternalPrint(\"Using proxy: http %s, https %s\" % (self.httpProxy, self.httpsProxy), True)\n session.proxies = proxies\n else:\n self.InternalPrint(\"No proxy.\", True)\n response 
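# Caveat on StripIllegalChar above: str.strip() only removes characters at
# the *ends* of the string, so an illegal character in the middle of a
# title survives and DealDir falls back to its 'tmp' path. A stricter
# variant (a replacement sketch, not the original) would be:
#     re.sub(r'[<>*|?:"/]', '', path)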
= session.get(url)\n if response.status_code != 200:\n self.InternalPrint(response.text, True)\n return error(\"Failed to fetch html. CODE:%i\" % response.status_code)\n elif (response.text) == 0:\n return error(\"Empty html.\")\n else:\n if self.encode != None:\n response.encoding = self.encode\n return success(response.text)\n #except requests.ConnectionError:\n except requesocks.exceptions.ConnectionError:\n if retry0 and num>=self.numToDownload:\n break\n\n def DoFetchSingleThread(self, url):\n self.InternalPrint('Thread:'+url, False)\n\n self.InternalPrint(\"Fetching thread html...\", True)\n res = self.FetchHtml(url)\n if get_error(res):\n return res\n self.InternalPrint(\"Thread html fetched.\", True)\n\n html = get_val(res)\n #get current directory\n if self.keepOriginTitle:\n # get thread title\n #self.currentDir = self.GetTitle(href)\n prog = re.compile(self.targetThreadRegex, re.IGNORECASE)\n matches = prog.findall(html)\n self.currentDir = matches[0]\n else:\n self.currentDir = url.split('/')[-1].split('.')[-2]\n #TODO: gb2312 bug\n try:\n self.InternalPrint(self.currentDir.encode(sys.getfilesystemencoding())+'/', False)\n except UnicodeEncodeError:\n global has_log_file\n if has_log_file:\n logging.warning('Unicode encode error at %s' % url)\n self.currentDir = 'tmp'\n self.InternalPrint(self.currentDir+'/', False)\n\n html = get_val(res)\n self.currentTag = self.GetThreadTagName(html)\n self.FetchImgLinksFromThread(html);\n return success(0)\n\n # need to rewrite\n def GetThreadUrl(self, href):pass\n def GetTitle(self, href):pass\n def CheckThreadsValid(self, href):pass\n def GetCurrentDir(self, href):pass\n def GetThreadTagName(self, html):return 'default'\n def Download(self):\n self.init()\n\n def PreHandleImgLink(self, href):\n return href\n\n def PreHandleTagName(self, local_file):\n return local_file\n\n def FetchThreadHtml(self, threadurl):\n self.InternalPrint(\"Fetching thread html...\", True)\n res = self.FetchHtml(threadurl)\n self.InternalPrint(\"Thread html fetched.\", True)\n if get_error(res):\n return res\n html = get_val(res)\n self.currentTag = self.GetThreadTagName(html)\n self.FetchImgLinksFromThread(html);\n return success(html)\n\n def FetchImgLinksFromThread(self, htmlSource):\n prog = re.compile(self.ImgRegex, re.IGNORECASE)\n matchesImgSrc = prog.findall(htmlSource)\n global has_log_file\n if not self.isMono:\n self.imageCount = 0\n for href in matchesImgSrc:\n self.InternalPrint(href, True)\n href = self.PreHandleImgLink(href)\n if not self.CheckIsUrlFormat(href):\n #warning: requests library does not support non-http(s) url\n self.InternalPrint('Invalid url format %s' % href, False)\n if has_log_file:\n logging.error('Invalid url format %s' % href)\n continue;\n res = self.download_file(href)\n if get_error(res):\n self.InternalPrint(get_error(res).encode(sys.getfilesystemencoding()), False)\n self.imageCount += 1\n\n def CheckIsUrlFormat(self, value):\n return self._isUrlFormat.match(value) is not None\n\n def GetImageType(self, img_path):\n type = imghdr.what(img_path)\n if type != None:\n return type\n else:\n return \"jpg\"\n\n def ImageExists(self, path, img_name):\n files = os.listdir(path)\n for f in files:\n if img_name == os.path.splitext(f)[0]:\n return True\n return False\n\n def download_file(self, url):\n dir = self.type\n local_directory = \"\"\n if self.isMono:\n local_directory = \"Images/\"+ dir + '/'\n self.DealDir(local_directory)\n local_directory = self.PreHandleTagName(local_directory)\n else:\n local_directory = \"Images/\" + 
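# The except-branch above was damaged upstream: text between '<' and the
# next '>' was stripped when this snippet was captured, fusing the tail of
# FetchHtml's retry handling with a loop condition from a later function
# ("if retry0 and num>=self.numToDownload"). A plausible shape for the lost
# retry code -- a guess from the surrounding retry/retryTimes variables,
# not the original source -- is:
#     if retry < self.retryTimes:
#         retry += 1
#         continue
#     return error('Connection error: %s' % url)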
dir + '/'\n self.DealDir(local_directory)\n local_directory = self.PreHandleTagName(local_directory)\n # deal windows directory error\n res = self.DealDir(local_directory + self.currentDir + '/')\n if get_error(res):\n #self.InternalPrint(get_error(res), False)\n self.DealDir(local_directory + 'tmp/')\n local_directory += 'tmp/'\n else:\n local_directory += self.currentDir + '/'\n\n #local_filename = local_filename + self.StripIllegalChar(url.split('/')[-1])#has bug in windows\n image_path = local_directory + str(self.imageCount)# so use image count instead\n if self.ImageExists(local_directory, str(self.imageCount)):\n if not self.isMono:\n return error('\\t skip '+image_path)\n else:\n while(self.ImageExists(local_directory, str(self.imageCount))):\n self.imageCount+=1\n image_path = local_directory + str(self.imageCount)\n\n self.InternalPrint('\\t=>'+image_path.encode(sys.getfilesystemencoding()), False)\n # NOTE the stream=True parameter\n retry = 0\n proxies = {\n 'http':self.httpProxy,\n 'https':self.httpsProxy,\n }\n global has_log_file\n while True:\n try:\n session = requesocks.session()\n if self.useProxy:\n self.InternalPrint(\"Using proxy: http %s, https %s\" % (self.httpProxy, self.httpsProxy), True)\n session.proxies = proxies\n #r = requests.get(url, stream=True, proxies=proxies)\n #else:\n #r = requests.get(url, stream=True)\n r = session.get(url)\n break\n #except requests.ConnectionError:\n except requesocks.exceptions.ConnectionError:\n if retry]+?)[ \\'\"]\\s*(?:alt=\"\\d*\")?\\s*class=\"thumbnail_image\"'\n #self.ThreadsRegex = r'\\s*]+?)[\\'\"]\\s*title=[\"\\']?([^\\'\"]+?)[\\'\"]'\n self.ThreadsRegex = r'
\\s*\\s*([^<]+?)\\s*\\s*
'\n self.targetThreadRegex = r'\\s*\\s*([^<]+?)\\s*'\n\n def Download(self):\n if self.moeimgTags:\n res = self.LoadTags()\n if get_error(res):\n self.InternalPrint(get_error(res), False)\n return\n tags = get_val(res)\n else:\n tags = ['default']\n self.InternalPrint(\"=============== start ===============\", False)\n i = self.pageNum\n domain = ''\n for tag in tags:\n self.currentTag = tag\n if self.targetThread == \"\":\n for i in range(self.pageNum, self.pageTo+1):\n if not self.moeimgTags:\n self.InternalPrint(\"=============== loading page {0} ===============\".format(i), False)\n if i == 1:\n domain = \"http://\"+self.moeimgdomain\n else:\n domain = \"http://\"+self.moeimgdomain+\"/page/{0}\".format(i)\n else:\n self.InternalPrint(\"=============== loading tag: %s page %i ===============\" % (tag.decode('utf-8').encode(sys.getfilesystemencoding()),i), False)\n if i == 1:\n domain = \"http://\"+self.moeimgdomain+\"/tag/%s\" % (tag)\n else:\n domain = \"http://\"+self.moeimgdomain+\"/tag/%s/page/%i\" % (tag,i)\n res = self.DoFetch(domain)\n if get_error(res):\n self.InternalPrint(get_error(res), False)\n else:\n self.InternalPrint(\"=============== loading target thread {0} ===============\".format(self.targetThread), False)\n res = self.DoFetchSingleThread(self.targetThread)\n if get_error(res):\n self.InternalPrint(get_error(res), False)\n self.InternalPrint(\"=============== end ===============\", False)\n\n def FetchAllTags(self):\n res = self.FetchHtml('http://'+self.moeimgdomain+'/taglist')\n if get_error(res):\n return res\n html = get_val(res)\n tagRegex = r'([^<]+?)'\n prog = re.compile(tagRegex, re.IGNORECASE)\n matches = prog.findall(html)\n tags = []\n for m in matches:\n if re.search('tag', m[0]):\n if not m[1] in tags:\n tags.append(m[1])\n self.InternalPrint('Fetched %s tags.' % len(tags), True)\n return success(tags)\n\n def LoadTags(self):\n if os.path.exists(self.tag_file):\n tagsfile = open(self.tag_file, 'r')\n else:\n return error('No tags file.')\n\n tags = []\n for tag in tagsfile:\n tags.append(tag.strip('\\n').strip(';').decode('utf-8').replace(' ', '-').lower())\n self.InternalPrint('Loaded %s tags.' % len(tags), True)\n return success(tags)\n\n def GetCurrentDir(self, href):\n dir = href[0].split('/')[-1]\n dir = dir.split('.')[-2]\n return dir\n\n def GetThreadTagName(self, html):\n #tagRegex = r'\\s*]+?)[ \\'\"]\\s*>([^<]*)'\n tagRegex = r']+?)[ \\'\"]\\s*rel=\"tag\">([^<]*)'\n prog = re.compile(tagRegex, re.IGNORECASE)\n matches = prog.findall(html)\n for m in matches:\n if re.search('http://moeimg.net/tag/',m[0]):\n return m[1]\n return 'default'\n\n def PreHandleTagName(self, local_file):\n if self.moeimgSortWithTags:\n if self.moeimgTags:\n local_file += self.currentTag.encode(sys.getfilesystemencoding()) + '/'\n else:\n local_file += self.currentTag + '/'\n self.DealDir(local_file)\n return local_file\n\n def CheckThreadsValid(self, href):\n return True\n\n def GetThreadUrl(self, href):\n return href[0]\n\n def GetTitle(self, href):\n return href[1]\n\nclass CaoliuDownloader(Downloader):\n def __init__(self):\n super(CaoliuDownloader, self).__init__()\n\n self.type = 'caoliu'\n self.encode = 'gbk'\n self.ImgRegex = r']+?)[ \\'\"]\\s*type=\\'image\\''\n self.ThreadsRegex = r'
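# LoadTags above turns each line of the tag file into a URL slug, e.g. a
# line "School Uniform;\n" (example invented) becomes "school-uniform",
# matching the /tag/<slug> pages that Download() builds.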
]+?)[ \\'\"][^>]*?>(?:)?([^<]*)(?:)?
'\n self.targetThreadRegex = r' --> [^<]+?\\s*([^<]+?)\\s*'\n\n def Download(self):\n self.InternalPrint(\"=============== start ===============\", False)\n if self.targetThread == \"\":\n for i in range(self.pageNum, self.pageTo+1):\n self.InternalPrint(\"=============== loading page {0} ===============\".format(i), False)\n domain = \"http://\"+self.caoliudomain+\"/thread0806.php?fid=16&search=&page={0}\".format(i)\n res = self.DoFetch(domain)\n if get_error(res):\n self.InternalPrint(get_error(res), False)\n else:\n self.InternalPrint(\"=============== loading target thread {0} ===============\".format(self.targetThread), False)\n res = self.DoFetchSingleThread(self.targetThread)\n if get_error(res):\n self.InternalPrint(get_error(res), False)\n self.InternalPrint(\"=============== end ===============\", False)\n\n def GetCurrentDir(self, href):\n dir = href[0].split('/')[-3] + href[0].split('/')[-2] + href[0].split('/')[-1]\n dir = dir.split('.')[-2]\n return dir\n\n def CheckThreadsValid(self, href):\n return href[0][0:8] == \"htm_data\"\n\n def GetThreadUrl(self, href):\n return 'http://'+self.caoliudomain+'/' + href[0]\n\n def GetTitle(self, href):\n return href[1]\n\nclass MLStripper(HTMLParser):\n def __init__(self):\n self.reset()\n self.fed = []\n def handle_data(self, d):\n self.fed.append(d)\n def get_data(self):\n return ''.join(self.fed)\n\nclass JanDanDownloader(Downloader):\n def __init__(self):\n super(JanDanDownloader, self).__init__()\n\n self.isMono = True\n\n self.type = 'jandan'\n self.encode = 'utf-8'\n self.ImgRegex = r'
\\s*]+?)[ \\'\"]\\s*target=\"_blank\"\\s*class=\"view_img_link\"\\s*>'\n\n def Download(self):\n #get max\n res = self.FetchHtml(\"http://\"+self.jandandomain+\"/ooxx\")\n if get_error(res):\n self.InternalPrint(get_error(res), False)\n return res\n html = get_val(res)\n newest = self.get_max(html)\n\n self.InternalPrint(\"=============== start ===============\", False)\n for i in range(newest-self.jandanPageToDownload+1, newest+1):\n self.InternalPrint(\"=============== loading page {0} ===============\".format(i), False)\n domain = \"http://\"+self.jandandomain+\"/ooxx/page-{0}#comments\".format(i)\n res = self.FetchThreadHtml(domain)\n if get_error(res):\n self.InternalPrint(get_error(res), False)\n self.InternalPrint(\"=============== end ===============\", False)\n\n def strip_tags(self, html):\n s = MLStripper()\n s.feed(html)\n return s.get_data()\n\n def get_max(self, html_code):\n m = re.search('.+cp-pagenavi.+', html_code)\n m = re.search('\\d+', self.strip_tags(m.group(0)).strip())\n return int(m.group(0))\n\n def download_file(self, url):\n dir = self.type\n local_directory = \"Images/\"+ dir + '/'\n self.DealDir(local_directory)\n image_path = local_directory + url.split('/')[-1]\n if os.path.exists(image_path):\n return error('\\t skip '+image_path)\n self.InternalPrint('\\t=>'+image_path.encode(sys.getfilesystemencoding()), False)\n # NOTE the stream=True parameter\n retry = 0\n proxies = {\n 'http':self.httpProxy,\n 'https':self.httpsProxy,\n }\n global has_log_file\n while True:\n try:\n if self.useProxy:\n r = requests.get(url, stream=True, proxies=proxies)\n else:\n r = requests.get(url, stream=True)\n break\n except requests.ConnectionError:\n if retry 0:\n d.pageTo = d.pageNum + num - 1\n\ndef parse_general_args(obj, args):\n if args.no_log:\n obj.hasLog = False\n if args.threads:\n obj.numToDownload = args.threads\n if args.single:\n obj.targetThread = args.single[0]\n if args.proxy:\n obj.useProxy = True\n obj.httpProxy = args.proxy[0]\n obj.httpsProxy = args.proxy[0]\n if args.direct:\n obj.useProxy = False\n if args.retry:\n obj.retryTimes = args.retry\n if args.mono:\n obj.isMono = True\n if args.verbose:\n obj.verbose = True\n if args.quiet:\n obj.silent = True\n\ndef caoliu(args):\n cl = CaoliuDownloader()\n if args.pages:\n process_pages(cl, args.pages)\n if args.domain:\n cl.caoliudomain = args.domain\n parse_general_args(cl, args)\n cl.InternalPrint(\"Processing caoliu...\", False)\n cl.Download()\n\ndef moeimg(args):\n moe = MoeimgDownloader()\n if args.pages:\n process_pages(moe, args.pages)\n if args.domain:\n moe.moeimgdomain = args.domain\n if args.sort_with_tags:\n moe.moeimgSortWithTags = True\n parse_general_args(moe, args)\n moe.InternalPrint(\"Processing moeimg...\", False)\n if args.fetch_all_tags:\n res = moe.FetchAllTags()\n if get_error(res):\n print(get_error(res))\n return\n tags = get_val(res)\n with open('all_tags.txt', 'w') as all_tags_file:\n for t in tags:\n all_tags_file.write(t + '\\n')\n print('Fetched all tags.')\n elif args.with_tags:\n if args.tag_file:\n moe.tag_file = args.tag_file\n moe.moeimgTags = True\n moe.Download()\n else:\n moe.Download()\n\ndef jandan(args):\n j = JanDanDownloader()\n if args.pages:\n j.jandanPageToDownload = args.pages\n if args.domain:\n j.jandandomain = args.domain\n parse_general_args(j, args)\n j.InternalPrint(\"Processing jandan...\", False)\n j.Download()\n\n#def all():pass\n\ndef main():\n global init_with_config_file\n global has_log_file\n ap = argparse.ArgumentParser(description='This tool can 
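# How get_max() above finds the newest page number (markup invented for
# illustration): the line containing "cp-pagenavi" is reduced to plain text
# by MLStripper via strip_tags(), and the first run of digits is taken as
# the page count, e.g. '<div class="cp-pagenavi">[2001]</div>' -> '[2001]'
# -> 2001.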
download ooxx image from some websites. :P',\n epilog=\" Please report bugs to https://github.com/KanagiMiss/MoeDownloader/issues\")\n sp = ap.add_subparsers(title='subcommands',\n description='available subcommands',\n help='')\n\n p_caoliu = sp.add_parser(\"caoliu\", help=\"download caoliu images\")\n p_caoliu.set_defaults(func=caoliu)\n p_moeimg = sp.add_parser(\"moeimg\", help=\"download moeimg images\")\n p_moeimg.set_defaults(func=moeimg)\n p_jandan = sp.add_parser(\"jandan\", help=\"download jandan images\")\n p_jandan.set_defaults(func=jandan)\n# p_all = sp.add_parser(\"all\", help=\"download all images\")\n\n g1 = ap.add_mutually_exclusive_group()\n g2 = ap.add_mutually_exclusive_group()\n ap.add_argument(\"-p\", \"--pages\", type=int,\n help=\"number of pages to download\")\n\n #general options\n ap.add_argument(\"-i\", \"--ignore_config\", action=\"store_true\", help=\"ignore config file and load with default options\")\n ap.add_argument(\"-n\", \"--no_log\", action=\"store_true\", help=\"run without log\")\n ap.add_argument(\"-r\", \"--retry\", type=int, help=\"retry times if failed\")\n ap.add_argument(\"-m\", \"--mono\", action=\"store_true\", help=\"set if mono file\")\n ap.add_argument(\"-t\", \"--threads\", type=int, help=\"number of threads to download\")\n ap.add_argument(\"-S\", \"--single\", nargs=1, help=\"download single thread\")\n g1.add_argument(\"-q\", \"--quiet\", action=\"store_true\", help=\"run quietly and briefly\")\n g1.add_argument(\"-v\", \"--verbose\", action=\"store_true\", help=\"run verbosely\")\n g2.add_argument(\"-d\", \"--direct\", action=\"store_true\", help=\"connect directly(without proxy)\")\n g2.add_argument(\"--proxy\", nargs=1, help='set http and https proxy')\n ap.add_argument('--version', action='version', version='%(prog)s 1.0')\n\n #moeimg options\n p_moeimg.add_argument(\"-T\", \"--fetch_all_tags\", action=\"store_true\", help=\"fetch all tags from site\")\n p_moeimg.add_argument(\"-t\", \"--with_tags\", action=\"store_true\", help=\"download with tags\")\n p_moeimg.add_argument(\"-s\", \"--sort_with_tags\", action=\"store_true\", help=\"sort files with tags\")\n p_moeimg.add_argument(\"--domain\", nargs=1, help=\"set domain\")\n p_moeimg.add_argument(\"-f\", \"--tag_file\", type=argparse.FileType('r'), help=\"set specific tag file\")\n\n #caoliu options\n p_caoliu.add_argument(\"--domain\", nargs=1, help=\"set domain\")\n\n #jandan options\n p_jandan.add_argument(\"--domain\", nargs=1, help=\"set domain\")\n\n args = ap.parse_args()\n\n # run with default config (ignore config file)\n if args.ignore_config:\n init_with_config_file = False\n\n # run without log file\n if args.no_log:\n has_log_file = False\n\n args.func(args)\n\nif __name__ == '__main__':\n reload(sys)\n sys.setdefaultencoding(sys.getfilesystemencoding())\n main()\n","repo_name":"misskanagi/MoeDownloader","sub_path":"catch.py","file_name":"catch.py","file_ext":"py","file_size_in_byte":30718,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"61"} +{"seq_id":"39785341064","text":"from selenium import webdriver\nimport time\n\n\nclass Globo:\n def __init__(self, driver):\n self.driver = driver\n self.url = 'http://www.globo.com/' # url a ser crawleada\n self.wrapper = 'hui-premium'\n # self.wrapper = 'hui-premium__title'\n self.list = []\n\n # Inicia a navegação\n def navigate(self):\n self.driver.get(self.url)\n\n def get_noticias(self):\n noticias = self.driver.find_elements_by_class_name(self.wrapper)\n import ipdb; 
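# Example invocations for the argparse CLI defined above (flag spellings
# come from the parser; URLs and paths are placeholders):
#   python catch.py -p 3 moeimg --with_tags --sort_with_tags
#   python catch.py -m --proxy 127.0.0.1:1080 caoliu
#   python catch.py -S <thread-url> caoliu     # download a single thread
# Top-level options must precede the subcommand, as usual with argparse
# subparsers.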
ipdb.set_trace()\n for noticia in noticias:\n images = noticia.find_elements_by_tag_name('img')\n sub = noticia.find_elements_by_class_name('hui-premium__related')\n for image in images:\n dict = {\n 'title': noticia.text,\n 'sub_title': [subtitle.text for subtitle in sub],\n 'url_img': image.get_attribute('src') if image.get_attribute('src') else '-'\n }\n self.list.append(dict)\n print(self.list)\n self.driver.close()\n\n# Web driver Firefox\nwbf = webdriver.Firefox()\n\n# Instanciando o objeto\nglobo = Globo(wbf)\n\n# Navegando\nglobo.navigate()\n\ntime.sleep(5)\nglobo.get_noticias()","repo_name":"carruda1980/prova_Desenvolvedor_BackEnd","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"36420944549","text":"# Reverse a string\n# => Method 1\n\"\"\" myStr='Hey this is a string'\ndef reverseStr():\n output=''\n for i in range(len(myStr)-1,-1,-1):\n output+=myStr[i] \n return output\n\nprint(reverseStr()) \"\"\"\n\n# => Method 2\n\"\"\" myStr='Hey this is a string'\ndef reverseStr():\n output=''\n for i in myStr:\n output= i + output\n return output\n\nprint(reverseStr()) \"\"\"\n\n# => Method 3 (recursion)\n\"\"\" myStr='manas'\ndef reverseStr(s):\n if len(s)==0:\n return s\n else:\n return reverseStr(s[1:]) + s[0]\nprint(reverseStr(myStr)) \"\"\"\n\n# Print duplicate characters from a string\n# Method 1 (using hashing) #? time comp => O(n) space comp => O(n)\n\"\"\" myStr='sadlnajsd nsj sadasdk'\ndef printDuplicates():\n NO_OF_CHARS=256\n count=[0]*NO_OF_CHARS #array with a length of 256 and each element as 0\n for i in myStr:\n count[ord(i)]+=1\n for i in range(0,len(count)):\n if count[i] > 1:\n print(chr(i))\nprintDuplicates() \"\"\"\n\n\n#How do you check if two strings are anagrams of each other? \n#Method 1 #? time comp => O(n) space comp => O(2*NO_OF_CHARS) \n\"\"\" def isAnagram(str1,str2):\n NO_OF_CHARS=256\n m,n=(len(str1),len(str2))\n if(m==n):\n hashTable1=[0]*NO_OF_CHARS\n hashTable2=[0]*NO_OF_CHARS\n for i in str1:\n hashTable1[ord(i)]+=1\n for j in str2:\n hashTable2[ord(j)]+=1\n if(hashTable2==hashTable1):\n return 'Strings are anagram'\n else:\n return 'Strings are no anagram'\n else:\n return 'Strings are no anagram' \"\"\"\n\n#TODO => The above implementation can be further to use only one count array instead of two. We can increment the value in count array for characters in str1 and decrement for characters in str2. Finally, if all count values are 0, then the two strings are anagram of each other.\n\n\n#Method 2 (using sorting) #? time comp => O(nlogn) \n\"\"\" def isAnagram(str1,str2):\n m,n=(len(str1),len(str2))\n if(m==n):\n str1=sorted(str1) #sorted function in python returns a list of sorted elements\n str2=sorted(str2) #O(nlogn)\n if(str1==str2):\n return 'Strings are anagram'\n else:\n return 'Strings are not anagram'\n else:\n return 'Strings are not anagram'\n\nprint(isAnagram('manas','sanam')) \"\"\"\n\n#How do you check if a string contains only digits?\n#Method 1 #? time comp => O(n)\n\"\"\" def onlyDigits(str1):\n for i in str1:\n if (i>='0' and i <='9'):\n continue\n else:\n return False\n return True\n \"\"\"\n\n\n#Method 2 #? time comp => O(n)\n\"\"\" def onlyDigits(str1):\n for i in str1:\n if i.isdigit():\n continue\n else:\n return False\n return True\n\nprint(onlyDigits('59887'))\n \"\"\"\n\n#How do you count a number of vowels and consonants in a given string? #? 
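# Sketch of the TODO above -- one count array instead of two: increment for
# str1, decrement for str2; all-zero counts mean the strings are anagrams.
# Still O(n) time, half the auxiliary space (256 slots assume single-byte
# characters, as in the methods above).
def isAnagramOneArray(str1, str2):
    if len(str1) != len(str2):
        return False
    count = [0] * 256
    for ch in str1:
        count[ord(ch)] += 1
    for ch in str2:
        count[ord(ch)] -= 1
    return all(c == 0 for c in count)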
time comp => O(n)\n\"\"\" def countVowels(str1):\n vowels=0\n consonants=0\n for i in str1:\n if i =='a' or i=='e' or i=='i' or i=='o' or i=='u':\n vowels+=1\n else:\n consonants+=1\n return (vowels,consonants)\n\nprint(countVowels('permutations'))\n \"\"\"\n\n##Function to reverse words in a given string.\n\"\"\" def reverseWords(S):\n # code here \n output=''\n word=''\n for i in range(len(S)-1,-1,-1):\n if(S[i]!='.'):\n word=S[i]+word\n else:\n word+='.'\n output+=word\n word=''\n \n output+=word\n return output \"\"\"\n\n\n#A Program to check if strings are rotations of each other or not\n\"\"\" def rotationStrings(str1,str2):\n if len(str1) == len(str2):\n temp=str1+str1\n if temp.count(str2) > 0:\n return True\n else:\n return False\n else:\n return False\n\n\nprint(rotationStrings('manas','asman')) \"\"\"\n\n#A program to reverse each words in a string\n\"\"\" def reverseWords(mystr):\n st=list(); #st=stack\n for i in range(0,len(mystr)):\n if mystr[i] !=' ':\n st.append(mystr[i]);\n else:\n while(len(st)>0):\n print(st.pop(),end='');\n print(' ',end='')\n while(len(st)>0):\n print(st.pop(),end='');\n \nreverseWords('manas joshi')\n \"\"\"\n\n#Check whether the String is a palindrome or not.\n#Solution => reverse the string and compare them.. (you can also try solving using recursion)\n\"\"\" def palindrone(mystr,i,j):\n if i>=j:\n return True\n elif mystr[i]!=mystr[j]:\n return False\n return palindrone(mystr,i+1,j-1)\n\nprint(palindrone('nitin',0,len('nitin')-1)) \"\"\"\n\n\n# print the first non repeating character from the string \nmyStr='adadall'\nalpha=[0]*26 #hash table (you can also take 256 character array)\nfor char in myStr:\n alpha[ord(char)-ord('a')]+=1\n \nfor char in myStr:\n if alpha[ord(char)-ord('a')]==1:\n print(char) \n break\nelse:\n print('no non repeating character found')","repo_name":"Maanu07/dsa_python","sub_path":"Strings/questions.py","file_name":"questions.py","file_ext":"py","file_size_in_byte":4816,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"38549657154","text":"\"\"\"\r\n Hacer un programa que imprima los números impares entre el 10 y el cero en orden decreciente y que calcule la suma de esos números impares.\r\n\"\"\"\r\ni = int(input(\"Ingrese el numero inicial: \"))\r\nf = int(input(\"Ingrese el numero final: \"))\r\nsuma = 0\r\nprint(\"Los numeros impares del rango\")\r\nwhile i <= f:\r\n if i % 2 != 0:\r\n print(i)\r\n suma = suma + i\r\n i+=1\r\nprint(f\"\\nLa suma de los numeros: {suma}\")","repo_name":"alexarxe/web_class_23_idt_python","sub_path":"ejercicio04.py","file_name":"ejercicio04.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36337161746","text":"\"\"\"\nGiven preorder and inorder traversal of a tree, construct the binary tree.\nTime: O(N^2) if make preorder into collections.deque\nSpace: O(logN)\n\"\"\"\nclass Solution(object):\n def buildTree(self, preorder, inorder):\n \"\"\"\n :type preorder: List[int]\n :type inorder: List[int]\n :rtype: TreeNode\n \"\"\" \n if inorder:\n index = inorder.index(preorder.pop(0))\n root = TreeNode(inorder[index])\n root.left = self.buildTree(preorder, inorder[0:index])\n root.right = self.buildTree(preorder, inorder[index+1:])\n return 
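# (Translation of the Spanish exercise above: "Write a program that prints
# the odd numbers between 10 and 0 in decreasing order and computes their
# sum." The prompts read "Enter the starting/final number", "The odd
# numbers in the range", "The sum of the numbers". Note the loop actually
# counts *upward* from the starting value, so the "decreasing order" in the
# docstring is not what the code prints.)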
root","repo_name":"CheRayLiu/LeetCode","sub_path":"medium/construct_bst_from_preorder_and_inorder_traversal.py","file_name":"construct_bst_from_preorder_and_inorder_traversal.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23540498311","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Apr 8 15:45:16 2017\r\n\r\n@author: Julien\r\n\"\"\"\r\n\r\n\r\nimport os\r\n\r\ns='---------'\r\nk=3\r\n\r\ndef solve(s,k):\r\n a=[]\r\n for i in s:\r\n if i=='-':\r\n a.append(1)\r\n else:\r\n a.append(0)\r\n \r\n counter=0\r\n while(len(a)>k):\r\n if sum(a)==0:\r\n return counter\r\n\r\n \r\n# print(a)\r\n if a[0]==0:\r\n a.pop(0)\r\n else:\r\n counter+=1\r\n a.pop(0)\r\n for i in range(k-1):\r\n a[i]=(a[i]+1)%2\r\n \r\n if sum(a)==0:\r\n return counter\r\n \r\n# print(\"apres 1er flip\") \r\n# print(a)\r\n\r\n if a[-1]==0:\r\n a.pop()\r\n else:\r\n counter+=1\r\n a.pop()\r\n \r\n for i in range(k-1):\r\n a[-i-1]=(a[-1-i]+1)%2\r\n\r\n# print(a)\r\n\r\n\r\n if sum(a)==0:\r\n return counter\r\n elif sum(a)==k:\r\n return counter +1 \r\n else:\r\n return 'IMPOSSIBLE'\r\n \r\n \r\n#print(solve(s,k))\r\n\r\n\r\n\r\n#\r\nfh=\"A-large.in\"\r\nchem=\"C:\\CodeJam\\ExoA\"\r\nfhOut='Solution_'+fh\r\n\r\nwith open(os.path.join(chem,fh),'r') as f:\r\n with open(os.path.join(chem,fhOut),'w') as fOut:\r\n CASE=int(f.readline())\r\n \r\n for c in range(CASE):\r\n l=(f.readline())\r\n [s,k]=l.split()\r\n k=int(k)\r\n \r\n res=solve(s,k)\r\n# print(s +str(k))\r\n# print(res)\r\n fOut.write(\"Case #\" +str(c+1) + ': ' + str(res) +'\\n')","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/1453.py","file_name":"1453.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73633258755","text":"#\n# random_card.py:\n#\n# \tStarting code for L10-1\n#\n# Modifies code in deck.py, then tests it below\n#\nimport deck as d\nTRIALS = 10000\n\ndef main():\n\n # Count number of times this occurs (loop TRIALS times):\n # Deck.randomCard() generates two aces in a row\n deck1 = d.Deck()\n numTwoAces = 0\n\n for count in range(TRIALS):\n card1 = deck1.randomCard()\n card2 = deck1.randomCard()\n\n if card1._rank ==1 and card2._rank ==1:\n numTwoAces += 1\n print (\"Percentage is %.4f\" % (100.0 * numTwoAces / TRIALS))\n\n # Print percentage of time this occurs\n\n\n\nmain()","repo_name":"jlmoldan/pythonclass","sub_path":"Lab10/random_card.py","file_name":"random_card.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27226094083","text":"import imaplib\nimport time\nimport telebot\nfrom dotenv import load_dotenv\n\nimport logger\nfrom logger import logger\nfrom os import environ\n\nimport schedule\n\nfrom classes.email_parser import CloudflareAlertsParser\nfrom classes.notifier import Notifier\n\nload_dotenv() # take environment variables from .env.\n\nlogin = environ.get(\"EMAIL\")\npassword = environ.get(\"PASSWORD\")\n\nbot_token = environ.get(\"TG_BOT_TOKEN\")\nchat_id = environ.get(\"TG_CHAT_ID\")\n\ndef check_email_and_notify():\n logger.info(\"Starting email checking\")\n try:\n # Connect to the Gmail IMAP server\n imap_conn = imaplib.IMAP4_SSL(\"imap.gmail.com\")\n try:\n # Log in to your account\n imap_conn.login(login, password)\n\n parsed_alerts = 
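# Usage sketch for the buildTree solution above (assumes the standard
# LeetCode TreeNode with .val/.left/.right):
#   preorder = [3, 9, 20, 15, 7]
#   inorder  = [9, 3, 15, 20, 7]
#   root = Solution().buildTree(preorder, inorder)
#   # root.val == 3, root.left.val == 9, root.right.val == 20
# preorder.pop(0) is O(n) per call, which is where the O(N^2) bound in the
# docstring comes from; a collections.deque with popleft() makes each
# removal O(1).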
CloudflareAlertsParser(imap_conn).parse_inbox()\n\n            bot = telebot.TeleBot(bot_token)\n            for alert in parsed_alerts:\n                try:\n                    bot.send_message(\n                        chat_id,\n                        f\"Alert! DDos Attack \\n\"\n                        f\"Host: {alert.target_hostname} \\n\"\n                        f\"Zone: {alert.target_zone} \\n\"\n                        f\"Rule ID: {alert.rule_id} \\n\"\n                        f\"Rule link: link\",\n                        parse_mode=\"html\"\n                    )\n                except Exception as e:\n                    logger.error(e)\n\n        except Exception as e:\n            logger.error(e)\n            Notifier.notify(f\"Error on email parsing {e}\")\n        finally:\n            imap_conn.close()\n\n    except Exception as e:\n        logger.error(f\"Some high level error {e}\")\n\n\nschedule.every(1).minutes.do(check_email_and_notify)\n\n\ndef main():\n    while True:\n        schedule.run_pending()\n        time.sleep(1)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"misterhell/email-parsing-for-cloudflare-alert-zone-lock","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74632569153","text":"import numpy as np\nimport random\n\narray = np.array(range(25), int)\nfor __ in range(array.size): array[__] = random.randint(-20,20)\n\narray = array.reshape((5,5))\n\nprint(array)\n\nprint(\"Product min*max = \"+str(array.min()*array.max()))\n\nqueue = {\n(1,1), (1,3), (2,1), (2,4), (3,2), (3,5), (4,1), (4,3), (5,2), (5,5) }\n\nS=0\nfor __ in queue:\n\tS+=array[__[0]-1,__[1]-1]\n\nprint(\"Sum of elements: \"+str(S))","repo_name":"HasellEas/IPO","sub_path":"hilevel12.py","file_name":"hilevel12.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21727570000","text":"import urllib.request \nimport os\n\nDATA_DIR = \"data\"\n\n# Download lncipedia fasta files:\nfasta_url = \"https://lncipedia.org/downloads/lncipedia_5_2/full-database/lncipedia_5_2.fasta\"\nfile_path = os.path.join(DATA_DIR, fasta_url.split(\"/\")[-1])\nif not os.path.exists(file_path):\n    urllib.request.urlretrieve(fasta_url, file_path)\n    print(\"Done with fasta files\")\nelse:\n    print(\"fasta file already downloaded\")\n\n# Download lncipedia bed files:\nbed38_url = \"https://lncipedia.org/downloads/lncipedia_5_2/full-database/lncipedia_5_2_hg38.bed\"\nfile_path = os.path.join(DATA_DIR, bed38_url.split(\"/\")[-1])\nif not os.path.exists(file_path):\n    urllib.request.urlretrieve(bed38_url, file_path)\n    print(\"Done with bed files\")\nelse:\n    print(\"bed file already downloaded\")\n\n# Download lncipedia gff files:\ngff38_url = \"https://lncipedia.org/downloads/lncipedia_5_2/full-database/lncipedia_5_2_hg38.gff\"\nfile_path = os.path.join(DATA_DIR, gff38_url.split(\"/\")[-1])\nif not os.path.exists(file_path):\n    urllib.request.urlretrieve(gff38_url, file_path)\n    print(\"Done with gff files\")\nelse:\n    print(\"gff file already downloaded\")\n","repo_name":"hahahannes/rna-clustering","sub_path":"download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28642330729","text":"import sys\nfrom collections import deque\n\ndq = deque(sys.stdin.readline().rstrip())\ndq2 = deque()\n\ns_total = ''\ntag_check = False\n\nwhile len(dq) != 0:\n    s = dq.popleft()\n    \n    if s == '<':\n        for _ in range(len(dq2)):\n            s_total += dq2.pop()\n        s_total += s\n        \n        tag_check = True\n        continue\n    \n    if s == '>':\n        s_total += s\n        tag_check = False\n        continue\n    \n    if s == ' ':\n        if tag_check:\n            s_total += 
s\n            continue\n    \n        for _ in range(len(dq2)):\n            s_total += dq2.pop()\n        s_total += s\n        continue\n    \n    if tag_check:\n        s_total += s\n        continue\n    \n    else:\n        dq2.append(s)\n        if len(dq) == 0:\n            for _ in range(len(dq2)):\n                s_total += dq2.pop()\n        continue\nprint(s_total)","repo_name":"tmdwls3475/2021_Winter_Algorithm","sub_path":"4 week/BOJ 17413.py","file_name":"BOJ 17413.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37593079837","text":"import sys\nimport re\nfrom channel_specific_bias import set_voltage_offset\n\nhosts_outer_east = [\"80\",\"81\",\"82\",\"83\"]\nhosts_outer_west = [\"90\",\"91\",\"92\",\"93\"]\nhosts_inner_east = [\"84\",\"85\",\"86\",\"87\"]\nhosts_inner_west = [\"94\",\"95\",\"96\",\"97\"]\n\nhosts = hosts_outer_east + hosts_outer_west + hosts_inner_east + hosts_inner_west\n\nsector_host_map = {\n    \"OHCal0\": \"92\",\n    \"OHCal1\": \"92\",\n    \"OHCal2\": \"92\",\n    \"OHCal3\": \"92\",\n\n    \"OHCal4\": \"93\",\n    \"OHCal5\": \"93\",\n    \"OHCal6\": \"93\",\n    \"OHCal7\": \"93\",\n\n    \"OHCal8\": \"80\",\n    \"OHCal9\": \"80\",\n    \"OHCal10\": \"80\",\n    \"OHCal11\": \"80\",\n\n    \"OHCal12\": \"81\",\n    \"OHCal13\": \"81\",\n    \"OHCal14\": \"81\",\n    \"OHCal15\": \"81\",\n\n    \"OHCal16\": \"82\",\n    \"OHCal17\": \"82\",\n    \"OHCal18\": \"82\",\n    \"OHCal19\": \"82\",\n\n    \"OHCal20\": \"83\",\n    \"OHCal21\": \"83\",\n    \"OHCal22\": \"83\",\n    \"OHCal23\": \"83\",\n\n    \"OHCal24\": \"90\",\n    \"OHCal25\": \"90\",\n    \"OHCal26\": \"90\",\n    \"OHCal27\": \"90\",\n\n    \"OHCal28\": \"91\",\n    \"OHCal29\": \"91\",\n    \"OHCal30\": \"91\",\n    \"OHCal31\": \"91\",\n\n\n    \"IHCal0\": \"96\",\n    \"IHCal1\": \"96\",\n    \"IHCal2\": \"96\",\n    \"IHCal3\": \"96\",\n\n    \"IHCal4\": \"97\",\n    \"IHCal5\": \"97\",\n    \"IHCal6\": \"97\",\n    \"IHCal7\": \"97\",\n\n    \"IHCal8\": \"84\",\n    \"IHCal9\": \"84\",\n    \"IHCal10\": \"84\",\n    \"IHCal11\": \"84\",\n\n    \"IHCal12\": \"85\",\n    \"IHCal13\": \"85\",\n    \"IHCal14\": \"85\",\n    \"IHCal15\": \"85\",\n\n    \"IHCal16\": \"86\",\n    \"IHCal17\": \"86\",\n    \"IHCal18\": \"86\",\n    \"IHCal19\": \"86\",\n\n    \"IHCal20\": \"87\",\n    \"IHCal21\": \"87\",\n    \"IHCal22\": \"87\",\n    \"IHCal23\": \"87\",\n\n    \"IHCal24\": \"94\",\n    \"IHCal25\": \"94\",\n    \"IHCal26\": \"94\",\n    \"IHCal27\": \"94\",\n\n    \"IHCal28\": \"95\",\n    \"IHCal29\": \"95\",\n    \"IHCal30\": \"95\",\n    \"IHCal31\": \"95\",\n\n}\n\n\ndef increase_one_voltage(sector, north_south, tower):\n    global host\n\n    gain_modification = 0\n\n    for sec in sector_host_map.keys():\n        host = sector_host_map[sec]\n        sector_number = int(re.findall(r'\\d+',sec)[0])\n        print(sector_number)\n\n        for ns in range(2):\n            board = (sector_number % 4)*2 + ns\n\n            for tow in range(24):\n                if sector == sec and north_south == ns and tower == tow:\n                    gain_modification = 2499\n                else:\n                    gain_modification = -2499\n                \n                set_voltage_offset(hosts, host, board, tow, gain_modification)\n\n\ndef main(argv):\n\n    # argv entries are strings; convert the numeric arguments before comparing\n    detector = \"\"\n    if int(argv[1]) == 1:\n        detector = \"IHCal\"\n    else:\n        detector = \"OHCal\"\n\n    sector = detector+argv[2]\n    north_south = int(argv[3])\n    tower = int(argv[4])\n\n    increase_one_voltage(sector,north_south,tower)\n\nif __name__ == \"__main__\":\n    main(sys.argv)\n","repo_name":"sPHENIX-Collaboration/hcal_control","sub_path":"control/one_up_others_down.py","file_name":"one_up_others_down.py","file_ext":"py","file_size_in_byte":2636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43780849175","text":"import 
serial\r\n\r\n\r\ntry:\r\n    gps = serial.Serial(\"com4\")\r\n\r\n    while True:\r\n\r\n        ser_bytes = gps.readline()\r\n        decoded_bytes = ser_bytes.decode(\"utf-8\")\r\n        data = decoded_bytes.split(\",\")\r\n        print(data)\r\n\r\n        \"\"\"\r\n        if data[0] == \"$GNRMC\":\r\n            lat_nmea = data[3]\r\n            lat_degrees = lat_nmea[:2]\r\n            if data[6] == 'S':\r\n                latitude_degrees = float(lat_degrees) * -1\r\n            else:\r\n                latitude_degrees = float(lat_degrees)\r\n\r\n            latitude_degrees = str(latitude_degrees).strip('0.')\r\n            lat_ddd = lat_nmea[2:10]\r\n            lat_mmm = float(lat_ddd) / 60\r\n            lat_mmm = str(lat_mmm).strip('.0')[:8]\r\n            latitude = latitude_degrees + \".\" + lat_mmm\r\n\r\n            long_nmea = data[5]\r\n            long_degrees = lat_nmea[1:3]\r\n            if data[6] == 'W':\r\n                longitude_degrees = float(long_degrees) * -1\r\n            else:\r\n                longitude_degrees = float(long_degrees)\r\n            longitude_degrees = str(longitude_degrees).strip('0.')\r\n            long_ddd = long_nmea[3:10]\r\n            long_mmm = float(long_ddd) / 60\r\n            long_mmm = str(long_mmm).strip('.0')[:8]\r\n            longitude = longitude_degrees + \".\" + long_mmm\r\n\r\n            print(\"Longitude: \" + longitude + \"Latitude: \" + latitude)\r\n        \"\"\"\r\nexcept serial.SerialException:\r\n    print(\"No GPS connected\")\r\n","repo_name":"RobertoBore/RockBlock2InfluxDB","sub_path":"gps.py","file_name":"gps.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35280295783","text":"\"\"\"affidavit inactive status added\n\nRevision ID: a37f90e6802d\nRevises: d0392ebda924\nCreate Date: 2021-04-28 12:26:09.064247\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\nfrom sqlalchemy import Boolean, String\nfrom sqlalchemy.sql import column, table\n\n\n# revision identifiers, used by Alembic.\nrevision = 'a37f90e6802d'\ndown_revision = 'd0392ebda924'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # Insert affidavit statuses\n\n    affidavit_status_table = table('affidavit_statuses',\n                                   column('code', String),\n                                   column('description', String),\n                                   column('default', Boolean))\n    op.bulk_insert(\n        affidavit_status_table,\n        [\n            {'code': 'INACTIVE', 'description': 'Inactivated affidavit', 'default': False}\n        ]\n    )\n\ndef downgrade():\n    op.execute('delete from affidavit_statuses where code=\\'INACTIVE\\'')\n","repo_name":"bcgov/sbc-auth","sub_path":"auth-api/migrations/versions/a37f90e6802d_affidavit_inactive_status_added.py","file_name":"a37f90e6802d_affidavit_inactive_status_added.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"6182337192","text":"\"\"\"Application definition.\"\"\"\nfrom bocadillo import App, discover_providers, Templates, settings\nfrom tortoise import Tortoise\nfrom tortoise.query_utils import Q\n\nfrom .models import BookSummary\nfrom .pagination import Pagination\n\napp = App()\ntemplates = Templates(app)\ndiscover_providers(\"librarian.providerconf\")\n\n\n@app.on(\"startup\")\nasync def init_db():\n    await Tortoise.init(\n        db_url=str(settings.get(\"DATABASE_URL\")), modules={\"models\": [\"librarian.models\"]}\n    )\n    await Tortoise.generate_schemas()\n\n\n@app.on(\"shutdown\")\nasync def db_cleanup():\n    await Tortoise.close_connections()\n\n\n@app.route(\"/\")\nasync def index(req, res):\n    res.html = await templates.render(\"index.html\")\n\n\n@app.route(\"/search\")\nasync def search(req, res, q: str = None, page: int = 1):\n    if q:\n        words = q.strip().split(\" \")\n        query = 
Q(isbn__in=words)\n for word in words:\n query |= Q(title__icontains=word)\n query |= Q(publisher__icontains=word)\n query |= Q(author__icontains=word)\n summaries = BookSummary.filter(query).order_by(\"-isbn\")\n else:\n summaries = BookSummary.all().order_by(\"-isbn\")\n\n total = await summaries.count()\n paginator = Pagination(summaries, per_page=10, current_page=page)\n summaries = await paginator.paginate()\n\n res.html = await templates.render(\n \"search.html\",\n summaries=summaries,\n q=q,\n page=page,\n paginator=paginator,\n total=total,\n )\n\n\n@app.route(\"/book/{isbn}\")\nasync def book_detail(req, res, isbn):\n summary = await BookSummary.get(isbn=isbn)\n res.html = await templates.render(\"book.html\", summary=summary)\n","repo_name":"kk6/librarian","sub_path":"librarian/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22033678551","text":"import sqlalchemy as sa\nimport pandas as pd\nfrom io import StringIO\n#from config import *\n\ndef to_sql(gen):\n\tengine = sa.create_engine('postgresql://PG:PGAH17@data.seia.org:5432/seia')\n\tcon = engine.raw_connection()\n\tcursor = con.cursor()\n\n\tschema = 'markets'\n\tif_exists = 'replace'\n\tsep='\\t'\n\tencoding='utf8'\n\ttable='eia_826_nem'\n\tgen[:0].to_sql(table, engine, schema=schema, if_exists=if_exists, index=False)\n\t\n\toutput = StringIO()\n\tgen.to_csv(output, sep=sep, header=False, encoding=encoding, index=False)\n\toutput.seek(0)\n\n\tcursor.copy_from(output, schema+'.'+table, sep=sep, null='')\n\tcon.commit()\n\tcon.close()\n\tprint('Sent.')\n\treturn\n","repo_name":"AaronHolm/EIA-861","sub_path":"scripts/nem/to_sql.py","file_name":"to_sql.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23566693531","text":"def add(data, width, num):\r\n if width not in data:\r\n data[width] = 0\r\n data[width] += num\r\n\r\ndef assign(data, K):\r\n # data is a dict from `width` to `number`\r\n width = max(data.keys())\r\n num = data[width]\r\n k = min(K, num)\r\n if (width % 2) == 0:\r\n ma = width // 2\r\n mi = ma - 1\r\n else:\r\n ma = (width-1) // 2\r\n mi = ma\r\n \r\n # Remove used\r\n data[width] -= k\r\n if data[width] == 0:\r\n del data[width]\r\n \r\n # Add new\r\n if ma > 0:\r\n add(data, ma, k)\r\n if mi > 0:\r\n add(data, mi, k)\r\n\r\n return ma, mi, K-k\r\n\r\ndef answer(line):\r\n N, K = [int(x) for x in line.split()]\r\n data = {N:1}\r\n while K > 0:\r\n ma, mi, K = assign(data, K)\r\n return str(ma) + \" \" + str(mi)\r\n\r\nassert( answer(\"4 2\") == \"1 0\" )\r\nassert( answer(\"5 2\") == \"1 0\" )\r\nassert( answer(\"6 2\") == \"1 1\" )\r\nassert( answer(\"1000 1000\") == \"0 0\" )\r\nassert( answer(\"1000 1\") == \"500 499\" )\r\n\r\nimport sys\r\nwith open(sys.argv[1]) as input:\r\n number = int(next(input))\r\n data = [line.strip() for line in input]\r\n\r\nif len(data) != number:\r\n raise Exception(\"Read {} but expected {}\".format(len(data), number))\r\n\r\nwith open(sys.argv[2], \"w\") as output:\r\n for i, N in enumerate(data):\r\n print(\"Case #{}: {}\".format(i+1, answer(N)), file=output)\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/122.py","file_name":"122.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
+{"seq_id":"21465155179","text":"with open('input.txt') as infile:\n logistics_file = infile.readlines()\n\nsplit_index = logistics_file.index('\\n')\n\nschematic = logistics_file[:split_index-1] # -1 removes the 1 2 3 4 5... index line\ninstructions = logistics_file[split_index+1:] # +1 removes empty line\n\nstacks_count = int(len(schematic[0]) / 4)\n\nstacks = [[] for _ in range(stacks_count)]\n\nfor row in schematic[::-1]:\n for i in range(stacks_count):\n print(row[i * 4 + 1], end=' ')\n crate = row[i * 4 + 1]\n if crate != ' ':\n stacks[i].append(crate)\n print()\n\nprint()\n\nfor stack in stacks:\n print(stack)\n\nprint()\n\nfor line in instructions:\n _, amount, _, source, _, dest = line.strip().split(' ')\n amount, source, dest = int(amount), int(source) - 1, int(dest) - 1\n for _ in range(amount):\n if len(stacks[source]) > 0:\n stacks[dest].append(stacks[source].pop())\n\nfor stack in stacks:\n print(stack)\n\nprint()\n\ntop = [stack[-1] for stack in stacks]\n\nprint(f'top row: {top}') # top row: ['Z', 'R', 'L', 'J', 'G', 'S', 'C', 'T', 'R']\nprint(f'=> {\"\".join(top)}') # => ZRLJGSCTR\n","repo_name":"DragonFighter603/AdventOfCode","sub_path":"AOC-2022/day05/1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"11040154327","text":"################################### \r\n#### 2D NumPy Array #####\r\n###################################\r\nbaseball = [[180, 78.4],\r\n [215, 102.7],\r\n [210, 98.5],\r\n [188, 75.2]]\r\n# Import numpy\r\nimport numpy as np\r\n\r\n# Create a 2D numpy array from baseball: np_baseball\r\nnp_baseball=np.array(baseball)\r\n\r\n# Print out the type of np_baseball\r\nprint(np_baseball)\r\n\r\n# Print out the shape of np_baseball\r\nprint(type(np_baseball))\r\nprint(np_baseball.shape) \r\n# for shape (How many row '---', How many column '|' )\r\n\r\n###################################### \r\n#### 2D Slicing and dicing #####\r\n######################################\r\n# The indexes before the comma refer to the rows, \r\n# while those after the comma refer to the columns. 
\r\n# baseball is available as a regular list of lists\r\n\r\n# Create np_baseball (2 cols)\r\nnp_baseball = np.array(baseball)\r\n\r\n# Print out the 50th row of np_baseball\r\nprint(np_baseball[49,:])\r\n\r\n# Select the entire second column of np_baseball: np_weight_lb\r\nnp_weight_lb=np_baseball[:,1]\r\n\r\n# Print out height of 124th player\r\nprint(np_baseball[123,0])\r\n\r\n################################\r\n#### 2D Arithmetic #####\r\n################################\r\nnp_mat = np.array([[1, 2],\r\n [3, 4],\r\n [5, 6]])\r\n\r\nnp_mat * 2 # All element are multipy by 2\r\nnp_mat + np.array([10, 5]) # 1st column add 10, 2nd column add 5\r\nnp_mat + np_mat # Element wise addition since same shape\r\n\r\n##################################\r\n#### Basic statistics #####\r\n##################################\r\n\r\nimport numpy as np\r\nx = [1, 4, 8, 10, 12]\r\nnp.mean(x)\r\nnp.median(x)\r\n\r\nstddev = np.std(x)\r\nprint(\"Standard Deviation: \" + str(stddev))\r\n\r\ncorr = np.corrcoef(x[0],x[1])\r\nprint(\"Correlation: \" + str(corr))","repo_name":"GuileLiz/Data_camp","sub_path":"2D_NumPy_Array .py","file_name":"2D_NumPy_Array .py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6323683246","text":"import numpy as np\nimport os\n\nfrom nomad.metainfo import (\n Quantity,\n SubSection,\n Section)\nfrom nomad.datamodel.data import ArchiveSection\n\nfrom .microscope import TEMMicroscopeTechnique, MicroscopeConfiguration2, Image\n\nfrom baseclasses.helper.utilities import get_parameter\n\n\nclass Illumination(ArchiveSection):\n magnification = Quantity(\n type=np.dtype(np.float64),\n a_eln=dict(component='NumberEditQuantity'))\n\n illumination_index = Quantity(\n type=np.dtype(np.float64),\n a_eln=dict(component='NumberEditQuantity'))\n\n spot_size = Quantity(\n type=np.dtype(np.float64),\n unit=\"nm\",\n a_eln=dict(component='NumberEditQuantity', defaultDisplayUnit='nm'))\n\n camera_length = Quantity(\n type=np.dtype(np.float64),\n unit=\"m\",\n a_eln=dict(component='NumberEditQuantity', defaultDisplayUnit='m'))\n\n scan_rotation = Quantity(\n type=np.dtype(np.float64),\n unit=\"deg\",\n a_eln=dict(component='NumberEditQuantity', defaultDisplayUnit='deg'))\n\n\nclass HAADEScan(Image):\n m_def = Section(label_quantity='file_name')\n\n file_name = Quantity(\n type=str,\n a_eln=dict(component='StringEditQuantity'))\n\n image_length = Quantity(\n type=np.dtype(np.float64),\n unit=\"px\",\n a_eln=dict(component='NumberEditQuantity', defaultDisplayUnit='px'))\n\n image_width = Quantity(\n type=np.dtype(np.float64),\n unit=\"px\",\n a_eln=dict(component='NumberEditQuantity', defaultDisplayUnit='px'))\n\n samples_per_pixel = Quantity(\n type=np.dtype(np.float64),\n unit=\"px\",\n a_eln=dict(component='NumberEditQuantity', defaultDisplayUnit='px'))\n\n pixel_size = Quantity(\n type=np.dtype(np.float64),\n unit=\"nm\",\n a_eln=dict(component='NumberEditQuantity', defaultDisplayUnit='nm'))\n\n frame_time = Quantity(\n type=np.dtype(\n np.float64), unit=\"minute\", a_eln=dict(\n component='NumberEditQuantity', defaultDisplayUnit='minute'))\n\n noise_reduction = Quantity(\n type=str,\n a_eln=dict(component='EnumEditQuantity', props=dict(\n suggestions=['Frame Avg'])))\n\n microscope_configuration = SubSection(\n section_def=MicroscopeConfiguration2,\n label_quantity='file_name')\n illumination = SubSection(\n section_def=Illumination,\n label_quantity='file_name')\n\n\nclass 
TEM_HAADE(TEMMicroscopeTechnique):\n\n @staticmethod\n def get_data(file_name):\n if file_name.lower().endswith(\".tif\"):\n\n import hyperspy.api as hs\n try:\n tif_file = hs.load(file_name)\n illumination = Illumination(\n magnification=get_parameter(\n [\"Acquisition_instrument\", \"SEM\", \"magnification\"], tif_file.metadata),\n illumination_index=get_parameter(\n [\"CZ_SEM\", \"ap_ill_index\"], tif_file.original_metadata, 1),\n spot_size=get_parameter(\n [\"CZ_SEM\", \"ap_spot_size\"], tif_file.original_metadata, 1),\n camera_length=get_parameter(\n [\"CZ_SEM\", \"ap_camera_length\"], tif_file.original_metadata, 1),\n scan_rotation=get_parameter(\n [\"CZ_SEM\", \"ap_scanrotation\"], tif_file.original_metadata, 1)\n )\n\n microscope_configuration = MicroscopeConfiguration2(\n x_value=get_parameter(\n [\"Acquisition_instrument\", \"SEM\", \"Stage\", \"x\"], tif_file.metadata),\n y_value=get_parameter(\n [\"Acquisition_instrument\", \"SEM\", \"Stage\", \"y\"], tif_file.metadata),\n z_value=get_parameter(\n [\"Acquisition_instrument\", \"SEM\", \"Stage\", \"z\"], tif_file.metadata),\n alpha_tilt=get_parameter(\n [\"CZ_SEM\", \"ap_stage_at_t\"], tif_file.original_metadata, 1),\n beta_tilt=get_parameter(\n [\"CZ_SEM\", \"ap_stage_at_m\"], tif_file.original_metadata, 1)\n )\n\n image_section = HAADEScan(\n file_name=os.path.basename(file_name),\n image_length=get_parameter(\n [\"ImageLength\"], tif_file.original_metadata),\n image_width=get_parameter(\n [\"ImageWidth\"], tif_file.original_metadata),\n samples_per_pixel=get_parameter(\n [\"SamplesPerPixel\"], tif_file.original_metadata),\n pixel_size=get_parameter(\n [\"CZ_SEM\", \"ap_pixel_size\"], tif_file.original_metadata, 1),\n frame_time=get_parameter(\n [\"CZ_SEM\", \"ap_frame_time\"], tif_file.original_metadata, 1),\n noise_reduction=get_parameter(\n [\"CZ_SEM\", \"dp_noise_reduction\"], tif_file.original_metadata, 1),\n microscope_configuration=microscope_configuration,\n illumination=illumination\n )\n return image_section\n\n except Exception as e:\n print(e)\n return None\n\n def normalize(self, archive, logger):\n super(TEM_HAADE, self).normalize(archive, logger)\n","repo_name":"RoteKekse/nomad-baseclasses","sub_path":"baseclasses/characterizations/electron_microscopy/TEM_HAADE_detector.py","file_name":"TEM_HAADE_detector.py","file_ext":"py","file_size_in_byte":5393,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"17490160948","text":"import requests\nfrom bs4 import BeautifulSoup\nimport re\nimport pandas as pd\nfrom gtts import gTTS \n\ndef sliceToText(text, htmlMarker):\n searchExp = htmlMarker + \">[A-Za-z\\.,0-9+ ]*<\"\n strippedDown = re.search(searchExp, str(text), flags=0).group()\n return strippedDown[len(htmlMarker)+1:len(strippedDown)-1]\n\ndef pullData():\n url = \"https://www.nytimes.com/interactive/2020/us/covid-19-vaccine-doses.html\";\n page = requests.get(url)\n soup = BeautifulSoup(page.content, 'html.parser')\n \n table = soup.find(\"div\", class_=\"g-table-div g-eligibility-table\")\n \n states = table.find_all(\"td\", class_=\"g-cell g-name\")\n ages = table.find_all(\"p\", class_=\"g-use-cat-color\")\n otherMarkers = table.find_all(\"td\", class_=\"g-cell g-col\")\n listFrame = []\n \n for index, state in enumerate(states):\n tempList = [sliceToText(state, \"full\\\"\")]\n \n age = sliceToText(ages[index], \"\")\n \n if(age != ''):\n age = age[:-1]\n else:\n age = \"None\"\n \n tempList.append(age)\n \n for i in range(3):\n mark = 
sliceToText(otherMarkers[i + 3*index], \"p\")\n if(mark == ''):\n mark = 'No'\n \n tempList.append(mark)\n \n listFrame.append(tempList)\n \n return pd.DataFrame(listFrame, columns=['State', 'Age', 'Healthcare', 'EssentialWorker', \"HighRisk\"])\n\ndef generateMessages(saveDirectory):\n stateData = pullData()\n \n for index, row in stateData.iterrows():\n \n eligibilityList = []\n eligibilityListSpan = []\n \n if(row['Age'] != 'None'):\n eligibilityList.append('people over the age of {}'.format(row['Age']))\n eligibilityListSpan.append('personas mayores de {} años'.format(row['Age']))\n \n else:\n eligibilityList.append('people of any age')\n eligibilityListSpan.append('personas de cualquier edad')\n\n \n if(row['EssentialWorker'] == 'Yes'):\n eligibilityList.append('some essential workers')\n eligibilityListSpan.append('algunos trabajadores esenciales')\n\n \n if(row['HighRisk'] == 'Yes'):\n eligibilityList.append('some high risk individuals')\n eligibilityListSpan.append('algunas personas de alto riesgo')\n\n \n message = \"Hello. This is an automated reminder that in your state, {}, \".format(row['State'])\n spanMessage = \"Hola. Este es un recordatorio automatizado de que en su estado, {}, \".format(row['State'])\n \n for index, item in enumerate(eligibilityList):\n if index == len(eligibilityList)-1 and index > 0:\n message += 'and '\n spanMessage += 'y '\n \n message += item + ', '\n spanMessage += eligibilityListSpan[index] + ', '\n \n message = message[:-2] + ' may be eligible for the coronavirus vaccine. If you think you qualify and want assistance registering for a vaccination, call 1-800-123-4567 to speak with a volunteer who will help you sign up.'\n spanMessage = spanMessage[:-2] + ' puede ser elegible para la vacuna contra el coronavirus. 
Si cree que califica y desea asistencia para registrarse para una vacunación, llame a 1-800-123-4567 para hablar con un voluntario que lo ayudará a inscribirse.'\n \n recording_en = gTTS(text=message, lang='en', slow=False)\n recording_es = gTTS(text=spanMessage, lang='es', slow=False)\n \n with open('{}/{}.mp3'.format(saveDirectory, row['State']), 'wb') as fp:\n recording_en.write_to_fp(fp)\n recording_es.write_to_fp(fp)\n \n \nif __name__ == '__main__':\n generateMessages(\"/Users/Ari/Desktop/VaccineMessages\")","repo_name":"aditidam/VaccinationVolunteers","sub_path":"VaccineNotification.py","file_name":"VaccineNotification.py","file_ext":"py","file_size_in_byte":3821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29850463532","text":"from copy import deepcopy\n\nfrom datadog_checks.base import OpenMetricsBaseCheckV2\n\nfrom .metrics import METRIC_MAP, construct_metrics_config\n\nREQUIRED_TAGS = [\n \"vendor:rapdev\",\n]\n\n\nclass InfluxdbCheck(OpenMetricsBaseCheckV2):\n\n __NAMESPACE__ = \"rapdev.influxdb\"\n\n def __init__(self, name, init_config, instances):\n super(InfluxdbCheck, self).__init__(name, init_config, instances)\n self.instance[\"tags\"] = REQUIRED_TAGS + self.instance.get(\"tags\", [])\n self.instance[\"tags\"].append(\n \"influxdb_endpoint:{}\".format(self.instance.get(\"openmetrics_endpoint\"))\n )\n\n def check(self, _):\n self.gauge(\n \"datadog.marketplace.rapdev.influxdb\",\n 1,\n tags=[\n \"influxdb_endpoint:{}\".format(self.instance.get(\"openmetrics_endpoint\"))\n ],\n )\n super(InfluxdbCheck, self).check(_)\n\n def get_default_config(self):\n metric_map = deepcopy(METRIC_MAP)\n return {\"metrics\": construct_metrics_config(metric_map)}\n","repo_name":"gjanco/mezmo-dd-marketplace","sub_path":"rapdev_influxdb/datadog_checks/rapdev_influxdb/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4025053970","text":"n, m = map(int,input().split())\n\nnSum = 0\nmSum = 30-m\n\nfor i in range(n, 10):\n if i >= 10:\n break\n else:\n if i % 2 == 0:\n nSum += 30\n else:\n nSum += 31\n\n\nprint(nSum+mSum)","repo_name":"JinleeJeong/Algorithm","sub_path":"20년 3월/1716.py","file_name":"1716.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"45060753740","text":"import random\n\nclass Hand():\n \"\"\"\n Models the hand of a player.\n \"\"\"\n\n def __init__(self, player):\n '''\n Initializes the hand\n\n :param player: The player to whom the hand belongs\n '''\n self.player = player\n self.cards = []\n\n\n def __repr__(self):\n '''\n Returns the representation of the hand.\n '''\n return str(self.cards)\n\n\n def is_empty(self):\n '''\n Returns True if the hand is empty and False otherwise.\n '''\n if self.cards:\n return False\n else:\n return True\n \n\n def get_cards_in_hand(self):\n '''\n Returns a list with all cards in the hand\n '''\n return self.cards\n\n\n def add_card(self, card):\n '''\n Adds a card to the hand.\n\n :param card: The card that should be added to the hand\n '''\n self.cards.append(card)\n \n\n def remove_card(self, card):\n '''\n Removes the specified card from the hand\n\n :param card: The card that should be removed from the hand\n '''\n if card in self.cards:\n self.cards.remove(card)\n\n\n def get_random_card(self):\n '''\n Return a random card.\n '''\n 
return random.choice(self.get_cards_in_hand())\n\n\n    def get_lowest_card(self):\n        '''\n        Return the lowest card in the hand\n        '''\n        options = self.get_cards_in_hand()\n        lowest_card = options[0]\n        for card in options:\n            if card.get_ranking() < lowest_card.get_ranking():\n                lowest_card = card\n        return lowest_card\n\n\n    def get_highest_card(self):\n        '''\n        Return the highest card in the hand\n        '''\n        options = self.get_cards_in_hand()\n        highest_card = options[0]\n        for card in options:\n            if card.get_ranking() > highest_card.get_ranking():\n                highest_card = card\n        return highest_card\n\n\n    def get_lowest_card_that_wins_defence(self, attacking_card):\n        '''\n        Return the lowest card from the player's hand that beats the given card, if there is one.\n        '''\n        return_card = None\n        options = self.get_cards_in_hand()\n        for card in options:\n            if return_card:\n                # keep the lowest winning card seen so far\n                if card.get_ranking() < return_card.get_ranking() and card.get_ranking() >= attacking_card.get_ranking():\n                    return_card = card\n            elif card.get_ranking() >= attacking_card.get_ranking():\n                return_card = card\n\n        # If a winning card was found, return it\n        if return_card:\n            return return_card\n        else:\n            return None\n\n\n    def get_lowest_card_that_wins_attack(self, defending_card):\n        '''\n        Return the lowest card from the player's hand that beats the given card, if there is one.\n        '''\n        return_card = None\n        options = self.get_cards_in_hand()\n        for card in options:\n            if return_card:\n                # keep the lowest winning card seen so far\n                if card.get_ranking() < return_card.get_ranking() and card.get_ranking() > defending_card.get_ranking():\n                    return_card = card\n            elif card.get_ranking() > defending_card.get_ranking():\n                return_card = card\n\n        # If a winning card was found, return it\n        if return_card:\n            return return_card\n        else:\n            return None","repo_name":"BorisWinter/durak","sub_path":"Hand.py","file_name":"Hand.py","file_ext":"py","file_size_in_byte":3436,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"3153424027","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\n#import libraries and modules\nimport io\nimport pandas as pd\nimport re\nimport spacy\nimport nltk\nimport numpy as np\nimport tensorflow as tf\nimport string\nimport nltk.tokenize\n#NLP packages\nfrom nltk.corpus import stopwords\npunc = string.punctuation\nstop_words = set(stopwords.words('english'))\n#Supervised learning\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import classification_report\n##Deep learning libraries and APIs\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\n\n\n# In[3]:\n\n\n# Load the dataset into a Pandas DataFrame\ndf = pd.read_csv('DataLatest.csv')\n\n\n# In[4]:\n\n\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\n# Initialize the VADER sentiment analyzer\nanalyzer = SentimentIntensityAnalyzer()\n\n\n# In[5]:\n\n\n# Convert Headline and Summary columns to strings and drop any rows with NaN values\ndf['Headline'] = df['Headline'].astype(str)\ndf['Summary'] = df['Summary'].astype(str)\ndf = df.dropna(subset=['Summary'])\n\n# Apply the VADER sentiment analyzer to the Headline column\ndf['headline_sentiment'] = df['Headline'].apply(lambda x: analyzer.polarity_scores(x)['compound'])\n\n# Apply the VADER sentiment analyzer to the Summary column\ndf['summary_sentiment'] = 
df['Summary'].apply(lambda x: analyzer.polarity_scores(x)['compound'])\n\n# Calculate the overall sentiment score as the average of headline and summary sentiment scores\ndf['sentiment_score'] = (df['headline_sentiment'] + df['summary_sentiment']) / 2\n\n\n# In[6]:\n\n\n# Define a function to assign sentiment labels based on the sentiment score\ndef label_sentiment(score):\n if score > 0:\n return 'Positive'\n elif score < 0:\n return 'Negative'\n else:\n return 'Neutral'\n\n# Apply the function to the sentiment_score column to create the Sentiment column\ndf['Sentiment'] = df['sentiment_score'].apply(label_sentiment)\n\n\n# In[7]:\n\n\ndf[\"text\"] = df[\"Headline\"] + \" \" + df[\"Summary\"]\ndf.drop('Ticker', inplace=True, axis=1)\ndf.drop('Headline', inplace=True, axis=1)\ndf.drop('Datetime', inplace=True, axis=1)\ndf.drop('Summary', inplace=True, axis=1)\ndf.drop('Source', inplace=True, axis=1)\ndf.drop('URL', inplace=True, axis=1)\ndf.drop('headline_sentiment', inplace=True, axis=1)\ndf.drop('sentiment_score', inplace=True, axis=1)\ndf.drop('summary_sentiment', inplace=True, axis=1)\nprint(df)\n\n\n# In[8]:\n\n\nimport nltk\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nimport string\nfrom nltk.stem import WordNetLemmatizer\n\n#make a copy of the dataframe\ndata = df.copy()\n\n# function to perform all pre-processing tasks\ndef preprocess(text):\n # iterate over each string object in the Series and apply lower() method\n text = text.apply(lambda x: x.lower())\n \n # tokenize text into individual words\n tokens = text.apply(lambda x: word_tokenize(x))\n \n # remove stop words\n stop_words = set(stopwords.words('english'))\n filtered_tokens = tokens.apply(lambda x: [word for word in x if word not in stop_words])\n \n # lemmatize tokens\n lemmatizer = nltk.WordNetLemmatizer()\n lemmatized_tokens = filtered_tokens.apply(lambda x: [lemmatizer.lemmatize(word) for word in x])\n \n # remove punctuations\n table = str.maketrans('', '', string.punctuation)\n clean_tokens = lemmatized_tokens.apply(lambda x: [word.translate(table) for word in x])\n \n # join the tokens back into a string\n text = clean_tokens.apply(lambda x: ' '.join(x))\n return text\n\n#apply the data preprocessing function\ndata['text'] = preprocess(data['text'])\ndata\n\n\n# In[17]:\n\n\nfrom sklearn.model_selection import train_test_split\nfrom keras.preprocessing.text import Tokenizer\nfrom keras_preprocessing.sequence import pad_sequences\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Embedding, LSTM\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score\n\n\n# In[25]:\n\n\n# Split the data into training and testing sets\nX = data['text'].values\ny = pd.get_dummies(data['Sentiment']).values\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)\n\n# Tokenize the text data\ntokenizer = Tokenizer(num_words=5000)\ntokenizer.fit_on_texts(X_train)\nX_train = tokenizer.texts_to_sequences(X_train)\nX_test = tokenizer.texts_to_sequences(X_test)\n\n# Pad the tokenized data to have the same length\nmaxlen = 100\nX_train = pad_sequences(X_train, padding='post', maxlen=maxlen)\nX_test = pad_sequences(X_test, padding='post', maxlen=maxlen)\n\n# Define the deep learning model\nmodel = Sequential()\nmodel.add(Embedding(5000, 128, input_length=maxlen))\nmodel.add(LSTM(128))\nmodel.add(Dense(3, activation='softmax'))\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n# Train the 
model\nmodel.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=32)\n\n\n# In[26]:\n\n\n# Evaluate the model\ny_pred = model.predict(X_test)\ny_pred = np.argmax(y_pred, axis=1)\ny_test = np.argmax(y_test, axis=1)\n\naccuracy = accuracy_score(y_test, y_pred)\nprecision = precision_score(y_test, y_pred, average='weighted')\nrecall = recall_score(y_test, y_pred, average='weighted')\nf1 = f1_score(y_test, y_pred, average='weighted')\n\nprint(\"Accuracy:\", accuracy)\nprint(\"Precision:\", precision)\nprint(\"Recall:\", recall)\nprint(\"F1 score:\", f1)\n\n\n# In[27]:\n\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix\n\n# Get the confusion matrix\ncm = confusion_matrix(y_test, y_pred)\nlabels = np.unique(y_test)\n# Define the color map\ncmap = plt.get_cmap('Blues')\n\n# Plot the confusion matrix with colors\nsns.heatmap(cm, annot=True, cmap=\"Blues\", fmt=\"d\", xticklabels=labels, yticklabels=labels)\nplt.title(\"Confusion Matrix\")\nplt.xlabel(\"Predicted\")\nplt.ylabel(\"Actual\")\nplt.show()\n\n\n# In[33]:\n\n\nnew_headline = [\"Shares of Amazon.com Inc. advanced 2.35% to $104.98 Wednesday, on what proved to be an all-around dismal trading session for the stock market, with the S&P 500 Index falling 0.38% to 4,055.99 and Dow Jones Industrial Average falling 0.68% to 33,301.87\"]\n##prepare the sequences of the sentences in question\nsequences = tokenizer.texts_to_sequences(new_headline)\n# pad to the same length used at training time (the model's input_length is 100)\npadded_seqs = pad_sequences(sequences, maxlen=100, padding='post', truncating='post')\nprint(model.predict(padded_seqs))\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"Deekreddy18/Stock-Market-Prediction-using-sentimental-analysis","sub_path":"Deep Learning.py","file_name":"Deep Learning.py","file_ext":"py","file_size_in_byte":6569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9340768289","text":"# Glossary\n\n\n\nrivers = {\n\t'nile': 'egypt',\n\t'tigris': 'turkey',\n\t'euphrates': 'syria',\n}\n\nfor key, value in rivers.items():\n\tprint(f\"The {key.title()} river runs through {value.title()}.\")\n\nfor key in rivers.keys():\n\tprint(key.title())\n\nfor value in rivers.values():\n\tprint(value.title())","repo_name":"TimothyHorth/python_crash_course","sub_path":"chapter_6_Dictionaries/glossary.py","file_name":"glossary.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10969071146","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec  9 19:33:26 2021\n\n@author: bonnyaigergo\n\nBlack Hole Algorithm (BHA)\n\nhttps://github.com/mMarzeta/BlackHole_Swarm_Alghorithm/blob/master/BH.py\nhttps://www.researchgate.net/publication/281786410_Black_Hole_Algorithm_and_Its_Applications/link/570df45108ae2b772e43305a/download\nhttps://www.sciencepubco.com/index.php/JACST/article/view/4094\n\nBest = Black Hole\nevent horizon\n\nmaybe an inertia can help here as well\n\"\"\"\n\nimport numpy as np\n\n    \ndef BHA(objective_func, \n        func_bounds, \n        population_size,\n        runs=1, \n        iterations=50,\n        patience=10,\n        epsilon=1E-10,\n        verbose=0):\n\n    dimensions = len(func_bounds)\n    min_bound = np.asarray([min(dim) for dim in func_bounds])\n    max_bound = np.asarray([max(dim) for dim in func_bounds])\n    dimension_range = np.fabs(min_bound - max_bound)\n    \n    for run_num in range(runs):\n        population = min_bound + np.random.rand(population_size, dimensions) * dimension_range\n        fitness = 
np.asarray([objective_func(individual) for individual in population]) \n best_idx = np.argmin(fitness)\n best = population[best_idx]\n best_list = [best]\n for ite_num in range(iterations):\n for idx in range(population_size):\n population[idx] += np.random.rand() * (best - population[idx])\n population[idx] = np.clip(a=population[idx], a_min=min_bound, a_max=max_bound)\n event_horizon = fitness[best_idx] / sum(fitness)\n \n for idx in range(population_size):\n if np.linalg.norm(best - population[idx]) < event_horizon and idx != best_idx:\n population[idx] = min_bound + np.random.rand(dimensions) * dimension_range\n fitness = np.asarray([objective_func(individual) for individual in population])\n best_idx = np.argmin(fitness)\n best = population[best_idx]\n \n if patience != None and ite_num >= patience:\n if (np.asarray([abs(element-best) for element in best_list[-patience:]]) < [epsilon]*dimensions).all():\n break\n best_list.append(best)\n yield run_num, ite_num, best, fitness[best_idx]\n \nif __name__ == \"__main__\":\n \n import matplotlib.pyplot as plt\n \n obj_func = lambda x: sum(x**2)/len(x)\n func_bounds = [(-100,100)]*2\n runs = 5\n result = list(BHA(objective_func=obj_func, \n func_bounds=func_bounds, \n population_size=20, \n runs=runs, \n iterations=300, \n patience=50))\n \n for run_num in range(runs):\n run, gen, x, f = zip(*[element for element in result if element[0]==run_num])\n plt.yscale('log', base=2) \n plt.plot(f, label='run_num={}'.format(run_num))\n plt.legend() ","repo_name":"GergoGit/Evolutionary-Algorithms","sub_path":"Metaheuristics1/BHA.py","file_name":"BHA.py","file_ext":"py","file_size_in_byte":2930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25090478944","text":"\"\"\"\n - Avoid repeating variable name in compound if statement\n\"\"\"\n# this is not pythonic way to do it\nis_found = False\nname = 'John'\nif name == 'James' or name == 'John' or name == 'Garry':\n is_found = True\n\n# this is way to go\nname = 'John'\nis_found = name in ('James', 'John', 'Garry')\n\n\n\n\n\n\n\n\n\"\"\"\n - Avoid placing conditional branch code on the same line as\n the colon (:)\n\"\"\"\n\n# this is not pythonic way to do it\n# name = 'James'\n# profession = 'actor'\n# if name: print(name)\n# print(profession)\n\n# this is way to go\nname = 'James'\nprofession = 'actor'\nif name:\n print(name)\nprint(profession)\n\n\n\n\n\n\n\n\n\"\"\"\n - Avoid comparing directly to True, False\n - Rely on implicit \"truthiness\"\n - The following are considered False:\n - None\n - False\n - zero for numeric types\n - empty sequences\n - empty dictionaries\n - a value of 0 or False returned when either __len__ or __nonzero__ is called\n - Everything else is considered True (and thus most things are implicitly True).\n\"\"\"\nmessage = None\nif message:\n print(f\"Detailed description: {message}\")\nelse:\n print(f\"Message is empty!\")\n","repo_name":"stojkovm/PT2022","sub_path":"Day01/Code_Samples/D1_04_Control_Flow/D1_05_idiomatic_control_flow.py","file_name":"D1_05_idiomatic_control_flow.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22963202666","text":"import matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport traceback\nimport seaborn as sns; sns.set()\nsns.set_style(\"ticks\")\nimport matplotlib.ticker as ticker\nfrom matplotlib.ticker import MaxNLocator\nimport 
sys\nimport pandas as pd\n\ndef plot_boxes(df):\n fig, axes = plt.subplots(nrows=2, ncols=2, sharey=False, sharex=True, figsize=(10,10))\n titles=\"1% 2% 3% 3%/Q\".split()\n categories=\"01 02 03 03_q\".split()\n i = 0\n flat = axes.flatten()\n for axi in flat:\n cat = categories[i]\n df2 = df.loc[df['Category'] == cat]\n for tool in [\"MASH\", \"VAPOR\", \"BLAST\"]:\n df3 = df2.loc[df2['Tool'] == tool] \n print(cat, tool, np.mean(df3['Score']))\n for perc in [75, 95, 99]:\n print(\"\\t\",np.percentile(df3['Score'], perc))\n ax = sns.boxplot(x=\"Tool\", y='Score', data=df2, ax=axi)\n ax.set_title(titles[i])\n ax.set_ylabel(\"\")\n ax.set_xlabel(\"\")\n ax.yaxis.set_major_locator(ticker.MultipleLocator(max(df[\"Score\"])/10))\n ax.yaxis.set_major_locator(MaxNLocator(integer=True))\n i += 1\n\n params = {'mathtext.default': 'regular' } \n plt.rcParams.update(params)\n\n fig.add_subplot(111, frameon=False)\n plt.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')\n plt.grid(False)\n plt.subplots_adjust(left=0.07, right=0.95, top=0.95, bottom=0.07)\n plt.xlabel(\"Tool\",labelpad=7, size=14)\n plt.ylabel(\"$L_A$\",labelpad=7, size=14)\n plt.savefig(\"tool_scores_simulation.pdf\", format=\"pdf\", dpi=300)\n\nheaders = ['Category', 'Tool', 'Score']\ntable = []\nfname = sys.argv[1]\nwith open(fname) as f:\n lines = [l.strip() for l in f]\n c = 0\n for line in lines[1:]:\n spl = line.split()\n cls = spl[0]\n original_distance = float(spl[6])\n mash_scores = [float(j) for j in spl[9].split(\",\")]\n vapor_scores = [float(j) for j in spl[12].split(\",\")]\n blast_scores = [float(j) for j in spl[15].split(\",\")]\n table.append([cls, \"MASH\", mash_scores[0]-original_distance])\n table.append([cls, \"VAPOR\", vapor_scores[0]-original_distance])\n table.append([cls, \"BLAST\", blast_scores[0]-original_distance])\n# if blast_scores[0] < vapor_scores[0]:\n# print(spl)\n print(\"Warning:\", c, \"of\", len(lines[1:]), \"datapoints were invalid. 
Please review the above lines.\")\ndf = pd.DataFrame(table, columns=headers)\n\nplot_boxes(df)\n","repo_name":"connor-lab/vapor_benchmark_simulation","sub_path":"plotting/plot_simulation_boxes.py","file_name":"plot_simulation_boxes.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"73949407555","text":"import os\nimport tqdm\nimport matplotlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom util import read_padded_csv\n\nfrom optimization.pso import PSO\nfrom optimization.fss import FSS\nfrom optimization.abc import ABC\n\nfrom benchmark.stop import StopCriterion\nfrom benchmark.functions.continuous import SphereFunction\nfrom benchmark.functions.continuous import RastriginFunction\nfrom benchmark.functions.continuous import RosenbrockFunction\nfrom benchmark.initializer.continuous import ContinuousInitializer\n\nfrom optimization.fss.strategies import Step\nfrom optimization.pso.strategies import Inertia\nfrom optimization.pso.strategies import Communication\n\ndimensions = 30\nsample_size = 30\nfn_evaluations = 500000\nshould_evaluate = False\nmatplotlib.use('Agg')\n\nobjective_functions = [\n SphereFunction,\n RastriginFunction,\n RosenbrockFunction\n]\n\nmethods = ['pso', 'fss', 'abc']\n\ntarget_directory = os.path.join('results', 'compare')\nplot_directory = os.path.join(target_directory, 'plot')\nregistry_directory = os.path.join(target_directory, 'reg')\nsample_base_path = os.path.join(registry_directory, '{}_{}_b.csv')\nevolution_base_path = os.path.join(registry_directory, '{}_{}_a.csv')\n\nif not os.path.exists('results'):\n os.mkdir('results')\n\nif not os.path.exists(target_directory):\n os.mkdir(target_directory)\n os.mkdir(registry_directory)\n os.mkdir(plot_directory)\n\n\ndef evaluate(objective_function, paths):\n def run_method(method, fn, parameters):\n fn_instance = fn(dimensions)\n method_instance = method(**parameters)\n stop_criterion = StopCriterion.fn_evaluation(fn_evaluations)\n initializer = ContinuousInitializer.uniform_random(fn.bounds, fn.dimensions)\n return method_instance.optimize(fn_instance, initializer, stop_criterion)\n\n for _ in tqdm.tqdm(range(sample_size), total=sample_size):\n results = {\n 'pso': run_method(PSO, objective_function, {\n 'n_particles': 30,\n 'social': 2.05,\n 'cognitive': 2.05,\n 'communication': Communication.socially_connected(),\n 'inertia': Inertia.linear(min_weight=0.4, max_weight=0.9)\n }),\n\n 'fss': run_method(FSS, objective_function, {\n 'n_fishes': 30,\n 'step': Step.linear(individual=(0.1, 0.001),\n volitive=(0.01, 0.001))\n }),\n\n 'abc': run_method(ABC, objective_function, {\n 'colony_size': 30,\n 'trials': 100\n })\n }\n\n for m in methods:\n with open(paths[m]['sp'], 'a+') as f:\n f.write(str(results[m][1]))\n f.write('\\n')\n\n with open(paths[m]['ev'], 'a+') as f:\n tracker = results[m][2]\n evolution = tracker.fn_evaluations.tolist()\n f.write(','.join(map(lambda t: '{:.2f}'.format(t), evolution)))\n f.write('\\n')\n\n\ndef plot(objective_function, paths):\n figure_1 = plt.figure()\n figure_2 = plt.figure()\n samples = np.zeros((sample_size, len(methods)))\n\n sample_figure = figure_1.add_subplot(111)\n evolution_figure = figure_2.add_subplot(111)\n names = list(map(lambda m: m.upper(), methods))\n\n for i, method in tqdm.tqdm(enumerate(methods), total=len(methods)):\n sample = np.genfromtxt(paths[method]['sp'])\n evolution = read_padded_csv(paths[method]['ev'], 0.0)\n evolution[np.isnan(evolution)] = 
0.0\n\n samples[:, i] = sample.copy()\n evolution_figure.plot(evolution.mean(axis=0))\n\n sample_figure.boxplot(samples)\n sample_figure.set_xticklabels(names)\n sample_figure.set_xlabel('Algorithm')\n sample_figure.set_ylabel('Best fitness')\n sample_figure.set_title('Best fitness: {}'.format(objective_function))\n\n evolution_figure.set_xlabel('Function evaluation')\n evolution_figure.set_ylabel('Fitness')\n evolution_figure.legend(names)\n evolution_figure.set_title('Evolution: {}'.format(objective_function))\n\n sample_name = 'sp_{}.png'.format(objective_function)\n evolution_name = 'ev_{}.png'.format(objective_function)\n\n sample_figure.figure.savefig(os.path.join(plot_directory, sample_name))\n evolution_figure.figure.savefig(os.path.join(plot_directory, evolution_name))\n plt.clf()\n plt.close()\n\n\ndef main():\n for objective_function in objective_functions:\n operation = 'Plotting'\n if should_evaluate:\n operation = 'Evaluating on'\n\n print('{} {}'.format(operation, objective_function.__name__))\n\n paths = {\n m: {\n 'sp': sample_base_path.format(m, objective_function.__name__),\n 'ev': evolution_base_path.format(m, objective_function.__name__)\n } for m in methods\n }\n\n if should_evaluate:\n evaluate(objective_function, paths)\n continue\n\n plot(objective_function.__name__, paths)\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"itsmealves/swarm-intelligence","sub_path":"compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":5026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10075695248","text":"import json\nimport glob\nimport ipdb\n\nfile_all = 'data/real_object_placing.json'\nfile_name_size = 'data/class_name_size.json'\nfiles_correct = []\nnames = []\n\n\n\ndef convert(list_containers):\n return ['{}.{}'.format(x[0], x[1]) for x in list_containers]\n\ndef correct(dest_name):\n dest_name = dest_name.replace('_', '')\n map_dest = {\n 'freezer': 'fridge',\n 'bathroom_cabinet': 'bathroomcabinet',\n 'mini-fridge': 'fridge',\n 'filingcabinet': 'cabinet',\n 'tablecloth': 'kitchentable',\n 'placemat': 'kitchentable',\n 'dishrack': 'dishwasher',\n 'oven': 'stove',\n 'loveseat': 'sofa',\n 'couch': 'sofa',\n 'saucepan': 'fryingpan',\n 'painobench': 'bench'\n\n }\n if dest_name not in map_dest:\n return dest_name\n return map_dest[dest_name]\n\ndef correct2(dest_name):\n map_dest = {\n 'barsoap': 'soap',\n 'washingsponge': 'sponge',\n 'dishbowl': 'bowl',\n 'cutleryknife': 'knife',\n 'cutleryfork': 'fork',\n 'glasses': 'spectacles',\n 'pancake': 'food_dessert',\n 'milkshake': 'food_dessert',\n 'carrot': 'food_carrot',\n 'salmon': 'food_fish',\n 'sundae': 'food_dessert',\n 'cupcake': 'food_dessert',\n 'chicken': 'food_chicken',\n 'bananas': 'food_fruit',\n 'bellpepper': 'food_fruit',\n 'poundcake': 'food_cake',\n 'plum': 'food_fruit',\n 'pear': 'food_fruit',\n 'apple': 'food_fruit',\n 'lime': 'food_fruit',\n 'boardgame': 'board_game',\n 'wineglass': 'wine_glass',\n 'waterglass': 'water_glass',\n 'toiletpaper': 'toilet_paper',\n 'pie': 'food_cake',\n 'cuttingboard': 'cutting_board',\n 'toothpaste': 'tooth_paste',\n 'remotecontrol': 'remote_control',\n 'coffeepot': 'coffee_pot',\n 'cereal': 'food_cereal',\n 'condimentbottle': 'food_salt',\n 'condimentshaker': 'food_salt',\n 'oventray': 'tray'\n }\n if dest_name not in map_dest:\n return dest_name\n return map_dest[dest_name]\n\nwith open(file_all, 'r') as f:\n cont = json.load(f)\n\n\nwith open(file_name_size, 'r') as f:\n name_size = 
json.load(f)\n\nall_names = []\nfor i in range(7):\n    file_name = f'data/object_info{i+1}.json'\n    with open(file_name, 'r') as f:\n        map_content = json.load(f)\n    files_correct.append(map_content)\n    names.append(list(map_content.keys()))\n    all_names += names[-1]\nall_names = set(all_names)\n\nreported = []\nobject_info_final = {}\nignore_dest = ['washing_machine', 'pantry']\nfor name in all_names:\n    used_rel = []\n    object_info_final[name] = []\n    newname = correct2(name)\n    if newname not in cont and newname not in reported:\n        # print(\"Missing name\", name)\n        reported.append(name)\n    else:\n        for containers in cont[newname]:\n            dest = correct(containers['destination'])\n            rel = containers['relation']\n\n            if dest not in name_size and dest not in reported:\n                # print(\"Missing dest\", dest)\n                reported.append(dest)\n            else:\n                if rel == 'IN':\n                    rel = 'INSIDE'\n                if rel in ['ON', 'INSIDE']:\n                    if (rel, dest) not in used_rel:\n                        object_info_final[name].append([rel, dest])\n                        used_rel.append((rel, dest))\nwith open('data/object_info_final.json', 'w+') as f:\n    f.write(json.dumps(object_info_final, indent=4))","repo_name":"xavierpuigf/online_watch_and_help","sub_path":"gen_data/correct_init_data.py","file_name":"correct_init_data.py","file_ext":"py","file_size_in_byte":3510,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"7218183434","text":"#python3 move.py 10,10,1000:-10,-10,1000:10,-10,1000\r\nfrom gpiozero import Motor\r\nfrom math import pi, atan\r\nimport time\r\nimport sys\r\nimport getopt\r\nimport numpy as np\r\nfrom picamera import PiCamera\r\nimport requests\r\nfrom ina219 import INA219\r\nfrom ina219 import DeviceRangeError\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.interpolate import make_interp_spline, BSpline\r\n\r\nSHUNT_OHMS = 0.1\r\n\r\nmotor_b = Motor(forward=17, backward=27,pwm=True)\r\nmotor_a = Motor(forward=23, backward=22,pwm=True)\r\n\r\ndef read():\r\n    ina = INA219(SHUNT_OHMS)\r\n    ina.configure()\r\n    return ina.voltage()\r\n\r\ndef send_file(filepath,g):\r\n    with open(filepath, 'rb') as fh:\r\n        mydata = fh.read()\r\n    mydata = bytes(str(g)+'#', encoding = 'utf-8')+mydata\r\n    headers_data={\"Origin\":\"http://scriptlab.net\",\"Referer\":\"http://scriptlab.net/telegram/bots/fpv_rover_bot/\",'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'}\r\n    response = requests.put('http://scriptlab.net/telegram/bots/fpv_rover_bot/relayPhotoViaPut_rover.php',data=mydata,headers=headers_data,params={'file': filepath})\r\n\r\ndef move_motors(command,a,b,t,g,time_last):\r\n    f_log = open('log.txt', 'a')\r\n    f_log.write(time.strftime(\"%Y.%m.%d-%H:%M:%S\", time.localtime())+' c:'+str(command)+' a:'+str(a)+' b:'+str(b)+' t:'+str(t)+' g:'+str(g)+'\\n')\r\n    f_log.close()\r\n    if command==0:#m\r\n        ina = INA219(SHUNT_OHMS)\r\n        ina.configure()\r\n        millis_time=t*1000\r\n        percent_a = a*(-1)\r\n        percent_b = b*(-1)\r\n        millis_current=int(round(time.time() * 1000))\r\n        millis_start = millis_current\r\n\r\n        voltage_values=[]\r\n        voltage_time=[]\r\n\r\n        while millis_current<millis_start+millis_time:\r\n            # soft start: ramp power up, full power after 95% of the ramp\r\n            dampfer=(millis_current-millis_start)/millis_time\r\n            dampfer=1 if dampfer>0.95 else dampfer\r\n            pa=percent_a*dampfer\r\n            pb=percent_b*dampfer\r\n            if (pa>0):\r\n                motor_a.forward(speed=abs(pa))\r\n            else:\r\n                motor_a.backward(speed=abs(pa))\r\n            if (pb>0):\r\n                motor_b.forward(speed=abs(pb))\r\n            else:\r\n                motor_b.backward(speed=abs(pb))\r\n            time.sleep(1/1000)\r\n\r\n            millis_current=int(round(time.time() * 1000))\r\n\r\n            voltage_values += [ ina.voltage() ]\r\n            voltage_time += [ 
(millis_current-millis_start)/1000 ]\r\n\r\n        motor_a.stop()\r\n        motor_b.stop()\r\n\r\n        print(str(min(voltage_values))+' - '+str(max(voltage_values)))\r\n        print(str(round(time.time()-time_last,2))+\": m,\"+str(a)+\",\"+str(b)+\",\"+str(t))\r\n\r\n    if command==1 or command==2:\r\n        if command==2:\r\n            f = open('led_cmd.txt', 'w')\r\n            f.write('1')\r\n            f.close()\r\n        filepath='image.jpg'\r\n        camera = PiCamera()\r\n        camera.rotation=180\r\n        camera.resolution = (int(a), int(b))#1920, 1080\r\n        camera.start_preview()\r\n        time.sleep(1)\r\n        camera.capture(filepath)\r\n        camera.stop_preview()\r\n        camera.close()\r\n        if command==2:\r\n            f = open('led_cmd.txt', 'w')\r\n            f.write('0')\r\n            f.close()\r\n        send_file(filepath,g)\r\n\r\n        print(str(read())+' v\\n'+str(round(time.time()-time_last,2))+\": p,\"+str(int(a))+\",\"+str(int(b))+\",\"+str(t)+\",\")\r\n    if command==3:\r\n        with open('v_log_seldom.txt') as file:\r\n            time_current=int(float(time.time()))\r\n            log_length=60*60*2 # s * min * hours_count\r\n            time_line=[]\r\n            value_line=[]\r\n        with open('v_log_seldom.txt') as file:\r\n            lines = file.readlines()\r\n            time_line=[]\r\n            value_line=[]\r\n            for line in lines:\r\n                val=line.split()\r\n                val_time=time_current-int(float(val[0]))\r\n                value_line.append(float(val[1]))\r\n        time_line=np.arange(0, len(value_line), step=1)\r\n        fig, ax = plt.subplots( nrows=1, ncols=1 )\r\n        ax.plot(time_line,value_line, color = 'blue', linestyle = 'solid',label = 'V')\r\n        fig.savefig('log.png')\r\n        plt.close(fig)\r\n        send_file('log.png',g)\r\n\r\ndef job_lock():\r\n    with open('job.txt','r') as f:\r\n        val=f.read()\r\n        if int(val)==0:\r\n            f.close()\r\n            with open('job.txt','w') as f:\r\n                f.write('1')\r\n                f.close()\r\n            return False\r\n    f.close()\r\n    return True\r\n\r\ndef job_unlock():\r\n    with open('job.txt','w') as f:\r\n        f.write('0')\r\n    f.close()\r\n\r\ndef main(argv):\r\n    if job_lock():\r\n        print('job locked')\r\n        exit()\r\n    time_start=time.time()\r\n    time_last=time_start\r\n    cmd=argv[0].replace(\":\", \";\")\r\n    cmd=cmd.replace(\"m\", \"0\")\r\n    cmd=cmd.replace(\"p\", \"1\")\r\n    cmd=cmd.replace(\"n\", \"2\")\r\n    cmd=cmd.replace(\"v\", \"3\")\r\n    cmd=np.matrix(cmd)\r\n    for cmd_str in cmd:\r\n        c = cmd_str.item(0) # cmd\r\n        a = cmd_str.item(1) # a param\r\n        b = cmd_str.item(2) # b param\r\n        t = cmd_str.item(3) # time\r\n        g = cmd_str.item(4) # group\r\n\r\n        if c==0 and (abs(a)>1 or abs(b)>1 or t>10 or t<0.01):#m\r\n            print(\"m,\"+str(a)+\",\"+str(b)+\",\"+str(t))\r\n            print(\"m values should be between m,-1 to 1,-1 to 1,0.01 to 10\")\r\n            exit()\r\n        if c==1 and (abs(a)>2000 or abs(b)>2000):#p\r\n            print(\"p,\"+str(int(a))+\",\"+str(int(b))+\",\"+str(t))\r\n            print(\"p values should be between p,0-1920,0-1080,0\")\r\n            exit()\r\n\r\n        move_motors(c,a,b,t,g,time_last)\r\n        time_last=time.time()\r\n\r\n    print(str(round(time.time()-time_start,2))+\": Complete\")\r\n    job_unlock()\r\n\r\nif __name__ == \"__main__\":\r\n    main(sys.argv[1:])","repo_name":"format37/fpv_rover","sub_path":"Rover-A/rover.py","file_name":"rover.py","file_ext":"py","file_size_in_byte":7076,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"11424644037","text":"\n\nfrom surprise import Dataset\nfrom surprise import Reader\nfrom surprise.model_selection import cross_validate\n#from surprise import evaluate \nfrom surprise import KNNBasic\nimport pandas as pd\nfrom read2be import Read2Be\n\n\n\nfrom collections import defaultdict\n \n# def get_top_recommendations(predictions, topN=5):\n#     top_recs = defaultdict(list)\n#     for 
uid, iid, true_r, est, _ in predictions:\n# top_recs[uid].append((iid, est))\n \n# for uid, user_ratings in top_recs.items():\n# user_ratings.sort(key = lambda x: x[1], reverse = True)\n# top_recs[uid] = user_ratings[:topN]\n# return top_recs\n\n# def list2df(data):\n# ratings_dict = {\n# \"itemID\": [],\n# \"rating\": [],\n# \"userID\": []\n# }\n# for user in data:\n# for book in user['books']:\n# ratings_dict['rating'].append(book['rate'])\n# ratings_dict['userID'].append(user['username'])\n# ratings_dict['itemID'].append(book['isbn'])\n# return pd.DataFrame(ratings_dict)\n\n# import os, io\n\n# def read_item_names():\n# \"\"\"Read the u.item file from MovieLens 100-k dataset and returns a\n# mapping to convert raw ids into movie names.\n# \"\"\"\n \n# file_name = (os.path.expanduser('~') +\n# '/.surprise_data/ml-100k/ml-100k/u.item')\n# rid_to_name = {}\n# with io.open(file_name, 'r', encoding='ISO-8859-1') as f:\n# for line in f:\n# line = line.split('|')\n# rid_to_name[line[0]] = line[1]\n \n# return rid_to_name\n\n# Convert from user list to ratings dataframe\n\n\n# if __name__ == '__main__':\n# read2be = Read2Be()\n# df = list2df(list(read2be.get_users()))\n# reader = Reader(rating_scale=(0, 10))\n# data = Dataset.load_from_df(df[['userID', 'itemID', 'rating']], reader)\n# trainingSet = data.build_full_trainset()\n\n# sim_options = {\n# 'name': 'cosine',\n# 'user_based': False\n# }\n \n# knn = KNNBasic(sim_options=sim_options)\n# knn.fit(trainingSet)\n# testSet = trainingSet.build_anti_testset()\n# predictions = knn.test(testSet)\n# print(predictions)\n\n# #### Recomendacao feita para cada user \n# top_recommendations = get_top_recommendations(predictions,topN=5)\n# #print(top_recommendations)\n\n\n\n\n\n\n\ndef get_top_recommendations(predictions, topN=5):\n top_recs = defaultdict(list)\n for uid, iid, true_r, est, _ in predictions:\n top_recs[uid].append((iid, est))\n \n for uid, user_ratings in top_recs.items():\n user_ratings.sort(key = lambda x: x[1], reverse = True)\n top_recs[uid] = user_ratings[:topN]\n return top_recs\n\ndef list2df(data):\n ratings_dict = {\n \"itemID\": [],\n \"rating\": [],\n \"userID\": []\n }\n for user in data:\n for book in user['books']:\n ratings_dict['rating'].append(book['rate'])\n ratings_dict['userID'].append(user['username'])\n ratings_dict['itemID'].append(book['isbn'])\n return pd.DataFrame(ratings_dict)\n\n\nclass CollaborativeFiltering:\n def __init__(self):\n self.usersdata = None\n self.top_recommendations = None\n\n def fit(self, usersdata):\n self.usersdata = usersdata\n\n df = list2df(self.usersdata)\n reader = Reader(rating_scale=(0, 10))\n data = Dataset.load_from_df(df[['userID', 'itemID', 'rating']], reader)\n trainingSet = data.build_full_trainset()\n \n sim_options = {\n 'name': 'cosine',\n 'user_based': False\n }\n \n knn = KNNBasic(sim_options=sim_options)\n knn.fit(trainingSet)\n testSet = trainingSet.build_anti_testset()\n predictions = knn.test(testSet)\n self.top_recommendations = get_top_recommendations(predictions,topN=5)\n\n def is_trained(self):\n return (self.top_recommendations != None)\n\n def predict(self, target_user, top_n=5):\n if target_user in self.top_recommendations:\n res = [ f for f,s in self.top_recommendations[target_user]]\n return res\n else:\n raise Exception(f\"Cannot give recommendations for 
'{target_user}'\")","repo_name":"K1llByte/read2be","sub_path":"recommender/src/collab_recommender.py","file_name":"collab_recommender.py","file_ext":"py","file_size_in_byte":4147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40186299011","text":"import base64\nimport hashlib\nimport hmac\nimport json\nimport time\nfrom urllib import parse\n\nfrom ..config import config\nfrom .basenotifier import BaseNotifier as Base\n\n\nclass DingTalkBot(Base):\n def __init__(self):\n self.name = 'DingTalk Bot'\n self.token = config.DD_BOT_TOKEN\n self.retcode_key = 'errcode'\n self.retcode_value = 0\n\n # Fixed by wananing\n def send(self, text, status, desp):\n url = ''\n if config.DD_BOT_TOKEN:\n url = f'https://oapi.dingtalk.com/robot/send?access_token={config.DD_BOT_TOKEN}'\n if config.DD_BOT_SECRET:\n secret = config.DD_BOT_SECRET\n timestamp = int(round(time.time() * 1000))\n secret_enc = secret.encode('utf-8')\n string_to_sign = f'{timestamp}\\n{secret}'\n string_to_sign_enc = string_to_sign.encode('utf-8')\n hmac_code = hmac.new(\n secret_enc, string_to_sign_enc, digestmod=hashlib.sha256\n ).digest()\n sign = parse.quote_plus(base64.b64encode(hmac_code))\n url = f'https://oapi.dingtalk.com/robot/send?access_token={config.DD_BOT_TOKEN}×tamp={timestamp}&sign={sign}'\n\n header = {'Content-Type': 'application/json ;charset=utf-8 '}\n data = {'msgtype': 'text', 'text': {'content': f'{text} {status}\\n\\n{desp}'}}\n data = json.dumps(data)\n return self.push('post', url, data=data, headers=header)\n","repo_name":"Xm798/Genshin-Dailynote-Reminder","sub_path":"dailynotereminder/notifiers/dingtalkbot.py","file_name":"dingtalkbot.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"33151739070","text":"\"\"\"\n守护主线程\n\n#测试主线程是否会等待子线程执行完成以后程序再退出\n设置成为守护主线程 主线程退出后子线程直接销毁不再执行子线程的代码\n\n\"\"\"\nimport threading\nfrom time import sleep\n\n\ndef task():\n for i in range(5):\n print(\"test\", i)\n sleep(.5)\n\n\ndef main():\n # 创建子线程守护主线程\n # 守护主线程方式1\n # sub_thread = threading.Thread(target=task, daemon=True)\n sub_thread = threading.Thread(target=task, daemon=False)\n\n #\n\n # 守护主线程方式2\n sub_thread.setDaemon(True)\n # =======分割线\n\n sub_thread.start()\n sleep(.5)\n print(\"over\")\n\n\nif __name__ == '__main__':\n main()","repo_name":"Mumujane/PythonAdvance","sub_path":"Thread/Introduction/MainThread.py","file_name":"MainThread.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43514931787","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport gtk, os\ngtk.gdk.threads_init()\n\nTRAY_ICON = os.path.dirname(os.path.realpath(__file__)) + '/../img/trayicon.svg'\n\nclass TrayIcon:\n def __init__(self):\n self.tray = gtk.StatusIcon()\n self.tray.set_from_file(TRAY_ICON) \n self.tray.connect('popup-menu', self.on_right_click)\n self.tray.set_tooltip(('ALS Adaptive Brightness'))\n\n def on_right_click(self, icon, event_button, event_time):\n self.make_menu(event_button, event_time)\n\n def make_menu(self, event_button, event_time):\n menu = gtk.Menu()\n\n # add quit item\n quit = gtk.MenuItem(\"Quit\")\n quit.show()\n menu.append(quit)\n quit.connect('activate', gtk.main_quit)\n\n menu.popup(None, None, gtk.status_icon_position_menu,\n event_button, event_time, self.tray)\n \n\ndef start():\n TrayIcon()\n gtk.main()\n\n\ndef stop():\n 
gtk.main_quit()\n\n\nif __name__ == '__main__':\n    start()\n","repo_name":"numian/als-adaptive-brightness","sub_path":"lib/trayicon.py","file_name":"trayicon.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34338889992","text":"'''\n # @ Author: Zhi Wu\n # @ Create Time: 2022-07-14 18:11:52\n # @ Modified by: Zhi Wu\n # @ Modified time: 2022-07-14 18:12:54\n # @ Description: Collection of some useful functions for running the whole project.\n '''\n\nimport pprint\nfrom copy import deepcopy\n\nfrom lightning.pytorch import Trainer\nfrom lightning.pytorch.callbacks import *\nfrom lightning.pytorch.loggers import TensorBoardLogger\nfrom lightning.pytorch.tuner import Tuner\n\nfrom .datasets import build_minist_dataset\nfrom .interface import PLModule\nfrom .tools import *\n\n\ndef build_dataset(name, **kwargs):\n    _ = {\n        'minist' : build_minist_dataset,\n    }\n    return _[name.lower()](**kwargs)\n\ndef deep_update(raw, new):\n    if new is None:\n        return raw\n    foo = deepcopy(raw)\n    update_keys(foo, new)\n    insert_keys(foo, new)\n    return foo\n\ndef update_keys(raw, new):\n    for key in raw:\n        if key not in new.keys():\n            continue\n        if isinstance(raw[key], dict) and isinstance(new[key], dict):\n            raw[key] = deep_update(raw[key], new[key])\n        else:\n            raw[key] = new[key]\n\ndef insert_keys(raw, new):\n    update_dict = {}\n    for key in new:\n        if key not in raw.keys():\n            update_dict[key] = new[key]\n    raw.update(update_dict)\n\n\ndef find_best_lr(trainer, model, logger, train_dataloaders, val_dataloaders, show=True, update_attr=False):\n    import matplotlib.pyplot as plt\n    logger.info('Searching for best learning rate ...')\n    tuner = Tuner(trainer)\n    lr_finder = tuner.lr_find(model, train_dataloaders=train_dataloaders, \n                                val_dataloaders=val_dataloaders, update_attr=update_attr)\n\n    fig = lr_finder.plot(suggest=True)\n    fig.savefig('lr_curve.jpg')\n    if show:\n        fig.show()\n        plt.pause(10)\n    try:\n        logger.info('Best learning rate found %.4f' % lr_finder.suggestion())\n    except TypeError:\n        logger.info('Best learning rate not found.')\n    else:\n        logger.info('Learning rate curve has been saved in lr_curve.jpg')\n    return lr_finder\n\nclass ConfigParser():\n    CALLBACKS = {\n        'LearningRateMonitor' : {'logging_interval':'step'},\n        # 'CKPTCallback' : {},\n        'ModelSummary' : {'max_depth':3},\n        # 'TxtLoggerCallback' : {},\n    }\n\n    def __init__(self, args) -> None:\n        config = self.load_config(args.cfg)\n        self.config = config\n        self.args = args\n        self.mode = args.mode\n        self.tb_logger = TensorBoardLogger('checkpoints', self.config.exper)\n        self.logger = build_logger(config.exper, args.mode, logger_name=config.model_name,\n                                    root='checkpoints', v_num=self.log_version)\n        \n        self.logger.info(args)\n        self.logger.info(f'Configuration:\\n{pprint.pformat(config)}')\n    \n    def load_config(self, cfg):\n        cfgs = load_config(cfg)\n        \n        cfgs.train.model = deep_update(cfgs.model, cfgs.train.model)\n        cfgs.train.trainset = deep_update(cfgs.dataset, cfgs.train.trainset)\n        cfgs.train.valset = deep_update(cfgs.dataset, cfgs.train.valset)\n        cfgs.test.model = deep_update(cfgs.model, cfgs.test.model)\n        cfgs.test.dataset = deep_update(cfgs.dataset, cfgs.test.dataset)\n        return cfgs\n    \n    def get_logger(self):\n        return self.logger\n\n    @property\n    def log_version(self):\n        if not hasattr(self, 'tb_logger'):\n            return None\n        v = self.tb_logger.version\n        v = v if v == 0 else (v if self.mode == 'train' else v-1)\n        return f'version_{v}'\n\n    @property\n    def ckpt(self):\n        
ckpt = self.cfg.ckpt_path if hasattr(self.cfg, 'ckpt_path') else None\n return ckpt\n \n @property\n def cfg(self):\n return eval(f'self.config.{self.mode}')\n\n def build_plmodule(self):\n self.logger.info('Building model.')\n return PLModule(**self.cfg.model)\n\n def build_dataloader(self):\n self.logger.info(f'Building {self.mode.capitalize()} dataset.')\n if self.mode == 'train':\n trainset, train_loader = build_dataset(**self.cfg.trainset)\n valset, val_loader = build_dataset(**self.cfg.valset)\n loaders = {\n 'train_dataloaders': train_loader,\n 'val_dataloaders': val_loader,\n }\n else:\n dataset, data_loader = build_dataset(**self.cfg.dataset)\n loaders = {\n 'dataloaders': data_loader,\n }\n return loaders\n\n def build_trainer(self):\n self.logger.info(f'Building {self.mode.capitalize()} phase Trainer.')\n cfg = deepcopy(self.cfg.trainer)\n for k in ['logger']:\n cfg.pop(k, None)\n cfg['callbacks'] = self.build_callbacks(cfg.get('callbacks', {}))\n if self.mode == 'train':\n cfg['logger'] = self.tb_logger\n trainer = Trainer(**cfg)\n else:\n cfg['enable_checkpointing'] = False\n cfg['logger'] = False\n trainer = Trainer(**cfg)\n return trainer\n\n def build_callbacks(self, cb_params):\n for k, v in cb_params.items():\n cb_params[k] = {} if v is None else v\n cb_params = deep_update(self.CALLBACKS, cb_params)\n cbs = [eval(k)(**v) for k, v in cb_params.items()]\n return cbs\n","repo_name":"wuzhiwyyx/lightning_template","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5305,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"31968763206","text":"import os\r\nimport json\r\nimport string\r\nfrom pprint import pprint\r\nfrom pymystem3 import Mystem\r\nimport re\r\nimport pandas as pd\r\n\r\nimport matplotlib as mpl\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\ndata_root = \"datatest/\"\r\n\r\nfnames = {\r\n 'item_id': 'item_id',\r\n 'row_data': 'row_data',\r\n 'film_id': 'film_id',\r\n 'user_id': 'user_id',\r\n 'user_name': 'user_name',\r\n 'film_name': 'film_name',\r\n 'review_mark': 'review_mark',\r\n}\r\n\r\n\r\nstop_list = [\"c\",\r\n \"а\", \"алло\",\r\n \"без\", \"белый\", \"близко\", \"более\", \"больше\", \"большой\", \"будем\", \"будет\",\r\n \"будете\", \"будешь\", \"будто\", \"буду\", \"будут\", \"будь\", \"бы\", \"бывает\", \"бывь\", \"был\", \"была\",\r\n \"были\", \"было\", \"быть\",\r\n \"в\", \"важная\", \"важное\", \"важные\", \"важный\", \"вам\", \"вами\", \"вас\", \"ваш\",\r\n \"ваша\", \"ваше\", \"ваши\", \"вверх\", \"вдали\", \"вдруг\", \"ведь\", \"везде\", \"вернуться\", \"весь\",\r\n \"взять\", \"вид\", \"видел\", \"видеть\", \"вместе\", \"вне\", \"вниз\", \"внизу\", \"во\",\r\n \"вокруг\", \"вон\", \"вообще\", \"вопрос\", \"восемнадцатый\", \"восемнадцать\", \"восемь\", \"восьмой\", \"вот\",\r\n \"впрочем\", \"все\", \"всегда\", \"всего\", \"всем\", \"всеми\", \"всему\", \"всех\", \"всею\",\r\n \"всю\", \"всюду\", \"вся\", \"всё\", \"второй\", \"вы\", \"выйти\",\r\n \"г\",\r\n \"где\",\r\n \"да\",\r\n \"давать\", \"давно\", \"даже\", \"далекий\", \"далеко\", \"дальше\", \"даром\", \"дать\", \"два\",\r\n \"двадцатый\", \"двадцать\", \"две\", \"двенадцатый\", \"двенадцать\", \"двух\", \"девятнадцатый\",\r\n \"девятнадцать\", \"девятый\", \"девять\", \"действительно\", \"дел\", \"делал\", \"делать\", \"делаю\",\r\n \"десятый\", \"десять\", \"для\", \"до\", \"долго\", \"должен\", \"должно\", \"должный\", \"другая\", \"другие\",\r\n \"других\", \"друго\", 
\"другое\", \"другой\",\r\n \"е\", \"его\", \"ее\", \"ей\", \"ему\", \"если\", \"есть\", \"еще\", \"ещё\", \"ею\", \"её\", \"ж\", \"же\", \"за\", \"занят\",\r\n \"занята\", \"занято\", \"заняты\", \"затем\", \"зато\", \"зачем\", \"здесь\", \"значит\", \"значить\",\r\n \"и\",\r\n \"из\", \"или\", \"им\", \"имеет\", \"имел\", \"именно\", \"иметь\", \"ими\", \"иногда\", \"их\",\r\n \"к\",\r\n \"каждая\", \"каждое\", \"каждые\", \"каждый\", \"как\", \"какая\", \"какой\", \"кем\", \"когда\", \"кого\", \"ком\",\r\n \"кому\", \"конец\", \"конечно\", \"которая\", \"которого\", \"которой\", \"которые\", \"который\", \"которых\",\r\n \"кроме\", \"кругом\", \"кто\", \"куда\",\r\n \"лежать\", \"лет\", \"ли\", \"лицо\", \"лишь\", \"лучше\",\r\n \"м\",\r\n \"маленький\", \"мало\", \"между\", \"меля\", \"менее\", \"меньше\", \"меня\", \"место\", \"миллионов\", \"мимо\",\r\n \"мне\", \"много\", \"многочисленная\", \"многочисленное\", \"многочисленные\", \"многочисленный\", \"мной\", \"мною\",\r\n \"мог\", \"могу\", \"могут\", \"мож\",\r\n \"может\", \"можно\", \"можхо\", \"мои\", \"мой\", \"мочь\", \"моя\", \"моё\", \"мы\",\r\n \"на\",\r\n \"наверху\", \"над\", \"надо\",\r\n \"наиболее\", \"найти\", \"наконец\", \"нам\", \"нами\", \"нас\", \"начала\", \"начать\", \"наш\", \"наша\", \"наше\", \"наши\",\r\n \"не\", \"него\", \"недавно\", \"недалеко\", \"нее\", \"ней\", \"некоторый\", \"нельзя\", \"нем\", \"немного\", \"нему\",\r\n \"непрерывно\", \"нередко\", \"несколько\", \"нет\", \"нею\", \"неё\", \"ни\", \"нибудь\", \"ниже\", \"низко\", \"никогда\",\r\n \"никто\", \"никуда\", \"ним\", \"ними\", \"них\", \"ничего\", \"ничто\", \"но\", \"новый\", \"ну\", \"нужно\", \"нужный\", \"нх\",\r\n \"о\",\r\n \"об\", \"оба\", \"обычно\", \"один\", \"одиннадцатый\", \"одиннадцать\", \"однажды\", \"однако\", \"одного\",\r\n \"одной\", \"оказаться\", \"около\", \"он\", \"она\", \"они\", \"оно\", \"опять\", \"особенно\", \"остаться\", \"от\",\r\n \"откуда\", \"отовсюду\", \"отсюда\", \"очень\",\r\n \"первый\", \"перед\", \"писать\", \"плечо\", \"по\", \"под\",\r\n \"позже\", \"пойти\", \"пока\", \"пол\", \"получить\", \"помнить\", \"понимать\", \"понять\", \"пор\", \"пора\", \"после\",\r\n \"посреди\", \"потом\", \"потому\", \"почему\", \"почти\", \"правда\", \"прекрасно\", \"при\", \"про\", \"просто\", \"против\",\r\n \"процентов\", \"пятнадцатый\", \"пятнадцать\", \"пятый\", \"пять\", \"раз\", \"разве\", \"рано\", \"раньше\", \"рядом\",\r\n \"с\",\r\n \"сам\", \"сама\", \"сами\", \"самим\", \"самими\", \"самих\", \"само\", \"самого\", \"самой\", \"самом\", \"самому\", \"саму\",\r\n \"самый\", \"свое\", \"своего\", \"своей\", \"свои\", \"своих\", \"свой\", \"свою\", \"сеаой\", \"себе\", \"себя\", \"сегодня\",\r\n \"седьмой\", \"сейчас\", \"семнадцатый\", \"семнадцать\", \"семь\", \"сидеть\", \"сила\", \"сих\", \"сказал\", \"сказала\",\r\n \"сказать\", \"сколько\", \"слишком\", \"сначала\", \"снова\", \"со\", \"собой\", \"собою\", \"совсем\", \"спасибо\",\r\n \"спросить\",\r\n \"сразу\", \"стал\", \"старый\", \"стать\", \"стол\", \"сторона\", \"стоять\", \"страна\", \"суть\", \"считать\",\r\n \"т\",\r\n \"та\", \"так\", \"такая\", \"также\", \"таки\", \"такие\", \"такое\", \"такой\", \"там\", \"твои\", \"твой\", \"твоя\", \"твоё\",\r\n \"те\", \"тебе\", \"тебя\", \"тем\", \"теми\", \"теперь\", \"тех\", \"то\", \"тобой\", \"тобою\", \"тогда\", \"того\", \"тоже\",\r\n \"только\", \"том\", \"тому\", \"тот\", \"тою\", \"третий\", \"три\", \"тринадцатый\", \"тринадцать\", \"ту\", \"туда\", \"тут\",\r\n \"ты\", 
\"тысяч\",\r\n \"у\",\r\n \"уж\", \"уже\",\r\n \"хоть\",\r\n \"хотя\", \"хочешь\",\r\n \"час\",\r\n \"часто\", \"часть\", \"чаще\", \"чего\", \"чем\", \"чему\", \"через\", \"четвертый\", \"четыре\", \"четырнадцатый\",\r\n \"четырнадцать\", \"что\", \"чтоб\", \"чтобы\", \"чуть\",\r\n \"шестнадцатый\",\r\n \"шестнадцать\", \"шестой\", \"шесть\",\r\n \"эта\", \"эти\", \"этим\", \"этими\", \"этих\", \"это\", \"этого\", \"этой\", \"этом\", \"этому\", \"этот\", \"эту\",\r\n \"я\", \"являюсь\"\r\n ]\r\n\r\ndef Lemmatizator(text: str) -> str:\r\n m = Mystem()\r\n lemmas = m.lemmatize(text)\r\n res = ''.join(lemmas)\r\n # print(res)\r\n return res\r\n\r\n\r\ndef BachLemmatizasator(texts: list) -> list:\r\n res = ' , '.join(texts)\r\n res = Lemmatizator(res)\r\n res = res.split(' , ')\r\n res[-1] = res[-1].strip()\r\n\r\n return res\r\n\r\n\r\ndef PrepareText(text: str) -> str:\r\n # Преобразование текста в нижний регист\r\n text = text.lower()\r\n\r\n # Удаление цифр\r\n text = re.sub(r'\\d+', \"\", text)\r\n\r\n # Удаление пунктуации [!”#$%&’()*+,-./:;<=>?@[\\]^_`{|}~]:\r\n text = text.translate(str.maketrans('--—.,', ' ', ))\r\n text = text.translate(str.maketrans('', '', string.punctuation + '«»…'))\r\n\r\n # Удаление пробельных символов (whitespaces);\r\n text = text.strip()\r\n\r\n # Удаление стоп слов;\r\n l_text = text.split(' ')\r\n l_clear_text = []\r\n for item in l_text:\r\n if len(item) < 2: continue\r\n if item in stop_list: continue\r\n l_clear_text.append(item)\r\n\r\n text = ' '.join(l_clear_text)\r\n\r\n # Лемматизация\r\n text = Lemmatizator(text)\r\n\r\n return text\r\n\r\n\r\ndef main():\r\n positiv_list = []\r\n negative_list = []\r\n netral_list = []\r\n\r\n reviews_list = []\r\n reviews_id_list = []\r\n reviews_marks_list = []\r\n\r\n col_review = 'review'\r\n\r\n root_path = \"data/\"\r\n iinn = 0\r\n for (root, dirs, files) in os.walk(root_path):\r\n if not files: continue\r\n iinn += 1\r\n\r\n for file in files:\r\n path = f\"{root}/{file}\"\r\n\r\n with open(path, 'r', encoding='utf-8') as f:\r\n data = json.load(f)\r\n\r\n text = data[fnames['row_data']]\r\n\r\n text = PrepareText(text)\r\n\r\n reviews_list.append(text)\r\n reviews_id_list.append(data[fnames['item_id']])\r\n reviews_marks_list.append(data[fnames['review_mark']])\r\n\r\n if data[fnames['review_mark']] == 'N':\r\n netral_list.append({col_review: text, \"id\": data[fnames['item_id']]})\r\n if data[fnames['review_mark']] == 'good':\r\n positiv_list.append({col_review: text, \"id\": data[fnames['item_id']]})\r\n if data[fnames['review_mark']] == 'bad':\r\n negative_list.append({col_review: text, \"id\": data[fnames['item_id']]})\r\n\r\n pprint(f\"done: {iinn}\")\r\n\r\n\r\n df_pos = pd.DataFrame.from_records(positiv_list)\r\n df_bad = pd.DataFrame.from_records(negative_list)\r\n df_n = pd.DataFrame.from_records(netral_list)\r\n\r\n bach_size = 1000\r\n reviews_list_lemm = []\r\n for i in range(0, len(reviews_list), bach_size):\r\n bach = reviews_list[i:i + bach_size]\r\n bach = BachLemmatizasator(bach)\r\n reviews_list_lemm.extend(bach)\r\n print(f\"progres:\\ti:{i} from {len(reviews_list)}\\t (s:{len(bach)})\")\r\n\r\n df_review = pd.DataFrame(list(zip(reviews_id_list, reviews_marks_list, reviews_list_lemm)),\r\n columns=['r_id', 'mark', 'review'])\r\n df_review.to_csv('reviews.csv', mode='a', index=False, sep='\\t', encoding='utf-8')\r\n\r\ndef writeListWithId(path: str, l: list):\r\n for index, item in enumerate(l):\r\n review_path = path + str(item[0]) + \".txt\"\r\n 
os.makedirs(os.path.dirname(review_path), exist_ok=True)\r\n        with open(f\"{review_path}\", 'w', encoding='utf-8') as f:\r\n            f.write(item[2])\r\n\r\n        if index % 100 == 0:\r\n            print(f\"{path}\\t{index}\")\r\n\r\n\r\ndef splitCsv():\r\n    df = pd.read_csv('reviews.csv', sep='\\t', encoding='utf-8')\r\n    pprint(df)\r\n\r\n    df_good = df[df['mark'] == 'good']\r\n    df_bad = df[df['mark'] == 'bad']\r\n    df_nnn = df[df['mark'] == 'N']\r\n\r\n    pprint(df_good)\r\n    pprint(df_bad)\r\n    pprint(df_nnn)\r\n    print(f\"g:{len(df_good)}\\tb:{len(df_bad)}\\tn:{len(df_nnn)}\")\r\n\r\n    data = {'Хорошие': len(df_good),\r\n            'Плохие': len(df_bad),\r\n            'Нейтральные': len(df_nnn),\r\n            }\r\n    group_data = list(data.values())\r\n    group_names = list(data.keys())\r\n\r\n    plt.rcParams.update({'figure.autolayout': True})\r\n    fig, ax = plt.subplots()\r\n    x = np.arange(3)\r\n    plt.bar(x, group_data)\r\n    plt.xticks(x, group_names)\r\n    plt.show()\r\n\r\n\r\n\r\n    z_good = list(zip(df_good['r_id'].tolist(), df_good['mark'].tolist(), df_good['review'].tolist()))\r\n    writeListWithId(\"data_kino/train/pos/\", z_good[:4000])\r\n    writeListWithId(\"data_kino/test/pos/\", z_good[4000:5000])\r\n\r\n    z_bad = list(zip(df_bad['r_id'].tolist(), df_bad['mark'].tolist(), df_bad['review'].tolist()))\r\n    writeListWithId(\"data_kino/train/neg/\", z_bad[:4000])\r\n    writeListWithId(\"data_kino/test/neg/\", z_bad[4000:5000])\r\n\r\n    z_uns = list(zip(df_nnn['r_id'].tolist(), df_nnn['mark'].tolist(), df_nnn['review'].tolist()))\r\n    writeListWithId(\"data_kino/train/uns/\", z_uns[:4000])\r\n    writeListWithId(\"data_kino/test/uns/\", z_uns[4000:4947])\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    # main - collects the json files, cleans and lemmatizes them, and builds the final review.csv\r\n    main()\r\n    \r\n    # splitCsv - builds the file structure from \"review.csv\" for use with tf.keras.utils.text_dataset_from_directory\r\n    splitCsv()\r\n","repo_name":"FilinArtem/MasterThesis","sub_path":"Parser/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13347,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13351345844","text":"import sys\nsys.path.insert(1,\"../../\")\nimport h2o\nimport math\nfrom tests import pyunit_utils\nfrom h2o.utils.typechecks import assert_is_type\nfrom h2o.frame import H2OFrame\n\n# global dictionaries storing model answers\ng_iris_setosa_sepal_len=dict()\ng_iris_versicolor_sepal_wid=dict()\ng_iris_virginica_petal_wid=dict()\ng_iris_versicolor_petal_len_NA_ignore=dict()\ng_iris_versicolor_petal_len_NA_rm=dict()\n\ndef group_by_all():\n    \"\"\"\n    This is a comprehensive test that will test all aggregations in the groupBy class.\n    \"\"\"\n    generate_dict_answers()    # generate answer dictionary\n\n    # perform group-by with datasets containing no NAs. 
All three na mode should produce same results\n h2o_iris = h2o.import_file(path=pyunit_utils.locate(\"smalldata/iris/iris_wheader.csv\"))\n result_all = perform_group_by(h2o_iris,'all')\n result_ignore = perform_group_by(h2o_iris,'ignore')\n result_rm = perform_group_by(h2o_iris, 'rm')\n\n # make sure return type of get_frame() is H2OFrame\n assert_is_type(result_all, H2OFrame)\n assert_is_type(result_ignore, H2OFrame)\n assert_is_type(result_rm, H2OFrame)\n\n # make sure the result frame contains the correct number of rows and columns\n assert result_all.shape==result_ignore.shape==result_rm.shape==(3,30), \"H2O group_by() command is not working.\"\n\n # check all group by results are the same\n assert pyunit_utils.compare_frames(result_all, result_ignore, 0, 0, 1e-6, strict=True, compare_NA=False), \\\n \"H2O group_by() command is not working.\"\n assert pyunit_utils.compare_frames(result_ignore, result_rm, 0, 0, 1e-6, strict=True, compare_NA=False), \\\n \"H2O group_by() command is not working.\"\n\n # check group by result with known correct result\n assert_group_by_result(result_all, g_iris_setosa_sepal_len, \"Iris-setosa\")\n assert_group_by_result(result_rm, g_iris_versicolor_sepal_wid, \"Iris-versicolor\")\n assert_group_by_result(result_ignore, g_iris_virginica_petal_wid, \"Iris-virginica\")\n\n # perform group-by with datasets contain NAs.\n h2o_iris_NA = h2o.import_file(path=pyunit_utils.locate(\"smalldata/iris/iris_wheader_NA_2.csv\"))\n result_all_NA = perform_group_by(h2o_iris_NA,'all')\n result_ignore_NA = perform_group_by(h2o_iris_NA,'ignore')\n result_rm_NA = perform_group_by(h2o_iris_NA, 'rm')\n\n # make sure return type of get_frame() is H2OFrame\n assert_is_type(result_all_NA, H2OFrame)\n assert_is_type(result_ignore_NA, H2OFrame)\n assert_is_type(result_rm_NA, H2OFrame)\n\n # make sure the result frame contains the correct number of rows and columns\n assert result_all_NA.shape==result_ignore_NA.shape==result_rm_NA.shape==(3,30), \\\n \"H2O group_by() command is not working.\"\n\n # column petal_wid contains no NA and hence should provide same result as before independent of NA treatment\n assert pyunit_utils.compare_frames(result_all_NA[list(g_iris_virginica_petal_wid.keys())],\n result_rm_NA[list(g_iris_virginica_petal_wid.keys())], 0, 0, 1e-6,\n strict=False, compare_NA=False), \"H2O group_by() command is not working.\"\n assert pyunit_utils.compare_frames(result_all_NA[list(g_iris_virginica_petal_wid.keys())],\n result_ignore_NA[list(g_iris_virginica_petal_wid.keys())], 0, 0, 1e-6,\n strict=False, compare_NA=False), \"H2O group_by() command is not working.\"\n assert_group_by_result(result_all_NA, g_iris_virginica_petal_wid, \"Iris-virginica\")\n\n # check to make sure result_all_NA columns for sepal_len, sepal_wid, petal_len are all NAs for na='all'\n assert_all_NAs(result_all_NA, list(g_iris_setosa_sepal_len.keys())) # check sepal_len\n assert_all_NAs(result_all_NA, list(g_iris_versicolor_sepal_wid.keys())) # check sepal_wid\n assert_all_NAs(result_all_NA, list(g_iris_versicolor_petal_len_NA_ignore.keys())) # check petal_len\n\n # check to make sure na=\"ignore\", and na=\"rm\" are calculated correctly against known answers\n assert_group_by_result(result_ignore_NA, g_iris_versicolor_petal_len_NA_ignore, \"Iris-versicolor\")\n assert_group_by_result(result_rm_NA, g_iris_versicolor_petal_len_NA_rm, \"Iris-versicolor\")\n\ndef assert_all_NAs(h2oframe, col_names):\n \"\"\"\n Throw an assert error if not all columns of h2oframe with column names specified in list 
col_names are NAs.\n\n :param h2oframe:\n :param col_names:\n \"\"\"\n for column_name in col_names:\n assert h2oframe[column_name].all(), \"H2O group_by() command is not working.\"\n\n\ndef perform_group_by(h2oFrame, na):\n \"\"\"\n Given a H2OFrame h2oFrame, and the na treatment, perform chained group by aggregation and return the\n results of aggregations in an H2OFrame.\n\n :param h2oFrame:\n :param na:\n :return:\n \"\"\"\n grouped = h2oFrame.group_by(\"class\")\n grouped.count(na=na).min(na=na).max(na=na).mean(na=na).var(na=na).sd(na=na).ss(na=na).sum(na=na)\n print(grouped.get_frame())\n return grouped.get_frame()\n\ndef assert_group_by_result(h2oFrame, answer_dict, row_name):\n \"\"\"\n Given a result frame h2oFrame, a dictionary containing the answers to the group by operation and\n row_name denoting which groupby group to examine, this method will throw an error if result\n frame does not agree with dict values or the wrong group name is provided.\n\n :param h2oFrame:\n :param answer_dict:\n :param row_name:\n \"\"\"\n row_ind = -1\n for ind in range(h2oFrame.nrow):\n if row_name == h2oFrame[ind, 0]:\n row_ind=ind\n break\n assert row_ind>=0, \"row_name is not a valid row name in your result frame.\"\n\n for key, value in answer_dict.items():\n assert abs(value-h2oFrame[row_ind, key]) < 1e-10, \"H2O group_by() command is not working.\"\n\ndef generate_dict_answers():\n \"\"\"\n Generates dictionary containing answers that I have pre-calculated for iris dataset.\n \"\"\"\n global g_iris_setosa_sepal_len\n global g_iris_versicolor_sepal_wid\n global g_iris_virginica_petal_wid\n global g_iris_versicolor_petal_len_NA_ignore\n global g_iris_versicolor_petal_len_NA_rm\n\n # collect pre-calculated information\n g_iris_setosa_sepal_len[\"mean_sepal_len\"]=5.006\n g_iris_setosa_sepal_len[\"sum_sepal_len\"]=250.3\n g_iris_setosa_sepal_len[\"sumSquares_sepal_len\"]=1259.09\n g_iris_setosa_sepal_len[\"max_sepal_len\"]=5.8\n g_iris_setosa_sepal_len[\"min_sepal_len\"]=4.3\n g_iris_setosa_sepal_len[\"var_sepal_len\"]=(g_iris_setosa_sepal_len[\"sumSquares_sepal_len\"]-\n 2*g_iris_setosa_sepal_len[\"mean_sepal_len\"]*g_iris_setosa_sepal_len[\"sum_sepal_len\"]+\n 50*g_iris_setosa_sepal_len[\"mean_sepal_len\"]*g_iris_setosa_sepal_len[\"mean_sepal_len\"])/49.0\n g_iris_setosa_sepal_len[\"sdev_sepal_len\"]=math.sqrt(g_iris_setosa_sepal_len[\"var_sepal_len\"])\n\n g_iris_versicolor_sepal_wid[\"mean_sepal_wid\"]=2.77\n g_iris_versicolor_sepal_wid[\"sum_sepal_wid\"]=138.5\n g_iris_versicolor_sepal_wid[\"sumSquares_sepal_wid\"]=388.47\n g_iris_versicolor_sepal_wid[\"max_sepal_wid\"]=3.4\n g_iris_versicolor_sepal_wid[\"min_sepal_wid\"]=2.0\n g_iris_versicolor_sepal_wid[\"var_sepal_wid\"]=(g_iris_versicolor_sepal_wid[\"sumSquares_sepal_wid\"]-\n 2*g_iris_versicolor_sepal_wid[\"mean_sepal_wid\"]*g_iris_versicolor_sepal_wid[\"sum_sepal_wid\"]+\n 50*g_iris_versicolor_sepal_wid[\"mean_sepal_wid\"]*g_iris_versicolor_sepal_wid[\"mean_sepal_wid\"])/49.0\n g_iris_versicolor_sepal_wid[\"sdev_sepal_wid\"]=math.sqrt(g_iris_versicolor_sepal_wid[\"var_sepal_wid\"])\n\n g_iris_virginica_petal_wid[\"mean_petal_wid\"]=2.026\n g_iris_virginica_petal_wid[\"sum_petal_wid\"]=101.3\n g_iris_virginica_petal_wid[\"sumSquares_petal_wid\"]=208.93\n g_iris_virginica_petal_wid[\"max_petal_wid\"]=2.5\n g_iris_virginica_petal_wid[\"min_petal_wid\"]=1.4\n g_iris_virginica_petal_wid[\"var_petal_wid\"]=(g_iris_virginica_petal_wid[\"sumSquares_petal_wid\"]-\n 
2*g_iris_virginica_petal_wid[\"mean_petal_wid\"]*g_iris_virginica_petal_wid[\"sum_petal_wid\"]+\n 50*g_iris_virginica_petal_wid[\"mean_petal_wid\"]*g_iris_virginica_petal_wid[\"mean_petal_wid\"])/49.0\n g_iris_virginica_petal_wid[\"sdev_petal_wid\"]=math.sqrt(g_iris_virginica_petal_wid[\"var_petal_wid\"])\n\n\n g_iris_versicolor_petal_len_NA_ignore[\"sum_petal_len\"]=204.5\n g_iris_versicolor_petal_len_NA_ignore[\"sumSquares_petal_len\"]=881.95\n g_iris_versicolor_petal_len_NA_ignore[\"mean_petal_len\"]=g_iris_versicolor_petal_len_NA_ignore[\"sum_petal_len\"]/50.0\n g_iris_versicolor_petal_len_NA_ignore[\"max_petal_len\"]=5.1\n g_iris_versicolor_petal_len_NA_ignore[\"min_petal_len\"]=3.0\n g_iris_versicolor_petal_len_NA_ignore[\"var_petal_len\"]=(g_iris_versicolor_petal_len_NA_ignore[\"sumSquares_petal_len\"]-\n 2*g_iris_versicolor_petal_len_NA_ignore[\"mean_petal_len\"]*g_iris_versicolor_petal_len_NA_ignore[\"sum_petal_len\"]+\n 50*g_iris_versicolor_petal_len_NA_ignore[\"mean_petal_len\"]*g_iris_versicolor_petal_len_NA_ignore[\"mean_petal_len\"])/49.0\n g_iris_versicolor_petal_len_NA_ignore[\"sdev_petal_len\"]=math.sqrt(g_iris_versicolor_petal_len_NA_ignore[\"var_petal_len\"])\n\n g_iris_versicolor_petal_len_NA_rm[\"sum_petal_len\"]=204.5\n g_iris_versicolor_petal_len_NA_rm[\"sumSquares_petal_len\"]=881.95\n g_iris_versicolor_petal_len_NA_rm[\"mean_petal_len\"]=g_iris_versicolor_petal_len_NA_rm[\"sum_petal_len\"]/48.0\n g_iris_versicolor_petal_len_NA_rm[\"max_petal_len\"]=5.1\n g_iris_versicolor_petal_len_NA_rm[\"min_petal_len\"]=3.0\n g_iris_versicolor_petal_len_NA_rm[\"var_petal_len\"]=(g_iris_versicolor_petal_len_NA_rm[\"sumSquares_petal_len\"]-\n 2*g_iris_versicolor_petal_len_NA_rm[\"mean_petal_len\"]*g_iris_versicolor_petal_len_NA_rm[\"sum_petal_len\"]+\n 48*g_iris_versicolor_petal_len_NA_rm[\"mean_petal_len\"]*g_iris_versicolor_petal_len_NA_rm[\"mean_petal_len\"])/47.0\n g_iris_versicolor_petal_len_NA_rm[\"sdev_petal_len\"]=math.sqrt(g_iris_versicolor_petal_len_NA_rm[\"var_petal_len\"])\n\n\nif __name__ == \"__main__\":\n pyunit_utils.standalone_test(group_by_all)\nelse:\n group_by_all()\n","repo_name":"h2oai/h2o-3","sub_path":"h2o-py/tests/testdir_munging/pyunit_groupby_allOps.py","file_name":"pyunit_groupby_allOps.py","file_ext":"py","file_size_in_byte":10499,"program_lang":"python","lang":"en","doc_type":"code","stars":6553,"dataset":"github-code","pt":"61"} +{"seq_id":"38382893421","text":"import collections.abc\nimport json\nimport os\nfrom dataclasses import asdict, is_dataclass\nfrom enum import Enum\nfrom typing import Any, Iterable, Mapping, cast\n\nfrom pkg_resources import Requirement\n\nfrom pants.engine.addresses import Address, BuildFileAddress\nfrom pants.engine.console import Console\nfrom pants.engine.fs import DigestContents, FileContent, PathGlobs\nfrom pants.engine.goal import Goal, GoalSubsystem, Outputting\nfrom pants.engine.rules import Get, MultiGet, collect_rules, goal_rule\nfrom pants.engine.target import Target, UnexpandedTargets\n\n\nclass OutputOptions(Enum):\n RAW = \"raw\"\n JSON = \"json\"\n\n\nclass PeekSubsystem(Outputting, GoalSubsystem):\n \"\"\"Display BUILD file info to the console.\n\n In its most basic form, `peek` just prints the contents of a BUILD file. 
It can also display\n    multiple BUILD files, or render normalized target metadata as JSON for consumption by other\n    programs.\n    \"\"\"\n\n    name = \"peek\"\n    help = \"Display BUILD target info\"\n\n    @classmethod\n    def register_options(cls, register):\n        super().register_options(register)\n        register(\n            \"--output\",\n            type=OutputOptions,\n            default=OutputOptions.JSON,\n            help=(\n                \"Which output style peek should use: `json` will show each target as a separate \"\n                \"entry, whereas `raw` will simply show the original non-normalized BUILD files.\"\n            ),\n        )\n        register(\n            \"--exclude-defaults\",\n            type=bool,\n            default=False,\n            help=(\n                \"Whether to leave off values that match the target-defined default values \"\n                \"when using `json` output.\"\n            ),\n        )\n\n    @property\n    def output_type(self) -> OutputOptions:\n        \"\"\"Get the output type from options.\n\n        Must be renamed here because `output` conflicts with `Outputting` class.\n        \"\"\"\n        return cast(OutputOptions, self.options.output)\n\n    @property\n    def exclude_defaults(self) -> bool:\n        return cast(bool, self.options.exclude_defaults)\n\n\nclass Peek(Goal):\n    subsystem_cls = PeekSubsystem\n\n\ndef _render_raw(fcs: Iterable[FileContent]) -> str:\n    sorted_fcs = sorted(fcs, key=lambda fc: fc.path)\n    rendereds = map(_render_raw_build_file, sorted_fcs)\n    return os.linesep.join(rendereds)\n\n\ndef _render_raw_build_file(fc: FileContent, encoding: str = \"utf-8\") -> str:\n    dashes = \"-\" * len(fc.path)\n    content = fc.content.decode(encoding)\n    parts = [dashes, fc.path, dashes, content]\n    if not content.endswith(os.linesep):\n        parts.append(\"\")\n    return os.linesep.join(parts)\n\n\n_nothing = object()\n\n\ndef _render_json(ts: Iterable[Target], exclude_defaults: bool = False) -> str:\n    targets: Iterable[Mapping[str, Any]] = [\n        {\n            \"address\": t.address.spec,\n            \"target_type\": t.alias,\n            **{\n                k.alias: v.value\n                for k, v in t.field_values.items()\n                if not (exclude_defaults and getattr(k, \"default\", _nothing) == v.value)\n            },\n        }\n        for t in ts\n    ]\n    return f\"{json.dumps(targets, indent=2, cls=_PeekJsonEncoder)}\\n\"\n\n\nclass _PeekJsonEncoder(json.JSONEncoder):\n    \"\"\"Allow us to serialize some commonly-found types in BUILD files.\"\"\"\n\n    safe_to_str_types = (Requirement,)\n\n    def default(self, o):\n        \"\"\"Return a serializable object for o.\"\"\"\n        if is_dataclass(o):\n            return asdict(o)\n        if isinstance(o, collections.abc.Mapping):\n            return dict(o)\n        if isinstance(o, collections.abc.Sequence):\n            return list(o)\n        try:\n            return super().default(o)\n        except TypeError:\n            return str(o)\n\n\n@goal_rule\nasync def peek(\n    console: Console,\n    subsys: PeekSubsystem,\n    targets: UnexpandedTargets,\n) -> Peek:\n    if subsys.output_type == OutputOptions.RAW:\n        build_file_addresses = await MultiGet(\n            Get(BuildFileAddress, Address, t.address) for t in targets\n        )\n        build_file_paths = {a.rel_path for a in build_file_addresses}\n        digest_contents = await Get(DigestContents, PathGlobs(build_file_paths))\n        output = _render_raw(digest_contents)\n    elif subsys.output_type == OutputOptions.JSON:\n        output = _render_json(targets, subsys.exclude_defaults)\n    else:\n        raise AssertionError(f\"output_type not one of {tuple(OutputOptions)}\")\n\n    with subsys.output(console) as write_stdout:\n        write_stdout(output)\n\n    return Peek(exit_code=0)\n\n\ndef rules():\n    return 
collect_rules()\n","repo_name":"williamscs/pants","sub_path":"src/python/pants/backend/project_info/peek.py","file_name":"peek.py","file_ext":"py","file_size_in_byte":4597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"22545647417","text":"import datetime,json,asyncio\r\nfrom commands.bin.embed import getembed\r\nimport discord\r\nfrom commands.bin.FacebookRequest import *\r\nfrom commands.bin.time import RewriteTime, TimeZoneChange\r\nfrom commands.config.TimeConfig import WHSH_FUCK_TIME, WHSH_FUCK_TO_TIME\r\nfrom commands.config.config import WHSHembed\r\nfrom core.classes import Cog_Extension \r\nimport requests\r\nimport facebook\r\nimport urllib3\r\n\r\n\r\n\r\n\r\nclass FBpost(Cog_Extension):\r\n #a = datetime.datetime.now().strftime('%Y %m %d %H %M')\r\n \r\n def __init__(self,*args,**kwargs):\r\n super().__init__(*args,**kwargs)\r\n\r\n async def time_task():\r\n await self.bot.wait_until_ready()\r\n Id = \"whsh\"\r\n data = getFbPost(Id)\r\n updateNowId(Id,data)\r\n while not self.bot.is_closed():\r\n await asyncio.sleep(50)\r\n now_time = datetime.datetime.now().strftime('%M')\r\n if (int(now_time)%15==0):\r\n data = getFbPost(Id)\r\n count = getCount(Id,data)\r\n\r\n def sendmessage(i):\r\n usedata = data['posts']['data'][i]\r\n message = usedata['message']\r\n Title = message.split(\"\\n\",1)\r\n time = RewriteTime(TimeZoneChange(usedata['created_time'],WHSH_FUCK_TIME),WHSH_FUCK_TO_TIME)\r\n embed = getembed(\r\n WHSHembed.title.format(Title[0]),\r\n WHSHembed.description.format(\r\n Title[1],\r\n time,\r\n Title[0][1:],\r\n usedata[\"id\"]\r\n ),\r\n WHSHembed.color\r\n )\r\n return embed\r\n\r\n if count==-1:\r\n for i in range(100):\r\n embed = sendmessage(99-i)\r\n for k in openFB()[\"sendchannel\"][\"whsh\"]:\r\n try:\r\n channel = self.bot.get_channel(k)\r\n except:\r\n pass\r\n await channel.send(embed=embed)\r\n elif(count==0):\r\n pass\r\n else:\r\n for i in range(count):\r\n embed = sendmessage(count-i-1)\r\n for k in openFB()[\"sendchannel\"][\"whsh\"]:\r\n channel = self.bot.get_channel(k)\r\n await channel.send(embed=embed)\r\n await asyncio.sleep(100)\r\n updateNowId(Id,data)\r\n self.bg_task = self.bot.loop.create_task(time_task())\r\ndef setup(bot):\r\n bot.add_cog(FBpost(bot))","repo_name":"ALiangLiang/Uto2.0","sub_path":"commands/cmds/FBEvent.py","file_name":"FBEvent.py","file_ext":"py","file_size_in_byte":2931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23555925461","text":"def isWrong(x):\r\n for i in range(len(x)-1,0,-1):\r\n if x[i]-1:\r\n wrongPos=isWrong(n)\r\n normalize(n,wrongPos)\r\n result=str(int(\"\".join(n)))\r\n print(\"Case #\"+str(testcase+1)+\": \"+result)\r\n \r\n ","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/2546.py","file_name":"2546.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2006854568","text":"import cv2\nimport numpy as np\n\nCANNY =1000\n\ncap=cv2.VideoCapture(0)\nlower_red = np.array([0,100,100])\nupper_red = np.array([10,255,255])\n\n\nlowerBlue = np.array([100,100,100])\nupperBlue = np.array([140,255,255])\nwhile True:\n _, frame=cap.read()\n \n hsv=cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)\n\n\n\n mask=cv2.inRange(hsv,lower_red,upper_red)\n mask = cv2.bilateralFilter(mask,1,10,120)\n\n res=cv2.bitwise_and(frame,frame, mask=mask)\n \n edges = cv2.Canny( res , 10, 
CANNY)\n _, contours, hierarchy = cv2.findContours( edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE )\n for cont in contours:\n area = cv2.contourArea(cont)\n if area > 300:\n arc_len = cv2.arcLength( cont, True ) #arc length\n approx = cv2.approxPolyDP(cont, 0.1 * arc_len, True)\n \n #c = max(cont, key=cv2.contourArea) #find the max contour \n\n if(len(approx) ==4):\n #SQUARES NOT WORKING BUT TRI IS PERFECT \n print(\"sq\")\n (x,y,w,h) = cv2.boundingRect(approx)\n cv2.drawContours(frame, [approx], -1, (255,0,0), 2)\n cv2.putText(frame, 'blue sq', (x,y), cv2.FONT_HERSHEY_SIMPLEX,0.7,(255,0,0))\n# elif(len(approx) ==3):\n# print(\"tri\")\n# (x,y,w,h) = cv2.boundingRect(approx)\n# cv2.drawContours(frame, [approx], -1, (255,0,0), 2)\n# cv2.putText(frame, 'sblue tri', (x,y), cv2.FONT_HERSHEY_SIMPLEX,0.7,(255,0,0))\n else:\n pass\n \n cv2.imshow('frame',frame)\n cv2.imshow('mask', mask)\n cv2.imshow('res', res)\n cv2.imshow('edges',edges)\n\n k=cv2.waitKey(5)& 0xFF\n if k==27:\n break\n \ncv2.destroyAllWindows()\ncap.release()\n","repo_name":"gispda/image-processing","sub_path":"bitwiseand.py","file_name":"bitwiseand.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2603378274","text":"import sc2\r\nfrom sc2 import Race, Difficulty\r\nfrom sc2 import maps, run_game\r\nfrom sc2.constants import *\r\nfrom sc2.position import Point2, Point3\r\nfrom sc2.unit import Unit\r\nfrom sc2.player import Bot, Computer\r\nfrom sc2.player import Human\r\nfrom sc2.ids.unit_typeid import UnitTypeId\r\nfrom sc2.ids.ability_id import AbilityId\r\nfrom sc2.units import Units\r\nfrom sc2.ids.upgrade_id import UpgradeId\r\n\r\n\r\nclass Vinc3nt(sc2.BotAI):\r\n\tasync def on_step(self, iteration):\r\n\t\tawait self.distribute_workers()\r\n\t\tawait self.build_workers()\r\n\t\tawait self.build_supply()\r\n\t\tawait self.lower_depot()\r\n\t\tawait self.expansion()\r\n\t\tawait self.build_barracks()\r\n\t\tawait self.build_refinery()\r\n\t\tawait self.ccmorph()\r\n\t\tawait self.mules()\r\n\t\tawait self.marines()\r\n\t\tawait self.attackmarines()\r\n\t\tawait self.morebarracks()\r\n\t\tawait self.more_workers1()\r\n\t\tawait self.build_engineeringbay()\r\n\t\tawait self.upgradearmor1()\r\n\t\tawait self.upgradeweapon1()\r\n\t\tawait self.more_workers2()\r\n\t\tawait self.more_supply()\r\n\t\tawait self.build_factory()\r\n\t\tawait self.build_armory()\r\n\t\tawait self.upgradetier2()\r\n\t\tawait self.boombarracks()\r\n\r\n\t\t\r\n\tasync def build_workers(self):\r\n\t\tfor th in self.townhalls.idle:\r\n\t\t\tif (\r\n\t\t\t\tself.can_afford(UnitTypeId.SCV) \r\n\t\t\t\tand self.supply_left > 0\r\n\t\t\t\tand self.supply_workers < 19\r\n\t\t\t\tand (\r\n\t\t\t\t\tself.structures(UnitTypeId.BARRACKS).ready.amount < 1 \r\n\t\t\t\t\tand self.townhalls(UnitTypeId.COMMANDCENTER).idle \r\n\t\t\t\t\tor self.townhalls(UnitTypeId.ORBITALCOMMAND).idle\r\n\t\t\t\t)\r\n\t\t\t):\r\n\t\t\t\t\tself.do(th.train(UnitTypeId.SCV), subtract_cost=True, subtract_supply=True)\r\n\tasync def build_supply(self):\r\n\t\tif self.supply_left < 7 and not self.already_pending(UnitTypeId.SUPPLYDEPOT):\r\n\t\t\tworkers = self.workers.gathering\r\n\t\t\tworker = workers.furthest_to(workers.center)\r\n\t\t\tlocation = await self.find_placement(UnitTypeId.SUPPLYDEPOT, worker.position, placement_step=3)\r\n\t\t\tif location:\r\n\t\t\t\tif self.can_afford(UnitTypeId.SUPPLYDEPOT):\r\n\t\t\t\t\tself.do(worker.build(UnitTypeId.SUPPLYDEPOT, location), 
subtract_cost=True)\r\n\r\n\tasync def lower_depot(self):\r\n\t\tfor depot in self.structures(UnitTypeId.SUPPLYDEPOT).ready:\r\n\t\t\tself.do(depot(AbilityId.MORPH_SUPPLYDEPOT_LOWER))\r\n\tasync def ccmorph(self):\r\n\t\torbital_tech_requirement: float = self.tech_requirement_progress(UnitTypeId.ORBITALCOMMAND)\r\n\t\tif orbital_tech_requirement == 1:\r\n\t\t\tfor cc in self.townhalls(UnitTypeId.COMMANDCENTER).idle:\r\n\t\t\t\tif self.can_afford(UnitTypeId.ORBITALCOMMAND):\r\n\t\t\t\t\tself.do(cc(AbilityId.UPGRADETOORBITAL_ORBITALCOMMAND),subtract_cost=True)\r\n\tasync def build_barracks(self):\r\n\t\tbarracks_tech_requirement: float = self.tech_requirement_progress(UnitTypeId.BARRACKS)\r\n\t\tif (\r\n\t\t\tbarracks_tech_requirement == 1\r\n\t\t\tand self.structures(UnitTypeId.BARRACKS).ready.amount + self.already_pending(UnitTypeId.BARRACKS) < 2\r\n\t\t\tand self.can_afford(UnitTypeId.BARRACKS)\r\n\t\t):\r\n\t\t\tworkers = self.workers.gathering\r\n\t\t\tif (\r\n\t\t\t\tworkers and self.townhalls\r\n\t\t\t):\r\n\t\t\t\tworker = workers.furthest_to(workers.center)\r\n\t\t\t\tlocation = await self.find_placement(UnitTypeId.BARRACKS, self.townhalls.random.position, placement_step=5)\r\n\t\t\t\tif location:\r\n\t\t\t\t\tself.do(worker.build(UnitTypeId.BARRACKS, location), subtract_cost=True)\r\n\tasync def build_refinery(self):\r\n\t\tfor th in self.townhalls.ready:\r\n\t\t\tvgs = self.vespene_geyser.closer_than(10, th)\r\n\t\t\tfor vg in vgs:\r\n\t\t\t\tif await self.can_place(UnitTypeId.REFINERY, vg.position) and self.can_afford(UnitTypeId.REFINERY) and self.structures(UnitTypeId.BARRACKS).ready.amount + self.already_pending(UnitTypeId.BARRACKS) >= 3 and (self.already_pending(UnitTypeId.REFINERY) + self.structures(UnitTypeId.REFINERY).ready.amount < 1):\r\n\t\t\t\t\tworkers = self.workers.gathering\r\n\t\t\t\t\tif workers:\r\n\t\t\t\t\t\tworker = workers.closest_to(vg)\r\n\t\t\t\t\t\tself.do(worker.build(UnitTypeId.REFINERY, vg), subtract_cost=True)\r\n\t\t\t\t\t\tbreak\r\n\tasync def expansion(self):\r\n\t\tif (\r\n\t\t\t1 <= self.townhalls.amount < 6\r\n\t\t\tand self.already_pending(UnitTypeId.COMMANDCENTER) == 0\r\n\t\t\tand self.can_afford(UnitTypeId.COMMANDCENTER)\r\n\t\t):\r\n\t\t\tlocation = await self.get_next_expansion()\r\n\t\t\tif location:\r\n\t\t\t\tworker = self.select_build_worker(location)\r\n\t\t\t\tif worker and self.can_afford(UnitTypeId.COMMANDCENTER):\r\n\t\t\t\t\tself.do(worker.build(UnitTypeId.COMMANDCENTER, location), subtract_cost=True)\r\n\r\n\tasync def mules(self):\r\n\t\tfor oc in self.townhalls(UnitTypeId.ORBITALCOMMAND).filter(lambda x: x.energy >= 50):\r\n\t\t\tmfs = self.mineral_field.closer_than(10, oc)\r\n\t\t\tif mfs:\r\n\t\t\t\tmf = max(mfs, key=lambda x: x.mineral_contents)\r\n\t\t\t\tself.do(oc(AbilityId.CALLDOWNMULE_CALLDOWNMULE, mf))\r\n\tasync def marines(self):\r\n\t\tif self.supply_left > 0 and self.supply_army < 60:\r\n\t\t\tfor rax in self.structures(UnitTypeId.BARRACKS).idle:\r\n\t\t\t\tif self.can_afford(UnitTypeId.MARINE):\r\n\t\t\t\t\tself.do(rax.train(UnitTypeId.MARINE), subtract_cost=True, subtract_supply=True)\r\n\tasync def attackmarines(self):\r\n\t\tmarines: Units = self.units(UnitTypeId.MARINE).idle\r\n\t\tif marines.amount >= 20:\r\n\t\t\ttarget = self.enemy_structures.random_or(self.enemy_start_locations[0]).position\r\n\t\t\tfor marine in marines:\r\n\t\t\t\tself.do(marine.attack(target))\r\n\tasync def morebarracks(self):\r\n\t\tbarracks_tech_requirement: float = 
self.tech_requirement_progress(UnitTypeId.BARRACKS)\r\n\t\tif(\r\n\t\t\tbarracks_tech_requirement == 1\r\n\t\t\tand self.townhalls.amount >= 2\r\n\t\t\tand self.structures(UnitTypeId.BARRACKS).ready.amount + self.already_pending(UnitTypeId.BARRACKS) < 10\r\n\t\t\tand self.can_afford(UnitTypeId.BARRACKS)\r\n\t\t\t):\r\n\t\t\t\tworkers = self.workers.gathering\r\n\t\t\t\tif(\r\n\t\t\t\tworkers and self.townhalls\r\n\t\t\t):\r\n\t\t\t\t\tworker = workers.furthest_to(workers.center)\r\n\t\t\t\tlocation = await self.find_placement(UnitTypeId.BARRACKS, self.townhalls.random.position, placement_step=5)\r\n\t\t\t\tif location:\r\n\t\t\t\t\tself.do(worker.build(UnitTypeId.BARRACKS, location), subtract_cost=True)\r\n\tasync def more_workers1(self):\r\n\t\tfor th in self.townhalls.idle:\r\n\t\t\tif(\r\n\t\t\t\tself.can_afford(UnitTypeId.SCV) \r\n\t\t\t\tand self.supply_left > 0\r\n\t\t\t\tand self.supply_workers < 37\r\n\t\t\t\tand self.townhalls.amount >= 2\r\n\t\t\t\tand self.can_afford(UnitTypeId.SCV)\r\n\t\t\t\tand (\r\n\t\t\t\tself.structures(UnitTypeId.BARRACKS).ready.amount < 1 \r\n\t\t\t\tand self.townhalls(UnitTypeId.COMMANDCENTER).idle \r\n\t\t\t\tor self.townhalls(UnitTypeId.ORBITALCOMMAND).idle\r\n\t\t\t\t)\r\n\t\t):\r\n\t\t\t\tself.do(th.train(UnitTypeId.SCV), subtract_cost=True, subtract_supply=True)\r\n\tasync def build_engineeringbay(self):\r\n\t\tmarines: Units = self.units(UnitTypeId.MARINE).idle\r\n\t\tif marines.amount > 8 and (self.structures(UnitTypeId.ENGINEERINGBAY).ready.amount + self.already_pending(UnitTypeId.ENGINEERINGBAY) < 1):\r\n\t\t\tworkers = self.workers.gathering\r\n\t\t\tworker = workers.furthest_to(workers.center)\r\n\t\t\tlocation = await self.find_placement(UnitTypeId.ENGINEERINGBAY, self.townhalls.random.position, placement_step=5)\r\n\t\t\tif location:\r\n\t\t\t\tif self.can_afford(UnitTypeId.ENGINEERINGBAY):\r\n\t\t\t\t\tself.do(worker.build(UnitTypeId.ENGINEERINGBAY, location), subtract_cost=True)\r\n\r\n\tasync def upgradearmor1(self):\r\n\t\tif self.already_pending_upgrade(UpgradeId.TERRANINFANTRYARMORSLEVEL1) == 0 and self.can_afford(UpgradeId.TERRANINFANTRYARMORSLEVEL1):\r\n\t\t\t\tengineeringbay_ready = self.structures(UnitTypeId.ENGINEERINGBAY).ready\r\n\t\t\t\tif engineeringbay_ready:\r\n\t\t\t\t\tself.research(UpgradeId.TERRANINFANTRYARMORSLEVEL1)\r\n\tasync def upgradeweapon1(self):\r\n\t\tif self.already_pending_upgrade(UpgradeId.TERRANINFANTRYWEAPONSLEVEL1) == 0 and self.can_afford(UpgradeId.TERRANINFANTRYWEAPONSLEVEL1):\r\n\t\t\t\tengineeringbay_ready = self.structures(UnitTypeId.ENGINEERINGBAY).ready\r\n\t\t\t\tif engineeringbay_ready:\r\n\t\t\t\t\tself.research(UpgradeId.TERRANINFANTRYWEAPONSLEVEL1)\r\n\tasync def more_workers2(self):\r\n\t\tfor th in self.townhalls.idle:\r\n\t\t\tif(\r\n\t\t\t\tself.can_afford(UnitTypeId.SCV) \r\n\t\t\t\tand self.supply_left > 0\r\n\t\t\t\tand self.supply_workers < 44\r\n\t\t\t\tand self.townhalls.amount >= 3\r\n\t\t\t\tand self.can_afford(UnitTypeId.SCV)\r\n\t\t\t\tand (\r\n\t\t\t\tself.structures(UnitTypeId.BARRACKS).ready.amount < 1 \r\n\t\t\t\tand self.townhalls(UnitTypeId.COMMANDCENTER).idle \r\n\t\t\t\tor self.townhalls(UnitTypeId.ORBITALCOMMAND).idle\r\n\t\t\t\t)\r\n\t\t):\r\n\t\t\t\tself.do(th.train(UnitTypeId.SCV), subtract_cost=True, subtract_supply=True)\r\n\tasync def more_supply(self):\r\n\t\tif self.townhalls.amount >= 3 and self.already_pending(UnitTypeId.SUPPLYDEPOT) <= 2:\r\n\t\t\tworkers = self.workers.gathering\r\n\t\t\tworker = workers.furthest_to(workers.center)\r\n\t\t\tlocation = await 
self.find_placement(UnitTypeId.SUPPLYDEPOT, worker.position, placement_step=3)\r\n\t\t\tif location:\r\n\t\t\t\tif self.can_afford(UnitTypeId.SUPPLYDEPOT):\r\n\t\t\t\t\tself.do(worker.build(UnitTypeId.SUPPLYDEPOT, location), subtract_cost=True)\r\n\tasync def build_factory(self):\r\n\t\tfactory_tech_requirement: float = self.tech_requirement_progress(UnitTypeId.FACTORY)\r\n\t\tif(\r\n\t\t\tfactory_tech_requirement == 1\r\n\t\t\tand self.structures(UnitTypeId.ENGINEERINGBAY).ready.amount >= 1\r\n\t\t\tand self.structures(UnitTypeId.FACTORY).ready.amount + self.already_pending(UnitTypeId.FACTORY) < 1\r\n\t\t\tand self.can_afford(UnitTypeId.FACTORY)\r\n\t\t\t):\r\n\t\t\t\tworkers = self.workers.gathering\r\n\t\t\t\tif(\r\n\t\t\t\tworkers and self.townhalls\r\n\t\t\t):\r\n\t\t\t\t\tworker = workers.furthest_to(workers.center)\r\n\t\t\t\tlocation = await self.find_placement(UnitTypeId.FACTORY, self.townhalls.random.position, placement_step=5)\r\n\t\t\t\tif location:\r\n\t\t\t\t\tself.do(worker.build(UnitTypeId.FACTORY, location), subtract_cost=True)\r\n\tasync def build_armory(self):\r\n\t\tarmory_tech_requirement: float = self.tech_requirement_progress(UnitTypeId.ARMORY)\r\n\t\tmarines: Units = self.units(UnitTypeId.MARINE).idle\r\n\t\tif(\r\n\t\t\tarmory_tech_requirement == 1\r\n\t\t\tand self.structures(UnitTypeId.ENGINEERINGBAY).ready.amount >= 1\r\n\t\t\tand self.already_pending_upgrade(UpgradeId.TERRANINFANTRYARMORSLEVEL1) == 0\r\n\t\t\tand self.structures(UnitTypeId.ARMORY).ready.amount + self.already_pending(UnitTypeId.ARMORY) <= 1\r\n\t\t\tand self.can_afford(UnitTypeId.ARMORY)\r\n\t\t\t):\r\n\t\t\t\tworkers = self.workers.gathering\r\n\t\t\t\tif(\r\n\t\t\t\tworkers and self.townhalls\r\n\t\t\t):\r\n\t\t\t\t\tworker = workers.furthest_to(workers.center)\r\n\t\t\t\tlocation = await self.find_placement(UnitTypeId.ARMORY, self.townhalls.random.position, placement_step=3)\r\n\t\t\t\tif location:\r\n\t\t\t\t\tself.do(worker.build(UnitTypeId.ARMORY, location), subtract_cost=True)\r\n\tasync def upgradetier2(self):\r\n\t\tif self.structures(UnitTypeId.ARMORY).ready.amount >= 1:\r\n\t\t\tif self.can_afford(UpgradeId.TERRANINFANTRYARMORSLEVEL2):\r\n\t\t\t\tif self.structures(UnitTypeId.ENGINEERINGBAY).idle:\r\n\t\t\t\t\tself.research(UpgradeId.TERRANINFANTRYARMORSLEVEL2)\r\n\t\t\t\t\tself.research(UpgradeId.TERRANINFANTRYWEAPONSLEVEL2)\r\n\tasync def boombarracks(self):\r\n\t\tbarracks_tech_requirement: float = self.tech_requirement_progress(UnitTypeId.BARRACKS)\r\n\t\tif(\r\n\t\t\tbarracks_tech_requirement == 1\r\n\t\t\tand self.townhalls.amount >= 5\r\n\t\t\tand self.structures(UnitTypeId.BARRACKS).ready.amount + self.already_pending(UnitTypeId.BARRACKS) < 15\r\n\t\t\tand self.can_afford(UnitTypeId.BARRACKS)\r\n\t\t\t):\r\n\t\t\t\tworkers = self.workers.gathering\r\n\t\t\t\tif(\r\n\t\t\t\tworkers and self.townhalls\r\n\t\t\t):\r\n\t\t\t\t\tworker = workers.furthest_to(workers.center)\r\n\t\t\t\tlocation = await self.find_placement(UnitTypeId.BARRACKS, self.townhalls.random.position, placement_step=5)\r\n\t\t\t\tif location:\r\n\t\t\t\t\tself.do(worker.build(UnitTypeId.BARRACKS, location), subtract_cost=True)\r\n\r\n\r\n\r\n\r\nsc2.run_game(\r\n    sc2.maps.get(\"AcropolisLE\"),\r\n    [Bot(Race.Terran, Vinc3nt()),\r\n\tComputer(Race.Zerg, Difficulty.Hard)\r\n\t], realtime=False)\r\n","repo_name":"Nunikid/SC2","sub_path":"Modelos de 
IA/Vinc3nt2.py","file_name":"Vinc3nt2.py","file_ext":"py","file_size_in_byte":11358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72904251713","text":"import json\nimport pytest\n\n\n@pytest.fixture()\ndef sample_test(testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.mark.railflow(author='Bob', description='Addition of two numbers',\n jira_id=100231, test_path='test_calculation.py',\n case_fields='filedA1', result_fields='fieldB1',\n id_mappings='map id1', case_type='test case', case_priority='important')\n def test_pass():\n assert 1==1\n\n @pytest.mark.railflow(testrail_user='Nulli',\n testrail_project= \"Mathematics\",\n case_fields='field',\n result_fields='output',\n test_path='manipulation.py',\n case_type='Normal tests',\n case_priority='Important',\n assign=['user1@gmail.com', 'user2@gmail.com'])\n class TestClass:\n\n def test_fail(self):\n assert 4 == 5\n \"\"\"\n )\n return testdir\n\n\n@pytest.fixture()\ndef load_json(sample_test):\n sample_test.runpytest(\"--jsonfile=output.json\")\n\n with open(\"output.json\", \"r\") as f:\n report = json.load(f)\n\n return report\n\n\ndef test_open_json(load_json):\n \"\"\"\n Tests if json file created and output is dictionary type\n \"\"\"\n if type(load_json[0]) == dict:\n assert True\n\n\n@pytest.mark.parametrize(\n \"A, B\",\n [\n (\"author\", \"Bob\"),\n (\"description\", \"Addition of two numbers\"),\n (\"jira_id\", 100231),\n (\"test_path\", \"test_calculation.py\"),\n (\"case_fields\", \"filedA1\"),\n (\"result_fields\", \"fieldB1\"),\n (\"id_mappings\", \"map id1\"),\n (\"case_type\", \"test case\"),\n (\"case_priority\", \"important\"),\n ],\n)\ndef test_json(load_json, A, B):\n \"\"\"\n Tests if railflow_test_attributes are correctly printed in json report\n \"\"\"\n test_attr = load_json[0][\"railflow_test_attributes\"]\n assert test_attr[A] == B\n\n\n@pytest.mark.parametrize(\n \"A,B\",\n [\n (\"suite_name\", \"test_json_test_report\"),\n (\"test_name\", \"test_pass\"),\n (\"details\", None),\n (\"markers\", \"\"),\n (\"result\", \"PASSED\"),\n (\"file_name\", \"test_json_test_report.py\"),\n ],\n)\ndef test_json_test_report(load_json, A, B):\n \"\"\"\n Tests report paramters\n \"\"\"\n\n report_dict = load_json[0]\n assert report_dict[A] == B\n\n\n@pytest.mark.parametrize(\n \"A,B\",\n [\n (\"testrail_user\", \"Nulli\"),\n (\"testrail_project\", \"Mathematics\"),\n (\"test_path\", \"manipulation.py\"),\n (\"case_fields\", \"field\"),\n (\"result_fields\", \"output\"),\n (\"case_type\", \"Normal tests\"),\n (\"case_priority\", \"Important\"),\n (\"assign\", [\"user1@gmail.com\", \"user2@gmail.com\"]),\n ],\n)\ndef test_json_class(load_json, A, B):\n\n \"\"\"\n Tests railflow_test_attributes in class.\n \"\"\"\n\n test_attr = load_json[1][\"railflow_test_attributes\"]\n assert test_attr[A] == B\n\n\n@pytest.mark.parametrize(\n \"A,B\",\n [\n (\"suite_name\", \"TestClass\"),\n (\"test_name\", \"test_fail\"),\n (\"details\", None),\n (\"result\", \"FAILED\"),\n (\"file_name\", \"test_json_class_report.py\"),\n ],\n)\ndef test_json_class_report(load_json, A, B):\n \"\"\"\n Tests failed class report parameters\n \"\"\"\n report_dict = load_json[1]\n assert report_dict[A] == B\n","repo_name":"sujithatzackriya/railflow-pytest-plugin","sub_path":"tests/test_railflow.py","file_name":"test_railflow.py","file_ext":"py","file_size_in_byte":3348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
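A short, hedged aside on the record above: the pytest suite expects the railflow plugin, when invoked with `--jsonfile=output.json`, to emit a list of per-test dictionaries. The sketch below is not part of the dataset; it only illustrates, using the field names asserted in those tests (`result`, `test_name`, `railflow_test_attributes`), how such a report could be summarized after a run. The helper function and the assumption that the report sits in `output.json` are for illustration only.

import json
from collections import Counter

def summarize_report(path="output.json"):
    # Load the list of per-test entries the plugin is expected to write.
    with open(path, "r", encoding="utf-8") as f:
        report = json.load(f)

    # Tally outcomes using the "result" field asserted in the tests above.
    counts = Counter(entry.get("result", "UNKNOWN") for entry in report)

    # Pull the railflow marker metadata attached to each test, if any.
    priorities = {
        entry["test_name"]: entry.get("railflow_test_attributes", {}).get("case_priority")
        for entry in report
    }
    return counts, priorities

if __name__ == "__main__":
    counts, priorities = summarize_report()
    print(dict(counts))    # e.g. {'PASSED': 1, 'FAILED': 1}
    print(priorities)      # e.g. {'test_pass': 'important', 'test_fail': 'Important'}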
+{"seq_id":"73540852034","text":"\nimport os, sys, string, re, time\nimport marshal\n\nfrom log import *\n\nimport time,hashlib\n\nimport neo_cgi, neo_util\n\nfrom clearsilver import odb, hdfhelp, odb_sqlite3\n\nclass WhichReadDB(odb.Database):\n def __init__ (self, conn):\n odb.Database.__init__(self, conn)\n self.addTable(\"whichread\", \"wr_whichread\", WhichReadTable)\n\n def get(self, readerid):\n row = self.whichread.lookup(readerid=readerid)\n if not row: row = ''\n return row\n\nclass WhichReadTable(odb.Table):\n def _defineRows(self):\n self.d_addColumn(\"readerid\", odb.kVarString, primarykey=1)\n self.d_addColumn(\"wrlist\", odb.kVarString)\n\ndef createTables(path):\n dbpath = \"%s/whichread.db3\" % path\n# conn = odb_sqlite3.Connection(dbpath, autocommit=0)\n conn = odb_sqlite3.Connection(dbpath)\n db = WhichReadDB(conn)\n\n db.createTables()\n db.synchronizeSchema()\n db.createIndices()\n\n \n\nclass WhichRead:\n def __init__ (self, listname,path,ncgi):\n self.listname = listname\n self._path = path\n self.ncgi = ncgi\n self.__db = None\n self._whichReadID = self.getWhichReadID()\n\n def getWhichReadID(self):\n wrid = self.ncgi.hdf.getValue(\"Cookie.WRID\",\"\")\n if not wrid:\n m = hashlib.md5()\n m.update(\"%s-%s\" % (self.ncgi.hdf.getValue(\"CGI.RemoteAddress\",\"ADDR\"),\n time.time()))\n wrid = m.hexdigest()\n log(\"issued new WhichReadID: %s\" % wrid)\n self.ncgi.cookieSet(\"WRID\",wrid,persist=1)\n # self.ncgi.hdf.setValue(\"Cookie.WRID\",wrid)\n return wrid\n\n def _db(self):\n if self.__db is None:\n dbpath = \"%s/whichread.db3\" % self._path\n# conn = odb_sqlite3.Connection(dbpath, autocommit=0)\n conn = odb_sqlite3.Connection(dbpath)\n self.__db = WhichReadDB(conn)\n return self.__db\n\n def markMsgRead(self, message_num):\n\n # unpack the seen cookie\n seencookiename = \"%s.WR\" % self.listname\n seencookie = self.ncgi.hdf.getValue(\"Cookie.%s\" % seencookiename, \"\")\n if seencookie:\n c_parts = string.split(seencookie,\",\")\n else:\n c_parts = []\n mnum_str = \"%s\" % message_num\n\n try:\n index = c_parts.remove(mnum_str)\n log(\"already seen in cookie: %s\" % message_num)\n except ValueError:\n log(\"markread: %s\" % message_num)\n # yes, it's new!\n \n # make a new seen cookie! 
(only 200 entries)\n c_parts.insert(0,mnum_str)\n new_seencookie = string.join(c_parts[:200],\",\")\n self.ncgi.cookieSet(seencookiename,new_seencookie,persist=1)\n\n # add to whichread DB\n self.addToDB(message_num)\n\n # append to whichread log\n fp = open(\"%s/whichreadchanges.log\" % self._path,\"ab+\")\n fp.write(\"%s %s\\n\" % (self._whichReadID,mnum_str))\n fp.close()\n\n def getWRList(self):\n # read whichread from disk\n wdb = self._db()\n whichread = \"\"\n\n whichread = wdb.whichread.lookup(readerid=self._whichReadID)\n if whichread is None:\n wrlist = ''\n else:\n wrlist = whichread.wrlist\n wrl = WRList(wrlist)\n return wrl\n\n def addToDB(self,mnum):\n wdb = self._db()\n whichread = \"\"\n \n whichread = wdb.whichread.lookup(readerid=self._whichReadID)\n if whichread is None:\n wrlist = ''\n else:\n wrlist = whichread.wrlist\n\n wr_list = WRList(wrlist)\n wr_list.markRead(mnum)\n \n row = wdb.whichread.lookupCreate(readerid=self._whichReadID)\n row.wrlist = wr_list.dump()\n row.save()\n\n def __del__ (self):\n if self.__db:\n self.__db.close()\n\n\nclass WRList:\n def __init__(self,val):\n self._val = val\n self._parts = string.split(val,\",\")\n self._dict = {}\n dict = self._dict\n for a_part in self._parts:\n dict[a_part] = 1\n\n def markRead(self,mnum):\n mnum = \"%s\" % mnum\n try:\n index = self._parts.index(mnum)\n except ValueError:\n self._parts.insert(0,mnum)\n \n def dump(self):\n # log(\"WRLIST: %s\" % self._parts)\n return string.join(self._parts,\",\")\n\n def isRead(self,mnum):\n mnum = \"%s\" % mnum\n # log(\"isRead %s = %s\" % (mnum,self._dict.has_key(mnum)))\n return self._dict.has_key(mnum)\n \n","repo_name":"jeske/csla","sub_path":"pysrc/which_read.py","file_name":"which_read.py","file_ext":"py","file_size_in_byte":4463,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"31840364126","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass MLP(nn.Module):\n \n def __init__(self, sizes, activation=nn.ReLU, input_norm=None, output_layer_init=None, output_activation=None, seed=None):\n super().__init__()\n\n if seed is not None:\n self.seed = torch.manual_seed(seed)\n \n self.layers = []\n for i in range(len(sizes)-2):\n self.layers += [nn.Linear(sizes[i], sizes[i+1]), activation()]\n self.layers.append( nn.Linear(sizes[-2], sizes[-1]) )\n\n if output_layer_init is not None:\n self.layers[-1].weight.data.uniform_(-output_layer_init, output_layer_init)\n\n if output_activation is not None:\n self.layers.append(output_activation())\n \n self.layers = nn.ModuleList(self.layers)\n\n self.input_norm = input_norm\n \n def forward(self, x):\n \n if self.input_norm is not None:\n x = self.input_norm(x)\n\n for layer in self.layers:\n x = layer(x)\n \n return x\n\n\nclass Actor(MLP):\n pass\n\n\nclass Critic(MLP):\n\n def __init__(self, **kargs):\n super().__init__(**kargs)\n \n def forward(self, states, actions):\n\n if self.input_norm is not None:\n x = self.input_norm(states)\n else:\n x = states\n \n x = torch.cat((x, actions), dim=1)\n\n for layer in self.layers:\n x = layer(x)\n \n return x\n\n","repo_name":"joao-d-semedo/drlnd-p3-collab-compet","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14037277010","text":"import time\n\nfrom winterbloom_sol import _utils\n\n# Use nanaseconds for absolute time throughout to avoid 
losing precision for float\n# time over long program duration.\n_NS_TO_S = 1000000000\n\n\nclass SlewLimiter:\n    \"\"\"A Slew Limiter.\n\n    The slew limiter's rate is set at creation time and can be modified at any\n    time::\n\n        slew = sol.SlewLimiter(\n            rate=0.1, # seconds\n        )\n\n        slew.rate = 1.0 # seconds\n\n\n    After that, you can set the target value for the limiter::\n\n        slew.target = state.cc[1] * 10.0\n\n    The slew's output will then be available to be used as a CV output::\n\n        outputs.cv_b = slew.output\n\n    \"\"\"\n\n    def __init__(self, rate):\n        self.rate = rate\n        self._last = None\n        self._target = None\n        self._set_time = 0\n\n    @property\n    def target(self):\n        return self._target\n\n    @target.setter\n    def target(self, value):\n        # Don't limit for the initial value.\n        if self._last is None:\n            self._last = value\n        else:\n            self._last = self.output\n\n        # Ignore duplicate target values to avoid\n        # re-starting the slew.\n        if self._target is not None and _utils.isclose(\n            value, self._target, rel_tol=1e-05\n        ):\n            return\n\n        self._target = value\n        self._set_time = time.monotonic_ns()\n\n    @property\n    def output(self):\n        if self._target is None:\n            return 0\n\n        now = time.monotonic_ns()\n        rate_s = self.rate * _NS_TO_S\n        delta = min(1.0, (now - self._set_time) / rate_s)\n\n        return _utils.lerp(self._last, self._target, delta)\n","repo_name":"wntrblm/Sol","sub_path":"firmware/winterbloom_sol/slew_limiter.py","file_name":"slew_limiter.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"61"} +{"seq_id":"9926879005","text":"########################################################################################################################\r\n########################################################################################################################\r\n###                               Bot Objects for WhisperTrades.com API                                              ###\r\n###                                                                                                                  ###\r\n###   Authored by Paul Nobrega   Contact: Paul@PaulNobrega.net                                                       ###\r\n###   Python Version 3.10                                                                                            ###\r\n########################################################################################################################\r\n########################################################################################################################\r\nimport json\r\nimport warnings\r\nfrom datetime import datetime\r\n\r\nclass WhisperTradesBots(object):\r\n\r\n    def __init__(self, scheduler):\r\n        self._scheduler = scheduler\r\n        self._endpts = self._scheduler._endpts\r\n        self.bots_list = self.__bot_list(self._scheduler)\r\n\r\n    def __call__(self, bot_number):\r\n        for bot in self.bots_list.all:\r\n            if bot.number == bot_number:\r\n                return bot\r\n        warnings.warn(f\"Bot Number: {bot_number} not found!\")\r\n        return\r\n    \r\n    def get_all_bot_variables(self) -> json:\r\n        \"\"\"\r\n        Query WhisperTrades.com for all bot variables and associate data with related bot object\r\n        \r\n        :return: json data from response received from WhisperTrades API\r\n        :type return: json\r\n        \"\"\"\r\n        all_variables = self._endpts.variables.get_all_bot_variables()\r\n        for var in all_variables:\r\n            for bot in self.bots_list.all:\r\n                for i, bot_var in enumerate(bot.variables):\r\n                    if bot_var['number'] == var['number']:\r\n                        bot.variables[i] = var\r\n        return all_variables\r\n    \r\n    def update_all_bots(self):\r\n        \"\"\"\r\n        Update bots_list with data retrieved from WhisperTrades.com API\r\n        \"\"\"\r\n        self.bots_list.all = []\r\n        _ = [self.bots_list.add_bot_to_list(bot) for bot in self._endpts.bots.get_all_bots()]\r\n        return\r\n\r\n    class 
__bot_list(object):\r\n def __init__(self, scheduler):\r\n self.all = []\r\n self._scheduler = scheduler\r\n self._endpts = self._scheduler._endpts\r\n \r\n def all(self) -> list:\r\n \"\"\"\r\n Return list of all bot numbers\r\n \"\"\"\r\n return self.all\r\n \r\n def is_enabled(self) -> list:\r\n \"\"\"\r\n Return list of all bot numbers that have status = 'enabled'\r\n \"\"\"\r\n return [bot for bot in self.all if bot.status.lower() == 'enabled']\r\n \r\n def is_disabled(self) -> list:\r\n \"\"\"\r\n Return list of all bot numbers that have status = 'disabled'\r\n \"\"\"\r\n return [bot for bot in self.all if bot.status.lower() == 'disabled']\r\n \r\n def is_disabled_on_close(self) -> list:\r\n \"\"\"\r\n Return list of all bot numbers that have status = 'disabled on close'\r\n \"\"\"\r\n return [bot for bot in self.all if bot.status.lower() == 'disabled on close']\r\n \r\n def add_bot_to_list(self, bot_dict:dict={}):\r\n \"\"\"\r\n Add dictionary representation of a WT bot to bot_list.all\r\n\r\n Note: if bot_number exists in bot_list.all, it is removed and replaced with the new information\r\n \"\"\"\r\n if bot_dict=={}:\r\n warnings.warn(f'bot_dict is empty!')\r\n return\r\n bot_json = json.loads(json.dumps(bot_dict))\r\n self.remove_bot_from_list(bot_json['number'])\r\n self.all.append(self.bot_obj(bot_json, self._scheduler))\r\n return\r\n \r\n def remove_bot_from_list(self, bot_number:str):\r\n \"\"\"\r\n Removes bot from bots.all list by given bot number\r\n \"\"\"\r\n for i in range(len(self.all)):\r\n if self.all[i].number == bot_number:\r\n del self.all[i]\r\n return\r\n return\r\n\r\n\r\n class bot_obj(object):\r\n \r\n def __init__(self, bot_dict, scheduler):\r\n self.number = ''\r\n self.name = ''\r\n self.broker_connection = {}\r\n self.is_paper= False\r\n self.status = ''\r\n self.can_enable = True\r\n self.can_disable = True\r\n self.symbol = ''\r\n self.type = ''\r\n self.notes = ''\r\n self.last_active_at = ''\r\n self.disabled_at = ''\r\n self.entry_condition = {}\r\n self.exit_condition = {}\r\n self.adjustments = []\r\n self.notifications = []\r\n self.variables = []\r\n self._scheduler = scheduler\r\n self._endpts = self._scheduler._endpts\r\n self.__bot_dict_to_attr(bot_dict)\r\n self.enable = self._change_status('enable', self.number, self._endpts, self._scheduler)\r\n self.disable = self._change_status('disable', self.number, self._endpts, self._scheduler)\r\n \r\n def __str__(self):\r\n attrs = vars(self)\r\n test = [f'{item[0]}: {str(item[1])}' for item in attrs.items()]\r\n return \"\\n\".join(test)\r\n \r\n def __repr__(self):\r\n return self.__str__()\r\n \r\n def __bot_dict_to_attr(self, bot_dict):\r\n for key in bot_dict: \r\n setattr(self, key, bot_dict[key])\r\n \r\n def update(self):\r\n \"\"\"\r\n Query WhisperTrades.com for bot information and update object with new information \r\n \"\"\"\r\n bot_dict = self._endpts.bots.get_bot(bot_number=self.number)\r\n self.__bot_dict_to_attr(json.loads(json.dumps(bot_dict)))\r\n return\r\n \r\n def get_bot_variables(self):\r\n \"\"\"\r\n Query WhisperTrades.com for variables associated with bot and update object with new information \r\n \"\"\"\r\n all_var = [v['number'] for v in self.variables]\r\n self.variables = []\r\n self.variables = [self._endpts.variables.get_bot_variables(v) for v in all_var]\r\n return self.variables\r\n \r\n class _change_status(object):\r\n\r\n def __init__(self, target_status, bot_number, endpts, scheduler):\r\n self._endpts = endpts\r\n self._scheduler = scheduler\r\n 
self._target_status = target_status\r\n self._bot_number = bot_number\r\n\r\n def __call__(self):\r\n return self._toggle_status()\r\n \r\n def _toggle_status(self):\r\n if self._target_status == 'enable':\r\n return self._endpts.bots.enable_bot(self._bot_number)\r\n elif self._target_status == 'disable':\r\n return self._endpts.bots.disable_bot(self._bot_number)\r\n return\r\n \r\n def _meridian_time_to_military_time(self, time_str):\r\n return datetime.strptime(time_str, '%I:%M %p').strftime('%H:%M')\r\n\r\n def at_time(self, time_str=None, tz_str='America/New_York'):\r\n \"\"\"\r\n Schedule bot status change.\r\n \r\n :param time_str: string representation of military time (example: '22:30'). If 12-hr format, PM or AM must be included in string.\r\n :type time_str: String\r\n :param tz_str: human readable TimeZone. Default is 'America/New_York'\r\n :type tz_str: String\r\n \"\"\"\r\n if not time_str or not isinstance(time_str, str):\r\n raise ValueError('Time input string is required!')\r\n if 'pm' in time_str.lower() or 'am' in time_str.lower():\r\n time_str = self._meridian_time_to_military_time(time_str)\r\n if not self._scheduler.scheduler_is_on:\r\n self._scheduler.start()\r\n self._scheduler.add_task(time_str, tz_str, self._toggle_status)\r\n return\r\n \r\n\r\n\r\n\r\n ","repo_name":"PaulNobrega/WhisperDriver","sub_path":"WhisperDriver/Obj/bots.py","file_name":"bots.py","file_ext":"py","file_size_in_byte":8873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70641515395","text":"import math\n\nD = {'food' : 'Tuna', 'quantity' : 42, 'color' : 'brown'}\n\nprint (D['food'])\n\nD['quantity'] += 1\n\nprint (D['quantity'])\n\nPerson = {}\n\nPerson['name'] = 'Bob'\nPerson['job'] = 'rich, ain\\'t got no need for no jahb'\nPerson['age'] = '25'\n\nprint (Person['job'])\n\nPerson = dict(name='Bahb', job='wandering rich man', age=26)\n\n#This overwrites the previous content of the dictionary\nPerson = dict(address = 'whatevs')\nprint (Person)\n\nbob2 = dict(zip(['name', 'job', 'age',], ['bob', 'dev', 21]))\nprint (bob2)\n\nbob3 = {'name': {'first': 'Bob', 'second': 'Bobson'},\n 'jobs': ['dev', 'ops guy']}\n\nprint (bob3['name']['second'])\nprint (bob3['name'])\nbob3['jobs'].append('boss of everything')\nprint (bob3['jobs'])\n\nD = {'a': 1, 'ddd': 90, 'b': 2, 'c': 3}\nD['e'] = 123\nprint (D['e'])\n\nif not 'f' in D:\n print ('f in D is missing!')\n\nvalue = D['x'] if 'x' in D else 0\nprint (value)\n\nK = list(D.keys())\nK.sort()\nprint (K)\n\nfor key in K:\n print (key, '=>', D[key])\n\nprint (\"This output is getting confusing!\")\n\nfor key in sorted(D):\n for char in key:\n print (char.upper())\n\nx = 5\nwhile x > 0:\n print (x, \"weeee\")\n x -= 1\n\n#Wew look at this nexting:\n\nsquare_roots = [math.sqrt(x) for x in [1, 9, 16, 25, 33, 43] ]\nfor square_root in square_roots: print (square_root)\n\nsquare_roots = [math.sqrt(x) for x in [1,3,4,5,6]]\nprint (square_roots)\n","repo_name":"DanielCalvo/studies","sub_path":"Python/Learning Python book/dictionaries.py","file_name":"dictionaries.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10417322554","text":"from pyrogram import filters\nfrom pyrogram.types import CallbackQuery, Message\n\nfrom config import Config\nfrom Music.core.clients import hellbot\nfrom Music.core.decorators import UserWrapper, check_mode\nfrom Music.helpers.formatters import formatter\nfrom 
Music.utils.pages import MakePages\nfrom Music.utils.youtube import ytube\n\n\n@hellbot.app.on_message(filters.command(\"song\") & ~Config.BANNED_USERS)\n@check_mode\n@UserWrapper\nasync def songs(_, message: Message):\n if len(message.command) == 1:\n return await message.reply_text(\"Nothing given to search.\")\n query = message.text.split(None, 1)[1]\n hell = await message.reply_photo(\n Config.BLACK_IMG, caption=f\"Searching “`{query}`” ...\"\n )\n all_tracks = await ytube.get_data(query, False, 10)\n rand_key = formatter.gen_key(str(message.from_user.id), 5)\n Config.SONG_CACHE[rand_key] = all_tracks\n await MakePages.song_page(hell, rand_key, 0)\n\n\n@hellbot.app.on_message(filters.command(\"lyrics\") & ~Config.BANNED_USERS)\n@check_mode\n@UserWrapper\nasync def lyrics(_, message: Message):\n if not Config.LYRICS_API:\n return await message.reply_text(\"Lyrics module is disabled!\")\n lists = message.text.split(\" \", 1)\n if not len(lists) == 2:\n return await message.reply_text(\n \"__Nothing given to search.__ \\nExample: `/lyrics loose yourself - eminem`\"\n )\n _input_ = lists[1].strip()\n query = _input_.split(\"-\", 1)\n if len(query) == 2:\n song = query[0].strip()\n artist = query[1].strip()\n else:\n song = query[0].strip()\n artist = \"\"\n text = f\"**Searching lyrics ...** \\n\\n__Song:__ `{song}`\"\n if artist != \"\":\n text += f\"\\n__Artist:__ `{artist}`\"\n hell = await message.reply_text(text)\n results = await ytube.get_lyrics(song, artist)\n if results:\n title = results[\"title\"]\n image = results[\"image\"]\n lyrics = results[\"lyrics\"]\n final = f\"• Song: {title} \\n• Lyrics: \\n{lyrics}\"\n if len(final) >= 4095:\n page_name = f\"{title}\"\n to_paste = f\" \\n{final} \\n\"\n link = await formatter.telegraph_paste(page_name, to_paste)\n await hell.edit_text(\n f\"**Lyrics too big! 
Get it from here:** \\n\\n• [{title}]({link})\",\n                disable_web_page_preview=True,\n            )\n        else:\n            await hell.edit_text(final)\n        chat = message.chat.title or message.chat.first_name\n        await hellbot.logit(\n            \"lyrics\",\n            f\"**⤷ Lyrics:** `{title}`\\n**⤷ Chat:** {chat} [`{message.chat.id}`]\\n**⤷ User:** {message.from_user.mention} [`{message.from_user.id}`]\",\n        )\n    else:\n        await hell.edit_text(\"Unexpected Error Occurred.\")\n\n\n@hellbot.app.on_callback_query(filters.regex(r\"song_dl(.*)$\") & ~Config.BANNED_USERS)\nasync def song_cb(_, cb: CallbackQuery):\n    _, action, key, rand_key = cb.data.split(\"|\")\n    user = rand_key.split(\"_\")[0]\n    key = int(key)\n    if cb.from_user.id != int(user):\n        await cb.answer(\"You are not allowed to do that!\", show_alert=True)\n        return\n    if action == \"adl\":\n        await ytube.send_song(cb, rand_key, key, False)\n        return\n    elif action == \"vdl\":\n        await ytube.send_song(cb, rand_key, key, True)\n        return\n    elif action == \"close\":\n        Config.SONG_CACHE.pop(rand_key)\n        await cb.message.delete()\n        return\n    else:\n        all_tracks = Config.SONG_CACHE[rand_key]\n        length = len(all_tracks)\n        if key == 0 and action == \"prev\":\n            key = length - 1\n        elif key == length - 1 and action == \"next\":\n            key = 0\n        else:\n            key = key + 1 if action == \"next\" else key - 1\n        await MakePages.song_page(cb, rand_key, key)\n","repo_name":"The-HellBot/Music","sub_path":"Music/plugins/songs.py","file_name":"songs.py","file_ext":"py","file_size_in_byte":3929,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"61"} +{"seq_id":"11969909859","text":"import os\nimport platform\nfrom contextlib import AbstractContextManager, closing, contextmanager, nullcontext\nfrom socket import AF_INET, SO_REUSEADDR, SOCK_STREAM, SOL_SOCKET, socket\nfrom typing import Callable, Optional, TypeVar\n\nfrom yaspin import yaspin\n\n_T = TypeVar(\"_T\")\n_SPINNER_FAILMSG = \"💥 \"\n_SPINNER_SUCCESSMSG = \"✅ \"\n\n\n@contextmanager\ndef _spinner(text):\n    with yaspin(text=text) as spinner:\n        try:\n            yield\n        except Exception:\n            spinner.fail(_SPINNER_FAILMSG)\n            raise\n        spinner.ok(_SPINNER_SUCCESSMSG)\n\n\ndef _get_spinner(real=True) -> Callable[[str], AbstractContextManager]:\n    if not real:\n        return lambda text: nullcontext()\n    return _spinner\n\n\ndef get_free_port():\n    with closing(socket(AF_INET, SOCK_STREAM)) as s:\n        s.bind((\"\", 0))\n        s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\n        return s.getsockname()[1]\n\n\nuname = platform.uname().release.lower()\n\nif platform.system() == \"Linux\" and (\"microsoft\" not in uname): # catch WSL\n    docker_host_name = \"172.17.0.1\"\nelse:\n    docker_host_name = \"host.docker.internal\"\n\n# if we expose a port in docker, this is where we expect to find it hosted\n# this is almost always just localhost\n# you can set the value here to some value with the YELLOWBOX_DOCKER_EXPOSE_HOST env-var\n# DO NOT manually change this const, instead call update_docker_expose_host\nDOCKER_EXPOSE_HOST = 
value\n\n\nupdate_docker_expose_host(os.getenv(\"YELLOWBOX_DOCKER_EXPOSE_HOST\"))\n","repo_name":"biocatchltd/yellowbox","sub_path":"yellowbox/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"61"} +{"seq_id":"10018115735","text":"import numpy as np\n\ndef input_MatVec():\n CorrectMatrix = False\n while True:\n Size = int(input('Enter Size of Matrix/Vector: '))\n A = np.zeros((Size,Size),dtype=float)\n print('Enter Matrix:')\n for i in range(Size):\n A[i,:] = list(map(int,input(f\"\\nEnter Values in Row {i} : \").strip().split()))[:Size]\n print('Printing Resulting Matrix...')\n for i in range(Size):\n print(A[i,:])\n \n CorrectMatrix = str(input('Is this Matrix Correct? (Y/N) '))\n if CorrectMatrix == 'y' or CorrectMatrix == 'Y':\n break\n else:\n print('Retrying...')\n print()\n\n print()\n CorrectMatrix = False\n while True:\n b = np.zeros((Size),dtype=float)\n print('Enter Vector:')\n b[:] = list(map(int,input(f\"\\nEnter Values : \").strip().split()))[:Size]\n print('Printing Resulting Vector...')\n print(b[:])\n \n CorrectMatrix = str(input('Is this Vector Correct? (Y/N) '))\n if CorrectMatrix == 'y' or CorrectMatrix == 'Y':\n break\n else:\n print('Retrying...')\n print()\n \n return A, b\n\n# Bi-Conjugate Gradient Solver\ndef BCG_Solver(A,M,b,x,tolerance):\n r = b - np.dot(A,x)\n rh = r \n k = 0 \n while np.linalg.norm(r) > tolerance:\n rho1 = np.dot(rh,r)\n if k != 0:\n beta = (rho1/rho2)*(alpha/omega)\n p = r + beta*(p-omega*v)\n else:\n p = r \n\n ph = np.dot(M,p)\n v = np.dot(A,ph)\n\n alpha = rho1/np.dot(rh,v)\n s = r - alpha*v\n\n sh = np.dot(M,s)\n t = np.dot(A,sh)\n\n omega = np.dot(t,s)/np.dot(t,t)\n x = x + alpha*ph + omega*sh \n r = s - omega*t \n rho2 = rho1 \n k += 1\n\n return x, k\n\ndef Generate_Pre(A,P):\n # Jacobi\n def Jacobi(A,P):\n n = len(A)\n P = np.zeros([n,n])\n\n for i in range(n): \n P[i,i] = 1/A[i,i]\n return P\n\n # SSOR\n def SSOR(A,P):\n n = len(A)\n D = np.zeros([n,n],dtype=float)\n L = np.zeros([n,n],dtype=float)\n\n for i in range(n):\n for j in range(n):\n if i == j:\n D[i,j] = A[i,j]\n elif i > j:\n L[i,j] = A[i,j]\n P = np.transpose(D+L)\n P = np.matmul(np.linalg.inv(D),P)\n P = np.matmul(D+L,P)\n return P\n\n # ILU\n def ILU(A,P):\n n = len(A)\n P = np.zeros([n,n],dtype=float)\n\n for k in range(n):\n for i in range(n):\n for j in range(n):\n if (A[i,k] != 0.0) and (A[k,j] != 0.0):\n P[i,j] = P[i,j] - (A[i,k]*A[k,j]/A[k,k])\n\n return P\n\n print()\n print('Enter Desired Preconditioner: 1) - Jacobi, 2) - SSOR, 3) - ILU')\n Pre_Type = int(input('Desired Preconditioner: '))\n if Pre_Type == 1:\n P = Jacobi(A,P)\n elif Pre_Type == 2:\n P = SSOR(A,P)\n elif Pre_Type == 3:\n P = ILU(A,P)\n else:\n print('ERROR: Unknown Preconditioner ID specified')\n exit()\n\n return P\n \n\nA, b = input_MatVec()\n# A = np.array( [[1., 2., 0.,],\n# [2., 4., 5.,],\n# [0., 5., 6.]], dtype=float)\n# b = np.array( [1., 2., 3.], dtype=float)\nx = np.ones(np.size(b),dtype=float)\nM = np.zeros([np.size(b),np.size(b)],dtype=float)\n\nM = Generate_Pre(A,M)\n\nx, its = BCG_Solver(A,M,b,x,1e-5)\n\nprint('Solution:',x)\nprint('Required Iterations:', its)\n\n#Calculate Error on Solve\nprint('Residual Error:',np.sum(np.matmul(A,x)-b))\n","repo_name":"jt3020/public","sub_path":"LinAlg/BCG.py","file_name":"BCG.py","file_ext":"py","file_size_in_byte":3604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
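For what it's worth, the update loop in the `BCG_Solver` record above (the `rho`/`alpha`/`omega` recurrence with the `s`/`t` correction step) is the stabilized variant of bi-conjugate gradients, BiCGSTAB, rather than classic BiCG. A quick way to sanity-check such a solver is to compare it against a direct solve on a small system; the matrix, right-hand side, preconditioner, and tolerance below are illustrative choices, not taken from the record:

```python
import numpy as np

# Small, well-conditioned test system. With M = I the preconditioned
# update in BCG_Solver reduces to plain (unpreconditioned) BiCGSTAB.
A = np.array([[4.0, 1.0, 0.0],
              [1.0, 3.0, 1.0],
              [0.0, 1.0, 2.0]])
b = np.array([1.0, 2.0, 3.0])
M = np.eye(3)

x_direct = np.linalg.solve(A, b)

# With the solver from the record in scope, the iterative answer should
# agree with the direct one to within the requested tolerance:
# x_iter, its = BCG_Solver(A, M, b, np.ones(3), 1e-10)
# assert np.allclose(x_iter, x_direct, atol=1e-8)
print(x_direct)
```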
+{"seq_id":"44096493651","text":"from django.urls import path,include\nfrom .views import ViewAllProducts, ViewProductDetail, ViewSearchedProduct, ViewTrendingProduct, CartAddRemoveUpdateView, WishListAddRemoveUpdateView, ProductReviewAddUpdateDelete, FavouriteBrands, OrderBuyCartCod, OrderDetail, ReturnProduct, ReplaceProduct, OrderBuyCartRazorpay, PaymentHandler\nfrom rest_framework.routers import DefaultRouter\n\nrouter = DefaultRouter()\nrouter.register('view_all_products', ViewAllProducts, basename='view_all_products')\n\n\nurlpatterns = [\n path('', include(router.urls)),\n path('fav_brands//', FavouriteBrands.as_view(), name='fav_brand_section'),\n path('product/', ViewProductDetail.as_view(), name='product_view'),\n path('product/review//', ProductReviewAddUpdateDelete.as_view(), name='product_reviews'),\n path('search//', ViewSearchedProduct.as_view(), name='search_view'),\n path('product/trending/', ViewTrendingProduct.as_view(), name='trending_product_view'),\n path('view_cart/', CartAddRemoveUpdateView.as_view(), name='view_cart'),\n path('cart/product//', CartAddRemoveUpdateView.as_view(), name='add_to_cart'),\n path('view_wishlist/', WishListAddRemoveUpdateView.as_view(), name='view_wishlist'),\n path('wishlist/product//', WishListAddRemoveUpdateView.as_view(), name='WishlistCrud'),\n path('order/', OrderBuyCartCod.as_view(), name='order'),\n path('order_razorpay/', OrderBuyCartRazorpay.as_view(), name='order_razorpay'),\n path('paymentHandler/', PaymentHandler.as_view(), name=\"PaytmentHandler\"),\n path('order/view//', OrderDetail.as_view(), name='vieworderdetails'),\n path('order/cancel//', OrderDetail.as_view(), name='Cancelorder'),\n path('order/return_products/', ReturnProduct.as_view(), name='Return Products'),\n path('order/return_product//', ReturnProduct.as_view(), name='return_products'),\n path('order/replace_products/', ReplaceProduct.as_view(), name='Replace Products'),\n path('order/replace_product//', ReplaceProduct.as_view(), name='replace_products'),\n]","repo_name":"bhargav2800/Cloth-Flossy-API","sub_path":"customer/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"8218616088","text":"from os import path\nimport sys\n\ndef get_input_file_locations(file_name: str) -> list:\n '''\n Return possible locations where specific file will be looked for.\n :param str file_name: name of input file\n :return: list of possible locations\n :rtype: list\n '''\n return [\n file_name,\n path.join('..', file_name),\n path.join('Input', file_name),\n path.join('..', 'Input', file_name)\n ]\n\ndef get_input_file_path(file_name: str) -> str:\n '''\n Return the path to the specified file name (if one exists).\n :param str file_name: name of the file to look for\n :return: path to the file or empty string if file not found\n :rtype: str\n '''\n for file_path in get_input_file_locations(file_name):\n if path.exists(file_path):\n return file_path\n return ''\n\ndef print_separator(file=sys.stdout):\n '''\n Print a horizontal line having a standard console window width (80 symbols).\n :param file: file to print separator to. 
Defaults to sys.stdout\n '''\n print(''.rjust(80, '-'), file=file)","repo_name":"Demarsch/data-analytics-bootcamp","sub_path":"03 - PyWare/Code/shared.py","file_name":"shared.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15480981176","text":"import os\nimport subprocess\nimport datetime\nimport hashlib\n\ndef writeToFile(start_urls, website_counter,internal_scripts_src, external_scripts_src):\n\texternal_scripts=[]\n\tinternal_scripts=[]\n\t#Create a folder for storing javascripts unless already present\n\tfor item in external_scripts_src:\n\t\texternal_js = item\n\t\texternal_js_hashname = hashlib.sha512(external_js.encode('utf-8')).hexdigest()\n\t\texternal_scripts.append(external_js_hashname)\n\t\texternal_js_hashname = str(external_js_hashname) + \".js\"\n\t\t#check if this script has already been stored\n\t\t#print \"Making file \" + external_js_hashname\t\n\t\tf_js = open('/home/group9p1/javascripts/' + start_urls[website_counter] + '/' + external_js_hashname, 'w' )\n\t\tf_js.write(external_js.encode('utf-8'))\n\t\tf_js.close()\n\n\tfor item in internal_scripts_src:\n\t\tinternal_js = item\n\t\tinternal_js_hashname = hashlib.sha512( internal_js.encode('utf-8') ).hexdigest()\n\t\tinternal_scripts.append(internal_js_hashname)\n\t\tinternal_js_hashname = str(internal_js_hashname) + \".js\"\n\t\t#check if this script has already been stored\n\t\tf_js = open('/home/group9p1/javascripts/' + start_urls[website_counter] + '/' + internal_js_hashname, 'w' )\n\t\tf_js.write(internal_js.encode('utf-8'))\n\t\tf_js.close()\t\n\t\t#print\n\t\n","repo_name":"cipher1729/js-crawler","sub_path":"writeToFile.py","file_name":"writeToFile.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"744808729","text":"#pylint: disable=bare-except,global-statement\n'''Helper module for querying etcd for configuration options.'''\n\nimport etcd\nimport os\n\n__CLIENT__ = None\n__ETCD_HOST__ = ''\n__ETCD_PORT__ = ''\n\ndef _get_hostname_and_port():\n '''Retrieves the hostname and port from the environmental variables.'''\n global __ETCD_HOST__\n global __ETCD_PORT__\n\n __ETCD_HOST__, __ETCD_PORT__ = os.environ.get('ETCD_ENDPOINT', '127.0.0.1:4001').split(':')\n\ndef _create_client():\n '''Creates a client if host and port exist.'''\n global __CLIENT__\n\n if __CLIENT__:\n return\n\n if __ETCD_HOST__ and __ETCD_PORT__:\n __CLIENT__ = etcd.Client(host=__ETCD_HOST__, port=int(__ETCD_PORT__), allow_redirect=True)\n\ndef get(key):\n '''Gets a key from the etcd store.'''\n if __CLIENT__:\n try:\n return __CLIENT__.get(key).value #pylint: disable=no-member\n except:\n return None\n\n_get_hostname_and_port()\n_create_client()\n","repo_name":"Millz0r/daedalus","sub_path":"daedalus/common/config_manager.py","file_name":"config_manager.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23619204956","text":"import os, shutil\nfrom PIL import Image\nimport random\n\ndef get_xmlPath(xmlPath):\n all_xmlName = next(os.walk(xmlPath))[2]\n xmlPath_list = [os.path.join(xmlPath, i) for i in all_xmlName]\n return xmlPath_list\n\ndef get_picPath(picPath):\n all_picName = next(os.walk(picPath))[2]\n picPath_list = [os.path.join(picPath, i) for i in all_picName]\n return picPath_list\n\ndef delete_file(filePath):\n if not 
os.path.exists(filePath):\n        print('%s does not exist, please check' %filePath)\n    else:\n        print('%s will be deleted' %filePath)\n        os.remove(filePath)\n\ndef selectPic(xmlPath, picPath):\n    # dic = {}\n    picFilePath = get_picPath(picPath)\n    picFilePath_list = [i for i in picFilePath if i.endswith('.jpg')]\n    allpicMark = True\n    for picFilePath in picFilePath_list:\n        xmlFilePath = picFilePath[:-4] + '.xml'\n        image = Image.open(picFilePath)\n        width, height = image.size\n        # dic[width, height] = 0\n        if width >= 416 and height >= 416:\n            # dic[width, height] += 1\n            continue\n        else:\n            delete_file(os.path.join(picPath, picFilePath))\n            delete_file(os.path.join(xmlPath, xmlFilePath))\n            allpicMark = False\n            # break\n    if allpicMark:\n        print('selectPic pass, all the pics are qualified!')\n    print('All pic number is ' + str(len(picFilePath_list)))\n    # print(dic)\n\ndef copyPicXml(sample_num, xmlPath, picPath):\n    picFilePath = get_picPath(picPath)\n    picFilePath_list = [i for i in picFilePath if i.endswith('.jpg')]\n    rdPic = random.sample(picFilePath_list, sample_num)\n    if not os.path.exists(picSavePath):\n        os.makedirs(picSavePath)\n\n    if not os.path.exists(xmlSavePath):\n        os.makedirs(xmlSavePath)\n\n    for picFilePath in rdPic:\n        repicFilePath = picFilePath.split(\"/\")[-1]\n        rexmlFilePath = repicFilePath[:-4] + '.xml'\n        shutil.copy(os.path.join(picPath, repicFilePath), os.path.join(picSavePath, repicFilePath))\n        print(os.path.join(picSavePath, repicFilePath) + \" has been written....\")\n        shutil.copy(os.path.join(xmlPath, rexmlFilePath), os.path.join(xmlSavePath, rexmlFilePath))\n        print(os.path.join(xmlSavePath, rexmlFilePath) + \" has been written....\")\n    print(\"copy pic and xml process is done!\")\n\nif __name__ == '__main__':\n    sample_num = 6000\n    picPath = '/home/supernode/data/coco/result/images'\n    xmlPath = '/home/supernode/data/coco/result/Annotations'\n\n    picSavePath = \"/home/supernode/anaconda3/envs/helmet/trafficSystem_8type/JPEGImages\"\n    xmlSavePath = \"/home/supernode/anaconda3/envs/helmet/trafficSystem_8type/Annotations\"\n    # selectPic(xmlPath, picPath)\n    copyPicXml(sample_num, xmlPath, picPath)\n","repo_name":"PiseyYou/Yolov3_script","sub_path":"preDeal/xml/copySelectFile.py","file_name":"copySelectFile.py","file_ext":"py","file_size_in_byte":2761,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"2505774191","text":"import numpy as np\r\n\r\nNtSd = 433\r\nbf = 10.160\r\nt = 1.111\r\nec = 2.95\r\nfy = 35\r\nfu = 45\r\nb = 2*bf-t\r\nbi = bf-t\r\ng = (bi-3-3.5)/2\r\ns = 7\r\nAn1 = (b-2*(1.6+0.35))*t\r\nAn2 = (b-3*(1.6+0.35)+((s**2)/(4*g)))*t\r\nA = b*t\r\ndef menor(An1,An2,A):\r\n    min = An1\r\n\r\n    if An2 < min:\r\n        min = An2\r\n    if A < min:\r\n        min = A\r\n\r\n    return min\r\nlc = 14\r\nCt = 1-(ec/lc)\r\nfyAg = (fy*A)/1.1\r\nfuAe = (fu*menor(An1,An2,A)*Ct)/1.35\r\n\r\nprint('Ag: ', round(A,2),'cm²')\r\nprint('An1: ', round(An1,2),'cm²')\r\nprint('An2: ', round(An2,2),'cm²')\r\nprint('fyAg: ', round(fyAg,2),'kN')\r\nprint('fuAe: ', round(fuAe,2),'kN')\r\nprint('NtSd: ', NtSd, 'kN')","repo_name":"awanderlind/designofsteel","sub_path":"TraçãoPerfilL.py","file_name":"TraçãoPerfilL.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36405022380","text":"from pydub import AudioSegment as AS\nfrom pydub.playback import play\nimport media_tool\n\nwhile True:\n    cmd = int(input('Command: '))\n    if cmd == 1:\n        filename_in = input('Input file: ')\n        
filename_out = input('Output file: ')\n        aud = AS.from_file(filename_in, format=filename_in.split('.')[-1])\n        aud.export(filename_out, format=filename_out.split('.')[-1])\n    else:\n        break\nprint('Goodbye')\n","repo_name":"PythonOrC/PythonCodeArchive","sub_path":"program/Pydub/2 - 音频格式转换器/音频格式转换器-1.py","file_name":"音频格式转换器-1.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43936304145","text":"from django.urls import path\nfrom . import views\n\napp_name = 'user' # distinguishes same-named template files across different apps\n\nurlpatterns = [\n    # user management\n    path('user/list/', views.user_list, name=\"user_list\"),\n    path('user/add/', views.user_add, name=\"user_add\"),\n]\n\n","repo_name":"showyouhappiness/Python_study","sub_path":"mysite_study/user/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21772038512","text":"from typing import List, Any, Dict, cast, Optional\nimport numpy as np\nimport Metal\nfrom tinygrad.helpers import dtypes, dedup, unwrap2\nfrom tinygrad.device import Buffer, CompiledASTRunner, update_stats\nfrom tinygrad.jit import JitItem, get_input_replace, get_jit_stats, get_jc_idxs_with_updatable_launch_dims, GraphException\nfrom tinygrad.shape.symbolic import Variable\nfrom tinygrad.runtime.ops_metal import MetalDevice\n\nclass MetalGraph:\n  def __init__(self, device:MetalDevice, jit_cache: List[JitItem], input_rawbuffers: List[Buffer], var_vals: Dict[Variable, int]):\n    if not all(isinstance(ji.prg, CompiledASTRunner) for ji in jit_cache): raise GraphException\n\n    self.jit_cache = jit_cache\n    self.input_replace = get_input_replace(jit_cache, input_rawbuffers)\n    self.op_estimate, self.mem_estimate = get_jit_stats(jit_cache)\n    self.jc_idx_with_updatable_launch_dims = get_jc_idxs_with_updatable_launch_dims(jit_cache)\n    self.device: MetalDevice = device\n\n    # create metal batch exec\n    icb_descriptor = Metal.MTLIndirectCommandBufferDescriptor.new()\n    icb_descriptor.setCommandTypes_(Metal.MTLIndirectCommandType(Metal.MTLIndirectCommandTypeConcurrentDispatch))\n    icb_descriptor.setInheritBuffers_(False)\n    icb_descriptor.setInheritPipelineState_(False)\n    icb_descriptor.setMaxKernelBufferBindCount_(31)\n    self.icb = self.device.device.newIndirectCommandBufferWithDescriptor_maxCommandCount_options_(icb_descriptor, len(self.jit_cache), Metal.MTLResourceOptions(0)) # noqa: E501\n    if self.icb is None: raise GraphException(\"create indirect command buffer failed, does your system support this?\")\n\n    if len(var_vals): self.int_buf = self.device.allocator.alloc(len(var_vals)*dtypes.int32.itemsize)\n    all_resources = [self.int_buf] if len(var_vals) else []\n    for j,ji in enumerate(self.jit_cache):\n      prg: CompiledASTRunner = cast(CompiledASTRunner, ji.prg)\n      descriptor = Metal.MTLComputePipelineDescriptor.new()\n      descriptor.setComputeFunction_(prg.clprg.fxn)\n      descriptor.setSupportIndirectCommandBuffers_(True)\n      pipeline_state = unwrap2(self.device.device.newComputePipelineStateWithDescriptor_options_reflection_error_(descriptor, Metal.MTLPipelineOption(0), None, None)) # noqa: E501\n      icb_command = self.icb.indirectComputeCommandAtIndex_(j)\n      icb_command.setComputePipelineState_(pipeline_state)\n      for i,b in enumerate(ji.rawbufs):\n        if b is not None:\n          icb_command.setKernelBuffer_offset_atIndex_(b._buf, 0, i)\n          all_resources.append(b._buf)\n      var_vals_keys = list(var_vals.keys())\n      for i,v in enumerate(prg.vars):\n        
icb_command.setKernelBuffer_offset_atIndex_(self.int_buf, var_vals_keys.index(v)*4, len(ji.rawbufs)+i)\n if j not in self.jc_idx_with_updatable_launch_dims:\n global_size, local_size = prg.launch_dims(var_vals)\n icb_command.concurrentDispatchThreadgroups_threadsPerThreadgroup_(Metal.MTLSize(*global_size), Metal.MTLSize(*local_size))\n icb_command.setBarrier()\n self.all_resources = dedup(all_resources)\n self.command_buffer: Any = None\n if len(var_vals): self.int_buf_view = np.frombuffer(self.int_buf.contents().as_buffer(self.int_buf.length()), np.int32)\n\n def __call__(self, input_rawbuffers: List[Buffer], var_vals: Dict[Variable, int], wait=False, jit=False) -> Optional[float]:\n # NOTE: you at least can't update the ints if this is running\n if self.command_buffer is not None and self.command_buffer in self.device.mtl_buffers_in_flight: self.command_buffer.waitUntilCompleted()\n all_resources = self.all_resources + [x._buf for x in input_rawbuffers]\n for (j,i),input_idx in self.input_replace.items():\n self.icb.indirectComputeCommandAtIndex_(j).setKernelBuffer_offset_atIndex_(input_rawbuffers[input_idx]._buf, 0, i)\n for j in self.jc_idx_with_updatable_launch_dims:\n global_size, local_size = cast(CompiledASTRunner, self.jit_cache[j].prg).launch_dims(var_vals)\n self.icb.indirectComputeCommandAtIndex_(j).concurrentDispatchThreadgroups_threadsPerThreadgroup_(Metal.MTLSize(*global_size), Metal.MTLSize(*local_size)) # noqa: E501\n if len(var_vals): self.int_buf_view[:] = list(var_vals.values())\n command_buffer = self.device.mtl_queue.commandBuffer()\n encoder = command_buffer.computeCommandEncoder()\n encoder.useResources_count_usage_(all_resources, len(all_resources), Metal.MTLResourceUsageRead | Metal.MTLResourceUsageWrite)\n encoder.executeCommandsInBuffer_withRange_(self.icb, Metal.MTLIndirectCommandBufferExecutionRangeMake(0,len(self.jit_cache)))\n encoder.endEncoding()\n command_buffer.commit()\n self.command_buffer = command_buffer\n if wait:\n command_buffer.waitUntilCompleted()\n et = command_buffer.GPUEndTime() - command_buffer.GPUStartTime()\n else:\n self.device.mtl_buffers_in_flight.append(command_buffer)\n et = None\n update_stats(f\"\", self.op_estimate, self.mem_estimate, var_vals, et, buf_count=len(input_rawbuffers), jit=jit, num_kernels=len(self.jit_cache)) # noqa: E501\n return et","repo_name":"tinygrad/tinygrad","sub_path":"tinygrad/features/graph/metal.py","file_name":"metal.py","file_ext":"py","file_size_in_byte":5097,"program_lang":"python","lang":"en","doc_type":"code","stars":20676,"dataset":"github-code","pt":"61"} +{"seq_id":"20411622059","text":"from mapr.ojai.storage.ConnectionFactory import ConnectionFactory\nfrom mapr.ojai.storage.ConnectionFactory import ConnectionFactory\nimport getopt\nimport sys\nimport time\nimport logging\nimport re\nimport datetime\nimport json\n\nclass SpartanQuery:\n \n def __init__(self, urlConn):\n self.connection_string = urlConn\n self.races = list()\n self.course_results = list()\n self.events = list()\n\n\n #10.10.99.151:5678?auth=basic;user=mapr;password=mapr;ssl=false\n self.options = {\n 'ojai.mapr.query.include-query-plan': True,\n 'ojai.mapr.query.timeout-milliseconds': 120000,\n 'ojai.mapr.query.result-as-document': True\n }\n\n def queryResultsByUser(self, user):\n connection = ConnectionFactory.get_connection(self.connection_string)\n document_store = connection.get_store(store_path='/apps/course_results')\n #query_dict = 
{\"$select\":[\"RaceEntries.List[1].EventCourseID\"],\"$where\":{\"$like\":{\"RaceEntries.List[].DisplayName\":\"sargon%benjamin\"}}}\n query_dict = {\"$select\":[\"event_id\",\"RaceID\",\"CourseName\",\"CoursePattern\",\"DisplayName\",\"TicksString\",\"RankO\"],\"$orderby\": {\"RaceID\": \"asc\"},\"$where\":{\"$matches\":{\"DisplayName\":\"(?i)\"+user}}}\n #query_dict = {\"$select\":[\"CourseID\",\"CourseName\",\"RaceID\",\"RaceEntries.List[].DisplayName\"],\"$where\":{\"$like\":{\"RaceEntries.List[].DisplayName\":\"sargon%\"}}}\n #query_dict = {\"$select\":[\"_id\",\"event_id\",\"RaceID\",\"CourseName\",\"CoursePattern\",\"DisplayName\",\"TicksString\",\"RankO\",\"RankG\",\"RankA\",\"RacerID\",\"BibNum\"],\"$orderby\": {\"RaceID\": \"asc\"},\"$where\":{\"$like\":{\"DisplayName\":user}}}\n\n start = time.time()\n query_result = document_store.find(query_dict,options=self.options)\n\n iterations = 0\n raceEntries = 0\n course_results = list()\n #courseIds = list()\n\n print(query_result.get_query_plan())\n\n for item in query_result:\n iterations+=1\n #print (item.as_dictionary())\n\n row = item.as_dictionary()\n course_results.append(row)\n #courseId = row['CourseID']\n #courseIds.append(courseId)\n courseName = row['CourseName']\n #racers = row['RaceEntries']['List']\n print(\" Race : \" + str(row['RaceID']) + \" CoursePattern: \" + str(row['CoursePattern']) + \" \" + courseName + \" Time : \" + str(row['TicksString']) +\" Rank: \" + str(row['RankO']) +\" event_id: \" + str(row['event_id']))\n #print(\"Race : \" + str(row['RaceID']) + \" Course: \" + str(courseId) + \" \" + courseName + \" with \" + str(len(racers)))\n\n #raceEntries+=len(racers)\n #for racer in racers:\n # if racer['DisplayName'].lower() == 'sargon benjamin':\n # print(\"Race : \" + str(row['RaceID']) + \" Course: \" + str(courseId) + \" \" + courseName + \" with \" + str(len(racers)))\n # print (\"Found match with \" + str(racer))\n end = time.time()\n print(\"Duration = \" + str(end - start))\n print(\"iterations is \" + str(iterations))\n connection.close()\n return course_results\n \n def queryRaceInfo(self, raceIds):\n connection = ConnectionFactory.get_connection(self.connection_string)\n document_store = connection.get_store(store_path='/apps/races')\n #query_dict = {\"$select\":[\"RaceID\",\"RaceName\",\"RaceDate\",\"StateProvName\",\"Latitude\",\"Longitude\"],\"$orderby\": {\"RaceDate\": \"asc\"},\"$where\":{\"$in\":{\"RaceID\":raceIds}}}\n query_dict = {\"$orderby\": {\"RaceDate\": \"asc\"},\"$where\":{\"$in\":{\"RaceID\":raceIds}}}\n start = time.time()\n query_result = document_store.find(query_dict,options=self.options)\n races = list()\n iterations = 0\n for item in query_result:\n iterations+=1\n row = item.as_dictionary()\n races.append(row)\n \n end = time.time()\n logging.info(\"Duration = \" + str(end - start))\n logging.info(\"iterations is \" + str(iterations))\n connection.close()\n return races\n\n def queryEventInfo(self, eventIds):\n connection = ConnectionFactory.get_connection(self.connection_string)\n document_store = connection.get_store(store_path='/apps/events')\n\n query_dict = {\"$select\":[\"_id\",\"id\",\"event_name\",\"start_date\",\"end_date\",\"venue\"],\"$orderby\": {\"end_date\": \"asc\"},\"$where\":{\"$in\":{\"_id\":eventIds}}}\n start = time.time()\n query_result = document_store.find(query_dict,options=self.options)\n events = list()\n iterations = 0\n for item in query_result:\n iterations+=1\n row = item.as_dictionary()\n #print (row['_id']+\"_\" + str(row['venue']))\n 
events.append(row)\n \n end = time.time()\n logging.info(\"Duration = \" + str(end - start))\n logging.info(\"iterations is \" + str(iterations))\n connection.close()\n return events\n\n\n def parseDateString(self, dateStr):\n dateMillis = re.search(r'\\((.*?)\\)',dateStr).group(1)\n dateTime = datetime.datetime.fromtimestamp(float(dateMillis)/1000.0)\n return dateTime\n\n def flattenInfo(self):\n flatList = list()\n row = {}\n for race in self.races:\n eventId = race['event_id']\n course_result = next((x for x in self.course_results if x['event_id'] == eventId), None)\n event = next((x for x in self.events if x['id'] == eventId), None)\n\n #course_result.update(race)\n #course_result.update(event)\n row = course_result\n row['race'] = race\n row['event'] = event\n dateTimeStr = self.parseDateString(race['RaceDate']).strftime(\"%m/%d/%Y, %H:%M:%S\")\n row['MetaRaceDateTime'] = dateTimeStr\n row['MetaEventUrl'] = \"https://www.spartan.com/en/race/detail/\"+str(eventId)+\"/overview\"\n flatList.append(row)\n return flatList\n\n\n\ndef main():\n logging.basicConfig(filename='myapp.log', level=logging.DEBUG)\n urlString = ''\n password = ''\n \n\n try:\n opts, args = getopt.getopt(sys.argv[1:], \"u:p:h\", [\n \"urlconnection=\", \"password=\"])\n except getopt.GetoptError:\n print(\"queries.py -u -p \")\n \n for opt, arg in opts:\n if opt == '-h':\n print('queries.py -u ')\n print(\"eg -u '10.10.99.151:5678?auth=basic;user=mapr;password=mapr;ssl=false'\")\n sys.exit()\n elif opt in (\"-u\", \"--urlconnection\"):\n urlString = arg\n elif opt in (\"-p\", \"--password\"):\n password = arg\n\n logging.info(\"url string is \" + urlString)\n \n spartanQuery = SpartanQuery(urlString)\n spartanQuery.course_results = spartanQuery.queryResultsByUser(\"sargon benjamin\")\n raceIds = [cr['RaceID'] for cr in spartanQuery.course_results]\n eventIds = [cr['event_id'] for cr in spartanQuery.course_results]\n spartanQuery.races = spartanQuery.queryRaceInfo(raceIds)\n spartanQuery.events = spartanQuery.queryEventInfo(eventIds)\n flattened = spartanQuery.flattenInfo()\n\n with open('result.json','w') as json_file:\n json.dump(flattened,json_file)\n \nif __name__ == \"__main__\":\n main()\n\n","repo_name":"sogwiz/spartan","sub_path":"analytics/mapr/python/queries.py","file_name":"queries.py","file_ext":"py","file_size_in_byte":7297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70078282756","text":"from PyDictionary import PyDictionary\r\ndictionary = PyDictionary()\r\n\r\nprint('-----Enter 0 to exit-----\\nWelcome to Dictionary\\n')\r\n\r\nwhile True :\r\n word = input(\"Enter the word : \")\r\n if word == '0':\r\n break\r\n print(dictionary.meaning(word))","repo_name":"codicaly/Dictionary","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71302456833","text":"from data import DataReader\nimport matplotlib.pyplot as plt\n\nfrom model import SSLModel\n\nimport time\nimport numpy as np\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('--checkpoint_dir', dest='CHECKPOINT_DIR', nargs='?', const='checkpoints_4000_unsupervised')\nparser.add_argument('--output', dest='OUTPUT', nargs='?', const='perf_effort.csv')\nargs = parser.parse_args()\n\nmb_size = 32\nimages_directory = 'images'\nwidth = 32\nchannels = 3\n\nchunk_size = 100\n\nfrom numpy import genfromtxt\nclass_list = 
[x.decode('ascii') for x in genfromtxt('classes.csv', delimiter=',', dtype=None)]\n\nimport os\n\nCLUSTER_DEPTH = 8\n\ndef main():\n    reader = DataReader(images_directory, width, width, channels, class_list)\n\n    model = SSLModel(width, width, channels, mb_size, len(class_list), args.CHECKPOINT_DIR, load=True)\n\n    #perform clustering\n    reader.autolabel(model, 1.1, use_clustering=True)\n\n    levels = len(reader.image_list[0].clusters)\n\n\n    cor, tot = reader.evaluate_model(model)\n\n    print(\"test performance {}\".format(cor/tot))\n\n    #all possible cluster ranges\n    #first two ranges must have a \n    sizes = []\n    for i in range(CLUSTER_DEPTH):\n        sizes.append(2**(i+1))\n\n\n    evals = []\n\n    for iters in range(10):\n        indices = np.random.permutation(len(reader.image_list))\n\n        for clustering_level in range(CLUSTER_DEPTH):\n\n\n            # randomly sample groups of images of size 32\n            max_group_size = 32\n\n            print(\"clustering level {}\".format(clustering_level))\n\n            n_interactions = 0\n            n_correct = 0\n            n_total = 0\n            def evaluate_group(group, max_clustering_level, curr_clustering_level=0):\n                if len(group) == 0:\n                    return 0, 0, 0\n                incorrect = 0\n                for ind,cluster in group:\n                    if reader.image_list[ind].ground_truth != cluster[0]:\n                        incorrect += 1\n                        break\n\n\n                if incorrect == 0:\n                    return 1, len(group), len(group)\n                else:\n                    if curr_clustering_level < max_clustering_level:\n\n                        new_evals = [evaluate_group([x for x in group if x[1][curr_clustering_level+2] == c], max_clustering_level, curr_clustering_level + 1) for c in range(sizes[curr_clustering_level])]\n\n                        return tuple(np.sum(new_evals,axis=0))\n\n                    else:\n\n                        n_interactions = 0\n                        n_correct = 0\n                        n_total = 0\n\n                        #group[0][1][0] is a surrogate for the proposed label\n                        for ind, clusters in group:\n                            if reader.image_list[ind].ground_truth == group[0][1][0]:\n                                n_correct+=1\n                            n_total+=1\n                            n_interactions +=1\n\n                        #if a majority of the images are not correct, automatically label them as negatives\n                        if n_correct < len(group) / 2:\n                            n_correct = len(group) - n_correct\n\n                        return n_interactions, n_correct, n_total\n\n            for c in range(len(class_list)):\n                for k_cluster in range(sizes[1]):\n                    for a_cluster in range(sizes[clustering_level]):\n                        i = 0\n                        group = []\n                        while i < len(indices):\n                            if reader.image_list[indices[i]].clusters is not None and reader.image_list[indices[i]].clusters[0] == c:\n                                if (reader.image_list[indices[i]].clusters[1] == k_cluster):\n                                    if (reader.image_list[indices[i]].clusters[clustering_level+2] == a_cluster):\n                                        group.append((indices[i],reader.image_list[indices[i]].clusters[:2+clustering_level]))\n\n                            i+=1\n\n                            if len(group) >= max_group_size:\n                                inter, corr, tot = evaluate_group(group, clustering_level)\n                                n_interactions+=inter\n                                n_correct += corr\n                                n_total += tot\n                                # print('appending group c: {} k {} a {}'.format(c, k_cluster, a_cluster))\n                                group = []\n\n                        if len(group) > 0:\n                            inter, corr, tot = evaluate_group(group, clustering_level)\n                            n_interactions+=inter\n                            n_correct += corr\n                            n_total += tot\n                            # print('appending group c: {} k {} a {}'.format(c, k_cluster, a_cluster))\n                            group = []\n\n            evals.append((n_interactions, n_correct, n_total))\n            print(\"{} interactions, {} correct, {} total\".format(n_interactions, n_correct, n_total))\n    np.savetxt(args.OUTPUT, np.array(evals), delimiter=',')\n\n\n\n    \n\nif __name__ == '__main__':\n    
main()","repo_name":"christiancosgrove/ssl-annotation-acceleration","sub_path":"experiment_effort.py","file_name":"experiment_effort.py","file_ext":"py","file_size_in_byte":5133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34610639171","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# ---- nonebot ----\nimport nonebot\nnonebot.init()\napp = nonebot.get_asgi()\ndriver = nonebot.get_driver()\n\n# 注册适配器\nfrom nonebot.adapters.onebot.v11 import Adapter\ndriver.register_adapter(Adapter)\n\n# # ---- 搞事 ----\n# import ayaka.patch as hack\n# # 统计加载时间\n# hack.hack_load_plugin()\n\n# ---- 加载插件 ----\n# nonebot.load_plugin(\"test\")\nnonebot.load_plugin(\"ayaka_games\")\n# nonebot.load_from_toml(\"pyproject.toml\")\nnonebot.load_plugin(\"ayaka_test\")\n\n\nif __name__ == \"__main__\":\n nonebot.run(app=\"__mp_main__:app\")\n","repo_name":"bridgeL/nonebot-plugin-ayaka","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"4256966882","text":"import smbus\nimport time\nimport RPi.GPIO as gpio\n\nis_pin_on = 0\n\ngpio.setmode(gpio.BCM)\ngpio.setup(4, gpio.IN)\ngpio.setup(17, gpio.OUT)\n\naddress = 0x48\nA0 = 0x40\nbus = smbus.SMBus(1)\nA1 = 0x41\nx_value = 0\ny_value = 0\n\ntry:\n while True:\n bus.write_byte(address,A0)\n x_value = bus.read_byte(address)\n bus.write_byte(address,A1)\n y_value = bus.read_byte(address)\n print(x_value, y_value)\n \n is_pin_on = gpio.input(4)\n if is_pin_on == 0:\n print('on')\n else:\n print('off')\n \n time.sleep(0.1)\nexcept KeyboardInterrupt:\n gpio.cleanup()\n","repo_name":"huichangs/Embedded-Programming","sub_path":"0410.py","file_name":"0410.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10791028054","text":"class Solution:\n def nextGreaterElement(self, nums1: List[int], nums2: List[int]) -> List[int]:\n # Solution 1:\n # Time complexity: O(m + n)\n # Space complexity: O(n)\n# stack = [] # storing the index\n# res = []\n# dict = {}\n \n# for i, n in enumerate(nums2):\n# # monotonic decreasing stack, if the curr number is >\n# # than the one at the top of the stack, pop it out\n# while stack and n > nums2[stack[-1]]:\n# idx = stack.pop()\n# # record the value and its corresponding next larger value\n# dict[nums2[idx]] = n\n# stack.append(i)\n \n# for j in nums1:\n# # find the correspond larger value through dictionary\n# res.append(dict.get(j, -1))\n# return res\n\t\t\n\t\t# Solution 2:\n # Time complexity: O(m * n)\n # Space complexity: O(m)\n# dict = {n : i for i, n in enumerate(nums1)}\n# res = [-1] * len(nums1)\n \n# for i in range(len(nums2)):\n# if nums2[i] not in nums1:\n# continue\n# for j in range(i + 1, len(nums2)):\n# if nums2[j] > nums2[i]:\n# idx = dict[nums2[i]]\n# res[idx] = nums2[j]\n# break\n# return res\n \n\t\t\n\t\t# Solution 3:\n # Time complexity: O(m + n)\n # Space complexity: O(m)\n stack = [] # storing the value\n res = [-1] * len(nums1)\n dict = {n : i for i, n in enumerate(nums1)}\n \n for i, n in enumerate(nums2):\n # monotonic decreasing stack, if the curr number is >\n # than the one at the top of the stack, pop it out\n while stack and n > stack[-1]:\n val = stack.pop()\n idx = dict[val]\n res[idx] = n\n if n in nums1:\n stack.append(n)\n return 
res","repo_name":"k9evin/Crashing-LeetCode","sub_path":"496-next-greater-element-i/496-next-greater-element-i.py","file_name":"496-next-greater-element-i.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5473844929","text":"\n\n# zadanie 6 i 7\n\ndef Cesar_cipher(key):\n alfabet = \"a, ą, b, c, ć, d, e, ę, f, g, h, i, j, k, l, ł, m, n, ń, o, ó, p, r, s, ś, t, u, w, y, z, ź, ż\"\n alfabet_arr = alfabet.split(\", \")\n CESAR_CIPHER = {}\n for i in range(len(alfabet_arr)):\n if i < len(alfabet_arr) - key:\n CESAR_CIPHER[alfabet_arr[i]] = alfabet_arr[i + key]\n else:\n CESAR_CIPHER[alfabet_arr[i]] = alfabet_arr[(i + key) % 32]\n return CESAR_CIPHER\n\ndef encrypt(dict, message):\n encrypted = \"\"\n message = message.lower()\n for letter in message:\n if letter in [\" \", \",\"]:\n encrypted = encrypted + letter\n continue\n else:\n encrypted = encrypted + dict[letter]\n\n return encrypted\n\ndef decrypt(dict, encrypted):\n decrypted = \"\"\n for letter in encrypted:\n if letter in [\" \", \",\"]:\n decrypted = decrypted + letter\n continue\n else:\n for key, value in dict.items():\n if value == letter:\n decrypted = decrypted + key\n\n return decrypted\n\n\nmessage = \"MĘŻNY BĄDŹ, CHROŃ PUŁK TWÓJ I SZCZEŚĆ FLAG\"\n\nencrypted = encrypt(Cesar_cipher(3), message)\nprint(encrypted)\n# print(encrypt(Cesar_cipher(-3), encrypted))\ndecrypted = decrypt(Cesar_cipher(3), encrypted)\nprint(decrypted)\n\n\n\n\n\n\n\n\n\n\n","repo_name":"pprzybyla777/WdpLaby","sub_path":"Laby/Lab09/zad6,7.py","file_name":"zad6,7.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16030012869","text":"# There are 3 teams that take part in the darts' competition. Each team comprises 4 participants. Each participant has\n# 3 attempts. The number of points that each participant gets for one throw is entered from a keyboard. The maximum\n# number of points for 1 attempt is 60. Display the winner (i.e. the number of the participant with the biggest\n# number of points and his/her result) from each team. The participant of which team showed the best result?\nteam_count = 3\nplayers_count = 4\nteam_results = []\n\nmax_team_number = winner_number = max_result = 0\nfor team_number in range(1, team_count + 1):\n team_result = []\n max_for_player = max_player_number = 0\n\n for player_number in range(1, players_count + 1):\n player_score = input(\"Enter {} team {} player result: \".format(\n team_number,\n player_number,\n )).split()\n player_sum = sum([int(score) for score in player_score])\n if player_sum > max_for_player:\n max_for_player, max_player_number = player_sum, player_number\n\n print(\"Team {}. The winner is the player {} with the score of {}\".format(\n team_number,\n max_player_number,\n max_for_player,\n ))\n\n if max_for_player > max_result:\n max_team_number, winner_number = team_number, max_player_number\n max_result = max_for_player\n\nprint(\"The best result was shown by player {} of the team {} with the score of {}\".format(\n winner_number,\n max_team_number,\n max_result,\n))\n\"\"\"\nOutput:\nEnter 1 team 1 player result: 20\nEnter 1 team 2 player result: 80\nEnter 1 team 3 player result: 100\nEnter 1 team 4 player result: 100\nTeam 1. 
The winner is the player 3 with the score of 100\nEnter 2 team 1 player result: 70\nEnter 2 team 2 player result: 40\nEnter 2 team 3 player result: 20\nEnter 2 team 4 player result: 120\nTeam 2. The winner is the player 4 with the score of 120\nEnter 3 team 1 player result: 20\nEnter 3 team 2 player result: 30\nEnter 3 team 3 player result: 40\nEnter 3 team 4 player result: 140\nTeam 3. The winner is the player 4 with the score of 140\nThe best result was shown by player 4 of the team 3 with the score of 140\n\"\"\"","repo_name":"stepanskyvlad/Learning-Python","sub_path":"Loops_and_Conditinal_Execution/for_example.py","file_name":"for_example.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24350723565","text":"import random\nfrom functools import lru_cache\nfrom typing import Dict, Iterable, List, Optional, Tuple, Union\n\nimport unrealsdk # type: ignore\n\n\nclass Materials:\n selected: Optional[\"MaterialInstanceConstant\"] = None\n\n @staticmethod\n @lru_cache(maxsize=1)\n def all_materials(search: str) -> List[str]:\n return [\n m.PathName(m)\n for m in unrealsdk.FindAll(\"MaterialInstanceConstant\")\n if search.lower() in m.PathName(m).lower()\n ]\n\n @staticmethod\n def select(material_name: str) -> bool:\n mat = unrealsdk.FindObject(\"MaterialInstanceConstant\", material_name)\n if mat:\n Materials.selected = MaterialInstanceConstant(mat, material_name)\n return True\n return False\n\n\nclass Texture2D:\n search: str = \"\"\n index: int = 0\n\n backup: unrealsdk.UObject = None\n texture_parameter: str = \"\"\n\n @staticmethod\n @lru_cache(maxsize=1)\n def all_textures(search: str) -> List[str]:\n return [\n t.PathName(t)\n for t in unrealsdk.FindAll(\"Texture2D\")\n if search.lower() in t.PathName(t).lower()\n ]\n\n\nclass MaterialInstanceConstant:\n def __init__(\n self, material_instance_constant: unrealsdk.UObject, path_name: str = \"\"\n ) -> None:\n self.material_instance_constant = material_instance_constant\n self.path_name = path_name or material_instance_constant.PathName(\n material_instance_constant\n )\n self.vector_parameters: Dict[str, Tuple[float, float, float, float]] = {}\n self.scalar_parameters: Dict[str, float] = {}\n self.texture_parameters: Dict[str, str] = {}\n self.update_parameters()\n\n def parents(self) -> List[unrealsdk.UObject]:\n \"\"\"Index -1 is the root parent. Index 0 is the object itself.\"\"\"\n parent: unrealsdk.UObject = self.material_instance_constant\n parents: List[unrealsdk.UObject] = [parent]\n while parent.Parent:\n parent = parent.Parent\n parents.append(parent)\n return parents\n\n def update_vector_parameters(self) -> None:\n parents = self.parents()\n\n for expression in parents[-1].Expressions:\n if (\n expression\n and expression.Class.Name == \"MaterialExpressionVectorParameter\"\n ):\n # Just add any default value for now. 
Most skins overwrite them anyway.\n self.vector_parameters[expression.ParameterName] = (1.0, 1.0, 1.0, 1.0)\n\n # Walk from root to our material and update all VectorParameters\n for material in parents[::-1]:\n if not material.VectorParameterValues:\n continue\n for param in material.VectorParameterValues:\n p_val = param.ParameterValue\n self.vector_parameters[param.ParameterName] = (\n p_val.R,\n p_val.G,\n p_val.B,\n p_val.A,\n )\n\n def update_scalar_parameters(self) -> None:\n parents = self.parents()\n\n for expression in parents[-1].Expressions:\n if (\n expression\n and expression.Class.Name == \"MaterialExpressionScalarParameter\"\n ):\n # Just add any default value for now. Most skins overwrite them anyway.\n self.scalar_parameters[expression.ParameterName] = 1.0\n\n # Walk from root to our material and update all ScalarParameters\n for material in parents[::-1]:\n if not material.ScalarParameterValues:\n continue\n for param in material.ScalarParameterValues:\n self.scalar_parameters[\n param.ParameterName\n ] = material.GetScalarParameterValue(param.ParameterName, 0)[1]\n\n def update_texture_parameters(self) -> None:\n parents = self.parents()\n\n for expression in parents[-1].Expressions:\n if (\n expression\n and expression.Class.Name\n == \"MaterialExpressionTextureSampleParameter2D\"\n ):\n # Just add any default value for now. Most skins overwrite them anyway.\n self.texture_parameters[expression.ParameterName] = \"\"\n\n # Walk from root to our material and update all TextureParameters\n for material in parents[::-1]:\n if not material.TextureParameterValues:\n continue\n for param in material.TextureParameterValues:\n self.texture_parameters[param.ParameterName] = param.ParameterValue\n\n def update_parameters(self) -> None:\n self.texture_parameters = {}\n self.vector_parameters = {}\n self.scalar_parameters = {}\n self.update_texture_parameters()\n self.update_vector_parameters()\n self.update_scalar_parameters()\n\n def randomize_parameters(\n self, locked_parameters: Optional[Iterable[str]] = None\n ) -> None:\n if locked_parameters is None:\n locked_parameters = []\n\n for parameter_name in self.vector_parameters:\n if parameter_name not in locked_parameters:\n if \"color\" in parameter_name.lower():\n r = random.random() * 2.55\n g = random.random() * 2.55\n b = random.random() * 2.55\n a = random.random() * 2.55\n else:\n r = random.random() * 20\n g = random.random() * 20\n b = random.random() * 20\n a = random.random() * 20\n self.set_vector_parameter_value(parameter_name, (r, g, b, a))\n\n for parameter_name in self.scalar_parameters:\n if parameter_name not in locked_parameters:\n self.set_scalar_parameter_value(parameter_name, random.random() * 10)\n\n for parameter_name in self.texture_parameters:\n if parameter_name not in locked_parameters:\n textures = Texture2D.all_textures(\"\")\n if not textures:\n return\n texture = random.choice(textures)\n self.set_texture_parameter_value(parameter_name, texture)\n\n self.update_parameters()\n\n def set_texture_parameter_value(\n self, parameter_name: str, parameter_value: Union[str, unrealsdk.UObject]\n ) -> None:\n if not Materials.selected:\n return\n if isinstance(parameter_value, str):\n parameter_value = unrealsdk.FindObject(\"Texture2D\", parameter_value)\n Materials.selected.material_instance_constant.SetTextureParameterValue(\n parameter_name, parameter_value\n )\n\n def get_texture_parameter_value(self, parameter_name: str) -> unrealsdk.UObject:\n if not Materials.selected:\n return\n return 
Materials.selected.material_instance_constant.GetTextureParameterValue(\n parameter_name\n )[1]\n\n def set_vector_parameter_value(\n self, parameter_name: str, parameter_value: Tuple[float, float, float, float]\n ) -> None:\n if not Materials.selected:\n return\n Materials.selected.material_instance_constant.SetVectorParameterValue(\n parameter_name, parameter_value\n )\n\n def set_scalar_parameter_value(\n self, parameter_name: str, parameter_value: float\n ) -> None:\n if not Materials.selected:\n return\n Materials.selected.material_instance_constant.SetScalarParameterValue(\n parameter_name, parameter_value\n )\n","repo_name":"juso40/bl2sdk_Mods","sub_path":"mateditor/materials.py","file_name":"materials.py","file_ext":"py","file_size_in_byte":7658,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"61"}
{"seq_id":"75128683715","text":"# 8. You have the details of four employees in lists:\n\n# emp1=[\"neelam\",\"programer\",\"24\",\"2400\"]\n# emp2=[\"komal\",\"trainer\",\"24\",\"20000\"]\n# emp3=[\"anuradha\",\"HR\",\"25\",\"40000\"]\n# emp4=[\"Abhishek\",\"manager\",\"29\",\"63000\"]\n\n# # Visualize\n\n# Now you have to create 4 dictionaries: emp1, emp2, emp3 and emp4.\n# Each employee's dictionary must contain name, designation, age and salary.\n# These are the dictionary keys; the values come from the lists above. Using this, you have to create a json file like the one below.\n\n# Output:-\n\n# { \n# \"emp1\":{ \"name\":\"nilam\",\n# \"Designation\":\"programmer\",\n# \"Age\":\"34\",\n# \"salary\":\"24000\",\n# }\n\n# \"emp2\":\n# { \"name\":\"komal\",\n# \"Designation\":\"Trainee\",\n# \"Age\":\"24\",\n# \"salary\":\"20000\" ,\n# }\n\n \n# \"emp3\":\n# { \"name\":\"anuradha\",\n# \"Designation\":\"HR\",\n# \"Age\":\"25\",\n# \"salary\":\"40000\",\n# }\n\n\n# \"emp4\":\n# { \"name\":\"Abhishek\",\n# \"Designation\":\"Manager\",\n# \"Age\":\"29\",\n# }\n# }\n\n# emp1={\"neelam\",\"programer\",\"24\",\"2400\"}\n\nimport json\nkeys=[\"name\",\"Designation\",\"age\",\"salary\"]\na=[\"nelam\",\"programer\",\"24\",\"24000\"]\nb=[\"komal\",\"trainer\",\"24\",\"20000\"]\nc=[\"anuradha\",\"HR\",\"25\",\"40000\"]\nd=[\"Abhishek\",\"manager\",\"29\",\"63000\"]\nemp1={}\nemp2={}\nemp3={}\nemp4={}\ndata={}\n# fill each employee dict key by key\nfor i in range(len(keys)):\n emp1[keys[i]]=a[i]\n emp2[keys[i]]=b[i]\n emp3[keys[i]]=c[i]\n emp4[keys[i]]=d[i]\ndata[\"employee1\"]=emp1\ndata[\"employee2\"]=emp2\ndata[\"employee3\"]=emp3\ndata[\"employee4\"]=emp4\n# write the json file once, after all four dicts are complete\nout_file=open(\"file.json\",\"w\")\njson.dump(data,out_file,indent=6)\nout_file.close()","repo_name":"alimausmani/Json","sub_path":"question8.py","file_name":"question8.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
{"seq_id":"24633738523","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport cgi\r\nimport re\r\nimport sys\r\nimport pickle\r\nfrom decimal import *\r\n\r\ndic={}\r\ncost=0\r\nbase=\"Content-type: text/html; charset=UTF-8\\r\\n\"\r\noutput_error=\"ERROR\\n\"\r\n\r\nprint ('Content-type: text/html; charset=UTF-8')\r\nprint(\"\\r\")\r\n\r\nre_int = re.compile(r'\\d+$')\r\nre_deci = re.compile(r'\\d+\\.\\d+$')\r\nre_just = re.compile(r'\\d+\\.0+$')\r\n\r\ndef check_pat(pat,num):\r\n if(not pat.match(str(num))):\r\n print(\"ERROR\")\r\n sys.exit()\r\n return\r\n\r\n\r\n
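# NOTE: hedged aside, not from the original file -- the patterns above are\r\n# '$'-anchored, so check_pat() accepts only a whole token: check_pat(re_int, \"12\")\r\n# passes, while check_pat(re_int, \"12a\") prints ERROR and exits.\r\ndef 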
check_key(dict,s):\r\n if s not in dict:\r\n dict[s]=0\r\n return\r\ndef add(dict,name,value):\r\n check_pat(re_int,value)\r\n check_key(dict,name)\r\n dict[name]+=int(value)\r\n return\r\n\r\ndef deleteall(dict):\r\n dic.clear()\r\n global cost\r\n cost=0\r\n return\r\ndef show_list(dict,s):\r\n print(str(s)+\": \"+str(dict[s]))\r\n return\r\n\r\ndef show_list_all(dict):\r\n for i in sorted(dict, key=lambda i:i[0]):\r\n if(int(dict[i]) > 0):\r\n print(str(i)+\": \"+str(dict[i]))\r\n return\r\n\r\ndef buy(dict,name,amount):\r\n check_pat(re_int,amount)\r\n if(int(dict[name])= 0:\n if board.board[row1][col] != 0:\n if board.board[row1][col].color != self.color:\n valid_moves.append((row1, col))\n break\n else:\n break\n else:\n valid_moves.append((row1, col))\n row1 -= 1\n\n col1 = col + 1\n while col1 < 8:\n if board.board[row][col1] != 0:\n if board.board[row][col1].color != self.color:\n valid_moves.append((row, col1))\n break\n else:\n break\n else:\n valid_moves.append((row, col1))\n col1 += 1\n\n col1 = col - 1\n while col1 >= 0:\n if board.board[row][col1] != 0:\n if board.board[row][col1].color != self.color:\n valid_moves.append((row, col1))\n break\n else:\n break\n else:\n valid_moves.append((row, col1))\n col1 -= 1\n \n return valid_moves","repo_name":"trupewate/ChessGame","sub_path":"Pieces/rook.py","file_name":"rook.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14888567921","text":"#!/usr/bin/python\nimport sys\nimport matplotlib.pyplot as plt\ncolor=None\nmarker='o'\nlabel=''\nsize=36\nall_x=[]\nall_y=[]\nx=[]\ny=[]\ncolor_list=[\"r\", \"g\", \"b\", \"c\", \"m\", \"y\", \"k\", \"w\"]\nmarker_list=[\"o\",\"+\",\"*\",\".\",\"x\",\"s\",\"d\",\"^\",\"v\",\">\",\"<\",\"p\",\"h\"]\n\nlines=[i.strip() for i in sys.stdin.readlines()]\n\nfrom pprint import pprint\n\npprint(lines)\n\nfor line in lines:\n vals = line.split(' ');\n \n if len(vals)==2:\n if vals[0]=='marker':\n if vals[1] in marker_list:\n marker=vals[1]\n elif vals[0]=='color':\n if vals[1] in color_list:\n color=vals[1]\n else:\n v0=float(vals[0])\n v1=float(vals[1])\n x.append(v0)\n y.append(v1)\n all_x.append(v0)\n all_y.append(v1)\n elif vals[0]=='$':\n plt.scatter(x,y,s=size,c=color,marker=marker)\n x=[]\n y=[]\n\"\"\"\nif len(all_x)>0 :\n x1=float(min(all_x))\n x2=float(max(all_x))\n y1=float(min(all_y))\n y2=float(max(all_y))\n w=(x2-x1)*0.5\n h=(y2-y1)*0.5\n \n plt.xlim(x1-w,x2+w)\n plt.ylim(y1-h,y2+h)\n\"\"\"\n\nplt.show()\n\n\n","repo_name":"fxborg/hexgrid-with-spiral-honeycomb-index","sub_path":"bin/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74971935553","text":"\nimport cv2\nimport os\nimport numpy as np\nfrom PIL import Image\n\ndef down_sample(images, scale=32):\n new_images = []\n for image in images:\n h, w, c = image.shape\n res = cv2.resize(image, dsize=(w//scale, h//scale), interpolation=cv2.INTER_CUBIC)\n new_images.append(res)\n\n return new_images\n\ndef load_images(root):\n images = []\n img_paths = []\n for path, subdirs, files in os.walk(root):\n for name in files:\n if name[-3:] not in [\"JPG\", \"jpg\", \"png\", \"PNG\"] :\t\n continue\n img_path = os.path.join(path, name)\n img_paths.append(img_path)\n\n img_paths.sort()\n for img_path in img_paths:\n img = np.asarray(Image.open(img_path))\n images.append(img)\n\n return 
images","repo_name":"IHo-Chiu/VFX","sub_path":"HW2/code/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18611675813","text":"from api.utils import get_price, get_subtotal\nfrom api.models import Product\n\nfrom .settings import DEFAULT_PRICE\n\n\ndef test_get_price():\n product_ids = []\n for index in range(2):\n id = index + 1\n product = Product(id=id)\n product_ids.append(product.id)\n prices = {id: id * DEFAULT_PRICE for id in product_ids}\n assert prices == get_price(product_ids)\n\n\ndef test_get_subtotal():\n product_id_1 = 1\n product_id_2 = 2\n qty_1 = 3\n qty_2 = 2\n cart = {\n 'cart_items': [\n {\n 'product_id': product_id_1,\n 'qty': qty_1\n },\n {\n 'product_id': product_id_2,\n 'qty': qty_2\n },\n ]\n }\n assert sum([DEFAULT_PRICE * product_id_1 * qty_1, DEFAULT_PRICE * product_id_2 * qty_2]) == get_subtotal(cart)['subtotal']\n","repo_name":"NURSYAHRI/TESTING_QA","sub_path":"test/test_functions.py","file_name":"test_functions.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74057068995","text":"import os\nimport random\nimport csv\nimport time\nfrom tqdm import tqdm\nimport json\n\nrandom.seed(201)\n\ndef myFunc(e):\n return int((e.split(\"/\")[-1]).split(\".\")[0])\n\ndef read_image(folder,img_folder):\n img_list = []\n for root, dirs, files in os.walk(img_folder):\n for file_ in files:\n if(file_.endswith(\".csv\")):\n with open(os.path.join(root,file_), newline='') as csvfile:\n csv_reader = csv.reader(csvfile, delimiter=',')\n for row in csv_reader:\n temp = row[0].split(\"/\")\n if(\"(1)\" not in temp[-1] and os.path.exists(os.path.join(root, temp[-1]+\".png\")) ):\n img_list.append(os.path.join(root, temp[-1]+\".png\"))\n img_list.sort(key=myFunc)\n return img_list\n\ndef read_precipitation(folder,exp_num, end_threshold):\n end_point = 0\n precipitation_dict={}\n with open(\"./\"+folder+\"/\"+str(exp_num)+'/label.csv', newline='') as csvfile:\n csv_reader = csv.reader(csvfile, delimiter=',')\n for row in csv_reader:\n temp = row[0].split(\"/\")\n mod_path = str(exp_num)+\"/\"+temp[-1]+\".png\"\n precipitation_dict[mod_path] = float(row[-1])\n if(float(row[-1]) <= end_threshold):\n end_point=int(row[0].split(\"/\")[-1])\n csvfile.close()\n return precipitation_dict,end_point\n\ndef read_image_for_carla(img_folder):\n exp_list = []\n for root, dirs, files in os.walk(img_folder):\n for i in dirs:\n if i not in exp_list:\n exp_list.append(i)\n return exp_list\n\ndef check_carla_ood(exp_folder,memorization_object,initial_memory_threshold, window_size,window_thres,detect_threshold,prob_threshold,task):\n exp_list = read_image_for_carla(exp_folder)\n total=0\n total_detect = 0\n ood_epi = 0\n total_delay = []\n detect_res_list =[]\n detect_frame_list =[]\n threshold_list={\"oods_bike\":20,\"oods_foggy\":0,\"oods_night\":10,\"out_foggy\":0,\"out_night\":0,\"out_rainy\":0,\"out_snowy\":0,\"out_replay\":8,\"in\":200}\n\n ood_window = 0\n num_window = 0\n gt_ood_window = 0\n\n result=[]\n gt_result = []\n\n\n for exp_num in tqdm(exp_list):\n window = []\n total_exp_time = []\n episode = False\n window_delay = 0\n #mapping img to the memory\n img_list = read_image(exp_folder,\"./\"+exp_folder+\"/\"+str(exp_num))\n\n for img_path in img_list:\n \n\n current_frame = int(img_path.split(\"/\")[-1][:-4])\n total += 1\n key = 
img_path.split(\"/\")[-2]+\"/\"+img_path.split(\"/\")[-1]\n start_ = time.time()\n nearest_memory, matched_set, prob_density, exp_time_ = memorization_object.find_match(img_path,initial_memory_threshold)\n\n \n\n if (len(window) == window_size):\n window.pop(0)\n if (prob_density < prob_threshold):\n window.append(0)\n else:\n window.append(1)\n \n \n else:\n if (prob_density < prob_threshold):\n window.append(0)\n else:\n window.append(1)\n\n if (len(window) == window_size):\n if task == 'in':\n gt_result.append(1)\n else:\n gt_result.append(0)\n num_window += 1\n total_win = window.count(0)\n if (total_win >= window_thres) and (episode == False):\n episode = True\n total_detect += 1\n detect_frame_list.append(current_frame-15)\n if (total_win >= window_thres):\n ood_window += 1\n result.append(0)\n else:\n result.append(1)\n\n #test ood episode for only\n total_exp_time.append(round((time.time()-start_)*1000,2)) \n #if(episode):\n # break\n \n detect_res_list.append(episode)\n total_delay.append(window_delay)\n #print(exp_num,episode)\n if (episode):\n ood_epi += 1\n \n with open('test_'+task+'.json', 'w') as f:\n json.dump({'gt':gt_result, 'pred':result}, f)\n f.close()\n\n print(ood_window, num_window, ood_window/num_window)\n results_stat = {}\n \n results_stat[\"detection_rate\"] = round(ood_epi/len(exp_list),3)\n results_stat[\"ood_episode\"] = ood_epi\n results_stat[\"total_episode\"] = len(exp_list)\n results_stat[\"detect_frame_list\"] = detect_frame_list\n results_stat[\"detect_res_list\"] = detect_res_list\n new_frame=[]\n for m in results_stat[\"detect_frame_list\"]:\n if m > threshold_list[task]:\n new_frame.append(m-threshold_list[task])\n else:\n new_frame.append(0)\n results_stat[\"window_list\"] = new_frame\n if (len(results_stat[\"detect_frame_list\"]) > 0):\n results_stat[\"average_window_delay\"] = round(sum(results_stat[\"window_list\"])/len(results_stat[\"detect_frame_list\"]),2)\n else:\n results_stat[\"average_window_delay\"] = None\n\n return results_stat\n\ndef check_carla_heavy_rain_ood(exp_folder,memorization_object,initial_memory_threshold, window_size,window_thres,detect_threshold,prob_threshold):\n exp_list = read_image_for_carla(exp_folder)\n frame_diff = 0\n total_prep = 0\n total=0\n total_detect = 0\n ood_epi = 0\n total_delay = []\n evaluate_time_list =[]\n total_evaluate_time_list =[]\n detect_pre_list =[]\n detect_frame_list =[]\n\n ood_window = 0\n num_window = 0\n gt_ood_window = 0\n\n for exp_num in tqdm(exp_list):\n window = []\n exp_time = [] \n total_exp_time = []\n episode = False\n window_delay = 0\n\n #mapping img to the memory\n \n precipitation_dict,end_point=read_precipitation(exp_folder,exp_num,detect_threshold)\n img_list = read_image(exp_folder,\"./\"+exp_folder+\"/\"+str(exp_num))\n\n for img_path in img_list:\n num_window += 1\n print(img_path)\n current_frame = int(img_path.split(\"/\")[-1][:-4])\n total += 1\n key = img_path.split(\"/\")[-2]+\"/\"+img_path.split(\"/\")[-1]\n start_ = time.time()\n nearest_memory, matched_set, prob_density, exp_time_ = memorization_object.find_match(img_path,initial_memory_threshold)\n \n exp_time.append(round((exp_time_)*1000,5))\n if (len(window) >= window_size):\n window.pop(0)\n if (prob_density < prob_threshold):\n window.append(0)\n else:\n window.append(1)\n total_win = window.count(0)\n if (total_win >= window_thres):\n ood_window += 1\n #if (total_win >= window_thres and episode == False):\n # episode = True\n # total_detect += 1\n # detect_pre = int(precipitation_dict[key])\n # 
detect_pre_list.append(detect_pre)\n # detect_frame_list.append(current_frame)\n # total_prep += detect_pre\n \n else:\n if (prob_density < prob_threshold):\n window.append(0)\n else:\n window.append(1)\n\n #test ood episode for only\n total_exp_time.append(round((time.time()-start_)*1000,2)) \n if (current_frame - end_point > 0):\n gt_ood_window += 1\n if(current_frame - end_point > 0 and episode == False):\n window_delay += 1\n\n if(current_frame - end_point > 1 and episode == True):\n frame_diff += window_delay\n #break\n evaluate_time_list.append(exp_time)\n total_evaluate_time_list.append(total_exp_time)\n total_delay.append(window_delay)\n if (episode):\n ood_epi += 1\n\n print(ood_window, num_window, ood_window/num_window)\n\n results_stat = {}\n\n results_stat[\"detection_rate\"] = 100*round(ood_epi/len(exp_list),3)\n results_stat[\"ood_episode\"] = ood_epi\n results_stat[\"total_episode\"] = len(exp_list)\n results_stat[\"detect_frame_list\"] = detect_frame_list\n \n if total_detect > 0:\n results_stat[\"average_window_delay\"] = round(frame_diff/total_detect,2)\n else: \n results_stat[\"average_window_delay\"] = 0\n\n\n total_time = 0\n length = 0\n #select 3 random traces\n select = random.sample(range(0, len(evaluate_time_list)), 3)\n for m in select:\n total_time += sum(evaluate_time_list[m]) \n length += len(evaluate_time_list[m])\n results_stat[\"average_evaluate_time\"] = round(total_time/length,2)\n\n \n\n return results_stat\n\n","repo_name":"kaustubhsridhar/time-series-OOD","sub_path":"Related-Work/Memories/crash_prediction/predict_carla.py","file_name":"predict_carla.py","file_ext":"py","file_size_in_byte":8861,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"61"} +{"seq_id":"25145879494","text":"import numpy as np\nfrom time import sleep\n\n\n#\n# Does the work for the 'explore' mode\n# As long as there is a path ahead it will try to move on that path\n# with a slight bias towards the left side; this is a simple way of\n# avoiding going in circles and exploring the whole map\n# if too litle navigation path is available it will go in 'stop' mode\n# if a rock is detected it will stitch to 'collect' mode\n#\ndef explore_mode(Rover):\n\t# check if there are any samples to pick and they are on the left side\n\t# if they are on the right side they will be picked up when we come back\n\tif len(Rover.samp_angles) > 3:\n\t\tRover.throttle = 0\n\t\tRover.brake = 1 \t# slight brake\n\t\t# we need to store the positon before pickup otherwise there is a\n\t\t# very high risk that after pickup we will not be able to continue on\n\t\t# the same path\n\t\tRover.save_yaw = Rover.yaw\n\t\tRover.steer = np.clip(np.mean(Rover.samp_angles) * 180/np.pi, -15, 15)\n\t\tRover.mode = 'collect'\n\n\telif Rover.samples_collected > 5 and Rover.perc_mapped > 0.95:\n\t\tRover.mode = 'return'\n\t\t\n\t# Check the extent of navigable terrain\n\telif len(Rover.nav_angles) >= Rover.stop_forward:\n\t\t# If mode is forward, navigable terrain looks good\n\t\t# and velocity is below max, then throttle\n\t\tif Rover.vel < Rover.max_vel:\n\t\t\tRover.throttle = Rover.throttle_set\n\t\telse: # Else coast\n\t\t\tRover.throttle = 0\n\t\tRover.brake = 0\n\t\t# Set steering to average angle clipped to the range +/- 15\n\t\tnav_mean = np.mean(Rover.nav_angles)\n\t\tnav_std = np.std(Rover.nav_angles)\n\t\tsteer = nav_mean + nav_std / 2.25\t # slight bias to drive on the left side\n\t\tsteer = steer * 0.6\t\t\t\t # acts like a P factor (PID); reduces 
overshooting\n\t\tRover.steer = np.clip(steer * 180/np.pi, -15, 15)\n\t# If there's a lack of navigable terrain pixels then go to 'stop' mode\n\telif len(Rover.nav_angles) < Rover.stop_forward:\n\t\t\t# Set mode to \"stop\" and hit the brakes!\n\t\t\tRover.throttle = 0\n\t\t\t# Set brake to stored brake value\n\t\t\tRover.brake = Rover.brake_set\n\t\t\tRover.steer = 0\n\t\t\tRover.mode = 'stop'\n\t# make sure we return the updated Rover\n\treturn Rover\n\n\n#\n# deals with the 'stop' mode\n# means we have an obstacle and we need to avoid it\n#\ndef stop_mode(Rover):\n\t# If we're in stop mode but still moving keep braking\n\tif Rover.vel > 0.2:\n\t\tRover.throttle = 0\n\t\tRover.brake = Rover.brake_set\n\t\tRover.steer = 0\n\t# If we're not moving (vel < 0.2) then do something else\n\telif Rover.vel <= 0.2:\n\t\t# Now we're stopped and we have vision data to see if there's a path forward\n\t\tif len(Rover.nav_angles) < Rover.go_forward:\n\t\t\tRover.throttle = 0\n\t\t\tRover.brake = 0\t\t\t# Release the brake to allow turning\n\t\t\tRover.steer = -15\t # we trun to the left to keep aligned with\n\t\t\t\t\t\t\t\t\t# the fact that we drive on the left side\n\t\t# If we're stopped but see sufficient navigable terrain in front then go!\n\t\tif len(Rover.nav_angles) >= Rover.go_forward:\n\t\t\tRover.throttle = Rover.throttle_set # Set throttle back to stored value\n\t\t\tRover.brake = 0\t\t\t\t\t\t# Release the brake\n\t\t\t# Set steer to mean angle\n\t\t\tRover.steer = np.clip(np.mean(Rover.nav_angles * 180/np.pi), -15, 15)\n\t\t\tRover.mode = 'explore'\n\t# make sure we return the updated Rover\n\treturn Rover\n\n\ndef collect_mode(Rover):\n\t# near sample?\n\tif Rover.near_sample > 0:\n\t\tRover.brake = 5\n\t\tRover.throttle = 0\n\t\tRover.send_pickup = True\n\t\tRover.mode = 'pick'\n\n\t# not yet; move closer\n\telse:\n\t\tRover.brake = 0\n\t\tif Rover.vel < Rover.max_vel / 2.0:\t\t\t# go slower\n\t\t\tRover.throttle = Rover.throttle_set\n\t\telse: # Else coast\n\t\t\tRover.throttle = 0\n\t\tif len(Rover.samp_angles) > 0:\n\t\t\tmean_dir = np.mean(Rover.samp_angles)\n\t\t\tsteer_dir = mean_dir * 180 / np.pi\n\t\t\tRover.steer = np.clip(steer_dir, -15, 15) \n\t# make sure we return the updated Rover\n\treturn Rover\n\n\n#\n\ndef done_pick_mode(Rover):\n\t# we finished picking and we need to reorient the rover in the direction\n\t# we were initially\n\tRover.throttle = 0\n\tRover.brake = 0\n\t#print('save: %4.2f; actual: %4.2f; comm: %4.2f' % (Rover.save_yaw, Rover.yaw, Rover.save_yaw - Rover.yaw))\n\tif abs(Rover.save_yaw - Rover.yaw) > 2.0 :\t\n\t\tRover.steer = np.clip(Rover.save_yaw - Rover.yaw, -15, 15)\n\telse:\n\t\tRover.steer = 0 \n\t\tRover.mode = 'explore'\n # make sure we return the updated Rover\n\treturn Rover\n\n#\n# returns to center\n# just cruise until close enough to the map start\n#\ndef return_mode(Rover):\n\t# calculates distance from start\n\tdx = Rover.pos[0] - Rover.return_pos[0]\n\tdy = Rover.pos[1] - Rover.return_pos[1]\n\tdist = np.sqrt(dx**2 + dy**2)\n\tif dist < 10:\n\t\t# we are close enough\n\t\tRover.mode = 'finish'\n\t\t\n\telif len(Rover.nav_angles) >= Rover.stop_forward:\n\t\t# navigate forward - will get them eventually\n\t\tif Rover.vel < Rover.max_vel:\n\t\t\tRover.throttle = Rover.throttle_set\n\t\telse: # Else coast\n\t\t\tRover.throttle = 0\n\t\tRover.brake = 0\n\t\t# Set steering to average angle clipped to the range +/- 15\n\t\tnav_mean = np.mean(Rover.nav_angles)\n\t\tsteer = nav_mean * 0.6\t\t\t\t # acts like a P factor (PID); reduces 
overshooting\n\t\tRover.steer = np.clip(steer * 180/np.pi, -15, 15)\n\t# If there's a lack of navigable terrain pixels then go to 'stop' mode\n\telif len(Rover.nav_angles) < Rover.stop_forward:\n\t\t\t# Set mode to \"stop\" and hit the brakes!\n\t\t\tRover.throttle = 0\n\t\t\t# Set brake to stored brake value\n\t\t\tRover.brake = Rover.brake_set\n\t\t\tRover.steer = 0\n\t\t\tRover.mode = 'stop'\n\t# make sure we return the updated Rover\n\treturn Rover\n\n\n\n# This is where you can build a decision tree for determining throttle, brake and steer\n# commands based on the output of the perception_step() function\ndef decision_step(Rover):\n\n\t# Implement conditionals to decide what to do given perception data\n\t# Here you're all set up with some basic functionality but you'll need to\n\t# improve on this decision tree to do a good job of navigating autonomously!\n\n\tif Rover.nav_angles is None:\n\t\tRover.throttle = Rover.throttle_set\n\t\tRover.steer = 0\n\t\tRover.brake = 0\n\n\telse:\n\t\t# Check for Rover.mode status\n\t\tif Rover.mode == 'start':\n\t\t\t# save start position so we can return\n\t\t\tRover.return_pos = Rover.pos\n\t\t\tRover.mode = 'explore'\n\n\t\telif Rover.mode == 'explore':\n\t\t\tRover = explore_mode(Rover)\n\n\t\telif Rover.mode == 'stop':\n\t\t\tRover = stop_mode(Rover)\n\n\t\telif Rover.mode == 'collect':\n\t\t\tRover = collect_mode(Rover)\n\n\t\telif Rover.mode == 'pick':\n\t\t\tsleep(10)\t\t\t # we need this to deal with some dalays in operation\n\t\t\tRover.mode = 'done_pick' # finished picking\n\t\t\t# otherwise just wait for the pickup to complete\n\n\t\telif Rover.mode == 'done_pick':\n\t\t\tRover = done_pick_mode(Rover)\n\n\t\telif Rover.mode == 'return':\n\t\t\tRover = return_mode(Rover)\n\n\t\telif Rover.mode == 'finish':\n\t\t\t# we're done; stop the rover\n\t\t\tRover.brake = 5\n\t\t\tRover.thorottle = 0\n\n\n\t# If in a state where want to pickup a rock send pickup command\n\t#if Rover.near_sample and Rover.vel == 0 and not Rover.picking_up:\n\t#\t Rover.send_pickup = True\n\n\treturn Rover\t# Implement conditionals to decide what to do given perception data\n","repo_name":"sonelu/rsend","sub_path":"Term 1/Project 1 - Search and Sample Return/submission/decision.py","file_name":"decision.py","file_ext":"py","file_size_in_byte":6872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13307700659","text":"import sqlite3\nimport json\n\ndb = \"./stonkdb.db\"\nconn = sqlite3.connect(db)\nc = conn.cursor()\nuserid = '297924849639227393'\nusername = \"stuff\"\nticker = \"$JAM\"\nshares = 10\nc.execute(\"SELECT * from 'Users' WHERE UserID = '\" + userid + \"'\")\ncol_names = [cn[0] for cn in c.description]\ncol_names = col_names[3:]\nprint(col_names)\ninfo = c.fetchone()\nprint(info)\nstonks = info[3:]\nprint(stonks)\nholdqty = \"\"\nholdticker = \"\"\nnetworth = int(info[2])\ncash = str(info[2])\nstonks = info[3:]\nfor idx,values in enumerate(stonks):\n if values > 0:\n holdqty = str(holdqty) + str(values) + \"\\n\"\n holdticker = str(holdticker) + \"$\" + str(col_names[idx]) + \"\\n\"\n c.execute(\"SELECT Price FROM Stonks WHERE ticker = '$\" + col_names[idx] + \"'\")\n stonkvalue = c.fetchone()\n print(col_names[idx], values)\n iterstonkvalue = stonkvalue[0] * values\n print(str(col_names[idx])+\" worth $\"+str(iterstonkvalue))\n networth = networth + iterstonkvalue\n print(\"$\"+str(networth))\n# stonks = info[3:64]\n# userstonklist = \"\"\n# for values in stonks:\n# if values[2] > 0:\n# 
userstonklist = str(userstonklist) + values\n# print(userstonklist)","repo_name":"ReonMonterus/CBRStonks","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13579358250","text":"import os\nimport re\nimport sys\nimport json\nimport shutil\nimport sqlite3\nimport hashlib\nimport zipfile\nimport argparse\nimport requests\nimport subprocess\nimport urllib.request\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import urlparse\n\n# Argument parser stuff\nparser = argparse.ArgumentParser()\nparser.add_argument(\"device\", help=\"import device scheme from github repo via codename\")\nparser.add_argument(\"version\", help=\"choose miui version for generate firmware zip\")\nparser.add_argument(\"--output\", help=\"set output location\")\nparser.add_argument(\"--file\", help=\"import device scheme from storage\", action=\"store_true\")\nparser.add_argument(\"--skip-miui-release-check\", help=\"skip miui release check\", action=\"store_true\")\nargs = parser.parse_args()\n\n# Check miui version is available\nversions = [\"global-stable\", \"global-dev\", \"china-stable\", \"china-dev\"]\nif not args.version in versions:\n print(\"Please input an available miui version.\")\n sys.stdout.write(\"=> \")\n curr = 0\n for ver in versions:\n curr+=1\n if curr < len(versions):\n sys.stdout.write(ver + \", \")\n else:\n print(ver)\n sys.exit(1)\n\n# If exists local device.json file, use it. Or fetch from GitHub.\nif args.file:\n with open(args.device, 'r') as device_data_file:\n ddata = json.load(device_data_file)\nelse:\n ddata = json.loads(requests.get(\"https://raw.githubusercontent.com/mifirmware/devices/master/%s.json\" % args.device).text)\n\nprint(\"Current device: %s (%s) | %s\" % (ddata['name'], ddata['codename'], args.version))\n\n# Parse miui download page\npage = requests.get(\"http://en.miui.com/download-\" + ddata['id'] + \".html\").text\nsoup = BeautifulSoup(page, 'html.parser')\n\nfor line in soup.find(id=ddata['content_id'][args.version.split('-')[0]]).find_all('a', class_='btn_5'):\n # Define miui download url\n zip_url = line['href']\n # Split miui release and miui zip name\n zip_url_split = list(filter(None, urlparse(zip_url).path.split('/')))\n # Define miui release\n miui_release = zip_url_split[0]\n # Choose dev or stable link according to option\n if re.match(\"[0-9].[0-9].[0-9]\", miui_release) and \"dev\" in args.version:\n break\n elif \"stable\" in args.version:\n break\n\n# If not defined zip url, terminate\nif not 'zip_url' in globals():\n print(\"Not found any URL address\")\n print(\"Process is terminating now..\")\n sys.exit(1)\n\nprint(\"Here is, miui zip url: %s\" % zip_url)\n\n# Create (if not exists) device db & table for caching last release\ncachedb = sqlite3.connect('cache.db')\ncursor = cachedb.cursor()\ncursor.execute(\"CREATE TABLE IF NOT EXISTS devices (codename TEXT, version TEXT, last_miui_release REAL)\")\ncursor.execute(\"INSERT INTO devices(codename, version, last_miui_release) SELECT ?, ?, '0.0.0' WHERE NOT EXISTS(SELECT * FROM devices WHERE codename=? and version=?);\", [ddata['codename'], args.version, ddata['codename'], args.version])\ncachedb.commit()\n\n# If not defined skip miui release argument, compare last miui release & miui release\nif not args.skip_miui_release_check:\n cursor.execute(\"SELECT * FROM devices WHERE codename=? 
and version=?\", [ddata['codename'], args.version])\n last_miui_release = cursor.fetchone()[2]\n \n if miui_release <= last_miui_release:\n print(\"Nope, not have any new release. Try again later or skip miui release check.\")\n print(\"Process is terminating..\")\n sys.exit(0)\n \n print(\"Found a new miui release!: %s > %s\" % (miui_release, last_miui_release))\n\n# If not exists folder, create it\nif not os.path.exists(miui_release):\n os.makedirs(miui_release)\n\nzip_location = miui_release + \"/\" + zip_url_split[1]\n\n# Fetch miui zip\nif not os.path.isfile(zip_location):\n print(\"Downloading: %s\" % zip_url_split[1])\n with urllib.request.urlopen(zip_url) as response, open(zip_location, 'wb') as outf:\n shutil.copyfileobj(response, outf)\n\n# Test miui zip\nwith zipfile.ZipFile(zip_location) as zip_file:\n zip_stat = zip_file.testzip()\n\nif zip_stat is not None:\n print(\"Zip file is broken: %s\" % zip_stat)\n sys.exit(1)\n\nout = (miui_release + \"/\") if not args.output else args.output\n\n# Create firmware zip\nsubprocess.check_call(\"xiaomi-flashable-firmware-creator/create_flashable_firmware.sh %s %s\" % (zip_location, out), shell=True)\nos.remove(zip_location)\n\n# Generate checksum\nhash_sha256 = hashlib.sha256()\nhash_md5 = hashlib.md5()\nwith open(out, 'rb') as outfile:\n for chunk in iter(lambda: f.read(4096), b''):\n hash_sha256.update(chunk)\n hash_md5.update(chunk)\nhash_sha256.hexdigest()\nhash_md5.hexdigest()\n\nprint(\"Created flashable firmware for %s.\" % ddata['codename'])\nprint(\"SHA256: %s\" % hash_sha256)\nprint(\"MD5: %s\" % hash_md5)\n\n# If not defined skip miui release argument, commit last miui version\nif not args.skip_miui_release_check:\n cursor.execute(\"UPDATE devices SET last_miui_release=? WHERE codename=? 
and version=?\", [miui_release, ddata['codename'], args.version])\n cachedb.commit()\n\n# Finally close cache db\ncachedb.close()\n","repo_name":"mifirmware/mifirmware","sub_path":"maker.py","file_name":"maker.py","file_ext":"py","file_size_in_byte":5045,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"19302631961","text":"import numpy as np\nimport numpy.random as rn\nimport pandas as pd\n\nimport torch\nimport pyro\n\nimport sys\nfrom path import Path\nfrom argparse import ArgumentParser\n\nfrom bayesian_factor_model import PPCA, GAP, run_NUTS_with_mask\nfrom deconfound_and_plot import get_counterfactual_from_best_reg\n\n\ndef main(args=None):\n p = ArgumentParser()\n p.add_argument('-d', '--data', type=Path, required=True)\n p.add_argument('-m', '--mask', type=Path, default=None)\n p.add_argument('-o', '--out', type=Path, default=None)\n p.add_argument('--model', type=str, default='GAP', choices=['GAP', 'PPCA'])\n p.add_argument('--model_seed', type=int, default=None)\n p.add_argument('-k', '--latent_dim', type=int, default=10)\n p.add_argument('--reg_type', type=str, default='Ridge', choices=['Ridge', 'Lasso', 'MLPRegressor'])\n\n if args is None:\n args = sys.argv[1:]\n args = p.parse_args(args)\n\n # create and set random seeds\n seed = args.model_seed\n if seed is None:\n seed = np.random.randint(0, 1000)\n rn.seed(seed)\n pyro.set_rng_seed(seed)\n torch.manual_seed(seed)\n\n # create output directory\n out_dir = args.out\n if out_dir is None:\n out_dir = args.data.parent\n out_dir = out_dir.joinpath('results', args.model, f'latent_dim{args.latent_dim}', f'model_seed_{seed}')\n out_dir.makedirs_p()\n\n # load data\n train_pivot = pd.read_csv(args.data, index_col=0)\n train_data = torch.tensor(train_pivot.values)\n\n # load mask\n mask = args.mask\n if mask is not None:\n mask = torch.load(args.mask)\n\n if not out_dir.joinpath('posterior_samples.npz').exists():\n # setup model\n if args.model == 'GAP':\n model = GAP(latent_dim=args.latent_dim, shp=1.0, rte=1.0)\n \n elif args.model == 'PPCA':\n model = PPCA(latent_dim=args.latent_dim, variance_support=(0, 10))\n\n # run MCMC\n posterior_samples = run_NUTS_with_mask(model=model.model, \n data=train_data,\n mask=mask,\n warmup_steps=1000,\n num_samples=2000)\n \n # covert to numpy\n posterior_samples = {k: v.numpy() for k, v in posterior_samples.items()}\n\n # save using np.savez_compressed\n np.savez_compressed(out_dir.joinpath('posterior_samples.npz'), **posterior_samples)\n print(out_dir.joinpath('posterior_samples.npz'))\n else:\n posterior_samples = np.load(out_dir.joinpath('posterior_samples.npz'))\n \n # load test data\n test_pivot = pd.read_csv(args.data.parent.joinpath('test_pivot.csv'), index_col=0)\n total_pivot = pd.concat([train_pivot, test_pivot], axis=0)\n intervention_t = train_pivot.values.shape[0]\n\n # compute counterfactuals for every posterior sample using Ridge regression\n Z_samples = posterior_samples['Z']\n func = lambda z: get_counterfactual_from_best_reg(z,\n total_pivot=total_pivot, \n intervention_t=intervention_t, \n reg_type=args.reg_type,\n include_previous_outcome=True)[0]\n \n counterfactual_preds = np.array([func(Z) for Z in Z_samples])\n np.save(out_dir.joinpath('counterfactuals.npy'), counterfactual_preds) \n\nif __name__ == '__main__':\n 
main()\n\n\n\n","repo_name":"Joshuashou/Synthetic-Control-Paper-Model","sub_path":"src/run_semi_synthetic_experiment.py","file_name":"run_semi_synthetic_experiment.py","file_ext":"py","file_size_in_byte":3537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70109891715","text":"from django.shortcuts import render\nfrom django.template.loader import get_template\nimport random\nfrom django.http import HttpResponse, Http404\nfrom mysite.models import Product\ndef about(request):\n\ttemplate = get_template('about.html')\n\tquotes = [\"時光腳步輕,年歲不饒人\",\"智者順時而謀,愚者逆理而動\",\"訓教不嚴師之惰,學問無成子之罪\",\"言不能亂髮,筆不能妄動\"]\n\thtml = template.render({'quote':random.choice(quotes)})\n\treturn HttpResponse(html)\n\n\ndef disp_detail(request,sku):\n\t\n\ttry:\n\t\tp = Product.objects.get(sku=sku)\n\texcept Product.DoesNotExist:\n\t\traise Http404(\"Not find items\")\n\n\ttemplate = get_template('disp.html')\n\thtml = template.render({'product':p})\n\treturn HttpResponse(html)\n\ndef listing(request):\n\tproducts= Product.objects.all()\n\ttemplate = get_template('list.html')\n\thtml = template.render({'products':products})\n\treturn HttpResponse(html)","repo_name":"frankye1000/django_practice","sub_path":"mysite/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19720996774","text":"#팩토리얼\ndef factorial(num):\n if num == 1:\n return 1\n else:\n return factorial(num-1) * num\n\ndef solution(n):\n answer = 1\n while True:\n if factorial(answer) > n:\n return answer - 1\n answer += 1\n\n#문제에서 최대의 n 값이 10!보다 작다고 했으므로 간단하게 반복문의 범위를 10까지로 지정할 수 있음\nfrom math import factorial\n\ndef solution2(n):\n k = 10\n while n < factorial(k):\n k -= 1\n return k","repo_name":"haeniKim/TIL","sub_path":"Algorithm/programmers/level_0/044.py","file_name":"044.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12515834597","text":"class TreeNode:\n def __init__(self, x):\n self.val = x\n self.right = None\n self.left = None\n\n\n# Time complexity: O(n), visit each node exactly once\n# Space complexity: O(n)(worst case: completely unbalanced), O(log(n))(best case: the height of the tree would be log(n))\ndef max_depth(root):\n if root is None:\n return 0\n left_height = max_depth(root.left)\n right_height = max_depth(root.right)\n return max(left_height, right_height) + 1\n\n\ndef test_max_depth():\n root = TreeNode(3)\n root.left = TreeNode(10)\n root.right = TreeNode(1)\n root.right.left = TreeNode(32)\n assert max_depth(root) == 3\n\n root = None\n assert max_depth(root) == 0\n\n root = TreeNode(1)\n assert max_depth(root) == 1\n","repo_name":"satojkovic/algorithms","sub_path":"problems/max_depth.py","file_name":"max_depth.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"20460744086","text":"from build_buy import get_stair_count\nfrom objects.portal import Portal\nfrom protocolbuffers import Routing_pb2 as routing_protocols\nfrom interactions.utils.routing import WalkStyle\nimport sims4.geometry\nimport sims4.utils\nimport routing\n\nclass Stairs(Portal):\n __qualname__ = 'Stairs'\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._footprints = []\n self._cached_lanes = None\n\n def portal_cleanup(self):\n 
super().portal_cleanup()\n self._footprints = []\n self._cached_lanes = None\n\n def on_buildbuy_exit(self):\n super().on_buildbuy_exit()\n if self._has_changed():\n self.portal_cleanup()\n self.portal_setup()\n\n def _build_discouragement_footprint(self, start, end, surface, offset):\n fwd = start - end\n fwd.y = 0\n fwd = sims4.math.vector_normalize(fwd)\n cross = sims4.math.vector_cross(fwd, sims4.math.Vector3.Y_AXIS())\n width = 0.05\n length = 0.5\n pos = start - cross*0.25*offset\n vertices = []\n vertices.append(pos - width*cross)\n vertices.append(pos - width*cross + length*fwd)\n vertices.append(pos + width*cross + length*fwd)\n vertices.append(pos + width*cross)\n poly = sims4.geometry.Polygon(vertices)\n return sims4.geometry.PolygonFootprint(poly, routing_surface=surface, cost=routing.get_default_discouragement_cost(), footprint_type=6, enabled=True)\n\n def _has_changed(self):\n if self._cached_lanes is None:\n return True\n stair_lanes = routing.get_stair_portals(self.id, self.zone_id)\n if len(stair_lanes) != len(self._cached_lanes):\n return True\n for (lane1, lane2) in zip(stair_lanes, self._cached_lanes):\n for (end1, end2) in zip(lane1, lane2):\n while not sims4.math.vector3_almost_equal(end1[0][0], end2[0][0]) or not sims4.math.vector3_almost_equal(end1[1][0], end2[1][0]):\n return True\n return False\n\n def portal_setup(self):\n super().portal_setup()\n stair_lanes = routing.get_stair_portals(self.id, self.zone_id)\n self._cached_lanes = stair_lanes\n for lane in stair_lanes:\n created_portals = []\n for end_set in lane:\n lane_start = end_set[0]\n lane_end = end_set[1]\n start_pos = lane_start[0]\n end_pos = lane_end[0]\n diff = start_pos - end_pos\n traversal_cost = diff.magnitude()*4.0\n created_portals.append(self.create_portal(routing.Location(start_pos, routing_surface=lane_start[1]), routing.Location(end_pos, routing_surface=lane_end[1]), Portal.PortalType.PortalType_Animate, self.id, traversal_cost))\n self.add_pair(*created_portals)\n\n def _traversing_up(self, portal_id):\n for p in self.portals:\n while portal_id == p.there:\n return True\n return False\n\n def _get_stairs_walkstyle(self, walkstyle, traversing_up):\n if walkstyle == WalkStyle.RUN or walkstyle == WalkStyle.JOG:\n if traversing_up:\n return WalkStyle.get_hash(WalkStyle.RUNSTAIRSUP)\n return WalkStyle.get_hash(WalkStyle.RUNSTAIRSDOWN)\n elif walkstyle == sims4.hash_util.hash32('walkreaper'):\n if traversing_up:\n return sims4.hash_util.hash32('reaperstairsup')\n return sims4.hash_util.hash32('reaperstairsdown')\n else:\n if traversing_up:\n return WalkStyle.get_hash(WalkStyle.STAIRSUP)\n return WalkStyle.get_hash(WalkStyle.STAIRSDOWN)\n\n @sims4.utils.exception_protected(1)\n def c_api_get_portal_duration(self, portal_id, walkstyle, age, gender):\n stairs_walkstyle = self._get_stairs_walkstyle(walkstyle, self._traversing_up(portal_id))\n (duration, _distance) = routing.get_walkstyle_info(stairs_walkstyle, age, gender)\n return duration*get_stair_count(self.id, self.zone_id)\n\n def add_portal_data(self, portal_id, actor, walkstyle):\n op = routing_protocols.RouteStairsData()\n op.traversing_up = self._traversing_up(portal_id)\n op.stair_count = get_stair_count(self.id, self.zone_id)\n op.walkstyle = self._get_stairs_walkstyle(walkstyle, op.traversing_up)\n op.stairs_per_cycle = 1\n node_data = routing_protocols.RouteNodeData()\n node_data.type = routing_protocols.RouteNodeData.DATA_STAIRS\n node_data.data = op.SerializeToString()\n return 
node_data\n\n","repo_name":"johndpope/sims4-ai-engine","sub_path":"simulation/objects/stairs/stairs.py","file_name":"stairs.py","file_ext":"py","file_size_in_byte":4596,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"4256280994","text":"from __future__ import annotations\n\nimport re\nfrom typing import Callable, Dict, Optional, Union\n\nimport numpy as np\nimport numpy.typing as npt\nfrom xarray import Dataset\n\nfrom sisl._lattice import cell_invert\nfrom sisl.lattice import Lattice, LatticeChild\nfrom sisl.utils.mathematics import fnorm\n\nfrom .axes import axes_cross_product, axis_direction, get_ax_title\n\n# from ...types import Axes, CellLike, Axis\n\nCoordsDataset = Dataset\n\n\ndef projected_2Dcoords(\n cell: CellLike, xyz: npt.NDArray[np.float64], xaxis: Axis = \"x\", yaxis: Axis = \"y\"\n) -> npt.NDArray[np.float64]:\n \"\"\"Moves the 3D positions of the atoms to a 2D supspace.\n\n In this way, we can plot the structure from the \"point of view\" that we want.\n\n NOTE: If xaxis/yaxis is one of {\"a\", \"b\", \"c\", \"1\", \"2\", \"3\"} the function doesn't\n project the coordinates in the direction of the lattice vector. The fractional\n coordinates, taking in consideration the three lattice vectors, are returned\n instead.\n\n Parameters\n ------------\n geometry: sisl.Geometry\n the geometry for which you want the projected coords\n xyz: array-like of shape (natoms, 3), optional\n the 3D coordinates that we want to project.\n otherwise they are taken from the geometry.\n xaxis: {\"x\", \"y\", \"z\", \"a\", \"b\", \"c\"} or array-like of shape 3, optional\n the direction to be displayed along the X axis.\n yaxis: {\"x\", \"y\", \"z\", \"a\", \"b\", \"c\"} or array-like of shape 3, optional\n the direction to be displayed along the X axis.\n\n Returns\n ----------\n np.ndarray of shape (2, natoms)\n the 2D coordinates of the geometry, with all positions projected into the plane\n defined by xaxis and yaxis.\n \"\"\"\n if isinstance(cell, (Lattice, LatticeChild)):\n cell = cell.cell\n\n try:\n all_lattice_vecs = len(set([xaxis, yaxis]).intersection([\"a\", \"b\", \"c\"])) == 2\n except:\n # If set fails it is because xaxis/yaxis is unhashable, which means it\n # is a numpy array\n all_lattice_vecs = False\n\n if all_lattice_vecs:\n coord_indices = [\"abc\".index(ax) for ax in (xaxis, yaxis)]\n\n icell = cell_invert(cell.astype(float))\n else:\n # Get the directions that these axes represent\n xaxis = axis_direction(xaxis, cell)\n yaxis = axis_direction(yaxis, cell)\n\n fake_cell = np.array([xaxis, yaxis, np.cross(xaxis, yaxis)], dtype=np.float64)\n icell = cell_invert(fake_cell)\n coord_indices = [0, 1]\n\n return np.dot(xyz, icell.T)[..., coord_indices]\n\n\ndef projected_1Dcoords(cell: CellLike, xyz: npt.NDArray[np.float64], axis: Axis = \"x\"):\n \"\"\"\n Moves the 3D positions of the atoms to a 2D supspace.\n\n In this way, we can plot the structure from the \"point of view\" that we want.\n\n NOTE: If axis is one of {\"a\", \"b\", \"c\", \"1\", \"2\", \"3\"} the function doesn't\n project the coordinates in the direction of the lattice vector. 
The fractional\n coordinates, taking in consideration the three lattice vectors, are returned\n instead.\n\n Parameters\n ------------\n geometry: sisl.Geometry\n the geometry for which you want the projected coords\n xyz: array-like of shape (natoms, 3), optional\n the 3D coordinates that we want to project.\n otherwise they are taken from the geometry.\n axis: {\"x\", \"y\", \"z\", \"a\", \"b\", \"c\", \"1\", \"2\", \"3\"} or array-like of shape 3, optional\n the direction to be displayed along the X axis.\n nsc: array-like of shape (3, ), optional\n only used if `axis` is a lattice vector. It is used to rescale everything to the unit\n cell lattice vectors, otherwise `GeometryPlot` doesn't play well with `GridPlot`.\n\n Returns\n ----------\n np.ndarray of shape (natoms, )\n the 1D coordinates of the geometry, with all positions projected into the line\n defined by axis.\n \"\"\"\n if isinstance(cell, (Lattice, LatticeChild)):\n cell = cell.cell\n\n if isinstance(axis, str) and axis in (\"a\", \"b\", \"c\", \"0\", \"1\", \"2\"):\n return projected_2Dcoords(\n cell, xyz, xaxis=axis, yaxis=\"a\" if axis == \"c\" else \"c\"\n )[..., 0]\n\n # Get the direction that the axis represents\n axis = axis_direction(axis, cell)\n\n return xyz.dot(axis / fnorm(axis)) / fnorm(axis)\n\n\ndef coords_depth(coords_data: CoordsDataset, axes: Axes) -> npt.NDArray[np.float64]:\n \"\"\"Computes the depth of 3D points as projected in a 2D plane\n\n Parameters\n ----------\n coords_data: CoordsDataset\n The coordinates for which the depth is to be computed.\n axes: Axes\n The axes that define the plane where the coordinates are projected.\n \"\"\"\n cell = _get_cell_from_dataset(coords_data=coords_data)\n\n depth_vector = axes_cross_product(axes[0], axes[1], cell)\n depth = project_to_axes(coords_data, axes=[depth_vector]).x.values\n\n return depth\n\n\ndef sphere(\n center: npt.ArrayLike = [0, 0, 0], r: float = 1, vertices: int = 10\n) -> Dict[str, np.ndarray]:\n \"\"\"Computes a mesh defining a sphere.\"\"\"\n phi, theta = np.mgrid[\n 0.0 : np.pi : 1j * vertices, 0.0 : 2.0 * np.pi : 1j * vertices\n ]\n center = np.array(center)\n\n phi = np.ravel(phi)\n theta = np.ravel(theta)\n\n x = center[0] + r * np.sin(phi) * np.cos(theta)\n y = center[1] + r * np.sin(phi) * np.sin(theta)\n z = center[2] + r * np.cos(phi)\n\n return {\"x\": x, \"y\": y, \"z\": z}\n\n\ndef _get_cell_from_dataset(coords_data: CoordsDataset) -> npt.NDArray[np.float64]:\n cell = coords_data.attrs.get(\"cell\")\n if cell is None:\n if \"lattice\" in coords_data.attrs:\n cell = coords_data.lattice.cell\n else:\n cell = coords_data.geometry.cell\n\n return cell\n\n\ndef projected_1D_data(\n coords_data: CoordsDataset,\n axis: Axis = \"x\",\n dataaxis_1d: Union[Callable, npt.NDArray, None] = None,\n) -> CoordsDataset:\n cell = _get_cell_from_dataset(coords_data=coords_data)\n\n xyz = coords_data.xyz.values\n\n x = projected_1Dcoords(cell, xyz=xyz, axis=axis)\n\n dims = coords_data.xyz.dims[:-1]\n\n if dataaxis_1d is None:\n y = np.zeros_like(x)\n else:\n if callable(dataaxis_1d):\n y = dataaxis_1d(x)\n elif isinstance(dataaxis_1d, (int, float)):\n y = np.full_like(x, dataaxis_1d)\n else:\n y = dataaxis_1d\n\n coords_data = coords_data.assign(x=(dims, x), y=(dims, y))\n\n return coords_data\n\n\ndef projected_2D_data(\n coords_data: CoordsDataset,\n xaxis: Axis = \"x\",\n yaxis: Axis = \"y\",\n sort_by_depth: bool = False,\n) -> CoordsDataset:\n cell = _get_cell_from_dataset(coords_data=coords_data)\n\n xyz = coords_data.xyz.values\n\n xy = 
projected_2Dcoords(cell, xyz, xaxis=xaxis, yaxis=yaxis)\n\n x, y = xy[..., 0], xy[..., 1]\n dims = coords_data.xyz.dims[:-1]\n\n coords_data = coords_data.assign(x=(dims, x), y=(dims, y))\n\n coords_data = coords_data.assign(\n {\"depth\": (dims, coords_depth(coords_data, [xaxis, yaxis]).data)}\n )\n if sort_by_depth:\n coords_data = coords_data.sortby(\"depth\")\n\n return coords_data\n\n\ndef projected_3D_data(coords_data: CoordsDataset) -> CoordsDataset:\n x, y, z = np.moveaxis(coords_data.xyz.values, -1, 0)\n dims = coords_data.xyz.dims[:-1]\n\n coords_data = coords_data.assign(x=(dims, x), y=(dims, y), z=(dims, z))\n\n return coords_data\n\n\ndef project_to_axes(\n coords_data: CoordsDataset,\n axes: Axes,\n dataaxis_1d: Optional[Union[npt.ArrayLike, Callable]] = None,\n sort_by_depth: bool = False,\n cartesian_units: str = \"Ang\",\n) -> CoordsDataset:\n ndim = len(axes)\n if ndim == 3:\n xaxis, yaxis, zaxis = axes\n coords_data = projected_3D_data(coords_data)\n elif ndim == 2:\n xaxis, yaxis = axes\n coords_data = projected_2D_data(\n coords_data, xaxis=xaxis, yaxis=yaxis, sort_by_depth=sort_by_depth\n )\n elif ndim == 1:\n xaxis = axes[0]\n yaxis = dataaxis_1d\n coords_data = projected_1D_data(\n coords_data, axis=xaxis, dataaxis_1d=dataaxis_1d\n )\n\n plot_axes = [\"x\", \"y\", \"z\"][:ndim]\n\n for ax, plot_ax in zip(axes, plot_axes):\n coords_data[plot_ax].attrs[\"axis\"] = {\n \"title\": get_ax_title(ax, cartesian_units=cartesian_units),\n }\n\n coords_data.attrs[\"ndim\"] = ndim\n\n return coords_data\n","repo_name":"zerothi/sisl","sub_path":"src/sisl/viz/processors/coords.py","file_name":"coords.py","file_ext":"py","file_size_in_byte":8349,"program_lang":"python","lang":"en","doc_type":"code","stars":155,"dataset":"github-code","pt":"61"}
{"seq_id":"21283693223","text":"import boto3\nfrom b2b_app.config import (\n S3_AWS_REGION,\n S3_ACCESS_KEY_ID,\n S3_AWS_SECRET_ACCESS_KEY,\n S3_AWS_END_POINT,\n S3_B2B_APP_BUCKET,\n)\n\nclass S3:\n def __init__(self, _id):\n self._id = _id\n self._s3_session = boto3.Session(\n aws_access_key_id=S3_ACCESS_KEY_ID,\n aws_secret_access_key=S3_AWS_SECRET_ACCESS_KEY,\n )\n self._s3_region_conn = {}\n self._s3_region_conn[S3_AWS_REGION] = self._s3_session.resource('s3', S3_AWS_REGION)\n \n def get_region_connection(self, region_name):\n if region_name in self._s3_region_conn:\n return self._s3_region_conn[region_name]\n else:\n new_conn = self._s3_session.resource('s3', region_name)\n self._s3_region_conn[region_name] = new_conn\n return new_conn\n \n # In a virtual-hosted–style URL\n # http://bucket.s3-aws-region.amazonaws.com\n # http://bucket.s3.s3-aws-region.amazonaws.com\n @staticmethod\n def get_details_from_virtual_style_url(url):\n url_split = url.split('/')\n end_point = url_split[2] #region and bucket\n file_key = url_split[3]\n # assumes the bucket.s3-aws-region.amazonaws.com form shown above\n end_point_split = end_point.split('.')\n bucket = end_point_split[0]\n region = end_point_split[1]\n return {\n 'end_point': end_point,\n 'file_key': file_key,\n 'bucket': bucket,\n 'region': region,\n }\n \n # In a path–style URL\n # http://s3-aws-region.amazonaws.com/bucket\n @staticmethod\n def get_details_from_path_style_url(url):\n url_split = url.split('/')\n end_point = url_split[2] #region\n bucket = url_split[3]\n file_key = url_split[4]\n region = end_point.split('.')[0]\n return {\n 'end_point': end_point,\n 'file_key': file_key,\n 'bucket': bucket,\n 'region': region,\n }\n\n def get_file_iterator(self, region, bucket, key):\n 
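# NOTE: hypothetical usage sketch; the id/key values are illustrative, not\n #   from this repo:\n #   for line in S3('conn-id').get_file_iterator(S3_AWS_REGION, S3_B2B_APP_BUCKET, 'export.csv'):\n #       process(line)\n s3_obj = self.get_region_connection(region).Object(bucket_name=bucket, 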
\n    def get_url_for_upload(self, file, path, file_type):\n        pass\n","repo_name":"PrudhviRaj5/boilerplate-sanic","sub_path":"app/services/s3.py","file_name":"s3.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"42876675618","text":"\"\"\" checks for homebrew things \"\"\"\n\nimport sys\nfrom typing import List, TypedDict, Callable, TypeVar, cast\nfrom loguru import logger\n\nfrom ..repolinter import RepoLinter\nfrom ..utils import get_fix_file_path\n\nCATEGORY = \"homebrew\"\n\nLANGUAGES = [\"Ruby\"]\n\n\nclass DefaultConfig(TypedDict):\n    \"\"\"config typing for module config\"\"\"\n\n    required_files: List[str]\n\n\nDEFAULT_CONFIG: DefaultConfig = {\n    \"required_files\": [\n        \"homebrew_check_latest_release.sh\",\n        \".github/workflows/homebrew_check_updates.yml\",\n    ]\n}\n\n\nWrappedFunction = TypeVar(\"WrappedFunction\", bound=Callable[[RepoLinter], None])\n\n\ndef should_this_run(func: WrappedFunction) -> WrappedFunction:\n    \"\"\"if the repo name doesn't match then don't run\"\"\"\n\n    def inner(repo: RepoLinter) -> None:\n        if not repo.repository.name.startswith(\"homebrew-\"):\n            logger.debug(\"Not a homebrew repo, skipping\")\n            return None\n        logger.debug(\"Name checks out: {}\", repo.repository.name)\n        func(repo)\n        return None\n\n    return cast(WrappedFunction, inner)\n\n\n@should_this_run\ndef check_update_files_exist(repo: RepoLinter) -> None:\n    \"\"\"checks that the required files exist\"\"\"\n    for filename in repo.config[CATEGORY][\"required_files\"]:\n        filecontents = repo.cached_get_file(filename)\n        if not filecontents:\n            repo.error(CATEGORY, f\"Missing homebrew file: {filename}\")\n\n\n@should_this_run\ndef fix_update_files_exist(repo: RepoLinter) -> None:\n    \"\"\"updates the homebrew files from the templates\"\"\"\n\n    for filename in repo.config[CATEGORY][\"required_files\"]:\n        updatefile = get_fix_file_path(CATEGORY, filename)\n        if not updatefile.exists():\n            logger.error(\"Running fix, can't find fix file {}!\", updatefile.as_posix())\n            sys.exit(1)\n\n        filecontents = repo.cached_get_file(filepath=filename, clear_cache=True)\n\n        result = repo.create_or_update_file(\n            filename,\n            updatefile,\n            filecontents,\n            f\"github_linter.homebrew updating {filename}\",\n        )\n        if result is not None:\n            repo.fix(\n                CATEGORY,\n                f\"Updated {filename} in commit {result}\",\n            )\n","repo_name":"yaleman/github_linter","sub_path":"github_linter/tests/homebrew.py","file_name":"homebrew.py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
{"seq_id":"23166429601","text":"# custom library files\nfrom bloom import *\nfrom ephid import *\nfrom helper import *\nfrom sender import *\n\n# imported library\nfrom binascii import hexlify, unhexlify\nfrom Crypto.Protocol.SecretSharing import Shamir\nfrom hashlib import sha256\nfrom socket import *\nimport threading\nimport time\nfrom copy import deepcopy\n\n# global variable\nport = 40000\npriv_key = 0\nbroadcast_hash = \"\"\nfilter_size = 800000\ndbf = BloomFilter(filter_size)\ndbf_list = []\ncovid = 0\nold_hash = 0\n\n\nprint(f\"[STARTING] Program is starting on port {port}.\")\n\n######################\n\n# thread to broadcast shares\ndef udp_broadcaster():\n\n\tglobal port, broadcast_hash, priv_key, old_hash\n\n\t# create 
socket\n\tbroadcast_socket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)\n\tbroadcast_socket.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)\n\n\t# create new ephID and generate recv_shares\n\tpriv_key, broadcast_id = generate_ephid()\n\tbroadcast_id_recv_shares = Shamir.split(3, 6, broadcast_id)\n\t\n\t# hash of ephid\n\tbroadcast_hash = sha256(broadcast_id).hexdigest()\n\n\t# print recv_shares and id\n\tprint_id(broadcast_id, broadcast_id_recv_shares)\n\n\t# timer\n\tstart_time = time.time()\n\tbroadcast_timer = 10\t\t# 10 seconds\n\tid_timer = 60\t\t\t\t# 1 minute\n\tcurr_timer = time.time() - start_time\n\n\twhile True:\n\n\t\t# broadcast id every 10 seconds\n\t\tif curr_timer > broadcast_timer and len(broadcast_id_recv_shares) != 0:\n\t\t\tprint(f\"[TASK 3A] Broadcasting shares: {broadcast_id_recv_shares[0][0], hexlify(broadcast_id_recv_shares[0][1])}\")\n\t\t\tsend_str = str(broadcast_id_recv_shares[0][0]) + \"|\" + hexlify(broadcast_id_recv_shares[0][1]).decode() + \"|\" + broadcast_hash\n\t\t\tbroadcast_socket.sendto(send_str.encode('utf-8'), ('192.168.4.255', port))\n\t\t\tbroadcast_id_recv_shares.pop(0)\n\t\t\tbroadcast_timer += 10\n\n\t\t# create new id every minute\n\t\telif curr_timer > id_timer:\n\t\t\t# create new ephID and generate recv_shares\n\t\t\tpriv_key, broadcast_id = generate_ephid()\n\t\t\tbroadcast_id_recv_shares = Shamir.split(3, 6, broadcast_id)\n\t\t\t\n\t\t\t# hash of ephid\n\t\t\told_hash = broadcast_hash\n\t\t\tbroadcast_hash = sha256(broadcast_id).hexdigest()\n\n\t\t\t# print recv_shares and id\n\t\t\tprint_id(broadcast_id, broadcast_id_recv_shares)\n\n\t\t\t# set timer\n\t\t\tid_timer += 60\n\n\t\t# update timer\n\t\tcurr_timer = time.time() - start_time\n\n# thread to receive shares\ndef udp_receiver():\n\n\tglobal port, broadcast_hash, priv_key, dbf, old_hash\n\t\n\tnew_contact_list = {}\n\n\tdbf.restart()\n\n\t# create socket\n\tserver_socket = socket(AF_INET, SOCK_DGRAM) # UDP\n\tserver_socket.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)\n\tserver_socket.bind((\"\", port))\n\n\tprint(\"Waiting to receive shares from other devices...\")\n\tprint()\n\n\twhile True:\n\t\t# receive message\n\t\trecv_msg, _ = server_socket.recvfrom(2048)\n\t\trecv_index, recv_share, recv_hash = recv_msg.decode(\"utf-8\").split(\"|\")\n\n\t\t# skip if receive own message\n\t\tif recv_hash == broadcast_hash or recv_hash == old_hash:\n\t\t\tcontinue\n\t\telse:\n\t\t\t# recv_index\n\t\t\trecv_index = int(recv_index)\n\n\t\t\t# recv_share\n\t\t\trecv_share = unhexlify(recv_share.encode())\n\t\t\t\n\t\t\tif recv_hash not in new_contact_list.keys():\n\t\t\t\tnew_contact_list[recv_hash] = [(recv_index, recv_share)]\n\t\t\telse:\n\t\t\t\tnew_contact_list[recv_hash].append((recv_index, recv_share))\n\t\t\t\n\t\t\t# keep track of number of recv_shares received\n\t\t\tnum_recv_shares = len(new_contact_list[recv_hash])\n\t\t\t# print()\n\t\t\tprint(f\"[TASK 3B/3C] Received {num_recv_shares} recv_shares for {recv_hash}.\")\n\t\t\tprint()\n\t\t\t\n\t\t\t# Check if the hash contains 3 entries\n\t\t\tif num_recv_shares == 3:\n\t\t\t\tsec = Shamir.combine(new_contact_list[recv_hash])\n\t\t\t\tprint()\n\t\t\t\tprint(f\"[TASK 4A] Reconstructing EphID: {hexlify(sec)}\")\n\t\t\t\tprint(\"[TASK 4B] Verifying integrity of EphID...\")\n\t\t\t\tnew_hash = sha256(sec).hexdigest()\n\t\t\t\tprint()\n\t\t\t\tprint(f\"Received hash: \t {recv_hash}\")\n\t\t\t\tprint(f\"Recontructed hash: {new_hash}\")\n\t\t\t\tprint()\n\t\t\t\tif recv_hash == new_hash:\n\t\t\t\t\tprint(\"Verified hash. 
Computing EncID...\")\n\t\t\t\t\tenc_id = int(hexlify(sec), 16) * priv_key\n\t\t\t\t\tprint(f\"[TASK 5A/5B] EncID is: {enc_id}\")\n\t\t\t\t\tprint(\"[TASK 6] Adding EncID to DBF and deleting EncID...\")\n\t\t\t\t\tdbf.add(str(enc_id))\n\t\t\t\t\tprint()\n\t\t\t\t\tprint(f\"[TASK 7A] Current state of DBF: {dbf.get_indices()}\")\n\t\t\t\t\tprint()\n\t\t\t\telse:\n\t\t\t\t\tprint(\"Error: Hash not verified.\")\n\t\t\t\t\tprint()\n\n# thread to deal with backend API\ndef udp_sender():\n\n\tglobal dbf, dbf_list, filter_size, covid\n\n\tqbf = BloomFilter(filter_size)\n\n\tstart_time = time.time()\n\tdbf_timer = 600\t\t# 10 minutes\n\tqbf_timer = 3600 \t# 60 minutes\n\tcurr_timer = time.time() - start_time\n\n\twhile not covid:\n\t\tif curr_timer > dbf_timer:\n\t\t\t# remove oldest DBF\n\t\t\tif len(dbf_list) == 6:\n\t\t\t\tdbf_list.pop(0)\n\n\t\t\tdbf_list.append(deepcopy(dbf))\t\n\t\t\tdbf.restart()\n\t\t\tdbf_timer += 600\n\n\t\t\tprint()\n\t\t\tprint(f\"[TASK 7B] Creating new DBF...\")\n\t\t\tprint()\n\n\t\tif curr_timer > qbf_timer:\n\t\t\t# print debug messages\n\t\t\tprint()\n\t\t\tprint(\"All available DBFs:\")\n\t\t\tfor i, bf in enumerate(dbf_list):\n\t\t\t\tprint(f\"DBF {i+1}: {bf.get_indices()}\")\n\t\t\tprint()\n\t\t\tqbf.merge(dbf_list)\n\t\t\tprint(f\"[TASK 8] Creating QBF: {qbf.get_indices()}\")\n\t\t\tprint(\"[TASK 9A] Sending QBF to server, waiting for result...\")\n\t\t\tprint()\n\t\t\tresp = send_qbf(qbf.bit_array)\n\t\t\tprint(f\"[TASK 9B] Query result: {resp['result']}. {resp['message']}\")\n\t\t\tprint()\n\t\t\tqbf_timer += 3600\n\n\t\t# update timer\n\t\tcurr_timer = time.time() - start_time\n\ndef monitor_input():\n\tglobal dbf, dbf_list, filter_size, covid\n\n\t# wait till the first dbf has generated\n\ttime.sleep(600)\t\t# follow dbf_timer\n\n\tprint(\"##############################################################\")\n\tprint(\"# #\")\n\tprint(\"# Type 'uploadcbf' to upload your CBF to the server #\")\n\tprint(\"# #\")\n\tprint(\"##############################################################\")\n\tprint()\n\n\t# listen for input from user\n\twhile True:\n\t\tcommand = input()\n\t\tif command == 'uploadcbf': \n\t\t\t# print messages\n\t\t\tprint()\n\t\t\tprint(\"User has COVID-19. 
The program will stop generating QBFs now.\")\n\t\t\tprint(\"[TASK 10] Uploading CBF to the backend server...\")\n\t\t\tprint()\n\n\t\t\t# create CBF\n\t\t\tcovid = 1\n\t\t\tcbf = BloomFilter(filter_size)\n\t\t\tcbf.merge(dbf_list)\n\t\t\tsend_cbf(cbf.bit_array)\n\t\t\tprint(\"Upload success\")\n\t\t\tbreak\n\n# thread for listening for beacons\nudp_broad_thread = threading.Thread(name = \"ClientBroadcaster\", target = udp_broadcaster)\nudp_broad_thread.start()\n\n# thread for talking to the backend API\nudp_sender_thread = threading.Thread(name = \"ClientSender\", target = udp_sender)\nudp_sender_thread.start()\n\n# thread for receiving messages\nudp_receiver_thread = threading.Thread(name = \"ClientReceiver\", target = udp_receiver)\nudp_receiver_thread.start()\n\n# thread for monitoring user input\nmonitor_input_thread = threading.Thread(name = \"MonitorInput\", target = monitor_input)\nmonitor_input_thread.start()","repo_name":"eushaun/comp9337","sub_path":"Dimy.py","file_name":"Dimy.py","file_ext":"py","file_size_in_byte":6802,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"30359621418","text":"# pick the category closest to the image among the entered categories\n\nfrom PIL import Image\nimport requests\n\nfrom transformers import CLIPProcessor, CLIPModel\n\nmodel = CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\")\nprocessor = CLIPProcessor.from_pretrained(\"openai/clip-vit-large-patch14\")\n\nurl = input(\"Enter image URL: \")\nimage = Image.open(requests.get(url, stream=True).raw)\n\nnum_text = int(input(\"Number of categories: \"))\nli_text = []\nfor i in range(num_text):\n    li_text.append(input(f\"category({i}) > \"))\n\ninputs = processor(li_text, images=image, return_tensors=\"pt\", padding=True)\n\noutputs = model(**inputs)\nlogits_per_image = outputs.logits_per_image # this is the image-text similarity score\nprobs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities\n\nhigh = 0\nfor i in range(len(probs[0])):\n    if(probs[0][high] < probs[0][i]):\n        high = i\n\nprint(f\"category >> {li_text[high]}\")\n","repo_name":"SE0NA/AI_chatBot","sub_path":"chatbot3.py","file_name":"chatbot3.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"27582384584","text":"import os\nimport json\nimport subprocess\nimport uuid\nfrom drp_1dpipe.core.utils import normpath, wait_semaphores, convert_dl_to_ld\nfrom drp_1dpipe.core.engine.runner import Runner\n\nclass BatchQueue(Runner):\n\n    batch_submitter = \"# Program that queues a task\"\n    single_script_template = \"# Batch script for single task\"\n\n    parallel_script_template = \"# Batch script for parallel task\"\n\n    def single(self, command, args):\n        \"\"\"Run a single command using batch queue.\"\"\"\n\n        task_id = uuid.uuid4().hex\n\n        # generate batch script\n        extra_args = ' '.join(['--{}={}'.format(k, v) for k, v in args.items()])\n\n        script = self.single_script_template.format(workdir=normpath(self.workdir),\n                                                    venv=self.venv,\n                                                    command=command,\n                                                    extra_args=extra_args,\n                                                    task_id=task_id)\n        batch_script_name = normpath(self.workdir,\n                                     'batch_script_{}.sh'.format(task_id))\n        with open(batch_script_name, 'w') as batch_script:\n            batch_script.write(script)\n        self.tmpcontext.add_files(batch_script_name)\n\n        # run batch\n        result = subprocess.run([self.batch_submitter, batch_script_name])\n        assert result.returncode == 0\n\n        # block until completion\n        semaphores = [normpath(self.workdir, '{}.done'.format(task_id))]\n        
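# [editor] descriptive note, not in the original: the '.done' files act as completion\n        # semaphores -- the generated batch script is expected to create them, and\n        # wait_semaphores() below presumably blocks until every one of them exists.\n        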
self.tmpcontext.add_files(*semaphores)\n wait_semaphores(semaphores)\n return batch_script_name\n\n def parallel(self, command, parallel_args=None, args=None):\n \"\"\"Execute parallel task for batch runners\n\n Parameters\n ----------\n command : str\n Path to command to execute\n parallel_args : dict, optional\n command line arguments to related to each parallel task, by default None\n args : dict, optional\n command line arguments common to all parallel tasks, by default None\n \"\"\"\n task_id = uuid.uuid4().hex\n executor_script = normpath(self.workdir, 'batch_executor_{}.py'.format(task_id))\n self.tmpcontext.add_files(executor_script)\n\n # Convert dictionnary of list to list of dictionnaries\n pll_args = convert_dl_to_ld(parallel_args)\n\n # generate batch_executor script\n tasks = []\n extra_args = ['--{}={}'.format(k, v)\n for k, v in args.items()]\n # if k not in ('pre-commands', seq_arg_name, 'notifier')]\n\n # setup tasks\n # with open(filelist, 'r') as f:\n # subtasks = json.load(f)\n # # register these files for deletion\n # self.tmpcontext.add_files(*subtasks)\n\n # for k, v in pll_args.items():\n # task = [command,\n # '--{arg_name}={arg_value}'.format(arg_name=k,\n # arg_value=v)]\n # task.extend(extra_args)\n # tasks.append(task)\n\n for i, arg_value in enumerate(pll_args):\n task = [command]\n for k, v in arg_value.items():\n task.append('--{arg_name}={arg_value}'.format(arg_name=k, arg_value=v))\n task.extend(extra_args)\n tasks.append(task)\n # for i, arg_value in enumerate(subtasks):\n # task = [command,\n # '--{arg_name}={arg_value}'.format(arg_name=arg_name,\n # arg_value=arg_value)]\n # task.extend(extra_args)\n # if seq_arg_name:\n # [task.append('--{}={}'.format(\n # seq_arg,\n # os.path.join(args[seq_arg], 'B'+str(i)))\n # ) for seq_arg in seq_arg_name]\n # tasks.append(task)\n\n # setup pipeline notifier\n # notifier = args['notifier']\n # notifier.update(command,\n # children=['{}-{}'.format(command, i)\n # for i in range(ntasks)])\n # for i in range(ntasks):\n # notifier.update('{}-{}'.format(command, i), state='WAITING')\n # notifier.update(command, 'RUNNING')\n\n # generate batch script\n with open(os.path.join(os.path.dirname(__file__), 'resources', 'executor.py.in'), 'r') as f:\n batch_executor = f.read().format(tasks=tasks, notification_url='')\n # batch_executor = f.read().format(tasks=tasks,\n # notification_url=(notifier.pipeline_url\n # if notifier.pipeline_url\n # else ''))\n with open(executor_script, 'w') as executor:\n executor.write(batch_executor)\n\n # generate batch script\n ntasks = len(tasks)\n script = self.parallel_script_template.format(jobs=ntasks,\n workdir=normpath(self.workdir),\n venv=self.venv,\n executor_script=executor_script,\n task_id=task_id)\n batch_script_name = normpath(self.workdir,\n f'batch_script_{task_id}.sh')\n with open(batch_script_name, 'w') as batch_script:\n batch_script.write(script)\n self.tmpcontext.add_files(batch_script_name)\n\n # run batch\n result = subprocess.run([self.batch_submitter, batch_script_name])\n assert result.returncode == 0\n\n # wait all sub-tasks\n semaphores = [normpath(self.workdir, f'{task_id}_{i}.done')\n for i in range(1, ntasks+1)]\n self.tmpcontext.add_files(*semaphores)\n\n wait_semaphores(semaphores)\n # notifier.update(command, 
'SUCCESS')\n","repo_name":"Subaru-PFS/drp_1dpipe","sub_path":"drp_1dpipe/core/engine/batch.py","file_name":"batch.py","file_ext":"py","file_size_in_byte":6057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"35937578391","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 17 11:10:55 2019\n\n@author: dejanmaric\n\"\"\"\n\nimport pandas as pd\nimport requests\nimport time\n\n### Get pokemon stats from Kanto pokedex => to_csv\n\nurl = \"https://pokeapi.co/api/v2/pokedex/2/\"\nget_pokemons = requests.get(url)\nget_pokemons_json = get_pokemons.json()\nelements = get_pokemons_json['pokemon_entries']\nall_data=[]\n\nstart = time.time()\n\nfor x in elements:\n    url = \"https://pokeapi.co/api/v2/pokemon/\" + str(x[\"entry_number\"]) + \"/\"\n    get_pokemon = requests.get(url)\n    get_pokemon_json = get_pokemon.json()\n    d = {'id': x[\"entry_number\"], \n         'name': x[\"pokemon_species\"][\"name\"],\n         'order': get_pokemon_json[\"order\"],\n         'weight': get_pokemon_json[\"weight\"],\n         'height': get_pokemon_json[\"height\"],\n         'speed': get_pokemon_json[\"stats\"][0][\"base_stat\"],\n         'special_defense': get_pokemon_json[\"stats\"][1][\"base_stat\"],\n         'special_attack': get_pokemon_json[\"stats\"][2][\"base_stat\"],\n         'defense': get_pokemon_json[\"stats\"][3][\"base_stat\"],\n         'attack': get_pokemon_json[\"stats\"][4][\"base_stat\"],\n         'hp': get_pokemon_json[\"stats\"][5][\"base_stat\"]}\n    all_data.append(d)\n\nelapsed = (time.time() - start)\nprint(\"Time elapsed: \", elapsed)\n\ndf = pd.DataFrame(all_data)\ndf = df.sort_values(by=['id'])\ndf.to_csv('pokemon-stats.csv')\n\n\n### Get more pokemon stats (Move and Type)\n\nurl = \"https://pokeapi.co/api/v2/pokedex/2/\"\nget_pokemons = requests.get(url)\nget_pokemons_json = get_pokemons.json()\nelements = get_pokemons_json['pokemon_entries']\n\nall_data=[]\n\nstart1 = time.time()\nfor x in elements:\n    url = \"https://pokeapi.co/api/v2/pokemon/\" + str(x[\"entry_number\"]) + \"/\"\n    get_pokemon = requests.get(url)\n    get_pokemon_json = get_pokemon.json()\n    e = {'id': x[\"entry_number\"], \n         'name': x[\"pokemon_species\"][\"name\"],\n         'speed' : get_pokemon_json[\"stats\"][0][\"base_stat\"],\n         'special_defense': get_pokemon_json[\"stats\"][1][\"base_stat\"],\n         'special_attack': get_pokemon_json[\"stats\"][2][\"base_stat\"],\n         'defense': get_pokemon_json[\"stats\"][3][\"base_stat\"],\n         'attack': get_pokemon_json[\"stats\"][4][\"base_stat\"],\n         'hp': get_pokemon_json[\"stats\"][5][\"base_stat\"],\n         'move': set(y['move']['name'] for y in get_pokemon_json[\"moves\"]),\n         'type': set(z['type']['name'] for z in get_pokemon_json['types'])\n         } \n    all_data.append(e)\n    \nelapsed1 = (time.time() - start1)\nprint(\"Time elapsed: \", elapsed1)\ndf1 = pd.DataFrame(all_data)\n\n# inverting data set (to make attributes pointing to id)\nmove_to_id = {}\nfor id, conds in df1[\"move\"].items():\n    for c in conds:\n        move_to_id.setdefault(c, set()).add(id)\n\ntype_to_id = {}\nfor id, conds in df1[\"type\"].items():\n    for c in conds:\n        type_to_id.setdefault(c, set()).add(id)\n\n\n# random checks\n[key for key, value in df1[\"type\"].items() if 'ice' in value] ##id=86,90,123,130,143\n[key for key, value in df1[\"move\"].items() if 'laser-focus' in value] ##id=149\n\n# define function (find the pokemon id with the max value of a stat and map it back to the name)\ndef getBest(move, tip, stat):\n
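    # [editor] hedged usage note, not in the original file: e.g. getBest('cut', 'rock', 'attack')\n    # should return the rock-type pokemon(s) knowing cut with the highest attack, while 'all'\n    # disables a filter; stat names follow the dict keys built above ('hp', 'attack', ...).\n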
print(\"Not a valid stat\"); return\n if move == 'all' and tip == 'all':\n print(df1['name'].values[df1[stat] == df1[stat].max()]) \n elif move == 'all':\n matches=type_to_id[tip]\n max_stat = max(df1[stat][id] for id in matches)\n return [df1[\"name\"][id] for id in matches if df1[stat][id] == max_stat]\n elif tip == 'all':\n matches=move_to_id[move]\n max_stat = max(df1[stat][id] for id in matches)\n return [df1[\"name\"][id] for id in matches if df1[stat][id] == max_stat]\n else:\n matches = move_to_id[move] & type_to_id[tip]\n max_stat = max(df1[stat][id] for id in matches)\n return [df1[\"name\"][id] for id in matches if df1[stat][id] == max_stat]\n \n \n## some testings to find best pokemon based on some criteria \ngetBest('all', 'all', 'hp')\ngetBest('cut', 'rock', 'attack')\ngetBest('all', 'ice', 'speed')\ngetBest('laser-focus', 'all', 'hp')\ngetBest('mega-kick', 'water', 'special_defense')\ngetBest('light-screen', 'rock', 'special_attack') #return \"not found\"\n\n## Check random stat column\ngetBest('mega-kick', 'water', 'water')\ngetBest('mega-kick', 'water', 'bezveze')\n\n\n\n\n\n\n\n\n\n","repo_name":"dejanmarich/pokeapi_rest","sub_path":"pokeapi_rest.py","file_name":"pokeapi_rest.py","file_ext":"py","file_size_in_byte":4448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3724540621","text":"def printt(dl):\n t=dl.head\n for i in range(dl.length):\n print(str(t.val)+\"<->\",end=\"\")\n t=t.next\n print(\"\\n\")\n\nclass Node:\n def __init__(self,val):\n self.val=val\n self.next=None\n self.prev=None\n\nclass DoublyLinkedList:\n def __init__(self):\n self.length=0\n self.head=None\n self.tail=None\n\n def push(self,val):\n n=Node(val)\n if self.length==0:\n self.head=n\n self.tail=n\n else:\n self.tail.next=n\n n.prev=self.tail\n self.tail=n\n self.length+=1\n return self\n\n def pop(self):\n if(self.length==0):\n return None\n elif(self.length==1):\n t=self.tail\n self.head=None\n self.tail=None\n self.length-=1\n return t\n else:\n tail=self.tail\n prev=self.tail.prev\n tail.prev=None\n prev.next=None\n self.tail=prev\n self.length-=1\n return tail\n\n def shift(self):\n if(self.length==0):\n return None\n else:\n old=self.head\n if(self.length==1):\n self.head=None\n self.tail=None\n self.length-=1\n return old\n else:\n self.head=old.next\n self.head.prev=None\n old.next=None\n self.length-=1\n return old\n\n def unshift(self,val):\n n=Node(val)\n if(self.length==0):\n self.head=n\n self.tail=n\n else:\n n.next=self.head\n self.head.prev=n\n self.head=n\n self.length+=1\n return self\n\n def at(self,index):\n t=self.get(index)\n return t.val\n\n def get(self,index):\n if(index<0 or index>=self.length):\n return None\n else:\n if(index<=self.length//2):\n counter=0\n t=self.head\n while(counter!=index):\n t=t.next\n counter+=1\n return t\n else:\n counter=self.length-1\n t=self.tail\n while(counter!=index):\n t=t.prev\n counter-=1\n return t\n\n def set(self,index,val):\n t=self.get(index)\n if(t!=None):\n t.val=val\n return True\n else:\n return False\n\n def insert(self,index,val):\n if(index<0 or index>self.length):\n return None\n elif(index==0):\n self.unshift(val)\n elif(index==self.length):\n self.push(val)\n else:\n n=Node(val)\n next=self.get(index)\n prev=next.prev\n prev.next=n\n next.prev=n\n n.prev=prev\n n.next=next\n self.length+=1\n return True\n\n def remove(self,index):\n if(index<0 or index>=self.length):\n return False\n elif(index==0):\n n=self.get(0)\n self.shift(index)\n 
\n","repo_name":"BenMeehan/Python-Data-Structures","sub_path":"Data Structures/Linked Lists/DoublyLinkedList.py","file_name":"DoublyLinkedList.py","file_ext":"py","file_size_in_byte":2804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"16854188518","text":"# Copyright (c) Bentley Systems, Incorporated. All rights reserved.\r\n# See LICENSE.md in the project root for license terms and full copyright notice.\r\n\r\nimport requests\r\nimport json\r\n\r\nfrom reality_apis.CCS.ccs_utils import (\r\n    CCWorkspaceProperties,\r\n    CCJobType,\r\n    CCJobQuality,\r\n    CCJobSettings,\r\n    CCJobCostParameters,\r\n    CCJobProperties,\r\n)\r\nfrom reality_apis.utils import ReturnValue, JobState, JobDateTime, JobProgress, iTwinCaptureError, iTwinCaptureWarning, __version__\r\n\r\n\r\nclass ContextCaptureService:\r\n    \"\"\"\r\n    Service handling communication with Reality Modeling Service.\r\n\r\n    Args:\r\n        token_factory: An object that implements the abstract functions in AbstractTokenFactory. Used to retrieve the\r\n            service url and the authorization token used to connect with the service.\r\n    \"\"\"\r\n\r\n    def __init__(self, token_factory) -> None:\r\n        self._token_factory = token_factory\r\n        self._session = requests.Session()\r\n        self._service_url = self._token_factory.get_service_url()\r\n\r\n        self._header = {\r\n            \"Authorization\": None,\r\n            \"User-Agent\": f\"Reality Modeling Python SDK/{__version__}\",\r\n            \"Content-type\": \"application/json\",\r\n            \"Accept\": \"application/vnd.bentley.itwin-platform.v1+json\",\r\n        }\r\n\r\n    def _get_header(self) -> dict:\r\n        self._header[\"Authorization\"] = self._token_factory.get_token()\r\n        return self._header\r\n\r\n    @staticmethod\r\n    def _error_msg(status_code, data_json) -> str:\r\n        error = data_json.get(\"error\", {})\r\n        code = error.get(\"code\", \"\")\r\n        message = error.get(\"message\", \"\")\r\n        return f\"code {status_code}: {code}, {message}\"\r\n\r\n    def create_workspace(\r\n        self, work_name: str, iTwin_id: str, cc_version: str = \"\"\r\n    ) -> ReturnValue[str]:\r\n        \"\"\"\r\n        Creates a workspace.\r\n\r\n        Args:\r\n            work_name: Name for the workspace.\r\n            iTwin_id: ID of the project.\r\n            cc_version: Version of Context Capture to use.\r\n\r\n        Returns:\r\n            The ID of the workspace, and a potential error message.\r\n        \"\"\"\r\n        wc_dict = {\"name\": work_name, \"iTwinId\": iTwin_id}\r\n        if cc_version != \"\":\r\n            wc_dict[\"contextCaptureVersion\"] = cc_version\r\n        json_data = json.dumps(wc_dict)\r\n        response = self._session.post(\"https://\" + self._service_url + \"/contextcapture/workspaces\", json_data,\r\n                                      headers=self._get_header())\r\n\r\n        try:\r\n            # if the query was successful we return the id of the workspace, else we return an empty string\r\n            data_json = response.json()\r\n            if response.status_code < 200 or response.status_code >= 400:\r\n                return ReturnValue(value=\"\", error=self._error_msg(response.status_code, data_json))\r\n            return ReturnValue(value=data_json[\"workspace\"][\"id\"], error=\"\")\r\n        except json.decoder.JSONDecodeError:\r\n            return ReturnValue(value=\"\",\r\n                               error=self._error_msg(response.status_code, {\"error\": {\"message\": response.text}}))\r\n\r\n
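    # [editor] hedged usage sketch, added for illustration; the names below are\r\n    # placeholders, not taken from this SDK's docs:\r\n    #   service = ContextCaptureService(token_factory)\r\n    #   ret = service.create_workspace('my-workspace', itwin_id)\r\n    #   if ret.error == '': work_id = ret.value\r\n\r\n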
    def delete_workspace(self, work_id: str) -> ReturnValue[bool]:\r\n        \"\"\"\r\n        Deletes a workspace.\r\n\r\n        Args:\r\n            work_id: id of the workspace.\r\n\r\n        Returns:\r\n            True if the workspace was deleted successfully, and a potential error message.\r\n        \"\"\"\r\n        response = self._session.delete(\"https://\" + self._service_url + f\"/contextcapture/workspaces/{work_id}\",\r\n                                        headers=self._get_header())\r\n        try:\r\n            if response.status_code < 200 or response.status_code >= 400:\r\n                data_json = response.json()\r\n                return ReturnValue(value=False, error=self._error_msg(response.status_code, data_json))\r\n            return ReturnValue(value=True, error=\"\")\r\n        except json.decoder.JSONDecodeError:\r\n            return ReturnValue(value=False,\r\n                               error=self._error_msg(response.status_code, {\"error\": {\"message\": response.text}}))\r\n\r\n    def get_workspace_properties(\r\n        self, work_id: str\r\n    ) -> ReturnValue[CCWorkspaceProperties]:\r\n        \"\"\"\r\n        Get all properties of a given workspace.\r\n        By default this function returns a placeholder empty CCWorkspaceProperties if it hasn't succeeded in retrieving\r\n        workspace properties. Use is_error() to be sure the return value is valid.\r\n        Args:\r\n            work_id: id of the workspace.\r\n\r\n        Returns:\r\n            An object with all the workspace properties, and a potential error message.\r\n\r\n        \"\"\"\r\n        response = self._session.get(\"https://\" + self._service_url + f\"/contextcapture/workspaces/{work_id}\",\r\n                                     headers=self._get_header())\r\n        try:\r\n            data_json = response.json()\r\n            if response.status_code < 200 or response.status_code >= 400:\r\n                return ReturnValue(value=CCWorkspaceProperties(),\r\n                                   error=self._error_msg(response.status_code, data_json))\r\n            return ReturnValue(\r\n                value=CCWorkspaceProperties(\r\n                    id=data_json[\"workspace\"][\"id\"],\r\n                    created_date_time=data_json[\"workspace\"][\"createdDateTime\"],\r\n                    name=data_json[\"workspace\"][\"name\"],\r\n                    iTwin_id=data_json[\"workspace\"][\"iTwinId\"],\r\n                    context_capture_version=data_json[\"workspace\"][\"contextCaptureVersion\"],\r\n                ),\r\n                error=\"\",\r\n            )\r\n        except json.decoder.JSONDecodeError:\r\n            return ReturnValue(value=CCWorkspaceProperties(),\r\n                               error=self._error_msg(response.status_code, {\"error\": {\"message\": response.text}}))\r\n        except KeyError as e:\r\n            return ReturnValue(value=CCWorkspaceProperties(),\r\n                               error=str(e))\r\n\r\n    def create_job(\r\n        self, job_type: CCJobType, settings: CCJobSettings, job_name: str, work_id: str\r\n    ) -> ReturnValue[str]:\r\n        \"\"\"\r\n        Creates a job corresponding to the given settings.\r\n\r\n        Args:\r\n            job_type: Type of the job.\r\n            settings: Settings for the job.\r\n            job_name: Name of the job.\r\n            work_id: ID of the workspace to be used.\r\n\r\n        Returns:\r\n            The ID of the job, and a potential error message.\r\n        \"\"\"\r\n        settings_dict, inputs_dict = settings.to_json()\r\n        jc_dict = {\r\n            \"type\": job_type.value,\r\n            \"name\": job_name,\r\n            \"inputs\": inputs_dict[\"inputs\"],\r\n            \"workspaceId\": work_id,\r\n            \"settings\": settings_dict[\"settings\"],\r\n        }\r\n        job_json = json.dumps(jc_dict)\r\n        response = self._session.post(\"https://\" + self._service_url + \"/contextcapture/jobs\", job_json,\r\n                                      headers=self._get_header())\r\n        try:\r\n            data_json = response.json()\r\n            if response.status_code < 200 or response.status_code >= 400:\r\n                return ReturnValue(value=\"\", error=self._error_msg(response.status_code, data_json))\r\n            return ReturnValue(value=data_json[\"job\"][\"id\"], error=\"\")\r\n        except json.decoder.JSONDecodeError:\r\n            return ReturnValue(value=\"\",\r\n                               error=self._error_msg(response.status_code, {\"error\": {\"message\": response.text}}))\r\n        except KeyError as 
e:\r\n return ReturnValue(value=\"\", error=str(e))\r\n\r\n def submit_job(self, job_id: str) -> ReturnValue[bool]:\r\n \"\"\"\r\n Submit a job.\r\n\r\n Args:\r\n job_id: The ID of the job to be submitted.\r\n Returns:\r\n True if the job was successfully submitted, and a potential error message.\r\n \"\"\"\r\n jc_dict = {\r\n \"state\": \"active\",\r\n }\r\n job_json = json.dumps(jc_dict)\r\n response = self._session.patch(\"https://\" + self._service_url + f\"/contextcapture/jobs/{job_id}\", job_json,\r\n headers=self._get_header())\r\n try:\r\n data_json = response.json()\r\n if response.status_code < 200 or response.status_code >= 400:\r\n return ReturnValue(value=False, error=self._error_msg(response.status_code, data_json))\r\n return ReturnValue(value=True, error=\"\")\r\n except json.decoder.JSONDecodeError:\r\n return ReturnValue(value=False,\r\n error=self._error_msg(response.status_code, {\"error\": {\"message\": response.text}}))\r\n\r\n def cancel_job(self, job_id: str) -> ReturnValue[bool]:\r\n \"\"\"\r\n Cancel a job.\r\n\r\n Args:\r\n job_id: The ID of the job to be cancelled.\r\n Returns:\r\n True if the job was successfully cancelled, and a potential error message.\r\n \"\"\"\r\n jc_dict = {\r\n \"state\": \"cancelled\",\r\n }\r\n job_json = json.dumps(jc_dict)\r\n response = self._session.patch(\"https://\" + self._service_url + f\"/contextcapture/jobs/{job_id}\", job_json,\r\n headers=self._get_header())\r\n try:\r\n data_json = response.json()\r\n if response.status_code < 200 or response.status_code >= 400:\r\n return ReturnValue(value=False, error=self._error_msg(response.status_code, data_json))\r\n return ReturnValue(value=True, error=\"\")\r\n except json.decoder.JSONDecodeError:\r\n return ReturnValue(value=False,\r\n error=self._error_msg(response.status_code, {\"error\": {\"message\": response.text}}))\r\n\r\n def delete_job(self, job_id: str) -> ReturnValue[bool]:\r\n \"\"\"\r\n Delete existing job (job cannot already be submitted to be deleted).\r\n\r\n Args:\r\n job_id: The ID of the job to be deleted.\r\n Returns:\r\n True if the job was successfully deleted, and a potential error message.\r\n \"\"\"\r\n response = self._session.delete(\"https://\" + self._service_url + f\"/contextcapture/jobs/{job_id}\",\r\n headers=self._get_header())\r\n try:\r\n if response.status_code < 200 or response.status_code >= 400:\r\n data_json = response.json()\r\n return ReturnValue(value=False, error=self._error_msg(response.status_code, data_json))\r\n return ReturnValue(value=True, error=\"\")\r\n except json.decoder.JSONDecodeError:\r\n return ReturnValue(value=False,\r\n error=self._error_msg(response.status_code, {\"error\": {\"message\": response.text}}))\r\n\r\n def get_job_properties(self, job_id: str) -> ReturnValue[CCJobProperties]:\r\n \"\"\"\r\n Get properties for a given job.\r\n By default this function returns an empty CCJobProperties if it didn't succeeded in retrieving settings.\r\n Use is_error() to be sure the return value is valid.\r\n\r\n Args:\r\n job_id: The ID of the relevant job.\r\n Returns:\r\n The properties for the job, and a potential error message.\r\n \"\"\"\r\n response = self._session.get(\"https://\" + self._service_url + f\"/contextcapture/jobs/{job_id}\",\r\n headers=self._get_header())\r\n\r\n try:\r\n data_json = response.json()\r\n if response.status_code < 200 or response.status_code >= 400:\r\n return ReturnValue(value=CCJobProperties(), error=self._error_msg(response.status_code, data_json))\r\n\r\n job_name = 
data_json[\"job\"].get(\"name\", \"\")\r\n job_type = CCJobType(data_json[\"job\"].get(\"type\", CCJobType.NONE.value))\r\n job_state = JobState(data_json[\"job\"].get(\"state\", JobState.UNKNOWN.value))\r\n\r\n cost_estimation_parameters = CCJobCostParameters()\r\n estimate = data_json[\"job\"].get(\"costEstimationParameters\", None)\r\n if estimate is not None:\r\n cost_estimation_parameters.giga_pixels = float(\r\n estimate.get(\"gigaPixels\", 0.0)\r\n )\r\n cost_estimation_parameters.mega_points = float(\r\n estimate.get(\"megaPoints\", 0.0)\r\n )\r\n cost_estimation_parameters.mesh_quality = CCJobQuality(\r\n estimate.get(\"meshQuality\", CCJobQuality.UNKNOWN.value)\r\n )\r\n estimated_cost = float(data_json[\"job\"].get(\"estimatedCost\", 0.0))\r\n created_date_time = data_json[\"job\"].get(\"createdDateTime\", \"\")\r\n\r\n errors = []\r\n warnings = []\r\n\r\n execution = data_json[\"job\"].get(\"executionInformation\", None)\r\n if execution is not None:\r\n job_date_time = JobDateTime(\r\n created_date_time=created_date_time,\r\n submission_date_time=execution.get(\"submittedDateTime\", \"\"),\r\n started_date_time=execution.get(\"startedDateTime\", \"\"),\r\n ended_date_time=execution.get(\"endedDateTime\", \"\"),\r\n )\r\n estimated_units = float(execution.get(\"estimatedUnits\", 0.0))\r\n\r\n exec_errors = execution.get(\"errors\", None)\r\n if exec_errors is not None:\r\n for error in exec_errors:\r\n itwin_error = iTwinCaptureError(code=error.get(\"code\", \"\"), title=error.get(\"title\", \"\"), message=error.get(\"message\", \"\"))\r\n params = error.get(\"params\", [])\r\n itwin_error.params.extend(params)\r\n errors.append(itwin_error)\r\n\r\n exec_warnings = execution.get(\"warnings\", None)\r\n if exec_warnings is not None:\r\n for warning in exec_warnings:\r\n itwin_warning = iTwinCaptureWarning(code=warning.get(\"code\", \"\"), title=warning.get(\"title\", \"\"), message=warning.get(\"message\", \"\"))\r\n params = warning.get(\"params\", [])\r\n\r\n itwin_warning.params.extend(params)\r\n warnings.append(itwin_warning)\r\n else:\r\n job_date_time = JobDateTime(created_date_time=created_date_time)\r\n estimated_units = 0.0\r\n\r\n itwin_id = data_json[\"job\"].get(\"iTwinId\", \"\")\r\n location = data_json[\"job\"].get(\"location\", \"\")\r\n email = data_json[\"job\"].get(\"email\", \"\")\r\n work_id = data_json[\"job\"].get(\"workspaceId\", \"\")\r\n\r\n job_settings = CCJobSettings.from_json(data_json[\"job\"]).value\r\n\r\n return ReturnValue(\r\n value=CCJobProperties(\r\n job_id=job_id,\r\n job_name=job_name,\r\n job_type=job_type,\r\n job_state=job_state,\r\n job_date_time=job_date_time,\r\n iTwin_id=itwin_id,\r\n location=location,\r\n email=email,\r\n work_id=work_id,\r\n estimated_units=estimated_units,\r\n job_settings=job_settings,\r\n cost_estimation_parameters=cost_estimation_parameters,\r\n estimated_cost=estimated_cost,\r\n warnings=warnings,\r\n errors=errors,\r\n ),\r\n error=\"\",\r\n )\r\n\r\n except json.decoder.JSONDecodeError:\r\n return ReturnValue(value=CCJobProperties(),\r\n error=self._error_msg(response.status_code, {\"error\": {\"message\": response.text}}))\r\n except (KeyError, ValueError) as e:\r\n return ReturnValue(value=CCJobProperties(), error=str(e))\r\n\r\n def get_job_progress(self, job_id: str) -> ReturnValue[JobProgress]:\r\n \"\"\"\r\n Get progress for a given job.\r\n\r\n Args:\r\n job_id: The ID of the relevant job.\r\n Returns:\r\n The progress for the job, and a potential error message.\r\n \"\"\"\r\n response = 
self._session.get(\"https://\" + self._service_url + f\"/contextcapture/jobs/{job_id}/progress\",\r\n headers=self._get_header())\r\n try:\r\n data_json = response.json()\r\n if response.status_code < 200 or response.status_code >= 400:\r\n return ReturnValue(value=JobProgress(state=JobState.UNKNOWN, progress=-1, step=\"\"),\r\n error=self._error_msg(response.status_code, data_json))\r\n\r\n dp = data_json[\"jobProgress\"]\r\n state = JobState(dp[\"state\"].lower())\r\n return ReturnValue(value=JobProgress(state=state, progress=int(dp[\"percentage\"]), step=dp[\"step\"]),\r\n error=\"\")\r\n except json.decoder.JSONDecodeError:\r\n return ReturnValue(value=JobProgress(state=JobState.UNKNOWN, progress=-1, step=\"\"),\r\n error=self._error_msg(response.status_code, {\"error\": {\"message\": response.text}}))\r\n except (KeyError, ValueError) as e:\r\n return ReturnValue(\r\n value=JobProgress(state=JobState.UNKNOWN, progress=-1, step=\"\"),\r\n error=str(e))\r\n\r\n def get_job_estimated_cost(\r\n self, job_id: str, cost_parameters: CCJobCostParameters\r\n ) -> ReturnValue[float]:\r\n \"\"\"\r\n Get estimated cost for a given job.\r\n\r\n Args:\r\n job_id: The ID of the relevant job.\r\n cost_parameters: New cost estimation parameters for the job.\r\n Returns:\r\n The estimated cost of the job, and a potential error\r\n message.\r\n \"\"\"\r\n pi_dict = {\"costEstimationParameters\": {\r\n \"gigaPixels\": str(cost_parameters.giga_pixels),\r\n \"megaPoints\": str(cost_parameters.mega_points),\r\n \"meshQuality\": cost_parameters.mesh_quality.value}}\r\n json_data = json.dumps(pi_dict)\r\n response = self._session.patch(\"https://\" + self._service_url + f\"/contextcapture/jobs/{job_id}\", json_data,\r\n headers=self._get_header())\r\n try:\r\n data_json = response.json()\r\n if response.status_code < 200 or response.status_code >= 400:\r\n return ReturnValue(value=-1.0, error=self._error_msg(response.status_code, data_json))\r\n ret = float(data_json[\"job\"].get(\"estimatedCost\", -1.0))\r\n if ret != -1.0:\r\n return ReturnValue(value=ret, error=\"\")\r\n return ReturnValue(value=ret, error=\"No estimatedCost field in received json\")\r\n except json.decoder.JSONDecodeError:\r\n return ReturnValue(value=-1.0, error=self._error_msg(response.status_code, {\"error\": {\"message\": response.text}}))\r\n except (KeyError, ValueError) as e:\r\n return ReturnValue(value=-1.0, error=str(e))\r\n","repo_name":"iTwin/reality-capture","sub_path":"python/reality_apis/CCS/context_capture_service.py","file_name":"context_capture_service.py","file_ext":"py","file_size_in_byte":19029,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"22805167858","text":"# coding: utf-8\nimport requests\nfrom requests.auth import HTTPDigestAuth\nimport json\nfrom pprint import pprint\nimport argparse\nfrom noc.core.management.base import BaseCommand\nfrom noc.core.mongo.connection import connect\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument(\"-l\", \"--login\", dest=\"login\", default=None)\n\n def handle(self, *args, **options):\n connect()\n login = 'aaa'\n output=[]\n if login:\n from noc.sa.models.action import Action\n from noc.sa.models.managedobject import ManagedObject\n data = {'host': '10.6.6.40', 'llid': 1, 'login': '16532', 'passwd': '2za9d2s8', 'port': '1/1/8', 'sn': 'ZTEGC101F4B6', 'vlanid': 1301}\n mo = ManagedObject.objects.get(address = data['host'])\n action = Action.objects.get(name='zteunregonu')\n cmd1 
= str(action.expand(mo,**data))\n params={\"commands\":cmd1.split('\\n'), \"ignore_cli_errors\":True}\n result = mo.scripts.commands(**params)\n output.append(result)\n action = Action.objects.get(name='f620-router')\n cmd2 = str(action.expand(mo,**data))\n print(cmd2.split('\\n'))\n# return 1\n params={\"commands\":cmd2.split('\\n'), \"ignore_cli_errors\":True}\n result = mo.scripts.commands(**params)\n output.append(result)\n action = Action.objects.get(name='zteconfpppoe')\n cmd21 = str(action.expand(mo,**data))\n print(cmd21.split('\\n'))\n# return 1\n params={\"commands\":cmd21.split('\\n'), \"ignore_cli_errors\":True}\n result = mo.scripts.commands(**params)\n# action = Action.objects.get(name='ztewrite')\n# com3 = str(action.expand(mo))\n result = mo.scripts.commands(commands = ['write'])\n output.append(result)\n return 200, [output]\n\nif __name__ == \"__main__\":\n Command().run()\n","repo_name":"santor72/noc_custom","sub_path":"commands/testregonu.py","file_name":"testregonu.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41263920954","text":"\"\"\"\nGiven a signed 32-bit integer x, return x with its digits reversed.\nIf reversing x causes the value to go outside the signed 32-bit\ninteger range [-231, 231 - 1], then return 0.\n\nAssume the environment does not allow you to store 64-bit integers\n(signed or unsigned).\n\nExample 1:\n\nInput: x = 123\nOutput: 321\nExample 2:\n\nInput: x = -123\nOutput: -321\nExample 3:\n\nInput: x = 120\nOutput: 21\nExample 4:\n\nInput: x = 0\nOutput: 0\n\n\nConstraints:\n\n-231 <= x <= 231 - 1\n\"\"\"\n\nclass Solution:\n def reverse(self, x: int) -> int:\n sign = -1 if x < 0 else 1\n x *= sign\n res = 0\n while x > 0:\n res = res * 10 + x % 10\n x //= 10\n res *= sign\n return res if -2147483648 <= res <= 2147483647 else 0\n\nsol = Solution()\nprint(sol.reverse(1534236469))","repo_name":"Pavel-Bylkov/lessons","sub_path":"codo/Zadachi/lee_reverse_num.py","file_name":"lee_reverse_num.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"42083677064","text":"'''\nRead about logign at http://docs.python.org/library/logging.html\n\nSee examples of using this module in module ./basic_logger_example.py\n'''\n\nfrom __future__ import print_function # Python 2 vs. 3 compatibility --> use print()\nfrom __future__ import division # Python 2 vs. 3 compatibility --> / returns float\nfrom __future__ import unicode_literals # Python 2 vs. 3 compatibility --> / returns float\nfrom __future__ import absolute_import # Python 2 vs. 3 compatibility --> absolute imports\n\nimport logging\nimport sys\n\n__console_handler = None\n\n\ndef make_logger(module, target=None, mask=\"%(module)s%(type)s%(method)s\"):\n '''\n Retrieve instance of logger for speficig module and optionally for target.\n '''\n assert module in sys.modules, \"Unknown module %s\" % module\n type = __extract_type(target)\n method = __extract_method(target)\n name_params = {\n 'module': '__' + get_run_module_name() + '__' if module == '__main__' else module,\n 'type': '.' + type if type else '',\n 'method': '.' 
+ method if method else '' }\n logger_name = mask % name_params\n enable_console_handler()\n return logging.getLogger(logger_name) \n\n\ndef inject_logger(any_class, attribute_name='logger'):\n '''\n Inject logger instance to given class (firt argument).\n \n This method also test if logger was already set before and creates new only of not.\n Name of class attribute can be custtomized by 2nd argument \"attribute_name\".\n Private names of atrribute (like \"__logger\") are also supported. \n '''\n if attribute_name.startswith(\"__\") and not attribute_name.endswith(\"__\"):\n attribute_name = \"_\" + any_class.__name__ + attribute_name \n if not hasattr(any_class, attribute_name):\n setattr(any_class, attribute_name, make_logger(any_class.__module__, target=any_class))\n\n\ndef enable_console_handler(format=\"%(asctime)s [%(levelname)s] %(name)s -- %(message)s\"):\n '''\n Enable stream handler to logging on console for all loggers.\n \n Has one optional parameter \"format\" which define format of output logging messages.\n See http://docs.python.org/library/logging.html?highlight=logging.getlogger#formatter\n for all mapping keys which can be used in format string.\n \n This method returns False if stream handler was already created \n (it means that someone else already call this mehtod), otherwise return True.\n ''' \n global __console_handler\n if (__console_handler): return False\n __console_handler = logging.StreamHandler()\n formatter = logging.Formatter(format)\n __console_handler.setFormatter(formatter)\n logging.getLogger().addHandler(__console_handler)\n return True\n\ndef basic_logger_make(logger_name=None, level='info', target=None):\n LEVELS = {'debug': logging.DEBUG,\\\n 'info': logging.INFO,\\\n 'warning': logging.WARNING,\\\n 'error': logging.ERROR,\\\n 'critical': logging.CRITICAL}\n \n result = logging.getLogger(logger_name) \n result.setLevel(LEVELS.get(level, logging.INFO))\n enable_console_handler()\n make_logger(__name__).warning(\"You are using deprecated way of creating logger instance. 
\" +\\\n \"Use make_logger() instead of basic_logger_make()\")\n return result\n\n\ndef __extract_type(object):\n _type = type(object).__name__\n if _type == 'instancemethod':\n return object.im_class.__name__\n elif _type =='type':\n return object.__name__\n else:\n return None\n\n\ndef __extract_method(object):\n _type = type(object).__name__\n if _type == 'instancemethod' or _type == 'function' or _type == 'classobj':\n return object.__name__\n else:\n return None\n\n\ndef get_run_module_name():\n main_name=sys.argv[0]\n indx_py=main_name.rfind(\"/\") #remove path\n if indx_py!= -1: main_name=main_name[indx_py+1:]\n indx_py=main_name.rfind(\".\") #remove \".py\"\n if indx_py!= -1: main_name=main_name[:indx_py]\n return main_name\n\n\nclass ObjectWithLogger(object):\n def __new__(cls, *args, **kwds):\n inject_logger(cls)\n return object.__new__(cls)\n\nclass with_logger:\n def __init__(self, attribute_name=\"logger\"):\n self.attribute_name = attribute_name\n \n def __call__(self, target):\n inject_logger(target, self.attribute_name)\n return target\n","repo_name":"milos-korenciak/xelatex-test-heroku","sub_path":"xelatex_test_heroku/basic_logger.py","file_name":"basic_logger.py","file_ext":"py","file_size_in_byte":4351,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"2128028247","text":"n = int(input())\narr = []\nfor _ in range(n):\n arr.append(input())\n\nhead_x = -1\nhead_y = -1\nfor i in range(n):\n for j in range(n):\n # 가장 첫번째 *이 머리이다..\n if arr[i][j] == '*' and head_x== -1:\n # print(i,j)\n # 머리는 아래에 심장이므로\n head_x = i+ 2\n head_y = j + 1\n break\n\n\nres = []\ncount = 0\n#왼팔 구하기\nx,y = head_x-1, head_y-1\nwhile y>0:\n y -=1\n if arr[x][y] == '*':\n count +=1\n else:\n break\nres.append(count)\ncount = 0\n\n# 오른팔 구하기\nx,y = head_x-1, head_y-1\nwhile y 230:\n # for a window of 500x400, and a turtle size of 40x40, if a turtle reaches 230 it has won\n is_race_on = False\n winning_color = turtle.pencolor()\n if winning_color == user_in:\n print(f\"You've won!, the {winning_color} turtle is the winner\")\n else:\n print(f\"You've lost!, the {winning_color} turtle is the winner\")\n\n random_distance = random.randint(0, 30)\n turtle.forward(random_distance)\n\nscreen.exitonclick()\n","repo_name":"syedabrarali/PythonProjects","sub_path":"turtleRace.py","file_name":"turtleRace.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42176600427","text":"import pygame, sys\r\n\r\nfrom pyfiles.Stage import Stage as TestPage\r\n\r\ndef main():\r\n # setup args here:\r\n args = 1\r\n\r\n window = pygame.display.set_mode((1000, 600))\r\n pygame.display.set_caption(\"Rescoding Page Tester\")\r\n\r\n page = TestPage(window, args)\r\n pygame.init()\r\n nextPage, nextArgs = page.mainloop()\r\n\r\n print(\"\\nValues returned:\")\r\n print(\"nextPage =\", nextPage)\r\n print(\"nextArgs =\", nextArgs)\r\n\r\n pygame.quit()\r\n sys.exit()\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"GraceFu/Rescoding","sub_path":"Rescoding/PageTester.py","file_name":"PageTester.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"6420036229","text":"from django.shortcuts import render\nfrom django.http import JsonResponse\nfrom django.core.paginator import Paginator\nfrom django.forms.models import model_to_dict\n\n# cache and rate limit\nfrom 
django.views.decorators.cache import cache_page\nfrom ratelimit.decorators import ratelimit\nfrom ratelimit.core import get_usage, is_ratelimited\n\nimport math\nimport json\nimport scipy\nimport numpy\n\nfrom company.models import CompanyDescription\nfrom company.models import Company\n\nfrom yata.handy import *\nfrom player.models import Player\nfrom player.functions import updatePlayer\n\n\ndef index(request):\n try:\n player = getPlayer(request.session.get(\"player\", {}).get(\"tId\", -1))\n\n context = {\"player\": player, \"compcat\": True, \"view\": {\"index\": True}}\n page = 'company/content-reload.html' if request.method == 'POST' else 'company.html'\n return render(request, page, context)\n\n except Exception as e:\n return returnError(exc=e, session=request.session)\n\ndef browse(request):\n try:\n player = getPlayer(request.session.get(\"player\", {}).get(\"tId\", -1))\n\n # get all companies\n companies = CompanyDescription.objects.order_by(\"name\")\n\n if request.POST.get(\"type\") == \"company-details\":\n company_details = companies.filter(tId=request.POST.get(\"company_id\")).first()\n return render(request, \"company/browse/details.html\", {\"player\": player, \"company_details\": company_details})\n\n page = 'company/content-reload.html' if request.method == 'POST' else 'company.html'\n context = {\"player\": player, \"companies\": companies,\"compcat\": True, \"view\": {\"browse\": True}}\n return render(request, page, context)\n\n except Exception as e:\n return returnError(exc=e, session=request.session)\n\ndef supervise(request):\n try:\n player = getPlayer(request.session.get(\"player\", {}).get(\"tId\", -1))\n message = False\n\n # get company\n company = Company.objects.filter(tId=player.companyId).first()\n if company is None and not player.companyDi:\n context = {\"player\": player, \"compcat\": True, \"view\": {\"supervise\": True}}\n page = 'company/content-reload.html' if request.method == 'POST' else 'company.html'\n return render(request, page, context)\n elif company is None and player.companyDi:\n updatePlayer(player)\n company = Company.objects.filter(tId=player.companyId).first()\n error, message = company.update_info()\n\n # update company\n if player.companyDi and ((tsnow() - company.timestamp) > 3600 or request.POST.get(\"type\") == \"update-data\"):\n error, message = company.update_info()\n\n # add employees requirements and potential efficiency on the fly\n company_positions = company.company_description.position_set.all()\n employees = company.employee_set.all().order_by(\"-effectiveness_total\")\n\n # prepare effectiveness matrix\n ws_eff_matrix = []\n company.effectiveness_ws_act = 0\n hrm = 1.1 if company.director_hrm else 0.9\n\n # modify employees positions on the fy if simu\n employees_simu = {}\n if request.POST.get(\"type\", False) == \"employees-simu\":\n for k, v in json.loads(request.POST.get(\"employees_position_simu\", \"{}\")).items():\n e = employees.filter(tId=k).first()\n if e is None:\n continue\n if e.position != v:\n employees_simu[k] = v\n\n # get all current positions\n positions = [company_positions.filter(name=employees_simu.get(str(e.tId), e.position)).first() for e in employees]\n\n # loop over employees\n now = tsnow()\n for employee in employees:\n employee.last_action_relative = now - employee.last_action\n employee.position = employees_simu.get(str(employee.tId), employee.position)\n position = company_positions.filter(name=employee.position).first()\n employee.man_required = 0 if position is None else 
position.man_required\n employee.int_required = 0 if position is None else position.int_required\n employee.end_required = 0 if position is None else position.end_required\n\n # compute ws_eff matrix (each row is an employee, each column is a position)\n sta = [employee.manual_labor, employee.intelligence, employee.endurance]\n ws_eff_matrix_row = []\n for p in positions:\n # compute S and P\n req = [p.man_required, p.int_required, p.end_required]\n\n # test if not unassined\n if sum(req):\n Pi = req.index(max(req))\n Si = req.index(min([s for s in req if s]))\n P = max(hrm * sta[Pi] / float(req[Pi]), 1)\n S = max(hrm * sta[Si] / float(req[Si]), 1)\n ws_eff = min(45, 45 * P) + 5*math.log2(P) + min(45, 45 * S) + 5*math.log2(S) + employee.effectiveness_merits\n else:\n ws_eff = 0\n\n ws_eff_matrix_row.append(ws_eff)\n\n # use simu value if necessary\n if p.name == employee.position and employees_simu.get(str(employee.tId), False):\n employee.effectiveness_total -= employee.effectiveness_working_stats\n employee.effectiveness_working_stats = ws_eff\n employee.effectiveness_total += employee.effectiveness_working_stats\n employee.simu = True\n\n t = employee.effectiveness_total\n n = employee.effectiveness_addiction + employee.effectiveness_inactivity\n employee.effectiveness_potential = 100 * (t + n) / max(t, 1)\n company.effectiveness_ws_act += employee.effectiveness_working_stats\n\n ws_eff_matrix.append(ws_eff_matrix_row)\n try:\n ws_eff_matrix = numpy.array(ws_eff_matrix)\n row_ind, col_ind = scipy.optimize.linear_sum_assignment(ws_eff_matrix, maximize=True)\n company.effectiveness_ws_max = round(ws_eff_matrix[row_ind, col_ind].sum())\n company.effectiveness_ws_err = round(100 * (company.effectiveness_ws_act)/company.effectiveness_ws_max)\n company.employees_suggestion = [[list(employees)[i].name, list(employees)[i].tId, positions[j].name, ws_eff_matrix[i, j], list(employees)[i].effectiveness_working_stats] for i, j in zip(row_ind, col_ind)]\n except BaseException as e:\n print(e)\n pass\n\n # send employees if simu\n if request.POST.get(\"type\", False) == \"employees-simu\":\n context = {\"company\": company, \"company_positions\": company_positions, \"employees\": employees}\n return render(request, \"company/supervise/employees.html\", context)\n\n # send employees if show details\n if request.POST.get(\"type\", False) == \"show-details\":\n company_data = company.companydata_set.filter(timestamp=request.POST.get(\"timestamp\")).first()\n if company_data is not None:\n employees = json.loads(company_data.employees)\n return render(request, \"company/supervise/details.html\", {\"company_data\": company_data, \"employees\": employees})\n\n # get company data\n company_data = company.companydata_set.all().order_by(\"-timestamp\")\n company_data_p = Paginator(company_data, 7)\n if request.GET.get('page_d') is not None:\n return render(request, \"company/supervise/logs.html\", {\"company_data_p\": company_data_p.get_page(request.GET.get('page_d'))})\n\n # get company stock\n company_stock = company.companystock_set.all().order_by(\"-timestamp\")\n company_stock_p = Paginator(company_stock, 25)\n if request.GET.get('page_s') is not None:\n print(request.GET.get('page_s'))\n return render(request, \"company/supervise/stock.html\", {\"company_stock_p\": company_stock_p.get_page(request.GET.get('page_s'))})\n\n # create employee graph\n # current employees [id, name]\n employee_graph_headers = [[str(e.tId), f'{e.name} [{e.tId}]'] for e in employees]\n employee_graph_data = []\n for data in 
company_data:\n d = json.loads(data.employees)\n tmp_data = [data.timestamp, []]\n for e_id, e_name in employee_graph_headers:\n to_append = [d.get(e_id, {}).get(\"effectiveness_total\", \"undefined\"), d.get(e_id, {}).get(\"position\", \"undefined\")]\n if to_append[0] == 0:\n to_append[0] = \"undefined\"\n tmp_data[1].append(to_append)\n\n employee_graph_data.append(tmp_data)\n\n employees_graph = {\"header\": employee_graph_headers, \"data\": employee_graph_data}\n\n company.save()\n\n context = {\"player\": player,\n \"company\": company,\n \"company_positions\": company_positions,\n \"company_data\": company_data,\n \"company_data_p\": company_data_p.get_page(1),\n \"company_stock\": company_stock,\n \"company_stock_p\": company_stock_p.get_page(1),\n \"employees\": employees,\n \"employees_graph\": employees_graph,\n \"compcat\": True,\n \"view\": {\"supervise\": True}}\n\n if message:\n sub = \"Sub\" if request.method == 'POST' else \"\"\n if error:\n context[\"errorMessage\" + sub] = \"Company: API error {apiErrorString}, data not updated\".format(**message)\n else:\n context[\"validMessage\" + sub] = \"Company data has been updated.\"\n\n page = 'company/content-reload.html' if request.method == 'POST' else 'company.html'\n return render(request, page, context)\n\n except Exception as e:\n return returnError(exc=e, session=request.session)\n\n\n# @cache_page(60*10)\n# def ws(request):\n# payload = {\"effectiveness\": []}\n# for company in Company.objects.all():\n# if not company.director:\n# continue\n#\n# # add employees requirements and potential efficiency on the fly\n# company_positions = company.company_description.position_set.all()\n# employees = company.employee_set.all().order_by(\"-effectiveness_total\")\n#\n# manager = employees.filter(position=\"Manager\").first()\n# manager_effectiveness = manager.effectiveness_total if manager is not None else 0\n# for employee in employees:\n# position = company_positions.filter(name=employee.position).first()\n# if position is None:\n# continue\n#\n# employee.man_required = 0 if position is None else position.man_required\n# employee.int_required = 0 if position is None else position.int_required\n# employee.end_required = 0 if position is None else position.end_required\n# t = employee.effectiveness_total\n# n = employee.effectiveness_addiction + employee.effectiveness_inactivity\n# # compute theoretical efficiency\n# req = [employee.man_required, employee.int_required, employee.end_required]\n# sta = [employee.manual_labor, employee.intelligence, employee.endurance]\n# if sum(req):\n# Pi = req.index(max(req))\n# Si = req.index(min([s for s in req if s]))\n# P = sta[Pi] / float(req[Pi])\n# S = sta[Si] / float(req[Si])\n#\n# ws = {\n# \"working_stats\": employee.effectiveness_working_stats,\n# \"settled_in\": employee.effectiveness_settled_in,\n# \"director_education\": employee.effectiveness_director_education,\n# \"addiction\": employee.effectiveness_addiction,\n# \"inactivity\": employee.effectiveness_inactivity,\n# \"management\": employee.effectiveness_management,\n# \"book_bonus\": employee.effectiveness_book_bonus,\n# \"merits\": employee.effectiveness_merits,\n# \"effectiveness_total\": employee.effectiveness_total,\n# \"manager_effectiveness\": manager_effectiveness,\n# \"position\": employee.position,\n# \"director_hrm\": company.director_hrm,\n# \"primary_ratio\": P,\n# \"secondary_ratio\": S,\n# \"company_id\": company.company_description.tId,\n# \"company_name\": company.company_description.name\n# }\n#\n# 
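# --- Editor's note (not in the original repo): minimal standalone sketch of the assignment step used in supervise() above; toy values only. ---\n# scipy.optimize.linear_sum_assignment solves the assignment (Hungarian) problem; with maximize=True it picks\n# one position (column) per employee (row) so that the summed effectiveness is maximal. Depending on the SciPy\n# version, scipy.optimize may need an explicit import (the module above only does \"import scipy\").\n#\n#   import numpy as np\n#   from scipy.optimize import linear_sum_assignment\n#   eff = np.array([[10., 2.], [4., 8.]])         # rows: employees, columns: positions\n#   rows, cols = linear_sum_assignment(eff, maximize=True)\n#   assert eff[rows, cols].sum() == 18.0          # employee 0 -> position 0, employee 1 -> position 1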
payload[\"effectiveness\"].append(ws)\n#\n# return JsonResponse(payload, status=200)\n","repo_name":"OranWeb/yata-","sub_path":"company/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22297823094","text":"import json\nimport logging\nimport os\nfrom binascii import hexlify\n\nimport pymongo\nfrom mode import *\nimport hashlib\nimport requests\n\n# Data for each document\nID = \"id\"\nSHORT_TITLE = \"short_title\"\nQUESTION = \"question\"\nDESCRIPTION = \"description\"\nSTART_DATE = \"start_date\"\nCHAMBER = \"chamber\"\nSPONSOR = \"sponsor\"\n# --- also create\nBALLOTSPEC_HASH = \"ballotspec_hash\"\n\n# Connection String\n_client = None\n\n\ndef _get_client():\n global _client\n if _client is None:\n _client = pymongo.MongoClient(mongosettings[URL])\n return _client\n\n\nlog = logging.getLogger(__name__)\n\n\ndef hash_ballotspec(ballotspec_string):\n h = hashlib.sha256()\n h.update(str(ballotspec_string).encode('utf-8'))\n return (h.hexdigest())\n\n\ndef push_to_chain(method, params):\n log.info(f\"Pushing to BC API: {json.dumps(params)}\")\n print(method)\n print(params)\n r = requests.post(\"https://api.blockchain.suzuka.flux.party/members/api\", data=json.dumps({\"method\": method, \"params\": params}))\n print(r.text)\n log.info(f\"push_to_chain Response: {r}\\n\\n-- Response content {r.content}\\n\\n-- As text: {r.text}\")\n return json.loads(r.text)[\"billCreationTxid\"]\n\n\ndef render_spec_hash(_s):\n if isinstance(_s, str) or type(_s) is str:\n if _s[:2] != \"0x\" or len(_s) != 66:\n return \"0x\" + (\"00\" * (32 - len(_s)) + hexlify(_s.encode()).decode())\n return _s\n\ndef update_ballotspecs(id, short_title, question, description, start_date, chamber, sponsor):\n db = _get_client()[mongosettings[MONGODB]]\n ballotspecs_collection = db[mongosettings[BALLOTSPECSCOLLECTION]]\n bills_collection = db[mongosettings[BILLSCOLLECTION]]\n\n input_dict = {\n ID: id,\n SHORT_TITLE: short_title,\n QUESTION: question,\n DESCRIPTION: description,\n START_DATE: start_date,\n CHAMBER: chamber,\n SPONSOR: sponsor,\n }\n # create ballotspec_hash\n\n issue_string = json.dumps(input_dict)\n\n ballotspec_dict = {\n \"ballotTitle\": id,\n \"longDesc\": issue_string,\n \"shortDesc\": short_title,\n \"ballotVersion\": 2,\n \"optionsVersion\": 1,\n }\n ballot_spec_sz = json.dumps(ballotspec_dict)\n bs_h = hash_ballotspec(ballot_spec_sz)\n\n try:\n # Post to API => posts the blockchain\n TxID = push_to_chain(\"ballot_publish\", {\n \"specHash\": bs_h, #render_spec_hash(id),\n \"ballotSpec\": ballot_spec_sz,\n \"realSpecHash\": bs_h\n })\n print(\"Bill\")\n print(bs_h)\n print(TxID)\n except Exception as e:\n import traceback\n log.error(f\"Error pushing to chain: {e}\\n\\n{traceback.format_tb(e.__traceback__)}\\n\\nCONTINUING\")\n\n try:\n ballotspecs_collection.insert_one(\n {'_id': input_dict[\"id\"],\n 'data': input_dict,\n BALLOTSPEC_HASH: bs_h,\n # \"tx_id\" : TxID,\n \"specHash\": bs_h,\n \"ballotSpec\": ballot_spec_sz,\n \"realSpecHash\": bs_h})\n except Exception as e:\n print(e)","repo_name":"voteflux/voting-app-api","sub_path":"update_ballotspecs_db.py","file_name":"update_ballotspecs_db.py","file_ext":"py","file_size_in_byte":3023,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"16382712219","text":"def main():\n user_input = input(\"Input a variable name: \")\n snake_case(user_input)\n\n\ndef 
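# --- Editor's note (illustration, not in the original file): expected behaviour of the snake_case helper defined below. ---\n# snake_case(\"preferredFirstName\") prints \"preferred_first_name\": every upper-case letter is lowered and,\n# except at index 0, prefixed with an underscore.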
snake_case(user_variable):\n    new_var = \"\"\n    for index, letter in enumerate(user_variable):\n        if letter.isupper():\n            if index == 0:\n                new_var += letter.lower()\n            else:\n                new_var += f\"_{letter.lower()}\"\n        else:\n            new_var += letter\n    print(new_var)\n\nmain()","repo_name":"OfirPicciotto/CS50P","sub_path":"pset2/camel/camel.py","file_name":"camel.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27143255668","text":"'''\r\nGiven an array and a range [lowVal, highVal], partition the array around the range such that array is divided in three parts. \r\n1) All elements smaller than lowVal come first. \r\n2) All elements in range lowVal to highVal come next. \r\n3) All elements greater than highVal appear in the end. \r\nThe individual elements of three sets can appear in any order.\r\n\r\nExamples: \r\nInput: arr[] = {1, 14, 5, 20, 4, 2, 54, 20, 87, 98, 3, 1, 32} \r\n        lowVal = 14, highVal = 20\r\nOutput: arr[] = {1, 5, 4, 2, 1, 3, 14, 20, 20, 98, 87, 32, 54}\r\n\r\n'''\r\n\r\n# T(n)= O(n) S(n)=O(1) \r\n# 1 approach is by sorting but its time complexity will be O(nlogn)\r\n\r\n\r\ndef threewaypartitioning(a,n,leftval,rightval):\r\n    start=0\r\n    end=n-1\r\n    i=0\r\n    while i<=end:\r\n        if a[i]<leftval:\r\n            a[i],a[start]=a[start],a[i]\r\n            start+=1\r\n            i+=1\r\n        elif a[i]>rightval:\r\n            a[i],a[end]=a[end],a[i]\r\n            end-=1\r\n        else:\r\n            i+=1\r\n    return 1\r\n\r\n\r\na=[1, 14, 5, 20, 4, 2, 54, 20, 87, 98, 3, 1, 32]\r\nn=len(a)\r\nprint(threewaypartitioning(a,n,14,20))\r\n\r\nprint(\"Modified Array :\")\r\nfor i in range (n):\r\n    print(a[i],end=' ')","repo_name":"shrutii2/Array-in-Python","sub_path":"threewaypartitioning.py","file_name":"threewaypartitioning.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12913376118","text":"def left_factor(grammar):\n    non_terminals = set()\n    factorized_rules = {}\n\n    for rule in grammar:\n        non_terminal, productions = rule.split('->')\n        non_terminals.add(non_terminal)\n\n        productions = productions.split('|')\n        common_prefix = longest_common_prefix(productions)\n\n        if common_prefix:\n            if non_terminal not in factorized_rules:\n                factorized_rules[non_terminal] = []\n\n            factorized_productions = [production[len(common_prefix):] for production in productions if production.startswith(common_prefix)]\n            factorized_rules[non_terminal].append(common_prefix + non_terminal + \"'\")\n            factorized_rules[non_terminal + \"'\"] = factorized_productions\n\n        else:\n            if non_terminal not in factorized_rules:\n                factorized_rules[non_terminal] = []\n\n            factorized_rules[non_terminal].extend(productions)\n\n    return factorized_rules\n\n\ndef longest_common_prefix(strings):\n    if not strings:\n        return ''\n\n    prefix = strings[0]\n\n    for string in strings[1:]:\n        while not string.startswith(prefix):\n            prefix = prefix[:-1]\n\n    return prefix\n\n\n# Example usage\ngrammar = [\n    'S->abc|abx|aby',\n    'A->abAB|abA|ab',\n    'B->bB|b',\n]\n\nfactorized_grammar = left_factor(grammar)\n\nprint(\"Factorized Grammar:\")\nfor non_terminal, productions in factorized_grammar.items():\n    print(f\"{non_terminal}->{'|'.join(prod if prod != '' else '#' for prod in productions)}\")\n\n# Output\n\n# Factorized Grammar:\n# S->abS'\n# S'->c|x|y\n# A->abA'\n# A'->AB|A|#\n# B->bB'\n# B'->B|#\n","repo_name":"vanshika-singh518/Compiler-Design","sub_path":"4. 
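# --- Editor's note (illustration): how the two-pointer pass in threewaypartitioning() above behaves. ---\n# i scans left to right: values below lowVal are swapped to the front (start advances with i), values above\n# highVal are swapped to the back (end retreats while i stays put, because the swapped-in value is still\n# unexamined), and values inside [lowVal, highVal] are left in the middle. One pass: O(n) time, O(1) space.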
Elimination of LR and LF/LR.py","file_name":"LR.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24093885047","text":"import json\n\nfrom helper_session import get_user_id_by_session\nfrom flask import current_app as app\nfrom helper_database import MysqlType\nfrom helper_json import item_from_json\n\n\ndef get_list(db, session_key):\n \"\"\"\n Return the list of user maps for current user or default from config.ini\n :param session_key:\n :param db:\n :return:\n \"\"\"\n user_owner_id = get_user_id_by_session(db, session_key)\n r = {}\n cursor = db.cursor()\n\n sql = 'select id, name, selected, provider_id, scale, sortorder, user_id, layers, ST_X(coordinates) as lng, ' \\\n 'ST_Y(coordinates) as lat from gis_user_map where user_id = %s order by sortorder, name'\n cursor.execute(sql, user_owner_id)\n\n lng = 0.0\n lat = 0.0\n\n if 'GIS_DEFAULT_LNG' in app.config and 'GIS_DEFAULT_LAT' in app.config:\n lng = app.config['GIS_DEFAULT_LNG']\n lat = app.config['GIS_DEFAULT_LAT']\n\n items = [{'name': app.config['GIS_DEFAULT_MAP_NAME'],\n 'coordinates': [lng, lat],\n 'provider_id': app.config['GIS_DEFAULT_PROVIDER_ID'],\n 'scale': app.config['GIS_DEFAULT_SCALE'],\n 'selected': 1,\n 'user_id': user_owner_id}]\n\n cursor_items = cursor.fetchall()\n\n if len(cursor_items) != 0:\n items = []\n\n for row in cursor_items:\n item = {}\n for i, value in enumerate(row):\n if value is not None:\n if cursor.description[i][0] == 'lng':\n lng = value\n elif cursor.description[i][0] == 'lat':\n lat = value\n elif cursor.description[i][1] == MysqlType.JSON.value:\n item[cursor.description[i][0]] = json.loads(value)\n else:\n item[cursor.description[i][0]] = value\n\n item['coordinates'] = [lng, lat]\n items.append(item)\n\n r['count'] = len(items)\n r['items'] = items\n\n return r\n\n\ndef insert(db, session_key, user_map_json):\n \"\"\"\n Insert user map settings from json for selected user\n :param db:\n :param session_key:\n :param user_map_json:\n :return:\n \"\"\"\n user_owner_id = get_user_id_by_session(db, session_key)\n\n name = item_from_json(user_map_json, 'name')\n selected = item_from_json(user_map_json, 'selected', 0)\n provider_id = item_from_json(user_map_json, 'provider_id')\n scale = item_from_json(user_map_json, 'scale')\n sortorder = item_from_json(user_map_json, 'sortorder', 100)\n user_id = item_from_json(user_map_json, 'user_id', user_owner_id)\n layers = json.dumps(item_from_json(user_map_json, 'layers'))\n coordinates = item_from_json(user_map_json, 'coordinates')\n\n sql_insert = 'insert into gis_user_map (name, selected, provider_id, scale, sortorder, user_id, ' \\\n 'layers, coordinates) values(%s,%s,%s,%s,%s,%s,%s,Point(%s,%s))'\n cursor = db.cursor()\n\n try:\n cursor.execute(sql_insert,\n (name, selected, provider_id, scale, sortorder, user_id, layers, coordinates[0], coordinates[1]))\n db.commit()\n\n return {'status': 'Ok', 'id': cursor.lastrowid}\n\n except Exception:\n return {'status': 'Error', 'code': 200, 'message': 'Error while insert user map. Check your parameters.'}\n\n\ndef update(db, session_key, user_map_json, user_map_id=None):\n \"\"\"\n Update or insert (if id not present) user map settings from json for selected user. 
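    # --- Editor's note (hypothetical payloads, not from the original project): usage sketch for this upsert. ---\n    # update(db, session_key, {'name': 'Home', 'provider_id': 1, 'scale': 12, 'coordinates': [37.6, 55.7]})\n    #   -> no 'id' in the payload and no user_map_id argument, so update() delegates to insert()\n    # update(db, session_key, {'id': 42, 'name': 'Home', 'provider_id': 1, 'scale': 14, 'coordinates': [37.6, 55.7]})\n    #   -> 'id' present, so row 42 is rewritten in place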
For update id must present\n in parameters or in user_map_json.\n :param user_map_id:\n :param db:\n :param session_key:\n :param user_map_json:\n :return:\n \"\"\"\n user_owner_id = get_user_id_by_session(db, session_key)\n\n user_map_id = item_from_json(user_map_json, 'id', user_map_id)\n\n if user_map_id is None:\n return insert(db, session_key, user_map_json)\n\n name = item_from_json(user_map_json, 'name')\n selected = item_from_json(user_map_json, 'selected', 0)\n provider_id = item_from_json(user_map_json, 'provider_id')\n scale = item_from_json(user_map_json, 'scale')\n sortorder = item_from_json(user_map_json, 'sortorder', 100)\n user_id = item_from_json(user_map_json, 'user_id', user_owner_id)\n layers = json.dumps(item_from_json(user_map_json, 'layers'))\n coordinates = item_from_json(user_map_json, 'coordinates')\n\n sql = 'update gis_user_map set name = %s, selected = %s, provider_id = %s, scale = %s, sortorder = %s, ' \\\n 'user_id = %s, layers = %s , coordinates = Point(%s,%s) where id = %s'\n cursor = db.cursor()\n\n try:\n cursor.execute(sql, (name, selected, provider_id, scale, sortorder, user_id, layers,\n coordinates[0], coordinates[1], user_map_id))\n db.commit()\n\n return {'status': 'Ok'}\n\n except Exception:\n return {'status': 'Error', 'code': 200, 'message': 'Error while updating user map. Check your parameters.'}\n\n\ndef delete(db, session_key, user_map_id):\n \"\"\"\n Delete item from user map settings for selected user and id item.\n :param db:\n :param session_key:\n :param user_map_id:\n :return:\n \"\"\"\n user_owner_id = get_user_id_by_session(db, session_key)\n\n sql = 'delete from gis_user_map where user_id = %s and id = %s'\n\n cursor = db.cursor()\n\n try:\n cursor.execute(sql, (user_owner_id, user_map_id))\n db.commit()\n\n return {'status': 'Ok'}\n\n except Exception:\n return {'status': 'Error',\n 'code': 200,\n 'message': 'Error while deleting from user map for id = {} and user = {}. '\n 'Check your parameters.'.format(user_map_id, user_owner_id)}\n\n\ndef set_current(db, session_key, user_map_id):\n \"\"\"\n Set selected user map configuration to select state and clear select state from other items for current user\n :param db:\n :param session_key:\n :param user_map_id:\n :return:\n \"\"\"\n user_owner_id = get_user_id_by_session(db, session_key)\n db.autocommit = False\n cursor = db.cursor()\n\n try:\n sql = 'select id from gis_user_map where user_id = %s and id = %s'\n cursor.execute(sql, (user_owner_id, user_map_id))\n if len(cursor.fetchall()) == 0:\n cursor.close()\n return {'status': 'Error',\n 'message': 'Error while set selected for user map for id = {} and user = {}. '\n 'Check your parameters.'.format(user_map_id, user_owner_id)}\n\n sql = 'update gis_user_map set selected = 0 where user_id = %s'\n cursor.execute(sql, user_owner_id)\n\n sql = 'update gis_user_map set selected = 1 where user_id = %s and id = %s'\n cursor.execute(sql, (user_owner_id, user_map_id))\n\n db.commit()\n cursor.close()\n\n return {'status': 'Ok'}\n\n except Exception:\n db.rollback()\n cursor.close()\n\n return {'status': 'Error',\n 'code': 200,\n 'message': 'Error while set selected for user map for id = {} and user = {}. 
'\n 'Check your parameters.'.format(user_map_id, user_owner_id)}\n","repo_name":"PVKonovalov/omsserver","sub_path":"gis_user_map.py","file_name":"gis_user_map.py","file_ext":"py","file_size_in_byte":7099,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"7018878778","text":"# https://www.cnblogs.com/dongkuo/p/6714071.html\n# http://c.biancheng.net/view/6457.html\nimport socket\nimport DNSPacket\nimport time\nimport sys\nimport MeasureClient\nimport urllib\nimport json\nimport threading\nimport Queue\nfrom math import sin, cos, sqrt, atan2, radians\n\nTTL = 240\n\nEC2_HOST = {\n 'ec2-34-238-192-84.compute-1.amazonaws.com': '34.238.192.84', # N. Virginia\n 'ec2-13-231-206-182.ap-northeast-1.compute.amazonaws.com': '13.231.206.182', # Tokyo\n 'ec2-13-239-22-118.ap-southeast-2.compute.amazonaws.com': '13.239.22.118', # Sydney\n 'ec2-34-248-209-79.eu-west-1.compute.amazonaws.com': '34.248.209.79', # Ireland\n 'ec2-18-231-122-62.sa-east-1.compute.amazonaws.com': '18.231.122.62', # Sao Paulo\n 'ec2-3-101-37-125.us-west-1.compute.amazonaws.com': '3.101.37.125' # N. California\n}\nEC2_IP_LOCATION= {\n \"ec2-34-238-192-84.compute-1.amazonaws.com\":[-77.4874, 39.0438],\n\"ec2-13-231-206-182.ap-northeast-1.compute.amazonaws.com\":[139.692, 35.6895],\n\"ec2-13-239-22-118.ap-southeast-2.compute.amazonaws.com\":[151.2002, -33.8591],\n\"ec2-34-248-209-79.eu-west-1.compute.amazonaws.com\":[-6.26031, 53.3498],\n\"ec2-18-231-122-62.sa-east-1.compute.amazonaws.com\":[-46.6333, -23.5505],\n\"ec2-3-101-37-125.us-west-1.compute.amazonaws.com\":[-121.895, 37.3394]\n}\n\nDEFAULT= '34.238.192.84'\n\n\ndef get_location(ip):\n \"\"\"\n Use api to get the location of ip\n :param ip\n :return\n the Latitude and Longitude\n \"\"\"\n while True:\n try:\n response = urllib.urlopen('http://ip-api.com/json/' + ip)\n resp_json = json.load(response)\n break\n except:\n continue\n print(resp_json['lat'], resp_json['lon'])\n return resp_json['lon'], resp_json['lat']\n\n\ndef cal_dis(client, host):\n \"\"\"\n Calculate the distance between client and host\n :param client: the Latitude and Longitude of client\n :param host: the Latitude and Longitude of host\n :return\n The distance between client and host\n \"\"\"\n # approximate radius of earth in km\n R = 6373.0\n\n lat1 = radians(client[0])\n lon1 = radians(client[1])\n lat2 = radians(host[0])\n lon2 = radians(host[1])\n\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * atan2(sqrt(a), sqrt(1 - a))\n\n distance = R * c # in km\n\n return distance\n\n\ndef get_dis_to_client(client_ip):\n \"\"\"\n Get the Queue of distance between each EC2 host and client\n :param client_ip: ip of client\n :return\n The Queue of distance distance between each EC2 host and client\n \"\"\"\n\n # get the client location\n client = get_location(client_ip)\n host_dis = []\n\n threads = []\n\n # use for loop to get each ec2 host's ip then cal the distance and put it into queue\n for host in EC2_IP_LOCATION.keys():\n location = EC2_IP_LOCATION[host]\n dis = cal_dis(client, location)\n host_dis.append((host, dis))\n # for host in EC2_HOST.keys():\n # host_ip = EC2_HOST[host]\n # t = threading.Thread(target=lambda q, arg1: q.put((host, cal_dis(client, get_location(arg1)))),\n # args=(host_dis, host_ip))\n # t.start()\n # threads.append(t)\n # while threads:\n # threads.pop().join()\n print('+++++++++++++++++++++++++++++++++++++++++')\n print(host_dis)\n 
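    # --- Editor's note (approximate values, editor's sketch): sanity check for cal_dis() above. ---\n    # cal_dis() is the haversine formula with R = 6373 km; for two (lat, lon) pairs such as London and Paris,\n    # cal_dis((51.5074, -0.1278), (48.8566, 2.3522)) evaluates to roughly 344 km. Note that get_location() and\n    # EC2_IP_LOCATION store points as (lon, lat) while cal_dis() reads index 0 as latitude, so the argument\n    # ordering must be kept consistent by the caller.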
print('+++++++++++++++++++++++++++++++++++++++++')\n\n return host_dis\n\n\ndef get_nearest_3(host_dis):\n \"\"\"\n Get the Queue of distance between each EC2 host and client\n :param host_dis: the queue of host distance\n :return\n The 3 nearest distance between replica server and client\n \"\"\"\n dis_tuple_ls = sorted(host_dis, key=lambda x: x[1])\n # dis_tuple_ls = sorted(host_dis.items(), key=lambda x: x[1])\n sorted_hosts = map(lambda x: x[0], dis_tuple_ls)\n print('sorted host ============= ', sorted_hosts)\n return sorted_hosts[:3]\n\n\nclass DNSserver:\n \"\"\"\n This is DNS server which is listening the request and then redirection to send clients to\n the replica server with the fastest response time.\n\n \"\"\"\n\n def __init__(self, port):\n self.server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # udp\n self.server.bind(('', port))\n self.port = port\n self.cache = {} # client ip => (which replica to map, last time to fetch ip)\n self.measure_client = MeasureClient.MeasureClient(EC2_HOST.keys(), self.port)\n\n def serve_forever(self):\n try:\n while True:\n # dns request\n data, addr = self.server.recvfrom(1024)\n print('new dns <- ', addr[0])\n start = time.time()\n\n # self.measure_client.set_probe(addr[0])\n #\n # start1 = time.time()\n #\n # # Set the 3 nearest distance between replica server and client\n # self.measure_client.set_hosts(get_nearest_3(get_dis_to_client(addr[0])))\n # print('++++++++++++++++++++++++++++++++++++++++++', time.time() - start1)\n\n dns = DNSPacket.DNSFrame(data)\n name = dns.getname()\n toip = None\n ifrom = ''\n\n if dns.query.type != 1 or name != DOMAIN:\n print('diff in domain...', name, DOMAIN)\n continue\n\n # cache hit\n if self.cache.__contains__(addr[0]) \\\n and time.time() - self.cache.get(addr[0])[1] <= TTL:\n toip = self.cache.get(addr[0])[0]\n ifrom = 'cache'\n # update ttl\n ttl = TTL - (time.time() - self.cache.get(addr[0])[1])\n print('%s: %s-->%s (%s)' % (addr[0], name, toip, ifrom))\n dns.setanswer(toip, ttl)\n self.server.sendto(dns.pack(), addr)\n\n else:\n try:\n self.measure_client.set_probe(addr[0])\n\n start1 = time.time()\n\n # Set the 3 nearest distance between replica server and client\n self.measure_client.set_hosts(get_nearest_3(get_dis_to_client(addr[0])))\n print('++++++++++++++++++++++++++++++++++++++++++', time.time() - start1)\n\n print(self.cache)\n if self.cache.__contains__(addr[0]):\n print(time.time() - self.cache.get(addr[0])[1])\n # If this is query a A record, then response it\n\n # name = dns.getname()\n # toip = None\n ifrom = \"rtt\"\n best_host = self.measure_client.get_best()\n print(best_host)\n\n # if the 3 nearest replica server all connection refused, the toip will be None\n # then the server will listen for next client request and try to connect again\n if best_host:\n toip = EC2_HOST[best_host]\n else:\n print(\"All Connection refused\")\n toip = DEFAULT\n ifrom = 'default'\n # continue\n except:\n toip = DEFAULT\n ifrom = 'default'\n\n if toip:\n dns.setanswer(toip)\n self.cache[addr[0]] = (toip, time.time())\n print('%s: %s-->%s (%s)\\t\\t%s' % (addr[0], name, toip, ifrom, str(time.time() - start)))\n self.server.sendto(dns.pack(), addr)\n except KeyboardInterrupt:\n self.server.close()\n print('shutdonwn...')\n return\n except:\n print('RETRY...')\n self.serve_forever()\n\n\nif __name__ == \"__main__\":\n DOMAIN = sys.argv[2]\n PORT = int(sys.argv[1])\n sev = DNSserver(port=PORT)\n # sev.addname('*', '0.0.0.0') # default address\n print('listening...')\n sev.serve_forever() # start 
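# --- Editor's note (illustration): shape of the DNS answer cache used above. ---\n# self.cache maps client_ip -> (replica_ip, time_cached). A lookup is a hit while\n# time.time() - time_cached <= TTL (240 s), and the cached answer is re-served with the remaining TTL;\n# otherwise the three geographically nearest replicas are re-probed and the lowest-RTT one is returned.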
DNS server\n\n# run the server: python DNSserver.py 50004 cs5700cdn.example.com\n# test: dig +short +time=2 +tries=1 -p 50004 @cs5700cdnproject.ccs.neu.edu cs5700cdn.example.com\n","repo_name":"yinrouni/Roll-Your-Own-CDN","sub_path":"DNSserver.py","file_name":"DNSserver.py","file_ext":"py","file_size_in_byte":8369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13903242378","text":"# EPE Time Series Service \n# by Sage Lichtenwalner\n# Database Schema\n#\n# Revised 9/10/2014\n\ndb_tables = {}\n\ndb_tables['networks'] = (\n \"\"\"CREATE TABLE IF NOT EXISTS `networks` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `name` varchar(10) NOT NULL,\n `long_name` text NOT NULL,\n `description` text NOT NULL,\n `url` tinytext NOT NULL,\n `created` datetime NOT NULL DEFAULT '0000-00-00 00:00:00',\n `modified` datetime NOT NULL DEFAULT '0000-00-00 00:00:00',\n PRIMARY KEY (`id`),\n UNIQUE KEY `name` (`name`)\n ) ENGINE=MyISAM DEFAULT CHARSET=latin1 AUTO_INCREMENT=1\"\"\")\n\ndb_tables['parameters'] = (\n \"\"\"CREATE TABLE IF NOT EXISTS `parameters` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `name` varchar(100) NOT NULL,\n `category` varchar(100) NOT NULL,\n `description` text NOT NULL,\n `units` varchar(20) NOT NULL,\n `cf_url` tinytext NOT NULL,\n `ioos_url` tinytext NOT NULL,\n `created` datetime NOT NULL DEFAULT '0000-00-00 00:00:00',\n `modified` datetime NOT NULL DEFAULT '0000-00-00 00:00:00',\n PRIMARY KEY (`id`),\n UNIQUE KEY `name` (`name`)\n ) ENGINE=MyISAM DEFAULT CHARSET=latin1 AUTO_INCREMENT=1\"\"\")\n\ndb_tables['stations'] = (\n \"\"\"CREATE TABLE IF NOT EXISTS `stations` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `network_name` varchar(10) NOT NULL,\n `name` varchar(25) NOT NULL,\n `description` text NOT NULL,\n `location` point NOT NULL,\n `start_time` datetime DEFAULT NULL,\n `end_time` datetime DEFAULT NULL,\n `info_url` varchar(255) NOT NULL,\n `image_url` varchar(255) NOT NULL,\n `created` datetime NOT NULL DEFAULT '0000-00-00 00:00:00',\n `modified` datetime NOT NULL DEFAULT '0000-00-00 00:00:00',\n PRIMARY KEY (`id`),\n UNIQUE KEY `unique-station` (`network_name`,`name`)\n ) ENGINE=MyISAM DEFAULT CHARSET=latin1 AUTO_INCREMENT=1\"\"\")\n \ndb_tables['sensors'] = (\n \"\"\"CREATE TABLE IF NOT EXISTS `sensors` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `station_id` int(11) NOT NULL,\n `parameter_id` int(11) NOT NULL,\n `depth` decimal(6,2) NOT NULL,\n `erddap_url` varchar(255) NOT NULL,\n `active` tinyint(4) NOT NULL,\n `created` datetime NOT NULL DEFAULT '0000-00-00 00:00:00',\n `modified` datetime NOT NULL DEFAULT '0000-00-00 00:00:00',\n PRIMARY KEY (`id`),\n UNIQUE KEY `unique-sensor` (`station_id`,`parameter_id`,`depth`)\n ) ENGINE=MyISAM DEFAULT CHARSET=latin1 AUTO_INCREMENT=1\"\"\")\n\ndb_tables['data'] = (\n \"\"\"CREATE TABLE IF NOT EXISTS `data` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `sensor_id` int(11) NOT NULL,\n `date_time` datetime NOT NULL,\n `value` double NOT NULL,\n PRIMARY KEY (`id`),\n UNIQUE KEY `sensor-date` (`sensor_id`,`date_time`)\n ) ENGINE=MyISAM DEFAULT CHARSET=latin1 AUTO_INCREMENT=1\"\"\")\n\n","repo_name":"ooiepe/tss-python","sub_path":"tseries/db_schema.py","file_name":"db_schema.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10180927920","text":"#!/usr/bin/env python3\n\nimport time\n\nimport mwapi\n\nsession = mwapi.Session(\n host='https://commons.wikimedia.org',\n 
user_agent='We Are Beautiful purger (mail@lucaswerkmeister.de)',\n)\n\nfor result in session.post(\n action='purge',\n generator='categorymembers',\n gcmtitle='Category:We Are Beautiful',\n gcmtype='file',\n gcmlimit=30, # rate limit permits 30 purges per 60 seconds\n forcelinkupdate=True,\n continuation=True,\n ):\n pages = result['purge']\n first_title = pages[0]['title']\n last_title = pages[-1]['title']\n print('Purged {}\\nUntil {}'.format(first_title, last_title), flush=True)\n time.sleep(75) # rate limit permits 30 purges per 60 seconds, +15s for some buffer\n\nprint('Done.')\n","repo_name":"lucaswerkmeister/wearebeautiful-commons","sub_path":"purge-all-files.py","file_name":"purge-all-files.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32981203887","text":"# Given a non-empty string s and a dictionary wordDict containing a list of non-empty words, add spaces in s to construct a sentence where each word is a valid dictionary word. Return all such possible sentences.\n#\n# Note:\n#\n# The same word in the dictionary may be reused multiple times in the segmentation.\n# You may assume the dictionary does not contain duplicate words.\n# Example 1:\n#\n# Input:\n# s = \"catsanddog\"\n# wordDict = [\"cat\", \"cats\", \"and\", \"sand\", \"dog\"]\n# Output:\n# [\n# \"cats and dog\",\n# \"cat sand dog\"\n# ]\n# Example 2:\n#\n# Input:\n# s = \"pineapplepenapple\"\n# wordDict = [\"apple\", \"pen\", \"applepen\", \"pine\", \"pineapple\"]\n# Output:\n# [\n# \"pine apple pen apple\",\n# \"pineapple pen apple\",\n# \"pine applepen apple\"\n# ]\n# Explanation: Note that you are allowed to reuse a dictionary word.\n# Example 3:\n#\n# Input:\n# s = \"catsandog\"\n# wordDict = [\"cats\", \"dog\", \"sand\", \"and\", \"cat\"]\n# Output:\n# []\n\nclass Solution(object):\n def wordBreak(self, s, wordDict):\n \"\"\"\n :type s: str\n :type wordDict: List[str]\n :rtype: List[str]\n \"\"\"\n # dp stack over pre result, oom\n # if not s or not wordDict:\n # return []\n # dp = [[]]\n # dp[0].append('')\n # for i in range(1, len(s) + 1):\n # l = []\n # for j in range(i):\n # if dp[j] and s[j:i] in wordDict:\n # for sub in dp[j]:\n # l.append(sub + ('' if sub == '' else ' ') + s[j:i])\n # dp.append(l)\n # return dp[-1]\n\n # save split position, use dfs to get path\n if not s or not wordDict:\n return []\n dp = [[-1]]\n for i in range(1, len(s) + 1):\n l = []\n for j in range(i):\n if dp[j] and s[j:i] in wordDict:\n l.append(j)\n dp.append(l)\n res = []\n\n def search(end, path, res):\n if end == 0:\n res.append(' '.join(path[::-1]))\n return\n for begin in dp[end]:\n search(begin, path + [s[begin:end]], res)\n\n search(len(s), [], res)\n return res\n\n\ns = Solution()\ns.wordBreak(\"catsanddog\",\n [\"cat\", \"cats\", \"and\", \"sand\", \"dog\"])\n","repo_name":"yshshadow/Leetcode","sub_path":"101-150/140.py","file_name":"140.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19425414326","text":"import requests\nfrom datetime import datetime as dt\n\nurl = 'https://api.wikitree.com/api.php'\n\nparams = dict(\n action='getAncestors',\n key='Bäckstrand-5',\n depth=\"1\",\n appId='jb_test_1'\n)\n\nnow = dt.now()\nresp = requests.get(url=url, params=params)\ndata = resp.json() # Check the JSON Response Content documentation below\nprint(f\"Took {dt.now()-now}\")\n\nprint(data)\n\nnum = 0\n\nfor p in 
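# --- Editor's note (illustration): trace of the split-position DP above for s = \"catsanddog\". ---\n# dp[i] stores every j where s[j:i] is a dictionary word and dp[j] is non-empty:\n#   dp[3] = [0] (\"cat\"), dp[4] = [0] (\"cats\"), dp[7] = [3, 4] (\"sand\", \"and\"), dp[10] = [7] (\"dog\")\n# The DFS then walks back from dp[10]: 10 -> 7 -> 4 -> 0 yields \"cats and dog\", and 10 -> 7 -> 3 -> 0\n# yields \"cat sand dog\".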
data[0][\"ancestors\"]:\n    print(p[\"Name\"], p[\"Id\"], p[\"Father\"], p[\"Mother\"])\n    num += 1\n\n\nprint(data[0][\"ancestors\"][0])\nprint(f\"Number {num}\")\n","repo_name":"sandos/wikitree","sub_path":"api_test.py","file_name":"api_test.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18480976845","text":"# Definition for a binary tree node.\nclass TreeNode:\n    def __init__(self, val=0, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\nclass Solution:\n    def minDiffInBST(self, root: TreeNode) -> int:\n        if not root:\n            return None\n        \n        self.path = []\n        self.makeAList(root)\n        \n        min_dst = float('inf')\n        for i in range(len(self.path)-1):\n            if self.path[i+1] - self.path[i] <= min_dst:\n                min_dst = self.path[i+1] - self.path[i]\n        \n        return min_dst\n    \n    def makeAList(self, root: TreeNode) -> [int]:\n        if not root:\n            return None\n        self.makeAList(root.left)\n        self.path.append(root.val)\n        self.makeAList(root.right)\n\n","repo_name":"thydrdy/competitive_programming","sub_path":"leetcode/minimum distance between bst nodes.py","file_name":"minimum distance between bst nodes.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40823939172","text":"import unittest\nfrom tests.unit_test_helper.console_test_helper import *\n\n\nclass TestOutput(unittest.TestCase):\n\n    def test(self):\n        temp_globals, temp_locals, content, output = execfile(\"lab16/ch016_t17_list_slicing.py\")\n        self.assertEqual(\"!XeXgXaXsXsXeXmX XtXeXrXcXeXsX XeXhXtX XmXaX XI\", temp_locals['garbled'])\n        self.assertEqual(\"I am the secret message!\", temp_locals['message'])\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"wongcyrus/ite3101_introduction_to_programming","sub_path":"tests/lab16/test_ch016_t17_list_slicing.py","file_name":"test_ch016_t17_list_slicing.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"29498490418","text":"from flask import Blueprint, request, jsonify\r\n\r\nfrom mysql import Apply\r\nfrom app import db\r\nfrom common import turn_to_int\r\nimport datetime\r\n\r\n\r\nstu_apply = Blueprint('stu_apply', __name__)\r\n@stu_apply.route('', methods = ['GET','POST'])\r\ndef Stu_Apply():\r\n    global result_data\r\n    if request.method == 'POST':\r\n        json_data = request.get_json(silent=True)\r\n\r\n        if json_data is None:\r\n            return jsonify(code=-101, data=None)\r\n\r\n        # Each required field must be present; reject the request when one is missing.\r\n        if not json_data.get('activity_name'):\r\n            return jsonify(code=-101, data={'tip': '缺少活动名称参数'})\r\n        if not json_data.get('activity_date'):\r\n            return jsonify(code=-101, data={'tip': '缺少教室开始使用日期参数'})\r\n        if not json_data.get('activity_time'):\r\n            return jsonify(code=-101, data={'tip': '缺少活动时间参数'})\r\n        if not json_data.get('end_time'):\r\n            return jsonify(code=-101, data={'tip': '缺少教室结束使用时间参数'})\r\n        if not json_data.get('org'):\r\n            return jsonify(code=-101, data={'tip': '缺少负责审核的组织参数'})\r\n        if not json_data.get('applicant_id'):\r\n            return jsonify(code=-101, data={'tip': '缺少申请人学号参数'})\r\n        if not json_data.get('applicant_name'):\r\n            return jsonify(code=-101, data={'tip': '缺少申请人姓名参数'})\r\n        if not json_data.get('applicant_phone'):\r\n            return jsonify(code=-101, data={'tip': '缺少申请人联系方式参数'})\r\n        if not json_data.get('people_num'):\r\n            return jsonify(code=-101, data={'tip': '缺少参加人数参数'})\r\n        if not json_data.get('leader_name'):\r\n            return jsonify(code=-101, data={'tip': '缺少负责教师姓名参数'})\r\n\r\n\r\n        room = Apply()\r\n        time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\r\n        apply_id = str(len(Apply.query.all())+1)\r\n        room.apply_id = apply_id\r\n        room.activity_name = json_data.get('activity_name')\r\n        room.applicant_id = json_data.get('applicant_id')\r\n        room.applicant_name = json_data.get('applicant_name')\r\n        room.applicant_phone = json_data.get('applicant_phone')\r\n        room.apply_time = time\r\n        room.use_date = json_data.get('activity_date')\r\n        room.begin_time = turn_to_int(json_data.get('activity_time'))\r\n        room.end_time = turn_to_int(json_data.get('end_time'))\r\n        room.people_count = json_data.get('people_num')\r\n        room.requests = json_data.get('requests')\r\n        room.org = json_data.get('org')\r\n        room.teacher_name = json_data.get('leader_name')\r\n        room.material = json_data.get('material')\r\n        room.check_status = '待审核'\r\n\r\n        db.session.add(room)\r\n        try:\r\n            db.session.commit()\r\n        except:\r\n            db.session.rollback()\r\n            return jsonify(code=101, data={'tip': '数据库异常'})\r\n\r\n        return jsonify(code=0,data={'tip': '正常'})\r\n\r\n    elif request.method == 'GET':\r\n        # Get the student ID (hard-coded here)\r\n        stu_id = '181320429'\r\n        records = Apply.query.filter(Apply.applicant_id == stu_id).all()\r\n        result_data = list()\r\n\r\n        status_map = {\r\n            '待审核' : 0,\r\n            '审核通过' : 1,\r\n            '审核失败' : 2,\r\n        }\r\n\r\n        for record in records:\r\n            result_data.append(dict(\r\n                apply_id=record.apply_id,\r\n                room=record.room_name,\r\n                date=record.use_date,\r\n                begin_time=record.begin_time,\r\n                end_time=record.end_time,\r\n                check_status=status_map.get(record.check_status)\r\n            ))\r\n\r\n        return jsonify(code=0,data=result_data)\r\n","repo_name":"lvyyevd/hitapply","sub_path":"app/stu_apply.py","file_name":"stu_apply.py","file_ext":"py","file_size_in_byte":3698,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"22417506708","text":"from __future__ import absolute_import\nfrom __future__ import print_function\nimport numpy as np\nimport cPickle as pkl\n\nfrom keras.preprocessing import sequence, text\nfrom keras.optimizers import SGD, RMSprop, Adagrad\nfrom keras.utils import np_utils\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation\nfrom keras.layers.embeddings import Embedding\nfrom keras.layers.recurrent import LSTM, GRU\nfrom text_processing import load_imdb_data\n\n'''\n    Modified version of keras LSTM example: trains an LSTM network on \n    the imdb sentiment analysis data set. 
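# --- Editor's note (sketch; field list taken from Stu_Apply() above): the per-field checks can be collapsed. ---\n# REQUIRED = ['activity_name', 'activity_date', 'activity_time', 'end_time', 'org',\n#             'applicant_id', 'applicant_name', 'applicant_phone', 'people_num', 'leader_name']\n# missing = [k for k in REQUIRED if not json_data.get(k)]\n# if missing:\n#     return jsonify(code=-101, data={'tip': 'missing parameters: ' + ', '.join(missing)})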
In addition to predicting on\n test data, also stores the model's weights and intermediate\n activation values for training and test data.\n\n GPU command:\n THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python imdb_lstm.py\n'''\n\nmax_features=20000\nmaxlen = 100 # cut texts after this number of words (among top max_features most common words)\nbatch_size = 16\n\n# had some luck with seed 111\nprint(\"Loading data...\")\n(X_train, y_train), (X_test, y_test), w = load_imdb_data(\n binary=True, max_features=max_features, maxlen=maxlen, seed=37)\n\nprint(len(X_train), 'train sequences')\nprint(len(X_test), 'test sequences')\n\nprint('X_train shape:', X_train.shape)\nprint('X_test shape:', X_test.shape)\n\nprint('Build model...')\nmodel = Sequential()\nmodel.add(Embedding(max_features, 256))\nmodel.add(LSTM(256, 128)) # try using a GRU instead, for fun\nmodel.add(Dropout(0.5))\nmodel.add(Dense(128, 1))\nmodel.add(Activation('sigmoid'))\n\n# try using different optimizers and different optimizer configs\nmodel.compile(loss='binary_crossentropy', optimizer='adam', class_mode=\"binary\")\n\nprint(\"Train...\")\nmodel.fit(X_train, y_train, batch_size=batch_size, nb_epoch=5, validation_split=0.1, show_accuracy=True)\nscore = model.evaluate(X_test, y_test, batch_size=batch_size)\nprint('Test score:', score)\n\nclasses = model.predict_classes(X_test, batch_size=batch_size)\nacc = np_utils.accuracy(classes, y_test)\n\nprint('Test accuracy:', acc)\n\nstore_weights = {}\nfor layer in model.layers :\n store_weights[layer] = layer.get_weights() \n\n# create a new model of same structure minus last layers, to explore intermediate outputs\nprint('Build truncated model')\nchopped_model = Sequential()\nchopped_model.add(Embedding(max_features, 256, weights=model.layers[0].get_weights()))\nchopped_model.add(LSTM(256, 128, weights=model.layers[1].get_weights()))\nchopped_model.compile(loss='binary_crossentropy', optimizer='adam', class_mode=\"binary\")\n\n# pickle intermediate outputs, model weights\ntrain_activations = chopped_model.predict(X_train, batch_size=batch_size)\ntest_activations = chopped_model.predict(X_test, batch_size=batch_size)\noutputs = dict(final=classes, acc=acc, weights=store_weights, y_train=y_train, y_test=y_test,\n train_activations=train_activations, test_activations=test_activations) \n\npkl.dump(outputs, open('results/predicted_activations.pkl', 'wb'), \n protocol=pkl.HIGHEST_PROTOCOL)\n","repo_name":"eelanagaraj/imdb_experiments","sub_path":"LSTM_keras.py","file_name":"LSTM_keras.py","file_ext":"py","file_size_in_byte":2996,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"16089919166","text":"import torch\r\nimport torch.nn as nn\r\nimport torchvision.models as models\r\n\r\nfrom torch.nn.utils.rnn import pack_padded_sequence\r\nfrom torch.autograd import Variable\r\n\r\nclass EncoderCNN(nn.Module):\r\n def __init__(self,args):\r\n super(EncoderCNN,self).__init__()\r\n resnet=models.resnet152(pretrained=True)\r\n modules=list(resnet.children())[:-1]\r\n self.resnet=nn.Sequential(*modules)\r\n self.linear=nn.Linear(resnet.fc.in_features,args['word_dime'])\r\n self.bn=nn.BatchNorm1d(args['word_dime'],momentum=0.01)\r\n\r\n def forward(self,images):\r\n with torch.no_grad():\r\n features=self.resnet(images)\r\n features=features.reshape(features.size(0),-1)\r\n features=self.bn(self.linear(features))\r\n return features\r\n\r\nclass DecoderRNN(nn.Module):\r\n def __init__(self,args):\r\n 
super(DecoderRNN,self).__init__()\r\n self.word_dime=args['word_dime']\r\n self.hidd_dime=args['hidd_dime']\r\n self.hidd_layer=args['hidd_layer']\r\n self.vocab_size=args['vocab_size']\r\n\r\n self.drop=nn.Dropout(p=0.2)\r\n self.embed=nn.Embedding(self.vocab_size,self.word_dime)\r\n self.rnn=nn.LSTM(self.word_dime,self.hidd_dime,self.hidd_layer,batch_first=True)\r\n self.linear=nn.Linear(self.hidd_dime,self.vocab_size)\r\n self.max_seq_leng=args['max_seq_leng']\r\n\r\n def forward(self, features,caption,leng):\r\n embed_captn=self.drop(self.embed(caption))\r\n embed_captn=torch.cat((features.unsqueeze(1),embed_captn),1)\r\n pack_captn=torch.nn.utils.rnn.pack_padded_sequence(embed_captn,leng,batch_first=True)\r\n hidd_state,_=self.rnn(pack_captn)\r\n outputs=self.linear(hidd_state[0])\r\n return outputs\r\n\r\n def sample(self,features,hidd_state=None):\r\n sample_ids=[]\r\n words=features.unsqueeze(1)\r\n\r\n for idx in range(self.max_seq_leng):\r\n outs, hidd_state=self.rnn(words,hidd_state)\r\n outs=self.linear(outs.squeeze(1))\r\n _,pred_words=outs.max(1)\r\n sample_ids.append(pred_words)\r\n words=self.embed(pred_words)\r\n words=words.unsqueeze(1)\r\n sample_ids=torch.stack(sample_ids,1)\r\n\r\n return sample_ids\r\n\r\n def decode_with_beam_search(self,features,hidden_vector=None):\r\n max_len=25\r\n beam_size=3\r\n dead_num=0\r\n eos_idx=2\r\n features=features.unsqueeze(1)\r\n\r\n generated_sents=[]\r\n \r\n inp=features.repeat(beam_size,1,1) \r\n \r\n if torch.cuda.is_available(): \r\n inp=inp.cuda()\r\n \r\n \r\n for step in range(max_len):\r\n \r\n out_step,hidden_vector=self.rnn(inp,hidden_vector)\r\n hidd_fact=hidden_vector[0].size(0)\r\n out_step=nn.functional.log_softmax(self.linear(out_step.squeeze(1)))\r\n word_prob=out_step.topk(beam_size)[0]\r\n word_idx=out_step.topk(beam_size)[1]\r\n \r\n tmp_words=[]\r\n \r\n if step==0:\r\n for idx in range(beam_size):\r\n word=[]\r\n word.append([word_idx.data[0][idx].tolist()])\r\n word.append([word_prob.data[0][idx].tolist()])\r\n word.append([hidden_vector[0].transpose(0,1).data[idx],\r\n hidden_vector[1].transpose(0,1).data[idx]])\r\n \r\n tmp_words.append(word)\r\n \r\n \r\n else:\r\n for idx in range(beam_size-dead_num):\r\n for sub_idx in range(beam_size):\r\n word=[]\r\n word.append(candidate_words[idx][0]+[word_idx.data[idx][sub_idx].tolist()])\r\n word.append([candidate_words[idx][1][0]+\r\n word_prob.data[idx][sub_idx].tolist()])\r\n word.append([hidden_vector[0].transpose(0,1).data[idx],\r\n hidden_vector[1].transpose(0,1).data[idx]])\r\n \r\n tmp_words.append(word)\r\n\r\n \r\n #print(step)\r\n\r\n tmp_words=sorted(tmp_words,key=lambda x: x[1][0]/len(x[0]), reverse=True)\r\n\r\n tmp_words=tmp_words[0:beam_size-dead_num]\r\n\r\n #if step==5:\r\n #print(tmp_words)\r\n\r\n candidate_words=[]\r\n\r\n #print(tmp_words)\r\n\r\n for idx in range(len(tmp_words)):\r\n if tmp_words[idx][0][-1]==eos_idx:\r\n generated_sents.append((tmp_words[idx][0],tmp_words[idx][1][0]))\r\n dead_num+=1\r\n #print('generted words')\r\n \r\n else:\r\n candidate_words.append(tmp_words[idx])\r\n \r\n #print(candidate_words)\r\n \r\n if beam_size-dead_num==0: \r\n return generated_sents\r\n\r\n\r\n hidden_vector=(Variable(torch.zeros(beam_size-dead_num,hidd_fact,hidden_vector[0].size(2))),\r\n Variable(torch.zeros(beam_size-dead_num,hidd_fact,hidden_vector[0].size(2))))\r\n inp=Variable(torch.LongTensor([0]*(beam_size-dead_num)))\r\n\r\n\r\n for idx in range(beam_size-dead_num):\r\n hidden_vector[0].data[idx]=candidate_words[idx][2][0]\r\n 
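                # --- Editor's note (illustration): the beam bookkeeping above, in brief. ---\r\n                # Each candidate is [token_ids, [cumulative_log_prob], (h, c)]. Every step scores beam_size\r\n                # extensions per live candidate, re-ranks the pool by length-normalised log-probability\r\n                # (x[1][0] / len(x[0])), and trims it back to beam_size - dead_num; a candidate whose last\r\n                # token is eos_idx (2) is retired into generated_sents.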
hidden_vector[1].data[idx]=candidate_words[idx][2][1]\r\n inp.data[idx]=candidate_words[idx][0][-1]\r\n\r\n hidden_vector=(hidden_vector[0].transpose(0,1).contiguous(),\r\n hidden_vector[1].transpose(0,1).contiguous())\r\n\r\n if torch.cuda.is_available():\r\n hidden_vector=(hidden_vector[0].cuda(),hidden_vector[1].cuda())\r\n inp=inp.cuda()\r\n\r\n inp=inp.contiguous().view(beam_size-dead_num,1)\r\n inp=self.embed(inp)\r\n\r\n\r\n #if step==1:\r\n #print(candidate_words)\r\n #print(inp)\r\n\r\n if step==max_len-1:\r\n for idx in range(len(candidate_words)):\r\n generated_sents.append((candidate_words[idx][0],candidate_words[idx][1][0]))\r\n \r\n# print(generated_sents)\r\n\r\n return generated_sents\r\n'''\r\nencoder=EncoderCNN({'embed_size':128})\r\ndecoder=DecoderRNN({'word_dime':128,'hidd_dime':256,'hidd_layer':1,'vocab_size':50000,'max_seq_leng':20})\r\nencoder.cuda()\r\ndecoder.cuda()\r\nprint(encoder)\r\nprint(decoder)\r\n'''\r\n","repo_name":"ZVengin/Image-Captioning","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21578936735","text":"# coding=utf-8\nimport os\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nDEBUG = True,\nROOT_URLCONF = 'config.urls'\nSECRET_KEY = ''\nWSGI_APPLICATION = 'config.wsgi.application'\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n # third party apps\n 'rest_framework',\n # local apps\n 'api',\n)\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': 'postgres',\n 'USER': 'postgres',\n 'HOST': 'db',\n 'PORT': 5432\n },\n}\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n","repo_name":"robohead/minimal-django-rest-api-boilerplate","sub_path":"config/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"12006179056","text":"# adventOfCode 2021 day 11\n# https://adventofcode.com/2021/day/11\n\n\nimport enum\n\n# this enum defines the states of an octopus during a step:\nclass FlashStatus(enum.Enum):\n # it hasn't flashed yet in the current step\n NOT_FLASHED = 0\n\n # it has just flashed\n JUST_FLASHED = 1\n\n # it flashed earlier (the adjacent octpuses already had energy increased)\n PREVIOUSLY_FLASHED = 2\n\n\n# this class defines the status of a particular octopus\nclass OctopusStatus:\n def __init__(self, energy):\n self.energy = energy\n self.flashed = FlashStatus.NOT_FLASHED\n \n # total number of times that this octopus has flashed\n self.flash_tally = 0\n\n # get the current value of energy\n def get_energy(self):\n return self.energy\n \n # this determines if the octopus has been flashed\n def get_flashed(self):\n return self.flashed\n\n # get total number of times that this octopus has flashed\n def get_flash_tally(self):\n return self.flash_tally\n\n # increase energy, update associated flash statuses\n def increment_energy(self):\n self.energy += 1\n if self.energy == 10:\n self.flashed = FlashStatus.JUST_FLASHED\n self.flash_tally += 1\n\n # update flash status to PREVIOUSLY_FLASHED\n def post_flashed(self):\n self.flashed = FlashStatus.PREVIOUSLY_FLASHED\n\n # (at the end of a step) reset octopus status\n def reset_energy(self):\n 
self.energy = 0\n self.flashed = FlashStatus.NOT_FLASHED\n\n\n# this class holds the data structure for all octopuses,\n# and implements associated logic\nclass Octopus:\n def __init__(self):\n self.oct_status = []\n\n # returns True/False, if all octopuses are flashing right now\n # \n # this function should be called before reset_to_zero is run\n # (since it is designed to capture the state before that is run)\n def are_all_octopuses_flashing(self):\n # traverse all octopuses ... return False immediately if an unflashed octopus is found\n for i in range(len(self.oct_status)):\n for j in range(len(self.oct_status[i])):\n if self.oct_status[i][j].get_flashed() == FlashStatus.NOT_FLASHED:\n return False\n\n # if the end gets reached, then the answer is True (all octopuses are flashing)\n return True\n\n # calculates total number of times that all octupuses have flashed (in the past as well as now)\n def get_flash_tally(self):\n ret_val = 0\n for i in range(len(self.oct_status)):\n for j in range(len(self.oct_status[i])):\n ret_val += self.oct_status[i][j].get_flash_tally()\n return ret_val\n\n # this displays a grid display of octopus energies\n def display(self):\n for line in self.oct_status:\n str_list = line\n for oct in str_list:\n print(oct.get_energy(), end='')\n print()\n\n # this is used when importing energy numbers into a row of octopuses\n # line is a string of single digit integers, representing energy\n def add_line(self, line):\n oct_line = []\n for digit in line:\n oct_line.append(OctopusStatus(int(digit)))\n self.oct_status.append(oct_line)\n\n # this function increases the energy of a\n # single octopus at coordinates (i,j)\n # returns zero if the increment didn't cause a flash\n # returns one if the increment caused a flash\n def increment_one_oct(self, i, j):\n # protect against (i,j) outside of the grid\n if True in [i<0, j<0, i>=len(self.oct_status), j>=len(self.oct_status[0])]:\n return 0\n\n # increment the octopus' energy\n self.oct_status[i][j].increment_energy()\n \n # detect if flash has just been triggered by this call\n if self.oct_status[i][j].get_flashed() == FlashStatus.JUST_FLASHED:\n return 1\n else:\n return 0\n\n # this function resets the energy to zero of any \n # octopuses whose energy was greater than nine\n # (this is designed to be used at the end of a step)\n def reset_to_zero(self):\n for i in range(len(self.oct_status)):\n for j in range(len(self.oct_status[i])):\n if self.oct_status[i][j].get_energy() > 9:\n self.oct_status[i][j].reset_energy()\n\n # this function increases the score of all\n # octopuses by one\n def increment_all_octs(self):\n for i in range(len(self.oct_status)):\n for j in range(len(self.oct_status[i])):\n self.increment_one_oct(i,j)\n\n # this function determines if flashes are necessary and then does them\n # if subsequent flashes become necessary after the first round, it triggers them too\n def flashes(self):\n while True:\n flash_count = 0\n # look for any octopuses that just flashed\n # if found, increment the adjacent octopuses\n for i in range(len(self.oct_status)):\n for j in range(len(self.oct_status[i])):\n if self.oct_status[i][j].get_flashed() == FlashStatus.JUST_FLASHED:\n self.oct_status[i][j].post_flashed()\n flash_count += self.increment_one_oct(i-1,j-1)\n flash_count += self.increment_one_oct(i-1,j)\n flash_count += self.increment_one_oct(i-1,j+1)\n flash_count += self.increment_one_oct(i,j-1)\n flash_count += self.increment_one_oct(i,j+1)\n flash_count += self.increment_one_oct(i+1,j-1)\n flash_count += 
self.increment_one_oct(i+1,j)\n flash_count += self.increment_one_oct(i+1,j+1)\n # stop repeating if no new flashes have happened\n if flash_count == 0:\n break\n\n # this function performs a single step (for parts a and b)\n # the return value (for part b only) is whether all octpuses are lit at this time\n # all other information passed to the calling program are embedded in the \n # octopus' oct_status data structure\n def perform_step(self):\n ret_val = None\n self.increment_all_octs()\n self.flashes()\n ret_val = self.are_all_octopuses_flashing()\n self.reset_to_zero()\n return ret_val\n\n\n# reading energy input from the input file\ninput_filename='input.txt'\noctopus = Octopus()\nwith open(input_filename) as f:\n # pull in each line from the input file\n for in_string in f:\n octopus.add_line(in_string.rstrip())\n\n# if desired, the initial state could be displayed\n# print()\n# print('Initial state:')\n# octopus.display()\n\nprint()\n\n# range parameter needs to be set high enough to deliver answers to a and b\nfor i in range(327):\n\n # this calls the code to perform the step\n # and it checks if all octopuses are lit up\n if octopus.perform_step():\n print('Step# ' + str(i+1) + ' has all octopuses illuminated')\n\n # add numbers to list for steps you wish to display\n # the step number and the whole grid of energies will be shown\n # you need to type in one less than the step number(s) to display in the list\n if i in [99, 323]: \n print('After step # ' + str(i+1) + ', the number of flashes is ', end='')\n print(octopus.get_flash_tally()) \n print()\n\n octopus.display()\n print()\n print()\n\n","repo_name":"LewisStaples/advent_of_code_2021","sub_path":"day11/day11.py","file_name":"day11.py","file_ext":"py","file_size_in_byte":7446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4703931894","text":"\"\"\"tasks URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
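# --- Editor's note (illustration): one perform_step() of the octopus simulation above, on a toy 2x2 grid. ---\n# energies [[9, 1], [1, 1]] -> increment all -> [[10, 2], [2, 2]]; the 10 flashes, so each neighbour gains\n# one -> [[10, 3], [3, 3]]; no neighbour exceeds 9, so the cascade stops and the flasher resets -> [[0, 3], [3, 3]].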
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom .views import TaskListView, TaskCreateView, TaskUpdateView, TaskDeleteView, signup\n\nurlpatterns = [\n url(r'^list/$', TaskListView.as_view(),name = \"task_list\"),\n url(r'^add/$', TaskCreateView.as_view(), name = \"task_add\"),\n url(r'^edit/(?P\\d+)/$', TaskUpdateView.as_view(), name='task_edit',),\n url(r'^delete/(?P\\d+)/$', TaskDeleteView.as_view(), name='task_delete',),\n url(r'^signup/$', signup, name='signup')\n]\n","repo_name":"jeremyklein/taskmaster","sub_path":"tasks/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27924221717","text":"##########################\n# Name : Minh Pham\n# Email: minh.pham@columbia.edu\n# File contains main flask framework for Sloan web app\n##########################\n\nfrom flask import Flask, render_template, request, redirect\nimport db\nimport second\n\n\napp = Flask(__name__)\n\n\n@app.route('/',methods=['GET','POST'])\ndef index():\n\t'''function to display the index page with checkboxes'''\n\tif request.method == 'GET':\t\n\t\tlabs = db.get_labels()\n\t\treturn render_template('checkbox_page.html',\\\n\t\t\tlabs_jobs = labs['job'],\\\n\t\t\tlabs_vol = labs['vol'], \\\n\t\t\tlabs_lei = labs['lei'],\\\n\t\t\tlabs_demo = labs['demo'],\\\n\t\t\tlabs_hlth = labs['hlth'],\\\n\t\t\tlabs_psysoc = labs['psysoc'],\\\n\t\t\tlabs_cogn = labs['cogn'])\n\t\t\n\telse:\n\t\tglobal x\n\t\tx = store_input()\n\t\treturn redirect('/table_results')\n\n\n@app.route('/table_results', methods = ['GET', 'POST'])\ndef table_results():\n\t'''function to return results bokeh web app'''\n\ttry: \n\t\tpage = second.build_app(db.obtain_results(x))\n\t\treturn render_template('app.html', snippet=page)\n\texcept:\n\t\treturn \"Sorry, somehow you have managed to break my script.\\\n\t\t If you have selected Interview Information for Leisure and Volunteering, \\\n\t\tthey cannot be displayed. 
Try something else!\"\n\n\ndef store_input():\n\t'''storing user input on index page\n\tfunction returns nothing'''\n\tselected = []\n\tlabs = ['job_gray','lei_gray','vol_gray','demo_gray',\\\n\t'hlth_gray','psysoc_gray','cogn_gray','search_lulu']\n\tfor lab in labs:\n\t\tif lab != 'search_lulu':\n\t\t\tselected.append(request.form.getlist(lab))\n\t\telse:\n\t\t\tselected.append(request.form[lab])\n\treturn selected\n\n\nif __name__ == \"__main__\":\n\tapp.run(debug=True) ","repo_name":"ptmminh/SloanApp","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70089440194","text":"#Thomas Thorpe\r\n#Pet Service Are You Sure Pop-Up\r\n\r\nfrom PyQt4.QtGui import *\r\n\r\nclass ConfirmationWindow(QDialog):\r\n def __init__(self, came_from, message):\r\n super(QDialog,self).__init__(came_from)\r\n self.setWindowTitle(\"Are You Sure?\")\r\n self.CreateConfirmationWindow(message)\r\n self.setLayout(self.confirmation_layout)\r\n\r\n def CreateConfirmationWindow(self, message):\r\n #create widgets\r\n self.message = QLabel(\"{0}\".format(message))\r\n self.yes_button = QPushButton(\"Yes\")\r\n self.no_button = QPushButton(\"No\")\r\n\r\n #create layout\r\n self.bottom_bar = QHBoxLayout()\r\n self.bottom_bar.addWidget(self.yes_button)\r\n self.bottom_bar.addWidget(self.no_button)\r\n\r\n self.confirmation_layout = QVBoxLayout()\r\n self.confirmation_layout.addWidget(self.message)\r\n self.confirmation_layout.addLayout(self.bottom_bar)\r\n\r\n #connections\r\n self.yes_button.clicked.connect(self.ReturnTrue)\r\n self.no_button.clicked.connect(self.ReturnFalse)\r\n\r\n def ReturnTrue(self):\r\n self.accept()\r\n\r\n def ReturnFalse(self):\r\n self.reject()\r\n","repo_name":"ThomasThorpe/PetMindingBusiness","sub_path":"Program/AreYouSureWindow.py","file_name":"AreYouSureWindow.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9104849909","text":"# input (real data, policy)\nimport torch\nimport numpy as np\n\n\nclass DataSet:\n def __init__(self, X):\n self.X = X\n\n def __len__(self):\n return len(self.X)\n\n def __getitem__(self, index):\n return self.X[index]\n\n\n\ndef logireg_loss(de_data, nu_data): # weight decay around 0.1\n # min [- (1/N_de)*sum_{j} log (1/(1+r(x_de^j))) - (1/N_nu)*sum_{i} log (r(x_nu^i)/(1+r(x_nu^i))) ]\n return -torch.sum(-torch.log(1.+de_data))/de_data.shape[0] -torch.sum(-torch.log(1.+nu_data) + torch.log(nu_data))/nu_data.shape[0]\ndef ulsif_loss(de_data, nu_data):\n # min [(0.5/N_de)*sum_{j} r(x_de^j)*r(x_de^j) - (1/N_nu)*sum_{i} r(x_nu^i) ]\n return 0.5*torch.sum((de_data)**2)/de_data.shape[0] - torch.sum(nu_data)/nu_data.shape[0]\n\n\ndef rulsif_loss(de_data, nu_data, alpha=0.1):\n # min [(0.5/N_de)*(1-alpha)*sum_{j} r(x_de^j)*r(x_de^j) + (0.5/N_nu)*alpha*sum_{i} r(x_nu^i)*r(x_nu^i) - (1/N_nu)*sum_{i} r(x_nu^i) ]\n return 0.5*(1.-alpha)*torch.sum(de_data**2)/de_data.shape[0] + 0.5*alpha*torch.sum(nu_data**2)/nu_data.shape[0] - torch.sum(nu_data)/nu_data.shape[0]\n\n\n#def kliep_loss(de_data, nu_data):\n# # min [- (1/N_nu)*sum_{i} log r(x_nu^i) + (1/N_de)*sum_{j}r(x_de^j)]\n# return - torch.sum(torch.log(nu_data))/nu_data.shape[0] + torch.sum(de_data)/de_data.shape[0]\n\n\ndef normalized_logireg_loss(de_data, nu_data):\n # r(x) = c*q(x) is density ratio\n # s.t. 1=(1/N_de)*c*sum_{j}q(x_de^j) <==> s.t. 
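    # --- Editor's note (illustration): numeric sanity check for logireg_loss above. ---\n    # With de_data = torch.tensor([1.0]) and nu_data = torch.tensor([1.0]):\n    #   first term  = -(-log(1 + 1)) / 1          = log 2\n    #   second term = -(-log(1 + 1) + log(1)) / 1 = log 2\n    # so logireg_loss = 2 * log 2 ~= 1.386; the loss falls as the model pushes r(x_nu) up and r(x_de) down.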
[c=N_de/(sum_{j}q(x_de^j))]\n    # min [- (1/N_de)*sum_{j} log (1/(1+c*q(x_de^j))) - (1/N_nu)*sum_{i} log (c*q(x_nu^i)/(1+c*q(x_nu^i))) ] s.t. [1=(1/N_de)*c*sum_{j}q(x_de^j)]\n    c = de_data.shape[0]/torch.sum(de_data)\n    return -torch.sum(-torch.log(1.+c*de_data))/de_data.shape[0] -torch.sum(-torch.log(1.+c*nu_data) + torch.log(c*nu_data))/nu_data.shape[0]\n\n#def normalized_lsif_loss(de_data, nu_data):\n#    # min [(0.5/N_de)*sum_{j} c*c*q(x_de^j)*q(x_de^j) - (1/N_nu)*sum_{i} c*q(x_nu^i) ] s.t. [1=(1/N_de)*c*sum_{j}q(x_de^j)]\n#    # min c*[0.5*sum_{j}q(x_de^j)*q(x_de^j)/(sum_{j'}q(x_de^{j'}) - (1/N_nu)*sum_{i} q(x_nu^i) ] s.t. [1=(1/N_de)*c*sum_{j}q(x_de^j)]\n#    c = de_data.shape[0]/torch.sum(de_data)\n#    return 0.5*torch.sum((c*de_data)**2)/de_data.shape[0] - torch.sum(c*nu_data)/nu_data.shape[0]\n#def normalized_kliep_loss(de_data, nu_data):\n#    # min [- (1/N_nu)*sum_{i} log c*q(x_nu^i) + (1/N_de)*sum_{j}r(x_de^j)] s.t. [1=(1/N_de)*c*sum_{j}q(x_de^j)]\n#    # <==> min [- (1/N_nu)*sum_{i} log q(x_nu^i) - log c ]\n#    # <==> min [- (1/N_nu)*sum_{i} log q(x_nu^i) + log(sum_{j}q(x_de^j)) ]\n#    return - torch.sum(torch.log(nu_data))/nu_data.shape[0] + torch.log(torch.sum(de_data))\n\n\n\nfrom model import RatioModel\n\nclass RatioEstimation():\n    def __init__(self, real_data, H=16):\n\n        self.real_data = torch.from_numpy(real_data.astype(np.float32)).clone()\n        self.input_dim = real_data.shape[1]\n        self.real_data_num = real_data.shape[0]\n        self.H = H\n\n        \n        self.model = RatioModel(self.input_dim, self.H)\n\n        #self.loss_fn = rulsif_loss\n        #self.loss_fn = normalized_logireg_loss\n        self.loss_fn = logireg_loss\n        #self.loss_fn = ulsif_loss\n\n        print(\"[dens] input_dim =\",self.input_dim)\n        print(\"[dens] real_data_num =\",self.real_data_num)\n\n\n    def load_sim_data(self, sim_data):\n        self.sim_data = torch.from_numpy(sim_data.astype(np.float32)).clone()\n        self.sim_data_num = sim_data.shape[0]\n        print(\"[dens] sim_data_num =\",self.sim_data_num)\n\n    def train_model(self, num_iter=1000, lr=1e-3, weight_decay=.01, holdout_ratio=0.0):\n        #self.model = RatioModel(self.input_dim, self.H)\n        real_index = np.arange(self.real_data.shape[0])\n        np.random.shuffle(real_index)\n        sim_index = np.arange(self.sim_data.shape[0])\n        np.random.shuffle(sim_index)\n\n        real_train_data = self.real_data[real_index[int(holdout_ratio*self.real_data.shape[0]):]]\n        sim_train_data = self.sim_data[sim_index[int(holdout_ratio*self.sim_data.shape[0]):]]\n\n        train_real_dataset = DataSet(real_train_data)\n        temp_real_loader = torch.utils.data.DataLoader(train_real_dataset, batch_size=32, shuffle=True, drop_last=True)\n\n        if holdout_ratio>0.001:\n            real_test_data = self.real_data[real_index[:int(holdout_ratio*self.real_data.shape[0])]]\n            sim_test_data = self.sim_data[sim_index[:int(holdout_ratio*self.sim_data.shape[0])]]\n\n        train_sim_dataset = DataSet(sim_train_data)\n        temp_sim_loader = torch.utils.data.DataLoader(train_sim_dataset, batch_size=32, shuffle=True, drop_last=True)\n\n\n        optimizer = torch.optim.Adam(self.model.parameters(), lr=lr, weight_decay=weight_decay)\n\n        best_loss = 1.e12\n        update_num = 0\n        print(\"weight_decay\",weight_decay)\n        for epoch in range(num_iter):\n\n            train_loss =0.\n            for nu__data in temp_sim_loader:\n                optimizer.zero_grad()\n                de__data = next(iter(temp_real_loader))\n                loss = self.loss_fn(self.model(de__data), self.model(nu__data))\n                loss.backward()\n                optimizer.step()\n                train_loss += loss.item() * de__data.shape[0]\n\n            if holdout_ratio>0.001:\n                with torch.no_grad():\n                    valid_loss = self.loss_fn(self.model(real_test_data), 
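                    # validation loss on the held-out split: real data supplies the denominator (de) samples and simulated data the numerator (nu) samples, matching the (de_data, nu_data) signature of the losses defined above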
self.model(sim_test_data)).numpy()\n if best_loss>valid_loss:\n update_num = 0\n best_loss = valid_loss\n else:\n update_num += 1\n print(\"epoch, valid_loss, update_num\", epoch, valid_loss, update_num)\n else:\n if best_loss>train_loss:\n update_num = 0\n best_loss = train_loss\n else:\n update_num += 1\n print(\"epoch, train_loss, update_num\", epoch, train_loss, update_num)\n\n if update_num>20:\n break\n\n\n\n\n def output_weight(self):\n unnormalized_density_ratio = (torch.nn.functional.relu(self.model(self.real_data))).detach().numpy()\n #return unnormalized_density_ratio\n return unnormalized_density_ratio / unnormalized_density_ratio.mean()\n\n def save_model(self, filename='temp_ratio_model.pt'):\n torch.save(self.model, filename)\n\n def load_model(self, filename='temp_ratio_model.pt'):\n self.model = torch.load(filename)\n\n\n\nif __name__ == '__main__':\n\n obac_data = np.loadtxt('np_obac.csv',delimiter=',')\n sim_data_example = np.loadtxt(\"np_obac_simulation.csv\",delimiter=',' )\n ratio_model = RatioEstimation(obac_data)\n ratio_model.load_sim_data(sim_data_example)\n\n ratio_model.train_model()\n\n np.savetxt('weight.csv',ratio_model.output_weight(),delimiter=',')\n import matplotlib.pyplot as plt\n plt.plot(ratio_model.output_weight())\n plt.show()\n\n","repo_name":"numahha/wmopo","sub_path":"pendulum_experiments/ratio_estimation.py","file_name":"ratio_estimation.py","file_ext":"py","file_size_in_byte":6896,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"40220012783","text":"import mne\nimport numpy as np\nimport os\nimport sys\nimport scipy\nfrom scipy import signal\nimport pickle as pkl\n\nSR = 1024\nPRES_TIME = 2.5\nIMAGE_START = SR*3\n\n#get list of bdf objects\ndatadir = sys.argv[1]\nclassfile = sys.argv[2]\n\nprint('Using data directory: %s' % datadir)\nprint('Class file: %s' % classfile)\n\ndata_files = []\nfilenames = []\nnot_read = []\nmarkers = [] #list of lists of event onset markers\n\nprint('Extracting data from BDFs')\nfor f in os.listdir(datadir):\n if 'bdf' in f and not 'mrk' in f:\n keep = True\n try:\n data_files.append(mne.io.read_raw_edf(os.path.join(datadir,f)))\n except UnicodeDecodeError:\n not_read.append(f)\n keep = False\n pass\n if keep:\n filenames.append(f)\n\n#get raw data out of bdfs\ndata_arrs = []\nfor f in data_files:\n data_arrs.append(f.get_data())\n\nprint('Number of data arrays: %d' % len(data_arrs))\n\n#read in markers\nprint('Reading event markers')\nfor f in os.listdir(datadir):\n if 'mrk' in f and not any([f[:-4] in i for i in not_read]):\n mark = []\n fp = open(os.path.join(datadir,f), 'rb')\n lines = fp.readlines()\n for l in range(1,len(lines)):\n parts = lines[l].split()\n if '254' in str(parts[-1]) and '255' in str(lines[l-1].split()[-1]):\n mark.append(int(parts[0].decode(\"utf-8\")))\n markers.append(mark)\n\nprint('Number of event markers: %s' % np.shape(markers))\n\n#get classes, save dict with one-hots for lookup\nprint('Reading in class labels')\nclasses = [[], [], []]\ncf = open(classfile, 'rb')\ncl = cf.readlines()\nfor cc in cl:\n cp = cc.split()\n classes[0].append(cp[0].decode(\"utf-8\"))\n classes[1].append(cp[1].decode(\"utf-8\"))\n classes[2].append(cp[2].decode(\"utf-8\"))\nclassdict = {}\nclassdict['Pos'] = [1,0,0]\nclassdict['Neg'] = [0,1,0]\nclassdict['Calm'] = [0,0,1]\n\n#slice up data arrays into event-based windows\nmaster_X_raw = []\nmaster_Y = []\n#loop over each separate session datafile\nprint('Slicing data by event windows')\nfor s in 
range(len(data_arrs)):\n print('Session %d' % s)\n session = 0\n if 'SES2' in filenames[s]:\n session = 1\n elif 'SES3' in filenames[s]:\n session = 2\n m = markers[s]\n #loop over block start points (30 blocks)\n for time in range(len(m)):\n print('Block %d' % time)\n start = m[time] + IMAGE_START\n print('Got start time %d' % start)\n for im in range(5):\n print('Image pres %d' % im)\n st = start + int(im*SR*PRES_TIME)\n end = start + int(SR*PRES_TIME*(im+1))\n data_window = data_arrs[s][:,st:end]\n print('Checking data window slice:')\n print(np.shape(data_window))\n if np.shape(data_window)[1] > 0:\n print('Nonzero array - added')\n master_X_raw.append(data_window)\n cl = classes[session][time]\n class_vec = classdict[cl]\n master_Y.append(class_vec)\n\n#perform Welch-method spectral power analysis on event windows\nmaster_X = []\nprint('Performing power spectrum analysis')\nfor ev in master_X_raw:\n #find data center, get rid of any outlier channels\n print('Event-windowed array shape:')\n print(np.shape(ev))\n ev_av = np.average(ev,axis=0)\n m_av = np.mean(ev_av)\n m_std = np.std(ev_av)\n mark = np.zeros((1,len(ev))).astype('bool')\n for l in range(len(ev)):\n if np.average(ev[l]) > m_av + 3*m_std:\n mark[0][l] = True\n cleaned = [ev[i] for i in range(len(ev)) if not mark[0][i]]\n av_cl = np.average(cleaned,axis=0)\n #ww, Pxx = scipy.signal.welch(av_cl, fs=SR)\n #feat = Pxx/np.average(Pxx)\n master_X.append(av_cl)\n\nprint('Number of features extracted: %d' % len(master_X))\nprint('Number of class vectors extracted: %d' % len(master_Y))\npkl.dump(master_X, open('features.pkl','wb'))\npkl.dump(master_Y, open('classes.pkl','wb'))\n","repo_name":"pitchaim/EmotionDecoding","sub_path":"extract_data.py","file_name":"extract_data.py","file_ext":"py","file_size_in_byte":3943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18019432021","text":"import json\n\n# Load the JSON data\nwith open('./net/network.json', 'r') as f:\n data = json.load(f)\n\n# Create a dictionary to store the max values for each source\nmax_values = {}\n\n# Loop through the links to find the max value for each source\nfor link in data['links']:\n source = link['source']\n value = link['value']\n if source in max_values:\n max_values[source].append(value)\n else:\n max_values[source] = [value]\n\n# Loop through the links again to filter out those with values less than the top 3\nfiltered_links = []\nfor link in data['links']:\n source = link['source']\n value = link['value']\n if len(max_values[source]) > 3 and value < sorted(max_values[source], reverse=True)[2]:\n continue\n filtered_links.append(link)\n\n# Create a new dictionary with the filtered links\nfiltered_data = {\n 'nodes': data['nodes'],\n 'links': filtered_links\n}\n\n# Save the filtered data to a new JSON file\nwith open('./net/filtered_network.json', 'w') as f:\n json.dump(filtered_data, f, indent=4)\n","repo_name":"ChuBL/bcb504-blog","sub_path":"posts/final/net/trim_links.py","file_name":"trim_links.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30692070384","text":"'''\nCreated on 5 May 2012\n\n@author: Carnage\n'''\n\nfrom spider.spider import spider\nfrom spider.plugins import titleTest\n\n#list of plugins to use\nplugins = [titleTest.titleTest()]\n\n#blacklist these absolute urls - any urls in this list will not be visited.\nblacklistUrls = ['http://www.example.com/forum']\n\ntest = 
spider(plugins, blacklistUrls)\ntest.spider('http://www.example.com')\n\ntest.report()","repo_name":"carnage/pySpider","sub_path":"driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} {"seq_id":"26530696932","text":"# -*- coding:utf-8 -*-\n# @Time : 2020/9/6 16:04 \n# @Author : bendan50\n# @File : Q1005-largest-sum-after-k-negations.py \n# @Function : maximize the array sum after K negations\n# Given an integer array A, we may only modify it as follows: choose some index i and replace A[i] with -A[i],\n# then repeat this process K times in total. (The same index i may be chosen multiple times.)\n# After modifying the array in this way, return the largest possible sum of the array.\n# Example 1:\n# Input: A = [4,2,3], K = 1\n# Output: 5\n# Explanation: choose index (1,), and A becomes [4,-2,3].\n# Example 2:\n# Input: A = [3,-1,0,2], K = 3\n# Output: 6\n# Explanation: choose indices (1, 2, 2), and A becomes [3,1,0,2].\n# Example 3:\n# Input: A = [2,-3,-1,5,-4], K = 2\n# Output: 13\n# Explanation: choose indices (1, 4), and A becomes [2,3,-1,5,4].\n# Constraints:\n# 1 <= A.length <= 10000\n# 1 <= K <= 10000\n# -100 <= A[i] <= 100\n# @Software: PyCharm\n\nfrom typing import List\n\nclass Solution:\n    def largestSumAfterKNegations(self, A: List[int], K: int) -> int:\n        \"\"\"\n        Approach: sort A first, flip every negative value to positive, and finally, if K still\n        has uses left and is odd, flip the sign of the smallest value.\n        :param A:\n        :param K:\n        :return:\n        \"\"\"\n        A.sort()\n        idx = 0\n        while A[idx] < 0 and K > 0:\n            A[idx] = -A[idx]\n            K -= 1\n            idx += 1\n        if K % 2 == 0: # K == 0 means no flips are left, so return the sum; a nonzero even K means A is already all non-negative\n            return sum(A)\n        else:\n            if idx == 0:\n                A[idx] = -A[idx]\n                return sum(A)\n            else:\n                min_idx = idx if A[idx] < A[idx - 1] else idx - 1\n                A[min_idx] = -A[min_idx]\n                return sum(A)","repo_name":"EAGLE50/LearnLeetCode","sub_path":"2020-09/Q1005-largest-sum-after-k-negations.py","file_name":"Q1005-largest-sum-after-k-negations.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"2083156487","text":"import tqdm\nimport time\nimport random\nimport torch \nimport sys\nfrom models.joint_constrain_model import *\nfrom data_loader.data_loaders import joint_constrained_loader\nfrom JointConstrain_main import EXP\nimport csv\nfrom utils.tools import format_time, CUDA\n\n\ntorch.manual_seed(42)\nfinetune = True\n# Read parameters\nrst_file_name = \"00001.rst\"\n# Restore model\nmodel_params_dir = \"./model_params/\"\nHiEve_best_PATH = model_params_dir + \"HiEve_best/\" + rst_file_name.replace(\".rst\", \".pt\")\nMATRES_best_PATH = model_params_dir + \"MATRES_best/\" + rst_file_name.replace(\".rst\", \".pt\")\nI2B2_best_PATH = model_params_dir + \"I2B2_best/\" + rst_file_name.replace(\".rst\", \".pt\") \n\ndef count_parameters(model):\n    return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\ndataset = \"Joint\"\nadd_loss = 1\nepochs = 32\nbatch_size = 16\n\n# Use Optuna to select the best hyperparameters\nimport optuna\nfrom timeit import default_timer as timer\ninteraction = 0\ndef objective(trial): \n    params ={\n        \"downsample\": trial.suggest_float(\"downsample\", 0.01, 0.2),\n        \"learning_rate\": trial.suggest_float(\"learning_rate\", 1e-6, 1e-2, log=True),\n        'lambda_annoT': trial.suggest_float('lambda_annoT', 0.0, 1.0),\n        'lambda_annoH': trial.suggest_float('lambda_annoH', 0.0, 1.0),\n        'lambda_transT': trial.suggest_float('lambda_transT', 0.0, 1.0),\n        'lambda_transH': trial.suggest_float('lambda_transH', 0.0, 1.0),\n        'lambda_cross': trial.suggest_float('lambda_cross', 0.0, 1.0),\n        'MLP_size': trial.suggest_categorical(\"MLP_size\", [512, 256, 768]),\n        'num_layers': trial.suggest_int(\"num_layers\", 1, 3),\n        'lstm_hidden_size': trial.suggest_categorical(\"lstm_hidden_size\", [512, 256]),\n
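        # note: the 768 values below match the hidden size of RoBERTa-base, whose encoder output feeds the LSTM; a single-option categorical keeps the value logged by Optuna while effectively fixing it rather than tuning it\n        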
'roberta_hidden_size': trial.suggest_categorical(\"roberta_hidden_size\", [768]),\n 'lstm_input_size': 768,\n }\n\n global interaction\n interaction += 1\n start = timer()\n train_dataloader, valid_dataloader_MATRES, test_dataloader_MATRES, valid_dataloader_HIEVE, test_dataloader_HIEVE, valid_dataloader_I2B2, test_dataloader_I2B2, num_classes = joint_constrained_loader(dataset, params['downsample'], batch_size)\n \n model = roberta_mlp(num_classes, dataset, add_loss, params)\n if CUDA:\n model.cuda()\n model.zero_grad()\n print(\"# of parameters:\", count_parameters(model))\n model_name = rst_file_name.replace(\".rst\", \"\") # to be designated after finding the best parameters\n total_steps = len(train_dataloader) * epochs\n print(\"Total steps: [number of batches] x [number of epochs] =\", total_steps)\n\n # Total number of training steps is [number of batches] x [number of epochs]. \n # (Note that this is not the same as the number of training samples).\n if dataset == \"MATRES\":\n total_steps = len(train_dataloader) * epochs\n print(\"Total steps: [number of batches] x [number of epochs] =\", total_steps)\n matres_exp = EXP(model, epochs, params['learning_rate'], \n train_dataloader, valid_dataloader_MATRES, test_dataloader_MATRES,\n None, None,\n None, None, \n finetune, dataset, MATRES_best_PATH, None, None, None, model_name)\n T_F1, H_F1, I_F1 = matres_exp.train()\n matres_exp.evaluate(eval_data = \"MATRES\", test = True)\n if dataset == \"I2B2\":\n total_steps = len(train_dataloader) * epochs\n print(\"Total steps: [number of batches] x [number of epochs] =\", total_steps)\n i2b2_exp = EXP(model, epochs, params['learning_rate'], \n train_dataloader, None, None,\n valid_dataloader_I2B2, test_dataloader_I2B2,\n valid_dataloader_HIEVE, test_dataloader_HIEVE, \n finetune, dataset, None, I2B2_best_PATH, None, None, model_name)\n T_F1, H_F1, I_F1 = i2b2_exp.train()\n i2b2_exp.evaluate(eval_data = \"I2B2\", test = True)\n elif dataset == \"HiEve\":\n total_steps = len(train_dataloader) * epochs\n print(\"Total steps: [number of batches] x [number of epochs] =\", total_steps)\n hieve_exp = EXP(model, epochs, params['learning_rate'], \n train_dataloader, None, None,\n None, None,\n valid_dataloader_HIEVE, test_dataloader_HIEVE, \n finetune, dataset, None, None, HiEve_best_PATH, None, model_name)\n T_F1, H_F1, I_F1 = hieve_exp.train()\n hieve_exp.evaluate(eval_data = \"HiEve\", test = True)\n elif dataset == \"Joint\":\n total_steps = len(train_dataloader) * epochs\n print(\"Total steps: [number of batches] x [number of epochs] =\", total_steps)\n joint_exp = EXP(model, epochs, params['learning_rate'], \n train_dataloader, valid_dataloader_MATRES, test_dataloader_MATRES,\n valid_dataloader_I2B2, test_dataloader_I2B2,\n valid_dataloader_HIEVE, test_dataloader_HIEVE, \n finetune, dataset, MATRES_best_PATH, I2B2_best_PATH, HiEve_best_PATH, None, model_name)\n T_F1, H_F1, I_F1 = joint_exp.train()\n joint_exp.evaluate(eval_data = \"HiEve\", test = True)\n joint_exp.evaluate(eval_data = \"MATRES\", test = True)\n joint_exp.evaluate(eval_data=\"I2B2\", test=True)\n else:\n raise ValueError(\"Currently not supporting this dataset! 
-_-'\")\n \n print(f'Iteration {interaction} result: MATRES F1: {T_F1}; HiEve F1: {H_F1}; I2B2 F1: {I_F1}')\n \n run_time = format_time(timer() - start)\n \n # Write to the csv file ('a' means append)\n return T_F1, H_F1, I_F1\n\nstudy = optuna.create_study(direction='maximize')\nstudy.optimize(objective, n_trials=30)\ntrial = study.best_trial\n\nprint('Accuracy: {}'.format(trial.value))\nprint(\"Best hyperparameters: {}\".format(trial.params))\n\n\n\n","repo_name":"ManHieu/EventEventRelations","sub_path":"JointConstrain_main.py","file_name":"JointConstrain_main.py","file_ext":"py","file_size_in_byte":5872,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"40032387913","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nimport GPy\n\n# Make 2D data, sample size 10\nx = np.random.rand(10, 1)\ny = x.copy()\n\nkernel = GPy.kern.RBF(1)\nmodel = GPy.models.GPRegression(x, y, kernel)\nmodel.optimize()\n\n# Plot variance\n\nplot_x = np.linspace(0, 1, 200)\nmu, s2 = model.predict(plot_x.reshape((200, 1)))\nplt.plot(plot_x, s2.flatten())\nplt.show()\n'''\nmodel.plot()\nplt.show()\n'''\n","repo_name":"dnreshef/CausalNetworkLearning","sub_path":"spring16/bug_example.py","file_name":"bug_example.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70549134274","text":"T = int(input())\nfor tc in range(1, T + 1):\n N, M = map(int, input().split())\n arr = [list(map(int, input().split())) for i in range(N)]\n s = 0\n for i in range(N - M + 1):\n for j in range(N - M + 1):\n temp = 0\n for r in range(i, i+M):\n for c in range(j, j+M):\n if (r-i) % 2 == 0 and (c-j) % 2 == 1:\n temp += arr[r][c]\n elif (r-i) % 2 == 1 and (c-j) % 2 == 0:\n temp += arr[r][c]\n if temp > s:\n s = temp\n print('#{} {}'.format(tc, s))","repo_name":"bumbum9944/bumpycharm","sub_path":"fly/김기범_모자이크.py","file_name":"김기범_모자이크.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71037871555","text":"import random\nfrom datetime import datetime\nfrom sortedcontainers import SortedList\nfrom pybloom import BloomFilter\n\ndef readData(fileName): \n f = open(fileName, \"r\")\n if f.mode == \"r\": \n contents = f.read()\n # print(contents)\n return contents\n f.close()\n \ndef generateTestData(queryPositivePercentage, querySize, sampleSize, maxUniverse): \n uncompressedList = []\n queryList = SortedList([])\n negativeList = []\n samplePercentage = sampleSize * 100 / maxUniverse\n queryPercentage = querySize * 100 / sampleSize\n random.seed(datetime.now())\n for x in range(1, maxUniverse): \n if (random.randint(1, 100) <= samplePercentage): \n uncompressedList.append(x)\n if (random.randint(1, 100) <= queryPercentage): \n\n if (random.randint(1, 100) <= queryPositivePercentage): \n queryList.add(x)\n elif (len(negativeList) > 0): \n # you add some random element from the negative list\n randomIndex = random.randrange(len(negativeList))\n queryList.add(negativeList.pop(randomIndex))\n else: \n negativeList.append(x)\n return (uncompressedList, list(queryList.islice(0, len(queryList))))\n\ndef main(): \n uncompressedList, queryList = generateTestData(20, 1000, 100000, 10000000)\n # print(uncompressedList)\n # print(\"\\n\\n\\n\\n\\n\\n\")\n # print(queryList)\n print(len(uncompressedList), len(queryList))\n f = BloomFilter(capacity=1000, error_rate=0.001)\n for x in 
range(10): \n f.add(x)\n print(10 in f)\n print(5 in f)\n\nif __name__ == '__main__':\n main()","repo_name":"yzhang0927/BloomFilterProject","sub_path":"python-bloomfilter/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6065634171","text":"#! usr/bin/env python3\n\nimport random\n\n\"\"\"\n2.3\n\nAdaptive Random Search was designed to address the limitations\nof the fixed step size in the Localized Random Search algorithm.\nThe strategy for Adaptive Random Search is to continually approximate\nthe optimal step size required to reach the global optimum in the search\nspace. This is achieved by trialling and adopting smaller or larger step sizes\nonly if they result in an improvement in the search performance.\n\nThe strategy of Adaptive Random Search is to trial a larger step in each\niteration and adopt the larger step if it results in an improved result. Very\nlarge step sizes are trialled in the same manner although with a much lower\nfrequency. This strategy of preferring large moves is intended to allow the\ntechnique to escape local optima. Smaller step sizes are adopted if no\nimprovement is made for an extended period.\n\nThe example problem below is similar to the one solved by Random Search [2.2].\n\n@author Chad Estioco\n\"\"\"\n\ndef objective_function(vector):\n\t\"\"\"\n\tSimilar to the one in [2.2]\n\t\"\"\"\n\tsum = 0\n\t\n\tfor val in vector:\n\t\tsum += val ** 2\n\t\n\treturn sum\n\ndef rand_in_bounds(minimum, maximum):\n\treturn minimum + ((maximum - minimum) * random.random())\n\ndef random_vector(minmax):\n\t\"\"\"\n\t_Essentially_ similar to the one in [2.2]\n\t\"\"\"\n\ti = 0\n\tlimit = len(minmax)\n\trandom_vector = [0 for i in range(limit)]\n\t\n\tfor i in range(limit):\n\t\trandom_vector[i] = rand_in_bounds(minmax[i][0], minmax[i][1])\n\t\n\treturn random_vector\n\ndef take_step(minmax, current, step_size):\n\tlimit = len(current)\n\tposition = [0 for i in range(limit)]\n\t\n\tfor i in range(limit):\n\t\tminimum = max(minmax[i][0], current[i] - step_size)\n\t\tmaximum = min(minmax[i][1], current[i] + step_size)\n\t\tposition[i] = rand_in_bounds(minimum, maximum)\n\t\n\treturn position\n\ndef large_step_size(iter_count, step_size, s_factor, l_factor, iter_mult):\n\tif iter_count > 0 and iter_count % iter_mult == 0:\n\t\treturn step_size * l_factor\n\telse:\n\t\treturn step_size * s_factor\n\ndef take_steps(bounds, current, step_size, big_stepsize):\n\tstep, big_step = {}, {}\n\tstep[\"vector\"] = take_step(bounds, current[\"vector\"], step_size)\n\tstep[\"cost\"] = objective_function(step[\"vector\"])\n\tbig_step[\"vector\"] = take_step(bounds, current[\"vector\"], big_stepsize)\n\tbig_step[\"cost\"] = objective_function(big_step[\"vector\"])\n\treturn step, big_step\n\ndef search(max_iter, bounds, init_factor, s_factor, l_factor, iter_mult, max_no_impr):\n\tstep_size = (bounds[0][1] - bounds[0][0]) * init_factor\n\tcurrent, count = {}, 0\n\tcurrent[\"vector\"] = random_vector(bounds)\n\tcurrent[\"cost\"] = objective_function(current[\"vector\"])\n\t\n\tfor i in range(max_iter):\n\t\tbig_stepsize = large_step_size(i, step_size, s_factor, l_factor, iter_mult)\n\t\tstep, big_step = take_steps(bounds, current, step_size, big_stepsize)\n\t\t\n\t\tif step[\"cost\"] <= current[\"cost\"] or big_step[\"cost\"] <= current[\"cost\"]:\n\t\t\tif big_step[\"cost\"] <= step[\"cost\"]:\n\t\t\t\tstep_size, current = big_stepsize, 
big_step\n\t\t\telse:\n\t\t\t\tcurrent = step\n\t\t\t\n\t\t\tcount = 0\n\t\telse:\n\t\t\tcount += 1\n\t\t\t\n\t\t\tif count >= max_no_impr:\n\t\t\t\tcount, step_size = 0, (step_size/s_factor)\n\t\t\n\t\tprint(\"Iteration \" + str(i) + \": best = \" + str(current[\"cost\"]))\n\t\n\treturn current\n\nif __name__ == \"__main__\":\n\t# problem configuration\n\tproblem_size = 2\n\tbounds = [[-5, 5] for i in range(problem_size)]\n\t\n\t# algorithm configuration\n\tmax_iter = 1000\n\tinit_factor = 0.05\n\ts_factor = 1.3\n\tl_factor = 3.0\n\titer_mult = 10\n\tmax_no_impr = 30\n\t\n\t# execute the algorithm\n\tbest = search(max_iter, bounds, init_factor, s_factor, l_factor, iter_mult, max_no_impr)\n\tprint(\"Done. Best Solution: cost = \" + str(best[\"cost\"]) + \", v = \" + str(best[\"vector\"]))\n","repo_name":"mostafaashraf413/CleverAlgorithms-Python","sub_path":"python/stochastic/adaptive_random_search.py","file_name":"adaptive_random_search.py","file_ext":"py","file_size_in_byte":3654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} {"seq_id":"33081425156","text":"classes = [\n    [ \"Under weight\", 18.5 ],\n    [ \"Normal weight\", 24.9 ],\n    [ \"Over weight\", 29.9 ],\n    [ \"Obese (Class-I)\", 34.9 ],\n    [ \"Obese (Class-II)\", 39.9 ],\n    [ \"Obese (Class-III)\", float(\"inf\") ] \n]\n\nprint(\"Find out your bmi here! Now! (only if u use metric units)\\n\")\n\nweight = float(input(\"Your Weight (kg): \"))\nheight = float(input(\"Your Height (m): \"))\n\nbmi = weight / (height**2)\n\nfor c in classes:\n    if bmi < c[1]:\n        bmi_class = c[0]\n        break\n\nprint(\"\\nWe have a winner! Your BMI is %.2f. Your BMI tells me that you are- %s.\" % (bmi, bmi_class))\n\n","repo_name":"shleen/bmi-a","sub_path":"bmi.py","file_name":"bmi.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} {"seq_id":"3455225887","text":"from django.shortcuts import render\n\n# Create your views here.\nimport json,os,uuid,datetime,base64\nfrom django.views import View\nfrom django.http import HttpRequest, HttpResponse, JsonResponse\nfrom django.http import StreamingHttpResponse\nfrom django.db import connection,connections\n\nfrom utils.get_sources import get_source, get_source_by_id\nfrom utils.tuling_answer import get_tuling_answer\nfrom .models import SourcesCore\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status,mixins,generics,viewsets,filters\nfrom rest_framework.pagination import PageNumberPagination\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.throttling import AnonRateThrottle,UserRateThrottle\n\nfrom aip import AipOcr\nfrom helloD2.settings import BAIDU_APP_ID,BAIDU_API_KEY,BAIDU_SECRET_KEY\n\nimport pandas as pd\n\nsysfile = os.path.abspath('.')\n\n\nclass WechatTalk(View):\n    def get(self, request):\n        return render(request, \"sources/talk.html\", {})\n\n\n\nclass QueryWechat(View):\n    def get(self, request):\n        return render(request, \"sources/talk.html\", {})\n\n    def post(self, request):\n        source = request.POST.get(\"msg\")\n        num_id = int(request.POST.get(\"num_id\", -1))\n        content = []\n        if num_id != -1:\n            data_dict = get_source_by_id(num_id)\n            for k, v in data_dict.items():\n                this_value = \"{0}的百度云盘<a href='{1}'>{2}</a>\".format(v[\"sourcename\"], v[\"sourceurl\"], v[\"sourcedesc\"])\n                content.append(this_value)\n        else:\n            data_count, data_dict = get_source(source)\n            if data_count > 1:\n
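                # NOTE: multiple matches -- reply with an HTML list of links; the href pattern below ('?num_id={0}') is an assumed reconstruction, since the original anchor markup did not survive extraction\n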
content.append(\"相关类似资源如下:
\")\n for k, v in data_dict.items():\n this_value = \"{2}
\".format(\n v[\"id\"], v[\"sourcename\"], v[\"sourcename\"])\n content.append(this_value)\n elif data_count == 1:\n for k, v in data_dict.items():\n this_value = \"{0}的百度云盘{2}\".format(v[\"sourcename\"], v[\"sourceurl\"], v[\"sourcedesc\"])\n content.append(this_value)\n else:\n tuling_answer = get_tuling_answer(source)\n content.append(tuling_answer)\n content = '\\n'.join(content)\n try:\n reginfs = {\n \"code\": 200,\n \"message\": \"success\",\n \"data\": content\n }\n except:\n reginfs = {\n \"code\": 400,\n \"message\": \"failed\",\n \"data\": \"失败\"\n }\n return JsonResponse(reginfs, content_type='application/json')\n\n\nclass SourcesUpload(View):\n def get(self, request):\n print(request.GET)\n try:\n reginfs = {\n \"code\": 400,\n \"message\": \"success\",\n \"data\": \"hello\"\n }\n except:\n reginfs = {\n \"code\": 200,\n \"message\": \"failed\",\n \"data\": \"注册失败\"\n }\n return HttpResponse(json.dumps(reginfs), content_type='application/json')\n\n def post(self, request):\n print()\n # 上传图片的处理\n try:\n stick_img=request.POST.get(\"stick_img\",False)\n sysfile = os.path.abspath('.')\n unknown_img_uuid = (str(uuid.uuid1())).replace(\"-\", \"\")\n imgpath = unknown_img_uuid\n unknownimgpath = sysfile + '/static/img2word/' + imgpath + '.jpg'\n request.session[\"imgpath\"] = imgpath\n if stick_img:\n img_path = base64.b64decode(stick_img.split(',')[-1])\n with open(unknownimgpath, 'wb') as f:\n f.write(img_path)\n reginfs = {\n \"code\": 400,\n \"message\": \"success\",\n \"data\": \"hello\"\n }\n else:\n f = request.FILES[\"file\"]\n with open(unknownimgpath, 'wb+') as destination:\n for chunk in f.chunks():\n destination.write(chunk)\n reginfs = {\n \"code\": 400,\n \"message\": \"success\",\n \"data\": \"hello\"\n }\n except Exception as e:\n reginfs = {\n \"code\": 200,\n \"message\": \"failed\",\n \"data\": str(e)\n }\n return HttpResponse(json.dumps(reginfs), content_type='application/json')\nclass ImgtoWords(View):\n def get(self, request):\n try:\n sysfile = os.path.abspath('.')\n imgpath = request.session.get(\"imgpath\")\n unknownimgpath = sysfile + '/static/img2word/' + imgpath + '.jpg'\n os.remove(unknownimgpath)\n reginfs = {\n \"code\": 444,\n \"message\": \"success\",\n \"data\": \"hello\"\n }\n except:\n reginfs = {\n \"code\": 222,\n \"message\": \"failed\",\n \"data\": \"失败\"\n }\n return HttpResponse(json.dumps(reginfs), content_type='application/json')\n def post(self, request):\n # 图片的处理\n # h获取图片的路径\n imgpath = request.session.get(\"imgpath\")\n sysfile = os.path.abspath('.')\n unknownimgpath = sysfile + '/static/img2word/' + imgpath + '.jpg'\n options = {\n 'detect_direction': 'true',\n 'language_type': 'CHN_ENG',\n }\n try:\n aipOcr = AipOcr(BAIDU_APP_ID, BAIDU_API_KEY, BAIDU_SECRET_KEY)\n result = aipOcr.webImage(self.get_file_content(unknownimgpath), options)\n if result[\"words_result_num\"] == 0:\n vector_word = \"图中没有文字或未能识别\"\n else:\n pic_words = [i[\"words\"] for i in result[\"words_result\"]]\n pic_word_data=[(i+'
') for i in pic_words]\n vector_word = ''.join(pic_word_data)\n except:\n vector_word = \"图中没有文字或未能识别\"\n imginfo={}\n imginfo[\"vector_words\"] = vector_word\n imginfo[\"imgpath\"]='/static/img2word/' + imgpath + '.jpg'\n try:\n reginfs = {\n \"code\": 400,\n \"message\": \"success\",\n \"data\": imginfo\n }\n except:\n reginfs = {\n \"code\": 200,\n \"message\": \"failed\",\n \"data\": \"失败\"\n }\n return HttpResponse(json.dumps(reginfs), content_type='application/json')\n\n def get_file_content(self,filePath):\n with open(filePath, 'rb') as fp:\n return fp.read()\ndef img2wordRes(request):\n return render(request, \"sources/img2wordRes.html\", {})\n\nclass ImgtoExcel(View):\n def get(self, request):\n try:\n imgpath = request.session.get(\"imgpath\")\n print(imgpath)\n unknownimgpath = sysfile + '/static/img2word/' + imgpath + '.jpg'\n excel_name = sysfile + \"/static/img2word/\" + imgpath + \".xls\"\n os.remove(unknownimgpath)\n os.remove(excel_name)\n reginfs = {\n \"code\": 444,\n \"message\": \"success\",\n \"data\": \"hello\"\n }\n except:\n reginfs = {\n \"code\": 222,\n \"message\": \"failed\",\n \"data\": \"失败\"\n }\n return HttpResponse(json.dumps(reginfs), content_type='application/json')\n def post(self, request):\n # 图片的处理\n # h获取图片的路径\n imgpath = request.session.get(\"imgpath\")\n unknownimgpath = sysfile + '/static/img2word/' + imgpath + '.jpg'\n options = {\n 'detect_direction': 'true',\n 'language_type': 'CHN_ENG',\n }\n picUrl = \"error\"\n try:\n aipOcr = AipOcr(BAIDU_APP_ID, BAIDU_API_KEY, BAIDU_SECRET_KEY)\n result = aipOcr.tableRecognitionAsync(self.get_file_content(unknownimgpath), options)\n starttime = datetime.datetime.now()\n #api-1\n sub_one_sql = \"UPDATE 'sources_sourcelimit' SET num_count=num_count-1\"\n sub_one_cursor = connection.cursor()\n sub_one_cursor.execute(sub_one_sql)\n while True:\n try:\n requestId = result[\"result\"][0][\"request_id\"]\n aaa = aipOcr.getTableRecognitionResult(requestId, options)\n picUrl = aaa[\"result\"][\"result_data\"]\n if picUrl != '':\n break\n except:\n picUrl = \"error\"\n endtime = datetime.datetime.now()\n if (endtime - starttime).seconds > 20:\n picUrl = \"error\"\n break\n if picUrl == \"error\":\n os.remove(unknownimgpath)\n reginfs = {\n \"code\": 200,\n \"message\": \"fail1\",\n \"data\": \"fail\"\n }\n else:\n excel_json={}\n excel_source = pd.read_excel(picUrl)\n excel_name = sysfile+\"/static/img2word/\" + imgpath + \".xls\"\n excel_source.to_excel(excel_name)\n excel_html=excel_source.to_html(classes='layui-table')\n excel_json[\"excel_html\"]=excel_html\n excel_json[\"imgpath\"]=imgpath\n row,col=excel_source.shape\n if row==0:\n os.remove(unknownimgpath)\n os.remove(excel_name)\n reginfs = {\n \"code\": 200,\n \"message\": \"fail2\",\n \"data\": \"fail\"\n }\n else:\n reginfs = {\n \"code\": 400,\n \"message\": \"success\",\n \"data\": excel_json\n }\n except:\n picUrl = \"error\"\n os.remove(unknownimgpath)\n reginfs = {\n \"code\": 200,\n \"message\": \"fail3\",\n \"data\": \"fail\"\n }\n return HttpResponse(json.dumps(reginfs), content_type='application/json')\n\n def get_file_content(self,filePath):\n with open(filePath, 'rb') as fp:\n return fp.read()\n\ndef excel_download(request):\n \"\"\"\n sql 文件下载\n :param request:\n :return:\n \"\"\"\n imgpath = request.session.get(\"imgpath\")\n the_file_name = imgpath + '.xls'\n filename = sysfile+'/static/img2word/{}'.format(the_file_name) # 要下载的文件路径\n response = StreamingHttpResponse(readFile(filename))\n response['Content-Type'] = 
'application/octet-stream'\n response['Content-Disposition'] = 'attachment;filename=\"{0}\"'.format(the_file_name)\n return response\n\n\ndef readFile(filename, chunk_size=512):\n \"\"\"\n 缓冲流下载文件方法\n :param filename:\n :param chunk_size:\n :return:\n \"\"\"\n with open(filename, 'rb') as f:\n while True:\n c = f.read(chunk_size)\n if c:\n yield c\n else:\n break\ndef sourceExcel(request):\n return render(request, \"sources/sourcesExcel.html\", {})\n\ndef sourceList(request):\n return render(request, \"sources/sourcesList.html\", {})\n\n\n\n\n\n","repo_name":"Cenergy/helloD2","sub_path":"apps/sources/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35834672504","text":"import random\r\n\r\nimport numpy as np\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef create_points():\r\n '''\r\n 生成训练和测试用的正态分布点\r\n :return:测试数据的列表\r\n '''\r\n\r\n a_train = [{'x1': 0.2, 'x2': 0.7, 'bias': 1, 'y': 1, 'y_': 0},\r\n {'x1': 0.3, 'x2': 0.3, 'bias': 1, 'y': 1, 'y_': 0},\r\n {'x1': 0.4, 'x2': 0.5, 'bias': 1, 'y': 1, 'y_': 0},\r\n {'x1': 0.6, 'x2': 0.5, 'bias': 1, 'y': 1, 'y_': 0},\r\n {'x1': 0.1, 'x2': 0.4, 'bias': 1, 'y': 1, 'y_': 0}]\r\n b_train = [{'x1': 0.4, 'x2': 0.6, 'bias': 1, 'y': -1, 'y_': 0},\r\n {'x1': 0.6, 'x2': 0.2, 'bias': 1, 'y': -1, 'y_': 0},\r\n {'x1': 0.7, 'x2': 0.4, 'bias': 1, 'y': -1, 'y_': 0},\r\n {'x1': 0.8, 'x2': 0.6, 'bias': 1, 'y': -1, 'y_': 0},\r\n {'x1': 0.7, 'x2': 0.5, 'bias': 1, 'y': -1, 'y_': 0}]\r\n\r\n return [a_train, b_train]\r\n\r\n\r\ndef judge(w, a_train, b_train):\r\n '''\r\n\r\n :param w:前一个迭代的w\r\n :param a_train:训练数据列表\r\n :param b_train:训练数据列表\r\n :return:\r\n '''\r\n accu = 10\r\n wrong_list = {}\r\n wrong_list['a'] = []\r\n wrong_list['b'] = []\r\n for i in range(0, 5):\r\n t = a_train[i]['x1'] * w[0] + a_train[i]['x2'] * w[1] + a_train[i]['bias'] * w[2]\r\n if t > 0:\r\n a_train[i]['y_'] = 1\r\n elif t < 0:\r\n a_train[i]['y_'] = -1\r\n else:\r\n a_train[i]['y_'] = 0\r\n\r\n if a_train[i]['y'] != a_train[i]['y_']:\r\n wrong_list['a'].append(i)\r\n accu = accu - 1\r\n\r\n for i in range(0, 5):\r\n t = b_train[i]['x1'] * w[0] + b_train[i]['x2'] * w[1] + b_train[i]['bias'] * w[2]\r\n if t > 0:\r\n b_train[i]['y_'] = 1\r\n elif t < 0:\r\n b_train[i]['y_'] = -1\r\n else:\r\n b_train[i]['y_'] = 0\r\n\r\n if b_train[i]['y'] != b_train[i]['y_']:\r\n wrong_list['b'].append(i)\r\n accu = accu - 1\r\n\r\n return [accu, wrong_list, a_train, b_train]\r\n\r\n\r\ndef renew_w(w, wrong_list, a_train, b_train):\r\n '''\r\n\r\n :param w:前一个迭代w\r\n :param wrong_list:错误分类列表\r\n :return:新迭代的w\r\n '''\r\n a_or_b = random.random()\r\n if a_or_b < 0.5:\r\n if wrong_list['a']:\r\n choice = random.choice(wrong_list['a'])\r\n w[0] = w[0] + a_train[choice]['x1'] * a_train[choice]['y']\r\n w[1] = w[1] + a_train[choice]['x2'] * a_train[choice]['y']\r\n w[2] = w[2] + a_train[choice]['bias'] * a_train[choice]['y']\r\n else:\r\n choice = random.choice(wrong_list['b'])\r\n w[0] = w[0] + b_train[choice]['x1'] * b_train[choice]['y']\r\n w[1] = w[1] + b_train[choice]['x2'] * b_train[choice]['y']\r\n w[2] = w[2] + b_train[choice]['bias'] * b_train[choice]['y']\r\n else:\r\n if wrong_list['b']:\r\n choice = random.choice(wrong_list['b'])\r\n w[0] = w[0] + b_train[choice]['x1'] * b_train[choice]['y']\r\n w[1] = w[1] + b_train[choice]['x2'] * b_train[choice]['y']\r\n w[2] = w[2] + b_train[choice]['bias'] * b_train[choice]['y']\r\n else:\r\n choice = 
random.choice(wrong_list['a'])\r\n w[0] = w[0] + a_train[choice]['x1'] * a_train[choice]['y']\r\n w[1] = w[1] + a_train[choice]['x2'] * a_train[choice]['y']\r\n w[2] = w[2] + a_train[choice]['bias'] * a_train[choice]['y']\r\n\r\n return w\r\n\r\n\r\n# 初始化\r\n[a_train, b_train] = create_points() #训练样本初始化\r\nnum = -1 #最佳迭代次数计数初始化\r\nw = [0, 0, 0] #w初始化\r\n\r\n#错误分类样本初始化\r\nwrong_list = {}\r\nwrong_list['a'] = [i for i in range(0, 5)]\r\nwrong_list['b'] = [i for i in range(0, 5)]\r\n\r\n#最佳w、正确率初始化\r\nbest_w = [0, 0, 0]\r\nbest_accu = 0\r\n\r\nfor i in range(0, 20):\r\n w = renew_w(w, wrong_list, a_train, b_train) #w更新\r\n [accu, wrong_list, a_train, b_train] = judge(w, a_train, b_train) #判断哪些点错误分类\r\n if best_accu < accu: #记录最佳迭代\r\n best_w = w.copy()\r\n best_accu = accu\r\n num = i + 1\r\n\r\n #输出每次迭代结果\r\n print('It is the ' + str(i + 1) + 'th renew')\r\n print('The recent w is ' + str(w))\r\n print('The best_w is ' + str(best_w))\r\n print('The best accu is ' + str(best_accu))\r\n print('The recent accuracy is ' + str(accu))\r\n print(' ')\r\n if best_accu == 10:\r\n break\r\n\r\nprint('The best w is ' + str(best_w))\r\nprint('The best accuracy is ' + str(best_accu))\r\nprint('It is the ' + str(num) + 'th renew')\r\n\r\n# 可视化\r\nfor a in a_train:\r\n plt.scatter(a['x1'], a['x2'], c = 'red', s = 1, label = 'a')\r\nfor b in b_train:\r\n plt.scatter(b['x1'], b['x2'], c = 'blue', s = 1, label = 'b')\r\n\r\nplt.plot([0, 1], [-(best_w[2]) / best_w[1], -(best_w[0] * 1 + best_w[2]) / best_w[1]], c = 'green')\r\n\r\nplt.xlabel(\"x1\", fontdict = {'size': 16})\r\nplt.ylabel(\"x2\", fontdict = {'size': 16})\r\nplt.show()\r\n","repo_name":"Liwen-Xiao/Pattern_Recognization_and_Machine_Learning","sub_path":"Linear Regression/L2_question_5.py","file_name":"L2_question_5.py","file_ext":"py","file_size_in_byte":5002,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"34628909657","text":"from sqlalchemy import create_engine, Table, MetaData\nfrom sqlalchemy.sql import select, and_\nfrom configuration import CONNECTION_ROW\n\n\nengine = create_engine(CONNECTION_ROW, echo=True)\nmeta = MetaData(engine)\n\nauthors = Table('Authors', meta, autoload=True)\nbooks = Table('Books', meta, autoload=True)\n\nconn = engine.connect()\n\ns = select([books, authors]).where(and_(books.c.author_id == authors.c.id_author, books.c.price > 1200))\nresult = conn.execute(s)\n\nfor row in result.fetchall():\n print(row)\n\ndelete_query = books.delete().where(books.c.id_book == 1)\nconn.execute(delete_query)\n\nupdate_query = books.update().where(books.c.id_book == '3').values(title='AnotherTitle')\nconn.execute(update_query)\n\n\n\n\n","repo_name":"fisher1706/alchemy_2023","sub_path":"db_test.py","file_name":"db_test.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72239433793","text":"from flask import render_template, redirect, url_for, flash, request\nfrom app.editor_bp import blueprint\nfrom app.models import db, Contacts\n\nfrom flask_user import roles_required\nfrom loguru import logger\n\n\n@blueprint.route('/contacts/', methods=['GET', 'POST'])\n@roles_required(['editor', 'admin'])\n@logger.catch()\ndef contacts():\n title = 'Изменение контактных данных.'\n contact = Contacts.query.first()\n if request.method == 'POST':\n contact.address = request.form['address']\n contact.address_en = request.form['address_en']\n contact.address_kz = 
request.form['address_kz']\n        contact.phone = request.form['phone']\n        contact.email = request.form['email']\n        db.session.add(contact)\n        db.session.commit()\n        flash('Контактные данные были успешно изменены')\n        return redirect(url_for('editor.contacts'))\n    return render_template('editor_bp/contacts.html', contact=contact, title=title)\n","repo_name":"ganitsa-vadim/library_web_application","sub_path":"web/app/editor_bp/routes/contacts.py","file_name":"contacts.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"34500211919","text":"from networktables import NetworkTables\nfrom tabulate import tabulate\n\nNetworkTables.initialize(server='localhost')\nsd = NetworkTables.getTable(\"SmartDashboard\")\n\nerror_history = []\n\ndef value_changed(key, value, isNew):\n    if key == '/SmartDashboard/PID error':\n        error_history.append((sd.getNumber(\"Timer\", 0), value))\n\ndef find_max(array):\n    max = -99999\n\n    while True:\n        time, error = array.pop(0)\n        if error > max:\n            max = error\n        else:\n            return (time, error)\n\ndef find_min(array):\n    min = 99999\n\n    while True:\n        time, error = array.pop(0)\n        if error < min:\n            min = error\n        else:\n            return (time, error)\n\n\nNetworkTables.addEntryListener(value_changed)\n\nkP = float(input(\"Enter kP coefficient: \"))\n\nsd.putNumber(\"kP\", kP)\n\ninput(\"Ready. When autonomous ends, press Enter to continue\")\n\nperiods = []\n\nwhile error_history:\n    try:\n        time_first, err_first = find_max(error_history)\n        time_min, err_min = find_min(error_history)\n        time_second, err_second = find_max(error_history)\n        periods.append((time_second-time_first, ))\n    except IndexError:\n        break\n\nif len(periods) == 0:\n    print(\"No periods detected: exiting\")\nelif len(periods) == 1:\n    selected = 0\nelse:\n    print(\"These are the detected periods: choose one\")\n    print(tabulate(periods, showindex=True, headers=[\"Index\", \"Period (seconds)\"]))\n    selected = int(input(f\"Enter selection (0 - {len(periods)-1}): \"))\n\nTu = periods[selected][0]\n\nprint(f\"Final values for PI Ziegler-Nichols are: kP = {0.45*kP}, kI = {0.54 *(kP/Tu)}\")\n\n","repo_name":"HORUS-Team6348/pid-tuner","sub_path":"tuner.py","file_name":"tuner.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"10173745746","text":"# 9012 Parentheses\n# Problem: decide whether the given string is a valid parenthesis string (VPS)\n\nimport sys\n\ndef is_vps(string):\n    stack = []\n    for i in range(len(string)-1, -1, -1):\n        if string[i] == ')':\n            stack.append(string[i])\n        elif string[i] == '(' and not stack:\n            return \"NO\"\n        elif string[i] == '(' and stack[-1] == ')':\n            stack.pop()\n    \n    if not stack:\n        return \"YES\"\n    else:\n        return \"NO\"\n    \n\n# n = int(sys.stdin.readline().rstrip())\nn = int(input())\n\nfor i in range(n):\n    string = list(str(input()))\n    ans = is_vps(string)\n    print(ans)\n","repo_name":"kimsungbo/Algorithms","sub_path":"백준/스택/9012_괄호.py","file_name":"9012_괄호.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"7289842348","text":"\"\"\"\nAuthor: Logan Blue\nDate: May 18, 2021\n\nFor the USENIX 2021 Major revision process. 
We need to change up a few things\ninternally, and just using Hadi's code is probably going to cause more problems\nthan just rewriting it all.\n\nSo this code will find thresholds for all the bigrams a human speaker is likely\nto create (basically a range of values that is realistic) and then save them.\nIt will also check (if we have synthetic examples of that bigram) if the\nthreshold is an ideal classifier. i.e., can we achieve a 90% recall and\nprecision rate using just that feature for our evaluation set. If so, we can\nmark it as an ideal feature for future examples from this model. \"\"\"\n\n#pylint: disable='trailing-whitespace', 'invalid-name', 'too-many-locals'\n#pylint: disable='pointless-string-statement', 'too-many-statements'\n#pylint: disable='singleton-comparison'\nimport random\nimport sys, pickle\nfrom multiprocessing import Pool\n\nimport pdb\nimport pymongo\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\n\nimport sklearn as sk \nimport sklearn.metrics\n\n#turn off warnings\nimport warnings\nwarnings.simplefilter('ignore')\n\n\n#turn off pandas warning\npd.options.mode.chained_assignment = None  # default='warn'\n\n#set fixed random seed for consistency\nnp.random.seed(123)\n\n#load data\ndef load_data(table_name, collection_name='exploration'):\n    \"\"\"load the data from a specified table for analysis\"\"\"\n    myclient = pymongo.MongoClient('mongodb://localhost:27017')\n    db = myclient[collection_name]\n    table = db[table_name]\n\n    return pd.DataFrame(list(table.find()))\n\ndef process_df(df):\n    \"\"\"This function will clean up my dataframe and explode out the different\n    cross_sect_est into their own rows\"\"\"\n    df = df.drop(columns=['_id'])\n\n    #explode data\n    df_exp = df.explode('cross_sect_est')\n\n    #add indices in the cross_sect_est\n    indices = []\n    for _, row in df.iterrows():\n        areas = row['cross_sect_est']\n        indices += list(range(0, len(areas)))\n\n    df_exp['area_index'] = indices\n\n    return df_exp\n\ndef get_organic_ranges(df):\n    \"\"\" === Implementation for operation 1 ===\n    This function will return the ranges within which each organic bigram will \n    be present. \n    Input: df - Dataframe with the cross-sectional area estimates for\n                organic audio samples. \n    Output: df_ranges - Dataframe containing all unique bigrams and their \n                associated ranges\n    \"\"\"\n    #get ranges for each organic\n    df_ranges = []\n    for key, grp in df.groupby(by=['label', 'area_index']):\n        #get max and min value for each group with certain key\n        df_ranges.append([key[0], key[1], grp.cross_sect_est.min(),\n            grp.cross_sect_est.max()])\n    \n    df_ranges = pd.DataFrame(df_ranges, columns=['label', 'area_index', 'min',\\\n        'max'])\n\n    return df_ranges\n\ndef pooled_get_optimal_threshold(input_values):\n    \"\"\"This is a function that encapsulates much of the same code originally in \n    get_optimal_threshold; it has been moved out so that I can reduce processing\n    time using a pool. 
\n    \"\"\"\n    #extract data segments\n    df, df_ranges, index, row = input_values\n\n    #sweep through range of the possible values, calculating the recall and \n    #precision values\n    for threshold in np.linspace(row['min'], row['max'], 25):\n        #output values\n        new_row = row.values\n        new_row = np.append(new_row, [-1, -1, -1])\n        #Hadi's code I'm using\n        #set fakes to true\n        y_true = list(df[(df.label == row.label) &\\\n            (df.area_index == row.area_index)]['dataset'] == 'fakes') \n        y_pred = list(df[(df.label == row.label) &\\\n            (df.area_index == row.area_index)].cross_sect_est >\\\n            threshold)\n\n        recall = sklearn.metrics.recall_score(y_true, y_pred)\n        precision = sklearn.metrics.precision_score(y_true, y_pred)\n\n        #augment df with new values: threshold, ideal indicator, precision, recall\n        if precision >= 0.9 and recall >= 0.9:\n            #save results\n            new_row[-4] = True\n            new_row[-3] = threshold\n            new_row[-2] = recall\n            new_row[-1] = precision\n            \n            #break out since we are done\n            break\n        \n    #we have to return our segment of df_ranges since it isn't shared \n    #between workers\n    return new_row\n\ndef get_optimal_threshold(df, df_ranges):\n    \"\"\"This function will find the optimal threshold for every bigram within our\n    dataset, labeling it as an ideal threshold only if it can achieve a precision\n    and recall of at least 0.9\"\"\"\n    #for every phoneme and area_index\n    df_ranges['ideal_feature'] = False\n\n    #create groups to pool over\n    data_values = [(df, df_ranges, index, row) for index, row in \\\n        df_ranges.iterrows()]\n\n    updates = []\n    with Pool(4) as p:\n        for new_row in tqdm(\\\n            p.imap_unordered(pooled_get_optimal_threshold, data_values), \n            total=len(data_values), desc='Getting ideal set'):\n            updates.append(new_row)\n    \n    return pd.DataFrame(updates, columns=['label', 'area_index', 'min', 'max',\n        'ideal_feature', 'threshold', 'recall', 'precision'])\n\ndef calc_non_opt_sentence_threshold(df_ranges, df_data):\n    \"\"\"This function will find the maximum error rate floor necessary for our \n    non-optimized detector to function accurately while hopefully minimizing the \n    number of false positives. For example, if we require 5% of all bigrams in a \n    sentence to be positive (and maintain a 100% TPR or recall) then we have 5%\n    wiggle room to not get all organic speakers in our extraction set. 
If we\n    move to 6% and our recall goes to less than 100%, then we are trading \n    recall for a better false positive rate.\"\"\"\n    threshold_max = -1\n    threshold_min = -1\n    threshold_either = -1\n\n    #merge two datasets on label and area_index columns\n    df_analysis = pd.merge(df_ranges, df_data, how='right', on=['label', \n        'area_index'])\n\n    #find and mark rows that fall outside the organic ranges calculated\n    df_analysis['breaks_max'] = df_analysis.apply(lambda row: row['max'] <\\\n        row.cross_sect_est, axis=1)\n    df_analysis['breaks_min'] = df_analysis.apply(lambda row: row['min'] >\\\n        row.cross_sect_est, axis=1)\n    df_analysis['breaks_either'] = df_analysis.apply(lambda row: (row['min'] >\\\n        row.cross_sect_est) or (row['max'] < row.cross_sect_est), axis=1)\n\n\n    #per sentence, calculate the percentage of the time we are outside of \n    #organic ranges\n    df_results = df_analysis.groupby(['filepath', 'dataset']).agg('mean')\n    df_results.reset_index(inplace=True)\n\n    #sweep values through df_results to find a threshold that divides \n    #fakes and organic well enough for the MAX values\n    space = np.linspace(0.004, 0.2, 100)\n    y_true = list(df_results.dataset == 'fakes')\n    for threshold in space:\n        y_pred = list(df_results.breaks_max > threshold)\n        precision = sklearn.metrics.precision_score(y_true, y_pred)\n        recall = sklearn.metrics.recall_score(y_true, y_pred)\n\n        if recall >= 0.9 and precision >= 0.9:\n            #good results\n            print(\"Sentence Threshold: \", threshold)\n            threshold_max = threshold\n            break\n\n    #sweep values through df_results to find a threshold that divides \n    #fakes and organic well enough for the MIN values\n    space = np.linspace(0.039, 0.2, 100)\n    y_true = list(df_results.dataset == 'fakes')\n    for threshold in space:\n        y_pred = list(df_results.breaks_min > threshold)\n        precision = sklearn.metrics.precision_score(y_true, y_pred)\n        recall = sklearn.metrics.recall_score(y_true, y_pred)\n\n        if recall >= 0.9 and precision >= 0.9:\n            #good results\n            print(\"Sentence Threshold: \", threshold)\n            threshold_min = threshold\n            break\n    \n    #sweep values through df_results to find a threshold that divides \n    #fakes and organic well enough for the EITHER values\n    space = np.linspace(0.2, 0, 100)\n    y_true = list(df_results.dataset == 'fakes')\n    for threshold in space:\n        y_pred = list(df_results.breaks_either > threshold)\n        precision = sklearn.metrics.precision_score(y_true, y_pred)\n        recall = sklearn.metrics.recall_score(y_true, y_pred)\n\n        if recall >= 0.9 and precision >= 0.9:\n            #good results\n            print(\"Sentence Threshold: \", threshold)\n            threshold_either = threshold\n            break\n\n    #if no sweep succeeded, the -1 defaults are returned\n    return threshold_max, threshold_min, threshold_either\n\ndef non_opt_test_sentences(df_ranges, df_data, threshold_max, threshold_min, \n        threshold_either):\n    \"\"\"This function will be used to evaluate the effectiveness of our threshold\n    on the validation data set. 
df_ranges are the acceptable organic ranges, \n    df_data is the data we will be processing, and threshold is the percentage\n    of any given sentence that can be outside our threshold before we label the\n    whole sentence as a deepfake.\"\"\"\n    #merge two datasets on label and area_index columns\n    df_analysis = pd.merge(df_ranges, df_data, how='right', on=['label', \n        'area_index'])\n\n    #find and mark rows that fall outside the organic ranges calculated\n    df_analysis['breaks_max'] = df_analysis.apply(lambda row: row['max'] <\\\n        row.cross_sect_est, axis=1)\n    df_analysis['breaks_min'] = df_analysis.apply(lambda row: row['min'] >\\\n        row.cross_sect_est, axis=1)\n    df_analysis['breaks_either'] = df_analysis.apply(lambda row: (row['max'] <\\\n        row.cross_sect_est) or (row['min'] > row.cross_sect_est), axis=1)\n\n    #per sentence, calculate the percentage of the time we are outside of \n    #organic ranges\n    df_results = df_analysis.groupby(['filepath', 'dataset']).agg('mean')\n    df_results.reset_index(inplace=True)\n\n    #mark those that are above our detection threshold\n    df_results['max_pred'] = df_results['breaks_max'] > threshold_max\n    df_results['min_pred'] = df_results['breaks_min'] > threshold_min\n    df_results['either_pred'] = df_results['breaks_either'] > threshold_either\n\n    return df_results\n\n    \ndef opt_test_sentence(df_thresholds, df_data):\n    \"\"\"This function will only examine ideal features when determining if a \n    sentence is organic or not. It will then use the resulting decisions as \n    votes in determining the overall sentence's label.\"\"\"\n    #filter out non_ideal features\n    print(\"Starting opt testing...\")\n    df_ideal = df_thresholds[df_thresholds.ideal_feature][['label',\\\n        'area_index', 'threshold']]\n    df_examine = pd.merge(df_ideal, df_data, how='left', on=['label',\\\n        'area_index'])\n\n    #test all ideal features\n    print(\"Doing apply\")\n    df_examine['classification'] = df_examine.apply(lambda row: \\\n        row.cross_sect_est > row.threshold, axis=1)\n    \n    df_results = []\n    groups = df_examine.groupby('filepath')\n    #group every sentence's rows together (filepath)\n    for key, grp in tqdm(groups, desc='voting'):\n        #vote\n        voting_total_percentage = grp.classification.mean()\n        sentence_truth = grp.head(1).dataset.item()\n        \n        if voting_total_percentage >= 0.5:\n            #label sentence as a deepfake\n            df_results.append([key, True, sentence_truth])\n        else:\n            #label sentence as an organic sample\n            df_results.append([key, False, sentence_truth])\n    \n    #save results to a new df\n    return pd.DataFrame(df_results, columns=['filepath', 'prediction',\\\n        'ground_truth'])\n\ndef main():\n    \"\"\"This code will be generating our output data values for the guesswho\n    project. It was at one point Hadi's code; however, I have gone ahead and\n    rewritten it for clarity and small methodological changes. \n\n    Operations list:\n    1) Get all ranges for organic speakers in TIMIT dataset\n    2) Check for how well ranges differentiate organic and synthetic audio\n       samples. 
(RESULT)\n 3) Extract ideal feature set\n 3a) Recalc Hadi's orginal numbers from the paper\n 4) Check ASV Spoof and Lyrebird against the ranges found in the TIMIT \n dataset\n\n \"\"\"\n\n #\"\"\n print(\"Loading datasets...\")\n #start by load audio sets\n print(\"TIMIT...\")\n #TIMIT\n df_timit_true = load_data('timit_true_extended', collection_name='windows')\n df_timit_true = process_df(df_timit_true)\n df_timit_fakes = load_data('real_time_extended', collection_name='windows')\n df_timit_fakes = process_df(df_timit_fakes)\n\n\n #add dataset label\n df_timit_true['dataset'] = 'true'\n df_timit_fakes['dataset'] = 'fakes'\n\n #NOTE: Add additional idiosyncrasies\n\n #into a single df for analysis\n df_timit = pd.concat([df_timit_true, df_timit_fakes], ignore_index=True)\n df_timit.reset_index(drop=True, inplace=True)\n\n print()\n print('Loading Complete')\n\n #create exploration dataset and evaluation datasets\n eval_speaker = random.sample(list(df_timit.speaker_id.unique()), 250)\n df_test = df_timit[~df_timit.speaker_id.isin(eval_speaker)] #small\n df_eval = df_timit[df_timit.speaker_id.isin(eval_speaker)] #big\n\n #==========_Operation #1_==========\n #filter out organic audio samples\n #call processor function\n print(\"Getting organic ranges...\")\n df_org_ranges = get_organic_ranges(df_test[df_test.dataset == 'true'])\n\n #==========_Operation #2_==========\n #find number of bigrams outside of organic range for my deepfakes (zero \n #organic should, but double check it here). Used to find a threshold value\n #using the exploration set\n print(\"calc non opt sentence threshold...\")\n sentence_threshold_max, sentence_threshold_min, sentence_threshold_either =\\\n calc_non_opt_sentence_threshold(df_org_ranges, df_test)\n\n if sentence_threshold_max < 0 or sentence_threshold_min < 0:\n print('Problems...')\n pdb.set_trace()\n\n #with threshold, use validation threshold to get preformance of technique\n #TODO: REMOVE TEST\n #df_eval_test = df_eval[df_eval.filepath.isin(\n # random.sample(set(df_eval.filepath.unique()), 20))]\n #END\n print(\"Starting non_opt test...\")\n #non_opt_test_sentences(df_org_ranges, df_eval, sentence_threshold)\n df_results = non_opt_test_sentences(df_org_ranges, df_eval,\\\n sentence_threshold_max, sentence_threshold_min, \n sentence_threshold_either)\n\n #TODO: REMOVE\n #save to speed up dev\n #pickle.dump(df_results, open('df_results.pkl', 'wb'))\n #pickle.dump(df_org_ranges, open('org_range.pkl', 'wb'))\n #pickle.dump(df_eval, open('df_eval.pkl', 'wb'))\n #pickle.dump(df_test, open('df_test.pkl', 'wb'))\n #pickle.dump((sentence_threshold_max, sentence_threshold_min),\\\n # open('sent_thres.pkl', 'wb'))\n #\"\"\n\n #resume code\n #with open('org_range.pkl', 'rb') as f:\n # df_org_ranges = pickle.load(f)\n\n #with open('df_eval.pkl', 'rb') as f:\n # df_eval = pickle.load(f)\n\n #with open('df_test.pkl', 'rb') as f:\n # df_test = pickle.load(f)\n\n #with open('sent_thres.pkl', 'rb') as f:\n # sentence_threshold_max, sentence_threshold_min = pickle.load(f)\n\n #with open('df_results.pkl', 'rb') as f:\n # df_results = pickle.load(f)\n\n #get high level stats for this test\n y_true = list(df_results.dataset == 'fakes')\n y_pred = list(df_results.max_pred)\n precision_max = sklearn.metrics.precision_score(y_true, y_pred)\n recall_max = sklearn.metrics.recall_score(y_true, y_pred)\n\n y_pred = list(df_results.min_pred)\n precision_min = sklearn.metrics.precision_score(y_true, y_pred)\n recall_min = sklearn.metrics.recall_score(y_true, y_pred)\n\n y_pred = 
\n\n\n\n    \"\"\"Operation #3 \"\"\"\n    ##find ideal set for TIMIT Dataset\n    #print(\"Finding ideal set...\")\n    #df_org_ranges = get_optimal_threshold(df_test, df_org_ranges) \n    #\n    ##vote on a per sentence basis similar to how Hadi did it in the original\n    ##paper to determine our effectiveness\n    #df_oper3 = opt_test_sentence(df_org_ranges, df_eval)\n\n    #y_true = list(df_oper3.ground_truth == 'fakes')\n    #y_pred = list(df_oper3.prediction.values)\n    #recall = sklearn.metrics.recall_score(y_true, y_pred) \n    #precision = sklearn.metrics.precision_score(y_true, y_pred) \n    #\n    #print('Idealset operation on Timit Evaluation Set')\n    #print('Recall: ', recall)\n    #print('Precision: ', precision)\n    #print()\n\n    ##delete most of the TIMIT data so that we can free up space\n    #pickle.dump(df_org_ranges, open('df_org_ranges.pkl', 'wb'))\n    #pickle.dump(df_oper3, open('df_results.pkl', 'wb'))\n    #del df_oper3\n    #del df_eval\n    #del df_test\n    #del df_timit\n    #pickle.dump(df_results, open('df_results.pkl', 'wb'))\n    #del df_results\n\n    \"\"\"Operation #4\"\"\"\n    print(\"Loading Lyrebird...\")\n    #Lyrebird\n    #add dataset label\n    df_lyrebird_true = load_data('lyrebird_true')\n    df_lyrebird_true = process_df(df_lyrebird_true)\n    df_lyrebird_fakes = load_data('lyrebird_fake')\n    df_lyrebird_fakes = process_df(df_lyrebird_fakes)\n\n    #add dataset label\n    df_lyrebird_true['dataset'] = 'true'\n    df_lyrebird_fakes['dataset'] = 'fakes'\n    \n    #into a single df for analysis\n    df_lyrebird = pd.concat([df_lyrebird_true, df_lyrebird_fakes],\n        ignore_index=True)\n    df_lyrebird.reset_index(drop=True, inplace=True)\n\n    print(\"Loading complete\")\n\n    #using ranges found for TIMIT, can we still detect Lyrebird and ASV_Spoof\n    #with the threshold, use the evaluation set to get the performance of the\n    #technique\n    df_results = non_opt_test_sentences(df_org_ranges, df_lyrebird,\\\n        sentence_threshold_max, sentence_threshold_min, \n        sentence_threshold_either)\n\n    #get high level stats for this test\n    y_true = list(df_results.dataset == 'fakes')\n    y_pred = list(df_results.max_pred)\n    precision_max = sklearn.metrics.precision_score(y_true, y_pred)\n    recall_max = sklearn.metrics.recall_score(y_true, y_pred)\n\n    y_pred = list(df_results.min_pred)\n    precision_min = sklearn.metrics.precision_score(y_true, y_pred)\n    recall_min = sklearn.metrics.recall_score(y_true, y_pred)\n\n    y_pred = list(df_results.either_pred)\n    precision_either = sklearn.metrics.precision_score(y_true, y_pred)\n    recall_either = sklearn.metrics.recall_score(y_true, y_pred)\n\n    print(\"Validate on Lyrebird, the case of testing all values in a sentence\")\n    print(\"===== Max checks ====\")\n    print(\"Recall: \", recall_max)\n    print(\"Precision: \", precision_max)\n    print()\n    print(\"===== Min checks ====\")\n    print(\"Recall: \", recall_min)\n    print(\"Precision: \", precision_min)\n    print()\n    print(\"===== Either checks ====\")\n    print(\"Recall: \", recall_either)\n    print(\"Precision: \", 
precision_either)\n    print()\n\n\n\n    print(\"Loading ASV Spoof...\")\n    #delete Lyrebird\n    del df_lyrebird\n    pickle.dump(df_results, open('df_lyrebird_results.pkl', 'wb'))\n    del df_results\n    \n    #ASV_SPOOF (needs to load multiple mongo collections and then\n    #concatenate)\n    #add dataset label\n    #df_asv_sets = ['asv_spoof_b1', 'asv_spoof_b2_100000',\n    #    'asv_spoof_b2_100k_200k', 'asv_spoof_b2_200k_300k',\n    #    'asv_spoof_b2_300k_400k', 'asv_spoof_b2_400k_500k', \n    #    'asv_spoof_b2_500k_600k', 'asv_spoof_b2_600k_700k', \n    #    'asv_spoof_b2_700k_800k', 'asv_spoof_b2__800k_end']\n\n    #list_df_asv = []\n    #for name in tqdm(df_asv_sets, position=0, leave=True, desc='ASV Spoof...'):\n    #    list_df_asv.append(process_df(load_data(name)))\n\n    #print('#1')\n    ##into a single df for analysis\n    #df_asv = pd.concat(list_df_asv, ignore_index=True)\n    #df_asv.reset_index(drop=True, inplace=True)\n    \n    #print('#2')\n    ##add dataset label\n    #df_asv.loc['dataset'] = 'fakes'\n    #df_asv.loc[df_asv.filepath.str.contains('bonafide'), 'dataset'] = 'true'\n\n    #for each asv attack, run a validation pass\n\n    print(\"Validate on ASV, the case of testing all values in a sentence\")\n    asv_pickles = ['df_asv_A07.pkl', 'df_asv_A08.pkl', 'df_asv_A09.pkl',\n        'df_asv_A10.pkl', 'df_asv_A12.pkl', 'df_asv_A13.pkl',\n        'df_asv_A14.pkl', 'df_asv_A15.pkl', 'df_asv_A17.pkl',\n        'df_asv_A18.pkl', 'df_asv_A19.pkl']\n    df_asv_bon = pickle.load(open('asv_data_files/df_asv_bon.pkl', 'rb'))\n    df_asv_bon['dataset'] = 'true'\n\n    for asv_curr in asv_pickles:\n        #load data file\n        df_asv_curr = pickle.load(open('asv_data_files/' + asv_curr, 'rb'))\n        df_asv_curr['dataset'] = 'fakes'\n        \n        #combine with df_asv_bon\n        df_asv_curr = pd.concat([df_asv_curr, df_asv_bon], ignore_index=True)\n        df_asv_curr.reset_index(drop=True, inplace=True)\n\n        #with the threshold, use the evaluation set to get the performance of\n        #the technique\n        df_results = non_opt_test_sentences(df_org_ranges, df_asv_curr,\\\n            sentence_threshold_max, sentence_threshold_min, \n            sentence_threshold_either)\n\n        #get high level stats for this test\n        y_true = list(df_results.dataset == 'fakes')\n        y_pred = list(df_results.max_pred)\n        precision_max = sklearn.metrics.precision_score(y_true, y_pred)\n        recall_max = sklearn.metrics.recall_score(y_true, y_pred)\n\n        y_pred = list(df_results.min_pred)\n        precision_min = sklearn.metrics.precision_score(y_true, y_pred)\n        recall_min = sklearn.metrics.recall_score(y_true, y_pred)\n\n        y_pred = list(df_results.either_pred)\n        precision_either = sklearn.metrics.precision_score(y_true, y_pred)\n        recall_either = sklearn.metrics.recall_score(y_true, y_pred)\n\n        print(\"RESULTS FOR --> \", asv_curr)\n        print(\"===== Max checks ====\")\n        print(\"Recall: \", recall_max)\n        print(\"Precision: \", precision_max)\n        print()\n        print(\"===== Min checks ====\")\n        print(\"Recall: \", recall_min)\n        print(\"Precision: \", precision_min)\n        print()\n        print(\"===== Either checks ====\")\n        print(\"Recall: \", recall_either)\n        print(\"Precision: \", precision_either)\n        print()\n\n    pickle.dump(df_results, open('df_asv.pkl', 'wb'))\n    \nif __name__ == '__main__':\n    main()\n","repo_name":"blue-logan/who_are_you","sub_path":"core/extract_threshold.py","file_name":"extract_threshold.py","file_ext":"py","file_size_in_byte":22953,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"61"}
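The record above applies one decision rule throughout: a sentence is flagged as a deepfake when the share of its windows falling outside the per-feature organic [min, max] band exceeds a threshold. A self-contained sketch of that rule (editor's example with toy data; only the column names mirror the record, everything else is assumed):

```python
import pandas as pd

# One row per (sentence, window) estimate, with the organic bounds attached.
df = pd.DataFrame({
    "filepath": ["s1", "s1", "s2", "s2"],
    "cross_sect_est": [0.4, 0.9, 0.50, 0.55],
    "min": [0.3] * 4,
    "max": [0.6] * 4,
})

# A window "breaks" the band when its estimate leaves [min, max].
df["breaks_either"] = (df["cross_sect_est"] > df["max"]) | (df["cross_sect_est"] < df["min"])

# Per sentence, the fraction of out-of-range windows, compared to a threshold.
out_of_range = df.groupby("filepath")["breaks_either"].mean()
threshold_either = 0.25
print(out_of_range > threshold_either)  # s1 -> True (deepfake), s2 -> False
```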
+{"seq_id":"43545794157","text":"\n## Example of .xml\n#http://py4e-data.dr-chuck.net/comments_1427864.xml\n#http://py4e-data.dr-chuck.net/comments_42.xml\n\n\nimport xml.etree.ElementTree as ET\nimport urllib.request,urllib.parse,urllib.error\n\nurl = input('Enter URL - ')\nxmlfile = urllib.request.urlopen(url).read()\ndata = xmlfile.decode() ## decoding the byte string to unicode\ncomntinfo = ET.fromstring(data) ## here data is string\nlst = comntinfo.findall('comments/comment') ## this list will store all subtrees whose root is comment\nsum =0\nfor i in range(0,len(lst),1):\n    sum = sum + int(lst[i].find('count').text)\n\nprint(sum)\n\n\n\n    \n","repo_name":"KrishnenduDakshi2002/Python-learning","sub_path":"course/webdata/parsingXMLassign.py","file_name":"parsingXMLassign.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"9012577525","text":"from .views import CalculateSum, CalculateEquation, CalculateNextNumebrInSeries, RegisterUser\nfrom django.urls import include, path, re_path\n\nurlpatterns = [\n    path('api/v1/calculate',\n         CalculateSum.as_view(), name='get_calculate_sum'),\n    path('api/v1/calculate/equation',\n         CalculateEquation.as_view(), name='get_calculate_equation'),\n    path('api/v1/calculate/next/number',\n         CalculateNextNumebrInSeries.as_view(), name='get_calculate_next_number'),\n    path('api/v1/register',\n         RegisterUser.as_view(), name='register_user')\n]\n","repo_name":"rohitmanjhi/django_test","sub_path":"djangoTest/djangoTest/account/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23526496803","text":"from tweepy.streaming import StreamListener\nfrom tweepy import OAuthHandler\nfrom tweepy import Stream\nfrom kafka import SimpleProducer, KafkaClient\nfrom kafka import KafkaProducer\nimport json\n\n# NOTE: the original file hardcoded live Twitter API credentials here;\n# they have been redacted.\naccess_token = \"REDACTED\"\naccess_token_secret = \"REDACTED\"\nconsumer_key = \"REDACTED\"\nconsumer_secret = \"REDACTED\"\n\nclass StdOutListener(StreamListener):\n    def on_data(self, data):\n        print(\"ok\")\n        print(type(data))\n        print(type(data.encode('utf-8')))\n        producer.send(\"trump\", data.encode('utf-8'))\n        # producer.send_messages(\"trump\", data.encode('utf-8'))\n        return True\n    def on_error(self, status):\n        print(status)\n\nkafka = KafkaClient(\"localhost:9092\")\nproducer = KafkaProducer(bootstrap_servers='localhost:9092')\nl = StdOutListener()\nprint(\"no\")\n\nauth = OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\nstream = Stream(auth, l)\nprint(\"s\")\nstream.filter(track=[\"jaipur\"])  # filter() expects a list of track phrases\n\n","repo_name":"AbhiRuchiK/Kafka","sub_path":"trumptweets.py","file_name":"trumptweets.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"21433356630","text":"\"\"\" Main flask module for running flask application \"\"\"\n\nfrom flask import Flask, request, render_template\nfrom src.run_scrape import run\nimport json\nimport os\nfrom src.utils.config import get_config\nfrom src.utils import utility\nfrom src.dao import db\n\napp = Flask(__name__)\nenv_type = os.environ.get('ENV_TYPE', 'dev')\n\n\n@app.route('/')\ndef index():\n    remote_ip = request.remote_addr\n    environment = get_config()\n    print(remote_ip)\n\n    args = {\n        'remote_ip': remote_ip,\n        'environment': environment\n    }\n\n    return render_template('index.html', **args)\n\n\n@app.route('/forex', 
methods=['GET', 'POST'])\ndef forex():\n args = {}\n bank_list = db.get_bank_list()\n if request.method == 'GET':\n args.update({'bank_list': bank_list})\n return render_template('forex.html', bank_list=bank_list)\n elif request.method == 'POST':\n rates = run(request.form.getlist('banks'))\n rates_with_diff = utility.calc_diff(rates)\n args.update({'bank_list': bank_list})\n args.update({'rates': rates_with_diff})\n return render_template('forex-result.html', **args)\n\nif __name__ == '__main__':\n app.debug = True\n app.run(host='0.0.0.0', port=8000)\n","repo_name":"bimalghartimagar/nifty","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"12449620121","text":"import os\r\nimport cv2\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.mixture import GaussianMixture\r\n\r\ndef get_largest_areas(folder_path):\r\n largest_areas = []\r\n\r\n for filename in os.listdir(folder_path):\r\n if filename.endswith('.jpg') or filename.endswith('.png'):\r\n frame = cv2.imread(os.path.join(folder_path, filename))\r\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\r\n\r\n lower = np.array([30, 50, 50])\r\n upper = np.array([70, 255, 255])\r\n\r\n mask = cv2.inRange(hsv, lower, upper)\r\n\r\n ret, labels, stats, centroids = cv2.connectedComponentsWithStats(mask)\r\n\r\n largest_label = 1\r\n max_area = stats[1, cv2.CC_STAT_AREA]\r\n for label in range(2, ret):\r\n area = stats[label, cv2.CC_STAT_AREA]\r\n if area > max_area:\r\n max_area = area\r\n largest_label = label\r\n\r\n largest_areas.append(max_area)\r\n\r\n largest_areas.sort()\r\n return largest_areas\r\n\r\ndef cluster_with_gaussian(largest_areas, n_clusters):\r\n data = np.array(largest_areas).reshape(-1, 1)\r\n gmm = GaussianMixture(n_components=n_clusters, random_state=0)\r\n gmm.fit(data)\r\n predicted_labels = gmm.predict(data)\r\n cluster_order = np.argsort(gmm.means_.flatten())\r\n new_labels = np.zeros_like(predicted_labels)\r\n for i, label in enumerate(cluster_order):\r\n new_labels[predicted_labels == label] = i\r\n return new_labels\r\n\r\nif __name__ == \"__main__\":\r\n folder_path1 = r'C:\\Users\\bskylcnr\\Desktop\\test1\\Lugano'\r\n folder_path2 = r'C:\\Users\\bskylcnr\\Desktop\\test1\\Aphylion'\r\n\r\n lugano_largest_areas = get_largest_areas(folder_path1)\r\n aphylion_largest_areas = get_largest_areas(folder_path2)\r\n\r\n n_clusters = 7\r\n\r\n predicted_labels_lugano = cluster_with_gaussian(lugano_largest_areas, n_clusters)\r\n predicted_labels_aphylion = cluster_with_gaussian(aphylion_largest_areas, n_clusters)\r\n\r\n clusters_lugano = [[] for _ in range(n_clusters)]\r\n clusters_aphylion = [[] for _ in range(n_clusters)]\r\n\r\n for i, label in enumerate(predicted_labels_lugano):\r\n clusters_lugano[label].append(lugano_largest_areas[i])\r\n\r\n for i, label in enumerate(predicted_labels_aphylion):\r\n clusters_aphylion[label].append(aphylion_largest_areas[i])\r\n\r\n\r\n\r\n\r\ndef plot_cluster_statistics(data, cluster_number, title):\r\n max_val = np.max(data)\r\n min_val = np.min(data)\r\n mean_val = np.mean(data)\r\n median_val = np.median(data)\r\n\r\n plt.axvline(x=max_val, color='red', linestyle='dashed', linewidth=2, label='Max')\r\n plt.axvline(x=min_val, color='green', linestyle='dashed', linewidth=2, label='Min')\r\n plt.axvline(x=mean_val, color='purple', linestyle='dashed', linewidth=2, label='Mean')\r\n plt.axvline(x=median_val, 
color='orange', linestyle='dashed', linewidth=2, label='Median')\r\n    plt.hist(data, bins=20, color='blue', alpha=0.7)\r\n    plt.title(title)\r\n    plt.xlabel(\"pixel\")\r\n    plt.ylabel(\"time\")\r\n    plt.legend()\r\n\r\nfrom scipy.stats import norm\r\ndef plot_std_curve(data, title):\r\n    std_val = np.std(data)\r\n    mean_val = np.mean(data)\r\n    print(std_val)\r\n    plt.plot(data, color='blue', alpha=0.7, label='Std Deviation')\r\n    plt.axhline(y=mean_val, color='purple', linestyle='dashed', linewidth=2, label='Mean')\r\n    plt.title(title)\r\n    plt.xlabel(\"Week\")\r\n    plt.ylabel(\"Pixel\")\r\n    plt.legend()\r\n\r\n\r\ndef plot_bell_curve(data, title):\r\n    std_val = np.std(data)\r\n    mean_val = np.mean(data)\r\n\r\n    x = np.linspace(min(data), max(data), 100)\r\n    y = norm.pdf(x, mean_val, std_val)\r\n\r\n    plt.plot(x, y, color='blue', label='Bell Curve')\r\n    plt.plot(data, color='red', alpha=0.7, label='Std Deviation')\r\n    plt.axhline(y=mean_val, color='purple', linestyle='dashed', linewidth=2, label='Mean')\r\n    plt.title(title)\r\n    plt.xlabel(\"Week\")\r\n    plt.ylabel(\"Pixel\")\r\n    plt.legend()\r\n\r\nplt.figure(figsize=(16, 8))\r\n\r\nfor i in range(n_clusters):\r\n    plt.subplot(2, 4, i+1)\r\n    plot_cluster_statistics(clusters_lugano[i], i, f\"Lugano Week {i+1}\")\r\n\r\nplt.tight_layout()\r\nplt.show()\r\n\r\nplt.figure(figsize=(16, 8))\r\nfor i in range(n_clusters):\r\n    plt.subplot(2, 4, i+1)\r\n    plot_cluster_statistics(clusters_aphylion[i], i, f\"Aphylion Week {i+1}\")\r\n    \r\n\r\n\r\nplt.tight_layout()\r\nplt.show()\r\n\r\n# Calculate the standard deviations for the 7 weeks\r\nlugano_std_data = [np.std(cluster) for cluster in clusters_lugano]\r\naphylion_std_data = [np.std(cluster) for cluster in clusters_aphylion]\r\n\r\n# Plot the standard deviation comparison on a single graph\r\nplt.figure(figsize=(10, 6))\r\n\r\nplt.plot(lugano_std_data, label='Lugano', marker='o')\r\nplt.plot(aphylion_std_data, label='Aphylion', marker='o')\r\n\r\nplt.title('Standard Deviation Comparison for 7 Weeks')\r\nplt.xlabel('Week')\r\nplt.ylabel('Standard Deviation')\r\nplt.legend()\r\nplt.xticks(range(n_clusters), [f'Week {i+1}' for i in range(n_clusters)])\r\n\r\nplt.tight_layout()\r\nplt.show()\r\n\r\n\r\n# Plot the bell curve and standard deviation comparison on a single graph\r\nplt.figure(figsize=(10, 6))\r\n\r\nplt.plot(lugano_std_data, label='Lugano Std Deviation', marker='o')\r\nplt.plot(aphylion_std_data, label='Aphylion Std Deviation', marker='o')\r\n\r\nplt.title('Bell Curve and Std Deviation Comparison for 7 Weeks')\r\nplt.xlabel('Week')\r\nplt.ylabel('Value')\r\nplt.legend()\r\nplt.xticks(range(n_clusters), [f'Week {i+1}' for i in range(n_clusters)])\r\n\r\nplt.tight_layout()\r\nplt.show()\r\n# This updated code will plot the standard deviations and the bell curve for the 7 weeks, then compare the data. I hope this code gives you the result you want.\r\n\r\n\r\n\r\n\r\n\r\n\r\n    \r\n","repo_name":"basakyalciner/DETERMINATION-OF-THE-AREA-INDEX-OF-LETTUCE-LEAVES-WITH-A-MONOCULAR-CAMERA","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
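The lettuce script above relabels GaussianMixture components by their means so that cluster 0 is always the smallest-area week. A compact, runnable illustration of that relabeling step (editor's sketch on synthetic data, not taken from the repo):

```python
import numpy as np
from sklearn.mixture import GaussianMixture

rng = np.random.default_rng(0)
data = np.concatenate([rng.normal(loc, 1.0, 50) for loc in (10, 40, 80)]).reshape(-1, 1)

gmm = GaussianMixture(n_components=3, random_state=0).fit(data)
raw = gmm.predict(data)

# Component indices are arbitrary; sort them by component mean so that
# label 0 always refers to the smallest values (the earliest week).
order = np.argsort(gmm.means_.flatten())
relabeled = np.zeros_like(raw)
for new_label, old_label in enumerate(order):
    relabeled[raw == old_label] = new_label

print(relabeled[:3], relabeled[-3:])  # small areas -> 0, large areas -> 2
```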
+{"seq_id":"42117237375","text":"import logging\nimport base64\n\nimport requests\nimport networkx\n\nfrom canals.errors import PipelineDrawingError\nfrom canals.type_utils import _type_name\n\nlogger = logging.getLogger(__name__)\n\n\nMERMAID_STYLED_TEMPLATE = \"\"\"\n%%{{ init: {{'theme': 'neutral' }} }}%%\n\ngraph TD;\n\n{connections}\n\nclassDef component text-align:center;\n\"\"\"\n\n\ndef _to_mermaid_image(graph: networkx.MultiDiGraph):\n    \"\"\"\n    Renders a pipeline using Mermaid (hosted version at 'https://mermaid.ink'). Requires Internet access.\n    \"\"\"\n    graph_styled = _to_mermaid_text(graph=graph)\n\n    graphbytes = graph_styled.encode(\"ascii\")\n    base64_bytes = base64.b64encode(graphbytes)\n    base64_string = base64_bytes.decode(\"ascii\")\n    url = \"https://mermaid.ink/img/\" + base64_string\n\n    logging.debug(\"Rendering graph at %s\", url)\n    try:\n        resp = requests.get(url, timeout=10)\n        if resp.status_code >= 400:\n            logger.warning(\"Failed to draw the pipeline: https://mermaid.ink/img/ returned status %s\", resp.status_code)\n            logger.info(\"Exact URL requested: %s\", url)\n            logger.warning(\"No pipeline diagram will be saved.\")\n            resp.raise_for_status()\n\n    except Exception as exc:  # pylint: disable=broad-except\n        logger.warning(\"Failed to draw the pipeline: could not connect to https://mermaid.ink/img/ (%s)\", exc)\n        logger.info(\"Exact URL requested: %s\", url)\n        logger.warning(\"No pipeline diagram will be saved.\")\n        raise PipelineDrawingError(\n            \"There was an issue with https://mermaid.ink/, see the stacktrace for details.\"\n        ) from exc\n\n    return resp.content\n\n\ndef _to_mermaid_text(graph: networkx.MultiDiGraph) -> str:\n    \"\"\"\n    Converts a Networkx graph into Mermaid syntax. The output of this function can be used in the documentation\n    with `mermaid` codeblocks and it will be automatically rendered.\n    \"\"\"\n    sockets = {\n        comp: \"\".join(\n            [\n                f\"<li>{name} ({_type_name(socket.type)})</li>\"\n                for name, socket in data.get(\"input_sockets\", {}).items()\n                if socket.is_optional and not socket.sender\n            ]\n        )\n        for comp, data in graph.nodes(data=True)\n    }\n    optional_inputs = {\n        comp: f\"<br/><br/>Optional inputs:<ul>{sockets}</ul>\" if sockets else \"\"\n        for comp, sockets in sockets.items()\n    }\n\n    states = {\n        comp: f\"{comp}[\\\"{comp}<br/>{type(data['instance']).__name__}{optional_inputs[comp]}\\\"]:::component\"\n        for comp, data in graph.nodes(data=True)\n        if comp not in [\"input\", \"output\"]\n    }\n\n    connections_list = [\n        f\"{states[from_comp]} -- \\\"{conn_data['label']}<br/>{conn_data['conn_type']}\\\" --> {states[to_comp]}\"\n        for from_comp, to_comp, conn_data in graph.edges(data=True)\n        if from_comp != \"input\" and to_comp != \"output\"\n    ]\n    input_connections = [\n        f\"i{{*}} -- \\\"{conn_data['label']}<br/>{conn_data['conn_type']}\\\" --> {states[to_comp]}\"\n        for _, to_comp, conn_data in graph.out_edges(\"input\", data=True)\n    ]\n    output_connections = [\n        f\"{states[from_comp]} -- \\\"{conn_data['label']}<br/>
    {conn_data['conn_type']}\\\"--> o{{*}}\"\n for from_comp, _, conn_data in graph.in_edges(\"output\", data=True)\n ]\n connections = \"\\n\".join(connections_list + input_connections + output_connections)\n\n graph_styled = MERMAID_STYLED_TEMPLATE.format(connections=connections)\n logger.debug(\"Mermaid diagram:\\n%s\", graph_styled)\n\n return graph_styled\n","repo_name":"deepset-ai/canals","sub_path":"canals/pipeline/draw/mermaid.py","file_name":"mermaid.py","file_ext":"py","file_size_in_byte":3625,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"61"} +{"seq_id":"1828149734","text":"from pdfminer.layout import LTTextBoxHorizontal\n\n\ndef split_texts(lines):\n return list(map(lambda x: list(filter(lambda t: t != \"\", x.get_text().split('\\n'))), lines))\n\n\ndef extract_tuples(line):\n lines = sorted(line, key=lambda x: x.x0)\n tuples = [x if len(x) > 1 else x[0] for x in list(zip(*split_texts(lines)))]\n return tuples\n\n\ndef extract_lines_of_column(column):\n lines = sorted(column, key=lambda l: l.y0, reverse=True)\n arrays = [x if len(x) > 1 else x[0] for x in split_texts(lines)]\n return arrays if len(arrays) > 1 else arrays[0]\n\n\ndef get_line_boxes(page, rounding=3):\n lines = []\n boxes = [t for t in page if isinstance(t, LTTextBoxHorizontal)]\n boxes = sorted(boxes, key=lambda x: x.y0, reverse=True)\n prev_line = None\n current_line = []\n for b in boxes:\n if round(b.y0, rounding) != prev_line:\n if len(current_line):\n tuples = extract_tuples(current_line)\n lines.append(tuples)\n current_line = []\n\n current_line.append(b)\n prev_line = round(b.y0, rounding)\n\n tuples = extract_tuples(current_line)\n lines.append(tuples)\n return lines\n\n\ndef get_column_boxes(page, rounding=3):\n columns = []\n boxes = [t for t in page if isinstance(t, LTTextBoxHorizontal)]\n boxes = sorted(boxes, key=lambda x: (x.x0, x.x1))\n prev_column_x0 = None\n prev_column_x1 = None\n current_column = []\n for b in boxes:\n if round(b.x0, rounding) != prev_column_x0:\n if round(b.x1, rounding) != prev_column_x1:\n if len(current_column):\n columns.append(extract_lines_of_column(current_column))\n current_column = []\n\n current_column.append(b)\n prev_column_x0 = round(b.x0, rounding)\n prev_column_x1 = round(b.x1, rounding)\n\n columns.append(extract_lines_of_column(current_column))\n return columns\n","repo_name":"rmic/pdfinterpret","sub_path":"pdfinterpret/extraction.py","file_name":"extraction.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23954997723","text":"# expenses = [10.5, 8, 5, 9, 2, 3]\n# total = sum(expenses)\n# print(\"You spent $\", total, \" on lunch this week\", sep=\"\")\n# for x in expenses:\n# sum = sum + x\n#\n#\n# print(\"you spent $\", sum, \"on lunch this week\", sep=\"\")\n# fruits = [\"apple\", \"guava\", \"mango\", \"orange\", \"peach\"]\n\n# if \"orange\" in fruits:\n# print(\"yes, this fruit is in fruits\")\n# else:\n# print(\"blueberry is not part of fruits\")\n#\n# for x in fruits:\n# print(x)\n\n# print(fruits[1:])\n\n# total = 0\n# expenses = []\n# for i in range(7):\n# expenses.append(float(input(\"Type in your expenses here \\n\")))\n#\n# total = sum(expenses)\n# print(\"You spent N\", total, \" for the week of 29th April 2021\", sep=\"\")\n\ntotal = 0\nexpenses = []\nnum_expenses = int(input(\"Enter number of expenses\\n\"))\nfor i in range(num_expenses):\n expenses.append(float(input(\"Type in your 
expenses here \\n\")))\n\ntotal = sum(expenses)\nprint(\"You spent N\", total, \" for the week of 29th April 2021\", sep=\"\")\n\n","repo_name":"Oviep/pluralsight_tutorial","sub_path":"loops_expenses.py","file_name":"loops_expenses.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13763946115","text":"from scipy.spatial import distance\nfrom imutils import face_utils\nimport imutils\nimport dlib\nimport cv2\nimport time\nimport datetime\n\nthresh = 0.18\n\ndetect = dlib.get_frontal_face_detector()\npredict = dlib.shape_predictor(\"shape_predictor_68_face_landmarks.dat\")# Dat file is the crux of the code\n\ndef eye_aspect_ratio(eye):\n\tA = distance.euclidean(eye[1], eye[5])\n\tB = distance.euclidean(eye[2], eye[4])\n\tC = distance.euclidean(eye[0], eye[3])\n\tear = (A + B) / (2.0 * C)\n\treturn ear\n\ndef CountingArray(array):\n\t# array = [110101101]\n\treturn array.count(\"01\")\n\t# return finalCount\n\ndef CountBlink():\n\tnumber_of_blinks = 0\n\t(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS[\"left_eye\"]\n\t(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS[\"right_eye\"]\n\tcap = cv2.VideoCapture(0)\n\tblink_array = \"\"\n\n\tend_time = time.time() + 5\n\n\twhile time.time() < end_time:\n\t\t# if len(blink_array)>0 and blink_array[-1]==\"1\":\n\t\t# \tprint(blink_array)\n\t\tret, frame=cap.read()\n\t\tframe = imutils.resize(frame, width=450)\n\t\tgray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\t\tsubjects = detect(gray, 0)\n\t\tfor subject in subjects:\n\t\t\tshape = predict(gray, subject)\n\t\t\tshape = face_utils.shape_to_np(shape)#converting to NumPy Array\n\t\t\tleftEye = shape[lStart:lEnd]\n\t\t\trightEye = shape[rStart:rEnd]\n\t\t\tleftEAR = eye_aspect_ratio(leftEye)\n\t\t\trightEAR = eye_aspect_ratio(rightEye)\n\t\t\tear = (leftEAR + rightEAR) / 2.0\n\t\t\tleftEyeHull = cv2.convexHull(leftEye)\n\t\t\trightEyeHull = cv2.convexHull(rightEye)\n\n\t\t\tif ear < thresh:\n\t\t\t\tblink_array += \"1\"\n\t\t\telse:\n\t\t\t\tblink_array += \"0\"\n\n\t\tkey = cv2.waitKey(1) & 0xFF\n\n\tcv2.destroyAllWindows()\n\tcap.release()\n\treturn CountingArray(blink_array)\n\t# stats_file.write(str(datetime.datetime.now())+\",\"+str(finalCount)+\"\\n\")\n\t# print(CountingArray(blink_array))\n\t# sys.stdout.flush()\n","repo_name":"zenithexpo/iblink","sub_path":"engine/Blink_Countcopy.py","file_name":"Blink_Countcopy.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"16932822535","text":"from lib.alerttask import AlertTask\nfrom mozdef_util.query_models import SearchQuery, TermMatch, ExistsMatch\n\n\nclass AlertGuardDutyProbe(AlertTask):\n def main(self):\n # Create a query to look back the last 20 minutes\n search_query = SearchQuery(minutes=20)\n\n # Add search terms to our query\n search_query.add_must([\n TermMatch('source', 'guardduty'),\n TermMatch('details.finding.action.actionType', 'PORT_PROBE'),\n ExistsMatch('details.sourceipaddress'),\n ])\n\n self.filtersManual(search_query)\n # Search aggregations on field 'sourceipaddress'\n # keep X samples of events at most\n self.searchEventsAggregated('details.sourceipaddress', samplesLimit=10)\n # alert when >= X matching events in an aggregation\n self.walkAggregations(threshold=1)\n\n # Set alert properties\n def onAggregation(self, aggreg):\n # aggreg['count']: number of items in the aggregation, ex: number of failed login 
attempts\n        # aggreg['value']: value of the aggregation field, ex: toto@example.com\n        # aggreg['events']: list of events in the aggregation\n        category = 'bruteforce'\n        tags = ['guardduty', 'bruteforce']\n        severity = 'INFO'\n        summary = \"Guard Duty Port Probe by {}\".format(aggreg['value'])\n\n        # Create the alert object based on these properties\n        return self.createAlertDict(summary, category, tags, aggreg['events'], severity)\n","repo_name":"mozilla/MozDef","sub_path":"alerts/guard_duty_probe.py","file_name":"guard_duty_probe.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","stars":2170,"dataset":"github-code","pt":"61"}
+{"seq_id":"40478696573","text":"import shutil\nimport json\nimport io\nimport random\nimport os\n\n'''\nGet 4000 images from COCO train json file + Create Images folder and Captions folder\n'''\n\nORG_DIR = './train2014/'\nDST_DIR = './COCOvn/Images_train/'\n\ncapDST_DIR = './COCOvn/Captions_train_en/'\n\njsonfile = './annotations/captions_train2014.json'\n\n\n### PREPARING FOLDERS\n# Delete old data\nif os.path.exists(DST_DIR):\n    shutil.rmtree(DST_DIR)\nif os.path.exists(capDST_DIR):\n    shutil.rmtree(capDST_DIR)\n\nos.mkdir(DST_DIR)\nos.mkdir(capDST_DIR)\n\n\n# Number of images to select\nN = 4000\n\nwith io.open(jsonfile, 'r', encoding='utf-8') as f:\n    jsondata = json.load(f)\n\n# Delete unnecessary fields\njsondata['licenses'] = []\n\nimgs = [(x['id'], x[\"file_name\"]) for x in jsondata[\"images\"]]\n\n# Only get N images\nimgs = random.sample(imgs, N)\n\n# List image IDs\nimgs_IDs = [x[0] for x in imgs]\n\n# List image filepaths\nimgs_filepaths = [x[1] for x in imgs]\n\nprint('=' * 40)\nprint('Copying files to destination...')\nc = 0\nfor filename in imgs_filepaths:\n    srcpath = ORG_DIR + filename\n    \n    # Format dest filename\n    fname = os.path.splitext(filename)[0]\n    ext = os.path.splitext(filename)[-1]\n    imgid = fname.split('_')[-1]\n    dest_filename = 'COCO5k_train_' + imgid + ext\n\n    dstpath = DST_DIR + dest_filename\n    shutil.copyfile(srcpath, dstpath)\n    \n    c += 1\n    if (c % 100 == 0):\n        print(\"[{}/{}] Copy file: {}\".format(c, len(imgs_filepaths), filename))\n\nprint('=' * 40)\nprint('Getting imgid_to_captions dict...')\nnumCaps = 0\nimgid_to_captions = {}\nfor annotation in jsondata['annotations']:\n    if annotation[\"image_id\"] not in imgs_IDs:\n        continue\n\n    imgid = annotation['image_id']\n    caption = annotation['caption']\n\n    imgid_to_captions.setdefault(imgid, [])\n    imgid_to_captions[imgid].append(caption)\n    numCaps += 1\n\nassert len(imgs) == len(imgid_to_captions)\nassert set(imgs_IDs) == set(imgid_to_captions.keys())\n\nprint(\"=> There are {} captions for {} images.\".format(numCaps,len(imgs)))\n\nprint('Creating captions files...')\nc = 0\nfor (imgid, filename) in imgs:\n    captions = imgid_to_captions[imgid]\n\n    fname = os.path.splitext(filename)[0]\n    fid = fname.split('_')[-1]\n    dest_filename = 'COCO5k_train_' + fid + '_en.txt'\n\n    # Print abnormal captions\n    if len(captions) != 5:\n        print(\"### Abnormal captions (len = {}) at file: {}\".format(len(captions), dest_filename))\n        # truncate to 5 captions\n        captions = captions[:5]\n\n    # Abnormal file image ID\n    if imgid != int(fid):\n        print(\"### Abnormal file ID at file: {}\".format(dest_filename))\n\n    with open(capDST_DIR + dest_filename, 'w+', encoding='utf-8') as file:\n        # format id\n        #id_s = \"{:08}\".format(imgid)\n\n        if len(captions) != 5:\n            print(dest_filename)\n            print(captions)\n            exit()\n\n        file.write(str(imgid))\n        for cap in 
captions:\n # Normalize cap\n cap = cap.strip()\n if (cap[-1] == '.'):\n cap = cap[:-1]\n \n if len(cap) < 20:\n print(cap)\n print(dest_filename)\n exit()\n\n file.write('\\n' + cap.strip())\n\n c += 1\n if (c % 100 == 0):\n print(\"[{}/{}] Create caption file: {}\".format(c, len(imgs), filename))","repo_name":"Flavius1996/VNcap_Translator","sub_path":"UTILITIES_CODE/CreateDataset.py","file_name":"CreateDataset.py","file_ext":"py","file_size_in_byte":3341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71080562754","text":"from eggscript_src.config import get_config\nfrom eggscript_src.expressions.expression import Expression\nfrom eggscript_src.regex import closing_curly_bracket_token, colon_token, valid_case, valid_default\n\nclass CaseExpression(Expression):\n\tdef __init__(self, tokenizer=None):\n\t\tsuper().__init__(tokenizer=tokenizer)\n\t\tself.conditional_expressions = []\n\t\tself.is_code_block = True\n\t\n\tdef convert_expressions_to_conditionals(self):\n\t\tself.conditional_expressions = self.expressions\n\t\tself.expressions = []\n\t\n\tdef __str__(self):\n\t\treturn f\"CaseExpression({self.expressions})\"\n\t\n\tdef __repr__(self):\n\t\treturn self.__str__()\n\t\n\tdef to_script(self):\n\t\tnewline = \"\\n\"\n\t\tspace = \" \"\n\t\ttab = \"\\t\"\n\t\tif get_config(\"minify\") == True:\n\t\t\tnewline = \"\"\n\t\t\tspace = \"\"\n\t\t\ttab = \"\"\n\n\t\toutput = \"\"\n\t\tfor conditional_expression in self.conditional_expressions:\n\t\t\toutput = output + conditional_expression.to_script()\n\t\t\n\t\tfull_output = f\"case {output}:\" + newline\n\n\t\toutput = \"\"\n\t\tfor expression in self.expressions:\n\t\t\toutput = output + (tab * self.get_indent_level()) + expression.to_script() + newline\n\t\t\n\t\tfull_output = full_output + output\n\t\n\t\treturn full_output\n\t\n\tdef read_expression(tokenizer, tree):\n\t\texpression = CaseExpression(tokenizer=tokenizer)\n\t\ttokenizer.file.give_character_back()\n\t\ttokenizer.tokenize(stop_ats=[colon_token], tree=expression)\n\t\texpression.convert_expressions_to_conditionals()\n\n\t\t# read up until next case, next default, or }\n\t\ttokenizer.tokenize(give_back_stop_ats=[closing_curly_bracket_token], buffer_give_back_stop_at=[valid_case, valid_default], tree=expression)\n\n\t\treturn expression\n\nExpression.add_keyword_regex(valid_case, CaseExpression)","repo_name":"bansheerubber/eggscript","sub_path":"eggscript_src/expressions/case_expression.py","file_name":"case_expression.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"70204267715","text":"class Solution:\n def buildArray(self, target: list[int], n: int) -> list[str]:\n p=1\n ans=[]\n for i in range(len(target)):\n while target[i]!=p:\n p+=1\n ans.append('Push')\n ans.append('Pop')\n ans.append('Push')\n p+=1\n return ans\na = Solution()\nprint(a.buildArray([1,3],3))","repo_name":"z369437558/Leetcode","sub_path":"1441.py","file_name":"1441.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"32435994686","text":"from Tkinter import *\n\nroot = Tk()\n\nxtarget = 0\nytarget = 0\n\nclass incdec():\n 'Common class for all tkinter motor interface portions controlled by keystrokes'\n def __init__(self, label, value, decbutton, incbutton, incdecres, labelcol, labelrow, valuecol, valuerow):\n global root\n self.value 
= value\n        self.incdecres = incdecres\n        self.valdisp = Label(root, text=self.value)\n        self.valdisp.grid(row=valuerow, column=valuecol)\n        self.labdisp = Label(root, text=label)\n        self.labdisp.grid(row=labelrow, column=labelcol)\n        root.bind_all(decbutton, self.decrement)\n        root.bind_all(incbutton, self.increment)\n    def increment(self, event):\n        self.value += self.incdecres\n        self.valdisp.config(text=self.value)\n    def decrement(self, event):\n        self.value -= self.incdecres\n        self.valdisp.config(text=self.value)\n\nxincdec = incdec(\"X:\", xtarget, \"\", \"\", 1, 0, 0, 1, 0)\nyincdec = incdec(\"Y:\", ytarget, \"\", \"\", 1, 0, 1, 1, 1)\n\nroot.mainloop()\n","repo_name":"davidcutting42/robotarm-17","sub_path":"MK2-WM-Roboarm/SOUP Invitational/SOUP-Python-Code-MK2-WM-Roboarm/SOUP-Python-Code-Dev.py","file_name":"SOUP-Python-Code-Dev.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"30782852498","text":"import datetime\nimport sys\n\nfrom airflow.hooks.postgres_hook import PostgresHook\nfrom airflow.contrib.hooks.aws_hook import AwsHook\nfrom airflow.operators.postgres_operator import PostgresOperator\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.operators import S3ToRedshiftOperator\nfrom airflow.operators import PostgresHasRowsOperator\nfrom airflow.models import Variable\n\nfrom airflow import DAG\n\nsys.path.insert(0,\"~/airflow/dags/s3-redshift-helpers/\")\n\nimport dag_sql\n\ndag_vars = Variable.get(\"songs_s3_buckets\", deserialize_json=True)\ninput_bucket = dag_vars[\"bucket\"]\nlog_data = dag_vars[\"logs\"]\nsong_data = dag_vars[\"songs\"]\n\nstage_songs_params = []\nstage_songs_params.append(\" json 'auto' \")\n\nwith DAG(dag_id=\"s3_to_redshift-w-plugin\",schedule_interval=None, \n    start_date=datetime.datetime.now()-datetime.timedelta(hours = 4)\n    ) as dag:\n\n    init_tables = PostgresOperator( dag = dag,\n        task_id = \"init_tables\",\n        postgres_conn_id = \"redshift_songs\",\n        sql = dag_sql.init_tables\n    )\n    \n    verify_staged_songs = PostgresHasRowsOperator(\n        task_id = \"verify_imported_records\",\n        table = \"staging_songs\", \n        connection_id = \"redshift_songs\")\n    \n    stage_songs_from_s3 = S3ToRedshiftOperator(\n        task_id = \"stage-songs-to-redshift\",\n        redshift_conn_id = \"redshift_songs\",\n        aws_conn_id = \"s3_songs\",\n        table = \"staging_songs\",\n        s3_bucket = input_bucket,\n        s3_key = song_data,\n        overwrite = True,\n        copy_params = stage_songs_params\n    )\n\n    extract_artists = PostgresOperator(\n        task_id = \"load_artists\",\n        postgres_conn_id=\"redshift_songs\",\n        sql = dag_sql.load_artists\n    )\n\n    extract_songs = PostgresOperator(\n        task_id = \"load_songs\",\n        postgres_conn_id=\"redshift_songs\",\n        sql = dag_sql.load_songs\n    ) \n    init_tables>>stage_songs_from_s3\n    stage_songs_from_s3>>verify_staged_songs\n    verify_staged_songs>>[extract_artists,extract_songs]\n\n","repo_name":"kbaafi/apache-airflow-adventures","sub_path":"Airflow-S3-to-Redshift-Plugin/dags/dag.py","file_name":"dag.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"72329660353","text":"from __future__ import annotations\nfrom enum import auto\nfrom typing import Optional, TYPE_CHECKING\n\nfrom base_enum import BaseEnum\nfrom monster_base import MonsterBase\nfrom random_gen import RandomGen\nfrom helpers import get_all_monsters\n\nfrom 
data_structures.referential_array import ArrayR\n\nif TYPE_CHECKING:\n from battle import Battle\n\nclass MonsterTeam:\n\n class TeamMode(BaseEnum):\n\n FRONT = auto()\n BACK = auto()\n OPTIMISE = auto()\n\n class SelectionMode(BaseEnum):\n\n RANDOM = auto()\n MANUAL = auto()\n PROVIDED = auto()\n\n class SortMode(BaseEnum):\n\n HP = auto()\n ATTACK = auto()\n DEFENSE = auto()\n SPEED = auto()\n LEVEL = auto()\n\n TEAM_LIMIT = 6\n\n def __init__(self, team_mode: TeamMode, selection_mode, **kwargs) -> None:\n # Add any preinit logic here.\n self.team_mode = team_mode\n self.monster_order = ArrayR(self.TEAM_LIMIT)\n self.current_size = 0\n if 'provided_monsters' in kwargs:\n self.provided_monsters = kwargs.get(\"provided_monsters\")\n else:\n self.provided_monsters = None\n\n if selection_mode == self.SelectionMode.RANDOM:\n self.select_randomly(**kwargs)\n elif selection_mode == self.SelectionMode.MANUAL:\n self.select_manually(**kwargs)\n elif selection_mode == self.SelectionMode.PROVIDED:\n self.select_provided(**kwargs)\n else:\n raise ValueError(f\"selection_mode {selection_mode} not supported.\")\n\n def _get_sort_key_method(self):\n if self.sort_key == self.SortMode.HP:\n return lambda monster: monster.get_hp()\n elif self.sort_key == self.SortMode.ATTACK:\n return lambda monster: monster.get_attack()\n elif self.sort_key == self.SortMode.DEFENSE:\n return lambda monster: monster.get_defense()\n elif self.sort_key == self.SortMode.SPEED:\n return lambda monster: monster.get_speed()\n elif self.sort_key == self.SortMode.LEVEL:\n return lambda monster: monster.get_level()\n \n def add_to_team(self, monster: MonsterBase):\n if self.current_size >= self.TEAM_LIMIT:\n raise ValueError(\"Team is already at maximum capacity.\")\n\n if self.team_mode == self.TeamMode.FRONT:\n for i in range(self.current_size, 0, -1):\n self.monster_order[i] = self.monster_order[i - 1]\n self.monster_order[0] = monster\n elif self.team_mode == self.TeamMode.BACK:\n self.monster_order[self.current_size] = monster\n elif self.team_mode == self.TeamMode.OPTIMISE:\n # Find the position to insert the monster based on the sorting stat\n insert_position = 0\n sort_key_method = self._get_sort_key_method() # Retrieve the appropriate method\n \n while (\n insert_position < self.current_size\n and sort_key_method(monster) < sort_key_method(self.monster_order[insert_position])\n ):\n insert_position += 1\n \n # Shift elements manually to make space for the new monster\n for i in range(self.current_size, insert_position, -1):\n self.monster_order[i] = self.monster_order[i - 1]\n\n # Insert the monster at the correct position\n self.monster_order[insert_position] = monster\n\n self.current_size += 1\n\n def retrieve_from_team(self) -> MonsterBase:\n if self.current_size == 0:\n return self.monster_order[0]\n\n retrieved_monster = self.monster_order[0]\n\n if self.team_mode == self.TeamMode.FRONT:\n # Shift remaining monsters to the left\n for i in range(self.current_size - 1):\n self.monster_order[i] = self.monster_order[i + 1]\n elif self.team_mode == self.TeamMode.BACK:\n # Shift remaining monsters to the left (excluding the last one)\n for i in range(1, self.current_size):\n self.monster_order[i - 1] = self.monster_order[i]\n elif self.team_mode == self.TeamMode.OPTIMISE:\n for i in range(self.current_size - 1):\n self.monster_order[i] = self.monster_order[i + 1]\n\n self.current_size -= 1\n\n return retrieved_monster\n\n def special(self) -> None:\n middle_index = self.current_size // 2\n\n if self.team_mode == 
self.TeamMode.FRONT:\n self.monster_order[0], self.monster_order[middle_index] = self.monster_order[middle_index], self.monster_order[0]\n\n elif self.team_mode == self.TeamMode.BACK:\n for i in range(middle_index):\n j = self.current_size - i - 1\n x = self.monster_order[i]\n y = self.monster_order[j]\n self.monster_order[j] = x\n self.monster_order[i] = y\n if middle_index > 1:\n if (self.current_size % 2) == 0:\n self.monster_order[middle_index], self.monster_order[self.current_size-1] = self.monster_order[self.current_size-1], self.monster_order[middle_index]\n else:\n self.monster_order[middle_index+1], self.monster_order[self.current_size-1] = self.monster_order[self.current_size-1], self.monster_order[middle_index+1]\n\n elif self.team_mode == self.TeamMode.OPTIMISE:\n sort_key_method = self._get_sort_key_method() # Retrieve the appropriate method\n\n index_dict = {\n monster: sort_key_method(monster)\n for monster in self.monster_order\n if monster is not None\n }\n monsters = sorted(index_dict, key=lambda x: index_dict[x])\n for i, monster in enumerate(monsters):\n self.monster_order[i] = monster\n\n def regenerate_team(self) -> None:\n if self.provided_monsters:\n for i in range(self.TEAM_LIMIT):\n self.monster_order[i] = None\n\n self.current_size = 0\n for i, m in enumerate(self.provided_monsters):\n if self.team_mode == self.TeamMode.FRONT:\n num = len(self.provided_monsters)\n self.monster_order[num-i-1] = m()\n self.monster_order[num-i-1].level = 1 # Reset to level 1\n self.monster_order[num-i-1].hp = self.monster_order[num-i-1].get_max_hp() # Restore full health\n\n elif self.team_mode == self.TeamMode.BACK:\n self.monster_order[i] = m()\n self.monster_order[i].level = 1 # Reset to level 1\n self.monster_order[i].hp = self.monster_order[i].get_max_hp() # Restore full health\n self.current_size += 1\n return \n for i in range(self.current_size):\n self.monster_order[i].level = 1 # Reset to level 1\n self.monster_order[i].hp = self.monster_order[i].get_max_hp() # Restore full health\n\n def select_randomly(self, sort_key=None):\n self.sort_key = sort_key\n\n team_size = RandomGen.randint(1, self.TEAM_LIMIT)\n monsters = get_all_monsters()\n n_spawnable = 0\n for x in range(len(monsters)):\n if monsters[x].can_be_spawned():\n n_spawnable += 1\n\n for _ in range(team_size):\n spawner_index = RandomGen.randint(0, n_spawnable-1)\n cur_index = -1\n for x in range(len(monsters)):\n if monsters[x].can_be_spawned():\n cur_index += 1\n if cur_index == spawner_index:\n # Spawn this monster\n self.add_to_team(monsters[x]())\n break\n else:\n raise ValueError(\"Spawning logic failed.\")\n\n def select_manually(self, sort_key=None):\n \"\"\"\n Prompt the user for input on selecting the team.\n Any invalid input should have the code prompt the user again.\n\n First input: Team size. Single integer\n For _ in range(team size):\n Next input: Prompt selection of a Monster class.\n * Should take a single input, asking for an integer.\n This integer corresponds to an index (1-indexed) of the helpers method\n get_all_monsters()\n * If invalid of monster is not spawnable, should ask again.\n\n Add these monsters to the team in the same order input was provided. Example interaction:\n\n How many monsters are there? 
2\n MONSTERS Are:\n 1: Flamikin [✔️]\n 2: Infernoth [❌]\n 3: Infernox [❌]\n 4: Aquariuma [✔️]\n 5: Marititan [❌]\n 6: Leviatitan [❌]\n 7: Vineon [✔️]\n 8: Treetower [❌]\n 9: Treemendous [❌]\n 10: Rockodile [✔️]\n 11: Stonemountain [❌]\n 12: Gustwing [✔️]\n 13: Stormeagle [❌]\n 14: Frostbite [✔️]\n 15: Blizzarus [❌]\n 16: Thundrake [✔️]\n 17: Thunderdrake [❌]\n 18: Shadowcat [✔️]\n 19: Nightpanther [❌]\n 20: Mystifly [✔️]\n 21: Telekite [❌]\n 22: Metalhorn [✔️]\n 23: Ironclad [❌]\n 24: Normake [❌]\n 25: Strikeon [✔️]\n 26: Venomcoil [✔️]\n 27: Pythondra [✔️]\n 28: Constriclaw [✔️]\n 29: Shockserpent [✔️]\n 30: Driftsnake [✔️]\n 31: Aquanake [✔️]\n 32: Flameserpent [✔️]\n 33: Leafadder [✔️]\n 34: Iceviper [✔️]\n 35: Rockpython [✔️]\n 36: Soundcobra [✔️]\n 37: Psychosnake [✔️]\n 38: Groundviper [✔️]\n 39: Faeboa [✔️]\n 40: Bugrattler [✔️]\n 41: Darkadder [✔️]\n Which monster are you spawning? 38\n MONSTERS Are:\n 1: Flamikin [✔️]\n 2: Infernoth [❌]\n 3: Infernox [❌]\n 4: Aquariuma [✔️]\n 5: Marititan [❌]\n 6: Leviatitan [❌]\n 7: Vineon [✔️]\n 8: Treetower [❌]\n 9: Treemendous [❌]\n 10: Rockodile [✔️]\n 11: Stonemountain [❌]\n 12: Gustwing [✔️]\n 13: Stormeagle [❌]\n 14: Frostbite [✔️]\n 15: Blizzarus [❌]\n 16: Thundrake [✔️]\n 17: Thunderdrake [❌]\n 18: Shadowcat [✔️]\n 19: Nightpanther [❌]\n 20: Mystifly [✔️]\n 21: Telekite [❌]\n 22: Metalhorn [✔️]\n 23: Ironclad [❌]\n 24: Normake [❌]\n 25: Strikeon [✔️]\n 26: Venomcoil [✔️]\n 27: Pythondra [✔️]\n 28: Constriclaw [✔️]\n 29: Shockserpent [✔️]\n 30: Driftsnake [✔️]\n 31: Aquanake [✔️]\n 32: Flameserpent [✔️]\n 33: Leafadder [✔️]\n 34: Iceviper [✔️]\n 35: Rockpython [✔️]\n 36: Soundcobra [✔️]\n 37: Psychosnake [✔️]\n 38: Groundviper [✔️]\n 39: Faeboa [✔️]\n 40: Bugrattler [✔️]\n 41: Darkadder [✔️]\n Which monster are you spawning? 2\n This monster cannot be spawned.\n Which monster are you spawning? 1\n \"\"\"\n self.sort_key = sort_key\n team_size = int(input(\"How many monsters are there? \"))\n while team_size > self.TEAM_LIMIT:\n print(\"Too many monsters.\")\n team_size = int(input(\"How many monsters are there? \"))\n\n print(\"MONSTERS ARE:\")\n monsters = get_all_monsters()\n for i, monster_cls in enumerate(monsters, start=1):\n spawnable = \"✔️\" if monster_cls.can_be_spawned() else \"❌\"\n print(f\"{i}: {monster_cls.get_name()} [{spawnable}]\")\n \n for _ in range(team_size):\n while True:\n selection = int(input(\"Which monster are you spawning? \"))\n if selection < 1 or selection > len(monsters):\n print(\"Invalid selection. 
Please choose a valid index.\")\n                    continue\n                \n                monster_cls = monsters[selection - 1]\n                if not monster_cls.can_be_spawned():\n                    print(\"This monster cannot be spawned.\")\n                else:\n                    self.add_to_team(monster_cls())\n                    break\n\n    def select_provided(self, provided_monsters:Optional[ArrayR[type[MonsterBase]]]=None, sort_key=None):\n        \"\"\"\n        Generates a team based on a list of already provided monster classes.\n\n        While the type hint implies the argument can be None, this method should never be called without the list.\n        Monsters should be added to the team in the same order as the provided array.\n\n        Example input:\n        [Flamikin, Aquariuma, Gustwing] <- These are all classes.\n\n        Example team if in TeamMode.FRONT:\n        [Gustwing Instance, Aquariuma Instance, Flamikin Instance]\n        \"\"\"\n        self.sort_key = sort_key\n        if not provided_monsters:\n            raise ValueError(\"No provided monsters found.\")\n        \n        for monster_class in provided_monsters:\n            monster = monster_class()\n            if not monster.can_be_spawned():\n                raise ValueError(\"Monster does not exist.\")\n            self.add_to_team(monster)\n\n    def choose_action(self, currently_out: MonsterBase, enemy: MonsterBase) -> Battle.Action:\n        # This is just a placeholder function that doesn't matter much for testing.\n        from battle import Battle\n        if currently_out.get_speed() >= enemy.get_speed() or currently_out.get_hp() >= enemy.get_hp():\n            return Battle.Action.ATTACK\n        return Battle.Action.SWAP\n\n    def __len__(self) -> int:\n        return self.current_size\n    \n\nif __name__ == \"__main__\":\n    team = MonsterTeam(\n        team_mode=MonsterTeam.TeamMode.OPTIMISE,\n        selection_mode=MonsterTeam.SelectionMode.RANDOM,\n        sort_key=MonsterTeam.SortMode.HP,\n    )\n    print(team)\n    while len(team):\n        print(team.retrieve_from_team())\n","repo_name":"Rong830/Monster-Battles","sub_path":"team.py","file_name":"team.py","file_ext":"py","file_size_in_byte":14126,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
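MonsterTeam's OPTIMISE mode above is a hand-rolled insertion into a sorted fixed-size array: walk to the first position whose stat is not larger, shift the tail right, and drop the new element in. The same idea in miniature on a plain Python list (editor's sketch; the helper name is hypothetical and not part of the repo):

```python
def insert_sorted_desc(team, monster, key):
    # Advance while existing members have a strictly larger key, mirroring
    # the descending order kept by MonsterTeam's OPTIMISE mode.
    pos = 0
    while pos < len(team) and key(team[pos]) > key(monster):
        pos += 1
    team.insert(pos, monster)

team = []
for hp in (3, 9, 5):
    insert_sorted_desc(team, {"hp": hp}, key=lambda m: m["hp"])
print(team)  # [{'hp': 9}, {'hp': 5}, {'hp': 3}]
```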
+{"seq_id":"32974013458","text":"from github import Github\n\ndef get_blob_content(repo, branch, path_name):\n\t# first get the branch reference\n\tref = repo.get_git_ref(f'heads/{branch}')\n\t# then get the tree\n\ttree = repo.get_git_tree(ref.object.sha, recursive='/' in path_name).tree\n\t# look for path in tree\n\tsha = [x.sha for x in tree if x.path == path_name]\n\tif not sha:\n\t\t# well, not found..\n\t\treturn None\n\t# we have sha\n\treturn repo.get_git_blob(sha[0])\n\ndef update_Data():\n\tg = Github(\"itracanalise\",\" \")\n\n\tuser = g.get_user()\n\trepo = g.get_repo(\"itracanalise/analiseComentarios\")\n\n\tall_files = []\n\n\tcontents = repo.get_contents(\"\")\n\n\twhile contents:\n\t\tfile_content = contents.pop(0)\n\t\tif file_content.type == \"dir\":\n\t\t\tcontents.extend(repo.get_contents(file_content.path))\n\t\telse:\n\t\t\tfile = file_content\n\t\t\tall_files.append(str(file).replace('ContentFile(path=\"','').replace('\")',''))\n\t\tprint(all_files[-1])\n\twith open('General_Data.csv', 'r') as file:\n\t\tcontent = file.read()\n\n\t# Upload to github\n\n\tgit_file = 'General_Data.csv'\n\n\n\tif git_file in all_files:\n\t\tprint(\"at update\")\n\t\tGlob = get_blob_content(repo, 'main', git_file)\n\t\trepo.update_file(git_file, \"committing files\", content,Glob.sha, branch=\"main\")\n\t\tprint(git_file + ' UPDATED')\n\telse:\n\t\trepo.create_file(git_file, \"committing files\", content, branch=\"main\")\n\t\tprint(git_file + ' CREATED')\n","repo_name":"itracanalise/analiseComentarios","sub_path":"UpdateOrCreateFile.py","file_name":"UpdateOrCreateFile.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"13683872948","text":"\n# fractions program\n\ndef jam(f1,f2):\n    result={}\n    result['s']=(f1['s']*f2['m']+ f2['s']*f1['m'])\n    result['m']=f1['m']*f2['m']\n    return result\ndef zarb(f1,f2):\n    result={}\n    result['s']=f1[\"s\"]*f2[\"s\"]\n    result['m']=f1[\"m\"]*f2[\"m\"]\n    return result\ndef taghsim(f1,f2):\n    result={}\n    result['s']=(f1['s']* f2['m'])\n    result['m']=(f1['m']* f2['s'])\n    return result\ndef tafrigh(f1,f2):\n    result={}\n    result['s']=(((f1['m']*f2['m']/f1['m'])*f1['s'])- (f1['m']*f2['m']/ f2['m'])*f2['s'])\n    result['m']=(f1['m']*f2['m'])\n    return result\ndef show(r):\n    print(f'{r[\"s\"]} / {r[\"m\"]}')\n\ndef options():\n    while 1:\n        num1=float(input(\"enter top num: \"))\n        num2=float(input(\"enter down num: \"))\n        if num2==0:\n            print(\"wrong\")\n            while 1:\n                num2=float(input(\"try again : \"))\n                if num2!=0:\n                    break\n        num3=float(input(\"enter top num : \"))\n        num4=float(input(\"enter down num: \"))\n        if num4==0:\n            print(\"wrong!!!\")\n            while 1:\n                num4=float(input(\"try again: \"))\n                if num4!=0:\n                    break\n        \n        print(f\"first fraction:\\n {num1}/{num2} and second fraction: {num3}/{num4} \")\n        break\n    \n    f1={\"s\":num1 , \"m\":num2}\n    f2={\"s\":num3 , \"m\":num4} \n\n    print(\"choose your operation: \\t 1: jam (add) \\t 2: zarb (multiply) \\t 3: tafrigh (subtract) \\t 4: taghsim (divide)\")\n    option=int(input())\n    if option==1:\n        show(jam(f1,f2))\n    elif option==2:\n        show(zarb(f1,f2))\n    elif option==3:\n        show(tafrigh(f1,f2))\n    elif option==4:\n        show(taghsim(f1,f2))\n    else:\n        print(\"wrong!!!\")\n\n\noptions() \n    \n\r\n","repo_name":"ElahahAria/session9","sub_path":"homework9.py","file_name":"homework9.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"11664070244","text":"class Car(object):\n    \n    \n    def __init__(self, name = \"General\", model = \"GM\" , *car_type):\n        \n        self.num_of_doors = 4 if not(name =='Porshe' or name == 'Koenigsegg') else 2\n        self.num_of_wheels = 8 if 'trailer' in car_type else 4\n        self.speed = 0\n        self.name = name\n        self.model = model\n        self.car_type = car_type\n\n\n    def drive(self, knot):\n        if self.is_saloon():\n            self.speed = 10**knot\n        else:\n            self.speed = knot*11\n        return self\n\n    def is_saloon(self):\n        return \"trailer\" not in self.car_type","repo_name":"FawazFarid/Andela","sub_path":"4 - OOP/car.py","file_name":"car.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"34525702428","text":"import os\nimport numpy as np\nimport pandas as pd\n\ndtdir = '../../../datasets/FKD-kaggle'\nimg_width = 96\nimg_height = 96\n\ndef gray_to_rgb(X):\n    X = X.reshape(-1, 96, 96, 1)\n    \n    ret = np.empty((X.shape[0], img_width, img_height, 3), dtype=np.float32)\n    ret[:, :, :, 0] = X[:, :, :, 0]\n    ret[:, :, :, 1] = X[:, :, :, 0]\n    ret[:, :, :, 2] = X[:, :, :, 0]\n    return ret\n\n\ndef data(drop=True, cols=None, reshape=False, g2rgb=False):\n\n    test_set = pd.read_csv(os.path.join(dtdir, 'test.csv'))\n    train_set = pd.read_csv(os.path.join(dtdir, 'training.csv'))\n    train_set['Image'] = train_set['Image'].apply(lambda im: np.fromstring(im, sep=' '))\n    test_set['Image'] = test_set['Image'].apply(lambda im: np.fromstring(im, sep=' '))\n\n\n    if cols: # get a 
subset of columns\n        train_set = train_set[list(cols) + ['Image']]\n        test_set = test_set[list(cols) + ['Image']]\n\n    print(train_set.count())\n\n    if drop:\n        train_set = train_set.dropna()\n        test_set = test_set.dropna()\n\n    X_train = np.vstack(train_set['Image'].values) / 255.\n    X_train = X_train.astype(np.float32)\n    X_test = np.vstack(test_set['Image'].values) / 255.\n    X_test = X_test.astype(np.float32)\n\n    y_train = train_set[train_set.columns[:-1]].values\n    y_train = (y_train - 48) / 48. # scale target coordinates to [-1, 1]\n    y_train = y_train.astype(np.float32)\n    \n    if reshape:\n        X_train = X_train.reshape(-1, 96, 96, 1)\n        X_test = X_test.reshape(-1, 96, 96, 1)\n    \n    if g2rgb:\n        X_train = gray_to_rgb(X_train)\n        X_test = gray_to_rgb(X_test)\n\n    print('Train Shape:', X_train.shape, y_train.shape)\n    print('Test Shape:', X_test.shape)\n\n    return (X_train, y_train), X_test\n\n\nif __name__ == \"__main__\":\n    data()\n\n\n\n\n\n\n","repo_name":"YasiRajaee/FaceLandmark-Detection","sub_path":"scripts/FKD-kaggle/predictive-models/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"8102846493","text":"#Author: JC\n#Date: 8/13/2018\n#Version: 1.0\n\ndef decoding(s):\n    count, result = 0, []\n    for c in s:\n        if c.isdigit():\n            count = count * 10 + int(c)\n        else: # c is a letter of the alphabet.\n            result.append(c * count) # Append count copies of c to result\n            count = 0\n    return ''.join(result)\n\ndef encoding(s):\n    result, count = [], 1 # count starts at 1: the first character is already one occurrence\n    for i in range(1, len(s) + 1):\n        if i == len(s) or s[i]!= s[i-1]:\n            # Found a new character, so write the count of the previous character\n            result.append(str(count) + s[i-1])\n            count = 1\n        else: # s[i] == s[i-1]\n            count += 1\n    return ''.join(result)\n\ns1 = 'aaaabcccaa'\ns2 = '3d4f2e'\nprint(decoding(s2))\nprint(encoding(s1))","repo_name":"macrocj/Python","sub_path":"String/Run_length_encoding.py","file_name":"Run_length_encoding.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
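With the count initialisation above corrected (the original started `count` at 0 and under-reported every run by one), the encode/decode pair is a proper inverse. A quick round-trip check (editor's example, assuming the two functions above are in scope):

```python
assert encoding('aaaabcccaa') == '4a1b3c2a'
assert decoding('4a1b3c2a') == 'aaaabcccaa'
assert decoding(encoding('zzzzz')) == 'zzzzz'
print('round-trip OK')
```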
return None\n db_obj.state = new_state.name\n db.add(db_obj)\n db.commit()\n db.refresh(db_obj)\n return db_obj\n\n def get_multi_tasks(\n self,\n db: Session,\n user_id: Optional[int] = None,\n name: Optional[str] = None,\n type_: Optional[TaskType] = None,\n state: Optional[TaskState] = None,\n start_time: Optional[int] = None,\n end_time: Optional[int] = None,\n offset: int = 0,\n limit: int = settings.DEFAULT_LIMIT,\n ) -> Tuple[List[Task], int]:\n query = db.query(self.model).filter(not_(self.model.is_deleted))\n if user_id:\n query = query.filter(self.model.user_id == user_id)\n if name:\n query = query.filter(self.model.name.like(f\"%{name}%\"))\n if type_:\n query = query.filter(self.model.type == type_)\n if state:\n query = query.filter(self.model.state == state)\n if start_time and end_time:\n _start_time = datetime.utcfromtimestamp(start_time)\n _end_time = datetime.utcfromtimestamp(end_time)\n query = query.filter(\n and_(\n self.model.create_datetime >= _start_time,\n self.model.create_datetime <= _end_time,\n )\n )\n\n query = query.order_by(desc(self.model.id))\n return query.offset(offset).limit(limit).all(), query.count()\n\n\ntask = CRUDTask(Task)\n","repo_name":"IJtLJZ8Rm4Yr/ymir-backend","sub_path":"src/pymir-app/app/crud/crud_task.py","file_name":"crud_task.py","file_ext":"py","file_size_in_byte":3073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10582999592","text":"# Digite um inteiro entre 0 e 5. O computador irá sortear um numero nesse range e informará se vc acertou ou errou.\r\n\r\nimport random\r\nimport time\r\n\r\nnd = int(input('Digite um numero inteiro entre 0 e 5:'))\r\nn = random.randint(0, 5)\r\nprint('Processando...')\r\ntime.sleep(3)\r\nif nd == n:\r\n print('Voce acertou! Parabéns!')\r\nelse:\r\n print('Que pena! Voce errou! 
O numero escolhido foi {}'.format(n))\r\n","repo_name":"iamcamilasilva/Python-Exercicios","sub_path":"advinhacao@random.py","file_name":"advinhacao@random.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39950614602","text":"from sympy import var, solve, diff\nfrom scipy import e, linalg as LA\nimport numpy as np\n\nX1, X2, Y, Z = var(\"X1 X2 Y Z\")\nd, c1, ke, H, l, j = var('d c1 ke H l j')\ne1, e2, c2, c, h2, f2 ,f1, h1, h = var('e1 e2 c2 c h2 f2 f1 h1 h')\na, b, n, p, m, e = var('a b n p m e')\n\nE1 = (-e1 * X1) - (c1 * Y * (X1 / (1 + X1))) + (np.e ** (-ke * h) * h1 * (X1 / (1 + X1))) + (f1 * (X1 / (1 + X1)) * Z)\n\nE2 = (-e2 * X2) - (c2 * Y * (X2 / (1 + X2))) + (c * np.e ** (-ke * h) * h2 * (X2 / (1 + X2))) + (f2 * (X2 / (1 + X2)) * Z)\nE3 = (-a * Y) + ((b * X1) * (Y / (Y + 1)) * (1 - (Y / n))) + ((p * (1 / np.e ** (-ke * h))) * (Y / (Y + 1)))\nE4 = (f1 * X1) + (f2 * X2) - (m * Z)\n\nA11 = diff(E1, X1)\nA12 = diff(E1, X2)\nA13 = diff(E1, Y)\nA14 = diff(E1, Z)\n\nA21 = diff(E2, X1)\nA22 = diff(E2, X2)\nA23 = diff(E2, Y)\nA24 = diff(E2, Z)\n\nA31 = diff(E3, X1)\nA32 = diff(E3, X2)\nA33 = diff(E3, Y)\nA34 = diff(E3, Z)\n\nA41 = diff(E4, X1)\nA42 = diff(E4, X2)\nA43 = diff(E4, Y)\nA44 = diff(E4, Z)\n\nprint(\"A11: \" + str(A11))\nprint(\"A12: \" + str(A12))\nprint(\"A13: \" + str(A13))\nprint(\"A14: \" + str(A14))\n\nprint(\"A21: \" + str(A21))\nprint(\"A22: \" + str(A22))\nprint(\"A23: \" + str(A23))\nprint(\"A24: \" + str(A24))\n\nprint(\"A31: \" + str(A31))\nprint(\"A32: \" + str(A32))\nprint(\"A33: \" + str(A33))\nprint(\"A34: \" + str(A34))\n\nprint(\"A41: \" + str(A41))\nprint(\"A42: \" + str(A42))\nprint(\"A43: \" + str(A43))\nprint(\"A44: \" + str(A44))\n\na = .105 # predator adult mortality rate\n\nb = 0.1 # Benefit to the predator of predator adult consumption rate of weevil\n\nc = 0.3 # Negative impact of early harvesting on the weevil population (not being used)\n\np = .5 # predator adult population gain from nymphs\n\nn = 10\n\nn1 = 100\n\nke = .1635 # constant\n\nh = .8\n\nc1 = .15 # Negative impact on the aphids of predator consumption of aphids; previous values used: .1,2\n\n# !\nc2 = .05 # Negative impact on the weevil of predator consumption of weevil; previous values used: .03\n\nh1 = .5 # adult population gain (aphids) # previous values used: .2\n\nh2 = .3 # adult population gain (weevil) previous values used: .2\n\ne1 = .15 # mortality of aphids; previous values used: .15, .3\n\ne2 = .15 # mortality of weevil; previous values used: .15, .3\n\nf1 = .2 # aphids pest consumption rate of alfalfa; previous values used: .15\n\n\nf2 = .2 # weevil pest consumption rate of alfalfa; previous values used: .15,\n\n\nm = .5\n\n\nX1 = 0\nX2 = 0\nY = 4.4\nZ = 0\ne = np.e\n\nSA11 = eval(str(A11))\nSA12 = eval(str(A12))\nSA13 = eval(str(A13))\nSA14 = eval(str(A14))\n\nSA21 = eval(str(A21))\nSA22 = eval(str(A22))\nSA23 = eval(str(A23))\nSA24 = eval(str(A24))\n\nSA31 = eval(str(A31))\nSA32 = eval(str(A32))\nSA33 = eval(str(A33))\nSA34 = eval(str(A34))\n\nSA41 = eval(str(A41))\nSA42 = eval(str(A42))\nSA43 = eval(str(A43))\nSA44 = eval(str(A44))\n\nJ1 = np.matrix([[SA11, SA12, SA13, SA14], [SA21, SA22, SA23, SA24], [SA31, SA32, SA33, SA34],[SA41, SA42, SA43, SA44]])\nprint(J1)\n\nprint(\"eigen: \")\neigen_values = 
LA.eig(J1)[0]\nprint(eigen_values)\n\n","repo_name":"Apthox/MathProofs","sub_path":"JacobianMatrix.py","file_name":"JacobianMatrix.py","file_ext":"py","file_size_in_byte":2944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23482988801","text":"case=[]\nwith open('/Users/cindy_liao/Downloads/A-large.in','r') as file1:\n case_num=file1.readline()\n case=file1.readlines()\nfor i in range(int(case_num)):\n case[i]=int(case[i].strip())\n\n\n\ndef count_sheep(n):\n\n if n==0:\n return 'INSOMNIA'\n else:\n count=1\n num=n\n digits=[0,1,2,3,4,5,6,7,8,9]\n while True:\n for i in str(num):\n if int(i) in digits:\n digits.remove(int(i))\n\n if digits==[]:\n return num\n else:\n count+=1\n num=count*n\n\n\n\n\nfor i in range(len(case)):\n with open('/Users/cindy_liao/Desktop/1.txt','a') as file2:\n file2.write('Case #'+str(i+1)+': '+str(count_sheep(case[i]))+'\\n')\n\n'''\n\nwith open('/Users/cindy_liao/Desktop/1.txt','a') as file2:\n file2.write(str(i)+'\\n')\n'''","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_177/3379.py","file_name":"3379.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8690320908","text":"import unittest\nfrom mc.tcf.client import MLClient\nimport mc.tests.test_config as test_config\nimport mc.tests.test_data as data\n\nc = 0\n\ndef config_client():\n global c\n c = MLClient(test_config)\n print(\"Client loaded\")\n\nclass MLClientTestCase(unittest.TestCase):\n\n def test_predict(self):\n y_pred = c.predict(data.mock_data)\n self.assertEqual(data.mock_data.shape[0], len(y_pred))\n\nif __name__ == '__main__':\n config_client()\n unittest.main()","repo_name":"mcRoot/TheCyclingFeastApp","sub_path":"mc/tests/tests_mlclient.py","file_name":"tests_mlclient.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23569477681","text":"from math import floor, ceil\n\ndef bathroomStalls(N, K):\n # Starting pos, width\n gaps = [[N,0]]\n maxWidth = N\n Ls = 0\n Rs = 0\n for i in range(0,K):\n maxWidth = 0\n # Find biggest gap\n for gap in gaps:\n maxWidth = max(maxWidth, gap[0])\n gap = [-1,N+1]\n for possibleGap in gaps:\n if possibleGap[0] == maxWidth and possibleGap[1] < gap[1]:\n gap = possibleGap\n # Split gap\n Ls = floor((gap[0] - 1) / 2)\n Rs = ceil((gap[0] - 1) / 2)\n if Ls != 0:\n gaps.append([Ls, gap[1]])\n if Rs != 0:\n gaps.append([Rs, gap[1] + Rs])\n gaps.remove(gap)\n return str(max(Ls, Rs)) + \" \" + str(min(Ls, Rs))\n\nwith open('C-small-1-attempt0.in', 'r') as f:\n caseCounter = 0\n for line in f:\n if caseCounter != 0:\n inputs = line.split(\" \")\n N = int(inputs[0])\n K = int(inputs[1])\n print(\"Case #\" + str(caseCounter) + \": \" + bathroomStalls(N, K))\n caseCounter += 1\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/2151.py","file_name":"2151.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6217872033","text":"import numpy as np\r\nimport pandas as pd\r\nimport geopandas as gpd\r\nfrom collections import UserDict\r\nfrom shapely.geometry import Point\r\n\r\nfrom .json_serializable import JSONSerializable\r\n\r\nclass Frame(UserDict, JSONSerializable):\r\n \"\"\"Contains a set of particles and their positions in a 
timestep.\"\"\"\r\n\r\n def __init__(self, time, particles):\r\n \"\"\"Create a Frame object.\r\n\r\n Params\r\n ------\r\n time : int\r\n The timestamp of this frame.\r\n particles : array/iterable/DataFrame/GeoDataFrame of (pID, pos)\r\n The collection of particle IDs and positions\r\n pos can be either a tuple (x, y [,...]) or shapely.geometry.Point\r\n \"\"\"\r\n self.time = time\r\n\r\n # input data given as geopandas geodataframe, convert to dict\r\n if isinstance(particles, gpd.GeoDataFrame):\r\n super().__init__({\r\n list(f['properties'].values())[0]:f['geometry']['coordinates']\r\n for f in particles.iterfeatures()})\r\n\r\n # input data given in row dict format {'id1':(), 'id2':(), ...}\r\n elif isinstance(particles, dict) and len(particles) != 2:\r\n super().__init__(particles)\r\n\r\n # uncertain input (row/column format, column order)\r\n else:\r\n\r\n # input data given as pandas dataframe, convert to dict\r\n if isinstance(particles, pd.DataFrame):\r\n p_list = list(particles.to_dict('list').values())\r\n\r\n # numpy array given, convert to list\r\n elif isinstance(particles, np.ndarray):\r\n p_list = particles.tolist()\r\n\r\n # input data given as dict with length 2\r\n elif isinstance(particles, dict):\r\n p_list = list(particles.items())\r\n\r\n # anything else gets converted to list of lists\r\n else:\r\n p_list = list(particles)\r\n\r\n # anything in row format\r\n if len(p_list) != 2:\r\n super().__init__(p_list)\r\n\r\n else:\r\n v0, v1 = p_list\r\n # first item is ids, second is points\r\n if all(isinstance(i, (str,int)) for i in v0):\r\n super().__init__(dict(zip(v0, v1)))\r\n\r\n # first item is points, second is ids\r\n elif all(isinstance(i, (str,int)) for i in v1):\r\n super().__init__(dict(zip(v1, v0)))\r\n\r\n # column format with column names, first is ids\r\n elif (not isinstance(v1[1], Point)\r\n and all(isinstance(i, (tuple,Point)) for i in v1[1])):\r\n super().__init__(dict(zip(v0[1], v1[1])))\r\n\r\n # column format with column names, first is points\r\n elif (not isinstance(v0[1], Point)\r\n and all(isinstance(i, (tuple,Point)) for i in v0[1])):\r\n super().__init__(dict(zip(v1[1], v0[1])))\r\n\r\n # row format with 2 items\r\n else:\r\n super().__init__(p_list)\r\n\r\n # make sure particle coords are lists, not Points or tuples\r\n if len(self.data) > 0:\r\n if isinstance(next(iter(self.data.values())), Point):\r\n self.data = {k:list(v.coords[0]) for k,v in self.data.items()}\r\n else:\r\n self.data = {k:list(v) for k,v in self.data.items()}\r\n\r\n\r\n def __eq__(self, other):\r\n \"\"\"Compare self and other frame based on time and points.\"\"\"\r\n return self.time == other.time and super().__eq__(other)\r\n\r\n\r\n def to_dict(self):\r\n \"\"\"Get a dict representation of this object.\"\"\"\r\n return {'time':self.time, 'particles': self.data}\r\n\r\n\r\n def __str__(self):\r\n \"\"\"Get a string representation of this object.\"\"\"\r\n return \"Frame(t={},#points={})\".format(self.time, len(self.data))\r\n\r\n @classmethod\r\n def from_dict(cls, d):\r\n \"\"\"Get a Frame object from a dict object.\r\n\r\n Params\r\n ------\r\n d : dict\r\n dict object to turn into a Frame.\r\n \"\"\"\r\n return Frame(**d)\r\n\r\n\r\ndef read_frame(source):\r\n \"\"\"Get a Frame object from a JSON file, buffer or string.\r\n\r\n Params\r\n ------\r\n source : str or file handle\r\n File path, object or JSON string.\r\n \"\"\"\r\n return 
Frame.from_json(source)\r\n","repo_name":"tipech/spatialnet","sub_path":"spatialnet/classes/frame.py","file_name":"frame.py","file_ext":"py","file_size_in_byte":4356,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"73068667074","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponse\nfrom .utils import Player\nimport matplotlib.pyplot as plt\nfrom .forms import GraphForm\nfrom .models import Graph\nimport secrets\nimport mimetypes\n\n\ndef home_view(request):\n\tgraph_form=GraphForm()\n\tif request.method == \"POST\":\n\t\tgraph_form = GraphForm(request.POST)\n\t\tif graph_form.is_valid():\n\t\t\tgraph=graph_form.save()\n\t\t\t\n\t\t\tplayer_1=Player(graph.turns, graph.money, graph.bet, 'Henry')\n\t\t\tplayer_2=Player(graph.turns, graph.money, graph.bet, 'Bob')\n\t\t\tplayer_3=Player(graph.turns, graph.money, graph.bet, 'Silvie')\n\t\t\tplayer_4=Player(graph.turns, graph.money, graph.bet, 'John')\n\t\t\tplayer_5=Player(graph.turns, graph.money, graph.bet, 'Alfie')\n\t\t\tplayers=[player_1, player_2, player_3, player_4, player_5]\n\n\t\t\tfor player in players:\n\t\t\t\tplayer.create_turns()\n\t\t\t\tplayer.play()\n\t\t\t\tmoney_list=[]\n\t\t\t\tturns_list=[]\n\t\t\t\tfor key, value in player.data_dict.items():\n\t\t\t\t\tturns_list.append(key)\n\t\t\t\t\tmoney_list.append(value)\n\t\t\t\tplt.plot(turns_list, money_list, label=player.__str__())\n\n\t\t\tplt.xlabel('Turns')\n\t\t\tplt.ylabel('Money')\n\t\t\tplt.title('Outcome of 5 players after playing {} turns'.format(graph.turns))\n\t\t\tplt.legend()\n\t\t\tfig_name=secrets.token_hex(nbytes=16)\n\t\t\tplt.savefig('media/graphs/g{}.png'.format(fig_name))\n\n\t\t\tgraph.graph='graphs/g{}.png'.format(fig_name)\n\t\t\tgraph_form.save()\n\n\t\t\treturn redirect('result', graph.graph_id)\n\tcontent={\n\t\t'graph_form':graph_form,\n\t}\n\treturn render(request, 'home.html', content)\n\n\ndef result_view(request, pk):\n\tgraph=get_object_or_404(Graph, graph_id=pk)\n\tprint(graph)\n\n\tif request.method == \"POST\":\n\t\twith open(graph.graph.path, 'rb') as f:\n\t\t\tmime_type, _ = mimetypes.guess_type(graph.graph.path)\n\t\t\tresponse = HttpResponse(f, content_type=mime_type)\n\t\t\tgraphname=\"{}\".format(graph.graph)\n\t\t\tgraphname=graphname[7:]\n\t\t\tresponse['Content-Disposition'] = \"attachment; filename={}\".format(graphname)\n\t\t\treturn response\n\tcontent={\n\t\t'graph': graph,\n\t}\n\treturn render(request, 'result.html', content)\n\n\ndef view_404(request, exception=None):\n return redirect('home')","repo_name":"IvanGadosi/simulation","sub_path":"simulation/roulette/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"38182125147","text":"from pyopp import cSimpleModule, cMessage\nfrom pyopp import WATCH, EV\nfrom pyopp import simTime, cTimestampedValue\n\n\nclass PyServer(cSimpleModule):\n\n IDLE = 0\n TRANSMISSION = 1\n COLLISION = 2\n\n def __init__(self, *args, **kwargs):\n cSimpleModule.__init__(self, *args, **kwargs)\n self.endRxEvent = None\n\n def __del__(self):\n self.cancelAndDelete(self.endRxEvent)\n\n def initialize(self):\n self.channelStateSignal = self.registerSignal(\"channelState\")\n self.endRxEvent = cMessage(\"end-reception\")\n self.channelBusy = False\n self.emit(self.channelStateSignal, self.IDLE)\n\n self.gate(\"in\").setDeliverOnReceptionStart(True)\n\n 
self.currentCollisionNumFrames = 0\n self.receiveCounter = 0\n WATCH('currentCollisionNumFrames')\n\n self.receiveBeginSignal = self.registerSignal(\"receiveBegin\")\n self.receiveSignal = self.registerSignal(\"receive\")\n self.collisionSignal = self.registerSignal(\"collision\")\n self.collisionLengthSignal = self.registerSignal(\"collisionLength\")\n\n self.emit(self.receiveSignal, 0);\n self.emit(self.receiveBeginSignal, 0);\n\n self.getDisplayString().setTagArg(\"p\", 0, self.par(\"x\").doubleValue())\n self.getDisplayString().setTagArg(\"p\", 1, self.par(\"y\").doubleValue())\n\n def handleMessage(self, msg):\n if msg == self.endRxEvent:\n EV << \"reception finished\\n\"\n self.channelBusy = False\n self.emit(self.channelStateSignal, self.IDLE)\n\n # update statistics\n if self.currentCollisionNumFrames == 0:\n # start of reception at recvStartTime\n tmp = cTimestampedValue(self.recvStartTime, 1)\n self.emit(self.receiveSignal, tmp)\n # end of reception now\n self.emit(self.receiveSignal, 0)\n else:\n # start of collision at recvStartTime\n tmp = cTimestampedValue(self.recvStartTime, self.currentCollisionNumFrames)\n self.emit(self.collisionSignal, tmp)\n\n dt = simTime() - self.recvStartTime\n self.emit(self.collisionLengthSignal, dt)\n\n self.currentCollisionNumFrames = 0;\n self.receiveCounter = 0;\n self.emit(self.receiveBeginSignal, self.receiveCounter)\n else:\n\n pkt = msg.as_cPacket()\n endReceptionTime = simTime() + pkt.getDuration()\n\n self.receiveCounter += 1\n self.emit(self.receiveBeginSignal, self.receiveCounter)\n\n if not self.channelBusy:\n EV << \"started receiving\\n\"\n self.recvStartTime = simTime()\n self.channelBusy = True\n self.emit(self.channelStateSignal, self.TRANSMISSION)\n self.scheduleAt(endReceptionTime, self.endRxEvent)\n else:\n EV << \"another frame arrived while receiving -- collision!\\n\"\n self.emit(self.channelStateSignal, self.COLLISION)\n\n if self.currentCollisionNumFrames == 0:\n self.currentCollisionNumFrames = 2\n else:\n self.currentCollisionNumFrames += 1\n\n if endReceptionTime > self.endRxEvent.getArrivalTime():\n self.cancelEvent(self.endRxEvent)\n self.scheduleAt(endReceptionTime, self.endRxEvent)\n\n # update network graphics\n if self.hasGUI():\n self.bubble(\"Collision! 
(%d frames)\" % self.currentCollisionNumFrames)\n self.getParentModule().getCanvas().holdSimulationFor(\n self.par(\"animationHoldTimeOnCollision\").doubleValue())\n self.channelBusy = True\n self.delete(pkt)\n\n def refreshDisplay(self):\n if not self.channelBusy:\n self.getDisplayString().setTagArg(\"i2\", 0, \"status/off\")\n self.getDisplayString().setTagArg(\"t\", 0, \"\")\n elif self.currentCollisionNumFrames == 0:\n self.getDisplayString().setTagArg(\"i2\", 0, \"status/yellow\")\n self.getDisplayString().setTagArg(\"t\", 0, \"RECEIVE\")\n self.getDisplayString().setTagArg(\"t\", 2, \"#808000\")\n else:\n self.getDisplayString().setTagArg(\"i2\", 0, \"status/red\")\n self.getDisplayString().setTagArg(\"t\", 0, \"COLLISION\")\n self.getDisplayString().setTagArg(\"t\", 2, \"#800000\")\n\n def finish(self):\n EV << \"duration: \" << simTime().dbl() << '\\n'\n self.recordScalar(\"duration\", simTime())\n","repo_name":"mmodenesi/omnetpy","sub_path":"pysamples/pyaloha/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4605,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"61"} +{"seq_id":"8714808038","text":"# coding=utf8\nimport sqlite3\nfrom sys import argv\n\nconn = sqlite3.connect('a')\nc = conn.cursor()\n\n# Create table\nc.execute('''CREATE TABLE stocks (date text, trans text, symbol text, qty real, price real)''')\n\n# Insert a row of data\nc.execute(\"INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14)\")\n# c.execute(u\"insert into phrases(mlen,clen,m0,m1,m2,m3,category,phrase,freq) values(4,2,20,13,23,25,1,'微信',10700000)\")\n\n# Save (commit) the changes\nconn.commit()\n\n# We can also close the connection if we are done with it.\n# Just be sure any changes have been committed or they will be lost.\nconn.close()\n","repo_name":"iinux/JohannCarlFriedrichGauss","sub_path":"simple_tool/sqlite3.py","file_name":"sqlite3.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"10141000252","text":"import streamlit as st\n# from pptx import Presentation\n# from pptx.util import Inches,Pt\n# from datetime import date\nfrom PIL import Image\nfrom io import BytesIO\nfrom datetime import datetime\nimport requests\nimport openai\nfrom PIL import Image\nfrom io import BytesIO\n\n\ndef enable_download(ppt_file):\n filename = prompt +\" \"+str(datetime.now())+\" \"+\".pptx\"\n st.sidebar.write(\"Download your PPT here!! 
:gear:\")\n st.sidebar.download_button(label='Click to download PowerPoint',\n data=ppt_file.getvalue(),\n file_name=filename)\n\ndef get_images(prompt,n):\n openAI_token = \"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\"\n openai.api_key = openAI_token\n response = openai.Image.create(\n prompt= prompt,\n n=n,\n size=\"512x512\"\n )\n i=0\n try:\n for image in response['data']:\n response = requests.get(image['url'])\n # Convert the image data to a PIL image and save it to your computer\n image_data = response.content\n image = Image.open(BytesIO(image_data))\n # image.show()\n image.save(\"GEN_IMG\"+str(i)+\".png\")\n # files.download(prompt_text+str(i)+\".png\")\n i+=1\n except:\n print(\" error in generating images.Please refresh the page \")\ndef display(prompt,n):\n get_images(prompt,n)\n try:\n for i in range(0,n):\n st.image(\"GEN_IMG\"+str(i)+\".png\")\n except:\n print(\"error in loading image files.Please refresh the page\")\n\na = st.empty()\n\nst.write(\"Team ABS presents....\")\nst.title(\"Image Generator\")\n\nst.write(\"## Get images based on a text prompt\")\nst.write(\n \" Write a prompt into the textbox below and watch the magic happen\"\n)\n\n\nppt_data = \"REPLACE WITH PPT\"\n\n\nprompt = st.text_input(\"Terms\",\"Your prompt\").lower()\nst.write(\n \" please enter How many images do you want to generate\"\n)\nnum=st.number_input(\"The number\",min_value=1,max_value=10)\n\nif st.button(\"Generate\"):\n display(prompt,num)\n\n# st.image(\"./images/bg1.jpeg\") ","repo_name":"samarthagali/sfash","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34178861834","text":"from glob import glob\nimport random\n\n\ndef speak_contents(mode, no):\n path = \"/home/pi/build-in-app/\"\n # path = \"\"\n\n filter = path + \"*/\" + mode + \"*.mp3\"\n files = glob(filter)\n \n text = files[int(no)]\n\n return text","repo_name":"DoiRyoto/Meeting-TimeKeeper","sub_path":"back/speak_contents.py","file_name":"speak_contents.py","file_ext":"py","file_size_in_byte":239,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"18742466305","text":"import argparse\nimport numpy as np\nimport os\nimport sys\nimport chainer\nimport chainer.functions as F\n\nfrom net import Generator, Discriminator\nfrom dataset import load_mnist\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Chainer Novelty Detection')\n parser.add_argument('--batchsize', '-b', type=int, default=256,\n help='Number of images in each mini-batch')\n parser.add_argument('--gpu', '-g', type=int, default=-1,\n help='GPU ID (negative value indicates CPU)')\n parser.add_argument('--percent', '-p', type=float, default=0.5,\n help='percentage of outliers')\n parser.add_argument('--threshold', '-t', type=float, default=0.4,\n help='threshold of outliers')\n parser.add_argument('--load_epoch', type=str, default='30')\n args = parser.parse_args()\n\n print(args)\n\n\n gen = Generator()\n dis = Discriminator()\n\n D = []\n DR = []\n print('\\tdigit\\tpre\\trec\\tf1_score')\n\n for i in range(10):\n if os.path.exists(f'result_{i}/gen_epoch_{args.load_epoch}.npz'):\n chainer.serializers.load_npz(f'result_{i}/gen_epoch_{args.load_epoch}.npz', gen)\n else:\n sys.exit(f'result_{i}/gen_epoch_{args.load_epoch}.npz does not exist.')\n\n if os.path.exists(f'result_{i}/dis_epoch_{args.load_epoch}.npz'):\n 
chainer.serializers.load_npz(f'result_{i}/dis_epoch_{args.load_epoch}.npz', dis)\n else:\n sys.exit(f'result_{i}/dis_epoch_{args.load_epoch}.npz does not exist.')\n\n if args.gpu >= 0:\n chainer.cuda.get_device_from_id(args.gpu).use()\n gen.to_gpu()\n dis.to_gpu()\n\n train, test = load_mnist(i, args.percent)\n test_iter = chainer.iterators.SerialIterator(test, args.batchsize, repeat=False, shuffle=False)\n\n outputs = []\n outputs_dash = []\n labels = []\n for batch in test_iter:\n x, t = chainer.dataset.concat_examples(batch, device=args.gpu)\n with chainer.using_config('train', False), chainer.using_config('enable_backprop', False):\n y = F.sigmoid(dis(x))\n x_dash = gen(x)\n y_dash = F.sigmoid(dis(x_dash))\n outputs.extend(y.data[:, 0])\n outputs_dash.extend(y_dash.data[:, 0])\n labels.extend(t)\n\n outputs = np.array(outputs)\n outputs_dash = np.array(outputs_dash)\n labels = np.array(labels)\n\n positive = np.where(outputs > args.threshold)[0]\n tp = np.sum(labels[positive] == 1)\n fp = np.sum(labels[positive] == 0)\n negative = np.where(outputs <= args.threshold)[0]\n tn = np.sum(labels[negative] == 0)\n fn = np.sum(labels[negative] == 1)\n precision = tp / (tp + fp + 1e-8)\n recall = tp / (tp + fn + 1e-8)\n f1_score = 2 * recall * precision / (recall + precision + 1e-8)\n print('D(X)\\t{:d}\\t{:4.3f}\\t{:4.3f}\\t{:4.3f}'.format(i, precision, recall, f1_score))\n D.append(f1_score)\n positive = np.where(outputs_dash > args.threshold)[0]\n tp = np.sum(labels[positive] == 1)\n fp = np.sum(labels[positive] == 0)\n negative = np.where(outputs_dash <= args.threshold)[0]\n tn = np.sum(labels[negative] == 0)\n fn = np.sum(labels[negative] == 1)\n precision = tp / (tp + fp + 1e-8)\n recall = tp / (tp + fn + 1e-8)\n f1_score = 2 * recall * precision / (recall + precision + 1e-8)\n print('D(R(X))\\t{:d}\\t{:4.3f}\\t{:4.3f}\\t{:4.3f}'.format(i, precision, recall, f1_score))\n DR.append(f1_score)\n\n print('D(X) f1_score {:4.3f}'.format(np.mean(np.array(D))))\n print('D(R(X)) f1_score {:4.3f}'.format(np.mean(np.array(DR))))\n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"tanaka-daiki/EizoMediaReport","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30210280556","text":"'''\n문제\n3×N 크기의 벽을 2×1, 1×2 크기의 타일로 채우는 경우의 수를 구해보자.\n\n입력\n첫째 줄에 N(1 ≤ N ≤ 30)이 주어진다.\n\n출력\n첫째 줄에 경우의 수를 출력한다\n'''\nfrom sys import stdin\ninput = stdin.readline\n\nN = int(input())\ndp = [0]*(N+1)\nif N>1:\n dp[0] = 1\n dp[1] = 0\n dp[2] = 3\n for i in range(3,N+1):\n if i%2 == 0:\n sum = 0\n for j in range(i-4,-1,-2):\n sum+=dp[j]*2\n dp[i] = dp[i-2]*3+sum\nprint(dp[-1])","repo_name":"DongjinS/jungle_week4_ProblemSolving","sub_path":"신동진/2133.py","file_name":"2133.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22445477262","text":"import MySQLdb\r\nimport MySQLdb.cursors\r\nfrom flask import Flask, render_template, request, jsonify, json\r\nfrom flask_mysqldb import MySQL\r\nimport requests\r\n\r\napp = Flask(__name__)\r\n\r\n#mysql = MySQL(app)\r\n\r\n@app.route(\"/\",methods=['GET', 'POST'])\r\ndef main():\r\n if request.method == 'POST':\r\n db = MySQLdb.connect(host=\"localhost\", user=\"root\", passwd=\"qwerty\", db=\"bookauthor\", charset='utf8')\r\n cursor = db.cursor(MySQLdb.cursors.DictCursor)\r\n print(\"POST\")\r\n try:\r\n print(request.data)\r\n parsed 
= json.loads(request.data)\r\n print(parsed)\r\n with open('G:/data.txt', 'w') as outfile:\r\n json.dump(parsed, outfile)\r\n outfile.close()\r\n title=parsed['title']\r\n print(title)\r\n year = parsed['year']\r\n print(year)\r\n authors = parsed['authors']\r\n print(authors)\r\n\r\n executeStr3 = 'Select id from book where title= %s and year = %s'\r\n row_count=cursor.execute(executeStr3,(title, year))\r\n print(\"row_count=\" + str(row_count))\r\n cursorselectBook_fetchall=cursor.fetchall()\r\n print(cursorselectBook_fetchall)\r\n if len(cursorselectBook_fetchall)>0:\r\n print(str(cursorselectBook_fetchall[0])+\" already exists\")\r\n # print(cursorselectBook_fetchall[0][\"id\"])\r\n else:\r\n print(\"Add new book\")\r\n query = \"insert into book (title, year) values (%s,%s)\"\r\n cursor.execute(query, (title, year))\r\n cursor.execute('''Select MAX(id) as maxid from book''') # последний добавленный\r\n str_id_book = cursor.fetchone()\r\n #print(str_id_book)\r\n id_book=str_id_book['maxid']\r\n print(id_book)\r\n for author in authors:\r\n # for row in cursor.execute(\"select question_id, foo, bar from questions\"):\r\n # question_id, foo, bar = row\r\n print(author)\r\n cursor.execute('''Select id from author where name = %s''', [author])\r\n str_id_author=cursor.fetchone()\r\n print(str_id_author)\r\n id_author=0\r\n if str_id_author and str_id_author['id']:\r\n id_author=str_id_author['id']\r\n print(id_author)\r\n if not id_author:\r\n print(\"auhAdd\")\r\n query = \"insert into author (name) values (%s)\"\r\n cursor.execute(query, [author])\r\n cursor.execute('''Select id from author where name = %s''', [author])\r\n id_author = cursor.fetchone()['id']\r\n print(id_author)\r\n query = \"insert into book_author (id_book,id_author) values (%s,%s)\"\r\n cursor.execute(query, (id_book,id_author))\r\n except:\r\n # print(\"EXCEPTION!\")\r\n return 'Error decoding json'\r\n db.commit()\r\n db.close()\r\n print(\"POST END\")\r\n return 'Example'\r\n #return 'Hello!'\r\n if request.method == 'GET':\r\n print(\"GET\")\r\n db = MySQLdb.connect(host=\"localhost\", user=\"root\", passwd=\"qwerty\", db=\"bookauthor\", charset='utf8')\r\n cursor = db.cursor(MySQLdb.cursors.DictCursor)\r\n cursor.execute('''Select * from book''')\r\n resultsBook = cursor.fetchall()\r\n result=json.dumps(resultsBook)\r\n db.commit()\r\n db.close()\r\n return result\r\n print(\"END\")\r\n return 'Example'\r\n\r\n@app.route(\"/books\")\r\ndef books():\r\n if request.method == 'GET':\r\n print(\"GET BOOK\")\r\n db = MySQLdb.connect(host=\"localhost\", user=\"root\", passwd=\"qwerty\", db=\"bookauthor\", charset='utf8')\r\n cursor = db.cursor(MySQLdb.cursors.DictCursor)\r\n cursor.execute('''Select * from book''')\r\n resultsBook = cursor.fetchall()\r\n result = json.dumps(resultsBook)\r\n db.commit()\r\n db.close()\r\n return result\r\n@app.route(\"/authors\")\r\ndef authors():\r\n if request.method == 'GET':\r\n print(\"GET AUTHOR\")\r\n db = MySQLdb.connect(host=\"localhost\", user=\"root\", passwd=\"qwerty\", db=\"bookauthor\", charset='utf8')\r\n cursor = db.cursor(MySQLdb.cursors.DictCursor)\r\n cursor.execute('''Select * from author''')\r\n resultsAuthor = cursor.fetchall()\r\n result = json.dumps(resultsAuthor)\r\n db.commit()\r\n db.close()\r\n return result\r\n@app.route(\"/output\")\r\ndef out_data():\r\n db = MySQLdb.connect(host=\"localhost\", user=\"root\", passwd=\"qwerty\", db=\"bookauthor\", charset='utf8')\r\n cursor = db.cursor(MySQLdb.cursors.DictCursor)\r\n print(\"out_data\")\r\n 
cursor.execute('''Select * from book''')\r\n resultsBook = cursor.fetchall()\r\n print(resultsBook)\r\n\r\n cursor.execute('''Select max(id) as mid from book''')\r\n resultsBook1 = cursor.fetchall()\r\n print(resultsBook1)\r\n if resultsBook1:\r\n print(\"resultsBook1_true\")\r\n print(resultsBook1[0][\"mid\"])\r\n else:\r\n print(\"resultsBook1_false\")\r\n\r\n cursor.execute('''Select * from author''')\r\n resultsAuthor = cursor.fetchall()\r\n\r\n cursor.execute('''SELECT book.title as title,\r\n book.year as year,\r\n GROUP_CONCAT(author.name) as name\r\nFROM\r\n book_author ba\r\nINNER JOIN\r\n book on book.id = ba.id_book\r\nINNER JOIN\r\n author on author.id = ba.id_author\r\nGROUP BY\r\n book.id''')\r\n resultsBookAuthor= cursor.fetchall()\r\n return render_template('output.html',resultsBook=resultsBook,resultsAuthor=resultsAuthor,resultsBookAuthor=resultsBookAuthor )\r\n@app.route('/showSignUp')\r\ndef showSignUp():\r\n print(\"showSignUp\")\r\n #call_find_all_sp()\r\n return render_template('signup.html')\r\n@app.route('/actions',methods=['POST'])\r\ndef actions():\r\n if request.method == 'POST':\r\n print(\"POST actions\")\r\n db = MySQLdb.connect(host=\"localhost\", user=\"root\", passwd=\"qwerty\", db=\"bookauthor\", charset='utf8')\r\n cursor = db.cursor(MySQLdb.cursors.DictCursor)\r\n print(request.data)\r\n parsed = json.loads(request.data)\r\n print(parsed)\r\n action = parsed['action']\r\n print(action)\r\n table = parsed['table']\r\n print(table)\r\n if action == 'delete':\r\n if table == 'book':\r\n cur_id = parsed['cur_id']\r\n cursor.execute(\"DELETE FROM book WHERE id= %s\", [cur_id])\r\n cursor.execute(\"DELETE FROM book_author WHERE id_book= %s\", [cur_id])\r\n elif table == 'author':\r\n cur_id = parsed['cur_id']\r\n print(cur_id)\r\n query1= \"DELETE FROM author WHERE id=%s\"\r\n #query1 = 'DELETE FROM %s WHERE id= %s'\r\n #query1 = \"DELETE FROM %(table)s WHERE id=%(cur_id)s\"\r\n\r\n print(query1)\r\n cursor.execute(query1, [cur_id])\r\n #cursor.execute(query1, (table,cur_id) )\r\n #cursor.execute(query1, {\"table\": str(table),\"cur_id\": cur_id})\r\n query2 = 'DELETE FROM book_author WHERE id_author= %s'\r\n print(query2)\r\n cursor.execute(query2, [cur_id])\r\n elif table == 'book_author':\r\n cur_id_book = parsed['cur_id_book']\r\n cur_id_author = parsed['cur_id_author']\r\n cursor.execute('''DELETE FROM book_author WHERE id_author=%s and id_book=%s''', (cur_id_author,cur_id_book))\r\n elif action=='update':\r\n if table=='book':\r\n cur_id = parsed['cur_id']\r\n title = parsed['title']\r\n year = parsed['year']\r\n cursor.execute('''Update book \r\n set title=%s,\r\n year= %s \r\n where id=%s\r\n ''',(title,year,cur_id))\r\n elif table=='author':\r\n cur_id = parsed['cur_id']\r\n name = parsed['name']\r\n cursor.execute('''Update author \r\n set name=%s \r\n where id=%s\r\n ''', (name,cur_id))\r\n elif table == 'book_author':\r\n cur_id_book = parsed['cur_id_book']\r\n cur_id_author = parsed['cur_id_author']\r\n id_book = parsed['id_book']\r\n id_author = parsed['id_author']\r\n cursor.execute('''Update book_author \r\n set id_book=%s, \r\n id_author= %s \r\n where id_book=%s and id_author=%s \r\n ''', (id_book,id_author, cur_id_book,cur_id_author))\r\n db.commit()\r\n db.close()\r\n return 'Action page'\r\n\r\nif __name__ == \"__main__\":\r\n 
app.run(debug=True)\r\n\r\n","repo_name":"DmSide/HttpRequestPyFlaskMySQL","sub_path":"program/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}{"seq_id":"71385376195","text":"import json\nimport psycopg2\n\nfrom kafka import KafkaConsumer\n\nkafka_hostname = '172.16.0.3'\nkafka_port = '9092'\ntopic = 'iot'\n\ngreenplum_hostname = '172.16.0.2'\ngreenplum_port = '5432'\ngreenplum_database_name = 'db'\ngreenplum_database_user = 'gpuser'\ngreenplum_database_password = 'pwd'\n\nwhile True:\n    try:\n        consumer = KafkaConsumer(topic, bootstrap_servers=[f'{kafka_hostname}:{kafka_port}'], auto_offset_reset='earliest',\n                                 enable_auto_commit=True, value_deserializer=lambda v: json.loads(v.decode('utf-8')))\n        break\n    except:\n        continue\n\nprint(f'Connected to Kafka: {kafka_hostname}:{kafka_port}, topic: {topic}')\n\nwhile True:\n    try:\n        conn = psycopg2.connect(host=greenplum_hostname, port=greenplum_port, database=greenplum_database_name,\n                                user=greenplum_database_user, password=greenplum_database_password)\n        break\n    except:\n        continue\n\ncursor = conn.cursor()\nprint(f'Connected to Greenplum: {greenplum_hostname}:{greenplum_port}, database: {greenplum_database_name}')\n\ncount = 0\n\ntry:\n    for message in consumer:\n        record = message.value\n        count += 1\n        print(f'Read record #{count} from Kafka: {record}')\n\n        cursor.execute('insert into records('\n                       'occur_time,'\n                       'sensor_id,'\n                       'latitude,'\n                       'longitude,'\n                       'temperature,'\n                       'controller_id)'\n                       'values(%s, %s, %s, %s, %s, %s)',\n                       [record['occur_time'], record['sensor_id'], record['latitude'], record['longitude'],\n                        record['temperature'], record['controller_id']])\n        conn.commit()\n        print(f\"Inserting record #{count}\")\n\nexcept KeyboardInterrupt:\n    print(f'Processed {count} records')\n","repo_name":"iratewarrior/iot-device","sub_path":"consumer/consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}{"seq_id":"27481192230","text":"#!/usr/bin/env python3\nimport sys\n\nfrom PyQt5.QtWidgets import QMainWindow,QMessageBox,QApplication\n\nfrom mainwindow import MainWindow\nfrom onlinewindow import OnlineWindow\nfrom mygame import MyGame\n\n\n__author__ = \"Kushagra Surana\"\n\n\nclass Window(QMainWindow):\n    def __init__(self, parent=None):\n        super(Window, self).__init__(parent)\n        self.mainWidget = MainWindow()\n        self.mainWidget.ui.start.clicked.connect(self.change_central_widget)\n        self.mainWidget.ui.load_game2.clicked.connect(self.showLoadGameWindow)\n        self.mainWidget.ui.onlineGameButton.clicked.connect(self.show_online_game_window)\n        self.setCentralWidget(self.mainWidget)\n\n    def show_online_game_window(self):\n        self.onlineWindow = OnlineWindow()\n        self.setCentralWidget(self.onlineWindow)\n\n    def change_central_widget(self):\n        f = 1\n        if not ((self.mainWidget.ui.rb1.isChecked() or self.mainWidget.ui.rb2.isChecked()) and (\n                self.mainWidget.ui.rb3.isChecked() or self.mainWidget.ui.rb4.isChecked())):\n            QMessageBox.about(None, \"\", \"Select Players\")\n        else:\n            if self.mainWidget.ui.rb1.isChecked():\n                if self.mainWidget.ui.rb3.isChecked(): # bot vs bot\n                    if self.mainWidget.ui.bot_path1.text() == \"\" or self.mainWidget.ui.bot_path2.text() == \"\":\n                        QMessageBox.about(None, \"error\", \"no bot file\")\n                        f = 0 # dialog for null\n                    self.gameWidget = MyGame(1, \"\", self.mainWidget.ui.bot_path1.text(),\n                                             
self.mainWidget.ui.bot_path2.text())\n else:\n if self.mainWidget.ui.bot_path1.text() == \"\":\n QMessageBox.about(None, \"error\", \"no bot file\")\n f = 0\n else:\n self.gameWidget = MyGame(1, \"\", self.mainWidget.ui.bot_path1.text()) # bot vs human\n else:\n if self.mainWidget.ui.rb3.isChecked():\n if self.mainWidget.ui.bot_path2.text() == \"\":\n QMessageBox.about(None, \"error\", \"no bot file\")\n f = 0\n else:\n self.gameWidget = MyGame(1, \"\", \"\", self.mainWidget.ui.bot_path2.text()) # human vs bot\n else:\n self.gameWidget = MyGame() # human vs human\n if f:\n self.setCentralWidget(self.gameWidget)\n self.gameWidget.back.clicked.connect(self.change_central_widget2)\n\n def change_central_widget2(self):\n self.mainWidget = MainWindow()\n self.mainWidget.ui.start.clicked.connect(self.change_central_widget)\n self.mainWidget.ui.load_game2.clicked.connect(self.showLoadGameWindow)\n self.setCentralWidget(self.mainWidget)\n\n def showLoadGameWindow(self):\n new_game = 0\n print(\"loading\")\n file_path = self.mainWidget.ui.load_path.text()\n if (file_path == \"\"):\n pass\n else:\n self.gameWidget = MyGame(new_game, file_path)\n self.setCentralWidget(self.gameWidget)\n self.gameWidget.back.clicked.connect(self.change_central_widget2)\n\n\ndef main():\n app = QApplication(sys.argv)\n my_app = Window()\n my_app.show()\n my_app.setWindowTitle(\"3Knights\")\n sys.exit(app.exec_())\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"kushagrasurana/3Knights","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"41484339552","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nfrom doubanspider.items import DoubanspiderItem\n\nclass Douban2018Spider(scrapy.Spider):\n name = 'douban2018'\n allowed_domains = ['douban.com']\n start_urls = ['https://book.douban.com/ithil_j/activity/book_annual2018/widget/1']\n\n def parse(self, response):\n item = DoubanspiderItem()\n # for i in range(1, 37):\n page = json.loads(response.body)\n tag = page['res']['kind_str']\n\n if tag != 'excerpt':\n sort_name = page['res']['payload']['title']\n subjects = page['res']['subjects']\n item['sort_name'] = sort_name\n # yield item\n for n, book in enumerate(subjects):\n number = n + 1\n title = book['title']\n rating = book['rating']\n item['number'] = number\n item['title'] = title\n item['rating'] = rating\n yield item\n next_page = int(response.url.split('/')[-1]) + 1\n next_url = response.urljoin(str(next_page))\n yield scrapy.Request(next_url, callback=self.parse)\n\n\n\n","repo_name":"windshell90/douban2018","sub_path":"doubanspider/doubanspider/spiders/douban2018.py","file_name":"douban2018.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23430184101","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\ndef get_next_decide_point(rate, target, farm_cost, pts):\n time_required = (min(target, farm_cost) - pts) / rate\n return time_required\n\n\ndef decide(rate, target, farm_cost, rate_inc, pts):\n # time to achieve target as the current rate\n t1 = (target - pts) / rate\n\n # time to achieve target if we purchase the tractor\n t2 = (target - pts + farm_cost) / (rate + rate_inc)\n\n if t1 < t2:\n return None\n else:\n pts -= farm_cost # buy the farm\n rate += rate_inc # increment the rate\n return (pts, rate)\n\n\ndef solve(cipher):\n rate = 2 # the 
rate of getting cookies per second (initially)\n pts = 0.0 # the points\n time_elapsed = 0.0 # the time elapsed\n\n parts = cipher.strip().split()\n farm_cost = float(parts[0]) # the cost of a new farm\n rate_inc = float(parts[1]) # the increase in rate per second due to\n # new farm\n target = float(parts[2])\n\n # flag to check if we are done with the program\n done = False\n\n while not done:\n # go to the decide point\n time_to_next_decide_point = get_next_decide_point(rate, target,\n farm_cost, pts)\n\n # add time required in going to the decide point to the elapsed time\n time_elapsed += time_to_next_decide_point\n pts += rate * time_to_next_decide_point\n\n if pts >= target:\n done = True\n continue\n\n # now decide\n decision = decide(rate, target, farm_cost, rate_inc, pts)\n\n if decision is None:\n done = True\n else:\n pts, rate = decision # the new pts and rate\n\n # now when the control reaches here, just find how much more time is\n # required to reach the target\n time_elapsed += (target - pts)/rate\n\n return time_elapsed\n\nif __name__ == \"__main__\":\n testcases = input()\n\n for caseNr in xrange(1, testcases + 1):\n cipher = raw_input()\n # print cipher\n print(\"Case #%i: %.7f\" % (caseNr, solve(cipher)))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_136/991.py","file_name":"991.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9812406047","text":"import streamlit as st\nimport Shamir.Services.ControlsService as CS\nimport Shamir.Services.MathService as MS\nimport Shamir.Services.ShamirMethodMathService as smms\nimport Shamir.Constants as cst\n\ndef ShamirUi():\n secret = 0\n rangeMaxValue = 0\n generatedSecret = 0\n sharesCount = 0\n sharesToRecover = \"\"\n sharesToRecoverCount = 0\n prime = 0\n si = []\n\n method = st.radio(\"Wybierz metodę:\", [\"trywialny\", \"shamir\"])\n if method == \"trywialny\":\n\n rangeMaxValue = st.number_input(\"podaj zakres (k): \", min_value=0)\n if rangeMaxValue != 0:\n secret = st.number_input(\"podaj wartość sekretu(s):\", max_value=rangeMaxValue - 1, min_value=0)\n if secret != 0:\n sharesCount = st.number_input(\"podaj ilość udziałów(n):\", min_value=0)\n if st.button(\"generuj:\"):\n shares = MS.GenerateShares(sharesCount, rangeMaxValue, secret)\n st.write(\"udziały:\")\n for share in shares:\n st.write(share)\n generatedSecret = MS.GenerateSecret(shares, rangeMaxValue)\n print(generatedSecret)\n st.write(\"Wygenerowany sekret: \")\n st.write(generatedSecret)\n\n if method == \"shamir\":\n\n secret = st.number_input(\"podaj wartość sekretu(s):\", min_value=0)\n if secret != 0:\n sharesCount = st.number_input(\"podaj ilość udziałów(n):\", min_value=0)\n if sharesCount != 0:\n sharesToRecoverCount = st.number_input(\"podaj ilość udziałów do odtworzenia(t)\", min_value=0)\n if sharesToRecoverCount != 0:\n prime = st.select_slider(options=[i for i in cst.primeNumbers if i > secret and i > sharesCount], label=\"wybierz liczbę pierwszą:\")\n if st.button(\"generuj\"):\n AValues = smms.GetRandomAs(sharesToRecoverCount)\n Si = smms.GetSi(sharesCount, secret, sharesToRecoverCount, prime, AValues)\n for item in Si:\n st.write(item)\n returnedSecret = smms.RecoverSecret(\"1,2,3,4,5,6\", Si, prime)\n 
st.write(returnedSecret)\n\n","repo_name":"JBMErykTaszarek/CryptoUI","sub_path":"Shamir/ShamirUI.py","file_name":"ShamirUI.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23586849701","text":"from itertools import combinations\r\ndef Choose(data, c):\r\n\tres = 0;\r\n\targ = None;\r\n\tfor e in combinations(data, c):\r\n\t\tpre = res;\r\n\t\tres = max(res, max(i[2] for i in e) + 2 * sum(i[3] for i in e))\r\n\t\tif res is not pre:\r\n\t\t\targ = e\r\n\treturn list(arg)\r\n\r\nimport math\t\r\n\r\nfor t in range(1, int(input()) + 1):\r\n\ttotal, choose = tuple(map(int, input().split()))\r\n\tdata = [tuple(map(int, input().split())) for i in range(0, total)]\r\n\tdata = [tuple([e[0], e[1], e[0]**2, e[0]*e[1]]) for e in data]\r\n\tresult = Choose(data, choose)\r\n\tanswer = math.pi * (max(e[2] for e in result) + 2 * sum(e[3] for e in result))\r\n\tprint(\"Case #{}: {:.9f}\".format(t, answer))","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_209/663.py","file_name":"663.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20568755656","text":"def main():\n aantal = int(input(\"Hoeveel karakters wil u ingeven? \"))\n som = 0\n for i in range(aantal):\n char = input(\"geef een karakter: \")\n if char >= '0' and char <= '9':\n som += int(char)\n elif char >= 'A' and char <= 'Z':\n print(char + \" is een hoofdletter\")\n elif char >= 'a' and char <= 'z':\n print(char + \" is een kleine letter\")\n else:\n print(char + \" is onbekend\")\n print(\"Totaal cijfers \" + str(som))\n\nif __name__ == '__main__':\n main()","repo_name":"SemihAltintasPXL/PXLToegepast-Informatica","sub_path":"Vakken_eerste_jaar/IT-Essentials/IT-Essentials-oefeningen/oplossingen/6 Strings/oefening6.3.py","file_name":"oefening6.3.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"nl","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"2906264145","text":"# -*- coding: utf-8 -*-\n\n# Scrapy settings for xinhua project\n#\n# For simplicity, this file contains only the most important settings by\n# default. 
All the other settings are documented here:\n#\n# http://doc.scrapy.org/en/latest/topics/settings.html\n#\n\nBOT_NAME = 'xinhua'\n\nSPIDER_MODULES = ['xinhua.spiders']\nNEWSPIDER_MODULE = 'xinhua.spiders'\n\nDNSCACHE_ENABLED = False\nDOWNLOAD_TIMEOUT = 60\nLOG_LEVEL = 'INFO'\nCONCURRENT_REQUESTS = 100\n# Crawl responsibly by identifying yourself (and your website) on the user-agent\n#USER_AGENT = 'xinhua (+http://www.yourdomain.com)'\n","repo_name":"cicirao/xinhua","sub_path":"xinhua/xinhua/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"32726269833","text":"\"\"\"\n길이 1: 무조건 회문\n\n길이 2 - 1번 검사\n길이 3 - 1번 검사\n\n길이 4 - 2번 검사\n길이 5 - 2번 검사\n\n길이 6 - 3번 검사\n길이 7 - 3번 검사\n\n길이 8 - 4번 검사\n\"\"\"\nimport sys\nsys.stdin = open('input/input_0816_1.txt', 'r')\n\ndef palindromeCheck(length):\n if length == 1:\n return 32\n cnt = 0\n for row in board:\n # print(row)\n # row 안에서 몇 번 반복할지:\n for i in range(8 - length + 1):\n # 찾은 문자열 회문 검사\n temp = row[i:i+length]\n # print(temp)\n for j in range(length // 2):\n if temp[0+j] != temp[-1-j]:\n break\n else:\n cnt += 1\n return cnt\n \n# for i in range(1,9):\n# palindromeCheck(i+1)\n\nfor T in range(10):\n N = int(input())\n board = []\n for _ in range(8):\n board.append(input())\n result = 0\n result += palindromeCheck(N)\n board = list(zip(*board))\n result += palindromeCheck(N)\n\n print('#{0} {1}'.format(T+1, result))\n","repo_name":"ghleokim/algorithm","sub_path":"190816/solvingClub0816_palindrome.py","file_name":"solvingClub0816_palindrome.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5598461812","text":"from django.shortcuts import render\r\nfrom .forms import PersonEntryForm\r\nfrom .models import Person\r\n\r\n# Create your views here.\r\n\r\ndef display_form(request):\r\n form1 = PersonEntryForm()\r\n return render(request, 'app15/form1.html',{'form1':form1})\r\n\r\ndef receive_form(request):\r\n result = \"\"\r\n if request.method=='POST':\r\n form1 = PersonEntryForm(request.POST)\r\n if form1.is_valid():\r\n person = form1.save(commit=False)\r\n person.save()\r\n result=\"Save record successfully\"\r\n return render(request, 'app15/display1.html', {'result': result})\r\n\r\ndef display_all(request):\r\n persons = Person.objects.all()\r\n return render(request, 'app15/display2.html', {'persons':persons})","repo_name":"indrakhanal/CRUD-operation-django","sub_path":"app15/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22483336388","text":"\"\"\"Utilities for running code in parallel using multiprocessing\"\"\"\nimport math\nfrom multiprocessing import Pool\nimport numpy as np\nfrom ibis.custom_logging import get_logger\nfrom ibis.utilities.sqoop_helper import SqoopHelper, SQOOP_CACHE\nfrom ibis.utilities.sqoop_helper import SQOOP_CACHE_VIEW\nfrom ibis.utilities.utilities import Utilities\n\n\ndef parallel_dryrun_workflows(info):\n \"\"\"Dry run workflows in parallel.\n For sake of multiprocessing.Pool, this needs to be a top level function\n Args:\n info: List[cfg_mgr, workflow.xml]\n \"\"\"\n cfg_mgr = info[0]\n workflow_name = info[1]\n utils = Utilities(cfg_mgr)\n status = utils.dryrun_workflow(workflow_name)\n return status, workflow_name\n\n\ndef 
get_split_num(size, max_pool_size):\n \"\"\"Returns ideal number of splits\"\"\"\n splits = 1\n if size > max_pool_size:\n splits = math.ceil(float(size)/float(max_pool_size))\n return splits\n\n\nclass DryRunWorkflowManager(object):\n \"\"\"Dry run workflows in parallel\"\"\"\n\n def __init__(self, cfg_mgr):\n \"\"\"init\"\"\"\n self.cfg_mgr = cfg_mgr\n self.logger = get_logger(self.cfg_mgr)\n\n def run_all(self, workflows):\n \"\"\"Dry run in parallel. Filter files and dry run xmls\n Args:\n workflows: generated files\n \"\"\"\n status = True\n pool_info = []\n for file_name in workflows:\n if '.xml' in file_name and 'props_job.xml' not in file_name:\n pool_info.append(\n [self.cfg_mgr, file_name.replace('.xml', '')])\n\n num_splits = get_split_num(\n len(pool_info), self.cfg_mgr.parallel_dryrun_procs)\n chunks = np.array_split(pool_info, num_splits)\n\n for pool_chunk in chunks:\n pool_obj = Pool(processes=len(pool_chunk))\n result_list = pool_obj.map(parallel_dryrun_workflows, pool_chunk)\n pool_obj.terminate()\n for info in result_list:\n if not info[0]:\n err_msg = 'Dry run failed: {0}'.format(info[1])\n self.logger.error(err_msg)\n status = status and False\n else:\n status = status and True\n return status\n\n\ndef parallel_sqoop_output(info):\n \"\"\"Run sqoop queries in parallel\n For sake of multiprocessing.Pool, this needs to be a top level function\n Args:\n info: List[cfg_mgr, jdbcurl, sql_query, db_username,\n password_file]\n \"\"\"\n cfg_mgr = info[0]\n jdbc = info[1]\n sql_stmt = info[2]\n db_username = info[3]\n password_file = info[4]\n sqoop = SqoopHelper(cfg_mgr)\n result = sqoop.eval(jdbc, sql_stmt, db_username, password_file)\n return sql_stmt, result\n\n\nclass SqoopCacheManager(object):\n \"\"\"Cache sqoop query results\"\"\"\n\n def __init__(self, cfg_mgr):\n \"\"\"init\"\"\"\n self.cfg_mgr = cfg_mgr\n self.logger = get_logger(self.cfg_mgr)\n\n def cache_ddl_queries(self, tables):\n \"\"\"Caches DDL queries\"\"\"\n global SQOOP_CACHE\n\n pool_info = []\n for tbl in tables:\n sqoop = SqoopHelper(self.cfg_mgr)\n query = sqoop.get_ddl_query(tbl.jdbcurl, tbl.database,\n tbl.table_name, tbl.schema)\n pool_info.append([self.cfg_mgr, tbl.jdbcurl, query,\n tbl.username, tbl.password_file])\n\n num_splits = get_split_num(\n len(pool_info), self.cfg_mgr.parallel_sqoop_procs)\n chunks = np.array_split(pool_info, num_splits)\n\n for pool_chunk in chunks:\n pool_obj = Pool(processes=len(pool_chunk))\n result_list = pool_obj.map(parallel_sqoop_output, pool_chunk)\n pool_obj.terminate()\n for info in result_list:\n SQOOP_CACHE[info[0]] = info[1]\n\n def cache_ddl_views(self, tables):\n \"\"\"Caches DDL queries\"\"\"\n global SQOOP_CACHE_VIEW\n\n pool_info = []\n for tbl in tables:\n sqoop = SqoopHelper(self.cfg_mgr)\n if tbl.is_oracle:\n query = sqoop.get_ddl_table_view(tbl.jdbcurl, tbl.database,\n tbl.table_name)\n pool_info.append([self.cfg_mgr, tbl.jdbcurl, query,\n tbl.username, tbl.password_file])\n\n if len(pool_info) > 0:\n num_splits = get_split_num(\n len(pool_info), self.cfg_mgr.parallel_sqoop_procs)\n chunks = np.array_split(pool_info, num_splits)\n\n for pool_chunk in chunks:\n pool_obj = Pool(processes=len(pool_chunk))\n result_list = pool_obj.map(parallel_sqoop_output, pool_chunk)\n pool_obj.terminate()\n for info in result_list:\n SQOOP_CACHE_VIEW[info[0]] = 
info[1]\n","repo_name":"Cigna/ibis","sub_path":"ibis/utilities/run_parallel.py","file_name":"run_parallel.py","file_ext":"py","file_size_in_byte":4854,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"61"} +{"seq_id":"39218081867","text":"\"\"\"API blueprint configuration.\"\"\"\nfrom http import HTTPStatus\nfrom flask import Blueprint, current_app\nfrom flask_restx import Api\n\nfrom main.api.clasifier_pd.endpoints import imagentry_ns\n\nfrom main.exceptions import (\n NotFound,\n ServiceUnavailable,\n InternalServerError,\n)\n\napi_bp = Blueprint(\"api\", __name__)\nauthorizations = {\"Bearer\": {\"type\": \"apiKey\", \"in\": \"header\", \"name\": \"Authorization\"}}\n\napi = Api(\n api_bp,\n version=\"1.0\",\n title=\"Rest API Scaffold\",\n description=\"Welcome to the Swagger UI documentation site!\",\n doc=\"/api/1/ui\",\n authorizations=authorizations,\n)\n\napi.add_namespace(imagentry_ns, path=\"/entryimages\")\n\n\n@api.errorhandler(NotFound)\ndef not_found_handler(error):\n current_app.logger.error(str(error))\n return (\n {\"message\": error.message, \"details\": str(error)},\n HTTPStatus.BAD_REQUEST,\n )\n\n\n@api.errorhandler(ServiceUnavailable)\ndef service_unavailable_handler(error):\n current_app.logger.error(str(error))\n return (\n {\"message\": error.message, \"details\": str(error)},\n HTTPStatus.SERVICE_UNAVAILABLE,\n )\n\n\n@api.errorhandler(InternalServerError)\ndef internal_server_error_handler(error):\n current_app.logger.error(str(error))\n return (\n {\"message\": error.message, \"details\": str(error)},\n HTTPStatus.INTERNAL_SERVER_ERROR,\n )\n","repo_name":"imceballos/productodedatos_2021","sub_path":"src/main/api/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42208630254","text":"#!/usr/bin/python3\n'''\n The N queens puzzle is the challenge of placing\n N non-attacking queens on an N×N chessboard.\n Write a program that solves the N queens problem.\n'''\n\n\nimport sys\n\n\ndef is_safe(board, row, col):\n '''\n Checks if a queen can be placed at the given position\n without attacking any other queens on the board.\n\n Args:\n board (list): The current state of the chessboard.\n row (int): The row index of the position to check.\n col (int): The column index of the position to check.\n\n Returns:\n bool: True if the position is safe, False otherwise.\n '''\n # Check the current row on the left side\n for i in range(col):\n if board[row][i] == 1:\n return False\n\n # Check the upper diagonal on the left side\n i, j = row, col\n while i >= 0 and j >= 0:\n if board[i][j] == 1:\n return False\n i -= 1\n j -= 1\n\n # Check the lower diagonal on the left side\n i, j = row, col\n while i < N and j >= 0:\n if board[i][j] == 1:\n return False\n i += 1\n j -= 1\n\n return True\n\n\ndef solve_nqueens(board, col, solutions):\n '''\n Recursive function to solve the N-Queens problem.\n\n Args:\n board (list): The current state of the chessboard.\n col (int): The current column being considered.\n solutions (list): A list to store the found solutions.\n\n Return:\n bool: True if a solution is found, False otherwise.\n '''\n # Base case: All queens have been placed\n if col == N:\n queens = []\n for i in range(N):\n for j in range(N):\n if board[i][j] == 1:\n queens.append([i, j])\n solutions.append(queens)\n return True\n\n # Recursive case: Try placing a queen in each row of the current 
column\n for row in range(N):\n if is_safe(board, row, col):\n board[row][col] = 1\n solve_nqueens(board, col + 1, solutions)\n board[row][col] = 0\n\n return False\n\n\ndef print_solutions(solutions):\n '''\n Prints the solutions to the N-Queens problem.\n\n Args:\n solutions (list): A list of found solutions.\n '''\n for solution in solutions:\n print(solution)\n\n\nif __name__ == \"__main__\":\n # Parse the command-line argument\n if len(sys.argv) != 2:\n print(\"Usage: nqueens N\")\n sys.exit(1)\n\n try:\n N = int(sys.argv[1])\n except ValueError:\n print(\"N must be an integer\")\n sys.exit(1)\n if N < 4:\n print(\"N must be at least 4\")\n sys.exit(1)\n\n # Create an empty chessboard\n board = [[0 for _ in range(N)] for _ in range(N)]\n\n # Solve the N-Queens problem\n solutions = []\n solve_nqueens(board, 0, solutions)\n\n # Print the solutions\n print_solutions(solutions)\n","repo_name":"kelechi-aims/alx-higher_level_programming","sub_path":"0x08-python-more_classes/101-nqueens.py","file_name":"101-nqueens.py","file_ext":"py","file_size_in_byte":2843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20821801397","text":"# ./file/proverbs.txt 파일 열기 \r\nfilename = input(\"파일 이름을 입력하시오: \").strip()\r\nsIn = \"./st01.Python기초/py31파일처리/file/%s\" % (filename)\r\ninfile=open(sIn, \"r\")\r\nfreqs={}\r\n\r\n# 파일의 각 줄에 대하여 문자를 추출한다. 각 문자를 사전에 추가한다.\r\nfor line in infile:\r\n for char in line.strip():\r\n if char in freqs:\r\n freqs[char]+=1\r\n else:\r\n freqs[char]=1\r\n\r\nprint(freqs)\r\ninfile.close()\r\n","repo_name":"parky83/python0209","sub_path":"st01.Python기초/py31파일처리/py31_13_count_letter.py","file_name":"py31_13_count_letter.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35559243339","text":"#ukol-04: Třídy\n#Deadline: 25.10.2022\n\n#Vyberte si kterou variantu chcete, pro odvážnější jsem sepsala jen obecne požadavky třídy, atributy a metody nechávám na vás, můžete použít něco co vam přijde praktické, co vás zajímá :) Kdo se nechce zdržovat s vymášlením a chce si procvičit látku na konkretním zadání, může použít druhou variantu.\n\n#Na úkolu si vyzkuoušejte, že umíte třídu definovat a vyvořit konkrétní objekty, experimentujte. Svoje pokusy v kódu klidně nechte i při odevzdání. Pokud budete chtít něco vytvořit ale nevíte jak, jestli na to jdete dobře, nebo prostě chcete jen sdílet nápad, napiště na slack\n\n#Obecné požadavky na úkol (vyžaduje více času a vlastní iniciativy)\n#Existuji alespoň dvě různé třídy z toho každá má:\n#alespoň 3 atributy (nemusí být všechny jako argumenty v __init__)\n#alespoň 3 metody + init (nebo může být i dataclass)\n#Na konci souboru je ukázka použití tříd a metod\n\n\n\n#Vymyšlené zadání\n#Uvažuj že vytváříš kuchařku a potřebuješ uložit několik receptů. Vytvoř dvě třídy Recept a Kucharka (idealne v tomto poradi).\n\n#1. 
Recept\n#Bude mít 3 atributy:\n\n#nazev - string, jmeno kucharky\n#narocnost - necham na vas jak ji budete reprezentovat (muze byt cislo, muze byt slovni vyjadreni)\n#url_adresa - string, odkaz na recept\n#vyzkouseno - bool, metoda __init__ ji vzdy nastavi na False\n#nazev,narocnost, url_adresa budou atributy metody __init__, tedy uzivatel si je muze zvolit pri vytvareni objektu.\n\n#A bude mít také 3 metody:\n\n#__str___(self)\n#vraci hezky vypis receptu (necham na vas ktere atributy chcete do vypisu dat)\n#zmen_narocnost(self, nova_hodnota)\n#zmeni narocnost, tedy zmeni atribut narocnost na nova_hodnota\n#zkusit(self)\n#zmeni atribut vyzkouseno na True\n#2. Kucharka\n\n#_______________________________________________________________________________________________\n\n\n\n#Vymyšlené zadání\n#Uvažuj že vytváříš kuchařku a potřebuješ uložit několik receptů. Vytvoř dvě třídy Recept a Kucharka (idealne v tomto poradi).\n\n#1. Recept\n#Bude mít 3 atributy:\n\n#nazev - string, jmeno kucharky\n#narocnost - necham na vas jak ji budete reprezentovat (muze byt cislo, muze byt slovni vyjadreni)\n#url_adresa - string, odkaz na recept\n#vyzkouseno - bool, metoda __init__ ji vzdy nastavi na False\n#nazev,narocnost, url_adresa budou atributy metody __init__, tedy uzivatel si je muze zvolit pri vytvareni objektu.\n\n\nclass Recept:\n \n def __init__(self, nazev_receptu:str, narocnost_receptu: int, URL_adresa:str):\n self.nazev_receptu = nazev_receptu\n self.narocnost_receptu = narocnost_receptu\n self.URL_adresa = URL_adresa\n \n def __str__(self):\n return f'Recept {self.nazev_receptu} najdete na sdrese {self.URL_adresa} Náročnost receptu {self.narocnost_receptu}'\n \nmuj_recept = Recept('Bábovka', 5, 'www.recepty.cz')\n\nprint(muj_recept.nazev_receptu)\nprint(muj_recept.narocnost_receptu)\nprint(muj_recept.URL_adresa)\n\nclass Kucharka(Recept):\n def __init__(self, nazev_receptu, narocnost_receptu, URL_adresa, vyzkouseno):\n super().__init__(nazev_receptu,narocnost_receptu, URL_adresa)\n self.vyzkouseno = vyzkouseno\n \nbábovka = Kucharka('Bábovka', 4,'www.recepty.cz', True)\n\n\ndef recepty(self, vyzkouseno):\n if vyzkouseno == self.vyzkouseno:\n \n return f'{self.vyzkouseno} jedná se o osvědčený recept.'\n \n else:\n \n return f'{self.vyzkouseno} nikdo neochutnal'\n \nrecept_z_kucharky = Kucharka('Bábovka', 4, 'www.recepty.cz', True)\n \n \nprint(f' Recept byl vyzkoušen = {recept_z_kucharky.vyzkouseno}')\n \n \n \n\n\n","repo_name":"Nezmar/ukol4_python","sub_path":"ukol4_python.py","file_name":"ukol4_python.py","file_ext":"py","file_size_in_byte":3746,"program_lang":"python","lang":"cs","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15833190949","text":"import re\nimport nltk\nimport pandas as pd\nimport os\nimport csv\nimport matplotlib.pyplot as plt\nimport sqlite3\nimport seaborn as sns\nimport base64\n\nfrom os import path\nfrom PIL import Image\nfrom wordcloud import WordCloud, STOPWORDS, ImageColorGenerator\nfrom Sastrawi.Stemmer.StemmerFactory import StemmerFactory\nfrom Sastrawi.StopWordRemover.StopWordRemoverFactory import StopWordRemoverFactory\nfrom flask import Flask, request, render_template, request, redirect\nfrom io import BytesIO\n\nnltk.download('punkt')\n\n\napp = Flask(__name__)\napp.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0\napp.config['FILE_UPLOADS'] = \"c:/GC/uploads\"\n\n#===================================================\nwith open('c:/GC/data/new_kamusalay.csv', encoding = 'latin-1', mode='r') as infile:\n reader = csv.reader(infile)\n ids = 
{rows[0]:rows[1] for rows in reader}\n \n#Fungsi untuk mengubah teks menjadi huruf kecil untuk memudahkan proses cleaning\ndef casefolding(review):\n review = str(review).lower()\n return review\n\n#Fungsi untuk memproses pemisahan teks menjadi potongan-potongan yang disebut sebagai token untuk kemudian di analisa\ndef tokenize(review):\n #token = nltk.word_tokenize(str(review))\n token = nltk.tokenize.word_tokenize(review)\n return token\n\n#Fungsi untuk menghilangkan angka, karakter aneh dan kata yang tidak diperlukan\ndef filtering(review):\n # Remove angka termasuk angka yang berada dalam string\n # Remove non ASCII chars\n review = re.sub(r'[^\\x00-\\x7f]', r'', review)\n review = re.sub(r'(\\\\u[0-9A-Fa-f]+)', r'', review)\n review = re.sub(r\"[^A-Za-z0-9^,!.\\/'+-=]\", \" \", review)\n review = re.sub(r'\\\\u\\w\\w\\w\\w', '', review)\n # Remove link web\n review = re.sub(r'http\\S+', '', review)\n # Remove URL\n review = re.sub(r'url', '', review)\n # Remove RT USER\n review = re.sub(r'rt user', '', review)\n # Remove USER\n review = re.sub(r'user', '', review)\n # Remove @username\n review = re.sub('@[^\\s]+', '', review)\n # Remove #tagger\n review = re.sub(r'#([^\\s]+)', '', review)\n # Remove simbol, angka dan karakter aneh\n review = re.sub(r\"[.,:;+!\\-_<^/=?\\\"'\\(\\)\\d\\*]\", \" \", review)\n return review\n\n#Fungsi untuk menghilangkan karakter yang lebih dari 1 menjai cukup 1 karakter\ndef replaceThreeOrMore(review):\n # Pattern to look for three or more repetitions of any character, including newlines (contoh goool -> gol).\n pattern = re.compile(r\"(.)\\1{2,}\", re.DOTALL)\n return pattern.sub(r\"\\1\", review)\n\n#Fungsi untuk mengubah kata-kata sesuai dengan kamus yang diberikan\ndef convertToSlangword(review):\n kamus_slangword = ids\n # Search pola kata (contoh kpn -> kapan)\n pattern = re.compile(r'\\b( ' + '|'.join (kamus_slangword.keys())+r')\\b') \n content = []\n # Replace slangword berdasarkan pola review yg telah ditentukan\n for kata in review:\n filteredSlang = pattern.sub(lambda x: kamus_slangword[x.group()],kata) \n content.append(filteredSlang.lower())\n review = content\n return review\n\n#Fungsi untuk menghilangkan kata yang hanya terdiri dari konsonan / tidak berarti\ndef removeNoVowelWord(review):\n vowel = ['a','e','i','o','u'] \n review = [word for word in review if any(v in word for v in vowel)]\n return review\n\n#Fungsi untuk mengambil kata dasar dengan menghilangkan awal, akhiran dan sisipan dalam bahasa indonesia menggunakan library Sastrawi\ndef stemmer(review):\n # create stemmer\n factory = StemmerFactory()\n stemmer = factory.create_stemmer()\n # stemming process\n review = stemmer.stem(str(review))\n return review\n\n#Fungsi untuk menghilangkan kata sambung yang tidak diperlukan untuk dianalisa\ndef removeStopWordIndo(review):\n factory = StopWordRemoverFactory()\n stopword = factory.create_stop_word_remover()\n review = stopword.remove(str(review))\n return review\n\n#Fungsi untuk membuat grafik word cloud dengan menggunakan Matplotlib\ndef wordcloud(review,namafile):\n # Create and generate a word cloud image:\n wordcloud = WordCloud(max_font_size=50, max_words=100, background_color=\"white\").generate(review)\n plt.figure()\n plt.imshow(wordcloud, interpolation=\"bilinear\")\n plt.axis(\"off\")\n # Save the image in the images folder:\n namafile = namafile + '.png'\n imagePath = ''\n imagePath = os.path.join('static',namafile)\n wordcloud.to_file(imagePath)\n return imagePath\n\n#Fungsi yang digunakan untuk menghitung kata berulang 
dan menyimpannya ke dalam database sqlite\ndef word_count(str):\n # Count the number of occurrences of each word in the string\n counts = {}\n words = str.split()\n for word in words:\n if word in counts:\n counts[word] += 1\n else:\n counts[word] = 1\n\n # Connect to the database\n conn = sqlite3.connect('teks.db')\n cursor = conn.cursor()\n\n # Check if the table exists\n cursor.execute(\"SELECT name FROM sqlite_master WHERE type='table' AND name='teksstat'\")\n table_exists = bool(cursor.fetchone())\n\n if not table_exists:\n # Create the table if it doesn't exist\n cursor.execute(\"CREATE TABLE teksstat(kata text, jumlah integer)\")\n\n try:\n cursor.execute(\"DROP TABLE teksstat\")\n cursor.execute(\"CREATE TABLE teksstat(kata text, jumlah integer)\")\n # Insert records into the table\n for word, count in counts.items():\n cursor.execute(\"INSERT INTO teksstat (kata, jumlah) VALUES (?, ?)\", (word, count))\n conn.commit()\n except sqlite3.Error as e:\n print(\"An error occurred:\", e)\n\n # Close the database connection\n conn.close()\n return\n\n#fungsi untuk menampilkan grafik perulangan kata dengan menggunakan seaborn\ndef show_barchart():\n # Connect to the database\n conn = sqlite3.connect('teks.db')\n\n # Read the data from the teksstat table into a pandas DataFrame\n df = pd.read_sql_query(\"SELECT * FROM teksstat ORDER BY jumlah DESC LIMIT 5\", conn)\n\n # Close the database connection\n conn.close()\n\n # Create a bar chart using Seaborn\n sns.set_style('whitegrid')\n plt.figure(figsize=(6, 4))\n sns.barplot(x='kata', y='jumlah', data=df)\n plt.title('5 Kata yang sering muncul dari Teks Bersih')\n plt.xlabel('Kata')\n plt.ylabel('Jumlah')\n\n # Convert the plot to a PNG image\n img = BytesIO()\n plt.savefig(img, format='png')\n img.seek(0)\n\n # Encode the PNG image in base64 for embedding in the HTML\n plot_url = base64.b64encode(img.getvalue()).decode()\n return plot_url \n\n#===================================================\n\n# Routing Home menampilkan dua tombol untuk memilih apakah akan mengcleaning teks atau file\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n# Routing Cleaning text menampilkan Scrollbox untuk inputan teks\n@app.route('/cleaningtext')\ndef text_input():\n return render_template('indextext.html')\n\n# Routing Cleaning text untuk memproses dan menampilkan hasil cleaning teks\n@app.route('/cleaningtext', methods=[\"POST\"])\ndef text_clean():\n # Mulai mengambil data dari form dan melakukan cleaning teks\n teks_asli = [request.form['note']]\n teks = casefolding(teks_asli)\n teks = filtering(teks)\n teks = replaceThreeOrMore(teks)\n teks = tokenize(teks)\n teks = convertToSlangword(teks)\n teks = removeNoVowelWord(teks)\n teks = ' '.join(teks)\n teks = stemmer(teks)\n teks = removeStopWordIndo(teks)\n\n #Menampilkan word cloud dengan mengcreate wordcloud dari teks dan mengembalikan path gambar word cloud untuk ditampilkan\n tekswc = wordcloud(teks,'teksbersih')\n teks_asli = ' '.join(teks_asli)\n teksasliwc = wordcloud(teks_asli,'teksasli')\n #hitung kata, masukkan ke sqlite database\n word_count(teks)\n plot_url = show_barchart()\n return render_template('hasilbersih.html', note_asli=teks_asli, note_hasil=teks, wordcloudbersih=tekswc, wordcloudasli=teksasliwc, plot_url=plot_url)\n \n@app.route('/cleaningfile', methods=[\"GET\",\"POST\"])\ndef get_file():\n if request.method == 'POST':\n if request.files:\n uploaded_file = request.files['filename'] # This line uses the same variable and worked fine\n filepath = 
os.path.join(app.config['FILE_UPLOADS'], uploaded_file.filename)\n uploaded_file.save(filepath)\n df = pd.read_csv(filepath,encoding='latin1')\n jumlahrecord = request.form['jmlrecord']\n namakolom = request.form['namakolom']\n datasets = [df.head(int(jumlahrecord))]\n # Tampilkan Data Frame dalam bentuk tabel\n # return render_template('hasilfiletablebersih.html', tables=[df.head().to_html(classes='data')], titles=df.head().columns.values)\n # Keeping only the neccessary columns\n for teks in datasets:\n teks_asli = teks[namakolom].astype(\"string\") \n teks = teks[namakolom]\n teks = teks.apply(casefolding)\n teks = teks.apply(filtering)\n teks = teks.apply(replaceThreeOrMore)\n teks = teks.apply(tokenize)\n teks = teks.apply(convertToSlangword)\n teks = teks.apply(removeNoVowelWord)\n teks = teks.apply(\" \".join)\n teks = teks.apply(stemmer)\n teks = teks.apply(removeStopWordIndo)\n teks = ' '.join(teks)\n teks_asli = ' '.join(teks_asli)\n # Menampilkan word cloud dengan mengcreate wordcloud dari teks dan mengembalikan path gambar word cloud untuk ditampilkan\n tekswc = wordcloud(teks,'teksbersih')\n teksasliwc = wordcloud(teks,'teksasli')\n #hitung kata, masukkan ke sqlite database\n word_count(teks)\n plot_url = show_barchart()\n return render_template('hasilfilebersih.html',note_asli=teks_asli, note_hasil=teks, wordcloudbersih=tekswc, wordcloudasli=teksasliwc, plot_url=plot_url)\n\n return render_template('indexfile.html')\n\n# No caching at all for API endpoints.\n@app.after_request\ndef add_header(response):\n # response.cache_control.no_store = True\n response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'\n response.headers['Pragma'] = 'no-cache'\n response.headers['Expires'] = '-1'\n return response\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"PJB-bootcamp-binar/gold_challange_andrie","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":10332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31961492140","text":"import os\n\nfrom selenium import webdriver\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.common.keys import Keys\n\n\ndef extract_google_reviews(driver, resturauntName):\n driver.get('https://www.google.com/?hl=en')\n driver.find_element_by_name('q').send_keys(resturauntName)\n try:\n WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.NAME, 'btnK'))).click()\n except:\n driver.find_element_by_name('q').send_keys(Keys.ENTER)\n\n Header = driver.find_element_by_css_selector('div.kp-header')\n Rating = Header.find_element_by_class_name(\"Aq14fc\").get_attribute('innerHTML')\n Link = Header.find_element_by_partial_link_text('Google reviews')\n numberOfReviews = int((Link.text.split()[0]).replace(',', ''))\n Link.click()\n\n allReviews = WebDriverWait(driver, 10).until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, 'div.gws-localreviews__google-review')))\n if(numberOfReviews > 20):\n totalReviews = 20\n else:\n totalReviews = numberOfReviews\n\n while len(allReviews) < totalReviews:\n driver.execute_script('arguments[0].scrollIntoView(true);', allReviews[-1])\n WebDriverWait(driver, 10, 
0.1).until_not(EC.presence_of_element_located((By.CSS_SELECTOR, 'div[class$=\"activityIndicator\"]')))\n        allReviews = driver.find_elements_by_css_selector('div.gws-localreviews__google-review')\n\n    reviewsSearched = 0\n    text = open(\"review.txt\", \"w+\", encoding='utf-8')\n    for review in allReviews:\n        try:\n            element = review.find_element_by_css_selector('span.review-full-text')\n            reviewsSearched += 1\n        except NoSuchElementException:\n            element = review.find_element_by_xpath('div[1]/div[3]/div[2]/span[1]')\n            reviewsSearched += 1\n        \n        review = element.get_attribute('textContent')\n        text.write(review)\n        text.write(\"\\n\")\n        \n    text.close()\n    return reviewsSearched, numberOfReviews, Rating\n\ndef test():\n    #chrome_options = Options()\n    #chrome_options.add_argument(\"--headless\")\n    driver = webdriver.Chrome(os.getcwd() + r\"/chromedriver_win32/chromedriver.exe\")\n    reviewsSearched, numberOfReviews, Rating = extract_google_reviews(driver, 'Izakaya O-Tori')\n    driver.quit()\n    #print(reviewsSearched, \" \" , numberOfReviews, \" \", Rating)\n","repo_name":"HiltonTR/mahjongers","sub_path":"getGoogleReviews.py","file_name":"getGoogleReviews.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"3680228185","text":"\"\"\"\n1. Написати Python-скрипт, який виводить на екран усі числа в діапазоні від 1 до 100 кратні 7\n\"\"\"\nw = [i for i in range(1, 101) if not i % 7]\nprint(w)\n\n# better way\ny = [i for i in range(7,101,7) if not i % 7] # set step <7> to make less iterations\nprint(y)\n\n\"\"\"\n2. Написати Python-скрипт, який обчислює за допомогою циклу факторіал числа n (n вводиться з клавіатури).\n\"\"\"\n\nn = int(input('n = '))\n\nresult = 1\nfor num in range(1, n+1):\n    result *= num\n    print(result)\n\n\"\"\"\n3. Написати Python-скрипт, який виводить на екран таблицю множення на 5.\nПереважно друкувати 1 x 5 = 5, 2 x 5 = 10, а не просто 5, 10,\n\"\"\"\n# num = int(input('num = '))\n\n# print(f'Таблиця множення на {num}')\n# for item in range(1,11):\n# value = item * num\n# print(item, 'x', num, '=', value)\n\nfor i in range(1,11):\n    for j in range(1,11):\n        print(f'{i} x {j} = {i * j}')\n    print('*'*20)\n\n\"\"\"\n4. Написати Python-скрипт, який виводить на екран прямокутник із '*'.\nВисота і ширина прямокутника вводяться з клавіатури.\nНаприклад, нижче представлений прямокутник з висотою 4 та шириною 5\n\"\"\"\n\nh = int(input('h='))\nw = int(input('w='))\n\nprint('*' * w)\nfor i in range(h - 2):\n    print('*', ' ' * (w - 4), '*')\nprint('*' * w)\n\n\n# n, m = int(input('n=')), int(input('m='))\n#\n# res = f\"{'*' * m}\\n\" + f\"*{' ' * (m-2)}*\\n\"*(n-2) + f\"{'*' * m}\\n\"\n# print(res)\n\n\"\"\"\n5. Є список [0,5,2,4,7,1,3,19]. Написати Python-скрипт для підрахунку непарних цифр у ньому\n\"\"\"\n\nin_list = [0, 5, 2, 4, 7, 1, 3, 19]\n# step by step code\nnew_list = [i for i in in_list if i % 2]\nodd_quantity = len(new_list)\nprint(new_list)\nprint(odd_quantity)\n\n# one line code\nresult = len([i for i in in_list if i % 2])\nprint(result)\n\n\n# homework review\n\nx= [1,2,3,4,5,6]\ncount = 0\nfor item in x:\n    if item % 2:\n        count +=1\nprint(count)\n\n\"\"\"\n6. Створіть список випадкових чисел (розміром 4 елементи). 
Створіть другий список у два рази більше першого,\nде перші 4 елементи повинні дорівнювати елементам першого списку, а решта елементів - подвоєним значенням початкових.\n Було → [1,4,7,2]\n Стало → [1,4,7,2,2,8,14,4]\n\"\"\"\n\nimport random\n#\n# list1 = [random.randint(1, 9) for i in range(4)]\n# list2 = [item*2 for item in list1]\n# result = list1 + list2\n#\n# print(list1)\n# print(list2)\n# print('='*26)\n# print(result)\n\n\n# homework review\nx = [random.randint(1,100) for _ in range(4)]\nprint(x)\ny = x[:] + [item + item for item in x]\nprint(y)\n\n\n\"\"\"\n7. Створіть список із 12 елементів. Кожен елемент цього списку є зарплатою робітника за місяць.\nВиведіть цей список на екран та обчисліть середньомісячну зарплату цього робітника.\n\"\"\"\n\nimport random\n\nsalary_list = [random.randint(1,60000) for _ in range(25)] # salary for the last two years\navg = round(sum(salary_list[-12:])/12)\n\nprint(salary_list)\nprint(avg)\n\n\"\"\"\n8. \n [1, 2, 3, 4]\n [5, 6, 7, 8]\n [9,10, 11, 12]\n [13,14, 15, 16]\nНапишіть Python-скрипт, який виведе цю матрицю на екран,\nобчислить та виведе суму елементів цієї матриці.\n\"\"\"\n\nx = [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9,10, 11, 12],\n [13,14, 15, 16]\n ]\n\nfor row in x:\n matrix = '\\t'.join(map(str,row))\n print(matrix)\n\nlev1 = []\nfor i in x:\n lev1.append(sum(i))\nres = sum(lev1)\n\nprint('Sum of matrix element:',res)\n\n\n\"\"\"\n9. Написати код для дзеркального перевороту списку [7,2,9,4] -> [4,9,2,7].\nСписок може бути довільною довжиною.\n\"\"\"\n\na = [7, 2, 9, 4]\n\nb = list(reversed(a)) # create new list\nprint(b)\n\nfor i in reversed(a): # not creating new list, just iterating through the elements\n print(i)\n\n\"\"\"\n10. За допомогою циклів вивести на екран усі прості числа від 1 до 100.\n *Просте число — натуральне число, яке має рівно два різних натуральних дільники (лише 1 і саме число).\n *Послідовність простих чисел до 100:\n 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97,\n\"\"\"\n\nfor n in range(1, 101):\n for i in range(2, n):\n if not n % i:\n break\n else:\n print(n)\n\n\"\"\"\n11. 
Виведіть на екран «пісочний годинник», максимальна ширина якого зчитується з клавіатури (число непарне).\nУ прикладі ширина дорівнює 5.\n*****\n ***\n *\n ***\n*****\n\"\"\"\nnum = int(input('num='))\nnum_list = [i for i in range(1, num+1) if i% 2]\nnew = list(reversed(num_list))+num_list[1::]\nprint(new)\n\nfor n in new:\n print((n*'*').center(num))\n\n\n\n\n\n","repo_name":"HelgaTe/prog_academy_course_2022","sub_path":"homework/homework_6.py","file_name":"homework_6.py","file_ext":"py","file_size_in_byte":5509,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13314500976","text":"import gym\nimport gym_RUBiS_sim\nfrom dyna_q import DynaQAgent\nfrom discretizer import RubisDiscretizer\nimport traceback\nimport os\nimport time\nimport experiment_constants as c\nfrom metrics_creator import save_metrics, summarize_metrics, get_drifts_for_env\nfrom dataframe_creator import get_and_save_dataframe_with_optimums, smooth_data_for_plots\nfrom plot_creator import save_plot\nfrom gym_RUBiS_sim.envs.rubis_env import RubisEnv\n\n\ndef create_dir_if_not_exists(directory_name):\n if not os.path.exists(directory_name):\n try:\n os.makedirs(directory_name)\n except OSError:\n assert False\n\n\ndef create_file(file_name, file_content):\n directory_name = os.path.dirname(file_name)\n\n create_dir_if_not_exists(directory_name)\n\n if not os.path.exists(file_name):\n file = open(file_name, \"a\")\n file.write(file_content)\n\n\ndef create_file_with_counter(base_file_name_with_extension, file_content):\n file_name, file_extension = os.path.splitext(base_file_name_with_extension)\n file_counter = 1\n\n while os.path.exists(f'{file_name}_{file_counter}{file_extension}'):\n file_counter += 1\n\n create_file(f'{file_name}_{file_counter}{file_extension}', file_content)\n pass\n\n\nclass Experiment:\n\n # Helper methods\n\n def make_environment(self, save_log, log_filename):\n env = gym.make(\n 'RUBiS_sim_functionalized-v0',\n case=self.case,\n noise_coefficient=self.noise_coefficient,\n noise_seed=None,\n save_log=save_log,\n log_filename=log_filename,\n smoothing=self.smoothing,\n smoothing_factor_latency=self.smoothing_factor_latency,\n smoothing_factor_recommendation_ratio=self.smoothing_factor_recommendation_ratio,\n action_space_type=self.action_space_type,\n discrete_step_size=self.discrete_step_size,\n reward_function='linear',\n response_time=self.response_time,\n threshold=self.threshold,\n workload_pattern=self.workload_pattern,\n initial_concurrency=self.initial_concurrency,\n amplitude=self.amplitude,\n high_concurrency=self.high_concurrency,\n period_length=self.period_length,\n drifts=get_drifts_for_env(self.drifts, self.execution_time_steps)\n )\n\n return env\n\n def make_environment_object(self, log_filename, save_log=False):\n env_object = RubisEnv(\n # Always case 4, so that the calculation of the optimum reward and dimmer is not affected by noise when\n # using case 6\n case=4,\n noise_coefficient=self.noise_coefficient,\n noise_seed=None,\n save_log=save_log,\n log_filename=log_filename,\n smoothing=self.smoothing,\n smoothing_factor_latency=self.smoothing_factor_latency,\n smoothing_factor_recommendation_ratio=self.smoothing_factor_recommendation_ratio,\n action_space_type=self.action_space_type,\n discrete_step_size=self.discrete_step_size,\n reward_function='linear',\n response_time=self.response_time,\n threshold=self.threshold,\n workload_pattern=self.workload_pattern,\n initial_concurrency=self.initial_concurrency,\n 
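# editorial annotation (hedged): judging by the directory-naming helpers later in this class,\n            # amplitude is only meaningful for the 'cyclic' workload, high_concurrency for 'burst',\n            # and period_length for every pattern except 'constant'.\n            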
amplitude=self.amplitude,\n high_concurrency=self.high_concurrency,\n period_length=self.period_length,\n drifts=get_drifts_for_env(self.drifts, self.execution_time_steps)\n )\n\n return env_object\n\n # Example for a possible experiment location:\n # data/experiments/dyna-q/e=0.3,a=0.9,y=0.8/workload=cyclic,drift=False/target=avg...reward=linear/granularity_rec_ratio=0.01...max_concurrency=100/experiment_12/\n\n # 'experiment_12' means that this is the 12th experiment conducted with the given parameters for the agent,\n # environment and discretizer.\n\n # The raw data generated by the given experiment is saved in a training.csv and an\n # evaluation.csv file. The evaluation.csv file is evaluated in order to obtain the metrics and plots for that\n # experiment.\n\n # After all experiments have been done and evaluated, the metrics are averaged to obtain the average performance of\n # the given parametrization.\n\n def get_agent_category_directory_name(self):\n return f\"{c.DATA_BASE_DIRECTORY_NAME}\" \\\n f\"{'q-learning' if self.n == 0 else f'dyna-q,n={self.n}'}\" \\\n f\"/\"\n\n def get_agent_parameter_configuration_directory_name(self):\n return f\"{self.get_agent_category_directory_name()}\" \\\n f\"e={self.exploration_rate},\" \\\n f\"a={self.learning_rate},\" \\\n f\"y={self.discount_factor}\" \\\n f\"/\"\n\n def get_environment_category_directory_name(self):\n return f\"{self.get_agent_parameter_configuration_directory_name()}\" \\\n f\"workload={self.workload_pattern},\" \\\n f\"drifts={self.drifts}\" \\\n f\"/\"\n\n def get_environment_parameter_configuration_directory_name(self):\n\n amplitude = f\"amplitude={self.amplitude},\" if self.workload_pattern == 'cyclic' else ''\n\n high_concurrency = f\"high_concurrency={self.high_concurrency},\" if self.workload_pattern == 'burst' else ''\n\n period_length = '' if self.workload_pattern == 'constant' else f\"period_length={self.period_length},\"\n\n if self.smoothing is not None:\n smoothing_params = f\",smoothing_l={self.smoothing_factor_latency}\" \\\n f\",smoothing_rr={self.smoothing_factor_recommendation_ratio}\"\n else:\n smoothing_params = ''\n\n action_granularity = f'dimmer_step_size={self.discrete_step_size},' if self.action_space_type == 'discrete' \\\n else f'dimmer_granularity={self.continuous_dimmer_granularity},'\n\n return f\"{self.get_environment_category_directory_name()}\" \\\n f\"time_steps={self.execution_time_steps},\" \\\n f\"init_concurrency={self.initial_concurrency},\" \\\n f\"{amplitude}\" \\\n f\"{high_concurrency}\" \\\n f\"{period_length}\" \\\n f\"action_space={self.action_space_type},\" \\\n f\"{action_granularity}\" \\\n f\"target={self.response_time},\" \\\n f\"threshold={self.threshold},\" \\\n f\"case={self.case},\" \\\n f\"noise={self.noise_coefficient},\" \\\n f\"smoothing={self.smoothing}\" \\\n f\"{smoothing_params}\" \\\n f\"/\"\n\n def get_discretizer_directory_name(self):\n dimmer_granularity = f\"dimmer_granularity={self.continuous_dimmer_granularity},\" \\\n if self.action_space_type == 'continuous' else ''\n\n granularity_rr = f'granularity_rr={self.granularity_recommendation_ratio},' \\\n if self.include_recommendation_ratio else ''\n\n granularity_l = f'granularity_l={self.granularity_latency},' \\\n if self.include_latency else ''\n\n upper_bound_l = f'upper_bound_l={self.upper_bound_latency},' \\\n if self.include_latency else ''\n\n return f\"{self.get_environment_parameter_configuration_directory_name()}\" \\\n f\"include_l={self.include_latency},\" \\\n 
f\"include_rr={self.include_recommendation_ratio},\" \\\n f\"{granularity_l}\" \\\n f\"{granularity_rr}\" \\\n f\"{upper_bound_l}\" \\\n f\"{dimmer_granularity}\" \\\n f\"/\"\n\n def get_single_experiment_directory_name(self, experiment_number):\n return f\"{self.get_discretizer_directory_name()}\" \\\n f\"experiments_data/\" \\\n f\"experiment_{experiment_number}\" \\\n f\"/\"\n\n def create_description_file(self):\n file_name = f\"{self.get_discretizer_directory_name()}description.txt\"\n create_file(file_name, self.get_experiment_description())\n\n def save_time_in_description_file(self,\n seconds_for_all_successful_experiments,\n experiments_interrupted=False,\n interrupted_experiment_number=None):\n file_name = f\"{self.get_discretizer_directory_name()}description.txt\"\n minutes_since_start = seconds_for_all_successful_experiments / 60\n if not experiments_interrupted:\n seconds_per_experiment = seconds_for_all_successful_experiments / self.number_of_experiments\n minutes_per_experiment = seconds_per_experiment / 60\n time_description = f'It took {seconds_for_all_successful_experiments} seconds ({minutes_since_start} minutes) ' \\\n f'to execute all {self.number_of_experiments} experiments.\\n' \\\n f'\\n' \\\n f'That is {seconds_per_experiment} seconds ({minutes_per_experiment} minutes) ' \\\n f'per experiment.\\n\\n\\n'\n else:\n\n seconds_per_experiment = seconds_for_all_successful_experiments / (interrupted_experiment_number - 1) if \\\n interrupted_experiment_number > 1 else 0\n minutes_per_experiment = seconds_per_experiment / 60\n time_description = f'Experiment #{interrupted_experiment_number} of {self.number_of_experiments} ' \\\n f'experiments was interrupted. All experiments before it took ' \\\n f'{seconds_for_all_successful_experiments} seconds ({minutes_since_start} minutes).\\n' \\\n f'\\n' \\\n f'It therefore took on average {seconds_per_experiment} seconds ' \\\n f'({minutes_per_experiment} minutes) per experiment.\\n\\n\\n'\n if os.path.exists(file_name):\n file = open(file_name, \"a\")\n file.write(time_description)\n else:\n print('Description file does not exist')\n\n def get_experiment_description(self):\n return f\"# Environment\\n\" \\\n f\"case = {self.case},\\n\" \\\n f\"noise_coefficient = {self.noise_coefficient},\\n\" \\\n f\"smoothing = {self.smoothing},\\n\" \\\n f\"smoothing_factor_latency = {self.smoothing_factor_latency},\\n\" \\\n f\"smoothing_factor_recommendation_ratio = {self.smoothing_factor_recommendation_ratio},\\n\" \\\n f\"action_space_type = \\'{self.action_space_type}\\',\\n\" \\\n f\"discrete_step_size = {self.discrete_step_size},\\n\" \\\n f\"reward_function = \\'linear\\',\\n\" \\\n f\"response_time = \\'{self.response_time}\\',\\n\" \\\n f\"threshold = {self.threshold},\\n\" \\\n f\"workload_pattern = \\'{self.workload_pattern}\\',\\n\" \\\n f\"initial_concurrency = {self.initial_concurrency},\\n\" \\\n f\"amplitude = {self.amplitude},\\n\" \\\n f\"high_concurrency = {self.high_concurrency},\\n\" \\\n f\"period_length = {self.period_length},\\n\" \\\n f\"drifts = {self.drifts},\\n\" \\\n f\"# Discretizer\\n\" \\\n f\"continuous_dimmer_granularity = {self.continuous_dimmer_granularity},\\n\" \\\n f\"granularity_recommendation_ratio = {self.granularity_recommendation_ratio},\\n\" \\\n f\"granularity_latency = {self.granularity_latency},\\n\" \\\n f\"upper_bound_latency = {self.upper_bound_latency},\\n\" \\\n f\"max_concurrency = {self.max_concurrency},\\n\" \\\n f\"include_recommendation_ratio = {self.include_recommendation_ratio},\\n\" 
\\\n f\"include_latency = {self.include_latency},\\n\" \\\n f\"# Agent\\n\" \\\n f\"exploration_rate = {self.exploration_rate},\\n\" \\\n f\"learning_rate = {self.learning_rate},\\n\" \\\n f\"discount_factor = {self.discount_factor},\\n\" \\\n f\"n = {self.n},\\n\" \\\n f\"# Experiment execution\\n\" \\\n f\"execution_time_steps = {self.execution_time_steps},\\n\" \\\n f\"number_of_experiments = {self.number_of_experiments}\\n\" \\\n f\"\\n\"\n\n def create_error_file(self):\n create_file_with_counter(f'{c.ERROR_FILES_DIRECTORY}error.txt', self.get_error_description())\n\n def get_error_description(self):\n return f'### There was an error while executing the experiment with the following parameters: ### \\n' \\\n f'\\n' \\\n f'{self.get_experiment_description()}\\n' \\\n f'\\n' \\\n f'\\n' \\\n f'### You can find the experiment at {self.get_discretizer_directory_name()} ### \\n' \\\n f'\\n' \\\n f'\\n' \\\n f'### This was the error message: ### \\n' \\\n f'\\n' \\\n f'{traceback.format_exc()}\\n'\n\n def __init__(self,\n run_separate_eval,\n # Environment\n case,\n noise_coefficient,\n smoothing,\n smoothing_factor_latency,\n smoothing_factor_recommendation_ratio,\n action_space_type,\n discrete_step_size,\n response_time,\n threshold,\n workload_pattern,\n initial_concurrency,\n amplitude,\n high_concurrency,\n period_length,\n drifts,\n # Discretizer\n continuous_dimmer_granularity,\n granularity_recommendation_ratio,\n granularity_latency,\n upper_bound_latency,\n max_concurrency,\n include_recommendation_ratio,\n include_latency,\n # Agent\n exploration_rate,\n learning_rate,\n discount_factor,\n n,\n # Experiment execution\n execution_time_steps,\n number_of_experiments):\n\n self.run_separate_eval = run_separate_eval\n # Environment\n self.case = case\n self.noise_coefficient = noise_coefficient\n self.smoothing = smoothing\n self.smoothing_factor_latency = smoothing_factor_latency\n self.smoothing_factor_recommendation_ratio = smoothing_factor_recommendation_ratio\n self.action_space_type = action_space_type\n self.discrete_step_size = discrete_step_size\n self.response_time = response_time\n self.threshold = threshold\n self.workload_pattern = workload_pattern\n self.initial_concurrency = initial_concurrency\n self.amplitude = amplitude\n self.high_concurrency = high_concurrency\n self.period_length = period_length\n self.drifts = drifts\n # Discretizer\n self.continuous_dimmer_granularity = continuous_dimmer_granularity\n self.granularity_recommendation_ratio = granularity_recommendation_ratio\n self.granularity_latency = granularity_latency\n self.upper_bound_latency = upper_bound_latency\n self.max_concurrency = max_concurrency\n self.include_recommendation_ratio = include_recommendation_ratio\n self.include_latency = include_latency\n # Agent\n self.exploration_rate = exploration_rate\n self.learning_rate = learning_rate\n self.discount_factor = discount_factor\n self.n = n\n # Experiment execution\n self.execution_time_steps = execution_time_steps\n self.number_of_experiments = number_of_experiments\n\n self.d = RubisDiscretizer(\n granularity_recommendation_ratio=self.granularity_recommendation_ratio,\n granularity_latency=self.granularity_latency,\n upper_bound_latency=self.upper_bound_latency,\n max_concurrency=self.max_concurrency,\n include_recommendation_ratio=self.include_recommendation_ratio,\n include_latency=self.include_latency,\n action_space_type=self.action_space_type,\n continuous_dimmer_granularity=self.continuous_dimmer_granularity\n )\n\n self.agent = 
DynaQAgent(\n exploration_rate=self.exploration_rate,\n learning_rate=self.learning_rate,\n discount_factor=self.discount_factor,\n number_of_states=self.d.number_of_states,\n number_of_actions=self.d.number_of_actions,\n n=self.n\n )\n\n self.optimum_dimmers_and_rewards = {}\n\n self.plot_files_directory = f'{self.get_discretizer_directory_name()}plots/'\n\n def run(self, mode, zoom_time_steps_array=None):\n if mode == 'execute':\n self.execute_and_evaluate_all()\n elif mode == 'create plots and metrics':\n self.evaluate_all(create_plots=True, create_metrics=True)\n elif mode == 'create plots':\n self.evaluate_all(create_plots=True, create_metrics=False)\n elif mode == 'create zoom plots':\n self.evaluate_all(create_plots=False, create_metrics=False, zoom_time_steps_array=zoom_time_steps_array)\n elif mode == 'create metrics':\n self.evaluate_all(create_plots=False, create_metrics=True)\n elif mode == 'summarize metrics':\n self.summarize_metrics()\n\n # Execution and Evaluation\n\n def execute_and_evaluate_all(self):\n print(f'Starting experiment {self.get_discretizer_directory_name()}')\n experiment_start_time = time.time()\n\n create_dir_if_not_exists(self.plot_files_directory)\n self.create_description_file()\n\n last_experiment_completed_time = experiment_start_time\n try:\n for experiment_number in range(self.number_of_experiments):\n self.agent.reset()\n experiment_number += 1\n experiment_directory_name = self.get_single_experiment_directory_name(experiment_number)\n\n training_file_name = f\"{experiment_directory_name}training.csv\"\n evaluation_file_name = f\"{experiment_directory_name}evaluation.csv\"\n\n try:\n\n if not os.path.exists(training_file_name) or (not os.path.exists(evaluation_file_name) and self.run_separate_eval):\n try:\n os.makedirs(experiment_directory_name)\n except OSError:\n print(f'WARNING dir exists: {experiment_directory_name}')\n\n training_environment = self.make_environment(\n save_log=True,\n log_filename=training_file_name\n )\n\n if self.run_separate_eval:\n evaluation_environment = self.make_environment(\n save_log=True,\n log_filename=evaluation_file_name\n )\n else:\n evaluation_environment = None\n\n self.execute_single_experiment(training_environment=training_environment,\n evaluation_environment=evaluation_environment)\n\n self.evaluate_single_experiment(experiment_number=experiment_number)\n\n last_experiment_completed_time = time.time()\n else:\n # print(f\"Experiment has already been done: {experiment_directory_name}\")\n pass\n except KeyboardInterrupt:\n # Deleting the data and possible plots of the currently executing experiment if it gets stopped\n # before finishing\n print(f'Deleting incomplete data for experiment #{experiment_number}: '\n f'{self.get_discretizer_directory_name()}\\n\\n')\n\n self.save_time_in_description_file(\n seconds_for_all_successful_experiments=last_experiment_completed_time - experiment_start_time,\n experiments_interrupted=True,\n interrupted_experiment_number=experiment_number)\n os.remove(training_file_name)\n os.remove(evaluation_file_name)\n os.rmdir(self.get_single_experiment_directory_name(experiment_number))\n\n for dir_path, dir_names, file_names in os.walk(self.plot_files_directory):\n for file_name in file_names:\n if file_name.startswith(f'plot_eval_{experiment_number}') \\\n or file_name.startswith(f'plot_train_{experiment_number}'):\n os.remove(self.plot_files_directory + file_name)\n\n raise\n\n except AssertionError:\n self.create_error_file()\n self.summarize_metrics()\n except Exception:\n 
self.create_error_file()\n self.summarize_metrics()\n except:\n self.create_error_file()\n self.summarize_metrics()\n else:\n self.summarize_metrics()\n seconds_since_start = (time.time() - experiment_start_time)\n print(f\"Finished all experiments: {self.get_discretizer_directory_name()}\")\n self.save_time_in_description_file(seconds_since_start)\n\n # Execution\n\n def execute_single_experiment(self, training_environment, evaluation_environment):\n if self.run_separate_eval:\n self.execute_single_train_and_eval(training_environment, evaluation_environment)\n else:\n self.execute_single_train(training_environment)\n\n def execute_single_train_and_eval(self, training_environment, evaluation_environment):\n training_state = self.d.discretize_state(training_environment.reset())\n evaluation_state = self.d.discretize_state(evaluation_environment.reset())\n\n for time_step in range(self.execution_time_steps):\n training_state = self.training_step(training_environment, training_state)\n evaluation_state = self.evaluation_step(evaluation_environment, evaluation_state)\n\n def execute_single_train(self, training_environment):\n training_state = self.d.discretize_state(training_environment.reset())\n\n for time_step in range(self.execution_time_steps):\n training_state = self.training_step(training_environment, training_state)\n\n def training_step(self, training_environment, state):\n action_index = self.agent.choose_action(state)\n action = self.d.get_continuous_action_value(action_index)\n next_state_continuous, reward, done, info = training_environment.step(action)\n next_state = self.d.discretize_state(next_state_continuous)\n self.agent.update_q(state, action_index, reward, next_state)\n return next_state\n\n def evaluation_step(self, evaluation_environment, state):\n action_index = self.agent.choose_action_greedy(state)\n action = self.d.get_continuous_action_value(action_index)\n next_state_continuous, reward, done, info = evaluation_environment.step(action)\n next_state = self.d.discretize_state(next_state_continuous)\n return next_state\n\n # Evaluation\n\n def summarize_metrics(self):\n metrics_file_name_eval, metrics_file_name_train = self.get_metrics_file_names()\n if self.run_separate_eval:\n summarize_metrics(metrics_file_name_eval)\n summarize_metrics(metrics_file_name_train)\n\n def get_metrics_file_names(self):\n metrics_file_name_eval = f'{self.get_discretizer_directory_name()}metrics_eval.csv'\n metrics_file_name_train = f'{self.get_discretizer_directory_name()}metrics_train.csv'\n return metrics_file_name_eval, metrics_file_name_train\n\n def evaluate_all(self, create_plots, create_metrics, zoom_time_steps_array=None):\n print(f'Evaluating experiment {self.get_discretizer_directory_name()}\\n'\n f'create_plots={create_plots}, create_metrics={create_metrics}')\n experiment_start_time = time.time()\n\n metrics_file_name_eval, metrics_file_name_train = self.get_metrics_file_names()\n\n if create_metrics:\n with open(metrics_file_name_eval, \"w\"):\n pass\n with open(metrics_file_name_train, \"w\"):\n pass\n\n if create_plots and os.path.exists(self.get_discretizer_directory_name()):\n create_dir_if_not_exists(self.plot_files_directory)\n\n for experiment_number in range(self.number_of_experiments):\n experiment_number += 1\n self.evaluate_single_experiment(experiment_number, create_plots, create_metrics, zoom_time_steps_array)\n\n self.summarize_metrics()\n\n seconds_since_start = (time.time() - experiment_start_time)\n minutes_since_start = seconds_since_start / 60\n\n 
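# editorial annotation: the \"w\"-mode opens at the top of this method truncate any existing\n        # metrics files, so re-running an evaluation rebuilds them from the per-experiment CSV logs.\n        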
print(f'Evaluated experiment {self.get_discretizer_directory_name()}\\n'\n f'in {seconds_since_start} seconds ({minutes_since_start} minutes)\\n'\n f'create_plots={create_plots}, create_metrics={create_metrics}')\n\n def evaluate_single_experiment(self, experiment_number, create_plots=True, create_metrics=True,\n zoom_time_steps_array=None):\n experiment_directory_name = self.get_single_experiment_directory_name(experiment_number)\n\n training_file_name = f\"{experiment_directory_name}training.csv\"\n evaluation_file_name = f\"{experiment_directory_name}evaluation.csv\"\n\n if os.path.exists(training_file_name) or (os.path.exists(evaluation_file_name) and self.run_separate_eval):\n training_env_object = self.make_environment_object(log_filename=training_file_name)\n\n if self.run_separate_eval:\n evaluation_env_object = self.make_environment_object(log_filename=evaluation_file_name)\n eval_dataframe = get_and_save_dataframe_with_optimums(\n csv_file_name=evaluation_file_name,\n env_object=evaluation_env_object,\n dimmer_granularity=self.continuous_dimmer_granularity,\n threshold=self.threshold,\n response_time=self.response_time,\n drifts=self.drifts,\n execution_time_steps=self.execution_time_steps,\n optimum_dimmers_and_rewards=self.optimum_dimmers_and_rewards\n )\n else:\n eval_dataframe = None\n\n train_dataframe = get_and_save_dataframe_with_optimums(\n csv_file_name=training_file_name,\n env_object=training_env_object,\n dimmer_granularity=self.continuous_dimmer_granularity,\n threshold=self.threshold,\n response_time=self.response_time,\n drifts=self.drifts,\n execution_time_steps=self.execution_time_steps,\n optimum_dimmers_and_rewards=self.optimum_dimmers_and_rewards\n )\n\n if zoom_time_steps_array is not None and experiment_number <= 3:\n for zoom_time_steps in zoom_time_steps_array:\n zoom_from_time_step = zoom_time_steps[0]\n zoom_to_time_step = zoom_time_steps[1]\n\n zoom = f'zoom_{zoom_from_time_step}_to_{zoom_to_time_step}'\n\n number_of_time_steps = zoom_to_time_step - zoom_from_time_step\n\n if self.run_separate_eval:\n eval_df_zoom = eval_dataframe[zoom_from_time_step < eval_dataframe[c.TIME_STEP_COL_NAME]]\n eval_df_zoom = eval_df_zoom[eval_df_zoom[c.TIME_STEP_COL_NAME] <= zoom_to_time_step]\n\n self.save_plots(dataframe=eval_df_zoom, mode=f'eval_{zoom}',\n experiment_number=experiment_number,\n resolution=200, number_of_time_steps=number_of_time_steps)\n\n train_df_zoom = train_dataframe[zoom_from_time_step < train_dataframe[c.TIME_STEP_COL_NAME]]\n train_df_zoom = train_df_zoom[train_df_zoom[c.TIME_STEP_COL_NAME] <= zoom_to_time_step]\n\n self.save_plots(dataframe=train_df_zoom, mode=f'train_{zoom}',\n experiment_number=experiment_number,\n resolution=200, number_of_time_steps=number_of_time_steps)\n\n if create_plots and experiment_number <= 3:\n if self.run_separate_eval:\n self.save_plots(dataframe=eval_dataframe, mode='eval', experiment_number=experiment_number,\n resolution=200, number_of_time_steps=self.execution_time_steps)\n self.save_plots(dataframe=train_dataframe, mode='train', experiment_number=experiment_number,\n resolution=200, number_of_time_steps=self.execution_time_steps)\n\n if create_metrics:\n if self.run_separate_eval:\n self.save_metrics(dataframe=eval_dataframe, mode='eval', experiment_number=experiment_number)\n self.save_metrics(dataframe=train_dataframe, mode='train', experiment_number=experiment_number)\n\n def save_plots(self, dataframe, mode, experiment_number, resolution, number_of_time_steps):\n if resolution is not None:\n smoothing = 
f'res={resolution}'\n            plot_dataframe = smooth_data_for_plots(\n                dataframe,\n                number_of_time_steps,\n                resolution\n            )\n        else:\n            smoothing = f'no_smoothing'\n            plot_dataframe = dataframe\n\n        save_plot(\n            smoothed_dataframe_with_optimums=plot_dataframe,\n            plot_file_name=f'{self.plot_files_directory}plot_{mode}_{experiment_number}_{smoothing}_dimmer_diff.png',\n            drifts=self.drifts,\n            response_time=self.response_time,\n            include_opt_dimmer=True,\n            include_opt_reward=False\n        )\n\n        save_plot(\n            smoothed_dataframe_with_optimums=plot_dataframe,\n            plot_file_name=f'{self.plot_files_directory}plot_{mode}_{experiment_number}_{smoothing}_reward_diff.png',\n            drifts=self.drifts,\n            response_time=self.response_time,\n            include_opt_dimmer=False,\n            include_opt_reward=True\n        )\n\n    def save_metrics(self, dataframe, mode, experiment_number):\n\n        save_metrics(\n            dataframe=dataframe,\n            metrics_file_name=f'{self.get_discretizer_directory_name()}metrics_{mode}.csv',\n            experiment_number=experiment_number,\n            drifts=self.drifts,\n            execution_time_steps=self.execution_time_steps\n        )\n","repo_name":"BimoBu/bachelor-thesis","sub_path":"experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":30918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"4998895780","text":"suits = ['S','H','C','D']\n\ntable = [[False]*14 for i in range(4)]\n\nn = int(input())\n\nfor loop in range(n):\n    mark,num = input().split()\n\n    num = int(num)\n\n    if mark == 'S':\n        table[0][num] = True\n    elif mark == 'H':\n        table[1][num] = True\n    elif mark == 'C':\n        table[2][num] = True\n    else:\n        table[3][num] = True\n\nfor i in range(4):\n    for j in range(1,14):\n        if table[i][j] == False:\n            print(suits[i], j)","repo_name":"SoraMause/myAOJSample","sub_path":"python/introduce/FindingMissingCards.py","file_name":"FindingMissingCards.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"27056131818","text":"import sys\n\n#print(sys.argv[1:])\noperation = sys.argv[1]\n\ndef sum_numbers(nums):\n    return sum(nums)\n\nnumbers_to_sum = [int(i) for i in sys.argv[2:]]\n\nif operation == \"add\":\n    print(sum_numbers(numbers_to_sum))\nelif operation == \"multiply\":\n    def multiplylist(inputlist):\n        output = 1\n        correctoutput = False\n        for element in inputlist:\n            if type(element) ==int:\n                correctoutput = True\n                output = output * element\n        return output if correctoutput else None\n    print(multiplylist(numbers_to_sum))\n\n\n#\n\nimport sys\n\narguments = sys.argv[1:]\ninput_file = \"\"\noutput_file = \"\"\n\nfor i, arg in enumerate(arguments):\n    if arg == \"-i\":\n        input_file = arguments[i+1]\n    elif arg == \"-o\":\n        output_file = arguments[i+1]\n\nwith open(input_file, mode=\"r\") as file:\n    lines = file.readlines()\n    lines = [int(line.strip()) for line in lines]\n\nwith open(output_file, mode=\"w\") as output:\n    output.write(str(sum(lines)))","repo_name":"OCCOHOCOREX/HuangXinren-Portfolio","sub_path":"Weekly Tasks/Week8/HuangXinren-Week8-Task.py","file_name":"HuangXinren-Week8-Task.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"16155138158","text":"收盘价列表=[]\nfor i in range(0, total):\n    收盘价=get(\"收盘价\", i)\n    收盘价列表.append(收盘价)\n\n列表=[]\ndiff列表 = [] #新建diff列表\ndea列表 = [] #新建dea列表\n上根买线 = 0\n上根卖线 = 0\n\nfor i in range(0, total):\n    
列表.append(SLOPE(收盘价列表,21,i)*20+收盘价列表[i])\n\nfor i in range(0, total): \n 最高价=get(\"最高价\", i) #获取最高价.\n 开盘价=get(\"开盘价\", i) #获取开盘价.\n 最低价=get(\"最低价\", i) #获取最低价.\n 收盘价=get(\"收盘价\", i) #获取收盘价.\n 上根K线收盘价 = 0\n if i > 0:\n 上根K线收盘价 = get(\"收盘价\", i - 1)\n hevo.save(\"K\", 最高价, 开盘价, 最低价, 收盘价, i) #将四个价按顺序储存在\"K\"对象上.\n 是阳线 = 上根K线收盘价 > 收盘价\n 是阴线 = 上根K线收盘价 < 收盘价\n 涨跌幅 = 0\n if 上根K线收盘价 > 0:\n if 是阳线:\n 涨跌幅 = abs(((收盘价 - 上根K线收盘价) / 上根K线收盘价) * 100)\n else:\n 涨跌幅 = abs(((上根K线收盘价 - 收盘价) / 上根K线收盘价) * 100)\n 卖线=EMA(列表,55,i)\n 买线=EMA(收盘价列表,2,i)\n save(\"买线\",买线,i)\n save(\"卖线\",卖线,i)\n if 买线 > 卖线 and 上根买线 <= 上根卖线:\n text(最低价, i, \"突破看多\", 2)\n text(开盘价 + ((收盘价 - 开盘价) / 2), i, \"     ——止损位\", 2)\n if 买线 < 卖线 and 上根买线 >= 上根卖线:\n text(最高价, i, \"突破看空\", 7)\n text(最高价 - ((开盘价 - 收盘价) / 2), i, \"     ——止损位\", 7)\n if i > 55:\n 上根买线 = 买线\n 上根卖线 = 卖线\ndraw.kline(\"K\") #画出K线.\n","repo_name":"lisniuse/chart","sub_path":"抓趋势.py","file_name":"抓趋势.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28693927558","text":"__author__ = 'riot'\n\nimport pygame\nfrom circuits import Event\n\n\n# App/GUI management events\n\nclass guiresize(Event):\n def __init__(self, width, height, *args):\n super(guiresize, self).__init__(*args)\n self.height = height\n self.width = width\n\n\nclass guiquit(Event):\n def __init__(self, reason, *args):\n super(guiquit, self).__init__(*args)\n self.reason = reason\n\n\n# Control events\n\nclass controlinput(Event):\n def __init__(self, inputevent, *args):\n super(controlinput, self).__init__(*args)\n self.input = inputevent\n\n\nclass joystickchange(controlinput):\n def __init__(self, *args):\n super(joystickchange, self).__init__(*args)\n\n\n# Midi events\n\nclass resetcclock(Event):\n pass\n\n\nclass midicc(Event):\n def __init__(self, cc, data=None, force=False, *args):\n super(midicc, self).__init__(*args)\n\n self.cc = cc\n self.data = data\n self.force = force\n\n\nclass midiinput(Event):\n def __init__(self, data, *args):\n super(midiinput, self).__init__(*args)\n self.code = data[0][0]\n self.data = data\n\n\nclass midinote(Event):\n def __init__(self, note, velocity, midi_channel, length=None, *args):\n super(midinote, self).__init__(*args)\n self.note = note\n self.velocity = velocity\n self.midi_channel = midi_channel\n self.length = length\n self.start = 0\n\n\n# Router events\n\nclass loadscene(Event):\n def __init__(self, scene, *args):\n super(loadscene, self).__init__(*args)\n self.scene = scene\n\n\nclass loadprogram(Event):\n def __init__(self, program, *args):\n super(loadprogram, self).__init__(*args)\n self.program = program\n\n\nclass saveprogram(loadprogram):\n pass\n\n\n# Keyboard\n\nclass keypress(Event):\n def __init__(self, ev, *args):\n super(keypress, self).__init__(*args)\n self.ev = ev\n\n def __repr__(self):\n\n if len(self.channels) > 1:\n channels = repr(self.channels)\n elif len(self.channels) == 1:\n channels = str(self.channels[0])\n else:\n channels = \"\"\n\n data = \"%s %s\" % (\n \", \".join(repr(arg) for arg in self.args),\n \", \".join(\"%s=%s\" % (k, repr(v)) for k, v in self.kwargs.items())\n )\n\n key = pygame.key.name(self.ev.key)\n ev = self.ev\n\n return \"<%s_%s_%s[%s] (%s)>\" % (self.name, key, ev, channels, data)\n","repo_name":"ri0t/avio","sub_path":"avio/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":2435,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"61"} 
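+# editorial note: a hedged, self-contained sketch (not a dataset record) of the chunked
+# multiprocessing pattern used by the run_parallel.py record earlier in this file, where
+# get_split_num() sizes the chunks before each one is mapped over its own Pool. The
+# worker square(), the items list, and the pool size 4 are illustrative stand-ins, not
+# names taken from the record (which maps parallel_dryrun_workflows / parallel_sqoop_output).
+import math
+from multiprocessing import Pool
+
+import numpy as np
+
+
+def get_split_num(size, max_pool_size):
+    """Mirrors the record's helper: enough chunks that none exceeds the pool size."""
+    return int(math.ceil(float(size) / float(max_pool_size))) if size > max_pool_size else 1
+
+
+def square(x):
+    # stand-in worker; must be a top-level function so Pool.map can pickle it
+    return x * x
+
+
+if __name__ == '__main__':
+    items = list(range(10))
+    # 10 items with a max pool size of 4 -> 3 chunks of sizes 4, 3, 3
+    for chunk in np.array_split(items, get_split_num(len(items), 4)):
+        with Pool(processes=len(chunk)) as pool:
+            print(pool.map(square, list(chunk)))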
+{"seq_id":"22446663611","text":"def calculate_user_sum():\n desiredNumber = input(\"To what number would you like to calculate? \")\n total = 0\n averageCounter = 0\n for sum in range(1, int(desiredNumber) + 1):\n total += sum\n averageCounter += 1\n total /= averageCounter\n print(total)\n\ncalculate_user_sum()","repo_name":"naspapas/kata_challenges","sub_path":"2023/Seek programming exercise prep/ex19 + 20.py","file_name":"ex19 + 20.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14613585237","text":"from l_layer_nn import L_Layer_NN\nimport data_service, plotting_service, dnn_service\n\n\ntrain_x, train_y, test_x, test_y, classes = data_service.load_and_preprocess_data()\n\nn_x = train_x.shape[0] # num_px * num_px * 3\nn_h1 = 20\nn_h2 = 7\nn_h3 = 5\nn_y = train_y.shape[0]\nlayers_dims = [n_x, n_h1, n_h2, n_h3, n_y] # 4-layer model\n\nlearning_rate = 0.0075\nnum_iterations = 2501\n\nl_layer_NN = L_Layer_NN()\nparameters, costs = l_layer_NN.fit(train_x, train_y, layers_dims, learning_rate=learning_rate,\n num_iterations=num_iterations, print_cost=True)\n\nplotting_service.plot_learning_curve(costs, learning_rate)\n\ntrain_predictions = l_layer_NN.predict(parameters, train_x)\ntest_predictions = l_layer_NN.predict(parameters, test_x)\n\ntrain_accuracy = dnn_service.accuracy(train_predictions, train_y)\ntest_accuracy = dnn_service.accuracy(test_predictions, test_y)\n\nprint(\"Train/Test accuracy: \", train_accuracy, test_accuracy)\n\n\n","repo_name":"boyko11/DNN-DLAI","sub_path":"Runner_L_Layer.py","file_name":"Runner_L_Layer.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36645040318","text":"class Solution:\n def longestPalindrome(self, s: str) -> str:\n \n def traverseOutwards(s,left,right)-> int:\n if left>right or not s:\n return 0\n while(left>=0 and right end-start:\n start = i - (length-1)//2\n end = i + length//2\n return s[start:end+1]\n \n","repo_name":"gagankaushal/LeetCodeSolutions","sub_path":"5_Longest_Palindromic_Substring.py","file_name":"5_Longest_Palindromic_Substring.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10466398912","text":"# coding: utf-8\nfrom datetime import datetime, timedelta\nfrom django.contrib.auth.models import User\nfrom django.db import models\n\nfrom main.algorithms import fill_store_houses, create_resource_orders\n\n\n# /////////////////////////////////////////////////task1////////////////////////////////////////////////////////////////\n\nclass Volonter(models.Model):\n GENDER_CHOICES = (\n (u'М', 'Male'),\n (u'Ж', 'Female'),\n )\n OBLAST_CHOICES = (\n (u'Вінницька область',u'Вінницька область'),\n (u'Волинська область',u'Волинська область'),\n (u'Дніпропетровська область',u'Дніпропетровська область'),\n (u'Донецька область',u'Донецька область'),\n (u'Закарпатська область',u'Закарпатська область'),\n (u'Запорізька область',u'Запорізька область'),\n (u'Івано-Франківська область',u'Івано-Франківська область'),\n (u'Київська область',u'Київська область'),\n (u'Кіровоградська область',u'Кіровоградська область'),\n (u'Луганська область',u'Луганська область'),\n (u'Львівська область',u'Львівська область'),\n (u'Миколаївська область',u'Миколаївська область'),\n (u'Одеська область',u'Одеська область'),\n (u'Полтавська 
область',u'Полтавська область'),\n (u'Рівненська область',u'Рівненська область'),\n (u'Сумська область',u'Сумська область'),\n (u'Тернопільська область',u'Тернопільська область'),\n (u'Харківська область',u'Харківська область'),\n (u'Херсонська область',u'Херсонська область'),\n (u'Хмельницька область',u'Хмельницька область'),\n (u'Черкаська область',u'Черкаська область'),\n (u'Чернігівська область',u'Чернігівська область'),\n (u'Чернівецька область',u'Чернівецька область'),\n (u'Автономна Республіка Крим',u'Автономна Республіка Крим'),\n )\n fio = models.CharField(verbose_name=u'ПІБ', max_length=200)\n birthday = models.DateField(verbose_name=u'Дата народження',null=True, blank=True)\n address = models.CharField(verbose_name=u'Область проживання',max_length=30, choices=OBLAST_CHOICES)\n telephone = models.CharField(verbose_name=u'Телефон',max_length=20)\n gender = models.CharField(verbose_name=u'Стать',max_length=1, choices=GENDER_CHOICES)\n activeted = models.BooleanField(default=False, verbose_name=u'Підтвердження')\n categories = models.ManyToManyField('CategoryResource', verbose_name=u'Категорія ресурсів')\n\n class Meta:\n verbose_name_plural = u'Волонтери'\n\n def __unicode__(self):\n return u\"%s, %s\" % (self.fio, self.address)\n\n\nclass Potential(models.Model):\n PERIOD_CHOICES=(\n (u'Кожного разу',u'Кожного разу'),\n (u'Кожної неділі',u'Кожної неділі'),\n (u'Кожного місяця',u'Кожного місяця'),\n )\n volonter = models.ForeignKey('Volonter', verbose_name=u'Волонтер')\n category = models.ForeignKey('CategoryResource', verbose_name=u'Категорія')\n period = models.CharField(max_length=30, verbose_name=u'Періодичність',choices=PERIOD_CHOICES)\n\n class Meta:\n verbose_name_plural = u'Потенціал'\n\n def __unicode__(self):\n return \"%s,%s\"%(self.volonter.fio, self.category.category)\n\n\nclass CategoryResource(models.Model):\n category = models.CharField(max_length=50, verbose_name=u'Категорія')\n\n class Meta:\n verbose_name_plural = u'Категорії ресурсів'\n\n def __unicode__(self):\n return self.category\n\n\nclass Resource(models.Model):\n category_resource = models.ForeignKey('CategoryResource',verbose_name=u'Категорія ресурса')\n name = models.CharField(max_length=30,verbose_name=u'Назва ресурсу')\n unit_of_mesure = models.CharField(max_length=30,verbose_name=u'Одиниця виміру')\n weight_one_unit = models.FloatField(verbose_name=u'Маса однієї одиниці', null=True)\n volume_of_one_unit = models.FloatField(verbose_name=u'Об\"єм однієї одиниці')\n price_one_unit = models.FloatField(verbose_name=u'Ціна однієї одиниці')\n\n class Meta:\n verbose_name_plural = u'Ресурси'\n\n def __unicode__(self):\n return u\"%s, %s\" % (self.category_resource.category, self.name)\n\n\n#///////////////////////////////////////////////////task2///////////////////////////////////////////////////////////////\n\nclass Need(models.Model):\n resource = models.ForeignKey('Resource',verbose_name=u'Потрібний ресурс')\n order = models.ForeignKey('Order', verbose_name=u'Замовлення', null=True)\n amount = models.IntegerField(verbose_name=u'Кількість ресурсу')\n finished = models.BooleanField(default=False, null=False)\n priority = models.IntegerField(verbose_name=u'Пріорітет', null=True)\n date_recomended = models.DateField(verbose_name=u'Дата рекомендованої доставки', null=True)\n\n class Meta:\n verbose_name_plural = u'Потреба'\n\n def __unicode__(self):\n return \"%s,%s,%s\"%(self.order.name, self.resource.name,self.amount)\n\n def save(self, force_insert=False, force_update=False, using=None,\n 
update_fields=None):\n created = self.pk is None\n if self.amount == 0:\n self.finished = True\n super(Need, self).save(force_insert, force_update, using,\n update_fields)\n if created:\n create_resource_orders(self)\n\n\nclass Order(models.Model):\n point_consuming = models.ForeignKey('PointOfConsuming', verbose_name=u'Точка споживання')\n name = models.CharField(max_length=30, verbose_name=u'Назва', null=True)\n date_order = models.DateField(auto_now_add=True,null = True)\n\n class Meta:\n verbose_name_plural = u'Замовлення'\n\n def __unicode__(self):\n return \"%s,%s,%s\"%(self.name, self.point_consuming.geography_point.address, self.date_order)\n\n\nclass Perfomance(models.Model):\n need = models.ForeignKey('Need', verbose_name=u'Потреба')\n amount = models.IntegerField(verbose_name=u'Кількість')\n date = models.DateField(verbose_name=u'Дата виконання')\n\n def __unicode__(self):\n return \"%s\"%(self.pk)\n\n class Meta:\n verbose_name_plural = u'Виконання'\n\n\n#////////////////////////////////////////////////////task3//////////////////////////////////////////////////////////////\n\nclass Shipping(models.Model):\n date_recomended = models.DateField(verbose_name=u'Дата відгрузки')\n\n class Meta:\n verbose_name_plural = u'Відгрузка'\n\n def __unicode__(self):\n return \"%s\"%self.date_recomended\n\n\nclass StoreHouse(models.Model):\n geography_point = models.OneToOneField('GeographyPoint', null=True, verbose_name=u'Географічна точка')\n volume = models.FloatField(verbose_name=u'Об\"єм складу')\n rent = models.IntegerField(verbose_name=u'Ціна за м^2')\n free_volume = models.FloatField(blank=True, null=True, verbose_name=u'Вільний об\"єм')\n\n class Meta:\n verbose_name_plural = u'Склади'\n\n def __unicode__(self):\n return u\"%s\" % self.geography_point.address\n\n def save(self, force_insert=False, force_update=False, using=None,\n update_fields=None):\n just_created = self.pk is None\n if just_created:\n self.free_volume = self.volume\n super(StoreHouse, self).save(force_insert, force_update, using, update_fields)\n if just_created:\n virtual_stocks = Stock.objects.filter(store_house__isnull=True)\n for stock in virtual_stocks:\n fill_store_houses(stock)\n\n\nclass DeliveryDetalization(models.Model):\n shipping = models.ForeignKey('Delivery', verbose_name=u'Доставка')\n storehouse = models.ForeignKey('StoreHouse', verbose_name=u'Склад')\n amount = models.IntegerField(verbose_name=u'Кількість')\n\n class Meta:\n verbose_name_plural = u'Деталі поставки'\n\n def __unicode__(self):\n return \"%s,%s,%s,\"%(self.shipping.pk, self.storehouse.geography_point.address, self.amount)\n\n\nclass Delivery(models.Model):\n volonter = models.ForeignKey('Volonter', verbose_name=u'Волонтер')\n resource = models.ForeignKey('Resource', verbose_name=u'Ресурс')\n amount = models.IntegerField( verbose_name=u'Кількість')\n date_recomended = models.DateField( verbose_name=u'Дата рекомендована')\n date_real = models.DateField( verbose_name=u'Дата реальна')\n\n class Meta:\n verbose_name_plural = u'Поставка'\n\n def __unicode__(self):\n return \"%s,%s,%s\"%(self.volonter.fio, self.resource.name, self.amount)\n\n\nclass ShippingDetalization(models.Model):\n shipping = models.ForeignKey('Shipping',verbose_name=u'Відгрузка')\n stock = models.ForeignKey('Stock', verbose_name=u'Запас')\n amount = models.IntegerField(verbose_name=u'Кількість')\n\n class Meta:\n verbose_name_plural = u'Деталізація відгрузки'\n\n # def __unicode__(self):\n # return \"%s,%s,%s\"%(self.shipping, self.stock, self.amount)\n\n\nclass 
Stock(models.Model):\n store_house = models.ForeignKey('StoreHouse', null=True, verbose_name=u'Склад')\n resource = models.ForeignKey('Resource', verbose_name=u'Ресурс')\n amount = models.IntegerField(null=True, verbose_name=u'Кількість одиниць ресурсу')\n\n def __unicode__(self):\n return u\"%s, %s\"%(self.store_house, self.resource.name)\n\n class Meta:\n verbose_name_plural = u'Запас'\n\n def save(self, force_insert=False, force_update=False, using=None,update_fields=None):\n created = self.pk is None\n super(Stock, self).save(force_insert, force_update, using, update_fields)\n if created:\n fill_store_houses(self)\n\n\n#/////////////////////////////////////////////////////task4/////////////////////////////////////////////////////////////\n\nclass ResourceOrder(models.Model):\n resource = models.ForeignKey('Resource',verbose_name=u'Потрібний ресурс')\n amount = models.IntegerField(verbose_name=u'Кількість ресурсу')\n finished = models.BooleanField(default=False,verbose_name=u'Виконано:')\n date_created = models.DateTimeField(auto_now_add=True, verbose_name=u'Дата створення')\n date_finished = models.DateTimeField(verbose_name=u'Дата повного виконання')\n\n class Meta:\n verbose_name_plural = u'Замовлення ресурсів'\n\n def __unicode__(self):\n return \"%s,%s,%s,\"%(self.resource.name, self.date_created, self.date_finished)\n\n\nclass KindOfTransport(models.Model):\n name = models.CharField(max_length=100, verbose_name=u'Назва')\n category = models.CharField(max_length=30, verbose_name=u'Категорія')\n speed = models.IntegerField( verbose_name=u'Максимальна швидкість')\n expences_fuel = models.IntegerField( verbose_name=u'Витрати пального')\n volume_transport = models.FloatField( verbose_name=u'Об\"єм')\n max_weight = models.IntegerField( verbose_name=u'Грузопід\"ємніcть')\n passability = models.FloatField( verbose_name=u'Проходимість')\n\n class Meta:\n verbose_name_plural = u'Вид транспорту'\n\n def __unicode__(self):\n return \"%s,%s\"%(self.name,self.category)\n\n\nclass Transport(models.Model):\n kind_of_transport = models.ForeignKey('KindOfTransport', verbose_name=u'Вид автомобіля')\n number = models.CharField(max_length=10, verbose_name=u'Держ. 
номер')\n\n class Meta:\n verbose_name_plural = u'Транспорт'\n\n def __unicode__(self):\n return \"%s,%s,%s\"%(self.kind_of_transport.category, self.kind_of_transport.passability, self.number)\n\n\nclass Employment(models.Model):\n transport = models.ForeignKey('Transport', verbose_name=u'Транспорт')\n date_start = models.DateField(auto_now_add=True, verbose_name=u'Дата початку')\n date_finish = models.DateField(verbose_name=u'Дата закінчення')\n\n class Meta:\n verbose_name_plural = u'Зайнятість'\n\n def __unicode__(self):\n return \"%s,%s,%s\"%(self.transport.number, self.date_start, self.date_finish)\n\n\nclass Trip(models.Model):\n roat = models.ForeignKey('Roat', verbose_name=u'Маршрут',null=True)\n transport = models.ForeignKey('Transport', verbose_name=u'Транспорт')\n shipping = models.ForeignKey('Shipping', verbose_name=u'Відгрузка')\n date_start = models.DateField(auto_now_add=True,verbose_name=u'Дата початку')\n perfomance = models.BooleanField(default=False,verbose_name=u'Виконаність')\n\n class Meta:\n verbose_name_plural = u'Поїздка'\n\n def __unicode__(self):\n return \"%s,%s,%s\"%(self.transport.number, self.shipping, self.date_start)\n\n\nclass Way(models.Model):\n YANDEX_OR_YOU=(\n (True,u'Яндекс'),\n (False,u'Вручну'),\n )\n point_from = models.ForeignKey('GeographyPoint', verbose_name=u'Звідки', related_name='point_from')\n point_to = models.ForeignKey('GeographyPoint', verbose_name=u'Куди', related_name='point_to')\n roat_length = models.IntegerField(verbose_name=u'Довжина')\n danger = models.FloatField(verbose_name=u'Небезпечність')\n passability = models.IntegerField(verbose_name=u'Проходимість')\n load = models.IntegerField(verbose_name=u'Заповненість')\n yandex_or_byhand = models.BooleanField(verbose_name=u'Вид створення доріг', choices=YANDEX_OR_YOU, default=YANDEX_OR_YOU[0][0])\n\n class Meta:\n verbose_name_plural = u'Дороги'\n\n def __unicode__(self):\n return \"%s,%s,%s,\"%(self.point_from.address,self.point_to.address,self.roat_length)\n\n def save(self, force_insert=False, force_update=False, using=None,\n update_fields=None, ignore=False):\n just_created = self.pk is None\n if just_created and ignore is False:\n way = Way(\n point_from=self.point_to,\n point_to=self.point_from,\n roat_length = self.roat_length,\n danger = self.danger,\n passability = self.passability,\n load = self.load\n )\n way.save(ignore=True)\n super(Way, self).save(force_insert, force_update, using,\n update_fields)\n\n\nclass Roat(models.Model):\n name = models.CharField(max_length=100,verbose_name=u'Назва', null=True)\n storehouse = models.ForeignKey('StoreHouse', verbose_name=u'Від складу',null=True)\n point_consuming = models.ForeignKey('PointOfConsuming', verbose_name=u'До пункту', null=True)\n transport = models.ForeignKey('Transport',verbose_name=u'Рекомендований транспортний засіб', null=True, max_length=50)\n wasys = models.ManyToManyField('Way', verbose_name=u'Проміжні дороги',blank=True)\n\n class Meta:\n verbose_name_plural = u'Маршрут'\n\n def __unicode__(self):\n return \"%s\" % (self.name)\n\n\nclass MakingRoat(models.Model):\n roat = models.ForeignKey('Roat', verbose_name=u'Маршрут')\n way = models.ForeignKey('Way', verbose_name=u'Дорога')\n number = models.IntegerField(verbose_name=u'Номер по порядку',null=True)\n\n def __unicode__(self):\n return \"%s\"%(self.roat.name)\n\n class Meta:\n verbose_name_plural = u'Створення маршруту'\n\n\n#//////////////////////////////////////////////////////task5////////////////////////////////////////////////////////////\n\nclass 
GeographyPoint(models.Model):\n x = models.FloatField()\n y = models.FloatField()\n address = models.CharField(max_length=100)\n\n class Meta:\n verbose_name_plural = u'Географічні точки'\n\n def __unicode__(self):\n return u\"%s,%s\"%(self.pk, self.address)\n\n\nclass PointOfConsuming(models.Model):\n user = models.OneToOneField(User, null=True, related_name='point_consuming')\n geography_point = models.OneToOneField('GeographyPoint', null=True, verbose_name=u'Географічна точка')\n fio = models.CharField(max_length=50, null = False,verbose_name=u'ПІБ заказника')\n telephone = models.CharField(max_length=20, null = False, verbose_name=u'Телефон заказника')\n\n class Meta:\n verbose_name_plural = u'Споживач'\n\n def __unicode__(self):\n return \"%s, %s\" % (self.fio, self.geography_point.address)\n","repo_name":"2vitalik/TUI_2015","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":17887,"program_lang":"python","lang":"uk","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"72565749314","text":"'''\n\nDescription:\n\nGiven a binary tree, return the sum of values of nodes with even-valued grandparent. (A grandparent of a node is the parent of its parent, if it exists.)\n\nIf there are no nodes with an even-valued grandparent, return 0.\n\n \n\nExample 1:\n\n\n\nInput: root = [6,7,8,2,7,1,3,9,null,1,4,null,null,null,5]\nOutput: 18\nExplanation: The red nodes are the nodes with even-value grandparent while the blue nodes are the even-value grandparents.\n \n\nConstraints:\n\nThe number of nodes in the tree is between 1 and 10^4.\nThe value of nodes is between 1 and 100.\n\n'''\n\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n def sumEvenGrandparent(self, root: TreeNode) -> int:\n \n if not root:\n # empty tree\n return 0\n \n # initialization:\n summation = 0\n \n # root node, -1 for init, -1 for init\n dfs_stack = [ (root, -1, -1) ]\n \n \n # pre-order DFS traveral\n while dfs_stack:\n \n node, parent, grandparent = dfs_stack.pop()\n \n if grandparent & 1 == 0:\n summation += node.val\n \n if node.right:\n dfs_stack.append( (node.right, node.val, parent) )\n \n if node.left:\n dfs_stack.append( (node.left, node.val, parent) )\n \n \n return summation\n\n\n\n# n : the number of nodes in binary tree\n\n## Time Complexity: O( n )\n#\n# The overhead in time is the cost of pre-order DFS traversal, which is if O( n ).\n\n## Space Complexity: O( n )\n#\n# The overhead in space is the storage for dfs_stack, which is of O( n ).\n\n\n\ndef test_bench():\n\n root = TreeNode(6)\n\n root.left = TreeNode(7)\n root.right = TreeNode(8)\n\n root.left.left = TreeNode(2)\n root.left.right = TreeNode(7)\n\n root.right.left = TreeNode(1)\n root.right.right = TreeNode(3)\n\n root.left.left.left = TreeNode(9)\n root.left.right.left = TreeNode(1)\n root.left.right.right = TreeNode(4)\n root.right.right.right = TreeNode(5)\n\n # expected output:\n '''\n 18\n '''\n\n print( Solution().sumEvenGrandparent(root) )\n\n return \n\n\n\nif __name__ == '__main__':\n \n test_bench()","repo_name":"brianchiang-tw/leetcode","sub_path":"No_1315_Sum of Nodes with Even-Valued Grandparent/sum_of_nodes_with_even_valued_grandparent_by_dfs_on_stack.py","file_name":"sum_of_nodes_with_even_valued_grandparent_by_dfs_on_stack.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"61"} 
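The stack-based traversal above pushes each child together with its parent's value, so the even-grandparent check reduces to a single parity test (the -1 sentinel is odd and never matches). For contrast, a recursive sketch of the same idea (hypothetical, not part of the original file) that passes the parent and grandparent values down the call tree:

def sum_even_grandparent_recursive(node: TreeNode, parent: int = -1, grandparent: int = -1) -> int:
    # an empty subtree contributes nothing
    if not node:
        return 0
    # count this node when its grandparent's value is even
    total = node.val if grandparent % 2 == 0 else 0
    # this node's value becomes the children's parent, and the old parent
    # becomes their grandparent
    return (total
            + sum_even_grandparent_recursive(node.left, node.val, parent)
            + sum_even_grandparent_recursive(node.right, node.val, parent))

For the tree built in test_bench() this returns 18, matching the stack-based version.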
+{"seq_id":"6778156054","text":"import io\n\n# ------------------------------------------------------\n\ndef main():\n with io.open('input1.txt','r', encoding='utf-8') as f:\n a = [l.split() for l in f]\n\n hpos = 0\n depth = 0\n\n for x in a:\n change = int(x[1])\n\n if x[0] == 'forward':\n hpos += change\n elif x[0] == 'down':\n depth += change\n elif x[0] == 'up':\n depth -= change\n else:\n print('Unknown command', x)\n\n print(hpos, depth, hpos * depth)\n\nif __name__ == '__main__':\n main()\n","repo_name":"JeffreyMFarley/adventofcode","sub_path":"2021/day2/solve1.py","file_name":"solve1.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2107775364","text":"'''\nPassing agruments:(3 ways)\n-->Positional arguments:\n An argument is a variable, value or object passed to a function or method as\ninput. Positional arguments are arguments that need to be included in the\nproper position or order\n-->Default arguments:\n Default arguments in Python functions are those arguments that take default\nvalues if no explicit values are passed to these arguments from the function\ncall.\n-->Keyword arguments:\n Keyword arguments (or named arguments) are values that,\nwhen passed into a function, are identifiable by specific parameter names.\n'''\nprint(\".....Positional Arguments:\".center(50))\n\n# Concatination of string:\n\ndef full_name(fname, mname, lname):\n name = fname + mname + lname\n return name\nfullname = full_name(\"Karthi\", \" keyan\", \" Rameshbabu\")\nprint(\"The Fullname of the candidate:\", fullname)\n\n# Biggest number among three values:\ndef big_num(a, b, c):\n if a > b:\n if a > c:\n return a\n elif b > c:\n return b\n else:\n return c\nx = big_num(10, 21, 19) # scenario 1\ny = big_num(25, 12, 18) # Scenario 2\nz = big_num(12, 34, 48) # Scenario 3\nprint(\"a is greater than b & c\", x)\nprint(\"b is greater than a & c\", y)\nprint(\"c is greater than a & b\", z)\n\nprint(\"**Default arguments**\".center(50))\n\n# Ex 1:\n#def full_name(fname, mname=\"keyan\", lname):\n# non-default parameter follows default parameter\ndef full_name(fname, mname=\"keyan\", lname=\"rameshbau\"):\n name = fname + mname + lname\n return name\nfullname = full_name(\"Karthi\")\nprint(\"The Fullname of the candidate:\", fullname)\n\n# Factorial number:\ndef fac_num(n = 10):\n factorial=1\n for i in range(1,n + 1):\n factorial = factorial*i\n return factorial\nnum = fac_num()\nprint(\"The factorial value of the given number\", num)\n\n# sum of even numbers:\ndef even_num(st, end=10):\n sum = 0\n for even in range (st, end + 1):\n if even % 2 == 0:\n print(even)\n sum += even\n return sum\nsum_even = even_num(1)\nprint(\"The sum of the even numbers\", sum_even)\n\n# Key word arguments:\n\n# Employee's Profile:\n\ndef profile(empid, empnum, systemno, offnum ):\n print(offnum, systemno, empid, empnum)\nemp = profile(empid=\"MCS_0038\", empnum=\"9786910190\",systemno=\"101\",offnum=\"7904134297\")\n\ndef order_list(list1):\n list1 = [10, 22, 9, 13, 14, 8]\n list1.sort()\n print(list1)\nlist2 = order_list(list1=[19, 23, 3, 4, 5])\nprint(list2)\n\ndef profile(empid, empnum, systemno, offnum, empname ):\n print(empname, empid, systemno, empnum, offnum)\nemp = profile(empnum=\"9786910190\", systemno=\"101\", empid=\"MCS-0038\",\n offnum=\"7904134297\", 
empname=\"Karthi\")\n","repo_name":"Karthi2245/MCS_0038_Core_Python.","sub_path":"_10_Functions/My_notes/_6_passing_arguments.py","file_name":"_6_passing_arguments.py","file_ext":"py","file_size_in_byte":2652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38614257504","text":"import dataclasses\nfrom collections.abc import Callable\nfrom enum import Enum, auto\n\nimport cocotb\nfrom cocotb.triggers import RisingEdge\n\nfrom .component import Component\nfrom .transaction import BaseTransaction\n\n\nclass MonitorEvent(Enum):\n CAPTURE = auto()\n\n\n@dataclasses.dataclass()\nclass MonitorStatistics:\n captured: int = 0\n\n\nclass BaseMonitor(Component):\n \"\"\"\n Component for sampling transactions from an interface matching the\n implementation's signalling protocol.\n\n :param tb: Handle to the testbench\n :param io: Handle to the BaseIO interface\n :param clk: Clock signal to use when driving/sampling the interface\n :param rst: Reset signal to use when driving/sampling the interface\n :param random: Random number generator to use (optional)\n :param name: Unique name for this component instance (optional)\n \"\"\"\n\n def __init__(self, *args, **kwds) -> None:\n super().__init__(*args, **kwds)\n self.stats = MonitorStatistics()\n cocotb.start_soon(self._monitor_loop())\n\n async def _monitor_loop(self) -> None:\n \"\"\"Main loop for monitoring transactions on the interface\"\"\"\n await self.tb.ready()\n await RisingEdge(self.clk)\n self._ready.set()\n\n def _capture(obj: BaseTransaction):\n self.stats.captured += 1\n self.publish(MonitorEvent.CAPTURE, obj)\n\n while True:\n await self.monitor(_capture)\n\n async def monitor(self, capture: Callable) -> None:\n \"\"\"\n Placeholder monitor, this should be overridden by a child class to match\n the signalling protocol of the interface's implementation.\n\n :param capture: Function to call whenever a transaction is captured\n \"\"\"\n del capture\n raise NotImplementedError(\"monitor is not implemented on BaseMonitor\")\n","repo_name":"Intuity/forastero","sub_path":"forastero/monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"9414993456","text":"from pyroute2 import NDB\nfrom pyroute2.common import uifname\n\n# unique interface names\nvlan_host = uifname()\nvlan_interface = uifname()\n\nwith NDB() as ndb:\n\n\n (\n ndb.interfaces.create(ifname=vlan_host, kind='dummy')\n .set('state', 'up')\n .commit()\n )\n (\n ndb.interfaces.create(\n ifname=vlan_interface,\n kind='vlan',\n link=ndb.interfaces[vlan_host],\n vlan_id=101\n )\n .set('mtu', 1400)\n .set('state', 'up')\n .add_ip('10.251.0.1/24')\n .add_ip('10.251.0.2/24')\n .commit()\n )\n\n for i in (vlan_interface, vlan_host):\n ndb.interfaces[i].remove().commit()\n","repo_name":"svinota/pyroute2","sub_path":"examples/ndb/create_vlan.py","file_name":"create_vlan.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":888,"dataset":"github-code","pt":"61"} +{"seq_id":"26644058914","text":"'''\nАлгоритм Эдмондса-Карпа решает задачу поиска максимального потока в графе между\nвершинами s и t. 
Данный алгоритм основан на методе Форда-Фалкерсона с\nприменением алгоритма поиска в ширину BFS для вычисления увеличивающего пути.\n'''\n\n\ntry:\n from Graph.graph import Graph, Vertex\nexcept ImportError:\n import sys\n sys.path.append('..')\n from Graph.graph import Graph, Vertex\n\nfrom collections import deque\n\n\ndef edmonds_karp(graph: Graph, start: Vertex, end: Vertex) -> int:\n f = 0\n re_net = get_residual_network(graph, start, end)\n minimum, visited = BFS(re_net, re_net.start, re_net.end)\n while minimum is not None:\n for i in range(len(visited)-1):\n u = visited[i]\n v = visited[i+1]\n for j in range(len(u.neighbours)):\n vertex, weight = u.neighbours[j]\n if vertex == v:\n weight -= minimum\n if weight == 0:\n u.neighbours.pop(j)\n else:\n u.neighbours[j] = vertex, weight\n break\n f += minimum\n minimum, visited = BFS(re_net, re_net.start, re_net.end)\n return f\n\n\ndef get_residual_network(graph: Graph, start: Vertex, end: Vertex) -> Graph:\n residual_network = Graph(weight=True)\n ind = {}\n for i in range(len(graph.vertices)):\n val = graph.vertices[i].value\n vertex = Vertex(val)\n residual_network.add_vertex(vertex)\n ind[val] = i\n for vertex in graph.vertices:\n for v, weight in vertex.neighbours:\n vertex1 = residual_network.vertices[ind[vertex.value]]\n vertex2 = residual_network.vertices[ind[v.value]]\n residual_network.add_edge(vertex1, vertex2, weight)\n residual_network.start = residual_network.vertices[ind[start.value]]\n residual_network.end = residual_network.vertices[ind[end.value]]\n return residual_network\n\n\ndef BFS(graph: Graph, start: Vertex, end: Vertex) -> None:\n distance_parent_weight = {\n vertex: (None, None, None) for vertex in graph.vertices\n }\n distance_parent_weight[start] = (0, None, float('inf'))\n verties_process = deque()\n verties_process.append(start)\n while len(verties_process) != 0:\n u = verties_process.popleft()\n for v, weight in u.neighbours:\n if distance_parent_weight[v][0] is None:\n distance_parent_weight[v] = (\n distance_parent_weight[u][0] + 1, u, weight\n )\n verties_process.append(v)\n\n if distance_parent_weight[end][0] is None:\n return None, None\n else:\n visited = []\n vertex = end\n minimum = distance_parent_weight[vertex][2]\n while vertex is not None:\n visited.insert(0, vertex)\n minimum = min(minimum, distance_parent_weight[vertex][2])\n vertex = distance_parent_weight[vertex][1]\n return minimum, visited\n\n\ndef main():\n values = ['s', 'v1', 'v2', 'v3', 'v4', 't']\n edges = [\n (0, 1, 16),\n (0, 2, 13),\n (1, 3, 12),\n (2, 1, 4),\n (2, 4, 14),\n (3, 2, 9),\n (3, 5, 20),\n (4, 3, 7),\n (4, 5, 4),\n ]\n graph = Graph(values=values, edges=edges, weight=True)\n max_f = edmonds_karp(graph, graph.vertices[0], graph.vertices[-1])\n print(max_f)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Leonid-T/Algorithms","sub_path":"Graphs/Edmonds-Karp/edmonds_karp.py","file_name":"edmonds_karp.py","file_ext":"py","file_size_in_byte":3616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2071879940","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport re\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom datetime import date, timedelta, datetime\r\nimport os\r\nfrom CanadianHaltsResumptions import historicalHaltsInformation,historicalResumptionsInformation,cleanTimeHalts,cleanTimeResumptions,adjTimeResumption\r\nfrom UnitedStatesHaltsResumptions import cleanNamesAmerican,downloadAmericanHalts\r\nfrom StockNews import 
newsDataframe,stockInformation,recentData\r\nfrom CandlestickPlots import plotStock\r\nfrom GeneratePDF import generatePDF,finalGeneratePDF\r\nimport mpld3\r\n\r\npd.set_option('display.max_rows', None)\r\npd.set_option('display.max_columns', None)\r\npd.set_option('display.width', None)\r\npd.set_option('display.max_colwidth', None)\r\n\r\ndef createURL(complete_url):\r\n urlHaltResumptionList = []\r\n soup = (BeautifulSoup(requests.get(complete_url).content, 'html.parser'))\r\n\r\n if len((soup.find_all('div', attrs={'class': 'item_name'}))) == 0:\r\n print('Length of item name is zero!')\r\n else:\r\n for foo in soup.find_all('div', attrs={'class': 'item_name'}):\r\n bar = foo.find('a', attrs={'class': 'itemlink'})\r\n final_url = (bar.get('href'))\r\n urlHaltResumptionList.append(final_url)\r\n\r\n return urlHaltResumptionList\r\n\r\n\r\ndef splitURL(urlHaltResumptionList):\r\n haltsHistorical = []\r\n resumptionsHistorical = []\r\n othersHistorical = []\r\n\r\n for value in urlHaltResumptionList:\r\n if 'Resumption' in value:\r\n resumptionsHistorical.append(value)\r\n elif 'Halt' in value:\r\n haltsHistorical.append(value)\r\n else:\r\n othersHistorical.append(value)\r\n\r\n for url in othersHistorical:\r\n if 'Halt' in BeautifulSoup(requests.get(url).content, 'html.parser').find('title').text:\r\n haltsHistorical.append(url)\r\n elif 'Resumption' in BeautifulSoup(requests.get(url).content, 'html.parser').find('title').text:\r\n resumptionsHistorical.append(url)\r\n\r\n return haltsHistorical,resumptionsHistorical\r\n\r\ndef createHalts(HistoricalFinalHaltsData):\r\n # 1. Halt Tickers\r\n halt_df_list = []\r\n\r\n for i in range(len(HistoricalFinalHaltsData)):\r\n for dict_ in HistoricalFinalHaltsData[i]:\r\n halt_df_list.append(dict_)\r\n\r\n dfHalt = pd.DataFrame.from_dict(halt_df_list)\r\n clean_ticker_halts = []\r\n clean_reason_halts = []\r\n\r\n\r\n for ticker in dfHalt.ticker:\r\n try:\r\n ticker = ticker.split('.')[0]\r\n clean_ticker_halts.append(re.sub(r'[^A-Za-z0-9 ]+', '', ticker.strip()))\r\n except Exception as e:\r\n clean_ticker_halts.append(ticker)\r\n\r\n for reason in dfHalt.reason:\r\n try:\r\n clean_reason_halts.append(re.sub(r'[^A-Za-z0-9 ]+', '', reason.strip()))\r\n except Exception as e:\r\n clean_reason_halts.append(reason)\r\n\r\n dfHalt['ticker'] = clean_ticker_halts\r\n dfHalt['reason'] = clean_reason_halts\r\n\r\n dfHalt = dfHalt.dropna() # Contains ALl Halted Tickers in Dataframe format\r\n dfHalt['date'] = cleanTimeHalts(dfHalt)\r\n dfHalt['date'] = pd.to_datetime(dfHalt['date'])\r\n\r\n return dfHalt\r\n\r\ndef createResumptions(HistoricalFinalResumptionData):\r\n # 2. 
Resumption Tickers\r\n resumption_df_list = []\r\n for i in range(len(HistoricalFinalResumptionData)):\r\n for dict_ in HistoricalFinalResumptionData[i]:\r\n resumption_df_list.append(dict_)\r\n\r\n clean_ticker_resumptions = []\r\n dfResumption = pd.DataFrame.from_dict(resumption_df_list)\r\n for ticker in dfResumption.ticker:\r\n try:\r\n ticker = ticker.split('.')[0]\r\n clean_ticker_resumptions.append(re.sub(r'[^A-Za-z0-9 ]+', '', ticker.strip()))\r\n except Exception as e:\r\n clean_ticker_resumptions.append(ticker)\r\n\r\n dfResumption['ticker'] = clean_ticker_resumptions\r\n dfResumption = dfResumption.dropna()\r\n\r\n dfResumption['clean_time'] = cleanTimeResumptions(dfResumption)[0]\r\n dfResumption['date'] = cleanTimeResumptions(dfResumption)[1]\r\n\r\n adjTimeResumption(dfResumption)\r\n resumptiondf_final = dfResumption.drop(['timestamp', 'resumption_timestamp', 'clean_time'], axis=1)\r\n resumptiondf_final.date = pd.to_datetime(resumptiondf_final.date) # Contains ALl Resumed Tickers in Dataframe format\r\n\r\n return resumptiondf_final\r\n\r\ndef companyInformationCanada(df):\r\n for company in df.company:\r\n try:\r\n print(company)\r\n recent_df = recentData(company)\r\n recent_df = recent_df.reset_index()\r\n print(stockInformation(company))\r\n (plotStock(recent_df,company))\r\n except Exception as e:\r\n pass\r\n\r\ndef companyInformationUSA(companylist):\r\n\r\n for company in companylist:\r\n try:\r\n print(company)\r\n recent_df = recentData(company)\r\n recent_df = recent_df.reset_index()\r\n print(stockInformation(company))\r\n (plotStock(recent_df,company))\r\n except Exception as e:\r\n pass\r\n\r\n\r\nif __name__ == '__main__':\r\n currentDate = date.today()\r\n path_wkhtmltopdf = r'C:\\Program Files\\wkhtmltopdf\\bin\\wkhtmltopdf.exe'\r\n path_html_directory = r\"E:\\News GCP\\Scripts\\PDF\\Halts&Resumptions\"\r\n html_file = path_html_directory + fr'\\Small Cap Reports\\HTMLs\\Report_{currentDate}.html'\r\n pdf_file = path_html_directory + fr'\\Small Cap Reports\\PDFs\\Report_{currentDate}.pdf'\r\n\r\n #1. 
Canada Names\r\n\r\n HistoricalFinalHaltsData = []\r\n HistoricalFinalResumptionData = []\r\n\r\n base_url_historical = 'https://iiroc.mediaroom.com/index.php?o='\r\n parameter1 = \"0\"\r\n parameter2 = \"2021\"\r\n url_year = '&s=2429&year='\r\n\r\n complete_url = base_url_historical + parameter1 + url_year + parameter2\r\n\r\n urlHaltResumptionList = createURL(complete_url)\r\n\r\n haltsHistorical = splitURL(urlHaltResumptionList)[0]\r\n resumptionsHistorical = splitURL(urlHaltResumptionList)[1]\r\n\r\n finalHalts = historicalHaltsInformation(haltsHistorical)\r\n finalResumptions = historicalResumptionsInformation(resumptionsHistorical)\r\n\r\n HistoricalFinalHaltsData.append(finalHalts)\r\n HistoricalFinalResumptionData.append(finalResumptions)\r\n\r\n haltdf = createHalts(HistoricalFinalHaltsData)\r\n resumptiondf = createResumptions(HistoricalFinalResumptionData)\r\n\r\n # print(\"------------------------------------------------------------\")\r\n # print('Halt Dataframe: Canada')\r\n # print(haltdf)\r\n # print(\"------------------------------------------------------------\")\r\n # print('Resumption Dataframe: Canada')\r\n # print(resumptiondf)\r\n\r\n # USA Names:\r\n currentDate = date.today()\r\n path_to_save = os.path.dirname(os.path.abspath(__file__)) + fr\"\\UsData\\{currentDate}.csv\"\r\n # uncomment to download data for the day\r\n downloadAmericanHalts(path_to_save)\r\n\r\n dfUSA = pd.read_csv(path_to_save)\r\n dfUSA = dfUSA[(dfUSA.Reason == 'News pending') | (dfUSA.Reason == 'LULD pause')]\r\n\r\n # print(\"------------------------------------------------------------\")\r\n # print('USA Halts and Resumptions')\r\n # print(dfUSA)\r\n\r\n ##########################\r\n halt_companies_list = []\r\n todayTickersHalts = list(haltdf[haltdf['date'] > f'{currentDate} 00:00:00'].ticker)\r\n\r\n for i,ticker in enumerate(todayTickersHalts):\r\n try:\r\n halt_companies_list.append((newsDataframe(ticker).to_html()))\r\n except Exception as e:\r\n pass\r\n\r\n resumptions_companies_list = []\r\n todayTickersResumptions = list(resumptiondf[resumptiondf['date'] > f'{currentDate} 00:00:00'].ticker)\r\n for ticker in todayTickersResumptions:\r\n try:\r\n resumptions_companies_list.append((newsDataframe(ticker).to_html()))\r\n except Exception as e:\r\n pass\r\n\r\n generatePDF(path_wkhtmltopdf,path_html_directory,html_file,pdf_file,haltdf.to_html(),resumptiondf.to_html(),dfUSA.to_html())\r\n\r\n with open(fr\"E:\\News GCP\\Scripts\\PDF\\Halts&Resumptions\\Report_{currentDate}.html\",\"a\") as htmlFile:\r\n htmlFile.write('
<br><h1>Canadian Halted Tickers Specific News</h1><br>')\r\n\r\n        for df in halt_companies_list:\r\n            htmlFile.write('<br>')\r\n            htmlFile.write('<br>')\r\n            htmlFile.write(df)\r\n\r\n        htmlFile.write('<br><h1>Canadian Resumed Tickers Specific News</h1><br>')\r\n        for df in resumptions_companies_list:\r\n            htmlFile.write('<br>')\r\n            htmlFile.write('<br>')\r\n            htmlFile.write(df)\r\n\r\n        htmlFile.write('<br><h1>Canadian Resumed Tickers Financial Metrics</h1><br>')\r\n        for company in resumptiondf.company:\r\n            try:\r\n                htmlFile.write(f'<br><h2>Company Name: {company}</h2><br>')\r\n                df = stockInformation(company).to_html()\r\n                htmlFile.write(df)\r\n\r\n                recent_df = recentData(company)\r\n                recent_df = recent_df.reset_index()\r\n                figure = plotStock(recent_df, company)\r\n                html_str = mpld3.fig_to_html(figure)\r\n                htmlFile.write(html_str)\r\n            except Exception as e:\r\n                htmlFile.write(f'<br><h2>NONE</h2><br>')\r\n\r\n        htmlFile.write('<br><h1>United States Halted/Resumed Tickers Financial Metrics</h1><br>')\r\n        for company in cleanNamesAmerican(dfUSA):\r\n            try:\r\n                htmlFile.write(f'<br><h2>Company Name: {company}</h2><br>')\r\n                df = stockInformation(company).to_html()\r\n                htmlFile.write(df)\r\n\r\n                recent_df = recentData(company)\r\n                recent_df = recent_df.reset_index()\r\n                figure = plotStock(recent_df, company)\r\n                html_str = mpld3.fig_to_html(figure)\r\n                htmlFile.write(html_str)\r\n            except Exception as e:\r\n                htmlFile.write(f'<br><h2>NONE</h2><br>')\r\n\r\n        finalGeneratePDF(path_wkhtmltopdf, html_file, pdf_file)\r\n\r\n\r\n\r\n\r\n    # print(\"------------------------------------------------------------\")\r\n    # print('News Section')\r\n    # print('Canada News:')\r\n    # print(\"Canada Halted Tickers News:\")\r\n    #\r\n    # todayTickersHalts = list(haltdf[haltdf['date'] > f'{currentDate} 00:00:00'].ticker)\r\n    #\r\n    # for ticker in todayTickersHalts:\r\n    #     try:\r\n    #         print('--------------------------------')\r\n    #         print(f'Ticker: {ticker}')\r\n    #         print((newsDataframe(ticker)))\r\n    #     except Exception as e:\r\n    #         pass\r\n    #\r\n    # print(\" \")\r\n    # print(\"Canada Resumed Tickers News:\")\r\n    #\r\n    # todayTickersResumptions = list(resumptiondf[resumptiondf['date'] > f'{currentDate} 00:00:00'].ticker)\r\n    #\r\n    # for ticker in todayTickersResumptions:\r\n    #     try:\r\n    #         print('--------------------------------')\r\n    #         print(f'Ticker: {ticker}')\r\n    #         print((newsDataframe(ticker)))\r\n    #     except Exception as e:\r\n    #         pass\r\n    #\r\n    # print(\"------------------------------------------------------------\")\r\n    # print(\"USA Tickers News:\")\r\n    #\r\n    # todayTickers = list(dfUSA.Symbol)\r\n    # print('Under Construction')\r\n    #\r\n    # print(\"-------------------------------------------\")\r\n    # print(\"Canada News Information:\")\r\n    # companyInformationCanada(resumptiondf)\r\n    #\r\n    # print(\"------------------------------------------------------------\")\r\n    # print(\"USA News Information:\")\r\n    # companyInformationUSA(cleanNamesAmerican(dfUSA))\r\n","repo_name":"mpucci92/HaltsResumptionsCADUSA_PDFs","sub_path":"mainHaltsResumptions.py","file_name":"mainHaltsResumptions.py","file_ext":"py","file_size_in_byte":11507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"31254000618","text":"\nimport ROOT\nimport PyROOTUtils\nPyROOTUtils.style()\n\n# to prevent python garbage collection of objects that still need to be drawn\ncontainer = []\n\n\n\ndef content( mHBestFit=130.0, height1SigmaLabel=10.0, color=ROOT.kBlue ):\n\t# make mock curve\n\tlinSpace = [ 100 + i*60.0/300.0 for i in range(300) ]\n\tlikelihood = [ (x, (x-mHBestFit)*(x-mHBestFit)/25.0) for x in linSpace ]\n\tlikelihood_statOnly = [ (x, (x-mHBestFit)*(x-mHBestFit)/20.0) for x in linSpace ]\n\t# draw curve\n\tg = PyROOTUtils.Graph( likelihood, lineColor=color, lineWidth=2 )\n\tg.Draw()\n\tg_statOnly = PyROOTUtils.Graph( likelihood_statOnly, lineColor=color, lineWidth=2, lineStyle=ROOT.kDashed )\n\tg_statOnly.Draw()\n\n\t# find 68% CL interval from likelihood\n\tlow,high = g.getFirstIntersectionsWithValue(1.0)\n\tvLineM1Sigma = PyROOTUtils.DrawVLine( low, lineStyle=ROOT.kDashed, lineWidth=1, lineColor=color )\n\tvLineP1Sigma = PyROOTUtils.DrawVLine( high, lineStyle=ROOT.kDashed, lineWidth=1, lineColor=color )\n\thLine1Sigma = PyROOTUtils.DrawLine( low,height1SigmaLabel,high,height1SigmaLabel, lineWidth=5, lineColor=color )\n\tlabel1Sigma = PyROOTUtils.DrawText( mHBestFit,height1SigmaLabel, (\"#lower[-0.5]{%.1f^{%+.1f}_{%+.1f} GeV}\"%(mHBestFit,high-mHBestFit,low-mHBestFit)), NDC=False, textSize=0.025, halign=\"center\", valign=\"bottom\", textColor=color )\n\n\tcontainer.append( (g,g_statOnly,vLineM1Sigma,vLineP1Sigma,hLine1Sigma,label1Sigma) )\n\n\treturn (g,g_statOnly)\n\n\ndef main():\n\tcanvas = ROOT.TCanvas(\"c\",\"c\",600,450)\n\taxes = canvas.DrawFrame( 105,0, 160,12 )\n\taxes.GetXaxis().SetTitle( \"m_{H} [GeV]\" )\n\taxes.GetYaxis().SetTitle( \"-2 ln #Lambda\" )\n\n\thLine68 = PyROOTUtils.DrawHLine( 1.0, lineStyle=ROOT.kDashed, 
lineWidth=1 )\n\thLine95 = PyROOTUtils.DrawHLine( 4.0, lineStyle=ROOT.kDotted, lineWidth=1 )\n\n\tg,g_statOnly = content()\n\tg2,g2_statOnly = content( 123.0, 8.0, ROOT.kRed )\n\n\t# create black line proxies for legend\n\texpectedLine = PyROOTUtils.DrawHLine( -10.0, lineWidth=2 )\n\tstatOnlyLine = PyROOTUtils.DrawHLine( -10.0, lineWidth=2, lineStyle=ROOT.kDashed )\n\n\tl1 = PyROOTUtils.Legend( 0.94,0.5, textSize=0.035, valign=\"bottom\", halign=\"right\" )\n\tl1.AddEntry( expectedLine, \"expected\", \"L\" )\n\tl1.AddEntry( statOnlyLine, \"stat only\", \"L\" )\n\tl1.AddEntry( hLine95, \"95% CL\", \"L\" )\n\tl1.AddEntry( hLine68, \"68% CL\", \"L\" )\n\tl1.Draw()\n\n\tcanvas.SaveAs( 'doc/example.svg' )\n\tcanvas.SaveAs( 'doc/example.png' )\n\tcanvas.SaveAs( 'doc/example.eps' )\n\tprint( 'Image saved to doc/example.{svg|png|eps}.' )\n\n\nif __name__ == \"__main__\":\n\tmain()\n\n","repo_name":"svenkreiss/PyROOTUtils","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"86488310643","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport heapq as hp\n\n\n# In[43]:\n\n\nk = 3\n#con = [[5,1],[1,1],[4,0]]\ncon = [[5, 1], [2, 1], [1, 1], [8, 1], [10, 0], [5, 0]]\n\n\n# In[48]:\n\n\ndef luckBalance(k, contests):\n savBalnce = 0\n buffer = []\n for c in contests:\n #loose all unimportant games\n if c[1] == 0:\n savBalnce += c[0]\n elif k == 0:\n savBalnce -= c[0]\n else:\n if len(buffer) < k:\n hp.heappush(buffer,c[0])\n else:\n if c[0] > buffer[0]:\n savBalnce -= hp.heappop(buffer)\n hp.heappush(buffer,c[0])\n else:\n savBalnce -= c[0]\n savBalnce += sum(buffer)\n return savBalnce, buffer\n\n\n# In[81]:\n\n\narry = ['a','c','v','m', 'h']\nr = 0\narry[::-1][:r] + arry[:len(arry)-r] \n\n\n# In[49]:\n\n\nluckBalance(k, con)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"senoichi/Python-functions","sub_path":"HackerRank/Greedy Algorithms/luckBalance.py","file_name":"luckBalance.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34519437418","text":"myinput = []\nwith open('input2.txt') as f:\n myinput = f.read().splitlines()\n\nhorizontal = 0\ndepth = 0\naim = 0\n\nfor i in myinput:\n i = i.split()\n if i[0] == 'forward':\n horizontal += int(i[1])\n depth += aim * int(i[1])\n elif i[0] == 'up':\n aim -= int(i[1])\n elif i[0] == 'down':\n aim += int(i[1])\n\nprint(horizontal * depth)","repo_name":"YasiTL/aoc-2021","sub_path":"day2.py","file_name":"day2.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"22372492182","text":"import copy\nimport os\n\nfrom cloudinit import log as logging\nfrom cloudinit import sources\nfrom cloudinit import util\n\nfrom cloudinit.sources.helpers import openstack\n\nLOG = logging.getLogger(__name__)\n\n# Various defaults/constants...\nDEFAULT_IID = \"iid-dsconfigdrive\"\nDEFAULT_MODE = 'pass'\nDEFAULT_METADATA = {\n \"instance-id\": DEFAULT_IID,\n}\nVALID_DSMODES = (\"local\", \"net\", \"pass\", \"disabled\")\nFS_TYPES = ('vfat', 'iso9660')\nLABEL_TYPES = ('config-2',)\nPOSSIBLE_MOUNTS = ('sr', 'cd')\nOPTICAL_DEVICES = tuple(('/dev/%s%s' % (z, i) for z in POSSIBLE_MOUNTS\n for i in range(0, 2)))\n\n\nclass DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):\n def __init__(self, sys_cfg, distro, paths):\n 
super(DataSourceConfigDrive, self).__init__(sys_cfg, distro, paths)\n self.source = None\n self.dsmode = 'local'\n self.seed_dir = os.path.join(paths.seed_dir, 'config_drive')\n self.version = None\n self.ec2_metadata = None\n self._network_config = None\n self.network_json = None\n self.files = {}\n\n def __str__(self):\n root = sources.DataSource.__str__(self)\n mstr = \"%s [%s,ver=%s]\" % (root, self.dsmode, self.version)\n mstr += \"[source=%s]\" % (self.source)\n return mstr\n\n def get_data(self):\n found = None\n md = {}\n results = {}\n if os.path.isdir(self.seed_dir):\n try:\n results = read_config_drive(self.seed_dir)\n found = self.seed_dir\n except openstack.NonReadable:\n util.logexc(LOG, \"Failed reading config drive from %s\",\n self.seed_dir)\n if not found:\n for dev in find_candidate_devs():\n try:\n # Set mtype if freebsd and turn off sync\n if dev.startswith(\"/dev/cd\"):\n mtype = \"cd9660\"\n sync = False\n else:\n mtype = None\n sync = True\n results = util.mount_cb(dev, read_config_drive,\n mtype=mtype, sync=sync)\n found = dev\n except openstack.NonReadable:\n pass\n except util.MountFailedError:\n pass\n except openstack.BrokenMetadata:\n util.logexc(LOG, \"Broken config drive: %s\", dev)\n if found:\n break\n if not found:\n return False\n\n md = results.get('metadata', {})\n md = util.mergemanydict([md, DEFAULT_METADATA])\n user_dsmode = results.get('dsmode', None)\n if user_dsmode not in VALID_DSMODES + (None,):\n LOG.warn(\"User specified invalid mode: %s\", user_dsmode)\n user_dsmode = None\n\n dsmode = get_ds_mode(cfgdrv_ver=results['version'],\n ds_cfg=self.ds_cfg.get('dsmode'),\n user=user_dsmode)\n\n if dsmode == \"disabled\":\n # most likely user specified\n return False\n\n # TODO(smoser): fix this, its dirty.\n # we want to do some things (writing files and network config)\n # only on first boot, and even then, we want to do so in the\n # local datasource (so they happen earlier) even if the configured\n # dsmode is 'net' or 'pass'. 
To do this, we check the previous\n # instance-id\n prev_iid = get_previous_iid(self.paths)\n cur_iid = md['instance-id']\n if prev_iid != cur_iid and self.dsmode == \"local\":\n on_first_boot(results, distro=self.distro)\n\n # dsmode != self.dsmode here if:\n # * dsmode = \"pass\", pass means it should only copy files and then\n # pass to another datasource\n # * dsmode = \"net\" and self.dsmode = \"local\"\n # so that user boothooks would be applied with network, the\n # local datasource just gets out of the way, and lets the net claim\n if dsmode != self.dsmode:\n LOG.debug(\"%s: not claiming datasource, dsmode=%s\", self, dsmode)\n return False\n\n self.source = found\n self.metadata = md\n self.ec2_metadata = results.get('ec2-metadata')\n self.userdata_raw = results.get('userdata')\n self.version = results['version']\n self.files.update(results.get('files', {}))\n\n vd = results.get('vendordata')\n self.vendordata_pure = vd\n try:\n self.vendordata_raw = openstack.convert_vendordata_json(vd)\n except ValueError as e:\n LOG.warn(\"Invalid content in vendor-data: %s\", e)\n self.vendordata_raw = None\n\n try:\n self.network_json = results.get('networkdata')\n except ValueError as e:\n LOG.warn(\"Invalid content in network-data: %s\", e)\n self.network_json = None\n\n return True\n\n def check_instance_id(self):\n # quickly (local check only) if self.instance_id is still valid\n return sources.instance_id_matches_system_uuid(self.get_instance_id())\n\n @property\n def network_config(self):\n if self._network_config is None:\n if self.network_json is not None:\n self._network_config = convert_network_data(self.network_json)\n return self._network_config\n\n\nclass DataSourceConfigDriveNet(DataSourceConfigDrive):\n def __init__(self, sys_cfg, distro, paths):\n DataSourceConfigDrive.__init__(self, sys_cfg, distro, paths)\n self.dsmode = 'net'\n\n\ndef get_ds_mode(cfgdrv_ver, ds_cfg=None, user=None):\n \"\"\"Determine what mode should be used.\n valid values are 'pass', 'disabled', 'local', 'net'\n \"\"\"\n # user passed data trumps everything\n if user is not None:\n return user\n\n if ds_cfg is not None:\n return ds_cfg\n\n # at config-drive version 1, the default behavior was pass. That\n # meant to not use use it as primary data source, but expect a ec2 metadata\n # source. for version 2, we default to 'net', which means\n # the DataSourceConfigDriveNet, would be used.\n #\n # this could change in the future. If there was definitive metadata\n # that indicated presense of an openstack metadata service, then\n # we could change to 'pass' by default also. The motivation for that\n # would be 'cloud-init query' as the web service could be more dynamic\n if cfgdrv_ver == 1:\n return \"pass\"\n return \"net\"\n\n\ndef read_config_drive(source_dir):\n reader = openstack.ConfigDriveReader(source_dir)\n finders = [\n (reader.read_v2, [], {}),\n (reader.read_v1, [], {}),\n ]\n excps = []\n for (functor, args, kwargs) in finders:\n try:\n return functor(*args, **kwargs)\n except openstack.NonReadable as e:\n excps.append(e)\n raise excps[-1]\n\n\ndef get_previous_iid(paths):\n # interestingly, for this purpose the \"previous\" instance-id is the current\n # instance-id. 
cloud-init hasn't moved them over yet as this datasource\n # hasn't declared itself found.\n fname = os.path.join(paths.get_cpath('data'), 'instance-id')\n try:\n return util.load_file(fname).rstrip(\"\\n\")\n except IOError:\n return None\n\n\ndef on_first_boot(data, distro=None):\n \"\"\"Performs any first-boot actions using data read from a config-drive.\"\"\"\n if not isinstance(data, dict):\n raise TypeError(\"Config-drive data expected to be a dict; not %s\"\n % (type(data)))\n net_conf = data.get(\"network_config\", '')\n if net_conf and distro:\n LOG.debug(\"Updating network interfaces from config drive\")\n distro.apply_network(net_conf)\n files = data.get('files', {})\n if files:\n LOG.debug(\"Writing %s injected files\", len(files))\n for (filename, content) in files.items():\n if not filename.startswith(os.sep):\n filename = os.sep + filename\n try:\n util.write_file(filename, content, mode=0o660)\n except IOError:\n util.logexc(LOG, \"Failed writing file: %s\", filename)\n\n\ndef find_candidate_devs(probe_optical=True):\n \"\"\"Return a list of devices that may contain the config drive.\n\n The returned list is sorted by search order where the first item has\n should be searched first (highest priority)\n\n config drive v1:\n Per documentation, this is \"associated as the last available disk on the\n instance\", and should be VFAT.\n Currently, we do not restrict search list to \"last available disk\"\n\n config drive v2:\n Disk should be:\n * either vfat or iso9660 formated\n * labeled with 'config-2'\n \"\"\"\n # query optical drive to get it in blkid cache for 2.6 kernels\n if probe_optical:\n for device in OPTICAL_DEVICES:\n try:\n util.find_devs_with(path=device)\n except util.ProcessExecutionError:\n pass\n\n by_fstype = []\n for fs_type in FS_TYPES:\n by_fstype.extend(util.find_devs_with(\"TYPE=%s\" % (fs_type)))\n\n by_label = []\n for label in LABEL_TYPES:\n by_label.extend(util.find_devs_with(\"LABEL=%s\" % (label)))\n\n # give preference to \"last available disk\" (vdb over vda)\n # note, this is not a perfect rendition of that.\n by_fstype.sort(reverse=True)\n by_label.sort(reverse=True)\n\n # combine list of items by putting by-label items first\n # followed by fstype items, but with dupes removed\n candidates = (by_label + [d for d in by_fstype if d not in by_label])\n\n # We are looking for a block device or partition with necessary label or\n # an unpartitioned block device (ex sda, not sda1)\n devices = [d for d in candidates\n if d in by_label or not util.is_partition(d)]\n return devices\n\n\n# Used to match classes to dependencies\ndatasources = [\n (DataSourceConfigDrive, (sources.DEP_FILESYSTEM, )),\n (DataSourceConfigDriveNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),\n]\n\n\n# Return a list of data sources that match this set of dependencies\ndef get_datasource_list(depends):\n return sources.list_from_depends(depends, datasources)\n\n\n# Convert OpenStack ConfigDrive NetworkData json to network_config yaml\ndef convert_network_data(network_json=None):\n \"\"\"Return a dictionary of network_config by parsing provided\n OpenStack ConfigDrive NetworkData json format\n\n OpenStack network_data.json provides a 3 element dictionary\n - \"links\" (links are network devices, physical or virtual)\n - \"networks\" (networks are ip network configurations for one or more\n links)\n - services (non-ip services, like dns)\n\n networks and links are combined via network items referencing specific\n links via a 'link_id' which maps to a links 'id' field.\n\n To 
convert this format to network_config yaml, we first iterate over the\n links and then walk the network list to determine if any of the networks\n utilize the current link; if so we generate a subnet entry for the device\n\n We also need to map network_data.json fields to network_config fields. For\n example, the network_data links 'id' field is equivalent to network_config\n 'name' field for devices. We apply more of this mapping to the various\n link types that we encounter.\n\n There are additional fields that are populated in the network_data.json\n from OpenStack that are not relevant to network_config yaml, so we\n enumerate a dictionary of valid keys for network_yaml and apply filtering\n to drop these superflous keys from the network_config yaml.\n \"\"\"\n if network_json is None:\n return None\n\n # dict of network_config key for filtering network_json\n valid_keys = {\n 'physical': [\n 'name',\n 'type',\n 'mac_address',\n 'subnets',\n 'params',\n ],\n 'subnet': [\n 'type',\n 'address',\n 'netmask',\n 'broadcast',\n 'metric',\n 'gateway',\n 'pointopoint',\n 'mtu',\n 'scope',\n 'dns_nameservers',\n 'dns_search',\n 'routes',\n ],\n }\n\n links = network_json.get('links', [])\n networks = network_json.get('networks', [])\n services = network_json.get('services', [])\n\n config = []\n for link in links:\n subnets = []\n cfg = {k: v for k, v in link.items()\n if k in valid_keys['physical']}\n cfg.update({'name': link['id']})\n for network in [net for net in networks\n if net['link'] == link['id']]:\n subnet = {k: v for k, v in network.items()\n if k in valid_keys['subnet']}\n if 'dhcp' in network['type']:\n t = 'dhcp6' if network['type'].startswith('ipv6') else 'dhcp4'\n subnet.update({\n 'type': t,\n })\n else:\n subnet.update({\n 'type': 'static',\n 'address': network.get('ip_address'),\n })\n subnets.append(subnet)\n cfg.update({'subnets': subnets})\n if link['type'] in ['ethernet', 'vif', 'ovs', 'phy']:\n cfg.update({\n 'type': 'physical',\n 'mac_address': link['ethernet_mac_address']})\n elif link['type'] in ['bond']:\n params = {}\n for k, v in link.items():\n if k == 'bond_links':\n continue\n elif k.startswith('bond'):\n params.update({k: v})\n cfg.update({\n 'bond_interfaces': copy.deepcopy(link['bond_links']),\n 'params': params,\n })\n elif link['type'] in ['vlan']:\n cfg.update({\n 'name': \"%s.%s\" % (link['vlan_link'],\n link['vlan_id']),\n 'vlan_link': link['vlan_link'],\n 'vlan_id': link['vlan_id'],\n 'mac_address': link['vlan_mac_address'],\n })\n else:\n raise ValueError(\n 'Unknown network_data link type: %s' % link['type'])\n\n config.append(cfg)\n\n for service in services:\n cfg = service\n cfg.update({'type': 'nameserver'})\n config.append(cfg)\n\n return {'version': 1, 'config': config}\n","repo_name":"dmsimard/cloud-init","sub_path":"cloudinit/sources/DataSourceConfigDrive.py","file_name":"DataSourceConfigDrive.py","file_ext":"py","file_size_in_byte":14508,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"26927275433","text":"class Node():\n def __init__(self,data):\n self.data=data\n self.next=None\nclass LinkedList():\n def __init__(self):\n self.head=None\n def append(self,new_data):\n new_node = Node(new_data)\n if self.head==None:\n self.head=new_node\n return\n else:\n last=self.head\n while (last.next):\n last=last.next\n last.next=new_node\n\n\n","repo_name":"Aditi-Here/Python-Codes","sub_path":"LinkedLists/Add a node at the end.py","file_name":"Add a node at the 
end.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74943634115","text":"import random\nimport string\nfrom typing import List, Optional\n\nfrom fastapi import HTTPException, status\nfrom sqlalchemy import func, select, update\nfrom sqlalchemy.exc import NoResultFound\nfrom sqlalchemy.ext.asyncio import AsyncSession\n\nfrom core.config import SHORTEN_URL_LEN\nfrom models.link import Link, RequestsHistory, PrivacyStatusEnum\nfrom models.user import User\n\nfrom schemas.link import RequestsHistoryDB, FullInfoLinks\n\n\nasync def create_link(\n new_link: Link,\n user: User,\n session: AsyncSession\n):\n \"\"\"Create new link in DB\"\"\"\n new_link_dict = new_link.dict()\n new_link_dict['created_by'] = user\n new_link_dict['shorten_url'] = await make_shorten_url(session)\n if not user:\n new_link_dict['status'] = PrivacyStatusEnum.public\n db_link = Link(**new_link_dict)\n session.add(db_link)\n\n await session.commit()\n await session.refresh(db_link)\n\n return db_link\n\n\nasync def get_original_link_by_shorten(\n shorten_url: str,\n user: User,\n session: AsyncSession\n) -> Link:\n \"\"\"Получение записи из БД по короткой ссылке.\"\"\"\n statement = select(Link).where(Link.shorten_url == shorten_url)\n results = await session.execute(statement=statement)\n try:\n original_link = results.scalar_one()\n except NoResultFound:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND)\n if original_link.status == PrivacyStatusEnum.private and user != original_link.created_by:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND)\n return original_link\n\n\nasync def make_shorten_url(session: AsyncSession) -> str:\n \"\"\"Генерация короткой ссылки и проверка, что ее нет в БД\"\"\"\n def generate_short_name():\n \"\"\"Генерация тела короткой ссылки.\"\"\"\n return ''.join(\n random.choice(\n string.ascii_lowercase + string.digits\n )\n for _ in range(SHORTEN_URL_LEN)\n )\n\n while True:\n shorten_url = generate_short_name()\n statement = select(Link).where(Link.shorten_url == shorten_url)\n is_short_url_exists = (await session.execute(statement)).scalars().all()\n if not is_short_url_exists:\n break\n\n return shorten_url\n\n\nasync def get_all_links_by_user(user: User, session: AsyncSession) -> List[Link]:\n \"\"\"Получение всех ссылок, созданных пользователем.\"\"\"\n statement = select(Link).where(Link.created_by == user)\n results = await session.execute(statement)\n return results\n\n\nasync def get_jumps_count_by_link(shorten_url: str, session: AsyncSession) -> int:\n \"\"\"Получение количества переходов по ссылке.\"\"\"\n statement = select(func.count(\"*\")).select_from(\n RequestsHistory\n ).where(RequestsHistory.link == shorten_url)\n\n result = await session.execute(statement)\n return result.scalar_one()\n\n\nasync def get_full_info_about_link(\n shorten_url: str,\n user: User,\n session: AsyncSession,\n fullinfo: Optional[str],\n limit: int, offset: int\n) -> FullInfoLinks:\n \"\"\"Получение полной информации о ссылке: сколько раз был сделан переход по ней и информация по каждому переходу\"\"\"\n current_link = await get_original_link_by_shorten(shorten_url, user, session)\n\n if current_link.status == PrivacyStatusEnum.private and current_link.created_by != user:\n raise HTTPException(status_code=status.HTTP_403_FORBIDDEN)\n\n count = await get_jumps_count_by_link(shorten_url, session)\n\n if not isinstance(fullinfo, str):\n return count\n statement = select(\n 
async def get_all_links_by_user(user: User, session: AsyncSession) -> List[Link]:\n    \"\"\"Fetch all links created by the user.\"\"\"\n    statement = select(Link).where(Link.created_by == user)\n    results = await session.execute(statement)\n    return results\n\n\nasync def get_jumps_count_by_link(shorten_url: str, session: AsyncSession) -> int:\n    \"\"\"Get the number of times the link was followed.\"\"\"\n    statement = select(func.count(\"*\")).select_from(\n        RequestsHistory\n    ).where(RequestsHistory.link == shorten_url)\n\n    result = await session.execute(statement)\n    return result.scalar_one()\n\n\nasync def get_full_info_about_link(\n    shorten_url: str,\n    user: User,\n    session: AsyncSession,\n    fullinfo: Optional[str],\n    limit: int, offset: int\n) -> FullInfoLinks:\n    \"\"\"Get full information about a link: how many times it was followed and the details of every visit\"\"\"\n    current_link = await get_original_link_by_shorten(shorten_url, user, session)\n\n    if current_link.status == PrivacyStatusEnum.private and current_link.created_by != user:\n        raise HTTPException(status_code=status.HTTP_403_FORBIDDEN)\n\n    count = await get_jumps_count_by_link(shorten_url, session)\n\n    if not isinstance(fullinfo, str):\n        return count\n    statement = select(\n        RequestsHistory\n    ).where(RequestsHistory.link == shorten_url).limit(limit).offset(offset)\n\n    full_links_info = await session.execute(statement)\n    return full_links_info\n\n\nasync def delete_link(shorten_url: str, user: User, session: AsyncSession):\n    \"\"\"Delete a link. The row is not physically removed from the DB, it is only marked as deleted.\"\"\"\n    current_link = await get_original_link_by_shorten(shorten_url, user, session)\n    if current_link.created_by != user:\n        raise HTTPException(status_code=status.HTTP_403_FORBIDDEN)\n    statement = update(Link).where(Link.shorten_url == shorten_url).values(is_deleted=True)\n    await session.execute(statement)\n    await session.commit()\n\n\nasync def add_to_requests_history(\n    shorten_url: str,\n    client: str,\n    session: AsyncSession\n):\n    \"\"\"Record the fact that the link was followed.\"\"\"\n    new_record_dict = RequestsHistoryDB(client=client, link=shorten_url).dict()\n    db_record = RequestsHistory(**new_record_dict)\n    session.add(db_record)\n\n    await session.commit()\n\n\nasync def update_link(\n    shorten_url: str,\n    updated_link: Link,\n    user: User,\n    session: AsyncSession\n):\n    \"\"\"Update a link. Only the status of a link can be changed.\n    This method is available to registered users only.\"\"\"\n    if not user:\n        raise HTTPException(status_code=status.HTTP_403_FORBIDDEN)\n    current_link = await get_original_link_by_shorten(\n        shorten_url,\n        user,\n        session\n    )\n    current_link.status = updated_link.status\n    session.add(current_link)\n    await session.commit()\n    await session.refresh(current_link)\n    return current_link\n","repo_name":"alex-s-nik/one_more_url_shortener","sub_path":"src/services/link.py","file_name":"link.py","file_ext":"py","file_size_in_byte":5764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"40322569034","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport functools\nimport json\nimport math\nimport os\n\nfrom meta_dataset.data import decoder\nfrom meta_dataset.data import utils\nfrom task_adaptation import data_loader\n\nimport tensorflow.compat.v1 as tf\n\nVTAB_NATURAL = [\n    \"caltech101\",\n    \"cifar(num_classes=100)\",\n    \"dtd\",\n    \"oxford_flowers102\",\n    \"oxford_iiit_pet\",\n    \"sun397\",\n    \"svhn\"\n]\nVTAB_SPECIALIZED = [\n    \"diabetic_retinopathy(config='btgraham-300')\",\n    \"eurosat\",\n    \"resisc45\",\n    \"patch_camelyon\",\n]\nVTAB_STRUCTURED = [\n    \"clevr(task='closest_object_distance')\",\n    \"clevr(task='count_all')\",\n    \"dmlab\",\n    \"dsprites(predicted_attribute='label_orientation', num_classes=16)\",\n    \"dsprites(predicted_attribute='label_x_position', num_classes=16)\",\n    \"smallnorb(predicted_attribute='label_azimuth')\",\n    \"smallnorb(predicted_attribute='label_elevation')\",\n    \"kitti(task='closest_vehicle_distance')\",\n]\nVTAB_DATASETS = VTAB_NATURAL + VTAB_SPECIALIZED + VTAB_STRUCTURED\n\n\ndef read_episode_as_dataset(episodes_dir,\n                            episode_index,\n                            split,\n                            with_info=False):\n  \"\"\"This function reads a single episode from the directory.\n\n  Args:\n    episodes_dir: str, directory that has tf_record files.\n    episode_index: int, of the episode to be loaded.\n    split: str, `test` or `train`\n    with_info: bool, if True reads the json file in the folder and returns the\n      number of images per class.\n\n  Returns:\n    decoded_dataset: tf.data.Dataset, with `image` and `label` fields.\n    num_images_per_class_dict: dict, if with_info=True.\n\n  Raises:\n    ValueError, when 
`split` is not one of {'train', 'test'}.\n \"\"\"\n episode_path = utils.get_file_path(episodes_dir, episode_index, split)\n raw_dataset = tf.data.TFRecordDataset(episode_path)\n decoded_dataset = raw_dataset.map(decoder.read_example_and_parse_image)\n if with_info:\n info_path = utils.get_info_path(episodes_dir)\n with tf.io.gfile.GFile(info_path, \"r\") as f:\n all_info = json.load(f)\n # Convert keys to integer.\n key = os.path.basename(episode_path)\n num_images_per_class_dict = {int(k): v for k, v in all_info[key].items()}\n return decoded_dataset, num_images_per_class_dict\n\n return decoded_dataset\n\n\ndef read_episodes_from_records(episodes_dir,\n train_suffix=utils.TRAIN_SUFFIX,\n test_suffix=utils.TEST_SUFFIX):\n \"\"\"This function reads all episodes from a given directory.\n\n Additionally it returns total number of episodes available in one epoch.\n Args:\n episodes_dir: str, directory that has tf_record files.\n train_suffix: str, used during dumping of episodes to indicate training\n records. Default value should be kept unless the default is overwritten\n during creation of these episodes.\n test_suffix: str, used during dumping of episodes to indicate test records.\n Default value should be kept unless the default is overwritten during\n creation of these episodes.\n\n Returns:\n tf.data.Dataset, that returns a tuple of training and test datasets. Each\n dataset has `image` and `label` fields.\n int, number of episodes read.\n Raises:\n RuntimeError: when some episodes are missing.\n \"\"\"\n all_files = sorted(tf.io.gfile.listdir(episodes_dir))\n train_files = [f for f in all_files if f.endswith(train_suffix)]\n test_files = [f for f in all_files if f.endswith(test_suffix)]\n for test_file, train_file in zip(test_files, train_files):\n # Check whether ids match: expected format episode-0001-train.tfrecords\n # TODO(evcu) maybe use regex.\n if test_file.split(\"-\")[1] != train_file.split(\"-\")[1]:\n test_id = int(test_file.split(\"-\")[1])\n train_id = int(train_file.split(\"-\")[1])\n if test_id < train_id:\n raise RuntimeError(\"Train data missing for %d th episode.\" % test_id)\n else:\n raise RuntimeError(\"Test data missing for %d th episode.\" % train_id)\n\n # Load episode information to obtain total number of images for each episode.\n info_path = utils.get_info_path(episodes_dir)\n with tf.io.gfile.GFile(info_path, \"r\") as f:\n all_info = json.load(f)\n\n def get_total_img_count(file_name):\n return sum(all_info[file_name].values())\n\n # Define function to load individual tf-record files.\n def _load_and_batch(file_path, n_images):\n dataset = tf.data.TFRecordDataset(file_path)\n dataset = dataset.batch(tf.cast(n_images, tf.int64))\n batch_data = dataset.make_initializable_iterator().get_next()\n # This pipeline doesn't have original label ids.\n return batch_data\n\n train_paths = tf.data.Dataset.from_tensor_slices(\n [os.path.join(episodes_dir, f) for f in train_files])\n train_n_images = tf.data.Dataset.from_tensor_slices(\n [get_total_img_count(f) for f in train_files])\n test_paths = tf.data.Dataset.from_tensor_slices(\n [os.path.join(episodes_dir, f) for f in test_files])\n test_n_images = tf.data.Dataset.from_tensor_slices(\n [get_total_img_count(f) for f in test_files])\n dataset_of_datasets = tf.data.Dataset.zip(\n (tf.data.Dataset.zip((train_paths, train_n_images)).map(_load_and_batch),\n tf.data.Dataset.zip((test_paths, test_n_images)).map(_load_and_batch)))\n return dataset_of_datasets, len(test_files)\n\n\ndef 
read_episodes_from_records_multiple_sources(episodes_dir_list,\n train_suffix=utils.TRAIN_SUFFIX,\n test_suffix=utils.TEST_SUFFIX):\n \"\"\"This function reads episodes from a list of episode directories.\n\n Additionally it returns list of total number of episodes available in per\n directory in one epoch.\n\n Args:\n episodes_dir_list: list, list of directories that have tf_record files.\n train_suffix: str, used during dumping of episodes to indicate training\n records. Default value should be kept unless the default is overwritten\n during creation of these episodes.\n test_suffix: str, used during dumping of episodes to indicate test records.\n Default value should be kept unless the default is overwritten during\n creation of these episodes.\n\n Returns:\n tf.data.Dataset, that returns a tuple of training and test datasets. These\n tuples are randomly chosen from one of the directories in episode_dir_list\n with uniform probability. Each dataset has `image` and `label` fields.\n list(int), number of episodes read in each directory.\n Raises:\n RuntimeError: when some episodes are missing.\n \"\"\"\n\n num_files_per_dir = []\n dataset_per_dir = []\n for episode_dir in episodes_dir_list:\n episode_dataset, episode_num_files = read_episodes_from_records(\n episode_dir, train_suffix, test_suffix)\n num_files_per_dir.append(episode_num_files)\n dataset_per_dir.append(episode_dataset)\n return tf.data.experimental.sample_from_datasets(\n dataset_per_dir), num_files_per_dir\n\n\ndef read_vtab_as_episode(vtab_key,\n image_size=224,\n query_size_limit=500,\n data_dir=None):\n \"\"\"This function reads VTAB-1k datasets as episodes.\n\n The training set becomes support set and the test is the query set.\n Query set size can be large and this could prevent evaluation without\n batching. Therefore we split the query set in to batches of size\n `query_size_limit`. 
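# Illustrative aside: the record above pairs episode-0001-train.tfrecords with
# episode-0001-test.tfrecords by splitting on '-', and leaves a TODO suggesting a
# regex. A sketch of that regex-based pairing (file names assumed to follow the
# episode-NNNN-{train,test}.tfrecords pattern used in the source):
import re

def episode_id(file_name):
    m = re.match(r"episode-(\d+)-(train|test)\.tfrecords$", file_name)
    return int(m.group(1)) if m else None

assert episode_id("episode-0001-train.tfrecords") == 1
assert episode_id("episode-0042-test.tfrecords") == 42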
In addition to these 2 datasets, the function returns the\n  total number of episodes (batches) after splitting the query set into\n  `query_size_limit` batches.\n\n  Args:\n    vtab_key: str, one of constants.VTAB_DATASETS.\n    image_size: int, used to resize the images read.\n    query_size_limit: int, used to batch the query set.\n    data_dir: str, optional data directory path for tf-datasets.\n\n  Returns:\n    tf.data.Dataset, of support set in one batch.\n    tf.data.Dataset, of query set in batches of size `query_size_limit`.\n    int, number of batches available in the query set.\n  \"\"\"\n  dataset_instance = data_loader.get_dataset_instance({\n      \"dataset\": \"data.%s\" % vtab_key,\n      \"data_dir\": data_dir\n  })\n  query_ds = dataset_instance.get_tf_data(\n      split_name=\"test\",\n      batch_size=query_size_limit,\n      preprocess_fn=functools.partial(\n          data_loader.preprocess_fn, input_range=(-1.0, 1.0), size=image_size),\n      epochs=1,\n      drop_remainder=False,\n      for_eval=True,\n      shuffle_buffer_size=0,\n      prefetch=1,\n  )\n  n_query = math.ceil(\n      dataset_instance.get_num_samples(\"test\") / float(query_size_limit))\n  support_ds = dataset_instance.get_tf_data(\n      split_name=\"train800val200\",\n      # We get all 1000 images at once.\n      batch_size=dataset_instance.get_num_samples(\"train800val200\"),\n      preprocess_fn=functools.partial(\n          data_loader.preprocess_fn, input_range=(-1.0, 1.0), size=image_size),\n      epochs=1,\n      drop_remainder=False,\n      for_eval=False,\n      shuffle_buffer_size=0,\n      prefetch=1,\n  )\n  n_classes = dataset_instance.get_num_classes()\n\n  def cast_label_fn(ex):\n    ex[\"label\"] = tf.cast(ex[\"label\"], tf.int32)\n    return ex\n\n  support_ds = support_ds.map(cast_label_fn)\n  query_ds = query_ds.map(cast_label_fn)\n  return support_ds, query_ds, n_query, n_classes\n","repo_name":"google-research/meta-dataset","sub_path":"meta_dataset/data/read_episodes.py","file_name":"read_episodes.py","file_ext":"py","file_size_in_byte":9311,"program_lang":"python","lang":"en","doc_type":"code","stars":720,"dataset":"github-code","pt":"61"} {"seq_id":"36479277200","text":"# Questions to answer:\n# 1. What were all the different types of fire calls in 2018?\n# 2. What month within the year 2018 saw the highest number of fire calls?\n# 3. Which neighborhood generated the most fire calls in 2018?\n# 4. Which week of the year 2018 had the most fire calls?\n\n\n
import sys\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import *\n#to_timestamp, to_date, col, year, month, weekofyear\n\n# root\n#  |-- CallNumber: integer (nullable = true)\n#  |-- UnitID: string (nullable = true)\n#  |-- IncidentNumber: integer (nullable = true)\n#  |-- CallType: string (nullable = true)\n#  |-- CallDate: string (nullable = true)\n#  |-- WatchDate: string (nullable = true)\n#  |-- CallFinalDisposition: string (nullable = true)\n#  |-- AvailableDtTm: string (nullable = true)\n#  |-- Address: string (nullable = true)\n#  |-- City: string (nullable = true)\n#  |-- Zipcode: integer (nullable = true)\n#  |-- Battalion: string (nullable = true)\n#  |-- StationArea: string (nullable = true)\n#  |-- Box: string (nullable = true)\n#  |-- OriginalPriority: string (nullable = true)\n#  |-- Priority: string (nullable = true)\n#  |-- FinalPriority: integer (nullable = true)\n#  |-- ALSUnit: boolean (nullable = true)\n#  |-- CallTypeGroup: string (nullable = true)\n#  |-- NumAlarms: integer (nullable = true)\n#  |-- UnitType: string (nullable = true)\n#  |-- UnitSequenceInCallDispatch: integer (nullable = true)\n#  |-- FirePreventionDistrict: string (nullable = true)\n#  |-- SupervisorDistrict: string (nullable = true)\n#  |-- Neighborhood: string (nullable = true)\n#  |-- Location: string (nullable = true)\n#  |-- RowID: string (nullable = true)\n#  |-- Delay: double (nullable = true)\n#  |-- IncidentDate: date (nullable = true)\n#  |-- OnWatchDate: date (nullable = true)\n#  |-- OnAvailableDtTm: timestamp (nullable = true)\n\ndef open_spark_session(app_name):\n    ses = SparkSession\\\n        .builder\\\n        .appName(app_name)\\\n        .getOrCreate()\n    return ses\n\n\ndef read_input_csv(ses, input_file):\n    df = ses.read.csv(input_file, inferSchema=True, header=True)\n    return df\n\ndef convert_df_date_columns(ses, df):\n    temp_df = df\\\n        .withColumn(\"IncidentDate\", to_date(col(\"CallDate\"),\"MM/dd/yyyy\"))\\\n        .withColumn(\"OnWatchDate\", to_date(col(\"WatchDate\"),\"MM/dd/yyyy\"))\\\n        .withColumn(\"OnAvailableDtTm\", to_timestamp(col(\"CallDate\"),\"MM/dd/yyyy hh:mm:ss a\"))\n    return temp_df\n\ndef call_type_by_year(df, i_year):\n    temp_df = df\\\n        .select(\"CallType\")\\\n        .where(year(col(\"IncidentDate\")) == i_year)\\\n        .groupBy(\"CallType\")\\\n        .count()\\\n        .orderBy(\"count\", ascending=False)\n    return temp_df\n\ndef highest_call_by_month(df, i_year):\n    temp_df = df\\\n        .withColumn(\"IncidentMonth\",month(col(\"IncidentDate\")))\\\n        .select(\"IncidentMonth\")\\\n        .where(year(col(\"IncidentDate\")) == i_year)\\\n        .groupBy(\"IncidentMonth\")\\\n        .count()\\\n        .orderBy(\"count\", ascending=False)\n    return temp_df\n\n\ndef calls_by_neighborhood(df, i_year):\n    temp_df = df\\\n        .select(\"Neighborhood\")\\\n        .where(year(col(\"IncidentDate\")) == i_year)\\\n        .groupBy(\"Neighborhood\")\\\n        .count()\\\n        .orderBy(\"count\", ascending=False)\n    return temp_df\n\n\ndef highest_call_by_week(df, i_year):\n    temp_df = df\\\n        .withColumn(\"IncidentWeek\",weekofyear(col(\"IncidentDate\")))\\\n        .select(\"IncidentWeek\")\\\n        .where(year(col(\"IncidentDate\")) == i_year)\\\n        .groupBy(\"IncidentWeek\")\\\n        .count()\\\n        .orderBy(\"count\", ascending=False)\n    return temp_df\n\n\n\nif __name__ == \"__main__\":\n    if len(sys.argv) != 2:\n        print('Usage: sf_fire_call.py <input_file>', file=sys.stderr)\n        sys.exit(-1)\n\n    data = sys.argv[1]\n    spark = open_spark_session('sf_fire_call')\n    fire_df = read_input_csv(spark, data)\n    fire_df = convert_df_date_columns(spark, fire_df)\n    # fire_df.printSchema()\n\n\n\n    # What were all the different types of fire calls in 2018?\n    call_type_by_year_df = call_type_by_year(fire_df, 2018)\n\n    # What month within the year 2018 saw the highest number of fire calls?\n    highest_call_by_month_df = highest_call_by_month(fire_df, 2018)\n\n    # Which neighborhood generated the most fire calls in 2018?\n    calls_by_neighborhood_df = calls_by_neighborhood(fire_df, 2018)\n\n    # Which week of the year 2018 had the most fire calls?\n    highest_call_by_week_df = highest_call_by_week(fire_df, 2018)\n\n\n    # save output in parquet format\n    parquet_table = \"calls_by_week\"\n    #highest_call_by_week_df.write.format(\"parquet\").saveAsTable(parquet_table)\n\n    # distinct call type\n    count_distinct_call_type = fire_df\\\n        .select(\"CallType\")\\\n        .where(col(\"CallType\").isNotNull())\\\n        .agg(countDistinct(\"CallType\").alias(\"DistinctCallType\"))\\\n        .show()\n\n\n    fire_df\\\n        .select(\"CallType\")\\\n        .where(col(\"CallType\").isNotNull())\\\n        .distinct()\\\n        .show(10, truncate=False)\n\n    # column Rename\n    fire_df.withColumnRenamed(\"Delay\", \"ResponseDelayinMins\")\\\n        .select(\"ResponseDelayinMins\")\\\n        .where(col(\"ResponseDelayinMins\") > 5)\\\n        .orderBy(\"ResponseDelayinMins\", ascending=False)\\\n        .show(5, truncate=False)\n\n    # Min, max, Avg\n    fire_df.withColumnRenamed(\"Delay\", \"ResponseDelayinMins\")\\\n        .select(min(\"ResponseDelayinMins\"),\\\n                max(\"ResponseDelayinMins\"),\n                avg(\"ResponseDelayinMins\"))\\\n        .show(truncate=False)\n\n\n","repo_name":"nileshvarshney/python_","sub_path":"Spark/Learning Spark/ch03/src/sf_fire_call.py","file_name":"sf_fire_call.py","file_ext":"py","file_size_in_byte":5466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
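# Illustrative aside: a tiny, self-contained check of the date-parsing and
# week-of-year pattern the record above relies on (a local SparkSession is assumed):
from pyspark.sql import SparkSession
from pyspark.sql.functions import to_date, weekofyear, col

spark = SparkSession.builder.appName("demo").getOrCreate()
df = spark.createDataFrame([("01/11/2018",), ("06/30/2018",)], ["CallDate"])
df = df.withColumn("IncidentDate", to_date(col("CallDate"), "MM/dd/yyyy"))
df.select("IncidentDate",
          weekofyear(col("IncidentDate")).alias("IncidentWeek")).show()
# IncidentWeek comes back as the ISO week number: 2 and 26 respectively.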
{"seq_id":"5863098759","text":"# -*- coding:utf-8 -*-\n\"\"\"\nClasses collecting data about CPU\n\"\"\"\nimport re\nfrom datetime import timedelta, datetime\n\nfrom janitor.collector.base import BaseCollect\n\n\nclass CPULoadCollect(BaseCollect):\n    \"\"\"\n    Collect CPU load\n    \"\"\"\n    chart_name = 'CPU usage'\n\n    table_name = 'cpu_usage'\n\n    time_list = None\n\n    proc_stat_fd = file(\"/proc/stat\", \"r\")\n\n    # in /proc/stat each core/thread is named cpu#\n    column_description = (\n        ('cpu', 'number', 'Total percentage usage'),\n        ('cpu0', 'number', '#1 percentage usage'),\n        ('cpu1', 'number', '#2 percentage usage'),\n        ('cpu2', 'number', '#3 percentage usage'),\n        ('cpu3', 'number', '#4 percentage usage'),\n        ('cpu4', 'number', '#5 percentage usage'),\n        ('cpu5', 'number', '#6 percentage usage'),\n        ('cpu6', 'number', '#7 percentage usage'),\n        ('cpu7', 'number', '#8 percentage usage'),\n    )\n\n    def __init__(self, connection, alerts=None):\n        self.time_list = self.get_time_list()\n        super(CPULoadCollect, self).__init__(connection, alerts)\n\n    @property\n    def count_cores(self):\n        \"\"\"\n        Return information about cores\n\n        :return: Cores count\n        :rtype: int\n        \"\"\"\n        self.proc_stat_fd.seek(0)\n\n        return sum(tuple(\n            1\n            for line in self.proc_stat_fd.readlines()\n            if re.match('cpu\\\\d+.*', line)\n        ))\n\n    def install(self):\n        sql = 'create table %s (' \\\n              '  id INTEGER PRIMARY KEY AUTOINCREMENT, ' \\\n              '  cpu REAL, ' \\\n              '  cpu0 REAL, ' \\\n              '  cpu1 REAL NULL, ' \\\n              '  cpu2 REAL NULL, ' \\\n              '  cpu3 REAL NULL, ' \\\n              '  cpu4 REAL NULL, ' \\\n              '  cpu5 REAL NULL, ' \\\n              '  cpu6 REAL NULL, ' \\\n              '  cpu7 REAL NULL, ' \\\n              '  created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP' \\\n              ');' % self.table_name\n\n        self.cursor.execute(sql)\n\n        self.connection.commit()\n\n    def get_time_list(self):\n        \"\"\"\n        http://www.linuxhowtos.org/System/procstat.htm\n        \"\"\"\n        # dirty fix: rewind the file, otherwise a quick second read sees\n        # nothing and the delta comes out as 0\n        self.proc_stat_fd.seek(0)\n        cpu_times = {}\n\n        for line in self.proc_stat_fd.readlines():\n            if not line.startswith('cpu'):\n                continue\n\n            columns = line.split(' ')\n            cpu_times[columns[0]] = map(int, filter(None, columns[1:]))\n\n        return cpu_times\n\n    def get_delta_time(self):\n        \"\"\"\n        Return the difference in CPU stats between the last call and the current call\n        \"\"\"\n        current_time_list = self.get_time_list()\n        delta_times = {}\n        for k in current_time_list.keys():\n            delta_times[k] = [\n                (t2 - t1)\n                for t1, t2 in zip(self.time_list[k], current_time_list[k])\n            ]\n\n        self.time_list = current_time_list\n\n        return delta_times\n\n    def get_cpu_load(self):\n        \"\"\"\n        Returns the load of all CPUs in the system\n        \"\"\"\n        delta_times = self.get_delta_time()\n        cpu_loads = {}\n        for k in delta_times.keys():\n            dt = list(delta_times[k])\n            idle_time = float(dt[3])\n            total_time = sum(dt)\n            dt_load = (idle_time / total_time) if total_time > 0 else 1\n            cpu_loads[k] = 1 - (dt_load)\n\n        return cpu_loads\n\n
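# Illustrative aside: the idle/total delta trick above, reduced to a self-contained
# Python 3 sketch (the janitor base class and SQLite plumbing are left out;
# /proc/stat only exists on Linux):
import time

def cpu_times():
    with open("/proc/stat") as f:
        fields = f.readline().split()  # aggregate 'cpu' line
    return list(map(int, fields[1:]))

t1 = cpu_times()
time.sleep(1)
t2 = cpu_times()
delta = [b - a for a, b in zip(t1, t2)]
idle, total = delta[3], sum(delta)
print("CPU load: %.1f%%" % (100.0 * (1 - idle / total) if total else 0.0))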
    def sort_values(self, cpu, cpu0=None, cpu1=None, cpu2=None, cpu3=None,\n                    cpu4=None, cpu5=None, cpu6=None, cpu7=None):\n        \"\"\"\n        Small helper that returns the given per-CPU values as a tuple in a\n        fixed order.\n\n        :return: Tuple\n        :rtype: tuple\n        \"\"\"\n        return cpu, cpu0, cpu1, cpu2, cpu3, cpu4, cpu5, cpu6, cpu7\n\n    def collect(self):\n        cpu, cpu0, cpu1, cpu2, cpu3, cpu4, cpu5, cpu6, cpu7 = \\\n            tuple(\n                (p if p else 0) * 100.0\n                for p in self.sort_values(**self.get_cpu_load())\n            )\n\n        data_to_insert = (\n            self.table_name,\n            cpu,\n            cpu0,\n            cpu1,\n            cpu2,\n            cpu3,\n            cpu4,\n            cpu5,\n            cpu6,\n            cpu7,\n        )\n\n        sql = 'insert into %s ' \\\n              '(cpu, cpu0, cpu1, cpu2, cpu3, cpu4, cpu5, cpu6, cpu7) ' \\\n              'values (%f, %f, %f, %f, %f, %f, %f, %f, %f);'\n\n        self.cursor.execute(sql % data_to_insert)\n        self.connection.commit()\n\n    def get_data(self, limit=30, interval=10):\n        limit_date = datetime.now()-timedelta(days=limit)\n        params = {\n            'interval': interval,\n            'table_name': self.table_name,\n            'limit_date': limit_date.strftime('%Y-%m-%d'),\n        }\n\n        sql = 'select ' \\\n              '  strftime(\\'%%Y-%%m-%%d %%H:\\', created_at) || ' \\\n              '  (strftime(\\'%%M\\', created_at)/%(interval)s) || ' \\\n              '  \\'0:00\\' as timestamp, ' \\\n              '  round(avg(cpu),2) as cpu, ' \\\n              '  round(avg(cpu0),2) as cpu0, ' \\\n              '  round(avg(cpu1),2) as cpu1, ' \\\n              '  round(avg(cpu2),2) as cpu2, ' \\\n              '  round(avg(cpu3),2) as cpu3, ' \\\n              '  round(avg(cpu4),2) as cpu4, ' \\\n              '  round(avg(cpu5),2) as cpu5, ' \\\n              '  round(avg(cpu6),2) as cpu6, ' \\\n              '  round(avg(cpu7),2) as cpu7 ' \\\n              'from %(table_name)s ' \\\n              'where DATE(created_at) > \\'%(limit_date)s\\' ' \\\n              'group by strftime(\\'%%Y%%m%%d%%H0\\', created_at) + ' \\\n              '         strftime(\\'%%M\\', created_at)/%(interval)s' \\\n              ';' % params\n\n        return self.prepare_result(self.cursor.execute(sql).fetchall())\n","repo_name":"Alkemic/janitor","sub_path":"janitor/collector/cpu.py","file_name":"cpu.py","file_ext":"py","file_size_in_byte":6121,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} {"seq_id":"26214159194","text":"import pandas as pd\r\nimport os\r\nfrom rich.progress import 
track\r\nimport requests\r\nimport argparse\r\naparser = argparse.ArgumentParser()\r\nrequired_arguments = aparser.add_argument_group('required arguments')\r\nrequired_arguments.add_argument('-g','--genes', help='file with one gene per line', required=True)\r\nargs = aparser.parse_args()\r\n\r\nstring_api_url = \"https://string-db.org/api\"\r\noutput_format = \"tsv\"\r\nmethod = \"interaction_partners\"\r\n\r\nremaining = set([g.strip() for g in open(args.genes)])\r\n\r\nwith open(f'{args.genes}.interactions', 'w', newline='\\n') as f:\r\n for gene in track(remaining):\r\n request_url = \"/\".join([string_api_url, output_format, method])\r\n\r\n params = {\r\n\r\n \"identifiers\" : \"%0d\".join([gene]), # your protein\r\n #\"species\" : 9606, # species NCBI identifier \r\n \"limit\" : 5000,\r\n \"caller_identity\" : \"www.awesome_app.org\" # your app name\r\n\r\n }\r\n response = requests.post(request_url, data=params)\r\n for line in response.text.strip().split(\"\\n\"):\r\n f.write(line + '\\n')","repo_name":"torresmateo/query_string","sub_path":"query_string.py","file_name":"query_string.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41491110418","text":"from ._conflicts import _tf_idf_automatic_algorithm\nfrom .utils import Video\nfrom ._http import _get_video_transcription, _get_video_info, _get_video_comments, _get_list_search_videos\nfrom ._tree import YoutubeDiscussionTree\nfrom ._quota import QuotaManager\nfrom ._errors import SearchBoundsExceded\nfrom transformers import pipeline\n\nclass YoutubeDiscussionTreeAPI():\n\n def __init__(self, api_key):\n self.api_key = api_key\n self.quota_manager = QuotaManager(\".quota.pickle\", api_key)\n\n def generate_tree(self, video_id, summarization = False, conflict_solving_algorithm = _tf_idf_automatic_algorithm):\n video_content = _get_video_transcription(video_id) if not summarization else self._sumarize_video(_get_video_transcription(video_id))\n video_info = _get_video_info(video_id, self.api_key, self.quota_manager)\n comments = _get_video_comments(video_id, self.api_key, self.quota_manager)[\"items\"]\n return YoutubeDiscussionTree(video_id, conflict_solving_algorithm).make_tree(video_info[\"items\"][0], video_content, comments)\n\n def quota_info(self):\n return {\n \"limit\" : self.quota_manager._get_api_limit(),\n \"spent\" : self.quota_manager._get_current_quota()\n }\n\n def search_videos(self, query, search_results = 5):\n if search_results < 0 or search_results > 50:\n raise SearchBoundsExceded(search_results, \"Search Results parameter out of bounds, you have to set it from 0 to 50\")\n videos_json = _get_list_search_videos(query, search_results, self.api_key, self.quota_manager)\n return list(map(lambda x : Video(\n id = x[\"id\"][\"videoId\"],\n title = x[\"snippet\"][\"title\"],\n description = x[\"snippet\"][\"description\"],\n channel_name = x[\"snippet\"][\"channelTitle\"],\n channel_id = x[\"snippet\"][\"channelId\"],\n published_at = x[\"snippet\"][\"publishedAt\"]\n ), \n videos_json[\"items\"])) \n \n\n def _sumarize_video(self, video_transcription):\n summarizer = pipeline(\"summarization\")\n return summarizer(video_transcription, max_length=512, min_length=256, do_sample=False, 
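# Illustrative aside: hypothetical usage of the YoutubeDiscussionTreeAPI in the
# record above. The import path, API key and Video attribute access are assumptions
# made for this sketch; only the method names and the 0..50 search bound come from
# the source:
from youtube_discussion_tree_api import YoutubeDiscussionTreeAPI  # assumed export

api = YoutubeDiscussionTreeAPI("YOUR_API_KEY")  # placeholder key
videos = api.search_videos("climate change", search_results=5)  # must be 0..50
tree = api.generate_tree(videos[0].id, summarization=False)
print(api.quota_info())  # {'limit': ..., 'spent': ...}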
truncation=True)[0][\"summary_text\"]","repo_name":"quimpm/youtube_discussion_tree","sub_path":"youtube_discussion_tree_api/_api.py","file_name":"_api.py","file_ext":"py","file_size_in_byte":2422,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"61"} {"seq_id":"41146900641","text":"from objects.modulebase import ModuleBase\nfrom objects.permissions import PermissionEmbedLinks\n\nfrom discord import Embed, Colour\n\n\ntry:\n    import aiogoogletrans as gt\nexcept ImportError:\n\traise ImportError(\n        'aiogoogletrans python library is required to use this module. '\n        'You can install it at https://github.com/Fogapod/aiogoogletrans'\n    )\n\n\ntranslate_urls = [\n    'translate.google.com', 'translate.google.co.kr',\n    'translate.google.at', 'translate.google.de',\n    'translate.google.ru', 'translate.google.ch',\n    'translate.google.fr', 'translate.google.es'\n]\n\nclass Module(ModuleBase):\n\n    usage_doc = '{prefix}{aliases} <text>'\n    short_doc = 'Translate text'\n    long_doc = (\n        'Subcommands:\\n'\n        '\\tlist: get list of languages\\n\\n'\n        'Command flags:\\n'\n        '\\t[--in|-i] <language>: input language\\n'\n        '\\t[--out|-o] <language>: output language'\n    )\n\n    name = 'goodtranslator'\n    aliases = (name, 'gt')\n    category = 'Actions'\n    min_args = 1\n    bot_perms = (PermissionEmbedLinks(), )\n    flags = {\n        'in': {\n            'alias': 'i',\n            'bool': False\n        },\n        'out': {\n            'alias': 'o',\n            'bool': False\n        }\n    }\n\n    async def on_load(self, from_reload):\n        self.translator = gt.Translator(service_urls=translate_urls)\n\n    async def on_call(self, ctx, args, **flags):\n        if args[1:].lower() == 'list':\n            return '\\n'.join(f'`{k}`: {v}' for k, v in gt.LANGUAGES.items())\n\n        in_lang = flags.get('in', None)\n        if in_lang and in_lang.lower() not in gt.LANGUAGES:\n            return await ctx.warn('Invalid input language. Try using list subcommand')\n\n        out_lang = flags.get('out', 'en').lower()\n        if out_lang not in gt.LANGUAGES:\n            return await ctx.warn('Invalid out language. Try using list subcommand')\n\n        try:\n            translation = await self.translator.translate(\n                args[1:], src=in_lang or 'auto', dest=out_lang,\n                proxy=self.bot.get_proxy(allow_none=True)\n            )\n        except Exception:\n            return await ctx.error(\n                'Failed to translate. 
Please, try again later.\\n'\n 'While this is broken you could try using gt2.'\n )\n\n e = Embed(colour=Colour.gold(), title='GoodTranslator')\n e.description = translation.text[:2048]\n e.add_field(\n name='Translated',\n value=f'{gt.LANGUAGES.get(translation.src, translation.src)} -> {gt.LANGUAGES[out_lang]}'\n )\n e.set_footer(text=ctx.author, icon_url=ctx.author.avatar_url)\n\n await ctx.send(embed=e)\n\n async def on_unload(self):\n await self.translator.close()\n","repo_name":"Fogapod/KiwiBot","sub_path":"modules/apis/module_goodtranslator.py","file_name":"module_goodtranslator.py","file_ext":"py","file_size_in_byte":2734,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"61"} +{"seq_id":"33529770209","text":"#!/usr/bin/python3\n\"\"\" module: log parsing \"\"\"\nimport sys\n\n\nclass Dat:\n \"\"\" class to hold data \"\"\"\n filesizes = [0]\n status = {}\n\n\ndef printLog():\n \"\"\" Print one log \"\"\"\n size = sum(Dat.filesizes)\n print(\"File size: {}\".format(size))\n x = sorted(Dat.status.keys())\n for k in x:\n print(str(k) + \": \" + str(Dat.status[k]))\n\n\ncount = 0\ntry:\n for i in sys.stdin:\n if count == 10:\n printLog()\n count = 0\n fsize = int(i.split(' ')[-1].rstrip())\n Dat.filesizes.append(fsize)\n code = int(i.split(' ')[-2])\n Dat.status[code] = Dat.status.get(code, 0) + 1\n count = count + 1\nexcept (KeyboardInterrupt):\n printLog()\n raise\n","repo_name":"maybe-william/holbertonschool-higher_level_programming","sub_path":"0x0B-python-input_output/101-stats.py","file_name":"101-stats.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34060458724","text":"import unittest\nfrom datetime import datetime\n\nfrom django.test import Client, TestCase\nimport time\nfrom django.contrib.auth.models import User\nfrom mdot.models import Sponsor, Manager, App, Platform, Agreement\nfrom mdot.admin import AgreementFilter, AgreementAdmin\nfrom django.core.exceptions import ValidationError\n\n\nclass MdotAdminTest(TestCase):\n \"\"\"\n Tests that cover the functionality of mdot's\n admin site.\n \"\"\"\n\n def setUp(self):\n App.objects.all().delete()\n Agreement.objects.all().delete()\n Manager.objects.all().delete()\n Sponsor.objects.all().delete()\n Platform.objects.all().delete()\n User.objects.all().delete()\n\n self.client = Client()\n self.user = User.objects.create_user(\n username=\"javerage\",\n email=\"javerage@uw.edu\",\n password=\"p@ssTest1\"\n )\n self.platform_android = Platform.objects.create(\n name='Android',\n app_store='Google Play Store'\n )\n self.platform_ios = Platform.objects.create(\n name='IOS',\n app_store='Apple Store'\n )\n self.sponsor = Sponsor.objects.create(\n first_name='Sponsor',\n last_name='lname',\n netid='spontest',\n email='sponsor@uw.edu'\n )\n self.manager = Manager.objects.create(\n first_name='J',\n last_name='average',\n netid='mantest',\n email='manager@uw.edu'\n )\n self.app = App.objects.create(\n name='TestApp',\n primary_language='English',\n app_manager=self.manager,\n app_sponsor=self.sponsor,\n requestor=self.user,\n )\n\n def test_platform_name_displays_properly(self):\n \"\"\"\n Test that the platform's name in the Platform admin link\n displays the app store name correctly.\n \"\"\"\n\n platform = Platform(name='Incorrect', app_store='Android')\n self.assertEqual('Android', str(platform))\n\n def test_sponsor_name_displays_properly(self):\n \"\"\"\n Test that the sponsor's 
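# Illustrative aside for the log-parsing record above: it reads stdin lines whose
# last two space-separated fields are <status code> <file size>, and prints running
# totals every 10 lines and on Ctrl+C. A quick smoke test (the exact log line shape
# here is an assumption; only the two trailing fields matter to the parser):
import random, subprocess

lines = "".join(
    '127.0.0.1 - [date] "GET /projects/260 HTTP/1.1" %d %d\n'
    % (random.choice([200, 301, 404, 500]), random.randint(1, 1024))
    for _ in range(20)
)
out = subprocess.run(["python3", "101-stats.py"], input=lines,
                     capture_output=True, text=True).stdout
print(out)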
name in the Sponsor admin link\n displays the full name correctly.\n \"\"\"\n\n self.assertEqual('Sponsor lname', str(self.sponsor.full_name()))\n\n def test_manager_name_displays_properly(self):\n \"\"\"\n Test that the manager's name in the Manager admin link\n displays the full name correctly.\n \"\"\"\n\n self.assertEqual('J average', str(self.manager.full_name()))\n\n def test_app_name_displays_properly(self):\n \"\"\"\n Test that the app's name in the App admin link\n displays the app's name correctly.\n \"\"\"\n\n self.assertEqual('TestApp', str(self.app))\n\n def test_manager_contact_displays_properly(self):\n \"\"\"\n Test that app manager's email in detail view displays\n properly.\n \"\"\"\n\n display = self.app.manager_contact\n self.assertEqual('manager@uw.edu', display)\n\n def test_sponsor_contact_displays_properly(self):\n \"\"\"\n Test that app sponsor's email in detail view displays\n properly.\n \"\"\"\n\n display = self.app.sponsor_contact\n self.assertEqual('sponsor@uw.edu', display)\n\n def test_agreement_name_displays_properly(self):\n \"\"\"\n Test that the agreement's app name in the Agreements admin link\n displays correctly.\n \"\"\"\n\n agreement = Agreement(app=self.app, status='agreed')\n self.assertEqual('TestApp', str(agreement))\n\n def test_agreed_status_displays_properly(self):\n \"\"\"\n Test that an approved app's agreement status displays properly.\n \"\"\"\n\n time = datetime.now()\n agreement = Agreement.objects.create(\n app=self.app,\n status='agreed',\n agree_time=time\n )\n display = self.app.status()\n self.assertTrue(str(display).startswith('Agreed on '))\n # self.assertEqual(\n # 'Agreed on ' + time.strftime('%b %d, %Y, %I:%M %p'),\n # str(display)\n # )\n\n def test_denied_status_displays_properly(self):\n \"\"\"\n Test that a denied app's agreement status displays properly.\n \"\"\"\n\n time = datetime.now()\n agreement = Agreement.objects.create(\n app=self.app,\n status='denied',\n agree_time=time\n )\n display = self.app.status()\n self.assertTrue(str(display).startswith('Denied on '))\n # self.assertEqual(\n # 'Denied on ' + time.strftime('%b %d, %Y, %I:%M %p'),\n # str(display)\n # )\n\n def test_removed_status_displays_properly(self):\n \"\"\"\n Test that a removed app's agreement status displays properly.\n \"\"\"\n\n time = datetime.now()\n agreement = Agreement.objects.create(\n app=self.app,\n status='removed',\n agree_time=time\n )\n display = self.app.status()\n self.assertTrue(str(display).startswith('Removed on '))\n # self.assertEqual(\n # 'Removed on ' + time.strftime('%b %d, %Y, %I:%M %p'),\n # str(display)\n # )\n\n def test_pending_status_displays_properly(self):\n \"\"\"\n Test that a pending app's agreement status displays properly.\n \"\"\"\n\n time = datetime.now()\n agreement = Agreement(\n app=self.app,\n agree_time=time\n )\n display = self.app.status()\n self.assertEqual('Pending', str(display))\n\n def test_app_platform_displays_properly(self):\n \"\"\"\n Test that an app's platforms display properly on the admin dashboard.\n \"\"\"\n\n self.app.platform.add(self.platform_ios, self.platform_android)\n display = self.app.app_platform()\n self.assertEqual('Google Play Store, Apple Store', str(display))\n\n def test_invalid_agreement_status(self):\n \"\"\"\n Test that agreement dashboard will raise ValidationError if no\n agreement status is selected.\n \"\"\"\n\n time = datetime.now()\n with self.assertRaises(ValidationError):\n agreement = Agreement.objects.create(\n app=self.app,\n status='',\n 
agree_time=time)\n\n    def test_agreement_filter_accepted_status(self):\n        \"\"\"\n        Test that the agreement filter properly filters apps when the\n        app's latest agreement status is accepted.\n        \"\"\"\n\n        time = datetime.now()\n        agreement = Agreement(\n            app=self.app,\n            status='agreed',\n            agree_time=time\n        )\n        apps = App.objects.all()\n        f = AgreementFilter(\n            None,\n            {'status': 'agreed'},\n            Agreement,\n            AgreementAdmin\n        )\n        self.assertTrue(f.queryset(None, apps).filter(id=self.app.id).exists())\n\n    def test_agreement_filter_denied_status(self):\n        \"\"\"\n        Test that the agreement filter properly filters apps when the\n        app's latest agreement status is denied.\n        \"\"\"\n\n        time = datetime.now()\n        agreement = Agreement(\n            app=self.app,\n            status='denied',\n            agree_time=time\n        )\n        apps = App.objects.all()\n        f = AgreementFilter(\n            None,\n            {'status': 'denied'},\n            Agreement,\n            AgreementAdmin\n        )\n        self.assertTrue(f.queryset(None, apps).filter(id=self.app.id).exists())\n\n    def test_agreement_filter_removed_status(self):\n        \"\"\"\n        Test that the agreement filter properly filters apps when the\n        app's latest agreement status is removed.\n        \"\"\"\n\n        time = datetime.now()\n        agreement = Agreement(\n            app=self.app,\n            status='removed',\n            agree_time=time\n        )\n        apps = App.objects.all()\n        f = AgreementFilter(\n            None,\n            {'status': 'removed'},\n            Agreement,\n            AgreementAdmin\n        )\n        self.assertTrue(f.queryset(None, apps).filter(id=self.app.id).exists())\n\n    def test_agreement_filter_pending_status(self):\n        \"\"\"\n        Test that the agreement filter properly filters apps when the\n        app's latest agreement status is pending.\n        \"\"\"\n\n        time = datetime.now()\n        Agreement.objects.all().delete()\n        apps = App.objects.all()\n        f = AgreementFilter(\n            None,\n            {'status': 'pending'},\n            Agreement,\n            AgreementAdmin\n        )\n        self.assertTrue(f.queryset(None, apps).filter(id=self.app.id).exists())\n\n    def tearDown(self):\n        pass\n","repo_name":"uw-it-aca/mdot","sub_path":"mdot/test/admin_view.py","file_name":"admin_view.py","file_ext":"py","file_size_in_byte":8699,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} {"seq_id":"33184137431","text":"#!/usr/bin/python3\n'''recurse'''\nimport requests\n\n\ndef recurse(subreddit, hot_list=[], after=''):\n    headers = {'User-Agent': 'ned6'}\n    res = requests.get(\n        'https://www.reddit.com/r/{}/hot.json?after={}'.\n        format(subreddit, after), headers=headers)\n    if res.status_code != 200:\n        return None\n    info = res.json()\n    data1 = info.get('data')\n    chi = data1.get('children')\n    for i in chi:\n        hot_list.append(i.get('data').get('title'))\n    after = data1.get('after')\n    if after:\n        return recurse(subreddit, hot_list, after)\n    return hot_list\n","repo_name":"nedhir6/holberton-system_engineering-devops","sub_path":"0x16-api_advanced/2-recurse.py","file_name":"2-recurse.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
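# Illustrative aside: the recursive record above follows Reddit's 'after' cursor.
# The same pagination written iteratively (equivalent logic, no recursion-depth
# concern on very large subreddits):
import requests

def hot_titles(subreddit):
    titles, after = [], ''
    while True:
        res = requests.get(
            'https://www.reddit.com/r/{}/hot.json?after={}'.format(subreddit, after),
            headers={'User-Agent': 'ned6'})
        if res.status_code != 200:
            return None
        data = res.json().get('data')
        titles.extend(c.get('data').get('title') for c in data.get('children'))
        after = data.get('after')
        if not after:
            return titles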
{"seq_id":"27596902646","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[25]:\n\n\nfrom itertools import combinations as com\n\n\n# In[20]:\n\n\n# task 1\n\nmembers = ['Ava']\nfor i in range(11):\n    members.append('Member' + str(i+2))\n\nlen(list(com(members, 1))) * len(list(com(members, 4)))\n\n\n# In[3]:\n\n\n# task 2\n\ntotal = 150\ncar = 80\nvan = 50\nlorry = 20\n\nprint(f\"\"\"\na) Probability of a car leaving first = {car}/{total}\nb) Probability of a lorry leaving first = {lorry}/{total}\nc) Probability of a car leaving second given a lorry or van left first = {car}/{total - 1}\"\"\")\n\n\n# # task 3\n# \n# ![image.png](attachment:image.png)\n\n# ### a) 2 left handed children\n# \n# probability that there are two left handed children = 8/30\n# => 4/15\n\n# ### b) probability that there are at least 3 left handed children\n# \n# probability that there are at least 3 left handed children = (5+12+2)/30\n# => 19/30\n\n# # Task 4\n# \n# ![image.png](attachment:image.png)\n\n# \n\n# ![IMG_20221220_192850166-2.jpg](attachment:IMG_20221220_192850166-2.jpg)\n\n# # task 5\n# \n# a) Since the smallest number on a die is 1 and two dice are being rolled, the probability of getting a sum of 1 is 0\n# \n# \n# \n# b) favourable_outcomes = [[1, 3], [2, 2], [3, 1]]\n# \n# total_outcomes = 36\n# probability = 3/36\n# \n# \n# \n# c) Since the highest number on a die is 6 and two are being rolled, the maximum sum possible is 12, and the question asks for less than 13.\n# \n# Therefore, the favourable outcomes are 36 and total possible outcomes are also 36. \n# \n# Hence, probability is 36/36, i.e., 1.\n\n# # task 6\n# \n# 20 tickets from number 1 to 20\n# \n# a) probability that the ticket selected is even = 10/20 (Since there are 10 even numbers till 20)\n# \n# b) probability of selecting a number divisible by 3 \n# \n# numbers divisible by 3 range 1 to 20 = [3, 6, 9, 12, 15, 18] = 6 numbers\n# probability of selecting a number divisible by 3 = 6/20\n# \n# c) probability of selecting a prime number\n# \n# prime numbers between 1 and 20 = [2, 3, 5, 7, 11, 13, 17, 19] = 8 numbers\n# probability of selecting a prime number = 8/20\n# = 4/10\n# = 2/5\n# \n# d) probability of selecting a number divisible by 5\n# \n# numbers divisible by 5 = [5, 10, 15, 20] = 4 numbers\n# probability of selecting a number divisible by 5 = 4/20\n# = 2/10\n# = 1/5\n\n# # task 7\n# \n# \n# 3 dice\n# \n# 11 or 12 as sum more likely\n# \n# all possible outcomes = 6^3 = 216\n# \n# \n# probability of 11 => 27/216\n# \n# probability of 12 => 25/216\n# \n# A sum of 11 from a roll of 3 dice is more likely to occur\n\n# In[ ]:\n\n\n\n\n","repo_name":"ActuallyKushagra/AlmostEveryPracticeAndChallengeFromNIIT","sub_path":"kushagrasindhi_dsft13_dsl1-master/COURSE5/SPRINT2/DS1_C5_S2_Challenge_KushagraSindhi.py","file_name":"DS1_C5_S2_Challenge_KushagraSindhi.py","file_ext":"py","file_size_in_byte":3146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"74854320194","text":"#this is a guess the number game\r\n\r\nimport random\r\n\r\nprint('Hello What is your name????')\r\nname = input()\r\n\r\n\r\nprint('Well. ' + name + ', I am thinking of a number between 1 & 20')\r\nsecretNum = random.randint(1,20)\r\n\r\nfor guessesTook in range(1,7):\r\n    print('Take a guess.')\r\n    guess = int(input())\r\n\r\n    if guess < secretNum:\r\n        print('Your guess is too low.')\r\n    elif guess > secretNum:\r\n        print('Your guess is too high.')\r\n    else:\r\n        break #Guess is correct!!!\r\n\r\nif guess == secretNum:\r\n    print('Great ' + name + '!! you guessed my number in ' + str(guessesTook) + ' guesses!')\r\nelse:\r\n    print('Nope. My number is: ' + str(secretNum))\r\n    \r\n\r\n\r\n    \r\n    \r\n","repo_name":"IMPranshu/Python_Tutorials","sub_path":"guess_game.py","file_name":"guess_game.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
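# Illustrative aside: with 6 guesses for a secret in 1..20, binary search always
# wins, since ceil(log2(20)) = 5 <= 6. A sketch of that strategy (the secret value
# below is a placeholder for the demo):
lo, hi, guesses = 1, 20, 0
secret = 13
while True:
    guesses += 1
    mid = (lo + hi) // 2
    if mid == secret:
        break
    elif mid < secret:
        lo = mid + 1
    else:
        hi = mid - 1
print('found', secret, 'in', guesses, 'guesses')  # never more than 5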
{"seq_id":"39679166697","text":"#coding:utf-8\n\nimport numpy as np\nimport math\n\nclass AnchorGenerator(object):\n    def __init__(self,strides,ratios,scales,pre_anchors=None,center_offset=0.):\n\n        # calculate base sizes of anchors\n        self.strides = [(stride,)*2 for stride in strides]\n        self.base_sizes = [min(stride) for stride in self.strides] \n        \n        self.center_offset = center_offset\n\n        self.scales = scales\n        self.ratios = np.array(ratios)\n        self.scale_major = True\n        self.base_anchors = self.gen_base_anchors(pre_anchors)\n\n\n    @property\n    def num_levels(self):\n        \"\"\"Number of feature map levels used\"\"\"\n        return len(self.strides)\n\n    def gen_base_anchors(self,pre_anchors=None):\n        \"\"\"Generate base anchors.\n        Iterate over the feature scales and generate the base anchors for each one.\n        \"\"\"\n        multi_level_base_anchors = []\n        #predefined anchors(yolo)\n        if pre_anchors:\n            for anc in pre_anchors:\n                multi_level_base_anchors.append(self.gen_single_level_base_anchors(base_size=0,scales=self.scales,ratios=self.ratios,pre_anc=anc))\n        #generate anchors(faster rcnn)\n        for i, base_size in enumerate(self.base_sizes):\n            if isinstance(self.scales[0],list):\n                scales = self.scales[i]\n            else:\n                scales = self.scales\n            scales = np.array(scales)\n            multi_level_base_anchors.append(self.gen_single_level_base_anchors(base_size,scales=scales,ratios=self.ratios))\n        return multi_level_base_anchors\n    \n    def gen_single_level_base_anchors(self,\n                                      base_size,\n                                      scales,\n                                      ratios,\n                                      pre_anc=None):\n        \"\"\"Generate base anchors of a single level.\n\n        Args:\n            base_size (int | float): Basic size of an anchor.\n            scales (torch.Tensor): Scales of the anchor.\n            ratios (torch.Tensor): The ratio between the height\n                and width of anchors in a single level.\n            center (tuple[float], optional): The center of the base anchor\n                related to a single feature grid. Defaults to None.\n\n        Returns:\n            torch.Tensor: Anchors in a single-level feature map\n        \"\"\"\n
        w = base_size\n        h = base_size\n        x_center = self.center_offset * w\n        y_center = self.center_offset * h\n        if pre_anc:\n            x_center = 0\n            y_center = 0\n            ws = []\n            hs = []\n            for wh in pre_anc:\n                ws.append(wh[0])\n                hs.append(wh[1])\n            ws = np.array(ws)\n            hs = np.array(hs)\n        else:\n            h_ratios = np.sqrt(ratios)\n            w_ratios = 1 / h_ratios\n            if self.scale_major:\n                ws = (w * w_ratios[:, None] * scales[None, :]).reshape(-1)\n                hs = (h * h_ratios[:, None] * scales[None, :]).reshape(-1)\n            else:\n                ws = (w * scales[:, None] * w_ratios[None, :]).reshape(-1)\n                hs = (h * scales[:, None] * h_ratios[None, :]).reshape(-1)\n\n        # coordinates of the anchors relative to their center point (in pixels),\n        # given as (xmin, ymin, xmax, ymax)\n        base_anchors = [\n            x_center - 0.5 * ws, y_center - 0.5 * hs, x_center + 0.5 * ws,\n            y_center + 0.5 * hs\n        ]\n        base_anchors = np.stack(base_anchors, axis=-1)\n\n        return base_anchors\n\n    def _meshgrid(self, x, y, row_major=True):\n        \"\"\"Generate mesh grid of x and y.\n        Generates the grid coordinates of the feature map relative to the original\n        image, as a 1-D vector (shape=h*w)\n        \"\"\"\n        xx = x.repeat(y.shape[1],axis=0).reshape(-1)\n        yy = y.repeat(x.shape[1])\n        if row_major:\n            return xx, yy\n        else:\n            return yy, xx\n\n    def grid_anchors(self, featmap_sizes,loc=[]):\n        \"\"\"Generate grid anchors in multiple feature levels.\n        Generates the corresponding anchors from the feature map sizes.\n\n        args:\n            featmap_sizes:[(),(),()], each element is the height and width of a feature map\n            loc:[(),(),()], each element is a position coordinate on the original image\n        \"\"\"\n        assert self.num_levels == len(featmap_sizes)\n        multi_level_anchors = []\n\n        for i in range(self.num_levels):\n            \n            anchors = self.single_level_grid_anchors(self.base_anchors[i],featmap_sizes[i],self.strides[i],loc)\n            multi_level_anchors.append(anchors)\n\n        return multi_level_anchors\n\n    def single_level_grid_anchors(self,\n                                  base_anchors,\n                                  featmap_size,\n                                  stride=(16, 16),\n                                  loc=None):\n        \"\"\"Generate grid anchors of a single level.\n        Given the base anchors for one scale, generates all anchors on the feature\n        map at that scale.\n        \"\"\"\n        if loc:\n            shifts = np.array([lc*2 for lc in loc])\n        else:\n            feat_h, feat_w = featmap_size\n            shift_x = np.arange(0, feat_w).reshape(1,-1) * stride[0]\n            shift_y = np.arange(0, feat_h).reshape(1,-1) * stride[1]\n            shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)\n            shifts = np.stack([shift_xx, shift_yy, shift_xx, shift_yy], axis=-1)\n\n        all_anchors = base_anchors[None, :, :] + shifts[:, None, :] #len(loc)x 9 x 4 or h*w x 9 x 4 \n        all_anchors = all_anchors.reshape(-1, 4)\n        # first A rows correspond to A anchors of (0, 0) in feature map,\n        # then (0, 1), (0, 2), ...\n        return all_anchors\n","repo_name":"Soulempty/hello-world","sub_path":"anchor_assign/utils/anchor_generator.py","file_name":"anchor_generator.py","file_ext":"py","file_size_in_byte":5667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"17339849252","text":"\"\"\"\nTakes two bin files and prepares various checksum files for them\n\"\"\"\n\nimport sys\nimport os\nimport struct\nimport shutil\n\nif len(sys.argv) != 4:\n    print(\"usage: {} stm.bin esp.bin target_upd_folder/\".format(sys.argv[0]))\n    exit(1)\n\n_, stm, esp, target = sys.argv\n\nshutil.copy(stm, os.path.join(target, \"stm.bin\"))\nshutil.copy(esp, os.path.join(target, \"esp.bin\"))\n\ndef crc(fname):\n    crc = 0\n    with open(fname, \"rb\") as f:\n        for k in f:\n            for i in k:\n                crc ^= i\n                for j in range(8, -1, -1):\n                    crc = (crc >> 1) ^ (0x8005 if (crc & 1) else 0)\n    return crc\n\nwith open(os.path.join(target, \"chck.sum\"), \"wb\") as f:\n    f.write(struct.pack(\"\")\r\n        sys.exit(1)\r\n    video_id = sys.argv[1]\r\n    
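# Illustrative aside: the base-anchor math from the AnchorGenerator record above,
# evaluated by hand for one level (base_size=16, ratios=[0.5, 1.0, 2.0], scales=[8],
# center_offset left at its default of 0):
import numpy as np

base_size, scales, ratios = 16, np.array([8.0]), np.array([0.5, 1.0, 2.0])
h_ratios = np.sqrt(ratios)      # [0.707, 1.0, 1.414]
w_ratios = 1 / h_ratios
ws = (base_size * w_ratios[:, None] * scales[None, :]).reshape(-1)
hs = (base_size * h_ratios[:, None] * scales[None, :]).reshape(-1)
anchors = np.stack([-0.5 * ws, -0.5 * hs, 0.5 * ws, 0.5 * hs], axis=-1)
print(anchors.round(1))
# Three (xmin, ymin, xmax, ymax) boxes centered on the origin, all with the same
# area (128*128) but height:width aspect ratios of 1:2, 1:1 and 2:1.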
download_transcript(video_id)\r\n","repo_name":"jhumru/workflows","sub_path":"get_transcript.py","file_name":"get_transcript.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"41498862931","text":"import numpy as np\nfrom util.our_utils import *\nfrom lcmtypes import mbot_status_t\n\n\n# sample structure for a complex task\nclass go_to_garbage():\n\n    square_queue = []\n    original_position = 0\n    reached_destination = False\n    mode = \"TO_GARBAGE\"\n    index_to_garbage = 0\n    index_to_return = 0\n\n\n    PI = 3.141592\n\n    def __init__(self, fsm, travel_square):\n        self.fsm = fsm\n        self.travel_square = travel_square\n        self.state = \"idle\"\n\n    def operate_task(self):\n        if(self.mode == \"TO_GARBAGE\"):\n            if self.state == \"moving_in_square\" and self.fsm.mbot_status == mbot_status_t.STATUS_COMPLETE:\n                if self.index_to_garbage == 0:\n                    print(\"here\")\n                    self.state = \"reached_garbage\"\n                else:\n                    self.state = \"move_to_waypoint\"\n            if self.state == \"move_to_waypoint\":\n                print(\"Move to waypoint_forward in square\")\n                self.fsm.publish_mbot_command(mbot_command_t.STATE_MOVING, (self.square_queue[self.index_to_garbage][0], self.square_queue[self.index_to_garbage][1], 0), [], False)\n                self.fsm.mbot_state = 0\n                if(self.index_to_garbage == 0):\n                    self.index_to_garbage = len(self.square_queue) - 1\n                else:\n                    self.index_to_garbage -= 1\n                self.state = \"moving_in_square\"\n            if self.state == \"reached_garbage\":\n                print(\"Move to garbage can\")\n                trash = [\"red\", \"green\", \"orange\"]\n                if self.fsm.recent_color in trash:\n                    self.fsm.publish_mbot_command(mbot_command_t.STATE_MOVING, (-0.1, -0.30, self.PI), [], False)\n                else:\n                    self.fsm.publish_mbot_command(mbot_command_t.STATE_MOVING, (-0.1, 0, self.PI), [], False)\n                self.fsm.mbot_status = 0\n                self.state = \"dropped_garbage\"\n            if self.state == \"dropped_garbage\" and self.fsm.mbot_status == mbot_status_t.STATUS_COMPLETE:\n                print(\"dropping\")\n                set_erect(self.fsm.rexarm)\n                put_block_in_trash(self.fsm.rexarm)\n                set_snake(self.fsm.rexarm)\n                self.mode = \"TO_ORIGIN\"\n                self.state = \"move_to_waypoint\"\n                #self.mode = \"done\"\n        elif (self.mode == \"TO_ORIGIN\"):\n            if self.state == \"moving_in_square\" and self.fsm.mbot_status == mbot_status_t.STATUS_COMPLETE:\n                print(\"what the hell: \", self.index_to_return)\n                print(\"what the hell: \", self.original_position)\n                if self.index_to_return - 1 == self.original_position:\n                    print(\"RETURNED\")\n                    self.fsm.set_current_state(\"spin_state\")\n                else:\n                    self.state = \"move_to_waypoint\"\n            if self.state == \"move_to_waypoint\":\n                print(\"Move to waypoint_back in square\")\n                print(\"index: \", self.index_to_return)\n                print(\"index: \", self.original_position)\n                self.fsm.publish_mbot_command(mbot_command_t.STATE_MOVING, (self.square_queue[self.index_to_return][0], self.square_queue[self.index_to_return][1], 0), [], False)\n                self.fsm.mbot_status = 0\n                self.index_to_return += 1\n                self.state = \"moving_in_square\"\n\n    def begin_task(self):\n        print(\"Go to Garbage\")\n        self.fsm.mbot_status = mbot_status_t.STATUS_IN_PROGRESS\n        self.square_queue = self.travel_square.square_queue\n        self.original_position = self.travel_square.index\n        self.index_to_garbage = self.travel_square.index\n        self.index_to_return = 0\n        self.mode = \"TO_GARBAGE\"\n        self.state = 
\"move_to_waypoint\"\n","repo_name":"voyager1998/armLab-eecs467","sub_path":"a3_code/armlab-f19/go_to_garbage.py","file_name":"go_to_garbage.py","file_ext":"py","file_size_in_byte":3789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72371033154","text":"# Utility Functions\r\nimport numpy as np\r\nimport seaborn as sns\r\nfrom PIL import Image\r\nimport os\r\n\r\nEPOCHS = 10\r\nBATCH_SIZE = 10\r\nHEIGHT = 256\r\nWIDTH = 256\r\nN_CLASSES = 13\r\n\r\n\r\ndef load_images(name, path):\r\n img = Image.open(os.path.join(path, name))\r\n img = np.array(img)\r\n\r\n image = img[:, :256]\r\n mask = img[:, 256:]\r\n\r\n return image, mask\r\n\r\n\r\ndef bin_image(mask):\r\n # Putting data into bins using NumPy, groups similar data points.\r\n bins = np.array([20, 40, 60, 80, 100, 120, 140, 160, 180, 200, 220, 240])\r\n new_mask = np.digitize(mask, bins)\r\n return new_mask\r\n\r\n\r\ndef get_segmentation_arr(image, classes, width=WIDTH, height=HEIGHT):\r\n seg_labels = np.zeros((height, width, classes))\r\n img = image[:, :, 0]\r\n\r\n for c in range(classes):\r\n seg_labels[:, :, c] = (img == c).astype(int)\r\n return seg_labels\r\n\r\n\r\ndef give_color_to_seg_img(seg, n_classes=N_CLASSES):\r\n seg_img = np.zeros((seg.shape[0], seg.shape[1], 3)).astype('float')\r\n colors = sns.color_palette(\"hls\", n_classes)\r\n\r\n for c in range(n_classes):\r\n segc = (seg == c)\r\n seg_img[:, :, 0] += (segc * (colors[c][0]))\r\n seg_img[:, :, 1] += (segc * (colors[c][1]))\r\n seg_img[:, :, 2] += (segc * (colors[c][2]))\r\n\r\n return seg_img\r\n","repo_name":"magedmak/Semantic-Segmentation-Unet","sub_path":"utils_func.py","file_name":"utils_func.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25930299569","text":"from spt_components.spt_elements import *\nfrom spt_lib.spt_ref import *\nfrom spt_lib.spt_course_calculations import getGradeAverage\nfrom spt_lib.spt_course_calculations import ZoomFactor as getScale\n\ncontrol_map = return_spt_reference()\n\n\nclass SptCourseDetails(UserControl):\n def __init__(self, ui):\n super().__init__()\n self.ui = ui\n self.getDetails = self.mainRow()\n self.homeRef()\n self.spt_table_data = self.tableData()\n self.currentTableRow = self.getCurrentTableRow()\n self.currentStudentID = self.getCurrentStudentID()\n self.currentModuleName = self.getCurrentModuleName()\n\n def homeRef(self):\n add_to_spt_reference(\"SptCourseDetails\", self)\n\n def getCurrentModuleName(self):\n return 'new'\n\n def tableData(self):\n return {}\n\n def getCurrentTableRow(self):\n return None\n\n def getCurrentStudentID(self):\n return None\n\n def getUI(self):\n return self.ui\n\n def getTheCurrentCourse(self):\n global spt_course\n current_course = loadTemp(S.TEMP_VALUES)\n if 'thisCourse' in current_course:\n thisCourse = current_course['thisCourse']\n else:\n thisCourse = 'new'\n thesecourses = loadDatabases(thisCourse)\n return thesecourses\n\n def mainRow(self):\n spt_table = sptDataTable()\n\n studentReport = False\n courseReport = \"\"\n\n def setZoom():\n thisZoomFactor = float(thisView.controls[0].controls[12].value)\n\n getScale.setScaleFactor2(thisZoomFactor)\n N.load_last_zoom_factor()\n print(loadTemp(S.TEMP_VALUES)['zoom'])\n thisView.update()\n pass\n\n def getChart():\n thisView.controls[3].controls[0].content.controls[0].controls.clear()\n 
thisView.controls[3].controls[0].content.controls[1].controls.clear()\n thisView.update()\n if str(thisView.controls[0].controls[2].value) == \"\" or str(thisView.controls[0].controls[2].value) == None:\n return 1\n else:\n average = getGradeAverage(loadTemp(S.TEMP_VALUES)['thisCourse'], 0,\n str(thisView.controls[0].controls[2].value))\n average2 = getGradeAverage(loadTemp(S.TEMP_VALUES)['thisCourse'], 1, 'practical')\n average3 = getGradeAverage(loadTemp(S.TEMP_VALUES)['thisCourse'], 1, 'theory')\n\n data = getStudentGrdaes(loadTemp(S.TEMP_VALUES)['thisCourse'], 0,\n str(thisView.controls[0].controls[2].value))\n data2 = getStudentGrdaes(loadTemp(S.TEMP_VALUES)['thisCourse'], 1, 'practical')\n data3 = getStudentGrdaes(loadTemp(S.TEMP_VALUES)['thisCourse'], 1, 'theory')\n\n thisChart = sptChart(loadTemp(S.TEMP_VALUES)['thisCourse'], data[2], data[2], data[1], 'Module Grades')\n thisBadge = sptBadge('Module Average', average)\n\n thisChart2 = sptChart(loadTemp(S.TEMP_VALUES)['thisCourse'], data3[2], data3[2], data3[1],\n 'Course Theory Grades')\n thisBadge2 = sptBadge('Course Theory Average', average3)\n\n thisChart3 = sptChart(loadTemp(S.TEMP_VALUES)['thisCourse'], data2[2], data2[2], data2[1],\n 'Course Practical Grades')\n thisBadge3 = sptBadge('Course Practical Average', average2)\n\n thisView.controls[3].controls[0].content.controls[0].controls.append(\n Row(width=N.SPT_1000, alignment=MainAxisAlignment.SPACE_BETWEEN, controls=[thisChart, thisBadge]))\n\n thisView.controls[3].controls[0].content.controls[0].controls.append(\n Row(width=N.SPT_1000, alignment=MainAxisAlignment.SPACE_BETWEEN, controls=[thisChart2, thisBadge2]))\n\n thisView.controls[3].controls[0].content.controls[0].controls.append(\n Row(width=N.SPT_1000, alignment=MainAxisAlignment.SPACE_BETWEEN, controls=[thisChart3, thisBadge3]))\n\n thisView.update()\n\n def analysisAndDetails(data, type):\n if type == 0:\n buttonData = json.loads(data)\n studentID = buttonData['tooltip']\n else:\n studentID = data\n\n thisModule = str(thisView.controls[0].controls[2].value)\n thisCourse = loadTemp(S.TEMP_VALUES)['thisCourse']\n\n thisStudent = getStudetByID(thisCourse, studentID)\n\n index = loadDatabases(thisCourse)[1]['modules_count'][thisModule]\n\n studentModuleGrade = loadDatabases(thisCourse)[1]['modules'][index][studentID]\n\n inCourseTheory = checkIfExists2(studentID, loadDatabases(thisCourse)[1]['majorAssessments'][0])\n inCoursePractical = checkIfExists2(studentID, loadDatabases(thisCourse)[1]['majorAssessments'][1])\n\n studentQR = getStudentQR(thisCourse, thisModule, studentID)\n\n if not inCourseTheory:\n studentTheoryGrade = 0\n else:\n studentTheoryGrade = loadDatabases(thisCourse)[1]['majorAssessments'][0][studentID]\n\n if not inCoursePractical:\n studentPracticalGrade = 0\n else:\n studentPracticalGrade = loadDatabases(thisCourse)[1]['majorAssessments'][1][studentID]\n\n studentDetails = thisView.controls[2].controls[3].content.controls[0].controls\n analysis = thisView.controls[2].controls[3].content.controls[2].controls\n\n text_field = thisView.controls[2].controls[3].content.controls[2].controls[0]\n dialogHome = thisView.controls[2].controls[3].content.controls[3].controls\n\n studentDetails[0].content.src = f\"{studentQR}\"\n studentDetails[1].controls[0].controls[1].value = thisStudent['id']\n studentDetails[1].controls[1].controls[1].value = thisStudent['firstName']\n studentDetails[1].controls[2].controls[1].value = thisStudent['lastName']\n\n studentDetails[2].controls[0].controls[1].value = 
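# Illustrative aside: getGradeAverage(course, kind, name) is called throughout the
# record above but its body lives elsewhere in the project. A hypothetical reading
# of its contract, inferred only from the call sites (kind 0 = module average by
# module name, kind 1 = course-wide 'theory'/'practical' average); the grades dict
# below is an assumption mirroring the loadDatabases() grade structure:
def get_grade_average_sketch(grades, kind, name):
    if kind == 0:
        values = [v for m in grades['modules'] if m['name'] == name
                  for k, v in m.items() if k != 'name']
    else:
        values = list(grades['majorAssessments'][0 if name == 'theory' else 1].values())
    return round(sum(values) / len(values), 1) if values else 0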
thisStudent['email']\n studentDetails[2].controls[1].controls[1].value = thisStudent['division']\n studentDetails[2].controls[2].controls[1].value = thisStudent['station']\n\n studentDetails[3].controls[0].controls[1].value = studentModuleGrade\n studentDetails[3].controls[1].controls[1].value = studentTheoryGrade\n studentDetails[3].controls[2].controls[1].value = studentPracticalGrade\n\n average = str(getGradeAverage(thisCourse, 0, thisModule))\n average2 = str(getGradeAverage(thisCourse, 1, 'practical'))\n average3 = str(getGradeAverage(thisCourse, 1, 'theory'))\n\n data0 = 'This Course: ' + thisCourse + ' This Module: ' + thisModule + \"\\n\"\n data1 = 'ID: ' + thisStudent['id'] + \" First Name: \" + thisStudent['firstName'] + \" Last Name: \" + \\\n thisStudent['lastName'] + \"\\n\"\n data2 = 'Rank: ' + thisStudent['rank'] + ' Division: ' + thisStudent['division'] + ' Station: ' + \\\n thisStudent['station'] + \"\\n\"\n data3 = 'Student Module Grade: ' + str(studentModuleGrade) + ' Student Course Theory Grade: ' + str(\n studentTheoryGrade) + ' Student Course Practical Grade: ' + str(studentPracticalGrade) + \"\\n\"\n data4 = 'Average Combined Grade for this Module: ' + average + ' Average Combined Theory Grade for this Course: ' + average2 + ' Average Combined Practical Grade for this Course: ' + average3 + \"\\n\"\n\n thisStudentReport = data0 + data1 + data2 + data3 + data4\n\n analysisDialog = sptDialog3(thisCourse, thisModule, studentID, thisStudentReport, text_field,\n studentDetails[0].content)\n imageDialog = sptDialog4(thisCourse, thisModule, studentID)\n dialogHome.clear()\n dialogHome.append(analysisDialog)\n dialogHome.append(imageDialog)\n\n thisView.update()\n\n def generateReport():\n try:\n thisView.controls[2].controls[3].content.controls[3].controls[0].open = True\n thisView.update()\n except Exception as e:\n print(e)\n\n def showQR():\n try:\n thisView.controls[2].controls[3].content.controls[3].controls[1].open = True\n thisView.update()\n except Exception as e:\n print(e)\n\n def populateTable(val):\n spt_table.content.rows.append(val)\n spt_table.visible = True\n spt_table.update()\n\n def getCourseImage():\n if self.getTheCurrentCourse()[2]['image'] != \"\":\n return str(self.getTheCurrentCourse()[2]['image'])\n else:\n return S.DEFAULT_IMAGE\n\n def getStudentOfModule(e):\n spt_table.content.rows.clear()\n spt_table.update()\n # [attendance_db, grades_db, info_db, students_db]\n db1 = self.getTheCurrentCourse()[1]['modules']\n db2 = self.getTheCurrentCourse()[3]['students']\n db3 = self.getTheCurrentCourse()[1]['majorAssessments']\n pg = 0\n tg = 0\n currentModule = {}\n for i in db1:\n if i['name'] != str(thisView.controls[0].controls[2].value):\n continue\n else:\n currentModule = i\n self.currentModuleName = i['name']\n break\n\n def sendSelf(e):\n\n if \"object at \" in str(e.control):\n buttonData = str(e.control.tooltip)\n type = 1\n else:\n type = 0\n buttonData = str(e.control).split('textbutton ')[1].replace(\"'\", '\"')\n\n analysisAndDetails(buttonData, type)\n\n for x in db2:\n if x['id'] in currentModule:\n newRow = DataRow(cells=[])\n newRow.cells.append(DataCell(\n TextButton(x['firstName'],\n expand=True,\n tooltip=str(x['id']), on_click=lambda e: sendSelf(e),\n scale=getScale.getScaleFactor2()\n )\n ))\n\n if x['id'] not in db3[0]:\n tg = 0\n else:\n tg = db3[0][x['id']]\n\n if x['id'] not in db3[1]:\n pg = 0\n else:\n pg = db3[1][x['id']]\n\n newRow.cells.append(DataCell(\n TextButton(x['lastName'], expand=True, tooltip=str(x['id']), 
on_click=lambda e: sendSelf(e),\n scale=getScale.getScaleFactor2())))\n newRow.cells.append(\n DataCell(Text(x['division'], expand=True, size=N.SPT_12, scale=getScale.getScaleFactor2())))\n newRow.cells.append(\n DataCell(Text(x['station'], expand=True, size=N.SPT_12, scale=getScale.getScaleFactor2())))\n newRow.cells.append(DataCell(\n Text(currentModule[x['id']], expand=True, size=N.SPT_12, scale=getScale.getScaleFactor2())))\n newRow.cells.append(\n DataCell(Text(tg, expand=True, size=N.SPT_12, scale=getScale.getScaleFactor2())))\n newRow.cells.append(\n DataCell(Text(pg, expand=True, size=N.SPT_12, scale=getScale.getScaleFactor2())))\n newRow.cells.append(DataCell(IconButton(icon=icons.EDIT, tooltip=str(x['id']),\n icon_color=C().SPT_MEDIUM_DARK(),\n on_click=lambda e: sptEdit(e),\n scale=getScale.getScaleFactor2())))\n\n self.spt_table_data[str(x['id'])] = newRow\n\n populateTable(newRow)\n getChart()\n thisView.update()\n\n def isInt(y):\n return bool(re.match(r\"^\\d+$\", str(y)))\n\n def sptEdit(id):\n\n if 'object at ' not in str(id.control):\n cont = str(id.control).split('iconbutton ')[1].replace(\"'\", '\"')\n contentDict = json.loads(cont)\n key = contentDict['tooltip']\n else:\n key = str(id.control.tooltip)\n\n self.currentStudentID = key\n thisView.controls[2].controls[1].content.content.controls[0].controls.clear()\n thisView.controls[2].controls[1].open = True\n thisView.controls[2].controls[1].content.content.controls[0].controls.append(Row(controls=[\n TextField(value=str(self.spt_table_data[key].cells[0].content.text)\n , border=InputBorder.UNDERLINE, label='First Name:'),\n TextField(value=str(self.spt_table_data[key].cells[1].content.text)\n , border=InputBorder.UNDERLINE, label='Last Name:')\n ]))\n\n thisView.controls[2].controls[1].content.content.controls[0].controls.append(Row(controls=[\n TextField(value=str(self.spt_table_data[key].cells[2].content.value)\n , border=InputBorder.UNDERLINE, label='Division:'),\n TextField(value=str(self.spt_table_data[key].cells[3].content.value)\n , border=InputBorder.UNDERLINE, label='Station:')\n ]))\n\n thisView.controls[2].controls[1].content.content.controls[0].controls.append(Row(controls=[\n TextField(value=str(self.spt_table_data[key].cells[4].content.value)\n , border=InputBorder.UNDERLINE, label='Module Grade:'),\n TextField(value=str(self.spt_table_data[key].cells[5].content.value)\n , border=InputBorder.UNDERLINE, label='Course Theory Grade:')\n ]))\n\n thisView.controls[2].controls[1].content.content.controls[0].controls.append(Row(controls=[\n TextField(value=str(self.spt_table_data[key].cells[6].content.value)\n , border=InputBorder.UNDERLINE, label='Course Practical Grade:')\n ]))\n\n self.currentTableRow = self.spt_table_data[key]\n thisView.controls[2].controls[1].update()\n thisView.controls[2].controls[1].open = True\n\n def updateRec(e):\n getInt1 = isInt(thisView.controls[2].controls[1].content.content.controls[0].controls[2].controls[1].value)\n getInt2 = isInt(thisView.controls[2].controls[1].content.content.controls[0].controls[3].controls[0].value)\n getInt3 = isInt(thisView.controls[2].controls[1].content.content.controls[0].controls[2].controls[0].value)\n if getInt1 is True:\n theoryGrade = int(\n thisView.controls[2].controls[1].content.content.controls[0].controls[2].controls[1].value)\n else:\n theoryGrade = '0'\n\n if getInt2 is True:\n practicalGrade = int(\n thisView.controls[2].controls[1].content.content.controls[0].controls[3].controls[0].value)\n else:\n practicalGrade = '0'\n\n if getInt3 
is True:\n moduleGrade = int(\n thisView.controls[2].controls[1].content.content.controls[0].controls[2].controls[0].value)\n else:\n moduleGrade = '0'\n thisView.controls[2].controls[1].open = False\n # fn\n self.currentTableRow.cells[0].content.text = \\\n thisView.controls[2].controls[1].content.content.controls[0].controls[0].controls[0].value\n # ln\n self.currentTableRow.cells[1].content.text = \\\n thisView.controls[2].controls[1].content.content.controls[0].controls[0].controls[1].value\n # div\n self.currentTableRow.cells[2].content.value = \\\n thisView.controls[2].controls[1].content.content.controls[0].controls[1].controls[0].value\n # st\n self.currentTableRow.cells[3].content.value = \\\n thisView.controls[2].controls[1].content.content.controls[0].controls[1].controls[1].value\n # mgr\n self.currentTableRow.cells[4].content.value = moduleGrade\n # tgr\n self.currentTableRow.cells[5].content.value = theoryGrade\n # pgr\n self.currentTableRow.cells[6].content.value = practicalGrade\n thisView.controls[2].controls[1].update()\n\n # [attendance_db, grades_db, info_db, students_db]\n\n selectedModule = str(thisView.controls[0].controls[2].value)\n\n moduleIndex = getObjectIndexFromList(self.getTheCurrentCourse()[1], 'modules', 'name', selectedModule)\n studentIndex = getObjectIndexFromList(self.getTheCurrentCourse()[3], 'students', 'id',\n self.currentStudentID)\n\n addStudentFinalGrades(loadTemp(S.TEMP_VALUES)['thisCourse'], self.currentStudentID, theory=theoryGrade,\n practical=practicalGrade)\n\n db1 = self.getTheCurrentCourse()[1]\n db2 = self.getTheCurrentCourse()[3]\n db1['modules'][moduleIndex][self.currentStudentID] = int(moduleGrade)\n db2['students'][studentIndex]['firstName'] = \\\n thisView.controls[2].controls[1].content.content.controls[0].controls[0].controls[0].value\n db2['students'][studentIndex]['lastName'] = \\\n thisView.controls[2].controls[1].content.content.controls[0].controls[0].controls[1].value\n db2['students'][studentIndex]['division'] = \\\n thisView.controls[2].controls[1].content.content.controls[0].controls[1].controls[0].value\n db2['students'][studentIndex]['station'] = \\\n thisView.controls[2].controls[1].content.content.controls[0].controls[1].controls[1].value\n\n print(int(moduleGrade))\n\n saveData(db1, S.GRADES_DATABASE, loadTemp(S.TEMP_VALUES)['thisCourse'])\n saveData(db2, S.STUDENTS_DATABASE, loadTemp(S.TEMP_VALUES)['thisCourse'])\n getChart()\n thisView.update()\n\n def closeBs(e):\n thisView.controls[2].controls[1].open = False\n thisView.controls[2].controls[1].update()\n\n def deleteStudent(e):\n deleteStudentsFromModule(loadTemp(S.TEMP_VALUES)['thisCourse'], self.currentModuleName,\n self.currentStudentID)\n closeBs(0)\n getStudentOfModule(0)\n\n def updateImg():\n thisView.update()\n global spt_course\n thisView.controls[0].controls[0].content.src = f\"{P.IMAGES_DIR + getCourseImage()}\"\n\n thisFilePicker = sptFilePicker()\n\n def _setCourseImage():\n thisFilePicker.pick_files(\n allow_multiple=False,\n file_type=FilePickerFileType.IMAGE\n )\n\n bottomSheet2 = sptBottomSheet2()\n thisDialog = sptDialog(loadTemp(S.TEMP_VALUES)['thisCourse'], self.ui)\n thisDialog2 = sptDialog2(loadTemp(S.TEMP_VALUES)['thisCourse'], self.ui)\n\n def launchDialog():\n thisDialog.open = True\n thisView.update()\n\n def launchDialog2():\n thisDialog2.open = True\n thisView.update()\n\n def open_bs2(e):\n bottomSheet2.open = True\n bottomSheet2.update()\n\n def getStudent(e):\n thisCourse = loadTemp(S.TEMP_VALUES)['thisCourse']\n thisModule = 
self.currentModuleName\n\n try:\n index = loadDatabases(thisCourse)[1]['modules_count'][thisModule]\n studentID = thisView.controls[0].controls[7].content.controls[0].controls[0].value.split(' ')[0]\n\n inModule = checkIfExists2(studentID, loadDatabases(thisCourse)[1]['modules'][index])\n\n if not inModule:\n print(studentID)\n addStudentsToModule(thisCourse, thisModule, studentID)\n getStudentOfModule(0)\n except Exception as e:\n print(e)\n\n thisView = Row(\n # scale=getScale.getScaleFactor(),\n spacing=N.SPT_50,\n scroll=ScrollMode.ALWAYS,\n alignment=MainAxisAlignment.START,\n vertical_alignment=CrossAxisAlignment.START,\n expand=True,\n controls=[\n\n Column(\n # scale=getScale.getScaleFactor(),\n alignment=MainAxisAlignment.START,\n controls=[\n Container(\n scale=getScale.getScaleFactor2(),\n on_click=lambda e: _setCourseImage(),\n border_radius=N.SPT_10,\n padding=N.SPT_10,\n width=N.SPT_200,\n bgcolor=None,\n content=Image(\n scale=getScale.getScaleFactor(),\n width=N.SPT_200,\n height=N.SPT_100,\n border_radius=border_radius.all(10),\n fit=ImageFit.CONTAIN,\n src=f\"{P.IMAGES_DIR + getCourseImage()}\"\n )\n ),\n\n TextField(\n scale=getScale.getScaleFactor(),\n multiline=True,\n width=N.SPT_250,\n height=N.SPT_100 + N.SPT_40,\n read_only=True,\n text_size=N.SPT_10 + N.SPT_2,\n text_style=TextStyle(weight=FontWeight.BOLD)\n ),\n Dropdown(\n scale=getScale.getScaleFactor(),\n content_padding=N.SPT_5,\n label=\"Course Modules\",\n hint_text=\"Choose a module\",\n height=N.SPT_50 - N.SPT_10,\n width=N.SPT_250,\n on_change=lambda e: getStudentOfModule(e),\n text_size=N.SPT_12,\n\n ),\n FilledTonalButton(\n height=N.SPT_30,\n text=\"Create module\",\n on_click=lambda e: launchDialog2()\n ),\n FilledTonalButton(\n height=N.SPT_30,\n text=\"Add students to Course\",\n on_click=lambda e: open_bs2(e)\n ),\n FilledTonalButton(\n height=N.SPT_30,\n text=\"Delete Course\",\n on_click=lambda e: launchDialog()\n ),\n thisDialog,\n Container(\n scale=getScale.getScaleFactor(),\n content=Column(\n scale=getScale.getScaleFactor(),\n controls=[\n Row(\n scale=getScale.getScaleFactor(),\n controls=[\n Dropdown(\n scale=getScale.getScaleFactor(),\n width=N.SPT_300 - N.SPT_20,\n height=N.SPT_50+N.SPT_10,\n\n label='Students',\n text_size=N.SPT_12\n ),\n FilledTonalButton(text=\"Add to module\", on_click=lambda e: getStudent(e), height=N.SPT_30,)\n ]\n )\n\n ]\n )\n ),\n thisDialog2,\n Divider(),\n FilledTonalButton(\n text=\"Home\",\n on_click=lambda e: self.ui.ui.go('/'),\n height=N.SPT_30\n ),\n thisFilePicker,\n # Slider(min=0, max=2, divisions=20, label=\"Zoom x {value}\",\n # scale=loadTemp(S.TEMP_VALUES)['zoom'], value=loadTemp(S.TEMP_VALUES)['zoom'],\n # on_change=lambda e: setZoom())\n ]\n ),\n Divider(),\n Column(\n scale=getScale.getScaleFactor(),\n width=N.SPT_950,\n height=N.SPT_1700,\n scroll=ScrollMode.ALWAYS,\n # expand=True,\n spacing=N.SPT_50,\n controls=[\n\n spt_table,\n\n BottomSheet(\n content=Container(\n scale=getScale.getScaleFactor(),\n padding=N.SPT_10,\n border_radius=N.SPT_10 + N.SPT_2,\n content=Column(\n scale=getScale.getScaleFactor(),\n tight=True,\n controls=[\n Column(\n scale=getScale.getScaleFactor(),\n tight=True\n ),\n Divider(),\n Row(\n scale=getScale.getScaleFactor(),\n controls=[\n FilledButton(\"Save and close\", on_click=lambda e: updateRec(e)),\n FilledButton(\"Close without saving\", on_click=lambda e: closeBs(e)),\n FilledButton(\"Remove Student From module\",\n on_click=lambda e: deleteStudent(e)),\n ]\n )\n ]\n )\n )\n ),\n bottomSheet2,\n Container(\n 
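# student details panel: QR code image, identity fields, grade summary and the analysis box\n                            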
scale=getScale.getScaleFactor(),\n bgcolor=C().SPT_LIGHT(),\n width=N.SPT_950,\n height=N.SPT_600,\n padding=N.SPT_10,\n border=border.all(N.SPT_1, C().SPT_DARK()),\n border_radius=N.SPT_10,\n content=Column(\n\n controls=[\n Row(\n scale=getScale.getScaleFactor(),\n spacing=N.SPT_10,\n controls=[\n Container(\n scale=getScale.getScaleFactor(),\n width=N.SPT_200,\n height=N.SPT_200,\n on_click=lambda e: showQR(),\n content=Image(\n scale=getScale.getScaleFactor(),\n width=N.SPT_200,\n height=N.SPT_200,\n border_radius=border_radius.all(N.SPT_10),\n fit=ImageFit.CONTAIN,\n src=f\"{P.IMAGES_DIR + S.DEFAULT_QR}\"\n )),\n Column(\n scale=getScale.getScaleFactor(),\n spacing=N.SPT_1,\n controls=[\n Row(\n scale=getScale.getScaleFactor(),\n controls=[Text('ID:', size=N.SPT_12),\n TextField(read_only=True,\n border_color=colors.TRANSPARENT,\n value=\"\", width=N.SPT_180,\n text_size=N.SPT_12, height=N.SPT_30)]\n ),\n Row(\n scale=getScale.getScaleFactor(),\n controls=[Text('First Name:', size=N.SPT_12),\n TextField(read_only=True,\n border_color=colors.TRANSPARENT,\n value=\"\", width=N.SPT_180,\n text_size=N.SPT_12, height=N.SPT_30)]\n ),\n Row(\n scale=getScale.getScaleFactor(),\n controls=[Text('Last Name:', size=N.SPT_12),\n TextField(read_only=True,\n border_color=colors.TRANSPARENT,\n value=\"\", width=N.SPT_180,\n text_size=N.SPT_12, height=N.SPT_30)]\n )\n ]\n ),\n Column(\n scale=getScale.getScaleFactor(),\n spacing=N.SPT_1,\n controls=[\n Row(\n scale=getScale.getScaleFactor(),\n controls=[Text('Email:', size=N.SPT_12),\n TextField(read_only=True,\n border_color=colors.TRANSPARENT,\n value=\"\",\n width=N.SPT_180, text_size=N.SPT_12,\n height=N.SPT_30)]\n ),\n Row(\n scale=getScale.getScaleFactor(),\n controls=[Text('Division:', size=N.SPT_12),\n TextField(read_only=True,\n border_color=colors.TRANSPARENT,\n value=\"\", width=N.SPT_180,\n text_size=N.SPT_12, height=N.SPT_30)]\n ),\n Row(\n scale=getScale.getScaleFactor(),\n controls=[Text('Station:', size=N.SPT_12),\n TextField(read_only=True,\n border_color=colors.TRANSPARENT,\n value=\"\", width=N.SPT_180,\n text_size=N.SPT_12, height=N.SPT_30)]\n )\n ]\n ),\n Column(\n scale=getScale.getScaleFactor(),\n spacing=N.SPT_1,\n controls=[\n Row(\n scale=getScale.getScaleFactor(),\n controls=[Text('Module Grade:', size=N.SPT_12),\n TextField(read_only=True,\n border_color=colors.TRANSPARENT, value=\"\",\n width=N.SPT_180, text_size=N.SPT_12,\n height=N.SPT_30)]\n ),\n Row(\n scale=getScale.getScaleFactor(),\n controls=[Text('Course Theory Grade:', size=N.SPT_12),\n TextField(read_only=True,\n border_color=colors.TRANSPARENT, value=\"\",\n width=N.SPT_180, text_size=N.SPT_12,\n height=N.SPT_30)]\n ),\n Row(\n scale=getScale.getScaleFactor(),\n controls=[Text('Course Practical Grade:', size=N.SPT_12),\n TextField(read_only=True,\n border_color=colors.TRANSPARENT, value=\"\",\n width=N.SPT_180, text_size=N.SPT_12,\n height=N.SPT_30)]\n )\n ]\n ),\n ]\n ),\n Row(\n controls=[\n Text(\"Analysis:\", size=N.SPT_30 + N.SPT_2, style=TextThemeStyle.TITLE_LARGE,\n scale=getScale.getScaleFactor2(), )\n ]\n ),\n Row(\n controls=[\n TextField(multiline=True, max_lines=8, scale=getScale.getScaleFactor2()),\n TextButton(\"Generate analysis\", on_click=lambda e: generateReport(),\n scale=getScale.getScaleFactor2()),\n\n ]\n ),\n Row(\n scale=getScale.getScaleFactor(),\n controls=[\n\n ]\n )\n ]\n )\n )\n ]\n ),\n Column(\n scroll=ScrollMode.ALWAYS,\n\n controls=[\n Container(\n width=N.SPT_1100,\n\n content=Row(\n width=N.SPT_1100,\n 
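# bottom strip: a wide scrollable Row holding two currently-empty Columns\n                                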
scroll=ScrollMode.ALWAYS,\n                                spacing=N.SPT_50,\n                                vertical_alignment=CrossAxisAlignment.END,\n                                controls=[\n                                    Column(\n\n                                        scroll=ScrollMode.ALWAYS,\n                                        spacing=N.SPT_12,\n                                        controls=[\n                                        ]\n                                    ),\n                                    Column(\n                                        spacing=N.SPT_400,\n                                        controls=[\n                                        ]\n                                    )\n                                ]\n                            )\n                        )\n                    ]\n                )\n\n            ]\n        )\n\n        def getObjects(_source, _key):\n            displayInfo = f\"Title: {self.getTheCurrentCourse()[2]['name']}\\nStart Date: {self.getTheCurrentCourse()[2]['start']}\\nEnd Date: {self.getTheCurrentCourse()[2]['end']}\\nStudents: {self.getTheCurrentCourse()[2]['students']}\"\n            thisView.controls[0].controls[1].value = displayInfo\n            for i in _source:\n                thisView.controls[0].controls[2].options.append(dropdown.Option(i[_key]))\n\n            studentDropDown = thisView.controls[0].controls[7].content.controls[0].controls[0]\n            loadStudents = loopStudents(loadTemp(S.TEMP_VALUES)['thisCourse'], 2)\n\n            for i in loadStudents:\n                studentDropDown.options.append(dropdown.Option(i))\n\n        getObjects(self.getTheCurrentCourse()[1][\"modules\"], 'name')\n        return thisView\n\n    def build(self):\n        return self.getDetails\n","repo_name":"OShane-McKenzie/student_progress_tracker","sub_path":"spt_components/spt_course_details.py","file_name":"spt_course_details.py","file_ext":"py","file_size_in_byte":40002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}{"seq_id":"29832550819","text":"# Write a program (function!) that takes a list and returns a new list that contains all the elements of the first\n# list minus all the duplicates\n# Extras: Write two different functions to do this - one using a loop and constructing a list, and another using sets.\n\n\ndef remove_duplicates(input_list: list) -> list:\n    return list(set(input_list))\n\n\ndef remove_duplicates_with_loop(input_list: list) -> list:\n    output = []\n    for element in input_list:\n        if element not in output:\n            output.append(element)\n    return output\n\n\nif __name__ == '__main__':\n    list1 = [5, 8, 'apple', 'spam', 'apple', 8, 12]\n    print(remove_duplicates(list1))\n    print(remove_duplicates_with_loop(list1))\n","repo_name":"PatrykGorol/Python_beginner_excercises","sub_path":"ex_14_list_remove_duplicates.py","file_name":"ex_14_list_remove_duplicates.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}{"seq_id":"12552526193","text":"import os\n#!pip install PyPDF2\nfrom PyPDF2 import PdfReader, PdfMerger\n\n# This code runs on PyPDF2 version 3.0.1\n\n# put the pdf files in the 'target' folder.\ntarget = 'target'\n\nmerger = PdfMerger()\n\nfilelist = []\nfor filename in os.listdir(target):\n    if filename.endswith(\".pdf\"):\n        with open(os.path.join(target, filename), \"rb\") as f:\n            filelist.append(filename)\n            pdf_file = PdfReader(f)\n            merger.append(pdf_file)\n            f.close()\n\noutput_file = \"merged_output.pdf\"\nwith open(output_file, 'wb') as f:\n    merger.write(f)\n    f.close()\n\nprint(filelist)\nprint(f\"All (of {len(filelist)}) pdf files in '{target}' folder were merged to {output_file}.\")\n\n","repo_name":"hsjoo-lghtsnd/Utils","sub_path":"source/PDFmerger.py","file_name":"PDFmerger.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}{"seq_id":"13170666613","text":"x = 0\r\ny = 200\r\nz = 2\r\n\r\nli = [x, y, z]\r\nli = sorted(li)\r\n\r\nfor x, i in enumerate(li):\r\n\r\n    sign = ''\r\n\r\n    if x in [0, 1]:\r\n        if i == li[x+1]:\r\n            sign = '='\r\n        else:\r\n            sign = '<'\r\n\r\n    print(f'{i} {sign} ', 
end='') \r\n","repo_name":"Memetelve/UG","sub_path":"Wstęp do programowania/lesson1/ex8.py","file_name":"ex8.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36958842329","text":"import numpy as np\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, LSTM\nfrom keras.preprocessing import sequence\n\nfrom azureml.logging import get_azureml_logger\nrun_logger = get_azureml_logger()\n\nepochs = 100\nbatch_size = 10\ntime_steps = 153\nfeatures = 37\n\nfeatures_path = 'data/sequence_data.npy'\nlabels_path = 'data/sequence_labels.npy'\n\n# load data from .npy files into numpy arrays\nX = np.load(features_path)\nY = np.load(labels_path)\n\n# split data into training and test sets \n# this dataset is a 3D matrix, so sklearn train_test_split will not work\n# split your data as it makes sense here\ntrain_X1 = X[0:69,:,:]\ntrain_X2 = X[130:199,:,:]\ntrain_X = np.concatenate((train_X1,train_X2), axis=0)\ntest_X = X[70:129,:,:]\ntrain_Y1 = Y[0:69,:]\ntrain_Y2 = Y[130:199,:]\ntrain_Y = np.concatenate((train_Y1,train_Y2), axis = 0)\ntest_Y = Y[70:129,:]\n\n# build LSTM layers\nmodel = Sequential()\nmodel.add(LSTM(100, dropout=0.2, input_shape=(time_steps, features)))\nmodel.add(Dense(1, activation='sigmoid'))\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\nprint(model.summary())\nmodel.fit(train_X, train_Y, validation_data=(test_X, test_Y), epochs=epochs, batch_size=batch_size)\n\n# score model and log accuracy and parameters\nscores = model.evaluate(test_X, test_Y, verbose=0)\nprint(\"Accuracy: %.2f%%\" % (scores[1]*100))\n\nrun_logger.log(\"Epochs\", epochs)\nrun_logger.log(\"Batch Size\", batch_size)\nrun_logger.log(\"Accuracy\", scores[1]*100)\n","repo_name":"laurentran/sequence-labeling","sub_path":"train_lstm.py","file_name":"train_lstm.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"61"} +{"seq_id":"31302762248","text":"from typing import List\nimport numpy as np\nfrom model import Detection\nfrom model import Result\n\n\nclass Frame:\n\n def __init__(self, id: int):\n self.id = id\n self.detections = []\n self.ground_truth = []\n self.cached_result = None\n\n def get_detection_iou(self, ignore_classes=False) -> List[float]:\n ret = []\n for ground_truth in self.ground_truth:\n max_iou = 0\n for detection in self.detections:\n iou = detection.iou(ground_truth)\n if (ignore_classes or detection.label == ground_truth.label) and iou > max_iou:\n max_iou = iou\n\n ret.append(max_iou)\n return ret\n\n def get_detection_iou_mean(self, ignore_classes=False) -> float:\n iou_list = self.get_detection_iou(ignore_classes)\n if len(iou_list) > 0:\n return float(np.mean(iou_list))\n else:\n return 0\n\n def to_result(self, ignore_classes=False) -> Result:\n if self.cached_result is None:\n tp = 0\n for ground_truth in self.ground_truth:\n for detection in self.detections:\n if detection.iou(ground_truth) > 0.5 and (ignore_classes or detection.label == ground_truth.label):\n tp += 1\n break\n\n fp = len(self.detections) - tp\n fn = len(self.ground_truth) - tp\n self.cached_result = Result(tp, fp, 0, fn)\n\n return 
self.cached_result\n","repo_name":"mcv-m6-video/mcv-m6-2019-team5","sub_path":"w1_w2/model/frame.py","file_name":"frame.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}{"seq_id":"13018705818","text":"# 10 - Create the top_5_categories function\n# This function lists the five categories with the most occurrences in the\n# database.\nfrom tech_news.database import find_news\n\n\ndef top_5_categories():\n    # The function must fetch the categories from the database\n    category = find_news()\n    # and compute their \"popularity\" based on the number of occurrences;\n    category_list = {}\n    for info in category:\n        category_info = info[\"category\"]\n        print(f\"\\nCategories: {category_info}\")\n        if category_info in category_list:\n            category_list[category_info] = category_list[category_info] + 1\n        else:\n            category_list[category_info] = 1\n    print(f\"\\nvvv\\nList with occurrences: {category_list}\")\n\n    # The returned categories must be ordered from the most popular to the\n    # least popular, i.e. categories that appear in more news items come first;\n    # In case of a tie, break it by alphabetical order of category\n    def tupla_list_item(category_info):\n        return (-category_info[1], category_info[0])\n    category_list_sorted = sorted(category_list.items(), key=tupla_list_item)\n    print(f\"\\nvvv\\nList in order: {category_list_sorted}\")\n\n    # The top 5 categories of the analysis must be returned in a list in the\n    # format [\"category1\", \"category2\"];\n    # If there are fewer than five categories in the database, all existing\n    # categories must be returned;\n    # If no categories are available, an empty list must be returned.\n    top_5_list = []\n    for category_info, _ in category_list_sorted[0:5]:\n        top_5_list.append(category_info)\n    print(f\"\\nvvv\\nonly the first 5: {top_5_list}\")\n    return top_5_list\n","repo_name":"leandrojamir/python-tech-news","sub_path":"tech_news/analyzer/ratings.py","file_name":"ratings.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}{"seq_id":"16806586213","text":"# Function that finds the maximum value among a die's side faces\ndef max_value(dice, idx, result):\n    if idx == 0 or idx == 5:\n        result += max(dice[1], dice[2], dice[3], dice[4])\n    if idx == 1 or idx == 2:\n        result += max(dice[idx-1], dice[idx+1], dice[idx+3], dice[(idx+4)%6])\n    if idx == 3 or idx == 4:\n        result += max(dice[idx-1], dice[idx-3], dice[idx+1], dice[(idx+2)%6])\n    return result\n\nN = int(input())\n\ndice_list = [list(map(int, input().split())) for _ in range(N)]\n\n# Once the top face of the first die is fixed, the remaining dice are determined automatically\n# Handle each candidate number for the first die's top face, one at a time\nmax_list = []\nfor i in range(6):\n    # number on the top face of the first die\n    idx_t = i\n\n    # sum of the side faces\n    result = 0\n\n    # find the largest number among the side faces of the first die\n    result = max_value(dice_list[0], idx_t, result)\n\n    # process the stacked dice one by one\n    for j in range(1,N):\n        # find the number equal to the previous die's top face and define it as the bottom face\n        idx_b = dice_list[j].index(dice_list[j-1][idx_t])\n\n        # find the maximum among the side faces\n        result = max_value(dice_list[j], idx_b, result)\n\n        # find the top face\n        if idx_b == 0 or idx_b == 5:\n            idx_t = abs(idx_b-5)\n        if idx_b == 1 or idx_b == 2:\n            idx_t = idx_b + 2\n        if idx_b == 3 or idx_b == 4:\n            idx_t = idx_b - 2\n    max_list.append(result) \n\n# among every case (top face of the first die),\n# find the largest value\nanswer = 
max(max_list)\nprint(answer)\n","repo_name":"joney0715/Algorithm_GroupStudy","sub_path":"joney0715/0817/personal_BOJ_주사위쌓기.py","file_name":"personal_BOJ_주사위쌓기.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43957684398","text":"from websaw import DefaultContext\nfrom upytl import (\n Component, UPYTL, Slot, html as h\n)\n\nfrom . import settings\nfrom upytl_standard import NavBarItem, StandardField\n\n\nclass BlogNavBar(Component):\n props = dict(\n menu = [],\n user = '',\n buttons=[]\n )\n template = {\n h.Nav(Class='navbar is-light', Role='navigation'): {\n h.Div(Class='navbar-brand'): {\n h.A(Class='navbar-item', href=\"https://bulma.io\"): '', \n },\n h.Div(Id=\"navbarBasicExample\", Class=\"navbar-menu\"):{\n h.Div(Class='navbar-start'):{\n h.Template(For='item in menu'):{\n NavBarItem(\n item = {'item'},\n ):'',\n },\n },\n h.Div(Class='navbar-end'): {\n h.Div(Class='navbar-item'): {\n h.Div(): 'Welcome [[ user ]]',\n },\n h.Div(Class='navbar-item'): {\n h.Template(If = 'not buttons'):{\n h.Div(): '',\n },\n h.Template(Else = ''):{\n h.Div(Class='buttons'):{\n h.A(For = 'b in buttons',Class={'b.get(\"class\", \"button\")'}, Href={'b.get(\"href\", \"index\")'}):'[[ b[\"name\"] ]]',\n },\n } \n }\n }\n }\n } \n }\n def get_context(self, rprops):\n ctx = DefaultContext.cctx()\n buttons = rprops['buttons']\n for i, j in enumerate(buttons):\n ref = buttons[i].get('href')\n buttons[i]['href'] = str(ctx.URL(ref))\n return{**rprops}\n\nclass BlogFlash(Component):\n props = dict(\n flash = None,\n message = None,\n f_type=None\n )\n template = {\n h.Template():{\n h.Div(If='flash', Class='{f_type}'):{\n h.Span():'[[message]]',\n h.Button(Class='delete'): None,\n \n },\n },\n h.Script():\n \"\"\"\n document.addEventListener('DOMContentLoaded', () => {\n (document.querySelectorAll('.notification .delete') || []).forEach(($delete) => {\n const $notification = $delete.parentNode;\n $delete.addEventListener('click', () => {\n $notification.parentNode.removeChild($notification);\n });\n });\n });\n \"\"\"\n \n }\n def get_context(self, rprops):\n ctx = DefaultContext.cctx()\n session = ctx.session\n flash = False\n f_message = session.get('flash_message', None)\n message = 'No Flash Message'\n flash_class = \"notification has-text-centered is-info\"\n if f_message:\n message = f_message.get('message', '')\n f_type = f_message.get('_class', None)\n \n if f_type:\n flash_class = \"notification has-text-centered \" + \"is-\" + f_type\n \n flash = True\n session['flash_message'] = ''\n return {**rprops, 'flash':flash, 'message':message, 'f_type':flash_class}\n\n\nclass BlogPage(Component):\n props = dict(\n footer_class='page-footer',\n page_title=\"Page title will be set when we initialise the component\",\n nav = \"This will be our navbar\"\n )\n template = {\n h.Html(): {\n h.Head():{\n h.Title(): '[[page_title]]',\n h.Meta(charset=b'utf-8'):'',\n },\n h.Body():{\n \n h.Link(rel='stylesheet', href='https://cdnjs.cloudflare.com/ajax/libs/bulma/0.9.4/css/bulma.min.css'):None, \n h.Link(rel=\"stylesheet\", href=\"https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.14.0/css/all.min.css\", integrity=\"sha512-1PKOgIY59xJ8Co8+NE6FZ+LOAZKjy+KY8iq0G4B3CyeY6wYHN3yt9PW0XpSriVlkMXe40PTKnXrLnZ9+fkDaog==\", crossorigin=\"anonymous\"):None,\n \n h.Script(src=\"https://code.jquery.com/jquery-3.5.1.js\"):None, \n \n Slot(SlotName=b'nav'):{\n h.Div():'No Navbar for this form' \n }, \n 
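# named slots (nav/flash/content/footer) with fallback children for callers that do not fill them\n                    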
Slot(SlotName=b'flash'):{\n h.Div(): 'No Flash Component for this form'\n },\n Slot(SlotName=b'content'):{h.Div(): '[there is no default content]'},\n Slot(SlotName=b'footer'):{\n \n h.Footer(Class=\"footer is-small\"):{\n h.Div(Class= \"content has-text-centered\"):{\n h.Template():{\n h.P(): 'Powered by '+ \" \"+'and UPYTL-Standard components (c) 2022'\n } \n }\n }\n }\n }\n }\n }\n\nclass BlogPost(Component):\n props = dict(\n post = None,\n profile_image = None,\n user_ref = None,\n view_ref = None,\n update_ref = None,\n delete_ref = None\n )\n template = {\n h.Link(rel=\"stylesheet\", type=\"text/css\", href=\"static/css/my.css\"):None,\n \n h.Article(Class=\"media content-section\"):{\n h.Figure(Class=\"media-left\"):{\n h.P(Class=\"image article-img\"):{\n h.Img(Class=\"image is-rounded\",src={'profile_image'}):{\n }\n }\n },\n h.Div(Class=\"media-content\"):{\n h.P(Class='content'):{\n h.P():{\n h.A(Class='is-size-4', href={'user_ref'}): '[[post.post.author.username]] ',\n h.Small(Class=\"text-muted\"): ' [[post.post.date_posted.strftime(\"%B %d, %Y\")]] ',\n h.Small(): ' at [[post.post.date_posted.strftime(\"%H:%M\")]]', \n },\n h.Div():{\n h.A(If='update_ref',Class='button is-small is-light is-success', href={'update_ref'}):'Update',\n h.A(If='delete_ref', Class='button is-small is-light is-danger' , href={'delete_ref'}): 'Delete',\n \n },\n \n },\n h.A(Class=\"article-title\", href={'user_ref'}):{\n h.H2(Class=\"title is-size-3\"):'[[post.post.title]]',\n },\n h.P(Class=\"article-content\"):'[[post.post.content]]',\n \n }\n }\n }\n def get_context(self, rprops):\n ctx = DefaultContext.cctx()\n post = rprops['post']\n user = ctx.auth.user\n update_ref = None\n delete_ref = None\n \n if user and user['id'] == post.post.author.id:\n update_ref = ctx.URL('post', vars={'action':'update','pid': post.post.id})\n delete_ref = ctx.URL('post', vars={'action':'delete','pid': post.post.id})\n p_image = ctx.URL('static/images/', post.profile.image)\n u_href = ctx.URL('index', vars={'filter_by':'user','uid': post.post.author.id})\n \n return{**rprops, 'profile_image':p_image,\n 'update_ref':update_ref,\n 'delete_ref':delete_ref,\n 'user_ref':u_href\n } \n\nclass BlogIndex(Component):\n props = dict(\n posts = None,\n flash = None\n )\n template = {\n h.Div(If='flash', Class=\"notification is-info has-text-centered\"):'[[flash]]',\n \n h.Div(Class='container'):{\n h.Template(For='post in posts'):{\n h.Div(Class=\"box\"):{\n BlogPost(\n post = {'post'},\n ):'',\n }\n }\n }\n } \n\n","repo_name":"valq7711/websaw","sub_path":"apps/sample_blog/blog_components.py","file_name":"blog_components.py","file_ext":"py","file_size_in_byte":7979,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"16013259548","text":"import random\nA=[]\nfor i in range(7):\n A.append([])\n for j in range(3):\n num=random.randint(0,10)\n A[i].append(num)\nprint('la matriz es: ')\nfor i in range(7):\n print(A[i])\nv=[]\nfor k in range(7):\n nomb=input('ingrese el nombre :'.format(k))\n v.append(nomb)\nprint('los estudiantes son:', v)\n\nfor j in range(7):\n ac=0\n for i in range(3):\n ac=ac+A[j][i]\n prom=ac/3\n print('promedio del est.',v[j], 'es: '+str(prom))\nprint('El promedio de los examenes es :')\n\nfor j in range(3):\n acexa=0\n for i in range(7):\n acexa=acexa+A[i][j]\n prom2=acexa/7\n print('promedio examen '+str(j+1)+ ' es '+str(prom2))","repo_name":"gustavofloresSz/trabajos-3er-parcial","sub_path":"Prac 
Matrice-1/ejer8.py","file_name":"ejer8.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31928507298","text":"import numpy\nimport math\nimport matplotlib.pyplot as plt\nimport seaborn as sns \n\nsns.set_theme()\n\nB, L = 5, 250\ndx, dy = 0.5, 0.5\nN, M = int(L/dx), int(B/dy)\n\ng = 9.8\ntmax = 3600\n\nh = numpy.zeros((N, M)) + 1.0\nu = numpy.zeros((N, M)) + 0.5\n\nKx = numpy.zeros((N, M)) + 0.05\nKy = numpy.zeros((N, M)) + 0.05\n\ncnow = numpy.zeros((N, M))\ncnew = numpy.zeros((N, M))\n\numax = numpy.max(u + numpy.sqrt(g*h))\n\nt1, t2, t3, t4, t5 = 100, 900, 1800, 3000, 3599\n\nfor i in range(N):\n for j in range(M):\n if i*dx < 5.0 and abs(j*dy - 2.5) < 1:\n cnow[i][j] = 0.1;\n\nT = []\ncx1, cx2, cx3, cx4, cx5, cx6, cx7 = [], [], [], [], [], [], []\ntime = 0\n\n# plt.figure(figsize=(20, 20))\n# ax = sns.heatmap(cnow, cmap=\"RdBu_r\")\n# ax.annotate('t = %04.1f s'%(time),\n# xy=(10, 10), xycoords='figure pixels', fontsize=30)\n# plt.savefig(\"figures/pollution/pollution_%05.1f.png\"%(time))\n\n\nwhile time < tmax:\n dt = dx / umax\n # end point is false\n for i in range(1, N-1):\n for j in range(1, M-1):\n temp1 = h[i][j]*Kx[i][j]*(cnow[i+1][j]+cnow[i-1][j]-2.0*cnow[i][j]) / (dx**2)\n temp2 = h[i][j]*Ky[i][j]*(cnow[i][j+1]+cnow[i][j-1]-2.0*cnow[i][j]) / (dy**2)\n temp3 = h[i][j]*u[i][j]*(cnow[i+1][j]-cnow[i-1][j]) / (2*dx)\n cnew[i][j] = cnow[i][j] + (temp1+temp2-temp3)*dt/h[i][j]\n for j in range(M):\n if abs(j*dy -5) < 0.6:\n cnew[0][j] = math.sin(time/1000) * 0.6\n cnew[N-1, :] = cnew[N-2, :]\n cnew[:, M-1] = cnew[:, M-2]\n cnew[:, 0] = cnew[:, 1]\n\n print(\"time: \", time)\n time += dt\n # if abs(time - (int(time)+0.5)) < 0.2:\n # if time > 0 and time < 100:\n # # changing the size of figure\n # plt.figure(figsize=(20, 20))\n # ax = sns.heatmap(cnew, cmap='RdBu_r')\n # ax.annotate('t = %04.1f s'%(time),\n # xy=(10, 10), xycoords='figure pixels', fontsize=30)\n # plt.savefig(\"figures/pollution/pollution_%05.1f.png\"%(time))\n if (time-t1) * (time - t1 -dt) < 0:\n ct1 = cnow;\n ct1.tofile('data/ct1.txt')\n if (time-t2) * (time - t2 -dt) < 0:\n ct2 = cnow;\n ct2.tofile('data/ct2.txt')\n if (time-t3) * (time - t3 -dt) < 0:\n ct3 = cnow;\n ct3.tofile('data/ct3.txt')\n if (time-t4) * (time - t4 -dt) < 0:\n ct4 = cnow;\n ct4.tofile('data/ct4.txt')\n if (time-t5) * (time - t5 -dt) < 0:\n ct5 = cnow;\n ct5.tofile('data/ct5.txt')\n\n T.append(time)\n cx1.append(cnew[ 1][5])\n cx2.append(cnew[ 10][5])\n cx3.append(cnew[ 20][5])\n cx4.append(cnew[ 30][5])\n cx5.append(cnew[ 50][5])\n cx6.append(cnew[ 80][5])\n cx7.append(cnew[100][5])\n cnow = cnew\n\n# open file in write mode\nwith open(r'data/cx.txt', 'w') as fp:\n for tt, c1, c2,c3, c4, c5, c6, c7 in zip(T, cx1, cx2, cx3, cx4, cx5, cx6, cx7):\n # write each item on a new line\n fp.write(\"%f %f %f %f %f %f %f %f\\n\" % (tt, c1, c2,c3, c4, c5, c6, c7))\nprint('Done')\n","repo_name":"L-N1988/LIBRARAY","sub_path":"solution_v0.py","file_name":"solution_v0.py","file_ext":"py","file_size_in_byte":2988,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"8053119153","text":"r\"\"\"\n==================================================\nB103: Test for setting permissive file permissions\n==================================================\n\nPOSIX based operating systems utilize a permissions model to protect access to\nparts of the file system. 
This model supports three roles \"owner\", \"group\"\nand \"world\" each role may have a combination of \"read\", \"write\" or \"execute\"\nflags sets. Python provides ``chmod`` to manipulate POSIX style permissions.\n\nThis plugin test looks for the use of ``chmod`` and will alert when it is used\nto set particularly permissive control flags. A MEDIUM warning is generated if\na file is set to group executable and a HIGH warning is reported if a file is\nset world writable. Warnings are given with HIGH confidence.\n\n:Example:\n\n.. code-block:: none\n\n >> Issue: Probable insecure usage of temp file/directory.\n Severity: Medium Confidence: Medium\n Location: ./examples/os-chmod-py2.py:15\n 14 os.chmod('/etc/hosts', 0o777)\n 15 os.chmod('/tmp/oh_hai', 0x1ff)\n 16 os.chmod('/etc/passwd', stat.S_IRWXU)\n\n >> Issue: Chmod setting a permissive mask 0777 on file (key_file).\n Severity: High Confidence: High\n Location: ./examples/os-chmod-py2.py:17\n 16 os.chmod('/etc/passwd', stat.S_IRWXU)\n 17 os.chmod(key_file, 0o777)\n 18\n\n.. seealso::\n\n - https://security.openstack.org/guidelines/dg_apply-restrictive-file-permissions.html # noqa\n - https://en.wikipedia.org/wiki/File_system_permissions\n - https://security.openstack.org\n\n.. versionadded:: 0.9.0\n\n\"\"\"\n\nimport stat\n\nimport bandit\nfrom bandit.core import test_properties as test\n\n\n@test.checks('Call')\n@test.test_id('B103')\ndef set_bad_file_permissions(context):\n if 'chmod' in context.call_function_name:\n if context.call_args_count == 2:\n mode = context.get_call_arg_at_position(1)\n\n if (mode is not None and isinstance(mode, int) and\n (mode & stat.S_IWOTH or mode & stat.S_IXGRP)):\n # world writable is an HIGH, group executable is a MEDIUM\n if mode & stat.S_IWOTH:\n sev_level = bandit.HIGH\n else:\n sev_level = bandit.MEDIUM\n\n filename = context.get_call_arg_at_position(0)\n if filename is None:\n filename = 'NOT PARSED'\n return bandit.Issue(\n severity=sev_level,\n confidence=bandit.HIGH,\n text=\"Chmod setting a permissive mask %s on file (%s).\" %\n (oct(mode), filename)\n )\n","repo_name":"zeroSteiner/bandit-ss","sub_path":"bandit/plugins/general_bad_file_permissions.py","file_name":"general_bad_file_permissions.py","file_ext":"py","file_size_in_byte":2618,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"39585334181","text":"from typing import List\nimport requests\nimport os\ndef send_email(emails: List[str], subject: str, body: str):\n # Sends a request to my custom mail server. 
The server sends the actual email.\n response = requests.post(\n url=os.environ['MAIL_URL'],\n headers={\"Authorization\": os.environ['MAIL_TOKEN']},\n json={\n 'to_mails': emails,\n 'from_mail': 'contact@ieeevit.org',\n 'subject': subject,\n 'body': body\n }\n )\n if response.status_code == 200:\n return True\n return False\n","repo_name":"aryan9600/redis-hack-cowin-emailer","sub_path":"backend/utils/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"24973299791","text":"import logging\n# import warning\nimport math\nfrom datetime import timedelta, datetime, date\n\nfrom odoo import api, fields, models, tools\nfrom odoo.exceptions import UserError, AccessError, ValidationError\nfrom odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT as DTF\nfrom odoo.tools import float_compare\nfrom odoo.tools.translate import _\n\nHOURS_PER_DAY = 8\n\nclass HrHolidays(models.Model):\n _inherit = 'hr.holidays'\n\n def change_to_user_tz(self, date):\n \"\"\"\n Take date and return it in the user timezone\n :param date:\n :return:\n \"\"\"\n if not date:\n return False\n date_object = datetime.strptime(date,\n tools.DEFAULT_SERVER_DATETIME_FORMAT)\n date_user_tz = fields.Datetime.context_timestamp(self.sudo(self._uid),\n date_object)\n date_user_tz_string = date_user_tz.strftime(DTF)\n return date_user_tz_string\n\n @api.onchange('date_from')\n def _onchange_date_from(self):\n \"\"\" If there are no date set for date_to, automatically set one 8 hours later than\n the date_from. Also update the number_of_days.\n \"\"\"\n date_from = self.change_to_user_tz(self.date_from)\n date_to = self.change_to_user_tz(self.date_to)\n\n # Compute and update the number of days\n if (date_to and date_from) and (date_from <= date_to):\n # self.number_of_days_temp = self._get_number_of_days(date_from, date_to, self.employee_id.id)\n num_days_raw = self._get_number_of_days(date_from, date_to, self.employee_id.id)\n\n self.compute_days(num_days_raw, date_from, date_to)\n else:\n self.number_of_days_temp = 0\n\n @api.onchange('date_to')\n def _onchange_date_to(self):\n \"\"\" Update the number_of_days. 
\"\"\"\n date_from = self.change_to_user_tz(self.date_from)\n date_to = self.change_to_user_tz(self.date_to)\n\n # Compute and update the number of days\n if (date_to and date_from) and (date_from <= date_to):\n # self.number_of_days_temp = self._get_number_of_days(date_from, date_to, self.employee_id.id)\n num_days_raw = self._get_number_of_days(date_from, date_to, self.employee_id.id)\n\n self.compute_days(num_days_raw, date_from, date_to)\n else:\n self.number_of_days_temp = 0\n\n def _get_number_of_days(self, date_from, date_to, employee_id):\n \"\"\" Returns a float equals to the timedelta between two dates given as string.\"\"\"\n from_dt = fields.Datetime.from_string(date_from)\n to_dt = fields.Datetime.from_string(date_to)\n\n # if employee_id:\n # employee = self.env['hr.employee'].browse(employee_id)\n # return employee.get_work_days_count(from_dt, to_dt)\n\n time_delta = to_dt - from_dt\n\n return math.ceil(time_delta.days + float(time_delta.seconds) / 86400)\n\n # def daterange(start_date, end_date):\n # for n in range(int ((end_date - start_date).days)):\n # yield start_date + timedelta(n)\n #\n # start_date = date(2013, 1, 1)\n # end_date = date(2015, 6, 2)\n # for single_date in daterange(start_date, end_date):\n # print single_date.strftime(\"%Y-%m-%d\")\n\n def daterange(self, date_from, date_to):\n \"\"\"\n Take range of two dates and return all affected dates\n \"\"\"\n date_from = datetime.strptime(date_from, DTF)\n date_to = datetime.strptime(date_to, DTF)\n\n for n in range(int((date_to - date_from).days) + 1):\n yield date_from + timedelta(n)\n\n def compute_days(self, number_of_days, date_from, date_to):\n \"\"\"\n From a range of dates, compute the number of days that should be\n deducted from the leave (not counting weekends and public holidays)\n \"\"\"\n if self.employee_id:\n self.number_of_days_temp = self.deduct_special_days(number_of_days)\n else:\n self.number_of_days_temp = number_of_days\n\n\n def deduct_special_days(self, number_of_days=0):\n \"\"\"\n Remove the number of special days from the days count\n \"\"\"\n days_to_deduct = 0\n\n date_from = self.change_to_user_tz(self.date_from)\n date_to = self.change_to_user_tz(self.date_to)\n\n special_days = self.get_special_days(date_from, date_to,\n self.employee_id)\n\n for date in special_days:\n days_to_deduct += 1\n\n days_without_special_days = number_of_days - days_to_deduct\n return days_without_special_days\n\n def get_special_days(self, date_from, date_to, employee):\n \"\"\"\n Return dict of special days (Date: Name)\n\n Partly Deprecated: Since we now generate actual leave entries for\n public holidays they do no longer need to be deducted from the number\n of days (overlapping leaves cannot be created anyway). 
We should\n        keep removing Sat/Sun and probably make it possible to remove other\n        weekdays as well for countries with other work schedules\n        \"\"\"\n        public_leave_ids = self.env['hr.public.holiday.holidays'].search([])\n\n        special_days = {}\n\n        for date in self.daterange(date_from, date_to):\n            date_str = str(date.date())\n            public_leave = public_leave_ids.filtered(\n                lambda r: r.date == date_str)\n\n            if public_leave:\n                # raise ValidationError(public_leave.name)\n                special_days[date.date()] = 'Public Holiday: %s' \\\n                    % public_leave.name\n                # return {\n                #     'warning': {\n                #         'title': \"Something bad happened\",\n                #         'message': public_leave.name,\n                #     }\n                # }\n            elif date.weekday() == 5:\n                special_days[date.date()] = 'Saturday'\n            elif date.weekday() == 6:\n                special_days[date.date()] = 'Sunday'\n\n        return special_days","repo_name":"Kowa1229/Odoo","sub_path":"hr_public_holidays/models/hr_holiday.py","file_name":"hr_holiday.py","file_ext":"py","file_size_in_byte":6098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}{"seq_id":"37019795104","text":"import json\r\nimport tweepy\r\nfrom kafka import KafkaProducer\r\nfrom time import sleep\r\n\r\nconsumer_key = '9Kul4QgCP4QE7ydJvFrQcLFa4'\r\nconsumer_secret = 'ne0nWw4lmZFFhDgKcSEL9ZDtMISy6wRmfvkh8FciTRhvNPSgkz'\r\naccess_token = '1153221248039755776-5UMImVxLCzTZilh8ASrYqFErjZ73tB'\r\naccess_secret = 'pWhFC5Bfo2Lx6BLdh3i62vxieXxpppOWzNj4leB1jo4b1'\r\n\r\n\r\nclass TwitterStreamListener(tweepy.StreamListener):\r\n    def __init__(self):\r\n        self.producer = KafkaProducer(bootstrap_servers='Cnt7-naya-cdh63:9092',\r\n                                      value_serializer=lambda v: json.dumps(v).encode('utf-8'))\r\n        self.tweets = []\r\n\r\n    def on_data(self, data):\r\n        # data is the full *tweet* json data\r\n        api_events = json.loads(data)\r\n\r\n        # Gathering relevant values\r\n        # Event-related values\r\n        event_keys = ['created_at', 'id', 'text']\r\n        twitter_events = {k: v for k, v in api_events.items()\r\n                          if k in event_keys}\r\n        twitter_events['tweet_created_at'] = twitter_events.pop('created_at')\r\n        twitter_events['tweet_id'] = twitter_events.pop('id')\r\n        # User-related values\r\n        user_keys = ['id', 'name', 'created_at', 'location', 'url', 'protected', 'verified',\r\n                     'followers_count', 'friends_count', 'listed_count', 'favourites_count',\r\n                     'statuses_count', 'withheld_in_countries']\r\n        user_events = {k: v for k, v in api_events['user'].items()\r\n                       if k in user_keys}\r\n        user_events['user_acount_created_at'] = user_events.pop('created_at')\r\n        user_events['user_id'] = user_events.pop('id')\r\n\r\n        # Merge dictionaries\r\n        user_events.update(twitter_events)\r\n        events = user_events\r\n\r\n        # send data to kafka topic(s)\r\n        self.producer.send('TweeterArchive', events)\r\n        self.producer.send('TweeterData', events)\r\n        self.producer.flush()\r\n\r\n        print(events)\r\n        sleep(10)\r\n\r\n    def on_error(self, status_code):\r\n        if status_code == 420:\r\n            return False\r\n\r\n\r\ndef initialize():\r\n\r\n    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\r\n    auth.set_access_token(access_token, access_secret)\r\n    api = tweepy.API(auth)\r\n\r\n    stream = TwitterStreamListener()\r\n    twitter_stream = tweepy.Stream(auth=api.auth, listener=stream)\r\n    twitter_stream.filter(track=['Trump'], languages=['en'])\r\n\r\n\r\ninitialize()\r\n# sleep(10)","repo_name":"danihello/telegram-shopping-bot","sub_path":"elk 
Tests/03_produser.py","file_name":"03_produser.py","file_ext":"py","file_size_in_byte":2439,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}{"seq_id":"5041852450","text":"#!/usr/bin/env python\n\nimport sys, os\nimport argparse\nfrom pathlib import Path\n\nfrom cocore.config import Config\nfrom cocore.Logger import Logger\nfrom codb.pg_tools import PGInteraction\n\nclass ScriptRunner():\n    \"\"\"\n    generic class for execution of a parameterized script in postgres or redshift\n    \"\"\"\n    def __init__(self, database):\n        self.database = database\n        self.pg = None\n        self.logger = Logger('ScriptRunner')\n\n    def init(self):\n        conf = Config()\n        pg_db_name = conf[self.database]['db_name']\n        pg_user = conf[self.database]['user']\n        pg_host = conf[self.database]['host']\n        pg_password = conf[self.database]['password']\n        pg_port = conf[self.database]['port']\n        self.pg = PGInteraction(dbname=pg_db_name, host=pg_host, user=pg_user, password=pg_password, port=pg_port,\n                                schema='public')\n        self.pg.conn()\n\n        return self\n\n    @staticmethod\n    def expand_params(sql, params):\n        \"\"\"\n        substitutes params in sql statement\n\n        :param sql:\n        :param params:\n        :return: sql, expanded with params\n        \"\"\"\n        for p in params.keys():\n            var = '$[?' + p + ']'\n            val = str(params[p])\n            sql = sql.replace(var, val)\n        return sql\n\n    def run_script(self, script, from_date=None, to_date=None, batch_id=None, params=None):\n        \"\"\"\n        method for expanding and running sql statements\n\n        :param script:\n        :param from_date:\n        :param to_date:\n        :param batch_id:\n        :param params:\n        :return:\n        \"\"\"\n        paramset = {}\n\n        # next we apply custom params and special metadata fields\n        # convert string params to dict\n        try:\n            params = dict((k.strip(), v.strip()) for k, v in (item.split('-') for item in params.split(',')))\n        except Exception as e:\n            self.logger.l(\"issue parsing params: \" + str(e))\n\n        if isinstance(params, dict):\n            paramset.update(params)\n\n        if from_date:\n            paramset['from_date'] = from_date\n        if to_date:\n            paramset['to_date'] = to_date\n        if batch_id:\n            paramset['batch_id'] = batch_id\n\n        # now defaults for special metadata fields\n        if paramset.get('from_date') is None:\n            paramset['from_date'] = '1776-07-04'\n        if paramset.get('to_date') is None:\n            paramset['to_date'] = '9999-12-31'\n        if paramset.get('batch_id') is None:\n            paramset['batch_id'] = '-1'\n        # we'll keep batch_no for backwards compatibility\n        paramset['batch_no'] = paramset['batch_id']\n\n        raw_sql = open(script).read()\n        sql = self.expand_params(raw_sql, paramset)\n        sql_message = '\\n\\n--sql script start:\\n' + sql + '\\n--sql script end\\n\\n'\n        self.logger.l(sql_message, 10)\n\n        self.pg.batchOpen()\n\n        self.logger.l(\"starting script\")\n        try:\n            self.pg.exec_sql(sql)\n            self.pg.batchCommit()\n            self.logger.l(\"batch commit\")\n        except Exception as e:\n            self.logger.l(\"execution failed with error: \" + str(e))\n            raise RuntimeError(e)\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('-s', '--script', help=\"\"\"enter a path to your script \"\"\")\n    parser.add_argument('-p', '--parameters', default='none-none',\n                        help=\"\"\"additional params to be substituted in script, example: -p param1-val1, param2-val2 \"\"\")\n    parser.add_argument('-d', '--database', help=\"\"\"db alias from etl.cfg, default is cosmo \"\"\", default='cosmo')\n    parser.add_argument('-f', '--from_date', help=\"\"\"from_date\"\"\", default=None)\n    parser.add_argument('-t', '--to_date', help=\"\"\"to_date\"\"\", 
default=None)\n    parser.add_argument('-b', '--batch_id', help=\"\"\"enter batch id \"\"\", default=None)\n    args = parser.parse_args()\n\n    ScriptRunner(args.database).init().run_script(args.script, args.from_date, args.to_date, args.batch_id, args.parameters)\n","repo_name":"WillLiu360/maximilian","sub_path":"script_runner/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":4061,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"}{"seq_id":"18728128598","text":"# -*- coding: utf-8 -*-\n\nr\"\"\"\nMain data-admin configuration file.\n\"\"\"\nimport os\nimport sys\n\nfrom django.utils.translation import gettext_lazy as _\n\ntry:\n    DEBUG = \"runserver\" in sys.argv\nexcept Exception:\n    DEBUG = False\nDEBUG_JS = DEBUG\n\nADMINS = (\n    # ('Your Name', 'your_email@domain.com'),\n)\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = \"%@mzit!i8b*$zc&6oev96=RANDOMSTRING\"\n\n# FrePPLe only supports the postgresql database.\n# Create additional entries in this dictionary to define scenario schemas.\n\nDATABASES = {\n    \"default\": {\n        \"ENGINE\": \"django.db.backends.postgresql\",\n        # Database name\n        \"NAME\": \"data_admin\",\n        # Role name when using md5 authentication.\n        # Leave as an empty string when using peer or\n        # ident authentication.\n        \"USER\": \"frepple\",\n        # Role password when using md5 authentication.\n        # Leave as an empty string when using peer or\n        # ident authentication.\n        \"PASSWORD\": \"frepple\",\n        # When using TCP sockets specify the hostname,\n        # the ip4 address or the ip6 address here.\n        # Leave as an empty string to use Unix domain\n        # socket (\"local\" lines in pg_hba.conf).\n        \"HOST\": \"\",\n        # Specify the port number when using a TCP socket.\n        \"PORT\": \"\",\n        \"OPTIONS\": {},\n        \"CONN_MAX_AGE\": 60,\n        \"TEST\": {\n            \"NAME\": \"test_frepple\" # Database name used when running the test suite.\n        },\n        \"FILEUPLOADFOLDER\": os.path.normpath(\n            os.path.join(FREPPLE_LOGDIR, \"data\", \"default\")\n        ),\n        # Role name for executing custom reports and processing sql data files.\n        # Make sure this role has properly restricted permissions!\n        # When left unspecified, SQL statements run with the full read-write\n        # permissions of the user specified above. 
Which can be handy, but is not secure.\n        \"SQL_ROLE\": \"report_role\",\n        \"SECRET_WEBTOKEN_KEY\": SECRET_KEY,\n    },\n    \"scenario1\": {\n        \"ENGINE\": \"django.db.backends.postgresql\",\n        # Database name\n        \"NAME\": \"data_admin_1\",\n        # Role name when using md5 authentication.\n        # Leave as an empty string when using peer or\n        # ident authentication.\n        \"USER\": \"frepple\",\n        # Role password when using md5 authentication.\n        # Leave as an empty string when using peer or\n        # ident authentication.\n        \"PASSWORD\": \"frepple\",\n        # When using TCP sockets specify the hostname,\n        # the ip4 address or the ip6 address here.\n        # Leave as an empty string to use Unix domain\n        # socket (\"local\" lines in pg_hba.conf).\n        \"HOST\": \"\",\n        # Specify the port number when using a TCP socket.\n        \"PORT\": \"\",\n        \"OPTIONS\": {},\n        \"CONN_MAX_AGE\": 60,\n        \"TEST\": {\n            \"NAME\": \"test_scenario1\" # Database name used when running the test suite.\n        },\n        \"FILEUPLOADFOLDER\": os.path.normpath(\n            os.path.join(FREPPLE_LOGDIR, \"data\", \"scenario1\")\n        ),\n        # Role name for executing custom reports and processing sql data files.\n        # Make sure this role has properly restricted permissions!\n        # When left unspecified, SQL statements run with the full read-write\n        # permissions of the user specified above. Which can be handy, but is not secure.\n        \"SQL_ROLE\": \"report_role\",\n        \"SECRET_WEBTOKEN_KEY\": SECRET_KEY,\n    },\n    \"scenario2\": {\n        \"ENGINE\": \"django.db.backends.postgresql\",\n        # Database name\n        \"NAME\": \"data_admin_2\",\n        # Role name when using md5 authentication.\n        # Leave as an empty string when using peer or\n        # ident authentication.\n        \"USER\": \"frepple\",\n        # Role password when using md5 authentication.\n        # Leave as an empty string when using peer or\n        # ident authentication.\n        \"PASSWORD\": \"frepple\",\n        # When using TCP sockets specify the hostname,\n        # the ip4 address or the ip6 address here.\n        # Leave as an empty string to use Unix domain\n        # socket (\"local\" lines in pg_hba.conf).\n        \"HOST\": \"\",\n        # Specify the port number when using a TCP socket.\n        \"PORT\": \"\",\n        \"OPTIONS\": {},\n        \"CONN_MAX_AGE\": 60,\n        \"TEST\": {\n            \"NAME\": \"test_scenario2\" # Database name used when running the test suite.\n        },\n        \"FILEUPLOADFOLDER\": os.path.normpath(\n            os.path.join(FREPPLE_LOGDIR, \"data\", \"scenario2\")\n        ),\n        # Role name for executing custom reports and processing sql data files.\n        # Make sure this role has properly restricted permissions!\n        # When left unspecified, SQL statements run with the full read-write\n        # permissions of the user specified above. 
Which can be handy, but is not secure.\n \"SQL_ROLE\": \"report_role\",\n \"SECRET_WEBTOKEN_KEY\": SECRET_KEY,\n },\n \"scenario3\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n # Database name\n \"NAME\": \"data_admin_3\",\n # Role name when using md5 authentication.\n # Leave as an empty string when using peer or\n # ident authencation.\n \"USER\": \"frepple\",\n # Role password when using md5 authentication.\n # Leave as an empty string when using peer or\n # ident authencation.\n \"PASSWORD\": \"frepple\",\n # When using TCP sockets specify the hostname,\n # the ip4 address or the ip6 address here.\n # Leave as an empty string to use Unix domain\n # socket (\"local\" lines in pg_hba.conf).\n \"HOST\": \"\",\n # Specify the port number when using a TCP socket.\n \"PORT\": \"\",\n \"OPTIONS\": {},\n \"CONN_MAX_AGE\": 60,\n \"TEST\": {\n \"NAME\": \"test_scenario3\" # Database name used when running the test suite.\n },\n \"FILEUPLOADFOLDER\": os.path.normpath(\n os.path.join(FREPPLE_LOGDIR, \"data\", \"scenario3\")\n ),\n # Role name for executing custom reports and processing sql data files.\n # Make sure this role has properly restricted permissions!\n # When left unspecified, SQL statements run with the full read-write\n # permissions of the user specified above. Which can be handy, but is not secure.\n \"SQL_ROLE\": \"report_role\",\n \"SECRET_WEBTOKEN_KEY\": SECRET_KEY,\n },\n}\n\nLANGUAGE_CODE = \"en\"\n\n# Google analytics code to report usage statistics to.\n# The value None disables this feature.\nGOOGLE_ANALYTICS = None\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# On Unix systems, a value of None will cause Django to use the same\n# timezone as the operating system.\n# If running in a Windows environment this must be set to the same as your\n# system time zone.\nTIME_ZONE = \"Europe/Brussels\"\n\nBRANDING = \"frePPLe data admin\"\n\n# Website where all documentation is available.\n# - The DOCUMENTATION_URL is used as the main URL for the about box\n# - The documentation is expected to be found in 'DOCUMENTATION_URL/docs/MAJOR_VERSION.MINOR_VERSION\"\n# - The URL shouldn't have an ending slash\nDOCUMENTATION_URL = \"http://your-website-with-documentation\"\n\n# Supported language codes, sorted by language code.\n# Language names and codes should match the ones in Django.\n# You can see the list supported by Django at:\n# https://github.com/django/django/blob/master/django/conf/global_settings.py\nLANGUAGES = (\n (\"en\", _(\"English\")),\n (\"fr\", _(\"French\")),\n (\"de\", _(\"German\")),\n (\"he\", _(\"Hebrew\")),\n (\"hr\", _(\"Croatian\")),\n (\"it\", _(\"Italian\")),\n (\"ja\", _(\"Japanese\")),\n (\"nl\", _(\"Dutch\")),\n (\"pt\", _(\"Portuguese\")),\n (\"pt-br\", _(\"Brazilian Portuguese\")),\n (\"ru\", _(\"Russian\")),\n (\"es\", _(\"Spanish\")),\n (\"zh-hans\", _(\"Simplified Chinese\")),\n (\"zh-hant\", _(\"Traditional Chinese\")),\n (\"uk\", _(\"Ukrainian\")),\n)\n\n# The remember-me checkbox on the login page allows to keep a session cookie\n# active in your browser. 
The session will expire after the age configured\n# in the setting below (expressed in seconds).\n# Set the value to 0 to force users to log in for every browser session.\nSESSION_COOKIE_AGE = 3600 * 24 * 3  # 3 days\n\nMIDDLEWARE = (\n    \"django.contrib.sessions.middleware.SessionMiddleware\",\n    \"django.contrib.messages.middleware.MessageMiddleware\",\n    # Uncomment the next line to automatically log on as the admin user,\n    # which can be useful for development or for demo models.\n    # 'freppledb.common.middleware.AutoLoginAsAdminUser',\n    \"data_admin.common.middleware.MultiDBMiddleware\",\n    # Optional: The following middleware allows authentication with HTTP headers\n    \"data_admin.common.middleware.HTTPAuthenticationMiddleware\",\n    \"data_admin.common.middleware.LocaleMiddleware\",\n    \"django.middleware.common.CommonMiddleware\",\n    \"django.middleware.csrf.CsrfViewMiddleware\",\n)\n\n# Installed applications.\n# The order is important: urls, templates and menus of the earlier entries\n# take precedence over and override later entries.\nINSTALLED_APPS = (\n    \"django.contrib.auth\",\n    \"django.contrib.contenttypes\",\n    \"django.contrib.messages\",\n    \"django.contrib.staticfiles\",\n    \"data_admin.boot\",\n    # Add any project specific apps here\n    \"data_admin_examples.example1\",\n    \"data_admin.execute\",\n    \"data_admin.common\",\n    \"django_filters\",\n    \"rest_framework\",\n    \"django_admin_bootstrapped\",\n    \"django.contrib.admin\",\n)\n\n# Custom attribute fields in the database\n# After each change of this setting, the following commands MUST be\n# executed to create the fields in the database(s).\n#   frepplectl makemigrations\n#   frepplectl migrate     OR     frepplectl migrate --database DATABASE\n#\n# The commands will create migration files to keep track of the changes.\n# You MUST use the above commands and the generated migration scripts. Manually\n# changing the database schema will work in simple cases, but will get you\n# in trouble in the long run!\n# You'll need write permissions in the folder where these are stored.\n#\n# See https://docs.djangoproject.com/en/1.8/topics/migrations/ for the\n# details on the migration files. 
For complex changes to the attributes\n# an administrator may need to edit, delete or extend these files.\n#\n# Supported field types are 'string', 'boolean', 'number', 'integer',\n# 'date', 'datetime', 'duration' and 'time'.\n# Example:\n# ATTRIBUTES = [\n# ('freppledb.input.models.Item', [\n# ('attribute1', ugettext('attribute_1'), 'string'),\n# ('attribute2', ugettext('attribute_2'), 'boolean'),\n# ('attribute3', ugettext('attribute_3'), 'date'),\n# ('attribute4', ugettext('attribute_4'), 'datetime'),\n# ('attribute5', ugettext('attribute_5'), 'number'),\n# ]),\n# ('freppledb.input.models.Operation', [\n# ('attribute1', ugettext('attribute_1'), 'string'),\n# ])\n# ]\nATTRIBUTES = []\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": True,\n \"filters\": {\"require_debug_false\": {\"()\": \"django.utils.log.RequireDebugFalse\"}},\n \"formatters\": {\n \"verbose\": {\n \"format\": \"%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s\"\n },\n \"simple\": {\"format\": \"%(levelname)s %(message)s\"},\n },\n \"handlers\": {\n \"null\": {\"level\": \"DEBUG\", \"class\": \"logging.NullHandler\"},\n \"console\": {\n \"level\": \"DEBUG\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"simple\",\n },\n \"mail_admins\": {\n \"level\": \"CRITICAL\",\n \"filters\": [\"require_debug_false\"],\n \"class\": \"django.utils.log.AdminEmailHandler\",\n },\n },\n \"loggers\": {\n # A handler to log all SQL queries.\n # The setting \"DEBUG\" also needs to be set to True higher up in this file.\n #'django.db.backends': {\n # 'handlers': ['console'],\n # 'level': 'DEBUG',\n # 'propagate': False,\n # },\n \"django\": {\"handlers\": [\"console\"], \"level\": \"INFO\"},\n \"data_admin\": {\"handlers\": [\"console\"], \"level\": \"INFO\"},\n },\n}\n\n# Max total log files size in MB, if the limit is reached deletes the oldest.\nMAXTOTALLOGFILESIZE = 200\n\n# A list of available user interface themes.\n# If multiple themes are configured in this list, the user's can change their\n# preferences among the ones listed here.\n# If the list contains only a single value, the preferences screen will not\n# display users an option to choose the theme.\nTHEMES = [\n \"earth\",\n \"grass\",\n \"lemon\",\n \"odoo\",\n \"openbravo\",\n \"orange\",\n \"snow\",\n \"strawberry\",\n \"water\",\n]\n\n# A default user-group to which new users are automatically added\nDEFAULT_USER_GROUP = None\n\n# The default user interface theme\nDEFAULT_THEME = \"earth\"\n\n# The default number of records to pull from the server as a page\nDEFAULT_PAGESIZE = 100\n\n# Configuration of the default dashboard\nDEFAULT_DASHBOARD = [\n {\n \"rowname\": _(\"Welcome\"),\n \"cols\": [\n {\"width\": 8, \"widgets\": [(\"inbox\", {\"limit\": 10})]},\n {\"width\": 4, \"widgets\": [(\"news\", {})]},\n ],\n },\n]\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\"\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n \"OPTIONS\": {\"min_length\": 8},\n },\n {\"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\"},\n {\"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\"},\n]\n\n# Configuration of SMTP mail server\nEMAIL_USE_TLS = True\nDEFAULT_FROM_EMAIL = \"your_email@domain.com\"\nSERVER_EMAIL = \"your_email@domain.com\"\nEMAIL_HOST_USER = \"your_email@domain.com\"\nEMAIL_HOST_PASSWORD = \"frePPLeIsTheBest\"\nEMAIL_HOST = None\nEMAIL_PORT = 25\nEMAIL_BACKEND = 
\"django.core.mail.backends.smtp.EmailBackend\"\n\n# Port number when not using Apache\nPORT = 8000\n\n# Settings for user uploaded files\nMEDIA_URL = \"/uploads/\" # Do not change this\n# This list of allowed extensions is what github.com allows.\n# Be VERY careful about security when enlarging this list!\nMEDIA_EXTENSIONS = \".gif,.jpeg,.jpg,.png,.docx,.gz,.log,.pdf,.pptx,.txt,.xlsx,.zip\"\n# Number of seconds a browser can cache uploaded content\nMEDIA_MAX_AGE = 12 * 3600\n\n# Browser to test with selenium\nSELENIUM_TESTS = \"chrome\"\nSELENIUM_HEADLESS = True\n","repo_name":"frePPLe/frepple-data-admin","sub_path":"djangosettings.py","file_name":"djangosettings.py","file_ext":"py","file_size_in_byte":14456,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"} +{"seq_id":"74563993473","text":"\n'''\nObject Counter class.\n'''\n\n# pylint: disable=missing-class-docstring,missing-function-docstring,invalid-name\n\nimport multiprocessing\nimport json\nimport cv2\nimport numpy as np\nfrom joblib import Parallel, delayed\n\nfrom tracker import add_new_blobs, remove_duplicates, update_blob_tracker\n\nfrom Counter import attempt_count\n\n\nNUM_CORES = multiprocessing.cpu_count()\nclasses = None\n\n\n\n\ndef draw_prediction(img, class_id, confidence, x, y, x_plus_w, y_plus_h,classes,COLORS):\n '''\n Draws on top of current frame the bounding bo an specifies label of the object.\n '''\n\n label = str(classes[class_id])\n\n color = COLORS[class_id]\n\n cv2.rectangle(img, (x,y), (x_plus_w,y_plus_h), color, 2)\n\n cv2.putText(img, label, (x-10,y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)\n\n\ndef get_output_layers(net):\n '''\n Gets all the output layer of yolo model.\n '''\n \n layer_names = net.getLayerNames()\n \n output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n return output_layers\n\ndef get_bb_yolo(image,net,conf_threshold=0.5,scale=0.00392,nms_threshold=0.4):\n '''\n Returns the bounding box predictions, classes and the corresponding confidence levels.\n '''\n\n blob = cv2.dnn.blobFromImage(image, scale, (416,416), (0,0,0), True, crop=False)\n net.setInput(blob)\n (H, W) = image.shape[:2]\n #(H,W)=1080,1920\n outs = net.forward(get_output_layers(net))\n \n # initialize our lists of detected bounding boxes, confidences,\n # and class IDs, respectively\n boxes=[]\n confidences=[]\n classIDs=[]\n\n for output in outs:\n\n for detection in output:\n scores = detection[5:]\n classID = np.argmax(scores)\n confidence = scores[classID]\n if classID in [1,2,3,4,5,6,7,8]:\n # filter out weak predictions by ensuring the detected\n # probability is greater than the minimum probability\n if confidence > conf_threshold:\n # scale the bounding box coordinates back relative to\n # the size of the image, keeping in mind that YOLO\n # actually returns the center (x, y)-coordinates of\n # the bounding box followed by the boxes' width and\n # height\n box = detection[0:4] * np.array([W, H, W, H])\n (centerX, centerY, width, height) = box.astype(\"int\")\n # use the center (x, y)-coordinates to derive the top\n # and and left corner of the bounding box\n x = int(centerX - (width / 2))\n y = int(centerY - (height / 2))\n # update our list of bounding box coordinates,\n # confidences, and class IDs\n boxes.append([x, y, int(width), int(height)])\n confidences.append(float(confidence))\n classIDs.append(classID)\n\n # apply non-maxima suppression to suppress weak, overlapping\n # bounding boxes\n idxs = cv2.dnn.NMSBoxes(boxes, 
confidences, conf_threshold,\n nms_threshold)\n\n final_bounding_boxes = []\n final_classes = []\n final_confidences = []\n\n if len(idxs) > 0:\n # print(\"Detected BBs : \",len(idxs))\n # loop over the indexes we are keeping\n for i in idxs.flatten():\n\n # extract the bounding box coordinates\n final_bounding_boxes.append(boxes[i])\n final_classes.append(classIDs[i])\n final_confidences.append(confidences[i])\n \n \n\n\n return final_bounding_boxes,final_classes,final_confidences\n\nclass ObjectCounter():\n\n def __init__(self, frame, net, mcdf, mctf, di, counting_line,class_file,tracker='kcf',conf_threshold=0.5\\\n ,nms_threshold=0.4,scale=0.00392):\n self.frame = frame # current frame of video\n self.net = net\n self.mcdf = mcdf # maximum consecutive detection failures\n self.mctf = mctf # maximum consecutive tracking failures\n self.detection_interval = di\n self.counting_line = counting_line\n \n with open(class_file, 'r') as f:\n self.classes = [line.strip() for line in f.readlines()]\n\n self.COLORS = np.random.uniform(0, 255, size=(len(self.classes), 3))\n\n self.blobs = {}\n self.f_height, self.f_width, _ = self.frame.shape\n self.frame_count = 0 # number of frames since last detection\n self.counts = {counting_line['label']: {}} # counts of objects by type for each counting line\n self.tracker = tracker\n self.conf_threshold=conf_threshold\n self.nms_threshold=nms_threshold\n self.scale=scale\n # self.\n \n \n # create blobs from initial frame\n _bounding_boxes, _classes, _confidences = get_bb_yolo(frame, net,conf_threshold,scale,nms_threshold)\n self.blobs = add_new_blobs(_bounding_boxes, _classes, _confidences, self.blobs, self.frame, self.tracker, self.mcdf)\n\n def get_counts(self):\n return self.counts\n\n def get_blobs(self):\n return self.blobs\n\n def count(self, frame):\n self.frame = frame\n\n blobs_list = list(self.blobs.items())\n # update blob trackers\n blobs_list = Parallel(n_jobs=NUM_CORES, prefer='threads')(\n delayed(update_blob_tracker)(blob, blob_id, self.frame) for blob_id, blob in blobs_list\n )\n self.blobs = dict(blobs_list)\n\n for blob_id, blob in blobs_list:\n # count object if it has crossed a counting line\n blob, self.counts = attempt_count(blob, blob_id, self.counting_line, self.counts,self.classes)\n\n self.blobs[blob_id] = blob\n\n # remove blob if it has reached the limit for tracking failures\n if blob.num_consecutive_tracking_failures >= self.mctf:\n del self.blobs[blob_id]\n\n if self.frame_count >= self.detection_interval:\n # rerun detection\n \n _bounding_boxes, _classes, _confidences = get_bb_yolo(self.frame,self.net,self.conf_threshold\\\n ,self.scale,self.nms_threshold)\n\n self.blobs = add_new_blobs(_bounding_boxes, _classes, _confidences, self.blobs, self.frame, self.tracker, self.mcdf)\n self.blobs = remove_duplicates(self.blobs)\n self.frame_count = 0\n\n self.frame_count += 1\n\n def visualize(self):\n frame = self.frame\n (H, W) = frame.shape[:2]\n #(H,W)=1080,1920\n # draw and label blob bounding boxes\n for _id, blob in self.blobs.items():\n\n (x, y, w, h) = [int(v) for v in blob.bounding_box]\n x_plus_w = x+w\n y_plus_h = y+h\n\n draw_prediction(frame, blob.type, blob.type_confidence, x, y, x_plus_w, y_plus_h,self.classes,self.COLORS)\n\n # draw counting line\n cv2.line(frame, self.counting_line['line'][0], self.counting_line['line'][1],(255,255,255), 3)\n \n temp_i=0\n for line in self.counts:\n cv2.putText(frame,line, (int(0.90*W),int(0.8611*H)),cv2.FONT_HERSHEY_SIMPLEX,1,(0,0,255),2)\n \n for vehicle,count in 
self.counts[line].items():\n temp_i=temp_i+30\n cv2.putText(frame,\"{}={}\".format(vehicle,count), (int(0.90*W),int(0.8611*H+(temp_i/1080)*H))\\\n ,cv2.FONT_HERSHEY_SIMPLEX,0.75,(0,0,255),2)\n \n \n\n #json.dumps(self.counts)\n return frame\n\n ","repo_name":"atheeth96/smart_city_traffic","sub_path":"ObjectCounter.py","file_name":"ObjectCounter.py","file_ext":"py","file_size_in_byte":7546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8494860128","text":"import random\nrandom.seed(0)\nimport numpy as np\nnp.random.seed(0)\nimport tensorflow as tf\nimport onnx_graphsurgeon as gs\nfrom onnx2tf.utils.common_functions import (\n get_constant_or_variable,\n print_node_info,\n inverted_operation_enable_disable,\n process_neg_idx,\n make_tf_node_info,\n get_replacement_parameter,\n pre_process_transpose,\n post_process_transpose,\n)\n\n\n@print_node_info\n@inverted_operation_enable_disable\n@get_replacement_parameter\ndef make_node(\n *,\n graph_node: gs.Node,\n tf_layers_dict: dict,\n **kwargs: dict,\n):\n \"\"\"ScatterND\n\n Parameters\n ----------\n graph_node: gs.Node\n graph_surgeon Node\n\n tf_layers_dict: dict\n optype, shape, dtype, tensorflow graph\n \"\"\"\n before_op_output_shape_trans_1 = \\\n tf_layers_dict.get(graph_node.inputs[0].name, {}).get('before_op_output_shape_trans', True)\n before_op_output_shape_trans_2 = \\\n tf_layers_dict.get(graph_node.inputs[1].name, {}).get('before_op_output_shape_trans', True)\n before_op_output_shape_trans_3 = \\\n tf_layers_dict.get(graph_node.inputs[2].name, {}).get('before_op_output_shape_trans', True)\n before_op_output_shape_trans = \\\n before_op_output_shape_trans_1 \\\n and before_op_output_shape_trans_2 \\\n and before_op_output_shape_trans_3\n\n graph_node_input_1 = get_constant_or_variable(\n graph_node.inputs[0],\n before_op_output_shape_trans,\n )\n graph_node_input_2 = get_constant_or_variable(\n graph_node.inputs[1],\n before_op_output_shape_trans,\n )\n graph_node_input_3 = get_constant_or_variable(\n graph_node.inputs[2],\n before_op_output_shape_trans,\n )\n graph_node_output: gs.Variable = graph_node.outputs[0]\n shape = graph_node_output.shape\n dtype = graph_node_output.dtype\n\n input_tensor = tf_layers_dict[graph_node_input_1.name]['tf_node'] \\\n if isinstance(graph_node_input_1, gs.Variable) else graph_node_input_1\n indices_tensor = tf_layers_dict[graph_node_input_2.name]['tf_node'] \\\n if isinstance(graph_node_input_2, gs.Variable) else graph_node_input_2\n updates_tensor = tf_layers_dict[graph_node_input_3.name]['tf_node'] \\\n if isinstance(graph_node_input_3, gs.Variable) else graph_node_input_3\n\n # Inverted workaround to avoid shape errors as much as possible\n if not before_op_output_shape_trans \\\n and list(graph_node.inputs[0].shape) != list(input_tensor.shape) \\\n and list(graph_node.inputs[1].shape) == list(indices_tensor.shape) \\\n and list(graph_node.inputs[2].shape) == list(updates_tensor.shape):\n\n input_tensor_shape = input_tensor.shape\n input_tensor_rank = len(input_tensor_shape)\n indices_tensor_shape = indices_tensor.shape\n updates_tensor_shape = updates_tensor.shape\n updates_tensor_rank = len(updates_tensor_shape)\n # Obtaining the number of ranks to be operated\n number_of_ranks_operate = indices_tensor_shape[-1]\n # 1. The number of ranks to be operated must be numeric\n # 2. The number of ranks to be operated on must match the number of ranks in the input tensor\n # 3. 
The number of ranks to be operated on must match the number of ranks in the tensor for updating\n # 4. Forced NHWC conversion if all the above conditions are met\n if isinstance(number_of_ranks_operate, int) \\\n and input_tensor_rank == number_of_ranks_operate \\\n and updates_tensor_rank == number_of_ranks_operate:\n\n indices_convertion_table = [0] + [i for i in range(2, input_tensor_rank)] + [1, input_tensor_rank]\n indices_gather_table = [0] + [i for i in range(2, input_tensor_rank)] + [1]\n updates_convertion_table = [0] + [i for i in range(2, input_tensor_rank)] + [1]\n # Corrects tensor shape discrepancies\n if isinstance(indices_tensor, np.ndarray):\n indices_tensor = indices_tensor.transpose(indices_convertion_table)\n elif tf.keras.backend.is_keras_tensor(indices_tensor):\n indices_tensor = tf.transpose(\n a=indices_tensor,\n perm=indices_convertion_table,\n )\n if isinstance(updates_tensor, np.ndarray):\n updates_tensor = updates_tensor.transpose(updates_convertion_table)\n elif tf.keras.backend.is_keras_tensor(updates_tensor):\n updates_tensor = tf.transpose(\n a=updates_tensor,\n perm=updates_convertion_table,\n )\n # Transposition of indices\n if isinstance(indices_tensor, np.ndarray):\n indices_tensor = indices_tensor[..., indices_gather_table]\n elif tf.keras.backend.is_keras_tensor(indices_tensor):\n indices_tensor = tf.gather(\n params=indices_tensor,\n indices=indices_gather_table,\n axis=-1,\n )\n\n # Preserving Graph Structure (Dict)\n tf_layers_dict[graph_node_output.name] = {\n 'optype': graph_node.op,\n 'shape': shape,\n 'dtype': dtype,\n }\n\n # Pre-process transpose\n input_tensor = pre_process_transpose(\n value_before_transpose=input_tensor,\n param_target='inputs',\n param_name=graph_node.inputs[0].name,\n **kwargs,\n )\n indices_tensor = pre_process_transpose(\n value_before_transpose=indices_tensor,\n param_target='inputs',\n param_name=graph_node.inputs[1].name,\n **kwargs,\n )\n updates_tensor = pre_process_transpose(\n value_before_transpose=updates_tensor,\n param_target='inputs',\n param_name=graph_node.inputs[2].name,\n **kwargs,\n )\n\n # Complex ScatterND -> Simple ScatterND\n simple_scatternd = False\n # Verify if negative numbers need to be converted to positive numbers\n if isinstance(indices_tensor, np.ndarray) and None not in indices_tensor:\n flatten_indices_tensor = indices_tensor.flatten()\n if np.sum(np.where(flatten_indices_tensor < 0, 1, 0)) > 0:\n simple_scatternd = False\n else:\n simple_scatternd = True\n elif hasattr(indices_tensor, 'numpy') and None not in indices_tensor.numpy():\n flatten_indices_tensor = indices_tensor.numpy().flatten()\n if np.sum(np.where(flatten_indices_tensor < 0, 1, 0)) > 0:\n simple_scatternd = False\n else:\n simple_scatternd = True\n elif isinstance(indices_tensor, int) and indices_tensor >= 0:\n simple_scatternd = True\n else:\n simple_scatternd = False\n\n # Generation of TF OP\n if not simple_scatternd:\n indices_tensor = process_neg_idx(\n data=input_tensor,\n indices=indices_tensor,\n )\n\n tf_layers_dict[graph_node_output.name]['tf_node'] = \\\n tf.tensor_scatter_nd_update(\n tensor=input_tensor \\\n if not isinstance(input_tensor, np.ndarray) \\\n else tf.convert_to_tensor(input_tensor),\n indices=indices_tensor \\\n if not isinstance(indices_tensor, np.ndarray) \\\n else tf.convert_to_tensor(indices_tensor),\n updates=updates_tensor \\\n if not isinstance(updates_tensor, np.ndarray) \\\n else tf.convert_to_tensor(updates_tensor),\n name=graph_node.name,\n )\n\n # Post-process transpose\n 
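# Note: post_process_transpose is assumed to re-transpose this output only when a replacement parameter was supplied for it (mirroring the pre_process_transpose calls above); with no parameter it should pass the tensor through unchanged.\n    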
tf_layers_dict[graph_node_output.name]['tf_node'] = post_process_transpose(\n value_before_transpose=tf_layers_dict[graph_node_output.name]['tf_node'],\n param_target='outputs',\n param_name=graph_node.outputs[0].name,\n **kwargs,\n )\n\n # Generation of Debug Info\n tf_layers_dict[graph_node_output.name]['tf_node_info'] = \\\n make_tf_node_info(\n node_info={\n 'tf_op_type': tf.tensor_scatter_nd_update,\n 'tf_inputs': {\n 'tensor': input_tensor,\n 'indices': indices_tensor,\n 'updates': updates_tensor,\n },\n 'tf_outputs': {\n 'output': tf_layers_dict[graph_node_output.name]['tf_node'],\n },\n }\n )\n","repo_name":"PINTO0309/onnx2tf","sub_path":"onnx2tf/ops/ScatterND.py","file_name":"ScatterND.py","file_ext":"py","file_size_in_byte":8318,"program_lang":"python","lang":"en","doc_type":"code","stars":410,"dataset":"github-code","pt":"61"} +{"seq_id":"13929655559","text":"import pandas as pd\nimport math\nimport os\nimport copy\n\n\nclass Node:\n def __init__(self, id_, shape, text, isnan, right=None, left=None):\n self.id_ = id_\n self.shape = shape\n self.text = text\n self.isnan = isnan\n self.right = None\n self.left = None\n self.needed_oks = 0\n\n def tostrs(self):\n string = []\n if self.shape == \"Manual Input\":\n string = [f\"read({self.text});\"]\n if self.shape == \"Decision\": # if\n string = \"}\" if self.isnan else f\"({self.text}) {{\"\n string = string.replace(\"(if\", \"if(\")\n string = string.replace(\"(elif\", \"elif(\")\n string = [string.replace(\"( \", \"(\")]\n\n if self.shape == \"Display\": # print\n string = [f\"print({self.text});\"]\n if self.shape == \"Manual Operation\": # while\n string = \"}\" if self.isnan else f\"({self.text}) {{\"\n string = string.replace(\"(while\", \"while(\")\n string = [string.replace(\"( \", \"(\")]\n if self.shape == \"Process\": # instruction\n string = f\"{self.text}\"\n string = self.split(string)\n if self.shape == \"Terminator\":\n is_end = self.isnan or not \"(\" in self.text\n if not is_end:\n string = [f\"function {self.text} {{\"]\n else:\n temp = \"\" if self.isnan else f\"return {self.text};\"\n string = [f\"{temp}}}\"]\n # string = [\"}\" if self.isnan else f\"function {self.text} {{\"]\n\n return string\n\n def desc(self):\n left_id = self.left.id_ if self.left else -1\n right_id = self.right.id_ if self.right else -1\n return f\"DESC {self.id_} {self.shape} {self.text} {self.isnan} {left_id} {right_id}\"\n\n def split(self, s):\n split = s.split(\";\")\n if split[-1] != '':\n return ['ERROR missing ;']\n s2 = [word+\";\" for word in split if word != \"\"]\n return s2\n\n\nclass DiagramConverter():\n def __init__(self) -> None:\n self.r = []\n\n def get_page_ids(self, df):\n pages = df[df[\"Name\"] == \"Page\"]\n ids = {}\n for i, row in pages.reset_index().iterrows():\n ids[i+1] = row.Id\n\n return ids\n\n def get_page_data(self, df, selected_page):\n ids = self.get_page_ids(df)\n id_ = ids[selected_page]\n data = df[df[\"Page ID\"] == id_]\n return data\n\n def create_graph(self, data):\n lines = data[data[\"Name\"] == \"Line\"]\n blocks = data[data[\"Name\"] != \"Line\"]\n\n nodes = {}\n roots = []\n for index, row in blocks.iterrows():\n text = row[\"Text Area 1\"]\n type_ = row[\"Name\"]\n isnan = False if type(text) == str else True\n text = \"\" if type(text) == float else text\n node = Node(id_=index, shape=row.Name, text=text, isnan=isnan)\n nodes[index] = node\n\n if type_ == \"Terminator\" and \"(\" in text:\n roots.append(node)\n\n for index, row in lines.iterrows():\n source = row[\"Line Source\"]\n 
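# Each \"Line\" row wires two blocks together: as the branch below shows, an edge labelled \"No\" feeds the false branch (node.left) and any other edge feeds node.right.\n            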
destination = row[\"Line Destination\"]\n text = row[\"Text Area 1\"]\n # print(source, destination, text)\n if text == \"No\":\n nodes[source].left = nodes[destination]\n else:\n nodes[source].right = nodes[destination]\n\n if nodes[destination].shape == \"Decision\":\n nodes[destination].needed_oks += 1\n\n return roots, nodes\n\n def traverse_tree(self, node, pending):\n if not node:\n raise NameError(f\"Missing main method\")\n\n string = node.tostrs()\n self.r = self.r + string\n if node.isnan and node.shape == \"Decision\":\n node.needed_oks -= 1\n if node.needed_oks == 1:\n # print(\"else {\")\n self.r = self.r + [\"else {\"]\n if node.needed_oks != 0:\n return\n if node.right:\n self.traverse_tree(node.right, pending)\n if node.left:\n self.traverse_tree(node.left, pending)\n\n return self.r\n\n def get_main_index(self, roots):\n for i, root in enumerate(roots):\n if \"main()\" in root.text:\n return i\n\n def get_rows(self, csv_path, selected_page):\n df = pd.read_csv(csv_path, index_col=0)\n df = df[df[\"Name\"] == \"Page\"]\n selected_page_index = 1\n tabs = df[\"Text Area 1\"].values\n for i, tab in enumerate(tabs):\n if tab == selected_page:\n selected_page_index = i + 1\n # selected_page_index = d[selected_page]\n df = pd.read_csv(csv_path, index_col=0)\n\n data = self.get_page_data(df, selected_page_index)\n\n premain_process = data[data[\"Name\"] == \"Predefined Process\"]\n premain_lines = \"\"\n if len(premain_process) > 0:\n premain_text = premain_process.iloc[0][\"Text Area 1\"]\n premain_rows = premain_text.split(\";\")[:-1]\n premain_rows = [row.replace(\"global\", \"\") for row in premain_rows]\n premain_rows = [\"global \" + row for row in premain_rows]\n premain_lines = \";\".join(premain_rows) + \";\"\n\n roots, _ = self.create_graph(data)\n\n main_index = self.get_main_index(roots)\n\n f = []\n roots[main_index], roots[-1] = roots[-1], roots[main_index]\n for root in roots:\n pending = []\n self.r = self.traverse_tree(root, pending)\n f = self.r\n f = [premain_lines] + f\n return f\n\n def print_rows(self, csv_path, selected_page):\n rows = self.get_rows(csv_path, selected_page)\n for row in rows:\n print(row)\n\n def get_tokens(self, csv_path, selected_page):\n r = self.get_rows(csv_path, selected_page)\n r = \" \".join(r)\n r = r.replace(\"(\", \" ( \")\n r = r.replace(\")\", \" ) \")\n r = r.replace(\"[\", \" [ \")\n r = r.replace(\"]\", \" ] \")\n r = r.replace(\";\", \" ; \")\n r = r.replace(\",\", \" , \")\n r = r.replace(\"+\", \" + \")\n r = r.replace(\"-\", \" - \")\n r = r.replace(\"\\\"\", \" \\\" \")\n r = r.replace(\"\\n\", \"\")\n r = r.split(\" \")\n\n r = list(filter(lambda x: x != \"\", r))\n\n return r\n\n def get_string(self, csv_path, selected_page):\n tokens = self.get_tokens(csv_path, selected_page)\n string = \"\".join(tokens)\n string = string.replace(\"\\n\", \"\")\n string = string.replace(\" \", \"\")\n return string\n\n def get_example_path(self):\n return \"./diagram_converter/examples/csvs/diagrams.csv\"\n\n\ndef main():\n dc = DiagramConverter()\n path = dc.get_example_path()\n selected_page = \"recursion\" # simple, if, while, lists, functions, all, recursion\n\n dc.print_rows(path, selected_page)\n # tokens = get_string(path, selected_page)\n # print(tokens)\n\n\nif __name__ == \"__main__\":\n 
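# Minimal usage sketch, assuming the bundled example CSV; \"simple\" is one of the pages listed in main():\n    #   dc = DiagramConverter()\n    #   dc.print_rows(dc.get_example_path(), \"simple\")\n    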
main()\n","repo_name":"edlgg/vip","sub_path":"diagram_converter/diagram_converter.py","file_name":"diagram_converter.py","file_ext":"py","file_size_in_byte":6952,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23189426348","text":"import socket\n\nclass Server():\n def __init__(self, ip, port):\n # Create socket\n self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.server_socket.bind((ip, port))\n self.server_socket.listen(1)\n print('Starting server, use to stop')\n print('Listening on port %s ...' % port)\n\n def escuchar(self):\n while True: \n # Wait for client connections\n client_connection, client_address = self.server_socket.accept()\n\n # Get the client request\n request = client_connection.recv(1024).decode()\n if (request != \" \"):\n request = request.split('\\n')\n\n # Send HTTP response\n response = \"HTTP/1.1 200 OK\\n\"\n try: \n file = request[0].split()[1]\n except:\n pass\n\n if (file == \"/\"):\n archivo = open(\"files/index.html\")\n contenido = archivo.read()\n archivo.close()\n response = response + \"\\n\" + contenido\n client_connection.sendall(response.encode())\n\n elif (file == \"/request\"):\n response = response + \"\\n
<h1>REQUEST REALIZADO:</h1><br>Direccion del cliente: \"+ str(client_address) +\"<br>\"\n                for i in request:\n                    response =response+\"<br>\"+i+\"<br>\"\n                client_connection.sendall(response.encode()) \n\n            elif (file == \"/response\"):\n                response = response + \"\n<h1>RESPONSE A REALIZAR:</h1>\"\n                response = response + \"\n<br>HTTP/1.0 200 OK - version y codigo de respuesta<br>\"\n                response = response + \"\n<br>Dom, 7/08/2022 10:01 BGT - Fecha y hora de envio<br>\"\n                response = response + \"\n<br>Content-Type: text/html - tipo de archivo a enviar<br>\"\n                response = response + \"\n<br>Content-Length: 1998 - Longitud del archivo a enviar<br>\"\n                response = response + \"\n<br>- Espacio reservado para enviar el contenido -<br>\"\n                client_connection.sendall(response.encode())\n\n            elif (file == \"/close\"):\n                break\n\n            else:\n                try:\n                    archivo = open(\"files\"+file, 'rb')\n                    contenido = archivo.read()\n                    archivo.close()\n                    tipo = \"text/html; charset\"\n                    if (file.endswith(\".jpg\") or file.endswith(\".JPG\")):\n                        tipo = \"image/jpg\"\n                    elif(file.endswith(\"png\")):\n                        tipo = \"image/png\"\n                    elif(file.endswith(\"pdf\")):\n                        tipo = \"application/pdf\"\n                    \n                    response = response + \"Content-Type: \"+ tipo +\" \\n\\n\"\n                    response = response.encode('utf-8') + contenido\n                    client_connection.sendall(response)\n                except FileNotFoundError:\n                    response = \"HTTP/1.0 404 NOT FOUND\\n\\n<h1>ERROR: File Not Found</h1>
    \"\n \n \n client_connection.close()\n\n \n def cerrar(self):\n self.server_socket.close()","repo_name":"jfmartineb/repo-jfmartineb-st0263","sub_path":"Laboratorios/Lab1/Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":3500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11582759549","text":"import csv\n'''\nimport csv\nproducts = [(1,'mobile',25000,10),(2,'laptop',50000,5),(3,'pen',25,100)]\ncsvfile=open(\"data.csv\",\"w\",newline='')\nobj=csv.writer(csvfile)\nfor p in products:\n obj.writerow(p)\ncsvfile.close()\n\n\nimport csv\nproducts = [(1,'mobile',25000,10),(2,'laptop',50000,5),(3,'pen',25,100)]\ncsvfile=open(\"data1.csv\",\"w\",newline='')\nobj=csv.writer(csvfile)\nobj.writerow(products)\ncsvfile.close()\n\n\nimport csv\ncsvfile=open(\"data.csv\",\"r\",newline='')\nobj = csv.reader(csvfile)\nfor row in obj:\n print(row)\ncsvfile.close()\n\n'''\n\n\nemployees = [{'name':'John','dept':'hr','salary':500000},\n {'name':'Mary','dept':'sales','salary':600000},\n {'name':'Peter','dept':'sales','salary':700000}]\nfields = list(employees[0].keys())\ncsvfile = open('empData.csv','w',newline='')\nobj=csv.DictWriter(csvfile,fieldnames=fields)\nobj.writeheader()\nobj.writerow(employees)\ncsvfile.close()\n\n\ncsvfile = open('empData.csv','r',newline='')\nobj =csv.DictReader(csvfile)\nfor field in obj.fieldnames:\n print(field,end=\"\\t\")\nprint()\nfor row in obj:\n print(row)\ncsvfile.close();","repo_name":"veenu43/pythonBasics","sub_path":"csvTest.py","file_name":"csvTest.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24433876773","text":"from typing import List, Optional, Tuple\nimport gpflow\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_probability as tfp\nfrom gpflow import set_trainable\nfrom gpflow.config import default_float, default_jitter\nfrom gpflow.covariances.dispatch import Kuf, Kuu\nfrom sklearn.cluster import KMeans\nfrom gpflow.models import GPModel\nfrom oak.input_measures import EmpiricalMeasure, GaussianMeasure, MOGMeasure\nfrom oak.oak_kernel import (\n KernelComponenent,\n OAKKernel,\n bounded_param,\n get_list_representation,\n)\nfrom oak.ortho_binary_kernel import OrthogonalBinary\nfrom oak.ortho_categorical_kernel import OrthogonalCategorical\nfrom oak.ortho_rbf_kernel import OrthogonalRBFKernel\n# -\n\nopt = gpflow.optimizers.Scipy()\ntfd = tfp.distributions\nf64 = gpflow.utilities.to_default_float\n\n\ndef model_to_kernel_list(model: GPModel, selected_dims: List):\n # exact list of kernels from the OAK model\n kernel = []\n model_dims = extract_active_dims(model)\n for i in range(len(selected_dims)):\n for j in range(len(model.kernel.kernels) - 1):\n if model_dims[j] == selected_dims[i]:\n kernel.append(model.kernel.kernels[j])\n # append offset kernel\n kernel.append(model.kernel.kernels[-1])\n return kernel\n\n\ndef extract_active_dims(m):\n # exact list of active dimensions from the OAK model m\n active_dims = []\n for i in range(len(m.kernel.kernels) - 1):\n # interaction with product kernel\n if type(m.kernel.kernels[i]) == gpflow.kernels.base.Product:\n sub_m = m.kernel.kernels[i].kernels\n dims = []\n for j in range(len(sub_m)):\n dim = sub_m[j].active_dims\n dims.append(dim[0])\n else:\n dims = m.kernel.kernels[i].active_dims\n\n active_dims.append(list(dims))\n return active_dims\n\n\ndef grammer_to_kernel(\n selected_dims,\n offset,\n 
measure=GaussianMeasure(0, 10),\n lengthscales_lo=1e-3,\n lengthscales_hi=100,\n variance_lo=0.01,\n variance_hi=100,\n):\n # construct list of kernels\n # selected_dims: list of kernel indices\n selected_kernels = []\n for i in range(len(selected_dims)):\n # loop through depth\n k_list = []\n for j in range(len(selected_dims[i])):\n\n lengthscales = np.random.uniform(low=lengthscales_lo, high=lengthscales_hi)\n variance = np.random.uniform(low=variance_lo, high=variance_hi)\n\n dim = selected_dims[i][j] + offset\n if isinstance(measure, EmpiricalMeasure):\n location = measure.location\n k = OrthogonalRBFKernel(\n gpflow.kernels.RBF(lengthscales=lengthscales, variance=variance),\n EmpiricalMeasure(np.reshape(location[:, dim], (-1, 1))),\n active_dims=[dim],\n )\n else:\n k = OrthogonalRBFKernel(\n gpflow.kernels.RBF(lengthscales=lengthscales, variance=variance),\n measure,\n active_dims=[dim],\n )\n k.base_kernel.lengthscales = bounded_param(\n lengthscales_lo, lengthscales_hi, lengthscales\n )\n k.base_kernel.variance = bounded_param(variance_lo, variance_hi, variance)\n if j > 0:\n k.base_kernel.variance.assign(1)\n set_trainable(k.base_kernel.variance, False)\n\n k_list.append(k)\n k = np.prod(k_list)\n selected_kernels.append(k)\n\n # add a constant kernel\n k0 = gpflow.kernels.Constant(variance=10)\n selected_kernels.append(k0)\n\n return selected_kernels\n\n\ndef f1(x, y, sigma, lengthscales, delta, mu):\n # eq (44) in Appendix G.1 of paper for calculating Sobol indices\n return (\n sigma ** 4\n * lengthscales\n / np.sqrt(lengthscales ** 2 + 2 * delta ** 2)\n * np.exp(-((x - y) ** 2) / (4 * lengthscales ** 2))\n * np.exp(-((mu - (x + y) / 2) ** 2) / (2 * delta ** 2 + lengthscales ** 2))\n )\n\n\ndef f2(x, y, sigma, lengthscales, delta, mu):\n # eq (45) in Appendix G.1 of paper for calculating Sobol indices\n M = 1 / (lengthscales ** 2) + 1 / (lengthscales ** 2 + delta ** 2)\n m = 1 / M * (mu / (lengthscales ** 2 + delta ** 2) + x / lengthscales ** 2)\n C = (\n x ** 2 / (lengthscales ** 2)\n + mu ** 2 / (lengthscales ** 2 + delta ** 2)\n - m ** 2 * M\n )\n return (\n sigma ** 4\n * lengthscales\n * np.sqrt((lengthscales ** 2 + 2 * delta ** 2) / (delta ** 2 * M + 1))\n * np.exp(-C / 2)\n / (lengthscales ** 2 + delta ** 2)\n * np.exp(-((y - mu) ** 2) / (2 * (lengthscales ** 2 + delta ** 2)))\n * np.exp(-((m - mu) ** 2) / (2 * (1 / M + delta ** 2)))\n )\n\n\ndef f3(x, y, sigma, lengthscales, delta, mu):\n # eq (46) in Appendix G.1 of paper for calculating Sobol indices\n return f2(y, x, sigma, lengthscales, delta, mu)\n\n\ndef f4(x, y, sigma, lengthscales, delta, mu):\n # eq (47) in Appendix G.1 of paper for calculating Sobol indices\n return (\n sigma ** 4\n * lengthscales ** 2\n * (lengthscales ** 2 + 2 * delta ** 2)\n * np.sqrt(\n (lengthscales ** 2 + delta ** 2) / (lengthscales ** 2 + 3 * delta ** 2)\n )\n / ((lengthscales ** 2 + delta ** 2) ** 2)\n * np.exp(\n -((x - mu) ** 2 + (y - mu) ** 2) / (2 * (lengthscales ** 2 + delta ** 2))\n )\n )\n\n\ndef get_model_sufficient_statistics(m, get_L=True):\n \"\"\"\n Compute a vector \"alpha\" and a matrix \"L\" which can be used for easy prediction.\n \"\"\"\n\n X_data, Y_data = m.data\n if isinstance(m, gpflow.models.SVGP):\n posterior = m.posterior()\n # details of Qinv can be found https://github.com/GPflow/GPflow/blob/develop/gpflow/posteriors.py\n alpha = posterior.alpha\n if get_L:\n L = tf.linalg.cholesky(tf.linalg.inv(posterior.Qinv[0]))\n elif isinstance(m, gpflow.models.SGPR):\n\n num_inducing = len(m.inducing_variable)\n err = 
Y_data - m.mean_function(X_data)\n kuf = Kuf(m.inducing_variable, m.kernel, X_data)\n kuu = Kuu(m.inducing_variable, m.kernel, jitter=default_jitter())\n\n sigma = tf.sqrt(m.likelihood.variance)\n L = tf.linalg.cholesky(kuu)\n A = tf.linalg.triangular_solve(L, kuf, lower=True) / sigma\n B = tf.linalg.matmul(A, A, transpose_b=True) + tf.eye(\n num_inducing, dtype=default_float()\n )\n LB = tf.linalg.cholesky(B)\n Aerr = tf.linalg.matmul(A, err)\n c = tf.linalg.triangular_solve(LB, Aerr, lower=True) / sigma\n\n tmp1 = tf.linalg.solve(tf.transpose(LB), c)\n alpha = tf.linalg.solve(tf.transpose(L), tmp1)\n\n if get_L:\n # compute the effective L\n LAi = tf.linalg.triangular_solve(L, np.eye(L.shape[0]))\n LBiLAi = tf.linalg.triangular_solve(LB, LAi)\n L = tf.linalg.inv(LAi - LBiLAi)\n\n elif isinstance(m, gpflow.models.GPR):\n # prepare for prediction\n K = m.kernel(X_data)\n Ktilde = K + np.eye(X_data.shape[0]) * m.likelihood.variance\n L = np.linalg.cholesky(Ktilde)\n alpha = tf.linalg.cholesky_solve(L, Y_data)\n\n else:\n raise NotImplementedError\n if get_L:\n return alpha, L\n else:\n return alpha\n\n\ndef compute_L(\n X: tf.Tensor, lengthscale: float, variance: float, dim: int, delta: float, mu: float\n) -> np.ndarray:\n # calculate the integral in eq (40) of Appendix G.1 in paper\n N = X.shape[0]\n sigma = np.sqrt(variance)\n xx = X[:, dim]\n yy = X[:, dim]\n\n x = np.repeat(xx, N)\n y = np.tile(yy, N)\n L = (\n f1(x, y, sigma, lengthscale, delta, mu)\n - f2(x, y, sigma, lengthscale, delta, mu)\n - f3(x, y, sigma, lengthscale, delta, mu)\n + f4(x, y, sigma, lengthscale, delta, mu)\n )\n L = np.reshape(L, (N, N))\n\n return L\n\n\ndef compute_L_binary_kernel(\n X: tf.Tensor, p0: float, variance: float, dim: int\n) -> np.ndarray:\n\n \"\"\"\n Compute L matrix needed for sobol index calculation for orthogonal binary kernels.\n :param X: training input tensor\n :param p0: probability measure for the data distribution (Prob(x=0))\n :param variance: variance parameter for the binary kernel, default is 1\n :param dim: active dimension of the kernel\n :return: sobol value L matrix\n\n \"\"\"\n assert 0 <= p0 <= 1\n\n N = X.shape[0]\n xx = X[:, dim]\n yy = X[:, dim]\n\n x = np.repeat(xx, N)\n y = np.tile(yy, N)\n p1 = 1 - p0\n\n L = variance * (\n p0 * (p1 ** 2 * (1 - x) - p0 * p1 * x) * (p1 ** 2 * (1 - y) - p0 * p1 * y)\n + p1 * (-p0 * p1 * (1 - x) + p0 ** 2 * x) * (-p0 * p1 * (1 - y) + p0 ** 2 * y)\n )\n L = np.reshape(L, (N, N))\n\n return L\n\n\ndef compute_L_categorical_kernel(\n X: tf.Tensor, W: tf.Tensor, kappa: tf.Tensor, p: float, variance: float, dim: int\n) -> np.ndarray:\n\n \"\"\"\n Compute L matrix needed for sobol index calculation for orthogonal categorical kernels.\n :param X: training input tensor\n :param W: parameter of categorical kernel\n :param kappa: parameter of categorical kernel\n :param p: probability measure for the data distribution (Prob(x=0))\n :param variance: variance parameter for the categorical kernel, default is 1\n :param dim: active dimension of the kernel\n :return: sobol value L matrix\n\n \"\"\"\n assert np.abs(p.sum() - 1) < 1e-6\n\n N = X.shape[0]\n\n A = tf.linalg.matmul(W, W, transpose_b=True) + tf.linalg.diag(kappa)\n Ap = tf.linalg.matmul(A, p)\n B = A - tf.linalg.matmul(Ap, Ap, transpose_b=True) / (\n tf.linalg.matmul(p, Ap, transpose_a=True)[0]\n )\n B = B * variance\n\n xx = tf.range(len(p), dtype=gpflow.config.default_float())\n\n K = tf.gather(\n tf.transpose(tf.gather(B, tf.cast(X[:, dim], tf.int32))), tf.cast(xx, tf.int32)\n )\n\n L = 
tf.linalg.matmul(K, K * p, transpose_a=True)\n\n return L\n\n\n@tf.function\ndef compute_L_empirical_measure(\n x: tf.Tensor, w: tf.Tensor, kernel: OrthogonalRBFKernel, z: tf.Tensor\n) -> np.ndarray:\n \"\"\"\n Compute L matrix needed for sobol index calculation with empirical measure\n :param x: location of empirical measure\n :param w: weights of empirical measure, input density of the form 1/(\\sum_i w_i) * \\sum_i w_i (x==x_i)\n :param kernel: constrained kernel\n :param z: training data in full GP or inducing points locations in sparse GP\n :return: sobol value L matrix\n \"\"\"\n\n # number of training/inducing points\n m = z.shape[0]\n # number of empirical locations\n n = x.shape[0]\n\n kxu = kernel.K(x, z)\n tf.debugging.assert_shapes([(kxu, (n, m))])\n w = tf.reshape(w, [1, n])\n L = tf.matmul(w * tf.transpose(kxu), kxu)\n\n return L\n\n\ndef compute_sobol_oak(\n model: gpflow.models.BayesianModel,\n delta: float,\n mu: float,\n share_var_across_orders: Optional[bool] = True,\n) -> Tuple[List[List[int]], List[float]]:\n \"\"\"\n Compute sobol indices for Duvenaud model\n :param model: gpflowm odel\n :param delta: prior variance of measure p(X)\n :param mu: prior mean of measure p(x)\n :param share_var_across_orders: whether to share the same variance across orders,\n if False, it uses original OrthogonalRBFKernel kernel \\prod_i(1+k_i).\n :return: list of input dimension indices and list of sobol indices\n \"\"\"\n print(model.kernel)\n assert isinstance(model.kernel, OAKKernel), \"only work for OAK kernel\"\n num_dims = model.data[0].shape[1]\n\n selected_dims_oak, kernel_list = get_list_representation(\n model.kernel, num_dims=num_dims\n )\n selected_dims_oak = selected_dims_oak[1:] # skip constant term\n if isinstance(model, (gpflow.models.SGPR, gpflow.models.SVGP)):\n X = model.inducing_variable.Z\n else:\n X = model.data[0]\n N = X.shape[0]\n alpha = get_model_sufficient_statistics(model, get_L=False)\n sobol = []\n L_list = []\n for kernel in kernel_list:\n assert isinstance(kernel, KernelComponenent)\n if len(kernel.iComponent_list) == 0:\n continue # skip constant term\n L = np.ones((N, N))\n n_order = len(kernel.kernels)\n for j in range(len(kernel.kernels)):\n if share_var_across_orders:\n if j < 1:\n v = kernel.oak_kernel.variances[n_order].numpy()\n else:\n v = 1\n else:\n v = kernel.kernels[j].base_kernel.variance.numpy()\n\n dim = kernel.kernels[j].active_dims[0]\n\n if isinstance(kernel.kernels[j], OrthogonalRBFKernel):\n\n if isinstance(kernel.kernels[j].base_kernel, gpflow.kernels.RBF) and (\n not isinstance(kernel.kernels[j].measure, EmpiricalMeasure)\n and (not isinstance(kernel.kernels[j].measure, MOGMeasure))\n ):\n l = kernel.kernels[j].base_kernel.lengthscales.numpy()\n L = L * compute_L(\n X,\n l,\n v,\n dim,\n delta,\n mu,\n )\n\n elif isinstance(kernel.kernels[j].measure, EmpiricalMeasure):\n L = (\n v ** 2\n * L\n * compute_L_empirical_measure(\n kernel.kernels[j].measure.location,\n kernel.kernels[j].measure.weights,\n kernel.kernels[j],\n tf.reshape(X[:, dim], [-1, 1]),\n )\n )\n else:\n raise NotImplementedError\n\n elif isinstance(kernel.kernels[j], OrthogonalBinary):\n p0 = kernel.kernels[j].p0\n L = L * compute_L_binary_kernel(X, p0, v, dim)\n\n elif isinstance(kernel.kernels[j], OrthogonalCategorical):\n p = kernel.kernels[j].p\n W = kernel.kernels[j].W\n kappa = kernel.kernels[j].kappa\n L = L * compute_L_categorical_kernel(X, W, kappa, p, v, dim)\n\n else:\n raise NotImplementedError\n L_list.append(L)\n mean_term = tf.tensordot(\n 
tf.tensordot(tf.transpose(alpha), L, axes=1), alpha, axes=1\n ).numpy()[0][0]\n sobol.append(mean_term)\n\n assert len(selected_dims_oak) == len(sobol)\n return selected_dims_oak, sobol\n\n\ndef compute_sobol(\n model: GPModel,\n kernel_list: list,\n delta: float,\n mu: float,\n alpha: np.ndarray,\n sparse_gp: bool = True,\n):\n # compute Sobol in eq (40) of G.1 of paper\n if sparse_gp:\n X = model.inducing_variable.Z\n else:\n X = model.data[0]\n N = X.shape[0]\n sobol = []\n L_list = []\n for kernel in kernel_list:\n assert not isinstance(\n kernel, KernelComponenent\n ), \"should use duvenaud sobol calculation code\"\n if isinstance(kernel, gpflow.kernels.base.Product): # exclude constant term\n L = np.ones((N, N))\n for j in range(len(kernel.kernels)):\n l = kernel.kernels[j].base_kernel.lengthscales.numpy()\n v = kernel.kernels[j].base_kernel.variance.numpy()\n dim = kernel.kernels[j].active_dims[0]\n L = L * compute_L(X, l, v, dim, delta, mu)\n L_list.append(L)\n sobol.append(\n tf.tensordot(\n tf.tensordot(tf.transpose(alpha), L, axes=1), alpha, axes=1\n ).numpy()[0][0]\n )\n\n else:\n if type(kernel) != gpflow.kernels.statics.Constant and not isinstance(\n kernel, KernelComponenent\n ):\n l = kernel.base_kernel.lengthscales.numpy()\n v = kernel.base_kernel.variance.numpy()\n dim = kernel.active_dims[0]\n L = compute_L(X, l, v, dim, delta, mu)\n\n L_list.append(L)\n sobol.append(\n tf.tensordot(\n tf.tensordot(tf.transpose(alpha), L, axes=1), alpha, axes=1\n ).numpy()[0][0]\n )\n\n return sobol\n\n\ndef get_prediction_component(\n m: gpflow.models.BayesianModel,\n alpha: tf.Tensor,\n X: np.ndarray = None,\n share_var_across_orders: Optional[bool] = True,\n) -> list:\n \"\"\"\n Return predictive mean for dataset 1 and 2\n :param m: GP model\n :param X: concatenation of data to make predictions: first half of X are from dataset 1,\n last half of X are from dataset 2. If it is None, then X is set to be the training data.\n :param alpha: statistics used to make predictions, e.g. 
K^{-1}y\n :param share_var_across_orders: whether to share the same variance across orders,\n if False, it uses original OrthogonalRBFKernel kernel \\prod_i(1+k_i)\n :return: prediction of each kernel component of two datasets (e.g., two different simulation runs), concatenated together\n \"\"\"\n if X is None:\n X = m.data[0]\n selected_dims, _ = get_list_representation(m.kernel, num_dims=X.shape[1])\n tuple_of_indices = selected_dims[1:]\n out = []\n if isinstance(m, gpflow.models.GPR):\n X_conditioned = m.data[0]\n elif isinstance(m, (gpflow.models.SGPR, gpflow.models.SVGP)):\n X_conditioned = m.inducing_variable.Z\n\n for n in range(len(tuple_of_indices)):\n Kxx = tf.ones([X.shape[0], alpha.shape[0]], dtype=tf.dtypes.float64)\n num_interaction = len(tuple_of_indices[n])\n for ii in range(num_interaction):\n idx = tuple_of_indices[n][ii]\n Kxx *= m.kernel.kernels[idx].K(\n np.reshape(X[:, idx], (-1, 1)), X_conditioned[:, idx : idx + 1]\n )\n if share_var_across_orders:\n Kxx *= m.kernel.variances[num_interaction]\n\n predictive_component_mean = tf.matmul(Kxx, alpha)\n out.append(predictive_component_mean[:, 0])\n return out\n\n\ndef initialize_kmeans_with_binary(\n X: tf.Tensor,\n binary_index: list,\n continuous_index: Optional[list] = None,\n n_clusters: Optional[int] = 200,\n):\n # K-means with combination of continuous and binary feature\n Z = np.zeros([n_clusters, X.shape[1]])\n\n for index in binary_index:\n kmeans = KMeans(n_clusters=n_clusters, random_state=0).fit(X[:, index][:, None])\n Z[:, index] = kmeans.cluster_centers_.astype(int)[:, 0]\n\n if continuous_index is not None:\n kmeans_continuous = KMeans(n_clusters=n_clusters, random_state=0).fit(\n X[:, continuous_index]\n )\n Z[:, continuous_index] = kmeans_continuous.cluster_centers_\n\n return Z\n\n\ndef initialize_kmeans_with_categorical(\n X: tf.Tensor,\n binary_index: list,\n categorical_index: list,\n continuous_index: list,\n n_clusters: Optional[int] = 200,\n):\n # K-means with combination of continuous and categorical feature\n Z = np.zeros([n_clusters, X.shape[1]])\n\n for index in binary_index + categorical_index:\n kmeans = KMeans(n_clusters=n_clusters, random_state=0).fit(X[:, index][:, None])\n Z[:, index] = kmeans.cluster_centers_.astype(int)[:, 0]\n\n kmeans_continuous = KMeans(n_clusters=n_clusters, random_state=0).fit(\n X[:, continuous_index]\n )\n Z[:, continuous_index] = kmeans_continuous.cluster_centers_\n\n return Z\n","repo_name":"amzn/orthogonal-additive-gaussian-processes","sub_path":"oak/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":19478,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"61"} +{"seq_id":"2171676190","text":"from dependency_injector.wiring import inject, Provide\n\n\nclass SheetExtensions:\n\n @staticmethod\n @inject\n def ImportDrawing(sheetId: int, file: str, fontName, posX: float, posY: float, scale: float = 1.0, rotation: float = 0,\n instance=Provide[\"interop\"]):\n _sht_instance = instance.SheetRef\n _sht_instance.SetId(sheetId)\n region_info = SheetExtensions.GetRegionDetails()\n\n if scale > region_info[2]:\n raise ValueError(f\"A escala da folha deve ser maior do que a escala de importação selecionada. 
Escala selecionada [{scale}] | Disponível : [{region_info[2]}]\")\n\n _sht_instance.ImportDxf(file, scale, posX, posY, rotation, fontName, 0)\n\n @staticmethod\n @inject\n def GetRegionDetails(sheetId: int, instance=Provide[\"interop\"]) -> (float, float, float):\n _sht_instance = instance.SheetRef\n _sht_instance.SetId(sheetId)\n return _sht_instance.GetSheetRegion()\n","repo_name":"chrisbewz/e3-panel-distance-estimator","sub_path":"RouteEstimator/Extensions/Sheet.py","file_name":"Sheet.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19001373359","text":"#Uses python3\n\nimport sys\nimport queue\nimport heapq\n\ndef distance(adj, cost, s, t):\n n = len(adj)\n dist_heap = [[0,s]]\n #dist_heap[s][0] = 0\n dist_min = [float(\"inf\")]*n\n dist_min[s] = 0\n heapq.heapify(dist_heap)\n\n while dist_heap :\n d, u = heapq.heappop(dist_heap)\n if dist_min[u] == d :\n if u == t : return d\n for v, weight in zip(adj[u], cost[u]) :\n if dist_min[v] > dist_min[u] + weight :\n dist_min[v] = dist_min[u] + weight\n heapq.heappush(dist_heap, [dist_min[v], v])\n return -1\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n data = list(map(int, input.split()))\n n, m = data[0:2]\n data = data[2:]\n edges = list(zip(zip(data[0:(3 * m):3], data[1:(3 * m):3]), data[2:(3 * m):3]))\n data = data[3 * m:]\n adj = [[] for _ in range(n)]\n cost = [[] for _ in range(n)]\n for ((a, b), w) in edges:\n adj[a - 1].append(b - 1)\n cost[a - 1].append(w)\n s, t = data[0] - 1, data[1] - 1\n print(distance(adj, cost, s, t))\n","repo_name":"asif98/asif98-Data-Structures-and-Algorithms-Specialization","sub_path":"Algorithms_on_Graphs/week4_paths_in_graphs2/1_minimum_flight_cost/dijkstra.py","file_name":"dijkstra.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"28787507407","text":"#UPDATES:\n\n#The weights dont seem to be updating fr after the last step. I hve no idea why... 
Try using different data type?\n#using numpy arrays instead.\n\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport chap_5_img_reader as m\n\nimport numpy as np\n#importing the math functions module u created\nimport math_func_practice as p\n\n#calling the reader function and setting vars equal to the data\n#ntrimages = number of training images (60,000)\ntrain_images, train_labels, ntrimages, test_images, test_labels, nteimages = m.mnist_read()\n\nx = 0\ny = 0\n\nimg_num = 0 #what img number we are on\ninputs = [] #initializing list of inputs, will be cleared at the start of each new image\nalpha = .01 #setting alpha to .01\n\n\n#initializing the weights, all to 1?\n#turning weights into a matrix, 10 x 784 (i for each number)\nweights = np.ones((10,784))\n\n\n# weight_rows = []\n# i = 0\n# j = 0\n# for j in range(10):\n# i = 0\n# weight_rows.clear()\n# for i in range(784):\n# weight_rows.append(1)\n# \n# weights.append(weight_rows)\n \n#print(weights)\n#print(len(weights))\n#initializing list of correct guesses\ncorrect_guess = []\ni = 0\nfor i in range(ntrimages):\n correct_guess.append(train_labels[i])\n \n#initializing list of possible predictions\npossible_predictions = [0,1,2,3,4,5,6,7,8,9]\n#neural network function\n\ndef neural_network(inp,weight):\n \n #multiplying each input by each corresponding weight\n #print(len(weights))\n prediction = p.vect_matrix_multiplication(inp,weight)\n #print(prediction)\n pred = p.percenter(prediction)\n #print(pred)\n return pred\n \n#loop continues until we have gone thru every image in the training set\nwhile (img_num < ntrimages):\n \n inputs.clear()\n \n # this concatates the 28 x 28 grid into a single list of inputs, 784 to be exact\n for x in range (0,28):\n for y in range (0,28):\n \n inputs.append((train_images[img_num])[x][y])\n \n #once the loop has finished adding to the inputs list (each pixel of the image), now we perform neural network calculations\n \n prediction = neural_network(inputs,weights)\n\n #initializing error vector, vector of 0's\n error = []\n i = 0\n for i in range(len(prediction)):\n error.append(0)\n \n #initializing delta vector, vector of 0's\n delta = []\n i = 0\n for i in range(len(prediction)):\n delta.append(0)\n \n #make list of desired outputs, 1 = correct, 0 = incorrect\n true = [0,0,0,0,0,0,0,0,0,0] \n if(correct_guess[img_num] == 0):\n true[0] = 1\n elif(correct_guess[img_num] == 1):\n true[1] = 1\n elif(correct_guess[img_num] == 2):\n true[2] = 1\n elif(correct_guess[img_num] == 3):\n true[3] = 1\n elif(correct_guess[img_num] == 4):\n true[4] = 1\n elif(correct_guess[img_num] == 5):\n true[5] = 1\n elif(correct_guess[img_num] == 6):\n true[6] = 1\n elif(correct_guess[img_num] == 7):\n true[7] = 1\n elif(correct_guess[img_num] == 8):\n true[8] = 1\n elif(correct_guess[img_num] == 9):\n true[9] = 1\n \n \n \n i = 0\n for i in range(len(true)):\n error[i] = ((prediction[i] - true[i]) ** 2)\n #print('error',error[i])\n delta[i] = prediction[i] - true[i]\n #print('delta',delta[i])\n \n weight_deltas = p.outer_product(delta,inputs)\n \n i = 0\n j = 0\n\n for i in range(len(weights)):\n for j in range (len(weights[0])):\n #print(weight_deltas[i][j])\n weights[i][j] -= alpha * weight_deltas[i][j]\n \n #moving the the next image \n \n img_num += 1\n\n#saving these weights to a csv file in case I want to jump right into testing on a previously trained set of weights\nnp.savetxt('output.csv',weights,delimiter=\",\")\n\nprint('Done training, time to test.')\n\n\n#done training, now lets test the 
\n    \n#these values will be used to see at what percent this bot guesses the correct numbers\nnum_correct_guesses = 0\ntotal_guesses = 0\n\n\ncorrect_guess_2 = []\ni = 0\nfor i in range(nteimages):\n    correct_guess_2.append(test_labels[i])\n\ninputs_2 = []\nimg_num_2 = 0\n\nwhile (img_num_2 < nteimages):\n    \n    inputs_2.clear()\n    \n    x = 0\n    y = 0\n    for x in range (0,28):\n        for y in range (0,28):\n            \n            inputs_2.append((test_images[img_num_2])[x][y])\n    \n    prediction_2 = neural_network(inputs_2,weights)\n    \n    if(p.chooseNum(prediction_2) == correct_guess_2[img_num_2]):\n        num_correct_guesses += 1\n    \n    total_guesses += 1\n    img_num_2 += 1\n    \n\nprint('This bot correctly guessed the number ', 100 * num_correct_guesses/total_guesses, ' percent of the time.')\n#print('imgnum',img_num)\n    \n    \n\n# \n# plt.imshow(train_images[255], cmap=cm.Greys)\n# plt.title(train_labels[255])\n# plt.grid()\n# plt.show()\n# \n# ","repo_name":"aidank123/deep_learning_bots","sub_path":"chap_5_neural_network.py","file_name":"chap_5_neural_network.py","file_ext":"py","file_size_in_byte":4876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"41417689423","text":"'''\n    Please note that this file is an example, not an official Lotame-supported\n    tool. The Support team at Lotame does not provide support for this script,\n    as it's only meant to serve as a guide to help you use the Services API.\n\n    Filename: add_behaviors_to_audiences.py\n    Author: Brett Coker\n    Python Version: 3.6.4\n\n    Adds behaviors to a given list of audiences (one behavior per audience).\n\n    Takes a .csv as an argument:\n        - Header row required (contents don't matter)\n        - Column A should be audience IDs\n        - Column B should be behavior IDs\n\n    Behaviors will be either ANDed or ORed, as chosen by the user when running\n    the script. They will be appended to the end of the definition, which\n    means they will be in their own group (i.e. not nested).\n'''\nimport sys\nimport csv\nimport better_lotameapi\n\n\ndef get_audience_info(lotame, audience_id):\n    response = lotame.get(f'audiences/{audience_id}')\n\n    status = response.status_code\n    if status != 200:\n        return None\n\n    return response.json()\n\n\ndef set_audience_info(lotame, audience_id, info):\n    status = lotame.put(f'audiences/{audience_id}', info).status_code\n    return bool(status == 204)\n\n\ndef is_valid_behavior(lotame, behavior_id):\n    status = lotame.get(f'behaviors/{behavior_id}').status_code\n    return bool(status == 200)\n\n
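\n# Example input CSV (hypothetical IDs), matching the format described in the\n# docstring above -- header row, audience IDs in column A, behavior IDs in column B:\n#   audience_id,behavior_id\n#   12345,67890\n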
\ndef main():\n    if len(sys.argv) != 2:\n        print(f'Usage: python {sys.argv[0]} audiences.csv')\n        return\n\n    lotame = better_lotameapi.Lotame()\n\n    print('Append options:')\n    print('1. AND')\n    print('2. OR')\n    choice = ''\n    while choice not in ['1', '2']:\n        choice = input('Choice: ')\n\n    if choice == '1':\n        operator = 'AND'\n    else:\n        operator = 'OR'\n\n    filename = sys.argv[1]\n    with open(filename) as csv_file:\n        reader = csv.reader(csv_file)\n\n        # Skip header row\n        next(reader)\n\n        for row in reader:\n            audience_id = row[0]\n            behavior_id = row[1]\n\n            audience_info = get_audience_info(lotame, audience_id)\n            if not audience_info:\n                print(f'Error: Audience {audience_id} not found')\n                continue\n\n            if not is_valid_behavior(lotame, behavior_id):\n                print(f'Error: Behavior {behavior_id} not found')\n                continue\n\n            behavior = {\n                'operator': operator,\n                'complexAudienceBehavior': {\n                    'behavior': {\n                        'id': behavior_id\n                    }\n                }\n            }\n\n            audience_info['definition']['component'].append(behavior)\n            if set_audience_info(lotame, audience_id, audience_info):\n                print(f'Updated audience {audience_id}')\n            else:\n                print(f'Error: Could not update audience {audience_id}')\n\n    \nif __name__ == '__main__':\n    main()\n","repo_name":"Lotame/api-examples","sub_path":"py/add_behaviors_to_audiences.py","file_name":"add_behaviors_to_audiences.py","file_ext":"py","file_size_in_byte":2903,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"}
{"seq_id":"70039180676","text":"from api.BaseApi import BaseApi\nfrom dao import DAO\nfrom JSONDecEncoder import JSONDecEncoder\n\n\nclass installation(BaseApi):\n    def __init__(self):\n        super().__init__()\n        self.supported = [\"get\"]\n\n    def get(self, inp):\n        query = inp[\"query\"]\n        if (\"city\" not in query):\n            return 418, \"Unable to find installations without a city, or bad request (._. )\"\n        if (\"range\" in query and \"act\" in query):\n            r = DAO().get_installation(query[\"city\"], query[\"act\"], int(query[\"range\"]))\n        elif (\"range\" in query):\n            r = DAO().get_installation(query[\"city\"], \"\", int(query[\"range\"]))\n        elif (\"act\" in query):\n            r = DAO().get_installation(query[\"city\"], query[\"act\"])\n        else:  # \"city\" in query and \"range\" not in query and \"act\" not in query\n            r = DAO().get_installation(query[\"city\"], \"\")\n        return {\n            \"code\": 200,\n            \"header\": [],\n            \"content\": \"application/json\",\n            \"res\": str.encode(JSONDecEncoder().encode(r))\n        }\n","repo_name":"EnzDev/SportEquip","sub_path":"api/sport/installation.py","file_name":"installation.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"21339322362","text":"# Useful starting lines\r\nimport numpy as np\r\nfrom proj1_helpers import *\r\nfrom cross_validation import *\r\nfrom implementations import *\r\n\r\n# Define seed for train/test random splitting\r\nseed = 10\r\n\r\nDATA_TRAIN_PATH = 'train.csv' \r\nDATA_TEST_PATH = 'test.csv' \r\n\r\ny_test, tX_test, ids_test = load_csv_data(DATA_TEST_PATH)\r\ny, tX, ids = load_csv_data(DATA_TRAIN_PATH)\r\ndegree = 2\r\ntX, tX_test = preprocessing(tX, tX_test)\r\npoly_basis = build_poly(tX, degree)\r\nprint(\"building poly\")\r\npoly_basisTest = build_poly(tX_test, 2)\r\nprint(\"building poly\")\r\n\r\nw, loss = least_squares(y, poly_basis)\r\nprint(loss)\r\ny_pred = predict_labels(w, poly_basisTest)\r\nprint(\"predicted \", str((y_pred==-1).sum()), \"-1s and \", str((y_pred==1).sum()), \"1s\")\r\ncreate_csv_submission(ids_test, y_pred, \"least_squares.csv\")\r\n\"\"\" cross 
validation\r\nk_fold = 10\r\n#gamma = [0.1, 0.6, 0.01, 0.001]\r\n#lambda_ = [0.000001, 0.04, 0.01, 0.00001, 0.001]\r\ngamma = [ 0.6, 0.7, 0.8, 0.9, 0.06]\r\nlambda_ = [0.000001, 0.01, 0.00001, 0.001]\r\nmax_iters = 1000 # try with less iterations maybe \r\nfrom collections import defaultdict\r\n\r\n# Split data in k-fold\r\nk_indices = k_indices(y, k_fold, seed)\r\n\r\nbest_acc_test = {}\r\nbest_acc_train = {}\r\nbest_g = {}\r\nbest_lambda = {}\r\nfor k in range(k_fold):\r\n best_acc_train[k] = 0\r\n best_acc_test[k] = 0\r\n \r\n for g in gamma:\r\n for l in lambda_:\r\n acc_train, acc_test= cross_validation(y, tX, k_indices, k, reg_logistic_regression, lambda_=l, initial_w=None, max_iters=max_iters, gamma=g)\r\n if(acc_train>best_acc_train[k]):\r\n best_acc_train[k] = acc_train\r\n if(acc_test>best_acc_test[k]):\r\n best_acc_test[k] = acc_test\r\n best_lambda[k] = l\r\n best_g[k] = g\r\n\r\n print(\"%f %f %d - Training accuracy: %f / Test accuracy : %f\" % (l, g,k,acc_train, acc_test))\r\nprint(best_acc_train)\r\nprint(best_acc_test)\r\nprint(best_g)\r\nprint(best_lambda)\r\n\"\"\"\r\n\r\n#cross_validation_visualization([0,1], best_acc_train, best_acc_test)\r\n\r\n#print(\"\\nAverage test accuracy: %f\" % np.mean(accs_test))\r\n#print(\"Variance test accuracy: %f\" % np.var(accs_test))\r\n#print(\"Min test accuracy: %f\" % np.min(accs_test))\r\n#print(\"Max test accuracy: %f\" % np.max(accs_test))\r\n#tX, tX_test= preprocessing(tX, tX_test)\r\ntX, _ = replace_empty(tX)\r\ntX_test, _ = replace_empty(tX_test)\r\n\r\nw, loss = reg_logistic_regression(y, tX, 0.01, None, 1000, 0.7)\r\nprint(loss)\r\ny_pred = predict_labels(w, tX_test)\r\nprint(\"predicted \", str((y_pred==-1).sum()), \"-1s and \", str((y_pred==1).sum()), \"1s\")\r\ncreate_csv_submission(ids_test, y_pred, \"reg_log_reg.csv\")\r\n# 0.665\t0.363 for \r\n# 0.724\t0.641 reg logistic preprocessing and 0.01, 0.7 and 100\r\n# 0.770\t0.625 least squares no preprocessing \r\n# 0.774\t0.632 least squares preprocessing\r\n# 0.2952679109171201\r\n# 0.29526791091712734 removing the cols to remove\r\n# 0.29526791091712734\r\n# 0.2952679109171201\r\n# 0.2952679109171201\r\n# 0.2952679109171201\r\n# 0.2952679109171228 also normalization\r\n# 0.2952679109171201\r\n# 0.2951405937065225\r\n# 0.2951405937065295\r\n# 0.2944385857259786 now","repo_name":"saradj/ML-Project","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43167994477","text":"from django.conf.urls import url\nfrom . 
import views\n\nurlpatterns = [\n    url(r'^$', views.index, name='users'),\n    url(r'^/new$', views.new, name='new'),\n    url(r'^/(?P<id>\d+)/edit$', views.edit, name='edit'),\n    url(r'^/(?P<id>\d+)$', views.show, name='show'),\n    url(r'^/create$', views.create, name='create'),\n    url(r'^/(?P<id>\d+)/delete$', views.delete, name='delete'),\n    url(r'^/(?P<id>\d+)/update$', views.update, name='update'),\n]\n\n\n# url(r'^$', views.toindex, name='my_index'),\n# url(r'^this_app/new$', views.new, name='my_new'),\n# url(r'^this_app/(?P<id>\d+)/edit$', views.edit, name='my_edit'),\n# url(r'^this_app/(?P<id>\d+)/delete$', views.delete, name='my_delete'),\n# url(r'^this_app/(?P<id>\d+)$', views.show, name='my_show'),","repo_name":"edelafuente22/SemiRestful","sub_path":"apps/semirestful/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"74442858114","text":"import cpt_tools\nfrom gui_helpers.gui_config import *\n\nimport numpy as np \n\nPREDICTION_TACC_START = 15\nPREDICTION_TACC_END = 200000\nNUM_PREDICTIONS = 20\n\n\nclass CombinedAnalysisWidget( object ) :\n\n    def __init__( self, analyzer ) :\n\n        self.analyzer = analyzer \n\n        current_estimates_box = QGroupBox( 'Current Estimates' )\n        current_estimates_layout = QVBoxLayout()\n        current_estimates_box.setLayout( current_estimates_layout ) \n        \n        self.mass_label_str = 'Current Mass Estimate: '\n        self.mass_estimate_label = QLabel( self.mass_label_str + '?' ) \n\n        self.freq_label_str = 'Current Frequency Estimate: '\n        self.freq_estimate_label = QLabel( self.freq_label_str + '?' ) \n\n        self.ame_mass_label_str = 'AME Mass Estimate: '\n        self.ame_mass_estimate_label = QLabel( self.ame_mass_label_str + '?' ) \n\n        self.ame_freq_label_str = 'AME Frequency Estimate: '\n        self.ame_freq_estimate_label = QLabel( self.ame_freq_label_str + '?' ) \n\n        current_estimates_layout.addWidget( self.mass_estimate_label )\n        current_estimates_layout.addWidget( self.freq_estimate_label ) \n        current_estimates_layout.addWidget( self.ame_mass_estimate_label ) \n        current_estimates_layout.addWidget( self.ame_freq_estimate_label ) \n\n        \n        data_box = QGroupBox( 'Processed Data' ) \n        data_layout = QVBoxLayout()\n        data_box.setLayout( data_layout ) \n        \n        data_cols = [ 'Accumulation \\nTime (\\u03bcs)', 'Measured \\u0394\\u03B8 (deg)' ]\n        self.data_table = QTableWidget( 1, len( data_cols ) )\n        self.data_table.setHorizontalHeaderLabels( data_cols )\n        self.data_table.horizontalHeader().setSectionResizeMode( QHeaderView.Stretch ) \n        # self.data_table.verticalHeader().setSectionResizeMode( QHeaderView.Stretch )\n\n\n        data_layout.addWidget( self.data_table )\n\n        # for i in range( len( data_cols ) ) :\n        #     self.data_table.setColumnWidth( i, 100 ) \n        # self.data_table.setSizePolicy( MAX_SIZE_POLICY )\n\n        \n        self.data_table.setMinimumWidth( 250 ) \n\n        \n        predictions_box = QGroupBox( 'Predictions' ) \n        predictions_layout = QVBoxLayout()\n        predictions_box.setLayout( predictions_layout )\n\n        # self.predictions_status_label = QLabel( 'Waiting for reference...' 
)\n # self.predictions_status_label.setStyleSheet( 'color: #E55959' ) \n # predictions_layout.addWidget( self.predictions_status_label )\n \n predictions_cols = [ 'Accumulation \\nTime (\\u03bcs)',\n 'Corrected \\u0394\\u03B8 \\nPrediction (deg)',\n 'AME \\u0394\\u03B8 \\nPrediction (deg)' ]\n self.predictions_table = QTableWidget( NUM_PREDICTIONS, len( predictions_cols ) )\n self.predictions_table.setHorizontalHeaderLabels( predictions_cols )\n self.predictions_table.horizontalHeader().setSectionResizeMode( QHeaderView.Stretch ) \n # self.predictions_table.verticalHeader().setSectionResizeMode( QHeaderView.Stretch )\n self.predictions_table.setMinimumWidth( 320 )\n predictions_layout.addWidget( self.predictions_table ) \n \n\n canvas_box = QGroupBox( 'Visualization' ) \n canvas_layout = QVBoxLayout() \n # self.analyzer = analysis.CPTanalyzer()\n self.canvas = FigureCanvas( self.analyzer.f )\n canvas_layout.addWidget( self.canvas )\n canvas_box.setLayout( canvas_layout ) \n\n \n self.layout = QHBoxLayout()\n tmp = QVBoxLayout()\n tmp.addWidget( current_estimates_box )\n tmp.addWidget( data_box ) \n self.layout.addLayout( tmp )\n self.layout.addWidget( predictions_box ) \n self.layout.addWidget( canvas_box ) \n\n # self.layout = QGridLayout()\n # # self.layout.addWidget( data_box, 0, 0, 0, 0, QtCore.Qt.AlignLeft )\n # self.layout.addWidget( data_box, 0, 0 )\n # # self.layout.setColumnStretch( 0, 1 ) \n # self.layout.addWidget( predictions_box, 0, 1 )\n # # self.layout.setColumnStretch( 1, 0.25 ) \n # #self.layout.addWidget( canvas_box, 0, 2 ) \n # # self.layout.setColumnStretch( 2, 0 ) \n \n\n def update( self ) :\n\n self.analyzer.update()\n \n if len( self.analyzer.data_list ) > 0 : \n self.mass_estimate_label.setText( self.mass_label_str +\n str( self.analyzer.current_mass_estimate ) )\n self.freq_estimate_label.setText( self.freq_label_str +\n str( self.analyzer.current_freq_estimate ) )\n else :\n self.mass_estimate_label.setText( self.mass_label_str + '?' ) \n self.freq_estimate_label.setText( self.freq_label_str + '?' )\n\n self.ame_mass_estimate_label.setText( self.ame_mass_label_str +\n '%.1f' % self.analyzer.ame_mass )\n self.ame_freq_estimate_label.setText( self.ame_freq_label_str +\n '%.1f' % self.analyzer.ame_freq )\n\n self.populate_predictions() \n \n self.canvas.draw() \n \n\n\n def populate_predictions( self ) :\n\n taccs = np.linspace( PREDICTION_TACC_START, PREDICTION_TACC_END, NUM_PREDICTIONS, dtype = int )\n\n # need at least one reference to generate predictions \n # if self.analyzer.reference_indices :\n # self.predictions_status_label.setText( '' )\n \n for i in range( NUM_PREDICTIONS ) :\n tacc = taccs[i]\n\n ame_prediction = 50\n while( np.abs( ame_prediction - 5 ) > 5 ) :\n tacc += 1 \n ame_prediction = cpt_tools.mass_to_phase( self.analyzer.ame_mass, self.analyzer.q,\n tacc, atomic_mass = 1 )\n \n self.predictions_table.setCellWidget( i, 0, QLabel( str( tacc ) ) )\n \n self.predictions_table.setCellWidget( i, 1, QLabel( '%.1f' % ame_prediction ) )\n # else : \n # self.predictions_status_label.setText( 'Waiting for reference...' 
)\n\n            # for i in range( NUM_PREDICTIONS ) :\n            #     for j in range( 3 ) :\n            #         self.predictions_table.setCellWidget( i, j, QLabel( '' ) ) \n","repo_name":"jacobpierce1/cpt_tools","sub_path":"gui_controller/gui_helpers/combined_analysis_widget.py","file_name":"combined_analysis_widget.py","file_ext":"py","file_size_in_byte":6384,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"26077429243","text":"\"\"\"\n* In this challenge, you are tasked with creating a Python script for\n  analyzing the financial records of your company. You will be given a set of\n  financial data called [budget_data.csv](PyBank/Resources/budget_data.csv).\n  The dataset is composed of two columns: `Date` and `Profit/Losses`.\n  (Thankfully, your company has rather lax standards for accounting so the\n  records are simple.)\n\n* Your task is to create a Python script that analyzes the records to\n  calculate each of the following:\n\n  * The total number of months included in the dataset\n\n  * The net total amount of \"Profit/Losses\" over the entire period\n\n  * Calculate the changes in \"Profit/Losses\" over the entire period, then find\n    the average of those changes\n\n  * The greatest increase in profits (date and amount) over the entire period\n\n  * The greatest decrease in profits (date and amount) over the entire period\n\n* As an example, your analysis should look similar to the one below:\n\n  ```text\n  Financial Analysis\n  ----------------------------\n  Total Months: 86\n  Total: $38382578\n  Average Change: $-2315.12\n  Greatest Increase in Profits: Feb-2012 ($1926159)\n  Greatest Decrease in Profits: Sep-2013 ($-2196167)\n  ```\n\n* In addition, your final script should both print the analysis to the\n  terminal and export a text file with the results.\n\"\"\"\n\n# Import required modules\nimport os\nimport csv\n\n# Define input and output file paths\ninput_path = os.path.join(\"Resources\", \"budget_data.csv\")\noutput_path = os.path.join(\"analysis\", \"Financial_Analysis.txt\")\n\n# Set up variable initial values\nmonth_count = 0\ntotal_net_profit = 0\nmax_increase = 0\nmax_decrease = 0\nfirst_month = True\ntotal_change = 0\n\n# Open input file and step through data\nwith open(input_path) as csv_file:\n\n    csv_reader = csv.reader(csv_file, delimiter=',')\n    \n    # Read header row - not used in calculations\n    csv_header = next(csv_reader)\n    \n    # Step through data rows\n    for row in csv_reader:                      # Read the current data row\n        month_count += 1                        # Increment month counter\n        current_month = int(row[1])             # Get profit / loss for current month\n        total_net_profit += current_month       # Add it to the net profit accumulator\n        \n        if first_month == True:                 # Skip max increase / decrease calcs for 1st month\n            first_month = False                 # Not the first month anymore\n        else:\n            monthly_change = current_month - prev_month     # Change from the previous month\n            total_change += monthly_change                  # Accumulate total monthly change\n            if monthly_change > max_increase:               # Check for new max increase\n                max_increase = monthly_change\n                max_inc_month = row[0]\n            if monthly_change < max_decrease:               # Check for new max decrease\n                max_decrease = monthly_change\n                max_dec_month = row[0]\n        prev_month = current_month              # Set new previous month value for next time through\n\n# Open output file and write results to both file and console\nwith open(output_path,'w') as txt_file:\n\n    txt_file.write(\"Financial Analysis\\n\")\n    print(\"Financial Analysis\")\n\n    txt_file.write(\"----------------------------\\n\")\n    print(\"----------------------------\")\n\n    txt_file.write(f\"Total Months: {month_count}\\n\")\n    
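# txt_file.write() needs the explicit \\n below; print() adds its own newline\n    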
print(f\"Total Months: {month_count}\")\n\n txt_file.write(f\"Total Net Profit: ${total_net_profit:,.2f}\\n\")\n print(f\"Total Net Profit: ${total_net_profit:,.2f}\")\n\n txt_file.write(f\"Total Period Change in Profit: ${total_change:,.2f}\\n\")\n print(f\"Total Period Change in Profit: ${total_change:,.2f}\")\n\n # Calculate average change in profit\n txt_file.write(f\"Average Change in Profit: ${total_change/(month_count-1):,.2f}\\n\")\n print(f\"Average Change in Profit: ${total_change/(month_count-1):,.2f}\")\n\n txt_file.write(f\"Greatest Increase in Profits: {max_inc_month} (${max_increase:,.2f})\\n\")\n print(f\"Greatest Increase in Profits: {max_inc_month} (${max_increase:,.2f})\")\n\n txt_file.write(f\"Greatest Decrease in Profits: {max_dec_month} (${max_decrease:,.2f})\\n\")\n print(f\"Greatest Decrease in Profits: {max_dec_month} (${max_decrease:,.2f})\")\n\n txt_file.write(\"---\\n\")\n print(\"---\")\n","repo_name":"wpj174/python-challenge","sub_path":"PyBank/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24530108896","text":"from settings import MUSIC_DIR, SHUFFLE, REPEAT\r\nfrom threading import Thread\r\nfrom queue import Queue, Empty\r\nimport os\r\nimport random\r\nimport pygame\r\nfrom time import sleep\r\n\r\npygame.mixer.init(buffer=1024)\r\nif os.name != 'nt':\r\n pygame.init()\r\n\r\n\r\nclass MusicLibrary(Thread):\r\n library = dict()\r\n songs_queue = Queue()\r\n stopped = True\r\n paused = False\r\n\r\n def run(self):\r\n self.scan_library()\r\n while True:\r\n pygame.mixer.music.load(self.songs_queue.get())\r\n self.stopped = False\r\n self.paused = False\r\n pygame.mixer.music.play()\r\n pygame.mixer.music.set_volume(1.0)\r\n while pygame.mixer.music.get_busy() and not self.stopped:\r\n sleep(0.2)\r\n self.songs_queue.task_done()\r\n if self.stopped:\r\n pygame.mixer.music.stop()\r\n\r\n def scan_library(self):\r\n for item in os.listdir(MUSIC_DIR):\r\n if os.path.isdir(os.path.join(MUSIC_DIR, item)):\r\n self.library[item] = []\r\n for dir_name in self.library:\r\n category_dir = os.path.join(MUSIC_DIR, dir_name)\r\n for item in os.listdir(category_dir):\r\n full_path = os.path.join(category_dir, item)\r\n if os.path.isfile(full_path) and (item.lower()[-4:] in ('.ogg', '.mp3')):\r\n self.library[dir_name].append(full_path)\r\n\r\n def get_categories(self, empty=False):\r\n return [name for name in self.library if empty or self.library[name]]\r\n\r\n def drain_queue(self):\r\n while not self.songs_queue.empty():\r\n try:\r\n self.songs_queue.get(False)\r\n except Empty:\r\n continue\r\n else:\r\n self.songs_queue.task_done()\r\n\r\n def get_playlist(self, category=None, shuffle=SHUFFLE, repeat=REPEAT):\r\n playlist = []\r\n if category:\r\n for song in self.library[category]:\r\n playlist.append(song)\r\n else:\r\n for category in self.library:\r\n for song in self.library[category]:\r\n playlist.append(song)\r\n\r\n playlist = playlist * repeat\r\n\r\n if shuffle:\r\n random.shuffle(playlist)\r\n\r\n return playlist\r\n\r\n def play_category(self, category):\r\n if category == 'all':\r\n song_list = self.get_playlist()\r\n else:\r\n song_list = self.get_playlist(category=category)\r\n self.stop()\r\n [self.songs_queue.put(song) for song in song_list]\r\n self.stopped = False\r\n\r\n def stop(self):\r\n self.drain_queue()\r\n self.stopped = True\r\n self.paused = False\r\n self.songs_queue.join()\r\n\r\n def pause(self):\r\n 
if pygame.mixer.music.get_busy():\r\n self.paused = True\r\n pygame.mixer.music.pause()\r\n else:\r\n self.paused = False\r\n\r\n def resume(self):\r\n if pygame.mixer.music.get_busy():\r\n self.paused = False\r\n pygame.mixer.music.unpause()\r\n\r\nif __name__ == \"__main__\":\r\n print(MusicLibrary().get_categories())\r\n","repo_name":"daniel-leicht/FlaskJukebox","sub_path":"musiclib.py","file_name":"musiclib.py","file_ext":"py","file_size_in_byte":3144,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"3896743665","text":"from common import parse_args_and_get_input, assert_equal\nfrom collections import Counter\n\n\ndef part_two(lines):\n c = Counter()\n inputs = [int(x) for x in lines]\n i = 0\n freq = 0\n\n while True:\n freq = freq + inputs[i]\n c[freq] += 1\n\n if c[freq] == 2:\n return freq\n i = (i + 1) % len(inputs)\n\n\nargs, lines = parse_args_and_get_input()\n\nif args.part_one:\n answer = sum([int(x) for x in lines])\nelse:\n assert_equal(14, part_two(\"+7, +7, -2, -7, -4\".split(\",\")))\n answer = part_two(lines)\n\nprint(answer)\n\n","repo_name":"jamesjrg/python-playground","sub_path":"advent-of-code-2018/day1.py","file_name":"day1.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19635306829","text":"my_set = {1, 2, 3}\nprint(my_set)\n\nmy_set = {1, 2.0, 3, False, True, \"Element v10\"}\nprint(my_set)\n\nmy_set2 = {True, 1, 1.01, 2.0, 3, False, 3, 0,\n 3, True, \"Element v10\", \"Abram - Starwars\"}\n\nmy_set2.add(100)\n# verify the operation how updates are happening inside\nmy_set2.update([\"Batman v2022\", \"Inception\", False, 230])\nmy_set2.discard(True)\n\nprint(my_set2)\n\n# for value in my_set2:\n# print(value)\n\nlist_x = list(my_set2)\nprint(list_x[2])\n\nindex_count = 0\nfor value in my_set2:\n if index_count == 1:\n print(value)\n break\n\n index_count += 1\n","repo_name":"iomegak12/intel-training-usecase-1","sub_path":"essentials/collections/set.py","file_name":"set.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72247614274","text":"__author__ = 'David'\nimport sys\nimport math\n\n# Useful constants\nepsilon = sys.float_info.epsilon\n\ndef safe_float_range(start, stop, steps):\n step = (stop - start) / steps\n for i in range(0, int(steps)):\n yield (start + i * step), step\n\n\ndef float_range(start, stop, step, inclusive_list=False, include_end=False):\n i = start\n if not inclusive_list:\n stop -= step\n while i <= stop:\n yield i\n i += step\n if include_end:\n yield stop\n\n\ndef romberg_integral(f, a, b, method, accurate_digits=10, debug=False, adaptive=False):\n results = []\n if debug: print(\"Romberg Debug\\n\\t\", end=\" \")\n for iteration in range(0, math.ceil(accurate_digits / 2)): # Each iteration gives about two more digits\n sub_results = []\n results.append(sub_results)\n for sub_iteration in range(0, iteration + 1): # Each sub-iteration length is the iteration + 1\n if sub_iteration == 0: # First sub-iteration value is the integration\n steps = math.pow(2, iteration) # The first iteration is one step, second two steps, fours steps...\n if adaptive:\n epsilon = math.pow(10.0, -(iteration + 1.0)) # Rough estimate at accurate digits required\n if debug: print(\"Ep: \", epsilon, end=\" \", flush=True)\n sub_results.append(adaptive_definite_integral(f, a, b, epsilon, method))\n 
else:\n sub_results.append(definite_integral(f, a, b, steps, method))\n\n if debug: print(sub_results[-1], end=\" \", flush=True)\n else:\n sub_results.append((math.pow(4, sub_iteration) * results[iteration][sub_iteration - 1] -\n results[iteration - 1][sub_iteration - 1]) /\n (math.pow(4, sub_iteration) - 1)) # The critical romberg code\n if debug: print(sub_results[-1], end=\" \", flush=True)\n if debug: print('\\n\\t', end=\" \")\n if debug: print()\n return results[-1][-1] # In python -1 returns the last item, who knew?\n\n\ndef adaptive_definite_integral(f, a, b, epsilon, method):\n area = 0.0\n mid = (a + b) / 2 # Possibly find random mid-point?\n guess = method(f, a, b - a)\n guess_a = method(f, a, mid - a)\n guess_b = method(f, mid, b - mid)\n if math.fabs(guess_a + guess_b - guess) > epsilon:\n return adaptive_definite_integral(f, a, mid, epsilon, method) + \\\n adaptive_definite_integral(f, mid, b, epsilon, method)\n else:\n return guess_a + guess_b\n\n\ndef definite_integral(f, a, b, steps, method):\n area = 0.0\n for x, step in safe_float_range(a, b, steps):\n area += method(f, x, step)\n return area\n\n\ndef derivative(f, x):\n return (f(x + epsilon * 2) - f(x)) / (epsilon * 2)\n\n\ndef rect_estimate(f, x, step):\n return f(x + step / 2) * step\n\ntrap_counter = 0\n\ndef trap_estimate(f, x, step):\n global trap_counter\n trap_counter += 1\n return (f(x) + f(x + step)) * step / 2\n\n\ndef quad_estimate(f, x, step):\n return (step / 6) * (f(x) + 4 * f(x + step / 2) + f(x + step))\n\n\ndef polynomial(x):\n return 1200 * x ** 5 - 3060 * x ** 4 + 2730 * x ** 3 - 990 * x ** 2 + 120 * x\n\n\ndef trig(x):\n return math.sin(x)\n\n\ndef run_trap_test(f, a, b):\n global trap_counter\n trap_counter = 0\n print(\"Trapezoidal: \", definite_integral(f, a, b, 10, trap_estimate), trap_counter)\n trap_counter = 0\n print(\"Adaptive: \", adaptive_definite_integral(f, a, b, .01, trap_estimate), trap_counter)\n trap_counter = 0\n print(\"Romberg: \", romberg_integral(f, a, b, trap_estimate), trap_counter)\n trap_counter = 0\n print(\"Adaptive Romberg: \", romberg_integral(f, a, b, trap_estimate, adaptive=True), trap_counter)\n print()\n\n","repo_name":"dagronlund/test-projects","sub_path":"python_testbed/integration.py","file_name":"integration.py","file_ext":"py","file_size_in_byte":3759,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"26097553010","text":"import xarray as xr\nimport sympl\nimport numpy as np\nfrom marble import InputHeightToPrincipalComponents, convert_height_to_principal_components\nimport os\n\nheight_to_pc = InputHeightToPrincipalComponents()\n\ndata_path = os.path.join(\n os.path.dirname(\n os.path.realpath(__file__)\n ),\n 'data',\n)\n\ncolumn_filename = os.path.join(data_path, 'era5_column-2016.nc')\n\n\ndef convert_dataarray_to_sympl(dict_of_dataarray):\n for name, array in dict_of_dataarray.items():\n if isinstance(array, xr.DataArray):\n dict_of_dataarray[name] = sympl.DataArray(array)\n\n\ndef get_era5_state(latent_filename, latent=True, i_timestep=0):\n state = {}\n ds = xr.open_dataset(latent_filename)\n state['total_water_mixing_ratio'] = ds['rt'][i_timestep, :]\n state['total_water_mixing_ratio'].attrs['units'] = 'kg/kg'\n state['liquid_water_static_energy'] = ds['sl'][i_timestep, :]\n state['liquid_water_static_energy'].attrs['units'] = 'J/kg'\n state['height'] = sympl.DataArray(\n np.linspace(0, 3000., 20),\n dims=['z_star'],\n attrs={'units': 'm'},\n )\n state['time'] = sympl.timedelta(0)\n if 
latent:\n state['vertical_wind'] = ds['w'][i_timestep, :]\n state['vertical_wind'].attrs['units'] = 'm/s'\n convert_dataarray_to_sympl(state)\n if latent:\n state = height_to_pc(state)\n state.pop('vertical_wind_components')\n state['time'] = sympl.timedelta(0)\n return state\n\n\ndef get_era5_forcing(latent_filename, i_timestep, latent=True):\n state = {}\n ds = xr.open_dataset(latent_filename)\n state['surface_latent_heat_flux'] = ds['lhf'][i_timestep] / 3600. # divide by one hour to go from J/m^2 to W/m^2\n state['surface_latent_heat_flux'].attrs['units'] = 'W/m^2'\n state['surface_sensible_heat_flux'] = ds['shf'][i_timestep] / 3600.\n state['surface_sensible_heat_flux'].attrs['units'] = 'W/m^2'\n state['surface_temperature'] = ds['sst'][i_timestep]\n state['surface_temperature'].attrs['units'] = 'degK'\n state['surface_air_pressure'] = ds['p_surface'][i_timestep]\n state['surface_air_pressure'].attrs['units'] = 'Pa'\n state['vertical_wind'] = ds['w'][i_timestep, :]\n state['vertical_wind'].attrs['units'] = 'm/s'\n state['liquid_water_static_energy_horizontal_advective_tendency'] = ds['sl_adv'][i_timestep, :]\n state['total_water_mixing_ratio_horizontal_advective_tendency'] = ds['rt_adv'][i_timestep, :]\n state['downwelling_shortwave_radiation_at_3km'] = ds['swdn_tod'][i_timestep]\n state['downwelling_shortwave_radiation_at_3km'].attrs['units'] = 'W/m^2'\n state['downwelling_shortwave_radiation_at_top_of_atmosphere'] = ds['swdn_toa'][i_timestep]\n state['downwelling_shortwave_radiation_at_top_of_atmosphere'].attrs['units'] = 'W/m^2'\n state['mid_cloud_fraction'] = ds['cldmid'][i_timestep]\n state['mid_cloud_fraction'].attrs['units'] = ''\n state['high_cloud_fraction'] = ds['cldhigh'][i_timestep]\n state['high_cloud_fraction'].attrs['units'] = ''\n state['total_water_mixing_ratio_at_3km'] = ds['rt'][i_timestep, -1]\n state['total_water_mixing_ratio_at_3km'].attrs['units'] = 'kg/kg'\n state['liquid_water_static_energy_at_3km'] = ds['sl'][i_timestep, -1]\n state['liquid_water_static_energy_at_3km'].attrs['units'] = 'J/kg'\n state['rain_water_mixing_ratio_at_3km'] = ds['rrain'][i_timestep, -1]\n state['rain_water_mixing_ratio_at_3km'].attrs['units'] = 'kg/kg'\n if latent:\n state['total_water_mixing_ratio'] = ds['rt'][i_timestep, :]\n state['total_water_mixing_ratio'].attrs['units'] = 'kg/kg'\n state['liquid_water_static_energy'] = ds['sl'][i_timestep, :]\n state['liquid_water_static_energy'].attrs['units'] = 'J/kg'\n convert_dataarray_to_sympl(state)\n if latent:\n state['liquid_water_static_energy_components_horizontal_advective_tendency'] = \\\n sympl.DataArray(\n convert_height_to_principal_components(\n state['liquid_water_static_energy_horizontal_advective_tendency'],\n basis_name='sl', subtract_mean=False\n ), dims=['sl_latent'], attrs={'units': 's^-1'}\n )\n state['total_water_mixing_ratio_components_horizontal_advective_tendency'] = \\\n sympl.DataArray(\n convert_height_to_principal_components(\n state['total_water_mixing_ratio_horizontal_advective_tendency'],\n basis_name='rt', subtract_mean=False\n ), dims=['rt_latent'], attrs={'units': 's^-1'}\n )\n pc_state = {}\n pc_state.update(state)\n pc_state['time'] = sympl.timedelta(0)\n pc_state = height_to_pc(pc_state)\n pc_state.pop('total_water_mixing_ratio_components')\n pc_state.pop('liquid_water_static_energy_components')\n state.update(pc_state)\n return state\n\n\ndef get_era5_diagnostics(latent_filename, i_timestep):\n state = {}\n ds = xr.open_dataset(latent_filename)\n state['cloud_fraction'] = ds['cld'][i_timestep, :]\n 
state['cloud_fraction'].attrs['units'] = ''\n    state['surface_precipitation_rate'] = ds['precip'][i_timestep]\n    state['surface_precipitation_rate'].attrs['units'] = 'mm/hr'\n    state['rain_water_mixing_ratio'] = ds['rrain'][i_timestep, :]\n    state['rain_water_mixing_ratio'].attrs['units'] = 'kg/kg'\n    state['cloud_water_mixing_ratio'] = ds['rcld'][i_timestep, :]\n    state['cloud_water_mixing_ratio'].attrs['units'] = 'kg/kg'\n    state['clear_sky_radiative_heating_rate'] = ds['sl_rad_clr'][i_timestep, :]\n    state['clear_sky_radiative_heating_rate'].attrs['units'] = 'degK/hr'\n    state['low_cloud_fraction'] = ds['cldlow'][i_timestep]\n    state['low_cloud_fraction'].attrs['units'] = ''\n    state['column_cloud_water'] = ds['ccw'][i_timestep]\n    state['column_cloud_water'].attrs['units'] = 'kg/m^2'\n    convert_dataarray_to_sympl(state)\n    return state\n","repo_name":"mcgibbon/marble","sub_path":"examples/initialization.py","file_name":"initialization.py","file_ext":"py","file_size_in_byte":5904,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
{"seq_id":"12737146215","text":"#!/usr/bin/python3\n\"\"\" Objects that handle all default RESTful API actions for Foods \"\"\"\nfrom models.store import Store\nfrom models.food import Food\nfrom models import storage\nfrom api.v1.views import app_views\nfrom flask import abort, jsonify, make_response, request\n\n\n@app_views.route('/stores/<store_id>/foods', methods=['GET'],\n                 strict_slashes=False)\ndef get_foods(store_id):\n    \"\"\"\n    Retrieves the list of all foods objects\n    of a specific Store, or a specific food\n    \"\"\"\n    list_foods = []\n    store = storage.get(Store, store_id)\n    if not store:\n        abort(404)\n    for food in store.foods:\n        list_foods.append(food.to_dict())\n\n    return jsonify(list_foods)\n\n\n@app_views.route('/foods/<food_id>/', methods=['GET'], strict_slashes=False)\ndef get_food(food_id):\n    \"\"\"\n    Retrieves a specific food based on id\n    \"\"\"\n    food = storage.get(Food, food_id)\n    if not food:\n        abort(404)\n    return jsonify(food.to_dict())\n\n\n@app_views.route('/foods/<food_id>', methods=['DELETE'], strict_slashes=False)\ndef delete_food(food_id):\n    \"\"\"\n    Deletes a food based on id provided\n    \"\"\"\n    food = storage.get(Food, food_id)\n\n    if not food:\n        abort(404)\n    storage.delete(food)\n    storage.save()\n\n    return make_response(jsonify({}), 200)\n\n\n@app_views.route('/stores/<store_id>/foods', methods=['POST'],\n                 strict_slashes=False)\ndef post_food(store_id):\n    \"\"\"\n    Creates a Food\n    \"\"\"\n    store = storage.get(Store, store_id)\n    if not store:\n        abort(404)\n    if not request.get_json():\n        abort(400, description=\"Not a JSON\")\n    if 'name' not in request.get_json():\n        abort(400, description=\"Missing name\")\n\n    data = request.get_json()\n    instance = Food(**data)\n    instance.store_id = store.id\n    instance.save()\n    return make_response(jsonify(instance.to_dict()), 201)\n\n\n@app_views.route('/foods/<food_id>', methods=['PUT'], strict_slashes=False)\ndef put_food(food_id):\n    \"\"\"\n    Updates a Food\n    \"\"\"\n    food = storage.get(Food, food_id)\n    if not food:\n        abort(404)\n\n    if not request.get_json():\n        abort(400, description=\"Not a JSON\")\n\n    ignore = ['id', 'store_id', 'created_at', 'updated_at']\n\n    data = request.get_json()\n    for key, value in data.items():\n        if key not in ignore:\n            setattr(food, key, value)\n    storage.save()\n    return make_response(jsonify(food.to_dict()), 
200)\n","repo_name":"ffelipegupe/fastq","sub_path":"api/v1/views/foods.py","file_name":"foods.py","file_ext":"py","file_size_in_byte":2434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6010357425","text":"from http.server import HTTPServer\nfrom socketserver import ThreadingMixIn\nimport sys\n\nfrom src.app.utils import port_check\nfrom src.app.Server import Server\nfrom src.app.data.dependencies import process_dependencies\n\ndef init():\n HOST_NAME = '127.0.0.1'\n PORT_NUMBER = 8080\n if len(sys.argv) == 3:\n PORT_NUMBER = int(sys.argv[2])\n\n isOpen = port_check(HOST_NAME, PORT_NUMBER)\n if not isOpen:\n print('ERROR: port ' + str(PORT_NUMBER) + ' is not available. Provide available port as the second command param', flush=True)\n exit()\n\n class ThreadingSimpleServer(ThreadingMixIn, HTTPServer):\n pass\n\n process_dependencies()\n httpd = ThreadingSimpleServer((HOST_NAME, PORT_NUMBER), Server)\n try:\n print('server is running on http://%s:%s' % (HOST_NAME, PORT_NUMBER), flush=True)\n httpd.serve_forever()\n except KeyboardInterrupt:\n print('\\nExiting app...', flush=True)\n\n httpd.server_close()","repo_name":"b0000ring/npm-cockpit","sub_path":"src/app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"1673141309","text":"import os\r\nimport unittest\r\nimport cv2\r\nimport numpy\r\nimport grpc\r\n\r\n\r\nfrom src.container import Container\r\nfrom server_pb2_grpc import DetectorStub\r\nfrom server_pb2 import DetectRequest\r\nfrom .image_reader import ImageReader\r\n\r\n\r\nclass ContainerTest(unittest.TestCase):\r\n\r\n def _numpy_array_to_grpc_request(self, numpy_image):\r\n # Note, that Width and Height dimensions ARE SWAPPED!!!\r\n grpc_req_options = DetectRequest.DetectRequestOptionsModel(width=numpy_image.shape[1], height=numpy_image.shape[0])\r\n grpc_req_content = numpy.ndarray.tobytes(numpy_image)\r\n grpc_request = DetectRequest(options = grpc_req_options, content = grpc_req_content)\r\n\r\n return grpc_request\r\n\r\n\r\n def setUp(self):\r\n self.container = Container ()\r\n self.container.start ()\r\n\r\n\r\n def tearDown(self):\r\n self.container.stop ()\r\n \r\n\r\n def test_detect_right_sample(self):\r\n with grpc.insecure_channel('127.0.0.1:8081', options=[('grpc.max_receive_message_length', 2048 * 2048 * 3)]) as channel:\r\n stub = DetectorStub(channel)\r\n\r\n self.image_reader = ImageReader(os.path.join('.', 'tests', 'images'))\r\n\r\n for i in range(0, self.image_reader.sample_number()):\r\n with self.subTest(sample_id=i):\r\n numpy_image = self.image_reader.read_sample(i)\r\n grpc_request = self._numpy_array_to_grpc_request(numpy_image)\r\n detections = stub.detect(grpc_request)\r\n \r\n # Assert that at least one person has been detected\r\n self.assertGreater(len(detections.boxes), 0)\r\n\r\n\r\n def test_detect_wrong_sample(self):\r\n with grpc.insecure_channel('127.0.0.1:8081', options=[('grpc.max_receive_message_length', 2048 * 2048 * 3)]) as channel:\r\n stub = DetectorStub(channel)\r\n\r\n self.image_reader = ImageReader(os.path.join('.', 'tests', 'images'), fname_prefix='wrong_sample_')\r\n\r\n for i in range(0, self.image_reader.sample_number()):\r\n with self.subTest(sample_id=i):\r\n numpy_image = self.image_reader.read_sample(i)\r\n grpc_request = self._numpy_array_to_grpc_request(numpy_image)\r\n detections = stub.detect(grpc_request)\r\n \r\n # Assert that 
NO person has been detected\r\n                    self.assertEqual(len(detections.boxes), 0)\r\n\r\n","repo_name":"programmerkz/NN_detectors_over_gRPC","sub_path":"tests/test_container.py","file_name":"test_container.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"10173648906","text":"# 1436 Movie Director Shom\n# Brute-force: try numbers in order until the N-th \"number of the end\" (one containing 666) appears\n\nn = int(input())\n\ndefault = 666\ncount = 0\n\nwhile True:\n    if(\"666\" in str(default)):\n        count += 1\n    if(count == n):\n        print(default)\n        break\n    default += 1","repo_name":"kimsungbo/Algorithms","sub_path":"백준/브루트포스/1436_영화감독숌.py","file_name":"1436_영화감독숌.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"6134925816","text":"from django.urls import path\nfrom core.aplicacion1.views import *\n\napp_name = 'aplicacion1'\n#/checkout/all\nurlpatterns = [\n    path('sell/', form_sell, name='vender'),\n    path('car/', carrito.as_view(), name='carrito'),\n    path('car/cardelete/', eliminarCar, name='eliminar'),\n    path('car/buy/', buy, name='comprar'),\n    path('factura/', show_invoice, name='factura'),\n    path('facturas/', list_all_invoices.as_view(), name='facturas'),\n    path('comentar/', comentar, name='comentar'),\n    path('busquedad/', list_store.as_view(), name='prueba'),\n    path('editar/', edit.as_view(), name='prueba'),\n    path('del/', eliminarProducto, name='eliminar'),\n    path('pruebaaas/', pruebas),\n    path('pruebaaas2/', pruebas2),\n    #path('pruebaaas3/', Formprueba.as_view()),\n]","repo_name":"dmarcii/tesis_master","sub_path":"core/aplicacion1/urlAplicacion1.py","file_name":"urlAplicacion1.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"39403573578","text":"# coding: utf-8\n\n\"\"\"\n    Yahoo!広告 検索広告 API リファレンス / Yahoo! Ads Search Ads API Reference\n\n    
    Yahoo!広告 検索広告 APIのWebサービスについて説明します。
    「Try it out」のご利用には、事前にアプリケーションの登録が必要です。また、アプリケーションのリダイレクトURIの1つに
    https://yahoojp-marketing.github.io/ads-search-api-documents/oauth2-redirect.htmlを登録してください。
    Search Ads API Web Services supported in Yahoo! Ads API.
    When you use \\\"Try it out\\\", you need to register your application in advance.
    As one of redirect URI for application, you need to set \\\"https://yahoojp-marketing.github.io/ads-search-api-documents/oauth2-redirect.html\\\".
    # noqa: E501\n\n The version of the OpenAPI document: v1\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom yahoo_ads_search.configuration import Configuration\n\n\nclass PageFeedItemServiceReviewSummary(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n 'account_id': 'int',\n 'approved_count': 'int',\n 'approved_with_review_count': 'int',\n 'entity_count': 'int',\n 'feed_id': 'int',\n 'post_disapproved_count': 'int',\n 'pre_disapproved_count': 'int',\n 'review_count': 'int'\n }\n\n attribute_map = {\n 'account_id': 'accountId',\n 'approved_count': 'approvedCount',\n 'approved_with_review_count': 'approvedWithReviewCount',\n 'entity_count': 'entityCount',\n 'feed_id': 'feedId',\n 'post_disapproved_count': 'postDisapprovedCount',\n 'pre_disapproved_count': 'preDisapprovedCount',\n 'review_count': 'reviewCount'\n }\n\n def __init__(self, account_id=None, approved_count=None, approved_with_review_count=None, entity_count=None, feed_id=None, post_disapproved_count=None, pre_disapproved_count=None, review_count=None, local_vars_configuration=None): # noqa: E501\n \"\"\"PageFeedItemServiceReviewSummary - a model defined in OpenAPI\"\"\" # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._account_id = None\n self._approved_count = None\n self._approved_with_review_count = None\n self._entity_count = None\n self._feed_id = None\n self._post_disapproved_count = None\n self._pre_disapproved_count = None\n self._review_count = None\n self.discriminator = None\n\n self.account_id = account_id\n self.approved_count = approved_count\n self.approved_with_review_count = approved_with_review_count\n self.entity_count = entity_count\n self.feed_id = feed_id\n self.post_disapproved_count = post_disapproved_count\n self.pre_disapproved_count = pre_disapproved_count\n self.review_count = review_count\n\n @property\n def account_id(self):\n \"\"\"Gets the account_id of this PageFeedItemServiceReviewSummary. # noqa: E501\n\n
    アカウントID
    Account ID
    # noqa: E501\n\n :return: The account_id of this PageFeedItemServiceReviewSummary. # noqa: E501\n :rtype: int\n \"\"\"\n return self._account_id\n\n @account_id.setter\n def account_id(self, account_id):\n \"\"\"Sets the account_id of this PageFeedItemServiceReviewSummary.\n\n
    アカウントID
    Account ID
    # noqa: E501\n\n :param account_id: The account_id of this PageFeedItemServiceReviewSummary. # noqa: E501\n :type: int\n \"\"\"\n\n self._account_id = account_id\n\n @property\n def approved_count(self):\n \"\"\"Gets the approved_count of this PageFeedItemServiceReviewSummary. # noqa: E501\n\n
    承認済みの件数
    Count of approved
    # noqa: E501\n\n :return: The approved_count of this PageFeedItemServiceReviewSummary. # noqa: E501\n :rtype: int\n \"\"\"\n return self._approved_count\n\n @approved_count.setter\n def approved_count(self, approved_count):\n \"\"\"Sets the approved_count of this PageFeedItemServiceReviewSummary.\n\n
    承認済みの件数
    Count of approved
    # noqa: E501\n\n :param approved_count: The approved_count of this PageFeedItemServiceReviewSummary. # noqa: E501\n :type: int\n \"\"\"\n\n self._approved_count = approved_count\n\n @property\n def approved_with_review_count(self):\n \"\"\"Gets the approved_with_review_count of this PageFeedItemServiceReviewSummary. # noqa: E501\n\n
    編集内容審査中の件数
    Count of approved with review
    # noqa: E501\n\n :return: The approved_with_review_count of this PageFeedItemServiceReviewSummary. # noqa: E501\n :rtype: int\n \"\"\"\n return self._approved_with_review_count\n\n @approved_with_review_count.setter\n def approved_with_review_count(self, approved_with_review_count):\n \"\"\"Sets the approved_with_review_count of this PageFeedItemServiceReviewSummary.\n\n
    編集内容審査中の件数
    Count of approved with review
    # noqa: E501\n\n :param approved_with_review_count: The approved_with_review_count of this PageFeedItemServiceReviewSummary. # noqa: E501\n :type: int\n \"\"\"\n\n self._approved_with_review_count = approved_with_review_count\n\n @property\n def entity_count(self):\n \"\"\"Gets the entity_count of this PageFeedItemServiceReviewSummary. # noqa: E501\n\n
    フィードフォルダ内にあるページフィードアイテムの総件数
    Total count of page feed item in feed folder
    # noqa: E501\n\n :return: The entity_count of this PageFeedItemServiceReviewSummary. # noqa: E501\n :rtype: int\n \"\"\"\n return self._entity_count\n\n @entity_count.setter\n def entity_count(self, entity_count):\n \"\"\"Sets the entity_count of this PageFeedItemServiceReviewSummary.\n\n
    フィードフォルダ内にあるページフィードアイテムの総件数
    Total count of page feed item in feed folder
    # noqa: E501\n\n :param entity_count: The entity_count of this PageFeedItemServiceReviewSummary. # noqa: E501\n :type: int\n \"\"\"\n\n self._entity_count = entity_count\n\n @property\n def feed_id(self):\n \"\"\"Gets the feed_id of this PageFeedItemServiceReviewSummary. # noqa: E501\n\n
    フィードID
    Feed ID
    # noqa: E501\n\n :return: The feed_id of this PageFeedItemServiceReviewSummary. # noqa: E501\n :rtype: int\n \"\"\"\n return self._feed_id\n\n @feed_id.setter\n def feed_id(self, feed_id):\n \"\"\"Sets the feed_id of this PageFeedItemServiceReviewSummary.\n\n
    フィードID
    Feed ID
    # noqa: E501\n\n :param feed_id: The feed_id of this PageFeedItemServiceReviewSummary. # noqa: E501\n :type: int\n \"\"\"\n\n self._feed_id = feed_id\n\n @property\n def post_disapproved_count(self):\n \"\"\"Gets the post_disapproved_count of this PageFeedItemServiceReviewSummary. # noqa: E501\n\n
    配信停止の件数
    Count of distribution cancelled
    # noqa: E501\n\n :return: The post_disapproved_count of this PageFeedItemServiceReviewSummary. # noqa: E501\n :rtype: int\n \"\"\"\n return self._post_disapproved_count\n\n @post_disapproved_count.setter\n def post_disapproved_count(self, post_disapproved_count):\n \"\"\"Sets the post_disapproved_count of this PageFeedItemServiceReviewSummary.\n\n
    配信停止の件数
    Count of distribution cancelled
    # noqa: E501\n\n :param post_disapproved_count: The post_disapproved_count of this PageFeedItemServiceReviewSummary. # noqa: E501\n :type: int\n \"\"\"\n\n self._post_disapproved_count = post_disapproved_count\n\n @property\n def pre_disapproved_count(self):\n \"\"\"Gets the pre_disapproved_count of this PageFeedItemServiceReviewSummary. # noqa: E501\n\n
    掲載不可の件数
    Count of disapproved
    # noqa: E501\n\n :return: The pre_disapproved_count of this PageFeedItemServiceReviewSummary. # noqa: E501\n :rtype: int\n \"\"\"\n return self._pre_disapproved_count\n\n @pre_disapproved_count.setter\n def pre_disapproved_count(self, pre_disapproved_count):\n \"\"\"Sets the pre_disapproved_count of this PageFeedItemServiceReviewSummary.\n\n
    掲載不可の件数
    Count of disapproved
    # noqa: E501\n\n :param pre_disapproved_count: The pre_disapproved_count of this PageFeedItemServiceReviewSummary. # noqa: E501\n :type: int\n \"\"\"\n\n self._pre_disapproved_count = pre_disapproved_count\n\n @property\n def review_count(self):\n \"\"\"Gets the review_count of this PageFeedItemServiceReviewSummary. # noqa: E501\n\n
    審査中の件数
    Count of review
    # noqa: E501\n\n :return: The review_count of this PageFeedItemServiceReviewSummary. # noqa: E501\n :rtype: int\n \"\"\"\n return self._review_count\n\n @review_count.setter\n def review_count(self, review_count):\n \"\"\"Sets the review_count of this PageFeedItemServiceReviewSummary.\n\n
    審査中の件数
    Count of review
　　# noqa: E501\n\n        :param review_count: The review_count of this PageFeedItemServiceReviewSummary.  # noqa: E501\n        :type: int\n        \"\"\"\n\n        self._review_count = review_count\n\n    def to_dict(self):\n        \"\"\"Returns the model properties as a dict\"\"\"\n        result = {}\n\n        for attr, _ in six.iteritems(self.openapi_types):\n            value = getattr(self, attr)\n            if isinstance(value, list):\n                result[attr] = list(map(\n                    lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n                    value\n                ))\n            elif hasattr(value, \"to_dict\"):\n                result[attr] = value.to_dict()\n            elif isinstance(value, dict):\n                result[attr] = dict(map(\n                    lambda item: (item[0], item[1].to_dict())\n                    if hasattr(item[1], \"to_dict\") else item,\n                    value.items()\n                ))\n            else:\n                result[attr] = value\n\n        return result\n\n    def to_str(self):\n        \"\"\"Returns the string representation of the model\"\"\"\n        return pprint.pformat(self.to_dict())\n\n    def __repr__(self):\n        \"\"\"For `print` and `pprint`\"\"\"\n        return self.to_str()\n\n    def __eq__(self, other):\n        \"\"\"Returns true if both objects are equal\"\"\"\n        if not isinstance(other, PageFeedItemServiceReviewSummary):\n            return False\n\n        return self.to_dict() == other.to_dict()\n\n    def __ne__(self, other):\n        \"\"\"Returns true if both objects are not equal\"\"\"\n        if not isinstance(other, PageFeedItemServiceReviewSummary):\n            return True\n\n        return self.to_dict() != other.to_dict()\n","repo_name":"ota2000/yahoo-ads-search","sub_path":"yahoo_ads_search/models/page_feed_item_service_review_summary.py","file_name":"page_feed_item_service_review_summary.py","file_ext":"py","file_size_in_byte":12084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"25619370668","text":"import sys\nsys.stdin = open('input.txt')\n\nT = int(input())\nfor t in range(1, T+1):\n    N = int(input())\n    D = list(map(int, input().split()))\n    max_height = 0\n    height = 0\n\n    # print(D)\n    # for each element of the list\n    for i in range(N):\n        height = 0\n        # compare with the numbers that come after it\n        for j in range(i+1, N):\n            # print(D[i], j)\n            # if a later number (D[j]) is smaller than the current number (D[i]), there is room to fall, so the falling height grows by 1\n            if D[i] > D[j]:\n                height += 1\n        # if the total falling height is greater than the current max_height, replace max_height with height\n        if height > max_height:\n            max_height = height\n\n    print('#%d %d' % (t, max_height))","repo_name":"minnczi/Algorithm","sub_path":"Swea/swea_gravity.py","file_name":"swea_gravity.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"40787527399","text":"class Empleados:\n    \n    def __init__(self, id_empleado = 1, nombre = ' ', direccion = ' '):\n        self.__id_empleado = id_empleado\n        self.__nombre = nombre\n        self.__direccion = direccion\n\n    def guardar(self):\n        \n        f = open('C:\\\\Archivos_Progra\\\\PIA\\\\EQUIPO_5\\\\Empleados.txt', 'a', encoding = 'utf8')\n        f.write(f'ID: {self.__id_empleado} | NOMBRE: {self.__nombre} | DIRECCION: {self.__direccion}' + '\\n')\n        f.close()\n\n    def consultar_todo(self):\n\n        f = open('C:\\\\Archivos_Progra\\\\PIA\\\\EQUIPO_5\\\\Empleados.txt')\n        print (f.read())\n        f.close()\n\n    def consultar_por_id(self, id):\n\n        self.id = str(id)\n\n        f = open('C:\\\\Archivos_Progra\\\\PIA\\\\EQUIPO_5\\\\Empleados.txt')\n\n        for linea in f:\n            \n            datos = linea.strip().split()\n            \n            if datos[1] == self.id:\n                \n                datos = linea.strip().split('|')\n                print(datos)\n\n
            f.close()","repo_name":"PROGAVANZA24/EQUIPO_5","sub_path":"Empleados.py","file_name":"Empleados.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"14392720099","text":"import time\nfrom typing import Union, Optional\n\nimport numpy as np\nimport os\nfrom sklearn import metrics\nfrom sklearn.ensemble import RandomForestClassifier, RandomForestRegressor\nfrom sklearn.tree import DecisionTreeClassifier\n\nfrom decision_trees.utils.constants import ClassifierType\nfrom decision_trees.vhdl_generators.tree import Tree\nfrom decision_trees.vhdl_generators.random_forest import RandomForest\nfrom decision_trees.utils.convert_to_fixed_point import quantize_data\nfrom decision_trees.utils.constants import get_classifier\n\n\ndef test_dataset(\n    number_of_bits_per_feature: int,\n    train_data: np.ndarray, train_target: np.ndarray,\n    test_data: np.ndarray, test_target: np.ndarray,\n    clf_type: ClassifierType,\n    max_depth: Optional[int], number_of_classifiers: Optional[int],\n    path: str, name: str\n):\n    path = os.path.join(\n        path,\n        name + '_' + str(number_of_bits_per_feature) + '_' + clf_type.name + '_' + str(max_depth) + '_' + str(number_of_classifiers)\n    )\n    result_file = os.path.join(path, 'score.txt')\n\n    if not os.path.exists(path):\n        os.makedirs(path)\n\n    # first create classifier from scikit\n    clf = get_classifier(clf_type, max_depth, number_of_classifiers)\n\n    # first - train the classifiers on non-quantized data\n    clf.fit(train_data, train_target)\n    test_predicted = clf.predict(test_data)\n    print(\"scikit clf with test data:\")\n    report_performance(clf, clf_type, test_target, test_predicted, result_file)\n\n    # perform quantization of train and test data\n    # while at some point I was considering not quantizing the test data,\n    # I came to a conclusion that it is not the way it will be performed in hardware\n    train_data_quantized, test_data_quantized = quantize_data(\n        train_data, test_data, number_of_bits_per_feature,\n        flag_save_details_to_file=True, path=path\n    )\n\n    clf.fit(train_data_quantized, train_target)\n    test_predicted_quantized = clf.predict(test_data_quantized)\n    print(\"scikit clf with train and test data quantized:\")\n    report_performance(clf, clf_type, test_target, test_predicted_quantized, result_file)\n\n    # generate own classifier based on the one from scikit\n    number_of_features = len(train_data[0])\n    # TODO(MF): this +1 is very important because the comparison values are not quantized\n    my_clf = generate_my_classifier(clf, number_of_features, number_of_bits_per_feature+1, path)\n    my_clf_test_predicted_quantized = my_clf.predict(test_data_quantized)\n    print(\"own clf with train and test data quantized:\")\n    report_performance(my_clf, clf_type, test_target, my_clf_test_predicted_quantized)\n\n    differences_scikit_my = np.sum(test_predicted_quantized != my_clf_test_predicted_quantized)\n    print(f\"Number of differences between scikit_quantized and my_quantized: {differences_scikit_my}\")\n\n    # check if own classifier works the same as scikit one\n    _compare_with_own_classifier(\n        [test_predicted, test_predicted_quantized, my_clf_test_predicted_quantized],\n        [\"scikit\", \"scikit_quantized\", \"own_clf_quantized\"],\n        test_target,\n        flag_save_details_to_file=True, path=path\n    )\n\n    # optionally check the performance of the scikit classifier for reference (does not work for own classifier)\n    # _test_classification_performance(clf, test_data, 10, 10)\n\n\n
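# Editor's note: a hypothetical invocation, only to show the expected argument\n# shapes; the array names, paths and the enum member below are placeholders\n# (RANDOM_FOREST is an assumed member name, not confirmed by this file):\n#   test_dataset(8, train_data, train_target, test_data, test_target,\n#                ClassifierType.RANDOM_FOREST, max_depth=10,\n#                number_of_classifiers=10, path='./results', name='demo')\n\n\n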
clf_type: ClassifierType,\n expected: np.ndarray, predicted: np.ndarray,\n result_file: Optional[str]=None\n):\n if clf_type == ClassifierType.RANDOM_FOREST_REGRESSOR:\n _report_regressor(expected, predicted, result_file)\n else:\n _report_classifier(clf, expected, predicted, result_file)\n\n\ndef _report_classifier(\n clf,\n expected: np.ndarray, predicted: np.ndarray,\n result_file: Optional[str]=None\n):\n t = ''\n t += 'Detailed classification report:\\n'\n\n t += 'Classification report for classifier ' + str(clf) + '\\n'\n t += str(metrics.classification_report(expected, predicted)) + '\\n'\n cm = metrics.confusion_matrix(expected, predicted)\n cm = cm / cm.sum(axis=1)[:, None] * 100\n\n # np.set_printoptions(formatter={'float': '{: 2.2f}'.format})\n t += f'Confusion matrix:\\n {cm}\\n'\n\n f1_score = metrics.f1_score(expected, predicted, average='weighted')\n precision = metrics.precision_score(expected, predicted, average='weighted')\n recall = metrics.recall_score(expected, predicted, average='weighted')\n accuracy = metrics.accuracy_score(expected, predicted)\n t += f'f1_score: {f1_score:{2}.{4}}\\n'\n t += f'precision: {precision:{2}.{4}}\\n'\n t += f'recall: {recall:{2}.{4}}\\n'\n t += f'accuracy: {accuracy:{2}.{4}}\\n'\n\n if result_file is not None:\n with open(result_file, 'a+') as f:\n f.write(t)\n else:\n print(t)\n\n\ndef _report_regressor(\n expected: np.ndarray,\n predicted: np.ndarray,\n result_file: Optional[str]=None\n):\n t = ''\n t += 'Detailed regression report:\\n'\n\n mae = metrics.mean_absolute_error(expected, predicted)\n mse = metrics.mean_squared_error(expected, predicted)\n r2s = metrics.r2_score(expected, predicted)\n evs = metrics.explained_variance_score(expected, predicted)\n t += f'mean_absolute_error: {mae:{2}.{4}}\\n'\n t += f'mean_squared_error: {mse:{2}.{4}}\\n'\n t += f'coefficient_of_determination: {r2s:{2}.{4}}\\n'\n t += f'explained_variance_score: {evs:{2}.{4}}\\n'\n\n if result_file is not None:\n with open(result_file, 'a+') as f:\n f.write(t)\n else:\n print(t)\n\n\ndef generate_my_classifier(\n clf,\n number_of_features: int,\n number_of_bits_per_feature: int,\n path: str,\n result_file: Union[str, None]=None\n):\n if isinstance(clf, DecisionTreeClassifier):\n print(\"Creating decision tree classifier!\")\n my_clf = Tree(\"DecisionTreeClassifier\", number_of_features, number_of_bits_per_feature)\n elif isinstance(clf, RandomForestClassifier):\n print(\"Creating random forest classifier!\")\n my_clf = RandomForest(\"RandomForestClassifier\", number_of_features, number_of_bits_per_feature)\n else:\n print(\"Unknown type of classifier!\")\n raise ValueError(\"Unknown type of classifier!\")\n\n my_clf.build(clf)\n my_clf.create_vhdl_file(path)\n my_clf.print_parameters(result_file)\n\n return my_clf\n\n\ndef _compare_with_own_classifier(results: [], results_names: [str],\n test_target,\n flag_save_details_to_file: bool = True,\n path: str = \"./\"\n ):\n flag_no_errors = True\n number_of_errors = np.zeros(len(results))\n\n comparision_file = None\n if flag_save_details_to_file:\n comparision_file = open(path + \"/comparision_details.txt\", \"w\")\n\n for j in range(0, len(test_target)):\n flag_iteration_error = False\n\n for i in range(0, len(results)):\n if results[i][j] != test_target[j]:\n number_of_errors[i] += 1\n flag_no_errors = False\n flag_iteration_error = True\n\n if flag_iteration_error and flag_save_details_to_file:\n print(\"Difference between versions!\", file=comparision_file)\n print(\"Ground true: \" + str(test_target[j]), 
file=comparision_file)\n for i in range(0, len(results)):\n print(f\"{results_names[i]}: {results[i][j]}\", file=comparision_file)\n\n if flag_no_errors:\n print(\"All results were the same\")\n else:\n for i in range(0, len(results)):\n print(f\"Number of {results_names[i]} errors: {number_of_errors[i]}\")\n\n\ndef _test_classification_performance(clf, test_data, number_of_data_to_test=1000, number_of_iterations=1000):\n if number_of_data_to_test <= len(test_data):\n # time.clock() was removed in Python 3.8; perf_counter() is the replacement\n start = time.perf_counter()\n\n for i in range(0, number_of_iterations):\n for data in test_data[:number_of_data_to_test]:\n clf.predict([data])\n\n end = time.perf_counter()\n elapsed_time = (end - start)\n\n print(\"It takes \" +\n '% 2.4f' % (elapsed_time / number_of_iterations) +\n \"s to classify \" +\n str(number_of_data_to_test) + \" data.\")\n else:\n print(\"There is not enough data provided to evaluate the performance. It is required to provide at least \" +\n str(number_of_data_to_test) + \" values.\")\n\n\n# there is no general method for normalisation, so it was moved to be a part of each dataset\ndef normalise_data(train_data: np.ndarray, test_data: np.ndarray):\n from sklearn import preprocessing\n\n print(\"np.max(train_data): \" + str(np.max(train_data)))\n print(\"np.ptp(train_data): \" + str(np.ptp(train_data)))\n\n normalised_1 = 1 - (train_data - np.max(train_data)) / -np.ptp(train_data)\n normalised_2 = preprocessing.minmax_scale(train_data, axis=1)\n\n print(train_data[0])\n\n train_data /= 16\n test_data /= 16\n\n print(\"Are arrays equal: \" + str(np.array_equal(normalised_2, train_data)))\n print(\"Are arrays equal: \" + str(np.array_equal(normalised_1, train_data)))\n\n for i in range(0, 1):\n print(train_data[i])\n print(normalised_1)\n print(normalised_2)\n","repo_name":"PUTvision/decision_tree","sub_path":"decision_trees/dataset_tester.py","file_name":"dataset_tester.py","file_ext":"py","file_size_in_byte":9296,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"72201072834","text":"import pygame\nclass Bird():\n \n def __init__(self) -> None:\n # parameters\n self.INITIAL_X = 50\n self.INITIAL_Y = 200\n self.x = self.INITIAL_X\n self.y = self.INITIAL_Y\n self.FLY_DISTANCE = 20\n self.mov_speed = 0\n # sprites\n self.sprite_w = 34\n self.sprite_h = 24\n self.MID_FLAP = \"data/sprites/bluebird-midflap.png\"\n self.DOWN_FLAP = \"data/sprites/bluebird-downflap.png\"\n self.UP_FLAP = \"data/sprites/bluebird-upflap.png\"\n # hitbox\n self.hitbox = pygame.Rect(self.x, self.y, self.sprite_w -2 , self.sprite_h -2) # -2 is being more forgiving\n\n \n def fall(self, gravity):\n self.mov_speed += gravity\n # print(self.mov_speed)\n self.y += self.mov_speed\n self.hitbox.centery += self.mov_speed\n\n def fly(self):\n self.mov_speed = -1.5 # giving an initial boost to improve the animation\n self.y -= self.FLY_DISTANCE\n self.hitbox.centery -= self.FLY_DISTANCE\n\n def relocate(self, x, y):\n self.x = x\n self.y = y\n self.hitbox.centery = self.y\n\n def restart(self):\n self.relocate(self.INITIAL_X, self.INITIAL_Y)\n self.mov_speed = 0","repo_name":"fedmag/Q-bird","sub_path":"src/game/bird.py","file_name":"bird.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36428670228","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Nov 16 18:04:23 2021\r\n\r\n@author: Monoramiro\r\n\"\"\"\r\n\r\nfrom warnings import showwarning\r\nimport pandas as 
pd\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.animation import FuncAnimation\r\nfrom datetime import datetime, timedelta\r\n\r\n\r\n\r\ndef animate(i):\r\n '''\r\n Toma los datos desde el CSV creado en PDC, para armar el gráfico en tiempo real\r\n '''\r\n global j\r\n \r\n data = pd.read_csv('../data/datasetPMU4.csv')\r\n x= data['TimeStamp']\r\n #x_time=datetime.time(x)\r\n #x=PDC.timestamp_to_date(unix_time)\r\n\r\n # Amplitud\r\n y1 = data['V1']\r\n y2 = data['V2']\r\n y3 = data['V3']\r\n \r\n plt.subplot(2, 1,1) \r\n plt.cla()\r\n plt.style.use('fivethirtyeight')\r\n plt.plot(x, y1, label='Channel 1')\r\n plt.plot(x, y2, label='Channel 2')\r\n plt.plot(x, y3, label='Channel 3')\r\n #plt.plot(x, y2, label='Channel 2')\r\n plt.xticks(range(0,j,5))\r\n plt.xlabel(\"TimeStamp\")\r\n plt.ylabel(\"Tensión(V) \")\r\n plt.title(\"Sincrofasores - Amplitud\")\r\n plt.legend(loc='upper left')\r\n plt.tight_layout()\r\n plt.show()\r\n \r\n # Fase\r\n p1 = data['Phase1']\r\n p2 = data['Phase2']\r\n p3 = data['Phase3']\r\n \r\n plt.subplot(2, 1,2) \r\n plt.cla()\r\n plt.style.use('fivethirtyeight')\r\n plt.plot(x, p1, label='Channel 1')\r\n plt.plot(x, p2, label='Channel 2')\r\n plt.plot(x, p3, label='Channel 3')\r\n #plt.plot(x, y2, label='Channel 2')\r\n plt.xticks(range(0,j,5))\r\n plt.xlabel(\"TimeStamp\")\r\n plt.ylabel(\"Fase(rad) \")\r\n plt.title(\"Sincrofasores - Fase\")\r\n plt.legend(loc='upper left')\r\n plt.tight_layout()\r\n plt.show()\r\n \r\n \r\n j+=1\r\n return\r\n\r\nj=0\r\n#Realiza el graficado en tiempo real\r\nani = FuncAnimation(plt.gcf(), animate, interval=1000)\r\nplt.show()\r\n ","repo_name":"FigueredoGaston/Otro","sub_path":"Old/tiempo_real/real_time_plot.py","file_name":"real_time_plot.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15557492791","text":"from django.urls import path # used for routing URLs to the appropriate view functions within a Django application using the URL dispatcher\r\nfrom . 
import views # Import views\r\n\r\n# All the URL patterns for routing\r\nurlpatterns = [\r\n path('',views.index, name='index'), # The home page would render the 'index' view\r\n path('video_feed/', views.video_feed, name='video_feed'), # /video_feed/ route would render the 'video_feed' view\r\n path('meditate/',views.meditate, name='meditate'), # /meditate/ route would render the 'meditate' view\r\n path('pred_health/', views.predict_emotions, name='health_pred'), # /pred_health/ route would render the 'predict_emotions' view\r\n]","repo_name":"Anish2422/calmai-live","sub_path":"ai/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42918066577","text":"class TreeNode:\n def __init__(self,data):\n self.data = data\n self.left = None\n self.right = None\n def __repr__(self): # __repr__ takes only self and must return a str\n return str(self.data)\nclass Tree:\n def __init__(self):\n self.root = None\n def Add(self,data):\n node = TreeNode(data)\n if not self.root:\n self.root = node\n else:\n q = [self.root]\n while True:\n pop_node = q.pop(0)\n if not pop_node.left:\n pop_node.left = node\n return\n if not pop_node.right:\n pop_node.right = node\n return\n else:\n q.append(pop_node.left)\n q.append(pop_node.right)\n def inOrder(self,root):\n if not root:\n return []\n result = [root.data]\n left_data = self.inOrder(root.left)\n right_data = self.inOrder(root.right)\n return left_data + result + right_data\ndef KthNode(root,k):\n k = k-1\n result = tree.inOrder(root) # traverse from the given root instead of ignoring the parameter\n return result[k]\n\ntree = Tree()\ntree.Add(5)\ntree.Add(3)\ntree.Add(7)\ntree.Add(2)\ntree.Add(4)\ntree.Add(6)\ntree.Add(8)\nprint(KthNode(tree.root,3))\n","repo_name":"Lmineor/Sword-to-Offer","sub_path":"bin/54.py","file_name":"54.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36404056670","text":"from direct.showbase.ShowBase import ShowBase\nfrom panda3d.core import *\nfrom direct.gui.DirectGui import *\nfrom player import *\nfrom friend import *\nfrom enemy import *\nfrom creat_fog import *\nimport random\n\n\nclass Config(ShowBase):\n def __init__(self):\n super().__init__()\n self.font = loader.loadFont('font.ttc')\n self.setup()\n base.pusher = CollisionHandlerPusher()\n base.cTrav = CollisionTraverser()\n base.pusher.setHorizontal(True)\n base.pusher.add_in_pattern('%fn-into-%in')\n\n def setup(self):\n self.window_properties(1400, 1000)\n self.start_dialog = self.create_dialog(\n frame_size=(-1.41, 1.41, -1.01, 1.01), pos=(0, 0, 0), color=(1, 1, 1, 1), picture=\"start.png\")\n self.create_button(pos=(0, 0, -0.8), text='开始游戏', scale=0.15, parent=self.start_dialog, command=self.start, fg=(\n 255/255, 220/255, 99/255, 1), frameColor=(147/255, 88/255, 51/255, 1))\n self.woodmens = []\n self.max_woodmen_num = 6\n self.spawn_time = 2\n self.spawnpoints = [Vec3(0, 210, 0), Vec3(-200, 220, 0), Vec3(-230, -210, 0), Vec3(-80, -190, 0),\n Vec3(0, -200, 0), Vec3(-150, 20, 0), Vec3(0, -100, 0), Vec3(150, 20, 0)]\n\n self.wait_play_animation = []\n self.keep_play_animation = []\n self.end_dialog = self.create_dialog(frame_size=(-1.41,1.41,-1.01,1.01), pos = (0,0,0), color=(1,1,1,1), picture=\"win.png\")\n self.end_dialog.hide()\n self.create_button(pos = (1.1,0,-0.8), text=\"退出游戏\", scale=0.1, parent=self.end_dialog, command=self.quit, fg=(25/255, 22/255, 99/255, 1), frameColor = (247/255, 188/255, 51/255, 1))\n\n def window_properties(self, w, 
h):\n self.window = WindowProperties()\n self.window.setSize(w, h)\n self.win.requestProperties(self.window)\n\n def create_dialog(self, frame_size, pos, color, picture):\n return DirectDialog(frameSize=frame_size, frameColor=color,\n pos=pos, frameTexture=picture)\n\n def create_button(self, text, parent, command, scale, pos, fg, frameColor):\n DirectButton(text=text, parent=parent, command=command, scale=scale,\n pos=pos, text_font=self.font, text_fg=fg, frameColor=frameColor)\n\n def start(self):\n self.start_dialog.hide()\n self.load_model('FieldForest')\n base.cam.setHpr(-90, -4, 0)\n base.cam.setPos(-1000, -100, 100)\n self.create_fence(580, 350, 0, 580, -350, 0, 5)\n self.create_fence(-580, -350, 0, 580, -350, 0, 5)\n self.create_fence(-580, -350, 0, -580, -150, 0, 5)\n self.create_fence(-580, -40, 0, -580, 350, 0, 5)\n self.create_fence(-580, 350, 0, 580, 350, 0, 5)\n base.disableMouse()\n self.player = Player()\n self.friend = Friend(self)\n self.needle = EnemyNeedle()\n\n self.key_state = {'up': False, 'left': False,\n 'right': False, 'shoot': False}\n taskMgr.add(self.update)\n self.key_event()\n self.create_health_state()\n self.fog()\n\n def load_model(self, model):\n self.model = loader.loadModel(model)\n self.model.reparentTo(render)\n self.mina, self.minb = self.model.getTightBounds()\n return self.mina,self.minb\n\n def create_fence(self, ax, ay, az, bx, by, bz, r):\n fence_solid = CollisionCapsule(ax, ay, az, bx, by, bz, r)\n fence_node = CollisionNode('fence')\n fence_node.addSolid(fence_solid)\n render.attachNewNode(fence_node)\n mask = BitMask32()\n mask.setBit(0)\n mask.setBit(1)\n fence_node.setIntoCollideMask(mask)\n\n def change_key_state(self, direction, key_state):\n self.key_state[direction] = key_state\n\n def key_event(self):\n self.accept('w', self.change_key_state, ['up', True])\n self.accept('w-up', self.change_key_state, ['up', False])\n self.accept('d', self.change_key_state, ['right', True])\n self.accept('d-up', self.change_key_state, ['right', False])\n self.accept('a', self.change_key_state, ['left', True])\n self.accept('a-up', self.change_key_state, ['left', False])\n self.accept('woodmen-into-fence', self.change_woodmen_state)\n self.accept('mouse1', self.change_key_state, ['shoot', True])\n self.accept('mouse1-up', self.change_key_state, ['shoot', False])\n self.accept('woodmen-into-woodmen', self.change_woodmen_state)\n\n def update(self, task):\n if self.player.health > 0 and self.friend.health > 0:\n\n dt = globalClock.getDt()\n self.spawn_time -= dt\n if self.spawn_time <= 0:\n self.spawn_time = 2\n self.spawn_woodmen()\n\n [self.player.aduan_move(self.key_state, self.woodmen, dt)\n for self.woodmen in self.woodmens]\n [self.woodmen.woodmen_move(self.player, dt, self.friend)\n for self.woodmen in self.woodmens]\n [self.friend.agency_judgement(\n self.player, dt, self.woodmen) for self.woodmen in self.woodmens]\n self.needle.attack(self.player, dt)\n\n self.aduan_health_bar['value'] = self.player.health\n self.aduan_health_bar['text'] = '生命值:'+str(self.player.health)\n self.friend_health_bar['value'] = self.friend.health\n self.friend_health_bar['text'] = '生命值:'+str(self.friend.health)\n self.woodmen_health_bar['value'] = self.player.transfer_woodmen_life\n self.woodmen_health_bar['text'] = '生命值:' + \\\n str(self.player.transfer_woodmen_life)\n self.death_woodmens = [\n woodmen for woodmen in self.woodmens if woodmen.health < 0]\n [self.woodmens.remove(death_woodmen)\n for death_woodmen in self.death_woodmens]\n\n for death_woodmen in 
self.death_woodmens:\n death_woodmen.walking = False\n death_woodmen.collider.removeNode()\n death_woodmen.actor.disableBlend()\n death_woodmen.actor.play('die')\n self.keep_play_animation += self.death_woodmens\n\n for death_woodmen in self.keep_play_animation:\n if not death_woodmen.actor.getAnimControl('die').isPlaying():\n death_woodmen.clean_up()\n self.friend.count_health(5)\n if self.friend.search_safe_house:\n self.friend.walk_state = True\n else:\n self.wait_play_animation.append(death_woodmen)\n self.keep_play_animation = self.wait_play_animation\n self.wait_play_animation = []\n if self.friend.success:\n self.end_dialog.show()\n self.health_dialog.hide()\n else:\n self.end_dialog[\"frameTexture\"] = 'lose.png'\n self.end_dialog.show()\n self.health_dialog.hide()\n self.fog.update_fog(self.player.actor.getPos())\n\n return task.cont\n\n def change_woodmen_state(self, content):\n for self.woodmen in self.woodmens:\n if content.getFromNodePath() == self.woodmen.collider:\n self.woodmen.acceleration = -self.woodmen.acceleration\n self.woodmen.change_orientation = -self.woodmen.change_orientation\n\n def create_screen_image(self, image, pos, scale, parent):\n self.iamgeObject = OnscreenImage(\n image=image, pos=pos, scale=scale, parent=parent)\n self.iamgeObject.setTransparency(True)\n\n def create_health_bar(self, text, text_fg, text_scale, barColor, value, pos, scale, parent):\n return DirectWaitBar(text=text, text_fg=text_fg, text_scale=text_scale, text_font=self.font,\n barColor=barColor, value=value, pos=pos, scale=scale, parent=parent)\n\n def create_health_state(self):\n self.health_dialog = self.create_dialog(frame_size=(\n 0, 0, 0, 0), pos=(0, 0, 0), color=(0, 0, 0, 0), picture=None)\n self.create_screen_image(image='aduan_life.png', pos=(-1.13, 0, 0.88),\n scale=(0.24, 1, 0.09), parent=self.health_dialog)\n self.create_screen_image(image='codemao_life.png', pos=(-0.6, 0, 0.88),\n scale=(0.24, 1, 0.09), parent=self.health_dialog)\n self.create_screen_image(image='woodmen_life.png', pos=(\n 1.1, 0, 0.88), scale=(0.24, 1, 0.09), parent=self.health_dialog)\n self.aduan_health_bar = self.create_health_bar(text='生命值:'+str(self.player.health),\n text_fg=(1, 1, 0, 1), text_scale=(0.14, 0.14),\n barColor=(1, 48/255, 48/255, 1), value=self.player.health,\n pos=(-1.03, 0, 0.832), scale=(0.125, 1, 0.27),\n parent=self.health_dialog)\n self.friend_health_bar = self.create_health_bar(text='生命值:'+str(self.friend.health),\n text_fg=(1, 1, 0, 1), text_scale=(0.14, 0.14),\n barColor=(1, 48/255, 48/255, 1), value=self.friend.health,\n pos=(-0.5, 0, 0.832), scale=(0.125, 1, 0.27),\n parent=self.health_dialog)\n self.woodmen_health_bar = self.create_health_bar(text='生命值:'+str(self.player.transfer_woodmen_life),\n text_fg=(0, 178/255, 238/255, 1), text_scale=(0.14, 0.14),\n barColor=(1, 0, 1, 1), value=self.player.transfer_woodmen_life,\n pos=(1.2, 0, 0.842), scale=(0.125, 1, 0.27),\n parent=self.health_dialog)\n\n def spawn_woodmen(self):\n if len(self.woodmens) < self.max_woodmen_num:\n self.woodmen = EnemyWoodmen(random.choice(self.spawnpoints))\n self.woodmens.append(self.woodmen)\n\n def input_box(self, scale, command, pos, parent):\n DirectEntry(scale=scale, command=command, pos=pos, parent=parent)\n\n def choose_dialog(self, text, text_fg, text_scale, pos, command, frameColor):\n return YesNoDialog(text_font=self.font, text=text, text_fg=text_fg,\n text_scale=text_scale, pos=pos, command=command, frameColor=frameColor)\n \n def quit(self):\n self.player.clean_up()\n 
self.friend.clean_up()\n [self.woodmen.clean_up() for self.woodmen in self.woodmens]\n self.needle.clean_up()\n base.userExit()\n\n def fog(self):\n point_light = AmbientLight('ambient light')\n point_light.setColor((0.2,0.2,0.2,1))\n environment_light = render.attachNewNode(point_light)\n render.setLight(environment_light)\n self.fog = CreatFog(self.mina, self.minb)\n\n\nif __name__ == '__main__':\n start_game = Config()\n start_game.run()\n","repo_name":"PythonOrC/PythonCodeArchive","sub_path":"program/Panda3D/11-20 - 营救编程猫/13 - 动作策划 (一)/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23340230378","text":"import bpy\nfrom random import random\n\nSTEPS = 18\nLENGTH = 22\nSIZE = 1.0\nmaterialList = [\n bpy.data.materials['m0'], # white\n bpy.data.materials['m1'], # red\n bpy.data.materials['m2'], # yellow\n bpy.data.materials['m3'], # black\n]\n\ndef setScale(values) :\n bpy.ops.transform.resize(value = values)\n \ndef setRandomMat(m) :\n if m < 0.9 :\n mat = materialList[0]\n elif m < 0.94 :\n mat = materialList[1]\n elif m < 0.98 :\n mat = materialList[2]\n else :\n mat = materialList[3]\n\n bpy.context.object.data.materials.append(mat)\n \ndef makeChild(pos, pSize, v, edge) :\n cSize = pSize * 0.5\n of1 = pSize * 0.75\n of2 = pSize * 0.25\n \n e = edge / 3\n\n if v < e :\n bpy.ops.mesh.primitive_cube_add(size = cSize, location = (pos[0] + of1, pos[1] + of2, pos[2] - of2))\n elif v < e*2 :\n bpy.ops.mesh.primitive_cube_add(size = cSize, location = (pos[0] + of2, pos[1] - of1, pos[2] - of2))\n else :\n bpy.ops.mesh.primitive_cube_add(size = cSize, location = (pos[0] + of1, pos[1] + of2, pos[2] + of2))\n\n m = random() \n setRandomMat(m)\n\n if m < 0.5 :\n name = bpy.context.active_object.name\n target = bpy.data.objects[name]\n makeChild(target.location, cSize, m, 0.5) \n\nfor s in range(STEPS) :\n for l in range(LENGTH) :\n r = random()\n\n if r < 0.1 :\n \n scale = 2.0\n v = scale / 4\n r2 = random()\n if r2 < 0.3 :\n scale = 3.0\n v = scale / 3\n \n o = 1.001\n \n if r < 0.05 :\n bpy.ops.mesh.primitive_cube_add(size = SIZE, location = (l - s + s, l + s - v, s))\n setScale((o, scale * o, o))\n else :\n bpy.ops.mesh.primitive_cube_add(size = SIZE, location = (l - s + s, l + s, s + v))\n setScale((o, o, scale * o))\n else :\n bpy.ops.mesh.primitive_cube_add(size = SIZE, location = (l - s + s, l + s, s))\n\n m = random()\n setRandomMat(m) \n \n r3 = random()\n edge = 0.4\n if r3 < edge :\n name = bpy.context.active_object.name\n target = bpy.data.objects[name]\n makeChild(target.location, SIZE, r3, edge) \n \n s += 1.0\n","repo_name":"ogrew/b3d-Generative","sub_path":"GenerativeArt_b3d_06.py","file_name":"GenerativeArt_b3d_06.py","file_ext":"py","file_size_in_byte":2344,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"3734316080","text":"import re\nfrom mongoengine import StringField\n\nclass RelaxedURLField(StringField):\n \"\"\"\n This class is a more lenient version of mongoengine's 'URLField', which allows adding the\n protocol and the 'www' part to be optional.\n\n Modified from sources:\n * https://stackoverflow.com/a/3809435\n * http://regexr.com/3e6m0\n \"\"\"\n\n _URL_REGEX = re.compile(r'(http(s)?:\\/\\/.)?(www\\.)?[-a-zA-Z0-9@:%._\\+~#=]{1,256}\\.[a-z]{1,6}\\b([-a-zA-Z0-9@:%_\\+.~#?&//=]*)', re.IGNORECASE)\n\n def __init__(self, url_regex=None, **kwargs):\n self.url_regex = url_regex 
or self._URL_REGEX\n super().__init__(**kwargs)\n\n def validate(self, value):\n \"\"\"\n Overridden function from 'StringField' that gets executed upon a document trying to be\n saved onto the database.\n \"\"\"\n\n if not self.null and value is None:\n self.error('Field cannot be null')\n\n if self.required and not value:\n self.error('Field is required and cannot be empty')\n\n\n if not self.null and not self.url_regex.match(value):\n # Only check full URL\n self.error(\"Invalid URL: {}\".format(value))\n","repo_name":"sproul-club/server","sub_path":"models/relaxed_url_field.py","file_name":"relaxed_url_field.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"40708973959","text":"import slugid\nfrom higlass.client import Track\nfrom pathlib import Path\nfrom typing import List, Tuple, Union\nfrom numpy import cumsum\n\nimport itertools as it\nfrom pathlib import Path\n\n\ndef chromsize_pairs(chromsizes_fn):\n with open(chromsizes_fn, \"r\") as f:\n chroms = [l.strip().split(\"\\t\") for l in f.readlines()]\n chromnames = [c[0] for c in chroms]\n chromlengths = [int(c[1]) for c in chroms]\n\n return list(zip(chromnames, chromlengths))\n\n\ndef load_chromsizes(chrom_names_lengths):\n chromnames, chromlengths = zip(*chrom_names_lengths)\n\n cumlengths = list(it.accumulate(chromlengths))\n # we want the cumlengths to start 0\n cumlengths = [0] + cumlengths[:-1]\n\n return dict(\n [\n (name, {\"name\": name, \"length\": length, \"start\": start})\n for name, length, start in zip(chromnames, chromlengths, cumlengths)\n ]\n )\n\n\ndef bedtiles(\n lines: List[Tuple[str, int, int, str, str, str]],\n chroms: Union[Path, str, list],\n **kwargs,\n) -> Track:\n \"\"\"Generate a list of local tiles that can be used with a bedlike track.\n\n Parameters\n -----------\n lines: A list of bed style lines ([chrom, start, end, name, score, pos]).\n Each line can contain more than the minimum values shown above. What\n is displayed by HiGlass depends on which track this data is used with.\n If used with the `bedlike` track, the only other value past these that\n is recognized is the `itemRgb` value in the 9th position.\n chroms: Either a) Path or str pointing to a chromsizes file or b) a list\n of [name, length] pairs or c) an (ordered) dictionary of chrom / size\n values\n\n Returns\n -------\n A higlass Track that can be used with the viewer.\n\n \"\"\"\n if isinstance(chroms, Path) or isinstance(chroms, str):\n chrom_names_lengths = chromsize_pairs(chroms)\n elif isinstance(chroms, dict):\n chrom_names_lengths = list(chroms.items())\n elif isinstance(chroms, list):\n chrom_names_lengths = chroms\n else:\n raise ValueError(f\"Unknown chroms type: {type(chroms)}. 
Expecting str, Path or list\")\n\n chroms = load_chromsizes(chrom_names_lengths)\n\n genome_length = sum(c[\"length\"] for c in chroms.values())\n tileset_info = {\n \"x\": {\n \"max_width\": genome_length,\n \"min_pos\": [1],\n \"max_pos\": [genome_length],\n \"max_zoom\": 0,\n }\n }\n\n tiles = {\n \"x.0.0\": [\n {\n \"xStart\": chroms[l[0]][\"start\"] + int(l[1]),\n \"xEnd\": chroms[l[0]][\"start\"] + int(l[2]),\n \"chrOffset\": chroms[l[0]][\"start\"],\n \"importance\": 0,\n \"uid\": slugid.nice(),\n \"fields\": l,\n }\n for l in lines\n ]\n }\n\n return {\"type\": \"local-tiles\", \"tilesetInfo\": tileset_info, \"tiles\": tiles}\n","repo_name":"hkariti/higlass-python","sub_path":"higlass/inline_tiles.py","file_name":"inline_tiles.py","file_ext":"py","file_size_in_byte":2937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"257526050","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('dianjiangapp', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='shouzhijilu',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('neirong', models.CharField(max_length=255, verbose_name='\\u5185\\u5bb9')),\n ],\n ),\n migrations.AlterField(\n model_name='gongcheng',\n name='fapiaodizhi',\n field=models.CharField(max_length=255, verbose_name='\\u53d1\\u7968\\u5730\\u5740', blank=True),\n ),\n migrations.AlterField(\n model_name='gongcheng',\n name='fapiaojisongshijian',\n field=models.CharField(max_length=255, verbose_name='\\u53d1\\u7968\\u5bc4\\u9001\\u65f6\\u95f4', blank=True),\n ),\n migrations.AlterField(\n model_name='gongcheng',\n name='fapiaomingcheng',\n field=models.CharField(max_length=255, verbose_name='\\u53d1\\u7968\\u540d\\u79f0', blank=True),\n ),\n migrations.AlterField(\n model_name='gongcheng',\n name='fapiaoshoujian',\n field=models.CharField(max_length=255, verbose_name='\\u53d1\\u7968\\u6536\\u4ef6\\u4eba', blank=True),\n ),\n migrations.AlterField(\n model_name='gongcheng',\n name='fapiaotaitou',\n field=models.CharField(max_length=255, verbose_name='\\u53d1\\u7968\\u62ac\\u5934', blank=True),\n ),\n migrations.AlterField(\n model_name='gongcheng',\n name='fapiaoyouzheng',\n field=models.CharField(max_length=255, verbose_name='\\u53d1\\u7968\\u90ae\\u653f\\u7f16\\u7801', blank=True),\n ),\n migrations.AlterField(\n model_name='gongcheng',\n name='pingjia',\n field=models.CharField(max_length=255, verbose_name='\\u8bc4\\u4ef7', blank=True),\n ),\n migrations.AlterField(\n model_name='gongcheng',\n name='yaoqiu',\n field=models.CharField(max_length=255, verbose_name='\\u8981\\u6c42', blank=True),\n ),\n migrations.AlterField(\n model_name='user',\n name='biaoqian',\n field=models.CharField(max_length=255, verbose_name='\\u6807\\u7b7e', blank=True),\n ),\n migrations.AlterField(\n model_name='user',\n name='gongzhong',\n field=models.CharField(max_length=255, verbose_name='\\u5de5\\u79cd', blank=True),\n ),\n migrations.AlterField(\n model_name='user',\n name='gongzuodi',\n field=models.CharField(max_length=255, verbose_name='\\u5de5\\u4f5c\\u5730', blank=True),\n ),\n migrations.AlterField(\n model_name='user',\n name='nichang',\n field=models.CharField(max_length=255, verbose_name='\\u6635\\u79f0', blank=True),\n ),\n migrations.AlterField(\n model_name='user',\n name='xingming',\n 
field=models.CharField(max_length=255, verbose_name='\\u59d3\\u540d', blank=True),\n ),\n migrations.AlterField(\n model_name='user',\n name='zhengshu',\n field=models.CommaSeparatedIntegerField(max_length=255, verbose_name='\\u8bc1\\u4e66', blank=True),\n ),\n migrations.AlterField(\n model_name='user',\n name='ziwojieshao',\n field=models.CharField(max_length=255, verbose_name='\\u81ea\\u6211\\u4ecb\\u7ecd', blank=True),\n ),\n migrations.AddField(\n model_name='shouzhijilu',\n name='suoshuid',\n field=models.ForeignKey(related_name='eq_xiaoxiid', to='dianjiangapp.user'),\n ),\n ]\n","repo_name":"chenfengqiannian/djdianjiang","sub_path":"dianjiangapp/migrations/0002_auto_20160411_1542.py","file_name":"0002_auto_20160411_1542.py","file_ext":"py","file_size_in_byte":3897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74146599234","text":"import tensorflow as tf\nimport numpy as np\n\n\n# Hyper Parameters\nHIDDEN_1_SIZE = 40\nHIDDEN_2_SIZE = 30\n\nLR_A = 1e-1 # learning rate for actor\nTAU = 0.01 # update rate for target network\n\n\nclass Actor(object):\n def __init__(self, sess, batch_size, action_dim, action_bound, s, s_, is_training):\n self.sess = sess\n\n self.batch_size = batch_size\n self.a_dim = action_dim\n self.action_bound = action_bound\n self.s = s\n self.s_ = s_\n\n self.is_training = is_training\n\n self.lr = LR_A\n\n self.a = self._build_net(self.s)\n\n self.actor_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Actor')\n ema = tf.train.ExponentialMovingAverage(decay=1-TAU)\n self.target_update = ema.apply(self.actor_vars)\n self.a_ = self._build_net(self.s_, reuse=True, getter=self.get_getter(ema))\n\n def get_getter(self, ema):\n def ema_getter(getter, name, *args, **kwargs):\n var = getter(name, *args, **kwargs)\n ema_var = ema.average(var)\n return ema_var if ema_var else var\n\n return ema_getter\n\n def _build_net(self, s, reuse=None, getter=None):\n with tf.variable_scope('Actor', reuse=reuse, custom_getter=getter):\n init_w = tf.random_normal_initializer(0., 0.3)\n init_b = tf.constant_initializer(0.1)\n hidden_1 = tf.layers.dense(s, HIDDEN_1_SIZE, activation=tf.nn.elu,\n kernel_initializer=init_w, bias_initializer=init_b, name='hidden_1')\n hidden_2 = tf.layers.dense(hidden_1, HIDDEN_2_SIZE, activation=tf.nn.elu,\n kernel_initializer=init_w, bias_initializer=init_b, name='hidden_2')\n with tf.name_scope('action'):\n actions = tf.layers.dense(hidden_2, self.a_dim, activation=tf.nn.tanh, kernel_initializer=init_w,\n bias_initializer=init_b, name='a')\n scaled_a = tf.multiply(actions, self.action_bound, name='scaled_a')\n\n return scaled_a\n\n def learn(self, s, s_): # batch update\n self.sess.run([self.train_op, self.target_update], feed_dict={self.s: s, self.s_: s_, self.is_training: True})\n\n def choose_action(self, s):\n s = s[np.newaxis, :] # single state\n return self.sess.run(self.a, feed_dict={self.s: s, self.is_training: False})[0] # single action\n\n def add_grad_to_graph(self, a_grads):\n with tf.variable_scope('policy_grads'):\n self.policy_grads = tf.gradients(ys=self.a, xs=self.actor_vars, grad_ys=a_grads)\n for ix, grad in enumerate(self.policy_grads):\n self.policy_grads[ix] = grad / self.batch_size\n\n with tf.variable_scope('A_train'):\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n opt = tf.train.AdagradOptimizer(-self.lr) # (- learning rate) for ascent policy\n self.train_op = opt.apply_gradients(zip(self.policy_grads, 
self.actor_vars))\n","repo_name":"fengredrum/DDPG-TensorFlow","sub_path":"actor_network.py","file_name":"actor_network.py","file_ext":"py","file_size_in_byte":3096,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"61"} +{"seq_id":"39108257301","text":"import cv2\nimport torch\nimport numpy as np\nfrom ultralytics import YOLO\nfrom segment_anything import sam_model_registry, SamPredictor\nimport os\nimport argparse\n \n \ndef parse_args():\n parser = argparse.ArgumentParser()\n # 输入视频路径\n parser.add_argument(\"--video_path\", type=str, default='video.mp4', help=\"input video path\")\n # 输出视频路径\n parser.add_argument(\"--save_path\", type=str, default='video_test.mp4', help=\"output video path\")\n # model checkpoint\n parser.add_argument(\"--sam_checkpoint\", type=str, default=\"model/sam_vit_h_4b8939.pth\", help=\"sam model checkpoint\")\n parser.add_argument(\"--yolo_checkpoint\", type=str, default=\"model/yolov8x.pt\", help=\"yolo model detection checkpoint\")\n # imgsize\n parser.add_argument(\"--imgsz\", type=int, default=1920, help=\"yolo track image size\")\n # gpu\n parser.add_argument(\"--device\", type=str, default='cuda:0', help=\"cuda:[0,1,2,3,4] or cpu\")\n return parser.parse_args()\n\n \ndef color_imgs(img, masks, boxes, ids, colors):\n\n grays = []\n for i in range(0, 256):\n g = i * 0.5\n grays.append(g)\n\n imw = img.shape[1]\n imh = img.shape[0]\n\n for k, (box) in enumerate(boxes):\n color = colors[int(ids[k])]\n c_b = color[0] * 0.5\n c_g = color[1] * 0.5\n c_r = color[2] * 0.5\n\n grays_r = []\n for i in range(0, 256):\n r = int(grays[i] + c_r)\n grays_r.append(r)\n\n grays_g = []\n for i in range(0, 256):\n g = int(grays[i] + c_g)\n grays_g.append(g)\n\n grays_b = []\n for i in range(0, 256):\n b = int(grays[i] + c_b)\n grays_b.append(b)\n\n mask = np.asarray(masks[k].cpu().reshape(imh, imw))\n h_array, w_array = np.where(mask == 1)\n for h_ind, w_ind in zip(h_array, w_array):\n img[h_ind, w_ind, 0] = grays_b[img[h_ind, w_ind, 0]] # grays[img[h_ind,w_ind,0]] + grays[color[0]]\n img[h_ind, w_ind, 1] = grays_g[img[h_ind, w_ind, 1]] # grays[img[h_ind,w_ind,1]] + grays[color[1]]\n img[h_ind, w_ind, 2] = grays_r[img[h_ind, w_ind, 2]]\n \n return img\n\n\ndef get_size_ratio(det_boxes):\n det_boxes = det_boxes.xyxy.cpu().numpy()\n size = [(det_box[2]-det_box[0])*(det_box[3]-det_box[1]) for det_box in det_boxes]\n max_size = max(size)\n size_ratio = np.array(size)/max_size\n return size_ratio\n\n\ndef get_det_centers(masks, w, h):\n det_centers = []\n for i, mask in enumerate(masks):\n mask = np.asarray(mask.cpu().reshape(h, w)) # 如果是fast_sam需要去掉\n y_array, x_array = np.where(mask == 1)\n center = (x_array.mean(), y_array.mean())\n det_centers.append(center)\n return det_centers\n\n\ndef load_models(sam_checkpoint='model/sam_vit_h_4b8939.pth', \n yolo_checkpoint='model/yolov8x.pt', \n device='cuda:0'):\n '''\n sam以及yolo需要的模型配置\n '''\n # sam model type\n sam_file_name = os.path.basename(sam_checkpoint) # 'sam_vit_h_4b8939.pth'\n sam_model_name = os.path.splitext(sam_file_name)[0] # 'sam_vit_h_4b8939'\n sam_name_list = sam_model_name.split(\"_\") # ['sam', 'vit', 'h', '4b8939']\n model_type = sam_name_list[1] + '_' + sam_name_list[2] # 'vit_h'\n print(model_type)\n # sam\n sam_device = torch.device(device)\n sam = sam_model_registry[model_type](checkpoint=sam_checkpoint)\n sam.to(device=sam_device)\n predictor = SamPredictor(sam) \n # yolo\n model = YOLO(yolo_checkpoint)\n return model, predictor\n\ndef add_id_and_cls(frame, box, id, 
det_name, size_ratio, center, id_colors):\n cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), id_colors[id], 2) # (0, 255, 0),\n cv2.putText(frame, f\"Id {id}\", (box[0], box[1]), cv2.FONT_HERSHEY_SIMPLEX, 1, id_colors[id], 2,)\n # 计算文本的宽高,baseLine\n fontFace = cv2.FONT_HERSHEY_TRIPLEX\n fontScale = min(1*size_ratio+0.5, 1)\n thickness = min(int(1*size_ratio+0.5), 1)\n center = (int(center[0]), int(center[1]))\n retval, baseLine = cv2.getTextSize(det_name, fontFace=fontFace, fontScale=fontScale, thickness=thickness) #(width,height),bottom\n color = [0, 0, 0]\n topleft = (int(center[0]-retval[0]/2), int(center[1]-(retval[1]+baseLine)/2))\n topright = (int(center[0]+retval[0]/2), int(center[1]+(retval[1]+baseLine)/2))\n # 左边界限制\n limit_x_left = box[0]\n if topleft[0] < limit_x_left:\n delta_x_left = int(limit_x_left - topleft[0])\n # 整体往右移动\n topleft = (topleft[0] + delta_x_left, topleft[1])\n # 右边界限制\n limit_x_right = box[2]\n if topright[0] > limit_x_right:\n delta_x_right = int(limit_x_right - topright[0])\n # 整体向左移动\n topright = (topright[0] - delta_x_right, topright[1])\n\n cv2.rectangle(frame, topleft, topright, color, -1)\n cv2.putText(frame, f\"{det_name}\", (topleft[0], topleft[1]+retval[1]), \n fontScale=fontScale, \n fontFace=fontFace,\n thickness=thickness,\n color=[255, 255, 255])\n return frame \n\n\ndef video_track_seg(model, predictor, id_colors,\n video_path='2.mp4', save_path=\"video_test.mp4\", \n imgsz=1920, device='cuda:0'):\n cap = cv2.VideoCapture(video_path)\n fps = cap.get(cv2.CAP_PROP_FPS)\n w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))\n \n while True:\n ret, frame = cap.read()\n if not ret:\n break\n results = model.track(frame, persist=True, imgsz=imgsz, device=device)\n names = results[0].names\n det_boxes = results[0].boxes\n det_cls = det_boxes.cls\n det_names = [names[cls_ind.item()] for cls_ind in det_cls]\n size_ratios = get_size_ratio(det_boxes)\n ids = det_boxes.id.cpu().numpy().astype(int)\n predictor.set_image(frame)\n input_boxes = torch.tensor(det_boxes.xyxy.cpu().numpy().astype(int), device=device, dtype=torch.int32)\n transformed_boxes = predictor.transform.apply_boxes_torch(input_boxes, frame.shape[:2])\n masks, _, _ = predictor.predict_torch(point_coords=None, point_labels=None, \n boxes=transformed_boxes, multimask_output=False,)\n det_centers = get_det_centers(masks, w, h)\n # 上色\n frame = color_imgs(frame, masks, \n det_boxes.xyxy.cpu().numpy().astype(int), \n ids, id_colors)\n # 添加标签\n for box, id, det_name, size_ratio, center in zip(det_boxes.xyxy.cpu().numpy().astype(int), ids, \n det_names, size_ratios, det_centers):\n frame = add_id_and_cls(frame, box, id, det_name, size_ratio, center, id_colors)\n vid_writer.write(frame)\n vid_writer.release() \n \n \ndef process(sam_checkpoint='model/sam_vit_h_4b8939.pth', \n yolo_checkpoint='model/yolov8x.pt',\n video_path='2.mp4',\n save_path=\"video_test.mp4\",\n imgsz=1920,\n device='cuda:0'):\n id_colors = []\n for k in range(0, 1000):\n r = np.random.randint(0, 255)\n g = np.random.randint(0, 255)\n b = np.random.randint(0, 255)\n c = [r, g, b]\n id_colors.append(c)\n model, predictor = load_models(sam_checkpoint, \n yolo_checkpoint, \n device)\n video_track_seg(model, predictor, id_colors,\n video_path, save_path, \n imgsz, device)\n \n \ndef main(args):\n process(args.sam_checkpoint,\n args.yolo_checkpoint,\n args.video_path,\n args.save_path,\n args.imgsz,\n 
args.device\n )\n\n \nif __name__ == '__main__':\n args = parse_args()\n main(args)","repo_name":"cStor-cDeep/Segment-Any-Video","sub_path":"track.py","file_name":"track.py","file_ext":"py","file_size_in_byte":8049,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"61"} +{"seq_id":"10109602699","text":"\"\"\"\nEntry point for polyai server.\n\"\"\"\n\nimport sys\nimport argparse\nimport pylogg as log\nimport polyai.sett as sett\nfrom polyai import __version__\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(\n description=\"PolyAI Server (v%s)\" %__version__)\n \n parser.add_argument(\"cmd\", choices=['server', 'examples'])\n parser.add_argument(\n \"-s\", \"--settings\", default=\"settings.yaml\",\n help=\"Settings file to load/save (default settings.yaml).\")\n parser.add_argument(\n \"--model\", default=None,\n help=\"LLM model .safetensors or .pt file to load.\")\n parser.add_argument(\n \"--ssl\", default=None, action=\"store_true\",\n help=\"Use https for the server endpoints.\")\n parser.add_argument(\n \"--vram\", default=None,\n help=\"Comma separated max VRAM usage for the GPUs.\")\n parser.add_argument(\n \"--log\", default=None, type=int,\n help=\"Log level. Higher is more verbose.\")\n parser.add_argument(\n \"--debug\", default=None, action='store_true',\n help=\"Enable debugging.\")\n\n args = parser.parse_args()\n return args\n\n\ndef server():\n from polyai.server import loader\n from polyai.server import endpoints\n\n _start = False\n\n if sett.Model.model_file_path:\n exllama = loader.init_exllama(\n sett.TextGen.user_fmt,\n sett.TextGen.bot_fmt,\n sett.TextGen.instruction_fmt,\n sett.Model.vram_config, sett.TextGen.context_length)\n\n exllama.load_model(sett.Model.model_file_path)\n _start = True\n\n if sett.Model.lora_file_path:\n exllama.add_lora(sett.Model.lora_file_path)\n else:\n log.error(\"No language model file specified.\")\n\n if sett.Model.bert_file_path:\n bert = loader.init_bert(sett.Model.bert_device)\n bert.load_model(sett.Model.bert_file_path)\n _start = True\n else:\n log.warn(\"No BERT model file specified.\")\n\n # Start the API servers, v1 is blocking, so run it last.\n if _start:\n endpoints.run(\n polyai_port=sett.Server.api_endpoint_port,\n streaming_port=sett.Server.stream_endpoint_port,\n listen=sett.Server.listen_all,\n ssl=sett.Server.use_ssl,\n debug=sett.Server.debug\n )\n\n\ndef main() -> int:\n args = parse_arguments()\n if not sett.load_server_settings(args.settings):\n sett.save_server_settings(args.settings)\n print(\"Please update the new settings file and retry.\")\n return 1\n \n sett.save_server_settings(args.settings)\n\n # Override settings from args.\n if args.log is not None:\n sett.Server.log_level = args.log\n if args.ssl is not None:\n sett.Server.use_ssl = args.ssl\n if args.debug is not None:\n sett.Server.debug = args.debug\n if args.model is not None:\n sett.Model.model_file_path = args.model\n if args.vram is not None:\n sett.Model.vram_config = args.vram\n\n t1 = log.init(sett.Server.log_level, output_directory=\".\",\n logfile_name=sett.Server.log_file_name,\n append_to_logfile=sett.Server.log_append)\n\n if args.cmd == \"server\":\n server()\n else:\n raise ValueError(args.cmd)\n\n t1.note(\"All done.\")\n log.close()\n return 0\n\n\nif __name__ == \"__main__\":\n 
sys.exit(main())\n","repo_name":"akhlakm/polyai-python","sub_path":"polyai/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":3331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31131931171","text":"import datetime\n# Workaround for this error: \n# \"ImportError: Failed to import _strptime because the import lockis held by \n# another thread\"\nimport _strptime\n\n# These are the supported timestamp formats to parse. The first is used for\n# serializing datetimes. Functions in this file rely on specific formats from\n# this tuple so be careful when changing the indices for existing formats.\nDATETIME_FORMATS = ('%Y-%m-%d %H:%M:%S.%f', '%Y-%m-%d %H:%M:%S:%f',\n '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M', '%Y-%m-%d',\n '%m/%d/%Y %H:%M', '%m/%d/%y %H:%M')\n\n\n#############################################################################\ndef parseTimestamp(s):\n \"\"\"Parses a textual datetime format and return a Python datetime object.\n\n The supported format is: yyyy-mm-dd h:m:s.ms\n\n The time component is optional\n hours are 00..23 (no AM/PM)\n minutes are 00..59\n seconds are 00..59\n micro-seconds are 000000..999999\n \"\"\"\n s = s.strip()\n for pattern in DATETIME_FORMATS:\n try:\n return datetime.datetime.strptime(s, pattern)\n except ValueError:\n pass\n raise ValueError('The provided timestamp %s is malformed. The supported '\n 'formats are: [%s]' % (s, ', '.join(DATETIME_FORMATS)))\n\n\n#############################################################################\ndef serializeTimestamp(t):\n return t.strftime(DATETIME_FORMATS[0])\n\n\n#############################################################################\ndef serializeTimestampNoMS(t):\n return t.strftime(DATETIME_FORMATS[2])\n\n\n#############################################################################\ndef parseBool(s):\n l = s.lower()\n if l in (\"true\", \"t\", \"1\"):\n return True\n if l in (\"false\", \"f\", \"0\"):\n return False\n raise Exception(\"Unable to convert string '%s' to a boolean value\" % s)\n\n\n#############################################################################\ndef floatOrNone(f):\n if f == 'None':\n return None\n return float(f)\n\n\n#############################################################################\ndef intOrNone(i):\n if i.strip() == 'None' or i.strip() == 'NULL':\n return None\n return int(i)\n\n\n#############################################################################\ndef escape(s):\n \"\"\"Escape commas, tabs, newlines and dashes in a string\n\n Commas are encoded as tabs\n \"\"\"\n if s is None:\n return ''\n \n assert isinstance(s, basestring), \\\n \"expected %s but got %s; value=%s\" % (basestring, type(s), s)\n s = s.replace('\\\\', '\\\\\\\\')\n s = s.replace('\\n', '\\\\n')\n s = s.replace('\\t', '\\\\t')\n s = s.replace(',', '\\t')\n return s\n\n\n#############################################################################\ndef unescape(s):\n \"\"\"Unescapes a string that may contain commas, tabs, newlines and dashes\n\n Commas are decoded from tabs\n \"\"\"\n #assert isinstance(s, str)\n assert isinstance(s, basestring)\n s = s.replace('\\t', ',')\n s = s.replace('\\\\,', ',')\n s = s.replace('\\\\n', '\\n')\n s = s.replace('\\\\\\\\', '\\\\')\n\n return 
s\n\n","repo_name":"tkaitchuck/nupic","sub_path":"py/nupic/data/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3029,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"16806688433","text":"\ndef gasstation(gas, cost):\n # 연료 총합과 코스트 총합을 비교해서 불가능한 경우를 제외\n if sum(gas) < sum(cost):\n return -1\n\n start, fuel = 0, 0\n for i in range(len(gas)):\n # 출발 안되는 지점\n if gas[i] + fuel < cost[i]:\n start = i + 1\n fuel = 0\n else:\n fuel += gas[i] - cost[i]\n \n return start\n\ngas = [1, 2, 3, 4, 5]\ncost = [3, 4, 5, 1, 2]\n\nanswer = gasstation(gas, cost)\nprint(answer)","repo_name":"joney0715/Algorithm_GroupStudy","sub_path":"joney0715/0921/common_81_주유소_교재풀이2.py","file_name":"common_81_주유소_교재풀이2.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72948195073","text":"import tkinter as tk\nfrom func import file_label, file_piker_button, file_saver_button\nfrom search_bar import search_button\nfrom check_it import check_it_button\nfrom enforce import enforce_it_button\n\ndef quit():\n quit = tk.Button(text=\"QUIT\", fg=\"red\",\n command=root.destroy)\n quit.grid(row=0, column=10, padx=10, pady=10)\n\n\nroot = tk.Tk()\nroot.title(\"Security app\")\nroot.geometry(\"800x700\")\n\nfile_label()\nsearch_button()\nfile_piker_button()\nfile_saver_button()\ncheck_it_button()\nenforce_it_button()\nquit()\n\n\nroot.mainloop()\n","repo_name":"galathinius/cs-project","sub_path":"refactor.py","file_name":"refactor.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41787249825","text":"from __future__ import division, absolute_import, unicode_literals\nimport re\nimport string\n\nfrom scrapy.log import WARNING\nfrom scrapy.http import Request\n\nfrom product_ranking.items import SiteProductItem, RelatedProduct, Price\nfrom product_ranking.spiders import BaseProductsSpider, cond_set, \\\n cond_set_value, \\\n cond_replace_value, cond_replace, FLOATING_POINT_RGEX\n\nis_empty = lambda x, y=None: x[0] if x else y\n\nclass BolProductsSpider(BaseProductsSpider):\n name = 'bol_products'\n allowed_domains = [\"bol.com\"]\n start_urls = []\n SEARCH_URL = \"http://www.bol.com/nl/s/algemeen/zoekresultaten/Ntt/\" \\\n \"{search_term}/N/0/Nty/1/search/true/searchType/qck/sc/media_all/\" \\\n \"index.html\"\n\n def _parse_single_product(self, response):\n return self.parse_product(response)\n\n def parse_product(self, response):\n product = response.meta['product']\n cond_set(product, 'brand', response.xpath(\n \"//div/span/a[@itemprop='brand']/text()\").extract())\n\n cond_set(\n product,\n 'title',\n response.xpath(\n \"//div[contains(@class,'product_heading')]\"\n \"/h1[@itemprop='name']/text()\"\n ).extract(),\n conv=string.strip)\n\n cond_set(\n product,\n 'image_url',\n response.xpath(\n \"//div[contains(@class,'product_zoom_wrapper')]\"\n \"/img[@itemprop='image']/@src\"\n ).extract(),\n conv=string.strip,\n )\n\n j = response.xpath(\n \"//div[contains(@class,'product_description')]/div\"\n \"/div[@class='content']/descendant::*[text()]/text()\"\n )\n cond_set_value(product, 'description', \"\\n\".join(\n x.strip() for x in j.extract() if x.strip()))\n\n cond_set(\n product,\n 'upc',\n response.xpath(\"//meta[@itemprop='sku']/@content\").extract(),\n conv=int,\n )\n\n reseller_id = re.findall('\\/(\\d+)\\/', response.url)\n reseller_id = 
reseller_id[0] if reseller_id else None\n cond_set_value(product, 'reseller_id', reseller_id)\n\n cond_set(product, 'locale', response.xpath(\"//html/@lang\").extract())\n\n rel = response.xpath(\n \"//div[contains(@class,'tst_inview_box')]/div\"\n \"/div[@class='product_details_mini']/span/a\")\n recommended_prods = []\n for r in rel:\n try:\n href = r.xpath('@href').extract()[0]\n title = r.xpath('@title').extract()[0]\n recommended_prods.append(RelatedProduct(title, href))\n except IndexError:\n pass\n if recommended_prods:\n product['related_products'] = {\"recommended\": recommended_prods}\n self._price_from_html(response, product)\n\n mkt_link = is_empty(response.xpath(\n \"//div[contains(@class, 'alternative')]/a/@href\").extract())\n meta = {\"product\": product}\n if mkt_link:\n mkt_link = re.sub(\"filter=([^\\&]\\w+)\", \"\", mkt_link)\n return Request(\n url=mkt_link, \n callback=self.parse_marketplace, \n meta=meta\n )\n else:\n seller = response.xpath(\n '//p[@class=\"bottom_xs\"]/strong/text()'\n ).extract()\n if not seller:\n seller = response.xpath(\n '//div[@class=\"ratinglabel_text\"]/a/text() |'\n '//div[contains(@class, \"seller_popup_wrapper\")]/a/text()'\n ).extract()\n if seller:\n seller = seller[0].strip()\n product[\"marketplace\"] = [{\n \"name\": seller, \n \"price\": product[\"price\"]\n }]\n\n return product\n\n def _price_from_html(self, response, product):\n css = '.product-price-bol [itemprop=price]::attr(content)'\n cond_replace(product, 'price', response.css(css).extract())\n cond_set(\n product,\n 'price',\n response.xpath(\n \"//span[@class='offer_price']/meta[@itemprop='price']/@content\"\n ).extract())\n\n currency = response.css('[itemprop=priceCurrency]::attr(content)')\n currency = currency.extract()[0] if currency else 'EUR'\n price = product.get('price', '')\n price = price.replace(',', '.')\n if price and re.match(' *\\d+\\.?\\d* *\\Z', price):\n cond_replace_value(product, 'price', Price(currency, price))\n\n def _scrape_total_matches(self, response):\n totals = response.xpath(\n \"//h1[@itemprop='name']/span[@id='sab_header_results_size']/text()\"\n ).extract()\n if totals:\n total = totals[0].replace(\".\", \"\")\n try:\n total_matches = int(total)\n except ValueError:\n self.log(\n \"Failed to parse number of matches: %r\" % total, WARNING)\n total_matches = None\n elif \"Geen zoekresultaat\" in response.body_as_unicode():\n total_matches = 0\n else:\n total_matches = None\n\n return total_matches\n\n def _scrape_product_links(self, response):\n links = response.xpath(\n \"//div[contains(@class,'productlist_block')]\"\n \"/div[@class='product_details_thumb']\"\n \"/div/div/a[@class='product_name']/@href\").extract()\n if not links:\n self.log(\"Found no product links.\", WARNING)\n\n for no, link in enumerate(links):\n yield link, SiteProductItem()\n\n def _scrape_next_results_page_link(self, response):\n next_page_links = response.xpath(\n \"//div[contains(@class,'tst_searchresults_next')]/span/a/@href\")\n if next_page_links:\n return next_page_links.extract()[0]\n\n def parse_marketplace(self, response):\n product = response.meta[\"product\"]\n\n marketplaces = response.meta.get(\"marketplaces\", [])\n\n for seller in response.xpath(\n \"//tr[contains(@class, 'horizontal_row')]\"):\n price = is_empty(seller.xpath(\n \"td/p[contains(@class, 'price')]/text() |\" \\\n \"td/span[contains(@class, 'price')]/text()\"\n ).re(FLOATING_POINT_RGEX))\n if price:\n price = price.replace(\",\", \".\")\n\n name = is_empty(seller.xpath(\n 
\"td/div/a/img/@title |\" \\\n \"td/div[contains(@class, 'ratinglabel_text')]/a/text() |\" \\\n \"td/img/@title\"\n ).extract())\n\n if name and \"verkoper:\" in name: \n name = is_empty(re.findall(\"verkoper\\:\\s+(.*)\", name))\n if name:\n name = name.strip()\n\n marketplaces.append({\n \"price\": Price(price=price, priceCurrency=\"EUR\"),\n \"name\": name\n })\n\n next_link = is_empty(response.xpath(\"//div[contains(@class, 'left_button')]/a/@href\").extract())\n if next_link:\n meta = {\"product\": product, \"marketplaces\": marketplaces}\n return Request(\n url=next_link, \n callback=self.parse_marketplace, \n meta=meta\n )\n\n product[\"marketplace\"] = marketplaces\n\n return product\n","repo_name":"aprosdev/ecom-predictor","sub_path":"product-ranking/product_ranking/spiders/bol.py","file_name":"bol.py","file_ext":"py","file_size_in_byte":7377,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"70126438916","text":"from process_distance import *\nfrom process_rects import *\n\n\nclass Screen():\n def __init__(self):\n self.x = 0\n self.y = 0\n self.w = 1366\n self.h = 768\n self.v_move = 20\n self.area_up = [0, 70]\n self.area_down = [698, 768]\n self.area_left = [0, 70]\n self.area_right = [1296, 1366]\n self.can_screen = False\n self.focus=False\n\n def focus_mainrole(self, mainrole):\n self.y = mainrole.pos_body[1] - 384\n\n def move_screen_1(self):\n if self.x < 1366:\n self.x += 10\n else:\n self.can_screen = True\n\n def move_screen_2(self):\n if self.x > 0:\n self.x -= 10\n else:\n self.can_screen = False\n\n def move_screen_3(self, main_role, mouse_y):\n if self.can_screen:\n if self.area_up[0] <= mouse_y <= self.area_up[1]:\n y_test = self.y - self.v_move\n if y_test >= main_role.pos_body[1] - 768:\n self.y = y_test\n\n if self.area_down[0] <= mouse_y <= self.area_down[1]:\n y_test = self.y + self.v_move\n if y_test <= main_role.pos_body[1] and y_test < 0:\n self.y = y_test\n\n def show_off(self, main_role, room, suf, runtime):\n room.show_room(suf, runtime, main_role)\n","repo_name":"HungKSeven/python_project_1","sub_path":"Project_1/source/screen.py","file_name":"screen.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23570596721","text":"#stalls1\n#tidyNum\n\n\ndef printCase(i,result):\n\n print(\"Case #{}: {} {}\".format(i, result[0], result[1]))\n\n\n\n\ndef nextwee(stalls):\n maxGap=max(stalls)\n M=maxGap\n if M%2==0:\n maxLRS=int(M/2)\n minLRS=int((M-2)/2)\n else:\n maxLRS=int((M-1)/2)\n minLRS=int((M-1)/2)\n stalls.append(maxLRS)\n stalls.append(minLRS)\n stalls.remove(M)\n return(stalls)\n\ndef lastwee(stalls):\n maxGap=max(stalls)\n M=maxGap\n if M%2==0:\n maxLRS=int(M/2)\n minLRS=int((M-2)/2)\n else:\n maxLRS=int((M-1)/2)\n minLRS=int((M-1)/2)\n #stalls.append[maxLRS]\n #stalls.append[minLRS]\n #stalls.remove(M)\n return[maxLRS,minLRS]\n\n\n\n\n\n\n\n\n#==================\nt=int(input())\ncount=0\nresult=\"?\"\n\nfor i in range(1,t+1):#+1):\n n=[int(s) for s in input().split(\" \")] #,\n newnum=\"\"\n testF=n\n stallsize=testF[0]\n wees=testF[1]\n #print(wees)\n #print(n)\n #print(\"i\",i,t)\n # print(n[0], n[1])\n ###dostuff\n stalls=[stallsize]\n for k in range(wees-1):\n nextwee(stalls)\n result=lastwee(stalls)\n #print(\"result :\",result)\n 
printCase(i,result)\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/2526.py","file_name":"2526.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71254995073","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport os\nfrom subprocess import call\n\ndef print_usage():\n print(\"Usage: python pack.py assignment_number\")\n\nprefix = 'assignment-'\nzh_num = ['零', '一', '二', '三', '四', '五',\n '六', '七', '八', '九', '十']\n\nif len(sys.argv) > 1:\n try:\n n = int(sys.argv[1])\n except ValueError:\n print(\"Please input correct number!\")\n print_usage()\n exit()\n upload_dir = prefix + \"%02d\" % n\n try:\n os.chdir(upload_dir)\n except FileNotFoundError:\n print(\"Please input correct number!\")\n print_usage()\n exit()\n call(['make', 'clean'])\n os.chdir('../')\n call(['zip', '-r', 'upload/PB13011038 阴钰 第' + zh_num[n] +\n '次作业.zip', upload_dir])\nelse:\n print(\"Please input the assignment number!\")\n print_usage()\n exit()\n","repo_name":"yxonic/java-assignments","sub_path":"pack.py","file_name":"pack.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5698510976","text":"# Baekjoon online Judge - 1026번. 보물\n\nN = int(input())\ntemp_A = list(map(int, input().split())) # 임시 A\nB = list(map(int, input().split())) # 실제 B\nA = [0] * N\ntemp_B = list(B) # 임시 B를 통해 가장 큰값의 위치를 구하면서 A의 가장 작은 값을 해당 위치에 놓는다.\nfor i in range(N):\n B_max_idx = temp_B.index(max(temp_B)) # B의 가장 큰 값의 인덱스\n A[B_max_idx] = min(temp_A) # B값이 가장 큰 곳에 A의 가장 작은 값을 놓는다.\n temp_A[temp_A.index(min(temp_A))] = 101 # 이후 A의 작은 값을 계속 구하기 위해 각 원소의 가장 큰 값으로 보이는 101로 초기화\n temp_B[B_max_idx] = -1 # 임시 B에 대해서 최대 값을 구하기 위해 -1로 초기화(0이 존재할 경우 방지)\n\n# 즉, 그리디하게 고정된 B의 값에서 값이 큰 순서대로 A의 작은 값들로 채워넣어 곱한다.\nresult = 0\nfor i in range(N):\n result += (A[i] * B[i])\nprint(result)\n","repo_name":"wnstj-yang/Algorithm","sub_path":"BOJ/BOJ_1026.py","file_name":"BOJ_1026.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34726171924","text":"#!/usr/bin/env python3\nfrom enemy import Enemy\nimport unittest\n\n\nclass Test_Enemy(unittest.TestCase):\n def setUp(self):\n self.henry = Enemy(health=70, mana=45, damage=20)\n\n def test_Enemy_Born(self):\n self.assertIsInstance(self.henry, Enemy)\n self.assertTrue(self.henry.mana == 45)\n self.assertEqual(self.henry.mana_regeneration, 0)\n self.assertEqual(self.henry.damage, 20)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"sevgo/PyCraft","sub_path":"enemy_test.py","file_name":"enemy_test.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7472270076","text":"import os\nimport random\nimport re\nimport subprocess\nimport sys\nimport unittest\nimport unittest.mock\n\nimport v8_commands\nimport v8_foozzie\nimport v8_fuzz_config\nimport v8_suppressions\n\ntry:\n basestring\nexcept NameError:\n basestring = str\n\nPYTHON3 = sys.version_info >= (3, 0)\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nFOOZZIE = os.path.join(BASE_DIR, 'v8_foozzie.py')\nTEST_DATA = os.path.join(BASE_DIR, 'testdata')\n\nKNOWN_BUILDS = [\n 'd8',\n 'clang_x86/d8',\n 'clang_x86_v8_arm/d8',\n 'clang_x64_v8_arm64/d8',\n 
'clang_x64_pointer_compression/d8',\n]\n\n\nclass ConfigTest(unittest.TestCase):\n def testExperiments(self):\n \"\"\"Test integrity of probabilities and configs.\"\"\"\n CONFIGS = v8_foozzie.CONFIGS\n EXPERIMENTS = v8_fuzz_config.FOOZZIE_EXPERIMENTS\n FLAGS = v8_fuzz_config.ADDITIONAL_FLAGS\n # Probabilities add up to 100%.\n first_is_int = lambda x: type(x[0]) == int\n assert all(map(first_is_int, EXPERIMENTS))\n assert sum(x[0] for x in EXPERIMENTS) == 100\n # Configs used in experiments are defined.\n assert all(map(lambda x: x[1] in CONFIGS, EXPERIMENTS))\n assert all(map(lambda x: x[2] in CONFIGS, EXPERIMENTS))\n # The last config item points to a known build configuration.\n assert all(map(lambda x: x[3] in KNOWN_BUILDS, EXPERIMENTS))\n # All flags have a probability.\n first_is_float = lambda x: type(x[0]) == float\n assert all(map(first_is_float, FLAGS))\n first_between_0_and_1 = lambda x: x[0] > 0 and x[0] < 1\n assert all(map(first_between_0_and_1, FLAGS))\n # Test consistent flags.\n second_is_string = lambda x: isinstance(x[1], basestring)\n assert all(map(second_is_string, FLAGS))\n # We allow spaces to separate more flags. We don't allow spaces in the flag\n # value.\n is_flag = lambda x: x.startswith('--')\n all_parts_are_flags = lambda x: all(map(is_flag, x[1].split()))\n assert all(map(all_parts_are_flags, FLAGS))\n\n def testConfig(self):\n \"\"\"Smoke test how to choose experiments.\"\"\"\n config = v8_fuzz_config.Config('foo', random.Random(42))\n experiments = [\n [25, 'ignition', 'jitless', 'd8'],\n [75, 'ignition', 'ignition', 'clang_x86/d8'],\n ]\n flags = [\n [0.1, '--flag'],\n [0.3, '--baz'],\n [0.3, '--foo --bar'],\n ]\n self.assertEqual(\n [\n '--first-config=ignition',\n '--second-config=jitless',\n '--second-d8=d8',\n '--second-config-extra-flags=--baz',\n '--second-config-extra-flags=--foo',\n '--second-config-extra-flags=--bar',\n ],\n config.choose_foozzie_flags(experiments, flags),\n )\n self.assertEqual(\n [\n '--first-config=ignition',\n '--second-config=jitless',\n '--second-d8=d8',\n ],\n config.choose_foozzie_flags(experiments, flags),\n )\n\n\nclass UnitTest(unittest.TestCase):\n def testCluster(self):\n crash_test_example_path = 'CrashTests/path/to/file.js'\n self.assertEqual(\n v8_foozzie.ORIGINAL_SOURCE_DEFAULT,\n v8_foozzie.cluster_failures('', compact=False))\n self.assertEqual(\n v8_foozzie.ORIGINAL_SOURCE_CRASHTESTS,\n v8_foozzie.cluster_failures(crash_test_example_path, compact=False))\n self.assertEqual(\n '_o_O_',\n v8_foozzie.cluster_failures(\n crash_test_example_path,\n compact=False,\n known_failures={crash_test_example_path: '_o_O_'}))\n self.assertEqual(\n '980',\n v8_foozzie.cluster_failures('v8/test/mjsunit/apply.js', compact=False))\n self.assertEqual(\n '98',\n v8_foozzie.cluster_failures('v8/test/mjsunit/apply.js', compact=True))\n\n def testDiff(self):\n def diff_fun(one, two, skip=False):\n suppress = v8_suppressions.get_suppression(skip)\n return suppress.diff_lines(one.splitlines(), two.splitlines())\n\n one = ''\n two = ''\n diff = None, None\n self.assertEqual(diff, diff_fun(one, two))\n\n one = 'a \\n b\\nc();'\n two = 'a \\n b\\nc();'\n diff = None, None\n self.assertEqual(diff, diff_fun(one, two))\n\n # Ignore line before caret and caret position.\n one = \"\"\"\nundefined\nweird stuff\n ^\nsomefile.js: TypeError: suppressed message\n undefined\n\"\"\"\n two = \"\"\"\nundefined\nother weird stuff\n ^\nsomefile.js: TypeError: suppressed message\n undefined\n\"\"\"\n diff = None, None\n self.assertEqual(diff, 
diff_fun(one, two))\n\n one = \"\"\"\nStill equal\nExtra line\n\"\"\"\n two = \"\"\"\nStill equal\n\"\"\"\n diff = '- Extra line', None\n self.assertEqual(diff, diff_fun(one, two))\n\n one = \"\"\"\nStill equal\n\"\"\"\n two = \"\"\"\nStill equal\nExtra line\n\"\"\"\n diff = '+ Extra line', None\n self.assertEqual(diff, diff_fun(one, two))\n\n one = \"\"\"\nundefined\nsomefile.js: TypeError: undefined is not a constructor\n\"\"\"\n two = \"\"\"\nundefined\notherfile.js: TypeError: undefined is not a constructor\n\"\"\"\n diff = \"\"\"- somefile.js: TypeError: undefined is not a constructor\n+ otherfile.js: TypeError: undefined is not a constructor\"\"\", None\n self.assertEqual(diff, diff_fun(one, two))\n\n # Test that skipping suppressions works.\n one = \"\"\"\nv8-foozzie source: foo\nweird stuff\n ^\n\"\"\"\n two = \"\"\"\nv8-foozzie source: foo\nother weird stuff\n ^\n\"\"\"\n self.assertEqual((None, 'foo'), diff_fun(one, two))\n diff = ('- ^\\n+ ^', 'foo')\n self.assertEqual(diff, diff_fun(one, two, skip=True))\n\n def testOutputCapping(self):\n def output(stdout, is_crash):\n exit_code = -1 if is_crash else 0\n return v8_commands.Output(\n exit_code=exit_code, stdout_bytes=stdout.encode('utf-8'), pid=0)\n\n def check(stdout1, stdout2, is_crash1, is_crash2, capped_lines1,\n capped_lines2):\n output1 = output(stdout1, is_crash1)\n output2 = output(stdout2, is_crash2)\n self.assertEqual(\n (capped_lines1.encode('utf-8'), capped_lines2.encode('utf-8')),\n v8_suppressions.get_output_capped(output1, output2))\n\n # No capping, already equal.\n check('1\\n2', '1\\n2', True, True, '1\\n2', '1\\n2')\n # No crash, no capping.\n check('1\\n2', '1\\n2\\n3', False, False, '1\\n2', '1\\n2\\n3')\n check('1\\n2\\n3', '1\\n2', False, False, '1\\n2\\n3', '1\\n2')\n # Cap smallest if all runs crash.\n check('1\\n2', '1\\n2\\n3', True, True, '1\\n2', '1\\n2')\n check('1\\n2\\n3', '1\\n2', True, True, '1\\n2', '1\\n2')\n check('1\\n2', '1\\n23', True, True, '1\\n2', '1\\n2')\n check('1\\n23', '1\\n2', True, True, '1\\n2', '1\\n2')\n # Cap the non-crashy run.\n check('1\\n2\\n3', '1\\n2', False, True, '1\\n2', '1\\n2')\n check('1\\n2', '1\\n2\\n3', True, False, '1\\n2', '1\\n2')\n check('1\\n23', '1\\n2', False, True, '1\\n2', '1\\n2')\n check('1\\n2', '1\\n23', True, False, '1\\n2', '1\\n2')\n # The crashy run has more output.\n check('1\\n2\\n3', '1\\n2', True, False, '1\\n2\\n3', '1\\n2')\n check('1\\n2', '1\\n2\\n3', False, True, '1\\n2', '1\\n2\\n3')\n check('1\\n23', '1\\n2', True, False, '1\\n23', '1\\n2')\n check('1\\n2', '1\\n23', False, True, '1\\n2', '1\\n23')\n # Keep output difference when capping.\n check('1\\n2', '3\\n4\\n5', True, True, '1\\n2', '3\\n4')\n check('1\\n2\\n3', '4\\n5', True, True, '1\\n2', '4\\n5')\n check('12', '345', True, True, '12', '34')\n check('123', '45', True, True, '12', '45')\n\n @unittest.mock.patch('v8_foozzie.DISALLOWED_FLAGS', ['A'])\n @unittest.mock.patch('v8_foozzie.CONTRADICTORY_FLAGS',\n [('B', 'C'), ('B', 'D')])\n def testFilterFlags(self):\n def check(input_flags, expected):\n self.assertEqual(expected, v8_foozzie.filter_flags(input_flags))\n\n check([], [])\n check(['A'], [])\n check(['D', 'A'], ['D'])\n check(['A', 'D'], ['D'])\n check(['C', 'D'], ['C', 'D'])\n check(['E', 'C', 'D', 'F'], ['E', 'C', 'D', 'F'])\n check(['B', 'D'], ['D'])\n check(['D', 'B'], ['B'])\n check(['C', 'B', 'D'], ['C', 'D'])\n check(['E', 'C', 'A', 'F', 'B', 'G', 'D'], ['E', 'C', 'F', 'G', 'D'])\n\n\ndef cut_verbose_output(stdout, n_comp):\n # This removes the first lines 
containing d8 commands of `n_comp` comparison\n # runs.\n return '\\n'.join(stdout.split('\\n')[n_comp * 2:])\n\n\ndef run_foozzie(second_d8_dir, *extra_flags, **kwargs):\n second_config = 'ignition_turbo'\n if 'second_config' in kwargs:\n second_config = 'jitless'\n kwargs = {}\n if PYTHON3:\n kwargs['text'] = True\n return subprocess.check_output([\n sys.executable, FOOZZIE,\n '--random-seed', '12345',\n '--first-d8', os.path.join(TEST_DATA, 'baseline', 'd8.py'),\n '--second-d8', os.path.join(TEST_DATA, second_d8_dir, 'd8.py'),\n '--first-config', 'ignition',\n '--second-config', second_config,\n os.path.join(TEST_DATA, 'fuzz-123.js'),\n ] + list(extra_flags), **kwargs)\n\n\ndef expected_output(file_name):\n with open(os.path.join(TEST_DATA, file_name)) as f:\n return f.read()\n\n\nclass SystemTest(unittest.TestCase):\n \"\"\"This tests the whole correctness-fuzzing harness with fake build\n artifacts.\n\n Overview of fakes:\n baseline: Example foozzie output including a syntax error.\n build1: Difference to baseline is a stack trace difference expected to\n be suppressed.\n build2: Difference to baseline is a non-suppressed output difference\n causing the script to fail.\n build3: As build1 but with an architecture difference as well.\n \"\"\"\n def testSyntaxErrorDiffPass(self):\n stdout = run_foozzie('build1', '--skip-smoke-tests')\n self.assertEqual('# V8 correctness - pass\\n',\n cut_verbose_output(stdout, 3))\n # Default comparison includes suppressions.\n self.assertIn('v8_suppressions.js', stdout)\n # Default comparison doesn't include any specific mock files.\n self.assertNotIn('v8_mock_archs.js', stdout)\n self.assertNotIn('v8_mock_webassembly.js', stdout)\n\n def _testDifferentOutputFail(self, expected, *args):\n with self.assertRaises(subprocess.CalledProcessError) as ctx:\n run_foozzie('build2', '--skip-smoke-tests',\n '--first-config-extra-flags=--flag1',\n '--first-config-extra-flags=--flag2=0',\n '--second-config-extra-flags=--flag3', *args)\n e = ctx.exception\n self.assertEqual(v8_foozzie.RETURN_FAIL, e.returncode)\n self.assertEqual(expected, cut_verbose_output(e.output, 2))\n\n def testDifferentOutputFail(self):\n self._testDifferentOutputFail(expected_output('failure_output.txt'))\n\n def testFailCompact(self):\n # Compact output drops the config line and uses a shorter hash.\n compact_output = expected_output('failure_output.txt')\n compact_output = re.sub(\n r'# V8 correctness configs: .*\\n', '', compact_output)\n compact_output = re.sub(\n r'sources: f60', 'sources: f6', compact_output)\n self._testDifferentOutputFail(compact_output, '--compact')\n\n def testSmokeTest(self):\n with self.assertRaises(subprocess.CalledProcessError) as ctx:\n run_foozzie('build2')\n e = ctx.exception\n self.assertEqual(v8_foozzie.RETURN_FAIL, e.returncode)\n self.assertEqual(expected_output('smoke_test_output.txt'), e.output)\n\n def testDifferentArch(self):\n \"\"\"Test that the architecture-specific mocks are passed to both runs when\n we use executables with different architectures.\n \"\"\"\n # Build 3 simulates x86, while the baseline is x64.\n stdout = run_foozzie('build3', '--skip-smoke-tests')\n lines = stdout.split('\\n')\n # TODO(machenbach): Don't depend on the command-lines being printed in\n # particular lines.\n self.assertIn('v8_mock_archs.js', lines[1])\n self.assertIn('v8_mock_archs.js', lines[3])\n\n def testDifferentArchFailFirst(self):\n \"\"\"Test that we re-test against x64. 
This tests the path that also fails\n on x64 and then reports the error as x64.\n \"\"\"\n with open(os.path.join(TEST_DATA, 'failure_output_arch.txt')) as f:\n expected_output = f.read()\n # Build 3 simulates x86 and produces a difference on --bad-flag, but\n # the baseline build shows the same difference when --bad-flag is passed.\n with self.assertRaises(subprocess.CalledProcessError) as ctx:\n run_foozzie('build3', '--skip-smoke-tests',\n '--second-config-extra-flags=--bad-flag')\n e = ctx.exception\n self.assertEqual(v8_foozzie.RETURN_FAIL, e.returncode)\n self.assertEqual(expected_output, cut_verbose_output(e.output, 3))\n\n def testDifferentArchFailSecond(self):\n \"\"\"As above, but we test the path that only fails in the second (ia32)\n run and not with x64 and then reports the error as ia32.\n \"\"\"\n with open(os.path.join(TEST_DATA, 'failure_output_second.txt')) as f:\n expected_output = f.read()\n # Build 3 simulates x86 and produces a difference on --very-bad-flag,\n # which the baseline build doesn't.\n with self.assertRaises(subprocess.CalledProcessError) as ctx:\n run_foozzie('build3', '--skip-smoke-tests',\n '--second-config-extra-flags=--very-bad-flag')\n e = ctx.exception\n self.assertEqual(v8_foozzie.RETURN_FAIL, e.returncode)\n self.assertEqual(expected_output, cut_verbose_output(e.output, 3))\n\n def testJitless(self):\n \"\"\"Test that webassembly is mocked out when comparing with jitless.\"\"\"\n stdout = run_foozzie(\n 'build1', '--skip-smoke-tests', second_config='jitless')\n lines = stdout.split('\\n')\n # TODO(machenbach): Don't depend on the command-lines being printed in\n # particular lines.\n self.assertIn('v8_mock_webassembly.js', lines[1])\n self.assertIn('v8_mock_webassembly.js', lines[3])\n\n def testSkipSuppressions(self):\n \"\"\"Test that the suppressions file is not passed when skipping\n suppressions.\n \"\"\"\n # Compare baseline with baseline. This passes as there is no difference.\n stdout = run_foozzie(\n 'baseline', '--skip-smoke-tests', '--skip-suppressions')\n self.assertNotIn('v8_suppressions.js', stdout)\n\n # Compare with a build that usually suppresses a difference. 
Now we fail\n # since we skip suppressions.\n with self.assertRaises(subprocess.CalledProcessError) as ctx:\n run_foozzie(\n 'build1', '--skip-smoke-tests', '--skip-suppressions')\n e = ctx.exception\n self.assertEqual(v8_foozzie.RETURN_FAIL, e.returncode)\n self.assertNotIn('v8_suppressions.js', e.output)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"nodejs/node","sub_path":"deps/v8/tools/clusterfuzz/foozzie/v8_foozzie_test.py","file_name":"v8_foozzie_test.py","file_ext":"py","file_size_in_byte":14241,"program_lang":"python","lang":"en","doc_type":"code","stars":99492,"dataset":"github-code","pt":"61"} +{"seq_id":"20601390737","text":"#import the essential libraries\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef calculateRMSE(y_actual, y_pred):\n sse = 0\n for i in range(len(y_pred)):\n error = y_pred[i] - y_actual[i]\n sse = sse + (error * error)\n rmse = np.sqrt(sse / len(y_pred))\n return rmse\n\n#SKLEARN LINEAR REGRESSION MODEL implementaion\ndef sk_model(X_train,y_train,X_test,y_test):\n from sklearn.linear_model import LinearRegression\n regressor = LinearRegression()\n regressor.fit(X_train,y_train)\n y_pred_test = regressor.predict(X_test)\n y_pred_train = regressor.predict(X_train)\n rmse_test = calculateRMSE(y_test,y_pred_test)\n rmse_train = calculateRMSE(y_train,y_pred_train)\n print(\"training rmse\",rmse_train)\n print(\"testing rmse\",rmse_test)\n\n\ndef my_model_normal(X_train,y_train,X_test,y_test):\n from LinearRegression import ModelLinearRegression\n constant = np.ones((len(X_train), 1))\n X_train = np.hstack((constant, X_train))\n const = np.ones((len(X_test), 1))\n X_test = np.hstack((const, X_test))\n regressor = ModelLinearRegression()\n regressor.fit_normal(X_train, y_train)\n y_pred_test = regressor.predict(X_test)\n y_pred_train = regressor.predict(X_train)\n rmse_test = regressor.calculateRMSE(y_test,y_pred_test)\n rmse_train = regressor.calculateRMSE(y_train,y_pred_train)\n print(\"training rmse(normal)\",rmse_train)\n print(\"testing rmse(normal)\",rmse_test)\n return rmse_train,rmse_test\n\ndef main():\n #preprocess the dataset\n #to run anyother dataset just change the filename\n filename_train = 'sinData_Train.csv'\n filename_test = 'sinData_Validation.csv'\n trainingdataset = pd.read_csv(filename_train,header=None)\n X_train = trainingdataset.iloc[:,:-1].values\n y_train = trainingdataset.iloc[:,-1].values\n testingdataset = pd.read_csv(filename_test,header=None)\n X_test = testingdataset.iloc[:,:-1].values\n y_test = testingdataset.iloc[:,-1].values\n powers = range(1,16)\n rmse_train_all_normal = []\n rmse_test_all_normal = []\n x_train = X_train\n x_test = X_test\n for power in powers:\n print(\"Executing for power: \",power)\n if power != 1:\n add = X_train**power\n x_train = np.append(x_train,add,axis=1)\n add = X_test**power\n x_test = np.append(x_test, add, axis=1)\n #print(\"Executing sklearn model\")\n #sk_model(X_train,y_train,X_test,y_test)\n print(\"Executing my model\")\n rmse_train_normal, rmse_test_normal = my_model_normal(x_train, y_train, x_test, y_test)\n rmse_train_all_normal.append(rmse_train_normal)\n rmse_test_all_normal.append(rmse_test_normal)\n print(\"Average Test RMSE with normal equations\",np.mean(rmse_test_all_normal))\n print(\"Standard Deviation Test RMSE with normal equations\",np.std(rmse_test_all_normal))\n print(\"Average Train RMSE with normal equations\", np.mean(rmse_train_all_normal))\n print(\"Standard Deviation Train RMSE with normal equations\", 
np.std(rmse_train_all_normal))\n plt.plot(powers,rmse_train_all_normal,'b--',powers,rmse_test_all_normal,'r--')\n plt.title('RMSE ACROSS ALL POWERS WITH NORMAL EQUATIONS')\n plt.xlabel('POWERS')\n plt.ylabel('RMSE')\n plt.show()\n\n\nif __name__ == '__main__':\n main()","repo_name":"parthasj90/MachineLearning-Regression","sub_path":"SinDataPolynomialRegressionAnalysis.py","file_name":"SinDataPolynomialRegressionAnalysis.py","file_ext":"py","file_size_in_byte":3314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2920876153","text":"import pygame\r\nfrom pygame import mixer\r\nimport math\r\nimport random\r\n\r\n\r\npygame.init()\r\npygame.font.init()\r\nFont = pygame.font.SysFont(\"freesansbold.ttf\", 64)\r\n\r\nscreen = pygame.display.set_mode((800, 600))\r\n\r\npygame.display.set_caption(\"A Bit Racey\")\r\nlogo = pygame.image.load(\"logo.png\")\r\npygame.display.set_icon(logo)\r\n\r\ncarImg = pygame.image.load(\"car.png\")\r\n\r\ndef car(x, y):\r\n screen.blit(carImg, (x, y))\r\n\r\n\r\ndef blue_enemy(x, y):\r\n blue_enemyImg = pygame.image.load(\"blue_enemy.png\")\r\n screen.blit(blue_enemyImg, (x, y))\r\n\r\n\r\ndef yellow_enemy(x, y):\r\n yellow_enemyImg = pygame.image.load(\"yellow_enemy.png\")\r\n screen.blit(yellow_enemyImg, (x, y))\r\n\r\n\r\nblue_enemyX = random.randint(0, 736)\r\nblue_enemyY = 0\r\nblue_enemyY_change = 2\r\n\r\nyellow_enemyX = random.randint(0, 736)\r\nyellow_enemyY = -250\r\nyellow_enemyY_change = 2\r\n\r\nblack = (0, 0, 0)\r\nwhite = (255, 255, 255)\r\ngrey = (128, 128, 128)\r\nred = (255, 0, 0)\r\n\r\ncarX = 380\r\ncarY = 480\r\ncarX_change = 0\r\ncarY_change = 0\r\n\r\n\r\ndef bar1(x, y):\r\n barImg = pygame.image.load(\"road_bar.png\")\r\n screen.blit(barImg, (x, y))\r\n\r\n\r\ndef bar2(x, y):\r\n barImg = pygame.image.load(\"road_bar.png\")\r\n screen.blit(barImg, (x, y))\r\n\r\n\r\nbarX = 280\r\nbarY = 0\r\nbarY_change = 2\r\n\r\nbar2X = 280\r\nbar2Y = 400\r\nbar2Y_change = 2\r\n\r\ndodged = 0\r\n\r\n\r\ndef cars_dodged(count):\r\n dodged_font = pygame.font.Font('freesansbold.ttf', 25)\r\n text = dodged_font.render(\"Score: \" + str(count), True, black)\r\n screen.blit(text, (10, 10))\r\n\r\n\r\ndef crashed():\r\n crash_sound = mixer.Sound(\"crash.mp3\")\r\n crash_sound.play()\r\n\r\n crashed = True\r\n\r\n while crashed:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n pygame.init()\r\n pygame.font.init()\r\n crashed_font = pygame.font.Font('freesansbold.ttf', 64)\r\n crashed_text = crashed_font.render(\"YOU CRASHED!\", True, black)\r\n screen.blit(crashed_text, (180, 250))\r\n\r\n mouse = pygame.mouse.get_pos()\r\n click = pygame.mouse.get_pressed()\r\n\r\n pygame.draw.rect(screen, (0, 200, 0), (155, 400, 130, 50))\r\n pygame.draw.rect(screen, (255, 0, 0), (520, 400, 100, 50))\r\n\r\n button_font = pygame.font.Font('freesansbold.ttf', 20)\r\n start_text = button_font.render(\"Play Again\", True, (0, 0, 0))\r\n screen.blit(start_text, (170, 415))\r\n\r\n quit_text = button_font.render(\"QUIT\", True, (0, 0, 0))\r\n screen.blit(quit_text, (547, 415))\r\n\r\n cars_dodged(dodged)\r\n\r\n pygame.display.update()\r\n\r\n if 170 + 100 > mouse[0] > 170 and 400 + 50 > mouse[1] > 400:\r\n if click[0] == 1:\r\n mixer.music.load(\"bg_music.mp3\")\r\n mixer.music.play(-1)\r\n crashed = False\r\n\r\n if 520 + 100 > mouse[0] > 520 and 400 + 50 > mouse[1] > 400:\r\n if click[0] == 1:\r\n pygame.quit()\r\n exit(0)\r\n\r\n\r\ndef black_enemy(x, y):\r\n 
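# Blit the black enemy sprite at (x, y); unlike the blue and yellow enemies\r\n    # it also drifts horizontally, and the image is reloaded on every call.\r\n    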
black_enemyImg = pygame.image.load(\"black_enemy.png\")\r\n screen.blit(black_enemyImg, (x, y))\r\n\r\n\r\nblack_enemyX = random.randint(50, 736)\r\nblack_enemyY = -700\r\nblack_enemyY_change = 2\r\nblack_enemyX_change = 2\r\n\r\n\r\ndef introduction():\r\n intro = True\r\n\r\n while intro:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n break\r\n\r\n screen.fill(grey)\r\n background = pygame.image.load(\"intro_bg.png\")\r\n screen.blit(background, (0, 0))\r\n\r\n mixer.music.load(\"bg_music.mp3\")\r\n mixer.music.play(-1)\r\n\r\n # pygame.font.init()\r\n # Font = pygame.font.SysFont(\"freesansbold.ttf\", 64)\r\n intro_font = pygame.font.Font('freesansbold.ttf', 64)\r\n controls_font = pygame.font.Font('freesansbold.ttf', 20)\r\n intro_text = intro_font.render(\"A BIT RACEY\", True, white)\r\n control1 = controls_font.render(\"W- Move Forward\", True, white)\r\n control2 = controls_font.render(\"A- Move Left\", True, white)\r\n control3 = controls_font.render(\"S- Move Backwards\", True, white)\r\n control4 = controls_font.render(\"D- Move Right\", True, white)\r\n alert = controls_font.render(\"Enemy Speed Will Increase Every 50 Points!\", True, red)\r\n screen.blit(intro_text, (200, 250))\r\n screen.blit(control1, (20, 20))\r\n screen.blit(control2, (20, 45))\r\n screen.blit(control3, (20, 70))\r\n screen.blit(control4, (20, 95))\r\n screen.blit(alert, (20, 120))\r\n\r\n pygame.draw.rect(screen, (0, 200, 0), (170, 400, 100, 50))\r\n pygame.draw.rect(screen, (255, 0, 0), (520, 400, 100, 50))\r\n\r\n button_font = pygame.font.Font('freesansbold.ttf', 20)\r\n start_text = button_font.render(\"START\", True, (0, 0, 0))\r\n screen.blit(start_text, (189, 415))\r\n\r\n quit_text = button_font.render(\"QUIT\", True, (0, 0, 0))\r\n screen.blit(quit_text, (542, 415))\r\n\r\n mouse = pygame.mouse.get_pos()\r\n click = pygame.mouse.get_pressed()\r\n\r\n if 170 + 100 > mouse[0] > 170 and 400 + 50 > mouse[1] > 400:\r\n if click[0] == 1:\r\n intro = False\r\n\r\n pygame.display.update()\r\n\r\n if 520 + 100 > mouse[0] > 520 and 400 + 50 > mouse[1] > 400:\r\n if click[0] == 1:\r\n pygame.quit()\r\n exit(0)\r\n\r\n\r\ndef isCollision(enemyX, enemyY, carX, carY):\r\n distance = math.sqrt(math.pow(enemyX - carX, 2) + (math.pow(enemyY - carY, 2)))\r\n\r\n if distance < 56:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ndef round_off(x, base=50):\r\n return int(base * round(float(x) / base))\r\n\r\n\r\nintroduction()\r\nrunning = True\r\nwhile running:\r\n # print(pygame.font.get_fonts())\r\n # print(blue_enemyY_change)\r\n screen.fill(grey)\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_a:\r\n carX_change -= 1.5\r\n if event.key == pygame.K_d:\r\n carX_change += 1.5\r\n if event.key == pygame.K_w:\r\n carY_change -= 1.5\r\n if event.key == pygame.K_s:\r\n carY_change += 1.5\r\n\r\n if event.type == pygame.KEYUP:\r\n if event.key == pygame.K_a or event.key == pygame.K_d:\r\n carX_change = 0\r\n if event.key == pygame.K_w or event.key == pygame.K_s:\r\n carY_change = 0\r\n\r\n if barY >= 790:\r\n barY = 0\r\n\r\n if bar2Y >= 790:\r\n bar2Y = 0\r\n\r\n bar1(barX, barY)\r\n barY += barY_change\r\n\r\n bar2(bar2X, bar2Y)\r\n bar2Y += bar2Y_change\r\n\r\n car(carX, carY)\r\n carX += carX_change\r\n carY += carY_change\r\n\r\n blue_enemy(blue_enemyX, blue_enemyY)\r\n blue_enemyY += blue_enemyY_change\r\n\r\n if blue_enemyY >= 600:\r\n blue_enemyY = 
0\r\n blue_enemyX = random.randint(0, 736)\r\n dodged += 1\r\n\r\n yellow_enemy(yellow_enemyX, yellow_enemyY)\r\n yellow_enemyY += yellow_enemyY_change\r\n\r\n if yellow_enemyY >= 600:\r\n yellow_enemyY = 0\r\n yellow_enemyX = random.randint(0, 736)\r\n dodged += 1\r\n\r\n black_enemy(black_enemyX, black_enemyY)\r\n black_enemyX += black_enemyX_change\r\n black_enemyY += black_enemyY_change\r\n\r\n if black_enemyY >= 600:\r\n black_enemyY = 0\r\n black_enemyX = random.randint(0, 736)\r\n dodged += 5\r\n\r\n if round_off(dodged) % 50 == 0 and dodged > 49 and dodged % 50 == 0:\r\n blue_enemyY_change += 0.5\r\n yellow_enemyY_change += 0.5\r\n black_enemyY_change += 0.5\r\n black_enemyX_change += 0.5\r\n barY_change += 0.3\r\n bar2Y_change += 0.3\r\n\r\n\r\n if black_enemyX >= 736:\r\n black_enemyX_change = -2\r\n\r\n if black_enemyX <= 0:\r\n black_enemyX_change = 2\r\n\r\n if carX >= 736:\r\n carX = 736\r\n mixer.music.stop()\r\n crashed()\r\n blue_enemyY_change = 0\r\n yellow_enemyY_change = 0\r\n barY_change = 0\r\n bar2Y_change = 0\r\n dodged = 0\r\n\r\n carX = 380\r\n carY = 480\r\n carX_change = 0\r\n carY_change = 0\r\n\r\n barX = 280\r\n barY = 0\r\n barY_change = 2\r\n\r\n bar2X = 280\r\n bar2Y = 400\r\n bar2Y_change = 2\r\n\r\n blue_enemyX = random.randint(0, 736)\r\n blue_enemyY = 0\r\n blue_enemyY_change = 2\r\n\r\n yellow_enemyX = random.randint(0, 736)\r\n yellow_enemyY = -250\r\n yellow_enemyY_change = 2\r\n\r\n black_enemyX = random.randint(50, 736)\r\n black_enemyY = -700\r\n black_enemyY_change = 2\r\n black_enemyX_change = 2\r\n\r\n if carX <= 0:\r\n carX = 0\r\n mixer.music.stop()\r\n crashed()\r\n blue_enemyY_change = 0\r\n yellow_enemyY_change = 0\r\n barY_change = 0\r\n bar2Y_change = 0\r\n dodged = 0\r\n\r\n carX = 380\r\n carY = 480\r\n carX_change = 0\r\n carY_change = 0\r\n\r\n barX = 280\r\n barY = 0\r\n barY_change = 2\r\n\r\n bar2X = 280\r\n bar2Y = 400\r\n bar2Y_change = 2\r\n\r\n blue_enemyX = random.randint(0, 736)\r\n blue_enemyY = 0\r\n blue_enemyY_change = 2\r\n\r\n yellow_enemyX = random.randint(0, 736)\r\n yellow_enemyY = -250\r\n yellow_enemyY_change = 2\r\n\r\n black_enemyX = random.randint(50, 736)\r\n black_enemyY = -700\r\n black_enemyY_change = 2\r\n black_enemyX_change = 2\r\n\r\n if carY >= 536:\r\n carY = 536\r\n mixer.music.stop()\r\n crashed()\r\n blue_enemyY_change = 0\r\n yellow_enemyY_change = 0\r\n barY_change = 0\r\n bar2Y_change = 0\r\n dodged = 0\r\n\r\n carX = 380\r\n carY = 480\r\n carX_change = 0\r\n carY_change = 0\r\n\r\n barX = 280\r\n barY = 0\r\n barY_change = 2\r\n\r\n bar2X = 280\r\n bar2Y = 400\r\n bar2Y_change = 2\r\n\r\n blue_enemyX = random.randint(0, 736)\r\n blue_enemyY = 0\r\n blue_enemyY_change = 2\r\n\r\n yellow_enemyX = random.randint(0, 736)\r\n yellow_enemyY = -250\r\n yellow_enemyY_change = 2\r\n\r\n black_enemyX = random.randint(50, 736)\r\n black_enemyY = -700\r\n black_enemyY_change = 2\r\n black_enemyX_change = 2\r\n\r\n if carY <= 0:\r\n carY = 0\r\n mixer.music.stop()\r\n crashed()\r\n blue_enemyY_change = 0\r\n yellow_enemyY_change = 0\r\n barY_change = 0\r\n bar2Y_change = 0\r\n dodged = 0\r\n\r\n carX = 380\r\n carY = 480\r\n carX_change = 0\r\n carY_change = 0\r\n\r\n barX = 280\r\n barY = 0\r\n barY_change = 2\r\n\r\n bar2X = 280\r\n bar2Y = 400\r\n bar2Y_change = 2\r\n\r\n blue_enemyX = random.randint(0, 736)\r\n blue_enemyY = 0\r\n blue_enemyY_change = 2\r\n\r\n yellow_enemyX = random.randint(0, 736)\r\n yellow_enemyY = -250\r\n yellow_enemyY_change = 2\r\n\r\n black_enemyX = random.randint(50, 
736)\r\n black_enemyY = -700\r\n black_enemyY_change = 2\r\n black_enemyX_change = 2\r\n\r\n if isCollision(blue_enemyX, blue_enemyY, carX, carY):\r\n mixer.music.stop()\r\n crashed()\r\n dodged = 0\r\n\r\n carX = 380\r\n carY = 480\r\n carX_change = 0\r\n carY_change = 0\r\n\r\n barX = 280\r\n barY = 0\r\n barY_change = 2\r\n\r\n bar2X = 280\r\n bar2Y = 400\r\n bar2Y_change = 2\r\n\r\n blue_enemyX = random.randint(0, 736)\r\n blue_enemyY = 0\r\n blue_enemyY_change = 2\r\n\r\n yellow_enemyX = random.randint(0, 736)\r\n yellow_enemyY = -250\r\n yellow_enemyY_change = 2\r\n\r\n black_enemyX = random.randint(50, 736)\r\n black_enemyY = -700\r\n black_enemyY_change = 2\r\n black_enemyX_change = 2\r\n\r\n if isCollision(yellow_enemyX, yellow_enemyY, carX, carY):\r\n mixer.music.stop()\r\n crashed()\r\n dodged = 0\r\n\r\n carX = 380\r\n carY = 480\r\n carX_change = 0\r\n carY_change = 0\r\n\r\n barX = 280\r\n barY = 0\r\n barY_change = 2\r\n\r\n bar2X = 280\r\n bar2Y = 400\r\n bar2Y_change = 2\r\n\r\n blue_enemyX = random.randint(0, 736)\r\n blue_enemyY = 0\r\n blue_enemyY_change = 2\r\n\r\n yellow_enemyX = random.randint(0, 736)\r\n yellow_enemyY = -250\r\n yellow_enemyY_change = 2\r\n\r\n black_enemyX = random.randint(50, 736)\r\n black_enemyY = -700\r\n black_enemyY_change = 2\r\n black_enemyX_change = 2\r\n\r\n if isCollision(black_enemyX, black_enemyY, carX, carY):\r\n mixer.music.stop()\r\n crashed()\r\n dodged = 0\r\n\r\n carX = 380\r\n carY = 480\r\n carX_change = 0\r\n carY_change = 0\r\n\r\n barX = 280\r\n barY = 0\r\n barY_change = 2\r\n\r\n bar2X = 280\r\n bar2Y = 400\r\n bar2Y_change = 2\r\n\r\n blue_enemyX = random.randint(0, 736)\r\n blue_enemyY = 0\r\n blue_enemyY_change = 2\r\n\r\n yellow_enemyX = random.randint(0, 736)\r\n yellow_enemyY = -250\r\n yellow_enemyY_change = 2\r\n\r\n black_enemyX = random.randint(50, 736)\r\n black_enemyY = -700\r\n black_enemyY_change = 2\r\n black_enemyX_change = 2\r\n\r\n cars_dodged(dodged)\r\n\r\n pygame.display.update()\r\n","repo_name":"agastya08/roadies","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22017613863","text":"import requests\r\nimport json\r\nfrom libovsdb import libovsdb\r\nimport re\r\nryu_ip = 'http://127.0.0.1:8081'\r\novn_nb = 'tcp:192.168.122.230:6641'\r\novn_sb = 'tcp:192.168.122.230:6642'\r\n\r\nswitch_all_url = ryu_ip +'/simpleswitch/allswitch/'\r\nswitch_url = ryu_ip +'/simpleswitch/switch/'\r\nrequest_url = ryu_ip + '/simpleswitch/request/'\r\npath_find_url = ryu_ip + '/simpleswitch/pathfind/'\r\n\r\nhost_url = ryu_ip +'/simpleswitch/host/'\r\n\r\n# DEFAULT_OVN_BW = \"1,000,000,000\"\r\n# DEFAULT_OVN_BW = DEFAULT_OVN_BW.replace(\",\", \"\")\r\n# DEFAULT_OVN_BW = int(DEFAULT_OVN_BW)\r\n# HFSC take almost 0.95 of link BW\r\nDEFAULT_OVN_BW = 1000000000\r\nDEFAULT_OVN_BW = 100000000\r\n\r\n# DEFAULT_OVN_BW = int(1000000000/0.97)\r\nQUEUE_TYPE = 'linux-hfsc'\r\n\r\ndef get_virtual_topo():\r\n lswitch = {}\r\n encaps = []\r\n chassises = []\r\n sb = libovsdb.OVSDBConnection(ovn_sb, \"OVN_Southbound\")\r\n tx_sb = sb.transact()\r\n\r\n # Get logical switch uuid and vni from sb\r\n res = tx_sb.row_select(table = \"Datapath_Binding\",\r\n columns = [\"_uuid\",\"tunnel_key\"],\r\n where = [])\r\n res = tx_sb.row_select(table = \"Chassis\",\r\n columns = [\"_uuid\",\"encaps\"],\r\n where = [])\r\n res = tx_sb.row_select(table = \"Encap\",\r\n columns = [\"_uuid\",\"ip\"],\r\n where 
= [])\r\n try:\r\n response = tx_sb.commit()\r\n lss = response['result'][0]['rows']\r\n\r\n chassises = response['result'][1]['rows']\r\n for chassis in chassises:\r\n chassis['_uuid'] = chassis['_uuid'][1]\r\n chassis['encaps'] = chassis['encaps'][1]\r\n\r\n encaps = response['result'][2]['rows']\r\n for encap in encaps:\r\n encap['_uuid'] = encap['_uuid'][1]\r\n except Exception as msg:\r\n raise ValueError(msg)\r\n else:\r\n for ls in lss:\r\n attr = {}\r\n attr['vni'] = ls.get('tunnel_key')\r\n attr['ports'] = []\r\n uuid = ls.get('_uuid')[1]\r\n\r\n response = tx_sb.row_select(table = \"Port_Binding\",\r\n columns = ['mac','tunnel_key','chassis','logical_port'],\r\n where = [[\"datapath\", \"==\", [\"uuid\",uuid]]])\r\n # try :\r\n res = tx_sb.commit()\r\n lps = res['result'][0]['rows']\r\n # print(lps)\r\n lports = []\r\n for lp in lps:\r\n temp = {'inner_ip': '', 'outter_ip': '', 'tunnel_key': '','logical_port': ''}\r\n for chassis in chassises:\r\n if chassis['_uuid'] != lp.get('chassis')[1]:\r\n continue\r\n for encap in encaps:\r\n if encap['_uuid'] == chassis['encaps']:\r\n temp['outter_ip'] = encap['ip']\r\n\r\n temp['inner_ip'] = re.findall( r'[0-9]+(?:\\.[0-9]+){3}', lp.get('mac'))\r\n temp['tunnel_key'] = int(lp.get('tunnel_key'))\r\n temp['logical_port'] = (lp.get('logical_port'))\r\n\r\n lports.append(temp)\r\n # except Exception as msg:\r\n # raise ValueError(msg)\r\n else:\r\n attr['ports'] = lports\r\n lswitch[ls.get('_uuid')[1]] = attr \r\n result = list(lswitch.values())\r\n return result\r\n\r\ndef get_switch_all():\r\n url = switch_all_url\r\n x = requests.get(url)\r\n return x\r\n \r\n\r\ndef get_switch(switch_id):\r\n url = switch_url + str(switch_id)\r\n x = requests.get(url)\r\n return x\r\n\r\n\r\ndef get_host():\r\n url = host_url \r\n x = requests.get(url)\r\n return x\r\n\r\ndef get_bw_ovn_all(virtual_topo):\r\n db = libovsdb.OVSDBConnection(ovn_nb, \"OVN_Northbound\")\r\n vm_ip_dict = {}\r\n for ls in virtual_topo:\r\n vni = ls[\"vni\"]\r\n lsps = ls[\"ports\"]\r\n for lsp in lsps:\r\n if not lsp[\"outter_ip\"]:\r\n continue\r\n vm_ip_dict[lsp[\"outter_ip\"]]=(lsp[\"inner_ip\"][0],lsp['logical_port'],vni)\r\n # vm_ip_dict[lsp[\"inner_ip\"][0]]=(lsp[\"outter_ip\"],lsp['logical_port'],vni)\r\n\r\n # print(vm_ip_dict)\r\n bw_min_list = {}\r\n for ip in vm_ip_dict.keys():\r\n port = vm_ip_dict[ip][1]\r\n res = db.select(table = \"Logical_Switch_Port\",\r\n columns = [\"_uuid\", \"name\"],\r\n where = [[\"name\", \"==\", port]])\r\n # print(json.dumps(res,indent=4))\r\n \r\n # for re in res:\r\n # print(re)\r\n # print(json.dumps(re,indent=4))\r\n # if not re[\"queue_rules\"]:\r\n # continue\r\n bw_min_sum = 0\r\n try:\r\n for queue_rule in res[0][\"queue_rules\"]:\r\n if type(queue_rule) == list:\r\n queue_rule = queue_rule[1]\r\n re = db.select(table = \"Queue\",\r\n # columns = [\"_uuid\", \"bandwidth_min\"],\r\n # where = [])\r\n where = [[\"_uuid\", \"==\", [\"uuid\",queue_rule]]])\r\n # print(re)\r\n if not re:\r\n continue\r\n for r in re:\r\n if not r[\"bandwidth_min\"]:\r\n continue\r\n bw_min_sum += int(r[\"bandwidth_min\"][0][1])\r\n except:\r\n pass\r\n min_rate = DEFAULT_OVN_BW - bw_min_sum\r\n min_rate = f\"{min_rate:,}\"\r\n bw_min_list[ip]=min_rate\r\n print(bw_min_list)\r\n return bw_min_list\r\n \r\n\r\n\r\n\r\ndef put_demand(path,src_ip,dst_ip,vni,max_rate,min_rate,mod):\r\n max_rate = max_rate.replace(\",\", \"\")\r\n min_rate = min_rate.replace(\",\", \"\")\r\n demand = {}\r\n request = {}\r\n if path == \"None\":\r\n path = None\r\n 
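# Build the JSON body for the Ryu /simpleswitch/request/ endpoint,\r\n    # normalising the \"None\" strings sent by the web form to real None values.\r\n    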
demand['path'] = path\r\n    demand['src_ip'] = src_ip\r\n    demand['dst_ip'] = dst_ip\r\n    # print(mod)\r\n\r\n    if vni == 'None':\r\n        vni = None\r\n\r\n    if mod == \"None\":\r\n        mod = None\r\n    \r\n    demand['mod'] = mod\r\n    demand['vni'] = vni\r\n    \r\n    if max_rate:\r\n        request['max-rate'] = max_rate\r\n    if min_rate:\r\n        request['min-rate'] = min_rate\r\n    # demand['vm_traffic'] = False\r\n    demand['request'] = request\r\n    demand['vm_traffic'] = False\r\n\r\n    print(demand)\r\n    \r\n    x = requests.put(request_url, json = demand)\r\n    # print(x.request.url)\r\n    \r\n    \r\n    return x\r\n\r\ndef put_demand_vm(src_ip,dst_ip,max_rate,min_rate,mod,virtual_topo,vni):\r\n    # Remove thousands separators from the input format (X,XXX,XXX)\r\n    max_rate = (max_rate.replace(\",\", \"\"))\r\n    min_rate = (min_rate.replace(\",\", \"\"))\r\n    demand = {}\r\n    request = {}\r\n    # print(mod)\r\n    \r\n    if vni == \"None\":\r\n        vni = None\r\n    if mod == \"None\":\r\n        mod = None\r\n    demand['mod'] = mod\r\n    demand['vni'] = vni\r\n    if vni is None:\r\n        return \"missing vni\",False\r\n    vni = int(vni)\r\n    if max_rate:\r\n        request['max-rate'] = max_rate\r\n    if min_rate:\r\n        request['min-rate'] = min_rate\r\n    \r\n    demand['request'] = request\r\n    # print(demand)\r\n    \r\n\r\n    max_rate = int(max_rate or 0)\r\n    min_rate = int(min_rate or 1)\r\n\r\n    \r\n\r\n    if max_rate <= min_rate and max_rate and min_rate:\r\n        return \"Max rate <= Min rate\",False\r\n    # Check in the virtual topo which hypervisor each VM belongs to\r\n    vm_ip_dict = {}\r\n    for ls in virtual_topo:\r\n        vni_loop = ls[\"vni\"]\r\n        lsps = ls[\"ports\"]\r\n        for lsp in lsps:\r\n            if not lsp[\"outter_ip\"]:\r\n                continue\r\n            vm_ip_dict[(lsp[\"inner_ip\"][0],vni_loop)]=(lsp[\"outter_ip\"],lsp['logical_port'])\r\n    \r\n    if (src_ip,vni) not in vm_ip_dict or (dst_ip,vni) not in vm_ip_dict:\r\n        resp = \"%s \\n%s\" %((src_ip,vni),vm_ip_dict)\r\n        return \"Can't find VM IP in Logical Topology \"+resp, False\r\n    bw_ovn_resv = get_bw_ovn_all(virtual_topo)\r\n    if vm_ip_dict[(src_ip,vni)][0] == vm_ip_dict[(dst_ip,vni)][0]:\r\n        # Internal VM traffic (check outerport)\r\n        resp = \"VM to VM traffic: \"\r\n        resv_min = int(bw_ovn_resv[vm_ip_dict[(src_ip,vni)][0]].replace(\",\", \"\"))\r\n\r\n        if min_rate > resv_min:\r\n            min_rate = f'{min_rate:,}'\r\n            resp += 'Minrate: %s > Resv min rate: %s in HV %s' %\\\r\n                (min_rate,bw_ovn_resv[vm_ip_dict[(src_ip,vni)][0]],vm_ip_dict[(src_ip,vni)][0])\r\n            return resp, False\r\n        resp1,cond = handle_ovn_internal(vm_ip_dict[(src_ip,vni)][1],vm_ip_dict[(dst_ip,vni)][1],min_rate,max_rate,mod)\r\n        resp += resp1\r\n        return resp,cond\r\n    \r\n    \r\n    cond = True\r\n    cond1 = True\r\n    # HV to HV traffic\r\n    resp = \"HV to HV traffic: \\nSDN: \"\r\n    \r\n    # demand['vm_traffic'] = (src_ip,dst_ip)\r\n    demand['src_ip'] = vm_ip_dict[(src_ip,vni)][0]\r\n    demand['dst_ip'] = vm_ip_dict[(dst_ip,vni)][0]\r\n    # demand['vni'] = vm_ip_dict[dst_ip][2]\r\n    demand['vm_src'] = src_ip\r\n    demand['vm_dst'] = dst_ip\r\n    demand['vm_traffic'] = True\r\n\r\n    x = requests.put(request_url, json = demand)\r\n    if x.status_code != 200: \r\n        cond = False\r\n        resp += x.text\r\n        return resp,cond\r\n    resv_min = int(bw_ovn_resv[vm_ip_dict[(src_ip,vni)][0]].replace(\",\", \"\"))\r\n    if min_rate > resv_min:\r\n        # Flag the failure before min_rate is formatted back into a display string\r\n        cond = False\r\n        min_rate = f'{min_rate:,}'\r\n        resp += 'Minrate: %s > Resv min rate: %s in HV %s' %\\\r\n            (min_rate,bw_ovn_resv[vm_ip_dict[(dst_ip,vni)][0]],vm_ip_dict[(dst_ip,vni)][0])\r\n    resp1,cond1 = handle_ovn_external(vm_ip_dict[(src_ip,vni)][0],vm_ip_dict[(src_ip,vni)][1],vm_ip_dict[(dst_ip,vni)][1],min_rate,max_rate,mod)\r\n    # resp1,cond1 = 
handle_ovn_internal(vm_ip_dict[src_ip][1],vm_ip_dict[dst_ip][1],min_rate,max_rate,mod)\r\n \r\n resp += resp1\r\n if cond == False or cond1 == False:\r\n return resp, cond\r\n resp += \"\\nOVN: \"\r\n \r\n resp2 = x.text\r\n \r\n # print(x.request.url)\r\n # resp1 = (json.loads(resp1))\r\n resp = resp + resp2\r\n \r\n return resp,cond\r\n\r\ndef get_bw_rev_format(virutal_topo):\r\n bw_resv = get_bw_ovn_all(virutal_topo)\r\n hv_ip = []\r\n for k in bw_resv.keys():\r\n hv_ip.append(k)\r\n if hv_ip:\r\n bw_resv['outer_ip'] = hv_ip\r\n return bw_resv\r\n\r\n\r\n\r\ndef mod_request(port,parent_rate,queue_list,src_ip):\r\n ovsdb_server = \"tcp:\"+ src_ip + \":6640\"\r\n db = libovsdb.OVSDBConnection(ovsdb_server, \"Open_vSwitch\")\r\n get_port = db.select(table = \"Port\",\r\n columns = ['_uuid',\"qos\"],\r\n where = [[\"name\", \"==\", port]])\r\n port_qos = get_port[0]['qos']\r\n # print(port_qos)\r\n # print(\"select qos result: %s\" %(json.dumps(get_port, indent=4)))\r\n if not port_qos:\r\n parent_rate = ['map',[[\"max-rate\",parent_rate]]]\r\n qos = db.insert(table = \"QoS\",\r\n row = {\"other_config\":parent_rate,\"type\":QUEUE_TYPE},\r\n refer = [\"Port\", \"qos\", [\"name\", \"==\", port]])\r\n # print(\"QOS: \",qos)\r\n get_port = db.select(table = \"Port\",\r\n columns = ['_uuid',\"qos\"],\r\n where = [[\"name\", \"==\", port]])\r\n port_qos = get_port[0]['qos']\r\n \r\n list_queue = install_queue_port(db,queue_list,port_qos)\r\n return list_queue\r\n get_queue = db.select(table = \"QoS\",\r\n columns = ['_uuid',\"queues\"],\r\n where = [[\"_uuid\", \"==\", [\"uuid\",port_qos]]])\r\n # print(get_queue[0]['queues'])\r\n # print(\"select qos result: %s\" %(json.dumps(get_queue, indent=4)))\r\n\r\n if not get_queue[0]['queues']:\r\n list_queue = install_queue_port(db,queue_list,port_qos)\r\n return list_queue\r\n list_queue = get_queue[0]['queues'].copy()\r\n len_queue = len(list_queue)\r\n new_queue = []\r\n for key in range(len(queue_list)):\r\n max_rate = queue_list[key]['max-rate']\r\n min_rate = queue_list[key]['min-rate']\r\n burst = queue_list[key]['burst']\r\n\r\n config = ['map',[[\"max-rate\",max_rate],[\"min-rate\",min_rate],[\"burst\",burst]]]\r\n res = db.insert(table = \"Queue\",\r\n row = {\"other_config\":config,\"external_ids\":['map', [[\"queue_id\",str(len_queue+key+1)]]]},)\r\n queue_uuid = res['uuid'][0]\r\n resp = {\"max-rate\":max_rate,\"min-rate\":min_rate,\"burst\":burst,\"queue_id\":len_queue+key+1} \r\n new_queue.append(resp)\r\n\r\n # print(queue_uuid)\r\n \r\n list_queue.append([len_queue+key+1,['uuid',queue_uuid]])\r\n # print(list_queue)\r\n res_qos = db.update(table = \"QoS\",\r\n row = {\"queues\":['map', list_queue]},\r\n where = [[\"_uuid\", \"==\", [\"uuid\",port_qos]]])\r\n return new_queue\r\n # tx.commit()\r\n # print(res_qos)\r\n\r\n\r\n\r\ndef install_queue_port(db,queue_list,port_qos):\r\n list_queue = []\r\n new_queue = []\r\n for key in range(len(queue_list)):\r\n max_rate = queue_list[key]['max-rate']\r\n min_rate = queue_list[key]['min-rate']\r\n burst = queue_list[key]['burst']\r\n\r\n config = ['map',[[\"max-rate\",max_rate],[\"min-rate\",min_rate],[\"burst\",burst]]]\r\n res = db.insert(table = \"Queue\",\r\n row = {\"other_config\":config,\"external_ids\":['map', [[\"queue_id\",str(key+1)]]]})\r\n # print(res)\r\n resp = {\"max-rate\":max_rate,\"min-rate\":min_rate,\"burst\":burst,\"queue_id\":key+1} \r\n new_queue.append(resp)\r\n\r\n queue_uuid = res['uuid'][0]\r\n # print(queue_uuid)\r\n \r\n 
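# Collect the (queue_id, uuid) pairs; the QoS row's \"queues\" map is\r\n        # rewritten with the accumulated list on each pass through the loop.\r\n        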
list_queue.append([key+1,['uuid',queue_uuid]])\r\n # print(list_queue)\r\n res_qos = db.update(table = \"QoS\",\r\n row = {\"queues\":['map', list_queue]},\r\n where = [[\"_uuid\", \"==\", [\"uuid\",port_qos]]])\r\n return new_queue\r\n # tx.commit()\r\n # print(res_qos)\r\n\r\ndef handle_ovn_internal(src_lp_name,dst_lp_name,min_rate,max_rate,mod):\r\n db = libovsdb.OVSDBConnection(ovn_nb, \"OVN_Northbound\")\r\n if not max_rate:\r\n max_rate = DEFAULT_OVN_BW\r\n\r\n res = db.select(table = \"Logical_Switch_Port\",\r\n columns = [\"_uuid\", \"name\"],\r\n where = [[\"name\", \"==\", dst_lp_name]])\r\n queue_rule_list = res[0][\"queue_rules\"]\r\n # print(\"queue:\",queue_rule_list)\r\n\r\n # print(json.dumps(queue_rule_list,indent=4))\r\n match = 'inport==\"%s\"' % src_lp_name\r\n # match = 'ip'\r\n \r\n if not queue_rule_list:\r\n # New queue\r\n # match = 'tcp'\r\n res = db.insert(table = \"Queue\",\r\n row = {\"direction\":\"to-lport\",\"priority\":200,\r\n \"match\":match,\"bandwidth_max\":['map',[[\"rate\",max_rate]]],\"bandwidth_min\":['map',[[\"min\",min_rate]]]},\r\n refer = [\"Logical_Switch_Port\", \"queue_rules\", [\"name\", \"==\", dst_lp_name]])\r\n print(res)\r\n return \"Success install new queue\",True\r\n elif type(queue_rule_list) == list:\r\n for rule in queue_rule_list:\r\n print(\"FIND1 %s\"%rule[1])\r\n get_source_lp = db.select(table = \"Queue\",\r\n # columns = ['_uuid',\"_uuid\"],\r\n where = [[\"_uuid\", \"==\", [\"uuid\",rule[1]]]])\r\n print(json.dumps(get_source_lp,indent=4))\r\n if get_source_lp[0]['match'] != match:\r\n continue\r\n if not mod:\r\n print(\"DUP\")\r\n return \"Duplicate Request\", False\r\n # Modify exist queue\r\n res = db.update(table = \"Queue\",\r\n row = {\"bandwidth_max\":['map',[[\"rate\",max_rate]]],\"bandwidth_min\":['map',[[\"min\",min_rate]]]},\r\n where = [[\"_uuid\", \"==\", [\"uuid\",rule[1]]]])\r\n print(res)\r\n return \"Success modify queue\",True\r\n # New queue\r\n res = db.insert(table = \"Queue\",\r\n row = {\"direction\":\"to-lport\",\"priority\":200,\r\n \"match\":match,\"bandwidth_max\":['map',[[\"rate\",max_rate]]],\"bandwidth_min\":['map',[[\"min\",min_rate]]]},\r\n refer = [\"Logical_Switch_Port\", \"queue_rules\", [\"name\", \"==\", dst_lp_name]])\r\n # print(res)\r\n return \"Success install new queue\",True\r\n elif type(queue_rule_list) == str:\r\n rule = queue_rule_list\r\n print(\"FIND2 %s\"%rule)\r\n get_source_lp = db.select(table = \"Queue\",\r\n # columns = ['_uuid',\"_uuid\"],\r\n where = [[\"_uuid\", \"==\", [\"uuid\",rule]]])\r\n print(json.dumps(get_source_lp,indent=4))\r\n if (get_source_lp[0]['match'] != match):\r\n # New Queue\r\n res = db.insert(table = \"Queue\",\r\n row = {\"direction\":\"to-lport\",\"priority\":200,\r\n \"match\":match,\"bandwidth_max\":['map',[[\"rate\",max_rate]]],\"bandwidth_min\":['map',[[\"min\",min_rate]]]},\r\n refer = [\"Logical_Switch_Port\", \"queue_rules\", [\"name\", \"==\", dst_lp_name]])\r\n print(res)\r\n \r\n return \"Success install new queue\",True\r\n if not mod:\r\n # print(\"DUP\")\r\n return \"Duplicate Request\", False\r\n # Modify exist queue\r\n res = db.update(table = \"Queue\",\r\n row = {\"bandwidth_max\":['map',[[\"rate\",max_rate]]],\"bandwidth_min\":['map',[[\"min\",min_rate]]]},\r\n where = [[\"_uuid\", \"==\", [\"uuid\",rule]]])\r\n print(res)\r\n return \"Success modify queue\",True\r\n\r\n\r\ndef handle_ovn_external(src_ip,src_lp_name,dst_lp_name,min_rate,max_rate,mod):\r\n db = libovsdb.OVSDBConnection(ovn_nb, \"OVN_Northbound\")\r\n if not 
max_rate:\r\n max_rate = DEFAULT_OVN_BW\r\n\r\n res = db.select(table = \"Logical_Switch_Port\",\r\n columns = [\"_uuid\", \"name\"],\r\n where = [[\"name\", \"==\", dst_lp_name]])\r\n queue_rule_list = res[0][\"queue_rules\"]\r\n # print(\"queue:\",queue_rule_list)\r\n\r\n # print(json.dumps(queue_rule_list,indent=4))\r\n match = 'inport==\"%s\"' % src_lp_name\r\n # match = 'ip'\r\n \r\n if not queue_rule_list:\r\n # New queue\r\n # Default Tunnel port name of HV\r\n port_tun = \"ovn-cca09a-0\"\r\n queue_new = [{\"max-rate\":str(max_rate),\"min-rate\":str(min_rate),\"burst\":\"0\"}]\r\n queue_ret = mod_request(port_tun,str(DEFAULT_OVN_BW),queue_new,src_ip)\r\n # match = 'tcp'\r\n queue_id = queue_ret[0][\"queue_id\"]\r\n\r\n res = db.insert(table = \"Queue\",\r\n row = {\"direction\":\"to-lport\",\"priority\":200,\"id_queue\":queue_id,\r\n \"match\":match,\"bandwidth_max\":['map',[[\"rate\",max_rate]]],\"bandwidth_min\":['map',[[\"min\",min_rate]]]},\r\n refer = [\"Logical_Switch_Port\", \"queue_rules\", [\"name\", \"==\", dst_lp_name]])\r\n res = db.insert(table = \"Queue\",\r\n row = {\"direction\":\"from-lport\",\"priority\":200,\"id_queue\":queue_id,\r\n \"match\":match,\"bandwidth_max\":['map',[[\"rate\",max_rate]]],\"bandwidth_min\":['map',[[\"min\",min_rate]]]},\r\n refer = [\"Logical_Switch_Port\", \"queue_rules\", [\"name\", \"==\", dst_lp_name]])\r\n print(res)\r\n return \"Success install new queue\",True\r\n elif type(queue_rule_list) == list:\r\n for rule in queue_rule_list:\r\n print(\"FIND1 %s\"%rule[1])\r\n get_source_lp = db.select(table = \"Queue\",\r\n # columns = ['_uuid',\"_uuid\"],\r\n where = [[\"_uuid\", \"==\", [\"uuid\",rule[1]]]])\r\n print(json.dumps(get_source_lp,indent=4))\r\n if get_source_lp[0]['match'] != match:\r\n continue\r\n if not mod:\r\n print(\"DUP\")\r\n return \"Duplicate Request\", False\r\n # Modify exist queue\r\n res = db.update(table = \"Queue\",\r\n row = {\"bandwidth_max\":['map',[[\"rate\",max_rate]]],\"bandwidth_min\":['map',[[\"min\",min_rate]]]},\r\n where = [[\"_uuid\", \"==\", [\"uuid\",rule[1]]]])\r\n print(res)\r\n return \"Success modify queue\",True\r\n # New queue\r\n # Default Tunnel port name of HV\r\n port_tun = \"ovn-cca09a-0\"\r\n # port_tun = \"br-int\"\r\n\r\n queue_new = [{\"max-rate\":str(max_rate),\"min-rate\":str(min_rate),\"burst\":\"0\"}]\r\n queue_ret = mod_request(port_tun,str(DEFAULT_OVN_BW),queue_new,src_ip)\r\n # match = 'tcp'\r\n queue_id = queue_ret[0][\"queue_id\"]\r\n res = db.insert(table = \"Queue\",\r\n row = {\"direction\":\"to-lport\",\"priority\":200,\"id_queue\":queue_id,\r\n \"match\":match,\"bandwidth_max\":['map',[[\"rate\",max_rate]]],\"bandwidth_min\":['map',[[\"min\",min_rate]]]},\r\n refer = [\"Logical_Switch_Port\", \"queue_rules\", [\"name\", \"==\", dst_lp_name]])\r\n res = db.insert(table = \"Queue\",\r\n row = {\"direction\":\"from-lport\",\"priority\":200,\"id_queue\":queue_id,\r\n \"match\":match,\"bandwidth_max\":['map',[[\"rate\",max_rate]]],\"bandwidth_min\":['map',[[\"min\",min_rate]]]},\r\n refer = [\"Logical_Switch_Port\", \"queue_rules\", [\"name\", \"==\", dst_lp_name]])\r\n print(res)\r\n return \"Success install new queue\",True\r\n elif type(queue_rule_list) == str:\r\n rule = queue_rule_list\r\n print(\"FIND2 %s\"%rule)\r\n get_source_lp = db.select(table = \"Queue\",\r\n # columns = ['_uuid',\"_uuid\"],\r\n where = [[\"_uuid\", \"==\", [\"uuid\",rule]]])\r\n print(json.dumps(get_source_lp,indent=4))\r\n if (get_source_lp[0]['match'] != match):\r\n # New Queue\r\n # Default 
Tunnel port name of HV\r\n port_tun = \"ovn-cca09a-0\"\r\n queue_new = [{\"max-rate\":str(max_rate),\"min-rate\":str(min_rate),\"burst\":\"0\"}]\r\n queue_ret = mod_request(port_tun,str(DEFAULT_OVN_BW),queue_new,src_ip)\r\n # match = 'tcp'\r\n queue_id = queue_ret[0][\"queue_id\"]\r\n res = db.insert(table = \"Queue\",\r\n row = {\"direction\":\"to-lport\",\"priority\":200,\"id_queue\":queue_id,\r\n \"match\":match,\"bandwidth_max\":['map',[[\"rate\",max_rate]]],\"bandwidth_min\":['map',[[\"min\",min_rate]]]},\r\n refer = [\"Logical_Switch_Port\", \"queue_rules\", [\"name\", \"==\", dst_lp_name]])\r\n res = db.insert(table = \"Queue\",\r\n row = {\"direction\":\"from-lport\",\"priority\":200,\"id_queue\":queue_id,\r\n \"match\":match,\"bandwidth_max\":['map',[[\"rate\",max_rate]]],\"bandwidth_min\":['map',[[\"min\",min_rate]]]},\r\n refer = [\"Logical_Switch_Port\", \"queue_rules\", [\"name\", \"==\", dst_lp_name]])\r\n print(res)\r\n \r\n return \"Success install new queue\",True\r\n if not mod:\r\n # print(\"DUP\")\r\n return \"Duplicate Request\", False\r\n # Modify exist queue\r\n res = db.update(table = \"Queue\",\r\n row = {\"bandwidth_max\":['map',[[\"rate\",max_rate]]],\"bandwidth_min\":['map',[[\"min\",min_rate]]]},\r\n where = [[\"_uuid\", \"==\", [\"uuid\",rule]]])\r\n print(res)\r\n return \"Success modify queue\",True\r\n\r\n \r\ndef put_path_find(src_ip,dst_ip):\r\n demand = {}\r\n demand['src_ip'] = src_ip\r\n demand['dst_ip'] = dst_ip \r\n\r\n x = requests.put(path_find_url, json = demand)\r\n # print(x.request.url)\r\n return x\r\n\r\ndef del_qos_all(port):\r\n ovsdb_server = 'tcp:192.168.0.116:6640'\r\n db = libovsdb.OVSDBConnection(ovsdb_server, \"Open_vSwitch\")\r\n\r\n get_port = db.select(table = \"Port\",\r\n columns = ['_uuid',\"qos\"],\r\n where = [[\"name\", \"==\", port]],)\r\n port_qos = get_port[0]['qos']\r\n print(port_qos)\r\n\r\n\r\n get_queue = db.select(table = \"QoS\",\r\n columns = ['_uuid',\"queues\"],\r\n where = [[\"_uuid\", \"==\", [\"uuid\",port_qos]]])\r\n \r\n if not get_queue:\r\n # self.logger.info(\"Queue not ref\")\r\n tx = db.transact()\r\n uuid = port_qos\r\n\r\n tx.delete(table = \"QoS\",\r\n where = [[\"_uuid\", \"==\", [\"uuid\",uuid]]])\r\n tx.mutate(table = \"Port\",\r\n where = [[\"name\", \"==\", port]],\r\n mutations = [tx.make_mutations(\"qos\", \"delete\", {\"uuid\": port_qos})])\r\n response = tx.commit()\r\n \r\n \r\n return\r\n\r\n # QOS ref delete\r\n tx = db.transact()\r\n uuid = port_qos\r\n\r\n tx.delete(table = \"QoS\",\r\n where = [[\"_uuid\", \"==\", [\"uuid\",uuid]]])\r\n tx.mutate(table = \"Port\",\r\n where = [[\"name\", \"==\", port]],\r\n mutations = [tx.make_mutations(\"qos\", \"delete\", {\"uuid\": port_qos})])\r\n response = tx.commit()\r\n\r\n for queue in get_queue[0]['queues']:\r\n queue_uuid = queue[1][1] \r\n res = db.delete(table = \"Queue\",\r\n where = [[\"_uuid\", \"==\", [\"uuid\",queue_uuid]]],\r\n referby = [\"QoS\", port_qos, \"queues\"])\r\n return\r\nif __name__ == '__main__':\r\n # x= get_switch(19)\r\n # x = get_switch_all()\r\n \r\n \r\n src_ip = '10.1.1.5'\r\n dst_ip = '10.2.1.5'\r\n min_rate = 1000000000\r\n max_rate = 2000000000\r\n path = [1,2,3]\r\n path = 'None'\r\n mod = 1\r\n # x = put_demand(path,src_ip,dst_ip,vni,max_rate,min_rate)\r\n # x = put_path_find(src_ip,dst_ip)\r\n # print(x.status_code)\r\n # # print(x.content)\r\n # parser = (json.loads(x.text))\r\n # print(json.dumps(parser,indent=4))\r\n # print(parser)\r\n\r\n\r\n virtual_topo = get_virtual_topo()\r\n # 
print(json.dumps(res[1],indent=4))\r\n # put_demand_vm(path,src_ip,dst_ip,max_rate,min_rate,None,virtual_topo)\r\n\r\n # print(res)\r\n\r\n # del_qos_all(\"tapbcd579a1-27\")\r\n # del_qos_all(\"tap35f82979-1d\")\r\n\r\n hv_ip = \"192.168.0.116\"\r\n res = get_bw_ovn_all(virtual_topo)\r\n # print(res)\r\n","repo_name":"trung-source/Vxlan_controller","sub_path":"WEB/web/source.py","file_name":"source.py","file_ext":"py","file_size_in_byte":25568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18301763539","text":"import visualise as vis\n\n\ndef run_random(algorithm, data, times=10000, print_lines=True, map=False,\n line=False, hist=False):\n \"\"\" runs a random algorithm multiple times\n\n :argument algorithm: the algorithm that is to be used, in this case of the greedy family\n :argument data: a class with all important static information about run, such as max_duration\n :argument times: times the algorithm is run\n :argument print_lines: determines if lines will be printed\n :argument map: determines if map will be printed\n :argument line: determines if line graph will be printed\n :argument hist: determines if histogram will be printed\n\n :returns best solution with the score and the generated lines.\n \"\"\"\n\n best_solution = None\n\n best_solutions_scores = []\n new_solutions_scores = []\n\n print(\"generating random solution...\", end='', flush=True)\n\n for i in range(times):\n new_solution = algorithm(data)\n\n if not best_solution:\n best_solution = new_solution\n elif new_solution.score > best_solution.score:\n best_solution = new_solution\n\n new_solutions_scores.append(new_solution.score)\n best_solutions_scores.append(best_solution.score)\n\n print(\"\\t DONE\")\n\n if print_lines:\n vis.print_results(algorithm, best_solution, data)\n\n if map:\n vis.draw_map(data, best_solution.lines)\n\n if hist:\n vis.hist(new_solutions_scores, best_solution.lines)\n\n if line:\n vis.plot_line(best_solutions_scores)\n\n\n return best_solution","repo_name":"Kvtulder/RailNL","sub_path":"code/run/run_random.py","file_name":"run_random.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8612187292","text":"from gi.repository import GLib, GObject, Gtk\n\nfrom tryton.common.selection import SelectionMixin\n\nfrom .widget import Widget\n\n\nclass MultiSelection(Widget, SelectionMixin):\n expand = True\n\n def __init__(self, view, attrs):\n super(MultiSelection, self).__init__(view, attrs)\n\n if int(attrs.get('yexpand', self.expand)):\n self.widget = Gtk.ScrolledWindow()\n self.widget.set_policy(\n Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)\n self.widget.set_shadow_type(Gtk.ShadowType.ETCHED_IN)\n else:\n self.widget = Gtk.VBox()\n self.widget.set_size_request(100, 100)\n self.widget.get_accessible().set_name(attrs.get('string', ''))\n\n self.model = Gtk.ListStore(GObject.TYPE_PYOBJECT, GObject.TYPE_STRING)\n self.tree = self.mnemonic_widget = Gtk.TreeView()\n self.tree.set_model(self.model)\n self.tree.set_search_column(1)\n self.tree.connect('focus-out-event', lambda *a: self._focus_out())\n self.tree.set_headers_visible(False)\n selection = self.tree.get_selection()\n selection.set_mode(Gtk.SelectionMode.MULTIPLE)\n selection.connect('changed', self.changed)\n self.widget.add(self.tree)\n name_column = Gtk.TreeViewColumn()\n name_cell = Gtk.CellRendererText()\n name_column.pack_start(name_cell, expand=True)\n 
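# Show model column 1 (the display string) in the single visible column;\n        # column 0 holds the raw Python value returned by get_value().\n        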
name_column.add_attribute(name_cell, 'text', 1)\n self.tree.append_column(name_column)\n\n self.nullable_widget = False\n self.init_selection()\n\n def _readonly_set(self, readonly):\n super(MultiSelection, self)._readonly_set(readonly)\n selection = self.tree.get_selection()\n selection.set_select_function(lambda *a: not readonly)\n\n @property\n def modified(self):\n if self.record and self.field:\n group = set(self.field.get_eval(self.record))\n value = set(self.get_value())\n return value != group\n return False\n\n def changed(self, selection):\n def focus_out():\n if self.widget.props.window:\n self._focus_out()\n # Must be deferred because it triggers a display of the form\n GLib.idle_add(focus_out)\n\n def get_value(self):\n model, paths = self.tree.get_selection().get_selected_rows()\n return [model[path][0] for path in paths]\n\n def set_value(self):\n self.field.set_client(self.record, self.get_value())\n\n def display(self):\n selection = self.tree.get_selection()\n selection.handler_block_by_func(self.changed)\n try:\n # Remove select_function to allow update,\n # it will be set back in the super call\n selection.set_select_function(lambda *a: True)\n self.update_selection(self.record, self.field)\n new_model = self.selection != [list(row) for row in self.model]\n if new_model:\n self.model.clear()\n if not self.field:\n return\n value2path = {}\n for idx, (value, name) in enumerate(self.selection):\n if new_model:\n self.model.append((value, name))\n value2path[value] = idx\n selection.unselect_all()\n values = self.field.get_eval(self.record)\n for value in values:\n selection.select_path(value2path[value])\n super(MultiSelection, self).display()\n finally:\n selection.handler_unblock_by_func(self.changed)\n","repo_name":"tryton/tryton-client","sub_path":"tryton/gui/window/view_form/view/form_gtk/multiselection.py","file_name":"multiselection.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","stars":178,"dataset":"github-code","pt":"61"} +{"seq_id":"72314464193","text":"# client_shared.py, shared client code\n\nimport os\nimport sys\nimport optparse\n\nimport logging\nl = logging.getLogger()\nl.setLevel(logging.WARNING)\n\nadddir = os.path.join(os.getcwd(), 'source')\nsys.path.insert(0, adddir)\nfrom lib.network import object_sharer as objsh\n\nparser = optparse.OptionParser()\nparser.add_option('-d', '--disable-io', default=False, action='store_true')\nparser.add_option('-p', '--port', type=int, default=objsh.PORT,\n help='Port to connect to')\nparser.add_option('--name', default='',\n help='QTlab instance name to connect to, should be auto-detected')\nparser.add_option('--host', default='localhost',\n help='Host to connect to')\nparser.add_option('--module', default=None,\n help='Client module to import')\nparser.add_option('--config', default=None,\n help='Set config file to use, defaults to _.cfg')\nparser.add_option('--debug', default=False, action='store_true',\n help='Enable debugging mode, ie more logging')\n\ndef process_args():\n args, pargs = parser.parse_args()\n\n if args.config is None:\n args.config = os.path.split(sys.argv[0])[-1]\n if args.config.endswith('.py'):\n args.config = args.config[:-3]\n if args.module:\n args.config += '_' + args.module + '.cfg'\n\n if args.config:\n import lib.config as cfg\n global config\n config = cfg.create_config(args.config)\n if args.name:\n config['instance_name'] = args.name\n if args.debug:\n logging.getLogger().setLevel(logging.DEBUG)\n\n return args, pargs\n\ndef 
close_client_cb():\n pass\n\n","repo_name":"heeres/qtlab","sub_path":"clients/client_shared.py","file_name":"client_shared.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"61"} +{"seq_id":"16795810076","text":"# -*- coding: utf-8 -*-\n\nfrom colored import fg, attr\nimport pyfiglet\n\nfrom cmdb_population import population\n\n\"\"\"\n Color definition.\n\"\"\"\nblue = fg('#46B1C9')\nred = fg('#B54653')\ngreen = fg('#86DEB7')\nreset = attr('reset')\n\n\ndef run_population(db_info, cmdb_info):\n \"\"\"\n Executes the population of the CMDB. \n\n Parameters\n -------\n db_info : dict\n The information about the database.\n\n cmdb_info : dict\n The information about the CMDB.\n \"\"\"\n open_message = pyfiglet.figlet_format(\n \"Population Phase\", font=\"small\")\n print()\n print(\n \"\\033[1m**********************************************************************\\033[0m\")\n print(open_message)\n print(\n \"\\033[1m**********************************************************************\\033[0m\\n\")\n\n population.run_cmdb_population(db_info, cmdb_info)\n","repo_name":"joanapereira115/cmdb-auto-creation","sub_path":"src/population_phase.py","file_name":"population_phase.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"39993713779","text":"import wx\nimport uliedit_gui\nimport os\nimport tempfile\nimport sys\nimport codecs\n\nclass PrintDialog:\n def __init__(self, parent, content, title):\n self.parent = parent\n self.content = content\n self.title = os.path.basename(title)\n self.form = uliedit_gui.PrintDialog(parent)\n self.initFields()\n self.bindEvents()\n self.form.btnPrint.SetFocus()\n self.form.ShowModal()\n\n\n def initFields(self):\n enviroment_command = os.environ.get('PRINT_CMD')\n if enviroment_command:\n self.form.txtCommand.SetValue(enviroment_command)\n elif sys.platform == 'win32':\n self.form.txtCommand.SetValue(\"notepad /P %1\")\n else:\n self.form.txtCommand.SetValue(\"lpr -o portrait -J \\\"$title\\\" %1\")\n \n self.form.txtNumberOfCopies.SetValue(1)\n\n\n\n def onCancel(self, evt):\n self.form.Close()\n self.parent.txtContent.SetFocus()\n\n def onPrint(self, evt):\n count = self.form.txtNumberOfCopies.GetValue()\n command = self.form.txtCommand.GetValue()\n command = command\n \n if command == \"\":\n wx.MessageDialog(self.form, \"Please Enter a printing command\",\n \"Print Error\",\n wx.ICON_ERROR | wx.OK).ShowModal()\n return\n\n tmp_file = tempfile.NamedTemporaryFile(delete = False)\n filename = tmp_file.name\n tmp_file.close()\n handle = codecs.open(filename, \"wb\", encoding = 'utf8')\n handle.write(self.content)\n handle.close()\n\n # Per lpr-Befehl $count Kopien drucken\n real_command = command.replace(\"%1\", filename)\n real_command = real_command.replace(\"$title\", self.title)\n real_command = real_command.encode(\"utf8\")\n \n for i in range(1, count + 1):\n os.system(real_command)\n \n os.unlink(filename)\n self.form.Close()\n\n \n \n \n \n def OnKeyUP(self, event): \n keyCode = event.GetKeyCode() \n if keyCode == wx.WXK_ESCAPE:\n self.onCancel(None)\n elif keyCode == wx.WXK_RETURN:\n self.onPrint(None) \n \n\n def bindEvents(self):\n self.form.btnCancel.Bind(wx.EVT_BUTTON, self.onCancel)\n self.form.btnPrint.Bind(wx.EVT_BUTTON, self.onPrint)\n self.form.Bind(wx.EVT_CHAR_HOOK, 
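# onPrint above fills the user-supplied command template: %1 becomes the temp-file
# path and $title the document title, then the command runs once per copy. The
# substitution on its own, without the wx plumbing (build_command is an illustrative
# helper; quoting the path guards against spaces that os.system would otherwise split):
import shlex

def build_command(template, path, title):
    return template.replace("$title", title).replace("%1", shlex.quote(path))

print(build_command('lpr -o portrait -J "$title" %1', "/tmp/print-job.txt", "notes.txt"))
# lpr -o portrait -J "notes.txt" /tmp/print-job.txt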
self.OnKeyUP)\n\n\n","repo_name":"derUli/ulieditpro","sub_path":"src/uliedit_print_dialog.py","file_name":"uliedit_print_dialog.py","file_ext":"py","file_size_in_byte":2881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"8058445290","text":"import os\nfrom restore.mysql import MySql\n\ndef file_name(file_dir):\n    for pwd,dirs,files in os.walk(file_dir):\n        return pwd,files\n\ndef is_chinese(string):\n    \"\"\"\n    Check whether the string contains any Chinese characters.\n    :param string: the string to check\n    :return: bool\n    \"\"\"\n    for ch in string:\n        if u'\\u4e00' <= ch <= u'\\u9fff':\n            return True\n    return False\n\npwd,list=file_name('../book_info/《鬼吹灯之龙岭迷窟》')\n\ntrue_path=\"D:/Web_Book/\"+pwd[3:]+'/'\n\nwrite=MySql('122.51.168.67','root','IRVing777!','Django_Book')\n\n#write.sql('insert into Book_Chapter(book_name,chapter_name,chapter_path)')\n#\n# for i in list:\n#     for index,j in enumerate(i):\n#         if j == '龙' and i[index-1].isdigit():\n#             string='insert into Book_chapter(book_name,chapter_name,chapter_path,chapter_id) ' \\\n#                    'values(\\'《鬼吹灯之龙岭迷窟》\\',\\'{0}\\',\\'{1}\\',{2})'.format(i[index:],true_path+i,i[:index])\n#             #print(i[:index])\n#             #print(string)\n#             write.sql(string)\n\nwith open('../book_info/《鬼吹灯之龙岭迷窟》/0天下霸唱',encoding='utf-8') as f:\n    brief=f.read(100)\n\nstring2='insert into Book_book(book_name,book_brief) values(\\'{0}\\',\\'{1}\\')'.\\\n    format('《鬼吹灯之龙岭迷窟》',brief+'...')\nprint(brief+'...')\n# write.sql(string2)\n","repo_name":"Jamly777/GCDAPIs","sub_path":"restore/book.py","file_name":"book.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"4303095065","text":"\"\"\"Handle sample state.\"\"\"\nimport asyncio\nimport json\nimport logging\nfrom abc import ABC, abstractmethod\n\nimport voluptuous as vol\n\nfrom camacq.event import Event\nfrom camacq.exceptions import SampleError\nfrom camacq.helper import BASE_ACTION_SCHEMA\nfrom camacq.util import dotdict\n\n_LOGGER = logging.getLogger(__name__)\nSAMPLE_EVENT = \"sample_event\"\nSAMPLE_IMAGE_SET_EVENT = \"sample_image_set_event\"\n\nACTION_SET_SAMPLE = \"set_sample\"\nSET_SAMPLE_ACTION_SCHEMA = BASE_ACTION_SCHEMA.extend(\n    {\"sample_name\": vol.Coerce(str)}, extra=vol.ALLOW_EXTRA\n)\nBASE_SET_SAMPLE_ACTION_SCHEMA = BASE_ACTION_SCHEMA.extend(\n    {vol.Required(\"name\"): vol.Coerce(str), \"values\": dict}\n)\n\nACTION_TO_METHOD = {\n    ACTION_SET_SAMPLE: {\"method\": \"set_sample\", \"schema\": SET_SAMPLE_ACTION_SCHEMA},\n}\n\n\nasync def setup_module(center, config):\n    \"\"\"Set up sample module.\n\n    Parameters\n    ----------\n    center : Center instance\n        The Center instance.\n    config : dict\n        The config dict.\n    \"\"\"\n\n    async def handle_action(**kwargs):\n        \"\"\"Handle action call to add a state to the sample.\n\n        Parameters\n        ----------\n        **kwargs\n            Arbitrary keyword arguments. 
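# book.py above builds its INSERT statements with str.format, which breaks on quotes
# in the data and invites SQL injection. A parameterized variant, sketched with
# sqlite3 as a stand-in driver (the chapter values are placeholders):
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE Book_chapter (book_name TEXT, chapter_name TEXT, chapter_path TEXT, chapter_id INTEGER)")
conn.execute(
    "INSERT INTO Book_chapter (book_name, chapter_name, chapter_path, chapter_id) VALUES (?, ?, ?, ?)",
    ("《鬼吹灯之龙岭迷窟》", "chapter-name", "D:/Web_Book/book_info/chapter-path", 1),
)
conn.commit()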
These will be passed to a\n method when an action is called.\n \"\"\"\n action_id = kwargs.pop(\"action_id\")\n method = ACTION_TO_METHOD[action_id][\"method\"]\n sample_name = kwargs.pop(\"sample_name\", None)\n silent = kwargs.pop(\"silent\", False)\n if sample_name:\n samples = [center.samples[sample_name]]\n else:\n samples = list(center.samples.values())\n tasks = []\n for sample in samples:\n try:\n kwargs = sample.set_sample_schema(kwargs)\n except vol.Invalid as exc:\n _LOGGER.log(\n logging.DEBUG if silent else logging.ERROR,\n \"Invalid action call parameters %s: %s for action: %s.%s\",\n kwargs,\n exc,\n \"sample\",\n action_id,\n )\n continue\n\n _LOGGER.debug(\n \"Handle sample %s action %s: %s\", sample.name, action_id, kwargs\n )\n tasks.append(center.create_task(getattr(sample, method)(**kwargs)))\n if tasks:\n await asyncio.wait(tasks)\n\n for action_id, options in ACTION_TO_METHOD.items():\n schema = options[\"schema\"]\n center.actions.register(\"sample\", action_id, handle_action, schema)\n\n\nclass Samples(dotdict):\n \"\"\"Hold all samples.\"\"\"\n\n # pylint: disable=too-few-public-methods\n\n def __getattr__(self, sample_name):\n \"\"\"Get a sample by name.\"\"\"\n try:\n return self[sample_name]\n except KeyError as exc:\n raise SampleError(f\"Unable to get sample with name {sample_name}\") from exc\n\n\ndef register_sample(center, sample):\n \"\"\"Register sample.\"\"\"\n sample.center = center\n sample.data = {}\n center.bus.register(sample.image_event_type, sample.on_image)\n center.samples[sample.name] = sample\n\n\nclass ImageContainer(ABC):\n \"\"\"A container for images.\"\"\"\n\n @property\n @abstractmethod\n def change_event(self):\n \"\"\":Event: Return an event class to fire on container change.\"\"\"\n\n @property\n @abstractmethod\n def images(self):\n \"\"\":dict: Return a dict with all images for the container.\"\"\"\n\n @property\n @abstractmethod\n def name(self):\n \"\"\":str: Return an identifying name for the container.\"\"\"\n\n @property\n @abstractmethod\n def values(self):\n \"\"\":dict: Return a dict with the values set for the container.\"\"\"\n\n\nclass Sample(ImageContainer, ABC):\n \"\"\"Representation of the state of the sample.\"\"\"\n\n center = None\n data = None\n\n @property\n @abstractmethod\n def image_event_type(self):\n \"\"\":str: Return the image event type to listen to for the sample.\"\"\"\n\n @property\n @abstractmethod\n def name(self):\n \"\"\":str: Return the name of the sample.\"\"\"\n\n @property\n @abstractmethod\n def set_sample_schema(self):\n \"\"\"Return the validation schema of the set_sample method.\"\"\"\n\n @abstractmethod\n async def on_image(self, center, event):\n \"\"\"Handle image event for this sample.\"\"\"\n\n def get_sample(self, name, **kwargs):\n \"\"\"Get an image container of the sample.\n\n Parameters\n ----------\n name : str\n The name of the container type.\n **kwargs\n Arbitrary keyword arguments.\n These will be used to create the id string of the container.\n\n Returns\n -------\n ImageContainer instance\n Return the found ImageContainer instance.\n \"\"\"\n id_string = json.dumps({\"name\": name, **kwargs})\n return self.data.get(id_string)\n\n async def set_sample(self, name, values=None, **kwargs):\n \"\"\"Set an image container of the sample.\n\n Parameters\n ----------\n name : str\n The name of the container type.\n values : dict\n The optional values to set on the container.\n **kwargs\n Arbitrary keyword arguments.\n These will be used to create the id string of the container.\n\n 
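# set_sample above keys containers by json.dumps({"name": name, **kwargs}), so the
# id string is just the serialized dict. Note that dicts preserve insertion order and
# json.dumps does not sort keys by default, so callers must pass kwargs consistently
# for lookups to hit (the plate/well kwargs here are illustrative):
import json

id_string = json.dumps({"name": "well", "plate": "P1", "well": "A1"})
other = json.dumps({"name": "well", "well": "A1", "plate": "P1"})
print(id_string)           # {"name": "well", "plate": "P1", "well": "A1"}
print(id_string == other)  # False: same data, different key order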
Returns\n -------\n ImageContainer instance\n Return the ImageContainer instance that was updated.\n \"\"\"\n id_string = json.dumps({\"name\": name, **kwargs})\n values = values or {}\n container = self.data.get(id_string)\n event = None\n\n if container is None:\n container = await self._set_sample(name, values, **kwargs)\n event_class = container.change_event\n event = event_class({\"container\": container})\n\n container.values.update(values)\n self.data[id_string] = container\n\n if name == \"image\":\n self.images[container.path] = container\n\n if not event and values:\n event_class = container.change_event\n event = event_class({\"container\": container})\n\n if event:\n await self.center.bus.notify(event)\n return container\n\n @abstractmethod\n async def _set_sample(self, name, values, **kwargs):\n \"\"\"Set an image container of the sample.\n\n Parameters\n ----------\n name : str\n The name of the container type.\n values : dict\n The values to set on the container.\n **kwargs\n Arbitrary keyword arguments.\n\n Returns\n -------\n ImageContainer instance\n Return the ImageContainer instance that was updated.\n \"\"\"\n\n\nclass Image(ImageContainer):\n \"\"\"An image with path and position info.\"\"\"\n\n def __init__(self, path, values=None, **kwargs):\n \"\"\"Set up instance.\"\"\"\n self._path = path\n self._values = values or {}\n for attr, val in kwargs.items():\n setattr(self, attr, val)\n\n def __repr__(self):\n \"\"\"Return the representation.\"\"\"\n return f\"\"\n\n @property\n def change_event(self):\n \"\"\":Event: Return an event class to fire on container change.\"\"\"\n return SampleImageSetEvent\n\n @property\n def images(self):\n \"\"\":dict: Return a dict with all images for the container.\"\"\"\n return {self.path: self}\n\n @property\n def name(self):\n \"\"\":str: Return an identifying name for the container.\"\"\"\n return \"image\"\n\n @property\n def path(self):\n \"\"\":str: Return the path of the image.\"\"\"\n return self._path\n\n @property\n def values(self):\n \"\"\":dict: Return a dict with the values set for the container.\"\"\"\n return self._values\n\n\nclass SampleEvent(Event):\n \"\"\"An event produced by a sample change event.\"\"\"\n\n __slots__ = ()\n\n event_type = SAMPLE_EVENT\n\n @property\n def container(self):\n \"\"\":ImageContainer instance: Return the container instance of the event.\"\"\"\n return self.data.get(\"container\")\n\n @property\n def container_name(self):\n \"\"\":str: Return the container name of the event.\"\"\"\n return self.container.name\n\n @property\n def images(self):\n \"\"\":dict: Return the container images of the event.\"\"\"\n return self.container.images\n\n @property\n def values(self):\n \"\"\":dict: Return the container values of the event.\"\"\"\n return self.container.values\n\n def __repr__(self):\n \"\"\"Return the representation.\"\"\"\n data = {\"container\": self.container}\n return f\"{type(self).__name__}(data={data})\"\n\n\nclass SampleImageSetEvent(SampleEvent):\n \"\"\"An event produced by a new image on the sample.\"\"\"\n\n __slots__ = ()\n\n event_type = SAMPLE_IMAGE_SET_EVENT\n\n\ndef get_matched_samples(sample, name, attrs=None, values=None):\n \"\"\"Return the sample items that match.\"\"\"\n attrs = attrs or {}\n values = values or {}\n items = [\n cont\n for cont in sample.data.values()\n if cont.name == name\n and (\n not attrs\n or all(getattr(cont, attr, None) == val for attr, val in attrs.items())\n )\n and (\n not values\n or all(cont.values.get(key) == val for key, val in 
values.items())\n )\n ]\n return items\n","repo_name":"CellProfiling/cam_acq","sub_path":"camacq/plugins/sample/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9358,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"16587055380","text":"from __future__ import division, print_function, unicode_literals\n\nimport sys\nimport os.path\nfrom torch import nn\n\n\ndef fwrite(new_doc, path, mode='w'):\n with open(path, mode) as f:\n f.write(new_doc)\n\n\ndef shell(cmd, working_directory='.', stdout=False, stderr=False):\n import subprocess\n from subprocess import PIPE, Popen\n\n subp = Popen(cmd, shell=True, stdout=PIPE,\n stderr=subprocess.STDOUT, cwd=working_directory)\n subp_stdout, subp_stderr = subp.communicate()\n\n if subp_stdout: subp_stdout = subp_stdout.decode(\"utf-8\")\n if subp_stderr: subp_stderr = subp_stderr.decode(\"utf-8\")\n\n if stdout and subp_stdout:\n print(\"[stdout]\", subp_stdout, \"[end]\")\n if stderr and subp_stderr:\n print(\"[stderr]\", subp_stderr, \"[end]\")\n\n return subp_stdout, subp_stderr\n\n","repo_name":"DuanXu-97/RelationExtraction","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5870318718","text":"#!/usr/bin/env python3\nimport RPi.GPIO as GPIO\nimport time\n\n# Define los colores en modo hexadecimal\nCOLORS = [0xFF0000, 0x00FF00, 0x0000FF, 0x00FFFF, 0xFF00FF, 0xFFFF00, 0xFFFFFF,\n 0xB695C0]\n# Establece los pines conforme a GPIO\npins = {'Red': 22, 'Green': 17, 'Blue': 27}\n\n\ndef mapea(x, in_min, in_max, out_min, out_max):\n return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min\n\n\ndef set_color(color):\n # calcula el valor para cada canal R, G, B\n R_val = (color & 0xFF0000) >> 16\n G_val = (color & 0x00FF00) >> 8\n B_val = (color & 0x0000FF) >> 0\n\n # Convierte el color de 0~255 a 0 ó 1 (entero)\n R_val = int(mapea(R_val, 0, 255, 0, 1))\n G_val = int(mapea(G_val, 0, 255, 0, 1))\n B_val = int(mapea(B_val, 0, 255, 0, 1))\n\n # asigna a cada pin el valor calculado\n GPIO.output(pins['Red'], R_val)\n GPIO.output(pins['Green'], G_val)\n GPIO.output(pins['Blue'], B_val)\n\n # imprime los resultados\n print(\"R_val = %s, G_val = %s, B_val = %s\" % (R_val, G_val, B_val))\n\n\n# quita la tensión en cada uno de los pines\ndef reset():\n for i in pins:\n GPIO.output(pins[i], 0)\n\n\ndef main():\n try:\n # Establece el modo, en este caso a los valores GPIO\n GPIO.setmode(GPIO.BCM)\n for i in pins:\n # configura los pines como de salida\n GPIO.setup(pins[i], GPIO.OUT, initial=GPIO.HIGH)\n reset()\n while True:\n for color in COLORS:\n set_color(color)\n time.sleep(2)\n reset()\n except Exception as e:\n print(e)\n finally:\n # Release resource\n reset()\n GPIO.cleanup()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"atareao/raspberrypi","sub_path":"led-rgb/rgb.py","file_name":"rgb.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"3057630382","text":"import json\nimport os\nfrom json import JSONDecodeError\n\nfrom botocore.exceptions import ClientError\nfrom enums.metadata_field_names import DocumentReferenceMetadataFields\nfrom models.document_reference import DocumentReference\nfrom pydantic import ValidationError\nfrom services.document_service import DocumentService\nfrom 
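# set_color above unpacks a 24-bit color with masks and shifts, then maps 0..255 down
# to 0..1 before driving the pins. Worked through for 0xB695C0:
color = 0xB695C0
R_val = (color & 0xFF0000) >> 16  # 0xB6 = 182
G_val = (color & 0x00FF00) >> 8   # 0x95 = 149
B_val = (color & 0x0000FF) >> 0   # 0xC0 = 192
# int(mapea(v, 0, 255, 0, 1)) truncates, so every channel below 255 collapses to 0
# and only a full 0xFF channel switches a pin on:
print(int(182 * 1 / 255), int(255 * 1 / 255))  # 0 1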
utils.audit_logging_setup import LoggingService\nfrom utils.exceptions import DynamoServiceException\nfrom utils.lambda_exceptions import DocumentRefSearchException\n\nlogger = LoggingService(__name__)\n\n\nclass DocumentReferenceSearchService(DocumentService):\n def get_document_references(self, nhs_number: str):\n try:\n list_of_table_names = json.loads(os.environ[\"DYNAMODB_TABLE_LIST\"])\n\n results: list[dict] = []\n for table_name in list_of_table_names:\n logger.info(f\"Searching for results in {table_name}\")\n documents: list[\n DocumentReference\n ] = self.fetch_documents_from_table_with_filter(\n nhs_number,\n table_name,\n attr_filter={DocumentReferenceMetadataFields.DELETED.value: \"\"},\n )\n\n results.extend(\n document.model_dump(\n include={\"file_name\", \"created\", \"virus_scanner_result\"},\n by_alias=True,\n )\n for document in documents\n )\n return results\n except (\n JSONDecodeError,\n ValidationError,\n ClientError,\n DynamoServiceException,\n ) as e:\n logger.error(\n f\"An error occurred when using document reference search service: {str(e)}\",\n )\n raise DocumentRefSearchException(\n 500, \"An error occurred when searching for available documents\"\n )\n","repo_name":"nhsconnect/national-document-repository","sub_path":"lambdas/services/document_reference_search_service.py","file_name":"document_reference_search_service.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6505172167","text":"class Solution:\n def isSubsequence(self, s: str, t: str) -> bool:\n if not s:\n return True\n ans = \"\"\n i_s = 0\n i_t = 0\n while i_t < len(t) and i_s < len(s):\n if s[i_s] == t[i_t]:\n ans += s[i_s]\n i_s += 1\n i_t += 1\n return ans == s\n\nprint(Solution().isSubsequence(\"\", \"ahbgdc\"))","repo_name":"akimov246/leetcode","sub_path":"392. Is Subsequence.py","file_name":"392. Is Subsequence.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20568408646","text":"# bereken de som van een reeks getallen: invoer stopt bij ingave 0\n\nsom = 0\ngetal = int(input(\"geef een geheel getal in\"))\nwhile getal != 0: # while betekent zolang\n som += getal # som = som + getal\n getal = int(input(\"geef een geheel getal in\")) # deze invoer na da lus altijd zetten\nprint(\"de som van deze getallen is\", som) # de som helemaal onder aan afdrukken\n\n# extra opgave: bereken het product van een reeks getallen. 
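# isSubsequence above is a two-pointer scan over s and t. An equivalent formulation
# uses one shared iterator, since `c in it` consumes t up to and including the match
# (a reformulation for comparison, not the original solution):
def is_subsequence(s, t):
    it = iter(t)
    return all(c in it for c in s)

assert is_subsequence("abc", "ahbgdc")
assert not is_subsequence("axc", "ahbgdc")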
Invoer stopt bij ingave 0.\n\nprod = 1 # product op 1 initialiseren en niet 0, want anders is de uitkomst altijd 0 want 0 * iets = 0\ngetal = int(input(\"geef een geheel getal in\"))\nwhile getal != 0: # while betekent zolang\n prod *= getal # prod = prod* getal\n getal = int(input(\"geef een geheel getal in\"))\nprint(\"de som van deze getallen is\", som)\n","repo_name":"SemihAltintasPXL/PXLToegepast-Informatica","sub_path":"Vakken_eerste_jaar/IT-Essentials/IT-Essentials-oefeningen/4_iteraties/voorbeelden/opdracht_4.14a.py","file_name":"opdracht_4.14a.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"nl","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"39560368171","text":"def lerElementoDoTabuleiro():\n try:\n resultado = float(input())\n return resultado;\n except EOFError as e : pass\n\ndef ehSoma():\n resultado = input()\n if(resultado == 'S'):\n return True;\n elif(resultado == 'M'):\n return False;\n else:\n return ehSoma()\n\ndef construirTabuleiro(tamanhoLinha, tamanhoColuna):\n#{\n resultado = [];\n for i in range(tamanhoColuna):\n #{\n arrayAuxiliarQueSeRenovaACadaIteracao = []\n for j in range(tamanhoLinha):\n #{\n novoNumeroDoTabuleiro = lerElementoDoTabuleiro()\n arrayAuxiliarQueSeRenovaACadaIteracao.append(novoNumeroDoTabuleiro)\n #}\n resultado.append(arrayAuxiliarQueSeRenovaACadaIteracao)\n #}\n return resultado;\n#} \n\ndef construirArrayResultante(arrayOriginal):\n posicaoLinha = 0\n resultado_array = []\n for posicaoColuna in range(len(arrayOriginal[posicaoLinha]) - posicaoLinha - 1): # percorrer um caminho com tamanho_total - posicao_atual [12-0 = 12 elementos]..[12-1 = 11 elementos]..[12-2]....\n tamanhoADireita = len(arrayOriginal[posicaoColuna])\n for colunaAtual in range(posicaoLinha+1, tamanhoADireita): # percorrer os elementos à direita da diagonal\n #começando da posição da linha atual/coluna, sempre pulando a diagonal. (por isso o posicaoLinha+1)\n #posicaoLinha é a posição do elemento da diagonal atual\n if(colunaAtual < len(arrayOriginal[posicaoColuna])): # verificação para não extrapolar meu array. vou percorrer tudo à minha direita, desde q eu não extrapole meu array\n resultado_array.append(arrayOriginal[posicaoLinha][colunaAtual])\n posicaoLinha = posicaoLinha + 1; #forçando a sempre pular linha e coluna ao mesmo tempo. De modo a obter a posição da minha diagonal. \n return resultado_array #O primeiro for pula as colunas. E essa linha de cód. 
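# construirArrayResultante above collects everything strictly above the main
# diagonal: skip the diagonal element, walk right from it, and advance row and column
# together. A compact check on a 3x3 stand-in for the 12x12 board:
m = [[1, 2, 3],
     [4, 5, 6],
     [7, 8, 9]]
above = [m[i][j] for i in range(len(m)) for j in range(i + 1, len(m))]
print(sum(above), sum(above) / len(above))  # 11 3.666...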
força eu pular a linha\n\ndef printarResultado(ehMedia, arrayResultante):\n resultado = sum(arrayResultante)\n if(ehMedia == True):\n resultado = resultado / len(arrayResultante)\n print(\"{:.1f}\".format(resultado))\n\ndevoTirarMedia = ehSoma() == False\ntabuleiro = construirTabuleiro(12, 12)\ntabuleiro = construirArrayResultante(tabuleiro)\nprintarResultado(devoTirarMedia, tabuleiro)","repo_name":"nataliaRabelo/URI-Beecrowd","sub_path":"1183.py","file_name":"1183.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42219078149","text":"from pandas import DataFrame\nimport matplotlib.pyplot as plt\n\ndados = {\n 'x': [25,34,22,27,33,33,31,22,35,34,67,54,57,43,50,57,59,52,65,47,49,48,35,33,44,45,38,43,51,46],\n 'y': [79,51,53,78,59,74,73,57,69,75,51,32,40,47,53,36,35,58,59,50,25,20,14,12,20,5,29,27,8,7]\n}\n\ndf = DataFrame(dados, columns=['x','y'])\nprint(df.head())\n\n#Definir o modelo a utilizar\nfrom sklearn.cluster import KMeans\n\nkmeans = KMeans(n_clusters=2)\nkmeans.fit(df)\ncentroides = kmeans.cluster_centers_\nprint(centroides)\n\n#Realizar o plot\nplt.scatter(\n df['x'],\n df['y'],\n c= kmeans.labels_.astype(float),\n s=50,\n alpha=0.5\n)\nplt.scatter(\n centroides[:,0],\n centroides[:,1],\n c='red',\n s=50\n)\nplt.xlabel('x')\nplt.ylabel('y')\nplt.show()","repo_name":"xgabrielrf/igti-python","sub_path":"modulo4/k-means_01.py","file_name":"k-means_01.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71251905796","text":"from pathlib import Path\n\n# Some paths to store data\nDATA_ROOT = Path(__file__).parent.parent.absolute() / \"data\"\nDATA_JSON = DATA_ROOT / \"json\"\nDATA_XML = DATA_ROOT / \"xml\"\n\nBUNDESTAG_BASE_URL = \"https://www.bundestag.de\"\nBUNDESTAG_OPENDATA_URL = BUNDESTAG_BASE_URL + \"/services/opendata\"\nIDS_PROTOCOL_CONTAINER: list[tuple[str, str]] = [\n (\"bt-collapse-866354\", \"pp20\"),\n (\"bt-collapse-543410\", \"pp19\"),\n]\n\nSPEECH_XPATH = (\n \"//p[@klasse => contains('J')][not(preceding-sibling::*[1]/name() = 'name')]/text()\"\n)\n","repo_name":"Bpolitycki/gerpar-scraper","sub_path":"src/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42281706670","text":"def shiftKey(key):\n new_key = [0 for x in range(4)]\n for i in range(4):\n new_key[i] = key[(i+1)%4]\n return new_key\n\ndef subKey(word, sbox):\n new_word = [0 for x in range(4)]\n for i in range(4):\n new_word[i] = sbox[\"0x{:02x}\".format(int(word[i], 16))] \n return [\"0x{:02x}\".format(int(elem, 16)) for elem in new_word]\n\ndef g(word, sbox, rcon):\n new_word = [0 for x in range(4)]\n word = shiftKey(word)\n word = subKey(word, sbox)\n for i in range(4):\n new_word[i] = hex(int(word[i], 16)^int(rcon[i],16))\n\n return [\"0x{:02x}\".format(int(elem, 16)) for elem in new_word]\n\n#Expands a 128-bit key into a set of round keys for AES encryption.\ndef keyGeneration(key, sbox):\n key_arr = [[0 for x in range(4)] for x in range(44)]\n rcon = [['0x01', '0x00', '0x00', '0x00'], \n ['0x02', '0x00', '0x00', '0x00'], \n ['0x04', '0x00', '0x00', '0x00'],\n ['0x08', '0x00', '0x00', '0x00'],\n ['0x10', '0x00', '0x00', '0x00'],\n ['0x20', '0x00', '0x00', '0x00'],\n ['0x40', '0x00', '0x00', '0x00'],\n ['0x80', '0x00', '0x00', '0x00'],\n ['0x1b', '0x00', '0x00', 
'0x00'],\n ['0x36', '0x00', '0x00', '0x00']]\n\n for i in range(4):\n for j in range(4):\n key_arr[i][j] = key[i][j]\n \n for i in range(4,44,4):\n x = g(key_arr[i-1], sbox, rcon[(i//4)-1])\n for k in range(4):\n for j in range(4):\n if (k == 0):\n key_arr[i+k][j] = int(key_arr[i+k-4][j], 16)^int(g(key_arr[i-1], sbox, rcon[(i//4)-1])[j], 16)\n else:\n key_arr[i+k][j] = int(key_arr[i+k-1][j], 16)^int(key_arr[i+k-4][j], 16)\n key_arr[i+k][j] = \"0x{:02x}\".format(key_arr[i+k][j])\n return key_arr","repo_name":"devayanisk/AES","sub_path":"RoundKey.py","file_name":"RoundKey.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10603868608","text":"import json\nimport boto3\nimport logging\nimport os\nimport traceback\nfrom backend.logging.logger import safeLogger\n\nlogger = safeLogger(child=True)\n\nregion = os.environ['AWS_REGION']\ndynamodb = boto3.resource('dynamodb', region_name=region)\ntable = dynamodb.Table(os.environ['TABLE_NAME'])\n\n\ndef lambda_handler(event, context):\n\n # read the key from the dynamodb table and return the set of values in a json object\n try:\n response = table.get_item(\n Key={\n 'entityType': 'claims',\n 'sk': 'observed_claims',\n },\n )\n username_as_group = event['requestContext']['authorizer']['jwt']['claims']['cognito:username']\n addl_groups = [username_as_group, \"vams:all_users\"]\n if 'Item' not in response or 'claims' not in response['Item']:\n claims = {\"claims\": addl_groups}\n else:\n claims = {\"claims\": list(response['Item']['claims']) + addl_groups}\n\n claims['claims'] = sorted(list(set(claims['claims'])))\n\n return {\n 'statusCode': 200,\n 'body': json.dumps(claims)\n }\n except Exception as e:\n logger.error(traceback.format_exc(), event)\n return {\n 'statusCode': 500,\n 'body': json.dumps(e)\n }\n","repo_name":"awslabs/visual-asset-management-system","sub_path":"backend/backend/handlers/auth/groups.py","file_name":"groups.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"61"} +{"seq_id":"73875712515","text":"# pytest configuration file\n\nimport os\n\nimport pytest\nfrom sqlalchemy.ext.asyncio import AsyncSession, create_async_engine\nfrom sqlalchemy.orm import sessionmaker\n\nimport ispyb_graphql\nfrom ispyb_graphql.api import permissions\n\n\n@pytest.fixture()\nasync def testdb(monkeypatch):\n config_file = os.path.abspath(os.path.join(os.path.dirname(__file__), \"db.cfg\"))\n if not os.path.exists(config_file):\n pytest.skip(\n \"No configuration file for test database found. 
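# g() above rotates the previous key word left by one byte, substitutes each byte
# through the S-box, then XORs in the round constant. Traced here with an identity
# S-box standing in for the real AES S-box (illustrative only, not a real schedule):
word = [0x01, 0x02, 0x03, 0x04]
rcon = [0x01, 0x00, 0x00, 0x00]
rotated = word[1:] + word[:1]      # shiftKey: [0x02, 0x03, 0x04, 0x01]
substituted = rotated              # identity S-box in place of subKey
out = [s ^ r for s, r in zip(substituted, rcon)]
print([hex(b) for b in out])       # ['0x3', '0x3', '0x4', '0x1']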
Skipping database tests\"\n )\n monkeypatch.setenv(\"ISPYB_CREDENTIALS\", config_file)\n\n SQLALCHEMY_DATABASE_URL = ispyb_graphql.database.get_database_url(\n connector=\"asyncmy\"\n )\n engine = create_async_engine(SQLALCHEMY_DATABASE_URL, future=True)\n SessionLocal = sessionmaker(engine, class_=AsyncSession)\n monkeypatch.setattr(ispyb_graphql.database, \"SessionLocal\", SessionLocal)\n return config_file\n\n\n@pytest.fixture\ndef mock_authentication(mocker):\n mocker.patch.object(\n permissions.IsAuthenticatedForProposal, \"has_permission\", return_value=True\n )\n mocker.patch.object(\n permissions.IsAuthenticatedForVisit, \"has_permission\", return_value=True\n )\n mocker.patch.object(\n permissions.IsAuthenticatedForBeamline, \"has_permission\", return_value=True\n )\n","repo_name":"rjgildea/strawberry-ispyb","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19725672064","text":"\"\"\"\nWrapper for torch.nn.functional which provides allows weights to be modified\nbefore they are used.\n\"\"\"\n\nimport torch\n\n\nclass QuantiseFunction(torch.autograd.Function):\n \"\"\"\n Function with both a forward and a backward pass. I use a\n straight-through estimator here: the gradients are passed back as though\n no quantisation happened.\n \"\"\"\n @staticmethod\n def forward(ctx, x, quantisation_fn):\n if quantisation_fn is not None:\n x = quantisation_fn(x)\n return x\n\n @staticmethod\n def backward(ctx, grad_output):\n # Need to return gradients for all inputs of `forward`, including the\n # quantisation function.\n return grad_output, None\n\n\nquantise = QuantiseFunction.apply\n","repo_name":"db434/nn-restrict","sub_path":"modifiers/functional.py","file_name":"functional.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11363971087","text":"import torch\nfrom .common import MLP, SumRateLoss\nimport random\n\n\nclass Net(torch.nn.Module):\n def __init__(self,\n n_inputs,\n n_outputs,\n n_tasks,\n args):\n super(Net, self).__init__()\n\n # setup network\n hidden = [int(x) for x in args.hidden_layers.split(\"-\")]\n self.net = MLP([n_inputs] + hidden + [n_outputs])\n self.nc_per_task = n_outputs\n self.n_outputs = n_outputs\n\n # setup optimizer\n self.opt = torch.optim.RMSprop(self.parameters(), lr=args.lr)\n self.n_iter = args.n_iter\n self.mini_batch_size = args.mini_batch_size\n\n # setup losses\n self.noise = args.noise\n self.loss = torch.nn.MSELoss()\n self.loss_sumrate = SumRateLoss\n\n # allocate buffer\n self.M = []\n self.age = 0\n self.memories = args.n_memories\n\n def forward(self, x, t):\n output = self.net(x)\n return output\n\n def get_batch(self, x, y):\n if self.M:\n # combine buffer with current samples\n set_x = torch.stack([self.M[k][0] for k in range(len(self.M))], 0)\n set_y = torch.stack([self.M[k][1] for k in range(len(self.M))], 0)\n set_x = torch.cat([set_x, x], 0)\n set_y = torch.cat([set_y, y], 0)\n return set_x, set_y\n else:\n return x, y\n\n def observe(self, x, t, y, loss_type='MSE', x_tr=None, x_te=None):\n self.train()\n set_x, set_y = self.get_batch(x, y)\n for epoch in range(self.n_iter):\n permutation = torch.randperm(set_x.size()[0])\n for i in range(0, x.size()[0], self.mini_batch_size):\n self.zero_grad()\n indices = permutation[i:i + self.mini_batch_size]\n batch_x, batch_y = 
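# QuantiseFunction above is a straight-through estimator: the forward pass quantises,
# the backward pass returns the gradient unchanged, as though no quantisation
# happened. A minimal standalone check (assumes torch is installed):
import torch

class RoundSTE(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x):
        return torch.round(x)

    @staticmethod
    def backward(ctx, grad_output):
        return grad_output  # identity gradient through the rounding step

x = torch.tensor([0.3, 1.7], requires_grad=True)
RoundSTE.apply(x).sum().backward()
print(x.grad)  # tensor([1., 1.])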
set_x[indices], set_y[indices]\n                if loss_type == 'MSE':\n                    ptloss = self.loss(self.forward(batch_x, t), batch_y)\n                else:\n                    ptloss = self.loss_sumrate(\n                        batch_x, self.forward(batch_x, t), self.noise)\n                ptloss.backward()\n                self.opt.step()\n\n        for i in range(0, x.size()[0]):\n            self.age += 1\n            if len(self.M) < self.memories:\n                # add new samples to the buffer\n                self.M.append([x[i], y[i], t])\n            else:\n                # buffer is full\n                p = random.randint(0, self.age)\n                if p < self.memories:\n                    self.M[p] = [x[i], y[i], t]\n","repo_name":"Haoran-S/TSP_CL","sub_path":"model/reservoir_sampling.py","file_name":"reservoir_sampling.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"61"}
{"seq_id":"8894031913","text":"from tkinter import *\nfrom tkinter import ttk\n\nfrom person.create import Create\nfrom person.select import Select\n\n\ndef press(click):\n    if click == \"create\":\n\n        name = style['en_name']['textvariable'].get()\n        family = style['en_family']['textvariable'].get()\n        age = style['en_age']['textvariable'].get()\n        phone = style['en_phone']['textvariable'].get()\n        Create(name, family, age, phone).create_data()\n\n    elif click == \"exit\":\n        master.destroy()\n\n\ndef select_data():\n    phone=phone_entry.get()\n    s=Select(phone)\n    s.select_info()\n\n\n\n\nmaster = Tk()\n\n\nstyle = {\n    'lbl_name': {\n        'text': 'Name:',\n    },\n    'lbl_family': {\n        'text': 'Family:'\n    },\n    'lbl_age': {\n        'text': 'Age:'\n    },\n    'lbl_phone': {\n        'text': 'Phone:'\n    },\n    'en_name':{\n        'textvariable': StringVar(),\n        'width': 40,\n        'bg': '#B3E5FC'\n    },\n    'en_family': {\n        'textvariable': StringVar(),\n        'width': 40,\n        'bg': '#B3E5FC'\n    },\n    'en_age': {\n        'textvariable': StringVar(),\n        'width': 40,\n        'bg': '#B3E5FC'\n    },\n    'en_phone': {\n        'textvariable': StringVar(),\n        'width': 40,\n        'bg': '#B3E5FC'\n    },\n    'btn_create': {\n        'text': 'Create',\n        'command': lambda : press('create'),\n        'width': 20,\n        'borderwidth': 3,\n        'relief': 'raised',\n        'bg': '#4CAF50',\n    },\n    'btn_exit': {\n        'text': 'Exit',\n        'command': lambda : press('exit'),\n        'width': 20,\n        'borderwidth': 3,\n        'relief': 'raised',\n        'bg': '#f44336',\n    }\n}\n\n#####create\nroot_tabs = ttk.Notebook(master)\n\ncreate_tab = ttk.Frame(root_tabs)\nroot_tabs.add(create_tab, text='create')\n\nselect_tab = ttk.Frame(root_tabs)\nroot_tabs.add(select_tab, text='select')\n\nupdate_tab = ttk.Frame(root_tabs)\nroot_tabs.add(update_tab, text='update')\n\ndelete_tab = ttk.Frame(root_tabs)\nroot_tabs.add(delete_tab, text='delete')\n\nroot_tabs.pack()\n\nLabel(create_tab, cnf=style['lbl_name']).grid(row=0, column=0, sticky='w')\nEntry(create_tab, cnf=style['en_name']).grid(row=1, column=0)\n\nLabel(create_tab, cnf=style['lbl_family']).grid(row=2, column=0, sticky='w')\nEntry(create_tab, cnf=style['en_family']).grid(row=3, column=0)\n\nLabel(create_tab, cnf=style['lbl_age']).grid(row=4, column=0, sticky='w')\nEntry(create_tab, cnf=style['en_age']).grid(row=5, column=0)\n\nLabel(create_tab, cnf=style['lbl_phone']).grid(row=6, column=0, sticky='w')\nEntry(create_tab, cnf=style['en_phone']).grid(row=7, column=0)\n\nButton(create_tab, cnf=style['btn_create']).grid(row=8, column=0)\nButton(create_tab, cnf=style['btn_exit']).grid(row=9, column=0)\n\n#####select\n\nphone_label=Label(select_tab,text=\"Phone\")\nphone_label.grid(row=0,column=0)\n\nphone_entry=Entry(select_tab)\nphone_entry.grid(row=0,column=1)\n\nbtn_select=Button(select_tab, text=\"search\", 
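# The buffer update above is reservoir sampling: append while there is room, then
# replace a random slot with probability memories/age. The textbook form for
# comparison (randrange over the running count keeps every stream item equally
# likely; the module above draws from the inclusive range randint(0, age) instead):
import random

def reservoir(stream, k):
    buffer, age = [], 0
    for item in stream:
        age += 1
        if len(buffer) < k:
            buffer.append(item)
        else:
            p = random.randrange(age)  # 0 <= p < age
            if p < k:
                buffer[p] = item
    return buffer

print(reservoir(range(100), 10))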
command=select_data)\nbtn_select.grid(row=0,column=3)\n\n###update\n\nphone_number=Label(select_tab,text=\"Phone\")\nphone_number.grid(row=0,column=0)\n\nphone_number_entry=Entry(select_tab)\nphone_number_entry.grid(row=0,column=1)\n\nbtn_update=Button(select_tab, text=\"search\")\nbtn_update.grid(row=0,column=3)\nmaster.mainloop()","repo_name":"FatemehMotamed/Term4","sub_path":"person/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":3230,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"71334238274","text":"from __future__ import annotations\n\nfrom inspect import getfile, signature\n\nfrom fastapi import Depends, params\nfrom fastapi.exceptions import HTTPException\n\nfrom fastapi_authorization.utils import normalize_list\n\ntry:\n import rich.repr\nexcept ModuleNotFoundError: # pragma: no cover\n ...\n\n\nclass RBAC:\n def __init__(self, role_callback: str, roles: list[Role | str] | None = None):\n self.role_callback = role_callback\n self.roles = normalize_list(roles, Role) if roles else []\n\n def add_role(\n self,\n name: str,\n *,\n description: str | None = None,\n permissions: list[Scope | str] | None = None,\n ) -> None:\n role = Role(name, description, permissions)\n self.roles.append(role)\n\n def add_permissions(self, role: str, permissions: list[Scope | str]) -> None:\n if role not in self.roles:\n raise RuntimeError(f\"Role {role} does not exist\")\n index = self.roles.index(role)\n self.roles[index].add_permissions(permissions)\n\n def Permission(self, scope: str) -> params.Depends:\n def allow(role: str) -> bool:\n if role not in self.roles:\n raise HTTPException(status_code=403, detail=\"Forbidden\")\n scopes = self.roles[self.roles.index(role)].permissions\n if scope not in scopes:\n raise HTTPException(status_code=403, detail=\"Forbidden\")\n\n sig = signature(allow)\n sig = sig.replace(\n parameters=[\n sig.parameters[\"role\"].replace(default=Depends(self.role_callback))\n ]\n )\n allow.__signature__ = sig\n return params.Depends(dependency=allow, use_cache=True)\n\n def __repr__(self) -> str:\n clb_file = getfile(self.role_callback)\n clb_path = f\"{clb_file}:{self.role_callback.__qualname__}\"\n return f\"\"\n\n def __rich_repr__(self) -> \"rich.repr.Result\":\n clb_file = getfile(self.role_callback)\n clb_path = f\"{clb_file}:{self.role_callback.__qualname__}\"\n yield \"role_callback\", clb_path\n yield \"roles\", self.roles\n\n\nclass Role:\n def __init__(\n self,\n name: str,\n description: str | None = None,\n scopes: list[Scope | str] | None = None,\n ):\n self.name = name\n self.description = description\n self.permissions = normalize_list(scopes, Scope) if scopes else []\n\n def add_permissions(self, scopes: list[Scope | str]) -> None:\n scopes = normalize_list(scopes, Scope)\n self.permissions.extend(scopes)\n\n def __eq__(self, __o: object) -> bool:\n if isinstance(__o, str):\n return __o == self.name\n elif isinstance(__o, type(self)):\n return __o.name == self.name\n return False\n\n def __hash__(self) -> int:\n return hash(self.name)\n\n def __repr__(self) -> str:\n return f\"\"\n\n def __rich_repr__(self) -> \"rich.repr.Result\":\n yield self.name\n yield \"permissions\", self.permissions\n\n\nclass Scope:\n def __init__(self, name: str, description: str | None = None):\n self.name = name\n self.description = description\n\n def __eq__(self, __o: object) -> bool:\n if isinstance(__o, str):\n return __o == self.name\n elif isinstance(__o, type(self)):\n return __o.name == 
self.name\n return False\n\n def __hash__(self) -> int:\n return hash(self.name)\n\n def __repr__(self) -> str:\n if self.description:\n return f\"\"\n return f\"\"\n\n def __rich_repr__(self) -> \"rich.repr.Result\":\n yield self.scope\n if self.description:\n yield \"description\", self.description\n","repo_name":"Kludex/fastapi-authorization","sub_path":"fastapi_authorization/rbac.py","file_name":"rbac.py","file_ext":"py","file_size_in_byte":3898,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"61"} +{"seq_id":"15856306124","text":"\ndef main():\n MIN_WEIGHT = 75\n MAX_WEIGHT = 350\n\n MIN_HEIGHT = 50\n MAX_HEIGHT = 350\n\n weight = float(input(\"Enter weight: \"))\n while weight > MAX_WEIGHT or weight < MIN_WEIGHT :\n print(\"Invalid weight try again!\")\n weight = float(input(\"Enter weight: \"))\n\n\n height = float(input(\"Enter height: \"))\n\n while height > MAX_HEIGHT or height < MIN_HEIGHT :\n print(\"Invalid height try again!\")\n height = float(input(\"Enter height: \"))\n\n display_BMI(weight, height)\n \ndef display_BMI(weight, height):\n UNDERWEIGHT_LIM = 18.5 # upper limits\n NORMAL_LIM = 24.9\n OVERWEIGHT_LIM = 29.9 # above is obese\n \n bmi = weight * 703/(height ** 2)\n status = \"\"\n \n if bmi < UNDERWEIGHT_LIM :\n status = \"underweight\"\n\n elif bmi > UNDERWEIGHT_LIM and bmi < NORMAL_LIM:\n status = \"normal\"\n\n elif bmi < OVERWEIGHT_LIM and bmi > NORMAL_LIM:\n status = \"overweight\"\n else:\n status = \"obese\"\n\n print(f\"BMI = {bmi:.1f} {status}\")\nmain()\n","repo_name":"luciacarrera/CS021_python","sub_path":"labs/lab-5/bmi.py","file_name":"bmi.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41510572568","text":"\nfrom flask import Flask, render_template, Response\nimport numpy as np\nimport cv2\nimport time\nfrom pygame import mixer\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\ndef gen():\n def play_sound(detected, sound):\n\n play = detected > hat_thickness[0] * hat_thickness[1] * 0.8\n\n if play and sound == 1:\n #sound_drum.play()\n time.sleep(0.001)\n\n elif play and sound == 2:\n #sound_hat.play()\n time.sleep(0.001)\n\n # This function is to check if the green object is present in the small region\n def detect_in_region(frame, sound):\n\n # converting BGR to HSV\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n # creating mask\n mask = cv2.inRange(hsv, greenLower, greenUpper)\n\n # calc number of green pixels\n detected = np.sum(mask)\n\n play_sound(detected, sound)\n\n return mask\n\n # Importing Video Camera Feed\n camera = cv2.VideoCapture(0)\n ret, frame = camera.read()\n H, W = frame.shape[:2] # Skips Channel\n\n # Importing Drum sounds\n #mixer.init()\n #sound_hat = mixer.Sound('./Sounds/hat.ogg')\n #sound_drum = mixer.Sound('./Sounds/snare.wav')\n\n # Set HSV range for detecting green color\n greenLower = (25, 52, 72)\n greenUpper = (102, 255, 255)\n\n kernel = np.ones((7, 7), np.uint8)\n\n # Read the image of High Hat and the Snare drum\n hat = cv2.resize(cv2.imread('./Images/high_hat.png'), (200, 100), interpolation=cv2.INTER_CUBIC)\n snare = cv2.resize(cv2.imread('./Images/snare_drum.png'), (200, 100), interpolation=cv2.INTER_CUBIC)\n\n # Set region for detecting green\n hat_cntr = [np.shape(frame)[1] * 2 // 8, np.shape(frame)[0] * 6 // 8]\n snare_cntr = [np.shape(frame)[1] * 6 // 8, np.shape(frame)[0] * 6 // 8]\n\n hat_thickness = 
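# RBAC above plugs authorization into FastAPI's dependency system: Permission(scope)
# builds a Depends that resolves the caller's role via role_callback and rejects the
# request with a 403 unless that role holds the scope. A usage sketch
# (get_current_role is a made-up resolver; the import path follows the module's
# sub_path, fastapi_authorization/rbac.py):
from fastapi import FastAPI
from fastapi_authorization.rbac import RBAC

def get_current_role() -> str:
    return "admin"  # stand-in; a real app would read a session or token

rbac = RBAC(get_current_role)
rbac.add_role("admin", permissions=["items:read"])

app = FastAPI()

@app.get("/items", dependencies=[rbac.Permission("items:read")])
def list_items():
    return []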
[200, 100]\n hat_top = [hat_cntr[0] - hat_thickness[0] // 2, hat_cntr[1] - hat_thickness[1] // 2]\n hat_btm = [hat_cntr[0] + hat_thickness[0] // 2, hat_cntr[1] + hat_thickness[1] // 2]\n\n snare_thickness = [200, 100]\n snare_top = [snare_cntr[0] - snare_thickness[0] // 2, snare_cntr[1] - snare_thickness[1] // 2]\n snare_btm = [snare_cntr[0] + snare_thickness[0] // 2, snare_cntr[1] + snare_thickness[1] // 2]\n\n time.sleep(1)\n\n while True:\n\n ret, frame = camera.read()\n frame = cv2.flip(frame, 1)\n\n # Region for the Snare\n snare_region = np.copy(frame[snare_top[1]:snare_btm[1], snare_top[0]:snare_btm[0]])\n mask = detect_in_region(snare_region, 1)\n\n # Region for the Hi hat\n hat_region = np.copy(frame[hat_top[1]:hat_btm[1], hat_top[0]:hat_btm[0]])\n mask = detect_in_region(hat_region, 2)\n\n # Output text\n cv2.putText(frame, 'Virtual Drums', (10, 30), 2, 1, (20, 20, 20), 2)\n cv2.putText(frame, 'Rick Sikka', (1100, 30), 2, 1, (20, 20, 20), 2)\n cv2.putText(frame, '\"q\" to exit', (1100, 70), 2, 1, (20, 20, 20), 2)\n\n # Display Both transparently\n frame[snare_top[1]:snare_btm[1], snare_top[0]:snare_btm[0]] = cv2.addWeighted(snare, 1, frame[snare_top[1]:snare_btm[1], snare_top[0]:snare_btm[0]], 1, 0)\n frame[hat_top[1]:hat_btm[1], hat_top[0]:hat_btm[0]] = cv2.addWeighted(hat, 1, frame[hat_top[1]:hat_btm[1], hat_top[0]:hat_btm[0]], 1, 0)\n\n ret, jpeg = cv2.imencode('.jpg', frame)\n frame = jpeg.tobytes()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n\\r\\n')\n\n\n@app.route('/video_feed')\ndef video_feed():\n return Response(gen(),\n mimetype='multipart/x-mixed-replace; boundary=frame')\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', debug=False)","repo_name":"ricksikka1/Virtual_Drums","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26247641558","text":"import torch\nimport torch.nn as nn\nfrom numpy import *\nfrom numpy.linalg import *\nfrom scipy.special import factorial\nfrom functools import reduce\n\n__all__ = ['M2K','K2M']\n\n\nclass PhyCell_Cell(nn.Module):\n\n def __init__(self, input_dim, F_hidden_dim, kernel_size, bias=1):\n super(PhyCell_Cell, self).__init__()\n self.input_dim = input_dim\n self.F_hidden_dim = F_hidden_dim\n self.kernel_size = kernel_size\n self.padding = kernel_size[0] // 2, kernel_size[1] // 2\n self.bias = bias\n \n self.F = nn.Sequential()\n self.F.add_module('conv1', nn.Conv2d(in_channels=input_dim, out_channels=F_hidden_dim,\n kernel_size=self.kernel_size, stride=(1,1), padding=self.padding))\n self.F.add_module('bn1',nn.GroupNorm(7 ,F_hidden_dim)) \n self.F.add_module('conv2', nn.Conv2d(in_channels=F_hidden_dim, out_channels=input_dim,\n kernel_size=(1,1), stride=(1,1), padding=(0,0)))\n\n self.convgate = nn.Conv2d(in_channels=self.input_dim + self.input_dim,\n out_channels=self.input_dim,\n kernel_size=(3,3),\n padding=(1,1), bias=self.bias)\n\n def forward(self, x, hidden): # x [batch_size, hidden_dim, height, width]\n combined = torch.cat([x, hidden], dim=1) # concatenate along channel axis\n combined_conv = self.convgate(combined)\n K = torch.sigmoid(combined_conv)\n hidden_tilde = hidden + self.F(hidden) # prediction\n next_hidden = hidden_tilde + K * (x-hidden_tilde) # correction , Haddamard product\n return next_hidden\n\n\nclass PhyCell(nn.Module):\n\n def __init__(self, input_shape, input_dim, F_hidden_dims, n_layers, kernel_size, device):\n 
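# detect_in_region above compares np.sum(mask) against width*height*0.8, but an
# inRange mask holds 0 or 255, so the sum measures intensity, not pixel count. The
# arithmetic, spelled out (region size matches hat_thickness, values illustrative):
import numpy as np

mask = np.zeros((100, 200), np.uint8)
mask[:10, :] = 255                         # 10% of the region is "green"
print(mask.sum())                          # 510000 = 255 * 2000
print(np.count_nonzero(mask) / mask.size)  # 0.1, the actual coverage
# The threshold 200 * 100 * 0.8 = 16000 therefore trips at roughly
# 16000 / 255 / 20000, about 0.3% coverage.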
super(PhyCell, self).__init__()\n self.input_shape = input_shape\n self.input_dim = input_dim\n self.F_hidden_dims = F_hidden_dims\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.H = [] \n self.device = device\n \n cell_list = []\n for i in range(0, self.n_layers):\n cell_list.append(PhyCell_Cell(input_dim=input_dim,\n F_hidden_dim=self.F_hidden_dims[i],\n kernel_size=self.kernel_size))\n self.cell_list = nn.ModuleList(cell_list)\n \n \n def forward(self, input_, first_timestep=False): # input_ [batch_size, 1, channels, width, height]\n batch_size = input_.data.size()[0]\n if (first_timestep): \n self.initHidden(batch_size) # init Hidden at each forward start\n for j,cell in enumerate(self.cell_list):\n if j==0: # bottom layer\n self.H[j] = cell(input_, self.H[j])\n else:\n self.H[j] = cell(self.H[j-1],self.H[j])\n return self.H, self.H\n \n def initHidden(self,batch_size):\n self.H = [] \n for i in range(self.n_layers):\n self.H.append(torch.zeros(\n batch_size, self.input_dim, self.input_shape[0], self.input_shape[1]).to(self.device))\n\n def setHidden(self, H):\n self.H = H\n\n\nclass PhyD_ConvLSTM_Cell(nn.Module):\n def __init__(self, input_shape, input_dim, hidden_dim, kernel_size, bias=1):\n \"\"\"\n input_shape: (int, int)\n Height and width of input tensor as (height, width).\n input_dim: int\n Number of channels of input tensor.\n hidden_dim: int\n Number of channels of hidden state.\n kernel_size: (int, int)\n Size of the convolutional kernel.\n bias: bool\n Whether or not to add the bias.\n \"\"\"\n super(PhyD_ConvLSTM_Cell, self).__init__()\n \n self.height, self.width = input_shape\n self.input_dim = input_dim\n self.hidden_dim = hidden_dim\n self.kernel_size = kernel_size\n self.padding = kernel_size[0] // 2, kernel_size[1] // 2\n self.bias = bias\n \n self.conv = nn.Conv2d(in_channels=self.input_dim + self.hidden_dim,\n out_channels=4 * self.hidden_dim,\n kernel_size=self.kernel_size,\n padding=self.padding, bias=self.bias)\n \n # we implement LSTM that process only one timestep \n def forward(self,x, hidden): # x [batch, hidden_dim, width, height]\n h_cur, c_cur = hidden\n \n combined = torch.cat([x, h_cur], dim=1) # concatenate along channel axis\n combined_conv = self.conv(combined)\n cc_i, cc_f, cc_o, cc_g = torch.split(combined_conv, self.hidden_dim, dim=1) \n i = torch.sigmoid(cc_i)\n f = torch.sigmoid(cc_f)\n o = torch.sigmoid(cc_o)\n g = torch.tanh(cc_g)\n\n c_next = f * c_cur + i * g\n h_next = o * torch.tanh(c_next)\n return h_next, c_next\n\n\nclass PhyD_ConvLSTM(nn.Module):\n\n def __init__(self, input_shape, input_dim, hidden_dims, n_layers, kernel_size, device):\n super(PhyD_ConvLSTM, self).__init__()\n self.input_shape = input_shape\n self.input_dim = input_dim\n self.hidden_dims = hidden_dims\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.H, self.C = [], [] \n self.device = device\n \n cell_list = []\n for i in range(0, self.n_layers):\n cur_input_dim = self.input_dim if i == 0 else self.hidden_dims[i-1]\n print('layer ', i, 'input dim ', cur_input_dim, ' hidden dim ', self.hidden_dims[i])\n cell_list.append(PhyD_ConvLSTM_Cell(input_shape=self.input_shape,\n input_dim=cur_input_dim,\n hidden_dim=self.hidden_dims[i],\n kernel_size=self.kernel_size))\n self.cell_list = nn.ModuleList(cell_list)\n\n def forward(self, input_, first_timestep=False): # input_ [batch_size, 1, channels, width, height]\n batch_size = input_.data.size()[0]\n if (first_timestep): \n self.initHidden(batch_size) # init Hidden at each forward start\n 
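# PhyCell_Cell above corrects its physical prediction with a learned gate:
# next_hidden = hidden_tilde + K * (x - hidden_tilde), a convex blend between
# trusting the prediction (K=0) and trusting the observation (K=1). Numerically:
prediction, observation = 1.0, 3.0
for K in (0.0, 0.5, 1.0):
    print(K, prediction + K * (observation - prediction))
# 0.0 -> 1.0 (pure prediction), 0.5 -> 2.0 (midpoint), 1.0 -> 3.0 (pure observation)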
for j,cell in enumerate(self.cell_list):\n if j==0: # bottom layer\n self.H[j], self.C[j] = cell(input_, (self.H[j],self.C[j]))\n else:\n self.H[j], self.C[j] = cell(self.H[j-1],(self.H[j],self.C[j]))\n return (self.H,self.C) , self.H # (hidden, output)\n \n def initHidden(self,batch_size):\n self.H, self.C = [],[] \n for i in range(self.n_layers):\n self.H.append(torch.zeros(\n batch_size,self.hidden_dims[i], self.input_shape[0], self.input_shape[1]).to(self.device))\n self.C.append(torch.zeros(\n batch_size,self.hidden_dims[i], self.input_shape[0], self.input_shape[1]).to(self.device))\n \n def setHidden(self, hidden):\n H,C = hidden\n self.H, self.C = H,C\n \n\nclass dcgan_conv(nn.Module):\n\n def __init__(self, nin, nout, stride):\n super(dcgan_conv, self).__init__()\n self.main = nn.Sequential(\n nn.Conv2d(in_channels=nin, out_channels=nout, kernel_size=(3,3),\n stride=stride, padding=1),\n nn.GroupNorm(16, nout),\n nn.LeakyReLU(0.2, inplace=True),\n )\n\n def forward(self, input):\n return self.main(input)\n\n\nclass dcgan_upconv(nn.Module):\n\n def __init__(self, nin, nout, stride):\n super(dcgan_upconv, self).__init__()\n if stride==2:\n output_padding = 1\n else:\n output_padding = 0\n self.main = nn.Sequential(\n nn.ConvTranspose2d(in_channels=nin, out_channels=nout, kernel_size=(3,3),\n stride=stride, padding=1, output_padding=output_padding),\n nn.GroupNorm(16, nout),\n nn.LeakyReLU(0.2, inplace=True),\n )\n\n def forward(self, input):\n return self.main(input)\n\n\nclass encoder_E(nn.Module):\n\n def __init__(self, nc=1, nf=32, patch_size=4):\n super(encoder_E, self).__init__()\n assert patch_size in [2, 4]\n stride_2 = patch_size // 2\n # input is (1) x 64 x 64\n self.c1 = dcgan_conv(nc, nf, stride=2) # (32) x 32 x 32\n self.c2 = dcgan_conv(nf, nf, stride=1) # (32) x 32 x 32\n self.c3 = dcgan_conv(nf, 2*nf, stride=stride_2) # (64) x 16 x 16\n\n def forward(self, input):\n h1 = self.c1(input)\n h2 = self.c2(h1)\n h3 = self.c3(h2)\n return h3\n\n\nclass decoder_D(nn.Module):\n\n def __init__(self, nc=1, nf=32, patch_size=4):\n super(decoder_D, self).__init__()\n assert patch_size in [2, 4]\n stride_2 = patch_size // 2\n output_padding = 1 if stride_2==2 else 0\n self.upc1 = dcgan_upconv(2*nf, nf, stride=2) #(32) x 32 x 32\n self.upc2 = dcgan_upconv(nf, nf, stride=1) #(32) x 32 x 32\n self.upc3 = nn.ConvTranspose2d(in_channels=nf, out_channels=nc, kernel_size=(3,3),\n stride=stride_2, padding=1,\n output_padding=output_padding) #(nc) x 64 x 64\n\n def forward(self, input):\n d1 = self.upc1(input)\n d2 = self.upc2(d1)\n d3 = self.upc3(d2)\n return d3\n\n\nclass encoder_specific(nn.Module):\n\n def __init__(self, nc=64, nf=64):\n super(encoder_specific, self).__init__()\n self.c1 = dcgan_conv(nc, nf, stride=1) # (64) x 16 x 16\n self.c2 = dcgan_conv(nf, nf, stride=1) # (64) x 16 x 16\n\n def forward(self, input):\n h1 = self.c1(input)\n h2 = self.c2(h1)\n return h2\n\n\nclass decoder_specific(nn.Module):\n\n def __init__(self, nc=64, nf=64):\n super(decoder_specific, self).__init__()\n self.upc1 = dcgan_upconv(nf, nf, stride=1) #(64) x 16 x 16\n self.upc2 = dcgan_upconv(nf, nc, stride=1) #(32) x 32 x 32\n \n def forward(self, input):\n d1 = self.upc1(input)\n d2 = self.upc2(d1)\n return d2\n\n\nclass PhyD_EncoderRNN(torch.nn.Module):\n\n def __init__(self, phycell, convcell, in_channel=1, patch_size=4):\n super(PhyD_EncoderRNN, self).__init__()\n self.encoder_E = encoder_E(nc=in_channel, patch_size=patch_size) # general encoder 64x64x1 -> 32x32x32\n self.encoder_Ep = 
encoder_specific() # specific image encoder 32x32x32 -> 16x16x64\n self.encoder_Er = encoder_specific()\n self.decoder_Dp = decoder_specific() # specific image decoder 16x16x64 -> 32x32x32\n self.decoder_Dr = decoder_specific()\n self.decoder_D = decoder_D(nc=in_channel, patch_size=patch_size) # general decoder 32x32x32 -> 64x64x1\n\n self.phycell = phycell\n self.convcell = convcell\n\n def forward(self, input, first_timestep=False, decoding=False):\n input = self.encoder_E(input) # general encoder 64x64x1 -> 32x32x32\n \n if decoding: # input=None in decoding phase\n input_phys = None\n else:\n input_phys = self.encoder_Ep(input)\n input_conv = self.encoder_Er(input) \n\n hidden1, output1 = self.phycell(input_phys, first_timestep)\n hidden2, output2 = self.convcell(input_conv, first_timestep)\n\n decoded_Dp = self.decoder_Dp(output1[-1])\n decoded_Dr = self.decoder_Dr(output2[-1])\n \n out_phys = torch.sigmoid(self.decoder_D(decoded_Dp)) # partial reconstructions for vizualization\n out_conv = torch.sigmoid(self.decoder_D(decoded_Dr))\n\n concat = decoded_Dp + decoded_Dr \n output_image = torch.sigmoid( self.decoder_D(concat ))\n return out_phys, hidden1, output_image, out_phys, out_conv\n\n\ndef _apply_axis_left_dot(x, mats):\n assert x.dim() == len(mats)+1\n sizex = x.size()\n k = x.dim()-1\n for i in range(k):\n x = tensordot(mats[k-i-1], x, dim=[1,k])\n x = x.permute([k,]+list(range(k))).contiguous()\n x = x.view(sizex)\n return x\n\ndef _apply_axis_right_dot(x, mats):\n assert x.dim() == len(mats)+1\n sizex = x.size()\n k = x.dim()-1\n x = x.permute(list(range(1,k+1))+[0,])\n for i in range(k):\n x = tensordot(x, mats[i], dim=[0,0])\n x = x.contiguous()\n x = x.view(sizex)\n return x\n\nclass _MK(nn.Module):\n def __init__(self, shape):\n super(_MK, self).__init__()\n self._size = torch.Size(shape)\n self._dim = len(shape)\n M = []\n invM = []\n assert len(shape) > 0\n j = 0\n for l in shape:\n M.append(zeros((l,l)))\n for i in range(l):\n M[-1][i] = ((arange(l)-(l-1)//2)**i)/factorial(i)\n invM.append(inv(M[-1]))\n self.register_buffer('_M'+str(j), torch.from_numpy(M[-1]))\n self.register_buffer('_invM'+str(j), torch.from_numpy(invM[-1]))\n j += 1\n\n @property\n def M(self):\n return list(self._buffers['_M'+str(j)] for j in range(self.dim()))\n @property\n def invM(self):\n return list(self._buffers['_invM'+str(j)] for j in range(self.dim()))\n\n def size(self):\n return self._size\n def dim(self):\n return self._dim\n def _packdim(self, x):\n assert x.dim() >= self.dim()\n if x.dim() == self.dim():\n x = x[newaxis,:]\n x = x.contiguous()\n x = x.view([-1,]+list(x.size()[-self.dim():]))\n return x\n\n def forward(self):\n pass\n\n\nclass M2K(_MK):\n \"\"\"\n convert moment matrix to convolution kernel\n Arguments:\n shape (tuple of int): kernel shape\n Usage:\n m2k = M2K([5,5])\n m = torch.randn(5,5,dtype=torch.float64)\n k = m2k(m)\n \"\"\"\n def __init__(self, shape):\n super(M2K, self).__init__(shape)\n def forward(self, m):\n \"\"\"\n m (Tensor): torch.size=[...,*self.shape]\n \"\"\"\n sizem = m.size()\n m = self._packdim(m)\n m = _apply_axis_left_dot(m, self.invM)\n m = m.view(sizem)\n return m\n\n\nclass K2M(_MK):\n \"\"\"\n convert convolution kernel to moment matrix\n Arguments:\n shape (tuple of int): kernel shape\n Usage:\n k2m = K2M([5,5])\n k = torch.randn(5,5,dtype=torch.float64)\n m = k2m(k)\n \"\"\"\n def __init__(self, shape):\n super(K2M, self).__init__(shape)\n def forward(self, k):\n \"\"\"\n k (Tensor): torch.size=[...,*self.shape]\n \"\"\"\n sizek = k.size()\n 
k = self._packdim(k)\n        k = _apply_axis_left_dot(k, self.M)\n        k = k.view(sizek)\n        return k\n\n\ndef tensordot(a,b,dim):\n    \"\"\"\n    tensordot in PyTorch, see numpy.tensordot?\n    \"\"\"\n    l = lambda x,y:x*y\n    if isinstance(dim,int):\n        a = a.contiguous()\n        b = b.contiguous()\n        sizea = a.size()\n        sizeb = b.size()\n        sizea0 = sizea[:-dim]\n        sizea1 = sizea[-dim:]\n        sizeb0 = sizeb[:dim]\n        sizeb1 = sizeb[dim:]\n        N = reduce(l, sizea1, 1)\n        assert reduce(l, sizeb0, 1) == N\n    else:\n        adims = dim[0]\n        bdims = dim[1]\n        adims = [adims,] if isinstance(adims, int) else adims\n        bdims = [bdims,] if isinstance(bdims, int) else bdims\n        adims_ = set(range(a.dim())).difference(set(adims))\n        adims_ = list(adims_)\n        adims_.sort()\n        perma = adims_+adims\n        bdims_ = set(range(b.dim())).difference(set(bdims))\n        bdims_ = list(bdims_)\n        bdims_.sort()\n        permb = bdims+bdims_\n        a = a.permute(*perma).contiguous()\n        b = b.permute(*permb).contiguous()\n\n        sizea = a.size()\n        sizeb = b.size()\n        sizea0 = sizea[:-len(adims)]\n        sizea1 = sizea[-len(adims):]\n        sizeb0 = sizeb[:len(bdims)]\n        sizeb1 = sizeb[len(bdims):]\n        N = reduce(l, sizea1, 1)\n        assert reduce(l, sizeb0, 1) == N\n    a = a.view([-1,N])\n    b = b.view([N,-1])\n    c = a@b\n    return c.view(sizea0+sizeb1)\n","repo_name":"chengtan9907/OpenSTL","sub_path":"openstl/modules/phydnet_modules.py","file_name":"phydnet_modules.py","file_ext":"py","file_size_in_byte":16064,"program_lang":"python","lang":"en","doc_type":"code","stars":403,"dataset":"github-code","pt":"61"}
{"seq_id":"5197764916","text":"import random\nimport difflib\nimport numpy as np\n\nfrom itertools import chain\nfrom string import punctuation\n\nfrom .utility import levenshtein_similarity_1_to_n\n\n\nclass Match:\n    '''\n    This class allows extracting the common pattern from a list of sequences.\n    Create a new Match object for every pattern extraction task.\n    '''\n    def __init__(self, match_threshhold=0.8, add_placeholder=False):\n        #self.sequences = sequences\n        self.match_threshhold = match_threshhold\n        self.add_placeholder = add_placeholder\n\n    def sequence_matcher(self, sequences):\n        unique = np.unique(sequences).tolist()\n        if len(unique) <= 1:\n            return unique[0]\n\n        random.shuffle(unique)\n        for x in unique:\n            others = unique[:]\n            others.remove(x)\n            pattern = None\n            for sequence in others:\n                matches = difflib.SequenceMatcher(None, x, sequence)\n                if matches.ratio() < self.match_threshhold:\n                    continue\n\n                # We extract matching fragments of sequences\n                # and change pattern to only contain those subsequences.\n                # In the end this gives us a common part of all the sequences.\n                match_ranges = matches.get_matching_blocks()[:-1]\n                matches = [x[m.a:m.a + m.size] for m in match_ranges]\n                if self.add_placeholder: # Add a placeholder between matching subsequences\n                    matches = [match + ['(.*?)'] for match in matches]\n                    matches[-1].pop()\n                pattern = list(chain(*matches)) # concatenate inner lists\n\n            if not pattern:\n                continue\n            junk = list(punctuation) + ['_', '(.*?)', '']\n            # if at least one of the items in sequence is not junk - return True\n            correct = any([token not in junk for token in pattern])\n            return pattern if correct else x\n        return x\n\n    # This basically does the same as sequence_matcher, with a couple of differences:\n    # * sequence_matcher picks only unique sequences\n    # * SM picks random to compare to, this picks first one\n    # * SM only compares sequences that have more similarity than self.match_threshhold\n\n    def matcher(self, sequences):\n        x = sequences[0]\n        for s in sequences:\n            matches = difflib.SequenceMatcher(None, x, s)\n            
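By construction, `M2K` and `K2M` are mutual inverses: `K2M` left-multiplies each kernel axis by the moment matrix `M`, while `M2K` applies `inv(M)`. A quick round-trip check, extending the usage shown in the docstrings above (the 5x5 shape and tolerance are arbitrary illustration choices):

```python
import torch

# Assumes M2K and K2M from the module above are in scope.
k2m = K2M([5, 5])
m2k = M2K([5, 5])
kernel = torch.randn(5, 5, dtype=torch.float64)  # matches the float64 buffers
moments = k2m(kernel)      # kernel -> moment matrix (applies M per axis)
recovered = m2k(moments)   # moment matrix -> kernel (applies inv(M) per axis)
assert torch.allclose(kernel, recovered, atol=1e-6)
```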
match_ranges = matches.get_matching_blocks()[:-1]\n matches = [x[m.a:m.a + m.size] for m in match_ranges]\n if self.add_placeholder:\n matches = [match + ['(.*?)'] for match in matches]\n matches[-1].pop()\n pattern = list(chain(*matches)) # concatenate inner lists\n junk = list(punctuation) + ['_', '(.*?)', '']\n # if at least one of the items in sequence is not junk - return True\n correct = any([token not in junk for token in pattern])\n return pattern if correct else x\n\n def matching_clusters(self, sequences, patterns):\n similarities = levenshtein_similarity_1_to_n(sequences)\n filtered, to_remove = [], []\n for i, value in enumerate(similarities):\n if value >= self.match_threshhold:\n filtered.append(sequences[i])\n to_remove.append(i)\n if not filtered:\n filtered.append(sequences[0])\n to_remove.append(0)\n patterns.append(self.matcher(filtered))\n sequences = np.delete(sequences, to_remove)\n if len(sequences) > 1:\n self.matching_clusters(sequences, patterns)\n elif len(sequences) == 1:\n patterns.append(sequences[0])\n np.delete(sequences, 0)\n\n def matrix_matching(self, sequences):\n if len(sequences) == 1:\n return sequences[0]\n else:\n x = list(map(list, zip(*sequences)))\n return [tokens[0] if len(tokens) == 1 else '(.*?)' for tokens in\n [np.unique(line) for line in x]]\n","repo_name":"maria-grigorieva/ClusterLog","sub_path":"clusterlogs/sequence_matching.py","file_name":"sequence_matching.py","file_ext":"py","file_size_in_byte":4012,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"35511842972","text":"import instaloader\r\nimport csv\r\nimport time\r\n\r\nL = instaloader.Instaloader()\r\n\r\n# Replace 'your username' and 'your password' by your login infos\r\nusername = 'your username'\r\npassword = 'your password'\r\n\r\n# Login\r\ntry:\r\n L.load_session_from_file(username)\r\n if not L.context.is_logged_in:\r\n L.context.log(\"Invalid session. You must log in again.\")\r\nexcept FileNotFoundError:\r\n L.context.log(\"No session found.\")\r\n\r\nif not L.context.is_logged_in:\r\n L.context.log(\"Connecting...\")\r\n L.load_session_from_file(username)\r\n if not L.context.is_logged_in:\r\n L.context.log(\"Impossible to connect. 
Please verify your credentials.\")\r\n    L.save_session_to_file()\r\n\r\nL.context.log(\"Connected as @\" + L.context.username)\r\n    \r\n# All the profiles to scrape\r\nprofiles = [\r\n    'account1', 'account2', '...'\r\n]\r\n\r\n# Open a CSV file in write mode\r\nwith open('posts_instagram.csv', 'w', newline='', encoding='utf-8') as csvfile:\r\n    fieldnames = ['Username', 'Creation_date']\r\n    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\r\n    writer.writeheader()\r\n\r\n    for username in profiles:\r\n        # load profile\r\n        print(f'\\n\\n{username}\\n')\r\n        profile = instaloader.Profile.from_username(L.context, username).get_posts()\r\n\r\n        # Browse posts and write to CSV; row keys must match the fieldnames above\r\n        for post in profile:\r\n            date = post.date_utc\r\n            print(f'{date}')\r\n            writer.writerow({\r\n                'Username': username,\r\n                'Creation_date': date\r\n            })\r\n        time.sleep(30)\r\n\r\n\r\nprint(\"Data saved in 'posts_instagram.csv'.\")\r\n","repo_name":"mbedez/Instagram-multiaccount-posts-scraper","sub_path":"InstaScraper.py","file_name":"InstaScraper.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"39550305003","text":"import math\nfrom typing import Type, Callable, Any, Union\nfrom darkai import model_configs, backends\nfrom darkai.supervised import supervised_model\nfrom darkai.optimizer import optimizer\nfrom darkai.observer import observer\n\nif \"perceptron\" not in model_configs:\n    model_configs[\"perceptron\"] = {}\n\n\ndef perceptron(backend:str, *args, **kwargs) -> Type[supervised_model]:\n    arch = \"perceptron\"\n    model_arch = model_configs[arch]\n    if backend in model_arch:\n        model_config = model_arch[backend]\n        model_ = perceptron_(*model_config)\n        model_._backend = backend\n        return model_\n    else:\n        raise Exception(\"Unsupported backend !\")\n\n\nclass perceptron_(supervised_model):\n    _model_name = \"perceptron\"\n    _model_architecture = \"perceptron\"\n    _backend = None\n\n    def __init__(self, *arg):\n        [\n            fn_returns_array_with_ones_of_given_1d_shape,\n            fn_returns_dot_product_of_two_1d_arrays,\n            fn_returns_size_of_arrays_1st_dim,\n            fn_returns_size_of_arrays_2nd_dim,\n            fn_returns_dimension_count,\n            fn_returns_dot_product_2d_array_with_1d_array,\n            fn_apply_given_func_to_all_elements_of_given_1d_array\n        ] = arg\n\n        self.ones_1d = fn_returns_array_with_ones_of_given_1d_shape\n        self.dot_1d_1d = fn_returns_dot_product_of_two_1d_arrays\n        self.dot_2d_1d = fn_returns_dot_product_2d_array_with_1d_array\n        self.size_of_1st_dim = fn_returns_size_of_arrays_1st_dim\n        self.size_of_2nd_dim = fn_returns_size_of_arrays_2nd_dim\n        self.dim_count = fn_returns_dimension_count\n        self.map_1d = fn_apply_given_func_to_all_elements_of_given_1d_array\n        self.activation_fn = lambda x:x\n        self.state = {\n            \"w\": None,\n            \"b\": None\n        }\n        self.optimizer = None\n        self.common_activation_fns = {\n            \"sigmoid\": lambda y: 1.0 / (1.0 + math.exp(-y)),\n            \"binary\": lambda y: 0 if y<=0 else 1\n        }\n        self.observers = []\n        self.tr_cache_predicted = None\n        self.tr_cache_expected = None\n        self.tr_cache_inputs = None\n\n\n    def set_activation_fn(self, fn:Union[Callable, str]):\n        if type(fn) is str:\n            if fn in self.common_activation_fns:\n                self.activation_fn = self.common_activation_fns[fn]\n            else:\n                raise Exception(\"Invalid Activation Function !\")\n        else:\n            self.activation_fn = fn\n\n\n    def add_observer(self, observer):\n        observer.set_model(self)\n        self.observers.append(observer)\n\n\n    def predict(self, input_data):\n        dim_count = 
self.dim_count(input_data)\n if dim_count == 2:\n return self.batch_predict(input_data)\n if dim_count != 1:\n raise Exception(\"Invalid dimensions for input_data !\")\n size = self.size_of_1st_dim(input_data)\n self.prepare(size)\n return self.activation_fn(self.dot_1d_1d(input_data, self.state[\"w\"]) + self.state[\"b\"])\n\n\n def batch_predict(self, input_data):\n dim_count = self.dim_count(input_data)\n if dim_count == 1:\n return self.predict(input_data)\n if dim_count != 2:\n raise Exception(\"Invalid dimensions for input_data !\")\n size_2d = self.size_of_2nd_dim(input_data)\n self.prepare(size_2d)\n return self.map_1d(self.activation_fn , self.dot_2d_1d(input_data, self.state[\"w\"]) + self.state[\"b\"])\n\n\n def prepare(self, new_size):\n w, b = self.state[\"w\"], self.state[\"b\"]\n if w is None or self.dim_count(w) != 1 or self.size_of_1st_dim(w) != new_size:\n self.state[\"w\"] = self.ones_1d(new_size)\n if b is None or type(b) is not float:\n self.state[\"b\"] = 1.0\n\n\n def set_optimizer(self, opt:Callable):\n self.optimizer = opt(self)\n\n\n def train(self, input_data, expected_output):\n dim_count = self.dim_count(input_data)\n if dim_count == 2:\n return self.batch_train(input_data, expected_output)\n if dim_count != 1 or True:\n raise Exception(\"Invalid dimensions for input_data !\")\n # NYI - For simplicity\n # can only be trained in batches\n size = self.size_of_1st_dim(input_data)\n if type(expected_output) is int:\n expected_output = float(expected_output)\n if type(expected_output) is not float:\n raise Exception(\"Invalid type for expected_output !\")\n self.prepare(size)\n\n\n def training_prediction(self):\n return self.tr_cache_predicted\n\n\n def training_expectation(self):\n return self.tr_cache_expected\n\n\n def training_inputs(self):\n return self.tr_cache_inputs\n\n\n def handle_observers(self, it):\n for observer in self.observers:\n observer.observe(self, it)\n\n\n def batch_train(self, input_data, expected_output):\n # training with 1 iteration\n return self.train_iters(1, input_data, expected_output)\n\n\n def train_iters(self, iters, input_data, expected_output, cb=None):\n if iters <= 0:\n return\n\n dim_count = self.dim_count(input_data)\n if dim_count == 1:\n return self.train(input_data, expected_output)\n if dim_count != 2:\n raise Exception(\"Invalid dimensions for input_data !\")\n size_1d, size_2d = self.size_of_1st_dim(input_data), self.size_of_2nd_dim(input_data)\n if self.dim_count(expected_output) != 1 or self.size_of_1st_dim(expected_output) != size_1d:\n raise Exception(\"Invalid dimensions for expected_output !\")\n self.prepare(size_2d)\n\n if self.optimizer is None:\n raise Exception(\"Optimizer not specified !\")\n \n # Before training\n self.tr_cache_predicted = self.batch_predict(input_data)\n self.tr_cache_expected = expected_output\n self.tr_cache_inputs = input_data\n self.handle_observers(0)\n if cb is not None:\n cb(self, 0)\n\n for i in range(iters):\n self.state[\"w\"], self.state[\"b\"] = self.optimizer.optimize(self, self.state[\"w\"], self.state[\"b\"])\n self.tr_cache_predicted = self.batch_predict(input_data)\n self.handle_observers(i+1)\n if cb is not None:\n cb(self, i)\n\n\n\n","repo_name":"be-thomas/darkai","sub_path":"darkai/supervised/perceptron.py","file_name":"perceptron.py","file_ext":"py","file_size_in_byte":6307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14837947506","text":"'''\nDISCLAIMER OF WARRANTIES:\nPermission is granted to copy 
this Tools or Sample code for internal use only, provided that this\npermission notice and warranty disclaimer appears in all copies.\n\nTHIS TOOLS OR SAMPLE CODE IS LICENSED TO YOU AS-IS.\nIBM AND ITS SUPPLIERS AND LICENSORS DISCLAIM ALL WARRANTIES, EITHER EXPRESS OR IMPLIED, IN SUCH SAMPLE CODE,\nINCLUDING THE WARRANTY OF NON-INFRINGEMENT AND THE IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A\nPARTICULAR PURPOSE. IN NO EVENT WILL IBM OR ITS LICENSORS OR SUPPLIERS BE LIABLE FOR ANY DAMAGES ARISING\nOUT OF THE USE OF OR INABILITY TO USE THE TOOLS OR SAMPLE CODE, DISTRIBUTION OF THE TOOLS OR SAMPLE CODE,\nOR COMBINATION OF THE TOOLS OR SAMPLE CODE WITH ANY OTHER CODE. IN NO EVENT SHALL IBM OR ITS LICENSORS AND\nSUPPLIERS BE LIABLE FOR ANY LOST REVENUE, LOST PROFITS OR DATA, OR FOR DIRECT, INDIRECT, SPECIAL,\nCONSEQUENTIAL,INCIDENTAL OR PUNITIVE DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,\nEVEN IF IBM OR ITS LICENSORS OR SUPPLIERS HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.\n'''\nimport requests, json\nfrom src.common.functions import Color as Color\n\nclass DeleteSecretCMS:\n region = ''\n\n\n def __init__(self, cluster_id, cis_domain, cert_manager_crn, cert_name, token):\n self.cluster_id = cluster_id\n self.cis_domain = cis_domain\n self.cert_manager_crn = cert_manager_crn\n self.cert_name = cert_name\n self.token = token\n\n def delete_secret(self):\n url = \"https://containers.cloud.ibm.com/global/ingress/v2/secret/deleteSecret\"\n headers = {\n 'Authorization': 'Bearer ' + self.token\n }\n payload = json.dumps({\n \"cluster\": self.cluster_id,\n \"delete_cert\": True,\n \"name\": self.cert_name,\n \"namespace\": \"default\"\n })\n response = requests.request(\"POST\", url, headers=headers, data=payload)\n print(\"Started delete process for secret in IKS cluster. 
Check your kubernetes dashboard for progress\")\n \n\n def delete_cms_cert(self):\n cert_id = self.check_certificate()\n if not cert_id is None:\n url_cert_id = self.URLify(cert_id)\n url = f\"https://{self.region}.certificate-manager.cloud.ibm.com/api/v2/certificate/{url_cert_id}\"\n\n payload={}\n headers = {\n 'Authorization': 'Bearer ' + self.token\n }\n\n response = requests.request(\"DELETE\", url, headers=headers, data=payload)\n if response.status_code == 200:\n\n print(Color.GREEN + \"SUCCESS: Certificate successfully deleted\" + Color.END)\n else:\n print(Color.RED + \"ERROR: Failed to remove certificate from Certificate Manager\" + Color.END)\n else:\n print(Color.RED + \"ERROR: Failed to find certificate in Certificate Manager\" + Color.END)\n\n def check_certificate(self):\n url_cert_man_crn = self.URLify(self.cert_manager_crn)\n try:\n self.region = self.cert_manager_crn.split(\":\")[5]\n except:\n print(Color.RED+\"ERROR: CRN provided not in correct format\"+Color.END)\n exit(1)\n\n cert_check_url = f\"https://{self.region}.certificate-manager.cloud.ibm.com/api/v3/{url_cert_man_crn}/certificates/\"\n cert_check_headers = {\n \"Authorization\": 'Bearer ' + self.token\n }\n\n # Gets all certificates previously present in the certificate manager\n cert_check_response = requests.request(\n \"GET\", url=cert_check_url, headers=cert_check_headers)\n # print(cert_check_response.text)\n # If a valid certificate exists, it returns the CRN of that certificate\n if cert_check_response.status_code == 200:\n for cert in cert_check_response.json()[\"certificates\"]:\n if self.cis_domain in cert[\"domains\"] and (\"*.\" + self.cis_domain) in cert[\"domains\"] and cert[\"name\"] == self.cert_name:\n print(\n \"Certificate found in certificate manager\")\n return cert[\"_id\"]\n return None\n\n # Converts the certificate manager CRN into a URL-encoded CRN\n def URLify(self, replacement_str):\n new_string = replacement_str.replace(\":\", \"%3A\")\n return new_string.replace(\"/\", \"%2F\")","repo_name":"IBM/cis-integration","sub_path":"src/iks/delete_secret_cert.py","file_name":"delete_secret_cert.py","file_ext":"py","file_size_in_byte":4338,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"86553974930","text":"\"\"\"\nNAME\n model_hybrid\nDESCRIPTION\n This module provides access to functions that train and evaluate\n models using hybrid filtering.\nFUNCTIONS\n get_users_features_tuple(user)\n get_items_features_tuple(item, categories)\n train_model(df, user_id_col, item_id_col, item_name_col, evaluate)\n Return the trained model, dataset with user-item interactions,\n user dictionary and item dictionary.\n evaluate_model(df, user_id_col, item_id_col)\n Return the auc-roc score of the training and testing sets.\n\"\"\"\n\nimport pandas as pd\nfrom lightfm import LightFM\nfrom lightfm.evaluation import auc_score\nfrom sklearn.model_selection import train_test_split\nfrom lightfm.data import Dataset\n\n\ndef get_users_features_tuple(user):\n \"\"\" Get user's feature and return a tuple\n Arg:\n user: the user line in the dataframe\n Returns:\n Tuple(user id string, dict{user feature: feature value})\n \"\"\"\n\n return (user[0], {'average_stars': float(user[5])})\n\n\ndef get_items_features_tuple(item, categories):\n \"\"\" Get the item's feature and return a tuple\n Arg:\n item: one item line in the dataframe\n categories: item features list\n Returns:\n Tuple(item id string, dict{item feature: feature value})\n \"\"\"\n\n 
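    # `item` is a raw dataframe row (a numpy record): index 1 holds the
    # business id and indices 10 onward hold the per-category feature values
    # unpacked below, with `categories` supplying the matching column names.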
item.tolist()\n item_id = item[1]\n features_ind = item[10:]\n features = {}\n for i in range(len(features_ind)):\n features[categories[i]] = float(features_ind[i])\n return (item_id, features)\n\n\ndef train_model(\n df, user_id_col='user_id', item_id_col='business_id',\n item_name_col='name_business', evaluate=True):\n \"\"\" Train the model using collaborative filtering.\n Args:\n df: the input dataframe.\n user_id_col: user id column.\n item_id_col: item id column.\n item_name_col: item name column.\n evaluate: if evaluate the model performance.\n Returns:\n model_full: the trained model.\n df_interactions: dataframe with user-item interactions.\n user_dict: user dictionary containing user_id as key and\n interaction_index as value.\n item_dict: item dictionary containing item_id as key and\n item_name as value.\n user_feature_map: the feature map of users\n business_feature_map: the feature map of items\n \"\"\"\n if evaluate:\n print('Evaluating model...')\n evaluate_model(df, user_id_col='user_id', item_id_col='business_id')\n print('Training model...')\n\n # build recommendations for known users and known businesses\n # with collaborative filtering method\n ds_full = Dataset()\n # we call fit to supply userid, item id and user/item features\n user_cols = ['user_id', 'average_stars']\n categories = [c for c in df.columns if c[0].isupper()]\n item_cols = ['business_id', 'state']\n\n for i in df.columns[10:]:\n item_cols.append(str(i))\n\n user_features = user_cols[1:]\n item_features = item_cols[2:]\n\n ds_full.fit(\n df[user_id_col].unique(), # all the users\n df[item_id_col].unique(), # all the items\n user_features=user_features, # additional user features\n item_features=item_features\n )\n\n df_users = df.drop_duplicates(user_id_col)\n # df_users = df[df.duplicated(user_id_col) == False]\n users_features = []\n for i in range(len(df_users)):\n users_features.append(get_users_features_tuple(df_users.values[i]))\n users_features = ds_full.build_user_features(\n users_features, normalize=False)\n\n items = df.drop_duplicates(item_id_col)\n # items = df[df.duplicated(item_id_col) == False]\n items_features = []\n for i in range(len(items)):\n items_features.append(get_items_features_tuple(\n items.values[i], categories))\n items_features = ds_full.build_item_features(\n items_features, normalize=False)\n\n (interactions, weights) = ds_full.build_interactions(\n [(x[0], x[1], x[2]) for x in df.values])\n # model\n model_full = LightFM(\n no_components=100, learning_rate=0.05, loss='warp', max_sampled=50)\n model_full.fit(\n interactions, user_features=users_features,\n item_features=items_features, sample_weight=weights,\n epochs=10, num_threads=10)\n # mapping\n user_id_map, user_feature_map, business_id_map, business_feature_map = \\\n ds_full.mapping()\n\n # data preparation\n df_interactions = pd.DataFrame(weights.todense())\n df_interactions.index = list(user_id_map.keys())\n df_interactions.columns = list(business_id_map.keys())\n user_dict = user_id_map\n item_dict = df.set_index(item_id_col)[item_name_col].to_dict()\n return model_full, df_interactions, user_dict, \\\n item_dict, user_feature_map, business_feature_map\n\n\ndef evaluate_model(\n df, user_id_col='user_id',\n item_id_col='business_id', stratify=None):\n \"\"\" Model evaluation.\n Args:\n df: the input dataframe.\n user_id_col: user id column.\n item_id_col: item id column.\n stratify: if use stratification.\n No return value\n \"\"\"\n # create test and train datasets\n print('model evaluation')\n train, test = 
train_test_split(df, test_size=0.2, stratify=stratify)\n ds = Dataset()\n # we call fit to supply userid, item id and user/item features\n user_cols = ['user_id', 'average_stars']\n categories = [c for c in df.columns if c[0].isupper()]\n item_cols = ['business_id', 'state']\n\n for i in df.columns[10:]:\n item_cols.append(str(i))\n\n user_features = user_cols[1:]\n item_features = item_cols[2:]\n\n ds.fit(\n df[user_id_col].unique(), # all the users\n df[item_id_col].unique(), # all the items\n user_features=user_features, # additional user features\n item_features=item_features\n )\n\n train_users = train.drop_duplicates('user_id')\n # train_users = train[train.duplicated('user_id') == False]\n train_user_features = []\n for i in range(len(train_users)):\n train_user_features.append(get_users_features_tuple(\n train_users.values[i]))\n train_user_features = ds.build_user_features(\n train_user_features, normalize=False)\n\n test_users = test.drop_duplicates('user_id')\n # test_users = test[test.duplicated('user_id') == False]\n test_user1_features = []\n for i in range(len(test_users)):\n test_user1_features.append(get_users_features_tuple(\n test_users.values[i]))\n test_user_features = ds.build_user_features(\n test_user1_features, normalize=False)\n\n train_items = train.drop_duplicates('business_id')\n # train_items = train[train.duplicated('business_id') == False]\n train_item1_features = []\n for i in range(len(train_items)):\n train_item1_features.append(get_items_features_tuple(\n train_items.values[i], categories))\n train_item_features = ds.build_item_features(\n train_item1_features, normalize=False)\n\n test_items = test.drop_duplicates('business_id')\n # test_items = test[test.duplicated('business_id') == False]\n test_item_features = []\n for i in range(len(test_items)):\n test_item_features.append(get_items_features_tuple(\n test_items.values[i], categories))\n test_item_features = ds.build_item_features(\n test_item_features, normalize=False)\n\n # plugging in the interactions and their weights\n (train_interactions, train_weights) = ds.build_interactions(\n [(x[0], x[1], x[2]) for x in train.values])\n (test_interactions, test_weights) = ds.build_interactions(\n [(x[0], x[1], x[2]) for x in test.values])\n\n # model\n model = LightFM(\n no_components=100, learning_rate=0.05, loss='warp', max_sampled=50)\n model.fit(\n train_interactions, user_features=train_user_features,\n item_features=train_item_features, sample_weight=train_weights,\n epochs=10, num_threads=10)\n\n # auc-roc\n train_auc = auc_score(\n model, train_interactions, user_features=train_user_features,\n item_features=train_item_features, num_threads=20).mean()\n print('Training set AUC: %s' % train_auc)\n test_auc = auc_score(\n model, test_interactions, user_features=test_user_features,\n item_features=test_item_features, num_threads=20).mean()\n print('Testing set AUC: %s' % test_auc)\n","repo_name":"RH5648/yelpify","sub_path":"yelpify/model_hybrid.py","file_name":"model_hybrid.py","file_ext":"py","file_size_in_byte":8403,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"19432411014","text":"from datetime import datetime, timedelta\r\nimport logging\r\nimport re\r\nimport sys\r\nimport time\r\n\r\nfrom lib import arginfo\r\nfrom lib import common\r\nfrom lib import global_config\r\n\r\n\r\n# Namespace for options.\r\nns = \"subrip.\"\r\n\r\n# Whether dest_file arg is used.\r\nuses_dest_file = True\r\n\r\n# Names of lib.subsystem modules that 
should be set up in advance.\r\nrequired_subsystems = []\r\n\r\n\r\ndef get_description():\r\n return \"Writes snarks as SubRip subtitles.\"\r\n\r\ndef get_arginfo():\r\n args = []\r\n args.append(arginfo.Arg(name=\"include_names\", type=arginfo.BOOLEAN,\r\n required=False, default=True, choices=[True,False], multiple=False,\r\n description=\"Boolean to prepend each snark msg with user.\\nDefault is True.\"))\r\n return args\r\n\r\ndef write_snarks(dest_file, snarks, show_time, options={}, keep_alive_func=None, sleep_func=None):\r\n \"\"\"Writes snarks as SubRip subtitles.\r\n\r\n :param dest_file: A binary-mode file-like object to write into.\r\n :param snarks: A list of processed snark dicts.\r\n :param show_time: Timedelta duration each msg appears on-screen.\r\n :param options: A dict of extra options specific to this exporter.\r\n include_names (optional):\r\n Boolean to prepend each snark msg with user.\r\n Default is True.\r\n :param keep_alive_func: Optional replacement to get an abort boolean.\r\n :param sleep_func: Optional replacement to sleep N seconds.\r\n \"\"\"\r\n if (keep_alive_func is None): keep_alive_func = global_config.keeping_alive\r\n if (sleep_func is None): sleep_func = global_config.nap\r\n\r\n include_names = True\r\n if (ns+\"include_names\" in options and not options[ns+\"include_names\"]):\r\n include_names = False\r\n\r\n srt_index = 0\r\n\r\n palette_start = srt_delta_str(timedelta(seconds=1))\r\n palette_end = srt_delta_str(timedelta(seconds=1) + show_time)\r\n palette_msg = \"\"\r\n\r\n unique_colors = list(set([x[\"color\"] for x in snarks if (\"color\" in x)]))\r\n if (len(unique_colors) > 0):\r\n for c in unique_colors:\r\n palette_msg += color_message(\"#\", c)\r\n\r\n if (len(palette_msg) > 0):\r\n srt_index += 1\r\n dest_file.write(str(srt_index) +\"\\r\\n\")\r\n dest_file.write(palette_start +\" --> \"+ palette_end +\"\\r\\n\")\r\n dest_file.write(palette_msg +\"\\r\\n\")\r\n dest_file.write(\"\\r\\n\")\r\n\r\n for snark in snarks:\r\n srt_start = srt_delta_str(snark[\"time\"])\r\n srt_end = srt_delta_str(snark[\"time\"] + show_time)\r\n srt_msg = snark[\"msg\"]\r\n\r\n # SubRip tolerates multiple lines, but not blank lines.\r\n srt_msg = re.sub(\"\\r\", \"\", srt_msg)\r\n srt_msg = re.sub(\"\\n\\n+\", \"\\n\", srt_msg)\r\n\r\n # Remove empty space and links.\r\n srt_msg = re.sub(\"^ +\", \"\", srt_msg)\r\n srt_msg = re.sub(\" *\\n *\", \"\\n\", srt_msg)\r\n srt_msg = srt_msg.rstrip(\" \\n\")\r\n srt_msg = re.sub(\" *https?://[^ ]+\", \"\", srt_msg)\r\n\r\n if (include_names is True):\r\n srt_msg = \"%s: %s\" % (snark[\"user\"].replace(\"@\",\"\"), srt_msg)\r\n\r\n srt_msg = re.sub(\"\\n\", \"\\r\\n\", srt_msg) # Reintroduce CR's.\r\n\r\n if (\"color\" in snark and snark[\"color\"] is not None):\r\n srt_msg = color_message(srt_msg, snark[\"color\"])\r\n\r\n srt_index += 1\r\n dest_file.write(str(srt_index) +\"\\r\\n\")\r\n dest_file.write(srt_start +\" --> \"+ srt_end +\"\\r\\n\")\r\n dest_file.write(srt_msg +\"\\r\\n\")\r\n dest_file.write(\"\\r\\n\")\r\n\r\n\r\ndef srt_delta_str(delta):\r\n \"\"\"Formats a timedelta as an srt string.\r\n A millisecond suffix is appended to make\r\n SubRip happy.\r\n\r\n :return: The string.\r\n \"\"\"\r\n return (\"%s,000\" % common.delta_str(delta))\r\n\r\n\r\ndef color_message(text, color):\r\n \"\"\"Wraps a string with an html/srt FONT tag of the given color.\r\n\r\n :param text: A snark message.\r\n :param color: An RGB float tuple (value range: 0.0-1.0).\r\n :return: The wrapped string.\r\n \"\"\"\r\n text = 
\"%s\" % (common.rgb_to_hex(color), text)\r\n return text\r\n","repo_name":"Vhati/CompileSubs","sub_path":"lib/exporters/subrip.py","file_name":"subrip.py","file_ext":"py","file_size_in_byte":3857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2636892986","text":"\"\"\"\nЭтот файл использовался для тестов функций и операций\nпри разработке.\n\nИгнорируйте этот файл\n\"\"\"\n\n\nimport random\n\ndef num_of_nonzero(srclist): #Function - finds number of non-zero elemnts in number list\n return len([srclist[i] for i in range(len(srclist)) if srclist[i] > 0])\n\ndef cardprint(numlist, allnums, title):\n print(title.center(36, '='))\n for row in range(len(numlist)):\n for col in range(len(numlist[row])):\n if numlist[row][col] == 0:\n print(' ', end='')\n else:\n print(' -- ' if (numlist[row][col] in allnums) else f' {numlist[row][col]:2d} ', end='')\n print('')\n print('='*36)\n#create one loto card as a list\nMaxRows=3\nMaxCol=9\nnumbers = [0] * MaxRows\nfor i in range(MaxRows):\n numbers[i]=[0]*MaxCol\n\n\n#get list of all numbers for loto\nallnums = [num for num in range(1,91)]\n\n#prepare one loto card - test of random picking from number list\nfor i in range(15):\n rndint = random.randint(1, 90)\n while allnums[rndint-1] <= 0:\n rndint = random.randint(1, 90)\n allnums[rndint-1] = 0\n print(rndint)\n\n\n # calc best column\n free_col = -1\n bestcol = (rndint - 1) // 10\n print(f'bestcol {bestcol}')\n\n for shiftcol in range(MaxCol):\n nextcol = bestcol + shiftcol if (bestcol + shiftcol) < MaxCol else bestcol\n prevcol = bestcol - shiftcol if (bestcol - shiftcol) > 0 else bestcol\n print(f'nextcol {nextcol}; prevcol {prevcol}')\n # find best row - where minimum fields has been filled\n bestrow = 0\n num_of_nz = 5\n for i in range(MaxRows):\n if num_of_nonzero(numbers[i]) < num_of_nz and numbers[i][nextcol] == 0:\n num_of_nz = num_of_nonzero(numbers[i])\n bestrow = i\n free_col = nextcol\n if free_col < 0:\n bestrow = 0\n num_of_nz = 5\n for i in range(MaxRows):\n if num_of_nonzero(numbers[i]) < num_of_nz and numbers[i][prevcol] == 0:\n num_of_nz = num_of_nonzero(numbers[i])\n bestrow = i\n free_col = prevcol\n print(f'bestrow {bestrow}')\n if free_col >= 0:\n print(f'freecol {free_col}')\n break\n #worst case - no suitable place found\n\n numbers[bestrow][free_col] = rndint\n\n\n\n\n\ncardprint(numbers, allnums, 'Player1')\n\nprint(allnums)\nprint(num_of_nonzero(allnums))\n\n","repo_name":"Paul-Roger/Lesson9_Loto","sub_path":"TestOnly.py","file_name":"TestOnly.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31961841840","text":"# -*- coding: utf-8 -*-\n# @Time : 2021/1/13 下午3:53\n# @Author : Hanxun Yu\n# @Email : \n# @File : char_table.py\n# @Software: PyCharm\n\nimport string\nimport numpy as np\n\n\nclass CharTable:\n # CharNumMap = {}\n # NumCharMap = {}\n alphabet = {}\n\n def __init__(self):\n CharTable.alphabet = string.ascii_uppercase\n # for index in range(0, 26):\n # CharTable.CharNumMap[chr(ord('a') + index)] = index\n # CharTable.NumCharMap[index] = chr(ord('a') + index)\n\n def encode_char(self, c: str) -> int:\n return CharTable.alphabet.index(c.upper())\n\n def decode_to_char(self, index: int) -> str:\n return CharTable.alphabet[index]\n\n def encode_string(self, s: str) -> list:\n ret = []\n for index, c in enumerate(s):\n if c == ' ':\n continue\n ret.append(self.encode_char(c))\n return ret\n\n def 
decode_to_string(self, arr: list or np.ndarray) -> str:\n ret = ''\n for i, cIndex in enumerate(arr):\n ret += (self.decode_to_char(cIndex))\n return ret\n\n def decode_to_charArr(self, arr: list) -> list:\n ret = []\n for i, cIndex in enumerate(arr):\n ret.append(self.decode_to_char(cIndex))\n return ret\n\n\nif __name__ == \"__main__\":\n ct = CharTable()\n # print(\"CharNumMap:\", ct.CharNumMap)\n # print(\"NumCharMap:\", ct.NumCharMap)\n print(str(ct.alphabet))\n print(str(ct.encode_char('c')))\n print(str(ct.decode_to_char(3)))\n\n print(ct.encode_string(\"abcde fjhdhs\"))\n\n # print(ct.decode_to_string([0, 1, 2]))\n # print(ct.decode_to_charArr([0, 1, 2]))\n","repo_name":"Hanxun-Yu/MSE-ECNU-py","sub_path":"InformationSecurityAndTechnology/util/char_table.py","file_name":"char_table.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23623667731","text":"\r\n\t\r\ndef read_data(filename):\r\n\tfile = open(filename)\r\n\tx = int(file.readline())\r\n\tdata = []\r\n\tfor i in range(x):\r\n\t\tcase = file.readline().split()\r\n\t\tfor x in range(3):\r\n\t\t\tcase[x] = int(case[x])\r\n\t\tdata.append(case)\r\n\t\t\r\n\treturn data\r\n\t\r\ndef problem(case):\r\n\ttest = 0\r\n\ttoday = []\r\n\tfor possible in range(1,case[0]+1):\r\n\t\tif (possible*(float(case[1])/100))%1 == 0:\r\n\t\t\ttest = 1\r\n\t\t\ttoday.append((possible*(float(case[1])/100), possible))\r\n\t\r\n\tif test == 0:\r\n\t\treturn False\r\n\t\r\n\tif case[2] == 100 and case[1] < 100:\r\n\t\treturn False\r\n\tif case[2] == 0 and case[1] > 0:\r\n\t\treturn False\r\n\t\t\r\n\treturn True\r\n\t\r\ndef doit(filename):\r\n\tdata = read_data(filename)\r\n\tfile = open('output.out','w')\r\n\tcase_num = 1\r\n\tfor case in data:\r\n\t\tif problem(case):\r\n\t\t\tfile.write('Case #%s: Possible\\n' % case_num)\r\n\t\telse:\r\n\t\t\tfile.write('Case #%s: Broken\\n' % case_num)\r\n\t\tcase_num += 1\r\n\t\t\r\n\t\r\n\t\r\n\t\r\n\t","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_78/305.py","file_name":"305.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35434274074","text":"def _async_call(fn):\n \"\"\"\n 内部异步调用\n :param fn:\n :return:\n \"\"\"\n import threading\n\n def wrapper(*args, **kwargs):\n threading.Thread(target=fn, args=args, kwargs=kwargs).start()\n\n return wrapper\n\n\nclass RabbitConsumer:\n \"\"\"\n RabbitMQ Simple Consumer\n \"\"\"\n\n def __init__(self, host: str, username: str, password: str, port: int = 5672, virtual_host: str = '/', **kwargs):\n self.password = password\n self.username = username\n self.port = port\n self.host = host\n self.virtual_host = virtual_host\n self.props = kwargs\n\n def __get_conn(self):\n import pika\n return pika.BlockingConnection(\n pika.ConnectionParameters(virtual_host=self.virtual_host, host=self.host, port=self.port,\n credentials=pika.PlainCredentials(self.username, self.password), **self.props))\n\n def registerQueue(self, queue: str, durable: bool = True):\n \"\"\"\n 绑定队列\n :param queue:\n :param durable:\n :return:\n \"\"\"\n if queue:\n mq_conn = self.__get_conn()\n mq_channel = mq_conn.channel()\n mq_channel.queue_declare(queue=queue, durable=durable)\n mq_channel.close()\n mq_conn.close()\n\n def registerExchange(self, exchange: str, exchange_type: str = 'direct', durable: bool = True):\n \"\"\"\n 绑定/申明交换机\n :param exchange:交换机名称\n 
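        (declares an exchange on the broker; the arguments above and below
        map directly onto pika's exchange_declare)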
:param exchange_type: 'direct','fanout','headers','topic'\n :param durable:支持化\n :return:\n \"\"\"\n if exchange:\n assert exchange_type in ['direct', 'fanout', 'headers', 'topic']\n mq_conn = self.__get_conn()\n mq_channel = mq_conn.channel()\n mq_channel.exchange_declare(exchange=exchange, exchange_type=exchange_type, durable=durable)\n mq_channel.close()\n mq_conn.close()\n\n def bind(self, exchange: str, queue: str, routing_key=None, arguments=None):\n \"\"\"\n 绑定队列和交换机\n :param exchange:交换机\n :param queue:队列\n :param routing_key: 路由key\n :param arguments:key、value\n :return:\n \"\"\"\n if exchange and queue:\n mq_conn = self.__get_conn()\n mq_channel = mq_conn.channel()\n mq_channel.queue_bind(queue=queue, exchange=exchange, routing_key=routing_key, arguments=arguments)\n mq_channel.close()\n mq_conn.close()\n\n @_async_call\n def onListener(self, queue: str, callback, auto_ack: bool = False, prefetch_count: int = 10, durable: bool = True,\n **kwargs):\n \"\"\"\n :param queue: 队列名\n :param callback: 回调的函数对象\n :param auto_ack: 自动ack\n :param durable: 队列不存在时创建队列,持久化\n :param prefetch_count: 预取消息数量\n :return:\n \"\"\"\n assert queue is not None\n\n import pika\n def __on_message_callback(channel: pika.adapters.blocking_connection.BlockingChannel,\n deliver: pika.spec.Basic.Deliver,\n props: pika.spec.BasicProperties, message):\n\n print(\n f\"Consumer({deliver.consumer_tag})接收到[exchange={deliver.exchange}|queue={deliver.routing_key}|tag={deliver.delivery_tag}]的消息:{message}\")\n\n if auto_ack:\n return callback(message, props=props, deliver=deliver, channel=channel)\n try:\n callback(message, props=props, deliver=deliver, channel=channel)\n except Exception:\n channel.basic_nack(delivery_tag=deliver.delivery_tag)\n raise\n else:\n channel.basic_ack(delivery_tag=deliver.delivery_tag)\n\n mq_conn = self.__get_conn()\n mq_channel = mq_conn.channel()\n mq_channel.basic_qos(prefetch_count=prefetch_count)\n # 消费者创建队列,生产者创建交换机\n mq_channel.queue_declare(queue, durable=durable, **kwargs)\n mq_channel.basic_consume(\n on_message_callback=__on_message_callback,\n queue=queue,\n auto_ack=auto_ack,\n **kwargs\n )\n # 开始接收(将数据放入回调函数开始执行)\n print(f\"Start listener queue : {queue} , ack = {auto_ack} !\")\n mq_channel.start_consuming()\n # 关闭资源\n mq_channel.close()\n mq_conn.close()\n\n\nclass RabbitProducer:\n \"\"\"\n RabbitMQ Simple Producer\n \"\"\"\n\n def __init__(self, host: str, username: str, password: str, port: int = 5672, virtual_host: str = '/',\n prefetch_count: int = 10,\n **kwargs) -> None:\n self.__prefetch_count = prefetch_count\n\n # 获取与rabbitmq 服务的连接\n import pika\n self.__connection = pika.BlockingConnection(\n pika.ConnectionParameters(virtual_host=virtual_host, host=host, port=port,\n credentials=pika.PlainCredentials(username, password)), **kwargs)\n self.__channel = self.__init_channel()\n\n def __init_channel(self):\n mq_channel = self.__connection.channel()\n # prefetch_count: 通道的最大容量\n mq_channel.basic_qos(prefetch_count=self.__prefetch_count)\n return mq_channel\n\n def _get_channel(self):\n if self.__channel:\n return self.__channel\n else:\n self.__channel = self.__init_channel()\n return self.__channel\n\n def close(self):\n \"\"\"\n 关闭资源\n :return:\n \"\"\"\n if self.__channel:\n self.__channel.close()\n if self.__connection:\n self.__connection.close()\n\n def registerQueue(self, queue: str, durable: bool = True):\n \"\"\"\n 绑定队列\n :param queue:\n :param durable:\n :return:\n \"\"\"\n if queue:\n mq_channel = self._get_channel()\n mq_channel.queue_declare(queue=queue, 
durable=durable)\n\n def registerExchange(self, exchange: str, exchange_type: str = 'direct', durable: bool = True):\n \"\"\"\n 绑定/申明交换机\n :param exchange:交换机名称\n :param exchange_type: 'direct','fanout','headers','topic'\n :param durable:支持化\n :return:\n \"\"\"\n if exchange:\n assert exchange_type in ['direct', 'fanout', 'headers', 'topic']\n mq_channel = self._get_channel()\n mq_channel.exchange_declare(exchange=exchange, exchange_type=exchange_type, durable=durable)\n\n def bind(self, exchange: str, queue: str, routing_key='#', arguments=None):\n \"\"\"\n 绑定队列和交换机\n :param exchange:交换机\n :param queue:队列\n :param routing_key:路由key\n :param arguments:key、value\n :return:\n \"\"\"\n if exchange and queue and len(exchange) > 0:\n mq_channel = self._get_channel()\n mq_channel.queue_bind(queue=queue, exchange=exchange, routing_key=routing_key, arguments=arguments)\n\n\n def send(self, message, exchange: str = None, routing_key: str = '#',\n delivery_mode: int = 2,\n mandatory: bool = False,\n **kwargs):\n \"\"\"\n 发送消息\n \"\"\"\n if exchange is None:\n exchange = ''\n assert routing_key is not None\n\n import pika\n mq_channel = self._get_channel()\n return mq_channel.basic_publish(\n exchange=exchange,\n routing_key=routing_key,\n body=message.encode(),\n properties=pika.BasicProperties(delivery_mode=delivery_mode, **kwargs),\n mandatory=mandatory\n )\n\n def sendExchange(self, message, exchange, routing_key: str = '#', **kwargs):\n \"\"\"\n 发送消息\n \"\"\"\n assert message is not None\n assert exchange is not None\n return self.send(message=message, exchange=exchange, routing_key=routing_key, **kwargs)\n\n def sendQueue(self, message, queue, **kwargs):\n \"\"\"\n 发送消息\n \"\"\"\n assert message is not None\n assert queue is not None\n return self.send(message=message, routing_key=queue, **kwargs)\n\n def putExchange(self, message, exchange, routing_key: str = '#', **kwargs):\n \"\"\"\n 发送消息\n \"\"\"\n assert message is not None\n assert exchange is not None\n return self.send(message=message, exchange=exchange, routing_key=routing_key, **kwargs)\n\n def putQueue(self, message, queue, **kwargs):\n \"\"\"\n 发送消息\n \"\"\"\n assert message is not None\n assert queue is not None\n return self.send(message=message, routing_key=queue, **kwargs)\n","repo_name":"kancyframework/python-plugins","sub_path":"rabbitplus/rabbitplus.py","file_name":"rabbitplus.py","file_ext":"py","file_size_in_byte":8926,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"25597834525","text":"import argparse\nimport logging\nimport pickle\nimport typing as tp\n\nimport networkx as nx\nimport osmnx as ox\nimport yaml\nfrom config import GRAPHML_PICKLED_DATA_DIR, GRAPHML_TEST_DATA_DIR, GRAPHML_TRAIN_DATA_DIR, GRAPHML_VALIDATION_DATA_DIR\nfrom encoder import GAE\nfrom torch import Tensor\nfrom tqdm import tqdm\n\n\ndef build_args():\n parser = argparse.ArgumentParser(description=\"GAT\")\n parser.add_argument(\"--seeds\", type=int, nargs=\"+\", default=[2137])\n parser.add_argument(\"--dataset\", type=str, default=\"bikeguessr\")\n parser.add_argument(\"--device\", type=int, default=-1)\n parser.add_argument(\"--max_epoch\", type=int, default=200,\n help=\"number of training epochs\")\n parser.add_argument(\"--warmup_steps\", type=int, default=-1)\n\n parser.add_argument(\"--num_heads\", type=int, default=4,\n help=\"number of hidden attention heads\")\n parser.add_argument(\"--num_out_heads\", type=int, default=1,\n help=\"number of output attention heads\")\n 
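The producer/consumer pair above are thin wrappers over pika's `BlockingConnection`. A minimal end-to-end wiring sketch; the host, credentials and queue name are placeholders, and the callback signature mirrors what `onListener` passes through:

```python
# Placeholders throughout: host, credentials and queue are illustrative.
producer = RabbitProducer(host="localhost", username="guest", password="guest")
producer.registerQueue("demo.queue")  # declare the durable queue once
producer.sendQueue('{"hello": "world"}', queue="demo.queue")
producer.close()

def on_message(message, props=None, deliver=None, channel=None):
    # message arrives as raw bytes from pika; with auto_ack=False (default),
    # onListener acks or nacks for us depending on whether this raises.
    print("received:", message.decode())

consumer = RabbitConsumer(host="localhost", username="guest", password="guest")
# onListener is wrapped by _async_call, so this call returns immediately and
# the blocking consume loop runs on a background thread.
consumer.onListener("demo.queue", on_message)
```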
parser.add_argument(\"--num_layers\", type=int, default=2,\n help=\"number of hidden layers\")\n parser.add_argument(\"--num_hidden\", type=int, default=256,\n help=\"number of hidden units\")\n parser.add_argument(\"--num_features\", type=int, default=95,\n help=\"number of features in dataset\")\n parser.add_argument(\"--residual\", action=\"store_true\", default=False,\n help=\"use residual connection\")\n parser.add_argument(\"--in_drop\", type=float, default=.2,\n help=\"input feature dropout\")\n parser.add_argument(\"--attn_drop\", type=float, default=.1,\n help=\"attention dropout\")\n parser.add_argument(\"--norm\", type=str, default=None)\n parser.add_argument(\"--lr\", type=float, default=0.005,\n help=\"learning rate\")\n parser.add_argument(\"--weight_decay\", type=float, default=5e-4,\n help=\"weight decay\")\n parser.add_argument(\"--negative_slope\", type=float, default=0.2,\n help=\"the negative slope of leaky relu for GAT\")\n parser.add_argument(\"--activation\", type=str, default=\"prelu\")\n parser.add_argument(\"--mask_rate\", type=float, default=0.5)\n parser.add_argument(\"--drop_edge_rate\", type=float, default=0.0)\n parser.add_argument(\"--replace_rate\", type=float, default=0.0)\n\n parser.add_argument(\"--encoder\", type=str, default=\"gat\")\n parser.add_argument(\"--decoder\", type=str, default=\"gat\")\n parser.add_argument(\"--loss_fn\", type=str, default=\"sce\")\n parser.add_argument(\"--alpha_l\", type=float, default=2,\n help=\"`pow`inddex for `sce` loss\")\n parser.add_argument(\"--optimizer\", type=str, default=\"adam\")\n\n parser.add_argument(\"--max_epoch_f\", type=int, default=30)\n parser.add_argument(\"--lr_f\", type=float, default=0.001,\n help=\"learning rate for evaluation\")\n parser.add_argument(\"--weight_decay_f\", type=float,\n default=0.0, help=\"weight decay for evaluation\")\n parser.add_argument(\"--linear_prob\", action=\"store_true\", default=False)\n\n parser.add_argument(\"--load_model\", action=\"store_true\")\n parser.add_argument(\"--save_model\", action=\"store_true\")\n parser.add_argument(\"--use_cfg\", action=\"store_true\")\n parser.add_argument(\"--logging\", action=\"store_true\")\n parser.add_argument(\"--scheduler\", action=\"store_true\", default=False)\n parser.add_argument(\"--concat_hidden\", action=\"store_true\", default=False)\n parser.add_argument(\"--path\", type=str,\n default='./data_transformed/bikeguessr.bin')\n parser.add_argument(\"--eval_epoch\", type=int, default=10)\n parser.add_argument(\"--eval_repeats\", type=int, default=5)\n parser.add_argument(\"--transform\", action=\"store_true\")\n parser.add_argument(\"--targets\", nargs='+', default=None)\n parser.add_argument(\"--wandb_key\", type=str, default=None)\n\n # for graph classification\n parser.add_argument(\"--pooling\", type=str, default=\"mean\")\n parser.add_argument(\"--deg4feat\", action=\"store_true\",\n default=False, help=\"use node degree as input feature\")\n parser.add_argument(\"--batch_size\", type=int, default=32)\n \n parser.add_argument(\"--full_pipline\", action=\"store_true\")\n\n args = parser.parse_args()\n return args\n\n\ndef load_best_configs(args, path):\n with open(path, \"r\") as f:\n configs = yaml.safe_load(f)\n\n if args.dataset not in configs:\n logging.info(\"Best args not found\")\n return args\n\n logging.info(\"Using best configs\")\n configs = configs[args.dataset]\n\n for k, v in configs.items():\n if \"lr\" in k or \"weight_decay\" in k:\n v = float(v)\n setattr(args, k, v)\n logging.info(\"------ Use best 
configs ------\")\n return args\n\n\ndef build_model(args) -> GAE:\n num_heads = args.num_heads\n num_out_heads = args.num_out_heads\n num_hidden = args.num_hidden\n out_dim = args.out_dim\n num_layers = args.num_layers\n residual = args.residual\n attn_drop = args.attn_drop\n in_drop = args.in_drop\n norm = args.norm\n negative_slope = args.negative_slope\n encoder_type = args.encoder\n decoder_type = args.decoder\n mask_rate = args.mask_rate\n drop_edge_rate = args.drop_edge_rate\n replace_rate = args.replace_rate\n\n activation = args.activation\n loss_fn = args.loss_fn\n alpha_l = args.alpha_l\n concat_hidden = args.concat_hidden\n num_features = args.num_features\n model = None\n\n model = GAE(\n in_dim=num_features,\n num_hidden=num_hidden,\n out_dim=out_dim,\n num_layers=num_layers,\n nhead=num_heads,\n nhead_out=num_out_heads,\n activation=activation,\n feat_drop=in_drop,\n attn_drop=attn_drop,\n negative_slope=negative_slope,\n residual=residual,\n encoder_type=encoder_type,\n decoder_type=decoder_type,\n mask_rate=mask_rate,\n norm=norm,\n loss_fn=loss_fn,\n drop_edge_rate=drop_edge_rate,\n replace_rate=replace_rate,\n alpha_l=alpha_l,\n concat_hidden=concat_hidden,\n )\n\n return model\n\n\ndef load_graphs(load_train: bool = True, load_test: bool = True, load_validation: bool = True) -> tp.Tuple[tp.List[ox.graph_from_xml], tp.List[ox.graph_from_xml], tp.List[ox.graph_from_xml]]:\n pickled_data_file = GRAPHML_PICKLED_DATA_DIR / 'graphs.pickle'\n if pickled_data_file.exists():\n logging.info('Loading pickled nx graphs')\n with open(pickled_data_file, 'rb') as f:\n return pickle.load(f)\n else:\n # Load graphml filenames\n train, test, validation = GRAPHML_TRAIN_DATA_DIR, GRAPHML_TEST_DATA_DIR, GRAPHML_VALIDATION_DATA_DIR,\n train_graph_files, test_graph_files, validation_graph_files = \\\n list(train.glob('*.xml')), list(test.glob('*.xml')), list(validation.glob('*.xml'))\n\n # Load graphs\n train_graphs = [ox.load_graphml(p) for p in tqdm(train_graph_files, desc='Loading nx train graphs')] \\\n if load_train else []\n test_graphs = [ox.load_graphml(p) for p in tqdm(test_graph_files, desc='Loading nx test graphs')] \\\n if load_test else []\n validation_graphs = [ox.load_graphml(p) for p in tqdm(validation_graph_files, desc='Loading nx validation graphs')] \\\n if load_validation else []\n\n # Save pickled data\n if load_train and load_test and load_validation:\n with open(pickled_data_file, 'wb') as f:\n pickle.dump((train_graphs, test_graphs, validation_graphs), f)\n return train_graphs, test_graphs, validation_graphs\n\n\ndef retrieve_cycle_indices(preds: Tensor) -> tp.Set[int]:\n \"\"\"Retrieve indices of cycle predictions.\n\n Args:\n preds (Tensor): Argmaxed predictions tensor whose dimensions are (num_nodes, 1).\n \"\"\"\n assert len(preds.shape) == 1, 'The tensor must be one-dimensional - (num_nodes, )'\n return set((preds > 0).nonzero().squeeze().tolist())\n\n\ndef fast_retrieve_nx_prediction_graph(graph_networkx: nx.MultiDiGraph, preds: Tensor):\n cycle_indices = retrieve_cycle_indices(preds)\n subgraph = nx.subgraph_view(graph_networkx, filter_edge=lambda u, v, e: int(graph_networkx[u][v][0]['idx']) in cycle_indices)\n return nx.MultiDiGraph(subgraph)\n","repo_name":"Belvenix/bike-guessr","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13425963746","text":"import os\nimport base64\nimport subprocess\nimport json\nimport 
sys\nimport configparser\nimport requests\nimport shutil\nimport uuid\nimport send2trash\nfrom bs4 import BeautifulSoup\nimport atexit\nfrom PySide2 import QtCore, QtWidgets,QtGui\nfrom PySide2.QtWidgets import QTreeView, QVBoxLayout, QMainWindow, QPushButton, QFileDialog, QMessageBox, QTableView, QTableWidgetItem, QHeaderView,QApplication, QVBoxLayout, QLineEdit,QTabWidget,QPlainTextEdit,QLabel,QSpinBox,QListView,QAction,QDialog,QCheckBox,QFontComboBox,QProgressBar,QShortcut,QSplitter,QHBoxLayout,QMenu,QInputDialog,QStatusBar\nfrom PySide2.QtGui import QKeySequence,QFont,QPalette, QColor,QStandardItem, QStandardItemModel,QIntValidator,QDesktopServices\nfrom PySide2.QtUiTools import QUiLoader\nfrom PySide2.QtCore import QThread, Signal,QFile, Qt,QUrl\n\ndef initFolder():\n folder_path = \"TranslateFiles\"\n # 检查目录是否存在\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\ndef initConfig():\n global config\n # global api_key,secret_key,enable_translate,ui_font_Family,ui_font_Size,dirname\n # 判断配置文件是否存在,若不存在则新建配置文件并初始化\n if not os.path.exists('config.ini'):\n config = configparser.ConfigParser()\n config['BAIDU_TRANSLATE_API'] = {'api_key': '',\n 'secret_key': '',\n 'enable': 'false'}\n config['UI_FONT'] = {'ui_font_Family': '5b6u6L2v6ZuF6buR',\n 'ui_font_Size': '10'}\n config['SYSTEM_SETTINGS'] = {'dirname': '',\n 'dark_mode':'false',\n 'auto_apply_layout':'true',\n 'layout':{},\n 'case_sensitive':'false',\n 'exit_dont_ask_again':'false',\n 'exit_save':'false',\n 'rename':'zh_cn.json',\n 'ensure_ascii':'false'}\n with open('config.ini', 'w', encoding='utf-8') as f:\n config.write(f)\n\n # 读取配置文件\n config = configparser.ConfigParser()\n with open('config.ini', 'r', encoding='utf-8') as f:\n config.read_file(f)\n \n # 获取百度翻译API的AK、SK和是否开启机翻\n try:\n api_key = config.get('BAIDU_TRANSLATE_API', 'api_key')\n secret_key = config.get('BAIDU_TRANSLATE_API', 'secret_key')\n enable_translate = config.getboolean('BAIDU_TRANSLATE_API', 'enable')\n ui_font_Family = config.get('UI_FONT', 'ui_font_Family')\n ui_font_Size = config.getint('UI_FONT', 'ui_font_Size')\n dirname = base64.b64decode(config.get('SYSTEM_SETTINGS', 'dirname')).decode('utf-8')\n dark_mode = config.getboolean('SYSTEM_SETTINGS', 'dark_mode')\n auto_apply_layout = config.getboolean('SYSTEM_SETTINGS', 'auto_apply_layout')\n case_sensitive = config.getboolean('SYSTEM_SETTINGS', 'case_sensitive')\n ensure_ascii = config.getboolean('SYSTEM_SETTINGS', 'ensure_ascii')\n exit_dont_ask_again = config.getboolean('SYSTEM_SETTINGS', 'exit_dont_ask_again')\n exit_save = config.getboolean('SYSTEM_SETTINGS', 'exit_save')\n layout = json.loads(config.get('SYSTEM_SETTINGS', 'layout'))\n rename = config.get('SYSTEM_SETTINGS', 'rename')\n except:\n QMessageBox.warning(None, \"错误\", \"配置文件出现错误,已重置为初始值\")\n if os.path.exists(\"config.ini\"):\n os.remove(\"config.ini\")\n initConfig()\ndef add_unique_id_to_json(file_path):\n\n # 加载并解析 JSON 文件\n def parse_json_file(file_path):\n with open(file_path, 'r',encoding='utf-8') as f:\n data = json.load(f)\n return data\n\n # 保存 JSON 文件\n def save_json_file(data, file_path):\n with open(file_path, 'w',encoding='utf-8') as f:\n json.dump(data, f, indent=4)\n\n # 解析 JSON 文件\n data = parse_json_file(file_path)\n \n # 检查是否已经包含 \"MonianHelloTranslateUUID\" 键\n if \"MonianHelloTranslateUUID\" in data:\n return data[\"MonianHelloTranslateUUID\"]\n\n # 获取第一个键值对的键\n key = list(data.keys())[0]\n\n # 生成唯一标识符\n unique_id = str(uuid.uuid4())\n\n # 将唯一标识符添加到字典中\n data['MonianHelloTranslateUUID'] = unique_id\n\n # 保存 
JSON 文件\n save_json_file(data, file_path)\n\n # 返回唯一标识符\n return unique_id\ndef get_access_token(api_key, secret_key):\n url = \"https://aip.baidubce.com/oauth/2.0/token\"\n params = {\n \"grant_type\": \"client_credentials\",\n \"client_id\": api_key,\n \"client_secret\": secret_key\n }\n response = requests.get(url, params=params)\n result = response.json()\n access_token = result[\"access_token\"]\n return access_token\ndef translate_text(text, from_lang, to_lang, access_token):\n url = \"https://aip.baidubce.com/rpc/2.0/mt/texttrans/v1\"\n headers = {\n \"Content-Type\": \"application/json;charset=utf-8\"\n }\n body = {\n \"from\": from_lang,\n \"to\": to_lang,\n \"q\": text\n }\n params = {\n \"access_token\": access_token\n }\n response = requests.post(url, headers=headers, params=params, json=body)\n\n result = response.json()\n try:\n translated_text = result[\"result\"][\"trans_result\"][0][\"dst\"]\n except Exception as e:\n translated_text = \"\"\n QMessageBox.warning(QMessageBox.warning, '翻译错误', \"由于未知原因,无法翻译文本\\\"{}\\\"。请参考以下错误信息:\\n{}\\n{}\".format(text, response.json(), e))\n return translated_text\nclass TranslatorThread(QThread):\n finished = Signal()\n progress = Signal(int)\n error = Signal(str)\n\n def __init__(self, file_path, from_lang, to_lang, api_key, secret_key):\n super().__init__()\n self.file_path = file_path\n self.from_lang = from_lang\n self.to_lang = to_lang\n self.api_key = api_key\n self.secret_key = secret_key\n\n def run(self):\n initFolder()\n #执行前保存一遍文件\n file_browser.on_savebutton_clicked()\n\n try:\n access_token = get_access_token(self.api_key, self.secret_key)\n except Exception as e:\n self.error.emit(str(e))\n else:\n uuid = add_unique_id_to_json(self.file_path)\n try:\n # 如果已经有保存翻译结果的文件,则读取该文件,将其转为 Python 字典\n with open('TranslateFiles/'+uuid+'.json', 'r', encoding='utf-8') as f:\n translated_dict = json.load(f)\n except FileNotFoundError:\n # 如果没有保存翻译结果的文件,则将 translated_dict 初始化为空字典\n translated_dict = {}\n\n with open(self.file_path, 'r', encoding='utf-8') as f:\n content = json.load(f)\n\n keys = []\n values = []\n for key, value in content.items():\n keys.append(key)\n values.append(value)\n\n item_count = len(keys)\n for i, key in enumerate(keys):\n # 如果 key 已经在字典中,则跳过本次循环\n # if key in translated_dict:\n # continue\n\n # 跳过特定 key\n if key == \"MonianHelloTranslateUUID\":\n continue\n\n value = str(values[i])\n translated_value = translate_text(str(value), self.from_lang, self.to_lang, access_token)\n\n # 添加新翻译到字典中\n translated_dict[key] = translated_value\n\n progress = (i+1) / item_count * 100\n self.progress.emit(progress) # 更新进度条\n\n # 将字典实时保存到文件中\n with open('TranslateFiles/'+uuid+'.json', 'w', encoding='utf-8') as f:\n json.dump(translated_dict, f, indent=4)\n self.finished.emit()\nclass FileBrowser(QMainWindow):\n def __init__(self):\n\n super(FileBrowser, self).__init__()\n\n self.row = 0\n self.file_name = \"\"\n self.replacelineEditonEdit = False\n \n # 加载UI文件\n ui_file = QFile('MainWindow.ui')\n ui_file.open(QFile.ReadOnly)\n loader = QUiLoader()\n self.window = loader.load(ui_file)\n ui_file.close()\n\n # # 创建一个状态栏\n # self.statusBar = QStatusBar()\n # self.setStatusBar(self.statusBar)\n\n # # 在状态栏上显示文本信息\n # self.statusBar.showMessage(\"欢迎使用我的应用!\")\n\n # 从配置文件中读取字体信息\n font_family = base64.b64decode(config.get('UI_FONT', 'ui_font_Family')).decode('utf-8')\n font_size = config.getint('UI_FONT', 'ui_font_Size')\n\n # 应用保存在配置文件中的字体\n ui_font = QFont(font_family, font_size)\n app.setFont(ui_font)\n\n self.tree_view = 
self.window.findChild(QTreeView, 'treeView')\n self.printbutton = self.window.findChild(QPushButton, 'printButton')\n self.savebutton = self.window.findChild(QPushButton, 'saveButton')\n self.translatebutton = self.window.findChild(QPushButton, 'translateButton')\n self.copyButton = self.window.findChild(QPushButton, 'copyButton')\n self.selectAllPushButton = self.window.findChild(QPushButton, 'selectAllPushButton')\n self.invertSelectionPushButton = self.window.findChild(QPushButton, 'invertSelectionPushButton')\n self.dict_table = self.window.findChild(QTableView, 'TableView')\n self.searchLineEdit = self.window.findChild(QLineEdit, 'searchLineEdit')\n self.reviewJumpPageLineEdit = self.window.findChild(QLineEdit, 'reviewJumpPageLineEdit')\n self.searchTableView = self.window.findChild(QTableView, 'searchTableView')\n self.tabWidget = self.window.findChild(QTabWidget, 'tabWidget')\n self.originalReviewPlainTextEdit = self.window.findChild(QPlainTextEdit, 'originalReviewPlainTextEdit')\n self.translateReviewPlainTextEdit = self.window.findChild(QPlainTextEdit, 'translateReviewPlainTextEdit')\n self.machineTranslateReviewPlainTextEdit = self.window.findChild(QPlainTextEdit, 'machineTranslateReviewPlainTextEdit')\n self.reviewPreviousPushButton = self.window.findChild(QPushButton,'reviewPreviousPushButton')\n self.reviewNextPushButton = self.window.findChild(QPushButton,'reviewNextPushButton')\n self.reviewLabel = self.window.findChild(QLabel,'reviewLabel')\n self.replacelineEdit = self.window.findChild(QLineEdit,'replacelineEdit')\n self.replacelistView = self.window.findChild(QListView,'replacelistView')\n self.actionClearSpaces = self.window.findChild(QAction, 'actionClearSpaces')\n self.actionSettings = self.window.findChild(QAction, 'actionSettings')\n self.actionAbout = self.window.findChild(QAction, 'actionAbout')\n self.actionSaveLayout = self.window.findChild(QAction, 'actionSaveLayout')\n self.actionSaveAsSafeMode = self.window.findChild(QAction, 'actionSaveAsSafeMode')\n self.translateProgressBar = self.window.findChild(QProgressBar, 'translateProgressBar')\n self.splitter = self.window.findChild(QSplitter, 'splitter')\n\n #快捷键\n \n shortcutCtrl_F = QShortcut(QKeySequence('Ctrl+F'), self.searchLineEdit)\n shortcutCtrl_F.activated.connect(self.handle_Ctrl_F_action)\n shortcutCtrl_H = QShortcut(QKeySequence('Ctrl+H'), self.searchLineEdit)\n shortcutCtrl_H.activated.connect(self.handle_Ctrl_H_action)\n shortcutCtrl_Down = QShortcut(QKeySequence('Ctrl+Down'), self.replacelineEdit)\n shortcutCtrl_Down.activated.connect(self.handle_Ctrl_Down_action)\n shortcutCtrl_Up = QShortcut(QKeySequence('Ctrl+Up'), self.replacelineEdit)\n shortcutCtrl_Up.activated.connect(self.handle_Ctrl_Up_action)\n shortcutCtrl_A = QShortcut(QKeySequence('Ctrl+Shift+A'), self.selectAllPushButton)\n shortcutCtrl_A.activated.connect(self.handle_Ctrl_A_action)\n\n self.translateProgressBar.hide()\n #QAction\n self.actionClearSpaces.triggered.connect(self.handleActionClearSpaces)\n self.actionSaveAsSafeMode.triggered.connect(self.handleActionSaveAsSafeMode)\n self.actionSettings.triggered.connect(self.handleActionSettings)\n self.actionAbout.triggered.connect(self.handleActionAbout)\n self.actionSaveLayout.triggered.connect(self.handleActionSaveLayout)\n\n #禁止用户编辑replacelistView\n self.replacelistView.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)\n\n #设置数字校验器\n self.reviewJumpPageLineEdit.setValidator(QIntValidator())\n #绑定回车事件\n 
self.searchLineEdit.returnPressed.connect(self.on_searchLineEdit_return_pressed)\n self.replacelineEdit.returnPressed.connect(self.on_replacelineEdit_return_pressed)\n self.reviewJumpPageLineEdit.returnPressed.connect(self.on_reviewJumpPageLineEdit_return_pressed)\n\n # 创建文件浏览器模型\n self.model = QtWidgets.QFileSystemModel()\n self.model.setRootPath(base64.b64decode(config.get('SYSTEM_SETTINGS', 'dirname')).decode('utf-8'))\n self.model.setFilter(QtCore.QDir.NoDotAndDotDot | QtCore.QDir.AllEntries)\n # 将模型设置为 QTreeView 的模型\n self.tree_view.setModel(self.model)\n # 设置根索引为桌面文件夹的索引\n root_index = self.model.index(base64.b64decode(config.get('SYSTEM_SETTINGS', 'dirname')).decode('utf-8'))\n self.tree_view.setRootIndex(root_index)\n \n self.tabWidget.currentChanged.connect(self.tabChanged)\n\n # 隐藏列\n self.tree_view.setColumnHidden(1, True)\n self.tree_view.setColumnHidden(2, True)\n self.tree_view.setColumnHidden(3, True)\n # 将“Name”列的宽度设置为固定值\n self.tree_view.setColumnWidth(0, 200) # 设置“Name”列的宽度为 200 像素\n\n # 绑定双击事件\n self.tree_view.doubleClicked.connect(self.on_treeView_doubleClicked)\n # 将按钮与其回调函数关联\n self.printbutton.clicked.connect(self.on_printbutton_clicked)\n self.savebutton.clicked.connect(self.on_savebutton_clicked)\n self.translatebutton.clicked.connect(self.on_translatebutton_clicked)\n self.copyButton.clicked.connect(self.on_copyButton_clicked)\n self.reviewPreviousPushButton.clicked.connect(self.on_reviewPreviousPushButton_clicked)\n self.reviewNextPushButton.clicked.connect(self.on_reviewNextPushButton_clicked)\n self.invertSelectionPushButton.clicked.connect(self.on_invertSelectionPushButton_clicked)\n self.selectAllPushButton.clicked.connect(self.on_selectAllPushButton_clicked)\n self.invertSelectionPushButton.hide()\n self.selectAllPushButton.hide()\n self.dict_table.verticalHeader().sectionClicked.connect(self.handleHeaderClicked)\n\n self.tree_view.setContextMenuPolicy(Qt.CustomContextMenu)\n self.tree_view.customContextMenuRequested.connect(self.showContextMenu)\n\n # 显示窗口\n self.window.show()\n\n # 初始化百度翻译API\n self.translate = None\n\n # 在 __init__ 函数中连接 clicked 信号到响应函数\n self.replacelistView.clicked.connect(self.handle_replacelistView_cell_clicked)\n\n #应用保存的UI配置信息\n if config.getboolean('SYSTEM_SETTINGS', 'auto_apply_layout'):\n try:\n data = json.loads(config.get('SYSTEM_SETTINGS', 'layout'))\n pos = tuple(data['pos'])\n self.window.move(pos[0], pos[1])\n except Exception as e:\n print(e)\n try:\n size = tuple(data['size'])\n self.window.resize(size[0], size[1])\n except Exception as e:\n print(e)\n try:\n if data['is_maximized']:\n self.window.showMaximized()\n except Exception as e:\n print(e)\n\n # 读取和应用布局设置\n try:\n sizes_dict = json.loads(config.get('SYSTEM_SETTINGS', 'layout'))\n left = sizes_dict.get('left')\n middle = sizes_dict.get('middle')\n right = sizes_dict.get('right')\n sizes = [left, middle, right]\n self.splitter.setSizes(sizes)\n except Exception as e:\n print(e)\n def showContextMenu(self, pos):\n index = self.tree_view.indexAt(pos)\n if not index.isValid():\n return\n\n menu = QMenu(self)\n\n open_action = QAction(\"在资源管理器中打开\", self)\n rename_action = QAction(\"重命名\", self)\n delete_action = QAction(\"移动到回收站\", self)\n copy_rename_action = QAction(\"复制并重命名\", self)\n\n menu.addAction(open_action)\n menu.addAction(delete_action)\n menu.addAction(copy_rename_action)\n menu.addAction(rename_action)\n # 连接动作的信号\n open_action.triggered.connect(self.contextMenuOpenFileInExplorer)\n rename_action.triggered.connect(self.contextMenuRenameFile)\n 
delete_action.triggered.connect(self.contextMenuDeleteFile)\n copy_rename_action.triggered.connect(self.contextMenuCopyAndRenameFile)\n\n # 显示菜单\n menu.exec_(self.tree_view.viewport().mapToGlobal(pos))\n\n def contextMenuCopyAndRenameFile(self):\n index = self.tree_view.currentIndex()\n file_path = self.model.filePath(index).replace('/', '\\\\')\n directory = os.path.dirname(file_path)\n\n default_file_name =config.get('SYSTEM_SETTINGS', 'rename')\n # 提示用户保存当前内容\n save_prompt = QMessageBox.question(self, \"保存提示\", \"复制前是否保存当前内容?\", QMessageBox.Yes | QMessageBox.No)\n if save_prompt == QMessageBox.Yes:\n self.on_savebutton_clicked() # 调用保存功能\n new_name, ok = QInputDialog.getText(self, \"复制并重命名\", \"请输入新的文件名:\", textEchoMode=QLineEdit.Normal, text=default_file_name)\n\n if ok and new_name:\n # 构建新的文件路径\n new_file_path = os.path.join(directory, new_name)\n try:\n # 复制文件\n shutil.copy2(file_path, new_file_path)\n\n # 更新模型数据以反映新文件的添加\n self.model.setRootPath(directory)\n except OSError as e:\n QMessageBox.warning(self, \"复制并重命名失败\", str(e))\n def contextMenuOpenFileInExplorer(self):\n index = self.tree_view.currentIndex()\n file_path = (self.model.filePath(index)).replace('/', '\\\\')\n subprocess.Popen('explorer /select,\"{}\"'.format(file_path), creationflags=subprocess.CREATE_NO_WINDOW)\n def contextMenuRenameFile(self):\n index = self.tree_view.currentIndex()\n file_path = (self.model.filePath(index)).replace('/', '\\\\')\n\n new_name, ok = QInputDialog.getText(self, \"重命名\", \"请输入新的文件名:\", textEchoMode=QLineEdit.Normal,text=os.path.basename(file_path))\n if ok and new_name:\n # 获取文件所在目录\n directory = os.path.dirname(file_path)\n # 构建新的文件路径\n new_file_path = os.path.join(directory, new_name)\n try:\n os.rename(file_path, new_file_path)\n # 更新模型数据以反映重命名\n self.model.setRootPath(directory)\n except OSError as e:\n # 处理重命名失败的情况\n QMessageBox.warning(self, \"重命名失败\", str(e))\n def contextMenuDeleteFile(self):\n index = self.tree_view.currentIndex()\n file_path = (self.model.filePath(index)).replace('/', '\\\\')\n send2trash.send2trash(file_path)\n def onExit(self):\n if config.getboolean('SYSTEM_SETTINGS', 'exit_dont_ask_again'):\n if config.getboolean('SYSTEM_SETTINGS', 'exit_save'):\n self.on_savebutton_clicked()\n return\n else:\n return\n msgBox = QMessageBox()\n msgBox.setWindowTitle(\"退出\")\n msgBox.setText(\"是否将更改保存到文件 \")\n\n # 添加QCheckBox控件,并设置靠右对齐\n dont_ask_checkbox = QCheckBox(\"不再提示\", msgBox)\n # checkbox_style = \"\"\"\n # QCheckBox::indicator {\n # subcontrol-position: left;\n # left:100%;\n # }\n # \"\"\"\n # dont_ask_checkbox.setStyleSheet(checkbox_style)\n # 创建一个水平布局管理器,并将QCheckBox添加到其中\n layout = QHBoxLayout()\n layout.addWidget(dont_ask_checkbox)\n\n # 将布局管理器添加到QMessageBox中\n box_layout = msgBox.layout()\n box_layout.addLayout(layout, 1, 0, 1, box_layout.columnCount())\n\n # 添加“保存”和“取消”按钮\n save_button = msgBox.addButton(\"保存\", QMessageBox.YesRole)\n save_button.setObjectName(\"save_button\")\n cancel_button = msgBox.addButton(\"取消\", QMessageBox.NoRole)\n cancel_button.setObjectName(\"cancel_button\")\n\n\n # 执行QMessageBox并获取用户的选择\n reply = msgBox.exec_()\n if msgBox.clickedButton() == save_button:\n self.on_savebutton_clicked()\n if dont_ask_checkbox.isChecked():\n config.set('SYSTEM_SETTINGS', 'exit_dont_ask_again', 'true')\n config.set('SYSTEM_SETTINGS', 'exit_save', 'true')\n with open(\"config.ini\", 'w', encoding='utf-8') as f:\n config.write(f)\n elif msgBox.clickedButton() == cancel_button:\n if dont_ask_checkbox.isChecked():\n config.set('SYSTEM_SETTINGS', 
'exit_dont_ask_again', 'true')\n config.set('SYSTEM_SETTINGS', 'exit_save', 'false')\n with open(\"config.ini\", 'w', encoding='utf-8') as f:\n config.write(f)\n else:\n return\n def handleActionSaveLayout(self):\n sizes_dict = {'left':self.splitter.sizes()[0], 'middle':self.splitter.sizes()[1], 'right':self.splitter.sizes()[2],'pos': [self.window.pos().x(), self.window.pos().y()],'size': [self.window.size().width(), self.window.size().height()],'is_maximized': self.window.isMaximized()}\n print(sizes_dict)\n config.set(\"SYSTEM_SETTINGS\", \"layout\", json.dumps(sizes_dict))\n try:\n with open('config.ini', 'w') as f:\n config.write(f)\n except:\n QMessageBox.warning(self, \"警告\", \"保存失败\")\n else:\n QMessageBox.information(self, \"提示\", \"保存成功\")\n def handle_Ctrl_F_action(self):\n self.searchLineEdit.setFocus()\n def handle_Ctrl_H_action(self):\n self.replacelineEdit.setFocus()\n def handle_Ctrl_Down_action(self):\n self.on_reviewNextPushButton_clicked()\n def handle_Ctrl_A_action(self):\n self.on_selectAllPushButton_clicked()\n def handle_Ctrl_Up_action(self):\n self.on_reviewPreviousPushButton_clicked()\n def updateRootIndex(self):\n # 更新文件浏览器的根索引\n root_index = self.model.index(base64.b64decode(config.get('SYSTEM_SETTINGS', 'dirname')).decode('utf-8'))\n self.tree_view.setRootIndex(root_index)\n def handleActionSettings(self):\n # 创建设置对话框\n settings_dialog = SettingsDialog(self)\n # 显示新窗口\n settings_dialog.exec_()\n def handleActionClearSpaces(self):\n self.tabWidget.setCurrentIndex(0)\n model = self.dict_table.model()\n # 遍历所有行\n for row in range(model.rowCount()):\n # 获取当前行的 value 列的 QStandardItem 实例\n item = model.item(row, 1)\n # 获取该实例中的字符串并清除其中的空格\n text = item.text().replace(' ', '')\n # 将新字符串设置回该实例中\n item.setText(text)\n def handleActionSaveAsSafeMode(self):\n # 创建保存错误的警告对话框\n msg_box = QMessageBox()\n msg_box.setIcon(QMessageBox.Warning)\n msg_box.setWindowTitle(\"提示信息\")\n msg_box.setText(\"是否以安全模式保存?\")\n msg_box.setStandardButtons(QMessageBox.Yes | QMessageBox.No)\n msg_box.setDefaultButton(QMessageBox.No)\n\n # 获取用户选项\n user_choice = msg_box.exec_()\n\n # 根据用户选项进行操作\n if user_choice == QMessageBox.Yes:\n import os\n from PySide2.QtCore import QDateTime\n\n # 获取当前时间\n current_datetime = QDateTime.currentDateTime().toString(\"yy-MM-dd-hh-mm-ss\")\n\n # 构建保存文件路径\n \n safe_folder = \"C:/SafeMode\" # 安全模式文件夹路径\n os.makedirs(safe_folder, exist_ok=True)\n file_name = f\"{current_datetime}.json\" # 文件名,例如 13-13.json\n save_path = os.path.join(safe_folder, file_name).replace('\\\\', '/')\n self.tabWidget.setCurrentIndex(0)\n # 获取表格中的数据\n model = self.dict_table.model()\n data = {}\n for row in range(model.rowCount()):\n key = model.index(row, 0).data()\n value = model.index(row, 1).data()\n data[key] = value\n\n # 以安全模式保存文件\n with open(save_path, \"w\", encoding=\"utf-8\") as f:\n json.dump(data, f, indent=4)\n\n # 显示保存成功消息框\n msg_box = QMessageBox()\n msg_box.setIcon(QMessageBox.Information)\n msg_box.setWindowTitle(\"提示信息\")\n msg_box.setText(\"文件已以安全模式保存至{}\".format(save_path))\n msg_box.exec_()\n else:\n pass\n def handle_replacelistView_cell_clicked(self, index):\n # 获取所单击单元格的值\n value = self.replacelistView.model().data(index)\n # 将其按照空格分割并获取第一片\n try:\n first_piece = value.split(' ')[0]\n except IndexError:\n # 如果切片不成功就取消焦点\n self.dict_table.clearSelection()\n return\n # 将该数字转换为整数\n try:\n row_index = int(first_piece) - 1 # 从 0 开始计算行数\n except:\n pass\n # 将焦点移动到 dict_table 中对应的行\n model = self.dict_table.model()\n if row_index >= 0 and row_index < model.rowCount():\n 
self.dict_table.selectRow(row_index)\n def get_file_path(self, index):\n return self.model.filePath(index)\n def on_treeView_doubleClicked(self, index):\n file_path = self.get_file_path(index)\n # 判断是否为 JSON 文件\n if not os.path.splitext(file_path)[1].lower() == '.json':\n QMessageBox.warning(self, '错误', '不是 JSON 文件!')\n return\n self.file_name = self.tree_view.selectedIndexes()[0].data()\n if os.path.isfile(file_path):\n # 如果是文件则执行打印文件的回调函数\n self.on_printbutton_clicked()\n\n # “打印文件”按钮的回调函数\n def set_model_data(self, table, data):\n table.clear()\n table.setRowCount(len(data))\n table.setColumnCount(len(data[0]))\n header = QHeaderView(Qt.Orientation.Horizontal)\n table.setHorizontalHeader(header)\n for i, row in enumerate(data):\n for j, col in enumerate(row):\n item = QTableWidgetItem(str(col))\n table.setItem(i, j, item)\n def search_dictionary(self, key):\n url = \"https://dict.mcmod.cn/connection/search.php\"\n headers = {\n \"Content-Type\": \"application/x-www-form-urlencoded; charset=UTF-8\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n \"Origin\": \"https://dict.mcmod.cn\",\n \"Referer\": \"https://dict.mcmod.cn/\"\n }\n data = {\n \"key\": key,\n \"max\":16,\n \"range\": 1\n }\n\n response = requests.post(url, headers=headers, data=data)\n response.raise_for_status()\n # print(response.text)\n try:\n soup = BeautifulSoup(response.text, 'html.parser')\n table = soup.find('table')\n rows = table.find_all('tr')\n except:\n return 0\n\n data = []\n for row in rows:\n cells = row.find_all(['td', 'th'])\n row_data = [cell.get_text(strip=True) for cell in cells]\n data.append(row_data)\n\n # print(data)\n\n def extract_first_two_elements(array):\n extracted_array = [[sublist[0], sublist[1]] for sublist in array]\n # print(extracted_array)\n return extracted_array\n def swap_values(array):\n swapped_array = [[sublist[1], sublist[0]] for sublist in array]\n return swapped_array\n\n # 调用函数提取每个一维数组的前两个元素\n extracted_array = extract_first_two_elements(data)\n swapped_array = swap_values(extracted_array)\n return swapped_array \n def on_searchLineEdit_return_pressed(self):\n searchResult= self.search_dictionary(self.searchLineEdit.text())\n # print(searchResult)\n if searchResult:\n keys = []\n values = []\n for i in searchResult:\n if i==['原文', '翻译结果']:\n continue\n keys.append(i[0])\n values.append(i[1])\n model = QStandardItemModel(len(keys), 2, self)\n for i, key in enumerate(keys):\n\n value = str(values[i])\n model.setItem(i, 0, QStandardItem(key))\n model.setItem(i, 1, QStandardItem(value))\n # 将填充好数据的表格模型应用到 QTableView 中\n self.searchTableView.setModel(model)\n\n # 设置表头标题\n model.setHeaderData(0, Qt.Horizontal, \"原文\")\n model.setHeaderData(1, Qt.Horizontal, \"翻译结果\")\n else:\n model = QStandardItemModel(1, 1, self)\n model.setItem(0, 0, QStandardItem(\"无搜索结果\"))\n self.searchTableView.setModel(model)\n model.setHeaderData(0, Qt.Horizontal, \"\")\n def on_copyButton_clicked(self):\n self.tabWidget.setCurrentIndex(0)\n # 获取当前的模型\n model = self.dict_table.model()\n index = self.tree_view.currentIndex()\n file_path = self.get_file_path(index)\n uuid = add_unique_id_to_json(file_path)\n # print(uuid)\n TranslateFilespath = 'TranslateFiles/' + uuid + '.json'\n if os.path.exists(TranslateFilespath):\n # print('翻译文件存在')\n for row in range(model.rowCount()):\n # 跳过第一列值为'MonianHelloTranslateUUID'的行\n # item = model.item(row, 0)\n # print(item.text())\n # if item and item.text() == 'MonianHelloTranslateUUID':\n # continue\n # 交换第2、3列的值\n index1 = model.index(row, 1)\n index2 = 
model.index(row, 2)\n data1 = model.data(index1)\n data2 = model.data(index2)\n model.setData(index1, data2)\n model.setData(index2, data1)\n def on_printbutton_clicked(self):\n self.row = 0\n # 获取当前选中文件的路径\n self.tabWidget.setCurrentIndex(0)\n index = self.tree_view.currentIndex()\n file_path = self.get_file_path(index)\n\n try:\n # 打开选中的 JSON 文件,并按 key:value 的形式显示其中的内容\n uuid = add_unique_id_to_json(file_path)\n # print(uuid)\n TranslateFilespath = 'TranslateFiles/' + uuid + '.json'\n if os.path.exists(TranslateFilespath):\n # print('翻译文件存在')\n translationFileExists = True\n else:\n # print('翻译文件不存在')\n translationFileExists = False\n with open(file_path, 'r', encoding='utf-8') as f:\n file_content = f.read()\n content = json.loads(file_content)\n keys = []\n values = []\n for key, value in content.items():\n if key == \"MonianHelloTranslateUUID\":\n continue\n keys.append(key)\n values.append(value)\n\n if translationFileExists:\n with open(TranslateFilespath, 'r',encoding='utf-8') as ff:\n data2 = json.load(ff)\n model = QStandardItemModel(len(keys), 3, self)\n for i, key in enumerate(keys):\n value = str(values[i])\n model.setItem(i, 0, QStandardItem(key))\n model.setItem(i, 1, QStandardItem(value))\n model.setItem(i, 2, QStandardItem(data2.get(key)))\n else:\n model = QStandardItemModel(len(keys), 2, self)\n for i, key in enumerate(keys):\n value = str(values[i])\n model.setItem(i, 0, QStandardItem(key))\n model.setItem(i, 1, QStandardItem(value))\n \n # 将填充好数据的表格模型应用到 QTableView 中\n self.dict_table.setModel(model)\n\n # 设置表头标题\n model.setHeaderData(0, Qt.Horizontal, \"键名\")\n model.setHeaderData(1, Qt.Horizontal, \"原文\")\n model.setHeaderData(2, Qt.Horizontal, \"译文\")\n\n except ValueError:\n # 不是 JSON 格式,直接显示文本\n self.text_edit.setPlainText(file_content) \n def on_savebutton_clicked(self):\n self.tabWidget.setCurrentIndex(0)\n # 获取当前的模型\n model = self.dict_table.model()\n index = self.tree_view.currentIndex()\n file_path = self.get_file_path(index)\n # 获取表格中的数据\n model = self.dict_table.model()\n data = {}\n for row in range(model.rowCount()):\n key = model.index(row, 0).data()\n value = model.index(row, 1).data()\n data[key] = value\n try:\n data[\"MonianHelloTranslateUUID\"] = add_unique_id_to_json(file_path)\n except:\n pass\n # 将数据写入文件\n with open(file_path, 'w',encoding='utf-8') as f:\n json.dump(data, f, indent=4,ensure_ascii=config.getboolean('SYSTEM_SETTINGS', 'ensure_ascii'))\n # 显示保存成功消息框\n msg_box = QMessageBox(QMessageBox.Information, \"提示信息\", \"文件已保存至 {}\".format(file_path))\n msg_box.exec_()\n\n def on_reviewNextPushButton_clicked(self):\n model = self.dict_table.model()\n self.tabWidget.setCurrentIndex(1)\n if 0<=self.row and self.rowJSON-i18n
\n正式版本v1.0.7 2023.08.30\n作者 : MonianHello\nQF-project : QF-project
    \n代码库 : github.com/MonianHello/JSON-i18n''')\nclass SettingsDialog(QDialog):\n def __init__(self, parent=None):\n super(SettingsDialog, self).__init__(parent)\n \n # 加载设置对话框UI文件\n ui_file = QFile('Settings.ui')\n ui_file.open(QFile.ReadOnly)\n loader = QUiLoader()\n self.settings_ui = loader.load(ui_file)\n ui_file.close()\n \n # 将UI添加到对话框中\n self.layout = QVBoxLayout(self)\n self.layout.addWidget(self.settings_ui)\n\n # 初始化 UI 控件\n self.akLineEdit = self.settings_ui.findChild(QLineEdit, 'akLineEdit')\n self.skLineEdit = self.settings_ui.findChild(QLineEdit, 'skLineEdit')\n self.renameLineEdit = self.settings_ui.findChild(QLineEdit, 'renameLineEdit')\n self.tranCheckBox = self.settings_ui.findChild(QCheckBox, 'tranCheckBox')\n self.tranTestPushButton = self.settings_ui.findChild(QPushButton, 'tranTestPushButton')\n self.moveWorkFolderPushButton = self.settings_ui.findChild(QPushButton, 'moveWorkFolderPushButton')\n\n self.fontSizeSpinBox = self.settings_ui.findChild(QSpinBox, 'fontSizeSpinBox')\n self.fontComboBox = self.settings_ui.findChild(QFontComboBox, 'fontComboBox')\n self.fontPreviewLabel = self.settings_ui.findChild(QLabel, 'fontPreviewLabel')\n\n self.unicodeCheckBox = self.settings_ui.findChild(QCheckBox, 'unicodeCheckBox')\n self.darkModeCheckBox = self.settings_ui.findChild(QCheckBox, 'darkModeCheckBox')\n self.caseSensitiveCheckBox = self.settings_ui.findChild(QCheckBox, 'caseSensitiveCheckBox')\n self.autoApplyLayoutCheckBox = self.settings_ui.findChild(QCheckBox, 'autoApplyLayoutCheckBox')\n self.layoutButton = self.settings_ui.findChild(QPushButton, 'layoutButton')\n self.savePushButton = self.settings_ui.findChild(QPushButton, 'savePushButton')\n self.cancelPushButton = self.settings_ui.findChild(QPushButton, 'cancelPushButton')\n \n\n # 在 UI 控件中显示当前设置\n self.akLineEdit.setText(config.get('BAIDU_TRANSLATE_API', 'api_key'))\n self.skLineEdit.setText(config.get('BAIDU_TRANSLATE_API', 'secret_key'))\n self.renameLineEdit.setText(config.get('SYSTEM_SETTINGS', 'rename'))\n self.tranCheckBox.setChecked(config.getboolean('BAIDU_TRANSLATE_API', 'enable'))\n \n\n self.fontSizeSpinBox.setValue(config.getint('UI_FONT', 'ui_font_Size'))\n\n self.fontComboBox.setCurrentFont(QFont(base64.b64decode(config.get('UI_FONT', 'ui_font_Family')).decode('utf-8')))\n self.fontPreviewLabel.setFont(QFont(base64.b64decode(config.get('UI_FONT', 'ui_font_Family')).decode('utf-8'), config.getint('UI_FONT', 'ui_font_Size')))\n\n self.unicodeCheckBox.setChecked(config.getboolean('SYSTEM_SETTINGS', 'ensure_ascii'))\n self.darkModeCheckBox.setChecked(config.getboolean('SYSTEM_SETTINGS', 'dark_mode'))\n self.autoApplyLayoutCheckBox.setChecked(config.getboolean('SYSTEM_SETTINGS', 'auto_apply_layout'))\n self.caseSensitiveCheckBox.setChecked(config.getboolean('SYSTEM_SETTINGS', 'case_sensitive'))\n\n # 连接控件信号和槽函数\n self.fontComboBox.currentFontChanged.connect(self.changeFontFamily)\n self.fontSizeSpinBox.valueChanged.connect(self.changeFontSize)\n self.unicodeCheckBox.clicked.connect(self.changeUnicode)\n self.darkModeCheckBox.clicked.connect(self.changeDarkMode)\n self.caseSensitiveCheckBox.clicked.connect(self.changeCaseSensitive)\n self.autoApplyLayoutCheckBox.clicked.connect(self.changeAutoSaveLayout)\n self.savePushButton.clicked.connect(self.saveSettings)\n self.tranTestPushButton.clicked.connect(self.tranTest)\n self.moveWorkFolderPushButton.clicked.connect(self.moveWorkFolder)\n self.cancelPushButton.clicked.connect(self.close)\n\n shortcutESC = QShortcut(QKeySequence('ESC'), 
self.cancelPushButton)\n shortcutESC.activated.connect(self.close)\n \n def moveWorkFolder(self):\n config.set('SYSTEM_SETTINGS', 'dirname', base64.b64encode(str(QFileDialog.getExistingDirectory(None, \"选择工作目录\", \"/\", options=QFileDialog.Options()|QFileDialog.ShowDirsOnly)).encode(\"utf-8\")).decode('utf-8'))\n def tranTest(self):\n try:\n get_access_token(self.akLineEdit.text(), self.skLineEdit.text())\n except:\n QMessageBox.warning(self, '鉴权错误', '无法通过鉴权认证。请检查您提供的百度AK/SK是否正确并具有访问该服务的权限')\n else:\n QMessageBox.information(self, '鉴权成功', \"验证成功\")\n def changeFontFamily(self, font):\n self.fontPreviewLabel.setFont(QFont(font.family(), self.fontSizeSpinBox.value()))\n\n def changeFontSize(self, size):\n self.fontPreviewLabel.setFont(QFont(self.fontComboBox.currentFont().family(), size))\n\n def changeDarkMode(self):\n if self.darkModeCheckBox.isChecked():\n config.set('SYSTEM_SETTINGS', 'dark_mode', 'True')\n else:\n config.set('SYSTEM_SETTINGS', 'dark_mode', 'False')\n def changeUnicode(self):\n if self.unicodeCheckBox.isChecked():\n config.set('SYSTEM_SETTINGS', 'ensure_ascii', 'True')\n else:\n config.set('SYSTEM_SETTINGS', 'ensure_ascii', 'False')\n def changeCaseSensitive(self):\n if self.caseSensitiveCheckBox.isChecked():\n config.set('SYSTEM_SETTINGS', 'case_sensitive', 'True')\n else:\n config.set('SYSTEM_SETTINGS', 'case_sensitive', 'False')\n def changeAutoSaveLayout(self):\n if self.autoApplyLayoutCheckBox.isChecked():\n config.set('SYSTEM_SETTINGS', 'auto_apply_layout', 'True')\n else:\n config.set('SYSTEM_SETTINGS', 'auto_apply_layout', 'False')\n\n def saveSettings(self):\n # 保存输入框和复选框的值到配置文件\n config.set('BAIDU_TRANSLATE_API', 'api_key', self.akLineEdit.text())\n config.set('BAIDU_TRANSLATE_API', 'secret_key', self.skLineEdit.text())\n config.set('SYSTEM_SETTINGS', 'rename', self.renameLineEdit.text())\n config.set('BAIDU_TRANSLATE_API', 'enable', str(self.tranCheckBox.isChecked()))\n\n config.set('UI_FONT', 'ui_font_Family', base64.b64encode(str(self.fontComboBox.currentFont().family()).encode(\"utf-8\")).decode('utf-8'))\n config.set('UI_FONT', 'ui_font_Size', str(self.fontSizeSpinBox.value()))\n try:\n with open('config.ini', 'w') as f:\n config.write(f)\n except:\n QMessageBox.warning(self, \"警告\", \"保存失败\")\n else:\n QMessageBox.information(self, \"提示\", \"保存成功\")\n file_browser.updateRootIndex()\n font_family = base64.b64decode(config.get('UI_FONT', 'ui_font_Family')).decode('utf-8')\n font_size = config.getint('UI_FONT', 'ui_font_Size')\n if config.getboolean('SYSTEM_SETTINGS', 'dark_mode'):\n darkmode()\n else:\n lightmode()\n # 应用保存在配置文件中的字体\n ui_font = QFont(font_family, font_size)\n app.setFont(ui_font)\n\n # 关闭设置对话框\n self.close()\n def changeFontFamily(self, font):\n # 更改字体类型,更新字体预览标签\n font_size = self.fontSizeSpinBox.value()\n font = QFont(font.family(), font_size)\n self.fontPreviewLabel.setFont(font)\n # print(font)\n\n def changeFontSize(self, font_size):\n # 更改字体大小,更新字体预览标签\n font_family = self.fontComboBox.currentFont().family()\n font = QFont(font_family, int(font_size))\n self.fontPreviewLabel.setFont(font)\n # print(font_size)\ndef darkmode():\n # 初始化应用程序\n app = QApplication.instance()\n\n # 设置 Fusion 风格\n app.setStyle('Fusion')\n\n # 获取系统默认调色板\n palette = QPalette()\n\n # 将窗口背景色设置为灰暗\n palette.setColor(QPalette.Window, QColor(53, 53, 53))\n palette.setColor(QPalette.WindowText, Qt.white)\n\n # 将禁用状态下的文本颜色设置为暗色\n \n # 将控件的背景颜色和文本颜色设置为灰暗和浅色\n palette.setColor(QPalette.Base, QColor(25, 25, 25))\n palette.setColor(QPalette.AlternateBase, QColor(53, 53, 
53))\n palette.setColor(QPalette.ToolTipBase, QColor(53, 53, 53))\n palette.setColor(QPalette.ToolTipText, Qt.white)\n palette.setColor(QPalette.Text, Qt.white)\n palette.setColor(QPalette.Disabled, QPalette.WindowText, QColor(127, 127, 127))\n palette.setColor(QPalette.Button, QColor(53, 53, 53))\n palette.setColor(QPalette.ButtonText, Qt.white)\n palette.setColor(QPalette.Disabled, QPalette.ButtonText, QColor(127, 127, 127))\n\n # 将滚动条的背景颜色和文本颜色设置为灰暗和浅色\n palette.setColor(QPalette.Highlight, QColor(64, 64, 64).lighter())\n palette.setColor(QPalette.HighlightedText, Qt.white)\n\n # 将应用程序的调色板设置为新的调色板\n app.setPalette(palette)\ndef lightmode():\n app.setStyle('Fusion')\n\n # 获取系统默认调色板\n palette = QPalette()\n\n # 将窗口背景色设置为浅灰色\n palette.setColor(QPalette.Window, QColor(240, 240, 240))\n\n # 将窗口文本颜色设置为黑色\n palette.setColor(QPalette.WindowText, QColor(0, 0, 0))\n\n # 背景色和文本颜色为浅灰色和黑色\n palette.setColor(QPalette.Button, QColor(255, 255, 255))\n palette.setColor(QPalette.Base, QColor(255, 255, 255))\n palette.setColor(QPalette.ButtonText, QColor(0, 0, 0))\n palette.setColor(QPalette.Text, QColor(0, 0, 0))\n\n # 禁用按钮的文本颜色为深灰色\n palette.setColor(QPalette.Disabled, QPalette.ButtonText, QColor(127, 127, 127))\n\n # 将应用程序的调色板设置为新的调色板\n app.setPalette(palette)\nif __name__ == '__main__':\n app = QApplication([])\n\n initConfig()\n initFolder()\n dirname = None\n if not config.get('SYSTEM_SETTINGS', 'dirname'):\n dirname = QFileDialog.getExistingDirectory(None, \"选择工作目录\", \"/\", options=QFileDialog.Options()|QFileDialog.ShowDirsOnly)\n if not dirname:\n sys.exit()\n config.set('SYSTEM_SETTINGS', 'dirname', base64.b64encode(str(dirname).encode(\"utf-8\")).decode('utf-8'))\n with open(\"config.ini\", 'w', encoding='utf-8') as f:\n config.write(f)\n if config.getboolean('SYSTEM_SETTINGS', 'dark_mode'):\n darkmode()\n else:\n lightmode()\n # 创建文件浏览器\n file_browser = FileBrowser()\n # 注册退出处理函数\n atexit.register(file_browser.onExit)\n # 运行应用程序事件循环\n sys.exit(app.exec_())","repo_name":"MonianHello/JSON-i18n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":59315,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"5856064280","text":"import glob\nimport os\nimport subprocess\nimport sys\n\nfrom telemetry.internal.platform import profiler\n\n_PGOSWEEP_EXECUTABLE = 'pgosweep.exe'\n\n\nclass WinPGOProfiler(profiler.Profiler):\n \"\"\"A profiler that run the Visual Studio PGO utility 'pgosweep.exe' before\n terminating a browser or a renderer process.\n \"\"\"\n\n def __init__(self, browser_backend, platform_backend, output_path, state):\n super(WinPGOProfiler, self).__init__(\n browser_backend, platform_backend, output_path, state)\n\n pgosweep_is_in_path = False\n for entry in os.environ['PATH'].split(os.pathsep):\n if os.path.exists(os.path.join(entry, _PGOSWEEP_EXECUTABLE)):\n pgosweep_is_in_path = True\n break\n if not pgosweep_is_in_path:\n raise IOError(2, '%s isn\\'t in the current path, run vcvarsall.bat to fix'\n ' this.' 
% _PGOSWEEP_EXECUTABLE)\n\n self._browser_dir = browser_backend.browser_directory\n self._chrome_pgc_counter = self._GetNextProfileIndex('chrome')\n self._chrome_child_pgc_counter = self._GetNextProfileIndex('chrome_child')\n\n def _GetNextProfileIndex(self, dll_name):\n \"\"\"Scan the directory containing the DLL |dll_name| to find the next index\n to use for the profile data files.\n\n Args:\n dll_name: The name of the DLL for which we want to get the next index to\n to use.\n \"\"\"\n max_index = 0\n pgc_files = glob.glob(os.path.join(self._browser_dir,\n '%s!*.pgc' % dll_name))\n for pgc_file in pgc_files:\n max_index = max(max_index,\n int(os.path.splitext(os.path.split(pgc_file)[1])[0].split('!')[1]))\n return max_index + 1\n\n def _RunPGOSweep(self, pid, dll_name, index):\n \"\"\"Run the pgosweep utility to gather the profile data of a given process.\n\n Args:\n pid: The PID of the process we're interested in.\n dll_name: The name of the DLL for which we want the profile data.\n index: The index to use for the profile data file.\n\n Returns the name of the profile data file.\n \"\"\"\n pgc_filename = '%s\\\\%s!%d.pgc' % (self._browser_dir, dll_name, index)\n subprocess.Popen([_PGOSWEEP_EXECUTABLE,\n '/pid:%d' % pid,\n '%s.dll' % dll_name,\n pgc_filename]\n ).wait()\n return pgc_filename\n\n @classmethod\n def name(cls):\n return 'win_pgo_profiler'\n\n @classmethod\n def is_supported(cls, browser_type):\n # This profiler only make sense when doing a Windows build with Visual\n # Studio (minimal supported version is 2013 Update 2).\n return sys.platform.startswith('win')\n\n @classmethod\n def CustomizeBrowserOptions(cls, browser_type, options):\n # The sandbox need to be disabled if we want to be able to gather the\n # profile data.\n options.AppendExtraBrowserArgs('--no-sandbox')\n\n def CollectProfile(self):\n \"\"\"Collect the profile data for the current processes.\"\"\"\n output_files = []\n for pid, output_file in self._GetProcessOutputFileMap().iteritems():\n if 'renderer' in output_file:\n output_files.append(self._RunPGOSweep(pid,\n 'chrome_child',\n self._chrome_child_pgc_counter))\n self._chrome_child_pgc_counter += 1\n elif 'browser0' in output_file:\n output_files.append(self._RunPGOSweep(pid,\n 'chrome',\n self._chrome_pgc_counter))\n self._chrome_pgc_counter += 1\n return output_files\n","repo_name":"googlearchive/big-rig","sub_path":"app/src/thirdparty/telemetry/internal/platform/profiler/win_pgo_profiler.py","file_name":"win_pgo_profiler.py","file_ext":"py","file_size_in_byte":3568,"program_lang":"python","lang":"en","doc_type":"code","stars":857,"dataset":"github-code","pt":"61"} +{"seq_id":"71229091076","text":"from diffractio import plt, sp, np, um, mm, degrees, num_max_processors\nfrom diffractio.scalar_masks_X import Scalar_mask_X\nfrom diffractio.scalar_masks_XZ import Scalar_mask_XZ\nfrom diffractio.scalar_sources_X import Scalar_source_X\nfrom diffractio.scalar_fields_XZ import Scalar_field_XZ\nfrom diffractio.utils_multiprocessing import execute_multiprocessing\n\nx = np.linspace(-150*mm, 150*mm, 1000)\nz = np.linspace(0*mm, 300*mm, 1000)\nwavelength = 9 * mm\n#period = 40 * um\n# z_talbot = 2 * period**2 / wavelength\n\nSlitWidth=[9]\n\nu0 = Scalar_source_X(x, wavelength)\nu0.plane_wave(A=1)\n\nfor sw in SlitWidth:\n t = Scalar_mask_X(x, wavelength, n_background=1)\n t.ronchi_grating(x0 = 0, period = 20*mm, fill_factor=sw/20) \n\n talbot_effect = Scalar_field_XZ(x, z, wavelength)\n talbot_effect.incident_field(u0 * t)\n 
talbot_effect.WPM(has_edges=False)\n\n talbot_effect.draw(kind='intensity', draw_borders=True)\n #plt.ylim(-150 * mm, 150 * um)\n plt.savefig(f'Figures/temp/{sw}_0_intensity.png')\n plt.show()\n \n","repo_name":"ShashanthS/Talbot-Effect","sub_path":"TalbotEffect_SingleThinSlit.py","file_name":"TalbotEffect_SingleThinSlit.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"74849551235","text":"import os\nfrom betamax import Betamax\n\n\nHERE = os.path.abspath(os.path.dirname(__file__))\n# where betamax will store cassettes (http responses)\n# https://stackoverflow.com/a/38214137/3036129\nCASSETTE_DIR = os.path.abspath(os.path.join(HERE, \"fixtures\", \"cassettes\"))\n\nwith Betamax.configure() as config:\n config.cassette_library_dir = CASSETTE_DIR\n config.preserve_exact_body_bytes = True\n","repo_name":"jackdbd/hokuto-no-ken-api","sub_path":"hokuto_scraping/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42243253113","text":"from zope.interface import implements\nfrom zope.component import adapts\nfrom AccessControl import ClassSecurityInfo \nfrom Products.CMFCore.permissions import View\nfrom Products.Archetypes.atapi import *\nfrom Products.ATContentTypes.content import file\nfrom Products.ATContentTypes.content.schemata import finalizeATCTSchema\nfrom kk.teama.projects.interfaces import IVideo\nfrom kk.teama.projects.config import PROJECTNAME\n\nfrom plone.app.blob.field import ImageField as BlobImageField, FileField as BlobFileField\n\nfrom Products.validation import V_REQUIRED\n\nfrom Products.validation.interfaces import ivalidator\nfrom Products.validation import validation\nimport mimetypes\n\nclass VideoValidator:\n implements(ivalidator)\n def __init__(self, name):\n self.name = name\n def __call__(self, value, *args, **kwargs):\n\n filename = getattr(value, \"filename\")\n t = filename.split(\".\") \n ext = t[-1].lower()\n if ext != 'flv':\n return (\"Validation failed: file is not FLV \")\n return 1\n \nvalidation.register(VideoValidator('isVideo'))\n\nFileSchema = file.ATFileSchema.copy()\ndel(FileSchema['file'])\nVideoSchema = FileSchema + Schema((\n BlobFileField('file',\n required=True,\n primary=True,\n searchable=True,\n validators = (('isNonEmptyFile', V_REQUIRED),\n ('checkFileMaxSize', V_REQUIRED), ('isVideo')),\n widget = FileWidget(\n description = '',\n label=\"File\",\n show_content_type = False,)\n \t\t),\n \n BlobImageField('image',\n \t\t\trequired = True,\n widget=ImageWidget(label='Clip',\n description=''),\n ),\n))\n\n\n\nclass Video(file.ATFile):\n \"\"\" video \"\"\"\n schema = VideoSchema\n implements(IVideo)\n portal_type=meta_type=\"Video\"\n security = ClassSecurityInfo() \n security.declareProtected(View, 'tag') \n def tag(self, **kwargs):\n \"\"\"Generate image tag using the api of the ImageField\n \"\"\"\n if 'title' not in kwargs:\n kwargs['title'] = self.Title()\n return self.getField('image').tag(self, **kwargs) \n\nregisterType(Video, PROJECTNAME)\n","repo_name":"besja/kk.teama.projects","sub_path":"kk/teama/projects/content/video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40497588437","text":"class Solution:\n def groupAnagrams(self, strs: List[str]) -> 
List[List[str]]:\n dict_ = dict()\n answers = []\n\n for word in strs :\n nword = ''.join(sorted(list(word)))\n if nword in dict_ :\n dict_[nword].append(word)\n else :\n dict_[nword] = [word]\n\n # for key in dict_.keys() :\n # answers.append(dict_[key])\n\n # return answers\n return dict_.values()","repo_name":"lonebots/python-programming","sub_path":"leet-code/49-group-anagram.py","file_name":"49-group-anagram.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"29184642866","text":"from Modules.B4_AnotatieTool import Annoteren\nfrom Modules.Q_LabelConvert import load_label_names\nfrom ultralytics import YOLO\nimport ipywidgets as widgets\nfrom onnx import load\nimport os\n\n\ndef LabelCheck(ImageMap, Annotaties, Model, ProjectName):\n Titel = widgets.HTML(\n \"
Labels aanmaken<br>De volgende Labels zijn al in gebruik:
    \"\n )\n # checkt of er al een LabelDoc bestaat en maakt deze anders aan\n LabelDoc = os.path.join(Annotaties, \"Labels.txt\")\n if not os.path.exists(LabelDoc):\n with open(LabelDoc, \"w+\") as a:\n a.write(\"\")\n\n # functie om de weergegeven lijst van labels te updaten\n def LaadLabelLijst():\n with open(LabelDoc, \"r\") as b:\n labels = b.read()\n labels = labels.replace(\"\\n\", \"
    \")\n if not labels:\n labels = (\"*Labels.txt is nog leeg*\")\n return labels\n\n # weergeeft labels\n LabelLijst = widgets.HTML(value=LaadLabelLijst())\n\n # weergeeft een text box die de input van nieuwe labels faciliteert\n LabelInputTitel= widgets.HTML(\"Nieuw label:\")\n LabelInput = widgets.Text(\n placeholder=\"Naam label (Geen speciale tekens!)\",\n disabled=False,\n )\n\n LabelInput.continuous_update = False\n\n # Knop voor het toevoegen van Labels\n LabelSubmit = widgets.Button(\n value=False, \n description=\"Permanent toevoegen\", \n button_style=\"\",\n )\n\n # Functie om nieuwe labels toe te voegen en de weergave te updaten\n def AddToList(PlaceHolder):\n with open(LabelDoc, \"r\") as c:\n if LabelInput.value not in c.read():\n with open(LabelDoc, \"a\") as d:\n d.write(LabelInput.value + \"\\n\")\n LabelInput.value = \"\"\n LabelLijst.value = LaadLabelLijst()\n\n LabelSubmit.on_click(AddToList)\n LabelInput.observe(AddToList)\n\n # Knop om de annotatietool te starten\n submit = widgets.Button(\n value=False, \n description=\"Start met annoteren\", \n button_style=\"success\",\n )\n\n # geeft benodigde gegevens door aan de annotatietool maar geeft een error als er 0 labels in totaal zijn\n @submit.on_click\n def SaveAndLaunch(PlaceHolder):\n load_label_names(LabelDoc)\n if load_label_names(LabelDoc):\n Annoteren(\n ImageMap=ImageMap,\n Annotaties=Annotaties,\n Labels=LabelDoc,\n Model=Model,\n ProjectName=ProjectName,\n )\n LabelCheck.close()\n else:\n ErrorCode.value = \"\"\"\n
\n Er staan geen labels in labels.txt<br>Een minimum van 1 label moet toegevoegd worden.\n
    \n \"\"\"\n\n line = widgets.HTML(value=\"
    \")\n ErrorCode = widgets.HTML()\n\n # Clustert de widgets in een interface\n LabelCheck = widgets.VBox([\n Titel,\n LabelLijst,\n widgets.HBox([\n LabelInputTitel,\n LabelInput,\n LabelSubmit,\n ]),\n line,\n widgets.HBox([\n ErrorCode, \n submit\n ], layout=widgets.Layout(justify_content=\"flex-end\")\n )], layout=widgets.Layout(width=\"888px\"),\n )\n\n # Checkt voordat de interface ingeladen word of de labels van het geselecteerde model (Niet verplicht!) gelijk zijn aan die van\n if Model:\n # laat het geselecteerde model in en filtert de labelnamen eruit.\n if Model.endswith(\".pt\"):\n\n model = YOLO(Model, task=\"detect\")\n MLabels = model.model.names\n ModelLabels = \"\"\n for labels in MLabels:\n ModelLabels += f\"{MLabels[labels]}
    \"\n elif Model.endswith(\".onnx\"):\n model = load(Model)\n MLabels = (\n str(model)\n .split('\\nmetadata_props {\\n key: \"names\"\\n value: \"')[-1]\n .replace(\"{\", \"\")\n .replace(\"}\", \"\")\n .replace(\"\\\\'\", \"\")\n .replace(\"\\n\", \"\")\n .replace('\"', \"\")\n .split(\", \")\n )\n ModelLabels = \"\"\n for labels in MLabels:\n ModelLabels += f\"{labels.split(': ')[-1]}
    \"\n\n if LabelLijst.value != ModelLabels:\n # Als de lijsten niet overeenkomen word een extra \"Label Check\" scherm weergegeven om om de verschillen weer te geven en de gebruiker tussen de 2 opties te laten kiezen\n LLijst = widgets.HTML(\n f\"
Labels.txt<br>{LaadLabelLijst()}\"\n )\n MLijst = widgets.HTML(\n f\"Model labels<br>
    {ModelLabels}\"\n )\n\n # PickL of Pick label.txt is de knop voor het negeren van het conflict.\n PickL = widgets.Button(\n value=False, \n description=\"Gebruik Labels.txt\"\n )\n PickL.style.button_color = \"red\"\n\n @PickL.on_click\n def PL(PlaceHolder):\n Conflict.close()\n display(LabelCheck)\n\n # PickM of Pick Model is de knop om de modellabels te gebruiken. \n # De functie overschrijft Label.txt met de labels van het model. \n # Oude labels worden niet bewaard!\n PickM = widgets.Button(\n value=False, \n description=\"Vervang Labels.txt\"\n )\n PickM.style.button_color = \"green\"\n\n @PickM.on_click\n def PM(PlaceHolder):\n with open(LabelDoc, \"w+\") as e:\n e.write(\"\")\n with open(LabelDoc, \"a\") as f:\n MLabels = ModelLabels.split(\"
    \")\n for labels in MLabels:\n if labels:\n f.write(f\"{labels}\\n\")\n LabelLijst.value = LaadLabelLijst()\n Conflict.close()\n display(LabelCheck)\n\n ConflictTitel = widgets.HTML(\n \"
Label check<br>
    Er is een Conflict gevonden tussen Labels.txt in de label folder en de label van het gekozen model. Dit kan mogelijk gegenereerde annotaties verkeert labelen als de volgorde en/of betekenis van de labels verandert is. Negeer dit conflict (rode knop) of overschrijf de labels in Labels.txt en voorkom dit probleem in de toekomst (groene knop)\"\n )\n\n # Clustert de widgets in een interface\n Conflict = widgets.VBox([\n ConflictTitel,\n widgets.HBox([\n LLijst, \n MLijst\n ], layout=widgets.Layout(justify_content=\"center\", width=\"400px\"),\n ),\n widgets.HBox([\n PickL, \n PickM\n ], layout=widgets.Layout(justify_content=\"center\", width=\"400px\"),\n )], layout=widgets.Layout(width=\"888px\"),\n )\n display(Conflict)\n else:\n display(LabelCheck)\n else:\n display(LabelCheck)\n","repo_name":"FishingBehindTheNet/AiAssist-AnnotatieTool","sub_path":"Modules/B3_LabelCheck.py","file_name":"B3_LabelCheck.py","file_ext":"py","file_size_in_byte":7280,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23453560201","text":"fin = open(\"A-large.in\", 'r')\r\nfout= open(\"out.txt\", 'w')\r\n\r\n\r\n\r\ncount = 0\r\ndictd = dict()\r\n\r\ntestCaNum= int(fin.readline().strip())\r\nfor testCa in range(1, testCaNum +1):\r\n fiList = []\r\n fch = fin.readline().strip().split()\r\n li = list(fch[1])\r\n #print(fch)\r\n acc=li[0]\r\n for i in range(len(li)):\r\n li[i]=int(li[i])\r\n acc=li[0]\r\n friends = 0\r\n for i in range(1,len(li)):\r\n #print(acc,friends)\r\n if li[i] > 0:\r\n if i <= acc:\r\n acc = acc+ li[i]\r\n else:\r\n friends = friends + (i - acc)\r\n acc = i + li[i]\r\n \r\n fout.write(\"Case #\"+str(testCa)+\": \"+ str(friends)+\"\\n\")\r\n #if(j == 0):\r\n #fout.write(\"Volunteer cheated!\\n\")\r\n #elif(j==1):\r\n #fout.write(n+\"\\n\")\r\n #else:\r\n #fout.write(\"Bad magician!\\n\")\r\n \r\nfin.close()\r\nfout.close()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_155/2944.py","file_name":"2944.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17778628543","text":"from npfmt import *\nfrom IsType import *\n\n\n\ndef Freq2Velo( dfreq, freq0 ) : \n\t'''\n\tDoppler effect/shift\n\n\tfreq0:\n\t\tfloat/on value in MHz\n\t\tcenter frequency\n\n\tdfreq:\n\t\tin MHz\n\t\tcan be one/int/float or list/ndarray\n\t\tdifferential from freq0\n\n\treturn:\n\t\tdvelo in km/s (from 0km/s)\n\t\tsame shape as dfreq\n\t'''\n\tfreq0 = npfmt(freq0, float).flatten()[0]\n\tistype, islist = IsType(), True\n\tif (istype.isint(dfreq) or istype.isfloat(dfreq)) : \n\t\tislist = False\n\tdfreq = npfmt(dfreq)\n\tdvelo = 3e5*dfreq/freq0\n\tif (not islist) : dvelo = dvelo[0]\n\treturn dvelo\n\n\n\ndef Velo2Freq( dvelo, freq0 ) : \n\t'''\n\tdvelo:\n\t\tin km/s\n\t\tcan be one/int/float or list/ndarray\n\t\tdifferential from 0km/s\n\n\treturn:\n\t\tdfreq in MHz (from freq0)\n\t\tsame shape as dvelo\n\t'''\n\tfreq0 = npfmt(freq0, float).flatten()[0]\n\tistype, islist = IsType(), True\n\tif (istype.isint(dvelo) or istype.isfloat(dvelo)) : \n\t\tislist = False\n\tdvelo = npfmt(dvelo)\n\tdfreq = dvelo*freq0/(3e5)\n\tif (not islist) : dfreq = dfreq[0]\n\treturn dfreq\n\n\n\n\n","repo_name":"jizhi/huangqizhi_git","sub_path":"jizhipy/Velo2Freq.py","file_name":"Velo2Freq.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
+{"seq_id":"14118681729","text":"\n\nimport os, sys \n\ndef main(folder):\n for root, subdir, files in os.walk(folder):\n for file in files:\n new_fname = '_'.join(file.split('_')[:-1]) + '.png'\n\n mv_command = 'mv ' + os.path.join(root, file) + ' ' + os.path.join(root, new_fname) \n\n os.system(mv_command)\n\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1])","repo_name":"milesizydorczak12/Fingerprint-Correlation","sub_path":"helpful_scripts/remove_suffix.py","file_name":"remove_suffix.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"2404974638","text":"import colorama\nfrom colorama import Fore, Style\n\ncolorama.init()\n\n\ndef print_params_in_color(seed, cfg, steps, strength, base_img, mask_img, model):\n print(\n Fore.MAGENTA\n + f\"SEED: {seed} \"\n + Fore.CYAN\n + f\"CFG: {cfg} \"\n + Fore.LIGHTGREEN_EX\n + f\"STEPS: {steps} \"\n + Fore.LIGHTYELLOW_EX\n + f\"STRENGTH: {strength} \"\n + Fore.LIGHTWHITE_EX\n + f\"BASE: {base_img} \"\n + f\"MASK: {mask_img} \"\n + Fore.LIGHTBLACK_EX\n + f\"MODEL: {model}\"\n + Style.RESET_ALL\n )\n","repo_name":"wswld/waifoo","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23610553981","text":"#!/usr/bin/python\r\n# 2010 Round1C\r\n''' Usage %s\r\n'''\r\nimport logging\r\n\r\nCurrentDebugLevel=logging.DEBUG\r\n\r\ndef ProcessCase(inFile, caseNum):\r\n logging.debug('Case %d', caseNum)\r\n param = inFile.readline().strip().split()\r\n #logging.debug(param)\r\n \r\n height = int(param[0])\r\n building = {}\r\n for i in range(height):\r\n pair = [int(x) for x in inFile.readline().strip().split()]\r\n building[pair[0] - 1] = (pair[1] - pair[0], pair[0]) # pair -1 since we start from 0\r\n\r\n total = 0\r\n for k, i in building.items():\r\n for t, j in building.items():\r\n x = (j[0] - i[0])\r\n if x == 0: continue\r\n y = (j[1] - i[1])/ x\r\n if y < 0 and y > -1: total += 1\r\n \r\n result = [total // 2]\r\n \r\n return result\r\n\r\ndef OutputResult(outFile, caseNum, result):\r\n value = result[0]\r\n outFile.write(\"Case #{0}: {1}\\n\".format(caseNum, value))\r\n logging.debug(\"Case #{0}: {1}\\n\".format(caseNum, value))\r\n\r\ndef ProcessDataFile(fileName):\r\n inFile = open(fileName, 'r')\r\n line = inFile.readline()\r\n lineCount = int(line)\r\n outFile = open(fileName + '.out.txt', 'w')\r\n for i in range(1, lineCount + 1):\r\n result = ProcessCase(inFile, i)\r\n OutputResult(outFile, i, result)\r\n outFile.close()\r\n\r\ndef main():\r\n logging.basicConfig(level=CurrentDebugLevel, datefmt='%Y.%m.%d %H:%M:%S', format='%(asctime)s %(levelname)-5s %(message)s')\r\n question = 'A'\r\n dataSet = 2\r\n attemptCount = 0 \r\n isPractice = False\r\n \r\n partName = '-practice'\r\n dataSetNames = ['test', 'small', 'large']\r\n if dataSet == 0:\r\n dataFileName = '{0}-{1}.txt'.format(question, dataSetNames[dataSet])\r\n elif dataSet == 1:\r\n if not isPractice: partName = '-attempt{}'.format(attemptCount)\r\n dataFileName = '{0}-{1}{2}.in'.format(question, dataSetNames[dataSet], partName)\r\n else:\r\n if not isPractice: partName = ''\r\n dataFileName = '{0}-{1}{2}.in'.format(question, dataSetNames[dataSet], partName)\r\n\r\n ProcessDataFile(dataFileName)\r\n\r\nif __name__ == '__main__': 
main()","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_62/302.py","file_name":"302.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1626133558","text":"# Load and test the model on 16 sample images\n\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras.preprocessing.image import img_to_array\nfrom imutils import paths\nfrom imutils import build_montages\nimport cv2\nimport numpy as np\nimport argparse\nimport random\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--images\", required=True)\nargs = vars(ap.parse_args())\n\nmodel = load_model(\"covid19_model.h5\")\n\nimagePaths = list(paths.list_images(args[\"images\"]))\nrandom.shuffle(imagePaths) # 16 random samples\nimagePaths = imagePaths[:16]\n\n\nresults = []\nfor i in imagePaths:\n orig = cv2.imread(i)\n \n img = cv2.cvtColor(orig, cv2.COLOR_BGR2RGB)\n img = cv2.resize(img, (224, 224))\n img = img.astype(\"float\") / 255.0\n\n img = img_to_array(img)\n img = np.expand_dims(img, axis=0)\n\n pred = model.predict(img)\n pred = pred.argmax(axis=1)[0]\n\n label = \"negative\" if pred == 0 else \"positive\"\n color = (0, 0, 255) if pred == 0 else (0, 255, 0)\n\n orig = cv2.resize(orig, (128, 128))\n cv2.putText(orig, label, (3, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 1)\n\n results.append(orig)\n\n\nmontage = build_montages(results, (128, 128), (4, 4))[0]\ncv2.imshow(\"Results\", montage)\ncv2.waitKey(0)\n","repo_name":"anthony-chang/covid19-deep-learning","sub_path":"run_model.py","file_name":"run_model.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72224658113","text":"import gevent.subprocess as sp\n\n\nclass FirewallUtils:\n def __init__(self, logger):\n self.logger = logger\n\n def remove_rules_by_name(self, name):\n args = [\n 'c:\\\\windows\\\\system32\\\\Netsh.exe',\n 'advfirewall',\n 'firewall',\n 'delete',\n 'rule',\n 'name=%s' % name\n ]\n # Don't check for failure here, because it is expected to\n # fail if there are no left-over rules from a previous run.\n sp.call(args, stdout=sp.DEVNULL)\n\n def remove_rule(self, name, ip, port, protocol, allow_or_block):\n self.logger.info('remove %sing firewall rule for %s to %s port %s' %\n (allow_or_block, ip, protocol, port))\n\n args = [\n 'c:\\\\windows\\\\system32\\\\Netsh.exe',\n 'advfirewall',\n 'firewall',\n 'delete',\n 'rule',\n 'name=%s' % name,\n 'protocol=%s' % protocol,\n 'dir=in',\n 'profile=any',\n 'localport=%s' % port,\n 'remoteip=%s' % ip\n ]\n try:\n sp.check_output(args, text = True)\n except sp.CalledProcessError as e:\n self.logger.error('Failed to remove rule from firewall:\\n%s' % e.output)\n\n def add_rule(self, name, ip, port, protocol, allow_or_block):\n self.logger.info('add %sing firewall rule for %s to %s port %s' %\n (allow_or_block, ip, protocol, port))\n\n if allow_or_block not in ('allow', 'block'):\n raise RuntimeError('Invalid argument provided: %s' % allow_or_block)\n\n\n args = [\n 'c:\\\\windows\\\\system32\\\\Netsh.exe',\n 'advfirewall',\n 'firewall',\n 'add',\n 'rule',\n 'name=%s' % name,\n 'protocol=%s' % protocol,\n 'dir=in',\n 'enable=yes',\n 'profile=any',\n 'localport=%s' % port,\n 'action=%s' % allow_or_block,\n 'remoteip=%s' % ip\n ]\n try:\n sp.check_output(args, text = True)\n except sp.CalledProcessError as e:\n self.logger.error('Failed to add rule to 
firewall:\\n%s' % e.output)\n\n def find_tribes_ascend_rules(self):\n args = [\n 'c:\\\\windows\\\\system32\\\\Netsh.exe',\n 'advfirewall',\n 'firewall',\n 'show',\n 'rule',\n 'name=all',\n 'dir=in',\n 'status=enabled',\n 'verbose'\n ]\n try:\n output = sp.check_output(args, text = True)\n except sp.CalledProcessError as e:\n self.logger.error('Failed to request firewall rules.')\n output = ''\n\n ta_rules = []\n for line in output.splitlines():\n if line.startswith('Rule Name:'):\n newrule = {}\n elif ':' in line:\n key, value = line.split(':', maxsplit=1)\n key = key.strip()\n value = value.strip()\n\n newrule[key] = value\n\n if key == 'Program' and value.lower().endswith('tribesascend.exe'):\n ta_rules.append(newrule)\n\n return ta_rules\n\n def disable_rules_for_program_name(self, programname):\n args = [\n 'c:\\\\windows\\\\system32\\\\Netsh.exe',\n 'advfirewall',\n 'firewall',\n 'set',\n 'rule',\n 'name=all',\n 'dir=in',\n 'program=%s' % programname,\n 'new',\n 'enable=no'\n ]\n\n try:\n self.logger.info('Disabling rule for %s' % programname)\n sp.check_output(args, text = True)\n except sp.CalledProcessError as e:\n self.logger.error('Failed to remove firewall rules for program %s. Output:\\n%s' %\n (programname, e.output))\n\n","repo_name":"Griffon26/taserver","sub_path":"firewall/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3895,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"61"} +{"seq_id":"72292409473","text":"import random\r\n\r\nclass Graph:\r\n def __init__(self,vnum):\r\n self.v=vnum\r\n self.adjlist={}\r\n for i in range(self.v):\r\n self.adjlist.setdefault(i,[]).append(0)\r\n self.adjlist[i].remove(0)\r\n self.e = 0\r\n self.alledges={}\r\n\r\n def randominit(self):\r\n for i in range(self.v):\r\n weight = (random.random()+0.01)*1000\r\n self.adjlist.setdefault(i,[]).append([(i+1)%self.v,weight])\r\n self.adjlist.setdefault((i+1)%self.v,[]).append([i,weight])\r\n self.e+=1\r\n self.alledges[i, (i+1)%self.v] = weight\r\n\r\n def addedge(self,u,v,weight):\r\n #pass\r\n if self.v==0:\r\n raise ValueError(\"can not add edge to the empty graph!\")\r\n self.adjlist.setdefault(u,[]).append([v,weight])\r\n self.adjlist.setdefault(v,[]).append([u,weight])\r\n self.e+=1\r\n self.alledges[u,v]=weight\r\n\r\n def edgeexist(self,u,v):\r\n flag=False\r\n for edges in self.adjlist[v]:\r\n if u == edges[0]:\r\n flag=True\r\n return flag\r\n\r\n def edgenum(self):\r\n return self.e\r\n\r\n def getdegree(self):\r\n degree = 0\r\n for value in self.adjlist.values():\r\n degree += len(value)\r\n return degree\r\n","repo_name":"xqjiang423/homework","sub_path":"Graph.py","file_name":"Graph.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24462256341","text":"import subprocess\nimport platform\nfrom shutil import which\ndef getestimatedcpufrequency():\n # We should be checking the architecture we are running on, but platform.processor()\n # is \"unknown\" in our container\n if platform.system() != \"Linux\":\n return None\n p = subprocess.run([\"gcc\", \"-O2\", \"-o\", \"cpu_frequency_estimate\", \"cpu_frequency_estimate.c\"])\n if p.returncode != 0:\n print(\"Failed to compile\")\n raise Exception(\"cpu_frequency_estimate.c failed to compile\")\n p = subprocess.Popen([\"./cpu_frequency_estimate\"], shell=False,stdout=subprocess.PIPE)\n lines = p.communicate()[0].decode()\n frequency = None\n for line in 
lines.splitlines():\n if \"Estimated frequency\" in line:\n l = line.split(\"=\")[1]\n l = l.rstrip().lstrip()\n return l\n \n return None\n\ndef getcpuinfo(required_tags):\n tags = {}\n try:\n fp = open(\"/proc/cpuinfo\", \"r\")\n lines = fp.readlines()\n except:\n lines = None\n if lines is None:\n required_tags = [\"machdep.cpu.brand_string\", \"machdep.cpu.core_count\"]\n p = subprocess.Popen([\"sysctl\", \"-a\"], stdout=subprocess.PIPE)\n lines = []\n while True:\n line = p.stdout.readline()\n if not line: break\n lines.append(line.decode())\n\n for line in lines:\n if \":\" in line:\n (t,v) = line.split(\":\",1)\n t = t.rstrip().lstrip()\n v = v.rstrip().lstrip()\n if (t in required_tags) and (not t in tags.keys()):\n tags[t] = v \n return tags\n","repo_name":"open-quantum-safe/profiling","sub_path":"perf/scripts/get_cpuinfo.py","file_name":"get_cpuinfo.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"6084361953","text":"import requests\r\nimport time\r\nfrom win10toast import ToastNotifier\r\nimport sys\r\n\r\nalert = ToastNotifier()\r\n\r\nurltocheck = \"https://mangaesp.co/ver/la-piba-del-tinder\"\r\nstringtocheck = \"https://mangaesp.co/ver/la-piba-del-tinder/\" + sys.argv[1]\r\nmsg = \"The new chapter is here.\"\r\n\r\n\r\n\r\nwhile True:\r\n\tchecker = requests.get(urltocheck)\r\n\tif stringtocheck in checker.text:\r\n\t\talert.show_toast(msg)\r\n\t\tbreak\r\n\telse:\r\n\t\ttime.sleep(120)","repo_name":"haya123421321/Scripts","sub_path":"Scanlation-script/Checker.py","file_name":"Checker.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30767772425","text":"# Models\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nfrom sklearn.feature_selection import SelectKBest, chi2\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import Pipeline, make_pipeline\n# Sampling\nfrom imblearn.under_sampling import RandomUnderSampler\nfrom imblearn.over_sampling import RandomOverSampler\n# Plotting\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import f1_score, precision_score, recall_score, confusion_matrix, accuracy_score, plot_precision_recall_curve, plot_roc_curve, plot_confusion_matrix\n# IO\nimport numpy as np\nimport pandas as pd\nimport pickle\n\npd.options.display.float_format = \"{:,.2f}\".format\n\n# Read dataset into memory\ndata = pd.read_csv(\"./JSVulnerabilityDataSet-1.0.csv\")\nparametersIndex = data.columns.get_loc(\"CC\")\nresultIndex = data.columns.get_loc(\"Vuln\")\nX = data.iloc[:, parametersIndex:resultIndex]\nY = data.iloc[:, resultIndex]\n\n# X = SelectKBest(chi2, k=20).fit_transform(X, Y)\n# print(X_new)\n\n# Prepare plots\nfig_confusion, subs_confusion = plt.subplots(3,3)\nfig_metrics, subs_metrics = plt.subplots(3,3)\nsubs_metrics[0][0].set_title('precision vs recall')\nsubs_metrics[0][1].set_title('ROC')\nsubs_metrics[0][2].set_title('Accuracy')\nsubs_confusion[0][0].set_title('no sampling')\nsubs_confusion[0][1].set_title('over sampling')\nsubs_confusion[0][2].set_title('under sampling')\nfor idx, row in enumerate(subs_metrics):\n row[0].set_yticklabels([])\n row[0].set_xticklabels([])\n row[0].get_xaxis().set_visible(False)\n row[1].set_yticklabels([])\n 
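Checker.py in the record above polls a page every two minutes until the new chapter's URL appears in the HTML. A sketch of the same loop with a request timeout, transient-error handling, and an overall deadline added; the URL, needle, and intervals are placeholders:

import time
import requests

def wait_for_string(url, needle, interval=120, max_wait=24 * 3600):
    deadline = time.time() + max_wait
    while time.time() < deadline:
        try:
            if needle in requests.get(url, timeout=30).text:
                return True
        except requests.RequestException:
            pass  # transient network error: retry on the next tick
        time.sleep(interval)
    return False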
row[1].set_xticklabels([])\n row[1].get_yaxis().set_visible(False)\n row[1].get_xaxis().set_visible(False)\n row[2].set_yticklabels([])\n row[2].set_xticklabels([])\n row[2].get_yaxis().set_visible(False)\n row[2].get_xaxis().set_visible(False)\n\nfor idx, row in enumerate(subs_confusion):\n row[0].set_yticklabels([])\n row[0].set_xticklabels([])\n row[0].get_xaxis().set_visible(False)\n row[1].set_yticklabels([])\n row[1].set_xticklabels([])\n row[1].get_yaxis().set_visible(False)\n row[1].get_xaxis().set_visible(False)\n row[2].set_yticklabels([])\n row[2].set_xticklabels([])\n row[2].get_yaxis().set_visible(False)\n row[2].get_xaxis().set_visible(False)\n\nrow = 0\n\ndef train_model(model_type, X_train, Y_train, sampling, grid):\n if sampling != \"no\":\n sampler = RandomOverSampler() if sampling == \"over\" else RandomUnderSampler()\n X_train, Y_train = sampler.fit_resample(X_train, Y_train)\n trained_model = GridSearchCV(make_pipeline(StandardScaler(), model_type), grid, scoring=\"f1\")\n trained_model.fit(X_train, Y_train)\n\n print(\"Done training model, the following hyper parameters were used:\")\n print(trained_model.best_params_)\n print(\"\\n\")\n return trained_model\n\ndef plot_metric(title, metric_plotter, trained_model, X_test, Y_test, row, col):\n plot_configuration = metric_plotter(trained_model, X_test, Y_test)\n plot_configuration.plot(ax=subs_metrics[row][col], name=title)\n\n\ndef get_metrics(model, X_test, Y_test):\n threshs = []\n prec = []\n rec = []\n f1s = []\n for threshold in np.arange(0, 1, 0.1):\n prediction = (model.predict_proba(X_test)[:, 1] > threshold).astype('float')\n threshs.append(threshold)\n prec.append(precision_score(Y_test, prediction, zero_division=0))\n rec.append(recall_score(Y_test, prediction))\n f1s.append(f1_score(Y_test, prediction))\n\n df = pd.DataFrame([prec, rec, f1s], index=[\"Precision\", \"Recall\", \"F1\"])\n df.columns = threshs\n return df\n\nmodels = {\n \"Logistic Regression\": {\n \"model\": LogisticRegression(solver='liblinear', max_iter=10000),\n \"grid\": {\n 'logisticregression__penalty': ['l1','l2'],\n 'logisticregression__C': [0.001,0.01,0.1,1,10,100,1000],\n }\n },\n \"KNN\": {\n \"model\": KNeighborsClassifier(),\n \"grid\": {\"kneighborsclassifier__n_neighbors\": range(1, 10)}\n },\n \"Decision Tree\": {\n \"model\": DecisionTreeClassifier(),\n \"grid\": {\n 'decisiontreeclassifier__max_leaf_nodes': list(range(2, 100)),\n 'decisiontreeclassifier__min_samples_split': [2, 3, 4]\n }\n }\n}\n\nfor model_name, params in models.items():\n # Split the data for training and validation\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y)\n \n # Train the model with different sampling strategies\n print(\"Training: \" + model_name)\n print(\"will preform grid-search for parameters, optimising for F1 score\")\n print(\"====================\\n\\n\")\n model_over_sampling = train_model(params[\"model\"], X_train, Y_train, \"over\", params[\"grid\"])\n print(get_metrics(model_over_sampling, X_test, Y_test))\n get_metrics(model_over_sampling, X_test, Y_test).to_html(open(\"./tables/\" + model_name + \"-over.html\", \"w\"))\n print(\"====================\")\n\n model_under_sampling = train_model(params[\"model\"], X_train, Y_train, \"under\", params[\"grid\"])\n print(get_metrics(model_under_sampling, X_test, Y_test))\n get_metrics(model_over_sampling, X_test, Y_test).to_html(open(\"./tables/\" + model_name + \"-under.html\", \"w\"))\n print(\"====================\")\n\n model_no_sampling = train_model(params[\"model\"], 
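train_model() above combines optional resampling with a scaler-plus-estimator pipeline tuned by F1 score. A condensed sketch of that pattern for a single estimator — the grid values here are illustrative, not the ones from the script:

from imblearn.over_sampling import RandomOverSampler
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

def fit_balanced(X, y):
    X, y = RandomOverSampler().fit_resample(X, y)       # rebalance classes
    pipe = make_pipeline(StandardScaler(), LogisticRegression(max_iter=10000))
    grid = {"logisticregression__C": [0.01, 0.1, 1, 10]}
    search = GridSearchCV(pipe, grid, scoring="f1")
    return search.fit(X, y)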
X_train, Y_train, \"no\", params[\"grid\"])\n print(get_metrics(model_no_sampling, X_test, Y_test))\n get_metrics(model_over_sampling, X_test, Y_test).to_html(open(\"./tables/\" + model_name + \"-no.html\", \"w\"))\n print(\"====================\")\n\n # Report confusion matrices\n confusion_plot_no = plot_confusion_matrix(model_no_sampling, X_test, Y_test)\n confusion_plot_no.plot(ax=subs_confusion[row][0])\n\n confusion_plot_over = plot_confusion_matrix(model_over_sampling, X_test, Y_test)\n confusion_plot_over.plot(ax=subs_confusion[row][1])\n\n confusion_plot_under = plot_confusion_matrix(model_under_sampling, X_test, Y_test)\n confusion_plot_under.plot(ax=subs_confusion[row][2])\n \n # Report precision recall curve\n plot_metric('no sampling', plot_precision_recall_curve, model_no_sampling, X_test, Y_test, row, 0)\n plot_metric('over sampling', plot_precision_recall_curve, model_over_sampling, X_test, Y_test, row, 0)\n plot_metric('under sampling', plot_precision_recall_curve, model_under_sampling, X_test, Y_test, row, 0)\n\n # Report ROC curve\n plot_metric('no sampling', plot_roc_curve, model_no_sampling, X_test, Y_test, row, 1)\n plot_metric('over sampling', plot_roc_curve, model_over_sampling, X_test, Y_test, row, 1)\n plot_metric('under sampling', plot_roc_curve, model_under_sampling, X_test, Y_test, row, 1)\n\n # Report accuracy\n prediction_no_sampling = model_no_sampling.predict(X_test)\n prediction_over_sampling = model_over_sampling.predict(X_test)\n prediction_under_sampling = model_under_sampling.predict(X_test)\n\n accuracy_no_sampling = accuracy_score(Y_test, prediction_no_sampling)\n accuracy_over_sample = accuracy_score(Y_test, prediction_over_sampling)\n accuracy_under_sample = accuracy_score(Y_test, prediction_under_sampling)\n subs_metrics[row][2].bar(['no sampling'], [accuracy_no_sampling], color='b')\n subs_metrics[row][2].bar(['over sampling'], [accuracy_over_sample], color='orange')\n subs_metrics[row][2].bar(['under sampling'], [accuracy_under_sample], color='g')\n subs_metrics[row][2].legend([\n \"no sampling - {:.0%}\".format(accuracy_no_sampling),\n \"over sampling - {:.0%}\".format(accuracy_over_sample),\n \"under sampling - {:.0%}\".format(accuracy_under_sample)\n ])\n\n # Add model name to plots row\n subs_metrics[row][0].set_ylabel(model_name)\n subs_confusion[row][0].set_ylabel(model_name)\n\n # Clear un-needed figures to coserve memory\n for fig_num in plt.get_fignums():\n if fig_confusion.number != fig_num and fig_metrics.number != fig_num:\n plt.close(fig_num)\n\n # Save model result to file\n pickle.dump(model_no_sampling, open('./models/' + model_name.lower().replace(' ', '_') + '_no_sampling', 'wb'))\n pickle.dump(model_over_sampling, open('./models/' + model_name.lower().replace(' ', '_') + '_over_sampling', 'wb'));\n pickle.dump(model_under_sampling, open('./models/' + model_name.lower().replace(' ', '_') + '_under_sampling', 'wb'));\n row += 1\n print(\"====================\\n\\n\")\n\nplt.show()\n\n","repo_name":"Dor256/software-vulnerability-detection","sub_path":"train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":8485,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"18210505419","text":"import base64\nfrom io import BytesIO\nfrom PIL import Image\nfrom flask import Flask, render_template, request,jsonify\nimport cv2\nfrom tensorflow.compat.v1 import ConfigProto\nfrom tensorflow.compat.v1 import InteractiveSession\n\nconfig = 
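get_metrics() above sweeps the decision threshold in 0.1 steps and tabulates precision/recall/F1. If the goal is just to pick the F1-optimal cut-off, a compact variant — this assumes a fitted binary classifier exposing predict_proba:

import numpy as np
from sklearn.metrics import f1_score

def best_threshold(clf, X_val, y_val):
    probs = clf.predict_proba(X_val)[:, 1]              # P(class 1)
    grid = np.arange(0.05, 1.0, 0.05)
    scores = [f1_score(y_val, (probs > t).astype(int)) for t in grid]
    return grid[int(np.argmax(scores))]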
ConfigProto()\nconfig.gpu_options.allow_growth = True\nsession = InteractiveSession(config=config)\nimport tensorflow as tf\nimport tensorflow.keras as keras\nfrom keras.preprocessing.image import img_to_array\nimport numpy as np\n# with open('model/model.json', 'r') as f:\n# model = keras.models.model_from_json(f.read())\nmodel = keras.models.load_model('model/model.h5')\nlabel = [\n\"kue dadar gulung\",\n\"kue kastengel\",\n\"kue klepon\",\n\"kue lapis\",\n\"kue lumpur\",\n\"kue putri salju\",\n\"kue risoles\",\n\"kue serabi\",\n]\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n \"\"\"Video streaming home page.\"\"\"\n return render_template('index.html')\n\n@app.route('/api', methods=['POST'])\ndef api():\n if request.method == \"POST\":\n print(\"I am a post\")\n if request.form:\n print(\"I have form data\")\n #print(request.form['kommentar'])\n if request.data:\n data = request.get_json()\n image_data = data.get('content').split(\",\")[1]\n # with open(data.get('token')+'.jpg',\"wb\") as f:\n # f.write(base64.b64decode(image_data))\n # image = cv2.imread(data.get('token')+'.jpg')\n # npimg = np.fromstring(data.get('content'), np.uint8)\n # image = cv2.imdecode(npimg, cv2.IMREAD_COLOR)\n im_bytes = base64.b64decode(image_data)\n im_arr = np.frombuffer(im_bytes, dtype=np.uint8) # im_arr is one-dim Numpy array\n image = cv2.imdecode(im_arr, flags=cv2.IMREAD_COLOR)\n # cv2.imwrite('Test_gray.png', image)\n #PRE-PROCESSING\n image = cv2.resize(image, (150, 150))\n image = img_to_array(image)\n image = np.expand_dims(image, axis=0)\n image = image.astype(\"float\") / 255.0\n #PREDICT\n proba = model.predict(image)[0]\n idx = np.argmax(proba)\n print(label[idx])\n print(proba[idx] * 100)\n # print(\"i have data\",data.get('content'))\n # if request.json:\n # print(\"I have json\",request.json['content'])\n # image_data = request.json['content'].split(\",\")[1]\n # with open(\"clientimage.png\",\"wb\") as f:\n # f.write(base64.b64decode(image_data))\n # Do stuff with the data...\n return jsonify(success=1, label=label[idx], percent=(proba[idx] * 100))\n else:\n print(\"fail\")\n\n return jsonify({})\n \n\nif __name__ == '__main__':\n app.run(host= '0.0.0.0',debug=True,ssl_context='adhoc',port=5000)","repo_name":"fadhelmurphy/Realtime-streaming-classification","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29629746280","text":"# coding=utf-8\nfrom __future__ import unicode_literals\nfrom ..person import Provider as PersonProvider\n\n\nclass Provider(PersonProvider):\n formats = (\n '{{first_name}} {{last_name}} {{last_name}}',\n '{{first_name}} {{last_name}} {{last_name}}',\n '{{first_name}} {{last_name}} {{last_name}}',\n '{{first_name}} {{last_name}} {{last_name}}',\n '{{first_name}} {{last_name}} {{last_name}}',\n '{{first_name}} {{last_name}} {{last_name}}',\n '{{first_name}} {{last_name}}',\n '{{first_name}} {{prefix}} {{last_name}}',\n '{{first_name}} {{last_name}}-{{last_name}}',\n '{{first_name}} {{first_name}} {{last_name}} {{last_name}}',\n )\n\n first_names = (\n 'Abel', 'Abraham', 'Abram', 'Ada', 'Adán', 'Adrián', 'Adriana', 'Alan', 'Alana',\n 'Alejandra', 'Alejandro', 'Alex', 'Alexa', 'Alexandra', 'Alexandría', 'Alexia',\n 'Alexis', 'Alexis', 'Alfredo', 'Alicia', 'Alina', 'Alonzo', 'Amanda', 'Amaya',\n 'Amelia', 'América', 'Ana', 'Andrea', 'Andrés', 'Ángel', 'Ángela', 'Angélica',\n 'Angelina', 'Antonio', 'Ariana', 'Armando', 
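The /api handler in the Flask record above strips the `data:image/...;base64,` prefix and decodes the remainder into an OpenCV image. That step isolated as a helper — it assumes a well-formed data URL, and cv2.imdecode returns None on undecodable bytes:

import base64
import cv2
import numpy as np

def data_url_to_image(data_url):
    b64 = data_url.split(",", 1)[1]          # drop the "data:...;base64," prefix
    buf = np.frombuffer(base64.b64decode(b64), dtype=np.uint8)
    return cv2.imdecode(buf, flags=cv2.IMREAD_COLOR)    # BGR ndarray or None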
'Arturo', 'Asia', 'Aurora',\n 'Benjamín', 'Brenda', 'Camila', 'Carlos', 'Carmen', 'Carolina', 'Cecilia',\n 'Celeste', 'César', 'Clara', 'Claudia', 'Daniel', 'Daniela', 'Daniella',\n 'David', 'Diana', 'Diego', 'Dulce', 'Édgar', 'Eduardo', 'Elena', 'Eliana',\n 'Elías', 'Elisa', 'Elsa', 'Emanuel', 'Emilia', 'Emiliano', 'Emilio', 'Enrique',\n 'Ernesto', 'Esmeralda', 'Esteban', 'Estrella', 'Eva', 'Ezequiel', 'Fabián',\n 'Fátima', 'Felipe', 'Fernanda', 'Fernando', 'Francisco', 'Gabriel', 'Gabriela',\n 'Gerardo', 'Gloria', 'Guadalupe', 'Guillermo', 'Gustavo', 'Héctor', 'Hugo',\n 'Iris', 'Isabel', 'Isabela', 'Israel', 'Iván', 'Jaime', 'Jairo' 'Javier',\n 'Jazmín', 'Jesús', 'Jimena', 'Joaquín', 'Joel', 'Jorge', 'José', 'Josué',\n 'Juan', 'Julia', 'Julián', 'Juliana', 'Julio', 'Laura', 'Leila', 'Leonardo',\n 'Lía', 'Lila', 'Lilian', 'Liliana', 'Linda', 'Lola', 'Lorenzo', 'Lucas',\n 'Lucía', 'Lucián', 'Luis', 'Manuel', 'Marco', 'Marcos', 'María', 'Mariana',\n 'Marina', 'Mario', 'Marisol', 'Martín', 'Mateo', 'Matías', 'Mauricio', 'Max',\n 'Maximiliano', 'Maya', 'Melina', 'Mercedes', 'Miguel', 'Miranda', 'Miriam',\n 'Mónica', 'Isabella', 'Natalia', 'Nicolás', 'Noé', 'Nora', 'Óliver', 'Olivia',\n 'Omar', 'Orlando', 'Óscar', 'Pablo', 'Paola', 'Patricia', 'Paúl', 'Pedro',\n 'Penélope', 'Perla', 'Rafael', 'Ramón', 'Raquel', 'Raúl', 'Regina', 'Ricardo',\n 'Roberto', 'Rodrigo', 'Román', 'Rosa', 'Rubén', 'Salvador', 'Samuel', 'Sandra',\n 'Santiago', 'Sara', 'Saúl', 'Sebastián', 'Selena', 'Serena', 'Sergio', 'Simón',\n 'Sofía', 'Talía', 'Tatiana', 'Tobías', 'Tomás', 'Valentina', 'Valeria',\n 'Verónica', 'Víctor', 'Victoria', 'Virginia', 'Vivian', 'Viviana', 'Xavier',\n 'Ximena'\n )\n\n\n last_names = (\n 'Abad', 'Adadia', 'Abascal', 'Abella', 'Abellán', 'Abril', 'Acedo', 'Acevedo',\n 'Acero', 'Acosta', 'Acuña', 'Adán', 'Aguado', 'Agudo', 'Águila', 'Aguilar',\n 'Aguilera', 'Aguiló', 'Aguirre', 'Agullo', 'Agustí', 'Agustín', 'Álamo',\n 'Alarcón', 'Alba', 'Alberdi', 'Albero', 'Alberola', 'Alberto', 'Alcalá',\n 'Alcalde', 'Alcántara', 'Alcaraz', 'Alcázar', 'Alcolea', 'Alegre', 'Alegria',\n 'Alemán', 'Alemany', 'Alfaro', 'Alfonso', 'Aliaga', 'Almagro', 'Almansa',\n 'Almazán', 'Almeida', 'Alonso', 'Alsina', 'Alvarado', 'Álvarez', 'Álvaro',\n 'Aller', 'Amador', 'Amat', 'Amaya', 'Amigó', 'Amo', 'Amor', 'Amores', 'Amorós',\n 'Anaya', 'Andrade', 'Andrés', 'Andreu', 'Ángel', 'Anglada', 'Angulo', 'Anguita',\n 'Antón', 'Antúnez', 'Aparicio', 'Aragón', 'Aragonés', 'Aramburu', 'Arana',\n 'Aranda', 'Araujo', 'Arce', 'Arco', 'Arcos', 'Arellano', 'Arenas', 'Arévalo',\n 'Arias', 'Ariño', 'Ariza', 'Arjona', 'Armas', 'Armengol', 'Arnaiz', 'Arnal',\n 'Arnau', 'Aroca', 'Arranz', 'Arregui', 'Arribas', 'Arrieta', 'Arroyo',\n 'Arteaga', 'Artigas', 'Asenjo', 'Asensio', 'Atienza', 'Ávila', 'Avilés',\n 'Ayala', 'Ayllón', 'Ayuso', 'Azcona', 'Aznar', 'Azorin', 'Badía', 'Baena',\n 'Báez', 'Baeza', 'Balaguer', 'Ballester', 'Ballesteros', 'Baños', 'Baquero',\n 'Barba', 'Barberá', 'Barbero', 'Barceló', 'Bárcena', 'Barco', 'Baró', 'Barón',\n 'Barragán', 'Barral', 'Barranco', 'Barreda', 'Barrena', 'Barrera', 'Barriga',\n 'Barrio', 'Barrios', 'Barros', 'Barroso', 'Bartolomé', 'Bas', 'Bastida',\n 'Batalla', 'Batlle', 'Bautista', 'Bauzà', 'Bayo', 'Bayón', 'Bayona', 'Becerra',\n 'Bejarano', 'Belda', 'Belmonte', 'Beltrán', 'Bellido', 'Bello', 'Benavent',\n 'Benavente', 'Benavides', 'Benet', 'Benítez', 'Benito', 'Berenguer', 'Bermejo',\n 'Bermúdez', 'Bernad', 'Bernal', 'Bernat', 'Berrocal', 'Bertrán', 'Bilbao',\n 'Blanca', 'Blanco', 'Blanch', 'Blanes', 
'Blasco', 'Blázquez', 'Boada', 'Boix',\n 'Bolaños', 'Bonet', 'Bonilla', 'Borja', 'Borrás', 'Borrego', 'Borrell', 'Bosch',\n 'Botella', 'Bou', 'Bravo', 'Briones', 'Bru', 'Buendía', 'Bueno', 'Burgos',\n 'Busquets', 'Bustamante', 'Bustos', 'Caballero', 'Cabanillas', 'Cabañas',\n 'Cabello', 'Cabeza', 'Cabezas', 'Cabo', 'Cabrera', 'Cabrero', 'Cáceres',\n 'Cadenas', 'Cal', 'Calatayud', 'Calderón', 'Calvet', 'Calvo', 'Calleja',\n 'Calzada', 'Camacho', 'Cámara', 'Camino', 'Campillo', 'Campo', 'Campos',\n 'Campoy', 'Camps', 'Canales', 'Canals', 'Canet', 'Cano', 'Cánovas', 'Cantero',\n 'Cantón', 'Cañas', 'Cañellas', 'Cañete', 'Cañizares', 'Caparrós', 'Capdevila',\n 'Carballo', 'Carbajo', 'Carbó', 'Carbonell', 'Cárdenas', 'Cardona', 'Carlos',\n 'Carmona', 'Carnero', 'Caro', 'Carpio', 'Carranza', 'Carrasco', 'Carreño',\n 'Carrera', 'Carreras', 'Carretero', 'Carrillo', 'Carrión', 'Carro', 'Carvajal',\n 'Casado', 'Casal', 'Casals', 'Casanova', 'Casanovas', 'Casares', 'Casas',\n 'Cases', 'Castañeda', 'Castejón', 'Castell', 'Castellanos', 'Castelló',\n 'Castells', 'Castilla', 'Castillo', 'Castrillo', 'Castro', 'Catalá', 'Catalán',\n 'Cazorla', 'Cepeda', 'Cerdá', 'Cerdán', 'Cerezo', 'Cerro', 'Cervantes',\n 'Cervera', 'Céspedes', 'Cid', 'Cifuentes', 'Cisneros', 'Clavero', 'Clemente',\n 'Cobo', 'Cobos', 'Coca', 'Codina', 'Coello', 'Colom', 'Coloma', 'Colomer',\n 'Coll', 'Collado', 'Comas', 'Company', 'Conde', 'Conesa', 'Contreras',\n 'Corbacho', 'Cordero', 'Córdoba', 'Cornejo', 'Corominas', 'Coronado', 'Corral',\n 'Correa', 'Cortés', 'Cortina', 'Costa', 'Cózar', 'Criado', 'Crespi', 'Crespo',\n 'Cruz', 'Cuadrado', 'Cuéllar', 'Cuenca', 'Cuervo', 'Cuesta', 'Cueto', 'Cuevas',\n 'Chacón', 'Chamorro', 'Chaparro', 'Chaves', 'Checa', 'Chico', 'Dalmau',\n 'Dávila', 'Daza', 'Delgado', 'Díaz', 'Diego', 'Diéguez', 'Díez', 'Doménech',\n 'Domingo', 'Domínguez', 'Donaire', 'Donoso', 'Duarte', 'Dueñas', 'Duque',\n 'Durán', 'Echevarría', 'Echeverría', 'Egea', 'Elías', 'Elorza', 'Enríquez',\n 'Escalona', 'Escamilla', 'Escobar', 'Escolano', 'Escribano', 'Escrivá',\n 'Escudero', 'Espada', 'España', 'Español', 'Esparza', 'Espejo', 'Espinosa',\n 'Esteban', 'Esteve', 'Estévez', 'Estrada', 'Expósito', 'Fabra', 'Fábregas',\n 'Fabregat', 'Fajardo', 'Falcó', 'Falcón', 'Farré', 'Feijoo', 'Feliu',\n 'Fernández', 'Ferrán', 'Ferrández', 'Ferrándiz', 'Ferrando', 'Ferrer',\n 'Ferrera', 'Ferrero', 'Ferreras', 'Figueras', 'Figueroa', 'Figuerola', 'Fiol',\n 'Flor', 'Flores', 'Folch', 'Fonseca', 'Font', 'Fortuny', 'Franch', 'Francisco',\n 'Franco', 'Franch', 'Frías', 'Frutos', 'Fuente', 'Fuentes', 'Fuertes', 'Fuster',\n 'Gabaldón', 'Galán', 'Galiano', 'Galindo', 'Galván', 'Gálvez', 'Gallardo',\n 'Gallart', 'Gallego', 'Gallo', 'Gámez', 'Gárate', 'Garay', 'Garcés', 'García',\n 'Gargallo', 'Garmendia', 'Garrido', 'Garriga', 'Garzón', 'Gascón', 'Gaya',\n 'Gelabert', 'Gibert', 'Gil', 'Gilabert', 'Giménez', 'Gimeno', 'Giner', 'Giralt',\n 'Girón', 'Girona', 'Gisbert', 'Godoy', 'Goicoechea', 'Gómez', 'Gomila', 'Gomis',\n 'González', 'Gonzalo', 'Goñi', 'Gordillo', 'Gracia', 'Granados', 'Grande',\n 'Gras', 'Grau', 'Gual', 'Guardia', 'Guardiola', 'Guerra', 'Guerrero',\n 'Guijarro', 'Guillén', 'Guitart', 'Gutiérrez', 'Guzmán', 'Haro', 'Heras',\n 'Heredia', 'Hernández', 'Hernando', 'Herranz', 'Herrera', 'Herrero', 'Hervás',\n 'Hervia', 'Hidalgo', 'Hierro', 'Higueras', 'Hoyos', 'Hoz', 'Huerta', 'Huertas',\n 'Huguet', 'Hurtado', 'Ibáñez', 'Ibarra', 'Iborra', 'Iglesia', 'Iglesias',\n 'Infante', 'Iniesta', 'Íñigo', 'Iñiguez', 'Iriarte', 'Isern', 'Izaguirre',\n 
'Izquierdo', 'Jaén', 'Jara', 'Jaume', 'Jáuregui', 'Jerez', 'Jiménez', 'Jódar',\n 'Jordá', 'Jordán', 'Jove', 'Jover', 'Juan', 'Juárez', 'Juliá', 'Julián',\n 'Jurado', 'Lago', 'Laguna', 'Lamas', 'Landa', 'Lara', 'Larrañaga', 'Larrea',\n 'Lasa', 'Lastra', 'Leal', 'Ledesma', 'Leiva', 'León', 'Lerma', 'Lillo',\n 'Linares', 'Lobato', 'Lobo', 'López', 'Lorenzo', 'Losa', 'Losada', 'Lozano',\n 'Lucas', 'Lucena', 'Luís', 'Luján', 'Lumbreras', 'Luna', 'Luque', 'Luz',\n 'Llabrés', 'Lladó', 'Llamas', 'Llano', 'Llanos', 'Lledó', 'Llobet', 'Llopis',\n 'Llorens', 'Llorente', 'Lloret', 'Lluch', 'Macías', 'Machado', 'Madrid',\n 'Madrigal', 'Maestre', 'Maldonado', 'Malo', 'Mancebo', 'Manjón', 'Manrique',\n 'Manso', 'Manuel', 'Manzanares', 'Manzano', 'Marco', 'Marcos', 'Marí', 'Marín',\n 'Mariño', 'Mariscal', 'Mármol', 'Marqués', 'Márquez', 'Martí', 'Martín',\n 'Martínez', 'Martorell', 'Mas', 'Mascaró', 'Mata', 'Matas', 'Mate', 'Mateo',\n 'Mateos', 'Mateu', 'Mayo', 'Mayol', 'Mayoral', 'Maza', 'Medina', 'Meléndez',\n 'Melero', 'Mena', 'Méndez', 'Mendizábal', 'Mendoza', 'Menéndez', 'Mercader',\n 'Merino', 'Mesa', 'Miguel', 'Milla', 'Millán', 'Mínguez', 'Mir', 'Miralles',\n 'Miranda', 'Miró', 'Moles', 'Molina', 'Moliner', 'Molins', 'Moll', 'Monreal',\n 'Montalbán', 'Montaña', 'Montenegro', 'Montero', 'Montes', 'Montesinos',\n 'Montoya', 'Montserrat', 'Mora', 'Moraleda', 'Morales', 'Morán', 'Morante',\n 'Morata', 'Morcillo', 'Morell', 'Moreno', 'Morera', 'Morillo', 'Mosquera',\n 'Moya', 'Múgica', 'Mulet', 'Múñiz', 'Muñoz', 'Mur', 'Murcia', 'Murillo', 'Muro',\n 'Nadal', 'Naranjo', 'Narváez', 'Navarrete', 'Navarro', 'Navas', 'Nebot',\n 'Neira', 'Nevado', 'Nicolau', 'Nicolás', 'Nieto', 'Niño', 'Nogueira', 'Noguera',\n 'Nogués', 'Noriega', 'Novoa', 'Núñez', 'Ocaña', 'Ochoa', 'Ojeda', 'Oliva',\n 'Olivares', 'Olivé', 'Oliver', 'Olivera', 'Oliveras', 'Olmedo', 'Olmo', 'Oller',\n 'Ordóñez', 'Orozco', 'Ortega', 'Ortiz', 'Ortuño', 'Osorio', 'Osuna', 'Otero',\n 'Pablo', 'Pacheco', 'Padilla', 'Páez', 'Pagès', 'Palacio', 'Palacios', 'Palau',\n 'Palma', 'Palmer', 'Palomar', 'Palomares', 'Palomino', 'Palomo', 'Pallarès',\n 'Paniagua', 'Pardo', 'Paredes', 'Pareja', 'Parejo', 'Parra', 'Pascual',\n 'Pastor', 'Patiño', 'Pavón', 'Paz', 'Pazos', 'Pedraza', 'Pedrero', 'Pedro',\n 'Pedrosa', 'Peinado', 'Peiró', 'Peláez', 'Pelayo', 'Pellicer', 'Peña',\n 'Peñalver', 'Peñas', 'Pera', 'Peral', 'Perales', 'Peralta', 'Perea', 'Pereira',\n 'Perelló', 'Perera', 'Pérez', 'Pi', 'Pina', 'Pineda', 'Pinedo', 'Pinilla',\n 'Pino', 'Pinto', 'Pintor', 'Piña', 'Piñeiro', 'Piñol', 'Piquer', 'Pizarro',\n 'Pla', 'Plana', 'Planas', 'Plaza', 'Pol', 'Polo', 'Pomares', 'Pombo', 'Ponce',\n 'Pons', 'Pont', 'Porcel', 'Porras', 'Porta', 'Portero', 'Portillo', 'Posada',\n 'Pou', 'Poza', 'Pozo', 'Pozuelo', 'Prada', 'Prado', 'Prat', 'Prats', 'Priego',\n 'Prieto', 'Puente', 'Puerta', 'Puga', 'Puig', 'Pujadas', 'Pujol', 'Pulido',\n 'Quero', 'Querol', 'Quesada', 'Quevedo', 'Quintana', 'Quintanilla', 'Quintero',\n 'Quiroga', 'Quirós', 'Ramírez', 'Ramis', 'Ramón', 'Ramos', 'Raya', 'Real',\n 'Rebollo', 'Recio', 'Redondo', 'Reguera', 'Reig', 'Reina', 'Requena', 'Revilla',\n 'Rey', 'Reyes', 'Riba', 'Ribas', 'Ribera', 'Ribes', 'Ricart', 'Rico', 'Riera',\n 'Rincón', 'Río', 'Ríos', 'Ripoll', 'Riquelme', 'Rius', 'Rivero', 'Robledo',\n 'Robles', 'Roca', 'Rocamora', 'Rocha', 'Roda', 'Ródenas', 'Rodrigo',\n 'Rodríguez', 'Roig', 'Rojas', 'Roldán', 'Roma', 'Román', 'Romero', 'Romeu',\n 'Ropero', 'Ros', 'Rosa', 'Rosado', 'Rosales', 'Rosell', 'Roselló', 'Rosselló',\n 'Roura', 'Rovira', 
'Royo', 'Rozas', 'Ruano', 'Rubio', 'Rueda', 'Ruiz',\n 'Saavedra', 'Sabater', 'Sacristán', 'Sáenz', 'Sáez', 'Sainz', 'Sala',\n 'Salamanca', 'Salas', 'Salazar', 'Salcedo', 'Saldaña', 'Sales', 'Salgado',\n 'Salinas', 'Salmerón', 'Salom', 'Salvà', 'Salvador', 'Samper', 'Sanabria',\n 'Sánchez', 'Sancho', 'Sandoval', 'Sanjuan', 'Sanmartín', 'Sanmiguel', 'Sans',\n 'Santamaría', 'Santos', 'Sanz', 'Sarabia', 'Sarmiento', 'Sastre', 'Saura',\n 'Sebastián', 'Seco', 'Sedano', 'Segarra', 'Segovia', 'Seguí', 'Segura', 'Serna',\n 'Serra', 'Serrano', 'Sevilla', 'Sevillano', 'Sierra', 'Silva', 'Simó',\n 'Sobrino', 'Sola', 'Solana', 'Solano', 'Solé', 'Soler', 'Solera', 'Solís',\n 'Solsona', 'Somoza', 'Soria', 'Soriano', 'Sosa', 'Sotelo', 'Soto', 'Suárez',\n 'Sureda', 'Taboada', 'Talavera', 'Tamarit', 'Tamayo', 'Tapia', 'Tejada',\n 'Tejedor', 'Tejera', 'Tejero', 'Téllez', 'Tello', 'Tena', 'Tenorio', 'Terrón',\n 'Teruel', 'Tirado', 'Toledo', 'Tolosa', 'Tomás', 'Tomé', 'Tormo', 'Toro',\n 'Torralba', 'Torre', 'Torrecilla', 'Torrens', 'Torrent', 'Torrents', 'Torres',\n 'Torrijos', 'Tovar', 'Trillo', 'Trujillo', 'Tudela', 'Tur', 'Ugarte', 'Ureña',\n 'Uría', 'Uriarte', 'Uribe', 'Urrutia', 'Valbuena', 'Valcárcel', 'Valderrama',\n 'Valdés', 'Valencia', 'Valenciano', 'Valentín', 'Valenzuela', 'Valera',\n 'Valero', 'Valverde', 'Vall', 'Valle', 'Vallejo', 'Vallés', 'Valls', 'Vaquero',\n 'Vara', 'Varela', 'Vargas', 'Vázquez', 'Vega', 'Velasco', 'Velázquez', 'Vélez',\n 'Vendrell', 'Vera', 'Verdejo', 'Verdú', 'Verdugo', 'Vergara', 'Viana',\n 'Vicente', 'Vicens', 'Vidal', 'Vigil', 'Vila', 'Vilanova', 'Vilalta',\n 'Vilaplana', 'Vilar', 'Villa', 'Villalba', 'Villalobos', 'Villalonga',\n 'Villanueva', 'Villar', 'Villaverde', 'Villegas', 'Villena', 'Viña', 'Viñas',\n 'Vives', 'Vizcaíno', 'Yáñez', 'Yuste', 'Zabala', 'Zabaleta', 'Zamora',\n 'Zamorano', 'Zapata', 'Zaragoza', 'Zorrilla', 'Zurita'\n )\n\n prefixes = ('de', 'del')\n\n @classmethod\n def prefix(cls):\n return cls.random_element(cls.prefixes)","repo_name":"pomeh/sublime-FakeDataGenerator","sub_path":"faker/faker/providers/es_ES/person.py","file_name":"person.py","file_ext":"py","file_size_in_byte":15052,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"29169568043","text":"import qrcode\r\nimport qrcode as qr\r\nfrom PIL import Image\r\nqr=qrcode.QRCode(\r\n version=1,\r\n error_correction=qrcode.constants.ERROR_CORRECT_H,\r\n box_size=10,\r\n border=4,\r\n)\r\nqr.add_data(\"https://www.linkedin.com/in/chetan-gudditi-922b38272/\")\r\nqr.make(fit=True)\r\nimg=qr.make_image(fill_color=\"Red\",back_color=\"white\")\r\nimg.save(\"LinkedinQR-Color.png\")","repo_name":"Chetan696/QRCode","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15796822148","text":"#!/usr/bin/env python\n#\n# Purpose: Calculate the Area-Compressibility from a GROMACS bilayer \n# simulation. \n# Note: 1. A prerequisite is to calculate the XY-Area time series using the\n# \"calc_area.py\" in prior. \n# 2. Input is in \"nm\", output in \"\".\n# 3. Formula:\n# kA = kB*T* / var(A)\n# 4. Results are accumulated results along the time series, intended \n# to show the convergence trend, thus they are not independent. 
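calc_kA.py above implements kA = kB·T·⟨A⟩ / var(A). A one-shot numpy version of the same accumulation; the script's variable names imply areas in Å² (its docstring mentions nm, so check the input file's units), and 1 J/Å² = 1e23 mN/m gives the same conversion factor the script uses:

import numpy as np

def area_compressibility(areas_A2, temperature_K, kB=1.380649e-23):
    areas = np.asarray(areas_A2, dtype=float)
    # var() is 0 for a single sample; the original script guards that case
    kA_J_per_A2 = kB * temperature_K * areas.mean() / areas.var()
    return kA_J_per_A2 * 1e23   # 1 J/A^2 == 1e23 mN/m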
\n# So it's NOT correct to perform average over these values.\n# Syntax: calc_kA.py *.xvg temperature > kA.xvg\n# Created: 2016/May/02\n#\n\nfrom __future__ import print_function\nimport numpy as np\nimport sys\nfrom constants import kB as kB\n\n\ndef gate_keeping():\n if len(sys.argv) != 3:\n print(\"Syntax: calc_kA.py area.xvg T\")\n sys.exit(0)\n return sys.argv[1:]\n\ndef print_captions():\n print(\"# Time kA\")\n print(\"# ps mN/m (dyn/cm)\")\n print(\"# NOTE: Values are accumulated results, thus not independent.\")\n print(\"# So it's NOT correct to perform average over these values.\")\n\ndef main(args):\n\n inFile = args[0]\n temp = float(args[1])\n print (\"# Calculated from %s (T = %s)\" %(inFile, temp))\n with open(inFile) as f:\n lines = f.readlines()\n \n print_captions()\n xyarea = []\n for line in lines:\n if line[0] in ['#', '@',]:\n continue\n words = line.split()\n xyarea.append(float(words[1]))\n \n mean_A = np.mean(xyarea) \n var_A = np.var(xyarea) if len(xyarea) is not 1 else float('inf')\n kA__J_per_AA2 = kB * temp * mean_A / var_A\n kA__mN_per_m = kA__J_per_AA2 * 1e23 \n print(words[0], kA__mN_per_m)\n\nif __name__ == '__main__':\n arguments = gate_keeping() \n main(arguments)\n","repo_name":"wdingsjtu/tools-bilayer-gromacs","sub_path":"calc_kA.py","file_name":"calc_kA.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39510748431","text":"import sys\nimport copy\nfrom collections import deque\n\ninput = sys.stdin.readline\n\nN, M = map(int, input().split())\ns = []\nq = deque()\nq_index = 0\ncrush_q = deque()\ndx = [1, -1, 0, 0]\ndy = [0, 0, 1, -1]\nresult_list = []\n\nfor _ in range(N):\n row = list(input().strip())\n s.append(row)\n \ns[0][0] = 1\ncrush_q.append([0, 0])\n\nwhile q_index < len(crush_q):\n o, x = crush_q[q_index]\n sum = copy.deepcopy(s)\n if not(o == 0 and x == 0):\n sum[o][x] = '0'\n \n q.append([0, 0])\n print(crush_q)\n while q:\n a, b = q.popleft()\n \n for i in range(4):\n x = a + dx[i]\n y = b + dy[i]\n\n if 0 <= x < N and 0 <= y < M and sum[x][y] == '0':\n q.append([x, y])\n sum[x][y] = sum[a][b] + 1\n \n if 0 <= x < N and 0 <= y < M and sum[x][y] == '1':\n if [x, y] not in crush_q:\n crush_q.append([x, y])\n \n if sum[N - 1][M -1] != '0':\n result_list.append(sum[N - 1][M - 1])\n \n q_index += 1\n\nif result_list:\n print(min(result_list))\nelse:\n print(-1)","repo_name":"jwYunn/Baekjoon_Algorithm","sub_path":"baekjoon_2206.py","file_name":"baekjoon_2206.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28513778384","text":"import pandas as pd\nimport numpy as np\nimport math\n# from sklearn.neural_network import MLPClassifier # neural network\n# from sklearn.model_selection import train_test_split\n# from sklearn import metrics\n\ndef sigmoid(x):\n\treturn 1 / (1 + math.exp(-x))\n\nclass Perceptron:\n\n\tdef __init__(self, rate, input_length):\n\t\tself.data = []\n\t\tself.weight = []\n\t\tself.delta_weight = []\n\t\tself.rate = rate\n\n\t\trandom_matrix = np.random.randn(1, input_length) * np.sqrt(1 / input_length)\n\t\tfor rand_array in random_matrix:\n\t\t\tfor rand_num in rand_array:\n\t\t\t\tself.weight.append(rand_num)\n\t\t\n\t\t# print(self.weight)\n\n\t\tfor inp in range(input_length):\n\t\t\tself.delta_weight.append(0)\n\t\t\t\n\n\tdef input_data(self, data):\n\t\tself.data = []\n\t\tfor datum in 
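baekjoon_2206.py above re-runs a full BFS once per breakable wall, which gets expensive on large grids (and mixes int and str cell markers). The standard single-pass formulation tracks whether the one allowed wall break has been used as part of the BFS state, offered here for comparison:

from collections import deque

def shortest_with_one_break(grid):          # grid: list of '0'/'1' strings
    n, m = len(grid), len(grid[0])
    dist = [[[0, 0] for _ in range(m)] for _ in range(n)]
    dist[0][0][0] = 1                       # path length counts the start cell
    q = deque([(0, 0, 0)])
    while q:
        x, y, broken = q.popleft()
        if (x, y) == (n - 1, m - 1):
            return dist[x][y][broken]
        for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            nx, ny = x + dx, y + dy
            if 0 <= nx < n and 0 <= ny < m:
                if grid[nx][ny] == '0' and dist[nx][ny][broken] == 0:
                    dist[nx][ny][broken] = dist[x][y][broken] + 1
                    q.append((nx, ny, broken))
                elif grid[nx][ny] == '1' and broken == 0 and dist[nx][ny][1] == 0:
                    dist[nx][ny][1] = dist[x][y][broken] + 1
                    q.append((nx, ny, 1))   # spend the single wall break
    return -1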
data:\n\t\t\tself.data.append(datum)\n\n\tdef calc_sigmoid(self):\n\t\tjumlah = 0\n\t\tfor i in range(len(self.data)):\n\t\t\tjumlah += self.data[i] * self.weight[i]\n\t\tself.output = sigmoid(jumlah)\n\n\t#for backprop\n\tdef calc_delta(self, multiplier):\n\t\tself.delta = self.output * (1-self.output) * multiplier\n\n\tdef update_delta_weight(self):\n\t\tfor i in range(len(self.delta_weight)):\n\t\t\tself.delta_weight[i] += self.rate * self.delta * self.data[i]\n\t\n\t# End of batch-size\n\tdef update_weight(self):\n\t\tfor i in range(len(self.weight)):\n\t\t\tself.weight[i] += self.delta_weight[i]\n\t\t\tself.delta_weight[i] = 0\n\nclass myMLP:\n\n\tdef __init__(self, hidden_layer_sizes=[2, 3], learning_rate=0.001, max_iter=200, error_treshold=0.0001, batch_size=32):\n\t\t# Attributes\n\t\tself.layers = []\n\t\tself.hidden_layer_sizes = hidden_layer_sizes\n\t\tself.learning_rate = learning_rate\n\t\tself.max_iter = max_iter\n\t\tself.error_treshold = error_treshold\n\t\tself.batch_size = batch_size\n\t\tself.output_print = []\n\n\n\tdef fit(self, data_inputs, target):\n\t\tself.data_inputs = data_inputs\n\t\tself.target = target\n\t\tself.classes = self.target.unique()\n\n\t\ttry:\n\t\t\t#\n\t\t\tnumber_of_inputs_from_previous_layer = len(self.data_inputs.columns)\n\t\t\t# Initialize perceptrons in the hidden layers (from index 1)\n\t\t\tfor layer_idx in range(len(self.hidden_layer_sizes)):\n\t\t\t\t# hidden_layer = Array of perceptrons\n\t\t\t\tnumber_of_perceptrons_current_layer = self.hidden_layer_sizes[layer_idx]\n\t\t\t\thidden_layer = self.initialize_perceptrons_in_layer(number_of_perceptrons_current_layer, number_of_inputs_from_previous_layer)\n\t\t\t\tnumber_of_inputs_from_previous_layer = self.hidden_layer_sizes[layer_idx]\n\t\t\t\tself.layers.append(hidden_layer)\n\n\t\t\t# Construct last (output) layer of perceptrons\n\t\t\tnumber_of_perceptrons_last_layer = len(self.target.unique())\n\t\t\tnumber_of_inputs_from_previous_layer = self.hidden_layer_sizes[-1]\n\n\t\t\toutput_layer = self.initialize_perceptrons_in_layer(number_of_perceptrons_last_layer, number_of_inputs_from_previous_layer)\n\t\t\tself.layers.append(output_layer)\n\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\t\t# Construct last (output) layer of perceptrons\n\t\t\tnumber_of_perceptrons_last_layer = len(self.target.unique())\n\t\t\tnumber_of_inputs_from_previous_layer = len(self.data_inputs.columns)\n\t\t\toutput_layer = self.initialize_perceptrons_in_layer(number_of_perceptrons_last_layer, number_of_inputs_from_previous_layer)\n\t\t\tself.layers.append(output_layer)\n\n\t\t# Start feed forward and backward prop\n\t\tnumber_of_rows = len(data_inputs)\n\t\tfor iteration in range(self.max_iter):\n\t\t\terror_total = 0\n\t\t\tfor row in range(number_of_rows):\n\t\t\t\t# print(\"row\")\n\t\t\t\t# print(row)\n\t\t\t\tself.feed_forward(row)\n\n\t\t\t\t# Do backward prop then get error\n\t\t\t\terror = self.backward_prop(row)\n\t\t\t\terror_total += error\n\t\t\t\t\n\t\t\t\tif (row % self.batch_size == 0):\n\t\t\t\t\tself.update_all_weights()\n\n\t\t\tself.update_all_weights()\n\n\t\t\tif (error_total < self.error_treshold):\n\t\t\t\tbreak\n\n\tdef update_all_weights(self):\n\t\tfor layer in self.layers:\n\t\t\tfor perceptron in layer:\n\t\t\t\tperceptron.update_weight()\n\n\tdef calculate_error(self, diff):\n\t\treturn 0.5 * (diff ** 2)\n\n\tdef initialize_perceptrons_in_layer (self, number_of_perceptrons, number_of_inputs):\n\t\tlayer = []\n\t\tfor idx_perceptron in 
range(number_of_perceptrons):\n\t\t\tlayer.append(Perceptron(self.learning_rate, number_of_inputs+1))\n\t\treturn layer\n\n\tdef feed_forward(self, row):\n\t\tinputs = []\n\t\toutputs = []\n\t\t# Initial inputs\n\t\tfor column in self.data_inputs.columns:\n\t\t\tinputs.append(self.data_inputs[column][row])\n\t\tinputs.append(1)\n\n\t\tfor layer_idx in range(len(self.layers)):\n\t\t\toutputs.clear()\n\t\t\tfor perceptron in self.layers[layer_idx]:\n\t\t\t\tperceptron.input_data(inputs)\n\t\t\t\tperceptron.calc_sigmoid()\n\t\t\t\toutputs.append(perceptron.output)\n\n\t\t\tinputs.clear()\n\t\t\tfor output_data in outputs:\n\t\t\t\tinputs.append(output_data)\n\n\t\t\tinputs.append(1)\n\n\tdef backward_prop(self, row):\n\t\t# Last layer\n\t\ttotal_error = 0\n\t\tfor i in range(len(self.layers[-1])):\n\t\t\tperceptron = self.layers[-1][i]\n\t\t\t# Calculate diff (multiplier):\n\t\t\tif self.classes[i] == self.target[row]:\n\t\t\t\tresult = 1\n\t\t\telse:\n\t\t\t\tresult = 0\n\t\t\tdiff = result - perceptron.output\n\t\t\tperceptron.calc_delta(diff)\n\t\t\tperceptron.update_delta_weight()\n\t\t\ttotal_error += self.calculate_error(diff)\n\n\t\t# Hidden layers\n\t\tfor layer_idx in range(len(self.layers)-1): #banyaknya layer di layers, kecuali output layer\n\t\t\tlayer_size = len(self.layers[-layer_idx-2]) #banyaknya perceptron di layer itu\n\t\t\tfor perc_idx in range(layer_size): #untuk setiap perceptron di layer itu\n\t\t\t\tdiff = 0\n\t\t\t\tfor next_perceptron in self.layers[-layer_idx-1]:\n\n\t\t\t\t\tdiff += next_perceptron.delta * next_perceptron.weight[perc_idx]\n\t\t\t\tself.layers[-layer_idx-2][perc_idx].calc_delta(diff)\n\t\t\t\tself.layers[-layer_idx-2][perc_idx].update_delta_weight()\n\n\t\treturn total_error\n\n\tdef predict(self, data_inputs):\n\t\tinputs = []\n\t\toutputs = []\n\t\tpredictions = []\n\t\tfor row in range(len(data_inputs)):\n\t\t\tinputs.clear()\n\t\t\toutputs.clear()\n\t\t\t# Initial inputs\n\t\t\tfor column in data_inputs.columns:\n\t\t\t\tinputs.append(data_inputs[column][row])\n\t\t\tinputs.append(1)\n\n\t\t\tfor layer_idx in range(len(self.layers)):\n\t\t\t\toutputs.clear()\n\t\t\t\tfor perceptron in self.layers[layer_idx]:\n\t\t\t\t\tperceptron.input_data(inputs)\n\t\t\t\t\tperceptron.calc_sigmoid()\n\t\t\t\t\toutputs.append(perceptron.output)\n\t\t\t\tinputs.clear()\n\t\t\t\tfor output in outputs:\n\t\t\t\t\tinputs.append(output)\n\t\t\t\tinputs.append(1)\n\t\t\tidx = outputs.index(max(outputs))\n\t\t\tpredictions.append(self.classes[idx])\n\t\treturn predictions\n\n\tdef show_model(self, n=None):\n\t\tself.output_print.clear()\n\t\tfor layer_idx in range(len(self.layers)):\n\t\t\tfor perceptron_idx in range(len(self.layers[layer_idx])):\n\t\t\t\tfor weight_idx in range(len(self.layers[layer_idx][perceptron_idx].weight)):\n\t\t\t\t\tif (weight_idx != len(self.layers[layer_idx][perceptron_idx].weight) - 1):\n\t\t\t\t\t\tself.output_print.append(str(\"Weight \" + str(weight_idx) + \"-\" + \"[\" + str(layer_idx) + \"][\" + str(perceptron_idx) + \"]: \" + str(self.layers[layer_idx][perceptron_idx].weight[weight_idx])))\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.output_print.append(str(\"Bias \" + \"[\" + str(layer_idx) + \"][\" + str(perceptron_idx) + \"]: \" + str(self.layers[layer_idx][perceptron_idx].weight[weight_idx])))\n\t\t\n\t\tif (n is None):\n\t\t\tfor output in self.output_print:\n\t\t\t\tprint(output)\n\t\telse:\n\t\t\tfor i in 
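The Perceptron class above keeps weights and deltas in Python lists and loops element by element. A vectorized sketch of one sigmoid layer using the same sqrt(1/fan_in) initialisation and the same delta convention (the upstream signal for the output layer is target − output, so the `+=` update descends the squared error):

import numpy as np

class DenseSigmoid:
    def __init__(self, fan_in, fan_out, rate=0.001):
        self.W = np.random.randn(fan_out, fan_in + 1) * np.sqrt(1.0 / (fan_in + 1))
        self.rate = rate

    def forward(self, x):                    # x: shape (fan_in,)
        self.x = np.append(x, 1.0)           # append bias input
        self.out = 1.0 / (1.0 + np.exp(-self.W @ self.x))
        return self.out

    def backward(self, upstream):            # upstream: e.g. target - output
        delta = self.out * (1.0 - self.out) * upstream
        grad_x = self.W[:, :-1].T @ delta    # signal for the previous layer
        self.W += self.rate * np.outer(delta, self.x)
        return grad_x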
range(n):\n\t\t\t\tprint(self.output_print[i])\n\t\t\t\t","repo_name":"fata-nugraha/MLP","sub_path":"submit/src/MultilayerPerceptron.py","file_name":"MultilayerPerceptron.py","file_ext":"py","file_size_in_byte":7034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"641181921","text":"from PIL import Image\nimport requests\nfrom io import BytesIO\nimport os\n\nZOOM_LEVEL = 10 # specifies how zoomed in satellite map image is (10x is slightly larger than a typical city)\nSCALE_FACTOR = 100 # specifies how much to scale brightness/population score by (100 puts it in 1-2 digit range for typical city)\n\n# mapbox api credentials\nMAPBOX_ACCESS_TOKEN = \"pk.eyJ1IjoibHVjY2MiLCJhIjoiY2s5d2ZrYXhjMDRtZTNkbzZiYmNjM21ucyJ9.qZs4pcfb6Tn2i6xVwMpp8Q\"\nMAPBOX_MAP_STYLE = \"/luccc/ck9x4eozb0lci1irpnz0qpnti\" # custom map style for high constrast between houses, roads, and nature\n\n# geonames api credentials\nGEONAMES_USERNAME = \"luc_c\"\n\n\ndef get_loc_img(longitude, latitude):\n # make a call to mapbox api to get image centered on city's location\n img_data = requests.get(\"https://api.mapbox.com/styles/v1\" + MAPBOX_MAP_STYLE + \"/static/\"\n + longitude + \",\" + latitude + \",\" + str(ZOOM_LEVEL)\n + \",0/400x400?access_token=\" + MAPBOX_ACCESS_TOKEN)\n\n img_io = BytesIO() # create output buffer to hold image\n img_io.write(img_data.content) # write image to buffer\n city_img = Image.open(img_io) # open image from buffer\n city_img = city_img.convert(\"L\") # convert to grayscale for ease of manipulation\n return city_img\n\n\ndef get_sprawl_score(img, population):\n total_bright = 0 # tracks number of bright(non-nature) pixels found\n\n # loop through image and find bright pixels\n for x in range(img.width):\n for y in range(img.height):\n if img.getpixel((x, y)) > 100:\n total_bright += 1\n img.putpixel((x, y), 255) # set bright pixel to white (helps visualize what algorithm is finding)\n\n score = total_bright/int(population)*SCALE_FACTOR\n return score\n\n\ndef get_city_info(city):\n # make a call to geonames api to search for given city name\n info = requests.get(\"http://api.geonames.org/search?q=\" + city + \"&maxRows=1&type=json&username=\" + GEONAMES_USERNAME).json()\n # check to ensure result exists and is a city\n if info['totalResultsCount'] == 0 or info['geonames'][0]['population'] == 0 or 'countryName' not in info['geonames'][0]:\n return\n\n # get relevant info from api result\n pop = info['geonames'][0]['population']\n lat = info['geonames'][0]['lat']\n long = info['geonames'][0]['lng']\n cntry = info['geonames'][0]['countryName']\n name = info['geonames'][0]['name']\n\n city_img = get_loc_img(long, lat)\n score = get_sprawl_score(city_img, pop)\n score = round(score, 2) # score becomes hard to read/process at more than 2 decimal places\n\n return name, cntry, pop, lat, long, score, city_img\n","repo_name":"LucCote/sprawl-calculator","sub_path":"sprawlcalculator.py","file_name":"sprawlcalculator.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22605935697","text":"from hmm_taggin.newsrc import dataloader\nfrom hmm_taggin.newsrc import myHMM\n\nif __name__ == '__main__':\n training_path = '../train.conll'\n dataset = dataloader.Loader(training_path)\n N = dataset.N\n V = dataset.V\n hmm = myHMM.Hmm(N, V, dataset, alpha=0)\n # print(hmm.A[:3])\n\n # print([i for i in range(len(list(hmm.B[2]))) if list(hmm.B[2])[i] != 
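get_sprawl_score() in the sprawl-calculator record above walks the image pixel by pixel in Python. numpy does the bright-pixel count in one expression — this assumes `img` is the PIL grayscale image produced by get_loc_img:

import numpy as np

def sprawl_score(img, population, threshold=100, scale=100):
    arr = np.asarray(img)                    # (H, W) uint8 grayscale
    bright = int((arr > threshold).sum())    # non-nature pixels
    return bright / int(population) * scale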
0])\n\n print(f'A.shape: {hmm.A.shape}')\n print(f'B.shape: {hmm.B.shape}')\n print(hmm.Pi)\n print(len(dataset.sent_tag_list))\n\n # A = hmm.A\n # B = hmm.B\n # Pi = hmm.Pi\n\n '''\n dev: sent_list[ [], [] ]\n '''\n # print(dataset.vocab_list)\n\n # dev_path = '../dev.conll'\n dev_path = '../train.conll'\n dev = dataloader.Loader(dev_path)\n\n input_list = dev.sent_list\n tag_list = dev.sent_tag_list\n # print(input_list[:10])\n\n count = 0\n total_acc = 0\n total_tag = 0\n total_correct_tag = 0\n for pair in zip(input_list, tag_list):\n count += 1\n # print(pair)\n input_seq = pair[0]\n gold_tag = pair[1]\n total_tag += len(gold_tag)\n T = len(input_seq)\n predict_tag, predict_prob = hmm.viterbi_predict(input_seq, T)\n print(input_seq)\n print(f'gold: {gold_tag}')\n print(f'predict: {predict_tag}')\n # acc = hmm.evaluate(gold_tag, input_seq)\n # print(acc)\n\n correct = 0\n for i in range(len(gold_tag)):\n if gold_tag[i] == predict_tag[i]:\n correct += 1\n acc = correct/len(gold_tag)\n print(f'acc: {acc}')\n total_acc += acc\n total_correct_tag += correct\n\n # if count >= 10:\n # break\n\n print(f'acc/sent over dev: {total_acc/ len(input_list)}')\n print(f'acc/tag over dev: {total_correct_tag / total_tag}')\n\n '''\n \n alpha = 0\n acc/sent over dev: 0.4343904967934592\n acc/tag over dev: 0.37099306425008444\n \n alpha = 0.3\n acc/sent over dev: 0.7327572893724613\n acc/tag over dev: 0.7378326278344164\n '''","repo_name":"guangyuli-uoe/sulzh","sub_path":"hmm_taggin/newsrc/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40837943377","text":"# 复制图片\n\norigin = r'D:\\picture\\1.jpg'\n\ndest = r'D:\\picture\\copy.jpg'\n\ntry:\n\ts = open(origin, 'rb')\n\td = open(dest, 'wb')\n\tfor line in s.readlines():\n\t\td.write(line)\n\nfinally:\n\ts.close()\n\td.close()\n\n","repo_name":"MrQuJL/python-scripts","sub_path":"10_IO编程/copy.py","file_name":"copy.py","file_ext":"py","file_size_in_byte":205,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"31695079853","text":"\"\"\"project00 URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/4.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom main import views\n\nurlpatterns = [\n path('', views.page_main),\n path('login/', views.page_login),\n path('signup/', views.page_signup),\n path('pictures/', views.page_pictures),\n path('about/', views.page_about),\n\n path('ajax_say_hello/', views.ajax_say_hello),\n # path('ajax_sign_up/', views.ajax_sign_up)\n]\n","repo_name":"ArthurGartner/csci656-semester-work","sub_path":"project00/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24666039914","text":"import random\n\nimport discord\nfrom discord import Embed, Option\nfrom discord.ext import commands\n\nfrom munchi.config import Config\nfrom munchi.gifs import random_gif\n\nconfig = Config()\n\n\nclass FunCog(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.slash_command(description=\"Search for a random GIF\")\n async def gif(self, ctx, q: Option(str, description=\"Search query\")):\n \"\"\"Sends a random gif from a search query\"\"\"\n embed = discord.Embed(\n description=f\"Random gif for {q!r}\", colour=discord.Colour.blue()\n )\n embed.set_image(url=await random_gif(q))\n\n await ctx.respond(embed=embed)\n\n @commands.slash_command(description=\"Give someone a hug 🤗\")\n async def hug(\n self, ctx, user: Option(discord.User, description=\"Who you want to hug\")\n ):\n \"\"\"Sends a hugging GIF and says author hugged user\"\"\"\n SEARCHES = [\"hug anime\", \"hug lesbian anime\", \"hug gay anime\"]\n\n embed = discord.Embed(\n description=f\"{ctx.author.mention} hugged {user.mention} :)\",\n colour=discord.Colour.blue(),\n )\n embed.set_image(url=await random_gif(random.choice(SEARCHES), 25))\n embed.set_footer(text=\"Powered by Tenor\")\n\n await ctx.respond(embed=embed)\n\n @commands.slash_command(description=\"Give someone a kiss 💋\")\n async def kiss(\n self, ctx, user: Option(discord.User, description=\"Who you want to kiss\")\n ):\n \"\"\"Sends a kissing GIF and says author kissed user\"\"\"\n SEARCHES = [\"kiss anime\", \"kiss lesbian anime\", \"kiss gay anime\"]\n\n embed = discord.Embed(\n description=f\"{ctx.author.mention} kissed {user.mention} <3\",\n colour=discord.Colour.blue(),\n )\n embed.set_image(url=await random_gif(random.choice(SEARCHES), 25))\n embed.set_footer(text=\"Powered by Tenor\")\n\n await ctx.respond(embed=embed)\n\n @commands.slash_command(description=\"Pull a Will Smith 🥊\")\n @commands.guild_only()\n async def slap(\n self, ctx, user: Option(discord.User, description=\"Who you want to slap\")\n ):\n \"\"\"Sends a slapping GIF and says author slapped user\"\"\"\n embed = discord.Embed(\n description=f\"{ctx.author.mention} slapped {user.mention} 🥊\",\n colour=discord.Colour.blue(),\n )\n embed.set_image(url=await random_gif(\"slap anime\", 50))\n embed.set_footer(text=\"Powered by Tenor\")\n\n await ctx.respond(embed=embed)\n\n\ndef setup(bot):\n bot.add_cog(FunCog(bot))\n","repo_name":"wxllow/munchi","sub_path":"cogs/ext/fun.py","file_name":"fun.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8880181239","text":"from datetime import datetime, timezone, timedelta\r\nimport re\r\nimport singer\r\nimport singer.metrics as metrics\r\nfrom singer import metadata\r\nfrom singer.bookmarks import 
get_bookmark\r\nfrom tap_aconex.utility import (\r\n get_generic,\r\n get_all_pages,\r\n datetime_format,\r\n parse_date,\r\n format_date,\r\n date_format,\r\n coerce_to_list,\r\n)\r\n\r\n\r\ndef handle_projects(resource, schemas, state, mdata):\r\n bookmark = get_bookmark(state, resource, \"since\")\r\n # Current time in local timezone as \"aware datetime\", per https://stackoverflow.com/a/25887393/7170445\r\n extraction_time = datetime.now(timezone.utc).astimezone()\r\n sync_organisations = \"organisations\" in schemas\r\n # store in a dict to deduplicate results (faster than Redshift doing so)\r\n organisations = {}\r\n\r\n res = get_generic(resource, \"projects\")\r\n rows = res[\"ProjectResults\"][\"SearchResults\"][\"Project\"]\r\n\r\n for row in rows:\r\n row[\"Active\"] = row[\"@Active\"]\r\n write_record(row, resource, schemas[resource], mdata, extraction_time)\r\n\r\n # don't try to fetch documents or mail for inactive projects as the API will error\r\n if row[\"Active\"] == \"false\":\r\n continue\r\n\r\n if \"documents\" in schemas:\r\n handle_documents(row[\"ProjectId\"], schemas[\"documents\"], state, mdata)\r\n if \"mail\" in schemas:\r\n mail_schemas = {\"mail\": schemas[\"mail\"]}\r\n if \"organisations\" in schemas:\r\n mail_schemas[\"organisations\"] = schemas[\"organisations\"]\r\n orgs = handle_mail(\r\n row[\"ProjectId\"], mail_schemas, state, mdata, sync_organisations\r\n )\r\n for (id, name) in orgs:\r\n organisations[id] = name\r\n\r\n if sync_organisations:\r\n organisations_rows = [\r\n {\"OrganizationId\": k, \"OrganizationName\": v}\r\n for (k, v) in organisations.items()\r\n ]\r\n write_many(\r\n organisations_rows,\r\n \"organisations\",\r\n schemas[\"organisations\"],\r\n mdata,\r\n extraction_time,\r\n )\r\n\r\n if \"documents\" in schemas:\r\n state = write_bookmark(state, \"documents\", extraction_time)\r\n if \"mail\" in schemas:\r\n state = write_bookmark(state, \"mail\", extraction_time)\r\n if \"organisations\" in schemas:\r\n state = write_bookmark(state, \"organisations\", extraction_time)\r\n\r\n return write_bookmark(state, resource, extraction_time)\r\n\r\n\r\ndef handle_documents(project_id, schema, state, mdata):\r\n resource = \"documents\"\r\n bookmark = get_bookmark(state, resource, \"since\")\r\n # Current time in local timezone as \"aware datetime\", per https://stackoverflow.com/a/25887393/7170445\r\n extraction_time = datetime.now(timezone.utc).astimezone()\r\n\r\n fields = \"&return_fields=approved,author,contractnumber,date1,docno,doctype,filename,fileSize,fileType,modifiedby,numberOfMarkups,packagenumber,received,reference,registered,reviewed,revision,scale,statusid,tagNumber,title,toclient,trackingid,versionnumber\"\r\n filter = get_filter_string(bookmark, extraction_time, \"registered\")\r\n\r\n rows = get_all_pages(\r\n resource,\r\n f\"projects/{project_id}/register\",\r\n \"Document\",\r\n \"RegisterSearch\",\r\n extra_query_string=f\"&{fields}&{filter}\",\r\n )\r\n\r\n for r in rows:\r\n r[\"ProjectId\"] = project_id\r\n r[\"DocumentId\"] = r[\"@DocumentId\"]\r\n\r\n write_many(rows, resource, schema, mdata, extraction_time)\r\n\r\n\r\ndef handle_mail(project_id, schemas, state, mdata, sync_organisations):\r\n resource = \"mail\"\r\n schema = schemas[resource]\r\n bookmark = get_bookmark(state, resource, \"since\")\r\n # Current time in local timezone as \"aware datetime\", per https://stackoverflow.com/a/25887393/7170445\r\n extraction_time = datetime.now(timezone.utc).astimezone()\r\n\r\n fields = 
\"&return_fields=attribute,closedoutdetails,confidential,corrtypeid,docno,fromUserDetails,inreftomailno,mailRecipients,reasonforissueid,responsedate,secondaryattribute,sentdate,subject,tostatusid,totalAttachmentsSize,attachedDocumentCount\"\r\n filter = get_filter_string(bookmark, extraction_time, \"sentdate\")\r\n\r\n rows = get_all_pages(\r\n resource,\r\n f\"projects/{project_id}/mail\",\r\n \"Mail\",\r\n \"MailSearch\",\r\n extra_query_string=f\"&mail_box=sentbox{fields}{filter}\",\r\n )\r\n\r\n organisations = []\r\n if sync_organisations:\r\n organisations = [\r\n (r[\"OrganizationId\"], r[\"OrganizationName\"])\r\n for row in rows\r\n for r in coerce_to_list(row[\"ToUsers\"][\"Recipient\"])\r\n ]\r\n\r\n for r in rows:\r\n r[\"ProjectId\"] = project_id\r\n r[\"MailId\"] = r[\"@MailId\"]\r\n try:\r\n r[\"sent_to\"] = next(\r\n r[\"OrganizationName\"]\r\n for r in coerce_to_list(r[\"ToUsers\"][\"Recipient\"])\r\n if r[\"DistributionType\"] == \"TO\"\r\n )\r\n except:\r\n pass\r\n\r\n write_many(rows, resource, schema, mdata, extraction_time)\r\n return organisations\r\n\r\n\r\ndef get_filter_string(bookmark, extraction_time, field):\r\n if bookmark is None:\r\n return \"\"\r\n else:\r\n filter_date_format = \"%Y%m%d\"\r\n filter_start = format_date(\r\n parse_date(bookmark, datetime_format) - timedelta(days=2),\r\n filter_date_format,\r\n )\r\n filter_end = format_date(\r\n extraction_time + timedelta(days=2), filter_date_format\r\n )\r\n return f\"&search_query={field}:[{filter_start} TO {filter_end}]\"\r\n\r\n\r\ndef write_many(rows, resource, schema, mdata, dt):\r\n with metrics.record_counter(resource) as counter:\r\n for row in rows:\r\n write_record(row, resource, schema, mdata, dt)\r\n counter.increment()\r\n\r\n\r\ndef write_record(row, resource, schema, mdata, dt):\r\n with singer.Transformer() as transformer:\r\n rec = transformer.transform(row, schema, metadata=metadata.to_map(mdata))\r\n singer.write_record(resource, rec, time_extracted=dt)\r\n\r\n\r\ndef write_bookmark(state, resource, dt):\r\n singer.write_bookmark(state, resource, \"since\", format_date(dt))\r\n return state\r\n","repo_name":"FosterConstructionGroup/tap-aconex","sub_path":"tap_aconex/fetch.py","file_name":"fetch.py","file_ext":"py","file_size_in_byte":6175,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"42729529800","text":"import os\nimport pandas as pd\n\ndata_file_folder = './excel'\n\ndf = []\n\nfor file in os.listdir(data_file_folder):\n if file.endswith('.xls'):\n print('Loading file {0}...'.format(file))\n df.append(pd.read_excel(os.path.join(data_file_folder, file), sheet_name='Sheet 1'))\n\nprint(len(df))\n\n\n# df_master = pd.concat(df, axis=0)\n# df_master.to_excel('master_excel.xls', index=False)","repo_name":"HAtherlolz/flagma_script","sub_path":"excel_merge.py","file_name":"excel_merge.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42204608708","text":"class Solution :\n def duplicateZeros(self, arr: list[int]) -> None:\n arr1=[]\n for i in arr :\n arr1.append(i)\n if i==0 :\n arr1.append(0)\n\n arr=arr1[:len(arr)]\n return arr\n\nobj=Solution()\nprint(obj.duplicateZeros([1,0,2,3,0,4,5,0]))\n\n#leet code submission\n# arr1=[]\n# for i in arr :\n# if i==0 :\n# arr1.append(0)\n# arr1.append(i)\n# for i in range(len(arr)):\n# 
arr[i]=arr1[i]","repo_name":"harshita1611/leet_code","sub_path":"101_Array/duplicate_zeroes.py","file_name":"duplicate_zeroes.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"6501185586","text":"from my_alg import *\nimport numpy as np\nimport cv2\n\n\ndef preprocess(img):\n myalg = ImgAlg()\n\n # 使用顏色區分 HSV\n blur = cv2.GaussianBlur(img, (5, 5), 0)\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))\n dilate = cv2.dilate(blur, kernel, iterations=5)\n morph = cv2.erode(dilate, kernel, iterations=5)\n\n hsv = cv2.cvtColor(morph, cv2.COLOR_BGR2HSV)\n\n mask_B = myalg.colorRange(hsv, 'blue')\n mask_Gr = myalg.colorRange(hsv, 'gray', optrange=(np.array([35, 20, 46]), np.array([180, 43, 220])))\n img_mask = mask_B + mask_Gr\n\n morph[img_mask != 255] = [0, 0, 0]\n gray = cv2.cvtColor(morph, cv2.COLOR_BGR2GRAY)\n\n # applyColorMap\n color_map = cv2.applyColorMap(gray, cv2.COLORMAP_JET)\n hsv = cv2.cvtColor(color_map, cv2.COLOR_BGR2HSV)\n\n mask_Y = myalg.colorRange(hsv, 'yellow')\n mask_Or = myalg.colorRange(hsv, 'orange')\n map_img_mask = mask_Y + mask_Or\n gray[map_img_mask != 255] = 0\n\n # cv2.imwrite('./Demo_img_mask_gray3.jpg', gray)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n return gray\n\n\ndef drawSolar(ori_img, img_gray):\n myalg = ImgAlg()\n\n blur = cv2.GaussianBlur(img_gray, (7, 7), 0)\n # thres1, thres2, method = myalg.adjust_threshold(\n # img=blur,\n # name='adj thres',\n # param1='thres1',\n # param2='thres2',\n # param1_lim=(0, 255),\n # param2_lim=(0, 255),\n # method=cv2.THRESH_BINARY\n # )\n\n # threshold\n thres1, thres2 = 95, 255\n method = cv2.THRESH_BINARY\n _, thres = cv2.threshold(blur, thres1, thres2, method)\n\n # drawContour\n contours, hierarchy = cv2.findContours(thres, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n cv2.drawContours(ori_img, contours, -1, (0, 255, 255), 2)\n\n # cv2.imwrite('./Demo_img_draw.jpg', draw)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n return contours, ori_img\n\n\ndef counter(contours, draw_img):\n # 找已知是太陽能板的位置, 算出一塊太陽能板的面積\n x1, x2 = 805, 830\n y1, y2 = 465, 515\n find_solar = draw_img.copy()\n\n cv2.rectangle(find_solar, (x1, y1), (x2, y2), (0, 0, 255), 2)\n # cv2.imwrite(\"./Demo_find.jpg\", find_solar)\n\n # 算輪廓面積除單塊的求數量\n target_area = (x2 - x1) * (y2 - y1)\n total_areas = 0\n length = len(contours)\n\n for index in range(0, length):\n area = cv2.contourArea(contours[index])\n total_areas += area\n\n nums = (total_areas // target_area) + 1\n return int(nums)\n\n\nif __name__ == '__main__':\n file = \"./Demo.JPG\"\n ori = cv2.imread(file)\n handle_gray = preprocess(ori)\n contour, draw = drawSolar(ori, handle_gray)\n counts = counter(contour, draw)\n print(counts)\n\n","repo_name":"Sapphire0912/Programming","sub_path":"Python/Practice/opencv_image/count/opencv_solar_v2.py","file_name":"opencv_solar_v2.py","file_ext":"py","file_size_in_byte":2704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4728583184","text":"import subprocess\nfrom pathlib import Path\nfrom MakeExecutable import main as MakeExecutableFile\nfrom version import Version\nimport os\nimport shutil\n\n\ncurrent_path = Path(os.path.dirname(os.path.realpath(__file__)))\ninstall_script_template = current_path / 'template' / 'install_script.iss'\n\ninstaller_executable_path = current_path / 'dist' / 'installer'\nversion_info = Version()\nversion_info.read_file()\n\nif 
os.path.exists(installer_executable_path) and os.path.isdir(installer_executable_path):\n print(\"removing old installers\")\n shutil.rmtree(installer_executable_path)\n\nprint('Making Executable')\nMakeExecutableFile()\nversion_info.read_file()\nprint('Compiling Installer')\nwith open(install_script_template) as f:\n template_text = f.read()\n template_text = template_text.replace(r'{{AppVersion}}', str(version_info))\n\nwith open('install_script_tmp.iss', 'w') as f:\n f.write(template_text)\n\niss_loc = Path.cwd() / 'install_script_tmp.iss'\ncompil32_path = Path('C:/Program Files (x86)/Inno Setup 6') / 'iscc'\nsubprocess.check_call([compil32_path, '/Qp', str(iss_loc)])\n","repo_name":"dgaiero/SubVid","sub_path":"MakeInstaller.py","file_name":"MakeInstaller.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10800477953","text":"#!/usr/bin/env python\n\n\"\"\"Description:\nThe train.py is to build your CNN model, train the model, and save it for later evaluation(marking)\nThis is just a simple template, you feel free to change it according to your own style.\nHowever, you must make sure:\n1. Your own model is saved to the directory \"model\" and named as \"model.h5\"\n2. The \"test.py\" must work properly with your model, this will be used by tutors for marking.\n3. If you have added any extra pre-processing steps, please make sure you also implement them in \"test.py\"\n so that they can later be applied to test images.\n\n©2018 Created by Yiming Peng and Bing Xue\n\"\"\"\nimport glob\nimport os\nimport shutil\n\nfrom keras_preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\n\nimport numpy as np\nimport tensorflow as tf\nimport random\n\nfrom tensorflow.python.distribute.multi_process_lib import multiprocessing\nfrom tensorflow.python.keras import Input\nfrom tensorflow.python.keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard\nfrom tensorflow.python.keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten\n\n# Set random seeds to ensure the reproducible results\nSEED = 309\nrandom.seed(SEED)\nnp.random.seed(SEED)\ntf.random.set_seed(SEED)\n\nbatch_size = 64\nimage_size = (300, 300)\ntraining_size = 3600\nvalidation_size = 450\ntesting_size = 150\n\n\ndef create_data_generators():\n \"\"\"\n Create the data generators from file for each of the datasets.\n :return: Created Data_Generators\n \"\"\"\n class_tuple = ['cherry', 'strawberry', 'tomato']\n\n train_datagen = ImageDataGenerator(\n rescale=1. / 255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True)\n\n test_datagen = ImageDataGenerator(rescale=1. 
/ 255)\n\n train_generator = train_datagen.flow_from_directory(\n directory='data/train',\n target_size=image_size,\n classes=class_tuple,\n batch_size=batch_size)\n\n validation_generator = test_datagen.flow_from_directory(\n directory='data/validation',\n target_size=image_size,\n classes=class_tuple,\n batch_size=batch_size)\n\n test_generator = test_datagen.flow_from_directory(\n directory='data/test',\n target_size=image_size,\n classes=class_tuple,\n batch_size=batch_size,\n shuffle=False)\n\n assert train_generator.n == training_size\n assert validation_generator.n == validation_size\n assert test_generator.n == testing_size\n assert train_generator.num_classes == validation_generator.num_classes == test_generator.num_classes == 3\n\n return train_generator, validation_generator, test_generator\n\n\ndef construct_model():\n \"\"\"\n Construct the CNN model.\n ***\n Please add your model implementation here, and don't forget compile the model\n E.g., model.compile(loss='categorical_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])\n NOTE, You must include 'accuracy' in as one of your metrics, which will be used for marking later.\n ***\n :return: model: the initial CNN model\n \"\"\"\n model = Sequential()\n\n # Block 1 convolution layer\n model.add(Conv2D(input_shape=(300, 300, 3), filters=32, kernel_size=(3, 3), padding='same', activation='relu'))\n model.add(Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n\n # Block 2 convolution layer\n model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n\n # Block 3 convolution layer\n model.add(Conv2D(filters=128, kernel_size=(3, 3), padding='same', activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n\n # Block 4 convolution layer\n model.add(Conv2D(filters=256, kernel_size=(3, 3), padding='same', activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n\n # Fully connected classifier using softmax\n model.add(Flatten())\n model.add(Dense(units=1024, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(units=1024, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(units=3, activation='softmax'))\n\n model.summary()\n\n model.compile(loss='categorical_crossentropy',\n optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),\n metrics=['accuracy'])\n\n return model\n\n\ndef construct_mlp_model():\n \"\"\"\n Construct the MLP model.\n :return: model: the initial MLP model.\n \"\"\"\n model = Sequential()\n\n model.add(Input(shape=(300, 300, 3)))\n model.add(Flatten())\n model.add(Dense(units=256, activation='relu'))\n model.add(Dense(units=256, activation='relu'))\n model.add(Dense(units=3, activation='softmax'))\n\n model.compile(loss='categorical_crossentropy',\n optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),\n metrics=['accuracy'])\n\n return model\n\n\ndef train_model(model, train_generator, validation_generator):\n \"\"\"\n Train the CNN model\n ***\n Please add your training implementation here, including pre-processing and training\n ***\n :param model: the initial CNN model\n :return:model: the trained CNN model\n \"\"\"\n callbacks = [\n EarlyStopping(monitor=\"val_loss\", min_delta=1e-2, patience=10, verbose=1),\n ModelCheckpoint(filepath='./model/checkpoints/model.{epoch:02d}-{val_loss:.2f}.h5', save_best_only=True),\n TensorBoard(log_dir='./logs')\n ]\n\n 
model.fit(train_generator,\n epochs=200,\n steps_per_epoch=training_size // batch_size,\n validation_data=validation_generator,\n validation_steps=validation_size // batch_size,\n callbacks=callbacks,\n workers=multiprocessing.cpu_count(),\n max_queue_size=512,\n verbose=2)\n\n return model\n\n\ndef save_model(model):\n \"\"\"\n Save the keras model for later evaluation\n :param model: the trained CNN model\n :return:\n \"\"\"\n model.save(\"model/model.h5\")\n print(\"Model Saved Successfully.\")\n\n\ndef split_data():\n \"\"\"\n Split the data into training, validation, and testing sets.\n :return:\n \"\"\"\n class_training_size = training_size // 3\n class_validation_size = validation_size // 3\n class_testing_size = testing_size // 3\n\n if os.path.isdir('data/train/cherry') is False:\n os.chdir('data')\n\n os.makedirs('train/cherry')\n os.makedirs('train/strawberry')\n os.makedirs('train/tomato')\n\n os.makedirs('validation/cherry')\n os.makedirs('validation/strawberry')\n os.makedirs('validation/tomato')\n\n os.makedirs('test/cherry')\n os.makedirs('test/strawberry')\n os.makedirs('test/tomato')\n\n for c in random.sample(glob.glob('cherry*'), class_training_size):\n shutil.move(c, 'train/cherry')\n for c in random.sample(glob.glob('strawberry*'), class_training_size):\n shutil.move(c, 'train/strawberry')\n for c in random.sample(glob.glob('tomato*'), class_training_size):\n shutil.move(c, 'train/tomato')\n\n for c in random.sample(glob.glob('cherry*'), class_validation_size):\n shutil.move(c, 'validation/cherry')\n for c in random.sample(glob.glob('strawberry*'), class_validation_size):\n shutil.move(c, 'validation/strawberry')\n for c in random.sample(glob.glob('tomato*'), class_validation_size):\n shutil.move(c, 'validation/tomato')\n\n for c in random.sample(glob.glob('cherry*'), class_testing_size):\n shutil.move(c, 'test/cherry')\n for c in random.sample(glob.glob('strawberry*'), class_testing_size):\n shutil.move(c, 'test/strawberry')\n for c in random.sample(glob.glob('tomato*'), class_testing_size):\n shutil.move(c, 'test/tomato')\n\n os.chdir('..')\n\n\ndef test_model(model):\n print(\"Testing model...\")\n loss_and_metrics = model.evaluate(train_generator, verbose=0)\n print(\"Train loss:{}\\nTrain accuracy:{}\\n\".format(loss_and_metrics[0], loss_and_metrics[1]))\n loss_and_metrics = model.evaluate(validation_generator, verbose=0)\n print(\"Validation loss:{}\\nValidation accuracy:{}\\n\".format(loss_and_metrics[0], loss_and_metrics[1]))\n loss_and_metrics = model.evaluate(test_generator, verbose=0)\n print(\"Test loss:{}\\nTest accuracy:{}\\n\".format(loss_and_metrics[0], loss_and_metrics[1]))\n\n\nif __name__ == '__main__':\n # Split the dataset into smaller size, may remove later\n split_data()\n\n # Create data generators\n train_generator, validation_generator, test_generator = create_data_generators()\n\n # Construct the model\n model = construct_model()\n\n # Train the model\n model = train_model(model, train_generator, test_generator)\n\n # Test the model\n test_model(model)\n\n # Save the model\n save_model(model)\n","repo_name":"MattHillWakatipu/DeepConvolutionalNeuralNetwork","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":9075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23007897085","text":"from flask import *\nimport random, string\nfrom application import db\nfrom application.models import flashcard_users as fc_u\nimport requests\nfrom urllib import 
urlencode\n \n \n\ndef randomword(length):\n return ''.join(random.choice(string.lowercase) for i in range(length))\n\n# EB looks for an 'application' callable by default.\napplication = Flask(__name__)\n\n# TODO: Change this\napplication.secret_key = 'YOUR_SECRET_KEY '\n\n\n@application.route('/')\n@application.route('/index')\ndef index_route():\n # TODO: Change this\n client_id = 'GET_FROM_QUIZLET'\n scope = 'read'\n state = randomword(10)\n\n redirect_uri = 'https://google.com'\n\n url = 'https://quizlet.com/authorize?response_type=code&client_id={}&scope={}&state={}'.format(\n client_id, scope, state)\n\n context = {\n 'url' : url\n }\n\n session['state'] = state\n\n return render_template('index.html', **context)\n\n@application.route('/steptwo')\ndef step_two_route():\n state = request.args.get('state')\n\n assert(state == session.get('state'))\n\n code = request.args.get('code')\n \n grant_type = 'authorization_code'\n \n # TODO: Change this\n redirect_uri = 'REDIRECT_URI'\n #redirect_uri = 'http://localhost:5000/steptwo'\n\n # TODO: Change this\n user_and_pass = 'CHANGE'\n\n headers = { 'Authorization' : 'Basic %s' % user_and_pass }\n\n url = 'https://api.quizlet.com/oauth/token'\n\n params = {\n 'code' : code,\n 'grant_type' : grant_type,\n 'redirect_uri': redirect_uri\n }\n\n r = requests.post(url, params=params, headers=headers)\n\n quizlet_username = json.loads(r.text)['user_id']\n access_token = json.loads(r.text)['access_token']\n\n pin = None\n\n context = {}\n\n result = fc_u.query.filter_by(quizlet_username=quizlet_username).first()\n\n if result == None:\n while True:\n pin_try =\"%04d\" % random.randint(0,9999)\n result = fc_u.query.filter_by(pin_code=pin_try).first()\n if result == None:\n print(pin_try, \"is the pin for new user\", quizlet_username)\n new_user = fc_u(quizlet_username, access_token, pin_try)\n\n db.session.add(new_user)\n db.session.commit() \n db.session.close()\n\n pin = pin_try\n print('added, commited, and closed.')\n context['greeting'] = 'Welcome to Flashcard Helper {}'.format(quizlet_username)\n context['pin_code'] = pin\n break\n else:\n print(result)\n pin = result.pin_code\n context['greeting'] = 'Welcome back to Flashcard Helper {}'.format(quizlet_username)\n context['pin_code'] = pin\n print('found a user')\n \n return render_template('index.html', **context)\n\n\n# run the app.\nif __name__ == \"__main__\":\n application.run(host='0.0.0.0')\n","repo_name":"bhairavmehta95/flashcard-helper-eb","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23585701071","text":"#! 
/usr/bin/python\n\nimport os\nimport sys\nimport copy\nimport math\n\ndef debug(msg):\n if len(sys.argv) > 1 and sys.argv[1] == '-d':\n sys.stderr.write('%s' % msg)\n sys.stderr.write('\\n')\n\ndef solve(pancakes, K):\n pancakes.sort()\n pancakes.reverse()\n best = 0\n for i in range(len(pancakes)):\n debug('trying i=%s' % i)\n if i > N - K:\n break\n my_run = (pancakes[i][0]*pancakes[i][0]) + pancakes[i][1] * pancakes[i][0] * 2\n debug(' *** a=%s (%s) %s' % (my_run, pancakes[i], my_run))\n pancakes_rest = list(reversed(sorted(pancakes[i+1:], key=lambda x: x[0]*x[1])))\n for j in range(K-1):\n debug(' ** adding j=%s' % j)\n my_run += pancakes_rest[j][1] * pancakes_rest[j][0] * 2 \n debug(' * my_best = %s' % my_run)\n if my_run > best:\n best = my_run\n return best * math.pi\n\nsys.setrecursionlimit(15000)\n\nT = int(sys.stdin.readline())\n# For each test case\nfor t in range(1, T+1):\n debug(' ************* case %s' % t)\n pancakes = []\n [N, K] = [int(x) for x in sys.stdin.readline().strip().split(' ')]\n for i in range(N):\n [R_i, H_i] = [int(x) for x in sys.stdin.readline().strip().split(' ')]\n pancakes.append((R_i, H_i))\n ret = solve(pancakes, K)\n sys.stdout.write('Case #%s: %.6f\\n' % (t, ret))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_209/279.py","file_name":"279.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30848819370","text":"from __future__ import annotations\n\nimport time\nfrom typing import Literal\n\nimport optuna\nimport torch\n\nfrom mfglib.alg.abc import Algorithm\nfrom mfglib.alg.greedy_policy_given_mean_field import Greedy_Policy\nfrom mfglib.alg.utils import (\n _ensure_free_tensor,\n _print_fancy_header,\n _print_fancy_table_row,\n _print_solve_complete,\n _trigger_early_stopping,\n tuple_prod,\n)\nfrom mfglib.env import Environment\nfrom mfglib.mean_field import mean_field\nfrom mfglib.metrics import exploitability_score\n\n\nclass FictitiousPlay(Algorithm):\n \"\"\"Fictitious Play algorithm.\n\n Notes\n -----\n The implementation is based on Fictitious Play Damped.\n\n When ``alpha=None``, the algorithm is the same as the original Fictitious Play\n algorithm. When ``alpha=1``, the algorithm is the same as Fixed Point Iteration\n algorithm.\n\n See [#fp1]_ and [#fp2]_ for algorithm details.\n\n .. [#fp1] Perrin, Sarah, et al. \"Fictitious play for mean field games: Continuous\n time analysis and applications.\" Advances in Neural Information Processing\n Systems 33 (2020): 13199-13213. https://arxiv.org/abs/2007.03458\n\n .. [#fp2] Perolat, Julien, et al. \"Scaling up mean field games with online mirror\n descent.\" arXiv preprint arXiv:2103.00623 (2021).\n https://arxiv.org/abs/2103.00623\n\n \"\"\"\n\n def __init__(self, alpha: float | None = None) -> None:\n \"\"\"Fictitious Play algorithm.\n\n Attributes\n ----------\n alpha\n Learning rate hyperparameter. 
If None, in iteration n the\n learning rate is 1 / (n + 1).\n \"\"\"\n if alpha:\n if not isinstance(alpha, (int, float)) or not 0 <= alpha <= 1:\n raise ValueError(\"if not None, `alpha` must be a float in [0, 1]\")\n self.alpha = alpha\n\n def __str__(self) -> str:\n \"\"\"Represent algorithm instance and associated parameters with a string.\"\"\"\n return f\"FictitiousPlay(alpha={self.alpha})\"\n\n def solve(\n self,\n env_instance: Environment,\n *,\n pi: Literal[\"uniform\"] | torch.Tensor = \"uniform\",\n max_iter: int = 100,\n atol: float | None = 1e-3,\n rtol: float | None = 1e-3,\n verbose: bool = False,\n ) -> tuple[list[torch.Tensor], list[float], list[float]]:\n \"\"\"Run the algorithm and solve for a Nash-Equilibrium policy.\n\n Args\n ----\n env_instance\n An instance of a specific environment.\n pi\n A tensor of size (T+1,)+S+A representing the initial policy. If\n 'uniform', the initial policy will be the uniform distribution.\n max_iter\n Maximum number of iterations to run.\n atol\n Absolute tolerance criteria for early stopping.\n rtol\n Relative tolerance criteria for early stopping.\n verbose\n Print convergence information during iteration.\n \"\"\"\n S = env_instance.S\n A = env_instance.A\n\n # Auxiliary variables\n l_s = len(S)\n l_a = len(A)\n n_a = tuple_prod(A)\n ones_ts = (1,) * (1 + l_s)\n ats_to_tsa = tuple(range(l_a, l_a + 1 + l_s)) + tuple(range(l_a))\n\n pi = _ensure_free_tensor(pi, env_instance)\n\n solutions = [pi]\n argmin = 0\n scores = [exploitability_score(env_instance, pi)]\n runtimes = [0.0]\n\n if verbose:\n _print_fancy_header(\n alg_instance=self,\n env_instance=env_instance,\n max_iter=max_iter,\n atol=atol,\n rtol=rtol,\n )\n _print_fancy_table_row(\n n=0,\n score_n=scores[0],\n score_0=scores[0],\n argmin=argmin,\n runtime_n=runtimes[0],\n )\n\n if _trigger_early_stopping(scores[0], scores[0], atol, rtol):\n if verbose:\n _print_solve_complete(seconds_elapsed=runtimes[0])\n return solutions, scores, runtimes\n\n t = time.time()\n for n in range(1, max_iter + 1):\n # Compute the greedy policy and its induced mean-field\n L = mean_field(env_instance, pi)\n pi_br = Greedy_Policy(env_instance, L)\n L_br = mean_field(env_instance, pi_br)\n\n # Update policy\n mu_rptd = (\n L.flatten(start_dim=1 + l_s)\n .sum(-1)\n .repeat(A + ones_ts)\n .permute(ats_to_tsa)\n )\n mu_br_rptd = (\n L_br.flatten(start_dim=1 + l_s)\n .sum(-1)\n .repeat(A + ones_ts)\n .permute(ats_to_tsa)\n )\n weight = self.alpha if self.alpha else 1 / (n + 1)\n\n pi_next_num = (1 - weight) * pi.mul(mu_rptd) + weight * pi_br.mul(\n mu_br_rptd\n )\n pi_next_den = (1 - weight) * mu_rptd + weight * mu_br_rptd\n pi = pi_next_num.div(pi_next_den).nan_to_num(\n nan=1 / n_a, posinf=1 / n_a, neginf=1 / n_a\n ) # using uniform distribution when divided by zero\n\n solutions.append(pi.clone().detach())\n scores.append(exploitability_score(env_instance, pi))\n if scores[n] < scores[argmin]:\n argmin = n\n runtimes.append(time.time() - t)\n\n if verbose:\n _print_fancy_table_row(\n n=n,\n score_n=scores[n],\n score_0=scores[0],\n argmin=argmin,\n runtime_n=runtimes[n],\n )\n\n if _trigger_early_stopping(scores[0], scores[n], atol, rtol):\n if verbose:\n _print_solve_complete(seconds_elapsed=runtimes[n])\n return solutions, scores, runtimes\n\n if verbose:\n _print_solve_complete(seconds_elapsed=time.time() - t)\n\n return solutions, scores, runtimes\n\n @classmethod\n def _tuner_instance(cls, trial: optuna.Trial) -> FictitiousPlay:\n alpha_bool = trial.suggest_categorical(\"alpha_bool\", [False, 
True])\n alpha_num = trial.suggest_float(\"alpha_num\", 0.0, 1.0)\n alpha = None if alpha_bool else alpha_num\n return FictitiousPlay(alpha=alpha)\n\n def tune(\n self,\n env_suite: list[Environment],\n *,\n max_iter: int = 100,\n atol: float = 1e-3,\n rtol: float = 1e-3,\n metric: Literal[\"shifted_geo_mean\", \"failure_rate\"] = \"shifted_geo_mean\",\n n_trials: int | None = 10,\n timeout: float = 30.0,\n ) -> FictitiousPlay:\n \"\"\"Tune the algorithm over a given environment suite.\n\n Args\n ----\n env_suite\n A list of environment instances.\n max_iter\n The number of iterations to run the algorithm on each environment\n instance.\n atol\n Absolute tolerance criteria for early stopping.\n rtol\n Relative tolerance criteria for early stopping.\n metric\n Determines which metric to be used for scoring a trial. Either\n ``shifted_geo_mean`` or ``failure_rate``.\n n_trials\n The number of trials. If this argument is not given, as many\n trials are run as possible.\n timeout\n Stop tuning after the given number of second(s) on each\n environment instance. If this argument is not given, as many trials are\n run as possible.\n \"\"\"\n params = self._optimize_optuna_study(\n env_suite=env_suite,\n max_iter=max_iter,\n atol=atol,\n rtol=rtol,\n metric=metric,\n n_trials=n_trials,\n timeout=timeout,\n )\n if params:\n self.alpha = None if params[\"alpha_bool\"] else params[\"alpha_num\"]\n return self\n","repo_name":"radar-research-lab/MFGLib","sub_path":"mfglib/alg/fictitious_play.py","file_name":"fictitious_play.py","file_ext":"py","file_size_in_byte":7937,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"61"} +{"seq_id":"40152178992","text":"from django.conf.urls import patterns, include, url\nfrom Recipe import views\n\nurlpatterns = patterns('Recipe.views',\n url(r'^design/$',\n 'recipe_design'),\n url(r'^hop/(?P\\d+)/$',\n 'get_hop'),\n url(r'^fermentable/(?P\\d+)/$',\n 'get_fermentable'),\n url(r'^yeast/(?P\\d+)/$',\n 'get_yeast'),\n url(r'^view_recipe/(?P\\d+)/$',\n 'get_recipe'),\n)\n","repo_name":"phillipsra16/BrewMe","sub_path":"Recipe/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"17798007303","text":"# Define a class named Shape and its subclass Square. The Square class has an init function which\n# takes length as argument. 
Both classes have an area function which can print the area of the shape\n# where Shape’s area is 0 by default.\n\nclass Shape:\n def area(self):\n shape_area=0\n print(f\"the area of the shape is {shape_area}\")\n\nclass Square(Shape):\n def __init__(self,length):\n self.length=length\n\n def area(self):\n square_area=self.length*self.length\n print(f\"the area of a square is {square_area} square units\")\n\n#creating shape object\nshape_obj=Shape()\nshape_obj.area()\nprint(\"\")\n\n#creating square object\nsquare_obj=Square(5)\nsquare_obj.area()\n\n#output\n# the area of the shape is 0\n#\n# the area of a square is 25 square units\n#\n# Process finished with exit code 0\n\n","repo_name":"deepa-karthik/Python_assignments","sub_path":"task7/task7_2.py","file_name":"task7_2.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23369451911","text":"from time import perf_counter\n\nimport casadi as ca\nimport numpy as np\nfrom acados_template import AcadosModel\n\nfrom ..common import export_model\n\n__all__ = [\"export_mass_chain_model\", \"find_mass_chain_steady_state\"]\n\n\ndef export_mass_chain_model(dt: float, M: int, num_rk4_nodes: int = 10) -> AcadosModel:\n \"\"\"\n Create an AcadosModel for the mass chain model.\n\n :param dt: sampling time\n :type dt: float\n :param M: number of chained masses\n :type M: int\n :param num_rk4_nodes: number of nodes for the Runge-Kutta 4 integrator\n :type num_rk4_nodes: int\n\n :return: the AcadosModel instance and the function giving the accelerations of the\n intermediate masses\n \"\"\"\n X = ca.SX.sym(\"X\", 3 * (M + 1), 1)\n V = ca.SX.sym(\"V\", 3 * M, 1)\n x = ca.vertcat(X, V)\n u = ca.SX.sym(\"u\", 3, 1)\n\n # model constants\n g = np.array([0.0, 0.0, -9.81]) # [m.s^-2]\n L = 0.033 # [m]\n D = 0.1 # [N]\n m = 0.03 # [kg]\n\n A = ca.SX.zeros(3 * M, 1)\n\n for i in range(M):\n A[3 * i + 2] = -9.81\n\n for i in range(M + 1):\n if i == 0:\n dist = X[:3]\n else:\n dist = X[3 * i : 3 * (i + 1)] - X[3 * (i - 1) : 3 * i]\n\n scale = D / m * (1 - L / ca.norm_2(dist))\n F = scale * dist\n if i < M:\n A[3 * i : 3 * (i + 1)] -= F\n\n if i > 0:\n A[3 * (i - 1) : 3 * i] += F\n\n # forces = [\n # D\n # * (1 - L / ca.norm_2(X[3 * (i + 1) : 3 * (i + 2)] - X[3 * i : 3 * (i + 1)]))\n # * (X[3 * (i + 1) : 3 * (i + 2)] - X[3 * i : 3 * (i + 1)])\n # for i in range(M)\n # ]\n # forces.insert(0, D * (1 - L / ca.norm_2(X[:3])) * (X[:3]))\n #\n # A = ca.vertcat(*[(forces[i] - forces[i - 1]) / m + g for i in range(1, M + 1)])\n\n f_cont = ca.Function(\n \"f_cont\",\n [x, u],\n [ca.vertcat(V, u, A)],\n )\n\n return export_model(\"mass_chain_\" + str(M), x, u, f_cont, dt, num_rk4_nodes)\n\n\ndef find_mass_chain_steady_state(M: int, x_last: np.ndarray) -> np.ndarray:\n model = export_mass_chain_model(0.1, M)\n nx = model.x.size()[0]\n nu = model.u.size()[0]\n\n # initial guess for the state\n x0 = np.zeros(nx)\n x0[: 3 * (M + 1)] = np.array(\n [\n np.linspace(0.0, x_last[0], M + 2)[1:],\n np.linspace(0.0, x_last[1], M + 2)[1:],\n np.linspace(0.0, x_last[2], M + 2)[1:],\n ]\n ).ravel(\"F\")\n\n # decision variable\n w = ca.vertcat(model.x, model.xdot, model.u)\n # initial guess\n w0 = ca.vertcat(x0, np.zeros(nx), np.zeros(nu))\n\n # misuse IPOPT as a nonlinear equation solver\n nlp = {\n \"x\": w,\n \"f\": 0.0,\n \"g\": ca.vertcat(\n model.f_expl_expr, # steady state equations\n model.x[3 * M : 3 * (M + 1)] - x_last, # last mass position\n ),\n }\n solver = ca.nlpsol(\n 
\"solver\",\n \"ipopt\",\n nlp,\n {\n \"print_time\": False,\n \"ipopt.print_level\": 0,\n \"ipopt.sb\": \"yes\",\n },\n )\n start = perf_counter()\n res = solver(x0=w0, lbg=0, ubg=0)\n end = perf_counter()\n print(f\"steady state computation time: {1000*(end - start):.3f} ms\")\n w_opt = res[\"x\"]\n\n return w_opt[:nx].full().flatten()\n","repo_name":"tudoroancea/paper_rrlb_mpc","sub_path":"rrlb/mass_chain/mass_chain_model.py","file_name":"mass_chain_model.py","file_ext":"py","file_size_in_byte":3244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23628595811","text":"#!/usr/bin/env python\n\ndef solve(X,S,R,T,N, Wraw):\n\t# run during the slowest parts of the journey\n\n\tassert S > 0\n\tassert R > 0\n\n\tWraw.sort()\n\n\t#find the distance between walkways run at least those\n\tnoWalkway = 0\n\tlastEnd = 0\n\tfor start, end, speed in Wraw:\n\t\tnoWalkway += start - lastEnd\n\t\tlastEnd = end\n\tnoWalkway += X-lastEnd\n\n\ttime = 0\n\n\tif (T * R) <= noWalkway:\n\t\t# run as much as possible\n\t\ttime = T + (noWalkway - (T * R)) / float(S)\n\t\tT = 0\n\telse:\n\t\t# run all of it\n\t\ttime = noWalkway / float(R)\n\t\tT -= (noWalkway / float(R))\n\t\tassert T >= 0\n\n\tassert time >= 0\n\n\tWs = sorted([(s, Start, End) for Start, End, s in Wraw])\n\n\tfor ws, Start, End in Ws:\n\t\tdiff = End - Start\n\n\t\tassert diff >= 0\n\t\tassert ws >= 0\n\n\t\tif T == 0:\n\t\t\ttime += diff / float(ws + S)\n\t\telif diff > ((ws + R) * T):\n\t\t\tnotRunning = diff - ((ws + R) * T)\n\t\t\tassert notRunning >= 0\n\t\t\ttime += T + notRunning / float(ws + S) \n\t\t\tT = 0\t# used all run\n\t\telse:\n\t\t\ttime += diff / float(ws + R)\n\t\t\tT -= diff / float(ws + R)\n\t\t\tassert T >= 0\n\n\t\tassert time >= 0\n\n\n\treturn time\n\ndef solveFile(Filename):\n\tinFile = open(Filename, \"r\")\n\toutFile = open(Filename[:-2]+\"out\", \"w\")\n\ttests = int(inFile.readline())\n\tfor test in xrange(tests):\n\t\tX, S, R, T, N = map(int, inFile.readline().strip().split())\n\t\tWs = [map(int, inFile.readline().strip().split()) for w in xrange(N)]\n\n\t\toutFile.write(\"Case #{0}: {1}\\n\".format(test+1, solve(X,S,R,T,N,Ws)))\n\n#solveFile(\"example.in\")\n#solveFile(\"A-small-attempt1.in\")\nsolveFile(\"A-large.in\")\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_87/41.py","file_name":"41.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25939547784","text":"\n\n\n#stargin vars\nregisters = {'a':0, 'b':0}\n\ninstructions = []\n\n\n\nwith open('input2015_23.txt', 'r') as myfile: #read the input\n\tinput = myfile.read().split('\\n')\n\n\ninstructions = list(input) #make a list of all instructions (as strings). list because it needs to be in order (could have done other way, for example: just walj over input)\n\ndef execute(s, i): #function for executing the instructions. 
returns new i, (if normal -> +1, if jump -> the appropriate location)\n\ts_list = s.split()\n\tnew_i = i\n\n\tif (s_list[0] == \"hlf\"): #half\n\t\tregisters[s_list[1]] = registers[s_list[1]] // 2 #non-negative integers (whole number division / floor division)\n\t\tnew_i += 1\n\n\telif (s_list[0] == \"tpl\"): #triple\n\t\tregisters[s_list[1]] = registers[s_list[1]] * 3\n\t\tnew_i += 1\n\n\telif (s_list[0] == \"inc\"): #increase\n\t\tregisters[s_list[1]] = registers[s_list[1]] + 1\n\t\tnew_i += 1\n\n\telif (s_list[0] == \"jmp\"): #jump\n\t\tif (s_list[1][0] == '+'): #if the first char of offset is +, we increase the i by the rest offset\n\t\t\tnew_i += int(s_list[1][1:])\n\t\telse: #else we decrease the i by the rest of offset\n\t\t\tnew_i -= int(s_list[1][1:])\n\t\t\n\telif (s_list[0] == \"jie\"): #jump if even\n\t\tif (registers[s_list[1][:-1]] % 2 == 0): #if value of r is cleanly divisible by 2 (is even)\n\t\t\tif (s_list[2][0] == '+'):\n\t\t\t\tnew_i += int(s_list[2][1:])\n\t\t\telse:\n\t\t\t\tnew_i -= int(s_list[2][1:])\n\t\telse:\n\t\t\tnew_i += 1\n\n\telif (s_list[0] == \"jio\"):\n\t\tif (registers[s_list[1][:-1]] == 1): #if value of r is equal to 1\n\t\t\tif (s_list[2][0] == '+'):\n\t\t\t\tnew_i += int(s_list[2][1:])\n\t\t\telse:\n\t\t\t\tnew_i -= int(s_list[2][1:])\n\t\telse:\n\t\t\tnew_i += 1\n\n\treturn (new_i) #return the new value of i (new location)\n\ni = 0\n\nwhile (True): #infinite loop\n\tif (i >= len(instructions)): #break condition -> if location of next instruction is outside our \"memory\", if location greater than the number of instructions\n\t\tbreak\n\ti = execute(instructions[i], i)\n\n\nprint(\"1. the value of register b after executing all instructions:\", registers['b'])\n\n\n#second part, same process\nregisters = {'a':1, 'b':0} #we reuse the same vars, makes it simpler\n\ni = 0\n\nwhile (True):\n\tif (i >= len(instructions)):\n\t\tbreak\n\t#print(i, registers['a'], registers['b'])\n\ti = execute(instructions[i], i)\n\n\nprint(\"2. the value of register b after executing all instructions if a starts as 1:\", registers['b'])\n\n\"\"\"\nhlf r (set r to half its value)\ntpl r (set r to triple its value)\ninc r (increase the value of r by 1)\njmp offset (jump to +/- instruction indicated by offset. +/-0 is infinite loop(jumping to itself), +1 is next instruction)\njie r, offset (jump if even. if r is even jump to instruction indicated by offset)\njio r, offset is (jump if one, NOT ODD. 
if r is ==1 jump to instruction indicated by offset)\n\"\"\"","repo_name":"JureRot/adventofcode","sub_path":"2015/2015_23.py","file_name":"2015_23.py","file_ext":"py","file_size_in_byte":2731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70408429635","text":"from django.urls import path\nfrom .views import CreatePublicationView, PublicationDetailView, PublicationListView, PublicationSearchView, RelatedPublicationsListView, ListBySearchView, UpdatePublicationView, DeletePublicationView\n\n\n\nurlpatterns = [\n path('get-publication//', PublicationDetailView.as_view(), name='publication'),\n path('create/', CreatePublicationView.as_view(), name='create'),\n path('update//', UpdatePublicationView.as_view(), name='update'),\n path('delete//', DeletePublicationView.as_view(), name='delete'),\n path('get-publications/', PublicationListView.as_view(), name='publications'),\n path('search/', PublicationSearchView.as_view(), name='search'),\n path('related//', RelatedPublicationsListView.as_view(), name='related'),\n path('by/search/', ListBySearchView.as_view(), name='by-search'),\n]","repo_name":"MohaJabri/Proyecto_TrendySwap","sub_path":"Backend/apps/publication/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35225105118","text":"import sys\n\nfrom .runner import run\n\ndef runAgent(render=False):\n agent_args = \"--bot_race P \" \\\n \"--agent_race Z \" \\\n \"--difficulty 1 \" \\\n \"--max_agent_steps 0 \" \\\n \"--game_steps_per_episode 80000 \" \\\n \"--map Simple64 \" \\\n \"--agent agent.simple_agent_keras.RandomAgent \" \\\n \"--step_mul 500 \"\\\n \"--parallel 1 \"+ \\\n (\"--render True\" if render else \"--norender\")\n\n run(\"pysc2.bin.agent\",\n agent_args)\n\nif __name__ == \"__main__\":\n runAgent(render=sys.argv[1]=='True')\n","repo_name":"andreahi/CraftTensorStars","sub_path":"agent/agent_runner.py","file_name":"agent_runner.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10835372638","text":"'''\nABUNDANCE(S)\n let D be an empty dictionary (of strings to numbers)\n for string in S do\n let c_string be an empty string\n for character in reversed(string) do\n if character == A then\n append T to c_string\n else if character == T then\n append A to c_string\n else if character == C then\n append G to c_string\n else if character == G then\n append C to c_string\n if string in keys of D then\n D[string] += 1\n else if c_string in keys of D then\n D[c_string] += 1\n else \n if string < c_string then --- order alphabetically\n D[string] = 1\n else \n D[c_string] = 1\n return each key and its value of the sorted D\n'''\n\nimport sys\n\n\ndef abundance(S):\n D = {}\n for s in S:\n cs = ''\n for c in s[::-1]:\n if c == 'A':\n cs += 'T'\n elif c == 'T':\n cs += 'A'\n elif c == 'C':\n cs += 'G'\n elif c == 'G':\n cs += 'C'\n if s in D:\n D[s] += 1\n elif cs in D:\n D[cs] += 1\n else:\n if s < cs:\n D[s] = 1\n else:\n D[cs] = 1\n for k in sorted(D):\n print(k, D[k])\n\n\nabundance(sys.stdin.read().splitlines())\n","repo_name":"AlessandroGiulivo/prog2","sub_path":"Week 3/X28783_en/abundance.py","file_name":"abundance.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
+{"seq_id":"11575464334","text":"\"\"\"add ad manager role\n\nRevision ID: 3dad1b95543f\nRevises: 7a747fe99a36\nCreate Date: 2023-08-11 18:33:39.343745+00:00\n\n\"\"\"\nfrom typing import Any\n\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = '3dad1b95543f'\ndown_revision = '7a747fe99a36'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade(is_dev: bool, **kw: Any) -> None:\n op.execute('''\n INSERT INTO role(name)\n VALUES ('Ad Manager')\n ''')\n\n\ndef downgrade(is_dev: bool, **kw: Any) -> None:\n op.execute(\"DELETE FROM user_role \"\n \"WHERE role_id IN \"\n \"(SELECT id FROM role WHERE name = 'Ad Manager')\")\n op.execute('''\n DELETE FROM role WHERE name = 'Ad Manager'\n ''')\n","repo_name":"ruslan-simonenko/stc-ad-credit","sub_path":"server/db_migration/versions/2023_08_11_1833-3dad1b95543f_add_ad_manager_role.py","file_name":"2023_08_11_1833-3dad1b95543f_add_ad_manager_role.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35022809041","text":"from selenium import webdriver \nfrom selenium.webdriver.common.by import By \nimport time\nimport math \n\nlink=\"http://suninjuly.github.io/redirect_accept.html\"\n\ntry:\n browser=webdriver.Chrome()\n browser.get(link)\n button=browser.find_element(By.CSS_SELECTOR,\"button.btn\")\n button.click()\n new_window=browser.window_handles[1]\n browser.switch_to.window(new_window)\n \n \n def calc(x):\n return str(math.log(abs(12*math.sin(int(x)))))\n\n # Ваш код, который заполняет обязательные поля\n x_element = browser.find_element(By.ID, \"input_value\")\n x = x_element.text\n y = calc(x)\n #Ввести ответ в текстовое поле \n element = browser.find_element(By.CSS_SELECTOR,\"[class ='form-control']\") \n element.send_keys(y)\n # Нажать Submit\n button1 = browser.find_element(By.CSS_SELECTOR, \"button.btn\")\n button1.click()\n \n \n \n \nfinally:\n time.sleep(10)\n browser.quit()","repo_name":"ElenaProkorym/stepik_auto_tests_course","sub_path":"lesson6_step17.py","file_name":"lesson6_step17.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35747336336","text":"from socket import AF_INET, SOCK_STREAM\nimport logging\nimport time\nfrom collections import deque\nimport asynchat\nimport asyncore\nfrom os import getcwd, execl\nfrom os.path import join\nfrom traceback import format_exc\nfrom types import GeneratorType\nfrom sys import stdout, version_info\nfrom string import ascii_lowercase\n\n# In Python 3.4+ imp is depreciated in favor of the easier\n# `importlib`. 
This block detects if importlib is available,\n# and falls back to `imp` when not available.\nif version_info >= (3, 4):\n from importlib import import_module, reload\nelse:\n from imp import load_module\n\nimport ircpacket as ircp # NOQA\nfrom irctools import CLR_NICK, CLR_RESET, CLR_HGLT, require_auth, load_json, penalize_user # NOQA\nimport plugins # NOQA\nimport irc_argparse # NOQA\n\n# Make sure we don't send spam when send do smilies\nALLOWABLE_START_CHARS = set(ascii_lowercase)\nBAD_START_CHARS = {'d', 'p', 'o'}\nfor ch in BAD_START_CHARS:\n ALLOWABLE_START_CHARS.remove(ch)\n\nALL_PLUGINS = set() # All plugins as string\nPLUGIN_LIST = set() # Loaded plugins as modules\nDISABLED_PLUGINS = set() # Disabled plugins (strings)\nFAILED_PLUGINS = set() # Plugins which failed to load (strings)\n\nVERSION = '0.9'\nFORMATTING = 'UTF-8'\n\nSTOP = 0\nRESTART = 1\n\n\ndef is_iterable(obj):\n ''' Figure out if an object is iterable '''\n return (isinstance(obj, list) or isinstance(obj, tuple) or\n isinstance(obj, GeneratorType) or isinstance(obj, set))\n\n\nclass IRCClient(asynchat.async_chat):\n ''' Asyncronous IRC client that handles chat, networking IO,\n and everything else that goes along with that.\n '''\n def __init__(self, nick, shared_data):\n asynchat.async_chat.__init__(self)\n self.ibuffer = bytes()\n self.set_terminator(bytes('\\r\\n', FORMATTING))\n self.nick = nick\n self.shared_data = shared_data\n self.restart = False\n self.trace = False\n\n def write(self, text):\n ''' Write some text to the open socket '''\n print('DEBUG OUT: {}'.format(text))\n self.push(bytes('{}\\r\\n'.format(text), FORMATTING))\n\n def handle_connect(self):\n ''' Responsible for inital connection to the\n IRC server, as well as setting our nickname\n '''\n self.write('NICK {}'.format(self.nick))\n self.write('USER {0} {0} {0} :The best IRC bot around'.format(self.nick))\n\n def collect_incoming_data(self, data: str):\n self.ibuffer += data\n\n def found_terminator(self):\n line_bytes = self.ibuffer\n self.ibuffer = bytes()\n\n line = line_bytes.decode(encoding=FORMATTING)\n print('DEBUG IN: {}'.format(line))\n\n reply = handle_incoming(line, self.shared_data)\n if reply is None:\n return\n\n if isinstance(reply, str):\n self.write(reply)\n elif is_iterable(reply):\n for message in reply:\n if isinstance(message, str):\n self.write(message)\n elif isinstance(message, int):\n self.handlequit(message)\n\n def handlequit(self, flag):\n ''' Method to handle restarts and shutdowns\n '''\n if flag == STOP:\n self.close()\n elif flag == RESTART:\n self.restart = True\n self.close()\n else:\n print('Don\\'t know what to do with flag value {}'.format(flag))\n print('So I\\'m just gonna quit')\n self.close()\n\n def run(self, host, port):\n ''' Run the client targeted at a host on a port '''\n self.create_socket(AF_INET, SOCK_STREAM)\n time.sleep(0.15)\n self.connect((host, port))\n time.sleep(0.1)\n asyncore.loop(65)\n return self.restart\n\n def handle_error(self):\n ''' We handle the error here so that we don't\n disconnect from the server. After all, uptime is\n the #1 priority!\n '''\n trace = format_exc()\n try:\n print(trace)\n logging.debug('An error occurred...\\nHere\\'s the traceback:')\n logging.debug(trace)\n except Exception:\n print('An error broke loose!')\n\n\ndef load_builtins(shared: dict):\n ''' Small set of internal commands used to maintain state.\n This *CANNOT* crash, so it's maintained internally. 
This\n is not allowed fail loading, or be reloaded.\n '''\n com = shared['commands']\n\n com['stop'] = stop_command\n com['restart'] = stop_command\n com['reload'] = reload_command\n com['plugin'] = plugin_info_command\n com['plugins'] = list_plugins\n com['auth'] = auth_command\n com['disable'] = plugin_toggle\n com['enable'] = plugin_toggle\n\n shared['help']['stop'] = 'Stop this bot and make it quit (admins only) || :stop'\n shared['help']['restart'] = 'Stop this bot and make it restart (admins only) || :restart'\n shared['help']['reload'] = 'Reload this bot\\'s plugins || :reload'\n shared['help']['plugin'] = ('Get information about a plugin '\n '|| :plugin || :plugin wikipedia')\n shared['help']['plugins'] = 'List all plugins available || :plugins'\n shared['help']['auth'] = 'Authenticate yourself || :auth || :auth hunter2'\n shared['help']['enable'] = 'Enable a plugin || :enable || :enable rekt'\n shared['help']['disable'] = 'Disable a plugin || :disable || :disable told'\n\n shared['cooldown']['stop'] = 5\n shared['cooldown']['restart'] = 5\n shared['cooldown']['reload'] = 3\n shared['cooldown']['plugin'] = 2\n shared['cooldown']['plugins'] = 5\n shared['cooldown']['auth'] = 1\n shared['cooldown']['enable'] = 1\n shared['cooldown']['disable'] = 1\n\n\ndef load_plugins(shared: dict):\n ''' (Re)Load all plugins for the bot\n\n This is a bug function, look into breaking it up into smaller things\n '''\n # We don't clear DISABLED_PLUGINS because it's just strings that persist\n # between `reloads and restarts\n ALL_PLUGINS.clear()\n PLUGIN_LIST.clear()\n FAILED_PLUGINS.clear()\n\n shared['commands'].clear()\n shared['help'].clear()\n shared['regexes'].clear()\n shared['re_response'].clear()\n\n load_builtins(shared)\n\n desc = ('.py', 'r', 1)\n\n if version_info >= (3, 4):\n # Python 3.4+\n reload(plugins)\n else:\n # Python 3.2+\n pl_path = '{}/plugins/__init__.py'.format(getcwd())\n print('looking in {}'.format(pl_path))\n py_file = open(pl_path, 'r')\n # The below lines makes flake8 upset. Let's ignore it.\n load_module('plugins', py_file, pl_path, desc) # NOQA\n\n ALL_PLUGINS.clear()\n # if no modules already enabled\n from pkgutil import iter_modules\n for importer, modname, ispkg in iter_modules([join(shared['dir'], 'plugins')]): # pylint: disable=unused-variable\n ALL_PLUGINS.add(modname)\n\n for modname in ALL_PLUGINS:\n print('loading {}'.format(modname))\n ALL_PLUGINS.add(modname)\n try:\n module = None\n\n if version_info >= (3, 4):\n # Python 3.4+\n module = import_module('.' 
+ modname, package='plugins')\n reload(module)\n else:\n # Python 3.2 - 3.3\n pl_path = '{}/plugins/{}.py'.format(getcwd(), modname)\n pl_name = 'plugins.{}'.format(modname)\n #print('looking in {}'.format(pl_path))\n py_file = open(pl_path, 'r')\n module = load_module(pl_name, py_file, pl_path, desc)\n\n # Plugins without __plugin_enabled__ are never loaded.\n if '__plugin_enabled__' in dir(module):\n PLUGIN_LIST.add(module)\n\n if module.__plugin_enabled__:\n continue\n\n DISABLED_PLUGINS.add(modname)\n else:\n print('I found a plugin called \"{}\" that I didn\\'t load.'.format(modname))\n raise ImportError('No __plugin_enabled__ :(')\n\n except ImportError as error:\n print('Couldn\\'t load {}'.format(modname))\n print('Exception: {}'.format(error))\n DISABLED_PLUGINS.add(modname)\n FAILED_PLUGINS.add(modname)\n\n # TODO: Gracefully handle plugin setup fail\n for plug in PLUGIN_LIST:\n short_name = plug.__name__.lstrip('plugins.')\n if short_name not in DISABLED_PLUGINS:\n print('setting up {}'.format(plug))\n plug.setup_resources(shared['conf'], shared)\n plug.setup_commands(shared['commands'])\n\n # Set up stats\n shared['stats']['plugins.available'] = len(ALL_PLUGINS)\n shared['stats']['plugins.disabled'] = len(DISABLED_PLUGINS)\n shared['stats']['plugins.failed'] = len(FAILED_PLUGINS)\n\n\n@require_auth\ndef reload_command(_: tuple, packet: ircp.Packet, shared: dict):\n '''\n Reloads all plugins as well as their data files\n '''\n print('Reload command called')\n load_plugins(shared)\n if len(FAILED_PLUGINS) + len(DISABLED_PLUGINS) == 0:\n return packet.notice('All {} plugins reloaded!'.format(len(PLUGIN_LIST)))\n\n response = [packet.notice('{} plugins were reloaded.'.format(len(PLUGIN_LIST))),\n packet.notice('The following were NOT loaded: ')]\n\n if len(FAILED_PLUGINS) > 0:\n response.append(packet.notice('Fail to Load: ' + ', '.join(FAILED_PLUGINS)))\n if len(DISABLED_PLUGINS) > 0:\n response.append(packet.notice('Disabled: ' + ', '.join(DISABLED_PLUGINS)))\n\n response.append(packet.notice('Please check your logs for further information.'))\n\n return response\n\n\n@require_auth\ndef stop_command(arg: list, packet: ircp.Packet, shared: dict):\n '''\n Stops this bot\n '''\n if arg[0].lower() == 'stop':\n logging.info('Stop command received; stopping bot.')\n return STOP\n elif arg[0].lower() == 'restart':\n logging.info('Restart command received; stopping bot.')\n return RESTART\n else:\n print('wtf just happened here?')\n\n\n@require_auth\ndef list_plugins(arg: tuple, packet: ircp.Packet, shared: dict):\n ''' List all plugins available\n\n :plugins\n '''\n enabled = ALL_PLUGINS.difference(DISABLED_PLUGINS).difference(FAILED_PLUGINS)\n\n output = [packet.notice('Enabled plugins ({}): '.format(len(enabled))),\n packet.notice(', '.join(enabled))]\n if len(DISABLED_PLUGINS) > 0:\n output.append(packet.notice('Disabled plugins ({})'.format(len(DISABLED_PLUGINS))))\n output.append(packet.notice(', '.join(DISABLED_PLUGINS)))\n\n return output\n\n\n@require_auth\ndef plugin_info_command(arg: tuple, packet: ircp.Packet, shared: dict):\n '''\n Does stuff to plugins\n '''\n if len(arg) < 2:\n return packet.notice('You need to specify a plugin to inspect!')\n\n name = arg[1].lower()\n\n if name not in ALL_PLUGINS:\n return packet.notice('{} is not a valid plugin name'.format(name))\n\n is_enabled = not (name in DISABLED_PLUGINS or name in FAILED_PLUGINS)\n\n module = None\n full_name = 'plugins.{}'.format(name)\n for plug in PLUGIN_LIST:\n if plug.__name__ == full_name:\n module = plug\n 
break\n\n output = []\n enabled_text = (lambda x: 'ENABLED' if x else 'DISABLED')(is_enabled)\n output.append(packet.notice('{} is {}'.format(name, enabled_text)))\n if module:\n if '__plugin_description__' in dir(module):\n output.append(packet.notice(module.__plugin_description__))\n if '__plugin_author__' in dir(module):\n output.append(packet.notice('Author: {}'.format(module.__plugin_author__)))\n if '__plugin_version__' in dir(module):\n output.append(packet.notice('Version: {}'.format(module.__plugin_version__)))\n if '__plugin_type__' in dir(module):\n output.append(packet.notice('Plugin Type: {}'.format(module.__plugin_type__)))\n\n return output\n\n\n@require_auth\ndef plugin_toggle(arg: tuple, packet: ircp.Packet, shared: dict):\n ''' Enable or disable plugins\n\n :enable \n :disable \n '''\n if len(arg) < 2:\n return packet.notice('You need to specify a plugin to disable')\n if len(arg) > 2:\n return packet.notice('Too many arguments! The command only uses 1 argument.')\n\n command = arg[0].lower()\n name = arg[1].lower()\n\n if command == 'enable':\n if not (name in DISABLED_PLUGINS or name in FAILED_PLUGINS):\n return packet.notice('Plugin is already enabled. Doing nothing.')\n\n if name in DISABLED_PLUGINS:\n DISABLED_PLUGINS.remove(name)\n if name in FAILED_PLUGINS:\n FAILED_PLUGINS.remove(name)\n\n return packet.notice('Plugin is now enabled.')\n elif command == 'disable':\n if name in DISABLED_PLUGINS:\n return packet.notice('Plugin is already disabled!')\n else:\n DISABLED_PLUGINS.add(name)\n # TODO: Reload plugins to get rid of leftovers\n return packet.notice('Plugin is now disabled!')\n else:\n print('You screwed up.')\n\n\n# LOL, don't @require_auth here!\ndef auth_command(arg: tuple, packet: ircp.Packet, shared: dict):\n ''' Authenticate yourself\n\n :auth \n '''\n if len(arg) < 2:\n return packet.notice('You must specify a password!')\n\n passphrase = arg[1]\n\n if passphrase == shared['conf']['adminpass']:\n if packet.sender in shared['auth']:\n return packet.notice('You are already logged in!')\n else:\n shared['auth'].add(packet.sender)\n print('{} successfully authenticated.'.format(packet.sender))\n return packet.notice('Authentication success!')\n else:\n return packet.notice('Authentication failure. Try again later.')\n\n\ndef get_cooldown(command: str, now: float, shared: dict):\n ''' Get the time that a user should be off of their\n cooldown for using a command\n\n c - the command\n now - the current (unix) time (in seconds)\n shared - shared data dictionary\n '''\n cool = now\n\n if command in shared['cooldown']:\n value = shared['cooldown'][command]\n\n # Handle aliases\n if isinstance(value, str):\n value = shared['cooldown'][value]\n\n return cool + value\n else:\n # Default cooldown is 5 seconds\n return cool + 5\n\n\ndef setup(config):\n \"\"\"\n Main loop for second thread of program\n\n config - dictionary of configuration values for probot\n \"\"\"\n m_config = config\n\n config = {\n 'bot_nick': m_config['nick'],\n 'channels': m_config['channels'],\n 'password': m_config['password'],\n 'logged_in': False,\n 'active': True,\n 'prefix': m_config['prefix'],\n 'admin': m_config['admin'],\n 'last_save': time.time(),\n 'dict_update': False,\n 'intro': m_config['intro'],\n 'adminpass': m_config['adminpass'],\n 'oxr_id': m_config['oxr_id'],\n }\n\n info_str = 'probot version {0}. My owner is {2}{1}{3}.'.format(\n VERSION, config['admin'], CLR_NICK, CLR_RESET)\n\n commands = dict()\n\n # This object acts as a form of persistent memory for the commands. 
This is particularly\n # useful for commands like `def` and `told`\n shared_data = {\n 'conf': config,\n 'info': info_str,\n 'chan': set(),\n 'dir': getcwd(),\n 'commands': commands,\n 'help': dict(),\n 'regexes': dict(),\n 're_response': dict(),\n 'cooldown_user': dict(),\n 'cooldown': dict(),\n 'auth': set(),\n 'recent_messages': deque(maxlen=30),\n 'stats': dict(),\n }\n\n stats = shared_data['stats']\n stats['num_messages'] = 0\n stats['starttime'] = int(time.time())\n stats['commands_run'] = 0\n stats['regex_matches'] = 0\n print('stats:')\n print(shared_data['stats'])\n\n # load plugins. This *has* to happend *after* shared_data is set up\n load_plugins(shared_data)\n print('plugins: {}'.format(PLUGIN_LIST))\n\n return shared_data\n\n\ndef handle_commands(packet: ircp.Packet, shared: dict):\n ''' Handle commands as needed '''\n if not (len(packet.text) > 1 and packet.text[0] == shared['conf']['prefix']):\n return None\n\n now = time.time()\n\n if (packet.sender not in shared['cooldown_user'] or\n now > shared['cooldown_user'][packet.sender]):\n stripped_text = packet.text[1:]\n words = irc_argparse.parse(stripped_text)\n if words[0] != '':\n c = words[0].lower()\n commands = shared['commands']\n if c in commands:\n cool = get_cooldown(c, now, shared)\n shared['cooldown_user'][packet.sender] = cool\n reply = commands[c](words, packet, shared)\n shared['stats']['commands_run'] += 1\n return reply\n elif c[0] in ALLOWABLE_START_CHARS:\n return packet.notice('Sorry, but the command {1}{0}{2} '\n 'does not exist.'.format(c, CLR_HGLT, CLR_RESET))\n else:\n time_left = (shared['cooldown_user'][packet.sender] - int(now))\n return packet.notice('[Cooldown]: You need to wait for {:.1f} seconds '\n 'before you can use a command.'.format(time_left))\n\n\ndef handle_regexes(packet: ircp.Packet, shared: dict):\n ''' Handle regex matching and figuring out the output '''\n now = time.time()\n\n\n for re_name in shared['regexes']:\n regex = shared['regexes'][re_name]\n match = regex.search(packet.text)\n\n if match is not None:\n # Penalize users who try to game the system\n if (packet.sender in shared['cooldown_user'] and\n now < shared['cooldown_user'][packet.sender]):\n penalize_user(packet.sender, shared)\n return None\n\n print('matched to regex \"{}\"'.format(re_name))\n shared['stats']['regex_matches'] += 1\n cool = get_cooldown(re_name, time.time(), shared)\n shared['cooldown_user'][packet.sender] = cool\n return shared['re_response'][re_name](match, packet, shared)\n\n\ndef handle_incoming(line, shared_data):\n ''' Handles, and replies to incoming IRC messages\n\n line - the line to parse\n shared_data - the shared_data with literally everything in it\n '''\n config = shared_data['conf']\n reply = None # Reset reply\n msg_packet = ircp.Packet(line)\n\n # Determine if prefix is at beginning of message\n # If it is, then parse for commands\n if msg_packet.msg_type == 'PRIVMSG':\n # TODO: Let use know they are being penalized for cooldown\n reply = handle_commands(msg_packet, shared_data)\n if reply is None:\n reply = handle_regexes(msg_packet, shared_data)\n elif msg_packet.msg_type == 'NUMERIC':\n if (config['password'] and not config['logged_in'] and\n msg_packet.numeric == ircp.numerics.RPL_ENDOFMOTD):\n reply = [] # pylint: disable=redefined-variable-type\n reply.append(ircp.make_message('identify {} {}'.format(config['bot_nick'],\n config['password']),\n 'nickserv'))\n for channel in config['channels'].split(' '):\n reply.append(ircp.join_chan(channel))\n shared_data['conf']['logged_in'] 
= True # Stop checking for login numerics\n elif msg_packet.numeric == ircp.numerics.RPL_ENDOFMOTD:\n reply = (ircp.join_chan(c) for c in shared_data['conf']['channels'].split(' '))\n\n elif msg_packet.msg_type == 'PING':\n reply = 'PONG {}'.format(msg_packet.host)\n\n elif msg_packet.msg_type == 'NICK':\n print('{} changed nick to {}'.format(msg_packet.sender, msg_packet.nick_to))\n if msg_packet.sender in shared_data['auth']:\n shared_data['auth'].remove(msg_packet.sender)\n shared_data['auth'].add(msg_packet.nick_to)\n print('moved {} to {} on auth list'.format(msg_packet.sender, msg_packet.nick_to))\n\n elif msg_packet.msg_type in ('PART', 'QUIT'):\n if msg_packet.sender in shared_data['auth']:\n shared_data['auth'].remove(msg_packet.sender)\n print('removed {} from auth list'.format(msg_packet.sender))\n\n elif msg_packet.msg_type == 'JOIN':\n if msg_packet.sender == shared_data['conf']['bot_nick']:\n shared_data['chan'].add(msg_packet.target)\n reply = ircp.make_message(shared_data['conf']['intro'], msg_packet.target)\n\n if isinstance(reply, int):\n flag = int(reply)\n reply = [ircp.make_message('kthxbai', c) for c in shared_data['chan']]\n reply.append(flag) # Makes sure to close out.\n\n shared_data['recent_messages'].append(msg_packet)\n shared_data['stats']['num_messages'] += 1\n\n return reply\n\n\ndef main():\n ''' Start up the client and whatnot.\n This is what is run when executing the bot.\n '''\n # Setup logging\n logging.basicConfig(filename='probot.log', level=logging.DEBUG)\n\n # Load configuration file\n logging.info('Loading configuration (main)')\n config = load_json('config.json')\n\n server = config['address']\n port = int(config['port'])\n bot_nick = config['nick']\n logging.info('Loaded config (main)')\n\n shared = setup(config)\n\n # Create second thread\n client = IRCClient(bot_nick, shared)\n restart = client.run(server, port)\n\n if restart > 0:\n print('restarting')\n stdout.flush()\n execl('./probot.py', '')\n\n # Do any needed tying of loose ends\n # Wait for other thread to stop then close pipe\n logging.info('Shutting down m8')\n\n # Shutdown socket gracefully\n print('Socket closed; bye!')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"camconn/probot","sub_path":"probot.py","file_name":"probot.py","file_ext":"py","file_size_in_byte":21816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37409589489","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 1 17:11:25 2020\n\n@author: NorinaSun\n\"\"\"\n\ndef find_unique(full_list):\n unique = [] \n for i in full_list: \n if i not in unique: \n unique.append(i) \n return unique\n\ndef get_diffs(value_list):\n diff_list = []\n for j in range(0,len(value_list)):\n for i in range(j+1,len(value_list)):\n diff = abs(value_list[j] - value_list[i])\n diff_list.append(diff)\n return diff_list\n \n#question two\ndef new_channel_check(new_channel,existing_channels):\n combined_channels = existing_channels + [new_channel]\n combined_diffs = get_diffs(combined_channels)\n \n if len(find_unique(combined_diffs)) != len(combined_diffs):\n valid = False\n print(\"new valid is invalid\")\n else:\n valid = True\n print(\"new value is valid\")\n \n return valid\n \n# =============================================================================\n# channels = [3,5,8]\n# new_channel = 11\n# \n# new_channel_check(new_channel, channels)\n# \n# 
=============================================================================\n\n#question three\n\ndef max_channels(values):\n \n selected_channels = []\n val = values[0]\n selected_channels.append(values[0])\n diff = 1\n diff_list = list(range(1,max(values)))\n \n while val + diff < max(values):\n val = val + diff\n if new_channel_check(val, selected_channels) == True:\n pass\n else: \n while new_channel_check(val, selected_channels) == False:\n val = val + 1\n \n selected_channels.append(val)\n print(\"selected_channels\",selected_channels)\n \n updated_diffs = get_diffs(selected_channels)\n \n for i in updated_diffs:\n try:\n diff_list.remove(i)\n except:\n pass\n \n diff = min(diff_list)\n \n return selected_channels\n\n#validating\ndef validating(results):\n #print(find_unique(results))\n #print(results)\n if len(find_unique(get_diffs(results))) != len(get_diffs(results)):\n print(\"this selection is not valid\")\n else:\n print(\"this selection is valid\")\n\n# =============================================================================\n# test = range(1,200)\n# validating(max_channels(test))\n# \n# =============================================================================\n","repo_name":"NorinaSun/algorithmsoptimization-midterm","sub_path":"problem3_midterm.py","file_name":"problem3_midterm.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43668390970","text":"import pytest\n\nfrom madlib_cli.madlib import read_template, parse_template\n\ndef test_read_template_returns_stripped_string():\n actual = read_template(\"assets/video_game.txt\")\n expected = \"I the {Adjective} and {Adjective} {A First Name}\"\n assert actual == expected\n\ndef test_parse_template():\n actual_stripped, actual_parts = parse_template(\n \"I the {Adjective} and {Adjective} {A First Name}\"\n )\n expected_stripped = \"I the {} and {} {}\"\n expected_parts = (\"Adjective\", \"Adjective\", \"A First Name\")\n\n assert actual_stripped == expected_stripped\n assert actual_parts == expected_parts\n\n# @pytest.mark.skip(\"pending\")\n# def test_merge():\n# actual = merge(\"It was a {} and {} {}.\", (\"dark\", \"stormy\", \"night\"))\n# expected = \"It was a dark and stormy night.\"\n# assert actual == expected\n\n# @pytest.mark.skip(\"pending\")\n# def test_read_template_raises_exception_with_bad_path():\n\n# with pytest.raises(FileNotFoundError):\n# path = \"missing.txt\"\n# read_template(path)","repo_name":"codeslayer-787/madlib-cli","sub_path":"tests/test_madlib.py","file_name":"test_madlib.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37105912633","text":"from torch import nn\nfrom torchvision import models\n\nfrom src.utils.training.loss import NNCLRLoss\nfrom src.utils.support_set import QueueSupportSet\nfrom src.utils.training.metrics import k_accuracy\nfrom src.utils.training.modeling_out import NNCLRModelOutput, NNCLRModelOutputWithLinearEval\n\nclass NNCLR(nn.Module):\n def __init__(self, embed_size=256, queue_size=10_000, projection_hidden_size=2048, prediction_hidden_size=4096, online_eval=True, num_classes=100) -> None:\n super().__init__()\n self.online_eval = online_eval\n\n resnet18 = models.resnet18()\n resnet18.conv1 = nn.Conv2d(\n 3, 64, kernel_size=3, stride=1, padding=2, bias=False\n )\n resnet18.maxpool = nn.Identity()\n self.backbone = 
nn.Sequential(*list(resnet18.children())[:-1])\n hidden_size = resnet18.fc.in_features\n\n self.projection_mlp = nn.Sequential(\n nn.Linear(hidden_size, projection_hidden_size),\n nn.BatchNorm1d(projection_hidden_size),\n nn.ReLU(),\n nn.Linear(projection_hidden_size, projection_hidden_size),\n nn.BatchNorm1d(projection_hidden_size),\n nn.ReLU(),\n nn.Linear(projection_hidden_size, embed_size),\n nn.BatchNorm1d(embed_size)\n )\n\n self.prediction_mlp = nn.Sequential(\n nn.Linear(embed_size, prediction_hidden_size),\n nn.BatchNorm1d(prediction_hidden_size),\n nn.ReLU(),\n nn.Linear(prediction_hidden_size, embed_size)\n )\n\n if self.online_eval:\n self.classifier = nn.Linear(hidden_size, num_classes)\n self.cls_criterion = nn.CrossEntropyLoss()\n self.model_output_cls = NNCLRModelOutputWithLinearEval if self.online_eval else NNCLRModelOutput\n\n self.nearest_neighbor = QueueSupportSet(queue_size=queue_size)\n self.criterion = NNCLRLoss()\n \n def forward(self, x1, x2=None, labels=None):\n # x1 and x1 are two views of the same batch, if both are given we are in the pre-training phase\n # if only x1 is given we are in the online linear evaluation phase for validation set\n\n f1 = self.backbone(x1).squeeze()\n f2 = proj1 = proj2 = loss = None\n\n if x2 is not None:\n f2 = self.backbone(x2).squeeze()\n\n proj1, proj2 = self.projection_mlp(f1), self.projection_mlp(f2)\n pred1, pred2 = self.prediction_mlp(proj1), self.prediction_mlp(proj2)\n\n nn1 = self.nearest_neighbor(proj1)\n nn2 = self.nearest_neighbor(proj2)\n\n loss = self.criterion(preds=(pred1, pred2), neighbors=(nn1, nn2))\n\n # Only update queue with training batches\n if self.training:\n self.nearest_neighbor.update_queue(proj1)\n \n out_dict = {'f1':f1, 'f2':f2,\n 'proj1':proj1, 'proj2':proj2,'loss':loss}\n\n if self.online_eval:\n cls_loss, logits = self.__compute_cls_loss(f1, labels)\n acc1, acc5 = k_accuracy(logits, labels, k=5)\n\n if f2 is not None:\n cls_loss2, logits2 = self.__compute_cls_loss(f2, labels)\n cls_loss = (cls_loss + cls_loss2) / 2\n acc1_f2, acc5_f2 = k_accuracy(logits2, labels, k=5)\n acc1 = (acc1 + acc1_f2) / 2\n acc5 = (acc5 + acc5_f2) / 2\n\n out_dict['cls_loss'] = cls_loss\n out_dict['acc1'] = acc1\n out_dict['acc5'] = acc5 \n \n out = self.model_output_cls(**out_dict)\n\n return out\n \n def __compute_cls_loss(self, features, labels=None):\n logits = self.classifier(features.detach())\n if labels is not None:\n loss = self.cls_criterion(logits, labels)\n return loss, logits\n","repo_name":"mwritescode/nnclr-cifar100","sub_path":"src/models/nnclr.py","file_name":"nnclr.py","file_ext":"py","file_size_in_byte":3791,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"14868462862","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 28 10:42:52 2020\n\n@author: carlosalcantara\n\"\"\"\n\n'''\nCreates separate precision and recall 99% confidence interval plots for each machine\nlearning algorithm based on the feature subset utilized. 
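Each interval below is computed as mean ± z * (std / sqrt(numTestSets)), with
the multiplier hard-coded as 3.291. Note that 3.291 is the two-sided z-value
for 99.9% coverage; 2.576 would correspond to 99%. A minimal sketch of the
computation for one column of scores (assuming numpy as np):

    scores = np.array([0.91, 0.93, 0.90])
    mean = np.mean(scores)
    error = 3.291 * (np.std(scores) / np.sqrt(len(scores)))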
\n\nUsage: ci_viz.py numTestSets path/to/report/dir/ path/to/save/figures/\n'''\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\nimport os\n\n# Check for command line argument\nif len(sys.argv) < 3:\n print('Usage: ci_viz.py numTestSets path/to/report/dir/ path/to/save/figures/')\n sys.exit(-1)\n\nnumTestSets = int(sys.argv[1])\nreportDirectory = sys.argv[2]\nvizDirectory = sys.argv[3]\n\n# if specified result path does not exist, it is created\nif not os.path.exists(vizDirectory):\n os.makedirs(vizDirectory)\n\n# calculate 99% confidence intervals for all ml algorithms using the test sets\nci = {}\nfor ml in ['knn','dt','rf']:\n temp = pd.read_csv(reportDirectory+'/precision_recall_'+ml+'_experiments.csv')\n for metric in ['precision','recall']:\n df = pd.DataFrame()\n \n for i in range(temp.shape[0]):\n for test in range(1,numTestSets+1):\n df = df.append( {'features':temp['features'][i],\n 'type':metric,\n 'test':test,\n 'score':temp.iloc[i]['test'+str(test)+' '+metric]}, ignore_index=True)\n ci[ml] = pd.DataFrame()\n for x in df['features'].unique():\n tempdf = df[df['features']==x]\n mean = np.mean(tempdf['score'])\n error = 3.291 * ( np.std(tempdf['score']) / np.sqrt(numTestSets) )\n ci[ml] = ci[ml].append({'features':x, 'mean':mean, 'error':error}, ignore_index=True)\n\n# find absolute max and min values for errorbars\nmax_lim = 0\nmin_lim = 100\nmax_er = 0\nfor i in ci.keys():\n if max_lim < ci[i]['mean'].max():\n max_lim = ci[i]['mean'].max()\n if min_lim > ci[i]['mean'].min():\n min_lim = ci[i]['mean'].min()\n if max_er < ci[i]['error'].max():\n max_er = ci[i]['error'].max()\nmax_lim = int((max_lim+max_er+.02)*100)/100\nmin_lim = int((min_lim-max_er-.01)*100)/100\n \n# generate plots\nfor ml in ['knn','dt','rf']:\n for metric in ['precision','recall']: \n plt.style.use('seaborn-whitegrid')\n # create errorbars\n ax = plt.errorbar(x=ci[ml]['features'], y=ci[ml]['mean'], yerr=ci[ml]['error'], capsize=5, fmt='.k')\n \n # plot labels\n plt.xlabel('FEATURES')\n plt.ylabel('SCORE')\n plt.ylim(min_lim, max_lim)\n if ml == 'knn':\n plt.title('KNN '+metric)\n elif ml == 'dt':\n plt.title('Decision Tree '+metric)\n else:\n plt.title('Random Forest '+metric)\n # rotate x-axis labels for better readability\n plt.xticks(rotation=90)\n\n # save plots noting if figure represents precision or recall\n if metric == 'recall':\n plt.savefig(vizDirectory+'/ci_'+ml.upper()+'_'+metric, bbox_inches = \"tight\")\n else:\n plt.savefig(vizDirectory+'/ci_'+ml.upper(), bbox_inches = \"tight\")\n plt.close()","repo_name":"carlos-alcan/network_app_classification","sub_path":"ci_viz.py","file_name":"ci_viz.py","file_ext":"py","file_size_in_byte":3187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25795186248","text":"from collections import OrderedDict\nfrom typing import Iterable, cast\n\nfrom docstring_parser import parse\n\nfrom panoramic.cli.husky.core.tel.evaluator.function_specs import (\n AcceptedArg,\n TelTypedFunction,\n)\nfrom panoramic.cli.husky.core.tel.evaluator.functions import TEL_FUNCTIONS\n\n\ndef function_definitions():\n result = {}\n for fun_name, fun in OrderedDict(sorted(TEL_FUNCTIONS.items())).items():\n docstring = parse(fun.__doc__)\n expected_args = []\n phase = None\n return_type = None\n invalid_value = None\n\n if issubclass(fun, TelTypedFunction):\n tfun = cast(TelTypedFunction, fun)\n expected_args = tfun.expected_arg_types\n phase = tfun.phase_spec\n return_type = 
tfun.return_type_spec\n invalid_value = tfun.invalid_value_spec\n\n description = {\n 'arguments': [\n {\n 'name': param.arg_name,\n 'typeName': param.type_name,\n 'description': param.description,\n **(_tel_arg_remote_spec(param.arg_name, expected_args) or {}),\n }\n for param in docstring.params\n ],\n 'raises': [{'typeName': exc.type_name, 'description': exc.description} for exc in docstring.raises],\n 'shortDescription': docstring.short_description,\n 'longDescription': docstring.long_description,\n 'returns': {\n 'typeName': docstring.returns.type_name,\n 'description': docstring.returns.description,\n **(return_type.to_remote_spec() if return_type else {}),\n },\n **({'phase': phase.to_remote_spec()} if phase else {}),\n **({'invalidValue': invalid_value.to_remote_spec()} if invalid_value else {}),\n }\n\n result[fun_name] = description\n\n return result\n\n\ndef _tel_arg_remote_spec(arg_name: str, expected_args: Iterable[AcceptedArg]):\n for arg in expected_args:\n if arg.name == arg_name:\n return arg.to_remote_spec()\n","repo_name":"panoramichq/panoramic-cli","sub_path":"src/panoramic/cli/husky/core/tel/evaluator/function_docs.py","file_name":"function_docs.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"21952311553","text":"# Em Python, a estrutura de decisão é o 'if'. Seu formato segue um modelo de 'estrutura de seleção':\n\n# if :\n# bloco verdadeiro\n\n# Nas situações em que duas alternativas dependem de uma mesma condição, uma de a condição ser verdadeira e a outra de a condição ser falsa, usamos a estrutura de 'seleção composta'.\n# A estrutura de seleção composta segue o modelo do exemplo abaixo:\n\nidade = int(input(\"Digite a idade de seu carro:\"))\nif idade <= 3:\n print(\"Seu carro é novo\")\nelse:\n print(\"Seu carro é velho\")\n\n# É importante notar que devemos escrever o else na mesma coluna do if. 
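# A small illustrative extension (hypothetical thresholds, not part of the
# original example): a third branch via elif follows the same column-alignment
# rule as if/else.
idade = int(input("Digite a idade de seu carro:"))
if idade <= 3:
    print("Seu carro é novo")
elif idade <= 10:
    print("Seu carro é seminovo")
else:
    print("Seu carro é velho")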
Assim, o interpretador reconhece que else se refere a um determinado if.\n\n\n","repo_name":"Rodalsilva/IntroducaoPython","sub_path":"Cap_4/1-if.py","file_name":"1-if.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7057461448","text":"# ===================================\n# Archivo donde se almacenan todas\n# las funciones relacionadas a la\n# transformacion de variables\n# ==================================\n\n\ndef date_list(date: str) -> list:\n # str(del(\",\")) -> list\n # \"2, 5, 6\" -> [2,5,6]\n\n days = date.split(\",\") # Divide el string\n c_days = [] # Se almacenara la lista revisada\n\n for day in days:\n # Se revisa la lista para que sea un entero y este en el rango 0-7\n c_days.append(int(day)) # Se transforma a entero\n if not (int(day) > 0 and int(day) <= 7):\n raise AssertionError\n\n return c_days # Se retorna la lista de enteros\n\n\ndef date_str(days: list or tuple) -> str:\n # list -> str(del(\",\"))\n # [2,3,7] -> \"2,3,7\"\n\n c_days = \"\" # Se almacenara el str\n\n for day in days:\n # elemento por elemento se añade y se suma una coma al final\n c_days += str(day) + \",\"\n\n return c_days[:-1] # Se devuelve sin la ultima coma, ya que sobra\n\n\ndef hour_list(hour: str) -> list:\n # str(del(\":\")) -> list\n # \"23:59\" -> [23, 59]\n\n c_hour = hour.split(\":\") # Se divide el string\n if len(c_hour) != 2:\n raise AssertionError\n\n # Se convierte a numero entero\n c_hour[0] = int(c_hour[0])\n c_hour[1] = int(c_hour[1])\n\n # Se revisa que sea una hora valida\n if not (c_hour[0] < 24 and c_hour[0] >= 0):\n raise AssertionError\n if not (c_hour[1] < 60 and c_hour[1] >= 0):\n raise AssertionError\n\n return c_hour # Se retorna la hora convertida\n","repo_name":"fraco-oxza/classgo","sub_path":"class_go/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27794636414","text":"#Import the basic libraries\r\nimport os\r\nimport pygame\r\nfrom random import randint\r\n\r\npygame.init()\r\nclock = pygame.time.Clock()\r\nFPS = 60\r\n\r\npygame.init()\r\npygame.font.init()\r\n\r\n#Game Music\r\n\r\npygame.mixer.init()\r\npygame.mixer.music.load(\"Gaming music.mp3\")\r\npygame.mixer.music.set_volume(0.5)\r\npygame.mixer.music.play()\r\n\r\n#Colors\r\n\r\nBLACK = (0,0,0)\r\nPURPLE = (200,0,255)\r\nCOLOR = (0,155,200)\r\nCOLORB=(240,20,100)\r\n\r\n#Setting the window faetures and the backround\r\n\r\nwin_width=700\r\nwin_height=500\r\n\r\nwindow = pygame.display.set_mode((win_width, win_height))\r\npygame.display.set_caption(\"Pong\")\r\nbackground = pygame.transform.scale(pygame.image.load(\"Court2.png\"),(win_width,win_height))\r\nwindow.blit(background,(0, 0))\r\n \r\n#Class for the paddles\r\nclass Paddle(pygame.sprite.Sprite):\r\n def __init__(self,color, width, height):\r\n super().__init__()\r\n self.image = pygame.Surface((width, height))\r\n self.image.fill(BLACK)\r\n self.image.set_colorkey(BLACK)\r\n pygame.draw.rect(self.image, color, (0, 0, width, height))\r\n self.rect = self.image.get_rect()\r\n\r\n #The moving functions\r\n def moveUp(self, pixels):\r\n self.rect.y -= pixels\r\n if self.rect.y < 0:\r\n self.rect.y = 0\r\n def moveDown(self, pixels):\r\n self.rect.y += pixels\r\n if self.rect.y > 400:\r\n self.rect.y = 400\r\n\r\npaddleA = Paddle(PURPLE, 10, 100)\r\npaddleA.rect.x = 0\r\npaddleA.rect.y = 
200\r\n\r\npaddleB = Paddle(COLORB, 10, 100)\r\npaddleB.rect.x = 690\r\npaddleB.rect.y = 200\r\n \r\n#The group list of sprites\r\nall_sprites_list = pygame.sprite.Group()\r\n\r\n#The Ball class\r\nclass Ball(pygame.sprite.Sprite):\r\n def __init__(self,ball_image,ball_x,ball_y,ball_speed,speed_x,speed_y):\r\n super().__init__()\r\n self.image = ball_image\r\n self.speed=ball_speed\r\n self.rect=self.image.get_rect()\r\n self.rect.y=ball_y\r\n self.rect.x=ball_x\r\n self.speedx=speed_x\r\n self.speedy=speed_y\r\n def reset(self):\r\n window.blit(self.image, (self.rect.x, self.rect.y))\r\n\r\n#Class for the moovement of the ball\r\nclass BallMove(Ball):\r\n def update(self):\r\n self.rect.y += self.speedy\r\n self.rect.x -= self.speedx\r\n if self.rect.y >= 460:\r\n self.speedy *= -1\r\n if self.rect.y == -20:\r\n self.speedy *= -1\r\n\r\nball_image=pygame.transform.scale(pygame.image.load('PongBall2.png'), (60, 50))\r\nball_x = 320\r\nball_y = 200\r\nball_speed=speed_x=speed_y=5\r\nball = BallMove(ball_image,ball_x,ball_y,ball_speed,speed_y,speed_x)\r\n\r\n#Adding the paddles and the ball in the sprite list\r\nall_sprites_list.add(paddleA)\r\nall_sprites_list.add(paddleB)\r\nall_sprites_list.add(ball)\r\n\r\n#Setting score values\r\nscoreA=0\r\nscoreB=0\r\n\r\n#Winning images for every player\r\nplay1_win=pygame.transform.scale(pygame.image.load('pl1.png'), (300, 200))\r\nplay2_win=pygame.transform.scale(pygame.image.load('Win22.png'), (500, 200))\r\n\r\n#The game loop\r\ngame =True\r\nwhile game:\r\n for e in pygame.event.get():\r\n if e.type == pygame.QUIT:\r\n game = False\r\n window.blit(background,(0, 0))\r\n\r\n #Keys for mooving paddles upsdie down\r\n keys = pygame.key.get_pressed()\r\n if keys[pygame.K_w]:\r\n paddleA.moveUp(5)\r\n if keys[pygame.K_s]:\r\n paddleA.moveDown(5)\r\n if keys[pygame.K_UP]:\r\n paddleB.moveUp(5)\r\n if keys[pygame.K_DOWN]:\r\n paddleB.moveDown(5)\r\n\r\n #Scoring System\r\n if ball.rect.x >= 660:\r\n scoreA += 1\r\n ball.speedx *= -1\r\n score_snd=pygame.mixer.Sound(\"ScoreSound.mp3\")\r\n inicial_som = pygame.mixer.Sound((os.path.join('ScoreSound.mp3')))\r\n score_snd.play()\r\n if ball.rect.x == -20:\r\n scoreB += 1\r\n ball.speedx *= -1\r\n score_snd=pygame.mixer.Sound(\"ScoreSound.mp3\")\r\n inicial_som = pygame.mixer.Sound((os.path.join('ScoreSound.mp3')))\r\n score_snd.play()\r\n\r\n #Fonts for score\r\n\r\n font1 = pygame.font.Font(None, 60)\r\n score = font1.render(str(scoreA) , 1, (0, 50, 255))\r\n window.blit(score,(130,30))\r\n \r\n font2 = pygame.font.Font(None, 60)\r\n score = font2.render(str(scoreB) , 1, (100, 255, 255))\r\n window.blit(score,(540,30))\r\n\r\n #Winning sprites for player 1 and player 2\r\n\r\n if scoreA==10:\r\n window.blit(play1_win,(50,100))\r\n ball.rect.x=325\r\n ball.rect.y=200\r\n paddleA.rect.y=200\r\n paddleB.rect.y=200\r\n if scoreB==10:\r\n window.blit(play2_win,(250,80))\r\n ball.rect.x=315\r\n ball.rect.y=200\r\n paddleB.rect.y=200\r\n paddleA.rect.y=200\r\n\r\n #Collide conditions with baddle and ball\r\n\r\n if pygame.sprite.collide_mask(ball, paddleA):\r\n ball.speedx=ball.speedx * -1\r\n paddle_snd=pygame.mixer.Sound(\"PaddleSound.mp3\")\r\n inicial_som = pygame.mixer.Sound((os.path.join('PaddleSound.mp3')))\r\n paddle_snd.play()\r\n if pygame.sprite.collide_mask(ball, paddleB):\r\n ball.speedx=ball.speedx * -1\r\n paddle_snd=pygame.mixer.Sound(\"PaddleSound.mp3\")\r\n inicial_som = pygame.mixer.Sound((os.path.join('PaddleSound.mp3')))\r\n paddle_snd.play()\r\n\r\n #Update the sprite list\r\n 
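    # Group.update() below calls .update() on every sprite it holds; only
    # BallMove defines an update method (the paddles move via the key handlers
    # above), so this single call is what advances the ball each frame.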
all_sprites_list.update()\r\n\r\n ball.reset()\r\n\r\n #Draw the net\r\n pygame.draw.line(window, COLOR, (349, 0), (349, 500), 5)\r\n \r\n #Display the Sprites on screen\r\n all_sprites_list.draw(window) \r\n\r\n clock.tick(FPS)\r\n pygame.display.update()","repo_name":"PetrosK21/Pong-Game-v-1.0-","sub_path":"Repo1/PongGame.py","file_name":"PongGame.py","file_ext":"py","file_size_in_byte":5437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38560944394","text":"# Given a binary tree, imagine you're standing to the right of the tree.\n# Return an array of the values of the nodes you can see ordered from top to bottom\n\nimport queue\n\n\nclass Node:\n def __init__(self, data):\n self.left = None\n self.right = None\n self.data = data\n\n\nclass BinaryTree:\n def __init__(self):\n self.root = None\n self.elements = 0\n\n def add(self, value):\n node = Node(value)\n if self.root is None:\n self.root = node\n self.elements += 1\n else:\n current = self.root\n\n while current:\n parent = current\n\n if current.data > value:\n current = current.left\n\n if current is None:\n parent.left = node\n return\n\n elif current.data < value:\n current = current.right\n\n if current is None:\n parent.right = node\n return\n elif current.data == value:\n print(\"Unable to add duplicates\")\n return\n\n def print_in_order(self, node):\n if node:\n self.print_in_order(node.left)\n print(f\"{node.data}\")\n self.print_in_order(node.right)\n\n def print_post_order(self, node):\n if node:\n self.print_post_order(node.left)\n self.print_post_order(node.right)\n print(f\"{node.data}\")\n\n def print_preorder(self, node):\n if node:\n print(f\"{node.data}\")\n self.print_post_order(node.right)\n self.print_post_order(node.left)\n\n def right_view_bfs(self, root):\n if self.root is None:\n return []\n # Holds the resulting list with level ordered items\n result = []\n # Queue that holds all nodes in each level\n q = queue.Queue()\n\n # Add root to the queue\n q.put(root)\n\n # While there are nodes in the queue\n while q.empty() is not True:\n # The number of items in the queue\n length = q.qsize()\n # The count of how many items have been popped from the queue\n count = 0\n # List that holds values per level\n current_vals = []\n while count < length:\n # Get the node from the queue\n current_node = q.get()\n # Save its current value and add it to the placeholder list\n current_vals.append(current_node.data)\n\n # If there are nodes to the left or right, add them to the queue\n\n if current_node.left:\n q.put(current_node.left)\n if current_node.right:\n q.put(current_node.right)\n\n # Increase count\n count += 1\n # Only save the last number in each level\n result.append(current_vals[-1])\n\n # The following returns all numbers from the left\n # result.append(current_vals[0])\n\n return result\n\n # 1.Prioritize finding right side nodes\n # 2. Keep track of the level of our nodes\n\n def right_view_dfs(self, node, level, result):\n ''' Will find the right-facing nodes of the tree as if looking from the right'''\n # If the node exists\n if node:\n # If it is the first number we reach per level (PRIORITIZING GOING RIGHT FIRST)\"\n if len(result) == level:\n # Add the node's value to the result list\n result.append(node.data)\n # Move to the right and then left recursively and increase the level each call. 
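            # For the sample tree built below from 50, 41, 10, 30, 25, 6, 7, 69,
            # both the BFS and this right-first DFS should yield
            # [50, 69, 10, 30, 25] -- one visible node per level.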
Maintain the result\n self.right_view_dfs(node.right, level + 1, result)\n self.right_view_dfs(node.left, level + 1, result)\n\n return result\n\n\n__name__ = \"__main__\"\n\nbt = BinaryTree()\nbt.add(50)\nbt.add(41)\nbt.add(10)\nbt.add(30)\nbt.add(25)\nbt.add(6)\nbt.add(7)\nbt.add(69)\n\nprint(bt.right_view_bfs(bt.root))\n\n# print(\"In order:\")\n# bt.print_in_order(bt.root)\n# print(\"Post-order:\")\n# bt.print_post_order(bt.root)\n# print(\"Pre-order:\")\n# bt.print_preorder(bt.root)\nprint()\nresult = []\nresult = bt.right_view_dfs(bt.root, 0, result)\nprint(result)\n","repo_name":"AK0613/BT_Right_Side_View","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73584245953","text":"from django.contrib import admin\nfrom django.urls import path\nfrom . import views\nfrom .views import post_create,destination_create\nfrom .views import (\n PostListView,PostDetailView,PostDeleteView,PostUpdateView,\n PostCreateView, gallerycreate,hotels,AboutView,ContactView,test_form,del_testform,update_form,DestinationList,DestinationDetail,thingstodo_create,)\nfrom .forms import UserLoginForm\nfrom django.contrib.auth import views as auth_views\nurlpatterns = [\n path('', views.home, name='home'),\n path('accounts/login/',auth_views.LoginView.as_view(template_name = 'login.html',authentication_form = UserLoginForm),name = 'login'),\n path('accounts/profile/',views.profile, name = 'profile'),\n path('newpost/',views.post_create,name = 'new-post'),\n path('destinationscreate/',views.destination_create,name = 'destination-create'),\n path('thingstodocreate/', views.thingstodo_create, name='thingstodo-create'),\n path('postcreate/',PostCreateView.as_view(),name = 'post-create'),\n path('postlist/',PostListView.as_view(),name = 'post-list'),\n path('postdetail//', PostDetailView.as_view(), name='post-detail'),\n path('postdelete//',PostDeleteView.as_view(),name = 'post-delete'),\n path('postupdate//',PostUpdateView.as_view(),name = 'post-update'),\n path('dashboard/', views.dashboard, name = 'dashboard'),\n path('addgallery/', views.gallerycreate, name = 'gallery-add'),\n path('gallery/',views.gallery, name = 'gallery'),\n path('hotels/',views.hotels, name = 'hotels'),\n path('about/',AboutView.as_view(),name = 'about'),\n path('contact/',ContactView.as_view(),name = 'contact'),\n path('testform/',views.test_form,name ='testform'),\n path('testform/delete//',views.del_testform,name = 'testform-delete'),\n path('testform/update//',views.update_form,name = 'testform-update'),\n path('destinations/',DestinationList.as_view(),name = 'destinations'),\n path('destinations/detail//',DestinationDetail.as_view(),name = 'destinations-detail'),\n path('password-reset/',auth_views.PasswordResetView.as_view(template_name = 'password-reset.html'),name = 'password-reset')\n ]\n","repo_name":"ByanjuP/bdhkl","sub_path":"bdhkl_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10020999411","text":"\"\"\"\nRESTful API Role resources\n--------------------------\n\"\"\"\n\nimport logging\n\nfrom flask import request\nfrom flask_restplus import Resource\nfrom app.restplus import api_v1 as api\nfrom .models import db, Role\nfrom app.modules.serializers import page_of_roles, role, role_with_users\nfrom app.modules.parsers import pagination_arguments\n\nlog = 
logging.getLogger(__name__)\nns = api.namespace('roles', description=\"Operations related to Roles\")\n\n\n@ns.route('/')\nclass Roles(Resource):\n \"\"\"\n Manipulations with roles.\n \"\"\"\n\n @api.expect(pagination_arguments)\n @api.marshal_with(page_of_roles)\n def get(self):\n \"\"\"\n List of roles.\n\n Returns a list of roles starting from ``page`` limited by ``per_page``\n parameter.\n \"\"\"\n args = pagination_arguments.parse_args(request)\n page = args.get('page', 1)\n per_page = args.get('per_page', 10)\n\n role_query = Role.query\n roles_page = role_query.paginate(page, per_page, error_out=False)\n\n return roles_page\n\n @api.expect(role, validate=True)\n @api.response(201, 'Role successfully updated.')\n def post(self):\n \"\"\"\n Registers a new role.\n \"\"\"\n data = request.json\n\n name = data.get('name')\n description = data.get('description')\n role = Role(name=name,\n description=description)\n db.session.add(role)\n db.session.commit()\n\n return None, 201\n\n\n@ns.route('/')\n@api.response(404, 'Role not found.')\nclass UserById(Resource):\n\n @api.marshal_with(role_with_users)\n def get(self, id):\n \"\"\"\n Returns a user by id.\n \"\"\"\n return Role.query.filter(Role.id == id).one()\n\n @api.expect(role, validate=True)\n @api.response(204, 'Role successfully updated.')\n def put(self, id):\n \"\"\"\n Updates a user and assign a specific role by id.\n \"\"\"\n data = request.json\n role = Role.query.filter(Role.id == id).one()\n if 'description' in data:\n role.description = data.get('description')\n if 'name' in data:\n role.name = data.get('name')\n db.session.add(role)\n db.session.commit()\n return None, 204","repo_name":"CockyAmoeba/flask-restplus-leave-demo","sub_path":"app/modules/roles/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"20460940456","text":"import random\nfrom sims4.tuning.tunable import TunableList\nfrom ui.ui_dialog import UiDialogOkCancel\nimport services\nimport sims\nimport sims4.commands\n\nclass CheatCommandTuning:\n __qualname__ = 'CheatCommandTuning'\n ENABLE_CHEATS_DIALOG = UiDialogOkCancel.TunableFactory()\n JOKES = TunableList(str)\n\n@sims4.commands.Command('AutomationTestingCheats', command_type=sims4.commands.CommandType.Automation)\ndef automation_test_cheats(enable:bool=False, _connection=None):\n tgt_client = services.client_manager().get(_connection)\n output = sims4.commands.CheatOutput(_connection)\n household = tgt_client.household\n household.cheats_enabled = enable\n if enable:\n output('Cheats are enabled.')\n else:\n output('Cheats are disabled.')\n\n@sims4.commands.Command('testingcheats', command_type=sims4.commands.CommandType.Live)\ndef test_cheats(enable:bool=False, _connection=None):\n tgt_client = services.client_manager().get(_connection)\n output = sims4.commands.CheatOutput(_connection)\n household = tgt_client.household\n cheats_active = household.cheats_enabled\n if cheats_active == enable:\n if enable:\n output('Cheats are already enabled.')\n else:\n output('Cheats are already disabled.')\n return False\n household.cheats_enabled = enable\n if enable:\n output('Cheats are enabled.')\n else:\n output('Cheats are disabled.')\n return True\n\n@sims4.commands.Command('setage', command_type=sims4.commands.CommandType.Live)\ndef set_age(age:str='Adult', _connection=None):\n output = sims4.commands.Output(_connection)\n tgt_client = 
services.client_manager().get(_connection)\n    if tgt_client.active_sim is None:\n        output('Set Sim Age Failure: No Sim Selected')\n        return False\n    age_to_set = sims.sim_info_types.Age.ADULT\n    if age == 'Child':\n        age_to_set = sims.sim_info_types.Age.CHILD\n    elif age == 'Teen':\n        age_to_set = sims.sim_info_types.Age.TEEN\n    elif age == 'Young Adult':\n        age_to_set = sims.sim_info_types.Age.YOUNGADULT\n    elif age == 'Adult':\n        age_to_set = sims.sim_info_types.Age.ADULT\n    elif age == 'Elder':\n        age_to_set = sims.sim_info_types.Age.ELDER\n    else:\n        output('Set Sim Age Failure: Invalid Age. Options are: Child, Young Adult, Adult, Elder')\n        return False\n    tgt_client.active_sim.sim_info.advance_age(force_age=age_to_set)\n    output('Selected Sim Set to Age: ' + age)\n    return True\n\n","repo_name":"johndpope/sims4-ai-engine","sub_path":"simulation/server_commands/cheat_commands.py","file_name":"cheat_commands.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"41648330027","text":"# 실습 y2를 없애서 다(2):1로 바꾸자. 분기를 안한다는 힌트\n\nimport numpy as np\n\n#1. 데이터 제공\nx1 = np.array([range(100), range(301, 401), range(1, 101)])\nx2 = np.array([range(101, 201), range(411,511), range(100,200)])\ny1 = np.array([range(711, 811), range(1,101), range(201,301)])\n\nx1 = np.transpose(x1)\nx2 = np.transpose(x2)\ny1 = np.transpose(y1)\n\nfrom sklearn.model_selection import train_test_split\nx1_train, x1_test, x2_train, x2_test, y1_train, y1_test = train_test_split(x1, x2, y1, train_size=0.8, shuffle = False)\n#이렇게 세개도 가능\n\n#2. 모델 구성\nfrom tensorflow.keras.models import Sequential, Model\nfrom tensorflow.keras.layers import Dense, Input\n\n# 모델 1\ninput1 = Input(shape=(3,))\ndense1 = Dense(10, activation = 'relu')(input1)\ndense1 = Dense(5, activation = 'relu')(dense1)\n\n# 모델 2\ninput2 = Input(shape=(3,))\ndense2 = Dense(10, activation = 'relu')(input2)\ndense2 = Dense(5, activation = 'relu')(dense2)\ndense2 = Dense(5, activation = 'relu')(dense2)\ndense2 = Dense(5, activation = 'relu')(dense2)\n\n# 모델 병합 / concatenate = 사슬처럼 엮다\nfrom tensorflow.keras.layers import concatenate, Concatenate\n\nmerge1 = concatenate([dense1, dense2])\nmiddle1 = Dense(30)(merge1)\nmiddle1 = Dense(10)(middle1)\nmiddle1 = Dense(10)(middle1) \n\n#모델 분기 #이번에는 나누는게 아니니까 아웃풋을 한 모델만 두어도 된다.\noutput1 = Dense(30)(middle1)\noutput1 = Dense(7)(output1)\noutput1 = Dense(3)(output1)\n\n# 모델 선언\nmodel = Model(inputs = [input1, input2], outputs = output1)\nmodel.summary()\n\n#3. 컴파일, 훈련\nmodel.compile(loss='mse', optimizer='adam', metrics=['mse'])\nmodel.fit([x1_train, x2_train], y1_train, epochs=100, batch_size=1, validation_split=0.2, verbose=0)\n\n#4. 평가, 예측\nloss, metrics = model.evaluate([x1_test, x2_test], y1_test, batch_size=1)\n\nprint('model.metrics_names: ', model.metrics_names)\nprint('loss, metrics: ', loss, metrics)\n\ny1_predict= model.predict([x1_test, x2_test])\n\nprint('================================')\nprint('y1_predict :\\n', y1_predict)\nprint('================================')\n\n#y1_test, y2_test와 비교하자 RMSE랑 R2로\nfrom sklearn.metrics import mean_squared_error\ndef RMSE(y_test, y_predict):\n    return np.sqrt(mean_squared_error(y_test, y_predict))\n\nRMSE = RMSE(y1_test, y1_predict)\nprint('RMSE: ', RMSE)\n# RMSE정의에 굳이 y1이라고 안 해줘도 괜찮다. 
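# (For reference, a hedged numpy-only equivalent of the two metrics used here:
#  rmse = np.sqrt(np.mean((y1_test - y1_predict) ** 2))
#  r2   = 1 - np.sum((y1_test - y1_predict) ** 2) / np.sum((y1_test - np.mean(y1_test)) ** 2)
#  sklearn's r2_score averages per-output scores by default, so results can
#  differ slightly for this multi-column y1.)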
r2도 마찬가지\n\nfrom sklearn.metrics import r2_score\n\nR2= r2_score(y1_test, y1_predict)\nprint('R2: ', R2)\n","repo_name":"YoungriKIM/STUDY","sub_path":"keras/keras15_ensemble2.py","file_name":"keras15_ensemble2.py","file_ext":"py","file_size_in_byte":2530,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"71113072836","text":"from ctyper import PixelOffset, Number\n\n\nclass Offsetter:\n def __init__(self, o: PixelOffset, scale: float) -> None:\n self.offset = o\n self.scale = scale\n\n def calc(self, x: Number, y: Number) -> tuple[int, int]:\n return (\n int(self.offset[0] + x * self.scale),\n int(self.offset[1] + y * self.scale),\n )\n","repo_name":"Sped0n/vega","sub_path":"lumanos/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27510206503","text":"\"\"\" \nWrite a function, level_averages, that takes in the root of a binary tree that contains number values. \nThe function should return a list containing the average value of each level.\n\"\"\"\n\nclass Node:\n def __init__(self, val, left=None, right=None) -> None:\n self.val = val \n self.left = left \n self.right = right \n\ndef level_averages(node):\n if node is None:\n return [] \n\n queue = [(node, 0)]\n average = [] \n average1 = []\n\n while queue:\n current, level = queue.pop(0)\n\n if len(average) == level:\n average.append([current.val]) \n else:\n average[level].append(current.val) \n\n if current.left is not None:\n queue.append((current.left, level+1))\n if current.right is not None:\n queue.append((current.right, level+1))\n\n for level in average:\n average1.append(sum(level)/len(level))\n \n return average1\n\n# Driver code\n# Test case 01\na = Node(3)\nb = Node(11)\nc = Node(4)\nd = Node(4)\ne = Node(-2)\nf = Node(1)\n\na.left = b\na.right = c\nb.left = d\nb.right = e\nc.right = f\n\n# 3\n# / \\\n# 11 4\n# / \\ \\\n# 4 -2 1\n\nprint(level_averages(a))\n","repo_name":"monika0603/glowing-spork","sub_path":"practice_trees/level_averages_BT.py","file_name":"level_averages_BT.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2338964660","text":"import glob, os\nimport numpy as np\nimport pandas as pd\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nimport h5py\nfrom src.util import read_spike_mat, read_behav_mat, read_behav_congent\n\n# misc\nimport warnings\n\n# ran locally on the network drive; uploaded here for book keeping\n\nfreq = 1000\nspikeMAT_path = \"/mnt/s/psychiatry/Depersonalisation_Psychosis/SUBJECT_DATA/BEHAV_SPIKE_COLLATED/{}.mat\"\ndataMAT_path = (\n \"/mnt/s/psychiatry/Depersonalisation_Psychosis/SUBJECT_DATA/BEHAV_SPIKE/{}/{}.mat\"\n)\ndataCOG_path = (\n \"/mnt/s/psychiatry/Depersonalisation_Psychosis/SUBJECT_DATA/BEHAV_SPIKE/{}/{}*.wri\"\n)\n\n# Subject ID\nwith open(\"./spike_prepro_list.txt\") as f:\n ID_list = [l.strip() for l in f.readlines()]\n\n\nfor subj in ID_list:\n # subj = ID_list[1]\n\n # compile all behavioural data logs into CSV\n print(subj)\n if subj[:4] == \"HC16\": # cogent file only\n cog_path = glob.glob(dataCOG_path.format(subj, subj))\n df = read_behav_congent(cog_path[0])\n else:\n beh_path = dataMAT_path.format(subj, subj)\n df = read_behav_mat(beh_path)\n\n df.to_csv(\n \"/home/bsms9gxx/Psychosis_Sarah/data/processed/behaviour/{}.csv\".format(subj)\n 
)\n","repo_name":"htwangtw/depersonalisation","sub_path":"data/code/batch_cogent2csv.py","file_name":"batch_cogent2csv.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42898962196","text":"class XprintTorch:\n '''\n A tool for debuging tensorflow variables.\n '''\n\n def __init__(self):\n self.content = ''\n self.alreadyprint = False\n self.alreadyprintstructure = False\n self.alreadycollected = False\n self.watchmodel = None\n self.watchoptimizer = None\n\n def col(self, module=None, inputs=None):\n '''\n Use \"xp.collect( )\" to collect variables.\n '''\n if module is not None:\n self.content += '\\n\\n'+str(module)\n for k, v in module._parameters.items():\n self.content+='\\n\\t- has param **\"'+k+'\"** with shape '+str(v.shape)\n\n if inputs is not None:\n self.content += '\\n- The input has shape of '+str(inputs.shape)\n \n def print(self, show=True, mustmanytimes=False):\n if mustmanytimes or not self.alreadyprint:\n self.alreadyprint = True\n if show:\n print(self.content)\n return self.content\n else:\n return ''\n\nxpt = XprintTorch()\n\ndef p_stru(torch_module, treelevel=2):\n '''打印pytorch里面的模块层次结构\n treelevel是打印内部多少层,-1为全部层都打印\n 可以用p_stru(model._module['xx'], 2)打印子层\n '''\n res_str = stru(torch_module, treelevel=treelevel)\n print(res_str)\n\ndef stru(torch_module, treelevel=-1, intend=1):\n r\"\"\"打印pytorch里面的模块层次结构\n\n treelevel是打印内部多少层,-1为全部层都打印\n \n 可以用p_stru(model._modules['xx'], 2)打印子层\n \"\"\"\n stru_str = ''\n stru_str += torch_module.__class__.__name__\n childs = list(torch_module._modules.keys())\n # stru_str += ':['+', '.join(childs)+']'\n stru_str += ':'+str(childs)\n for index, child_module in enumerate(torch_module.children()):\n if treelevel != 0:\n child_stru_str = stru(child_module, treelevel=treelevel-1, \n intend=intend+1)\n stru_str += '\\n'+' '*intend+'%d.'%(index+1)+\\\n child_stru_str+' ('+childs[index]+')'\n else:\n child_stru_str = 'end'\n # print(stru_str)\n # if torch_module.__class__.__name__ == 'MaskRCNN':\n # import pdb; pdb.set_trace()\n return stru_str\n\n","repo_name":"xianpf/xtool","sub_path":"xtool/xpt.py","file_name":"xpt.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"12735553663","text":"from PIL import Image\nimport os\n\nimages = [f for f in os.listdir() if f.endswith('jpg')]\n\ncount = 0\nfor image in images:\n count = str(count)\n with Image.open(image) as img:\n img.convert('RGB').save(f\"{count}.jpg\".replace('.png', '.jpg'), 'JPEG')\n os.remove(image)\n count = int(count)\n count += 1","repo_name":"MuhammadShahzeb123/Image_Processing","sub_path":"Turner.py","file_name":"Turner.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17386259827","text":"############################################################################\r\n## $ python3 ##\r\n## ##\r\n############################################################################\r\n\r\nimport csv\r\nimport time\r\n\r\n\r\n# Opens a text file that holds the appropriate CSV files to read for data\r\n# processing. 
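# The list file is expected to hold one CSV filename per line, e.g. (the names
# here are hypothetical):
#   specimen_A.csv
#   specimen_B.csv
# Trailing newlines are stripped off below before each file is opened.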
\r\nfiles_to_use = open(\"files-to-read.txt\",'r')\r\nfiles_list = files_to_use.readlines()\r\nflist = []\r\nfor i in range(len(files_list)):\r\n flist.append(files_list[i].rstrip('\\n'))\r\nfiles_to_use.close()\r\n\r\n# Generating time stamp for file name\r\ntimestr = time.strftime(\"%Y%m%d-%H%M%S\")\r\n\r\n# Create a new text file to hold fractures \r\ncheese = open(\"fractures-\" + timestr + \".txt\",'w+')\r\n\r\nN = 20 # number of points to running average\r\nchange = round(1/100.0,3) # amount of change in load for fracture detection\r\n # generally leave at 0.010 \r\n\r\n\r\n# This section reads through all the files in the read text file to search\r\n# for fractures. \r\n\r\nheader_skip = 42 # Number of rows to skip to enter header section of\r\n # data file. Typically around 42 to get to labels.\r\n\r\n \r\nfor j in range(0, len(files_list)):\r\n\r\n with open(flist[j], newline='\\r\\n') as csvfile:\r\n # skipping preamble rows to header row\r\n for i in range(header_skip):\r\n next(csvfile)\r\n # reading csv data\r\n reader = csv.reader(csvfile)\r\n my_Data = list(reader)\r\n\t\r\n cheese.write(str(flist[j]) + '\\n' +'\\n') # writes the name of the file\r\n\r\n\r\n numrows = len(my_Data)\r\n numcols = len(my_Data[0])\r\n\r\n Axial = [] # axial counter holder array\r\n\r\n # iterating through the column of data\r\n for k in range(0, numcols):\r\n\r\n if \"Axial\" in my_Data[0][k]:\r\n for w in range(2, numrows):\r\n Axial.append(my_Data[w][k])\r\n\r\n run_avg = [] # holder for running average\r\n run_sum = 0 # re-zeroing the running sum for averaging.\r\n\r\n if \"Load\" in my_Data[0][k]:\r\n\r\n cheese.write(\"---------------------\" + '\\n')\r\n\r\n # checking to see if the column of data is on the valley side.\r\n # if on the peak side, then skipping.\r\n if float(my_Data[50][k]) > -0.5:\r\n continue\r\n \r\n for w in range(2, numrows):\r\n\r\n currentForce = round(float(my_Data[w][k]),3)\r\n\r\n if w < N+2:\r\n run_sum += currentForce\r\n\r\n if w > N+5:\r\n run_avg = round(run_sum / N,3)\r\n\r\n## print(\"running avg = \")\r\n## print(run_avg)\r\n##\r\n## time.sleep(.1)\r\n\r\n # Fracture detection, if the current read values is\r\n # greater than the running average, the write the \r\n # location into the text file.\r\n if currentForce > (1 - change) * run_avg:\r\n cheese.write(\"Fracture found at \" +\r\n Axial[w-2] +\r\n \" cycles, for \" +\r\n my_Data[0][k] + '\\n')\r\n\r\n # Updating the running sum.\r\n run_sum += round(currentForce -\r\n float(my_Data[w-N][k]),3)\r\n\r\n # Writes a dividing line between files being read. 
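    # Detection recap: run_avg tracks the mean of the last N = 20 load samples,
    # and a fracture is flagged when currentForce > (1 - change) * run_avg,
    # i.e. when the (negative) valley load loses more than ~1% of its
    # running-average magnitude.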
\r\n cheese.write(\"~~~~ ---- ~~~~ ---- ~~~~~ ---- ~~~~ ---- ~~~~\" +\r\n '\\n')\r\n\r\n# closing the fracture.txt file after writing all the lines to it.\r\ncheese.close()\r\n","repo_name":"Patrae/fracturedetection","sub_path":"CSVfractureDetector.py","file_name":"CSVfractureDetector.py","file_ext":"py","file_size_in_byte":3687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41619568607","text":"#coding: utf-8\n#2018.10\nimport dataset\n\ndef txt2table(fn, cols, dbname, tablename):\n db = dataset.connect('sqlite:///%s' %dbname)\n infile = open(fn, 'r', encoding='utf-8')\n lines = infile.readlines()\n infile.close()\n\n idx = 0\n for l in lines:\n tab = l.split('\\t')\n if len(tab) < len(cols) :\n print('txt file columns !=', cols)\n return\n record = {}\n for i, col in enumerate(cols) :\n record[col] = tab[i]\n record['idx'] = idx\n idx += 1\n\n db[tablename].upsert(record, ['idx'])\n\n\ndef table2txt(dbname, tablename, cols, fn):\n db = dataset.connect('sqlite:///%s' %dbname)\n outfile = open(fn, 'w', encoding='utf-8')\n\n records = db[tablename].all()\n for rec in records:\n item = []\n for col in cols :\n item.append(rec[col])\n outfile.write('%s\\n' %('\\t'.join(item)))\n outfile.close()\n\ndef xlsx2table(dbname, tablename, fn):\n import pandas as pd\n import sqlite3 #pandas.to_sql() uses sqlite3 or sqlalchemy\n condb = sqlite3.connect(dbname)\n idata = pd.read_excel(fn, dtype=str) #without dtype(str), '00001' is to be '1'\n idata.to_excel('tmp.xlsx')\n idata.to_sql(name=tablename, con=condb, if_exists='replace') #'fail', 'replace'. 'append'\n condb.close()\n return\n\ndef table2xlsx(dbname, tablename, fn):\n #sudo pip3 install openpyxl\n import pandas as pd\n db = dataset.connect('sqlite:///%s' %dbname)\n records = db[tablename].all()\n columns = db[tablename].columns\n all_rec = [rec for rec in records]\n odata = pd.DataFrame(all_rec, columns=columns)\n outfile = pd.ExcelWriter(fn)\n odata.to_excel(outfile, 'meta')\n outfile.save()\n\nif __name__ == \"__main__\":\n try:\n #Week 7:2018.10.16\n '''\n txt2table('data/한국학.txt', ['date', 'url', 'title', 'btext'], 'data/kdb', 'kstudies')\n txt2table('data/한글날.txt', ['date', 'url', 'title', 'btext'], 'data/kdb', 'HangulDay')\n table2txt('data/kdb.db', 'HangulDay', ['title', 'btext'], 'mydata.txt')\n '''\n #Week 9\n #Task1: Make xlxs file from kstudies table in kdb.db\n '''xlsx2table('data/kdb.db', 'students', 'students.xlsx')\n table2xlsx('data/kdb.db', 'students', 'data/students_db.xlsx')\n '''\n #week 14\n table2txt('data/periodicals.db', 'article_body_ma_013', ['body'], 'ma_013.txt')\n except Exception as e:\n print(e)\n","repo_name":"eunkyoung-jo/BDA_KS","sub_path":"util_sqldata.py","file_name":"util_sqldata.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36599775749","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport torchvision.transforms as transforms\r\n\r\n\r\n# 工具类\r\nclass Other(object):\r\n # 展示图像的函数\r\n def imshow(self, img):\r\n self.img1 = img / 2 + 0.5 # unnormalize\r\n self.npimg = self.img1.numpy()\r\n\r\n # plt.imshow()函数负责对图像进行处理,并显示其格式\r\n plt.imshow(np.transpose(self.npimg, (1, 2, 0)))\r\n # plt.show()则是将plt.imshow()处理后的函数显示出来\r\n plt.show()\r\n\r\n # 将tensor格式的图片可视化\r\n def tensorToimg(self, tensor):\r\n self.unloader = transforms.ToPILImage()\r\n self.image = tensor.cpu().clone() # we clone the tensor to not do changes on 
it\r\n self.image = self.image.squeeze(0) # remove the fake batch dimension\r\n self.image = self.unloader(self.image)\r\n plt.imshow(self.image)\r\n plt.show()\r\n\r\n # 该标量值张量内的单个正确预测数\r\n def get_num_correct(preds, labels):\r\n return preds.argmax(dim=1).eq(labels).sum().item()\r\n","repo_name":"orange51-CL/FCN","sub_path":"other.py","file_name":"other.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8376540538","text":"import numpy as np\r\n\r\ndef count_results(log_dir):\r\n fdr, tpr, fpr, shd, nnz, cor_e = [],[],[],[],[],[]\r\n with open(log_dir, 'r') as f:\r\n contents = f.readlines()\r\n for line in contents:\r\n if 'after pruning' in line:\r\n info = line.rstrip().split(': ')[-1].split(', ')\r\n fdr_val = float(info[0][4:])\r\n fdr.append(fdr_val)\r\n tpr.append(float(info[1][4:]))\r\n fpr.append(float(info[2][4:]))\r\n shd.append(int(info[3][4:]))\r\n nnz_val = int(info[4][4:])\r\n nnz.append(nnz_val)\r\n cor_e.append(round((1-fdr_val)*nnz_val))\r\n return fdr, tpr, fpr, shd, nnz, cor_e\r\n\r\ndef best_result(log_dir, exp):\r\n fdr, tpr, fpr, shd, nnz, cor_e = count_results(log_dir)\r\n if exp >= 4:\r\n entry = np.stack([shd, cor_e, nnz])\r\n min_shd_ind = np.where(entry[0] == np.amin(entry[0]))[0]\r\n filter_entry = entry[:, min_shd_ind]\r\n max_cor_ind = np.argmax(filter_entry[1])\r\n shd, cor_e, nnz = filter_entry[:, max_cor_ind]\r\n return shd, cor_e, nnz\r\n \r\nres = best_result('training.log', 5)","repo_name":"szftandy/TRC-SDGAT","sub_path":"utils/result_util.py","file_name":"result_util.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"12087287878","text":"from django.contrib import admin\nfrom django.urls import path,include\nfrom mainapp import views\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('about_us/', views.about_us, name='about_us'),\n path('contact_us/', views.contact_us, name='contact_us'),\n path('service/', views.service, name='service'),\n path('contact_us/send_mail/', views.send_mail, name='send_mail'),\n]\n","repo_name":"bhyeanhasan/My-Freelancing-Orders","sub_path":"Min Tec Trix Service (Django)/mainapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"28593865003","text":"from collections import Counter, OrderedDict\nimport numpy as np\n\n\nclass OrderedCounter(Counter, OrderedDict):\n \"\"\"Counter that remembers the order elements are first seen\"\"\"\n\n def __repr__(self):\n return '%s(%r)' % (self.__class__.__name__,\n OrderedDict(self))\n\n def __reduce__(self):\n return self.__class__, (OrderedDict(self),)\n\n\nclass Vocabulary:\n \"\"\"A vocabulary, assigns IDs to tokens\"\"\"\n\n def __init__(self):\n self.freqs = OrderedCounter()\n self.w2i = {}\n self.i2w = []\n\n def count_token(self, t):\n self.freqs[t] += 1\n\n def add_token(self, t):\n self.w2i[t] = len(self.w2i)\n self.i2w.append(t)\n\n def build(self, min_freq=0):\n self.add_token(\"\")\n self.add_token(\"\")\n\n tok_freq = list(self.freqs.items())\n tok_freq.sort(key=lambda x: x[1], reverse=True)\n for tok, freq in tok_freq:\n if freq >= min_freq:\n self.add_token(tok)\n\n\ndef load_glove(glove_path, vocab, glove_dim=300):\n \"\"\"\n Load Glove embeddings and update vocab.\n :param glove_path:\n :param vocab:\n :param glove_dim:\n 
:return:\n \"\"\"\n vectors = []\n w2i = {}\n i2w = []\n\n # Random embedding vector for unknown words\n vectors.append(np.random.uniform(\n -0.05, 0.05, glove_dim).astype(np.float32))\n w2i[\"\"] = 0\n i2w.append(\"\")\n\n # Zero vector for padding\n vectors.append(np.zeros(glove_dim).astype(np.float32))\n w2i[\"\"] = 1\n i2w.append(\"\")\n\n with open(glove_path, mode=\"r\", encoding=\"utf-8\") as f:\n for line in f:\n word, vec = line.split(u' ', 1)\n w2i[word] = len(vectors)\n i2w.append(word)\n vectors.append(np.array(vec.split(), dtype=np.float32))\n\n # fix brackets\n w2i[u'-LRB-'] = w2i.pop(u'(')\n w2i[u'-RRB-'] = w2i.pop(u')')\n\n i2w[w2i[u'-LRB-']] = u'-LRB-'\n i2w[w2i[u'-RRB-']] = u'-RRB-'\n\n vocab.w2i = w2i\n vocab.i2w = i2w\n\n return np.stack(vectors)\n","repo_name":"Skvidvardin/project_s","sub_path":"vocab.py","file_name":"vocab.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31190443926","text":"import datetime\nimport subprocess\nimport sys\nfrom typing import TextIO\n\nCSV_DEFAULT_DELIM = '//'\nDEFAULT_INDENTATION = 3\n\n###################################################################################\n##\n## VIATURA E CATÁLOGO\n##\n###################################################################################\n\nclass Viatura:\n def __init__(self, matricula: str, marca: str, modelo: str, data: datetime):\n\n #TODO IF'S\n\n\n self.matricula = matricula\n self.marca = marca\n self.modelo = modelo\n self.data = data\n\n @classmethod\n def from_csv(cls, linha: str, delim = CSV_DEFAULT_DELIM) -> 'Viatura':\n attrs = linha.split(delim)\n return cls(\n matricula = attrs[0],\n marca = attrs[1],\n modelo = attrs[2],\n data = datetime.datetime.strptime(attrs[3], '%Y-%m-%d').date()\n )\n\n def __str__(self):\n return f\"{self.matricula} {self.marca} {self.modelo} {self.data}\"\n\n\n def __repr__(self) -> str:\n cls_name = self.__class__.__name__\n return f'{cls_name}(matricula={self.matricula}, marca={self.marca}, modelo={self.modelo}, data={self.data})'\n\n\nclass InvalidProdAttribute(ValueError):\n pass\n\n\nclass CatalogoViaturas:\n def __init__(self):\n self._viatura = {}\n\n\n def append(self, viatura: Viatura):\n if viatura.matricula in self._viatura:\n raise DuplicateValue(f'Já existe veículo com matrícula {viatura.matricula} no catálogo')\n self._viatura[viatura.matricula] = viatura\n\n def _dump(self):\n for viatura in self._viatura.values():\n print(viatura)\n\n def obtem_por_matricula(self, matricula: str) -> Viatura | None:\n return self._viatura.get(matricula)\n\n def pesquisa(self, criterio) -> 'CatalogoViaturas':\n encontrados = CatalogoViaturas()\n for viatura in self._viatura.values():\n if criterio(viatura):\n encontrados.append(viatura)\n return encontrados\n\n def __str__(self):\n class_name = self.__class__.__name__\n return f'{class_name}[#veículos = {len(self._viatura)}]'\n\n def __iter__(self):\n for viatura in self._viatura.values():\n yield viatura\n\n def __len__(self):\n return len(self._viatura)\n\n\nclass DuplicateValue(Exception):\n pass\n\n\n###################################################################################\n##\n## LEITURA DE FICHEIROS\n##\n###################################################################################\n\ndef le_viatura(caminho_fich: str, delim = CSV_DEFAULT_DELIM) -> CatalogoViaturas:\n viaturas = CatalogoViaturas()\n # ler ficheiro e popular catalogo com cada viatura\n # uma linha do ficheiro 
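    # Expected layout per line, using the '//' delimiter parsed by
    # Viatura.from_csv above (the sample row is hypothetical):
    #   AA-01-BB//Opel//Corsa//2021-05-30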
corresponde a um viatura\n with open(caminho_fich, 'rt') as fich:\n for linha in linhas_relevantes(fich):\n viaturas.append(Viatura.from_csv(linha, delim))\n return viaturas\n\ndef linhas_relevantes(fich: TextIO):\n for linha in fich:\n linha = linha.strip()\n if len(linha) == 0 or linha[0] == '#':\n continue\n yield linha\n\n\n##################################################################################\n##\n## MENU, OPÇÕES E INTERACÇÃO COM UTILIZADOR\n##\n##################################################################################\n\n\ndef exibe_msg(*args, ident = DEFAULT_INDENTATION, **kargs):\n print(' ' * (ident - 1), *args, **kargs)\n\n\ndef entrada(msg: str, ident = DEFAULT_INDENTATION):\n return input(f\"{' ' * DEFAULT_INDENTATION}{msg}\")\n\n\ndef cls():\n if sys.platform == 'win32':\n subprocess.run(['cls'], shell=True, check=True)\n elif sys.platform in ('darwin', 'linux', 'bsd', 'unix'):\n subprocess.run(['clear'], check=True)\n\n\ndef pause(msg: str=\"Pressione ENTER para continuar...\", ident = DEFAULT_INDENTATION):\n input(f\"{' ' * ident}{msg}\")\n\n\nviaturas : CatalogoViaturas | None = None\n\n\ndef exec_menu():\n\n while True:\n cls()\n exibe_msg(\"*******************************************\")\n exibe_msg(\"* 1 - Listar Viaturas *\")\n exibe_msg(\"* 2 - Pesquisar Viaturas *\")\n exibe_msg(\"* 3 - Adicionar Viatura *\")\n exibe_msg(\"* 4 - Remover Viatura *\")\n exibe_msg(\"* 5 - Atualizar Catálogo *\")\n exibe_msg(\"* 6 - Recarregar Catálogo *\")\n exibe_msg(\"* *\")\n exibe_msg(\"* T - Terminar programa *\")\n exibe_msg(\"*******************************************\")\n\n print()\n opcao = entrada(\"OPCAO >> \").strip().upper()\n\n if opcao in ('1', 'UM'):\n exec_listar()\n elif opcao in ('T', 'TERMINAR'):\n break\n exec_terminar()\n else:\n exibe_msg(f\"Opção {opcao} inválida!\")\n pause()\n\n#TODO all the menu options\n\ndef exec_listar():\n cabecalho = f'{\"Matrícula\":^16}|{\"Marca\":^20}|{\"Modelo\":^20}|{\"Data\":^16}'\n separador = f'{\"-\" * 16}+{\"-\" * 20}+{\"-\" * 20}+{\"-\" * 16}'\n print()\n exibe_msg(cabecalho)\n exibe_msg(separador)\n for viatura in viaturas:\n linha = f'{viatura.matricula:^16}|{viatura.marca:^20}|{viatura.modelo:^20}|{viatura.data.strftime(\"%Y-%m-%d\"):^16}'\n exibe_msg(linha)\n\n exibe_msg(separador)\n print()\n pause()\n\n\ndef exec_terminar():\n sys.exit(0)\n\n\n\ndef main() -> None:\n global viaturas\n viaturas = le_viatura('viaturas.csv')\n exec_menu()\n\n\nif __name__ == '__main__':\n main()","repo_name":"diamantinoM/ServerLabs","sub_path":"python/gestao_viaturas.py","file_name":"gestao_viaturas.py","file_ext":"py","file_size_in_byte":5607,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34569691991","text":"from tkinter import *\r\nimport xlrd\r\nimport xlwt\r\nimport os\r\nclass excelT:\r\n\tdef __init__(self):\r\n\t\twindow = Tk(className = \"处理数据\")\r\n\t\twindow.geometry('190x90+150+150')\r\n\t\tlable = Label(window,text = \"Filename\")\r\n\r\n\r\n\t\tself.msg = StringVar()\r\n\t\tself.msg1 = StringVar()\r\n\t\tentryName = Entry(window,textvariable= self.msg)\r\n\t\tentryName1 = Entry(window, textvariable=self.msg1)\r\n\r\n\t\treadButton = Button(window, text=\"Read\", fg=\"red\", command=self.read_excel)\r\n\t\twriteButton = Button(window, text=\"Write\", fg=\"red\", command=self.save_excel)\r\n\r\n\r\n\t\tlable.grid(row=1, column=1)\r\n\t\tentryName.grid(row=2, column=1)\r\n\t\tentryName1.grid(row = 3,column=1)\r\n\t\treadButton.grid(row=2, 
column=2)\r\n\t\twriteButton.grid(row = 3,column = 2)\r\n\r\n\t\twindow.mainloop()\r\n\r\n\r\n\tdef clickMe(self):\r\n\t\tprint(\"Clicke Me \",self.msg.get())\r\n\t\tself.read_excel()\r\n\r\n\tdef read_excel(self):\r\n\t\tprint(\"Come\", self.msg.get())\r\n\t\texcelName = self.msg.get() + \".xlsx\"\r\n\t\t# 打开文件\r\n\t\tfile_Name = 'F:\\data' + '\\\\' + excelName\r\n\t\tworkbook = xlrd.open_workbook(file_Name)\r\n\t\tsheet2 = workbook.sheet_by_index(0)\r\n\t\tcols = sheet2.col_values(0) # 获取第一列内容\r\n\t\tprint(\"/****************Wright Excel*******************/\")\r\n\t\tm = 0\r\n\t\tself.listMax = []\r\n\t\tfor i in range(21, len(cols)):\r\n\t\t\tif int(cols[i]) == m:\r\n\t\t\t\tm = m + 1\r\n\t\t\t\tlist0 = sheet2.row_values(i)\r\n\t\t\t\tself.listMax.append(list0)\r\n\t\t\t\tprint(self.listMax)\r\n\t\t\t\tprint(\"/****************INT*******************/\")\r\n\r\n\r\n\tdef save_excel(self):\r\n\t\tprint(os.getcwd())\r\n\t\tos.chdir('f:\\\\data\\\\NewData')\r\n\t\tprint(os.getcwd())\r\n\t\tprint(\"Come\",self.msg1.get(),self.listMax)\r\n\t\texcelName = self.msg1.get() + \".xls\"\r\n\t\tf = xlwt.Workbook()\r\n\t\tsheet2 = f.add_sheet('sheet1', cell_overwrite_ok=True)\r\n\t\trow0 = ['时间s', '力kN', '变形mm', '位移mm', '扩展', '应力MPa', '应变%']\r\n\r\n\t\tfor i in range(0, len(row0)):\r\n\t\t\tsheet2.write(0, i, row0[i])\r\n\r\n\t\tfor j in range(0, len(self.listMax)):\r\n\t\t\tdata1 = self.listMax[j]\r\n\t\t\tfor i in range(0, len(row0)):\r\n\t\t\t\tsheet2.write(j + 1, i, data1[i])\r\n\t\tf.save(excelName) # 保存文件\r\n\r\n\r\nexcelT()\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"JingChuanHe/PythonTools","sub_path":"Data.py","file_name":"Data.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"8799725900","text":"import numpy as np\n\nfrom scipy.optimize import fsolve\n\ndef f(x):\n x0 = float(x[0])\n x1 = float(x[1])\n x2 = float(x[2])\n return [10*x0+2*x1-4*x2-16,-12*x0-5*x1+x2+6,x0+x1+x2-5]\n\ndef J(x):\n x0 = float(x[0])\n x1 = float(x[1])\n x2 = float(x[2])\n return [[10,2,-4],[-12,-5,-1],[1,1,1]]\n\nresult = fsolve(f,[1,1,1],fprime=J)\n\nprint(result)\nprint(f(result))\n","repo_name":"xuming0629/xm-study","sub_path":"xm-study/matlab-test/pthhon-math/J_fsolve.py","file_name":"J_fsolve.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"12127159185","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport statsmodels.api as sm\nfrom math import log\nfrom math import sqrt\nfrom statsmodels.sandbox.regression.predstd import wls_prediction_std\n# 读取数据\n# 第三问\ndata=pd.read_excel('Chapter2.xlsx')\ndata=data.values\nX0=[]\nY=[]\nfor i in range(len(data)):\n X0.append(log(data[i][1]))\n Y.append(log(data[i][0]))\nX=sm.add_constant(X0)\nmodel=sm.OLS(Y,X)\nresults=model.fit()\n# 回归系数标准差\nprint('回归模型拟合结果:')\nprint(results.summary())\n\n\nplt.scatter(X0,Y,s=5)\nX0=np.array(X0)\nY2=1.0308*X0-2.2571\nplt.plot(X0,Y2,c='r')\nplt.title('Fit Result')\n# plt.title('Data Profile')\nplt.show()","repo_name":"Wang-Fulin-SDUWH/Jiliang","sub_path":"Chapter2_3.py","file_name":"Chapter2_3.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36377988008","text":"# https://www.acmicpc.net/problem/11048\n# 1188 ms\n\nimport sys\nN, M = map(int, input().split())\n\nroomInfo = []\nfor _ in range(N):\n 
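    # each input line holds the M values for one row of the N x M room grid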
row = list(map(int, input().split()))\n roomInfo.append(row)\n\nDP = [[0 for _ in range(M)] for _ in range(N)]\n\nfor r in range(N):\n for c in range(M):\n left = DP[r][c-1] if c - 1 >= 0 else 0\n upper = DP[r-1][c] if r - 1 >= 0 else 0\n diagonal = DP[r-1][c-1] if c - 1 >= 0 and r - 1 >= 0 else 0\n DP[r][c] = max(left, upper, diagonal) + roomInfo[r][c]\n\nprint(DP[N-1][M-1])\n\n# 596 ms\ninput = sys.stdin.readline\n\n# 620 ms\n\n\ndef solution():\n N, M = map(int, input().split())\n roomInfo = [[0 for _ in range(M+1)]]\n for _ in range(N):\n row = list(map(int, input().split()))\n roomInfo.append([0] + row)\n\n DP = [[0 for _ in range(M+1)] for _ in range(N + 1)]\n\n for r in range(1, N + 1):\n for c in range(1, M + 1):\n DP[r][c] = max(DP[r][c-1], DP[r-1][c],\n DP[r-1][c-1]) + roomInfo[r][c]\n\n return DP[N][M]\n\n\nprint(solution())\n","repo_name":"studying-ice-bear/pparkkkimeom","sub_path":"KimChaeJung/dynamicProgramming/11048_이동하기.py","file_name":"11048_이동하기.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"72821467074","text":"## Write a program that asks for a person's age;\n# if the age is over 18, show the message 'Eres mayor de edad',\n# otherwise show the message 'Eres menor de edad'.\n\n\n# nombre=input(' Ingrese su nombre: ')\n# edad = int(input(\"Ingresa tu edad: \"))\n# if edad>=18:\n# print( nombre + ' ' + 'Eres mayor de edad')\n# else:\n# print(nombre + ' ' +'Eres menor de edad')\n \n\n# A store wants to offer a 20% discount. Write a program that determines whether the customer\n# earns the discount, taking into account the following: if the customer makes a purchase equal to or\n# greater than S/. 1000 soles, show a message saying 'GANASTE EL DESCUENTO DE 20%, AHORA PAGARAS ';\n# if the purchase does not reach S/. 1000 soles, show a message saying 'NO APLICAS AL DESCUENTO'.\n\n# if MONT_COMPRA >= 1000:\n# descuento = MONT_COMPRA * 0.2\n# total_pagar = MONT_COMPRA - descuento\n# print(f\"¡GANASTE EL DESCUENTO DE 20%! Ahora pagarás {total_pagar} soles.\")\n# else:\n# print(f\"NO APLICAS AL DESCUENTO. El monto de la compra es de {MONT_COMPRA} soles.\")\n\n# Write a program that asks for a name 5 times and, each time it asks,\n# shows how many times a name has been entered.\n\n# for n in range(1,6):\n# nombre=input(\"ingrese un nombre: \")\n# print(f\"ingresaste {n} veces el nombre\")\n\n# Write a program that asks for a number and checks it against the winning number; if the number\n# entered is the winning one the program ends, and if the number entered is wrong\n# the program keeps asking for the winning number.\n\n# numero_ganador=10\n# condicion=True\n# while condicion:\n# numero_ingresado=int(input(\"ingrese un numero: \"))\n# if numero_ingresado==numero_ganador:\n# print(\"ganste\")\n# condicion=False\n# else:\n# (\"sigue intentando\")\n\n\n# lista=[12,3,4,34,2]\n# mi_print(min(lista))\n\n# def mi_min(lista):\n# numero_menor=lista[0]\n# for numero < numeroi_menor:\n# numero_menor=numero\n# return numero_menor\n# print(mi_min(lista))\n\n## Create one function per arithmetic operator that receives\n## two parameters and returns the result of the operation. Also create\n## a function that lets us print the result.\n# a=int(input(\" SAIRE INGRESE EL NUMERO: \"))\n# b=int(input(\" SAIRE INGRESE EL NUMERO: \"))\n# def suma(a,b):\n# total=a+b\n# return total\n# print(suma(a,b))\n\n\n# def resta(a,b):\n# total=a-b\n# return total\n# print(resta(a,b))\n\n# def multiplicar(a,b):\n# total=a*b\n# return total\n# print(multiplicar(a,b))\n\n# def dividir(a,b):\n# total=a/b\n# return total\n# print(dividir(a,b))\n\n## Write a function that receives a positive integer\n## and returns its factorial.\ndef factorial(numero):\n if numero == 0:\n return 1\n else:\n return numero * factorial(numero - 1)\n\nnumero = int(input(\"Ingresa un número entero positivo: \"))\nresultado = factorial(numero)\nprint(\"El factorial de\", numero, \"es\", resultado)\n## Write a function that receives a list of numbers\n## as a parameter and returns a new list with each\n## number squared.\ndef elevar_al_cuadrado(lista):\n nueva_lista = []\n for numero in lista:\n nueva_lista.append(numero ** 2)\n return nueva_lista\n## Write a program that receives a string of characters\n## and returns an object with each word it contains and its\n## frequency.\ndef contar_palabras(cadena):\n palabras = cadena.split()\n frecuencia = {}\n for palabra in palabras:\n if palabra in frecuencia:\n frecuencia[palabra] += 1\n else:\n frecuencia[palabra] = 1\n return frecuencia\n\ncadena = input(\"Ingresa una cadena de caracteres: \")\nfrecuencia_palabras = contar_palabras(cadena)\nprint(frecuencia_palabras)","repo_name":"lopeznando/CLASES_PYTHON","sub_path":"REPASO_PYTHON/EJERCICIOS.py","file_name":"EJERCICIOS.py","file_ext":"py","file_size_in_byte":4033,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"23829361151","text":"from datetime import datetime\n\n\nclass Post:\n def __init__(self, owner, title, contents, owner_id, image=None, post_id=0):\n self.id = post_id\n self.owner = str(owner)\n self.title = str(title)\n self.contents = str(contents)\n self.created_at = datetime.now().strftime(\"%B %d %Y %H:%M:%S\")\n self.modified_at = ''\n self.owner_id = owner_id\n self.image = image\n","repo_name":"TeodorescuIonut/Blog-Web-App-using-Python","sub_path":"models/dtos/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"21150965871","text":"import numpy as np\nimport torch\nimport getpass\nfrom torch import autocast\nfrom diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, StableDiffusionInpaintPipeline\nfrom PIL import Image\nfrom random import randint\nfrom accelerate import Accelerator\nimport os\nimport argparse\n\n\nclass Text_To_Image :\n \n def __init__(self, token, prompt, seed):\n self.token = token\n self.prompt = prompt\n self.seed = seed\n pass\n \n # Text To Image\n def sd_texttoimg_pipeline(self, token):\n device = \"cuda\"\n accelerator = Accelerator()\n device = accelerator.device\n\n model_id = \"runwayml/stable-diffusion-v1-5\"\n pipe = StableDiffusionPipeline.from_pretrained(\n model_id,\n revision = 'fp16', \n torch_dtype = torch.float16,\n use_auth_token=token\n ).to(device)\n\n return pipe\n \n def sd_texttoimg_function(self, pipe, prompt, seed):\n device = \"cuda\"\n\n if seed == \"\":\n seed_no = randint(1, 999999999)\n else:\n seed_no = int(seed)\n\n generator = torch.Generator(device=device).manual_seed(seed_no)\n with 
autocast(device):\n image = pipe(prompt=prompt, generator=generator)['images'][0]\n\n print(\"prompt : \", prompt)\n print(\"seed : \", seed_no)\n \n output_path = os.getcwd() + \"/TexttoImage\"\n if not os.path.exists(output_path):\n os.mkdir(output_path)\n \n image.save(output_path + f\"/T2I_{prompt}_{seed_no}.png\", \"png\")\n return image\n\nclass Image_To_Image :\n \n def __init__(self, token, file_name, prompt, strength, seed):\n self.token = token\n self.file_name = file_name\n self.prompt = prompt \n self.strength = strength\n self.seed = seed\n \n \n # Text To Image\n def sd_imgtoimg_pipeline(self, token):\n device = \"cuda\"\n accelerator = Accelerator()\n device = accelerator.device\n \n model_id = \"runwayml/stable-diffusion-v1-5\"\n pipe = StableDiffusionImg2ImgPipeline.from_pretrained(\n model_id,\n revision=\"fp16\", \n torch_dtype=torch.float16,\n use_auth_token=token\n ).to(device)\n \n return pipe\n \n def sd_imgtoimg_function(self, pipe, prompt, file_name, strength, seed):\n image = Image.open(file_name).convert(\"RGB\").resize((512,512), resample=Image.LANCZOS)\n\n device = \"cuda\"\n\n if seed == \"\" or seed == None:\n seed_no = randint(1, 999999999)\n else:\n seed_no = int(seed)\n\n generator = torch.Generator(device=device).manual_seed(seed_no)\n with autocast(device):\n image = pipe(prompt=prompt, image=image, strength=strength, guidance_scale=7.5, generator=generator).images[0]\n \n print(\"kr_prompt : \", prompt) \n print(\"seed : \", seed_no)\n \n output_path = os.getcwd() + \"/ImagetoImage\"\n if not os.path.exists(output_path):\n os.mkdir(output_path)\n \n image.save(output_path + f\"/I2I_{prompt}_{seed_no}.png\", \"png\")\n return image\n \ndef image_to_image(token, prompt, file_name, strength, seed):\n \n diffusion = Image_To_Image(token, file_name, prompt, strength, seed)\n \n try:\n image = diffusion.sd_imgtoimg_function(pipe_i2i, prompt, file_name, strength, seed)\n except:\n pipe_i2i = diffusion.sd_imgtoimg_pipeline(token)\n image = diffusion.sd_imgtoimg_function(pipe_i2i, prompt, file_name, strength, seed)\n \n return image\n\n\nclass Image_Extend:\n \n def __init__(self, token, prompt, file_name, a, b, seed):\n self.token = token\n self.prompt = prompt\n self.file_name = file_name\n self.a = a\n self.b = b\n self.seed = seed\n \n def sd_extend_pipeline(self, token):\n device = \"cuda\"\n accelerator = Accelerator()\n device = accelerator.device\n model_id = \"runwayml/stable-diffusion-inpainting\"\n\n pipe = StableDiffusionInpaintPipeline.from_pretrained(\n model_id,\n revision=\"fp16\", \n torch_dtype=torch.float16,\n use_auth_token=token\n ).to(device)\n \n return pipe\n \n def sd_extend_crop_mask(self, file_name, a, b):\n main_img = Image.open(file_name).convert(\"RGBA\")\n\n main_width, main_height = main_img.size\n\n extend_width = main_width + (512 * 2)\n extend_height = main_height + (512 * 2)\n extend_square_w = np.full((extend_height, extend_width, 4), (255, 255, 255, 0), dtype=np.uint8)\n\n main_array = np.array(main_img)\n for width in range(0, main_width):\n for height in range(0, main_height):\n extend_square_w[height+512][width+512] = main_array[height][width]\n\n extend_main_img = Image.fromarray(extend_square_w)\n\n # crop extend_main_img\n extend_crop = extend_main_img.crop((a,b,a+512,b+512))\n extend_crop\n\n # a, b value 검증\n crop_array = np.array(extend_crop)\n zero_count = crop_array[:,:,3].reshape(-1).tolist().count(0)\n if zero_count == 0:\n print(\"a,b 값 다시 설정 필요.\")\n return\n\n # 5. 
crop_array와 투명도를 이용하여 마스크 생성\n mask_array = crop_array.copy()\n for i in range(512):\n for j in range(512):\n if mask_array[i][j][3] == 255:\n mask_array[i][j] = [0,0,0,255]\n else:\n mask_array[i][j] = [255,255,255,255]\n mask = Image.fromarray(mask_array)\n\n return extend_main_img, extend_crop, mask\n \n def sd_extend_result_img(self, pipe, prompt, extend_img, image, mask_image, a, b, seed):\n num_samples = 1\n if seed == \"\" or seed == None:\n seed_no = randint(0,9999999999)\n else:\n seed_no = int(seed)\n \n device = \"cuda\"\n accelerator = Accelerator()\n device = accelerator.device\n generator = torch.Generator(device=device).manual_seed(seed_no) # change the seed to get different results\n\n images = pipe(\n prompt=prompt,\n image=image,\n mask_image=mask_image,\n guidance_scale=7.5,\n generator=generator,\n num_images_per_prompt=num_samples,\n ).images[0]\n\n extend_img_array = np.array(extend_img)\n images_array = np.array(images.convert(\"RGBA\"))\n for i in range(512):\n for j in range(512):\n extend_img_array[b+i][a+j] = images_array[i][j]\n\n for_crop_h, for_crop_w = extend_img_array.shape[:2]\n\n w_list, h_list = [], []\n\n for h in range(for_crop_h):\n for w in range(for_crop_w):\n pixel = extend_img_array[h][w][3]\n if pixel == 255:\n w_list.append(w)\n h_list.append(h)\n\n result_img = Image.fromarray(extend_img_array)\n final_crop = result_img.crop((min(w_list),min(h_list),max(w_list),max(h_list)))\n \n print(\"prompt : \", prompt) \n print(\"seed : \", seed_no)\n \n output_path = os.getcwd() + \"/ImageExtend\"\n if not os.path.exists(output_path):\n os.mkdir(output_path)\n \n final_crop.save(output_path + f\"/IEx_{prompt}_{seed_no}.png\", \"png\")\n return final_crop\n \n def sd_extend_function(self, pipe, file_name, prompt, a, b, seed = \"\"):\n \n extend_img, image, mask_image = self.sd_extend_crop_mask(file_name, a, b)\n \n\n final_result = self.sd_extend_result_img(pipe, prompt, extend_img, image, mask_image, a, b, seed)\n \n\n return final_result\n\n\nclass FineTuning:\n \n def __init__(self, UNet_Training_Steps, UNet_Learning_Rate, Text_Encoder_Training_Steps, Text_Encoder_Learning_Rate, Session_Name, INSTANCE_DIR):\n self.UNet_Training_Steps = UNet_Training_Steps, \n self.UNet_Learning_Rate = UNet_Learning_Rate,\n self.Text_Encoder_Training_Steps = Text_Encoder_Training_Steps,\n self.Text_Encoder_Learning_Rate = Text_Encoder_Learning_Rate, \n self.Session_Name = Session_Name\n self.WORKSPACE='/content/Fast-Dreambooth'\n self.OUTPUT_DIR=\"/content/models/\"+ Session_Name\n self.SESSION_DIR=self.WORKSPACE+'/Sessions/'+ Session_Name\n self.INSTANCE_DIR=INSTANCE_DIR\n self.MODEL_NAME=\"/content/stable-diffusion-v1-5\"\n self.PT=\"\"\n pass\n \n # Line Logging\n def line_logging(self, *messages):\n import datetime\n import sys\n today = datetime.datetime.today()\n log_time = today.strftime('[%Y/%m/%d %H:%M:%S]')\n log = []\n for message in messages:\n log.append(str(message))\n print(log_time + '::' + ' '.join(log) + '')\n sys.stdout.flush()\n \n \n # Environment Setting\n def sd_custom_environment(self):\n import os\n import subprocess\n import shutil\n import glob\n from distutils.dir_util import copy_tree\n import time\n self.line_logging(\"Start Env. 
Setting\")\n os.chdir('/content/')\n subprocess.run(['pip', 'install', '-q', '--no-deps', 'accelerate==0.12.0'])\n subprocess.call (['wget', '-q', '-i', '/', \"https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dependencies/dbdeps.txt\"])\n\n f = open('/content/dbdeps.txt')\n lines = f.readlines()\n for i in range(len(lines)):\n subprocess.call (['wget', '-q', '/', f\"https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dependencies/deps.{i+1}\"])\n for i in range(len(lines)):\n try:\n shutil.move(f\"deps.{i+1}\", f\"deps.zip.00{i+1}\")\n except:\n pass\n\n cmd = ['7z', 'x', 'deps.zip.001']\n sp = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)\n time.sleep(20)\n\n file_source = '/content/usr/local/lib/python3.8/dist-packages'\n file_destination = '/usr/local/lib/python3.8/dist-packages'\n copy_tree(file_source, file_destination)\n time.sleep(20)\n \n shutil.rmtree('/content/usr')\n\n file_list = []\n file_list.extend(glob.glob(\"*.00*\"))\n file_list.extend(glob.glob(\"*.txt\"))\n for file_name in file_list:\n os.remove(file_name)\n subprocess.run([\"git\", \"clone\", \"https://github.com/TheLastBen/diffusers\", '--depth=1', '--branch=updt'])\n self.line_logging('Done, proceed')\n \n # create Session\n def sd_custom_create_session(self, MODEL_NAME, SESSION_DIR, INSTANCE_DIR):\n import os\n if os.path.exists(str(SESSION_DIR)):\n self.line_logging('Loading session with no previous model, using the original model or the custom downloaded model')\n if MODEL_NAME==\"\":\n self.line_logging('No model found, use the \"Model Download\" cell to download a model.')\n else:\n self.line_logging('Session Loaded, proceed to uploading instance images')\n\n\n elif not os.path.exists(str(SESSION_DIR)):\n # %mkdir -p \"$INSTANCE_DIR\"\n os.makedirs(SESSION_DIR)\n self.line_logging('Creating session...')\n if MODEL_NAME==\"\":\n self.line_logging('No model found, use the \"Model Download\" cell to download a model.')\n else:\n self.line_logging('Session created, proceed to uploading instance images')\n \n # upload_image_replace\n # 이미지, 캡션에 들어간 띄어쓰기를 \"-\" 로 바꿔주는 함수\n def sd_custom_upload_image_replace(self, directory):\n import shutil\n import glob\n\n inst_list = glob.glob(directory+\"/*\")\n for i in inst_list:\n old_name = i.split(\"/\")[-1]\n new_name = old_name.replace(\" \", \"-\")\n shutil.move(directory +\"/\"+old_name, directory + \"/\" + new_name)\n \n # Upload Image\n def sd_custom_upload_image(self, SESSION_DIR, INSTANCE_DIR, IMAGE_DIR):\n import shutil\n import os\n from glob import glob\n from tqdm import tqdm\n \n self.line_logging(\"Start : Upload Image...\")\n\n if not os.path.exists(str(INSTANCE_DIR)):\n os.makedirs(INSTANCE_DIR)\n\n if os.path.exists(INSTANCE_DIR+\"/.ipynb_checkpoints\"):\n shutil.rmtree(str(INSTANCE_DIR) + \"/.ipynb_checkpoints\")\n \n\n # up=\"\" \n # uploaded = files.upload()\n \n # # 캡션과 이미지 파일 분리\n # for filename in uploaded.keys():\n # if filename.split(\".\")[-1]==\"txt\":\n # shutil.move(filename, CAPTIONS_DIR)\n # up=[filename for filename in uploaded.keys() if filename.split(\".\")[-1]!=\"txt\"]\n \n # 이미지 파일들 INST_DIR로 이동, bar_Format은 막대기 모양인듯\n \n\n \n #d 이미지, 캡션 파일 이름의 빈칸을 \"-\"로 바꿔줌\n for directory in [INSTANCE_DIR]:\n self.sd_custom_upload_image_replace(directory) \n \n # 파일 압축 \n os.chdir(SESSION_DIR)\n if os.path.exists(\"instance_images.zip\"):\n os.remove(\"instance_images.zip\")\n \n # if os.path.exists(\"captions.zip\"):\n # os.remove(\"captions.zip\")\n \n shutil.make_archive('instance_images', 'zip', 
'./instance_images')\n # shutil.make_archive('captions', 'zip', './captions')\n \n self.line_logging(\"Done : Upload Image...\")\n \n # Model Download (Ver 1.5)\n def sd_custom_model_download(self,):\n self.line_logging(\"Start : Model Download...\")\n \n import shutil\n import os\n\n if os.path.exists('/content/stable-diffusion-v1-5'):\n shutil.rmtree('/content/stable-diffusion-v1-5')\n\n os.chdir('/content')\n os.mkdir('/content/stable-diffusion-v1-5')\n os.chdir('/content/stable-diffusion-v1-5')\n os.system('git init')\n os.system('git lfs install --system --skip-repo')\n os.system('''git remote add -f origin \"https://huggingface.co/runwayml/stable-diffusion-v1-5\"''')\n os.system(\"git config core.sparsecheckout true\")\n os.system('''echo -e \"\\nscheduler\\ntext_encoder\\ntokenizer\\nunet\\nfeature_extractor\\nsafety_checker\\nmodel_index.json\\n!*.safetensors\" > .git/info/sparse-checkout''')\n os.system(\"git pull origin main\")\n if os.path.exists('/content/stable-diffusion-v1-5/unet/diffusion_pytorch_model.bin'):\n os.system('''git clone \"https://huggingface.co/stabilityai/sd-vae-ft-mse\"''')\n os.system('''mv /content/stable-diffusion-v1-5/sd-vae-ft-mse /content/stable-diffusion-v1-5/vae''')\n os.system(\"rm -r /content/stable-diffusion-v1-5/.git\")\n os.chdir(\"/content/stable-diffusion-v1-5\")\n os.system('''sed -i 's@\"clip_sample\": false@@g' /content/stable-diffusion-v1-5/scheduler/scheduler_config.json''')\n os.system('''sed -i 's@\"trained_betas\": null,@\"trained_betas\": null@g' /content/stable-diffusion-v1-5/scheduler/scheduler_config.json''')\n os.system('''sed -i 's@\"sample_size\": 256,@\"sample_size\": 512,@g' /content/stable-diffusion-v1-5/vae/config.json''')\n os.chdir(\"/content\")\n\n self.line_logging('DONE : Model Download...')\n else:\n while not os.path.exists('/content/stable-diffusion-v1-5/unet/diffusion_pytorch_model.bin'):\n self.line_logging('Model Download : Something went wrong')\n \n # TextEnc, UNet Training\n def sd_custom_training(self, UNet_Training_Steps, UNet_Learning_Rate, Text_Encoder_Training_Steps, Text_Encoder_Learning_Rate, MODEL_NAME, SESSION_DIR, INSTANCE_DIR, OUTPUT_DIR, Session_Name, PT): \n self.line_logging(\"Start : Fine Tuning\")\n import random\n import os\n import shutil\n\n MODELT_NAME = MODEL_NAME\n \n # UNet\n UNet_Training_Steps=UNet_Training_Steps \n UNet_Learning_Rate = UNet_Learning_Rate\n untlr=UNet_Learning_Rate\n\n # Text_Encoder\n Enable_text_encoder_training= True\n Text_Encoder_Training_Steps=Text_Encoder_Training_Steps\n Text_Encoder_Learning_Rate = Text_Encoder_Learning_Rate #param [\"2e-6\", \"1e-6\",\"8e-7\",\"6e-7\",\"5e-7\",\"4e-7\"] {type:\"raw\"}\n stptxt=Text_Encoder_Training_Steps\n txlr=Text_Encoder_Learning_Rate\n\n # Seed\n Seed=\"\"\n if Seed =='' or Seed=='0':\n Seed=random.randint(1, 999999)\n else:\n Seed=int(Seed)\n \n trnonltxt=\"\"\n extrnlcptn=\"\"\n Style=\"\"\n Res = 512\n\n prec=\"fp16\"\n precision=prec\n GC=\"--gradient_checkpointing\"\n\n stp=0\n Start_saving_from_the_step=0\n stpsv=Start_saving_from_the_step\n\n\n dump_only_textenc = f\"\"\"accelerate launch /content/diffusers/examples/dreambooth/train_dreambooth.py \\\n {trnonltxt} \\\n --image_captions_filename \\\n --train_text_encoder \\\n --dump_only_text_encoder \\\n --pretrained_model_name_or_path=\"{MODELT_NAME}\" \\\n --instance_data_dir=\"{INSTANCE_DIR}\" \\\n --output_dir=\"{OUTPUT_DIR}\" \\\n --instance_prompt=\"{PT}\" \\\n --seed={Seed} \\\n --resolution=512 \\\n --mixed_precision={precision} \\\n --train_batch_size=1 \\\n 
--gradient_accumulation_steps=1 {GC} \\\n --use_8bit_adam \\\n --learning_rate={txlr} \\\n --lr_scheduler=\"polynomial\" \\\n --lr_warmup_steps=0 \\\n --max_train_steps={stptxt}\n \"\"\"\n\n train_only_unet = f\"\"\"accelerate launch /content/diffusers/examples/dreambooth/train_dreambooth.py \\\n {Style} \\\n {extrnlcptn} \\\n --stop_text_encoder_training={stptxt} \\\n --image_captions_filename \\\n --train_only_unet \\\n --save_starting_step={stpsv} \\\n --save_n_steps={stp} \\\n --Session_dir=\"{SESSION_DIR}\" \\\n --pretrained_model_name_or_path=\"{MODELT_NAME}\" \\\n --instance_data_dir=\"{INSTANCE_DIR}\" \\\n --output_dir=\"{OUTPUT_DIR}\" \\\n --captions_dir=\"\" \\\n --instance_prompt={PT} \\\n --seed={Seed} \\\n --resolution={Res} \\\n --mixed_precision={precision} \\\n --train_batch_size=1 \\\n --gradient_accumulation_steps=1 {GC} \\\n --use_8bit_adam \\\n --learning_rate={untlr} \\\n --lr_scheduler=\"polynomial\" \\\n --lr_warmup_steps=0 \\\n --max_train_steps={UNet_Training_Steps}\n \"\"\"\n os.chdir('/content')\n # Text Encoder Training\n if Enable_text_encoder_training :\n self.line_logging('Training the text encoder...')\n if os.path.exists(OUTPUT_DIR+'/'+'text_encoder_trained'):\n shutil.rmtree(OUTPUT_DIR+'/'+'text_encoder_trained')\n os.system(dump_only_textenc)\n\n # UNet Training\n if UNet_Training_Steps!=0:\n self.line_logging('Training the UNet...')\n os.system(train_only_unet)\n\n # Copy feature_extractor, safety_checker, model_index.json 슈틸 3형제\n try:\n shutil.copytree(\"/content/stable-diffusion-v1-5/feature_extractor\", OUTPUT_DIR + \"/feature_extractor\")\n except:\n print(f\"File exists: '/content/models/{Session_Name}/feature_extractor'\")\n try: \n shutil.copytree(\"/content/stable-diffusion-v1-5/safety_checker\", OUTPUT_DIR + \"/safety_checker\")\n except:\n print(f\"File exists: '/content/models/{Session_Name}/safety_checker'\")\n\n shutil.copyfile('/content/stable-diffusion-v1-5/model_index.json', OUTPUT_DIR + \"/model_index.json\")\n self.line_logging(\"Done : FineTuning...\")\n # Total Function\n \n def sd_custom_function(self, UNet_Training_Steps, UNet_Learning_Rate, Text_Encoder_Training_Steps, Text_Encoder_Learning_Rate, Session_Name, IMAGE_DIR):\n import os\n WORKSPACE='/content/gdrive/MyDrive/Fast-Dreambooth'\n OUTPUT_DIR=\"/content/models/\"+ Session_Name\n SESSION_DIR=WORKSPACE+'/Sessions/'+ Session_Name\n INSTANCE_DIR=SESSION_DIR+'/instance_images'\n MODEL_NAME=\"/content/stable-diffusion-v1-5\"\n PT=\"\"\n ### 1. Environment Setting\n # try:\n # import wget\n # except:\n # self.sd_custom_environment()\n\n ### 2. Create Session\n self.sd_custom_create_session(MODEL_NAME, SESSION_DIR, INSTANCE_DIR)\n\n ### 3. Image Upload\n self.sd_custom_upload_image(SESSION_DIR, INSTANCE_DIR, IMAGE_DIR)\n\n ### 4. Model Download (진행중)\n if not os.path.exists('/content/stable-diffusion-v1-5'):\n self.sd_custom_model_download()\n else:\n print(\"The v1.5 model already exists, using this model.\") \n\n ### 5. 
Training\n self.sd_custom_training(UNet_Training_Steps, UNet_Learning_Rate, Text_Encoder_Training_Steps, Text_Encoder_Learning_Rate, MODEL_NAME, SESSION_DIR, INSTANCE_DIR, OUTPUT_DIR, Session_Name, PT)\n\n\ndef text_to_image(token, prompt, seed):\n\n diffusion = Text_To_Image(token, prompt, seed)\n \n try:\n image = diffusion.sd_texttoimg_function(pipe_t2i, prompt, seed)\n except:\n pipe_t2i = diffusion.sd_texttoimg_pipeline(token)\n image = diffusion.sd_texttoimg_function(pipe_t2i, prompt, seed)\n \n\n \n return image\n\n\ndef image_to_image(token, prompt, file_name, strength, seed):\n \n diffusion = Image_To_Image(token, file_name, prompt, strength, seed)\n \n try:\n image = diffusion.sd_imgtoimg_function(pipe_i2i, prompt, file_name, strength, seed)\n except:\n pipe_i2i = diffusion.sd_imgtoimg_pipeline(token)\n image = diffusion.sd_imgtoimg_function(pipe_i2i, prompt, file_name, strength, seed)\n \n return image\n\n\ndef image_extend(token, file_name, prompt, a, b, seed):\n a = int(a)\n b = int(b)\n \n diffusion = Image_Extend(token, file_name, prompt, a, b, seed)\n \n try:\n image = diffusion.sd_extend_function(pipe_ie, file_name, prompt, a, b, seed)\n except:\n pipe_ie = diffusion.sd_extend_pipeline(token)\n image = diffusion.sd_extend_function(pipe_ie, file_name, prompt, a, b, seed)\n \n return image\n\ndef fine_tuning_env(UNet_Training_Steps, UNet_Learning_Rate, Text_Encoder_Training_Steps, Text_Encoder_Learning_Rate, Session_Name, IMAGE_DIR):\n \n diffusion = FineTuning(UNet_Training_Steps, UNet_Learning_Rate, Text_Encoder_Training_Steps, Text_Encoder_Learning_Rate, Session_Name, IMAGE_DIR)\n \n diffusion.sd_custom_environment()\n \ndef fine_tuning(UNet_Training_Steps, UNet_Learning_Rate, Text_Encoder_Training_Steps, Text_Encoder_Learning_Rate, Session_Name, IMAGE_DIR):\n\n diffusion = FineTuning(UNet_Training_Steps, UNet_Learning_Rate, Text_Encoder_Training_Steps, Text_Encoder_Learning_Rate, Session_Name, IMAGE_DIR)\n \n diffusion.sd_custom_function(UNet_Training_Steps, UNet_Learning_Rate, Text_Encoder_Training_Steps, Text_Encoder_Learning_Rate, Session_Name, IMAGE_DIR)\n \n \ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--module\",\n required=True,\n type=str\n )\n parser.add_argument(\n \"--token\",\n type=str\n )\n parser.add_argument(\n \"--prompt\",\n type=str\n )\n parser.add_argument(\n \"--file_name\",\n type=str\n )\n parser.add_argument(\n \"--seed\",\n default = None,\n type=str\n )\n parser.add_argument(\n \"--strength\",\n type=str,\n default=\"0.6\"\n )\n parser.add_argument(\n \"--output_name\",\n type=str\n )\n parser.add_argument(\n \"--a\",\n type=str\n )\n parser.add_argument(\n \"--b\",\n type=str\n )\n parser.add_argument(\n \"--UNet_Training_Steps\",\n type=str\n )\n parser.add_argument(\n \"--UNet_Learning_Rate\",\n type=str\n )\n parser.add_argument(\n \"--Text_Encoder_Training_Steps\",\n type=str\n )\n parser.add_argument(\n \"--Text_Encoder_Learning_Rate\",\n type=str\n )\n parser.add_argument(\n \"--Session_Name\",\n type=str\n )\n parser.add_argument(\n \"--INSTANCE_DIR\",\n type=str\n )\n \n args = parser.parse_args()\n \n return args\n\n\ndef main():\n args = parse_args()\n \n # Text to Image\n if args.module == \"texttoimage\":\n image = text_to_image(args.token, args.prompt, args.seed)\n return image\n \n elif args.module == \"imagetoimage\":\n image = image_to_image(args.token, args.prompt, args.file_name, float(args.strength), args.seed)\n return image\n \n elif args.module == \"imageextend\":\n image = 
image_extend(args.token, args.file_name, args.prompt, args.a, args.b, args.seed)\n return image\n elif args.module == \"finetuning_env\":\n fine_tuning_env(args.UNet_Training_Steps, args.UNet_Learning_Rate, args.Text_Encoder_Training_Steps, args.Text_Encoder_Learning_Rate, args.Session_Name, args.INSTANCE_DIR)\n \n elif args.module == \"finetuning\":\n fine_tuning(args.UNet_Training_Steps, args.UNet_Learning_Rate, args.Text_Encoder_Training_Steps, args.Text_Encoder_Learning_Rate, args.Session_Name, args.INSTANCE_DIR)\n \n else:\n print(\"argument module must be 'texttoimage', 'imagetoimage', 'imageextend', 'finetuning_env' or 'finetuning'.\")\n \nif __name__ == \"__main__\":\n main()","repo_name":"WAI-bijetk/WAI_StableDiffusion","sub_path":"code/stable_diffusion_total.py","file_name":"stable_diffusion_total.py","file_ext":"py","file_size_in_byte":25848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"25376853585","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport re\nfrom datetime import datetime\n\n#-----------------------------------------------------------------------------\n# read in an SME file from JHU/APL \n#-----------------------------------------------------------------------------\n\ndef read_ae(filein):\n\n fpin = open(filein, 'r')\n\n # Read header:\n for line in fpin:\n\n m = re.match(r'',line)\n if m:\n break\n\n times = []\n ae = []\n al = []\n au = []\n \n # Read main data\n for line in fpin:\n cols = line.split()\n\n times.append(datetime(int(cols[0]),\n int(cols[1]),\n int(cols[2]),\n int(cols[3]),\n int(cols[4]),\n int(cols[5]),\n 0))\n ae.append(float(cols[6]))\n al.append(float(cols[7]))\n au.append(float(cols[8]))\n\n data = {'time' : times,\n 'ae' : ae,\n 'al' : al,\n 'au' : au}\n \n return data\n \n","repo_name":"aaronjridley/GITM","sub_path":"srcPython/read_ae.py","file_name":"read_ae.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"61"}
{"seq_id":"14076760769","text":"stock_list = []\n\nprint(stock_list)\n\nnumber_added = int(input(\"How many items do you want to add? : \"))\n\nfor add in range(number_added):\n name_added = str(input(\"Item name : \"))\n stock_list.append(name_added)\n\nprint(\"Items in stock:\")\nfor show in stock_list:\n print(show)\n\n\nusername_added = str(input(\"Please type your username? 
: \" \"\\n\"))\nprint(username_added)","repo_name":"BulkyGorilla/python","sub_path":"scratch.py","file_name":"scratch.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11312113335","text":"import hashlib, binascii\r\n\r\nkey = 'bgvyzdsv'\r\nc = 0\r\nwhile True:\r\n m = hashlib.md5()\r\n m.update((key+str(c)).encode('utf-8'))\r\n if (binascii.hexlify(m.digest())[:6] == b'000000'):\r\n print(c)\r\n break\r\n c += 1","repo_name":"maxbergmark/old-work","sub_path":"Egna projekt/AdventOfCode/day4.py","file_name":"day4.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43292215986","text":"# Run simulation (Figure 4 and Figure 5)\n\nimport numpy as np\nimport matplotlib\nmatplotlib.use('TKAgg')\nimport matplotlib.pyplot as plt\n\nimport scipy\nimport scipy.stats\nimport time\n\nCOLORS = ['C0', 'C1']\nfontsize = 25\nlegendsize = 20\nticksize = 17.5\nlinewidth = 2.5\nmarkersize = 10\nmarkeredgewidth = 4\naxissize = 17.5\nMARKERS = ['o','s']\nLINESTYLES = ['solid', (0, (1, 1))]\nLABELS = ['Least squares', 'Induced ranking']\n\n# ============\n# Estimator\n# ============\n# Least-squares estimator implemented by the insertion algorithm\ndef estimate_order_insertion(scores):\n\tn = len(scores)\n\torder = np.array([])\n\n\tfor i in range(0, n): # 1 = second item (0-idxed)\n\t\tthresholds = np.arange(1, i+2) / (i+2)\n\t\tpos = np.argmin(np.abs(scores[i] - thresholds)) # for simplicity, take the smaller position in tie-breaking\n\t\torder = np.insert(order, pos, i)\n\n\treturn np.argsort(order)\n\n# Compute noiseless scores according to the parametric model\n# Returns: \n# \tSCORES: n-array\ndef compute_scores(xs):\n\n\t# Compute the noiseless score of the last item\n\tdef compute_scores_last(xs):\n\t\tn = len(xs)\n\t\tx = xs[-1]\n\t\trank = np.sum(x > xs[:-1]) + np.sum(x == xs[:-1]) *0.5 +1 # 1-indexed rank of last element\n\t\treturn int(rank) / (n+1)\n\n\tn = len(xs)\n\tscores = np.zeros(n)\n\tfor i in range(n):\n\t\tscores[i] = compute_scores_last(xs[:i+1])\n\treturn scores\n\n# ============\n# Dist\n# ============\ndef dist_entrywise(perm, perm_gt):\n\treturn np.abs(scipy.stats.rankdata(perm) - scipy.stats.rankdata(perm_gt))\n\n# Spearmans' footrule (normalized)\ndef dist_footrule(perm, perm_gt):\n\tn = len(perm)\n\treturn np.mean(dist_entrywise(perm, perm_gt)) / n\n\n# ============\n# Simulation\n# ============\n# Plot the Spearman's footrule and maximal entrywise error (Figure 4)\ndef simulate_err():\n\t## Vary n\n\tdelta = 0.1 # noise level\n\tns = np.array([20, 50, 100, 250, 500])\n\tN = len(ns)\n\trepeat = 1000\n\n\terrs_insert_sf = np.zeros((N, repeat))\n\terrs_induce_sf = np.zeros((N, repeat))\n\n\terrs_insert_max_expect = np.zeros((N, 2)) # (mean, std)\n\terrs_induce_max_expect = np.zeros((N, 2)) # (mean, std)\n\n\tidxs_insert = np.zeros(N)\n\tidxs_induce = np.zeros(N)\n\n\ttic = time.time()\n\tprint('===Vary n===')\n\tfor i in range(N):\n\t\tprint('n: %d/%d (%.1f sec)' % (i+1, N, time.time() - tic))\n\t\tn = ns[i]\n\n\t\terrs_insert = np.zeros((n, repeat)) # entrywise\n\t\terrs_induce = np.zeros((n, repeat))\n\n\t\tfor r in range(repeat):\n\t\t\tperm = np.random.permutation(n)\n\t\t\tscores = compute_scores(perm)\n\t\t\tnoise_unit = np.random.uniform(low=-1, high=1, size=n)\n\n\t\t\tys = scores + noise_unit * delta\n\t\t\test_insert = estimate_order_insertion(ys)\n\t\t\test_induce = 
scipy.stats.rankdata(ys)\n\n\t\t\terrs_insert_sf[i, r] = dist_footrule(perm, est_insert)\n\t\t\terrs_induce_sf[i, r] = dist_footrule(perm, est_induce)\n\t\t\terrs_insert[:, r] = dist_entrywise(perm, est_insert)\n\t\t\terrs_induce[:, r] = dist_entrywise(perm, est_induce)\n\n\t\t# aggregate max error\n\t\tmeans_insert, stds_insert = np.mean(errs_insert, axis=1), np.std(errs_insert, axis=1)\n\t\tidx_insert = np.argmax(means_insert)\n\t\terrs_insert_max_expect[i, :] = means_insert[idx_insert] / n, stds_insert[idx_insert] / n\n\n\t\tmeans_induce, stds_induce = np.mean(errs_induce, axis=1), np.std(errs_induce, axis=1)\n\t\tidx_induce = np.argmax(means_induce)\n\t\terrs_induce_max_expect[i, :] = means_induce[idx_induce] / n, stds_induce[idx_induce] / n\n\n\t\tidxs_insert[i] = idx_insert\n\t\tidxs_induce[i] = idx_induce\n\n\n\t## Plot\n\t# Spearman's footrule\n\tfig, ax = plt.subplots()\n\tax.errorbar(ns, np.mean(errs_insert_sf, axis=1), yerr=np.std(errs_insert_sf, axis=1) / np.sqrt(repeat), label=LABELS[0],\n\t\t\t\tmarker=MARKERS[0], linestyle=LINESTYLES[0], color=COLORS[0],\n\t\t\t\tmarkersize=markersize, markeredgewidth=markeredgewidth, linewidth=linewidth)\n\tax.errorbar(ns, np.mean(errs_induce_sf, axis=1), yerr=np.std(errs_induce_sf, axis=1) / np.sqrt(repeat), label=LABELS[1],\n\t\t\t\tmarker=MARKERS[1], linestyle=LINESTYLES[1], color=COLORS[1],\n\t\t\t\tmarkersize=markersize, markeredgewidth=markeredgewidth, linewidth=linewidth)\n\tax.set_xlabel('Number of item (%s)' % r'$n$', fontsize=axissize)\n\tax.set_ylabel(\"Spearman's footrule distance\", fontsize=axissize)\n\n\tax.tick_params(axis='x', labelsize=ticksize)\n\tax.tick_params(axis='y', labelsize=ticksize)\n\tax.set_xticks([20, 50, 100, 250, 500])\n\tax.set_xticklabels([r'$20$', r'$50$', r'$100$', r'$250$', r'$500$'])\n\n\tax.set_ylim([0, None])\n\tax.legend(fontsize=legendsize)\n\tfig.tight_layout()\n\n\t# Maximal entrywise error\n\tfig, ax = plt.subplots()\n\tax.errorbar(ns, errs_insert_max_expect[:, 0], yerr=errs_insert_max_expect[:, 1] / np.sqrt(repeat), label='insert',\n\t\t\t\tmarker=MARKERS[0], linestyle=LINESTYLES[0], color=COLORS[0], \n\t\t\t\tmarkersize=markersize, markeredgewidth=markeredgewidth, linewidth=linewidth)\n\tax.errorbar(ns, errs_induce_max_expect[:, 0], yerr=errs_induce_max_expect[:, 1] / np.sqrt(repeat), label='induce',\n\t\t\t\tmarker=MARKERS[1], linestyle=LINESTYLES[1], color=COLORS[1],\n\t\t\t\tmarkersize=markersize, markeredgewidth=markeredgewidth, linewidth=linewidth)\n\tax.set_xlabel('Number of item (%s)' % r'$n$', fontsize=axissize)\n\tax.set_ylabel('Maximum entry-wise error', fontsize=axissize)\n\n\tax.tick_params(axis='x', labelsize=ticksize)\n\tax.tick_params(axis='y', labelsize=ticksize)\n\tax.set_xticks([20, 50, 100, 250, 500])\n\tax.set_xticklabels([r'$20$', r'$50$', r'$100$', r'$250$', r'$500$'])\n\n\tax.set_ylim([0, None])\n\tfig.tight_layout()\n\n\tplt.show()\n\n\t######\n\t## Vary delta\n\tdeltas = [0.02, 0.05, 0.1, 0.2]\n\tD = len(deltas)\n\tn = 100\n\trepeat = 1000\n\n\terrs_insert_sf = np.zeros((D, repeat))\n\terrs_induce_sf = np.zeros((D, repeat))\n\terrs_insert_entrywise = np.zeros((D, n, repeat))\n\terrs_induce_entrywise = np.zeros((D, n, repeat))\n\n\tidxs_insert = np.zeros(D)\n\tidxs_induce = np.zeros(D)\n\n\tprint('===Vary delta===')\n\tfor idd in range(D):\n\t\tprint('delta: %d/%d (%.1f sec)' % (idd+1, D, time.time() - tic))\n\t\tdelta = deltas[idd]\n\n\t\tfor r in range(repeat):\n\t\t\tperm = np.random.permutation(n)\n\t\t\tscores = compute_scores(perm)\n\t\t\tnoise_unit = 
np.random.uniform(low=-1, high=1, size=n)\n\n\t\t\tys = scores + noise_unit * delta\n\t\t\test_insert = estimate_order_insertion(ys)\n\t\t\test_induce = scipy.stats.rankdata(ys)\n\n\t\t\terrs_insert_sf[idd, r] = dist_footrule(perm, est_insert)\n\t\t\terrs_induce_sf[idd, r] = dist_footrule(perm, est_induce)\n\t\t\terrs_insert_entrywise[idd, :, r] = dist_entrywise(perm, est_insert)\n\t\t\terrs_induce_entrywise[idd, :, r] = dist_entrywise(perm, est_induce)\n\n\t## Plot\n\t# Spearman's footrule\n\tfig, ax = plt.subplots()\n\tax.errorbar(deltas, np.mean(errs_insert_sf, axis=1), yerr=np.std(errs_insert_sf, axis=1) / np.sqrt(repeat), label='insert',\n\t\t\t\tmarker=MARKERS[0], linestyle=LINESTYLES[0], color=COLORS[0],\n\t\t\t\tmarkersize=markersize, markeredgewidth=markeredgewidth, linewidth=linewidth)\n\tax.errorbar(deltas, np.mean(errs_induce_sf, axis=1), yerr=np.std(errs_induce_sf, axis=1) / np.sqrt(repeat), label='induce',\n\t\t\t\tmarker=MARKERS[1], linestyle=LINESTYLES[1], color=COLORS[1],\n\t\t\t\tmarkersize=markersize, markeredgewidth=markeredgewidth, linewidth=linewidth)\n\tax.set_xlabel('Noise level (%s)' % r'$\\delta$', fontsize=axissize)\n\tax.set_ylabel(\"Spearman's footrule distance\", fontsize=axissize)\n\n\tax.tick_params(axis='x', labelsize=ticksize)\n\tax.tick_params(axis='y', labelsize=ticksize)\n\tax.set_xticks([0.02, 0.05, 0.1, 0.2])\n\tax.set_xticklabels([r'$0.02$', r'$0.05$', r'$0.1$', r'$0.2$'])\n\n\tax.set_ylim([0, None])\n\tfig.tight_layout()\n\n\t# Maximal entrywise error\n\tfig, ax = plt.subplots()\n\tmeans_insert = np.mean(errs_insert_entrywise, axis=2) # D x n\n\tmeans_induce = np.mean(errs_induce_entrywise, axis=2)\n\n\terrs_insert = np.zeros((D, repeat))\n\terrs_induce = np.zeros((D, repeat))\n\tfor idd in range(D):\n\t\tidx_insert = np.argmax(means_insert[idd, :])\n\t\tidx_induce = np.argmax(means_induce[idd, :])\n\n\t\tidxs_insert[idd] = idx_insert\n\t\tidxs_induce[idd] = idx_induce\n\n\t\terrs_insert[idd, :] = errs_insert_entrywise[idd, idx_insert, :] / n\n\t\terrs_induce[idd, :] = errs_induce_entrywise[idd, idx_induce, :] / n\n\n\tax.errorbar(deltas, np.mean(errs_insert, axis=1), yerr=np.std(errs_insert, axis=1) / np.sqrt(repeat), label=LABELS[0],\n\t\t\t\tmarker=MARKERS[0], linestyle=LINESTYLES[0], color=COLORS[0],\n\t\t\t\tmarkersize=markersize, markeredgewidth=markeredgewidth, linewidth=linewidth)\n\tax.errorbar(deltas, np.mean(errs_induce, axis=1), yerr=np.std(errs_induce, axis=1) / np.sqrt(repeat), label=LABELS[1],\n\t\t\t\tmarker=MARKERS[1], linestyle=LINESTYLES[1], color=COLORS[1],\n\t\t\t\tmarkersize=markersize, markeredgewidth=markeredgewidth, linewidth=linewidth)\n\n\t(handles, labels) = ax.get_legend_handles_labels()\n\tax.set_xlabel('Noise level (%s)' % r'$\\delta$', fontsize=axissize)\n\tax.set_ylabel('Maximum entry-wise error', fontsize=axissize)\n\n\tax.tick_params(axis='x', labelsize=ticksize)\n\tax.tick_params(axis='y', labelsize=ticksize)\n\tax.set_xticks([0.02, 0.05, 0.1, 0.2])\n\tax.set_xticklabels([r'$0.02$', r'$0.05$', r'$0.1$', r'$0.2$'])\n\n\tax.set_ylim([0, None])\n\tfig.tight_layout()\n\n\tplt.show()\n\n# Plot error at each individual position (Figure 5)\ndef simulate_err_per_item():\n\tdelta = 0.1\n\tn = 100\n\trepeat = 1000\n\n\terrs_induce = np.zeros((n, repeat))\n\terrs_insert = np.zeros((n, repeat))\n\n\ttic = time.time()\n\tfor r in range(repeat):\n\t\tperm = np.random.permutation(n)\n\t\tscores = compute_scores(perm)\n\n\t\tnoise_unit = np.random.uniform(low=-1, high=1, size=n)\n\t\tys = scores + noise_unit * 
delta\n\n\t\test_insert = estimate_order_insertion(ys)\n\t\test_induce = scipy.stats.rankdata(ys)\n\n\t\terrs_insert[:, r] = np.abs(est_insert - perm) / n\n\t\terrs_induce[:, r] = np.abs(est_induce - perm) / n\n\n\t# Plot error per item\n\tfig, ax = plt.subplots()\n\tax.errorbar(np.arange(n)+1, np.mean(errs_insert, axis=1),\n\t\t\t\tlinestyle=LINESTYLES[0], linewidth=linewidth,\n\t\t\t\tcolor=COLORS[0], label=LABELS[0])\n\tax.errorbar(np.arange(n)+1, np.mean(errs_induce, axis=1),\n\t\t\t\tlinestyle=LINESTYLES[1], color=COLORS[1], linewidth=linewidth,\n\t\t\t\tlabel=LABELS[1])\n\n\tax.set_xlabel('Position (%s)' % r'$t$', fontsize=axissize)\n\tax.set_ylabel('Entry-wise error', fontsize=axissize)\n\n\tax.tick_params(axis='x', labelsize=ticksize)\n\tax.tick_params(axis='y', labelsize=ticksize)\n\n\tax.set_xlim(0, n+1)\n\tax.set_ylim([0, None])\n\tax.legend(fontsize=legendsize)\n\tfig.tight_layout()\n\n\tprint('Position w/ maximal error (1-idxed): %d' % (np.argmax(np.mean(errs_insert, axis=1))+1))\n\n\tplt.show()\n\n\nif __name__ == '__main__':\n\tnp.random.seed(0)\n\tsimulate_err()\n\tsimulate_err_per_item()\n","repo_name":"jingyanw/sequential-bias","sub_path":"simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":10295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25381198062","text":"#!/usr/bin/env python3\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\nfrom pprint import pprint\nimport time\nimport signal\n\nimport rospy\nfrom duckietown.dtros import DTROS, NodeType, TopicType\nfrom duckietown_msgs.msg import LanePose, Twist2DStamped, Pose2DStamped # type: ignore\n\nclass DataCollectorNode(DTROS):\n def __init__(self, node_name):\n super(DataCollectorNode, self).__init__(node_name=node_name, node_type=NodeType.PERCEPTION)\n self.namespace = rospy.get_namespace()\n self.rate = rospy.Rate(2.5)\n self.lane_pose_sub = rospy.Subscriber(str(self.namespace + \"lane_filter_node/lane_pose\"), LanePose, self.lane_pose_cb)\n self.pose_sub = rospy.Subscriber(str(self.namespace + \"velocity_to_pose_node/pose\"), Pose2DStamped, self.pose_cb)\n #self.wheels_cmd_sub = rospy.Subscriber(str(self.namespace + \"wheels_driver_node/wheels_cmd_executed\"), WheelsCmdStamped, self.wheels_cmd_cb)\n self.rl_agent_pub = rospy.Publisher(str(self.namespace + \"joy_mapper_node/car_cmd\"), Twist2DStamped, queue_size=1)\n # register the interrupt signal handler\n signal.signal(signal.SIGINT, self.shutdown)\n self.lane_pose = [0, 0]\n self.pose = [0, 0, 0]\n self.vel_cmd = [0, 0]\n self.lane_d_buffer = [0, 0, 0, 0, 0]\n self.lane_phi_buffer = [0, 0, 0, 0, 0]\n self.is_shutdown = False\n self.dt = 0.2\n\n def lane_pose_cb(self, msg):\n actual_dist = np.round(msg.d, 2)\n actual_angle = np.round(msg.phi, 2)\n self.lane_d_buffer.append(actual_dist)\n self.lane_phi_buffer.append(actual_angle)\n self.lane_d_buffer.pop(0)\n self.lane_phi_buffer.pop(0)\n actual_dist = np.mean(self.lane_d_buffer)\n self.lane_pose = actual_dist, actual_angle\n\n def pose_cb(self, msg):\n pose_x, pose_y, pose_theta = msg.x, msg.y, msg.theta\n self.pose = [pose_x, pose_y, pose_theta]\n\n \"\"\"def wheels_cmd_cb(self, msg):\n self.last_wheels_cmd = [msg.vel_left, msg.vel_right]\"\"\"\n\n def assemble_data(self):\n data = [self.lane_pose[0], self.lane_pose[1], self.vel_cmd[0], self.vel_cmd[1]]\n return data\n \n def update_dt(self, dt):\n self.dt = dt\n\n def shutdown(self, signal, frame):\n # wheels_cmd_msg = WheelsCmdStamped(vel_left=0, 
vel_right=0)\n twist_msg = Twist2DStamped(v=0, omega=0)\n self.rl_agent_pub.publish(twist_msg)\n rospy.logerr(\"[DataCollector] Shutdown complete.\")\n time.sleep(1)\n self.is_shutdown = True\n \nif __name__ == '__main__':\n data_collector_node = DataCollectorNode(node_name='data_collector_node')\n rospy.wait_for_message(str(data_collector_node.namespace + \"lane_filter_node/lane_pose\"), LanePose)\n with open(\"data.txt\", \"w\") as f:\n f.write(\"\")\n rospy.loginfo(\"[Collector] Ready to collect data.\")\n previous_state = data_collector_node.assemble_data()\n # while singal is not interrupted\n while not data_collector_node.is_shutdown:\n # publish random velocities\n time_before = time.time()\n new_v = np.random.uniform(0.15, 0.4)\n new_omega = np.random.uniform(-3, 3)\n velocities = np.array([new_v, new_omega])\n velocities = np.round(velocities, 2)\n twist_msg = Twist2DStamped()\n twist_msg.v = velocities[0]\n twist_msg.omega = velocities[1]\n twist_msg.header.stamp = rospy.get_rostime()\n data_collector_node.vel_cmd = velocities\n data_collector_node.rl_agent_pub.publish(twist_msg)\n print(\"Published: \", velocities)\n data_collector_node.rate.sleep()\n time_after = time.time()\n data_collector_node.update_dt(time_after - time_before)\n # collect data\n current_state = data_collector_node.assemble_data()[:2]\n previous_state.append(data_collector_node.dt)\n print(\"previous state: \", previous_state)\n print(\"current state: \", current_state)\n # save data to file\n print(\"Saving data to file...\")\n with open(\"data.txt\", \"a\") as f:\n f.write(str(previous_state) + \"/\" + str(current_state) + \"\\n\")\n print(\"Data saved.\") \n previous_state = data_collector_node.assemble_data()\n new_rate = np.random.uniform(0.5, 2.5)\n data_collector_node.rate = rospy.Rate(new_rate)\n # stopping for next action\n twist_msg = Twist2DStamped()\n twist_msg.v = 0\n twist_msg.omega = 0\n twist_msg.header.stamp = rospy.get_rostime()\n data_collector_node.rl_agent_pub.publish(twist_msg)","repo_name":"Janst1000/Safe-RL-Duckietown","sub_path":"packages/rl_agent/src/collect_data.py","file_name":"collect_data.py","file_ext":"py","file_size_in_byte":4580,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"36064998306","text":"#Asked by microsoft and facebook \n#sove by time complexity 0(n)\n\n# https://www.hackerearth.com/practice/interviews/\n\narr = [2,1,0,2,0] #list(map(int,input().split(',')))\n# print(sorted(arr))\narr_new = []\nfor i in range(len(arr)):\n if arr[0] >= arr[-1]:\n arr_new.append(arr[0])\n arr.pop(0)\n else:\n arr_new.append(arr[-1])\n arr.pop(-1)\nprint(arr_new)\n#solve using for loop, while loop and use function\n","repo_name":"chethanbr86/Python_programs","sub_path":"hackerearth/sort_it_up.py","file_name":"sort_it_up.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72330796675","text":"import os\nfrom datetime import timedelta\n\nfrom celery import Celery\nfrom django.conf import settings\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'core.settings')\napp = Celery('core')\n\napp.autodiscover_tasks()\nif settings.DEBUG:\n app.conf.broker_url = \"redis://127.0.0.1:6379\"\n app.conf.result_backend = \"redis://127.0.0.1:6379\"\nelse:\n app.conf.broker_url = \"redis://redis:6379\"\n app.conf.result_backend = \"redis://redis:6379\"\napp.conf.task_serializer = 'json'\napp.conf.result_serializer = 
'pickle'\napp.conf.accept_content = ['json', 'pickle']\napp.conf.result_expires = timedelta(days=1)\napp.conf.task_always_eager = False\napp.conf.worker_prefetch_multiplier = 4\napp.conf.beat_schedule = {\n 'fetch-all-data-every-30-minutes': {\n 'task': 'main_module.tasks.fetch_full_data',\n 'schedule': 60 * 60,\n },\n}\napp.conf.timezone = 'UTC'\n","repo_name":"amirdks/web_scraping_project","sub_path":"core/celery_conf.py","file_name":"celery_conf.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72340447874","text":"from os import environ\nfrom threading import Thread\n\nfrom sendgrid import SendGridAPIClient\nfrom sendgrid.helpers.mail import Email, To, Content, Mail\n\nfrom flask import render_template\n\nimport logging\nlogger = logging.getLogger('app.mail')\n\nSENDGRID_API_KEY = environ.get('SENDGRID_API_KEY')\nSOURCE_EMAIL = environ.get('SOURCE_EMAIL')\nTARGET_EMAIL = environ.get('TARGET_EMAIL')\n\ndef _send_via_sendgrid(subject, body):\n sg = SendGridAPIClient()\n from_email = Email(SOURCE_EMAIL)\n to_email = To(TARGET_EMAIL)\n content = Content(\"text/plain\", body)\n email = Mail(from_email, to_email, subject, content)\n response = sg.client.mail.send.post(request_body=email.get())\n logger.info(response.status_code)\n logger.info(response.body)\n logger.info(response.headers)\n\ndef mailSend(subject, body):\n _send_via_sendgrid(subject, body)\n # thr = Thread(target=_send_via_sendgrid, args=[body])\n # thr.start()\n # return thr\n\ndef mailFormat(data):\n return render_template('email/order.txt', data=data);","repo_name":"Fen1kz/outdoors","sub_path":"src/util/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"492529899","text":"PLACEHOLDER = \"[name]\"\n\nwith open(\"./Input/Names/invited_names.txt\", \"r\") as names:\n names_contents = names.readlines()\n print(names_contents)\n\nfor name in names_contents:\n stripped_name = name.strip()\n file_name = f\"letter_for_{stripped_name}.txt\"\n\n with open(\"./Input/Letters/starting_letter.txt\", \"r\") as letter:\n letter_contents = letter.read()\n letter_contents = letter_contents.replace(PLACEHOLDER, str(stripped_name))\n\n with open(f\"./Output/ReadyToSend/{file_name}\", \"w\") as file:\n file.write(letter_contents)\n","repo_name":"PikePullen/mailMerge","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29388972451","text":"# coding: utf-8\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import unicode_literals\nfrom __future__ import absolute_import\n\nfrom testinfra.backend import base\n\n\nclass KubectlBackend(base.BaseBackend):\n NAME = \"kubectl\"\n\n def __init__(self, name, *args, **kwargs):\n self.name = name\n self.container = 
kwargs.get('container')\n self.namespace = kwargs.get('namespace')\n super(KubectlBackend, self).__init__(self.name, *args, **kwargs)\n\n def run(self, command, *args, **kwargs):\n cmd = self.get_command(command, *args)\n # `kubectl exec` does not support specifying the user to run as.\n # See https://github.com/kubernetes/kubernetes/issues/30656\n kcmd = 'kubectl '\n kcmd_args = []\n if self.namespace is not None:\n kcmd += '-n %s '\n kcmd_args.append(self.namespace)\n if self.container is not None:\n kcmd += '-c %s '\n kcmd_args.append(self.container)\n kcmd += 'exec %s -- /bin/sh -c %s'\n kcmd_args.extend([self.name, cmd])\n out = self.run_local(kcmd, *kcmd_args)\n return out\n","repo_name":"amitvashist7/ansible-development-CTS","sub_path":"molecule/my_env/lib/python2.7/site-packages/testinfra/backend/kubectl.py","file_name":"kubectl.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"22025213115","text":"import boto3\n\nTABLE_NAME = \"audio_table\"\n\ndynamodb = boto3.resource(\n \"dynamodb\",\n endpoint_url=\"http://localhost:8000\",\n region_name=\"us-east1\",\n aws_access_key_id=\"dummy_access_key\",\n aws_secret_access_key=\"dummy_secret_key\",\n verify=False,\n)\n\n\nresponse = dynamodb.create_table(\n TableName=TABLE_NAME,\n AttributeDefinitions=[{\"AttributeName\": \"audio_id\", \"AttributeType\": \"S\"}],\n KeySchema=[{\"AttributeName\": \"audio_id\", \"KeyType\": \"HASH\"}],\n ProvisionedThroughput={\"ReadCapacityUnits\": 1, \"WriteCapacityUnits\": 1},\n)\n","repo_name":"Alexoidozaver/AudioProcessing","sub_path":"app/dynamodb.py","file_name":"dynamodb.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43121602744","text":"import threading\nimport os.path\nimport time\nfrom tcp import TCP\nfrom udp import UDP\n\nENCODING = 'utf-8'\n\n\nclass TransferManager:\n\n def __init__(self, directory, max_clients, listener_info):\n self.directory = directory\n self.max_clients = max_clients\n self.tcp_server = TCP()\n self.udp_response = UDP(0)\n self.number_of_clients = 0\n self.listener_info = listener_info\n\n def send_file(self, receiver, file_name):\n if os.path.isfile(self.directory + file_name):\n if self.number_of_clients < self.max_clients:\n self.number_of_clients += 1\n\n server_info = self.tcp_server.get_host_tcp_info()\n message = 'server\\n' + server_info[0] + ' ' + str(server_info[1]) + '\\n' + file_name + '\\n' \\\n + self.listener_info[0] + ' ' + str(self.listener_info[1])\n data = message.encode(ENCODING)\n\n sender_thread = threading.Thread(target=self.tcp_server.listen, args=(self.directory + file_name,))\n sender_thread.start()\n self.udp_response.send(data, receiver)\n\n sender_thread.join()\n print(file_name, \"is sent\")\n self.number_of_clients -= 1\n else:\n print(\"Maximum number of clients exceeded!\")\n else:\n print(\"Requested file doesn't exist!\")\n\n def receive_file(self, server_address, file_name):\n TCP.receive_file(self.directory + file_name, server_address)\n print(file_name, \"is received from\", server_address)\n\n def stop(self):\n time.sleep(1)\n self.tcp_server.close_server()\n 
self.udp_response.close()\n","repo_name":"matinaghaei/Netwolf","sub_path":"transfer_manager.py","file_name":"transfer_manager.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"11697186899","text":"# https://www.acmicpc.net/problem/1644\n# sum of consecutive primes\n\nn = int(input())\n\nis_prime = [True] * (n + 1)\n\nfor i in range(2, int(n**0.5) + 1):\n if is_prime[i]:\n j = 2\n while i * j <= n:\n is_prime[i * j] = False\n j += 1\n\nprimes = [i for i in range(2, n + 1) if is_prime[i]]\n\ntotal, answer = 0, 0\nstart = 0\nfor end in range(len(primes)):\n total += primes[end]\n\n while total > n and start < len(primes):\n total -= primes[start]\n start += 1\n\n if total == n:\n answer += 1\n total -= primes[start]\n start += 1\n if start >= len(primes) or start > end:\n break\n\nprint(answer)\n","repo_name":"harryjhnam/CodingInterview","sub_path":"BOJ/1644.py","file_name":"1644.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"71759616833","text":"#!/usr/bin/env python\nimport roslib\nimport rospy\nimport socket\nimport time\nimport math\nimport numpy\nfrom ackermann_msgs.msg import AckermannDrive\n\ntest_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\ntarget_x = 1\ntarget_y = 0\nmax_angle = 0.5 # the max angle is 0.5\nmax_speed = 1 # the max speed is 1\nk_p_speed = 1.5\nk_p_angle = 0.5\nv0 = 0\nackermann = AckermannDrive()\nackermann.speed = 0.0\nackermann.steering_angle = 0.0\nackermann_cmd_pub = rospy.Publisher('/tianracer/ackermann_cmd', AckermannDrive, queue_size=5)\ns = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\ns.connect((\"127.0.0.1\", 6666))\n\ndef get_goal_position():\n \"\"\"\n read the goal position from the socket stream\n \"\"\"\n msgs = s.recv(1000).replace('(', '').replace(')', '').replace(' ', '').split(',') \n x = float(msgs[4])\n y = float(msgs[2])\n return x, y\n\ndef get_v0(history_distance, T):\n \"\"\"\n estimate the leading car's velocity from its distance history\n \"\"\"\n if -1 in history_distance:\n return 0\n else:\n v_first_car = (history_distance[-1] - history_distance[0]) * T + ackermann.speed\n return v_first_car\n\ndef is_zero(test_number, max_number, tolerance):\n if abs(test_number)/max_number < tolerance:\n return True\n else:\n return False\n\ndef get_command(x, y, v0):\n \"\"\"\n position control\n \"\"\"\n dx = x - target_x\n dy = y - target_y\n if dx > 0:\n d_theta = -y / x\n else:\n d_theta = y / x\n if d_theta > max_angle:\n angle = max_angle\n elif d_theta < -max_angle:\n angle = -max_angle\n else:\n angle = d_theta * k_p_angle\n # the max angle is 0.5\n d_speed = dx * k_p_speed + v0\n #d_speed = (math.sqrt(x*x+y*y)-1)*k_p_speed + v0\n if d_speed > max_speed:\n speed = max_speed\n elif d_speed < -max_speed:\n speed = -max_speed\n else:\n speed = d_speed\n\n if (not is_zero(angle, max_angle, 0.1)) and is_zero(speed, max_speed, 0.05):\n speed = -0.5\n # the max speed is 1\n return speed, angle\n\nif __name__ == \"__main__\":\n rospy.init_node('ros_talker')\n cnt = 0\n T = 20\n r = rospy.Rate(T)\n f = open('/home/tianbot/tianbot_ws/src/tianracer/tianracer_test/scripts/test.txt','a')\n f.write('\\n%s' % test_time)\n #history_distance = [-1,-1,-1]\n try:\n #f.write('\\n%s' % test_time)\n while not rospy.is_shutdown():\n x1, y1 = get_goal_position()\n # r.sleep()\n x2, y2 = get_goal_position()\n x = (x1 + x2) / 2\n y = (y1 + y2) / 2\n #history_distance.append(x)\n #history_distance.pop(0)\n #v0 = get_v0(history_distance, T)\n 
speed, angle = get_command(x, y, v0)\n            ackermann.speed = speed\n            ackermann.steering_angle = angle\n            #rospy.loginfo(\"x, y, speed, angle: %s, %s, %s, %s\" % (x, y, speed, angle))\n            ackermann_cmd_pub.publish(ackermann)\n            cnt += 1\n            print(cnt,x,y,speed,angle)\n            f.write('\\nx, y, speed, angle: %s, %s, %s, %s' % (x, y, speed, angle))\n            # r.sleep()\n\n\n    except Exception as e:\n        print(e)\n\n    finally:\n        ackermann = AckermannDrive()\n        ackermann.speed = 0.0\n        ackermann.steering_angle = 0.0\n        ackermann_cmd_pub.publish(ackermann)\n        s.close()\n        f.close()\n","repo_name":"JackChengj/Multi-vehicle-control","sub_path":"motion_test/script/follow_car.py","file_name":"follow_car.py","file_ext":"py","file_size_in_byte":3288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22548843198","text":"#importing the necessary libraries\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ntrain_data = pd.read_csv('Train.csv')\r\ntest_data = pd.read_csv('Test.csv')\r\n\r\ntrain_data.info()\r\ntest_data.info()\r\n\r\ntrain_data.head()\r\ntest_data.head()\r\n\r\n#creating copies of the dataset\r\ntrain_copy = train_data.copy()\r\ntest_copy = test_data.copy()\r\n\r\ntrain_copy.info()\r\ntrain_copy.head()\r\n\r\n#homogenizing the dataset\r\ntrain_copy['X_12']=train_copy['X_12'].fillna(0).astype('int')\r\ntest_copy['X_12']=test_copy['X_12'].fillna(0).astype('int')\r\n\r\nfrom scipy.stats import mode\r\ntrain_copy['X_12'].fillna(mode(train_copy['X_12']).mode[0], inplace=True)\r\ntest_copy['X_12'].fillna(mode(test_copy['X_12']).mode[0], inplace=True)\r\n\r\n\r\n#checking if our dataset still contains null values\r\nprint(train_copy.isnull().values.sum())\r\nprint(test_copy.isnull().values.sum())\r\n\r\nprint(train_copy['INCIDENT_ID'].value_counts().sum())\r\nprint(test_copy['INCIDENT_ID'].value_counts().sum())\r\n\r\nfrom sklearn.model_selection import train_test_split\r\n#casting the categorical variable to category dtype to make processing easy\r\ntrain_copy['INCIDENT_ID']=train_copy['INCIDENT_ID'].astype('category')\r\ntrain_copy.info()\r\ntest_copy['INCIDENT_ID']=test_copy['INCIDENT_ID'].astype('category')\r\ntest_copy.info()\r\n\r\n#Label Encoding\r\nfrom sklearn.preprocessing import LabelEncoder\r\nlnc = LabelEncoder()\r\ntrain_copy['INCIDENT_ID'] = lnc.fit_transform(train_copy['INCIDENT_ID'])\r\ntrain_copy['INCIDENT_ID'].head()\r\ntrain_copy=train_copy.drop(['DATE'], axis=1)\r\ntrain_copy.info()\r\n\r\nX = train_copy.iloc[:, :-1].values\r\ny = train_copy.iloc[:, -1].values\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)\r\n#similarly for the test dataset\r\ntest_copy['INCIDENT_ID'] = lnc.fit_transform(test_copy['INCIDENT_ID'])\r\ntest_copy['INCIDENT_ID'].head()\r\ntest_copy=test_copy.drop(['DATE'], axis=1)\r\n\r\n#using the Decision Tree algorithm\r\nfrom sklearn.tree import DecisionTreeRegressor\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.model_selection import train_test_split\r\n\r\ntest_copy.info()\r\nregressor = DecisionTreeRegressor(random_state = 0)\r\nregressor.fit(X, y)\r\n\r\nY_pred = regressor.predict(test_copy)\r\ndt = DecisionTreeClassifier()\r\ndt.fit(X_train, y_train)\r\ny_pred = dt.predict(test_copy)\r\n\r\n\r\nprint(y_pred)\r\n\r\na=test_data['INCIDENT_ID']\r\n\r\nsolution = {'INCIDENT_ID': a, 'MULTIPLE_OFFENSE': Y_pred}\r\n\r\ndf = pd.DataFrame(solution)\r\ndf.head()\r\ndf.shape\r\ntype(df)\r\n
df.to_csv('solfile.csv',columns=['INCIDENT_ID','MULTIPLE_OFFENSE'],index=False)\r\ndataset=pd.read_csv('solfile.csv',index_col=0)\r\ndataset.info()\r\n\r\n\r\nt_m = train_data['MULTIPLE_OFFENSE'].iloc[:15903]\r\nfrom sklearn.metrics import confusion_matrix\r\nresult = confusion_matrix(t_m,Y_pred)\r\nprint(result)\r\n\r\n\r\n\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.metrics import classification_report\r\nrecall=accuracy_score(t_m, Y_pred)\r\nprint(recall)\r\n\r\nclssfi = classification_report(t_m, Y_pred)\r\nprint(clssfi)","repo_name":"shrey0506/Online_Malicious_hacker_attack","sub_path":"final_solution.py","file_name":"final_solution.py","file_ext":"py","file_size_in_byte":3018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71232379073","text":"from django import template\n\nregister = template.Library()\n\n# Use the link below to know more about the custom template tags.\n# https://docs.djangoproject.com/en/4.1/howto/custom-template-tags/\n\n@register.filter\ndef ingredients_are_available(order):\n    ingredients = order.order_ingredients.all()\n    for ingredient in ingredients:\n        if ingredient.quantity==0:\n            return False\n    return True","repo_name":"BarriBarri20/e-choffee-shop","sub_path":"coffeshop/templatetags/ingredients_are_available.py","file_name":"ingredients_are_available.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25755783253","text":"from flask import Flask, render_template\nfrom scripts.MEM import MEMM\n\napp = Flask(__name__)\n\nclassifier = MEMM()\nclassifier.load_model()\n\n@app.route('/')\n@app.route('/index')\ndef index():\n    return render_template('index.html')\n\n@app.route('/query/<sentence>')\ndef query(sentence):\n    sentence = sentence.replace(\"%20\", \" \")\n    words = sentence.strip().replace(\".\", \"\").split(\" \")\n    pdists = classifier.predict(sentence)\n\n    return render_template('query.html', sentence=sentence, words=words, pdists=pdists)\n\nif __name__ == \"__main__\":\n    app.run(debug=True)\n\n\n# DEBUG\n# Chrome - Local webserver access denied\n# Fix: GOTO chrome://net-internals/#sockets and flush socket pools\n# chrome://net-internals/#sockets","repo_name":"K4Lok/nlp-ner-web-application","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8708360114","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Aug 2 13:49:44 2020\r\n\r\n@author: foxbee\r\n\"\"\"\r\n\r\nimport glassdoor_scraper as gs\r\npath = \"C:/Users/foxbee/Documents/ds_salary_proj/chromedriver\"\r\n\r\ndf = gs.get_jobs('data scientist', 1000, False, path, 5)\r\n\r\ndf.to_csv('Salary_Data.csv', index = False)\r\n","repo_name":"SandeepYadav-05/Data_Scientist_job_profile_salary_estimation_of_US","sub_path":"Data_Collection.py","file_name":"Data_Collection.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73834652354","text":"# -*- coding: utf-8 -*-\n\"\"\"\nClass to study households' income distribution, for validation purposes, based on Wealth and Assets Survey data.\n\n@author: Adrian Carro\n\"\"\"\n\nfrom __future__ import division\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef readResults(file_name, _start_time, _end_time):\n    
\"\"\"Read micro-data from file_name, structured on a separate line per year. In particular, read from start_year until\n end_year, both inclusive\"\"\"\n # Read list of float values, one per household\n data_float = []\n with open(file_name, \"r\") as _f:\n for line in _f:\n if _start_time <= int(line.split(',')[0]) <= _end_time:\n for column in line.split(',')[1:]:\n data_float.append(float(column))\n return data_float\n\n\n# Set control variables and addresses. Note that available variables to print and plot are \"GrossTotalIncome\",\n# \"NetTotalIncome\", \"GrossRentalIncome\", \"NetRentalIncome\", \"GrossNonRentIncome\" and \"NetNonRentIncome\"\nprintResults = False\nplotResults = True\nstart_time = 1000\nend_time = 2000\nmin_log_income_bin_edge = 4.0\nmax_log_income_bin_edge = 12.25\nvariableToPlot = \"GrossNonRentIncome\"\nrootData = r\"\" # ADD HERE PATH TO WAS DATA FOLDER\nrootResults = r\"\" # ADD HERE PATH TO RESULTS FOLDER\n\n# Read Wealth and Assets Survey data for households\nchunk = pd.read_csv(rootData + r\"/was_wave_3_hhold_eul_final.dta\", usecols={\"w3xswgt\", \"DVTotGIRw3\", \"DVTotNIRw3\",\n \"DVGrsRentAmtAnnualw3_aggr\",\n \"DVNetRentAmtAnnualw3_aggr\"})\n\n# List of household variables currently used\n# DVTotGIRw3 Household Gross Annual (regular) income\n# DVTotNIRw3 Household Net Annual (regular) income\n# DVGrsRentAmtAnnualw3_aggr Household Gross Annual income from rent\n# DVNetRentAmtAnnualw3_aggr Household Net Annual income from rent\n\n# Rename columns to be used and add all necessary extra columns\nchunk.rename(columns={\"w3xswgt\": \"Weight\"}, inplace=True)\nchunk.rename(columns={\"DVTotGIRw3\": \"GrossTotalIncome\"}, inplace=True)\nchunk.rename(columns={\"DVTotNIRw3\": \"NetTotalIncome\"}, inplace=True)\nchunk.rename(columns={\"DVGrsRentAmtAnnualw3_aggr\": \"GrossRentalIncome\"}, inplace=True)\nchunk.rename(columns={\"DVNetRentAmtAnnualw3_aggr\": \"NetRentalIncome\"}, inplace=True)\nchunk[\"GrossNonRentIncome\"] = chunk[\"GrossTotalIncome\"] - chunk[\"GrossRentalIncome\"]\nchunk[\"NetNonRentIncome\"] = chunk[\"NetTotalIncome\"] - chunk[\"NetRentalIncome\"]\n\n# Filter down to keep only columns of interest\nchunk = chunk[[\"GrossTotalIncome\", \"NetTotalIncome\", \"GrossRentalIncome\", \"NetRentalIncome\",\n \"GrossNonRentIncome\", \"NetNonRentIncome\", \"Weight\"]]\n\n# Filter out the 1% with highest GrossTotalIncome and the 1% with lowest NetTotalIncome\none_per_cent = int(round(len(chunk.index) / 100))\nchunk_ord_by_net = chunk.sort_values(\"NetTotalIncome\")\nchunk_ord_by_gross = chunk.sort_values(\"GrossTotalIncome\")\nmin_net_total_income = chunk_ord_by_net.iloc[one_per_cent][\"NetTotalIncome\"]\nmax_gross_total_income = chunk_ord_by_gross.iloc[-one_per_cent][\"GrossTotalIncome\"]\nchunk = chunk[chunk[\"NetTotalIncome\"] >= min_net_total_income]\nchunk = chunk[chunk[\"GrossTotalIncome\"] <= max_gross_total_income]\n\n# If printing data to files is required, histogram data and print results\nif printResults:\n number_of_bins = int(max_log_income_bin_edge - min_log_income_bin_edge) * 4 + 2\n income_bin_edges = np.linspace(min_log_income_bin_edge, max_log_income_bin_edge, number_of_bins)\n income_bin_widths = [b - a for a, b in zip(income_bin_edges[:-1], income_bin_edges[1:])]\n for name in [\"GrossTotalIncome\", \"NetTotalIncome\",\n \"GrossRentalIncome\", \"NetRentalIncome\",\n \"GrossNonRentIncome\", \"NetNonRentIncome\"]:\n frequency = np.histogram(np.log(chunk[chunk[name] > 0.0][name].values), bins=income_bin_edges,\n density=True, 
weights=chunk[chunk[name] > 0.0][\"Weight\"].values)[0]\n with open(name + \"-Weighted.csv\", \"w\") as f:\n f.write(\"# \" + name + \" (lower edge), \" + name + \" (upper edge), Probability\\n\")\n for element, lowerEdge, upperEdge in zip(frequency, income_bin_edges[:-1], income_bin_edges[1:]):\n f.write(\"{}, {}, {}\\n\".format(lowerEdge, upperEdge, element))\n\n# If plotting data and results is required, read model results, histogram data and results and plot them\nif plotResults:\n # Define bin edges and widths\n number_of_bins = int(max_log_income_bin_edge - min_log_income_bin_edge) * 4 + 2\n income_bin_edges = np.logspace(min_log_income_bin_edge, max_log_income_bin_edge, number_of_bins, base=np.e)\n income_bin_widths = [b - a for a, b in zip(income_bin_edges[:-1], income_bin_edges[1:])]\n # Read model results\n results = readResults(rootResults + r\"/test/MonthlyGrossEmploymentIncome-run1.csv\", start_time, end_time)\n # Histogram model results\n model_hist = np.histogram([12.0 * x for x in results if x > 0.0], bins=income_bin_edges, density=False)[0]\n model_hist = model_hist / sum(model_hist)\n # Histogram data from WAS\n WAS_hist = np.histogram(chunk[chunk[variableToPlot] > 0.0][variableToPlot].values, bins=income_bin_edges,\n density=False, weights=chunk[chunk[variableToPlot] > 0.0][\"Weight\"].values)[0]\n WAS_hist = WAS_hist / sum(WAS_hist)\n # Plot both model results and data from WAS\n plt.bar(income_bin_edges[:-1], height=model_hist, width=income_bin_widths, align=\"edge\",\n label=\"Model results\", alpha=0.5, color=\"b\")\n plt.bar(income_bin_edges[:-1], height=WAS_hist, width=income_bin_widths, align=\"edge\",\n label=\"WAS data\", alpha=0.5, color=\"r\")\n # Final plot details\n plt.gca().set_xscale(\"log\")\n plt.xlabel(\"Income\")\n plt.ylabel(\"Frequency (fraction of cases)\")\n plt.legend()\n plt.title(\"Distribution of {}\".format(variableToPlot))\n plt.show()\n","repo_name":"INET-Complexity/housing-model","sub_path":"src/main/resources/validation-code/IncomeDist.py","file_name":"IncomeDist.py","file_ext":"py","file_size_in_byte":6049,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"61"} +{"seq_id":"21767992752","text":"import os\nfrom typing import List, TYPE_CHECKING\n\nfrom langchain.chains import RetrievalQA\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.llms.base import LLM\nfrom langchain.embeddings.base import Embeddings\nfrom langchain.embeddings import OpenAIEmbeddings\n\nfrom qna.constants import (\n OPENAI_COMPLETIONS_ENGINE,\n OPENAI_EMBEDDINGS_ENGINE,\n)\n\nif TYPE_CHECKING:\n from langchain.vectorstores.redis import Redis as RedisVDB\n\n\ndef get_llm(max_tokens=100) -> LLM:\n llm = ChatOpenAI(model_name=OPENAI_COMPLETIONS_ENGINE, max_tokens=max_tokens)\n return llm\n\n\ndef get_embeddings() -> Embeddings:\n embeddings = OpenAIEmbeddings(model_name=OPENAI_EMBEDDINGS_ENGINE)\n return embeddings\n\n\ndef make_qna_chain(llm: LLM, vector_db: \"RedisVDB\", prompt: str = \"\", **kwargs):\n \"\"\"Create the QA chain.\"\"\"\n\n search_type = \"similarity\"\n if \"search_type\" in kwargs:\n search_type = kwargs.pop(\"search_type\")\n\n # Create retreival QnA Chain\n chain = RetrievalQA.from_chain_type(\n llm=llm,\n chain_type=\"stuff\",\n retriever=vector_db.as_retriever(search_kwargs=kwargs, search_type=search_type),\n return_source_documents=True,\n chain_type_kwargs={\"prompt\": prompt},\n verbose=True\n )\n return 
chain\n","repo_name":"RedisVentures/ArXivChatGuru","sub_path":"app/qna/llm.py","file_name":"llm.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":452,"dataset":"github-code","pt":"61"} +{"seq_id":"73354357955","text":"from randomList import randomList\nfrom typing import *\nilist = randomList.randomList(20)\n\n\ndef selectionSort(l: List[int]):\n    if len(l) <= 1:\n        return l\n    for i in range(len(l) - 1):\n        minindex = i\n        for j in range(i + 1, len(l)):\n            if l[j] < l[minindex]:\n                minindex = j\n        l[i], l[minindex] = l[minindex], l[i]\n        print(\"Round {} result:\".format(i + 1), end=\" \")\n        print(l)\n    return l\n\n\nif __name__ == '__main__':\n    print(ilist)\n    print(selectionSort(ilist))\n","repo_name":"Xu109/data_structure","sub_path":"sort/选择排序.py","file_name":"选择排序.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74715014275","text":"from kinto_amo import __version__ as amo_version\nfrom kinto_amo.tests.support import AMOTestCase\n\n\nclass HelloViewTest(AMOTestCase):\n\n    def test_capability_is_exposed(self):\n        self.maxDiff = None\n        resp = self.app.get('/')\n        capabilities = resp.json['capabilities']\n        self.assertIn('blocklist-xml', capabilities)\n\n        expected = {'url': 'https://github.com/mozilla-services/kinto-amo/',\n                    'description': 'An endpoint to generate v2 and v3 XML '\n                                   'blocklist export.',\n                    'version': amo_version,\n                    'resources': {\n                        'blocklist': {\n                            'addons': {'bucket': 'blocklists',\n                                       'collection': 'addons'},\n                            'plugins': {'bucket': 'blocklists',\n                                        'collection': 'plugins'},\n                            'gfx': {'bucket': 'blocklists',\n                                    'collection': 'gfx'},\n                            'certificates': {'bucket': 'blocklists',\n                                             'collection': 'certificates'}}}}\n        self.assertEqual(expected, capabilities['blocklist-xml'])\n","repo_name":"mozilla-services/kinto-amo","sub_path":"kinto_amo/tests/test_plugin_setup.py","file_name":"test_plugin_setup.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"24680617090","text":"#!/usr/bin/python3\n\"\"\"\nmodule prints out a name\n\"\"\"\n\n\ndef say_my_name(first_name, last_name=\"\"):\n    \"\"\"\n    checks if type of input is a string, else raise errors\n    \"\"\"\n    if type(first_name) is not str:\n        raise TypeError(\"first_name must be a string\")\n    if type(last_name) is not str:\n        raise TypeError(\"last_name must be a string\")\n    print(\"My name is {} {}\".format(first_name, last_name))\n","repo_name":"meistens/alx-higher_level_programming","sub_path":"0x07-python-test_driven_development/3-say_my_name.py","file_name":"3-say_my_name.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43641542146","text":"import nltk\nimport sqlalchemy\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport work._database_\nimport work.reClass\nimport pymysql\nimport sys\nimport re\nfrom wordcloud import WordCloud\nimport jieba\nfrom sqlalchemy import create_engine\nimport analysis.blog_analysis\n_db = work._database_.DbClass()\n_re = work.reClass.Re()\n_db.connectDatabase()\n\n# userdetail_data has 3499 records, 79 of which are invalid\n\ndef getUser_data():\n    sql = 'select * from userdetail_data'\n    user_data = analysis.blog_analysis.pandas_read(sql)\n    return user_data\n\ndef getbinpicture():\n    user = getUser_data()\n    # split users into separate dataframes by region\n    bei_count = 
user[user['detail'].str.contains('北京')]\n    shang_count = user[user['detail'].str.contains('上海')]\n    jiang_count = user[user['detail'].str.contains('江苏')]\n    guang_count = user[user['detail'].str.contains('广东')]\n    shan_count = user[user['detail'].str.contains('山东')]\n    liao_count = user[user['detail'].str.contains('辽宁')]\n    he_count = user[user['detail'].str.contains('河南')]\n    si_count = user[user['detail'].str.contains('四川')]\n    # take the complement, i.e. all other regions\n    other = user.append(bei_count)\n    other = other.append(shang_count)\n    other = other.append(jiang_count)\n    other = other.append(guang_count)\n    other = other.append(shan_count)\n    other = other.append(liao_count)\n    other = other.append(he_count)\n    other = other.append(si_count)\n    other = other.drop_duplicates(subset=['detail', 'user_name'], keep=False)\n    labels = ['北京', '上海', '江苏', '广东', '山东', '辽宁', '河南', '四川','其他']\n    number = [bei_count.user_name.count(), shang_count.user_name.count(),jiang_count.user_name.count(),\n              guang_count.user_name.count(),shan_count.user_name.count(),liao_count.user_name.count(),\n              he_count.user_name.count(),si_count.user_name.count(),other.user_name.count()]\n    number = np.array(number) # as an ndarray\n    plt.pie(number, labels=labels, autopct='%1.1f%%')\n    plt.axis('equal')\n    plt.legend()\n    plt.show()\n    # group by education level\n    u_count = user[user['detail'].str.contains('大学')] # bachelor's degree or above\n    u_other = user.append(u_count)\n    u_other = u_other.drop_duplicates(subset=['detail', 'user_name'], keep=False) # others, possibly left blank\n    u_labels = ['本科及以上', '其他']\n    u_number = [u_count.user_name.count(), u_other.user_name.count()]\n    u_number = np.array(u_number)\n    plt.pie(u_number, labels=u_labels, autopct='%1.1f%%')\n    plt.axis('equal')\n    plt.legend()\n    plt.show()\n    # write to excel, grouped by region\n    writer = pd.ExcelWriter('E://univisity//Python\\project//bigproject//excel//user.xlsx')\n    bei_count.to_excel(writer, sheet_name='北京')\n    shang_count.to_excel(writer, sheet_name='上海')\n    jiang_count.to_excel(writer, sheet_name='江苏')\n    guang_count.to_excel(writer, sheet_name='广东')\n    shan_count.to_excel(writer, sheet_name='山东')\n    liao_count.to_excel(writer, sheet_name='辽宁')\n    he_count.to_excel(writer, sheet_name='河南')\n    si_count.to_excel(writer, sheet_name='四川')\n    other.to_excel(writer, sheet_name='其他')\n    writer.save()\n    writer.close()\n\ndef getword():\n    user = getUser_data()\n    word_list = ''.join(user['detail'])\n    word_list = _re.Chinese(word_list)\n    # strip the profile label fields\n    word_list = word_list.replace(re.findall('[简介]+',word_list)[0], '')\n    word_list = word_list.replace(re.findall('[标签]+',word_list)[0], '')\n    word_list = word_list.replace(re.findall('[Lv]+',word_list)[0], '')\n    word_list = word_list.replace(re.findall('[年]+',word_list)[0], '')\n    word_list = word_list.replace(re.findall('[月]+',word_list)[0], '')\n    word_list = word_list.replace(re.findall('[日]+',word_list)[0], '')\n    word_list = word_list.replace(re.findall('[公司]+',word_list)[0], '')\n    word_list = word_list.replace(re.findall('[其他]+',word_list)[0], '')\n    word_list = word_list.replace(re.findall('[个性域名]+',word_list)[0], '')\n    word_list = word_list.replace(re.findall('[毕业]+',word_list)[0], '')\n    word_count = ' '.join(jieba.cut(word_list))\n    wordcloud = WordCloud(font_path=\" C:\\\\Windows\\\\Fonts\\\\STXINGKA.TTF\",\n                          background_color=\"black\", width=600,\n                          height=300, max_words=400, min_font_size=8).generate(word_count)\n    image = wordcloud.to_image()\n    image.show()\nif __name__ == \"__main__\":\n    # getUser_data()\n    # getbinpicture()\n    
getword()","repo_name":"L-Trunks/weibo-python-project","sub_path":"analysis/user_analysis.py","file_name":"user_analysis.py","file_ext":"py","file_size_in_byte":4550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41193340803","text":"import FWCore.ParameterSet.Config as cms\n\nPhase2ITValidateRecHit = cms.EDProducer('Phase2ITValidateRecHit',\n DeltaX = cms.PSet(\n name = cms.string('Delta_X'),\n title = cms.string('Delta_X;RecHit resolution X dimension'),\n xmin = cms.double(-0.2),\n switch = cms.bool(True),\n xmax = cms.double(0.2),\n NxBins = cms.int32(100)\n ),\n DeltaY = cms.PSet(\n name = cms.string('Delta_Y'),\n title = cms.string('Delta_Y;RecHit resolution Y dimension;'),\n xmin = cms.double(-0.2),\n switch = cms.bool(True),\n xmax = cms.double(0.2),\n NxBins = cms.int32(100)\n ),\n PullX = cms.PSet(\n name = cms.string('Pull_X'),\n title = cms.string('Pull_X;pull x;'),\n xmin = cms.double(-4),\n switch = cms.bool(True),\n xmax = cms.double(4),\n NxBins = cms.int32(100)\n ),\n PullY = cms.PSet(\n name = cms.string('Pull_Y'),\n title = cms.string('Pull_Y;pull y;'),\n xmin = cms.double(-4),\n switch = cms.bool(True),\n xmax = cms.double(4),\n NxBins = cms.int32(100)\n ),\n DeltaX_eta = cms.PSet(\n name = cms.string('Delta_X_vs_Eta'),\n title = cms.string('Delta_X_vs_Eta;#eta;#Delta x'),\n ymax = cms.double(0.02),\n NxBins = cms.int32(82),\n switch = cms.bool(True),\n xmax = cms.double(4.1),\n xmin = cms.double(-4.1),\n ymin = cms.double(-0.02)\n ),\n DeltaY_eta = cms.PSet(\n name = cms.string('Delta_Y_vs_Eta'),\n title = cms.string('Delta_Y_vs_Eta;#eta;#Delta y'),\n ymax = cms.double(0.02),\n NxBins = cms.int32(82),\n switch = cms.bool(True),\n xmax = cms.double(4.1),\n xmin = cms.double(-4.1),\n ymin = cms.double(-0.02)\n ),\n PullX_eta = cms.PSet(\n name = cms.string('Pull_X_vs_Eta'),\n title = cms.string('Pull_X_vs_Eta;#eta;pull x'),\n ymax = cms.double(4),\n NxBins = cms.int32(82),\n switch = cms.bool(True),\n xmax = cms.double(4.1),\n xmin = cms.double(-4.1),\n ymin = cms.double(-4)\n ),\n PullY_eta = cms.PSet(\n name = cms.string('Pull_Y_vs_Eta'),\n title = cms.string('Pull_Y_vs_Eta;#eta;pull y'),\n ymax = cms.double(4),\n NxBins = cms.int32(82),\n switch = cms.bool(True),\n xmax = cms.double(4.1),\n xmin = cms.double(-4.1),\n ymin = cms.double(-4)\n ),\n nRecHits_primary = cms.PSet(\n name = cms.string('Number_RecHits_matched_PrimarySimTrack'),\n title = cms.string('Number of RecHits matched to primary SimTrack;;'),\n xmin = cms.double(0),\n switch = cms.bool(True),\n xmax = cms.double(0),\n NxBins = cms.int32(100)\n ),\n DeltaX_primary = cms.PSet(\n name = cms.string('Delta_X_SimHitPrimary'),\n title = cms.string('Delta_X_SimHitPrimary;#delta x;'),\n xmin = cms.double(-0.2),\n switch = cms.bool(True),\n xmax = cms.double(0.2),\n NxBins = cms.int32(100)\n ),\n DeltaY_primary = cms.PSet(\n name = cms.string('Delta_Y_SimHitPrimary'),\n title = cms.string('Delta_Y_SimHitPrimary;#Delta y;'),\n xmin = cms.double(-0.2),\n switch = cms.bool(True),\n xmax = cms.double(0.2),\n NxBins = cms.int32(100)\n ),\n PullX_primary = cms.PSet(\n name = cms.string('Pull_X_SimHitPrimary'),\n title = cms.string('Pull_X_SimHitPrimary;pull x;'),\n ymax = cms.double(4),\n NxBins = cms.int32(82),\n switch = cms.bool(True),\n xmax = cms.double(4.1),\n xmin = cms.double(-4.1),\n ymin = cms.double(-4)\n ),\n PullY_primary = cms.PSet(\n name = cms.string('Pull_Y_SimHitPrimary'),\n title = cms.string('Pull_Y_SimHitPrimary;pull y;'),\n ymax = 
cms.double(4),\n NxBins = cms.int32(82),\n switch = cms.bool(True),\n xmax = cms.double(4.1),\n xmin = cms.double(-4.1),\n ymin = cms.double(-4)\n ),\n associatePixel = cms.bool(True),\n associateStrip = cms.bool(False),\n usePhase2Tracker = cms.bool(True),\n associateRecoTracks = cms.bool(False),\n associateHitbySimTrack = cms.bool(True),\n pixelSimLinkSrc = cms.InputTag('simSiPixelDigis', 'Pixel'),\n ROUList = cms.vstring(\n 'TrackerHitsPixelBarrelLowTof',\n 'TrackerHitsPixelBarrelHighTof',\n 'TrackerHitsPixelEndcapLowTof',\n 'TrackerHitsPixelEndcapHighTof'\n ),\n simTracksSrc = cms.InputTag('g4SimHits'),\n SimVertexSource = cms.InputTag('g4SimHits'),\n SimTrackMinPt = cms.double(2),\n rechitsSrc = cms.InputTag('siPixelRecHits'),\n TopFolderName = cms.string('TrackerPhase2ITRecHitV'),\n Verbosity = cms.bool(False),\n mightGet = cms.optional.untracked.vstring\n)\n","repo_name":"cms-sw/cmssw-cfipython","sub_path":"Validation/SiTrackerPhase2V/Phase2ITValidateRecHit_cfi.py","file_name":"Phase2ITValidateRecHit_cfi.py","file_ext":"py","file_size_in_byte":4313,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"16238719144","text":"from pymongo import MongoClient\nfrom config import MONGO_CONECTION\nimport json\nfrom flask import Flask, request, Response\n# from flask_cors import CORS\nfrom flask import jsonify\nfrom urllib.parse import urlparse\n\napp = Flask(__name__)\n# CORS(app)\n\nclient = MongoClient(MONGO_CONECTION)\ndb = client.Detecting_Malicious_URL_db\ncollection = db.formated_url\ncollection_reported = db.user_report_url\ncollection_excluded = db.user_exclude_url\n\n\n@app.route('/api/check', methods=['GET', 'POST', 'PATCH', 'PUT', 'DELETE'])\ndef check_request_url():\n if request.method == 'GET': # waiting\n return \"GET\"\n elif request.method == 'POST':\n url = request.form[\"url\"]\n print(url)\n o = urlparse(url)\n if o.scheme != '':\n url = url[len(o.scheme) + 3:]\n if url[-1] == \"/\":\n url = url[0:-1]\n\n result = collection.find_one({\"url\": url})\n print(url)\n print(result)\n if result == None:\n response_data = {\n \"result\": {\n \"label\": \"-1\",\n \"scource\": \"databse\"\n }\n }\n return jsonify(response_data)\n else:\n response_data = {\n \"result\": {\n \"label\": result[\"label\"],\n \"scource\": \"databse\"\n }\n }\n return jsonify(response_data)\n\n elif request.method == 'PATCH': # waiting\n return \"PATCH\"\n\n elif request.method == 'PUT': # waiting\n return \"PUT\"\n\n elif request.method == 'DELETE': # waiting\n return \"ECHO: DELETE\"\n\n\n# for exclude url to black or white list\n@app.route('/api/exclude/url', methods=['POST'])\ndef request_exclude_url():\n if request.method == 'POST':\n data = []\n userId = request.form[\"user_id\"]\n url = request.form[\"url_exclude\"]\n label = request.form[\"label\"]\n\n o = urlparse(url)\n if o.scheme != '':\n url = url[len(o.scheme) + 3:]\n if url[-1] == \"/\":\n url = url[0:-1]\n\n data.append({\n \"user_id\": userId,\n \"url\": url.strip(),\n \"label\": label.strip()\n })\n\n collection_excluded.insert_many(data)\n\n response_data = {\n \"result\": {\n \"status\": \"Done !\",\n }\n }\n\n return jsonify(response_data)\n\n\n# for add url to black or white list\n@app.route('/api/report/url', methods=['POST'])\ndef request_report_url():\n if request.method == 'POST':\n userId = request.form[\"user_id\"]\n url = request.form[\"url_report\"]\n label = request.form[\"label\"]\n content_report = request.form[\"content_report\"]\n data = []\n\n o = 
urlparse(url)\n        if o.scheme != '':\n            url = url[len(o.scheme) + 3:]\n        if url[-1] == \"/\":\n            url = url[0:-1]\n\n        data.append({\n            \"user_Id\": userId,\n            \"url\": url,\n            \"label\": label,\n            \"reason\": content_report\n        })\n\n        collection_reported.insert_many(data)\n        response_data = {\n            \"result\": {\n                \"status\": \"Report received, thank you for your report!\",\n            }\n        }\n\n        return jsonify(response_data)\n\n\nif __name__ == '__main__':\n    app.run()\n","repo_name":"whoops01001/my-extension","sub_path":"backend/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70165608514","text":"#!/usr/bin/env python\nfrom __future__ import print_function, with_statement\nimport roslib\nimport rospy\nimport math\nimport tf\nimport transformations as tr\nimport numpy\nfrom aruco_msgs.msg import MarkerArray,Marker\nfrom std_msgs.msg import UInt32MultiArray\n\nimport os\nimport sys\nimport json\nimport argparse\nfrom threading import *\n\nlock = Lock()\n\nglobal_markers = set()\n\ndef process_markers(markers):\n    global listener, br, marker_trans_mat\n\n    sum_mat = None\n    num = 0\n    t_latest = rospy.Time(0)\n    for marker in markers.markers:\n        print(\"Seeing marker %s\" %marker.id)\n        marker_link = '/Marker' + str(marker.id) + \"__link\"\n        marker_id = \"/Marker\" + str(marker.id)\n        tw = listener.getLatestCommonTime(marker_link, 'world')\n        to = listener.getLatestCommonTime(marker_id, 'odom')\n        t = tw if tw > to else to\n        t_latest = t if t > t_latest else t_latest\n        (trans_w,rot_w) = listener.lookupTransform(marker_link, 'world', tw)\n        (trans_o,rot_o) = listener.lookupTransform(marker_id, 'odom', to)\n\n\n        w_mat = numpy.dot(tr.translation_matrix(trans_w), tr.quaternion_matrix(rot_w))\n        t_o = tr.concatenate_matrices(tr.translation_matrix(trans_o), tr.quaternion_matrix(rot_o))\n        # Need to do the transform equivalent to 0.25 0 0 0 0.5 -0.5 -0.5 0.5 on marker_id\n        #t_o = numpy.dot(t_o, marker_trans_mat);\n        o_mat_i = tr.inverse_matrix(t_o)\n        mat3 = numpy.dot(w_mat, o_mat_i)\n        # numpy has no inverse_matrix; use the transformations helper, as elsewhere in this file\n        mat3 = tr.inverse_matrix(mat3)\n        if sum_mat is None:\n            sum_mat = mat3\n        else:\n            sum_mat = sum_mat + mat3\n        num = num + 1\n\n    # guard against an empty marker list before averaging\n    if sum_mat is None:\n        return\n    avg_mat = sum_mat / num\n\n    trans = tr.translation_from_matrix(avg_mat)\n    rot = tr.quaternion_from_matrix(avg_mat)\n    br.sendTransform(trans, rot, t_latest, \"odom\", \"map\")\n\ngazebo_world_pose = tr.identity_matrix()\n\n\ndef getMarkerTFFromMap(m):\n    # This doesn't work\n    mid = \"Marker%s\" %m\n\n    marker = marker_dict[mid]\n    w_mat = None\n\n    if marker is not None:\n\n        w = 0 # east\n        p=(-marker[\"x\"], -marker[\"y\"], -0.25) #east\n        if marker[\"wall\"] == \"north\":\n            p=(marker[\"y\"], -marker[\"x\"], -0.25)\n            w = math.pi / 2\n        elif marker[\"wall\"] == \"south\":\n            p=(-marker[\"y\"], marker[\"x\"], -0.25)\n            w = -math.pi / 2\n        elif marker[\"wall\"] == \"west\":\n            p=(marker[\"x\"], marker[\"y\"], -0.25)\n            w = math.pi\n        q = tr.quaternion_from_euler(0,0,w)\n\n        print(\"Map: (%s) Marker%s: %s %s\" %(marker[\"wall\"],m,p,q))\n        t = tr.translation_matrix(p)\n        r = tr.quaternion_matrix(q)\n        w_mat = numpy.dot(t, r)\n\n    return (w_mat, None)\n\n\ndef getWorldMarkerMatrixFromTF(m):\n    global listener\n    marker_link = \"Marker%s__link\" %m\n    try:\n        tw = listener.getLatestCommonTime(marker_link, 'world')\n        (t,r) = listener.lookupTransform(marker_link, 'world', tw)\n        print(\"TF: Marker%s: %s %s\" %(m,t, r))\n        w_mat = numpy.dot(tr.translation_matrix(t), tr.quaternion_matrix(r))\n    except:\n        return (None, None)\n    return 
(w_mat, tw)\n\ndef list_markers(markers):\n global listener, br, marker_trans_mat, global_markers\n if len(markers.data) == 0:\n return\n with lock:\n global_markers |= set(markers.data)\n\n# def publish_marker_transforms():\n# global global_markers\n# sum_mat = None\n# num = 0.0\n# t_latest = rospy.Time(0)\n# with lock:\n\n# if len(global_markers) == 0:\n# rospy.loginfo(\"There are no markers to process\")\n# return\n\n# for marker in global_markers:\n \n# marker_id = \"/Marker%s\" %marker\n\n# to = listener.getLatestCommonTime(marker_id, 'odom')\n\n# t = to\n# (w_mat, tw) = getMarkerTFFromMap(marker)\n# #(w_mat, tw) = getWorldMarkerMatrixFromTF(marker)\n\n# if w_mat is None:\n# continue\n# if tw is not None:\n# t = tw if tw > to else to\n\n# t_latest = t if t > t_latest else t_latest\n\n \n# (trans_o,rot_o) = listener.lookupTransform(marker_id, 'odom', to)\n\n\n# t_o = numpy.dot(tr.translation_matrix(trans_o), tr.quaternion_matrix(rot_o))\n\n# o_mat_i = tr.inverse_matrix(t_o) # need odom wrt marker\n# mat3 = numpy.dot(o_mat_i, w_mat) # odom wrt world\n# mat3 = tr.inverse_matrix(mat3) # world wrt odom\n# num = num + 1\n\n# if sum_mat is None:\n# sum_mat = mat3\n \n# else:\n# sum_mat = sum_mat + mat3\n# if sum_mat is None:\n# return\n# avg_mat = sum_mat / num\n# #r_o = numpy.dot(tr.translation_matrix((0,0,0)), tr.quaternion_matrix((0,0,0, 1)))\n\n# #avg_mat = numpy.dot(r_o, avg_mat)\n\n\n# trans = tr.translation_from_matrix(avg_mat)\n# rot = tr.quaternion_from_matrix(avg_mat)\n\n# br.sendTransform(trans, rot, t_latest, \"odom\", \"map\")\n# global_markers.clear()\n\n\n# #br.sendTransform((0,0,0), (0,0,0,1), t_latest, \"odom\", \"map\")\n\ndef thread_main():\n rate = rospy.Rate(10)\n while True:\n publish_marker_transforms()\n rate.sleep()\n\nfront_transform = {\"transform\" : None, \"time\" : None}\nback_transform = {\"transform\" : None, \"time\" : None}\n\ndef list_markers_gen(transform):\n def process_marker_transform(markers):\n sum_mat = None\n num = 0.0\n t_latest = rospy.Time(0)\n #print(\"Got some markers: %s\" %markers)\n for marker in markers.data:\n marker_id = \"/Marker%s\" %marker\n try:\n to = listener.getLatestCommonTime(marker_id, 'odom')\n\n t = to\n (w_mat, tw) = getMarkerTFFromMap(marker)\n #(w_mat_tr, tw) = getWorldMarkerMatrixFromTF(marker)\n #(w_mat, tw) = getWorldMarkerMatrixFromTF(marker)\n\n # if marker==412:\n # print(\"Marker%s\" %marker)\n # print(w_mat)\n # print(w_mat_tr)\n \n if w_mat is None:\n continue\n if tw is not None:\n t = tw if tw > to else to\n\n t_latest = t if t > t_latest else t_latest\n\n \n (trans_o,rot_o) = listener.lookupTransform(marker_id, 'odom', to)\n\n\n t_o = numpy.dot(tr.translation_matrix(trans_o), tr.quaternion_matrix(rot_o))\n\n o_mat_i = tr.inverse_matrix(t_o) # need odom wrt marker\n mat3 = numpy.dot(o_mat_i, w_mat) # odom wrt world\n mat3 = tr.inverse_matrix(mat3) # world wrt odom\n num = num + 1\n\n if sum_mat is None:\n sum_mat = mat3\n else:\n sum_mat = sum_mat + mat3\n except:\n pass\n if sum_mat is None:\n return\n avg_mat = sum_mat / num\n\n transform[\"transform\"] = avg_mat;\n transform[\"time\"] = t_latest;\n return process_marker_transform;\n\npublished_transform = None\n\ndef back_front_thread():\n global published_transform\n rate = rospy.Rate(10)\n while True:\n time = rospy.Time.now()\n num = 0\n sum_mat = None\n #print(\"Time diff = %s\" %(str(time.to_sec() - front_transform[\"time\"].to_sec()) if front_transform[\"time\"] is not None else \"??\"))\n if front_transform[\"transform\"] is not None and time.to_sec() - 
front_transform[\"time\"].to_sec() < 1:\n sum_mat = front_transform[\"transform\"]\n num = 1.0\n # BRS Let's not use averages - seems to cause normailization errors?\n if back_transform[\"transform\"] is not None and time.to_sec() - back_transform[\"time\"].to_sec() < 1:\n sum_mat = back_transform[\"transform\"] if sum_mat is None else sum_mat + back_transform[\"transform\"]\n num = num + 1.0\n if num > 0:\n transform = sum_mat / num\n published_transform = transform\n trans = tr.translation_from_matrix(transform)\n rot = tr.quaternion_from_matrix(transform)\n r,p,y = tr.euler_from_quaternion(rot)\n rot = tr.quaternion_from_euler(r,p,y)\n br.sendTransform(trans, rot, time, \"odom\", \"map\")\n # elif published_transform is not None:\n # transform = published_transform\n # trans = tr.translation_from_matrix(transform)\n # rot = tr.quaternion_from_matrix(transform)\n # br.sendTransform(trans, rot, time, \"odom\", \"map\")\n rate.sleep()\n\nmarker_dict = {}\n\nif __name__ == '__main__':\n\n marker_trans_mat = tr.concatenate_matrices(tr.translation_matrix([0, 0, 0]), tr.quaternion_matrix ([0.5,-0.5,-0.5,0.5])) #0.25,\n \n rospy.init_node('marker_pose_publisher')\n\n markerfile = None\n try:\n markerfile = rospy.get_param('~marker_file')\n except KeyError:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"marker_file\", type=str, help='The marker file to read in markers')\n args = parser.parse_args()\n markerfile = args.marker_file\n\n if not os.path.isfile(markerfile):\n print(\"The marker file '%s' does not exist\" %markerfile)\n sys.exit()\n\n f = open(markerfile)\n s = f.read()\n markers = json.loads(s)\n\n for m in markers:\n marker_dict[m[\"id\"]] = m\n\n listener = tf.TransformListener()\n br = tf.TransformBroadcaster()\n rate = rospy.Rate(60.0)\n\n #rospy.Subscriber(\"aruco_marker_publisher/markers\", MarkerArray, process_markers)\n #rospy.Subscriber(\"aruco_marker_publisher_front/markers_list\", UInt32MultiArray, list_markers)\n #rospy.Subscriber(\"aruco_marker_publisher_back/markers_list\", UInt32MultiArray, list_markers)\n rospy.Subscriber(\"aruco_marker_publisher_front/markers_list\", UInt32MultiArray, list_markers_gen(front_transform))\n rospy.Subscriber(\"aruco_marker_publisher_back/markers_list\", UInt32MultiArray, list_markers_gen(back_transform))\n rospy.loginfo(\"Marker Pose Publisher waiting for info\")\n\n #marker_thread = Thread(target=thread_main)\n marker_thread = Thread(target=back_front_thread)\n marker_thread.start()\n\n rospy.spin()","repo_name":"cmu-mars/cp3_base","sub_path":"cp3_base/scripts/marker_pose_publisher.py","file_name":"marker_pose_publisher.py","file_ext":"py","file_size_in_byte":10260,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"32199656737","text":"#importing all the necessary Libraries\r\n\r\nimport numpy as np\r\nimport torch\r\nimport torchvision\r\nfrom torch.utils.data import Dataset, DataLoader,random_split\r\nimport math\r\nfrom torch import optim \r\nfrom torch import nn \r\nimport torch.nn.functional as F \r\nimport cv2\r\n\r\n#Creating a Class\r\n\r\nclass CNN(nn.Module):\r\n def __init__(self, in_channels, num_classes=20):\r\n super(CNN, self).__init__()\r\n self.conv1 = nn.Conv2d(in_channels,8,3,stride=1,padding=1) #Convulation(Stride=1,Padding=1)\r\n self.pool = nn.MaxPool2d(2,2) #Pooling(Which reduces the Dimension of image by half)\r\n self.conv2 = nn.Conv2d(8,16,3,stride=1,padding=1)\r\n self.fc1 = nn.Linear(16 * 7 * 7, num_classes)\r\n\r\n def forward(self, x): 
#Forward Function\r\n x = F.relu(self.conv1(x)) #Convulation on Image\r\n x = self.pool(x) #Pooling\r\n x = F.relu(self.conv2(x))\r\n x = self.pool(x)\r\n x = x.reshape(x.shape[0], -1) #Reshaping The image into a Column Vector/Matrix\r\n x = self.fc1(x)\r\n return x\r\n\r\n# Defining Classes Of Different Objects\r\n\r\nclasses=('airplane','ant','banana','baseball','bird','bucket','butterfly','cat','coffee cup','dolphin','donut','duck','fish','leaf','mountain','pencil','smiley face','snake','umbrella','wine bottle')\r\n\r\n\r\n#Defining The no of Channels In Input Image and No of classes\r\n\r\nin_channels=1\r\nnum_classes=20\r\n\r\n#Loading the File\r\nFILE=\".vscode\\model.pth\" #Enter the adress of Your File\r\nloaded_model = CNN(in_channels, num_classes)\r\nloaded_model.load_state_dict(torch.load(FILE)) \r\nloaded_model.eval()\r\n\r\n\r\n#Creating A Drawing Pad using Opencv Library\r\n\r\ndrawing = False \r\npt1_x , pt1_y = None , None\r\nl=0\r\n# mouse callback function\r\ndef line_drawing(event,x,y,flags,param):\r\n global pt1_x,pt1_y,drawing,l\r\n\r\n if event==cv2.EVENT_LBUTTONDOWN:\r\n drawing=True\r\n pt1_x,pt1_y=x,y\r\n \r\n elif event==cv2.EVENT_MOUSEMOVE:\r\n if drawing==True:\r\n cv2.line(img,(pt1_x,pt1_y),(x,y),color=(255,255,255),thickness=20)\r\n pt1_x,pt1_y=x,y\r\n \r\n elif event==cv2.EVENT_LBUTTONUP:\r\n drawing=False\r\n cv2.line(img,(pt1_x,pt1_y),(x,y),color=(255,255,255),thickness=20) \r\n l=27\r\n\r\nimg = np.zeros((500,500,3), np.uint8) #Defining The Size of Drawing Pad\r\ncv2.namedWindow('test draw')\r\ncv2.setMouseCallback('test draw',line_drawing)\r\n\r\nwhile(1):\r\n cv2.imshow('test draw',img)\r\n p=cv2.waitKey(1) & 0xFF\r\n \r\n if p==ord('z'): #Press 'z' To end the program\r\n break\r\n if p==ord('a'): #Press 'a' To draw a new Object\r\n img = np.zeros((500,500,3), np.uint8)\r\n if l==27:\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n input_img=cv2.resize(gray,(28,28),interpolation=cv2.INTER_AREA)\r\n x=torch.from_numpy(input_img)\r\n x=x.reshape(1,1,28,28)\r\n\r\n res=loaded_model(x.float())\r\n i=torch.argmax(res)\r\n\r\n print(\"I Guess It is a \",classes[i.item()])\r\n l=0\r\n \r\n \r\ncv2.destroyAllWindows()","repo_name":"shyamsah23/Real-Time-Doodle-Classifier","sub_path":"Real Time Doodle Classifier.py","file_name":"Real Time Doodle Classifier.py","file_ext":"py","file_size_in_byte":3175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38671995730","text":"\n'''\nThis script is designed to initialise and run a server which displays graphs\nusing the data from the QA database.\n\nEach individual tab is created by a seperate script saved in the 'scripts'\nfolder, with this main script calling these individual scripts and as such\nallowing for easier management and development of this application.\n\nNB: MS Access on trust PCs is 32-bit. 
64-bit python cannot run a 32-bit MS\nAccess driver therefore this code will only work with 32-bit python.\n\n'''\n\n####################### IMPORT LIBRARIES AND SCRIPTS ##########################\n\nimport os\nimport sys\nimport time\n\n# Import some stuff from bokeh and tornado libraries to manipulate tabs and\n# create/run the server.\nfrom tornado.ioloop import IOLoop\nfrom bokeh.application import Application\nfrom bokeh.application.handlers import FunctionHandler\nfrom bokeh.server.server import Server\nfrom bokeh.models.widgets import Tabs\nfrom bokeh.io import curdoc\n\n# Import some other libraries\nfrom functools import partial\nfrom configparser import ConfigParser\nimport multiprocessing\nimport asyncio\nimport webbrowser\nimport pypyodbc\nimport pandas as pd\nimport easygui as eg\n\n# Import the tab scripts.\nfrom scripts.PBT_Isocentre import pbt_isocentre_graph\nfrom scripts.ElectronOutput import Electron_Output_Graph\nfrom scripts.Sym import Sym_Graph\nfrom scripts.FlexitronOutput import Flexitron_Output_Graph\nfrom scripts.ElectronEnergy import Electron_Energy_Graph\nfrom scripts.PhotonOutput import Photon_Output_Graph\nfrom scripts.GulmayOutput import Gulmay_Output_Graph\nfrom config import Config\n\n# Import time and start a timer\nstart = time.time()\n\n# Start of patch!\n# Need a fix for running the Tornado Server in Python 3.8 on Windows. This\n# piece of code seems to allow it to run correctly (something about needing\n# to change a Windows default?):\n# https://github.com/tornadoweb/tornado/issues/2751\n# https://github.com/tornadoweb/tornado/issues/2608\nif sys.platform == 'win32':\n    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\n# End of patch!\n\n\n###############################################################################\n\n\ndef produce_doc(doc):\n    '''\n    This function produces the document containing all of the graphs, which\n    is later called by the main function.\n\n    It does this by connecting to the database, passing the cursor to the other\n    tab scripts and compiling the tabs into one document\n    '''\n\n    photon_db_path_fe = Config.Main.photon_db_path_fe\n    proton_db_path_fe = Config.Main.proton_db_path_fe\n    # Connect to the database.\n    photon_conn = pypyodbc.connect(r'Driver={Microsoft Access Driver (*.mdb, *.accdb)};'\n                                   r'DBQ=' + photon_db_path_fe + ';'\n                                   # r'PWD=JoNiSi;' # May need a line here for the database password\n                                   )\n    proton_conn = pypyodbc.connect(r'Driver={Microsoft Access Driver (*.mdb, *.accdb)};'\n                                   r'DBQ=' + proton_db_path_fe + ';'\n                                   r'PWD=Pr0ton5%;' # May need a line here for the database password\n                                   )\n\n    # With the connection made run a check to find out how long it took.\n    endconn = time.time()\n    print('\\nConnection made in: ' + str(endconn - start) + 'sec')\n\n    # User interface\n    choice = eg.buttonbox('Click on what you want to plot.', 'Graphing Code',\n                          ('All', 'Proton', 'TrueBeam', 'Gulmay', 'Flexitron'))\n\n    # Create the tabs\n    if choice == 'All':\n        # Create each tab by running the relevant scripts\n        tab1 = Photon_Output_Graph(photon_conn, Config)\n        tab2 = Electron_Energy_Graph(photon_conn, Config)\n        tab3 = Electron_Output_Graph(photon_conn, Config)\n        tab4 = Sym_Graph(photon_conn, Config)\n        tab5 = Gulmay_Output_Graph(photon_conn, Config)\n        
tab6 = Flexitron_Output_Graph(photon_conn, Config)\n # Put all the tabs into one application\n tabs = Tabs(tabs=[tab1, tab2, tab3, tab4, tab5, tab6])\n elif choice == 'Proton':\n # Create each tab by running the relevant scripts\n tab1 = pbt_isocentre_graph(proton_conn, Config)\n # Put all the tabs into one application\n tabs = Tabs(tabs=[tab1])\n elif choice == 'TrueBeam':\n # Create each tab by running the relevant scripts\n tab1 = Photon_Output_Graph(photon_conn, Config)\n tab2 = Electron_Energy_Graph(photon_conn, Config)\n tab3 = Electron_Output_Graph(photon_conn, Config)\n tab4 = Sym_Graph(photon_conn, Config)\n # Put all the tabs into one application\n tabs = Tabs(tabs=[tab1, tab2, tab3, tab4])\n elif choice == 'Gulmay':\n tab1 = Gulmay_Output_Graph(photon_conn, Config)\n # Put all the tabs into one application\n tabs = Tabs(tabs=[tab1])\n elif choice == 'Flexitron':\n tab1 = Flexitron_Output_Graph(photon_conn, Config)\n # Put all the tabs into one application\n tabs = Tabs(tabs=[tab1])\n else:\n eg.msgbox('Error')\n exit()\n\n # Put all of the tabs into the doccument\n doc.add_root(tabs)\n\n endtabs = time.time()\n print('\\nTabs made in: ' + str(endtabs - endconn) + 'sec')\n\n return doc\n\n\ndef main():\n '''\n This function creates a server and opens it in a web browser. It calls the\n produce_doc function, opening this document (containing the bokeh graphs)\n within the browser.\n\n This is the method to build a Bokeh server within code without needing to\n use the Bokeh server command line tool.\n\n This code was originally written by VR (Bill).\n Commented and altered by CB (Christian)\n '''\n\n print('\\nPreparing a bokeh application.')\n\n # Try and connect to google chrome\n try:\n webbrowser.get(\"chrome\")\n found_chrome = True\n except:\n # If connection fails then look in a couple of typical locations for chrome\n if os.path.isfile(\"C:\\\\Program Files\\\\Google\\\\Chrome\\\\Application\\\\chrome.exe\"):\n chrome_path = \"C:\\\\Program Files\\\\Google\\\\Chrome\\\\Application\\\\chrome.exe\"\n webbrowser.register(\n 'chrome', None, webbrowser.BackgroundBrowser(chrome_path))\n chrome = webbrowser.get('chrome')\n found_chrome = True\n elif os.path.isfile(\"C:\\\\Program Files (x86)\\\\Google\\\\Chrome\\\\Application\\\\chrome.exe\"):\n chrome_path = \"C:\\\\Program Files (x86)\\\\Google\\\\Chrome\\\\Application\\\\chrome.exe\"\n webbrowser.register(\n 'chrome', None, webbrowser.BackgroundBrowser(chrome_path))\n chrome = webbrowser.get('chrome')\n found_chrome = True\n else:\n found_chrome = False\n\n # Start an Input/Output Loop. 
(Specifically a Tornado asynchronous I/O Loop)\n io_loop = IOLoop.current()\n port = 5001\n kwargs = {'io_loop': io_loop, 'port': port, }\n\n # Define the application using the bokeh application function handler.\n # https://docs.bokeh.org/en/latest/docs/reference/application/handlers/function.html\n app = Application(FunctionHandler(produce_doc))\n\n # http://matthewrocklin.com/blog/work/2017/06/28/simple-bokeh-server\n try:\n server = Server({'/': app}, **kwargs)\n server.start()\n print('\\nOpening Bokeh application on http://localhost:5001/')\n\n if found_chrome:\n server.show('/', browser=\"chrome\")\n else:\n server.show('/')\n # Start the Input/Output Loop\n io_loop.start()\n except OSError:\n url = 'http://localhost:5001'\n if found_chrome:\n chrome.open_new_tab(url)\n else:\n webbrowser.open_new_tab(url)\n\n return\n\n\nif __name__ == '__main__':\n\n # This line is necessary for packaging as an executable and avoiding\n # unnecessary loops\n multiprocessing.freeze_support()\n\n # Start bar as a process\n p = multiprocessing.Process(target=main)\n p.start()\n\n # Wait for 1200 seconds (20 mins) or until process finishes\n delay = 1200\n interval = 600\n p.join(delay)\n\n while True:\n if p.is_alive():\n if eg.ynbox('Program has been running for approximatly ' + str(int(delay/60)) + ' minutes. Do you want to close it?'):\n p.terminate()\n sys.exit()\n else:\n delay = delay + interval\n time.sleep(interval)\n\n\n#\n","repo_name":"UCLHp/Database_Graphing","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9161692361","text":"#! /usr/bin/env python3\n\n__author__ = 'Paul Hancock'\n__date__ = '2019/09/06'\n\nimport argparse\nfrom astropy import table\nimport numpy as np\nimport os\nimport sys\n\n\ndef add_epoch(table, epoch):\n \"\"\"\n Add an epoch column to the table.\n\n :param table:\n :param epoch:\n :return:\n \"\"\"\n col = table.Column(data=np.ones(len(table), dtype=np.int32)*epoch, name='epoch')\n table.add_column(col)\n return table\n\n\ndef join_all(flist):\n \"\"\"\n Concatenate all the tables and add an extra column which identifies the epoch for each row\n\n parameters\n ==========\n flist : [str, ...]\n A list of the catalogue file names to be read\n\n return\n ======\n tab : astropy.table.Table\n A joined table\n \"\"\"\n print(\"Reading {0}\".format(flist[0]))\n tab = table.Table.read(flist[0])\n tab = add_epoch(tab, 0)\n\n if len(flist) <= 1:\n return tab\n\n for i in range(1, len(flist)):\n print(\"Appending {0}\".format(flist[i]))\n tab2 = add_epoch(table.Table.read(flist[i]), i)\n tab = table.vstack([tab, tab2])\n\n return tab\n\n\ndef clean_flist(flist):\n \"\"\"\n remove files that don't exist from the flist\n\n :param flist:\n :return:\n \"\"\"\n exists = []\n for f in flist:\n if os.path.exists(f):\n exists.append(f)\n else:\n print(\"Ingore missing file {0}\".format(f))\n return exists\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n group1 = parser.add_argument_group(\"Collect catalogues\")\n group1.add_argument(\"--infile\", dest='infile', type=str, default=None,\n help=\"A list of catalogues in a file. [optional]\")\n group1.add_argument(\"--in\", dest='files', type=str, default=None, nargs='+',\n help=\"Explicit list of catalogues to include. 
[optional]\")\n group1.add_argument(\"--out\", dest='outfile', type=str, default=None,\n help=\"Output filename.\")\n group1.add_argument(\"--ignoremissing\", dest='ignore_missing', action='store_true', default=False,\n help=\"If true, ignore missing input files. Default=False\")\n results = parser.parse_args()\n\n if results.outfile is None:\n parser.print_help()\n sys.exit(1)\n\n flist = []\n if results.infile:\n flist.extend([a.strip() for a in open(results.infile).readlines()])\n if results.files:\n flist.extend(results.files)\n\n if results.ignore_missing:\n flist = clean_flist(flist)\n\n tab = join_all(flist)\n if os.path.exists(results.outfile):\n os.remove(results.outfile)\n tab.write(results.outfile)\n","repo_name":"PaulHancock/Robbie","sub_path":"scripts/collect_transients.py","file_name":"collect_transients.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"3383050505","text":"from typing import Dict\n\nfrom aws_cdk import aws_ec2 as ec2, aws_ssm as ssm, Stack\n\nfrom utils.stack_util import add_tags_to_stack\nfrom .vpc import Vpc\nfrom constructs import Construct\n\n\nclass NetworkStack(Stack):\n _vpc: ec2.IVpc\n config: Dict\n\n def __init__(self, scope: Construct, id: str, config: Dict, **kwargs) -> None:\n super().__init__(scope, id, **kwargs)\n self.config = config\n # Apply common tags to stack resources.\n add_tags_to_stack(self, config)\n\n vpcConstruct = Vpc(self, \"Vpc\", config)\n self._vpc = vpcConstruct.vpc\n self.__push_vpc_id_cidr()\n self.__push_subnets_route_tables_ids()\n\n def __push_vpc_id_cidr(self):\n vpc_id = self._vpc.vpc_id\n vpc_cidr_block = self._vpc.vpc_cidr_block\n\n ssm.StringParameter(\n scope=self,\n id=\"vpcId\",\n tier=ssm.ParameterTier.STANDARD,\n string_value=vpc_id,\n parameter_name=self.config[\"ssm_infra\"] + \"vpc\",\n )\n\n ssm.StringParameter(\n scope=self,\n id=\"vpcCidr\",\n tier=ssm.ParameterTier.STANDARD,\n string_value=vpc_cidr_block,\n parameter_name=self.config[\"ssm_infra\"] + \"vpcCidrBlock\",\n )\n\n def __push_subnets_route_tables_ids(self):\n if self.config[\"network\"][\"vpc\"][\"create_natgateway\"] == 1:\n private_subnets = self._vpc.select_subnets(\n subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS\n ).subnets\n\n for index, subnet in enumerate(private_subnets):\n ssm.StringParameter(\n scope=self,\n id=f\"privateSubnet{index+1}\",\n tier=ssm.ParameterTier.STANDARD,\n string_value=subnet.subnet_id,\n parameter_name=self.config[\"ssm_infra\"] + f\"privateSubnet{index+1}\",\n )\n\n ssm.StringParameter(\n scope=self,\n id=f\"privateRouteTable{index+1}\",\n tier=ssm.ParameterTier.STANDARD,\n string_value=subnet.route_table.route_table_id,\n parameter_name=self.config[\"ssm_infra\"]\n + f\"privateRouteTable{index+1}\",\n )\n\n ssm.StringParameter(\n scope=self,\n id=f\"privateSubnetAz{index+1}\",\n tier=ssm.ParameterTier.STANDARD,\n string_value=subnet.availability_zone,\n parameter_name=self.config[\"ssm_infra\"]\n + f\"privateSubnetAz{index+1}\",\n )\n\n public_subnets = self._vpc.select_subnets(\n subnet_type=ec2.SubnetType.PUBLIC\n ).subnets\n\n for index, subnet in enumerate(public_subnets):\n ssm.StringParameter(\n scope=self,\n id=f\"publicSubnet{index+1}\",\n tier=ssm.ParameterTier.STANDARD,\n string_value=subnet.subnet_id,\n parameter_name=self.config[\"ssm_infra\"] + f\"publicSubnet{index+1}\",\n )\n\n ssm.StringParameter(\n scope=self,\n id=f\"publicRouteTable{index+1}\",\n tier=ssm.ParameterTier.STANDARD,\n 
string_value=subnet.route_table.route_table_id,\n parameter_name=self.config[\"ssm_infra\"] + f\"publicRouteTable{index+1}\",\n )\n\n ssm.StringParameter(\n scope=self,\n id=f\"publicSubnetAz{index+1}\",\n tier=ssm.ParameterTier.STANDARD,\n string_value=subnet.availability_zone,\n parameter_name=self.config[\"ssm_infra\"] + f\"publicSubnetAz{index+1}\",\n )\n","repo_name":"mfahadm8/ecs-rds-postgres","sub_path":"src/network_stack/network_stack.py","file_name":"network_stack.py","file_ext":"py","file_size_in_byte":3756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7739413978","text":"# -*- coding: utf-8 -*-\n# @Time : 2020-07-20 10:07\n# @Author : Yinchengjie\n# @Email : yinchengjie@zhehekeji.com\n# @File: RandomInfo.py\n# @Project : python-selenium-UI-automation-frame\n# @Software: PyCharm\n\"\"\"\n生成随机姓名、电话号码、身份证号、性别、银行卡号、邮箱\n\"\"\"\nimport random\nfrom util.RandomGeneration.province_id import province_id\nfrom util.RandomGeneration.phone_number import phone_number\n\n\nclass RandomInfo(object):\n\n # 随机生成姓名\n def get_name():\n # 删减部分,比较大众化姓氏\n firstName = \"赵钱孙李周吴郑王冯陈褚卫蒋沈韩杨朱秦尤许何吕施张孔曹严华金魏陶姜戚谢邹喻水云苏潘葛奚范彭郎鲁韦昌马苗凤花方俞任袁柳鲍史唐费岑薛雷贺倪汤滕殷罗毕郝邬安常乐于时傅卞齐康伍余元卜顾孟平\" \\\n \"黄和穆萧尹姚邵湛汪祁毛禹狄米贝明臧计成戴宋茅庞熊纪舒屈项祝董粱杜阮席季麻强贾路娄危江童颜郭梅盛林刁钟徐邱骆高夏蔡田胡凌霍万柯卢莫房缪干解应宗丁宣邓郁单杭洪包诸左石崔吉\" \\\n \"龚程邢滑裴陆荣翁荀羊甄家封芮储靳邴松井富乌焦巴弓牧隗山谷车侯伊宁仇祖武符刘景詹束龙叶幸司韶黎乔苍双闻莘劳逄姬冉宰桂牛寿通边燕冀尚农温庄晏瞿茹习鱼容向古戈终居衡步都耿满弘国文东殴沃曾关红游盖益桓公晋楚闫\"\n # 百家姓全部姓氏\n # firstName = \"赵钱孙李周吴郑王冯陈褚卫蒋沈韩杨朱秦尤许何吕施张孔曹严华金魏陶姜戚谢邹喻柏水窦章云苏潘葛奚范彭郎鲁韦昌马苗凤花方俞任袁柳酆鲍史唐费廉岑薛雷贺倪汤滕殷罗毕郝邬安常乐于时傅皮卞齐康伍余元卜顾孟平\" \\\n # \"黄和穆萧尹姚邵湛汪祁毛禹狄米贝明臧计伏成戴谈宋茅庞熊纪舒屈项祝董粱杜阮蓝闵席季麻强贾路娄危江童颜郭梅盛林刁钟徐邱骆高夏蔡田樊胡凌霍虞万支柯昝管卢莫经房裘缪干解应宗丁宣贲邓郁单杭洪包诸左石崔吉钮\" \\\n # \"龚程嵇邢滑裴陆荣翁荀羊於惠甄麴家封芮羿储靳汲邴糜松井段富巫乌焦巴弓牧隗山谷车侯宓蓬全郗班仰秋仲伊宫宁仇栾暴甘钭厉戎祖武符刘景詹束龙叶幸司韶郜黎蓟薄印宿白怀蒲邰从鄂索咸籍赖卓蔺屠蒙池乔阴欎胥能苍\" \\\n # \"双闻莘党翟谭贡劳逄姬申扶堵冉宰郦雍舄璩桑桂濮牛寿通边扈燕冀郏浦尚农温别庄晏柴瞿阎充慕连茹习宦艾鱼容向古易慎戈廖庾终暨居衡步都耿满弘匡国文寇广禄阙东殴殳沃利蔚越夔隆师巩厍聂晁勾敖融冷訾辛阚那简饶空\" \\\n # \"曾毋沙乜养鞠须丰巢关蒯相查後荆红游竺权逯盖益桓公晋楚闫法汝鄢涂钦归海帅缑亢况后有琴梁丘左丘商牟佘佴伯赏南宫墨哈谯笪年爱阳佟言福百家姓终\"\n # 百家姓中双姓氏\n firstName2 = \"万俟司马上官欧阳夏侯诸葛闻人东方赫连皇甫尉迟公羊澹台公冶宗政濮阳淳于单于太叔申屠公孙仲孙轩辕令狐钟离宇文长孙慕容鲜于闾丘司徒司空亓官司寇仉督子颛孙端木巫马公西漆雕乐正壤驷公良拓跋夹谷宰父谷梁段干百里东郭南门呼延羊舌微生梁丘左丘东门西门南宫南宫\"\n # 女孩名字\n girl = '秀娟英华慧巧美娜静淑惠珠翠雅芝玉萍红娥玲芬芳燕彩春菊兰凤洁梅琳素云莲真环雪荣爱妹霞香月莺媛艳瑞凡佳嘉琼勤珍贞莉桂娣叶璧璐娅琦晶妍茜秋珊莎锦黛青倩婷姣婉娴瑾颖露瑶怡婵雁蓓纨仪荷丹蓉眉君琴蕊薇菁梦岚苑婕馨瑗琰韵融园艺咏卿聪澜纯毓悦昭冰爽琬茗羽希宁欣飘育滢馥筠柔竹霭凝晓欢霄枫芸菲寒伊亚宜可姬舒影荔枝思丽'\n # 男孩名字\n boy = '伟刚勇毅俊峰强军平保东文辉力明永健世广志义兴良海山仁波宁贵福生龙元全国胜学祥才发武新利清飞彬富顺信子杰涛昌成康星光天达安岩中茂进林有坚和彪博诚先敬震振壮会思群豪心邦承乐绍功松善厚庆磊民友裕河哲江超浩亮政谦亨奇固之轮翰朗伯宏言若鸣朋斌梁栋维启克伦翔旭鹏泽晨辰士以建家致树炎德行时泰盛雄琛钧冠策腾楠榕风航弘'\n # 名\n name = '中笑贝凯歌易仁器义礼智信友上都卡被好无九加电金马钰玉忠孝'\n\n # 10%的机遇生成双数姓氏\n if random.choice(range(100)) > 10:\n firstName_name = firstName[random.choice(range(len(firstName)))]\n else:\n i = random.choice(range(len(firstName2)))\n firstName_name = firstName2[i:i + 2]\n\n sex = random.choice(range(2))\n name_1 = \"\"\n # 生成并返回一个名字\n if sex > 0:\n girl_name = girl[random.choice(range(len(girl)))]\n if random.choice(range(2)) > 0:\n name_1 = name[random.choice(range(len(name)))]\n return firstName_name + name_1 + girl_name\n else:\n boy_name = boy[random.choice(range(len(boy)))]\n if random.choice(range(2)) > 0:\n name_1 = name[random.choice(range(len(name)))]\n return firstName_name + name_1 + boy_name\n\n\n # 随机生成身份证号\n def get_idnum():\n id_num = ''\n # 随机选择地址码\n id_num +=str(random.choice(province_id))\n # 随机生成4-6位地址码\n for i in range(4):\n ran_num = str(random.randint(0, 9))\n id_num += ran_num\n b = RandomInfo.get_birthday()\n id_num+=b\n # 生成15、16位顺序号\n num = ''\n for i in range(2):\n num += str(random.randint(0, 9))\n id_num += num\n # 通过性别判断生成第十七位数字 男单 女双\n s = 
RandomInfo.get_sex()\n # print(\"性别:\", s)\n if s =='男':\n # 生成奇数\n seventeen_num = random.randrange(1,9,2)\n else:\n seventeen_num = random.randrange(2,9,2)\n id_num+=str(seventeen_num)\n eighteen_num = str(random.randint(1,10))\n if eighteen_num == '10':\n eighteen_num = 'X'\n id_num += eighteen_num\n return id_num\n\n\n\n # 随机生成出生日期\n def get_birthday():\n # 随机生成年月日\n year = random.randint(1960, 2000)\n month = random.randint(1, 12)\n # 判断每个月有多少天随机生成日\n if year%4 ==0:\n if month in (1,3,5,7,8,10,12):\n day = random.randint(1,31)\n elif month in (4,6,9,11):\n day = random.randint(1,30)\n else:\n day = random.randint(1,29)\n else:\n if month in (1,3,5,7,8,10,12):\n day = random.randint(1,31)\n elif month in (4,6,9,11):\n day = random.randint(1,30)\n else:\n day = random.randint(1,28)\n # 小于10的月份前面加0\n if month < 10:\n month = '0' + str(month)\n if day < 10:\n day = '0' + str(day)\n birthday = str(year)+str(month)+str(day)\n return birthday\n\n # 随机生成性别\n def get_sex():\n return random.choice(['男', '女'])\n # get_sex = lambda :random.choice(['男', '女'])\n\n\n # 随机生成手机号\n def get_tel():\n tel = ''\n tel+=str(random.choice(phone_number))\n ran = ''\n for i in range(8):\n ran += str(random.randint(0,9))\n tel +=ran\n return tel\n\n # 随机生成银行卡号\n def get_card_id():\n card_id = '62'\n for i in range(17):\n ran = str(random.randint(0,9))\n card_id += ran\n return card_id\n\n # 随机生成邮箱\n def get_email():\n email_suf = random.choice(['@163.com','@qq.com','@126.com','@sina.com','@sina.cn','@soho.com','@yeah.com'])\n phone = RandomInfo.get_tel()\n email = phone + email_suf\n # print(\"手机号:\",phone)\n return email\n\nif __name__ == '__main__':\n x = RandomInfo.get_name()\n print(\"姓名:\", x)\n sex = RandomInfo.get_sex()\n print(\"性别:\", sex)\n IdCardNum = RandomInfo.get_idnum()\n print(\"身份证号:\", IdCardNum)\n email = RandomInfo.get_email()\n phone_number = RandomInfo.get_tel()\n print(\"手机号:\", phone_number)\n print(\"邮箱:\", email)\n BankCardNum = RandomInfo.get_card_id()\n print(\"银行卡号:\", BankCardNum)","repo_name":"zhoujiaqi123456/uiauto","sub_path":"util/RandomGeneration/RandomInfo.py","file_name":"RandomInfo.py","file_ext":"py","file_size_in_byte":8579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42660211183","text":"import math\nimport pygame as pg\nfrom socket_creation import make_sockets\nclass Tile:\n def __init__(self, img, color_lookup):\n self.img = img\n self.img_copy = self.img.copy()\n\n self.edges = make_sockets(self.img, color_lookup)\n self.up = []\n self.right = []\n self.down = []\n self.left = []\n self.color_lookup = color_lookup\n\n\n def rotate(self, num):\n # rotate the image\n img = pg.transform.rotate(self.img_copy, math.degrees(math.pi/2) * num)\n\n # rotate edges\n # new_edges = []\n # length = len(self.edges)\n # for i in range(length):\n # new_edges.append(self.edges[(i - num + length) % length])\n\n\n return Tile(img, self.color_lookup)\n\n def compare(self, edge1, edge2):\n return edge1 == edge2\n\n def analyze(self, tiles):\n\n for i in range(len(tiles)):\n # UP\n if self.compare(tiles[i].edges[2], self.edges[0]):\n self.up.append(i)\n # RIGHT\n if self.compare(tiles[i].edges[3], self.edges[1]):\n self.left.append(i)\n # DOWN\n if self.compare(tiles[i].edges[0], self.edges[2]):\n self.down.append(i)\n # LEFT\n if self.compare(tiles[i].edges[1], self.edges[3]):\n self.right.append(i)\n\n\n\nclass Cell:\n def __init__(self, num):\n self.collapsed = False\n self.options = []\n if type(num) == list:\n 
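`get_idnum` above draws the 18th (check) digit at random, so most generated IDs will not pass real validation; under GB 11643-1999 the check digit is a weighted mod-11 checksum over the first 17 digits. A sketch that could replace the random pick:

```python
def checksum_digit(first17: str) -> str:
    """GB 11643-1999 check digit for a Chinese resident ID; could
    replace the random 18th digit drawn in get_idnum."""
    weights = [7, 9, 10, 5, 8, 4, 2, 1, 6, 3, 7, 9, 10, 5, 8, 4, 2]
    mapping = "10X98765432"  # remainder 0 -> '1', 1 -> '0', 2 -> 'X', ...
    total = sum(int(d) * w for d, w in zip(first17, weights))
    return mapping[total % 11]
```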
self.options = num\n else:\n for i in range(num):\n self.options.append(i)\n","repo_name":"Mimkaa/Wave_Function_Collapse","sub_path":"objects.py","file_name":"objects.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71060435715","text":"import cv2, numpy as np\nimport os\n\n\nclass ThermogramUtils:\n @staticmethod\n def detect_edges(image):\n gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n blurred_gray_image = cv2.GaussianBlur(gray_image, (3, 3), 0)\n\n edges = cv2.Canny(blurred_gray_image, 120, 370)\n\n return edges\n\n @staticmethod\n def view_hot_regions(image):\n hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n\n lower_range = np.array([0, 50, 50])\n upper_range = np.array([25, 255, 255])\n\n mask = cv2.inRange(hsv, lower_range, upper_range)\n\n return mask\n","repo_name":"BabafemiOyinlola/Thermogram-Checker","sub_path":"utils/thermogramUtils.py","file_name":"thermogramUtils.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35456103419","text":"from django.urls import include, path\n\nfrom .views import (AddCouponView, CheckoutView, HomeView, ItemDetailView,\n OrderSummaryView, PaymentView, RequestRefundView,\n add_to_cart, remove_from_cart,\n remove_single_item_from_cart)\n\napp_name = 'core'\n\nurlpatterns = [\n path('', HomeView.as_view(), name='home'),\n path('checkout/', CheckoutView.as_view(), name='checkout'),\n path('cart/', OrderSummaryView.as_view(), name='cart'),\n path('product//', ItemDetailView.as_view(), name='product'),\n path('add-to-cart//', add_to_cart, name='add-to-cart'),\n path('add-coupon/', AddCouponView.as_view(), name='add-coupon'),\n path(\n 'remove-from-cart//',\n remove_from_cart,\n name='remove-from-cart',\n ),\n path(\n 'remove-single-item-from-cart//',\n remove_single_item_from_cart,\n name='remove-single-item-from-cart',\n ),\n path('accounts/', include('allauth.urls')),\n path('payment//', PaymentView.as_view(), name='payment'),\n path(\n 'request-refund/',\n RequestRefundView.as_view(),\n name='request-refund',\n ),\n]\n","repo_name":"RemLampa/django_ecommerce","sub_path":"src/core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"3038321624","text":"from __future__ import division\nfrom math import sin, cos\n\ndef convert_params(_x):\n # Unpack function arguments\n rr, rrt, rf, rft, w, c, q1, xb, zb, xh, zh, mr, mb, mh, mf, IRxx, IRyy, IBxx, IByy, IBzz, IBxz, IHxx, IHyy, IHzz, IHxz, IFxx, IFyy, g = _x\n\n # Trigonometric functions\n c1 = cos(q1)\n s1 = sin(q1)\n\n # Calculate return values\n lr = (c + w)*c1 + (-rr - rrt)*s1\n ls = w*s1 + (rr + rrt - rf - rft)*c1\n lf = (rf + rft)*s1 - c*c1\n l1 = (-rr - rrt - (mb*zb + mr*(-rr - rrt))/(mb + mr))*s1 + mb*xb*c1/(mb + mr)\n l2 = -(-rr - rrt - (mb*zb + mr*(-rr - rrt))/(mb + mr))*c1 + mb*xb*s1/(mb + mr)\n l3 = (-rf - rft - (mf*(-rf - rft) + mh*zh)/(mf + mh))*s1 + mh*(xh - w)*c1/(mf + mh)\n l4 = -(-rf - rft - (mf*(-rf - rft) + mh*zh)/(mf + mh))*c1 + mh*(xh - w)*s1/(mf + mh)\n\n # Nested terms\n l1c = -l1\n l1d = -l1 + xb*c1 - (rr + rrt + zb)*s1\n l1e = -l3 - lf - lr + xh*c1 - (rr + rrt + zh)*s1\n l1f = -l3\n l3c = -l2\n l3d = -l2 + xb*s1 + (rr + rrt + zb)*c1\n l3e = -l4 - ls + xh*s1 + (rr + rrt + zh)*c1\n l3f = -l4\n\n mcd = mb + mr\n mef = mf + mh\n IC22 = IRyy\n 
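`Tile.rotate` above rotates the sprite but ships with its edge-rotation block commented out, so a rotated tile keeps its unrotated sockets. A sketch of rotating the socket list to match, assuming edges are ordered (up, right, down, left), that `pg.transform.rotate`'s positive angles are counter-clockwise, and that sockets compare equal without string reversal:

```python
def rotate_edges(edges, num):
    """Rotate an (up, right, down, left) socket list by `num` quarter
    turns counter-clockwise, matching pg.transform.rotate."""
    n = len(edges)
    return [edges[(i + num) % n] for i in range(n)]

# e.g. rotate_edges(['A', 'B', 'C', 'D'], 1) -> ['B', 'C', 'D', 'A']
```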
ICD11 = IRxx - 2*IBxz*c1*s1 + IBxx*c1**2 + IBzz*s1**2 + mb*l3d**2 + mr*l3c**2\n ICD13 = IBxx*c1*s1 - IBzz*c1*s1 - l1c*l3c*mr - l1d*l3d*mb + IBxz*c1**2 - IBxz*s1**2\n ICD22 = IByy + mb*(l1d**2 + l3d**2) + mr*(l1c**2 + l3c**2)\n ICD33 = IRxx + 2*IBxz*c1*s1 + IBxx*s1**2 + IBzz*c1**2 + mb*l1d**2 + mr*l1c**2\n IEF11 = IFxx - 2*IHxz*c1*s1 + IHxx*c1**2 + IHzz*s1**2 + mf*l3f**2 + mh*l3e**2\n IEF22 = IHyy + mf*(l1f**2 + l3f**2) + mh*(l1e**2 + l3e**2)\n IEF33 = IFxx + 2*IHxz*c1*s1 + IHxx*s1**2 + IHzz*c1**2 + mf*l1f**2 + mh*l1e**2\n IEF13 = IHxx*c1*s1 - IHzz*c1*s1 - l1e*l3e*mh - l1f*l3f*mf + IHxz*c1**2 - IHxz*s1**2\n IF22 = IFyy\n\n # Return calculated values\n return [rr, rrt, rf, rft, lr, ls, lf, l1, l2, l3, l4, mcd, mef, IC22,\n ICD11, ICD22, ICD33, ICD13, IEF11, IEF22, IEF33, IEF13, IF22, g]\n\n","repo_name":"hazelnusse/pydy","sub_path":"examples/bicycle/convert_parameters.py","file_name":"convert_parameters.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"61"} +{"seq_id":"25892566998","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n#import sys\n#sys.path.remove('/opt/ros/melodic/lib/python2.7/dist-packages')\nimport glob \nimport pickle \nimport cv2\nimport rospy\nimport numpy as np\nfrom sensor_msgs.msg import Image\n# OpenCV를 ROS에서 사용할 수 있게 해주는 모듈\nfrom cv_bridge import CvBridge\nbridge = CvBridge()\n# 이미지를 담을 빈 배열 생성\n#cv_image = np.empty(shape=[0])\n\nrospy.init_node('image_pulisher', anonymous=True)\ncamera_pub = rospy.Publisher('/TFF/camera_topic', Image, queue_size=10)\ndef get_cameramat_dist(filename): \n f = open(filename, 'rb') \n mat, dist, rvecs, tvecs = pickle.load(f) \n f.close() \n #print(\"camera matrix\") \n #print(mat) \n #print(\"distortion coeff\") \n #print(dist) \n return mat,dist \n\n\ndef main(): \n mat, dist = get_cameramat_dist(\"/home/nohs/catkin_ws/src/enet_ros/src/cam_calib.pkl\") \n cap = cv2.VideoCapture(\"v4l2src device=/dev/video0 ! video/x-raw, width=640, height=480, format=(string)YUY2,framerate=30/1 ! videoconvert ! video/x-raw,width=640,height=480,format=BGR ! 
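`get_cameramat_dist` above unpickles a `(mat, dist, rvecs, tvecs)` tuple; the writer side isn't shown, but a sketch of producing `cam_calib.pkl` after a chessboard calibration might look like this (gathering of `objpoints`/`imgpoints` omitted):

```python
import pickle
import cv2

def save_cameramat_dist(filename, objpoints, imgpoints, image_size):
    """Hypothetical counterpart to get_cameramat_dist: calibrate and
    pickle the tuple in the order the loader expects."""
    ret, mat, dist, rvecs, tvecs = cv2.calibrateCamera(
        objpoints, imgpoints, image_size, None, None)
    with open(filename, 'wb') as f:
        pickle.dump((mat, dist, rvecs, tvecs), f)
    return ret  # RMS reprojection error
```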
appsink\")\n ret, frame = cap.read()\n width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)\n height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\n print(\"width:\",width,\"height:\",height)\n #fourcc = cv2.VideoWriter_fourcc(*'DIVX')\n #fps = cap.get(cv2.CAP_PROP_FPS)\n frame = cv2.flip(frame, -1) \n rsz = cv2.resize(frame, dsize=(640,480)) \n gray = cv2.cvtColor(rsz, cv2.COLOR_BGR2GRAY)\n h, w = gray.shape[:2] \n newcameramtx, roi=cv2.getOptimalNewCameraMatrix(mat,dist,(w,h),0,(w,h))\n #out = cv2.VideoWriter('out.avi', fourcc, fps, (int(480), int(360))) \n while(True): \n ret, frame = cap.read() \n frame = cv2.flip(frame,-1)\n #print(frame.shape) \n rsz = cv2.resize(frame, dsize=(640,480))\n gray = rsz\n# undistort \n mapx,mapy = cv2.initUndistortRectifyMap(mat,dist,None,newcameramtx,(w,h),5) \n res = cv2.remap(gray,mapx,mapy,cv2.INTER_LINEAR) \n# crop the image \n x,y,w,h = roi \n res = res[y:y+h, x:x+w]\n res = cv2.resize(res,(480,360))\n #out.write(res)\n #cv2.imshow('res',res)\n cv_image = bridge.cv2_to_imgmsg(res,'bgr8') \n camera_pub.publish(cv_image)\n if cv2.waitKey(10) & 0xFF == ord('q'): \n break\n cap.release() \n cv2.destroyAllWindows()\nif __name__ == \"__main__\": \n main()\n","repo_name":"NOHYC/autonomous_driving_car_project","sub_path":"NX/catkin_ws/src/enet_ros/src/pub_camera.py","file_name":"pub_camera.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"20461061506","text":"import services\nimport sys\nimport sims4.collections\nimport interactions\nFILTERS = {'basic_filter': ['_key', '_value'], 'memory_filter': ['_key', '_key_size', '_address', '_value', '_value_size', '_value_addr']}\ndata_filter = FILTERS['basic_filter']\n\nclass HttpDisplayData:\n __qualname__ = 'HttpDisplayData'\n __slots__ = ('_key', '_key_size', '_address', '_value', '_value_size', '_value_addr')\n\n def __init__(self, key='', key_size='', address='', value='', value_size='', value_addr=''):\n self._key = key\n self._key_size = key_size\n self._address = address\n self._value = value\n self._value_size = value_size\n self._value_addr = value_addr\n\n def __iter__(self):\n for slot in self.__slots__:\n while slot in data_filter:\n yield getattr(self, slot)\n\ndef generate_link(key_string, key_value=None, link_name=None, link_params=None):\n import services.http_service\n link_str = 'http://{}:{}'.format(services.http_service.http_server.server_address[0], services.http_service.http_server.server_address[1])\n if link_params is not None:\n for param in link_params[1:]:\n link_str += '/{}'.format(param)\n if key_value == None:\n return '{}'.format(link_str, key_string, link_name)\n return '{}'.format(link_str, key_string, key_value, link_name)\n\ndef remove_html_formatting(value):\n try:\n obj_str = str(value)\n except AttributeError:\n obj_str = str(value.__class__)\n string_list = list(obj_str)\n for (index, char) in enumerate(string_list):\n if char == '<':\n string_list[index] = '('\n else:\n while char == '>':\n string_list[index] = ')'\n return ''.join(string_list)\n\nclass GenericHandler:\n __qualname__ = 'GenericHandler'\n\n def __init__(self):\n self.key = 'generic_handler'\n\n def handle_link(self, cur_obj, link_value):\n return services.object_manager().get(int(link_value))\n\n def is_type(self, value):\n return True\n\n def generate_display(self, params, key, value):\n return HttpDisplayData(key=key, key_size=sys.getsizeof(key), address=id(key), value=remove_html_formatting(value), 
value_size=sys.getsizeof(value), value_addr=id(value))\n\nclass ObjTypeHandler(GenericHandler):\n __qualname__ = 'ObjTypeHandler'\n\n def __init__(self):\n self.key = 'obj_id'\n\n def handle_link(self, cur_obj, link_value):\n return services.object_manager().get(int(link_value))\n\n def is_type(self, value):\n return False\n\nclass ServiceTypeHandler(GenericHandler):\n __qualname__ = 'ServiceTypeHandler'\n\n def __init__(self):\n self.key = 'service'\n\n def handle_link(self, cur_obj, link_value):\n for (index, service) in enumerate(services._service_manager.services):\n while index == int(link_value):\n break\n return service\n\n def is_type(self, value):\n return False\n\nclass VarTypeHandler(GenericHandler):\n __qualname__ = 'VarTypeHandler'\n\n def __init__(self):\n self.key = 'var'\n\n def handle_link(self, cur_obj, link_value):\n if hasattr(cur_obj, link_value):\n return getattr(cur_obj, link_value)\n return cur_obj.get(link_value)\n\n def is_type(self, value):\n if hasattr(value, 'gsi_data') or hasattr(value, '__dict__') or hasattr(value, '__slots__'):\n return True\n return False\n\n def generate_display(self, params, key, value):\n http_display = super().generate_display(params, key, value)\n http_display._key = generate_link(self.key, key, key, params)\n http_display._value = remove_html_formatting(value)\n return http_display\n\nclass ListTypeHandler(GenericHandler):\n __qualname__ = 'ListTypeHandler'\n\n def __init__(self):\n self.key = 'list'\n\n def handle_link(self, cur_obj, link_value):\n val_key_and_index = link_value.split(':')\n cur_list = getattr(cur_obj, val_key_and_index[0])\n return cur_list[int(val_key_and_index[1])]\n\n def is_type(self, value):\n if isinstance(value, list) or isinstance(value, interactions.interaction_queue.BucketBase):\n return True\n return False\n\n def generate_display(self, params, key, value):\n display_str = ''\n for (index, list_item) in enumerate(value):\n try:\n while hasattr(list_item, '__slots__') or hasattr(list_item, '__dict__') or hasattr(list_item, 'gsi_data'):\n display_str += generate_link(self.key, '{}:{}'.format(key, index), remove_html_formatting(list_item), params)\n except KeyError:\n display_str += remove_html_formatting(list_item)\n display_str += remove_html_formatting(list_item)\n display_str += '
    '\n http_display = super().generate_display(params, key, value)\n http_display._value = display_str\n return http_display\n\nclass DictTypeHandler(GenericHandler):\n __qualname__ = 'DictTypeHandler'\n\n def __init__(self):\n self.key = 'dict'\n\n def handle_link(self, cur_obj, link_value):\n val_key_and_key_address = link_value.split(':')\n cur_dict = getattr(cur_obj, val_key_and_key_address[0])\n for (key, value) in cur_dict.items():\n while id(key) == int(val_key_and_key_address[1]):\n return value\n\n def is_type(self, value):\n if isinstance(value, dict):\n return True\n return False\n\n def generate_display(self, params, key, value):\n display_str = ''\n for (dict_key, dict_val) in value.items():\n if hasattr(dict_val, 'gsi_data') or hasattr(dict_val, '__dict__') or hasattr(dict_val, '__slots__'):\n display_str += '{} : '.format(dict_key)\n display_str += generate_link(self.key, '{}:{}'.format(key, id(dict_key)), remove_html_formatting(dict_val), params)\n else:\n display_str += remove_html_formatting('{} : {}'.format(dict_key, dict_val))\n display_str += '
    '\n http_display = super().generate_display(params, key, value)\n http_display._value = display_str\n return http_display\n\n","repo_name":"johndpope/sims4-ai-engine","sub_path":"simulation/services/http_service_handlers.py","file_name":"http_service_handlers.py","file_ext":"py","file_size_in_byte":6268,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"17531003282","text":"# Out iterator implementation\ndef count_to(count):\n\t# Out list\n\tnumbers_in_spanish = [\"uno\", \"dos\", \"tres\", \"cuatro\", \"cinco\"]\n\n\t# Out built-in iterator\n\t# Create a tuple such as (1, \"uno\")\n\titerator = zip(range(count), numbers_in_spanish)\n\n\t# Iterate through our iterable list\n\t# Extract the Spanish numbers\n\t# Put them in a generator called number\n\tfor position, number in iterator:\n\t\t# Return a 'generator' containing numbers in Spanish\n\t\tyield number\n\n# Test the generator returned by our iterator\nfor num in count_to(3):\n\tprint(\"{}\".format(num))","repo_name":"Escartin85/scriptsPy","sub_path":"design_patterns/iterator_test.py","file_name":"iterator_test.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11950843066","text":"import tkinter as tk\nfrom tkinter import ttk\nfrom calendar import Calendar\nfrom datetime import date\n\n\nimport cycle_db\nfrom cycle_calc import color_dict\n\n\nclass View(tk.Frame):\n \"\"\"\n Graphic display class\n y, m - start year and month, determined at the start of the program\n username - one session - one user\n dates - load from DB and change by user\n colors - colors of days according to cycle\n \"\"\"\n y, m = map(int, str(date.today()).split('-')[:2])\n username = ''\n dates = []\n colors = {}\n\n def __init__(self, root):\n tk.Frame.__init__(self, root)\n self.db = cycle_db.DB()\n self.start_screen()\n\n # =======================WINDOWS==============================\n def start_screen(self):\n \"\"\"Start screen, where going on selection user or registration new.\"\"\"\n\n # frame placement and settings\n self.pack(fill=tk.BOTH, expand=1)\n self.configure(bg='SpringGreen2')\n\n # greeting widget\n lebel_hi = tk.Label(self, text='HI! 
Who are you?', width=25)\n lebel_hi.place(x=95, y=150)\n\n # user select menu options\n variants = (\n self.db.get_usernames() + ['New user', 'OH i changed my mind!!!']\n )\n\n # user select menu\n menu = ttk.Combobox(self, state='readonly', values=variants)\n menu.current(0)\n menu.place(x=115, y=180)\n\n # button 'HI'\n btn_ok = tk.Button(self, text='HI!')\n btn_ok.place(x=165, y=205)\n btn_ok.bind('<1>', lambda event: self.hi(menu.get()))\n\n def registration_screen(self):\n \"\"\"Add fields for registration\"\"\"\n\n # label for text\n label_nuser = tk.Label(self, text='Enter your name.', width=25)\n label_nuser.place(x=95, y=250)\n\n # field for entering text\n enter_screen = tk.Entry(self, width=22)\n enter_screen.place(x=115, y=275)\n\n # button 'Enter'\n btn_entr = tk.Button(self, text='Enter')\n btn_entr.place(x=160, y=300)\n btn_entr.bind('<1>', lambda event: self.registration(enter_screen.get()))\n\n def display_w_screen(self):\n \"\"\"Main operating screen for the user\"\"\"\n\n # clear screen from previous widgets\n for w in self.winfo_children():\n w.destroy()\n\n # frame placement and settings\n self.pack(fill=tk.BOTH, expand=1)\n self.configure(bg='SpringGreen3')\n\n month_dict = {1: 'January', 2: 'February', 3: 'March',\n 4: 'April', 5: 'May', 6: 'June',\n 7: 'July', 8: 'August', 9: 'September',\n 10: 'October', 11: 'November', 12: 'December'}\n\n # labels for printing username, year and month\n label_username = tk.Label(self, text=f'{View.username}')\n label_y = tk.Label(self, text=f'{View.y}')\n label_m = tk.Label(self, text=f'{month_dict[View.m]}')\n label_username.grid(row=0, column=0, columnspan=7, sticky='w' + 'e')\n label_y.grid(row=1, column=2, columnspan=3, sticky='w' + 'e')\n label_m.grid(row=2, column=2, columnspan=3, sticky='w' + 'e')\n\n # buttons for scrolling months\n btn_l = tk.Button(self, text='<-', command=self.minus_month)\n btn_r = tk.Button(self, text='->', command=self.plus_month)\n btn_l.grid(row=1, rowspan=2, column=0, columnspan=2,\n sticky='n' + 's' + 'w' + 'e')\n btn_r.grid(row=1, rowspan=2, column=5, columnspan=2,\n sticky='n' + 's' + 'w' + 'e')\n\n # button for register changes\n btn_register_changes = tk.Button(self, text='Enter', height=2, width=10)\n btn_register_changes.place(x=140, y=380)\n btn_register_changes.bind('<1>', self.register_changes)\n\n d = Calendar()\n # determine and placement day's buttons for each months\n btn = []\n for i in range(len(d.monthdayscalendar(View.y, View.m) * 7)):\n c, r = divmod(i, len(d.monthdayscalendar(View.y, View.m)))\n # text for button is a day\n btn_text = str(d.monthdayscalendar(View.y, View.m)[r][c])\n\n # button's color according to existing date in [dates]\n color = View.colors[f'{View.y} {View.m} {btn_text}'] if \\\n f'{View.y} {View.m} {btn_text}' in View.colors else 'OliveDrab2'\n\n # form list of buttons\n btn.append(tk.Button(\n self, text=btn_text, height=3, width=6, bg=color))\n\n # buttons, not in current month not place\n if str(d.monthdayscalendar(View.y, View.m)[r][c]) != '0':\n btn[i].grid(row=r + 3, column=c)\n btn[i].bind('', self.click)\n\n # =====================BUTTON_FUNCTIONS========================\n def hi(self, choice):\n \"\"\"Actions according to choice from user select menu options\"\"\"\n\n if choice == 'OH i changed my mind!!!':\n self.tk.quit() # quit program\n elif choice == 'New user': # registration new user\n self.registration_screen()\n else: # upload existing user's data\n View.dates = [x[0] for x in self.db.upload(choice)]\n View.username = choice\n View.colors 
= color_dict(View.dates)\n self.display_w_screen()\n\n def registration(self, username):\n \"\"\"Creating new db for new user\"\"\"\n View.username = username\n self.db.create(username)\n self.display_w_screen()\n\n def plus_month(self):\n \"\"\"Scrolling months to the right\"\"\"\n View.m += 1\n if View.m > 12:\n View.y += 1\n View.m = 1\n self.display_w_screen()\n\n def minus_month(self):\n \"\"\"Scrolling months to the left\"\"\"\n View.m -= 1\n if View.m < 1:\n View.y -= 1\n View.m = 12\n self.display_w_screen()\n\n @staticmethod\n def click(event):\n \"\"\"Date click processing\"\"\"\n\n # insertion and deletion data in [dates]:\n # first click write, second click - delete\n if f'{View.y} {View.m} {event.widget.cget(\"text\")}' in View.dates:\n View.dates.remove(f'{View.y} {View.m} {event.widget.cget(\"text\")}')\n event.widget.config(bg='OliveDrab2')\n else:\n View.dates.append(f'{View.y} {View.m} {event.widget.cget(\"text\")}')\n event.widget.config(bg='red')\n\n def register_changes(self, event):\n \"\"\"\n Entering date changes in the database, overriding the color dictionary\n \"\"\"\n View.dates.sort(key=lambda x: date(*map(int, x.split())))\n self.db.download(View.username, View.dates)\n # upload existing user's data\n View.colors = color_dict(View.dates)\n self.display_w_screen()\n\n\ndef main():\n \"\"\"Main function\"\"\"\n root = tk.Tk()\n View(root)\n root.title('Cycle Calendar')\n root.geometry('365x500')\n root.resizable(False, False)\n root.mainloop()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"MaxGonchar/cycle_calendar","sub_path":"cycle_calendar/cycle_calendar_main.py","file_name":"cycle_calendar_main.py","file_ext":"py","file_size_in_byte":6950,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"1024644108","text":"from datetime import datetime\n\nfrom django.forms import widgets\nfrom django.template.loader import render_to_string\nfrom django.utils.safestring import mark_safe\nfrom lunch.models import Period\nfrom pendulum import Pendulum\n\n\nclass ReceiptWidget(widgets.Widget):\n\n supports_microseconds = False\n name_weekday = '-weekday'\n name_time = '-time'\n\n def __init__(self, store, orderedfood, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.store = store\n self.orderedfood = orderedfood\n\n def render(self, name, value, attrs=None):\n return mark_safe(\n render_to_string(\n template_name='widgets/receipt_field.html',\n context={\n 'name_weekday': name + self.name_weekday,\n 'name_time': name + self.name_time,\n 'value': value,\n 'attrs': attrs,\n 'store': self.store,\n 'orderedfood': self.orderedfood,\n }\n )\n )\n\n def value_from_datadict(self, data, files, name):\n try:\n time = datetime.strptime(\n data.get(name + self.name_time),\n '%H:%M'\n ).time()\n return Period.weekday_as_datetime(\n weekday=int(data.get(name + self.name_weekday)),\n time=time,\n store=self.store\n )._datetime\n except (TypeError, ValueError):\n return None\n\n\nclass DayWidget(widgets.Widget):\n\n supports_microseconds = False\n query_param = 'day'\n\n def __init__(self, group, days, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.group = group\n self.days = days\n\n def render(self, name, value, attrs=None):\n return mark_safe(\n render_to_string(\n template_name='widgets/day_field.html',\n context={\n 'days': self.days,\n 'query_param': self.query_param,\n 'value': value,\n 'group': self.group,\n }\n )\n )\n\n def value_from_datadict(self, data, files, name):\n try:\n result = 
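The calendar above imports `color_dict` from `cycle_calc`, which isn't shown; a minimal stand-in matching the `'{year} {month} {day}'` key format the `View` class looks up:

```python
def color_dict(dates):
    """Minimal stand-in for cycle_calc.color_dict: paint every recorded
    '{year} {month} {day}' key red. The real module presumably also
    colours predicted period/fertile days from the cycle length."""
    return {d: 'red' for d in dates}
```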
Pendulum.parse(\n data.get(self.query_param)\n ).date()\n if result in self.days:\n return result\n except (ValueError, OverflowError):\n pass\n now = Pendulum.now()\n try:\n return min(\n self.days,\n key=lambda date: now.diff(\n Pendulum.create(\n year=date.year,\n month=date.month,\n day=date.day\n )\n )\n )\n # Thrown if self.days is empty\n except ValueError:\n return None\n","repo_name":"ssprasad100/Lunchbreak_backend_again","sub_path":"lunchbreak/frontend/widgets.py","file_name":"widgets.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23409713201","text":"testcasenum = int(input())\n\nfor tc in range(testcasenum):\n row = int(input()) - 1\n nums1 = set()\n for i in range(4):\n line = input()\n if i == row:\n for n in line.split():\n nums1.add(int(n))\n row = int(input()) - 1\n nums2 = set()\n for i in range(4):\n line = input()\n if i == row:\n for n in line.split():\n nums2.add(int(n))\n result = nums1 & nums2\n res = \"\"\n if len(result) == 1:\n res = list(result)[0]\n elif len(result) > 1:\n res = \"Bad magician!\"\n else:\n res = \"Volunteer cheated!\"\n print(\"Case #{}: {}\".format(tc + 1, res))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_135/1081.py","file_name":"1081.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36519024578","text":"import codecademylib3_seaborn\nfrom bs4 import BeautifulSoup\nimport requests\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Add context for each code block!!!!\n# Print comments were for checking my work\n\nweb_info = requests.get('https://content.codecademy.com/courses/beautifulsoup/cacao/index.html')\nsoup = BeautifulSoup(web_info.content, 'html.parser')\n#print(soup)\n\nrating_tags = soup.find_all(attrs={\"class\": \"Rating\"})\nratings = []\nfor i in range(1, len(rating_tags)):\n temp1 = rating_tags[i].get_text()\n ratings.append(float(temp1))\n#print(ratings)\n\nrating_plot = plt.hist(ratings)\nplt.show(rating_plot)\n\ncompany_tags = soup.find_all(attrs={\"class\": \"Company\"})\ncompanies = []\nfor i in range(1, len(company_tags)):\n temp2 = company_tags[i].get_text()\n companies.append(temp2)\n#print(companies)\n\ncpercent_tags = soup.find_all(attrs={\"class\": \"CocoaPercent\"})\ncpercent = []\nfor i in range(1, len(cpercent_tags)):\n temp3 = cpercent_tags[i].get_text().strip('%')\n cpercent.append(float(temp3))\n#print(cpercent)\n\ndf = pd.DataFrame({'Company': companies, 'Rating': ratings, 'CocoaPercentage': cpercent})\n#print(df)\n\ncomp_group = df.groupby('Company').Rating.mean()\ntop_ten = comp_group.nlargest(10)\n#print(top_ten)\n\nplt.scatter(df.CocoaPercentage, df.Rating)\nz = np.polyfit(df.CocoaPercentage, df.Rating, 1)\nline_function = np.poly1d(z)\nplt.plot(df.CocoaPercentage, line_function(df.CocoaPercentage), \"r--\")\nplt.show()\nplt.clf()\n","repo_name":"Tykhist/Chocolate-Web-Scraping-Scenario","sub_path":"Chocolate-Data.py","file_name":"Chocolate-Data.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"10458592475","text":"from typing import Generator\nfrom typing import List\nfrom typing import Tuple\n\nimport day9\n\n\nclass CameraOutput(day9.OutDevice):\n def __init__(self):\n self._output: List[int] = []\n self._intersections: 
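`DayWidget` above uses Pendulum diffs to fall back to the day nearest to now; the same selection with only the standard library, assuming `days` holds `datetime.date` objects:

```python
from datetime import date

def closest_day(days):
    """Pick the entry nearest to today by absolute day distance; a
    standard-library sketch of DayWidget's fallback logic."""
    today = date.today()
    return min(days, key=lambda d: abs((d - today).days), default=None)
```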
List[int] = []\n self.width: int = -1\n\n def out(self, item: int) -> None:\n if item == ord('\\n'):\n if self.width == -1:\n self.width = len(self._output)\n else:\n self._output.append(item)\n\n def print(self) -> None:\n for i, ch in enumerate(self._output):\n if i % self.width == 0:\n print()\n print(chr(ch), end='')\n print()\n\n def _get_neighbours(self, index: int) -> Tuple[int, int, int, int]:\n return index - 1, index - self.width, index + 1, index + self.width\n\n def _put_intersections(self) -> None:\n for i in self._intersections:\n self._output[i] = ord('O')\n\n def _calculate_intersections(self) -> None:\n for i, ch in enumerate(self._output):\n if ch != ord('#'):\n continue\n\n neighbours = self._get_neighbours(i)\n is_intersection = True\n for neighbour in neighbours:\n try:\n n_ch = self._output[neighbour]\n if n_ch != ord('#'):\n is_intersection = False\n break\n except IndexError:\n # at the boundary, definitely not an intersection\n is_intersection = False\n break\n\n if is_intersection:\n self._intersections.append(i)\n\n self._put_intersections()\n\n def get_alignment_parameters(self) -> Generator[int, None, None]:\n self._calculate_intersections()\n for intersection in self._intersections:\n yield (intersection // self.width) * (intersection % self.width)\n\n\nif __name__ == '__main__':\n program = day9.get_puzzle('data/day17.txt')\n camera_output = CameraOutput()\n day9.program_loop(program, None, camera_output)\n\n alignment_parameters_sum = sum(camera_output.get_alignment_parameters())\n camera_output.print()\n print(alignment_parameters_sum)\n","repo_name":"kamilwu/aoc2019","sub_path":"day17.py","file_name":"day17.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72849750273","text":"import unittest\nimport numpy as np\nfrom ase.build import bulk\nfrom nnpotential import neural_network as nn\n\nclass TestNeighborList( unittest.TestCase ):\n def test_neighborlist(self):\n Rcut = 4.1 # Should cover second nearest neighbors on Al FCC\n pot = nn.NNPotential( Rcut=Rcut )\n atoms = bulk(\"Al\",\"fcc\",a=4.05)\n atoms = atoms*(4,4,4)\n nlist = pot.get_neighbor_list(atoms)\n indices, offsets = nlist.get_neighbors(0)\n\n # 5.73 should cover third nearest neighbors\n n_nearest_neighbors = 12\n n_second_nearest_neighbors = 6\n n_neighbors = n_nearest_neighbors+n_second_nearest_neighbors\n mic_distance = pot.offsets_to_mic_distance( atoms, 0, indices, offsets )\n lengths = np.sqrt(np.sum(mic_distance**2,axis=1))\n self.assertEqual( len(indices), n_neighbors )\n for i in range(len(lengths)):\n self.assertTrue( lengths[i] < Rcut )\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"davidkleiven/NeuralNetworkPotential","sub_path":"tests/test_neighborlist.py","file_name":"test_neighborlist.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31818884482","text":"# myfirstsqlserv.database.windows.net\n# Report about income from sales by product, client and sales person.\n# Please mind discounts.\n# Also mind that for some combinations of values in these dimensions\n# there are no sales at all, so create two versions of queries\n# with and without zero values.\nimport pyodbc\nfrom tabulate import tabulate\n\nqueries = ['''SELECT pr.ProductID,\n cs.CustomerID,\n cs.SalesPerson,\n COALESCE(SUM(sod.UnitPrice * (1 - sod.UnitPriceDiscount) * sod.OrderQty), 0) AS Sum\n 
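The scaffold detector above treats a `#` cell as an intersection when all four neighbours are `#`. One caveat: `index - 1` and `index - self.width` can go negative, where Python's negative indexing wraps silently instead of raising `IndexError` (harmless when an earlier neighbour check already fails, as it does on this map). A toy self-check under that reading, assuming `CameraOutput` is importable:

```python
def test_single_intersection():
    cam = CameraOutput()
    for ch in "..#..\n..#..\n#####\n..#..\n..#..\n":
        cam.out(ord(ch))
    # Only the centre of the cross (row 2, col 2) qualifies, so the
    # alignment-parameter sum is 2 * 2.
    assert sum(cam.get_alignment_parameters()) == 4
```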
FROM SalesLT.SalesOrderHeader soh\n JOIN SalesLT.Customer cs ON soh.CustomerID = cs.CustomerID\n JOIN SalesLT.SalesOrderDetail sod ON soh.SalesOrderID = sod.SalesOrderID\n JOIN SalesLT.Product pr ON sod.ProductID = pr.ProductID\n GROUP BY GROUPING SETS(pr.ProductID, cs.CustomerID, cs.SalesPerson)''',\n\n '''SELECT pr.ProductID,\n cs.CustomerID,\n cs.SalesPerson,\n COALESCE(SUM(sod.UnitPrice * (1 - sod.UnitPriceDiscount) * sod.OrderQty), 0) AS Sum\n FROM SalesLT.SalesOrderHeader soh\n FULL JOIN SalesLT.Customer cs ON soh.CustomerID = cs.CustomerID\n FULL JOIN SalesLT.SalesOrderDetail sod ON soh.SalesOrderID = sod.SalesOrderID\n FULL JOIN SalesLT.Product pr ON sod.ProductID = pr.ProductID\n GROUP BY GROUPING SETS(pr.ProductID, cs.CustomerID, cs.SalesPerson)''']\n\n\ndef get_data(query: str):\n connection = pyodbc.connect(\n 'DRIVER={ODBC Driver 17 for SQL Server};SERVER=myfirstsqlserv.database.windows.net;DATABASE=myFirstDatabase;'\n 'UID=your_login;PWD=your_password')\n cursor = connection.cursor()\n rows = cursor.execute(query)\n head = [tpl[0] for tpl in rows.description]\n rows = cursor.fetchall()\n connection.close()\n return tabulate(rows, headers=head, tablefmt='grid'), len(rows)\n\n\nif __name__ == '__main__':\n qtype = 1 if int(input('Input query type (0 or 1): ')) else 0\n result, amount = get_data(queries[qtype])\n print(result)\n print(f'Rows fetched: {amount}')\n","repo_name":"AntonyBazin/ds-2021","sub_path":"lab01/taskA.py","file_name":"taskA.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70950677956","text":"import random\n\"\"\"\nr = random.sample(range(1000), 1000)\n# random 1000\nfile1 = open(\"random_1000.txt\", \"w\")\nfile1.write('1000 ')\nfor i in r:\n s = str(i) + \" \"\n file1.write(s)\nfile1.close()\n\n# random 10000\nr = random.sample(range(10000), 10000)\nfile1 = open(\"random_10000.txt\", \"w\")\nfile1.write('10000 ')\nfor i in r:\n s = str(i) + \" \"\n file1.write(s)\nfile1.close()\n\"\"\"\n# random 100000\nr = random.sample(range(5000), 5000)\nfile1 = open(\"random_5000.txt\", \"w\")\nfile1.write('5000 ')\nfor i in r:\n s = str(i) + \" \"\n file1.write(s)\nfile1.close()\n\n# ascending 1000\nfile1 = open(\"ascending_5000.txt\", \"w\")\nfile1.write('5000 ')\nfor i in range(5000):\n s = str(i) + \" \"\n file1.write(s)\nfile1.close()\n\"\"\"\n# ascending 10000\nfile1 = open(\"ascending_10000.txt\", \"w\")\nfile1.write('10000 ')\nfor i in range(10000):\n s = str(i) + \" \"\n file1.write(s)\nfile1.close()\n\n# ascending 100000\nfile1 = open(\"ascending_100000.txt\", \"w\")\nfile1.write('100000 ')\nfor i in range(100000):\n s = str(i) + \" \"\n file1.write(s)\nfile1.close()\n\n\"\"\"\n# descending 1000\nfile1 = open(\"descending_5000.txt\", \"w\")\nfile1.write('5000 ')\nfor i in range(5000):\n s = str(5000 - i) + \" \"\n file1.write(s)\nfile1.close()\n\"\"\"\n# descending 10000\nfile1 = open(\"descending_10000.txt\", \"w\")\nfile1.write('10000 ')\nfor i in range(10000):\n s = str(10000 - i) + \" \"\n file1.write(s)\nfile1.close()\n\n# descending 100000\nfile1 = open(\"descending_100000.txt\", \"w\")\nfile1.write('100000 ')\nfor i in range(100000):\n s = str(100000 - i) + \" \"\n file1.write(s)\nfile1.close()\n\n\"\"\"","repo_name":"shrishtinigam/DAA_Lab","sub_path":"Ex3/text files/gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
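The connection string in the report script above ships placeholder credentials inline; a sketch of sourcing them from the environment instead (the variable names `SQL_USER`/`SQL_PASSWORD` are illustrative):

```python
import os
import pyodbc

def connect():
    """Build the connection from environment variables rather than
    hard-coded placeholders."""
    return pyodbc.connect(
        'DRIVER={ODBC Driver 17 for SQL Server};'
        'SERVER=myfirstsqlserv.database.windows.net;'
        'DATABASE=myFirstDatabase;'
        f'UID={os.environ["SQL_USER"]};PWD={os.environ["SQL_PASSWORD"]}'
    )
```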
+{"seq_id":"20963459423","text":"##**********************************************************##\n## ##\n## Name : Abbie Dyck ##\n## S/N : 36800668 ##\n## Progam Name : MultiplesOfNWithoutLoops.py ##\n## Desc : This program will print out the multiples ##\n## of m, and it will print out the first n ##\n## numbers. ##\n## ##\n##**********************************************************##\ndef multiple(m, n): #Creates a function\n a = range(n, (m * n)+1, n) #Makes a equal to the range of n - ((m*n)+1) so it will know how many numbers to print out\n \n print(*a) #Prints out all of the multiples of m\n \nm = int(input(\"Enter m number: \")) #Variable m\nn = int(input(\"Enter n number: \")) #Variable n\nmultiple(m, n) #Runs the function\n","repo_name":"AbbieDewhirst/PythonCodeGr12","sub_path":"MultiplesOfNWithoutLoops.py","file_name":"MultiplesOfNWithoutLoops.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10372445162","text":"import cv2\nfrom pathlib import Path\ndef video_to_img(video_path, output_path):\n \n # id = Path(video_path).stem\n Path(output_path).mkdir(exist_ok=True)\n # save_dir = Path(output_path) / id\n # save_dir_images_path = create_folder_template(save_dir)\n \n video = cv2.VideoCapture(video_path)\n fps = video.get(cv2.CAP_PROP_FPS)\n w = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))\n h = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))\n \n success, image = video.read()\n count = 0\n list_of_frames = []\n while success:\n cv2.imwrite(str(output_path / \"frame_{}.jpg\".format(count)), image) # save frame as JPEG file \n success,image = video.read()\n print('Reading frame:', count)\n # list_of_frames.append(str(output_path / \"{}_frame_{}.jpg\".format(id, count)))\n count += 1\n\nif __name__ == '__main__':\n video_to_img(\n \"/root/data/ltnghia/projects/visual_communication/htluc/custom_code/00421.mp4\",\n Path(\"/root/data/ltnghia/projects/visual_communication/htluc/custom_code/00421\"),\n )","repo_name":"LouisDo2108/CHI_MUGCAT","sub_path":"src/video2img.py","file_name":"video2img.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26381146319","text":"import logging\nfrom flask import Flask, url_for, request, render_template\n\napp = Flask(__name__)\napp.logger.setLevel(logging.INFO)\n\n\n@app.route('/')\ndef index():\n return 'Index Page'\n\n\n@app.route('/hello', methods=['GET', 'POST'])\n@app.route('/hello/')\ndef hello(name=None):\n return render_template('hello.html', name=name)\n\n\nwith app.test_request_context('/hello', method='POST'):\n # now you can do something with the request until the\n # end of the with block, such as basic assertions:\n assert request.path == '/hello'\n assert request.method == 'POST'\n print('Request posted to hello')\n\n\n@app.route('/log')\ndef logger():\n print('LOG: Print Logging')\n print(app.logger.level)\n\n app.logger.debug('A value for debugging')\n app.logger.info('A info value')\n app.logger.warning('A warning occurred (%d apples)', 42)\n app.logger.error('An error occurred')\n\n return 'Logging...'\n\n\n@app.route('/user/')\ndef show_user_profile(username):\n print(type(username))\n # show the user profile for that user\n return 'User %s' % username\n\n\n@app.route('/post/')\ndef show_post(post_id):\n print(type(post_id))\n # show the post with the given id, the id is an integer\n return 'Post %d' % 
post_id\n\n\n@app.route('/path/')\ndef show_subpath(subpath):\n print(type(subpath))\n # show the subpath after /path/\n return 'Subpath %s' % subpath\n\n\n@app.route('/projects/')\ndef projects():\n return 'The project page'\n\n\n@app.route('/about', methods=['GET'])\ndef about():\n print(request)\n print(type(request))\n print(request.__class__.mro())\n print(request.method)\n # print(vars(request))\n return 'The about page'\n\n\nwith app.test_request_context():\n print(url_for('index'))\n print(url_for('projects'))\n print(url_for('projects', next='/'))\n print(url_for('show_user_profile', username='John Doe'))\n print(url_for('static', filename='style.css'))\n","repo_name":"kl-sinclair/study.py","sub_path":"flask/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43072044809","text":"import itertools\n\nimport spacy\nfrom spacy import displacy\nfrom spacy.tokens import Span, Doc\n\nimport random\nimport json\nimport re\n\n\nimport os,sys,io,re\n\nfrom html.parser import HTMLParser\n\nclass TimexHTMLParser(HTMLParser): # courtesy of @dzajic\n def __init__ (self):\n super().__init__()\n self.underlying = \"\"\n self.inTIMEX3 = False\n self.timex3 = []\n self.timex3_attr = []\n\n def handle_starttag(self, tag, attrs):\n if tag == \"timex3\":\n self.inTIMEX3 = True\n self.timex3_attr.append({k:v for (k,v) in attrs})\n\n def handle_endtag(self, tag):\n if tag == \"timex3\":\n self.inTIMEX3 = False\n\n def handle_data(self, data):\n if self.inTIMEX3:\n self.timex3.append((data, len(self.underlying), len(self.underlying + data) - 1))\n self.underlying += data\n\ndef normalize_timex(source, article_id):\n filename = f\"/Users/jinzhao/schoolwork/lab-work/COVID19_DATA/heideltime_output/{source}.txt\"\n with open(filename, 'r') as f:\n heideltime_parse = f.read()\n annotated = re.search(\"([\\s\\S]*)<\\/TimeML>\", heideltime_parse).group(1).strip('\\n')\n annotated_articles = annotated.split('\\n\\n')\n\n timex_html_parser = TimexHTMLParser()\n timex_html_parser.feed(annotated_articles[article_id])\n normalized_dict = {}\n for i,(timex_text, start_char, end_char) in enumerate(timex_html_parser.timex3):\n # print(timex_html_parser.timex3_attr[i])\n # print(timex_text)\n # print(start_char)\n # print(end_char)\n # print(\"************\")\n normalized_dict[timex_text] = timex_html_parser.timex3_attr[i]\n return normalized_dict\n\npronoun_set = {\n 'I',\n 'you',\n 'my',\n 'mine',\n 'myself',\n 'we',\n 'us',\n 'our',\n 'ours',\n 'ourselves',\n 'you',\n 'you',\n 'your',\n 'yours',\n 'yourself',\n 'you',\n 'you',\n 'your',\n 'your',\n 'yourselves',\n 'he',\n 'him',\n 'his',\n 'his',\n 'himself',\n 'she',\n 'her',\n 'her',\n 'her',\n 'herself',\n 'it',\n 'it',\n 'its',\n 'itself',\n 'they',\n 'them',\n 'their',\n 'theirs',\n 'themself',\n 'they',\n 'them',\n 'their',\n 'theirs',\n 'themselves'\n}\ndef get_entity_options(tags):\n \"\"\" generating color options for visualizing the replaced pronouns and time stamps \"\"\"\n\n def color_generator(number_of_colors):\n color = [\"#\"+''.join([random.choice('0123456789ABCDEF') for j in range(6)]) for i in range(number_of_colors)]\n return color\n\n colors = {\"ENT\":\"#E8DAEF\"}\n\n color = color_generator(len(tags))\n for i in range(len(tags)):\n if tags[i].startswith(\"#\"):\n colors[tags[i]] = '#ddd'\n else:\n colors[tags[i]] = color[i]\n\n options = {\"ents\": tags, \"colors\": colors}\n return options\n\ndef 
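The hello.py module above checks routing with `test_request_context` at import time; the same routes can be exercised end-to-end with Flask's test client, e.g. (assuming the module imports as `hello`):

```python
# A minimal pytest-style check against the routes above.
from hello import app

def test_show_post():
    client = app.test_client()
    resp = client.get('/post/42')
    assert resp.status_code == 200
    assert b'Post 42' in resp.data
```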
populate_doc(source:str, article_id:int):\n with open(f'/Users/jinzhao/schoolwork/lab-work/COVID19_DATA/e2e-coref_output/bert-base-cased_{source}_output.jsonl', 'r') as e2e_output_file:\n for i, line in enumerate(e2e_output_file):\n if i==article_id:\n jsonline = json.loads(line)\n break\n\n with open(f'/Users/jinzhao/schoolwork/lab-work/COVID19_DATA/temporal_model_output/bert-base-cased_{source}_temporal_auto_nodes.txt', 'r') as temporal_output_file:\n content = temporal_output_file.read()\n edge_lists = re.split(\"filename:[.\\S\\s]+?EDGE_LIST\", content.strip())\n edge_lists = [x.strip() for x in edge_lists if x]\n\n edge_list = edge_lists[article_id]\n nlp = spacy.load(\"en_core_web_sm\")\n sentences = jsonline[\"sentences\"]\n\n words = [item for sublist in sentences for item in sublist]\n sent_starts = []\n for snt in sentences:\n sent_starts.extend([True] + [False] * (len(snt) - 1))\n spaces = [True] * len(words)\n assert len(sent_starts) == len(words)\n doc = Doc(nlp.vocab, words=words, spaces=spaces, sent_starts=sent_starts)\n edges = [line.split() for line in edge_list.strip().split('\\n')]\n snts_len = [len(s) for s in sentences]\n acc_len = list(itertools.accumulate(snts_len))\n spans = []\n tags = set()\n\n normalized_timex_dict = normalize_timex(source, article_id)\n\n for edge in edges:\n if edge[0] in (\"-7_-7_-7\", \"-1_-1_-1\"):\n continue\n sentence_id_token, start_token, end_token = [int(e) for e in edge[0].split('_')]\n if sentence_id_token:\n start_offset = acc_len[sentence_id_token - 1] + start_token\n end_offset = acc_len[sentence_id_token - 1] + end_token\n else:\n start_offset = start_token\n end_offset = end_token\n\n token_type = edge[1]\n rel = edge[3]\n\n if token_type == 'Event':\n if edge[2] in (\"-7_-7_-7\", \"-1_-1_-1\"):\n time = \" \".join(sentences[1][:2]) #DCT\n else:\n sentence_id_ref, start_ref, end_ref = [int(e) for e in edge[2].split('_')]\n tokens = sentences[sentence_id_ref][start_ref: end_ref + 1]\n time = \" \".join(tokens)\n if time in normalized_timex_dict and normalized_timex_dict[time]['type'] == 'DATE':\n time = normalized_timex_dict[time]['value']\n tags.add(f\"#{rel} {time}\")\n spans.append(Span(doc, start_offset, end_offset + 1, f\"#{rel} {time}\"))\n\n\n for cluster in jsonline[\"predicted_clusters\"]:\n tag = \" \".join(words[cluster[0][0]:cluster[0][1] + 1])\n for token_span in cluster: #decide on tag in the first loop\n w = \" \".join(words[token_span[0]:token_span[1]+1]).lower()\n if any(ele.isupper() for ele in w) and w.lower() not in pronoun_set:\n tag = w\n for token_span in cluster:\n tags.add(tag)\n spans.append(Span(doc, token_span[0], token_span[1]+1, f\"{tag}\"))\n\n doc.set_ents(spacy.util.filter_spans(spans)) #filter here is used to resolve overlapping spans\n return doc, tags\n\ndef visualize(doc, tags):\n options = get_entity_options(list(tags))\n displacy.serve(doc, style=\"ent\", options=options)\n\ndef output_modified_doc2txt(doc):\n output_string = \"\"\n for sentence in doc.sents:\n tokens = []\n for spacy_token in sentence:\n if spacy_token.ent_iob_ == 'B':\n if spacy_token.ent_type_.startswith('#'):\n tokens.append(f\"{spacy_token.text}({spacy_token.ent_type_})\")\n else:\n tokens.append(spacy_token.ent_type_)\n elif spacy_token.ent_iob_ == 'I':\n pass\n else:\n tokens.append(spacy_token.text)\n output_string += ' '.join(tokens) + '\\n'\n return output_string\n\nif __name__ == '__main__':\n doc, tags = populate_doc(\"business-standard\", 2)\n output_modified_doc2txt(doc)\n visualize(doc, 
tags)\n","repo_name":"jinzhao3611/COVID19_DATA","sub_path":"process_pronoun_time2spacy_doc.py","file_name":"process_pronoun_time2spacy_doc.py","file_ext":"py","file_size_in_byte":6804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23956226988","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Object Following - Live Demo\n# \n# In this notebook we'll show how you can follow an object with JetBot! We'll use a pre-trained neural network\n# that was trained on the [COCO dataset](http://cocodataset.org) to detect 90 different common objects. These include\n# \n# * Person (index 0)\n# * Cup (index 47)\n# \n# and many others (you can check [this file](https://github.com/tensorflow/models/blob/master/research/object_detection/data/mscoco_complete_label_map.pbtxt) for a full list of class indices). The model is sourced from the [TensorFlow object detection API](https://github.com/tensorflow/models/tree/master/research/object_detection),\n# which provides utilities for training object detectors for custom tasks also! Once the model is trained, we optimize it using NVIDIA TensorRT on the Jetson Nano.\n# \n# This makes the network very fast, capable of real-time execution on Jetson Nano! We won't run through all of the training and optimization steps in this notebook though.\n# \n# Anyways, let's get started. First, we'll want to import the ``ObjectDetector`` class which takes our pre-trained SSD engine.\n\n# ### Compute detections on single camera image\n\n# In[ ]:\n\nfrom queue import Empty\nimport torch.nn.functional as F\nimport cv2\nimport numpy as np\nimport traitlets\nimport os\nimport time\n\n# from jetbot import ObjectDetector\n# from jetbot.object_detection_yolo import ObjectDetector_YOLO\nfrom jetbot import Camera\nfrom jetbot import Robot\nfrom jetbot import bgr8_to_jpeg\nfrom jetbot import ObjectDetector\n# from jetbot import RoadCruiser\nfrom jetbot.utils import get_cls_dict_yolo, get_cls_dict_ssd\n\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\n\nimport time\n\nclass Fleeter(traitlets.HasTraits):\n \n cap_image = traitlets.Any()\n label = traitlets.Integer(default_value=1).tag(config=True)\n label_text = traitlets.Unicode(default_value='').tag(config=True)\n speed = traitlets.Float(default_value=0.15).tag(config=True)\n speed_gain = traitlets.Float(default_value=0.01).tag(config=True)\n turn_gain = traitlets.Float(default_value=0.3).tag(config=True)\n steering_bias = traitlets.Float(default_value=0.0).tag(config=True)\n blocked = traitlets.Float(default_value=0).tag(config=True)\n target_view= traitlets.Float(default_value=0.6).tag(config=True)\n mean_view = traitlets.Float(default_value=0).tag(config=True)\n e_view = traitlets.Float(default_value=0).tag(config=True)\n is_dectecting = traitlets.Bool(default_value=True).tag(config=True)\n is_dectected = traitlets.Bool(default_value=False).tag(config=True)\n \n def __init__(self, follower_model='ssd_mobilenet_v2_coco_onnx.engine', type_follower_model=\"SSD\", cruiser_model='resnet18', type_cruiser_model='resnet'):\n\n self.follower_model = follower_model\n self.type_follower_model = type_follower_model\n\n # self.obstacle_detector = Avoider(model_params=self.avoider_model)\n if self.type_follower_model == \"SSD\" or self.type_follower_model == \"YOLO\":\n # from jetbot import ObjectDetector\n self.object_detector = ObjectDetector(self.follower_model, self.type_follower_model)\n # elif type_model == \"YOLO\":\n # from 
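`populate_doc` above leans on `spacy.util.filter_spans` before `doc.set_ents` so that overlapping coref and TIMEX spans don't collide; a standalone illustration of what that filter keeps:

```python
import spacy
from spacy.tokens import Doc, Span

nlp = spacy.blank("en")
doc = Doc(nlp.vocab, words=["New", "York", "City", "mayor"])
spans = [Span(doc, 0, 3, "PLACE"), Span(doc, 1, 3, "PLACE")]
# filter_spans prefers the longest span on overlap (ties go to the
# earlier one), which is why mixed span sources can be merged safely.
kept = spacy.util.filter_spans(spans)
assert [(s.start, s.end) for s in kept] == [(0, 3)]
```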
jetbot.object_detection_yolo import ObjectDetector_YOLO\n # self.object_detector = ObjectDetector_YOLO(self.follower_model)\n \n self.detections = None\n self.matching_detections = None\n self.object_center = None\n self.closest_objec = None\n self.is_dectecting = True\n self.is_dectected = False\n\n # Camera instance would be better to put after all models instantiation\n self.capturer = Camera()\n # self.capturer = self.road_cruiser.camera\n self.img_width = self.capturer.width\n self.img_height = self.capturer.height\n self.cap_image = np.empty((self.img_height, self.img_width, 3), dtype=np.uint8).tobytes()\n self.current_image = np.empty((self.img_height, self.img_width, 3))\n \n self.default_speed = self.speed\n self.detect_duration_max = 5\n self.no_detect = self.detect_duration_max\n self.target_view = 0.5\n self.mean_view = 0\n self.mean_view_prev = 0\n self.e_view = 0\n self.e_view_prev = 0\n\n self.execution_time = []\n self.fps = []\n \n # self.robot = self.road_cruiser.robot\n self.robot = Robot.instance()\n \n self.cruiser_model = cruiser_model\n self.type_cruiser_model = type_cruiser_model\n self.road_cruiser = RoadCruiser_4_ft(cruiser_model = self.cruiser_model, type_cruiser_model = self.type_cruiser_model)\n # self.road_cruiser.robot = self.robot\n # self.robot = self.road_cruiser.robot\n\n\n def run_objects_detection(self):\n # self.image = self.capturer.value\n # print(self.image[1][1], np.shape(self.image))\n self.detections = self.object_detector(self.current_image)\n self.matching_detections = [d for d in self.detections[0] if d['label'] == int(self.label)]\n \n if self.type_follower_model == \"SSD\":\n self.label_text = get_cls_dict_ssd('coco')[int(self.label)]\n elif self.type_follower_model == \"YOLO\":\n self.label_text = get_cls_dict_yolo('coco')[int(self.label)]\n # print(int(self.label), \"\\n\", self.matching_detections)\n \n def object_center_detection(self, det):\n \"\"\"Computes the center x, y coordinates of the object\"\"\"\n # print(self.matching_detections)\n bbox = det['bbox']\n center_x = (bbox[0] + bbox[2]) / 2.0 - 0.5\n center_y = (bbox[1] + bbox[3]) / 2.0 - 0.5\n object_center = (center_x, center_y)\n return object_center\n \n def norm(self, vec):\n \"\"\"Computes the length of the 2D vector\"\"\"\n return np.sqrt(vec[0] ** 2 + vec[1] ** 2)\n\n def closest_object_detection(self):\n \"\"\"Finds the detection closest to the image center\"\"\"\n closest_detection = None\n if len(self.matching_detections) != 0:\n for det in self.matching_detections:\n if closest_detection is None:\n closest_detection = det\n elif self.norm(self.object_center_detection(det)) < self.norm(self.object_center_detection(closest_detection)):\n closest_detection = det\n \n self.closest_object = closest_detection\n \n \n def execute_of(self):\n # print(\"start execute_of !\")\n \n start_time = time.process_time()\n\n # self.current_image = change['new']\n width = self.img_width\n height = self.img_height\n\n # compute all detected objects\n self.run_objects_detection()\n self.closest_object_detection()\n # detections = self.object_detector(image)\n # print(self.detections)\n \n # draw all detections on image\n for det in self.detections[0]:\n \n bbox = det['bbox']\n cv2.rectangle(self.current_image, (int(width * bbox[0]), int(height * bbox[1])),\n (int(width * bbox[2]), int(height * bbox[3])), (255, 0, 0), 2)\n \n # select detections that match selected class label\n # get detection closest to center of field of view and draw it\n cls_obj = self.closest_object\n if cls_obj is 
not None:\n self.is_dectected = True\n self.no_detect = self.detect_duration_max # set max detection no to prevent temperary loss of object detection\n bbox = cls_obj['bbox']\n cv2.rectangle(self.current_image, (int(width * bbox[0]), int(height * bbox[1])),\n (int(width * bbox[2]), int(height * bbox[3])), (0, 255, 0), 5)\n \n self.mean_view = 0.8 * (bbox[2] - bbox[0]) + 0.2 * self.mean_view_prev\n self.e_view = self.target_view - self.mean_view\n self.speed = self.speed + self.speed_gain * self.e_view + 0.5 * (self.e_view - self.e_view_prev)\n \n self.mean_view_prev = self.mean_view\n self.e_view_prev = self.e_view\n \n # otherwise go forward if no target detected\n if cls_obj is None:\n end_time = time.process_time()\n self.execution_time.append(end_time - start_time)\n \n if self.no_detect <= 0: # if object is not detected for a duration, road cruising\n self.mean_view = 0.0\n self.mean_view_prev = 0.0\n self.is_dectected = False\n # self.no_detect = self.detect_duration_max\n \n # self.road_cruiser.execute_rc(self.current_image)\n # print('no_objects detected !')\n self.cap_image = bgr8_to_jpeg(self.current_image)\n # self.speed = self.default_speed\n return\n \n else:\n self.no_detect -= 1 # observe no objects for a duration for the miss of object detection\n # self.robot.forward(float(self.speed))\n \n # otherwise steer towards target\n else:\n # move robot forward and steer proportional target's x-distance from center\n center =self.object_center_detection(cls_obj)\n self.robot.set_motors(\n float(self.speed + self.turn_gain * center[0] + self.steering_bias),\n float(self.speed - self.turn_gain * center[0] + self.steering_bias)\n )\n \n end_time = time.process_time()\n self.execution_time.append(end_time - start_time)\n\n # update image widget\n self.cap_image = bgr8_to_jpeg(self.current_image)\n # print(\"ok!\")\n # return self.cap_image\n \n def execute_fleeting(self, change):\n # print(\"start running execute_fleeting\")\n # do object following\n # start_time = time.process_time()\n self.current_image = change['new']\n self.execute_of()\n # end_time = time.process_time()\n # self.execution_time.append(end_time - start_time + self.capturer.cap_time)\n # self.execution_time.append(end_time - start_time)\n # self.fps.append(1/(end_time - start_time))\n\n # if closest object is not detected and followed, do road cruising\n # print('check objects detectd !', self.is_dectected)\n if not self.is_dectected:\n # self.road_cruiser.speed = self.speed_gain\n # self.road_cruiser.speed = self.speed_gain\n # print('no_objects detected !')\n self.road_cruiser.execute_rc(self.current_image)\n\n def start_run(self, change):\n self.capturer.unobserve_all()\n print(\"start running\")\n self.capturer.observe(self.execute_fleeting, names='value')\n # self.capturer.observe(self.execute_of, names='value')\n\n def stop_run(self, change):\n from jetbot.utils import plot_exec_time\n print(\"start stopping!\")\n \n self.capturer.unobserve_all()\n time.sleep(1.0)\n self.robot.stop()\n self.capturer.stop()\n\n # self.road_cruiser.stop_cruising()\n # plot exection time of road cruiser model processing\n cruiser_model_name = \"road cruiser model\"\n plot_exec_time(self.road_cruiser.execution_time[1:], cruiser_model_name, self.road_cruiser.cruiser_model_str)\n \n # plot exection time of fleet controller model processing\n fleet_model_name = \"fleet controller model\"\n plot_exec_time(self.execution_time[1:], fleet_model_name, self.follower_model.split(\".\")[0])\n\n\nclass RoadCruiser_4_ft(traitlets.HasTraits):\n 
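In the steering branch above, `speed ± turn_gain * center[0]` goes to `set_motors` unclamped, while the road-cruiser code bounds its commands with max/min (in a commented-out block further down); a sketch of applying the same guard to the follower:

```python
def clamped_motor_commands(speed, turn_gain, center_x, bias=0.0):
    """Clamp differential-drive commands to [0, 1]; a sketch of the
    guard the object-following branch omits."""
    left = max(min(speed + turn_gain * center_x + bias, 1.0), 0.0)
    right = max(min(speed - turn_gain * center_x + bias, 1.0), 0.0)
    return left, right
```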
speed_gain = traitlets.Float(default_value=0.15).tag(config=True)\n steering_gain = traitlets.Float(default_value=0.08).tag(config=True)\n steering_dgain = traitlets.Float(default_value=1.5).tag(config=True)\n steering_bias = traitlets.Float(default_value=0.0).tag(config=True)\n steering = traitlets.Float(default_value=0.0).tag(config=True)\n x_slider = traitlets.Float(default_value=0).tag(config=True)\n y_slider = traitlets.Float(default_value=0).tag(config=True)\n speed = traitlets.Float(default_value=0).tag(config=True)\n\n def __init__(self, cruiser_model='resnet18', type_cruiser_model='resnet'):\n super().__init__()\n self.cruiser_model_str = cruiser_model\n self.cruiser_model = getattr(torchvision.models, cruiser_model)(pretrained=False)\n self.type_cruiser_model = type_cruiser_model\n if type_cruiser_model == \"mobilenet\":\n self.cruiser_model.classifier[3] = torch.nn.Linear(self.cruiser_model.classifier[3].in_features, 2)\n self.cruiser_model.load_state_dict(torch.load('best_steering_model_xy_' + cruiser_model + '.pth'))\n\n elif type_cruiser_model == \"resnet\":\n self.cruiser_model.fc = torch.nn.Linear(self.cruiser_model.fc.in_features, 2)\n self.cruiser_model.load_state_dict(torch.load('best_steering_model_xy_' + cruiser_model + '.pth'))\n # self.cruiser_model.load_state_dict(torch.load('best_steering_model_xy_resnet34.pth'))\n # model.load_state_dict(torch.load('best_steering_model_xy_resnet50.pth'))\n\n # self.camera = Camera()\n # self.robot = Robot.instance()\n self.robot = Robot.instance()\n self.angle = 0.0\n self.angle_last = 0.0\n self.execution_time = []\n self.fps = []\n self.x_slider = 0\n self.y_slider = 0\n\n # model = torchvision.models.mobilenet_v3_large(pretrained=False)\n # model.classifier[3] = torch.nn.Linear(model.classifier[3].in_features, 2)\n\n # model = torchvision.models.resnet18(pretrained=False)\n # model = torchvision.models.resnet34(pretrained=False)\n # model = torchvision.models.resnet50(pretrained=False)\n # model.fc = torch.nn.Linear(model.fc.in_features, 2)\n # model.load_state_dict(torch.load('best_steering_model_xy_mobilenet_v3_large.pth'))\n # model.load_state_dict(torch.load('best_steering_model_xy_resnet18.pth'))\n # model.load_state_dict(torch.load('best_steering_model_xy_resnet34.pth'))\n # model.load_state_dict(torch.load('best_steering_model_xy_resnet50.pth'))\n\n self.device = torch.device('cuda')\n self.cruiser_model = self.cruiser_model.to(self.device)\n self.cruiser_model = self.cruiser_model.eval().half()\n # self.cruiser_model = self.cruiser_model.float()\n # self.cruiser_model = self.cruiser_model.to(self.device, dtype=torch.float)\n # self.cruiser_model = self.cruiser_model.eval()\n\n # ---- Creating the Pre-Processing Function\n # 1. Convert from HWC layout to CHW layout\n # 2. Normalize using same parameters as we did during training (our camera provides values in [0, 255] range and training loaded images in [0, 1] range so we need to scale by 255.0\n # 3. Transfer the data from CPU memory to GPU memory\n # 4. 
Add a batch dimension\n\n def preprocess(self, image):\n mean = torch.Tensor([0.485, 0.456, 0.406]).cuda().half()\n std = torch.Tensor([0.229, 0.224, 0.225]).cuda().half()\n # mean = torch.Tensor([0.485, 0.456, 0.406]).cuda()\n # std = torch.Tensor([0.229, 0.224, 0.225]).cuda()\n image = PIL.Image.fromarray(image)\n # resize the cam captured image to (224, 224) for optimal resnet model inference\n image = image.resize((224, 224))\n image = transforms.functional.to_tensor(image).to(self.device).half()\n # image = transforms.functional.to_tensor(image).to(self.device)\n image.sub_(mean[:, None, None]).div_(std[:, None, None])\n return image[None, ...]\n\n def execute_rc(self, current_image):\n # print('enter road cruising !')\n start_time = time.process_time()\n # global angle, angle_last\n # current_image = change['new']\n xy = self.cruiser_model(self.preprocess(current_image)).detach().float().cpu().numpy().flatten()\n x = xy[0]\n # y = (0.5 - xy[1]) / 2.0\n y = (1 + xy[1])\n\n self.x_slider = x.item()\n self.y_slider = y.item()\n\n self.speed = self.speed_gain\n\n # angle = np.sqrt(xy)*np.arctan2(x, y)\n angle_1 = np.arctan2(x, y)\n self.angle = 0.5 * np.pi * np.tanh(0.5 * angle_1)\n pid = self.angle * self.steering_gain + (self.angle - self.angle_last) * self.steering_dgain\n self.angle_last = self.angle\n\n self.steering = pid + self.steering_bias\n \n # print('steering : ', self.steering)\n # self.robot.left_motor.value = max(min(self.speed_gain + self.steering, 1.0), 0.0)\n # self.robot.right_motor.value = max(min(self.speed_gain - self.steering, 1.0), 0.0)\n # print('left motor value :', self.robot.left_motor.value)\n \n end_time = time.process_time()\n # self.execution_time.append(end_time - start_time + self.camera.cap_time)\n self.execution_time.append(end_time - start_time)\n\n # We accomplish that with the observe function.\n # def start_cruising(self):\n # self.execute({'new': self.camera.value})\n # self.camera.observe(self.execute, names='value')\n\n # def stop_cruising(self):\n # from jetbot.utils import plot_exec_time\n # self.camera.unobserve(self.execute, names='value')\n # self.camera.unobserve_all()\n # time.sleep(1.0)\n # self.robot.stop()\n # self.camera.stop()\n\n # plot exection time of road cruiser model processing\n # model_name = \"road cruiser model\"\n # plot_exec_time(self.execution_time[1:], self.fps[1:], model_name, self.cruiser_model_str)\n","repo_name":"cuter9/Cuterbot","sub_path":"jetbot/fleet_manager_1.py","file_name":"fleet_manager_1.py","file_ext":"py","file_size_in_byte":17723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10164260736","text":"# use code from 8-7\n# use while loop to get input\n\ndef make_album(artist_name, album_title, song_count=None):\n \"\"\"\n Create dictionary of artist name, album title,\n and song count (optional)\n \"\"\"\n album = {\n \"Artist\" : artist_name.title(),\n \"Album\" : album_title.title(),\n }\n\n if song_count:\n album[\"Song\"] = song_count\n \n return album\n\nwhile True:\n proceed = input(\"Input details? 
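# --- Illustrative aside (not part of the original file) ---
# execute_rc above turns the model's (x, y) output into a steering command:
# the heading atan2(x, y) is squashed by tanh into a bounded angle, then a PD
# controller produces the steering value. A worked example with the default
# RoadCruiser_4_ft gains (steering_gain=0.08, steering_dgain=1.5); the x, y
# values are made up.
import numpy as np

x, y = 0.4, 1.2
angle = 0.5 * np.pi * np.tanh(0.5 * np.arctan2(x, y))   # about 0.25 rad
steering = angle * 0.08 + (angle - 0.0) * 1.5           # D term dominates on the first frame
print(round(float(steering), 3))                        # roughly 0.40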
y/n \")\n\n if proceed == \"n\":\n break\n\n artist_name2 = input(\"\\nArtist: \")\n album_name2 = input(\"Album: \")\n song_count2 = input(\"Song count(optional, hit Enter \"\n \"to skip): \")\n print(make_album(artist_name2, album_name2, song_count2))\n","repo_name":"skibidibidop/testing-area","sub_path":"python-practice/python-crash-course-matthes/chapter_8/ex8-8-user-album.py","file_name":"ex8-8-user-album.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"11015801763","text":"from symbol import atom\nimport os\nimport numpy as np\nfrom Bio.PDB import *\nimport Bio.PDB\nfrom typing import Union\nimport json\nimport torch\nimport torch.nn as nn\n\n\n# A number of functions/classes are adopted from these two code pages:\n# 1. https://github.com/jingraham/neurips19-graph-protein-design\n# 2. https://github.com/dauparas/ProteinMPNN/blob/main/protein_mpnn_utils.py\n\n\n\ndef structure_parser(protein: Union[str, Bio.PDB.Structure.Structure]) -> list:\n parser = PDBParser()\n if isinstance(protein, str):\n protein = parser.get_structure(\"parser\", file=protein)\n\n coord = []\n for chain in protein.get_chains():\n if list(chain.get_residues())[0].id[0] != \" \": # drop other chains\n continue\n N = []\n CA = []\n C = []\n O = []\n for residue in chain.get_residues():\n if residue.has_id(\"O\") and residue.has_id(\"CA\") and residue.has_id(\"C\") and residue.has_id(\"N\") and alphabet(residue) != \"X\" and isinstance(residue, Bio.PDB.Residue.Residue):\n atoms = list(residue.get_atoms())\n N.append(list(atoms[0].get_vector()))\n CA.append(list(atoms[1].get_vector()))\n C.append(list(atoms[2].get_vector()))\n O.append(list(atoms[3].get_vector()))\n if chain.id == \" \":\n coord.append({\"None\": (N, CA, C, O)})\n else:\n coord.append({chain.id: (N, CA, C, O)})\n return coord\n\n\ndef alphabet(res: Bio.PDB.Residue.Residue) -> str:\n if not isinstance(res, Bio.PDB.Residue.Residue):\n raise TypeError(\"Not correct input\")\n code_standard = {\n 'ALA': 'A', 'VAL': 'V', 'PHE': 'F', 'PRO': 'P', 'MET': 'M',\n 'ILE': 'I', 'LEU': 'L', 'ASP': 'D', 'GLU': 'E', 'LYS': 'K',\n 'ARG': 'R', 'SER': 'S', 'THR': 'T', 'TYR': 'Y', 'HIS': 'H',\n 'CYS': 'C', 'ASN': 'N', 'GLN': 'Q', 'TRP': 'W', 'GLY': 'G',\n }\n # standard aa & not hetero residue\n if res.get_resname() in code_standard.keys() and res.id[0] == \" \":\n return code_standard[res.get_resname()]\n else:\n return \"X\"\n\n\ndef sequence_parse(structure: Union[str, Bio.PDB.Structure.Structure]) -> list:\n parser = PDBParser()\n if isinstance(structure, str):\n structure = parser.get_structure(\"parser\", file=structure)\n\n sequence_list = []\n\n for chain in structure.get_chains():\n # exclude which contains water or hetero molecules\n if list(chain.get_residues())[0].id[0] != \" \":\n continue\n seq = \"\"\n num = 0\n for res in chain.get_residues():\n if alphabet(res) != \"X\" and isinstance(res, Bio.PDB.Residue.Residue):\n seq += alphabet(res)\n num += 1\n if chain.id == \" \":\n sequence_list.append({\"None\": (seq, num)})\n else:\n sequence_list.append({chain.id: (seq, num)})\n\n return sequence_list\n\n\ndef load_jsonl(json_file: str) -> list:\n data = []\n with open(json_file, \"r\") as f:\n for line in f:\n try:\n # 每一行代表一个序列的字典字符串加一个'\\n',对于字符串应该用json.loads()文件进行读取\n data.append(json.loads(line.replace(\"\\n\", \"\")))\n except ValueError:\n pass\n return data\n\n\ndef write_jsonl(input_dir=None, output_dir=None, name_output=False):\n file_name = 
\"load_pdb.jsonl\"\n name_list = []\n dataset = []\n input_list = os.listdir(input_dir)\n for file in input_list:\n if \".pdb\" in file:\n try:\n protein = os.path.join(input_dir, file)\n sequence_list = sequence_parse(protein)\n structure_list = structure_parser(protein)\n for i in range(len(sequence_list)):\n # dict {chain_id : (seq,length)}\n sequence = sequence_list[i]\n chain_id = list(sequence.keys())[0]\n seq, length = sequence[chain_id]\n\n # dict {chain_id : (N,CA,C,O)}\n structure = structure_list[i]\n chain_backup = list(structure.keys())[0]\n N, CA, C, O = structure[chain_backup]\n\n if chain_backup != chain_id: # check pdb files\n raise ValueError(\n f\"{chain_id} not equals to {chain_backup},check {file}\")\n\n if chain_id != \" \":\n name = file.replace(\".pdb\", f\"_{chain_id}\")\n else:\n name = file.replace(\".pdb\", \"\")\n\n entity = {\n \"seq\": seq,\n \"coords\": {\n \"N\": N,\n \"CA\": CA,\n \"C\": C,\n \"O\": O,\n },\n \"name\": name,\n \"length\": length,\n \"chain\": chain_id\n }\n\n dataset.append(entity)\n name_list.append(name)\n except:\n pass\n\n name_dic = {\"data\": name_list}\n\n outfile1 = os.path.join(output_dir, file_name)\n with open(outfile1, 'w') as f1:\n for entry in dataset:\n f1.write(json.dumps(entry) + '\\n')\n if name_output:\n outfile2 = os.path.join(output_dir, \"protein_list.json\")\n with open(outfile2, \"w\") as f2:\n f2.write(json.dumps(name_dic))\n\n\ndef featurize(batch, device, shuffle_fraction=0.,mask_fraction=1.0):\n \"\"\" Pack and pad batch into torch tensors \"\"\"\n # pdb file {\"seq\":AAA,\"coords\"{},\"name\":5cbo_A,\"length\":100,\"chain\":\"A\"}\n # alphabet = 'ACDEFGHIKLMNPQRSTVWY'\n alphabet = 'ACDEFGHIKLMNPQRSTVWYX'\n cctop_code = 'IMOSL'\n\n B = len(batch)\n lengths = np.array([b['length'] for b in batch], dtype=np.int32)\n L_max = max([b['length'] for b in batch])\n X = np.zeros([B, L_max, 5, 3])\n S = np.zeros([B, L_max], dtype=np.int32)\n C = np.zeros([B, L_max], dtype=np.int32)\n\n def shuffle_subset(n, p):\n n_shuffle = np.random.binomial(n, p)\n ix = np.arange(n)\n ix_subset = np.random.choice(ix, size=n_shuffle, replace=False)\n ix_subset_shuffled = np.copy(ix_subset)\n np.random.shuffle(ix_subset_shuffled)\n ix[ix_subset] = ix_subset_shuffled\n return ix\n\n # Build the batch\n for i, b in enumerate(batch):\n # Consider all the proteins in the batch\n x = np.stack([b['coords'][c] for c in ['N', 'CA', 'C', 'CB', 'O']], 1)\n\n l = b['length']\n # x : [l, 4, 3]\n x_pad = np.pad(x, [[0, L_max-l], [0, 0], [0, 0]],\n 'constant', constant_values=(np.nan, ))\n # x_pad : [L_max, 4, 4]\n X[i, :, :, :] = x_pad\n\n # X : [Batch, L_max , 4, 3]\n # X[i] : [L_max , 4, 3]\n\n # Convert sequences to labels\n indices_aa = np.asarray([alphabet.index(a)\n for a in b['seq']], dtype=np.int32)\n indeices_cctop = np.asarray([cctop_code.index(a)\n for a in b['cctop']], dtype=np.int32)\n if shuffle_fraction > 0.:\n idx_shuffle = shuffle_subset(l, shuffle_fraction)\n S[i, :l] = indices_aa[idx_shuffle]\n C[i, :l] = indeices_cctop[idx_shuffle]\n else:\n S[i, :l] = indices_aa\n C[i, :l] = indeices_cctop\n # Mask\n isnan = np.isnan(X)\n mask = np.isfinite(np.sum(X, (2, 3))).astype(np.float32)\n X[isnan] = 0.\n\n # Conversion\n # S为一个batch的标签, S : [Batch,Length_max] (0,19 int)对应每个位置的氨基酸类型,注意mask为True的0代表Alanine,mask为Falfse的0代表这个序列长度没有这么长,没有这个氨基酸\n S = torch.from_numpy(S).to(dtype=torch.long, device=device)\n bernoulli_mask = torch.rand(S.shape,device=device)\n # mask_fraction_matrix = torch.full(S.shape,mask_fraction,device=device)\n S_mask = 
\n    C = torch.from_numpy(C).to(dtype=torch.long, device=device)\n    # X holds one batch of coordinates, X : [Batch, Length_max, 5, 3] (float32)\n    X = torch.from_numpy(X).to(dtype=torch.float32, device=device)\n    # mask marks the valid (non-padded) positions of a batch, mask : [Batch, Length_max] (0/1 float32)\n    mask = torch.from_numpy(mask).to(\n        dtype=torch.float32, device=device)\n    # lengths is a list of size Batch storing the length of each protein in this batch\n    return X, S, C, mask, lengths, S_mask\n\n\ndef loss_nll(S, log_probs, mask):\n    \"\"\" Negative log probabilities \"\"\"\n    criterion = torch.nn.NLLLoss(reduction='none')\n    loss = criterion(\n        log_probs.contiguous().view(-1, log_probs.size(-1)), S.contiguous().view(-1)\n    ).view(S.size())\n    # S.view(-1) [B*L,]\n    # log_probs.view(-1,log_probs.size(-1)) [B*L,20]\n    # loss [B,L]\n    loss_av = torch.sum(loss * mask) / torch.sum(mask)\n    return loss, loss_av\n\n\ndef loss_cse(S, logits, mask, smooth=0.0):\n    \"\"\"Cross Entropy Loss with mask\"\"\"\n    # Logits : [Batch, Length_max, 20] float\n    # S : [Batch, Length] long\n    # Mask : [Batch, Length] 0/1\n    criterion = torch.nn.CrossEntropyLoss(\n        reduction=\"none\", label_smoothing=smooth)\n    loss = criterion(\n        logits.reshape(-1, logits.shape[-1]), S.reshape(-1)).reshape(S.shape)\n    loss_av = torch.sum(loss * mask) / torch.sum(mask)\n    return loss, loss_av\n\ndef loss_smoothed(S, log_probs, mask, weight=0.1, num_classes=20):\n    \"\"\" Negative log probabilities \"\"\"\n    S_onehot = torch.nn.functional.one_hot(S, num_classes).float()\n\n    # Label smoothing\n    S_onehot = S_onehot + weight / float(S_onehot.size(-1))\n    S_onehot = S_onehot / S_onehot.sum(-1, keepdim=True)\n\n    loss = -(S_onehot * log_probs).sum(-1)\n    loss_av = torch.sum(loss * mask) / torch.sum(mask)\n    return loss, loss_av\n\ndef backbone_select(pdb_structure: Union[str, Bio.PDB.Structure.Structure], output_path, atom_select=4):\n    parser = PDBParser()\n    if isinstance(pdb_structure, str):\n        if not os.path.exists(pdb_structure):\n            raise FileNotFoundError(f\"Not exists {pdb_structure}\")\n        pdb_structure = parser.get_structure(\"test\", pdb_structure)\n\n    class BackboneSelect(Select):\n        def accept_residue(self, residue):\n            if residue.get_id()[0] != \" \":  # exclude hetero residues and waters\n                return 0\n            else:\n                return 1\n\n        def accept_atom(self, atom):\n            if atom_select == 1:\n                if atom.get_id() == \"CA\":\n                    return 1\n                else:\n                    return 0\n            else:\n                if atom.get_id() == \"N\":\n                    return 1\n                elif atom.get_id() == \"CA\":\n                    return 1\n                elif atom.get_id() == \"C\":\n                    return 1\n                elif atom.get_id() == \"O\" and atom_select == 4:\n                    return 1\n                else:\n                    return 0\n    io = PDBIO()\n    io.set_structure(pdb_structure)\n    io.save(output_path, BackboneSelect())\n\n\ndef load_checkpoint(checkpoint_path, model, device=\"cpu\"):\n    print('Loading checkpoint from {}'.format(checkpoint_path))\n    state_dicts = torch.load(checkpoint_path, map_location=device)\n    model.load_state_dict(state_dicts['model_state_dict'])\n    print('\\tEpoch {}'.format(state_dicts['epoch']))\n    return\n\n\n\ndef gaussian(x, std):\n    pi = torch.tensor(torch.pi)\n    s2 = 2.0*torch.tensor(std).square()\n    x2 = torch.tensor(x).square().neg()\n\n    return torch.exp(x2 / s2) * torch.rsqrt(s2 * pi)\n\n\ndef gaussian_kernel(kernel_size, std=1.0):\n    kernel = [gaussian(i - (kernel_size // 2), std)\n              for i in range(kernel_size)]\n\n    kernel = torch.tensor(kernel)\n    kernel = kernel / kernel.sum()\n\n    return 
kernel\n\n","repo_name":"sirius777coder/tmpnn","sub_path":"tmpnn/tmpnn_beta/vanilia_version/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":11914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2578673253","text":"# This file is part of GenMap and released under the MIT License, see LICENSE.\n# Author: Takuya Kojima\n\nfrom EvalBase import EvalBase\nfrom DataPathAnalysis import DataPathAnalysis\nfrom SolverSetup import SolverSetup\n\nimport networkx as nx\nimport pulp\nimport numpy as np\nimport cvxpy as cp\n\nimport copy\nimport math\nimport time\n\nPENALTY_COST = 1000\nMIN_SW = 1.5 # lower limit of SE's switching count \n\n\n# setting up for pulp solver\ntry:\n ilp_solver = SolverSetup(\"ILP\").getSolver()\nexcept SolverSetup.SolverSetupError as e:\n print(\"Fail to setup ILP solver:\", e)\n sys.exit()\n\n# setting up for cvxpy solver\ntry:\n cp_solver = SolverSetup(\"CP\").getSolver()\nexcept SolverSetup.SolverSetupError as e:\n print(\"Fail to setup CP solver:\", e)\n sys.exit()\n\nisCpOpt = True\nleakmodel = None\ndelaymodel = None\n\nclass ModelBase():\n def __init__(self, sim_params):\n self.__k_gamma = sim_params.getUserdata(\"delay_power_model\")[\"k_gamma\"]\n self.__vth0 = sim_params.getUserdata(\"delay_power_model\")[\"vth0\"]\n self.bbv_min = sim_params.getUserdata(\"delay_power_model\")[\"bbv_min\"]\n self.bbv_max = sim_params.getUserdata(\"delay_power_model\")[\"bbv_max\"]\n if \"bbv_step\" in sim_params.getUserdata(\"delay_power_model\").keys():\n self.need_quantize = True\n self.quant_step = sim_params.getUserdata(\"delay_power_model\")[\"bbv_step\"]\n self.bias = 10 ** math.ceil(-math.log10(self.quant_step))\n else:\n self.need_quantize = False\n\n def vthreshold(self, bbv):\n return self.__vth0 - self.__k_gamma * bbv\n\n\nclass DelayModel(ModelBase):\n \"\"\" delay model based on alpha-power-raw\n \"\"\"\n def __init__(self, sim_params):\n super().__init__(sim_params)\n self.weight = sim_params.getUserdata(\"delay_power_model\")[\"weight\"]\n self.alpha = sim_params.getUserdata(\"delay_power_model\")[\"alpha\"]\n\n\n def delayScale(self, vdd, bbv):\n return vdd * ((vdd - self.vthreshold(bbv)) ** (- self.alpha))\n\nclass LeakModel(ModelBase):\n \"\"\" Leakage power model\n For more details, please see\n Fujita, Yu, et al. \"Power optimization considering the chip temperature of low power reconfigurable accelerator CMA-SOTB.\" 2015 Third International Symposium on Computing and Networking (CANDAR). 
IEEE, 2015.\n \"\"\"\n def __init__(self, sim_params):\n super().__init__(sim_params)\n self.coeff_vb = sim_params.getUserdata(\"delay_power_model\")[\"coeff_vb\"]\n self.coeff_vdd = sim_params.getUserdata(\"delay_power_model\")[\"coeff_vdd\"]\n self.coeff_tmp = sim_params.getUserdata(\"delay_power_model\")[\"coeff_tmp\"]\n self.leak0 = sim_params.getUserdata(\"delay_power_model\")[\"leak0\"]\n\n def leackage(self, bbv, p0, \\\n mult = lambda x, y: x * y, exp = lambda x : math.e ** x):\n return mult(exp(self.coeff_vb * bbv), p0)\n\n\nclass PowerEval(EvalBase):\n class DependencyError (Exception):\n pass\n\n def __init__(self):\n \"\"\"This evaluation must be carried out after MapWidthEval evaluation\n if you want mapping duplication.\n \"\"\"\n pass\n\n @staticmethod\n def eval(CGRA, app, sim_params, individual, **info):\n \"\"\"Return estimated power\n\n Args:\n CGRA (PEArrayModel): A model of the CGRA\n app (Application): An application to be optimized\n sim_params (SimParameters): parameters for some simulations\n individual (Individual): An individual to be evaluated\n Options:\n duplicate_enable (bool): True if you need the mapped data-flow\n to be duplicated horizontally.\n\n Returns:\n float: evaluated power\n\n Saved evaluation results:\n body_bias: optimized body bias voltage\n dynamic_power: dynamic power consumption\n leakage_power: leackage power consumption\n \"\"\"\n global isCpOpt, leakmodel, delaymodel\n if individual.isValid() == False:\n return PENALTY_COST\n\n # get body bias domain\n bb_domains = CGRA.getBBdomains()\n if len(bb_domains) != 0 and len(sim_params.bias_range) > 1:\n do_bb_opt = True\n else:\n do_bb_opt = False\n\n duplicate_enable = False\n if \"duplicate_enable\" in info.keys():\n if info[\"duplicate_enable\"] is True:\n duplicate_enable = True\n # chech dependency\n if individual.getEvaluatedData(\"map_width\") is None:\n raise PowerEval.DependencyError(\"PowerEval must be carried out after map width evaluation\")\n if \"convex_program\" in info.keys() and isCpOpt:\n # first setup\n if leakmodel is None:\n try:\n delaymodel = DelayModel(sim_params)\n leakmodel = LeakModel(sim_params)\n except KeyError as e:\n raise KeyError(\"Some parameters for delay, leak model such as {0} are missing\".format(e))\n eval_leak = PowerEval.eval_leak_cp\n else:\n isCpOpt = False\n eval_leak = PowerEval.eval_leak_ilp\n\n leak_power = eval_leak(CGRA, app, sim_params, individual, do_bb_opt)\n dyn_energy = PowerEval.eval_glitch(CGRA, app, sim_params, individual, duplicate_enable)\n\n # get dynamic energy of pipeline regs\n if CGRA.getPregNumber() > 0:\n dyn_energy += sim_params.preg_dynamic_energy * sum(individual.preg)\n\n dyn_power = sim_params.calc_power(app.getClockPeriod(sim_params.getTimeUnit()), \\\n dyn_energy)\n\n individual.saveEvaluatedData(\"dynamic_power\", dyn_power)\n individual.saveEvaluatedData(\"leakage_power\", leak_power)\n\n return dyn_power + leak_power\n\n\n @staticmethod\n def get_opcodes(CGRA, app, individual):\n \"\"\"Gets opcodes for each used ALU.\n\n Args:\n CGRA (PEArrayModel): A model of the CGRA\n app (Application): An application to be optimized\n individual (Individual): an individual\n Returns:\n Dict: opcodes of used ALUs\n keys (str): ALU name of routed graph\n values (str): opcode of the ALU\n \"\"\"\n mapping = individual.mapping\n graph = individual.routed_graph\n op_attr = nx.get_node_attributes(app.getCompSubGraph(), \"opcode\")\n opcodes = {CGRA.getNodeName(\"ALU\", pos): op_attr[op_label] \\\n if op_label in op_attr.keys() else 
\"CAT\" \\\n for op_label, pos in mapping.items()}\n # for routing ALU\n for alu, flag in nx.get_node_attributes(graph, \"route\").items():\n if flag:\n opcodes[alu] = CGRA.getRoutingOpcode(alu)\n return opcodes\n\n @staticmethod\n def eval_leak_cp(CGRA, app, sim_params, individual, leak_optimize,\n _round_method = None):\n \"\"\"Evaluates leackage power consumption.\n If necessary, it will optimize body bias voltage assignments\n by using convex optimization programming.\n\n Args:\n CGRA (PEArrayModel): A model of the CGRA\n app (Application): An application to be optimized\n sim_params (SimParameters): parameters for simulations\n individual (Individual): An individual to be evaluated\n leak_optimize (bool): True if you need body bias optimization.\n _round_method (Function): specifies the method for\n rounding the body bias voltages\n Returns:\n float: leakage power of whole PE array.\n \"\"\"\n if _round_method is None:\n round_method = PowerEval.round_bbv_greedy\n else:\n round_method = _round_method\n\n if leak_optimize:\n # obtain domains\n bb_domains = CGRA.getBBdomains()\n Ndom = len(bb_domains.keys())\n\n # mapping domain name to domain ID\n domkey2ID = {dom: i for dom, i in zip(bb_domains.keys(), range(Ndom))}\n\n # set zero bias leak\n Pleak = [0.0 for _ in range(Ndom)]\n for domain in bb_domains.keys():\n Pleak[domkey2ID[domain]] = \\\n leakmodel.leak0 * len(bb_domains[domain][\"ALU\"])\n\n # make delay table\n # keys: node name\n # values: coeff of alpha-power-model\n opcodes = PowerEval.get_opcodes(CGRA, app, individual)\n delay_table = {node: delaymodel.weight[opcode] \\\n for node, opcode in opcodes.items()}\n\n # make domain table\n # key: node name, value: domain ID\n domain_table = {}\n for node in individual.routed_graph.nodes():\n for domain in bb_domains.keys():\n if node in bb_domains[domain][\"ALU\"] or \\\n node in bb_domains[domain][\"SE\"]:\n domain_table[node] = domkey2ID[domain]\n\n # get maximum latency\n max_lat = app.getClockPeriod(sim_params.getTimeUnit())\n\n # obtain data path\n dpathes = DataPathAnalysis.get_data_path(CGRA, individual)\n path_count = len(dpathes)\n\n # make delay matrix\n # size (path count x # of domains)\n D = np.full((path_count, Ndom), 0.0)\n for i in range(path_count):\n dp = dpathes[i]\n for node in dp:\n D[i][domain_table[node]] += delay_table[node] \\\n if CGRA.isALU(node) else delaymodel.weight[\"SE\"]\n\n # constructs convex optimization problem\n Dreqvec = cp.Parameter(path_count, value = np.full(path_count, max_lat))\n if Ndom > 1:\n # problem variables (vector of bbv)\n bbv = cp.Variable(Ndom)\n # body bias effects\n effvec = delaymodel.delayScale(0.9, bbv)\n # delay and power table\n dtable = cp.Parameter((path_count, Ndom), value = D, nonneg=True)\n ptable = cp.Parameter(Ndom, value=Pleak, nonneg=True)\n # range of bbv\n lbound = cp.Parameter(Ndom, value = np.full(Ndom, leakmodel.bbv_min))\n ubound = cp.Parameter(Ndom, value = np.full(Ndom, leakmodel.bbv_max))\n constraints = [dtable @ effvec <= Dreqvec, \\\n bbv <= ubound, bbv >= lbound]\n else:\n # for single domain\n # problem variables (vector of bbv)\n bbv = cp.Variable()\n # body bias effects\n effvec = delaymodel.delayScale(0.9, bbv)\n # delay and power table\n dtable = cp.Parameter(path_count, value = D.reshape(path_count), \\\n nonneg=True)\n ptable = cp.Parameter(value=Pleak[0], nonneg=True)\n # range of bbv\n lbound = cp.Parameter(value=leakmodel.bbv_min)\n ubound = cp.Parameter(value=leakmodel.bbv_max)\n constraints = [dtable * effvec <= Dreqvec, \\\n bbv <= 
ubound, bbv >= lbound]\n\n power = leakmodel.leackage(bbv, ptable, mult=cp.multiply, \\\n exp=cp.exp)\n # create minimization problem\n prob = cp.Problem(cp.Minimize(cp.sum(power)), constraints)\n solve_fail = False\n\n # solve\n try:\n prob.solve(**cp_solver)\n except cp.SolverError as e:\n solve_fail = True\n\n # check status\n solve_fail |= prob.status in [\"infeasible\", \"unbounded\"]\n\n # get status\n if not solve_fail:\n # get optimal value\n leak_power = prob.value\n individual.saveEvaluatedData(\"before_round_leakage\", leak_power)\n # statistics\n stats = prob.solver_stats\n # print(\"solve time\", stats.solve_time)\n # print(\"iter\", stats.num_iters)\n\n # get optimal bbv assignment\n opt_bbv = [v for v in bbv.value] if Ndom > 1 else [bbv.value]\n # voltage rouding\n if delaymodel.need_quantize:\n opt_bbv = PowerEval.round_bbv(D, opt_bbv, max_lat)\n\n leak_power = 0.0\n for bbv, p in zip(opt_bbv, Pleak):\n leak_power += leakmodel.leackage(bbv, p)\n\n individual.saveEvaluatedData(\"body_bias\", \\\n {domain: opt_bbv[domkey2ID[domain]] \\\n for domain in bb_domains.keys()})\n\n else:\n # fails to solve\n leak_power = PENALTY_COST\n individual.saveEvaluatedData(\"body_bias\", {})\n individual.invalidate()\n\n else:\n width, height = CGRA.getSize()\n leak_power = leakmodel.leak0 * width * height\n\n return leak_power\n\n @staticmethod\n def round_bbv_greedy(delay_table, ptable, bbv_vec, max_lat):\n \"\"\"Rounds the body bias voltages to minimize the leakage\n while satisfying the timing constraints\n\n This rounding strategy is greedy.\n\n Args:\n delay_table (matrix): each row corresponds to each data path\n and each column corresponds to each domain\n ptable (list): zero bias leakage for each domain\n bbv_vec (list): optimal body bias voltage to be rounded\n max_lat (float): maximum path delay for timing constraint\n\n Returns:\n list: rounded body bias voltage for each domain\n \"\"\"\n bias = delaymodel.bias\n round_bbv_vec = []\n # floored voltages\n # key: domain ID\n # value: diff b/w rounded and original\n floored = {}\n Ndom = len(bbv_vec)\n print(delaymodel.quant_step)\n\n # firstly, all of voltages are floored\n for i in range(Ndom):\n v = bbv_vec[i]\n diff = (v * bias - delaymodel.bbv_min * bias)\n step = math.floor(diff / (delaymodel.quant_step * bias))\n rounded = step * delaymodel.quant_step + \\\n delaymodel.bbv_min\n floored[i] = v - rounded\n round_bbv_vec.append(rounded)\n\n floored_sorted = sorted(floored.items(), key=lambda x: -x[1])\n while True:\n effvec = delaymodel.delayScale(0.9, \\\n np.array(round_bbv_vec).reshape((Ndom, 1)))\n lat = max(np.matmul(delay_table, effvec))\n if lat < max_lat:\n # no timing violation\n return round_bbv_vec\n elif len(floored_sorted) == 0:\n raise RuntimeError(\"Fail in voltage rouding\")\n else:\n i, _ = floored_sorted[0]\n floored_sorted = floored_sorted[1:]\n round_bbv_vec[i] = round_bbv_vec[i] + \\\n delaymodel.quant_step\n\n @staticmethod\n def round_bbv_bb(delay_table, ptable, bbv_vec, max_lat):\n \"\"\"Rounds the body bias voltages to minimize the leakage\n while satisfying the timing constraints\n\n This rounding result is exact based on\n Branch-and-Bound algorithm\n\n Args:\n delay_table (matrix): each row corresponds to each data path\n and each column corresponds to each domain\n ptable (list): zero bias leakage for each domain\n bbv_vec (list): optimal body bias voltage to be rounded\n max_lat (float): maximum path delay for timing constraint\n\n Returns:\n list: rounded body bias voltage for each domain\n 
\"\"\"\n bias = delaymodel.bias\n floored_bbv_vec = []\n floored = {}\n Ndom = len(bbv_vec)\n v_step = delaymodel.quant_step\n # firstly, all of voltages are floored\n for i in range(Ndom):\n v = bbv_vec[i]\n diff = (v * bias - delaymodel.bbv_min * bias)\n step = math.floor(diff / (delaymodel.quant_step * bias))\n rounded = step * delaymodel.quant_step + \\\n delaymodel.bbv_min\n floored_bbv_vec.append(rounded)\n floored[i] = v - rounded\n\n floored_sorted = [k for k, _ in \\\n sorted(floored.items(), key=lambda x: -x[1])]\n\n # floored power\n floored_pleak_table = [leakmodel.leackage(v, p) for v, p in \\\n zip(floored_bbv_vec, ptable)]\n # ceiled power\n ceiled_pleak_table = [leakmodel.leackage(v + v_step, p) for v,p in \\\n zip(floored_bbv_vec, ptable)]\n\n # delay effect\n floored_effvec = delaymodel.delayScale(0.9, \\\n np.array(floored_bbv_vec).reshape((Ndom, 1)))\n ceiled_effvec = delaymodel.delayScale(0.9, \\\n np.array([v + v_step for v in floored_bbv_vec]).reshape((Ndom, 1)))\n\n\n # fixed dict:\n # key: dom ID\n # value: True if floored else False\n FLOORED = True\n CEILED = False\n seq_cnt = 0\n while not q.empty():\n # print(seq_cnt, q.qsize())\n prob = q.get()\n # print(\"after get\", seq_cnt, q.qsize())\n seq_cnt += 1\n target = prob[\"remains\"][0]\n remains = prob[\"remains\"][1:]\n # flooring\n fixed = dict(prob[\"fixed\"])\n fixed[target] = FLOORED\n # in the case of remains are ceiled\n floored_flag = [fixed[i] if i in fixed else CEILED \\\n for i in range(Ndom)]\n effvec = np.array([floored_effvec[i] if floored_flag[i] \\\n else ceiled_effvec[i] \\\n for i in range(Ndom)]).reshape((Ndom, 1))\n lat = max(np.matmul(delay_table, effvec))\n if lat < max_lat:\n pleak = sum([floored_pleak_table[i] if floored_flag[i] \\\n else ceiled_pleak_table[i] \\\n for i in range(Ndom)])\n subprob = {\"fixed\": fixed, \"remains\": remains}\n if len(remains) > 0:\n q.put(subprob)\n if pleak < leak_ub:\n leak_ub = pleak\n else:\n if pleak <= leak_ub:\n leak_ub = pleak\n final = subprob\n continue\n\n\n # ceiling\n fixed = dict(prob[\"fixed\"])\n fixed[target] = CEILED\n # in the case of remains are floored\n # assuming the best case about leakage\n floored_flag = [fixed[i] if i in fixed else FLOORED \\\n for i in range(Ndom)]\n pleak = sum([floored_pleak_table[i] if floored_flag[i] \\\n else ceiled_pleak_table[i] \\\n for i in range(Ndom)])\n if pleak <= leak_ub:\n subprob = {\"fixed\": fixed, \"remains\": remains}\n if len(remains) > 0:\n q.put(subprob)\n else:\n final = subprob\n\n return [floored_bbv_vec[i] if final[\"fixed\"][i] else \\\n floored_bbv_vec[i] + v_step for i in range(Ndom)]\n\n @staticmethod\n def round_bbv_ilp(delay_table, ptable, bbv_vec, max_lat):\n \"\"\"Rounds the body bias voltages to minimize the leakage\n while satisfying the timing constraints\n\n This rounding result is exact based on ILP\n\n Args:\n delay_table (matrix): each row corresponds to each data path\n and each column corresponds to each domain\n ptable (list): zero bias leakage for each domain\n bbv_vec (list): optimal body bias voltage to be rounded\n max_lat (float): maximum path delay for timing constraint\n\n Returns:\n list: rounded body bias voltage for each domain\n \"\"\"\n bias = delaymodel.bias\n floored_bbv_vec = []\n Ndom = len(bbv_vec)\n v_step = delaymodel.quant_step\n # firstly, all of voltages are floored\n for i in range(Ndom):\n v = bbv_vec[i]\n diff = (v * bias - delaymodel.bbv_min * bias)\n step = math.floor(diff / (delaymodel.quant_step * bias))\n rounded = step * 
delaymodel.quant_step + \\\n delaymodel.bbv_min\n floored_bbv_vec.append(rounded)\n\n # for i in range(Ndom):\n # print(i, \"{0:2.2f}~{1:2.2f}\".format(floored_bbv_vec[i],\\\n # floored_bbv_vec[i] + v_step))\n\n # floored power\n floored_pleak_table = [leakmodel.leackage(v, p) for v, p in \\\n zip(floored_bbv_vec, ptable)]\n # ceiled power\n ceiled_pleak_table = [leakmodel.leackage(v + v_step, p) for v,p in \\\n zip(floored_bbv_vec, ptable)]\n\n # delay effect\n floored_effvec = delaymodel.delayScale(0.9, \\\n np.array(floored_bbv_vec).reshape((Ndom, 1)))\n ceiled_effvec = delaymodel.delayScale(0.9, \\\n np.array([v + v_step for v in floored_bbv_vec]).reshape((Ndom, 1)))\n\n problem = pulp.LpProblem()\n\n isFloored = pulp.LpVariable.dicts(\"isFloored\", range(Ndom), \\\n 0, 1, cat = \"Binary\")\n isCeiled = pulp.LpVariable.dicts(\"isCeiled\", range(Ndom), \\\n 0, 1, cat = \"Binary\")\n\n # problem definition\n problem += pulp.lpSum([isFloored[i] * floored_pleak_table[i] + \\\n isCeiled[i] * ceiled_pleak_table[i] \\\n for i in range(Ndom)])\n\n # Constraints\n # 1. exclusivity for ceiling or flooring\n for i in range(Ndom):\n problem += (isFloored[i] + isCeiled[i]) == 1\n\n # 2. no timing violation\n for dp in delay_table:\n problem += pulp.lpSum([dp[i] * ( floored_effvec[i] * isFloored[i]\\\n + ceiled_effvec[i] * isCeiled[i]) \\\n for i in range(Ndom)]) <= max_lat\n\n # solve ILP\n stat = problem.solve(ilp_solver)\n result = problem.objective.value()\n leak = pulp.value(problem.objective)\n if pulp.LpStatus[stat] == \"Optimal\" and result != None:\n rounded_bbv = []\n for i in range(Ndom):\n if round(isFloored[i].value()) == 1:\n rounded_bbv.append(floored_bbv_vec[i])\n else:\n rounded_bbv.append(floored_bbv_vec[i] + v_step)\n\n return rounded_bbv\n\n\n\n @staticmethod\n def eval_leak_ilp(CGRA, app, sim_params, individual, leak_optimize):\n \"\"\"Evaluates leackage power consumption.\n If necessary, it will optimize body bias voltage assignments\n by using integer linear programming.\n\n Args:\n CGRA (PEArrayModel): A model of the CGRA\n app (Application): An application to be optimized\n sim_params (SimParameters): parameters for simulations\n individual (Individual): An individual to be evaluated\n leak_optimize (bool): True if you need body bias optimization.\n Returns:\n float: leakage power of whole PE array.\n \"\"\"\n\n if leak_optimize:\n bb_domains = CGRA.getBBdomains()\n # Probrem Declaration\n problem = pulp.LpProblem()\n\n # Variable Declaration\n # 1st key: body bias domain\n # 2nd key: body bias voltage\n isBBV = pulp.LpVariable.dicts(\"isBBV\", (bb_domains.keys(), sim_params.bias_range),\\\n 0, 1, cat = \"Binary\")\n\n # Problem definition\n problem += pulp.lpSum([isBBV[domain][bbv] * sim_params.PE_leak[bbv] * len(bb_domains[domain][\"ALU\"]) \\\n for domain in bb_domains.keys() \\\n for bbv in sim_params.bias_range])\n\n # Constraints\n # 1. Body Bias Voltage Exclusivity\n for domain in bb_domains.keys():\n problem += pulp.lpSum(isBBV[domain][bbv] for bbv in sim_params.bias_range) == 1\n\n # 2. 
Latancy Satisfaction\n # make delay table\n opcodes = PowerEval.get_opcodes(CGRA, app, individual)\n delay_table = {node: sim_params.delay_info[opcode]\n for node, opcode in opcodes.items()}\n\n # make domain table\n # key: node name, value: domain name\n domain_table = {}\n for node in individual.routed_graph.nodes():\n for domain in bb_domains.keys():\n if node in bb_domains[domain][\"ALU\"] or \\\n node in bb_domains[domain][\"SE\"]:\n domain_table[node] = domain\n\n # get maximum latency\n max_lat = app.getClockPeriod(sim_params.getTimeUnit())\n\n # add constrain for each data path\n for dp in DataPathAnalysis.get_data_path(CGRA, individual):\n problem += pulp.lpSum([(delay_table[node][bbv] if CGRA.isALU(node) \\\n else sim_params.delay_info[\"SE\"][bbv]) \\\n * isBBV[domain_table[node]][bbv] \\\n for node in dp\\\n for bbv in sim_params.bias_range]) <= max_lat\n\n # solve this ILP\n # start = time.time()\n stat = problem.solve(ilp_solver)\n # end = time.time()\n # print(end - start, \"sec\")\n result = problem.objective.value()\n leak_power = pulp.value(problem.objective)\n\n # check result\n bbv_assign = {}\n if pulp.LpStatus[stat] == \"Optimal\" and result != None:\n # success\n for domain in bb_domains.keys():\n for bbv in sim_params.bias_range:\n if round(isBBV[domain][bbv].value()) == 1:\n bbv_assign[domain] = bbv\n individual.saveEvaluatedData(\"body_bias\", bbv_assign)\n else:\n individual.saveEvaluatedData(\"body_bias\", {})\n individual.invalidate()\n else:\n PE_leak = sim_params.PE_leak[0]\n width, height = CGRA.getSize()\n leak_power = PE_leak * width * height\n\n if CGRA.getPregNumber() != 0:\n leak_power += sim_params.preg_leak * CGRA.getPregNumber()\n\n return leak_power\n\n @staticmethod\n def eval_glitch(CGRA, app, sim_params, individual, duplicate_enable = False):\n \"\"\"Evaluates dynamic energy consumption of the PE array considering glitch effects.\n\n Args:\n CGRA (PEArrayModel): A model of the CGRA\n app (Application): An application to be optimized\n sim_params (SimParameters): parameters for simulations\n individual (Individual): An individual to be evaluated\n duplicate_enable (bool): True if you need the mapped data-flow\n to be duplicated horizontally.\n Returns:\n float: evaluated energy consumption.\n Note that the value does not contain pipeline register &\n clock tree energy.\n \"\"\"\n graph = copy.deepcopy(individual.routed_graph)\n graph.add_node(\"root\")\n nx.set_node_attributes(graph, 0, \"switching\")\n nx.set_node_attributes(graph, 0, \"len\")\n opcodes = PowerEval.get_opcodes(CGRA, app, individual)\n\n if CGRA.getPregNumber() != 0:\n stage_domains = CGRA.getStageDomains(individual.preg)\n nx.set_node_attributes(graph, -1, \"stage\")\n for v in graph.nodes():\n graph.node[v][\"stage\"] = PowerEval.__getStageIndex(stage_domains, v)\n else:\n nx.set_node_attributes(graph, -1, \"stage\")\n\n for i_port in set(individual.routed_graph.nodes()) & set(CGRA.getInputPorts()):\n graph.add_edge(\"root\", i_port)\n\n # analyze distance from pipeline register\n for u, v in nx.bfs_edges(graph, \"root\"):\n if CGRA.isALU(v) or CGRA.isSE(v):\n if graph.node[u][\"stage\"] == graph.node[v][\"stage\"] and\\\n graph.node[u][\"len\"] + 1 > graph.node[v][\"len\"]:\n graph.node[v][\"len\"] = graph.node[u][\"len\"] + 1\n\n # evaluate glitch propagation\n traversed_list = []\n for u, v in nx.bfs_edges(graph, \"root\"):\n if v in traversed_list:\n continue\n else:\n traversed_list.append(v)\n\n\n if CGRA.isALU(v):\n graph.node[v][\"switching\"] = 
sim_params.switching_info[opcodes[v]]\n # propagation part\n if graph.node[v][\"len\"] > 0:\n prev_sw = max([graph.node[prev][\"switching\"] for prev in graph.predecessors(v)])\n graph.node[v][\"switching\"] += sim_params.switching_propagation * \\\n (sim_params.switching_decay ** graph.node[v][\"len\"]) * \\\n prev_sw\n\n elif CGRA.isSE(v):\n prev_sws = [graph.node[prev][\"switching\"] for prev in graph.predecessors(v)]\n prev_sws.append(MIN_SW)\n graph.node[v][\"switching\"] = max(prev_sws) * sim_params.se_weight\n\n\n S_total = sum(nx.get_node_attributes(graph, \"switching\").values())\n \n if duplicate_enable:\n width, __ = CGRA.getSize()\n S_total *= width // individual.getEvaluatedData(\"map_width\")\n\n del graph\n\n return S_total * sim_params.switching_energy\n\n @staticmethod\n def __getStageIndex(stage_domains, node):\n for stage in range(len(stage_domains)):\n if node in stage_domains[stage]:\n break\n else:\n stage = -1\n\n return stage\n\n\n @staticmethod\n def isMinimize():\n return True\n\n @staticmethod\n def name():\n return \"Power_Consumption\"\n","repo_name":"hal-lab-u-tokyo/GenMap","sub_path":"PowerEval.py","file_name":"PowerEval.py","file_ext":"py","file_size_in_byte":30916,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"8494744585","text":"# Group Members\n# Jenalea Rajab: 562262\n# Amy Pegram: 1825142\n# Claudio Surmon: 1830290\n# Rushil Daya: 1830490\n\n# The following tutorials/public gits were used for the implementation of this Assignment:\n# References\n# [1] A. Paszke, “Reinforcement learning (dqn) tutorial.” [Online]. Available: https://pytorch.org/tutorials/intermediate/reinforcement_q_learning.html\n# [2] J. TORRES.AI. Deep q-network (dqn)-i. [Online]. Available: https://towardsdatascience.com/deep-q-network-dqn-i-bce08bdf2af\n# [3] K. Tessera. Dqn atari. [Online]. 
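# --- Illustrative aside (not part of the original file) ---
# eval_glitch above models glitch growth as switching activity that decays
# geometrically with distance from the last pipeline register:
#   S(v) = S_op(v) + prop * decay**len(v) * max(S(predecessors))
# Toy numbers (all assumed): an ALU two hops deep with prop=0.5, decay=0.8,
# base switching 2.0 and a noisiest predecessor at 3.0:
prop, decay = 0.5, 0.8
s = 2.0 + prop * decay ** 2 * 3.0   # 2.96 effective switching events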
Available: https://github.com/KaleabTessera/DQN-Atari\n# [4] https://github.com/Pieter-Cawood/Reinforcement-Learning/blob/master/NLE_DQN/Agent.py\n# [5] The skeleton code for the COMS4047A/COMS7053A Lab 3 - Deep Q-Network\n\nimport random\nfrom minihack import reward_manager\nimport numpy as np\nimport gym\nimport torch\n\nfrom dqn.agent import DQNAgent\nfrom dqn.replay_buffer import ReplayBuffer\nimport time\nimport minihack \nfrom nle import nethack\nimport copy\nimport skimage.io as io\nfrom minihack import RewardManager\nimport random\nimport os\n\nif __name__ == \"__main__\":\n frame = 0\n hyper_params = {\n \"seed\": random.randint(0,1000), # which seed to use\n \"env\": \"MiniHack-Quest-Hard-v0\", # name of the game\n \"replay-buffer-size\": int(2e3), # replay buffer size\n \"learning-rate\": 0.99, # learning rate for RMSprop optimizer\n \"discount-factor\": 0.99, # discount factor\n \"num-steps\": int(1e6), # total number of steps to run the environment for\n \"batch-size\": 32, # number of transitions to optimize at the same time\n \"learning-starts\": 10000, # number of steps before learning starts\n \"learning-freq\": 5, # number of iterations between every optimization step\n \"use-double-dqn\": True, # use double deep Q-learning\n \"target-update-freq\": 1000, # number of iterations between every target network update\n \"eps-start\": 1.0, # e-greedy start threshold\n \"eps-end\": 0.1, # e-greedy end threshold\n \"eps-fraction\": 0.1, # fraction of num-steps\n \"print-freq\": 10,\n }\n #Actions used by the agent to navigate the world\n MOVE_ACTIONS = tuple(nethack.CompassDirection)\n\n #More actions the action can do\n NAVIGATE_ACTIONS = MOVE_ACTIONS + (\n nethack.Command.PICKUP, #the agent can pickup items that are then stored in the inventory\n nethack.Command.INVENTORY, #an agent can use items in its inventory, such as leviation boots to cross the lava pool\n nethack.Command.LOOK, #an agent can look what is here\n nethack.Command.OPEN, # an agent can open a door\n ) \n\n #Function used to simplify and normalize the observation space (based on code from [4])\n def format_observations(observation):\n # - and | The walls of a room, or an open door. Or a grave (|).\n # . The floor of a room, ice, or a doorless doorway.\n # # A corridor, or iron bars, or a tree, or possibly a kitchen sink (if your dungeon has\n # sinks), or a drawbridge.\n # > Stairs down: a way to the next level.\n # < Stairs up: a way to the previous level.\n # + A closed door, or a spellbook containing a spell you may be able to learn.\n # @ Your character or a human.\n # $ A pile of gold.\n # ^ A trap (once you have detected it).\n # ) A weapon.\n # [ A suit or piece of armor.\n # % Something edible (not necessarily healthy).\n # ? A scroll.\n # / A wand.\n # = A ring.\n # ! A potion.\n # ( A useful item (pick-axe, key, lamp . . . ).\n # \" An amulet or a spider web.\n # * A gem or rock (possibly valuable, possibly worthless).\n # ` A boulder or statue.\n # 0 An iron ball.\n # _ An altar, or an iron chain.\n # { A fountain.\n # } A pool of water or moat or a pool of lava.\n # \\ An opulent throne\n # I This marks the last known location of an invisible or otherwise unseen monster. 
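# --- Illustrative aside (not part of the original file) ---
# format_observations in this file collapses the NetHack character map into
# three float levels: 0.0 for everything else, 0.2 for hostile tiles and 0.5
# for structure. The same idea on a one-dimensional toy row:
import numpy as np

row = np.array([ord('.'), ord('}'), ord('-'), ord('#')])
enc = np.zeros(row.shape)
enc[np.where((row == ord('}')) | (row == ord('I')) | (row == ord('&')))] = 0.2
enc[np.where((row == ord('-')) | (row == ord('|')) | (row == ord('+')) | (row == ord('#')))] = 0.5
# enc -> [0.0, 0.2, 0.5, 0.5]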
Note that the monster could have moved\n\n        #translate all the text characters to ASCII\n        walls = ord('-')\n        doors = ord('|')\n        closed_door = ord('+')\n        corridor = ord('#')\n        lava = ord('}')\n        monster = ord('I')\n        demon = ord('&')\n\n        #create a copy of the observation space\n        copy_obs = observation['chars_crop']\n\n        #create numpy array to represent our observation space\n        obs_chars = np.zeros(copy_obs.shape) #set everything to 0\n        obs_chars[np.where((copy_obs == lava) | (copy_obs == monster) | (copy_obs == demon))] = 0.2 #set hostile objects to 0.2\n        obs_chars[np.where((copy_obs == walls) | (copy_obs == doors) | (copy_obs == closed_door) | (copy_obs == corridor))] = 0.5 #set environment to 0.5\n\n        return obs_chars\n\n    #get the seed from the given hyperparameters\n    np.random.seed(hyper_params[\"seed\"])\n    random.seed(hyper_params[\"seed\"])\n\n    #Change the built in reward manager to include a reward for opening a door and killing a demon\n    reward_gen = RewardManager()\n    reward_gen.add_location_event(\"door\", reward=0.2)\n    reward_gen.add_kill_event(\"demon\", reward=0.2)\n\n    #create the env, with glyphs_crop and not glyphs so that observation space will be 9x9\n    env = gym.make(hyper_params[\"env\"], observation_keys=(\"glyphs_crop\", \"chars_crop\", \"colors\", \"pixel\", \"blstats\"), actions = NAVIGATE_ACTIONS, reward_manager = reward_gen)\n    env.seed(hyper_params[\"seed\"]) #environment is created with the random seed\n    action_space = env.action_space\n\n    replay_buffer = ReplayBuffer(hyper_params[\"replay-buffer-size\"])\n\n    agent = DQNAgent(env.observation_space[\"glyphs_crop\"], env.action_space, replay_buffer,\n                     hyper_params[\"learning-rate\"],\n                     hyper_params[\"batch-size\"],\n                     hyper_params[\"discount-factor\"])\n\n    eps_timesteps = hyper_params[\"eps-fraction\"] * float(hyper_params[\"num-steps\"])\n    episode_rewards = [0.0]\n    episode_loss = []\n\n    state = env.reset() #reset the state space\n    glyphs = format_observations(state) #format the observation space for normalization\n\n    # Episode loop taken from the Lab [5]\n    for t in range(hyper_params[\"num-steps\"]):\n        fraction = min(1.0, float(t) / eps_timesteps)\n        eps_threshold = hyper_params[\"eps-start\"] + fraction * (\n            hyper_params[\"eps-end\"] - hyper_params[\"eps-start\"]\n        )\n        sample = random.random()\n\n        if sample <= eps_threshold:\n            action = env.action_space.sample()\n        else:\n            action = agent.act(glyphs)\n\n        # Take step in env\n        next_state, reward, done, _ = env.step(action)\n\n        # Add state, action, reward, next_state, float(done) to replay memory - cast done to float\n        done = float(done)\n\n        glyph_next_state = format_observations(next_state) # format observation before adding to the replay buffer\n        agent.replay_buffer.add(glyphs, action, reward, glyph_next_state, done)\n\n        # Update the state\n        state = next_state\n        glyphs = glyph_next_state # keep the formatted observation in sync with the new state\n\n        # Add reward to episode_reward\n        episode_rewards[-1] += reward\n\n        if done:\n            state = env.reset()\n            glyphs = format_observations(state)\n            episode_rewards.append(0.0)\n\n        #if the program has run for a certain amount of steps, the neural network will start learning using the replay buffer\n        if (t > hyper_params[\"learning-starts\"] and t % hyper_params[\"learning-freq\"] == 0): \n            episode_loss.append(agent.optimise_td_loss()) #parameters are updated every 5 steps\n\n        if (t > hyper_params[\"learning-starts\"] and t % hyper_params[\"target-update-freq\"] == 0):\n            agent.update_target_network() #update the target network every 1000 steps\n\n        num_episodes = len(episode_rewards)\n        #Use the pixel parameter to create a video for our agent at the last step of the episode\n        if 
num_episodes > 900 and num_episodes <= 901:\n if not os.path.isdir(f\"video_\" + str(hyper_params[\"seed\"])):\n os.mkdir(f\"video_\" + str(hyper_params[\"seed\"]))\n\n io.imsave(f\"video_\"+ str(hyper_params[\"seed\"]) +f\"/frame_{frame}.png\",next_state[\"pixel\"])\n frame += 1\n \n if (done and hyper_params[\"print-freq\"] is not None and len(episode_rewards) % hyper_params[\"print-freq\"] == 0):\n #Save the reward and the loss\n np.savetxt('rewards_'+ str(hyper_params[\"seed\"]) +'.csv', episode_rewards, delimiter=',', fmt='%1.5f')\n np.savetxt('loss_'+ str(hyper_params[\"seed\"]) +'.csv', episode_loss,delimiter=',', fmt='%1.5f')\n\n mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)\n print(\"********************************************************\")\n print(\"steps: {}\".format(t))\n print(\"episodes: {}\".format(num_episodes))\n print(\"mean 100 episode reward: {}\".format(mean_100ep_reward))\n print(\"% time spent exploring: {}\".format(int(100 * eps_threshold)))\n print(\"********************************************************\")\n torch.save(agent.policy_network, 'model'+ str(hyper_params[\"seed\"]) +'.pt')\n","repo_name":"ClaudzTheEngineer/COMS4047_Assignment","sub_path":"train_dqn.py","file_name":"train_dqn.py","file_ext":"py","file_size_in_byte":9167,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"15875214195","text":"import sys\nimport urllib.request\n# from time import sleep\n\nif __name__ == \"__main__\":\n url_list = sys.argv\n for url in url_list:\n try:\n fp = urllib.request.urlopen(url)\n mybytes = fp.read()\n html_output = mybytes.decode(\"utf8\")\n fp.close()\n except:\n html_output = \"\"\n\n print(\"### URL source ######################################################### \\n\\n\")\n print(html_output)\n\n# if __name__ == \"__main__\":\n# num = int(sys.argv[1])\n# for i in range(num):\n# print (str(i)+\" \"+str(i*i*i))\n# sleep(1)\n","repo_name":"bakhmat/itc2020_docker","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21226971886","text":"#Method 1:\n\nfib=[0,1]\ni=1\nwhile fib[i-1]+fib[i]<=100:\n fib.append(fib[i-1]+fib[i])\n i += 1\nprint(fib)\n\n#Method 2:\nfibonacci=[]\nfor i in range(100):\n\tif i+1 <= 2: \t\t\t#\tsetting first 2 items \n\t\tfibonacci.append(i)\n\telse:\t\t\t\t\t\t#\tgenerate next item with rule\n\t\tfibonacci.append(fibonacci[i-1]+fibonacci[i-2])\n\t\n\tif fibonacci[i]>= 100:\t\t#\t > 100 ? 
stop\n\t\tfibonacci.pop(i)\n\t\tbreak\nprint(fibonacci)\n","repo_name":"magedus/python-11","sub_path":"zhangruijie/Week02/fib.py","file_name":"fib.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"26494981675","text":"# -*- coding: utf-8 -*-\n'''\nSphinx/docutils extension to create links to Wikipedia articles.\n\n :wikipedia:`Sphinx`\n\n :wikipedia:`mythical creature `\n\n :wikipedia:`:zh:斯芬克斯`\n\n :wikipedia:`Answer to the Ultimate Question of Life, the Universe, and Everything <:de:42 (Antwort)>`\n\n'''\n\nimport re\nimport urllib.request, urllib.parse, urllib.error\nfrom docutils import nodes, utils\nfrom sphinx.util.nodes import split_explicit_title\n\nbase_url = 'http://%s.wikipedia.org/wiki/'\ndef make_wikipedia_link(name, rawtext, text, lineno, inliner,\n options={}, content=[]):\n env = inliner.document.settings.env\n lang = env.config.wikipedia_lang\n\n text = utils.unescape(text)\n has_explicit, title, target = split_explicit_title(text)\n \n m = re.match(r'\\:(.*?)\\:(.*)', target)\n if m:\n lang, target = m.groups()\n if not has_explicit:\n title = target\n ref = base_url % lang + urllib.parse.quote(target.replace(' ', '_').encode('utf8'), safe='')\n\n node = nodes.reference(rawtext, title, refuri=ref, **options)\n return [node],[]\n\ndef setup(app):\n app.add_config_value('wikipedia_lang', \n 'en', \n 'env')\n app.add_role('wikipedia', make_wikipedia_link)\n","repo_name":"netxms/netxms-doc","sub_path":"_lib/wikipedia.py","file_name":"wikipedia.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"61"} +{"seq_id":"26730803346","text":"from datetime import datetime, timedelta\nfrom ciso8601 import parse_datetime\nimport numpy as np\n\nimport pandas as pd\nfrom datetime import datetime\nimport statistics as s\n\nimport sys\nsys.path.append(\"..\")\nfrom data_processing import Preprocessor\n\niv_sec = {'1m':60, '3m':60*3, '5m':60*5, '15m':60*15, '30m':60*30, '1h':60*60, '2h':60*60*2, '4h':60*60*4, '8h':60*60*8, '1d':60*60*24, '3d':60*60*24*3, '1w':60*60*24*7}\n\nclass Account():\n def __init__(self, leverage, order_size, pyramid_max, FEE, liq_bump, start_balance, MODEL, secondary_model=None):\n self.ORDER_SIZE = order_size\n self.pyramid_max = pyramid_max\n self.awaiting_orders = []\n\n self.MODEL = MODEL\n self.leverage = leverage\n self.FEE = FEE\n self.liq_bump = liq_bump\n\n self.start_balance = start_balance\n self.balance = start_balance\n self.position = 0\n self.pos_fee = 0\n self.margin = 0\n self.entry_price = 0\n self.time = 0 \n self.PNL = 0\n\n self.trade = s.Trade()\n self.pyramid = 0\n self.pyramid_awaiting = 0\n\n\n preprocessor = Preprocessor()\n\n self.klines = preprocessor.klines_load()\n preprocessor.repreprocess(self.MODEL)\n\n self.symbol = preprocessor.SYMBOL\n self.interval = preprocessor.INTERVAL\n self.k_i = preprocessor.PAST_SEQ_LEN + 1\n self.kline_limit = preprocessor.PAST_SEQ_LEN + 1\n\n self.pred_df = preprocessor.pred_df\n\n if secondary_model:\n self.secondary_model = secondary_model\n preprocessor_2 = Preprocessor(klines='futures')\n preprocessor_2.repreprocess(secondary_model, do_not_use_ready=True)\n self.pred_df_2 = preprocessor_2.pred_df\n else:\n self.secondary_model= None\n\n #self.klines = self.klines.reset_index()\n #self.klines = self.klines.drop(columns=[\"index\"])\n\n print(f\"Loaded klines starting from 
{datetime.fromtimestamp(int(self.klines.values[0][0])/1000)}\"\n              + f\" ending on {datetime.fromtimestamp(int(self.klines.values[-1][0]/1000))}\")\n\n    def tick(self):\n        if self.k_i+1==len(self.klines.index):\n            print(f\"END, roi: {round(self.balance/self.start_balance*100-100,2)}%\")\n            return pd.DataFrame(), None\n        else: \n            self.k_i+=1\n\n        if self.klines.index[self.k_i-1]+1 != self.klines.index[self.k_i]:\n            print(\"jump\")\n            self.close(\"SHORT\", cancel_awaiting_orders=True)\n            self.close(\"LONG\", cancel_awaiting_orders=True) \n            self.k_i+=self.kline_limit\n\n        candles = self.klines[self.k_i-1:self.k_i] # previously: self.klines[self.k_i-self.kline_limit:self.k_i]\n\n        self.price = candles.values[-1][4]\n        self.time = candles.values[-1][0]\n\n        high = candles.values[-1][2]\n        low = candles.values[-1][3]\n\n        # NOTE (reconstruction): the comparison operators in this loop were lost in\n        # the source; the assumed fill rule is that a pending LONG limit order fills\n        # once the candle low trades below its price, and a pending SHORT once the\n        # high trades above it.\n        for side, price in list(self.awaiting_orders):\n            if side == 'LONG':\n                if low < price:\n                    self.pyramid_awaiting -= 1\n                    self.awaiting_orders.remove((side, price))\n                    self.open(side, price)\n            elif side == 'SHORT':\n                if high > price:\n                    self.pyramid_awaiting -= 1\n                    self.awaiting_orders.remove((side, price))\n                    self.open(side, price)\n            else:\n                # something is wrong with the side argument\n                self.pyramid_awaiting -= 1\n                self.awaiting_orders.remove((side, price))\n\n        #calculating pnl and checking for liq\n        self.PNL = self.position*(self.price - self.entry_price)\n\n        if self.entry_price != 0: \n            self.change = self.price/self.entry_price - 1\n        else:\n            self.change = 0\n\n\n        if self.position>0: #LONG\n            PNL_pct_high = (self.position*(high - self.entry_price) - (self.pos_fee + self.FEE*self.position*high))/self.margin\n            PNL_pct_low = (self.position*(low - self.entry_price) - (self.pos_fee + self.FEE*self.position*low))/self.margin\n\n            if PNL_pct_low<-1+self.liq_bump: # not exact, since it already includes fees that would normally be counted separately\n                self.liq()\n\n            self.trade.update_pnl(PNL_pct_high, PNL_pct_low)\n\n        elif self.position<0: #SHORT\n            PNL_pct_low = (self.position*(high - self.entry_price) - (self.pos_fee + self.FEE*self.position*high))/self.margin\n            PNL_pct_high = (self.position*(low - self.entry_price) - (self.pos_fee + self.FEE*self.position*low))/self.margin\n\n            if PNL_pct_low<-1+self.liq_bump:\n                self.liq()\n\n            self.trade.update_pnl(PNL_pct_high, PNL_pct_low)\n\n        pred = self.pred_df['preds'][self.klines.index[self.k_i-1]]\n        if self.secondary_model:\n            pred_2 = self.pred_df_2['preds'][self.klines.index[self.k_i-1]]\n            return candles, pred, pred_2\n        return candles, pred\n\n    def close(self, side, cancel_awaiting_orders=True):\n        if self.position<0 and side == \"SHORT\" or self.position>0 and side == \"LONG\":\n            #print(\"Closing\", side)\n            self.pos_fee+=abs(self.FEE*self.position*self.price)\n            self.balance+=self.PNL - self.pos_fee\n            self.trade.close(self.price, self.time, (self.PNL - self.pos_fee)/self.margin)\n\n            self.position = 0\n            self.entry_price = 0\n            self.margin = 0\n            self.pos_fee = 0\n            self.PNL = 0\n            self.pyramid = 0\n        else:\n            #print(f'No {side} to close')\n            pass\n\n        if cancel_awaiting_orders==True:\n            # drop all pending orders on this side (popping from the end of the\n            # list, as the original did, could remove the wrong order)\n            kept = [(s, p) for (s, p) in self.awaiting_orders if s != side]\n            self.pyramid_awaiting -= len(self.awaiting_orders) - len(kept)\n            self.awaiting_orders = kept\n\n\n\n    def create_order(self, side, price=None):\n        if self.pyramid+self.pyramid_awaiting>=self.pyramid_max:\n            #print(f\"Would be {side} but max pyramid ({self.pyramid_max})\")\n            return 0\n\n        if price:\n            self.awaiting_orders.append((side, price))\n            self.pyramid_awaiting+=1\n        else:\n            self.open(side, self.price)\n\n
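# --- Illustrative aside (not part of the original file) ---
# When open() below adds to an existing position, the new entry price is the
# size-weighted average of the old position and the new fill:
#   entry = (position*old_entry + quantity*price) / (position + quantity)
# e.g. long 2 units from 100, adding 1 unit at 130:
entry = (2 * 100 + 1 * 130) / (2 + 1)   # 110.0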
({self.pyramid_max})\")\n return 0\n\n quantity = self.order_size(side)\n\n if self.position==0:\n margin = abs(self.position + quantity)*price/self.leverage\n entry = price\n elif self.position*quantity>0:\n margin = self.margin + abs(quantity*price/self.leverage)\n entry = (self.position*self.entry_price + quantity*price)/(self.position+quantity)\n else:\n print(\"Nowa pozycja albo dokladka, nic innego nie przewiduje\")\n\n if margin <= self.balance:\n self.position+=quantity\n self.margin = margin\n self.pos_fee += abs(self.FEE*quantity*price)\n self.entry_price = entry\n self.pyramid+=1\n\n if self.position>0:\n liq = (self.entry_price)*(1-1/self.leverage+self.liq_bump)\n elif self.position<0:\n liq = (self.entry_price)*(1+1/self.leverage+self.liq_bump)\n\n if self.pyramid==1:\n self.trade.open(side=side)\n self.trade.add(price, self.time, amount=abs(quantity)*price/(self.leverage*self.balance), liq=liq)\n\n else:\n print(\"Za duzy order\")\n\n def liq(self):\n print(\"Liq\")\n self.position = 0\n self.entry_price = 0\n self.balance -= self.margin\n self.margin = 0\n self.pos_fee = 0\n self.PNL = 0\n self.pyramid = 0\n\n self.trade.liquidate(self.time) \n\n def order_size(self, side):\n if side == \"LONG\":\n return self.ORDER_SIZE*(self.balance-self.margin)*self.leverage/self.price\n elif side == \"SHORT\":\n return -self.ORDER_SIZE*(self.balance-self.margin)*self.leverage/self.price\n else:\n print(\"order wrong side\")\n\n def additional_interval(self, interval):\n d = f\"RAW_DATA/Binance_{self.symbol}USDT_{interval}.json\"\n self.klines_additional = pd.read_json(d)\n self.klines_additional = self.klines_additional.set_index(0)\n\n def get_additional_price(self, interval):\n time=int(self.time - 1000*iv_sec[interval] - self.time%(1000*iv_sec[interval]))\n try:\n to_return = self.klines_additional[4].loc[time-100:time]\n except:\n #print(\"Nie mozna znalezc dodatkowej swiecy dziennej\")\n return self.price\n return to_return\n\n def print_details(self):\n print(\"-----------------\")\n print(\"time : \", datetime.fromtimestamp(self.time/1000))\n print(\"balance : \", self.balance)\n print(\"margin : \", self.margin)\n print(\"position : \", self.position)\n print(\"pos_fee : \", self.pos_fee)\n print(\"entry_price : \", self.entry_price)\n print(\"pnl : \", self.PNL)\n print(\"pyramid : \", self.pyramid)\n print(\"Awaiting_orders: \")\n for side, price in self.awaiting_orders:\n print(side, price)\n\n def get_max_pnl(self):\n return self.trade.pnl_max\n \n\n\n\n\n","repo_name":"wojtke/crypto-algorithmic-trading","sub_path":"backtesting/heart.py","file_name":"heart.py","file_ext":"py","file_size_in_byte":9402,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"23282554218","text":"import time\nimport random\nimport logging\n\nlogging.basicConfig(level=logging.ERROR)\n\nseed = 0.5\n\n\ndef randomize(seed):\n random_number = random.random()\n\n if random_number >= seed:\n cell_state = 0\n else:\n cell_state = 1\n\n return cell_state\n\n\ndef dead_state(width, height):\n state = []\n\n for _ in range(width):\n row = []\n for _ in range(height):\n row.append(0)\n \n state.append(row)\n\n return state\n\n\ndef get_state_width(state):\n return len(state)\n\n\ndef get_state_height(state):\n return len(state[0])\n\n\ndef random_state(width, height):\n state = dead_state(width, height)\n\n for item in state:\n for index in range(len(item)):\n item[index] = randomize(seed)\n\n return state\n\n\ndef render_state(state):\n state_height = 
get_state_height(state)\n\n fence = '-' * (state_height + 2)\n print(fence)\n\n for row in state:\n pretty_row = '|'\n\n for item in row:\n if item == 0:\n symbol = ' '\n else:\n symbol = u\"\\u2588\"\n pretty_row += symbol\n\n pretty_row += '|'\n \n print(pretty_row)\n\n print(fence)\n\n\ndef update_cell_status(cell, state):\n state_width = get_state_width(state)\n state_height = get_state_height(state)\n\n cell_row = cell.get('row')\n cell_col = cell.get('col')\n cell_status = cell.get('status')\n\n logging.debug(f\"cell coords: {cell_row},{cell_col} | status: {cell_status}\")\n\n live_neighbors = 0\n\n for neighbor_row in range(cell_row - 1, cell_row + 2):\n if neighbor_row >= 0 and neighbor_row < state_width:\n for neighbor_col in range(cell_col - 1, cell_col + 2):\n if neighbor_col >= 0 and neighbor_col < state_height:\n if neighbor_row == cell_row and neighbor_col == cell_col:\n continue\n else:\n logging.debug(f\"neighbor coords: {neighbor_row},{neighbor_col} | status: {state[neighbor_row][neighbor_col]}\")\n if state[neighbor_row][neighbor_col] == 1:\n live_neighbors += 1\n\n if cell_status == 1:\n if live_neighbors <= 1:\n logging.debug(\"live cell dies because of underpopulation\")\n cell_status = 0\n\n if live_neighbors >= 2 and live_neighbors <= 3:\n logging.debug(\"live cell lives because of right population\")\n cell_status = 1\n\n if live_neighbors > 3:\n logging.debug(\"live cell dies because of overpopulation\")\n cell_status = 0\n\n else:\n if live_neighbors == 3:\n logging.debug(\"dead cell alives because of population\")\n cell_status = 1\n\n else:\n logging.debug(\"dead cell stays dead\")\n cell_status = 0\n\n logging.debug(\"updating next state:\")\n\n return cell_status\n\n\ndef next_state(state):\n state_width = get_state_width(state)\n state_height = get_state_height(state)\n\n next_state = dead_state(state_width, state_height)\n\n for row in range(0, state_width):\n for col in range(0, state_height):\n cell = {'row':row, 'col':col, 'status':state[row][col]}\n next_state[row][col] = update_cell_status(cell, state)\n\n return next_state\n\n\ndef run(w, h):\n dead = dead_state(w, h)\n state = random_state(w, h)\n while True:\n if state == dead:\n render_state(state)\n break\n render_state(state)\n state = next_state(state)\n time.sleep(0.1)\n\nif __name__ == '__main__':\n run(5, 5)\n\n","repo_name":"MaksVe/game-of-life","sub_path":"gol.py","file_name":"gol.py","file_ext":"py","file_size_in_byte":3623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1180400432","text":"import numpy as np\nfrom scipy.ndimage import shift\n\ndef gen_offsets(shift, neighbor=8):\n assert neighbor == 4 or neighbor == 8, 'neigbor must be 4 or 8!'\n if neighbor == 4:\n return [[-shift, 0], [0, -shift]]\n else:\n return [[-shift, 0], [0, -shift], [-shift, -shift], [-shift, shift]]\n\ndef multi_offset(shifts, neighbor=8):\n out = []\n for shift in shifts:\n out += gen_offsets(shift, neighbor=neighbor)\n return out\n\n\ndef gen_affs_stmap(labels, offsets=[[-1,0],[0,-1]], max_neighbor = 60, ignore=False, padding=False):\n \n n_channels = len(offsets)\n \n affinities = np.zeros((n_channels,) + labels.shape, dtype=np.float32)\n\n masks = np.zeros((n_channels,) + labels.shape, dtype=np.uint8)\n\n for cid, off in enumerate(offsets): \n shift_off = [-x for x in off] \n shifted = shift(labels, shift_off, order=0, prefilter=False) \n mask = np.ones_like(labels) \n mask = shift(mask, shift_off, order=0, prefilter=False) \n dif = labels - 
shifted \n out = dif.copy() \n out[dif == 0] = 1 \n out[dif != 0] = 0 \n if ignore: \n out[labels == 0] = 0 \n out[shifted == 0] = 0 \n if padding: \n out[mask==0] = 1 \n else: \n out[mask==0] = 0 \n\n affinities[cid] = out \n masks[cid] = mask \n \n ################ find instances share same time window ################\n # neighbor_masks is used to push different trajectories in the same window far apart\n\n obj_ids = np.unique(labels)\n\n num_instances = len(obj_ids) - 1 # not include background 0\n\n neighbor_masks = np.zeros((max_neighbor, max_neighbor), dtype=int)\n\n for obj_id in obj_ids:\n\n if obj_id == 0: continue\n\n if obj_id > np.ceil(num_instances/2):\n break\n\n rows_x, cols_y = np.where(labels == obj_id)\n\n min_col_y, max_col_y = np.min(cols_y), np.max(cols_y)\n\n time_win_labels = labels[:, min_col_y : max_col_y] \n\n neighbor_instance_ids = np.unique(time_win_labels)\n \n for neighbor_instance_id in neighbor_instance_ids:\n\n if neighbor_instance_id != obj_id and neighbor_instance_id != 0:\n\n neighbor_masks[obj_id - 1, neighbor_instance_id - 1] = 1\n\n neighbor_masks = neighbor_masks + np.transpose(neighbor_masks)\n\n ################ find instances share same time window ################\n\n return affinities, masks, neighbor_masks\n","repo_name":"TeRyZh/Spatial-Temporal-Deep-Embedding-for-Vehicle-Trajectory-Reconstruction-from-High-Angle-Video","sub_path":"SpatioTemporal Correlation Learning/utils/affinity_stmap.py","file_name":"affinity_stmap.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39336237365","text":"from dotenv import load_dotenv\nfrom concurrent import futures\nimport time\nfrom publisher import Publisher\nfrom compiled.encrypter_pb2_grpc import (\n EncrypterServicer,\n add_EncrypterServicer_to_server,\n)\nfrom compiled.encrypter_pb2 import Response\n\nfrom utils import encrypt, decrypt\nimport grpc\n\nload_dotenv()\n\nclass EncrypterService(EncrypterServicer):\n def __init__(self, publisher) -> None:\n self._publisher = publisher\n\n def Encrypt(self, request, context):\n encrypted = encrypt(request.key, request.message)\n self._publisher.publish(\"encrypted\", request.message)\n return Response(result=encrypted)\n\n def Decrypt(self, request, context):\n decrypted = decrypt(request.key, request.message)\n self._publisher.publish(\"decrypted\", request.message)\n return Response(result=decrypted)\n\n\ndef serve():\n publisher = Publisher()\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n add_EncrypterServicer_to_server(EncrypterService(publisher=publisher), server)\n\n server.add_insecure_port(\"[::]:50051\")\n server.start()\n try:\n while True:\n time.sleep(86400)\n except KeyboardInterrupt:\n server.stop(0)\n publisher.close_connection()\n\n\nif __name__ == \"__main__\":\n serve()\n","repo_name":"4L3X4NND3RR/PythonMicroservices","sub_path":"encryption_service/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"20376837671","text":"from bs4 import BeautifulSoup\nfrom dataclasses import dataclass\nfrom meya.button.spec import ButtonElementSpec\nfrom meya.button.spec import ButtonType\nfrom meya.component.element import Component\nfrom meya.element.field import element_field\nfrom meya.element.field import response_field\nfrom meya.entry import Entry\nfrom meya.salesforce.knowledge.integration 
import (\n SalesforceKnowledgeIntegration,\n)\nfrom meya.salesforce.knowledge.integration import (\n SalesforceKnowledgeIntegrationRef,\n)\nfrom meya.salesforce.knowledge.payload.article import (\n SalesforceKnowledgeArticle,\n)\nfrom meya.tile.spec import TileElementSpec\nfrom typing import List\nfrom typing import Union\n\n\n@dataclass\nclass SalesforceKnowledgeArticleDisplay(Component):\n @dataclass\n class Response:\n result: List[TileElementSpec] = response_field(sensitive=True)\n\n search_response: List[SalesforceKnowledgeArticle] = element_field(\n help=(\n \"The response from the Salesforce Knowledge search API. \"\n \"This is usually available in flow scope, (@ flow.get('result')), \"\n \"after using the \"\n \"`meya.salesforce.knowledge.component.search.component`.\"\n )\n )\n\n locale: str = element_field(default=\"en-US\")\n\n url_button_text: str = element_field(\n default=\"Read article\",\n help=(\n \"The text of the link button which is displayed with each \"\n \"article tile.\"\n ),\n )\n\n snippet_length: int = element_field(\n default=125,\n help=\"Length of the article snippet displayed in the tile body.\",\n )\n\n button_type: ButtonType = element_field(\n default=ButtonType.URL, help=\"Tile button type\"\n )\n\n integration: SalesforceKnowledgeIntegrationRef = element_field()\n\n async def start(self) -> List[Entry]:\n integration: SalesforceKnowledgeIntegration = await self.resolve(\n self.integration\n )\n tiles = []\n\n for article in self.search_response:\n tiles.append(\n TileElementSpec(\n title=article.title,\n description=self.snippet_format(\n article.summary or article.body, self.snippet_length\n ),\n buttons=[\n ButtonElementSpec(\n text=self.url_button_text,\n url=f\"{integration.knowledge_base_url}/articles/Knowledge/{article.url_name}\",\n type=self.button_type,\n )\n ],\n )\n )\n\n return self.respond(data=self.Response(result=tiles))\n\n @staticmethod\n def snippet_format(\n description: str, snippet_length: int\n ) -> Union[str, None]:\n if not description:\n return\n\n description = BeautifulSoup(description, \"html.parser\").text\n return (\n (description[:snippet_length] + \"...\")\n if len(description) > snippet_length\n else description\n )\n","repo_name":"meya-customers/meya-sdk","sub_path":"meya/salesforce/knowledge/component/display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":3077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9522585391","text":"from django.shortcuts import render,redirect\nfrom products.models import CheckOut,Cart\nfrom .forms import NewProductForm,ProductForm\nfrom django.contrib import messages\nfrom main.models import Product\nfrom django.shortcuts import get_object_or_404\n# Create your views here.\n\n\n\n# def AddProductView(request):\n# return render(request,'admin/add-product.html')\n\n\n\n\ndef AddProductView(request):\n if request.method==\"GET\":\n form=NewProductForm()\n return render(request, 'admiin/add-product.html',{'form':form})\n \n elif request.method == \"POST\":\n form =NewProductForm(data=request.POST,files=request.FILES) # \"data=request.POST\" malumotlarimizni oladi' \"files=request.FILES\" Rasmlarimizni oladi\n if form.is_valid():\n form.save(request)\n \n messages.success(request, \"Succesfully Created\")\n return redirect('index')\n return render(request, 'admiin/add-product.html',{'form':form})\n \n\n\n\n\ndef OrderView(request,user_id):\n orders=get_object_or_404(CheckOut,id=user_id)\n 
order=Cart.objects.filter(sav=orders)\n    sum = 0\n    for p in order:\n        sum += p.count\n    context={\n        'orders':orders,\n        'order':order,\n        'sum':sum,\n    }\n    return render(request, 'admiin/view-order.html',context=context)\n\n\n\n\n\n\ndef AdminIndexView(request):\n    products=Product.objects.all()\n    orders=CheckOut.objects.all()\n    context={\n        'products':products,\n        'orders':orders,\n    }\n    return render(request, 'admiin/index.html',context=context)\n\n\n\n\n\n\ndef product_update(request,product_id):\n    product=get_object_or_404(Product,id=product_id)\n    if request.user.is_superuser:\n        if request.method==\"GET\":\n            form=ProductForm(instance=product)\n            return render(request, 'admiin/product_update.html',{'form':form, 'pr':product})\n        elif request.method==\"POST\":\n            form=ProductForm(instance=product,data=request.POST,files=request.FILES)\n            if form.is_valid():\n                form.save()\n                # if request.FILES.getlist('images'):\n                #     ProductImage.objects.filter(product=product).delete()\n                #     for i in request.FILES.getlist(\"images\"):\n                #         ProductImage.objects.create(product=product,image=i)\n                messages.success(request, \"Succesfully Updated!\")\n                return redirect('detail',product.id)\n        return render(request, 'admiin/product_update.html',{'form':form, 'pr':product})\n    else:\n        messages.error(request, \"Acces danied!\")\n        return redirect('index')\n    \n\n# def delete(request,product_id):\n    # product=get_object_or_404(Product,id=product_id)\n    # if request.user.is_superuser:\n    #     product.delete()\n    #     messages.info(request, \"Succesfully Deleted!\")\n    #     return redirect('aindex')\n    # # else:\n    # #     messages.error(request, \"Acces danied!\")\n    # #     return redirect('index')\n    # return render(request, \"admiin/product_delete.html\")\n\n\n\n\ndef delete(request,product_id):\n    product=get_object_or_404(Product,id=product_id)\n    if request.method==\"POST\":\n        product.delete()\n        messages.info(request, \"Succesfully Deleted!\")\n        return redirect('index')\n    return render(request, \"admiin/product_delete.html\", {'product':product})\n    ","repo_name":"Husan-Muhiddinov/Rest-Stone-app","sub_path":"admiin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"70023399235","text":"\r\nimport tkinter as tk\r\ndef on_button_click(event):\r\n    text = event.widget.cget(\"text\")\r\n    if text == \"=\":\r\n        try:\r\n            result = eval(entry.get())\r\n            entry.delete(0, tk.END)\r\n            entry.insert(tk.END, str(result))\r\n        except Exception as e:\r\n            entry.delete(0, tk.END)\r\n            entry.insert(tk.END, \"Error\")\r\n    elif text == \"C\":\r\n        entry.delete(0, tk.END)\r\n    else:\r\n        entry.insert(tk.END, text)\r\n\r\nroot = tk.Tk()\r\nroot.title(\"Calculator\")\r\n\r\nentry = tk.Entry(root, font=(\"Helvetica\", 20), justify=\"right\")\r\nentry.grid(row=0, column=0, columnspan=4)\r\n\r\nbuttons = [\r\n    (\"7\", 1, 0), (\"8\", 1, 1), (\"9\", 1, 2), (\"/\", 1, 3),\r\n    (\"4\", 2, 0), (\"5\", 2, 1), (\"6\", 2, 2), (\"*\", 2, 3),\r\n    (\"1\", 3, 0), (\"2\", 3, 1), (\"3\", 3, 2), (\"-\", 3, 3),\r\n    (\"0\", 4, 0), (\".\", 4, 1), (\"=\", 4, 2), (\"+\", 4, 3)\r\n]\r\n\r\nfor button_text, row, column in buttons:\r\n    button = tk.Button(root, text=button_text, font=(\"Helvetica\", 20), padx=20, pady=20)\r\n    button.grid(row=row, column=column, sticky=\"nsew\")\r\n    button.bind(\"<Button-1>\", on_button_click)\r\n\r\n# Adjust column and row weights so the buttons expand with the window\r\nfor i in range(4):\r\n    root.grid_columnconfigure(i, weight=1)\r\nfor i in range(5):\r\n    
root.grid_rowconfigure(i, weight=1)\r\n\r\nroot.mainloop()\r\n","repo_name":"yoginipawar/Calculator","sub_path":"calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25333611903","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[130]:\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport scipy.stats as stats\nimport researchpy as rp\n\n\n# In[131]:\n\n\nheat_trainner = pd.read_csv(\"c:/users/fadic/anaconda3/Library/lib/Untitled Folder/heat_trainner.csv\")\n# printing the first 5 rows of dataset\nheat_trainner.head()\n\n\n# In[132]:\n\n\n# getting the columns of the dataset\ncolumns = list(heat_trainner.columns)\ncolumns\n\n\n# In[159]:\n\n\n# check datatype in each column\nprint(\"Column datatypes: \")\nprint(heat_trainner.dtypes)\n\n\n# In[ ]:\n\n\n\n\n\n# In[160]:\n\n\n# Loading the dataset\n\nnumeric_col = ['heat_group', 'heat_stroke', 'heat_syn_cramps', 'heat_exh', 'heat_other', 'RHABDO', 'HYPONATREMIA', 'AGE_GROUP',\n 'LOS', 'RACE_CODE']\n \n#Using Correlation analysis to depict the relationship between the numeric/continuous data variables\ncorr = heat_trainner.loc[:,numeric_col].corr()\nprint(corr)\n\n\n# In[134]:\n\n\n# names of the columns\ncolumns = ['DX1', 'DX2', 'DX3', 'DX4', 'Diagnosis__Admitting']\n\n# looping through the columns to fill the entries with NaN values with \"\"\nfor column in columns:\n heat_trainner[column] = heat_trainner[column].fillna(\"\")\n\n\n# In[135]:\n\n\n# printing the first 5 rows of dataset\nheat_trainner.head()\n\n\n# In[136]:\n\n\n# names of the columns\ncolumns = ['heat_stroke', 'heat_syn_cramps', 'heat_exh', 'heat_other', 'RHABDO','HYPONATREMIA','Death_Indicator']\n\n# looping through the columns to fill the entries with NaN values with \"\"\nfor column in columns:\n heat_trainner[column] = heat_trainner[column].fillna(0)\n\n\n# In[59]:\n\n\n# names of the columns\ncolumns = ['heat_stroke', 'heat_syn_cramps', 'heat_exh', 'heat_other', 'RHABDO','HYPONATREMIA','Death_Indicator']\n\n# looping through the columns to fill the entries with NaN values with \"\"\nfor column in columns:\n heat_trainner[column] = heat_trainner[column].format(1)\n\n\n# In[72]:\n\n\nformatted_number = round(number, 0)\nprint(formatted_number)\n\n\n# In[137]:\n\n\n# printing the first 5 rows of dataset\nheat_trainner.head()\n\n\n# In[138]:\n\n\npd.crosstab(index=heat_trainner['LOS'], columns=heat_trainner['AGE_GROUP'])\n\n\n# In[143]:\n\n\ncontigency = pd.crosstab(heat_trainner['LOS'], heat_trainner['AGE_GROUP'] )\ncontigency\n\n\n# In[144]:\n\n\n# Chi-square test of independence.\nc, p, dof, expected = chi2_contingency(contigency)\np\n\n\n# In[85]:\n\n\npd.crosstab(index=heat_trainner['AGE_GROUP'], columns=heat_trainner['RACE_CODE'])\n\n\n# In[86]:\n\n\npd.crosstab(index=heat_trainner['AGE_GROUP'], columns=heat_trainner['WORK_POSITION'])\n\n\n# In[109]:\n\n\nfrom scipy.stats import chi2_contingency\n \n# defining the table\nstat, p, dof, expected = chi2_contingency(heat_trainner)\n \n# interpret p-value\nalpha = 0.05\nprint(\"p value is \" + str(p))\nif p <= alpha:\n print('Dependent (reject H0)')\nelse:\n print('Independent (H0 holds true)')\n\n\n# In[161]:\n\n\nstats.ttest_ind(heat_trainner['heat_stroke'][heat_trainner['SEX'] == 1 ],\n heat_trainner['heat_stroke'][heat_trainner['SEX'] == 2 ])\n\n\n# In[162]:\n\n\nrp.ttest(group1= 
heat_trainner['heat_stroke'][heat_trainner['SEX'] == 1], group1_name= \"Male\",\n group2= heat_trainner['heat_stroke'][heat_trainner['SEX'] == 2], group2_name= \"Female\")\n\n\n# In[164]:\n\n\nsummary, results = rp.ttest(group1= heat_trainner['heat_stroke'][heat_trainner['SEX'] == 1], group1_name= \"Male\",\n group2= heat_trainner['heat_stroke'][heat_trainner['SEX'] == 2], group2_name= \"Female\")\nprint(summary)\n\n\n# In[165]:\n\n\nprint(results)\n\n\n# In[184]:\n\n\nrandom={1, 2, 5, 4, 3, 5, 2, 1, 3, 3, 1, 4, 3, 3, 3, 2, 3, 3, 2, 5}\n\n\n# In[185]:\n\n\nrandom.head()\n\n\n# In[177]:\n\n\nx=2\ny=3\n\n#print('x =', x)\n\n#b. print('Value of', x, '+', x, 'is', (x + x))\n\n#c. print('x =')\n\n#d. print((x + y), 'x =', (y + x))\n\n\n# In[178]:\n\n\nrandom.head()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"fadice/getthis","sub_path":"heat data project.py","file_name":"heat data project.py","file_ext":"py","file_size_in_byte":3804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7743559398","text":"import random\n\nimport plotly\nimport plotly.graph_objs as go\nimport plotly.express as px\nfrom more_itertools import powerset\nfrom plotly.subplots import make_subplots\nimport numpy as np\n\nfrom visualisation import Visualization\n\nN = 1000\nx = np.linspace(0, 1, N)\nz = 20 * np.sin(2 * np.pi * 3 * x) + 100 * np.exp(x)\nerror = 10 * np.random.randn(N)\nt = z + error\n\ntr = 0.8\nval = 0.1\nind_prm = np.random.permutation(np.arange(N))\ntrain_ind = ind_prm[:int(tr * N)]\nvalid_ind = ind_prm[int(tr * N):int((val + tr) * N)]\ntest_ind = ind_prm[int((val + tr) * N):]\nx_train, t_train, x_valid, t_valid, x_test, t_test = x[train_ind], t[train_ind], x[valid_ind], t[valid_ind], x[\n test_ind], t[test_ind]\n\nfunctions = [lambda x: np.sin(x), lambda x: np.cos(x), lambda x: np.exp(x), lambda x: x, lambda x: x ** 2,\n lambda x: x ** 3, lambda x: x ** 4, lambda x: x ** 5, lambda x: x ** 6, lambda x: x ** 7, lambda x: x ** 8,\n lambda x: x ** 9, lambda x: x ** 10, lambda x: x ** 20, lambda x: x ** 30, lambda x: x ** 40,\n lambda x: x ** 50, lambda x: x ** 60, lambda x: x ** 70, lambda x: x ** 80, lambda x: x ** 90,\n lambda x: x ** 100]\nindexes = [i for i in range(len(functions))]\nsets = [i for i in list(powerset(indexes)) if len(i) in [2, 3, 4, 5]]\nfunc_names = [\"sin(x)\", \"cos(x)\", \"exp(x)\", \"x\", \"x^2\", \"x^3\", \"x^4\", \"x^5\",\n \"x^6\", \"x^7\", \"x^8\", \"x^9\", \"x^10\", \"x^20\",\n \"x^30\", \"x^40\", \"x^50\", \"x^60\", \"x^70\", \"x^80\", \"x^90\", \"x^100\"]\nlamb = [0., 0.01, 0.001, 0.1, 0.5, 1., 5, 10, 50, 100]\nchosen_sets = random.sample(sets, 30)\nchosen_lamb = [random.sample(lamb, 5) for i in range(len(chosen_sets))]\n\n\ndef matrix_F(x, ind):\n F = np.ones((1, len(x)))\n for i in ind:\n F = np.append(F, [functions[i](x)], axis=0)\n return F.T\n\n\ndef learn(F, t, l):\n I = np.eye(F.shape[1])\n I[0][0] = 0\n return ((np.linalg.pinv(F.T.dot(F) + l * I)).dot(F.T)).dot(t)\n\n\ndef error(W, t, x, ind):\n F = matrix_F(x, ind)\n return (1 / 2) * sum((W.dot(F.T) - t) ** 2)\n\n\ndef toFixed(numObj, digits=0):\n return f\"{numObj:.{digits}f}\"\n\n\ndef get_name(functions_names, set, W):\n str = f\"y = {toFixed(W[0], 2)}\"\n for j in range(1, len(set) + 1):\n if W[j] > 0:\n str += f\" + {toFixed(W[j], 2)}{functions_names[set[j - 1]]}\"\n else:\n str += f\" - {toFixed(abs(W[j]), 2)}{functions_names[set[j - 1]]}\"\n return str\n\n\ndef get_names(functions_names, min_errors_sets, l_lst):\n names = []\n for i in range(10):\n W = 
learn(matrix_F(x_train, min_errors_sets[i]), t_train, l_lst[i])\n str = get_name(functions_names, min_errors_sets[i], W)\n names.append(str)\n return np.array(names)\n\n\ndef create_graphics(x, z, t, title, test_error, la, best_func, name=None,\n path2save=None):\n weights = learn(matrix_F(x_train, best_func), t_train, la)\n F = matrix_F(x, best_func)\n y = F.dot(weights)\n fig = go.Figure()\n fig.add_trace(go.Scatter(x=x, y=y, name='y(x, w)'))\n fig.add_trace(go.Scatter(x=x, y=z, name='z(x)'))\n fig.add_trace(go.Scatter(x=x, y=t, mode=\"markers\", name='t(x)'))\n fig.update_layout(title=f\"{title}, test error = {test_error}, {get_name(func_names, best_func, weights)}\")\n fig.show()\n fig.write_html(f\"{path2save}/{name}.html\")\n\n\nerror_valid = []\nerror_test = []\nfor i in chosen_sets:\n F = matrix_F(x_train, i)\n for l in chosen_lamb[chosen_sets.index(i)]:\n W = learn(F, t_train, l)\n error_valid.append(error(W, t_valid, x_valid, i))\n error_test.append(error(W, t_test, x_test, i))\nmin_errors_index = np.argsort(error_valid)[:10]\nmin_error_valid = np.asarray(error_valid)[min_errors_index]\nmin_error_test = np.asarray(error_test)[min_errors_index]\nl_lst = np.array([chosen_lamb[i // 5][i % 5] for i in min_errors_index])\nmin_errors_sets = [chosen_sets[i // 5] for i in min_errors_index]\n\ncreate_graphics(x, z, t, 'BEST MODEL', min_error_test[0], l_lst[0], min_errors_sets[0], name=\"ML_HW3_best_model\",\n path2save=\"C:/Users/26067/PycharmProjects/ML_HW3\")\n\nvisualisation = Visualization()\nvisualisation.models_error_scatter_plot(min_error_valid, min_error_test,\n get_names(func_names, min_errors_sets, l_lst), l_lst,\n '10 best models', show=True, save=True,\n name=\"ML_HW3_10_models\",\n path2save=\"C:/Users/26067/PycharmProjects/ML_HW3\")\n","repo_name":"Liza133/ML_HW3","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7857260247","text":"import os\n\n\nold_file = os.path.join(\"directory\", \"a.txt\")\nnew_file = os.path.join(\"directory\", \"b.kml\")\nos.rename(old_file, new_file)\n\nfrom os import rename, listdir \n\nbadprefix = \"cheese_\" \nfnames = listdir('.') \n\nfor fname in fnames: \n\tif fname.startswith(badprefix*2): \t\t\n\t\trename(fname, fname.replace(badprefix, '', 1))\n\t\t\nimport os\nitems = os.listdir(\".\")\n\nnewlist = []\nfor names in items:\n if names.endswith(\".txt\"):\n newlist.append(names)\nprint(newlist)\n'''\ndef find(str, ch): \n index = 0 \n while index < len(str): \n if str[index] == ch: \n return index \n index = index + 1 \n return -1 \n \n import string\n >>> fruit = \"banana\" \n>>> index = string.find(fruit, \"a\") \n>>> print index \n1 \n'''","repo_name":"thstoet/Unit_Test","sub_path":"04_Random_Stuff/edit_file.py","file_name":"edit_file.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31355645026","text":"\nfrom rest_framework import serializers\nfrom django.contrib.auth.models import User\nfrom django.utils import timezone\nfrom rest_framework.exceptions import NotAuthenticated, PermissionDenied\n# from .utils import average_rating\nfrom birthday.models import staffDetails\n\n\n\nclass UserSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = ['lastname', 'email']\n \n\nclass StaffDetailsSerializer(serializers.ModelSerializer):\n creator = 
UserSerializer(read_only=True)\n book = serializers.StringRelatedField(read_only=True)\n\n class Meta:\n model = staffDetails\n fields = [\n 'staff_image', 'first_name', 'middle_name', 'last_name', \n 'gender_text', 'level', 'step', 'email', 'cadre', 'department'\n ]\n\n def create(self, validated_data):\n request = self.context[\"request\"]\n creator = request.user\n \n if not creator.is_authenticated:\n raise NotAuthenticated('Authentication required.')\n book = Book.objects.get(pk=request.data['book_id'])\n return Review.objects.create(content=validated_data['content'], book=book, creator=creator,\n rating=validated_data['rating'])\n\n def update(self, instance, validated_data):\n request = self.context['request']\n creator = request.user\n if not creator.is_authenticated or instance.creator_id != creator.pk:\n raise PermissionDenied('Permission denied, you are not the creator of this review')\n instance.content = validated_data['content']\n instance.rating = validated_data['rating']\n instance.date_edited = timezone.now()\n instance.save()\n return instance\n","repo_name":"ethernalarts/mitcbdayapp","sub_path":"staffapp/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1117306604","text":"def Out_response(error=False, mensaje=\"OK\", datos=None):\n res = {\n \"error\": error,\n \"message\": mensaje,\n \"data\": datos,\n }\n return res\n\n\ndef Error_response(err, mensaje, codigo_error=None):\n res = {\n \"error\": True,\n \"message\": mensaje,\n \"data\": {\n \"Code\": codigo_error,\n \"Error message\": err\n }\n }\n return res","repo_name":"BrandonVasquez97/prueba-quick-help","sub_path":"Proyecto_Quick/api/responses.py","file_name":"responses.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16441442986","text":"import packages\nfrom tensorflow import keras\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom scikeras.wrappers import KerasClassifier\nfrom sklearn.model_selection import RandomizedSearchCV\n\nclass AE_Wrapper(BaseEstimator, TransformerMixin):\n def __init__(self, x_shape, hidden_layers):\n self.x_shape= x_shape\n self.hidden_layers = hidden_layers\n self.autoencoder = None\n\n def create_autoencoder_model(self):\n input_layer = keras.layers.Input(shape=(self.x_shape,))\n self.encoded = input_layer\n for neuron in self.hidden_layers:\n self.encoded = keras.layers.Dense(neuron, activation='relu')(encoded)\n for neuron in reversed(self.hidden_layers):\n encoded = keras.layers.Dense(neuron, activation='relu')(encoded)\n decoded = keras.layers.Dense(self.x_shape, activation='sigmoid')(encoded)\n self.autoencoder = keras.Model(input_layer, decoded)\n self.autoencoder.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n return self.autoencoder\n\n def fit(self, X, y=None, **kwargs):\n self.clf_ = KerasClassifier(\n build_fn=self.create_autoencoder_model,\n **kwargs\n )\n self.clf_.fit(X, X)\n return self\n\n def transform(self, X):\n return self.clf_.predict(X)\n\n def fit_transform(self, X, y=None, **kwargs):\n self.clf_ = KerasClassifier(\n build_fn=self.create_autoencoder_model,\n **kwargs\n )\n self.clf_.fit(X, X)\n return self.clf_.predict(X)\n \n def get_encoder(self):\n return 
self.encoded","repo_name":"Nick-prog/Phishing-AutoEncoder","sub_path":"packages/ae_wrapper.py","file_name":"ae_wrapper.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39413825161","text":"import praw\nimport text2emotion as te\nimport datetime\nimport json\nimport sqlite3\nimport time\nimport smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\n\ndef CryptoGauge():\n reddit = praw.Reddit(client_id='############################', \\\n client_secret='###########################', \\\n user_agent='#######################', \\\n username='', \\\n password='')\n\n def reddit_run(reddit):\n\n subred = reddit.subreddit('CryptoCurrency')\n hot = subred.hot(limit = 11)\n top = subred.top(limit = 11)\n\n y = next(top)\n dir(y)\n\n x = next(hot)\n dir(x)\n\n titles = []\n for i in hot:\n titles.append(i.title)\n\n\n for i in top:\n titles.append(i.title)\n\n\n \n f = open('cryptonames.json')\n\n data = json.load(f)\n\n conn = sqlite3.connect('cryptotable.db')\n connection = conn.cursor()\n\n connection.execute(\"\"\"CREATE TABLE IF NOT EXISTS crypto(\n Symbol VARCHAR(255) PRIMARY KEY,\n Total_appearances INTEGER);\"\"\")\n\n connection.execute(''' CREATE TABLE IF NOT EXISTS Post(\n Posts VARCHAR(2555) PRIMARY KEY);''')\n\n def edit_table(connection, key, value, conn, title):\n \n\n connection.execute('''\n INSERT INTO Post(Posts)\n VALUES (?)''', [title])\n conn.commit()\n\n connection.execute('''\n SELECT * FROM crypto\n ''')\n current_symbols = connection.fetchall()\n print(current_symbols)\n Found = False\n\n for j in range(0, len(current_symbols)):\n if (key.lower() == current_symbols[j][0].lower()) or (value.lower() == current_symbols[j][0].lower()): \n Found = True\n connection.execute('''\n UPDATE crypto SET Total_appearances = (?) 
WHERE Symbol = (?)''', (current_symbols[j][1]+1, key))\n\n conn.commit()\n \n if Found == False:\n connection.execute('''\n INSERT INTO crypto(Symbol, Total_appearances)\n VALUES(?, ?)''', (key, 1))\n conn.commit()\n \n return current_symbols\n\n\n for i in range(0, len(titles)): \n\n dictio = te.get_emotion(titles[i])\n pos_val = list(dictio.values())[0]\n neg_val = list(dictio.values())[3]\n \n\n if pos_val >= neg_val:\n s = titles[i]\n\n for key, value in data.items():\n \n if (key in s) or (value in s):\n connection.execute('''\n SELECT * FROM Post\n ''')\n checker = connection.fetchall()\n if len(checker) == 0:\n edit_table(connection, key, value, conn, s)\n else:\n found = 0\n for j in range(0, len(checker)):\n \n if checker[j][0] == s:\n \n found += 1\n \n if found == 0:\n edit_table(connection, key, value, conn, s)\n\n subject = 'Crypto scraper results'\n \n body = \"\"\n \n sender_email = \"####################################\"\n receiver_email = \"####################################\"\n\n\n message = MIMEMultipart()\n message[\"From\"] = sender_email\n message[\"To\"] = receiver_email\n message[\"Subject\"] = subject\n message[\"Bcc\"] = receiver_email \n\n message.attach(MIMEText(body, \"plain\"))\n\n \n text = current_symbols\n\n\n server = smtplib.SMTP_SSL('##################', 465)\n server.login(\"######################\", \"################\")\n server.sendmail(\n \"#########################\", \n \"########################\", \n text)\n server.quit()\n\n count = 0\n while count == 0:\n times = str(datetime.datetime.now())\n calltimes = ['12:00']\n hourtime = times[11:16]\n print(hourtime)\n if hourtime in calltimes:\n reddit_run(reddit)\n time.sleep(3600)\n\nif __name__ == '__main__':\n CryptoGauge()","repo_name":"McodesM/Investment-Forecaster","sub_path":"CryptoGauger.py","file_name":"CryptoGauger.py","file_ext":"py","file_size_in_byte":4696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71324425476","text":"def chooseRandomWord(listOfWords):\n import random\n word = (''.join(random.choices(listOfWords)))\n print(word) # just for practise purposes the word is printed\n guessed = []\n falseGuesses = []\n while True:\n clue = \"\"\n wordGuessed = \"\"\n for letter in word:\n if letter in guessed:\n clue = clue + letter+\" \" # this word is used as we need to print the gussed word as - - - - - \n wordGuessed = wordGuessed + letter # To store letters to check if the word is guessed or not\n else:\n clue = clue + \"_ \"\n wordGuessed = wordGuessed + \"-\"\n if (wordGuessed == word):\n print(\"Congratulations you guessed the word:\", word)\n break\n print(\"Guess a letter in the word:\", clue)\n guessedChar = input()\n if (guessedChar in guessed or guessedChar in falseGuesses):\n print(\"Already guessed\",guessedChar)\n elif (guessedChar in word):\n print(\"Correct\")\n guessed.append(guessedChar)\n else:\n print(\"Incorrect\")\n falseGuesses.append(guessedChar)\n\n \n\ndef main():\n listOfWords = ['APPLE', 'BILBO', 'CHORUSED', 'DISIMAGINE','ENSURING', 'FORMALISING', 'GLITCHES',\n 'HARMINE', 'INDENTATION', 'JACKED', 'KALPACS', 'LAUNDRY', 'MASKED', 'NETTED',\n 'OXFORD', 'PARODY', 'QUOTIENTS', 'RACERS', 'SADNESS', 'THYREOID', 'UNDUE',\n 'VENT', 'WEDGED', 'XERIC', 'YOUTHHOOD', 'ZIFFS']\n chooseRandomWord(listOfWords)\nif __name__ == \"__main__\":\n main()\n","repo_name":"ghulamghousdev/Python-Tasks","sub_path":"Assignment 1/Part 
3/Task-8(b).py","file_name":"Task-8(b).py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27420473555","text":"from datetime import datetime\nimport socket\nimport threading\n\n\nclass TCPHandler:\n \"\"\"Class used to handle a TCP connection for a server-client chat\n\n ----------------------------------------------------------------\n\n Attributes\n ----------\n host : str\n name of the server host\n port : int\n number of the port bound to the host\n hostname : str\n name of the host\n socket_server = tuple\n socket of the server\n\n\n Methods\n -------\n connection_handler\n handling of the incoming request\n receiving\n ask the client for a username and receive his messages\n sending\n send messages to the client connected\n disconnection\n disconnect the client\n \"\"\"\n\n def __init__(self, host_server, port_server, hostname):\n \"\"\"Creating the server's socket.\"\"\"\n\n self.host = host_server\n self.port = port_server\n self.hostname = hostname\n self.socket_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # UPDATE Reuse address in case of many subsequent tries because of the time wait after closing the connection\n self.socket_server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.socket_server.bind((self.host, self.port))\n self.socket_server.listen(5)\n\n def connection_handler(self):\n \"\"\"Handle the incoming request, starting the threads to send and receive messages simultaneously,\n manage exception handling in case of the closing of the server.\"\"\"\n\n try:\n client_socket, address = self.socket_server.accept()\n print('Press \"CTRL+C\" to close the server.')\n print(f'{address[0]}::{address[1]} is connected')\n sending_thread = threading.Thread(target=self.sending, args=(client_socket, ))\n receiving_thread = threading.Thread(target=self.receiving, args=(client_socket, ))\n sending_thread.start()\n receiving_thread.start()\n try:\n sending_thread.join()\n receiving_thread.join()\n except KeyboardInterrupt:\n print('Server closed!')\n client_socket.sendall(bytes('The server has disconnected! 
'\n 'You may leave the chat.', 'utf-8'))\n # UPDATE Double two-way handshake\n client_socket.shutdown(1)\n\n client_socket.close()\n self.socket_server.close()\n\n except KeyboardInterrupt:\n print('Server closed!')\n self.socket_server.close()\n # In case of FIN segment\n except OSError:\n pass\n\n def receiving(self, sock):\n \"\"\"Receive and print messages from the client connected.\"\"\"\n\n try:\n sock.sendall(bytes('Enter your username (Type \"[quit]\" or close the window to quit the chat):\\n', 'utf-8'))\n client_name = sock.recv(2048).decode()\n if client_name == '[quit]':\n # UPDATE Double two-way handshake\n sock.shutdown(1)\n\n sock.close()\n print('User left the chat before joining.')\n else:\n sock.sendall(bytes(f'Welcome to the chat with {self.hostname}!\\nStart typing your messages!\\n'\n f'Type \"[quit]\" or close the window to quit the chat.\\n', 'utf-8'))\n print(client_name, 'has joined the chat!')\n while True:\n message = sock.recv(1024).decode()\n if message == '[quit]':\n self.disconnection(sock, client_name)\n elif message:\n current_time = datetime.now()\n clock_time = current_time.strftime('%H:%M:%S')\n print(f'[{clock_time}] {client_name} > {message}')\n else:\n self.disconnection(sock, client_name)\n except ConnectionAbortedError:\n pass\n # In case of FIN\n except OSError:\n pass\n\n def sending(self, sock):\n \"\"\"Send messages to the client connected and print the message sent.\"\"\"\n\n try:\n while True:\n input_message = input()\n current_time = datetime.now()\n clock_time = current_time.strftime('%H:%M:%S')\n message = f'{self.hostname} > {input_message}'\n print(f'[{clock_time}] {message}')\n sock.sendall(bytes(f'[{clock_time}] {message}\\r\\n', 'utf-8'))\n except EOFError:\n pass\n except OSError:\n print('The connection has been terminated.')\n\n def disconnection(self, sock, username):\n \"\"\"Closing the socket of the client when he is disconnecting, print that the user has left the chat.\"\"\"\n # UPDATE Double two-way handshake\n sock.shutdown(1)\n\n sock.close()\n print(f'{username} left the chat!')\n\n\ndef main():\n host = socket.gethostname()\n port = 10000\n my_ip = socket.gethostbyname(host)\n print('My IP is', my_ip)\n print('The host name is:', host)\n print(f\"Waiting for someone to connect to {host}::{port}\")\n running_server = TCPHandler('', port, host)\n running_server.connection_handler()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Wilscos/chat-application","sub_path":"TCP_Server.py","file_name":"TCP_Server.py","file_ext":"py","file_size_in_byte":5445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28357276751","text":"class AF2P:\r\n def __init__(self, Q, q0, F, Sigma, Gamma, Delta):\r\n self.Q = Q\r\n self.q0 = q0\r\n self.F = F\r\n self.Sigma = Sigma\r\n self.Gamma = Gamma\r\n self.Delta = Delta\r\n self.Stack1 = []\r\n self.Stack2 = []\r\n\r\n def constructor(nombreArchivo):\r\n if not nombreArchivo.endswith(\".msm\"):\r\n nombreArchivo += \".msm\"\r\n Q = []\r\n q0 = ''\r\n F = []\r\n Sigma = []\r\n Gamma = []\r\n Delta = {}\r\n leyendo = ''\r\n with open(nombreArchivo) as f:\r\n for line in f:\r\n \r\n if line[0] == \"#\":\r\n leyendo = line.rstrip()\r\n \r\n elif leyendo == '#states':\r\n Q.append(line.rstrip())\r\n \r\n elif leyendo == '#initial':\r\n q0 = line.rstrip()\r\n \r\n elif leyendo == '#accepting':\r\n F.append(line.rstrip())\r\n \r\n elif leyendo == '#tapeAlphabet':\r\n cadena = line.rstrip()\r\n if '-' not in cadena:\r\n 
Sigma.append(cadena)\r\n else:\r\n inicio, fin = cadena.split('-')\r\n letras = ''.join(chr(i) for i in range(ord(inicio), ord(fin) + 1))\r\n for x in letras:\r\n Sigma.append(x)\r\n \r\n elif leyendo == '#stackAlphabet':\r\n cadena = line.rstrip()\r\n if '-' not in cadena:\r\n Gamma.append(cadena)\r\n else:\r\n inicio, fin = cadena.split('-')\r\n letras = ''.join(chr(i) for i in range(ord(inicio), ord(fin) + 1))\r\n for x in letras:\r\n Gamma.append(x)\r\n \r\n elif leyendo == '#transitions':\r\n partes = line.rstrip().split('>')\r\n estadoAnterior, entrada, delta1Anterior, delta2Anterior = partes[0].split(':')\r\n transiciones = partes[1].split(';')\r\n for transicion in transiciones:\r\n estadoNuevo, delta1Nuevo, delta2Nuevo = transicion.split(':')\r\n if (estadoAnterior, entrada, delta1Anterior, delta2Anterior) not in Delta:\r\n Delta[(estadoAnterior, entrada, delta1Anterior, delta2Anterior)] = []\r\n Delta[(estadoAnterior, entrada, delta1Anterior, delta2Anterior)].append((estadoNuevo, delta1Nuevo, delta2Nuevo))\r\n\r\n return AF2P(Q, q0, F, Sigma, Gamma, Delta)\r\n \r\n def ToString(self):\r\n print('#states')\r\n for x in self.Q:\r\n print(x)\r\n print('#initial')\r\n print(self.q0)\r\n print('#accepting')\r\n for x in self.F:\r\n print(x)\r\n print('#tapeAlphabet')\r\n if len(self.Sigma)>1:\r\n print(self.Sigma[0]+'-'+self.Sigma[-1])\r\n else:\r\n print(self.Sigma[0])\r\n print('#stackAlphabet')\r\n if len(self.Gamma)>1:\r\n print(self.Gamma[0]+'-'+self.Gamma[-1])\r\n else:\r\n print(self.Gamma[0])\r\n print('#transitions')\r\n for key, transitions in self.Delta.items():\r\n estadoAnterior, entrada, delta1Anterior, delta2Anterior = key\r\n transition_str = []\r\n for transition in transitions:\r\n estadoNuevo, delta1Nuevo, delta2Nuevo = transition\r\n transition_str.append(f\"{estadoNuevo}:{delta1Nuevo}:{delta2Nuevo}\")\r\n \r\n transitions_str = ';'.join(transition_str)\r\n print(f\"{estadoAnterior}:{entrada}:{delta1Anterior}:{delta2Anterior}>{transitions_str}\")\r\n\r\n def exportar(self, nombreArchivo):\r\n if not nombreArchivo.endswith(\".msm\"):\r\n nombreArchivo += \".msm\"\r\n\r\n with open(nombreArchivo, \"w\") as archivo:\r\n archivo.write(\"#states\\n\")\r\n for estado in self.Q:\r\n archivo.write(estado + \"\\n\")\r\n\r\n archivo.write(\"#initial\\n\")\r\n archivo.write(self.q0 + \"\\n\")\r\n\r\n archivo.write(\"#accepting\\n\")\r\n for estado in self.F:\r\n archivo.write(estado + \"\\n\")\r\n\r\n archivo.write(\"#tapeAlphabet\\n\")\r\n if len(self.Sigma) > 1:\r\n archivo.write(self.Sigma[0] + \"-\" + self.Sigma[-1] + \"\\n\")\r\n else:\r\n archivo.write(self.Sigma[0] + \"\\n\")\r\n\r\n archivo.write(\"#stackAlphabet\\n\")\r\n if len(self.Gamma) > 1:\r\n archivo.write(self.Gamma[0] + \"-\" + self.Gamma[-1] + \"\\n\")\r\n else:\r\n archivo.write(self.Gamma[0] + \"\\n\")\r\n\r\n archivo.write(\"#transitions\\n\")\r\n for key, transitions in self.Delta.items():\r\n estadoAnterior, entrada, delta1Anterior, delta2Anterior = key\r\n transition_str = []\r\n for transition in transitions:\r\n estadoNuevo, delta1Nuevo, delta2Nuevo = transition\r\n transition_str.append(f\"{estadoNuevo}:{delta1Nuevo}:{delta2Nuevo}\")\r\n\r\n transitions_str = \";\".join(transition_str)\r\n archivo.write(f\"{estadoAnterior}:{entrada}:{delta1Anterior}:{delta2Anterior}>{transitions_str}\\n\")\r\n \r\n#afpd1 = AF2P(['q0', 'q1','q2'], 'q0', ['q2'], ['a', 'b'], ['A-D'],\r\n # {('q0', 'a', '$','$'): [('q0','A','A'),('q2','B','A')], \r\n # ('q0', 'a', 'A','B'): [('q1', '$','B')],\r\n # ('q1', 'b', 
'A','D'): [('q1', '$','B')]})\r\n#afpd1.ToString()\r\n#Factor 16\r\n#afpd2 = AF2P.constructor(\"intentar\")\r\n#Factor 17\r\n#afpd2 = AF2P.constructor(\"intentar\")\r\n#afpd2.ToString()\r\n#Factor 18\r\n# afpd2 = AF2P.constructor(\"intentar\")\r\n# afpd2.exportar(\"hola\")","repo_name":"glondonot/ProcesamientodeAutomatas","sub_path":"T3/AF2P.py","file_name":"AF2P.py","file_ext":"py","file_size_in_byte":5974,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73661464833","text":"import functools\nimport logging\nfrom spaceone.api.inventory.v1 import region_pb2\nfrom spaceone.core.pygrpc.message_type import *\nfrom spaceone.inventory.model.region_model import Region\n\n__all__ = ['RegionInfo', 'RegionsInfo', 'RegionMemberInfo', 'RegionMembersInfo']\n\n_LOGGER = logging.getLogger()\n\n\ndef RegionInfo(region_vo: Region, minimal=False):\n info = {\n 'region_id': region_vo.region_id,\n 'name': region_vo.name,\n 'state': region_vo.state\n }\n\n if not minimal:\n info.update({\n 'created_at': change_timestamp_type(region_vo.created_at),\n 'deleted_at': change_timestamp_type(region_vo.deleted_at),\n 'tags': change_struct_type(region_vo.tags),\n 'domain_id': region_vo.domain_id\n })\n\n return region_pb2.RegionInfo(**info)\n\n\ndef RegionsInfo(region_vos, total_count, **kwargs):\n return region_pb2.RegionsInfo(results=list(map(functools.partial(RegionInfo, **kwargs), region_vos)),\n total_count=total_count)\n\n\ndef RegionMemberInfo(region_map_info):\n info = {\n 'region_info': RegionInfo(region_map_info['region'], minimal=True),\n 'user_info': change_struct_type(region_map_info['user']),\n 'labels': change_list_value_type(region_map_info['labels'])\n }\n\n return region_pb2.RegionMemberInfo(**info)\n\n\ndef RegionMembersInfo(region_map_vos, total_count, **kwargs):\n results = list(map(lambda region_map_vo: RegionMemberInfo(region_map_vo), region_map_vos))\n\n return region_pb2.RegionMembersInfo(results=results, total_count=total_count)\n","repo_name":"choonho/inventory","sub_path":"src/spaceone/inventory/info/region_info.py","file_name":"region_info.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72920037633","text":"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import String\nfrom geometry_msgs.msg import Pose, PoseWithCovariance\nfrom nav_msgs.msg import Odometry\nfrom sensor_msgs.msg import LaserScan\n\nimport matplotlib.pyplot as plt\nimport ast\n\ndef ekf_cb(data):\n EKFP.xbuffer.append(data.pose.pose.position.x)\n EKFP.ybuffer.append(data.pose.pose.position.y)\n EKFP._plot_buffer()\n\nclass EKF_Printer():\n def __init__(self):\n self.xbuffer = []\n self.ybuffer = []\n plt.ion()\n def _plot_buffer(self):\n plt.plot(self.xbuffer, self.ybuffer, '.')\n plt.draw()\n plt.pause(0.001)\n \n\ndef VicParkListener():\n \n rospy.init_node(\"VicParkListener\", anonymous=True)\n rospy.Subscriber('odom_ekf', Odometry, ekf_cb)\n rospy.spin()\n\nif __name__ == \"__main__\":\n EKFP = EKF_Printer()\n VicParkListener()\n","repo_name":"adleris/msp_vicpark","sub_path":"scripts/vicpark_listener.py","file_name":"vicpark_listener.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73833948033","text":"# USAGE\n# python train.py --dataset data/train_images --file input/train.json --model fashion.model --labelbin mlb.pickle\n\n# import the 
necessary packages\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.optimizers import Adam\nfrom keras.preprocessing.image import img_to_array\nfrom sklearn.preprocessing import MultiLabelBinarizer\nfrom imageCNN.usevgg16 import UseVGG16\nfrom imutils import paths\nimport numpy as np\nimport argparse\nimport random\nimport pickle\nimport cv2\nimport os\nimport json\nfrom pandas.io.json import json_normalize\nimport sys\nimport gc\n\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-d\", \"--dataset\", required=True,\n\thelp=\"path to input dataset (i.e., directory of images)\")\nap.add_argument(\"-f\", \"--file\", required=True,\n\thelp=\"path to input label json file (i.e., json of images, labels)\")\nap.add_argument(\"-m\", \"--model\", required=True,\n\thelp=\"path to output model\")\nap.add_argument(\"-l\", \"--labelbin\", required=True,\n\thelp=\"path to output label binarizer\")\nargs = vars(ap.parse_args())\n\n# initialize the number of epochs to train for, initial learning rate,\n# batch size, and image dimensions\nEPOCHS = 3\nINIT_LR = 1e-3\nBS = 64\nIMAGE_DIMS = (150, 150, 3)\n\n# grab the image paths and randomly shuffle them\nprint(\"[INFO] loading images...\")\nimagePaths = sorted(list(paths.list_images(args[\"dataset\"])))\nrandom.seed(42)\nrandom.shuffle(imagePaths)\n\n# process json annotation dictionary\noriJSON = json.load(open( args[\"file\"] ))\nori_label = json_normalize(oriJSON[\"annotations\"])\nlabeldist = {}\nsetLis = set()\nfor i in range( len( ori_label) ):\n\tlis = ori_label.loc[i]['labelId']\n\timgId = ori_label.loc[i]['imageId']\n\tlabeldist[imgId] = lis\n\tsetLis = setLis | set(lis)\n\n# binarize the labels using scikit-learn's special multi-label\n# binarizer implementation: one-hot encoding the label\n# Initiate with a set of classes\nmlb = MultiLabelBinarizer(classes = list(setLis))\nmlb.fit_transform([(\"1\", \"2\"), (\"3\",\"4\"),(\"5\",)]) # lazy evaulation. need to init first. 
Otherwise, problematic.\n\nprint(\"[INFO] compiling model...\")\nmodel = UseVGG16.build(\n\twidth=IMAGE_DIMS[1], height=IMAGE_DIMS[0],\n\tdepth=IMAGE_DIMS[2], classes=len(mlb.classes_),\n\tfinalAct=\"sigmoid\")\n\n# initialize the optimizer (SGD is sufficient)\nopt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS) #learning rate = INIT_LR = 1e-3\n\n# compile the model using binary cross-entropy\nmodel.compile(loss=\"binary_crossentropy\", optimizer=opt, metrics=[\"accuracy\"])\n\t \ndef getImgs(files):\n#\tNeed to manipulate the images here later -- shake , shift, rotation, flip.., etc.\n\tchunkdata = []\n\tfor imagePath in files:\n\t\t# load the image, pre-process it, and store it in the data list\n\t\timage = cv2.imread(imagePath)\n\t\timage = cv2.resize(image, (IMAGE_DIMS[1], IMAGE_DIMS[0]))\n\t\timage = img_to_array(image)\n\t\tchunkdata.append(image)\n\t# scale the raw pixel intensities to the range [0, 1]\n\tchunkdata = np.array(chunkdata, dtype=\"float\") / 255.0\n\treturn chunkdata\n\ndef getLabels(files):\n\tchunklabels = []\n\tfor imagePath in files:\n\t\t# extract set of class labels from the image path and update the labels list\n\t\timgId = imagePath.split(os.path.sep)[-1].split(\".\")[0]\n\t\tl = labeldist[imgId]\n\t\tchunklabels.append(l)\n\tchunklabels = np.array(chunklabels)\n\tchunklabels = mlb.fit_transform(chunklabels)\n\treturn chunklabels\n\t\ndef imageLoader(files, batch_size):\n\tL = len(files)\n\twhile 1:\n\t\tbatch_start = 0\n\t\tbatch_end = batch_size\n\t\twhile batch_start < L:\n\t\t\tlimit = min(batch_end, L)\n\t\t\tX = getImgs(files[batch_start:limit])\n\t\t\tY = getLabels(files[batch_start:limit])\n\t\t\tyield (X, Y) #a tuple with two numpy arrays with batch_size samples\n\t\t\tdel (X, Y) \n\t\t\tgc.collect() \n\t\t\tbatch_start += batch_size \n\t\t\tbatch_end += batch_size\n\n# train the network\nprint(\"[INFO] training network...\")\nimglab_gen = imageLoader(imagePaths, batch_size = BS )\nmodel.fit_generator( imglab_gen , steps_per_epoch= len(imagePaths) // BS, epochs=EPOCHS, verbose=1)\n\n# save the model to disk\nprint(\"[INFO] serializing network...\")\nmodel.save( args[\"model\"] )\n\n# save the multi-label binarizer to disk\nprint(\"[INFO] serializing label binarizer...\")\nf = open( args[\"labelbin\"] , \"wb\")\nf.write(pickle.dumps(mlb))\nf.close()\n\n\n\t\n\t","repo_name":"hlk217/capstone","sub_path":"Hueyling/IMG_ANA/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27316911245","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits import mplot3d\r\n\r\n#square\r\n#coordinates=[(0,0),(1*0.25,0),(1*0.25,1*0.25),(1*0.25,-1*0.25),(0,1*0.25),(-1*0.25,1*0.25),(-1*0.25,0),(-1*0.25,-1*0.25),(0,-0.25),(3,0),(3,3),(3,-3),(0,3),(-3,3),(-3,0),(-3,3),(0,-3),(-3,-3)]\r\n\r\n#Y configuration\r\n#coordinates=[(0, 1), (0.69802, 1.403), (1.60362, 1.92585), (2.60862, 2.50609), (3.68415, 3.12705), (4.81537, 3.78016), (5.99304, 4.46008), (-0.69802, 1.403), (-1.60362, 1.92585), (-2.60862, 2.50609), (-3.68415, 3.12705), (-4.81537, 3.78016), (-5.99304, 4.46008), (-0.0, 0.194), (-0.0, -0.8517), (-0.0, -2.01218), (-0.0, -3.25409), (-0.0, -4.56031), (-0.0, -5.92017)]\r\n\r\n#Circular concentric1\r\ncoordinates=[(0,0),(1,0),(0,1),(-1,-1),(0.5,-1)]\r\nfor theta in np.arange(0,2*np.pi,2*np.pi/9):\r\n coordinates.append((6*np.cos(theta),6*np.sin(theta)))\r\nfor theta in np.arange(0,2*np.pi,2*np.pi/5):\r\n 
coordinates.append((4*np.cos(theta),4*np.sin(theta)))\r\n#Circular\r\n'''coordinates=[(0,0)]\r\nfor theta in np.arange(0,2*np.pi,2*np.pi/19):\r\n coordinates.append((6*np.cos(theta),6*np.sin(theta)))'''\r\n\r\n#semi-circle\r\n'''coordinates=[(0,0)]\r\nfor theta in np.arange(0,np.pi+np.pi/17,np.pi/17):\r\n e=1\r\n A=6\r\n coordinates.append((A*np.cos(theta),A*np.sin(theta)))'''\r\n\r\n#random\r\n#coordinates=[(5.93, -2.49), (5.15, -1.39), (4.52, 0.06), (-1.62, 3.07), (2.67, 2.29), (-4.43, -5.39), (-1.06, -1.85), (-4.2, 2.18), (2.58, 1.08), (3.34, -2.59), (5.74, 3.83), (4.55, -2.94), (4.2, 2.1), (2.07, 0.66), (3.9, 1.01), (-4.0, -5.43), (-0.32, -2.12), (0.1, 1.37), (1.84, 4.74)]\r\n# creating list to store values of u,v\r\nuvplane=[]\r\n\r\n#setting plot parameters\r\nplt.figure(1)\r\nax=plt.axes(projection='3d')\r\nplt.figure(2)\r\naxes2=plt.axes([0.05,0.1,0.4,0.8],xlim=(-6.5,6.5),ylim=(-6.5,6.5))\r\naxes3=plt.axes([0.55,0.1,0.4,0.8],xlim=(-14,14),ylim=(-14,14))\r\nno_of_tel=0\r\n\r\nfor x1,y1 in coordinates:\r\n #counting number of telescopes\r\n no_of_tel+=1\r\n\r\n # plotting location of telescope\r\n axes2.plot(x1,y1,'rs',markersize=2)\r\n \r\n for x2,y2 in coordinates:\r\n y=(y1-y2)\r\n x=(x1-x2)\r\n\r\n # step size for snapshot \r\n h=np.pi/8\r\n\r\n # setting u and v list for plotting\r\n uplane=[]\r\n vplane=[]\r\n\r\n # declination\r\n delta=np.pi/4\r\n\r\n # lattitude\r\n theta=np.pi/6\r\n\r\n # number of snapshots\r\n snapshot=0\r\n \r\n # calculating values of u,v with earth rotation\r\n \r\n for phi in np.arange(0,h,h):\r\n #adding number of snapshots \r\n snapshot+=1\r\n\r\n #calculating u and v from X, Y in telescopes and declination and hour angle\r\n v=y*(np.cos(theta)*np.cos(phi)*np.sin(delta)+np.cos(delta)*np.sin(theta))+x*np.sin(phi)*np.sin(delta)\r\n u=-y*np.cos(theta)*np.sin(phi)+x*np.cos(phi)\r\n\r\n # appending it to u-plane and v-plane\r\n uplane.append(u)\r\n vplane.append(v)\r\n\r\n # multiplying by 1000 for wavelength = 0.1 m and unit of 100m \r\n u=1000*u\r\n v=1000*v\r\n\r\n # adding u,v coordinate to list\r\n uvplane.append((u,v))\r\n\r\n # plotting uv line for 1 pair\r\n axes3.plot(uplane,vplane,\"bs-\",markersize=0.5,linewidth=0.3)\r\n\r\n# removing zeros from uv plane\r\nfor j in range(no_of_tel*snapshot):\r\n uvplane.remove((0,0))\r\n####del\r\nprint(no_of_tel)\r\n \r\n#axes2.grid()\r\n#axes3.grid()\r\n \r\n# defining exp(i*2pi(ux+vy))\r\ndef z(u,v):\r\n l=np.arange(l_min,l_max+h,h)\r\n m=np.arange(m_min,m_max+h,h)\r\n X,Y=np.meshgrid(l,m)\r\n y=np.exp(2*np.pi*(complex(0,1))*(-u*X-v*Y))\r\n return y\r\n\r\n# setting limits of lm plane\r\nl_min=-2*(10**-3)\r\nl_max=2*(10**-3)\r\nm_min=-2*(10**-3)\r\nm_max=2*(10**-3)\r\nh=10**-5\r\ndelta_v=12.5*(10**6)\r\nR=377\r\nkb=1.38*(10**-23)\r\nTb=585/(5**0.5)\r\nAo=np.pi*(4.5**2)\r\nlambda1=0.1\r\n#defining actual brightness distribution\r\ndef I(l,m):\r\n return (kb*Tb*Ao/(lambda1**2))*(np.exp(1000*(10**6)*(-(l-lo)**2-(m-mo)**2))+np.exp(-1000*(10**6)*(((l-l1)**2+(m-m1)**2)))+np.exp(-1000*(10**6)*(((l-l2)**2+(m-m2)**2))))\r\n\r\n#creating dictionary to store response of each uv pair\r\nw={}\r\n\r\n#setting l amd m axis\r\nl=np.arange(l_min,l_max+h,h)\r\nm=np.arange(m_min,m_max+h,h)\r\n\r\n#setting arrays for image , synthesised beam and meshgrid\r\ns=np.size(m)\r\nimage=np.zeros((s,s))\r\nSbeam=np.zeros((s,s))\r\nX,Y=np.meshgrid(l,m)\r\nNoise_image=np.zeros((s,s))\r\n# adding elements in actual brightness 
distribution\r\nlo=0.4*(10**-3)\r\nmo=0.2*(10**-3)\r\nl1=-0.4*(10**-3)\r\nm1=0.6*(10**-3)\r\nl2=0.7*(10**-3)\r\nm2=0.5*(10**-3)\r\n\r\n#weight\r\nweight={}\r\n\r\n# list for evaluation of overlappig of \r\noverlap1=[]\r\noverlap2=[]\r\nfor u,v in uvplane:\r\n c1=0\r\n c2=0\r\n for u1,v1 in uvplane:\r\n if ((u-u1)**2+(v-v1)**2)<8100:\r\n c1=c1+1\r\n if ((u-u1)**2+(v-v1)**2)<2025:\r\n c2=c2+1\r\n overlap1.append(c1)\r\n overlap2.append(c2)\r\n\r\n # adding element to weight dictionary\r\n weight[(u,v)]=1\r\n \r\n# adding noise\r\nooo=1\r\nnoise_temp=60\r\nnoise_stddev=6.98*(10**-16)\r\nnoise_mean=0\r\nimport os\r\nos.chdir('C:\\\\Users\\\\lenovo\\\\Desktop\\\\project\\\\Graphs\\\\configurations folder')\r\nout_text=open('Data_config.txt',mode='a')\r\nout_text.write('\\n\\n\\n')\r\nout_text.write('enter name of configuration {}'.format(input('enter name of configuration')))\r\nbbb=[]\r\nddd=[]\r\nfor rep in range(128):\r\n Noise_image=np.zeros((s,s))\r\n for u,v in uvplane:\r\n exponent=z(-u,-v)\r\n # calculating response\r\n noise_response=(np.random.normal(0,noise_stddev)/(R*delta_v))\r\n\r\n # response of uv pair \r\n #w[(u,v)]=(np.sum(np.sum(I(X,Y)*z(u,v),axis=0)))*h*h+ noise_response\r\n\r\n # creating image from response\r\n #image=image+(z(-u,-v)*(w[(u,v)]/(weight[(u,v)]*2*np.pi)))*8100\r\n\r\n #creating noise image from noise response\r\n Noise_image=Noise_image+(exponent*(noise_response/(weight[(u,v)])))\r\n \r\n #creating synthesised beam\r\n #Sbeam=Sbeam+(z(-u,-v)/(weight[(u,v)]))\r\n Noise_image=Noise_image*8100/(2*np.pi)\r\n # parametrs/outputs for evaluation in noise reduction of configurations\r\n power=(image.real**2)\r\n output1=np.std(Noise_image.real)\r\n output2=np.max(Noise_image.real)\r\n output3=np.min(Noise_image.real)\r\n output4=output2-output3\r\n out_text.write('\\nmean noise deviation is {}, max is {}, min is {}, bound is {}, max of image is {} '.format(output1,output2,output3,output4,np.max(image.real)))\r\n print('mean noise deviation is {}, max is {}, min is {}, bound is {}, max of image is {} '.format(output1,output2,output3,output4,np.max(image.real)))\r\n bbb.append(abs(output2))\r\n bbb.append(abs(output3))\r\n ddd.append(output1)\r\nprint(np.mean(bbb))\r\nprint(np.std(bbb))\r\nprint(np.std(bbb)/16)\r\nprint(np.mean(ddd))\r\nprint('here is alpha {}'.format(np.mean(bbb)/16))\r\nout_text.write( '\\nmean max is {}'.format(np.mean(bbb)))\r\nout_text.write('\\nstd dev is {}'.format(np.std(bbb)))\r\nout_text.write('\\n mean deviation in noise {}'.format(np.mean(ddd)))\r\n#plotting synthesised beam\r\n'''surf=ax.contourf(X,Y,Sbeam.real,30,cmap='gnuplot')\r\nplt.sca(ax)\r\nplt.colorbar(surf)\r\nax.set_title('Synthesised Beam')'''\r\n\r\n# settig labels of telescope arrangement plot\r\n'''axes2.set_title('Arrangement of Telescopic array')\r\naxes3.set_title('uv plane')\r\naxes3.xaxis.set_label('u')\r\naxes3.yaxis.set_label('v')'''\r\n\r\n# setting plot for image analysis\r\n'''plt.figure(3)\r\naxesf=plt.axes([0.05,0.1,0.45,0.8],projection='3d')\r\naxesf2=plt.axes([0.55,0.1,0.45,0.8],projection='3d')\r\naxesf.set_title('actual Distribution')\r\naxesf2.set_title('synthesised image')\r\nplot1=axesf.contourf(X,Y,I(X,Y),30,cmap='gnuplot')\r\nplt.sca(axesf)\r\nplt.colorbar(plot1)\r\nplot2=axesf2.contourf(X,Y,image.real,30,cmap='gnuplot',vmin=0)\r\nplt.sca(axesf2)\r\nplt.colorbar(plot2)'''\r\npower=(Sbeam.real)**2\r\n\r\n# setting plot for noise image 
analysis\r\n'''plt.figure()\r\nax5=plt.axes(projection='3d')\r\nnoise_ax=plt.contourf(X,Y,Noise_image.real,30,cmap='gnuplot')'''\r\n\r\n# creating plot for overlapping analysis\r\nplt.figure()\r\nplt.subplot(211)\r\na,bb,cc=plt.hist(overlap1,np.arange(-0.5,10,1),density=True)\r\nprint(a)\r\nplt.subplot(212)\r\na1,bb,cc=plt.hist(overlap2,np.arange(-0.5,10,1),density=True)\r\nprint(a1)\r\n\r\n# calculating THETAhpbw\r\n'''P_l=power[200,:]\r\nP_m=power[:,200]\r\nc3=0\r\nratio=1\r\nwhile ratio>0.5 :\r\n ratio=P_l[200+c3]/P_l[200]\r\n c3=c3+1\r\nTheta_l=2*60*h*c3*180/np.pi\r\nc4=0\r\nratio=1\r\nwhile ratio>0.5 :\r\n ratio=P_m[200+c4]/P_m[200]\r\n c4=c4+1\r\nTheta_m=(2*60*h*c4)*180/np.pi'''\r\n\r\n#creating output file\r\n\r\n\r\n\r\nout_text.close()\r\n# calculating Side lobe level\r\n#print('dimension of theta hpbw are {}x{}'.format(Theta_l,Theta_m))\r\n\r\n#plt.show()\r\ndef plotj(i):\r\n\tplt.plot(l,power[i,:])\r\n\tplt.grid()\r\n\tplt.title(str(i))\r\n\tplt.show()\r\n\r\nfor j in np.arange(175,201,1):\r\n #plt.get_current_fig_manager().window.state('zoomed')\r\n #plotj(j)\r\n ll=0\r\ndef Slc(SL):\r\n SLL=10*np.log(SL/power[200,200])/np.log(10)\r\n print('SLL is {}'.format(SLL))\r\n return SLL\r\n\r\n","repo_name":"harsimranss/SRP","sub_path":"Noise_analysis.py","file_name":"Noise_analysis.py","file_ext":"py","file_size_in_byte":9255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23584103211","text":"f1 = open(\"B-small-attempt3.in\",\"r\")\nf2 = open(\"out.txt\",\"w\")\n\ndata = f1.readlines()\nk = int((data[0].split())[0])\nfor ind in range(1,k+1):\n dim=data[ind].split()\n n=int(dim[0])\n r=int(dim[1])\n o=int(dim[2])\n y=int(dim[3])\n g=int(dim[4])\n b=int(dim[5])\n v=int(dim[6])\n if b<=o and o!=0 and n>b+o:\n f2.write(\"Case #\"+str(ind)+\": IMPOSSIBLE\\n\")\n continue\n if r<=g and g!=0 and n>r+g:\n f2.write(\"Case #\"+str(ind)+\": IMPOSSIBLE\\n\")\n continue\n if y<=v and v!=0 and n>y+v:\n f2.write(\"Case #\"+str(ind)+\": IMPOSSIBLE\\n\")\n continue\n R = \"RG\"*g+\"R\"\n Y = \"YV\"*v+\"Y\"\n B = \"BO\"*o+\"B\"\n r-=g\n y-=v\n b-=o\n if r>y+b or y>b+r or b>y+r:\n f2.write(\"Case #\"+str(ind)+\": IMPOSSIBLE\\n\")\n continue\n st=\"\"\n somma=r+y+b\n while somma>1:\n if r==max(r,y,b):\n if y==max(y,b):\n st+=\"RY\"\n r-=1\n y-=1\n else:\n st+=\"RB\"\n r-=1\n b-=1\n elif y==max(r,y,b):\n if r==max(r,b):\n st+=\"YR\"\n y-=1\n r-=1\n else:\n st+=\"YB\"\n y-=1\n b-=1\n else:\n if r==max(r,y):\n st+=\"BR\"\n b-=1\n r-=1\n else:\n st+=\"BY\"\n b-=1\n y-=1\n somma-=2\n if r==1:\n st+=\"R\"\n elif b==1:\n st+=\"B\"\n elif y==1:\n st+=\"Y\"\n ss = list(st)\n l = len(ss)\n corr = True\n if l>0 and ss[0]==ss[l-1]:\n corr = False\n i=l-1\n while corr==False:\n ss[i-1],ss[i]=ss[i],ss[i-1]\n i-=1\n if ss[i]!=ss[i-1]:\n corr=True\n if \"R\" in ss:\n ss[ss.index(\"R\")]=R\n if \"Y\" in ss:\n ss[ss.index(\"Y\")]=Y\n if \"B\" in ss:\n ss[ss.index(\"B\")]=B\n f2.write(\"Case #\"+str(ind)+\": \"+\"\".join(ss)+\"\\n\")\n\nf1.close()\nf2.close()\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_207/705.py","file_name":"705.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23457223981","text":"\nf=open(\"/tmp/A-large.in\",\"r\")\ng=open(\"/home/akshay/Desktop/codejam11.txt\",\"w\")\n \n\n \nt=f.readline()\nt=int(t)\n\n\n#if t>=1 and t<=100:\ntest=0\n#print test\n\nwhile t>0:\n \n suml=0\n count2=0\n 
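The core of the interferometry script above is the projection of each East-North baseline onto the (u, v) plane, repeated per telescope pair and per hour angle. The same rotation, vectorized over hour angles and using illustrative latitude, declination, and snapshot values, can be written as a small standalone sketch:

```python
import numpy as np

def uv_track(bx, by, lat, dec, hour_angles):
    """Project one East-North baseline (bx, by) onto the uv plane for a set
    of hour angles, using the same rotation as the script above."""
    phi = np.asarray(hour_angles)
    u = -by * np.cos(lat) * np.sin(phi) + bx * np.cos(phi)
    v = (by * (np.cos(lat) * np.cos(phi) * np.sin(dec) + np.cos(dec) * np.sin(lat))
         + bx * np.sin(phi) * np.sin(dec))
    return u, v

# Illustrative values: one baseline, latitude 30 deg, declination 45 deg,
# eight snapshots across a small hour-angle range.
u, v = uv_track(3.0, 2.0, np.pi / 6, np.pi / 4, np.linspace(0, np.pi / 8, 8))
print(np.c_[u, v])
```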
test=test+1\n #print sum\n var=f.readline()\n #print var\n n,ml=var.split()\n n=int(n)\n ml=str(ml)\n ml=list(ml)\n for k in ml:\n k=int(k)\n #print n, ml\n \n for i,j in enumerate(ml):\n count=0\n\n if i>0:\n suml=suml+int(ml[i-1])\n\n if suml str:\n \"\"\"Lower text and remove punctuation, articles and extra whitespace.\"\"\"\n s = s.lower()\n exclude = set(string.punctuation)\n s = \"\".join(char for char in s if char not in exclude)\n s = re.sub(r\"\\b(a|an|the)\\b\", \" \", s)\n # remove token:\n s = re.sub(r\"\\b()\\b\", \" \", s)\n s = \" \".join(s.split())\n return s\n\n\ndef match(s1: str, s2: str) -> bool:\n s1 = normalize(s1)\n s2 = normalize(s2)\n return s2 in s1\n\ndef eval_acc(prediction, answer):\n matched = 0.\n for a in answer:\n if match(prediction, a):\n matched += 1\n return matched / len(answer)\n\ndef eval_hit(prediction, answer):\n for a in answer:\n if match(prediction, a):\n return 1\n return 0\n\ndef eval_f1(prediction, answer):\n if len(prediction) == 0:\n return 0, 0, 0\n matched = 0\n prediction_str = ' '.join(prediction)\n for a in answer:\n if match(prediction_str, a):\n matched += 1\n precision = matched / len(prediction)\n recall = matched / len(answer)\n if precision + recall == 0:\n return 0, precision, recall\n else:\n return 2 * precision * recall / (precision + recall), precision, recall\n\ndef extract_topk_prediction(prediction, k=-1):\n results = {}\n for p in prediction:\n if p in results:\n results[p] += 1\n else:\n results[p] = 1\n if k > len(results) or k < 0:\n k = len(results)\n results = sorted(results.items(), key=lambda x: x[1], reverse=True)\n return [r[0] for r in results[:k]]\n\ndef eval_result(predict_file, cal_f1=True, topk = -1):\n # predict_file = os.path.join(result_path, 'predictions.jsonl')\n eval_name = \"detailed_eval_result_top_{topk}.jsonl\" if topk > 0 else 'detailed_eval_result.jsonl'\n detailed_eval_file = predict_file.replace('predictions.jsonl', eval_name)\n # Load results\n acc_list = []\n hit_list = []\n f1_list = []\n precission_list = []\n recall_list = []\n with open(predict_file, 'r') as f, open(detailed_eval_file, 'w') as f2:\n for line in f:\n try:\n data = json.loads(line)\n except:\n print(line)\n continue\n id = data['id']\n prediction = data['prediction']\n answer = data['ground_truth']\n if cal_f1:\n if not isinstance(prediction, list):\n prediction = prediction.split(\"\\n\")\n else:\n prediction = extract_topk_prediction(prediction, topk)\n f1_score, precision_score, recall_score = eval_f1(prediction, answer)\n f1_list.append(f1_score)\n precission_list.append(precision_score)\n recall_list.append(recall_score)\n prediction_str = ' '.join(prediction)\n acc = eval_acc(prediction_str, answer)\n hit = eval_hit(prediction_str, answer)\n acc_list.append(acc)\n hit_list.append(hit)\n f2.write(json.dumps({'id': id, 'prediction': prediction, 'ground_truth': answer, 'acc': acc, 'hit': hit, 'f1': f1_score, 'precission': precision_score, 'recall': recall_score}) + '\\n')\n else:\n acc = eval_acc(prediction, answer)\n hit = eval_hit(prediction, answer)\n acc_list.append(acc)\n hit_list.append(hit)\n f2.write(json.dumps({'id': id, 'prediction': prediction, 'ground_truth': answer, 'acc': acc, 'hit': hit}) + '\\n')\n \n if len(f1_list) > 0:\n result_str = \"Accuracy: \" + str(sum(acc_list) * 100 / len(acc_list)) + \" Hit: \" + str(sum(hit_list) * 100 / len(hit_list)) + \" F1: \" + str(sum(f1_list) * 100 / len(f1_list)) + \" Precision: \" + str(sum(precission_list) * 100 / len(precission_list)) + \" Recall: \" + 
str(sum(recall_list) * 100 / len(recall_list))\n else:\n result_str = \"Accuracy: \" + str(sum(acc_list) * 100 / len(acc_list)) + \" Hit: \" + str(sum(hit_list) * 100 / len(hit_list))\n print(result_str)\n result_name = \"eval_result_top_{topk}.txt\" if topk > 0 else 'eval_result.txt'\n eval_result_path = predict_file.replace('predictions.jsonl', result_name)\n with open(eval_result_path, 'w') as f:\n f.write(result_str)\n\n\nif __name__ == \"__main__\":\n argparser = argparse.ArgumentParser()\n argparser.add_argument('-d', type=str, default='results/KGQA/csqa/alpaca_default/test')\n argparser.add_argument('--cal_f1', action=\"store_true\")\n argparser.add_argument('--top_k', type=int, default=-1)\n args = argparser.parse_args()\n \n eval_result(args.d, args.cal_f1, args.top_k)","repo_name":"RManLuo/reasoning-on-graphs","sub_path":"src/qa_prediction/evaluate_results.py","file_name":"evaluate_results.py","file_ext":"py","file_size_in_byte":4798,"program_lang":"python","lang":"en","doc_type":"code","stars":99,"dataset":"github-code","pt":"61"} +{"seq_id":"40709038519","text":"from higlass.client import Track, View, ViewConf\nimport json\n\n\ndef test_viewconf_creation():\n conf = ViewConf()\n\n view = conf.create_view()\n\n track = view.create_track(\n \"heatmap\",\n server=\"http://localhost:{}/api/v1/\".format(8000),\n tileset_uuid=\"xx\",\n height=200,\n position=\"top\",\n )\n\n conf1_dict = conf.to_dict()\n conf2 = ViewConf.from_dict(conf1_dict)\n assert conf2.to_dict() == conf1_dict\n","repo_name":"hkariti/higlass-python","sub_path":"test/higlass_test.py","file_name":"higlass_test.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"21108461181","text":"from cspot.models import DBSession\nfrom cspot.models.users import User\nfrom cspot.models.projects import Project\nfrom cspot.models.forms import Widget\nfrom cspot.models.records import Record\nfrom cspot.models.records import ItemRecord\n\nfrom cspot.util import plural_to_singular\nfrom cspot.auth import get_temp_user\n\nfrom cspot.views.projects import project_menu\nfrom cspot.views.forms import FormController\nfrom cspot.views.forms import widget_controller_factory\n\nfrom pyramid.url import route_url\nfrom pyramid.view import view_config\nfrom pyramid.security import remember\nfrom pyramid.httpexceptions import HTTPFound\n\n@view_config(route_name='project:records',\n permission='manage_project')\ndef record_first(project, request):\n if project.items:\n return HTTPFound(\n location=route_url('project:record', request, project_id=project.id, record_id=project.items[0].id)\n )\n else:\n return HTTPFound(\n location=route_url('project:record:add', request, project_id=project.id)\n )\n\n@view_config(route_name='project:record:add',\n permission='manage_project',\n renderer='cspot:templates/projects/record.pt')\n@view_config(route_name='project:record',\n permission='manage_project',\n renderer='cspot:templates/projects/record.pt')\ndef record(project, request):\n form_controller = FormController(project.item_form)\n\n record_id = request.matchdict.get('record_id', None)\n\n if record_id is not None:\n record = project.get_item(record_id)\n else:\n record = None\n\n if record and not record.reviewed:\n record.reviewed = True\n\n if request.method == 'POST':\n title = request.params.get('title', '').strip()\n submit = request.params.get('submit','')\n\n if not title and submit.find('finish') >= 0:\n return HTTPFound(\n 
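Two details worth flagging in the KGQA evaluation script above: `eval_name = "detailed_eval_result_top_{topk}.jsonl"` (and the matching `.txt` template) lacks the `f` prefix, so `{topk}` is written into the filename literally; and the normalize/match scoring is simple to exercise on its own. A self-contained sketch of that scoring path:

```python
import re
import string

def normalize(s: str) -> str:
    """Same normalization idea as above: lowercase, strip punctuation and articles."""
    s = "".join(ch for ch in s.lower() if ch not in set(string.punctuation))
    s = re.sub(r"\b(a|an|the)\b", " ", s)
    return " ".join(s.split())

def hit(prediction: str, answers) -> int:
    """1 if any gold answer appears as a substring of the normalized prediction."""
    pred = normalize(prediction)
    return int(any(normalize(a) in pred for a in answers))

topk = 5
print(hit("The answer is Barack Obama.", ["barack obama"]))  # -> 1
# With the f prefix the placeholder is actually substituted:
print(f"detailed_eval_result_top_{topk}.jsonl")              # -> ..._top_5.jsonl
```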
location=route_url('project:feedback_form', request, project_id=project.id)\n )\n\n elif not title:\n request.session.flash('%s Name or Title is required!' % project.item_name, 'errors')\n\n elif title:\n form_controller = FormController(project.item_form)\n form_controller.validate_from_request(request)\n\n if form_controller.errors:\n request.session.flash('There was a problem with your submission', 'errors')\n\n else:\n request.session.flash('%s saved!' % title, 'messages')\n\n if record is None:\n record = ItemRecord(project, title)\n\n record.title = title\n\n form_controller.populate_record_from_request(record, request)\n \n session = DBSession()\n session.add(record)\n session.flush()\n \n if submit.find('add') >= 0:\n route = 'project:record:add'\n elif submit.find('finish') >= 0:\n route = 'project:feedback_form'\n else:\n route = 'project:record'\n \n return HTTPFound(\n location=route_url(route, request, project_id=project.id, record_id=record.id)\n )\n\n return dict(\n project=project,\n menu=project_menu(project, request, 'records'),\n form_widgets=form_controller.render_widgets(request, record),\n record=record\n )\n \n\n@view_config(route_name='project:record:download',\n permission='review_project')\ndef file_download(project, request):\n \"\"\"\n Download a file from a widget\n \"\"\"\n\n session = DBSession()\n\n record_id = request.matchdict['record_id']\n widget_id = request.matchdict['widget_id']\n filename = request.matchdict['filename']\n\n record = session.query(Record).filter(Record.project_id==project.id).filter(Record.id==record_id).first()\n widget = session.query(Widget).filter(Widget.id==widget_id).first()\n widget_controller = widget_controller_factory(widget)\n\n value = record.get_widget_value(widget)\n\n return widget_controller.download(value, filename, request)\n\n\n@view_config(route_name='project:record:import', permission='review_project',\n renderer='cspot:templates/projects/premium_import.pt')\ndef record_import(project, request):\n return dict(\n project=project,\n menu=project_menu(project, request, 'records'),\n )\n\n","repo_name":"trip42/CommitteeSpot","sub_path":"cspot/views/records.py","file_name":"records.py","file_ext":"py","file_size_in_byte":4407,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"7733673646","text":"import streamlit as st\nimport pandas as pd\n\n# Choose a list of DVDs available in the shop\nliste_dvds_availables = [\"Back To The Future 1\", \"Back To The Future 2\", \"Back To The Future 3\", \"La chèvre\", \"Harry Potter à l'école des sorciers\"]\n\nst.title(\"Boutique DVDs\")\n\nst.write(\"\"\"\n## DVDs disponibles\n\"\"\")\nst.write(\"\"\"(cliquez pour ajouter l'objet au panier)\"\"\")\n\ndef panier_prix(panier_dict):\n\n liste_dvds = []\n prix_bttf = 0\n prix_autres = 0\n compteur = 0\n mod_prix = 1\n\n for dvd, nb_dvds in panier_dict.items():\n for i in range(nb_dvds):\n liste_dvds.append(dvd)\n\n for dvd in liste_dvds:\n if \"back to the future\" in dvd.lower():\n prix_bttf = prix_bttf + 15\n\n else:\n prix_autres = prix_autres + 20 \n\n liste_dvds_no_duplicate = list(dict.fromkeys(liste_dvds))\n \n for dvd in liste_dvds_no_duplicate:\n if \"back to the future\" in dvd.lower():\n compteur = compteur + 1\n\n if compteur == 2:\n mod_prix = 0.9\n \n if compteur >= 3:\n mod_prix = 0.8\n\n prix_tot = mod_prix*prix_bttf + prix_autres\n\n return prix_tot\n\nfor dvd in liste_dvds_availables:\n if dvd not in st.session_state:\n st.session_state[dvd] = 0\n\nfor dvd in 
liste_dvds_availables:\n if st.button(dvd):\n st.session_state[dvd] += 1\n\nst.write(\"\"\"\n## Panier\n\"\"\")\n\nprix = 0\nfor dvd in liste_dvds_availables:\n st.write(dvd, st.session_state[dvd])\n\nif st.button('Reset panier'):\n for dvd in liste_dvds_availables:\n st.session_state[dvd] = 0\n\nst.write(\"## Prix à payer\")\nst.write(panier_prix(st.session_state))\n\n\n\n","repo_name":"Krakmo/Technical_tests","sub_path":"data_engineer_test/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1205290998","text":"## from https://github.com/skaro94/vtt_qa_pipeline/\nfrom collections import defaultdict\n\nfrom pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer\nfrom pycocoevalcap.bleu.bleu import Bleu\nfrom pycocoevalcap.meteor.meteor import Meteor\nfrom pycocoevalcap.rouge.rouge import Rouge\nfrom pycocoevalcap.cider.cider import Cider\n\nfrom ignite.metrics.metric import Metric\nfrom ignite.exceptions import NotComputableError\n\nfrom utils import to_string, to_string2, make_fake_vocab\nfrom config import *\n\nimport torch\nfrom ignite.metrics.metric import Metric\nfrom ignite.exceptions import NotComputableError\nfrom collections import defaultdict\nfrom pdb import set_trace\n\n\n\nclass Ngram(Metric):\n def __init__(self, args, vocab, output_transform=lambda x: x):\n super(Ngram, self).__init__(output_transform)\n\n self.vocab = vocab\n self.args = args\n\n self.tokenizer = PTBTokenizer()\n scorers = {\n 'bleu': (Bleu(4), [\"Bleu_1\", \"Bleu_2\", \"Bleu_3\", \"Bleu_4\"]),\n 'meteor': (Meteor(),\"METEOR\"),\n 'rouge': (Rouge(), \"ROUGE_L\"),\n 'cider': (Cider(), \"CIDEr\")\n }\n self.scorers = [v for k, v in scorers.items() if k in args.metrics]\n\n @staticmethod\n def default_transform(x):\n return (x[-2], x[-1])\n\n def reset(self):\n self._data = defaultdict(lambda: 0)\n self._num_ex = 0\n\n def format_string(self, x):\n if self.args.beamsize==1 or self.args._training:\n x = to_string(self.vocab, x)\n return {str(i): [v] for i, v in enumerate(x)}\n else:\n x = to_string2(self.vocab, x)\n return {str(i): [v[0]] for i, v in enumerate(x)}#only best beam results are recorded here\n\n def update(self, output):\n y_pred, y = output # y == trg\n num_ex = len(y_pred) # y_pred could be list of tensors thus no y_pred.shape\n res = self.format_string(y_pred)\n gts = self.format_string(y)\n\n for scorer, method in self.scorers:\n score, scores = scorer.compute_score(gts, res)\n if type(method) == list:\n for sc, m in zip(score, method):\n self._data[m] += sc * num_ex\n else:\n self._data[method] += score * num_ex\n self._num_ex += num_ex\n\n def compute(self):\n if self._num_ex == 0:\n raise NotComputableError(\n 'Loss must have at least one example before it can be computed.')\n return {k: v * 100 / self._num_ex for k, v in self._data.items()}\n # *100 for bleu, rouge score percentages\n\n\nclass Ldiff_Square(Metric):\n def __init__(self, args, output_transform=lambda x:x):\n super().__init__(output_transform)\n self.args = args\n self._measure = {\n \"lendiff2sum\": 0\n }\n self._num_ex =0\n\n def reset(self):\n self._measure = {\n \"lendiff2sum\": 0\n }\n self._num_ex = 0\n\n def reformat(self, x):\n if self.args.beamsize==1 or self.args._training or type(x)!= type([]):\n return x\n\n else:\n return torch.cat(x, 1)#list of tensors [1, len ]\n\n def get_len(self, x):\n mask = (x!=PAD_TOKEN) & (x!=SOS_TOKEN) & (x!=EOS_TOKEN)\n mask = mask.long()\n lens = 
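The pricing rule in the shop above (15 per Back To The Future disc, 20 otherwise, with a 10% or 20% rebate on the trilogy subtotal once two or three distinct volumes are in the basket) is easy to factor into a pure function, which also makes it testable without a Streamlit session. A minimal sketch under that reading of the rules:

```python
def basket_price(basket: dict) -> float:
    """basket maps DVD title -> quantity; mirrors the rules used above."""
    bttf = {t: n for t, n in basket.items() if "back to the future" in t.lower()}
    others = {t: n for t, n in basket.items() if t not in bttf}
    bttf_subtotal = 15 * sum(bttf.values())
    distinct = sum(1 for n in bttf.values() if n > 0)
    rebate = {0: 1.0, 1: 1.0, 2: 0.9}.get(distinct, 0.8)  # >= 3 distinct -> 20% off
    return rebate * bttf_subtotal + 20 * sum(others.values())

assert basket_price({"Back To The Future 1": 1}) == 15
assert basket_price({"Back To The Future 1": 1, "Back To The Future 2": 1}) == 27
assert basket_price({f"Back To The Future {i}": 1 for i in (1, 2, 3)}) == 36
assert basket_price({"La chèvre": 2}) == 40
```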
mask.sum(dim=1)\n return lens\n\n def update(self, output):\n decoded, trg = output\n\n num_ex = len(trg)\n decoded = self.reformat(decoded)\n d_lens = self.get_len(decoded)\n trg_lens = self.get_len(trg)\n lendiff2 = ((d_lens-trg_lens)**2).sum()\n self._measure['lendiff2sum']+=lendiff2\n self._num_ex+= num_ex\n\n\n def compute(self):\n return self._measure[\"lendiff2sum\"]/self._num_ex\n","repo_name":"sonsus/papago_test","sub_path":"metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":3736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20060426358","text":"import os\n\nimport pandas as pd\n\ndata_header_dict = {\"bj\": {\"aq\": [\"utc_time\", \"pm2.5\", \"pm10\", \"no2\", \"co\", \"o3\", \"so2\"],\n \"meo\": [\"utc_time\", \"temperature\", \"pressure\", \"humidity\", \"wind_direction\", \"wind_speed\"]},\n \"ld\": {\"aq\": [\"utc_time\", \"pm2.5\", \"pm10\", \"no2\"],\n \"meo\": [\"utc_time\", \"temperature\", \"pressure\", \"humidity\", \"wind_direction\", \"wind_speed\"]}}\n\n# api_data_directory.format(city, data_type) to get real directory\napi_data_directory = \"../competition/data_{}_api_m/{}\"\nfilled_data_directory = {\"bj\": \"../data_m/aq_filled\", \"ld\": \"../data_ld_m/aq_filled\"}\nhistory_data_directory = {\"bj\": {\"aq\": \"../data_m/aq\", \"meo\": \"../data/meo\"},\n \"ld\": {\"aq\": \"../data_ld/aq\", \"meo\": \"../data/meo\"}}\n\n\ndef load_directory_data(directory, data_header=None, drop=None, export_none=False, additional_index=None):\n \"\"\"\n Fetching all data from directory, have the ability of recursive scanning.\n\n :param directory: str or list, representing the directory(s) you want to fetch from.\n :param data_header: indicating the head of data frame you want to append.\n :param drop: list, containing all the column name you want to drop. 
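The `get_len` helper above is a common masking idiom: build a boolean mask that excludes the special token ids, cast it, and sum along the time axis. A standalone sketch, with the ids 0/1/2 standing in for the PAD/SOS/EOS constants the module imports from its config:

```python
import torch

PAD, SOS, EOS = 0, 1, 2  # illustrative ids standing in for the config constants

def seq_lengths(batch: torch.Tensor) -> torch.Tensor:
    """Count real tokens per row, ignoring PAD/SOS/EOS."""
    mask = (batch != PAD) & (batch != SOS) & (batch != EOS)
    return mask.long().sum(dim=1)

batch = torch.tensor([[1, 5, 6, 7, 2, 0, 0],
                      [1, 9, 2, 0, 0, 0, 0]])
print(seq_lengths(batch))  # tensor([3, 1])
```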
Can be set to none to drop nothing\n :param export_none: bool, indicating drop rows that contain empty data or not.\n Noted that this drop are proceed after the drop of columns.\n :param additional_index: list, indicating the name of column you want to assign as index.\n :return: dict, containing all the data.\n :except: FileNotFoundError if the given directory not exist.\n \"\"\"\n # Fetch all csv files recursively in given directory.\n df_array = []\n print(\"Loading data from {}\".format(directory))\n if not isinstance(directory, list):\n directory = [directory]\n for one_directory in directory:\n for root, directories, filenames in os.walk(one_directory):\n for filename in filenames:\n name, ext = os.path.splitext(filename)\n if ext == \".csv\":\n file_dir = os.path.join(root, filename)\n df_single = pd.read_csv(file_dir, \"w\", delimiter=\",\", names=data_header)\n date_index = pd.to_datetime(df_single[\"utc_time\"])\n df_single[\"id\"] = name\n df_single = df_single.set_index([\"id\"])\n df_single = df_single.drop([\"utc_time\"], axis=1)\n df_single = df_single.set_index([date_index], append=True)\n if additional_index is not None:\n df_single = df_single.set_index(additional_index, append=True)\n df_array.append(df_single)\n\n # Assign index and proceed drop\n df = pd.concat(df_array)\n df = df.sort_index()\n if drop is not None:\n df = df.drop(drop, axis=1)\n if not export_none:\n df = df.dropna(axis=0, how='any')\n df = df[~df.index.duplicated(keep='last')]\n return df\n","repo_name":"Logan-Lin/KDD_Data_process_NG","sub_path":"competition/load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":2988,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"70758242756","text":"\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, val = 0, neighbors = None):\n self.val = val\n self.neighbors = neighbors if neighbors is not None else []\n\"\"\"\n\nclass Solution:\n # DFS recursively\n def cloneGraph2(self, node: 'Node') -> 'Node':\n\n dd = {} # map of [oldNode] -> newNode\n\n def dfs(node):\n if not node:\n return None\n\n if node in dd:\n return dd[node]\n\n dd[node] = Node(node.val)\n\n for nei in node.neighbors:\n dd[node].neighbors.append(dfs(nei))\n return dd[node]\n\n return dfs(node)\n\n \n # DFS iteratively\n def cloneGraph3(self, node: 'Node') -> 'Node':\n if not node:\n return node\n\n dd = {node: Node(node.val)}\n stack = [node]\n\n while stack:\n cur = stack.pop()\n for nei in cur.neighbors:\n if nei not in dd:\n dd[nei] = Node(nei.val)\n stack.append(nei)\n dd[cur].neighbors.append(dd[nei])\n return dd[node]\n\n \n # BFS iteratively\n def cloneGraph(self, node: 'Node') -> 'Node':\n if not node:\n return node\n \n dd = {node: Node(node.val)}\n q = collections.deque([node])\n\n while q:\n cur = q.popleft()\n for nei in cur.neighbors:\n if nei not in dd:\n dd[nei] = Node(nei.val)\n q.append(nei)\n dd[cur].neighbors.append(dd[nei])\n return dd[node]","repo_name":"aso2001/LeetCode","sub_path":"0133-clone-graph/0133-clone-graph.py","file_name":"0133-clone-graph.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2843385326","text":"from dash.dependencies import Input, Output\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport pandas as pd\nimport plotly.graph_objs as go\n\n\nclass SliderUpdateGraphModule(object):\n def __init__(self):\n self.id = 
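The loader above walks a directory tree, reads each `.csv`, stamps the station id taken from the filename, and concatenates everything under an (id, utc_time) index. Note that it passes "w" as the second positional argument to `pd.read_csv`, which either lands on `sep` or raises in newer pandas and appears unintended. A trimmed sketch of the apparent intent, with hypothetical column names standing in for `data_header_dict`:

```python
import os
import pandas as pd

def load_csv_tree(root: str, names=("utc_time", "pm2.5")) -> pd.DataFrame:
    """Recursively load every .csv under root into one (id, utc_time)-indexed frame."""
    frames = []
    for dirpath, _, filenames in os.walk(root):
        for fn in filenames:
            stem, ext = os.path.splitext(fn)
            if ext != ".csv":
                continue
            df = pd.read_csv(os.path.join(dirpath, fn), names=list(names))
            df["id"] = stem                         # station id from the filename
            df["utc_time"] = pd.to_datetime(df["utc_time"])
            frames.append(df.set_index(["id", "utc_time"]))
    out = pd.concat(frames).sort_index()
    return out[~out.index.duplicated(keep="last")]  # keep the latest duplicate row
```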
'slider-update-graph'\n self.callback_check = 'Slider Update Graph'\n\n self.df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/gapminderDataFiveYear.csv')\n\n self.layout = html.Div(id=self.id,\n children=[\n dcc.Graph(id='graph-with-slider'),\n dcc.Slider(\n id='year-slider',\n min=self.df['year'].min(),\n max=self.df['year'].max(),\n value=self.df['year'].min(),\n marks={str(year): str(year) for year in self.df['year'].unique()}\n )\n ], style={'display': 'none'}\n )\n\n def set_callbacks(self, app):\n\n @app.callback(Output(self.id, 'style'), [Input('tabs', 'value'), Input('tab-subcategories', 'value')])\n def display_module(tab, tab_subcategory):\n if (tab == 2) & (tab_subcategory == self.callback_check):\n return {'display': 'block'}\n return {'display': 'none'}\n\n @app.callback(\n Output('graph-with-slider', 'figure'),\n [Input('year-slider', 'value')])\n def update_figure(selected_year):\n filtered_df = self.df[self.df.year == selected_year]\n traces = []\n for i in filtered_df.continent.unique():\n df_by_continent = filtered_df[filtered_df['continent'] == i]\n traces.append(go.Scatter(\n x=df_by_continent['gdpPercap'],\n y=df_by_continent['lifeExp'],\n text=df_by_continent['country'],\n mode='markers',\n opacity=0.7,\n marker={\n 'size': 15,\n 'line': {'width': 0.5, 'color': 'white'}\n },\n name=i\n ))\n\n return {\n 'data': traces,\n 'layout': go.Layout(\n xaxis={'type': 'log', 'title': 'GDP Per Capita'},\n yaxis={'title': 'Life Expectancy', 'range': [20, 90]},\n margin={'l': 40, 'b': 40, 't': 10, 'r': 10},\n legend={'x': 0, 'y': 1},\n hovermode='closest'\n )\n }\n\n return\n","repo_name":"jxb5778/dash_example","sub_path":"app/pages/part_2/slider_update_graph_module.py","file_name":"slider_update_graph_module.py","file_ext":"py","file_size_in_byte":2552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13021253249","text":"import subprocess\nimport sys\nimport time\nimport subprocess\n\ndef get_devices():\n # Ejecuta el comando `netsh wlan show networks`\n process = subprocess.run([\"netsh\", \"wlan\", \"show\", \"networks\"], capture_output=True)\n\n # Obtén la salida del comando como una secuencia de bytes\n output = process.stdout\n\n # Decodifica la secuencia de bytes a una cadena Unicode usando la codificación ISO-8859-1\n output = output.decode(\"ISO-8859-1\")\n\n # Divide la cadena Unicode en líneas\n lines = output.splitlines()\n\n # Filtra las líneas que contienen \"SSID\"\n devices = [line for line in lines if \"SSID\" in line]\n\n return devices\n\n\n\ndef get_rssi(device):\n # Separa la información del dispositivo en una lista\n info = device.split(\" \")\n\n # Verifica si la lista contiene la información RSSI\n if len(info) >= 9:\n # Obtén la señal RSSI\n rssi = info[8]\n return rssi\n else:\n return None\n\n\nwhile True:\n # Obtén la lista de dispositivos\n devices = get_devices()\n\n # Procesa la lista de dispositivos\n for device in devices:\n # Obtén la señal RSSI\n rssi = get_rssi(device)\n\n # Imprime la señal RSSI\n print(rssi)\n\n # Espera un segundo\n time.sleep(1)\n","repo_name":"FreddyOjeda/proyecto_de_grado","sub_path":"bard.py","file_name":"bard.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16072980178","text":"#!/usr/bin/env python2.7\n#\n# Code was written by asinggih (Aditya Singgih)\n#\n#\n# Note: Problemset belongs to the owner. 
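The Wi-Fi scanner above filters `netsh` output lines containing "SSID" and then indexes a split on single spaces, which is fragile against the tool's padding, and plain `netsh wlan show networks` generally needs `mode=bssid` before signal strength appears at all. A hedged sketch of a more defensive parse (Windows-only, and the field labels vary by system locale):

```python
import re
import subprocess

def scan_signals():
    """Return [(ssid, signal_percent), ...] from netsh; Windows, locale-dependent."""
    proc = subprocess.run(
        ["netsh", "wlan", "show", "networks", "mode=bssid"],
        capture_output=True,
    )
    text = proc.stdout.decode("ISO-8859-1")
    pairs, ssid = [], None
    for line in text.splitlines():
        m = re.match(r"\s*SSID\s+\d+\s*:\s*(.*)", line)
        if m:
            ssid = m.group(1).strip()
        m = re.match(r"\s*Signal\s*:\s*(\d+)%", line)
        if m and ssid is not None:
            pairs.append((ssid, int(m.group(1))))
    return pairs
```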
Not me\n\n\"\"\"\n\nGiven a spaced-separated string of numbers,\n\nreturn the number that occurs the most.\n\nIf 2 or more numbers have the same occurences, return the largest number.\n\nIf all numbers have same occurences, return the largest number\n\n\"\"\"\n\n\ndef find_most_freq(string):\n\n array = userinput.split(\" \")\n array = [int(i) for i in array]\n\n distinct = set(array)\n lookup = dict()\n\n largest_num = array[0]\n for i in array:\n if i > largest_num:\n largest_num = i\n\n if i in lookup:\n lookup[i] += 1\n else:\n lookup[i] = 1\n\n output = None\n largest_count = 0\n for num in distinct:\n if lookup[num] >= largest_count:\n largest_count = lookup[num]\n output = num\n\n if output is None:\n return largest_num\n\n return output\n\n\nif __name__ == \"__main__\":\n\n # userinput = \"1 5 1 4 9 0 4\"\n # userinput = \"1 5 1 9 0 4\"\n userinput = \"1 5 10 4 9 0\"\n\n print(find_most_freq(userinput))\n","repo_name":"asinggih/coding-challenges","sub_path":"highest_count.py","file_name":"highest_count.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18077880283","text":"import numpy\nimport string\nfrom PIL import Image\nimport math\nimport sys\nfrom fileHandler import readWFAFromFile\nwfa = readWFAFromFile(sys.argv[1])\ndef WFAToImage(wfa,k):\n\tsize = 2 ** k\n\tnew_array = numpy.empty([size,size,2], dtype=numpy.uint8)\n\tC = wfa.I\n\tx = int(0)\n\ty = int(0)\n\tresolutionInter = int((2 ** k)/2)\n\ti = 0\n\tstring = \"\"\n\twhile len(int2base(i,4)) < k+1:\n\t\tstring = int2base(i,4)\n\t\twhile len(string) < k:\n\t\t\tstring = \"0\"+string\n\t\tfor letter in string:\n\t\t\tif letter == \"0\":\n\t\t\t\tx = x + resolutionInter\n\t\t\t\tC = C.dot(wfa.A[0])\n\t\t\telif letter == \"1\":\n\t\t\t\tx = x + resolutionInter\n\t\t\t\ty = y + resolutionInter\n\t\t\t\tC = C.dot(wfa.A[1])\n\t\t\telif letter == \"2\":\n\t\t\t\tC = C.dot(wfa.A[2])\t\n\t\t\telif letter == \"3\":\n\t\t\t\ty = y + resolutionInter\n\t\t\t\tC = C.dot(wfa.A[3])\n\t\t\tresolutionInter = int(resolutionInter / 2)\n\t\tC = C.dot(wfa.F)\n\t\tgrey = round(C[0]\t* 255)\n\t\tnew_array[x][y] = [grey,255]\n\t\tx = int(0)\n\t\ty = int(0)\n\t\tresolutionInter\t= int((2 ** k)/2)\n\t\ti = i + 1\n\t\tC = wfa.I\n\treturn new_array\n\n\ndigs = string.digits + string.ascii_letters\ndef int2base(x, base):\n if x < 0:\n sign = -1\n elif x == 0:\n return digs[0]\n else:\n sign = 1\n\n x *= sign\n digits = []\n\n while x:\n digits.append(digs[int(x % base)])\n x = int(x / base)\n\n if sign < 0:\n digits.append('-')\n\n digits.reverse()\n\n return ''.join(digits)\nimg_array = WFAToImage(wfa,int(sys.argv[3]))\nimg = Image.fromarray(img_array)\t\t\nimg.save(\"C:\\\\Users\\\\idriss\\\\Documents\\\\GitHub\\\\WFA-image\\\\\"+sys.argv[2])","repo_name":"idriss1998/WFA-image","sub_path":"WFAToImage.py","file_name":"WFAToImage.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73019456514","text":"\n# SASL is intended as a final answer in authentication.\n# It has been that answer to all desires in SMTP, IMAP,\n# POP3, IRC, LDAP and probably more; there is only one\n# protocol that continues to invent its own wheels,\n# namely HTTP. The lack of generic security means that\n# web applications fall behind on updates of their\n# security, leaving gaping holes in the protection of\n# its users. 
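The `find_most_freq` routine above hand-rolls a frequency table and then resolves ties by relying on the iteration order of a `set`, which makes the tie-break accidental rather than guaranteed. The same contract, most-common value with ties broken toward the larger number, falls out of `collections.Counter` directly:

```python
from collections import Counter

def most_frequent(numbers):
    """Most common value; ties broken toward the larger number."""
    counts = Counter(numbers)
    return max(counts.items(), key=lambda kv: (kv[1], kv[0]))[0]

assert most_frequent([1, 5, 1, 4, 9, 0, 4]) == 4   # 1 and 4 tie; 4 is larger
assert most_frequent([1, 5, 10, 4, 9, 0]) == 10    # all tie once; largest wins
```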
Many applications implement mediocre\n# security levels, and it is precisely those that will\n# never change. The reason is clear: applications\n# take another kind of thinking (and perhaps another\n# kind of programmer) than security software.\n#\n# HTTP supports user interaction, and there is no end\n# to applications asking the user to enter data, in\n# a fashion that could be seen as one-sided automation.\n# Other protocols that are less interactive and less\n# inventive use extra software to interact with the\n# user with normal software, no systems that are\n# dynamic and always implemented in a slightly different\n# manner.\n# \n# Though it is possible to interact with users from\n# the web pages exchanged over HTTP,\n# this is usually an unsafe environment full of dynamic\n# content that comes from a variety of sources. Many\n# pages contain advertisements and are prone to scan as\n# much of a user's behaviour as possible. There is no\n# reason why such heterogenic content would stop to\n# respect passwords.\n#\n# Passwords are also the inevitable result of various\n# programmers each inventing their own access control\n# mechanism. These programmers tend to specialise in\n# application logic, not security. The result is a\n# highly conservative style of security, which sticks\n# to old habits that have long considered insecure.\n#\n# The use of passwords is horrible, and the attempts\n# to create web-only single sign-on systems have led\n# to slow response times and still a high risk for\n# tapping into personal data.\n#\n# SASL is not a specific authentication mechanism, but\n# rather a generic wrapper that captures them all, by\n# defining a channeling system through which they can\n# all travel. A gradually evolving set of mechanisms\n# have been defined for SASL, and even for passwords\n# there are decent mechanisms, though nothing compared\n# to a system like Kerberos. None of these need to\n# be forced upon users however; a properly broad SASL\n# implementations offers a list of mechanisms from\n# which a user chooses. These settings are not made\n# by application software, and they need not be\n# inconsistent.\n#\n# Better even, upgrade SASL to get new mechanisms\n# installed. Not just in websites, but also in\n# mail, ldap and all the other applications of SASL.\n# There is no need for involvement from any of the\n# application programs, as long as they pass on the\n# traffic to sufficiently clueful software that also\n# follows the upgrade advise.\n#\n# The web is a dangerous place with fatal attraction.\n# We can mitigate most threats by at least treating\n# security-sensitive aspects in layers out of reach\n# by dangerously dynamic components like JavaScript\n# running in an HTML page. A similar choice would\n# be wise for privacy-sensitive aspects. 
SASL does\n# just that, without the need to bother a programmer\n# whose primary interest is his application, not\n# the user's online security or privacy.\n\n\nfrom __future__ import print_function\n\n\nimport re\n\n\n# Regular expressions to match against request headers\n#\n# From RFC 7325:\n#\n# credentials = auth-scheme [ 1*SP ( token68 / #auth-param ) ]\n# \n# auth-scheme = token\n# -- this token is case-insensitive\n#\n# auth-param = token BWS \"=\" BWS ( token / quoted-string )\n# -- this token is case-insensitive\n# -- we always use the quoted-string form\n# \n# token68 = 1*( ALPHA / DIGIT /\n# \"-\" / \".\" / \"_\" / \"~\" / \"+\" / \"/\" ) *\"=\"\n#\n# BWS references *( SP / HTAB ) and then there is\n#\n# quoted-string = DQUOTE *( qdtext / quoted-pair ) DQUOTE\n# qdtext = HTAB / SP /%x21 / %x23-5B / %x5D-7E / obs-text\n# obs-text = %x80-FF\n# quoted-pair = \"\\\" ( HTAB / SP / VCHAR / obs-text )\n#\n# Our syntax is more constrained, allowing space-separated\n# GSS-API mechanism names, which we can formalise as\n#\n# mech-string = DQUOTE mech-name *( SP sasl-mech ) DQUOTE\n#\n# The sasl-mech is defined in RFC 4422 as:\n#\n# sasl-mech = 1*20mech-char\n# mech-char = UPPER-ALPHA / DIGIT / HYPHEN / UNDERSCORE\n# ; mech-char is restricted to A-Z (uppercase only), 0-9, -, and _\n# ; from ASCII character set.\n#\n# UPPER-ALPHA = %x41-5A ; A-Z (uppercase only)\n# DIGIT = %x30-39 ; 0-9\n# HYPHEN = %x2D ; hyphen (-)\n# UNDERSCORE = %x5F ; underscore (_)\n#\n# The following scheme finds pairs of matched parameters.\n# Both the quoted string and token68 forms are not unpacked.\n#\n#TODO# Probably need to get a better quoted-string.\n#\n\nre_sasl_mech = '(?:[A-Z0-9-_]{1,20})'\nre_mechstring = '(?:[\"](' + re_sasl_mech + '(?:[ ]' + re_sasl_mech + ')*)[\"])'\n\nre_dnsstring = '(?:\"(' + '[a-zA-Z0-9-_]+(?:\\.[a-zA-Z0-9-_]+)+' + ')\")'\n\nre_bws = re_ows = '(?:[ \\\\t]*)'\nre_token68 = '(?:[a-zA-Z0-9-._~+/]+[=]*)'\nre_auth_param = ( '(?:' + '([CcSs][2][CcSs])' + re_bws + '[=]' + re_bws + '(' + re_token68 + ')' +\n\t'|' + '[Mm][Ee][Cc][Hh]' + re_bws + '[=]' + re_bws + '(' + re_mechstring + ')' +\n\t'|' + '[Rr][Ee][Aa][Ll][Mm]' + re_bws + '[=]' + re_bws + '' + re_dnsstring + ')' )\nre_auth_scheme = '[Ss][Aa][Ss][Ll]'\nre_credentials = '(?:' + re_auth_scheme + '(?:[ ]+' + re_auth_param + '(?:' + re_ows + '[,]' + re_ows + re_auth_param + ')+)?)'\n\nre_credentials = '(?:' + re_auth_scheme + '(?:[ ]+(' + re_auth_param + ')(?:' + re_ows + '[,]' + re_ows + re_auth_param + ')+)?)'\n\n# We use authorization_stx to check the syntax of Authorization: and\n# Proxy-Authorization: headers, and auth_param_finder to findall()\n#\nauthorization_stx = re.compile ('^' + re_credentials + '$')\nauth_param_finder = re.compile (re_auth_param)\n\n\n# A few simple tests on the complex regular expressions.\n#\n#TODO# Should be optional, and possibly external to this file.\n#\n_test = 'SAsL c2s=11bbaa=, s2s=190284ijrjwerowieu987d9fs===, c2c=2kkasjf923y92i3h4, s2c=alskjoeiqwr98237492834=====,mech=\\t\"TRA LA LALALA\", realm\\t = \\t\\t \\t \"dynamo.nep\"'\nassert (authorization_stx.match (_test) is not None)\nassert (auth_param_finder.findall (_test) == [\n\t('c2s', '11bbaa=', '', '', ''),\n\t('s2s', '190284ijrjwerowieu987d9fs===', '', '', ''),\n\t('c2c', '2kkasjf923y92i3h4', '', '', ''),\n\t('s2c', 'alskjoeiqwr98237492834=====', '', '', ''),\n\t('', '', '\"TRA LA LALALA\"', 'TRA LA LALALA', ''),\n\t('', '', '', '', 'dynamo.nep')])\n\n\n# The sasl_mechanisms provided\n#TODO# Derive from SASL stack\n#\nsasl_mechanisms = [ 
'GSSAPI', 'PLAIN', 'CRAM-MD5', 'DIGEST-MD5', 'SCRAM-SHA1' ]\n\n\n# Extend the content of a server-sent 401 or 407 header\n# WWW-Authenticate or Proxy-Authenticate with options\n# to authenticate with SASL. If the header does not\n# yet exist, add it to make the inner WSGI layer conform\n# to the HTTP specification.\n#\ndef add_sasl_chal (realm,got_remote=False,hdrval=None):\n\thdrval = hdrval + ', ' if hdrval is not None else ''\n\tmechs = ' '.join (sasl_mechanisms)\n\tif got_remote and 'EXTERNAL' not in mechs:\n\t\tmechs += ' EXTERNAL'\n\thdrval += 'SASL realm=\"' + realm + '\", mech=\"' + mechs + '\"'\n\n\n# Build a SASL response header from its name and the attributes\n# that were updated by SASL,starting from those in the request.\n# In addition, a base directory of headers is prepared to find\n# the new dictionary of response headers.\n#\ndef build_sasl_header (hdrnm, attrs, basedir):\n\thdrval = 'SASL'\n\tcomma = ''\n\tfor atnm in ['mech', 'realm', 'name', 'c2c', 's2c', 's2s', 'text']:\n\t\tif attrs.has_key (atnm):\n\t\t\tif '2' in atnm:\n\t\t\t\thdrval += comma + ' %s=%s' % (atnm, attrs [atnm])\n\t\t\telse:\n\t\t\t\thdrval += comma + ' %s=\"%s\"' % (atnm, attrs [atnm])\n\t\tcomma = ','\n\tif basedir.has_key (hdrnm):\n\t\t# Does this ever occur? Probably not\n\t\tbasedir [hdrnm] += ',' + hdrval\n\telse:\n\t\tbasedir [hdrnm] = hdrval\n\n\nclass WSGI_SASL (object):\n\n\t\"\"\"\n\tWSGI-SASL middleware filters HTTP traffic before\n\tit reaches an application that may want to use a\n\t`REMOTE_USER` header. The application will raise\n\t401 or 407 if it lacks one, thereby triggering the\n\tSASL exchange that it may or may not know about.\n\n\tThe client may provide credentials, either\n\tpro-actively or reminiscent of a foregoing\n\tSASL interaction. When these lead to the\n\testablishment of a `REMOTE_USER`.\n\n\tWhen a `REMOTE_USER` already exists, it is\n\tacceptable to the `SASL EXTERNAL` method.\n\tBy default it is actually passed through.\n\tWhen SASL is tried in spite of this value,\n\tit is assumed that different negotiation\n\tis required to replace `REMOTE_USER`, or to\n\tat least give the client such an opportunity.\n\n\tThis layer allows other mechanisms to be setup\n\tin preceding or follow-up layers:\n\n\t * It passes `REMOTE_USER` trough; the preceding\n\t stack can be incorporated as `SASL EXTERNAL`\n\t so be mindful that it is sufficiently\n\t secure for the application's purposes;\n\n\t * It passes `Authorize` headers that reference\n\t another security protocol;\n\n\t * It externds to a list of challenges in a\n\t 401 or 407 Initial Response or, if the list\n\t has not been started yet, it starts it.\n\n\t * It passes 200 and 403 Final Responses, along\n\t with all the other status codes to which\n\t HTTP-SASL has nothing to add.\n\n\tThis class implements WWW authentication. 
The\n\tsubclass WSGI_SASL_Proxy overrides a few aspects\n\tto produce Proxy authentication.\n\t\"\"\"\n\n\tstatus = '401 Unauthorized'\n\tresp_header = 'WWW-Authenticate'\n\treq_header = 'Authorization'\n\treq_envvar = 'HTTP_AUTHORIZATION'\n\n\tdef __init__ (self, inner_app, realm='secure.realm'):\n\t\t\"\"\"Instantiate WSGI-SASL as middleware\n\t\t on the path towards the `inner_app`.\n\t\t\"\"\"\n\t\tself.inner_app = inner_app\n\t\tself.realm = realm\n\t\tself.resp_header_lowcase = self.resp_header.lower ()\n\n\tdef __call__ (self, outer_env, outer_resp):\n\t\t\"\"\"This function serves to make the\n\t\t WSGI-SASL instance callable, using the\n\t\t common WSGI pattern.\n\t\t\"\"\"\n\t\tif outer_env.has_key ('HTTP_PROXY_AUTHORIZATION'):\n\t\t\tprint ('Processing Proxy-Authorization: header')\n\t\t\tpass #TODO#\n\t\tif outer_env.has_key ('HTTP_AUTHORIZATION'):\n\t\t\tprint ('Processing Authorization: header')\n\t\t\tpass #TODO#\n\t\t#\n\t\t# Process Proxy-Authorization: or Authorization: headers.\n\t\tresult = None\n\t\tif environ.has_key (req_envvar):\n\t\t\tprint ('Processing [Proxy-]Authorization: header')\n\t\t\tpass #TODO#\n\t\tif result is not None:\n\t\t\treturn result\n\t\t#\n\t\t# Forward the call to the inner application\n\t\tinner_env = outer_env\n\t\tgot_remote = outer_env.has_key ('REMOTE_USER')\n\t\tinner_resp = self._curried_inner_resp (outer_env, outer_resp, got_remote)\n\t\tself.inner_app (inner_env, inner_resp)\n\n\tdef _curried_inner_resp (self, outer_env, outer_resp, got_remote):\n\t\t\"\"\"This function is called to produce an\n\t\t inner start_response function, building\n\t\t on the information for the outer and\n\t\t maintaining state on things like SASL\n\t\t negotiation progress.\n\t\t\"\"\"\n\t\t#\n\t\tdef parse_header (hdrval):\n\t\t\tbad = False\n\t\t\t#\n\t\t\t# First ensure overal syntax; this makes sure that our\n\t\t\t# upcoming iteration works as expected.\n\t\t\tif not authorization_stx.match (hdrval):\n\t\t\t\tbad = True\n\t\t\t#\n\t\t\t# Continue to parse the structure and check even more\n\t\t\tattrs = { }\n\t\t\tfor (x2y,data,_,mech,realm) in auth_param_finder.findall (hdrval):\n\t\t\t\tx2y = x2y.lower ()\n\t\t\t\tif x2y != '':\n\t\t\t\t\tbad = bad or attrs.has_key (x2y)\n\t\t\t\t\tattrs [x2y] = data\n\t\t\t\telif mech != '':\n\t\t\t\t\tbad = bad or attrs.has_key ('mech')\n\t\t\t\t\tattrs ['mech'] = mech\n\t\t\t\telif realm != '':\n\t\t\t\t\tbad = bad or attrs.has_key ('realm')\n\t\t\t\t\tattrs ['realm'] = realm\n\t\t\tbad = bad or not attrs.has_key ('c2s')\n\t\t\t#\n\t\t\t# Return an error if there was a problem with the syntax\n\t\t\tif bad:\n\t\t\t\tstart_response ('403 Forbidden', { 'Content-Type', 'text/plain' })\n\t\t\t\treturn ['Unrecognised %s: header' % self.resp_header]\n\t\t\t#TODO# Process and enjoy; pass or return when decided\n\t\t\tsasl_status = '200 OK'\n\t\t\tif need_to_continue_sasl:\n\t\t\t\tresphdr = build_sasl_header (self.resp_header, attrs, { 'Content-Type': 'text/plain' })\n\t\t\t\tstart_response (sasl_status or self.status, resphdr)\n\t\t\t\treturn ['Please continue the SASL exchange']\n\t\t\treturn None\n\t\t#\n\t\tdef inner_resp (status, inner_resphdr):\n\t\t\tprint ('Response status', status)\n\t\t\tprint ('Response headers', inner_resphdr)\n\t\t\tif status [:3] != self.status [:3]:\n\t\t\t\treturn outer_resp (status, inner_resphdr)\n\t\t\t#\n\t\t\thdrset = False\n\t\t\touter_resphdr = [ ]\n\t\t\tfor (name,hval) in inner_resphdr:\n\t\t\t\tif name.lower () == self.resp_header_lowcase:\n\t\t\t\t\thval = add_sasl_chal 
(self.realm, got_remote, value)\n\t\t\t\t\thdrset = True\n\t\t\t\touter_resphdr.append ( (name,hval) )\n\t\t\tif not hdrset:\n\t\t\t\touter_resphdr.append ( (self.resp_header,add_sasl_chal (realm, got_remote) ) )\n\t\t\t#\n\t\t\treturn outer_resp (status, outer_resphdr)\n\t\t#\n\t\t# Return the inner function, bound to our context\n\t\treturn inner_resp\n\n\nclass WSGI_SASL_Proxy (WSGI_SASL):\n\n\t\"\"\"This object handles Proxy authentication over WSIG-SASL.\n\t It usually comes before the handler for WWW authentication,\n\t because Proxy authentication is more local, as in per-leg,\n\t than WWW authentication. Other than a few settings, this\n\t class does not override the logic of plain WWW as defined\n\t in the WSGI_SASL superclass.\n\t\"\"\"\n\n\tstatus = '407 Proxy Authentication Requires'\n\tresp_header = 'Proxy-Authenticate'\n\treq_header = 'Proxy-Authorization'\n\treq_envvar = 'HTTP_PROXY_AUTHORIZATION'\n\n","repo_name":"arpa2/wsgi-byoid","sub_path":"wsgisasl.py","file_name":"wsgisasl.py","file_ext":"py","file_size_in_byte":13526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29174036196","text":"__author__ = 'gerardwhittey'\nclass PowerSchoolRouter(object):\n \"\"\"\n A router to control all database operations on models in the\n auth application.\n \"\"\"\n def db_for_read(self, model, **hints):\n \"\"\"\n Attempts to read auth models go to auth_db.\n \"\"\"\n if model._meta.app_label == 'pwrschool':\n return 'pwrschool_db'\n return None","repo_name":"gwhittey23/iPassCon","sub_path":"pwrschool/router.py","file_name":"router.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"18981088068","text":"import numpy as np\n\nclass InitializationOperator:\n def __init__(self):\n pass\n\n def BinaryEncoding(self, n_individuals, chromosome_length):\n\n pop = np.random.rand(n_individuals, chromosome_length)\n pop[pop < 0.5] = 0\n pop[pop >= 0.5] = 1\n population = np.copy(pop.astype('int'))\n\n return population\n\n def RealValueEncoding(self, n_individuals, chromosome_length):\n\n pop = np.random.rand(n_individuals, chromosome_length)\n population = np.copy(pop.astype('int'))\n\n return population\n\n def LinearProgramming(self, n_individuals, n_variables, n_constants, n_operators, n_genes):\n\n population = [0]*n_individuals\n for i in range(n_individuals):\n population[i] = {'chromosome' : []}\n\n for j in range(np.random.randint(n_genes[0],n_genes[1])+1):\n N = np.random.randint(n_variables)\n O = np.random.randint(n_operators)\n X1 = np.random.randint(n_variables + n_constants)\n X2 = np.random.randint(n_variables + n_constants)\n\n population[i] = {'chromosome' : np.append(population[i]['chromosome'], [N, O, X1, X2])}\n\n return population\n\ndef InitializePopulation(scheme, n_individuals, chromosome_length):\n \"\"\"\n Initialize population for a genetic algorithm.\n # Arguments:\n scheme: string, binary or realvalue - denotes the type of encoding to be\n used when creating the population\n\n n_individuals: positive, non-zero integer, how many individuals should be in the\n population, equivalent to population_size\n\n chromosome_length: positive, non-zero integer, how many genes should each\n individual be made up of\n\n # returns:\n population: a (n_individuals,chromosome_length)-sized numpy.array\n \"\"\"\n operator = InitializationOperator()\n\n if scheme.lower() == 'binary':\n population = 
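One functional detail in the middleware above: `add_sasl_chal` assembles `hdrval` but never returns it, so callers such as `inner_resp` receive `None` for the challenge header. A hedged sketch of what the construction presumably intends (same attribute layout; the mechanism list here is illustrative rather than the module's own):

```python
def add_sasl_chal(realm, got_remote=False, hdrval=None, mechs=("GSSAPI", "PLAIN")):
    """Append a SASL challenge to an existing WWW-/Proxy-Authenticate value."""
    out = hdrval + ", " if hdrval is not None else ""
    mech_list = list(mechs)
    if got_remote and "EXTERNAL" not in mech_list:
        mech_list.append("EXTERNAL")   # advertise the pass-through identity
    out += 'SASL realm="%s", mech="%s"' % (realm, " ".join(mech_list))
    return out                         # the original builds this but drops it

print(add_sasl_chal("secure.realm", got_remote=True))
# SASL realm="secure.realm", mech="GSSAPI PLAIN EXTERNAL"
```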
operator.BinaryEncoding(n_individuals, chromosome_length)\n\n if scheme.lower() == 'realvalue':\n population = operator.RealValueEncoding(n_individuals, chromosome_length)\n\n return population\n\ndef InitializeLGPPopulation(n_individuals, n_variables, n_constants, n_operators=4, n_genes=[4,24]):\n \"\"\"\n Initialize a population for Linear Genetic Programming (LGP).\n # Arguments:\n n_individuals: positive, non-zero integer, how many individuals should be in the\n population, equivalent to population_size\n n_variables: positive, non-zero integer, number of variable registers\n n_constants: positive, non-zero integer, number of constant registers\n n_operators: positive, non-zero integer, number of operators,\n 4 by default : [addition, subtraction, multiplication, division]\n n_genes: set of two positive, non-zero integers with n_genes[0] None:\n reference = np.random.normal(0, 1, (100, 3))\n paths = []\n for offset in [0, 3]:\n path = tmp_path / f\"{offset}.pkl\"\n util.dump_pickle({\n \"params\": reference,\n \"samples\": reference[:, None, :] + offset + np.random.normal(0, 1, (100, 1000, 3))\n }, path)\n paths.append(str(path))\n\n csv = tmp_path / \"result.csv\"\n parts = [f\"--csv={csv}\"]\n if bounds:\n parts.append(f\"--bounds={bounds}\")\n\n __main__([*parts, *paths])\n\n result = pd.read_csv(csv).set_index(\"path\")\n assert result.loc[\"0.pkl\"].rmise < result.loc[\"3.pkl\"].rmise\n","repo_name":"tillahoffmann/summaries2","sub_path":"tests/scripts/test_evaluate.py","file_name":"test_evaluate.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10180373779","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\"\"\"\nComplete the 'countApplesAndOranges' function below.\n\nThe function accepts following parameters:\n 1. INTEGER s\n 2. INTEGER t\n 3. INTEGER a\n 4. INTEGER b\n 5. INTEGER_ARRAY apples\n 6. 
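A caveat on the initializers above: both encodings end with `pop.astype('int')`, which is what `BinaryEncoding` wants after thresholding, but in `RealValueEncoding` it truncates every uniform draw in [0, 1) to 0. A hedged sketch of what the real-valued branch presumably intends, optionally scaled to a [low, high) range:

```python
import numpy as np

def real_value_encoding(n_individuals, chromosome_length, low=0.0, high=1.0):
    """Uniform real-valued chromosomes in [low, high); no integer cast."""
    return low + (high - low) * np.random.rand(n_individuals, chromosome_length)

pop = real_value_encoding(4, 6, low=-1.0, high=1.0)
assert pop.shape == (4, 6) and pop.dtype == np.float64
```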
INTEGER_ARRAY oranges\n\"\"\"\n\n\ndef countApplesAndOranges(s, t, a, b, apples, oranges):\n \"\"\"modified location of apples\"\"\"\n '''method 1 '''\n apple_location = [a + apple for apple in apples]\n orange_location = [b + orange for orange in oranges]\n\n fruit_in_range = []\n count_apple = 0\n count_orange = 0\n\n for idx in range(s, t + 1):\n if idx in apple_location:\n count_apple += 1\n\n if idx in orange_location:\n count_orange += 1\n\n fruit_in_range.append(count_apple)\n fruit_in_range.append(count_orange)\n\n for ele in fruit_in_range:\n print(ele)\n\n '''method 2'''\n print(sum([1 for apple in apples if s <= (a + apple) <= t]))\n print(sum([1 for orange in oranges if s <= (b + orange) <= t]))\n\n\nif __name__ == '__main__':\n first_multiple_input = input(\"enter s and t\").rstrip().split()\n\n s = int(first_multiple_input[0])\n\n t = int(first_multiple_input[1])\n\n second_multiple_input = input(\"enter a and b\").rstrip().split()\n\n a = int(second_multiple_input[0])\n\n b = int(second_multiple_input[1])\n\n third_multiple_input = input(\"enter m and n\").rstrip().split()\n\n m = int(third_multiple_input[0])\n\n n = int(third_multiple_input[1])\n\n apples = list(map(int, input(\"enter coordinates of apple\").rstrip().split()))\n\n oranges = list(map(int, input('enter coordinates of oranges ').rstrip().split()))\n\n countApplesAndOranges(s, t, a, b, apples, oranges)\n","repo_name":"amits0003/Selenium_Study_Files","sub_path":"problem_solving_section/apple_orange_distance_problem.py","file_name":"apple_orange_distance_problem.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42007845961","text":"# error highlighting style sheet\r\nhighlightStyleSheet = \"\"\"/*HIGHLIGHT STYLESHEET START*/\r\n\t\t\t\t\t\t border-color: rgb(248, 28, 109);\r\n\t\t\t\t\t\t background-color: rgba(248, 28, 109, 50);\r\n\t\t\t\t\t\t selection-background-color: rgba(248, 28, 109, 250);\r\n\t\t\t\t\t\t /*HIGHLIGHT STYLESHEET END*/\"\"\"\r\n\r\n# error highlighting\r\ndef highlightWidget(widget, focus=True):\r\n\tglobal highlightStyleSheet\r\n\t# create a new style sheet without removing the previous one (if present)\r\n\tstylesheet = f\"{widget.styleSheet()}\\n{highlightStyleSheet}\"\r\n\t# set the new style sheet to the widget\r\n\twidget.setStyleSheet(stylesheet)\r\n\r\n\t# set focus to the widget if permitted\r\n\tif focus:\r\n\t\twidget.setFocus(True)\r\n\r\n# remove highlight error\r\ndef unhighlightWidget(widget, focus=False):\r\n\tglobal highlightStyleSheet\r\n\t# remove the highlighting style sheet only\r\n\tstylesheet = widget.styleSheet().replace(highlightStyleSheet, '')\r\n\t# set the new style sheet to the widget\r\n\twidget.setStyleSheet(stylesheet)\r\n\r\n\t# set focus to the widget if permitted\r\n\tif focus:\r\n\t\twidget.setFocus(True)\r\n","repo_name":"MahirHamiAbrar/ImageToAsciiGui","sub_path":"ImageToAsciiApp/WidgetHighlighter.py","file_name":"WidgetHighlighter.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29008270258","text":"# https://leetcode.com/problems/unique-paths/description/\n\nclass Solution:\n def uniquePaths(self, m: int, n: int) -> int:\n dp = [[0 for _ in range(n+1)] for _ in range(m+1)]\n\n for i in range(1, m+1):\n for j in range(1, n+1):\n if i == 1 and j == 1:\n dp[i][j] = 1\n continue\n dp[i][j] = dp[i-1][j] + dp[i][j-1]\n return dp[-1][-1]\n \n 
\n","repo_name":"harshraj22/problem_solving","sub_path":"solution/leetcode/62.py","file_name":"62.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"61"} +{"seq_id":"32223726731","text":"from typing import List\n\nclass Solution:\n def hIndex(self, citations: List[int]) -> int:\n for index, citation in enumerate(citations):\n h = len(citations) - index\n if h <= citation:\n return h \n return 0\n\nsol = Solution()\ncitations = [1,2,2]\nres = sol.hIndex(citations)\nprint(res)\n\n","repo_name":"chrisbyd/leetcode_chris","sub_path":"array/275.py","file_name":"275.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3461455993","text":"from pytube import YouTube\nfrom lameenc import Encoder \nfrom AudioManager import * \nimport tempfile\nfrom moviepy.editor import AudioFileClip\n\n# Creates a temp dir\ntemp_dir = tempfile.TemporaryDirectory()\ntemp_path = temp_dir.name\nvideo_url = input(\"Insert video URL: \")\n\nis_wav = False\nbitrate = None\n\nwhile True:\n print(\"\\nSelect audio quality:\\n 1. Low (128kbps)\\n 2. Medium (256kbps)\\n 3. High (320kbps)\\n 4. HiFi (WAV)\\n\")\n choice = int(input(\"Your choice: \"))\n\n if choice == 1:\n bitrate = 128\n break\n\n elif choice == 2:\n bitrate = 256\n break\n\n elif choice == 3:\n bitrate = 320\n break\n\n elif choice == 4:\n is_wav = True\n break\n\n# Creates instances of Youtube and Encoder\nyoutube = YouTube(video_url)\nencoder = Encoder()\n\n# Creates an AudioManager instance with the previous ones as parameters\naudio_manager = AudioManager(youtube=youtube, encoder=encoder, temp_path=temp_path)\nformatted_filename = audio_manager.get_formatted_filename()\nwav_path = audio_manager.get_wav_path(formatted_filename)\n\n# Converts the audio\nwebm_path = audio_manager.get_webm(formatted_filename)\nwebm_file = AudioFileClip(webm_path)\naudio_manager.webm_to_wav(formatted_filename, webm_file, webm_path)\n\nif is_wav is True:\n audio_manager.save_as_wav(formatted_filename, wav_path)\nelse:\n audio_manager.save_as_mp3(bitrate, formatted_filename)\n\n\nprint(\"Song converted successfully.\")\n\n# Closes the temp dir\ntemp_dir.cleanup()","repo_name":"emiaguero/youtube-audio-converter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27815994947","text":"#!/usr/bin/python\n# coding: utf-8\n\nfrom common import *\n\nimport json\n\nfrom ppf.api import Data\nfrom ppf.indexer import Article\nfrom tools.post import writeArticle, updateIndex, \\\n updateServerFile, deleteComment, writeComment, \\\n updateArticle, updateFile, updateFileWithFtp\n\n\nclass Post_test(TestCase):\n\n @classmethod\n def setUpClass(cls):\n destroy() # Remove pre-installed\n init()\n\n @classmethod\n def tearDownClass(cls):\n destroy()\n\n def test_writeArticles(self):\n\n doc_id = '1111111111'\n _base64_content = \"열라 테스트\"\n dd = writeArticle(doc_id, _base64_content)\n assert dd == '\\nOK\\n'\n\n def test_updateIndex(self):\n doc_id = '1111111111'\n _jsonBase64_dict = {'category': 'emacs planner',\n 'date': '1108170951',\n 'author': 'this is author',\n 'update': '1108170952',\n 'title': '가나다 우리는 사는 사람들 이 닫 다다. 
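The grid-DP `uniquePaths` solution above keeps a full (m+1) x (n+1) table; since each cell only reads its left and upper neighbour, one row suffices. A rolling-row sketch with the same recurrence and O(n) space:

```python
def unique_paths(m: int, n: int) -> int:
    # dp[j] holds the number of paths to column j of the current row;
    # the old dp[j] is "from above", the fresh dp[j-1] is "from the left".
    dp = [1] * n
    for _ in range(1, m):
        for j in range(1, n):
            dp[j] += dp[j - 1]
    return dp[-1]

assert unique_paths(3, 7) == 28
assert unique_paths(3, 2) == 3
```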
fjdksf kdsf ',\n 'doc_id': '1111111111'}\n dd = updateIndex(doc_id, _jsonBase64_dict)\n assert dd == '\\nOK\\n'\n\n def test_writeComment_deleteComment(self):\n doc_id = '1111111111'\n _base64_content = '어찌하다가 우리가 이런일에 \\\\n 아'\n _base64_name = '달수'\n password = '12'\n\n dd = writeComment(doc_id, _base64_content, _base64_name, password)\n assert dd == '\\nOK\\n'\n\n doc_id = '1111111111'\n comment_id = '1'\n\n dd = deleteComment(doc_id, comment_id)\n assert dd == '\\nOK\\n'\n\n dd = deleteComment(doc_id, comment_id)\n assert dd == '\\nFalse\\n'\n\n def test_updateFile(self):\n filename = 'bbk'\n _base64_content = 'abc'\n dd = updateFile(filename, _base64_content)\n assert dd == '\\nOK\\n'\n\n dd = updateFile(filename, _base64_content)\n assert dd == '\\nFalse\\n'\n\n # TODO: I need a method to identify the result of test_updateArticle.\n # One way is to add a error handling for updateArticle.\n def test_updateArticle(self):\n # Let's use dummy file\n doc_id = '9999999999'\n updateArticle(doc_id)\n\n # Client side index updated?\n fd = file(config.index_filename(), 'r')\n client_index = fd.read()\n fd.close()\n\n # Server side index updated?\n # Server side was tested in other member.\n\n def test_updateFileWithFtp(self):\n\n dummy2_name = 'dummy.py'\n dummy2_path = config.root_abpath + dummy2_name\n fd = file(dummy2_path, 'w')\n fd.write('dummy')\n fd.close()\n\n # Root directory uploading\n # Check absolute path\n dummy2_server_path = config.server_root_directory + '/' + dummy2_name\n updateFileWithFtp(dummy2_path)\n assert os.path.exists(dummy2_server_path) == True\n os.remove(dummy2_server_path)\n\n os.remove(dummy2_path)\n\n\nclass TestingTools_test(TestCase):\n @classmethod\n def setUpClass(cls):\n destroy()\n init()\n updateArticle(Var.dummy_id)\n # Write a comment for dummy article\n doc_id = Var.dummy_id\n _base64_content = '어찌하다가 우리가 이런일에 \\\\n 아'\n _base64_name = '달수'\n password = '12'\n writeComment(doc_id, _base64_content, _base64_name, password)\n \n @classmethod\n def tearDownClass(cls):\n destroy()\n\n def test_basic(self):\n b = get_browser()\n query = Data()\n query.cmd = 'articles_length'\n\n url = config.url_root + 'api.py?' + query.urlencode()\n b.go(url)\n result = b.get_html()\n # For Secure key. server is using secure key?\n self.assertNotEqual(result.find('Error'), -1)\n\n # For articles_length\n query.secure_key = config.SECURE_KEY\n url = config.url_root + 'api.py?' + query.urlencode()\n\n b.go(url)\n result = b.get_html()\n self.assertEqual(libs.removeBlank(result), '2')\n\n # For comments_length\n query.cmd = 'comments_length'\n query.doc_id = Var.dummy_id\n\n url = config.url_root + 'api.py?' + query.urlencode()\n b.go(url)\n result = b.get_html()\n self.assertEqual(libs.removeBlank(result), '1')\n\n # For article_json\n query.cmd = 'article_json'\n url = config.url_root + 'api.py?' + query.urlencode()\n b.go(url)\n result = b.get_html()\n # Get the index of the local dummy\n article = Article()\n article.set(Var.dummy_id)\n result = json.loads(libs.removeBlank(result))\n self.assertEqual(result['doc_id'], unicode(article.__dict__['doc_id']))\n\n","repo_name":"ptmono/ppf","sub_path":"tests/api_test.py","file_name":"api_test.py","file_ext":"py","file_size_in_byte":4743,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23609997151","text":"#! 
/usr/bin/python\n\n# (c) 2010 Wott (http://wott.net.ru/ , wott@gmail.com)\n\n__author__="Wott"\n__date__ ="$23.05.2010 13:05:47$"\n\nimport sys\n\ndef case(N,m):\n    ret = 0\n    #print ([[(i[0]>j[0] and i[1]<j[1]) or (i[0]<j[0] and i[1]>j[1]) for i in m] for j in m])\n    return (sum([sum([1 if (i[0]-j[0])*(i[1]-j[1])<0 else 0 for i in m]) for j in m])/2)\n    # unreachable O(N^2) cross-check (dead code after the return above):\n    for i in range(N):\n        for j in range(N):\n            if (m[i][0]>m[j][0] and m[i][1]<m[j][1]) or (m[i][0]<m[j][0] and m[i][1]>m[j][1]): ret+=1\n    ret=ret//2\n    print("LINE: %d,%s => %d" % (N,m,ret))\n    return(ret)\n\ndef main():\n    args = sys.argv[1:]\n    if not args:\n        print("Usage: %s in.file out.file" % sys.argv[0])\n        return\n    with open(args[0]) as infile:\n        with open(args[1],'w') as outfile:\n            T = int(infile.readline())\n            for t in range(T):\n                N=int(infile.readline().rstrip())\n                m = [[int(i) for i in infile.readline().rstrip().split()] for n in range(N)]\n                c = case(N,m)\n                outfile.write("Case #%d: %d\\n" % (t+1,c))\n\nif __name__ == '__main__':\n    main()\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_62/116.py","file_name":"116.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"17755868453","text":"class Solution(object):\n    def candy(self, ratings):\n        \"\"\"\n        :type ratings: List[int]\n        :rtype: int\n        \"\"\"\n        if not ratings:\n            return 0\n        count=1\n        last=1\n        i=1\n        while i < len(ratings):\n            if ratings[i]>ratings[i-1]:\n                last+=1\n                count+=last\n            elif ratings[i]==ratings[i-1]:\n                last=1 # If two kids have equal ratings, one kid can have less candy, we can drop it to 1\n                count+=last\n            else:\n                num=0\n                # walk down the whole descending run\n                while i<len(ratings) and ratings[i]<ratings[i-1]:\n                    num+=1\n                    i+=1\n                if num>=last:\n                    count+=num+1-last #need to add this much to last position first\n                count+=(num+1)*num/2 #need to add the area of the triangle\n                last=1\n                i-=1 # need to subtract 1 to neutralize the effect of the increment\n            i+=1\n        return count","repo_name":"ErkangXu/Leetcode","sub_path":"135 Candy/candy.py","file_name":"candy.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"26045232574","text":"from django import template\nregister = template.Library()\nimport re\nfrom otree.api import safe_json\n\n\n@register.inclusion_tag('clicktracking/widget.html', takes_context=True)\ndef clicktrack(context, *args, **kwargs):\n    if 'participant' not in context:\n        context['vars_for_clicktracking'] = {}\n        return context\n\n    #print(\"Who made context happen? {}\".format(context))\n    participant = context['participant']\n\n    channel = 'clicktracking-{}'.format(participant.code)\n\n    # channel name should not contain illegal chars,\n    # so that it can be used in JS and URLs\n    if not re.match(r'^[a-zA-Z0-9_-]+$', channel):\n        raise ValueError(\n            \"'channel' can only contain ASCII letters, numbers, underscores, and hyphens. \"\n            \"Value given was: {}\".format(context['channel']))\n\n    context['channel'] = channel\n\n    vars_for_js = {\n        'channel': context['channel'],\n        'participant_code': participant.code,\n    }\n\n    context['vars_for_clicktracking'] = safe_json(vars_for_js)\n\n    return context\n","repo_name":"SDALMinerva/coordination-game","sub_path":"clicktracking/templatetags/clicktracking.py","file_name":"clicktracking.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"31773856511","text":"#Encodes UTF-16 (big-endian, BMP only) to UTF-8\n\nimport sys\nimport array\n\ndef toBits(ipText):\n    # one byte -> zero-padded 8-bit string\n    bits = bin(array.array('B', ipText)[0])\n    bits = bits.replace('b', '')\n    if(len(bits)>8):\n        bits=bits[-8:]\n    bits = '00000000'[len(bits):] + bits\n    return bits\n\ndef toUTF8(ipChar):\n    utf8Arr = []\n    # convert to bits\n    bitSeq1 = toBits(ipChar[0])\n    bitSeq2 = toBits(ipChar[1])\n    # 0 - 127 1 byte\n    # 128 - 2047 2 byte\n    # 2048 - 65535 3 byte\n    # 65536 - 1114111 4 byte\n    \n    # input count\n    number = int(bitSeq1 + bitSeq2, 2)\n    numberBits = bitSeq1 + bitSeq2\n    \n    if 0 <= number <= 127:\n        numBytes = 1\n    elif 128 <= number <= 2047:\n        numBytes = 2\n    elif 2048 <= number <= 65535:\n        numBytes = 3\n    elif 65536 <= number <= 1114111:\n        numBytes = 4\n    \n    if(numBytes > 1):\n        # prepare MSBs, e.g. U+00E9 (number 233) -> 110xxxxx 10xxxxxx -> 0xC3 0xA9\n#         print numberBits\n#         print number\n        msb = ''\n        for i in range(numBytes):\n            msb += '1'\n        msb = msb + '0'\n\n        for i in range(numBytes - 1):\n            # get 6 bits by 6 bits from the last\n            last6Bits = numberBits[-6:]\n            numberBits = numberBits[:-6]\n            num = '10' + last6Bits\n            utf8Arr.append(int(num, 2))\n#             print num\n        \n        msb = msb + numberBits[-(8 - len(msb)):]\n#         print msb\n        utf8Arr.append(int(msb,2))\n    else:\n        #num of bytes required is one. straight conversion\n        # convert the low byte of this UTF-16 code unit straight to an int\n        utf8Arr.append(int(toBits(ipChar[1]), 2))\n        \n    utf8Arr.reverse()\n    return utf8Arr\n#Take input file path\nipFile = sys.argv[1]\nnewFileBytes = [] \n    \nwith open(ipFile, 'rb') as f:\n    ipText = f.read(2)    \n    while ipText:\n        newFileBytes += toUTF8(ipText)\n        ipText = f.read(2)\n        \n    f.close()\n\n# Write binary data to a file\nwith open('utf8encoder_out.txt', 'wb') as f:\n    f.write(bytearray(newFileBytes))\n    f.close()\n","repo_name":"smadha/MissionNLP","sub_path":"src/nlp/as1/utf8encoder.py","file_name":"utf8encoder.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"7347464900","text":"def solution(n, roads, sources, destination):\n    INF = float('inf')\n    li = [INF]*(n+1)\n    road = [[] for _ in range(n+1)]\n    for a,b in roads:\n        road[a].append(b)\n        road[b].append(a)\n    \n    from collections import deque\n    que = deque()\n    que.append((0,destination)) # count, index\n    \n    while que:\n        count, index = que.popleft()\n        if li[index] <= count:\n            continue\n        li[index] = count\n        count += 1\n        for i in road[index]:\n            que.append((count,i))\n    \n    answer = []\n    for s in sources:\n        count = li[s]\n        if count == INF:\n            answer.append(-1)\n        else:\n            answer.append(li[s])\n    \n    return answer","repo_name":"soulchicken/crush-programmers-cote","sub_path":"Python/Level_3/16_부대복귀.py","file_name":"16_부대복귀.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"16316175135","text":"from pathlib import Path\n\nimport torch\n\nimport Resources.training as r\nfrom Models.erfh5_ConvModel import S80Deconv2ToDrySpotEff\nfrom Pipeline.data_gather import 
get_filelist_within_folder_blacklisted\nfrom Pipeline.data_loader_dryspot import DataloaderDryspots\nfrom Trainer.ModelTrainer import ModelTrainer\nfrom Trainer.evaluation import BinaryClassificationEvaluator\nfrom Utils.training_utils import read_cmd_params\n\nif __name__ == \"__main__\":\n args = read_cmd_params()\n\n dl = DataloaderDryspots(sensor_indizes=((1, 4), (1, 4)))\n\n # def get_sampler(data_source):\n # return RandomOverSampler(data_source, multiply_by=2)\n\n m = ModelTrainer(\n lambda: S80Deconv2ToDrySpotEff(pretrained=\"deconv_weights\",\n checkpoint_path=r.chkp_S80_to_ff2,\n freeze_nlayers=9,\n round_at=0.8),\n data_source_paths=r.get_data_paths_base_0(),\n save_path=r.save_path,\n load_datasets_path=r.datasets_dryspots,\n cache_path=r.cache_path,\n batch_size=8192,\n train_print_frequency=100,\n epochs=1000,\n num_workers=75,\n num_validation_samples=131072,\n num_test_samples=1048576,\n data_processing_function=dl.get_sensor_bool_dryspot,\n data_gather_function=get_filelist_within_folder_blacklisted,\n loss_criterion=torch.nn.MSELoss(),\n optimizer_function=lambda params: torch.optim.AdamW(params, lr=0.0001),\n classification_evaluator_function=lambda summary_writer:\n BinaryClassificationEvaluator(summary_writer=summary_writer),\n # lr_scheduler_function=lambda optim: ExponentialLR(optim, 0.8),\n # sampler=get_sampler\n )\n\n if not args.run_eval:\n m.start_training()\n else:\n m.inference_on_test_set(\n Path(args.eval),\n Path(args.checkpoint_path),\n lambda summary_writer: BinaryClassificationEvaluator(\n Path(args.eval) / \"eval_on_test_set\",\n ),\n )\n","repo_name":"isse-augsburg/rtm-predictions","sub_path":"ModelTrainerScripts/model_trainer_sensor_80_to_dryspot_thresholded.py","file_name":"model_trainer_sensor_80_to_dryspot_thresholded.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"2040823828","text":"#\n# @lc app=leetcode id=105 lang=python\n#\n# [105] Construct Binary Tree from Preorder and Inorder Traversal\n#\nclass TreeNode(object):\n def __init__(self, v=0, left=None, right=None):\n self.v = v\n self.left = left\n self.right = right\n\n# @lc code=start\n# Definition for a binary tree node.\nclass Solution(object):\n def buildTree(self, preorder, inorder):\n \"\"\"\n :type preorder: List[int]\n :type inorder: List[int]\n :rtype: TreeNode\n \"\"\"\n\n # LEN = len(inorder)\n\n # mDict = {}\n # for i, v in enumerate(inorder):\n # mDict[v] = i\n\n\n # def dfs():\n\n # val = preorder.pop(0)\n # node = TreeNode(val)\n # index = mDict[val]\n # inorder[index] = None\n\n # # left\n # if index > 0 and inorder[index-1] != None:\n # node.left = dfs()\n\n # # right\n # if index < LEN-1 and inorder[index+1] != None:\n # node.right = dfs()\n\n # return node\n \n # return dfs()\n\n LEN = len(inorder)\n in_ = {}\n for index in range(len(inorder)):\n in_[inorder[index]] = index\n\n def rec():\n\n v = preorder.pop(0)\n node = TreeNode(v)\n index = in_[v]\n inorder[index] = None\n\n if index > 0 and inorder[index-1] != None:\n node.left = rec()\n\n if index < LEN-1 and inorder[index+1] != None:\n node.right = rec()\n\n return node\n \n return rec()\n\n\n\n \n \n# @lc code=end\nSolution().buildTree(preorder = [3,9,20,15,7], inorder = 
[9,3,15,20,7])\n","repo_name":"aryanjain28/DSA","sub_path":"revision_2_0.py/105.construct-binary-tree-from-preorder-and-inorder-traversal.py","file_name":"105.construct-binary-tree-from-preorder-and-inorder-traversal.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"28631958156","text":"import datetime\nimport json\nimport os\nimport threading\nimport time\n\n#数字化编码的音频数据\nimport wave\n\nfrom collections import Counter\n\nimport cv2\nimport numpy as np\nimport pyaudio\nfrom draft import requests\nfrom aip import AipOcr, AipSpeech\nfrom PIL import Image, ImageDraw, ImageFont\nfrom pydub import AudioSegment\n\nfrom utils.utils import image_to_base64, image_to_base64_dep#少了一个函数\n\n\n#对于整数的+-*/也是用浮点数方法计算,所以对于计算的四种错误,我们会设置不同的方式处理\n# warn会提示警告,ignore不采取措施,raise显示错误,等等详情看文档\nolderr = np.seterr(all='ignore')\n\n#-----------------------------#\n# 记录声音用的client\n#-----------------------------#\n#这里为啥要开3个应用\nAPP_ID = \"22729954\"\nAPI_KEY = \"LrVGobWoa29Q1lkimusywdwA\"\nSECRET_KEY = \"wacYgjZG8mPMTGAGuT7oqOCv0zinkmux\"\nclient = AipSpeech(APP_ID, API_KEY, SECRET_KEY)\n\n#-----------------------------#\n# 输出声音用的client\n#-----------------------------#\nAPP_ID = \"22808383\"\nAPI_KEY = \"Cu8BfWlmVG70sOeyGaQlCkcV\"\nSECRET_KEY = \"GhFN86sg3FpB0h7WVCSOprtgi2wqYAyx\"\nclient_for_speak = AipSpeech(APP_ID, API_KEY, SECRET_KEY)\n\n#-----------------------------#\n# 文字识别用的client\n#-----------------------------#\nAPP_ID = \"22729886\"\nAPI_KEY = \"EDrxKsEyjZ6VZXPMc9yZrnlp\"\nSECRET_KEY = \"Ax5HgTPasDqpLocaMIFQEjGWI6bQWkdH\"\nclient_for_ocr = AipOcr(APP_ID, API_KEY, SECRET_KEY)\n\n#-----------------------------#\n# 不同模式所使用的类\n#-----------------------------#\nmode_classes = [\n [],\n ['person','cup','apple','chair','diningtable','book','banana','laptop',\"cell phone\"],\n ['person','car','bicycle','motorbike','bus','truck','traffic light','chair','diningtable',\"traffic sign\"],\n ['book'],\n ['person','car','bicycle','motorbike','bus','truck','chair','diningtable',],\n [],\n [],\n ['chair'],\n ['cup'],\n]\n\n# ----------------------------#\n# 用于记录声音\n# ----------------------------#\nclass Recorder():\n #多通道声音,1024位为一个数据块,传输速率为rate\n def __init__(self, chunk=1024, channels=1, rate=16000):\n self.CHUNK = chunk\n #pyaudio的一种规定音频格式\n self.FORMAT = pyaudio.paInt16#定义有符号整数16位输入格式\n self.CHANNELS = channels\n self.RATE = rate\n self._running = True\n self._frames = []\n\n #考虑多线程的重要性,防止阻塞和等待\n def start(self):\n #新建音频下载线程并且传递参数\n threading._start_new_thread(self.__recording, ())\n\n#播放语音的函数\n def __recording(self):\n #控制系统运行\n self._running = True\n #接受传输帧\n self._frames = []\n #实例化一个音频播放系统\n p = pyaudio.PyAudio()\n #读入数据流\n stream = p.open(format=self.FORMAT,\n channels=self.CHANNELS,\n rate=self.RATE,\n input=True,\n frames_per_buffer=self.CHUNK)\n while (self._running):\n #chunk是数据流块\n data = stream.read(self.CHUNK)\n self._frames.append(data)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n def stop(self):\n self._running = False\n\n #设置音频数据的操作\n #存储函数\n def save(self, filename):\n p = pyaudio.PyAudio()\n if not filename.endswith(\".wav\"):#检查后缀文件\n filename = filename + \".wav\"\n\n #音频文件都用二进制读写\n wf = wave.open(filename, 'wb')\n\n #声道数\n wf.setnchannels(self.CHANNELS)\n wf.setsampwidth(p.get_sample_size(self.FORMAT))\n wf.setframerate(self.RATE)\n\n '''\n Python3的字符串的编码语言用的是unicode编码,\n 由于Python的字符串类型是str,在内存中以Unicode表示,一个字符对应若干字节,\n 如果要在网络上传输,或保存在磁盘上就需要把str变成以字节为单位的bytes \n '''\n #将字符串形式的帧 :列表形式 
全部存储到filename中\n wf.writeframes(b''.join(self._frames))\n wf.close()\n print(\"Saved\")\n\n\n#这个函数干嘛的,怪怪的\n#计算摄像头-物体距离的式子: 均值\ndef inlinerDepth(object_photo):\n #reshape(-1) 转换成一列\n seq = object_photo.reshape(-1)\n #取出<4的元素===>数组\n seq = seq[(seq < 4)]\n #标准差和均值\n std = np.std(seq)\n mean = np.mean(seq)\n #\n inlier = seq[np.abs(seq - mean) < 1.2 *std]\n if len(inlier)==0:\n #中位数\n return np.median(object_photo)\n else:\n return np.mean(inlier)\n\n# ----------------------------#\n# 读取声音文件\n# ----------------------------#\n\n#读取声音文件要用二进制读取\n#二进制读取全部内容\ndef get_file_content(filePath):\n with open(filePath, 'rb') as fp:\n return fp.read()\n\n# ----------------------------#\n# 发送图片的代码\n# ----------------------------#\n#request库函数\n\n#这里向服务器写数据\ndef json_send(dataPModel,url):\n #请求头\n #请求报文可通过一个“Accept”报文头属性告诉服务端 客户端接受什么类型的响应。\n #如下报文头相当于告诉服务端,俺客户端能够接受的响应类型仅为纯文本数据啊,\n # 你丫别发其它什么图片啊,视频啊过来,那样我会歇菜的\n\n #客户端支持的访问服务器内容,客户端接收的格式和客户端的编码方式\n headers = {\"Content-type\": \"application/json\", \"Accept\": \"text/plain\", \"charset\": \"UTF-8\"}\n #dumps将json数据模型dict格式转换成字符串格式\n response = requests.post(url=url, headers=headers, data=json.dumps(dataPModel))\n response_text = response.text\n #loads再转换成dict模式\n return json.loads(response_text)\n\n\ndef run_send_sentence(sentence, sentence_url):\n try:\n dataPModel = {\"sentence\": sentence}\n # result = json_send(dataPModel, self.urls['sentence_url'])[\"result\"]\n\n #只要result对应的body值\n result = json_send(dataPModel, sentence_url)[\"result\"]\n except:\n pass\n\n# ----------------------------#\n# 用于检测判断与播放声音\n# ----------------------------#\nclass Detect():\n def __init__(self, height, width, mode=0, face_mode=0, seg_mode=0, \n navigation_mode=0, navigation_place=0, urls=None, speck_speed=8):\n # ----------------------------#\n # 目标信息\n # ----------------------------#\n self.object_bounding_boxes = []\n self.object_distance = []\n self.object_class_all = []\n self.object_x_center = []\n self.object_y_center = []\n\n # ----------------------------#\n # 交通信息\n # ----------------------------#\n self.traffic_bounding_boxes = []\n self.traffic_class_all = []\n\n # ----------------------------#\n # 文字识别结果\n # ----------------------------#\n self.result_list = []\n self.orc_save_sentence = \"\"\n\n # ----------------------------#\n # 人脸信息\n # ----------------------------#\n self.face_bounding_boxes = []\n self.face_class_all = []\n self.face_x_center = []\n self.face_y_center = []\n\n # ----------------------------#\n # 斑马线与盲道信息\n # ----------------------------#\n self.pspnet_bounding_boxes = []\n self.pspnet_center = []\n self.pspnet_class_all = [\"盲道\", \"斑马线\"]\n\n # ----------------------------#\n # 导航信息\n # ----------------------------#\n self.navigation_place = navigation_place\n self.navigation_end = 0\n self.navigation_sentence = \"\"\n\n # ----------------------------#\n # 输入宽高\n # ----------------------------#\n self.height = height\n self.width = width\n self.image = np.zeros([height,width,3])\n self.depth = np.zeros([height,width])\n\n # ----------------------------#\n # 各种模式\n # ----------------------------#\n self.mode = mode\n self.face_mode = face_mode\n self.seg_mode = seg_mode\n self.navigation_mode = navigation_mode\n\n\n self.wake_signal = False\n self.speak_flag = True\n\n # ----------------------------#\n # Flask用到的各种地址\n # ----------------------------#\n self.urls = urls\n\n # ----------------------------#\n # 语音速度\n # ----------------------------#\n self.speck_speed = speck_speed\n\n #服务器端的数据\n self.object_list = 
self._get_class(\"model_data/yolo_tiny_blind.txt\")\n self.object_dic = {\n \"person\": \"人\", \"car\": \"汽车\", \"bicycle\": \"自行车\", \"motorbike\": \"摩托车\", \"bus\": \"公交车\",\n \"truck\": \"卡车\", \"traffic light\": \"交通信号灯\", \"cup\": \"杯子\", \"spoon\": \"勺子\",\n \"bowl\": \"碗\", \"apple\": \"苹果\", \"chair\": \"椅子\", \"diningtable\": \"桌子\", \"book\": \"书\",\n \"toothbrush\": \"牙刷\", \"banana\": \"香蕉\", \"laptop\": \"平板\", \"cell phone\": \"手机\",\n \"traffic sign\": \"交通标志牌\"\n }\n\n #文件有这个路径吗?\n #列表相加,直接连接就可以\n self.traffic_list = self._get_class(\"model_data/traffic_classes.txt\") + [\"无颜色\", \"绿灯\", \"红灯\"]\n\n # ---------------------------------------------------#\n # 获得所有的分类\n # ---------------------------------------------------#\n def _get_class(self, classes_path):\n #下面这个函数是用来展开缩写的路劲的,比如~,.等\n classes_path = os.path.expanduser(classes_path)\n with open(classes_path) as f:\n class_names = f.readlines()\n class_names = [c.strip() for c in class_names]\n #返回类别列表\n return class_names\n\n def recognize_init(self):\n # ----------------------------#\n # 目标信息\n # ----------------------------#\n self.object_bounding_boxes = []\n self.object_class_all = []\n self.object_x_center = []\n self.object_y_center = []\n self.object_distance = []\n\n # ----------------------------#\n # 交通信息\n # ----------------------------#\n self.traffic_bounding_boxes = []\n self.traffic_class_all = []\n\n # ----------------------------#\n # 文字识别结果\n # ----------------------------#\n self.result_list = []\n self.orc_save_sentence = \"\"\n\n # ----------------------------#\n # 人脸信息\n # ----------------------------#\n self.face_bounding_boxes = []\n self.face_class_all = []\n self.face_x_center = []\n self.face_y_center = []\n\n # ----------------------------#\n # 斑马线与盲道信息\n # ----------------------------#\n self.pspnet_bounding_boxes = []\n self.pspnet_center = []\n\n # ----------------------------------------#\n # 根据预测框的位置计算中心\n # ----------------------------------------#\n def computer_object_center(self):\n self.object_x_center = []\n self.object_y_center = []\n for bounding_box in self.object_bounding_boxes:\n center = (bounding_box[2] + bounding_box[0]) / 2\n self.object_x_center.append(center)\n for bounding_box in self.object_bounding_boxes:\n center = (bounding_box[3] + bounding_box[1]) / 2\n self.object_y_center.append(center)\n\n # ----------------------------------------#\n # 计算目标所处的方位\n # ----------------------------------------#\n def computer_pos(self, i):\n x_pos = self.object_x_center[i]\n y_pos = self.object_y_center[i]\n if x_pos < self.width * 0.3 and self.height * 0.7 >= y_pos >= self.height * 0.3:\n return \"左边\"\n elif x_pos < self.width * 0.3 and y_pos > self.height * 0.7:\n return \"左下方\"\n elif x_pos < self.width * 0.3 and y_pos < self.height * 0.3:\n return \"左上方\"\n elif self.width * 0.7 >= x_pos >= self.width * 0.3 and self.height * 0.7 > y_pos > self.height * 0.3:\n return \"正前方\"\n elif self.width * 0.7 >= x_pos >= self.width * 0.3 and y_pos > self.height * 0.7:\n return \"正下方\"\n elif self.width * 0.7 >= x_pos >= self.width * 0.3 and y_pos < self.height * 0.3:\n return \"正上方\"\n elif x_pos > self.width * 0.7 and self.height * 0.7 >= y_pos >= self.height * 0.3:\n return \"右边\"\n elif x_pos > self.width * 0.7 and y_pos > self.height * 0.7:\n return \"右下方\"\n elif x_pos > self.width * 0.7 and y_pos < self.height * 0.3:\n return \"右上方\"\n\n # ----------------------------------------#\n # 计算目标的距离\n # ----------------------------------------#\n def computer_object_distance(self, 
depth_image):\n for box in self.object_bounding_boxes:\n left, top, right, bottom = box\n height = bottom-top\n width = right-left\n crop_image = depth_image[int(top+height*0.3):int(bottom-height*0.3),int(left+width*0.3):int(right-width*0.3)]\n \n distance = inlinerDepth(crop_image)\n self.object_distance.append(distance)\n\n # ----------------------------------------#\n # 根据人脸框的位置计算中心\n # ----------------------------------------#\n def computer_face_center(self):\n self.face_x_center = []\n self.face_y_center = []\n for bounding_box in self.face_bounding_boxes:\n center = (bounding_box[2] + bounding_box[0]) / 2\n self.face_x_center.append(center)\n for bounding_box in self.face_bounding_boxes:\n center = (bounding_box[3] + bounding_box[1]) / 2\n self.face_y_center.append(center)\n\n # ----------------------------------------#\n # 计算人脸所处的方位\n # ----------------------------------------#\n def computer_face_pos(self, i):\n x_pos = self.face_x_center[i]\n y_pos = self.face_y_center[i]\n if x_pos < self.width * 0.3 and self.height * 0.7 >= y_pos >= self.height * 0.3:\n return \"左边\"\n elif x_pos < self.width * 0.3 and y_pos > self.height * 0.7:\n return \"左下方\"\n elif x_pos < self.width * 0.3 and y_pos < self.height * 0.3:\n return \"左上方\"\n elif self.width * 0.7 >= x_pos >= self.width * 0.3 and self.height * 0.7 > y_pos > self.height * 0.3:\n return \"正前方\"\n elif self.width * 0.7 >= x_pos >= self.width * 0.3 and y_pos > self.height * 0.7:\n return \"正下方\"\n elif self.width * 0.7 >= x_pos >= self.width * 0.3 and y_pos < self.height * 0.3:\n return \"正上方\"\n elif x_pos > self.width * 0.7 and self.height * 0.7 >= y_pos >= self.height * 0.3:\n return \"右边\"\n elif x_pos > self.width * 0.7 and y_pos > self.height * 0.7:\n return \"右下方\"\n elif x_pos > self.width * 0.7 and y_pos < self.height * 0.3:\n return \"右上方\"\n\n # ----------------------------------------#\n # 计算盲道所处的方位\n # ----------------------------------------#\n def computer_pspnet_pos(self, i):\n x_pos = self.pspnet_center[i][0]\n y_pos = self.pspnet_center[i][1]\n if x_pos < self.width * 0.3 and self.height * 0.7 >= y_pos >= self.height * 0.3:\n return \"左边\"\n elif x_pos < self.width * 0.3 and y_pos > self.height * 0.7:\n return \"左下方\"\n elif x_pos < self.width * 0.3 and y_pos < self.height * 0.3:\n return \"左上方\"\n elif self.width * 0.7 >= x_pos >= self.width * 0.3 and self.height * 0.7 > y_pos > self.height * 0.3:\n return \"正前方\"\n elif self.width * 0.7 >= x_pos >= self.width * 0.3 and y_pos > self.height * 0.7:\n return \"正下方\"\n elif self.width * 0.7 >= x_pos >= self.width * 0.3 and y_pos < self.height * 0.3:\n return \"正上方\"\n elif x_pos > self.width * 0.7 and self.height * 0.7 >= y_pos >= self.height * 0.3:\n return \"右边\"\n elif x_pos > self.width * 0.7 and y_pos > self.height * 0.7:\n return \"右下方\"\n elif x_pos > self.width * 0.7 and y_pos < self.height * 0.3:\n return \"右上方\"\n\n def recognize_all(self, image, depth):\n '''\n self.mode的值可选范围为:0,1,2,3,4,5,6,7,8\n 0为无工作状态\n 1为室内模式\n 2为室外模式\n 3为阅读模式\n 4为障碍物识别模式\n 5为交通灯识别模式\n 6为交通标志牌识别模式\n 7为椅子识别模式\n 8为杯子识别模式\n \n self.face_mode的值可选范围为:0,1,2,3\n 0为无工作状态\n 1为正常模式\n 2为仅人脸识别\n 3为仅表情识别\n\n self.seg_mode的值可选范围为:0,1\n 0为无工作状态\n 1为开启斑马线识别\n '''\n self.image = image\n self.depth = depth\n self.recognize_init()\n\n #加密到base64编码\n #dataPmodel就是请求和响应的帧\n img_base64 = image_to_base64(image)\n #dataPmodel 就是对应一个字典,我们可以有不同的模式\n dataPModel = {\"img_base64\": img_base64}\n dataPModel[\"mode\"] = self.mode\n dataPModel[\"face_mode\"] = self.face_mode\n\n if self.wake_signal:\n return\n\n if 
self.mode == 1:\n result = json_send(dataPModel, self.urls['object_url'])['result']\n self.object_bounding_boxes, self.object_class_all, self.traffic_bounding_boxes, self.traffic_class_all = \\\n result[\"bounding_boxes\"], result[\"class_all\"], result[\"traffic_boxes\"], result[\"traffic_class\"]\n self.computer_object_center()\n self.computer_object_distance(depth)\n\n if self.mode == 2:\n result = json_send(dataPModel, self.urls['object_url'])['result']\n self.object_bounding_boxes, self.object_class_all, self.traffic_bounding_boxes, self.traffic_class_all = \\\n result[\"bounding_boxes\"], result[\"class_all\"], result[\"traffic_boxes\"], result[\"traffic_class\"]\n self.computer_object_center()\n self.computer_object_distance(depth)\n\n if self.mode == 3:\n result = json_send(dataPModel, self.urls['object_url'])['result']\n self.object_bounding_boxes, self.object_class_all, self.traffic_bounding_boxes, self.traffic_class_all = \\\n result[\"bounding_boxes\"], result[\"class_all\"], result[\"traffic_boxes\"], result[\"traffic_class\"]\n self.computer_object_center()\n self.orc_save_sentence = result[\"orc_save_sentence\"]\n\n if self.mode == 4 or self.mode == 7 or self.mode==8:\n result = json_send(dataPModel, self.urls['object_url'])['result']\n self.object_bounding_boxes, self.object_class_all, self.traffic_bounding_boxes, self.traffic_class_all = \\\n result[\"bounding_boxes\"], result[\"class_all\"], result[\"traffic_boxes\"], result[\"traffic_class\"]\n self.computer_object_center()\n self.computer_object_distance(depth)\n \n if self.mode == 5 or self.mode == 6:\n result = json_send(dataPModel, self.urls['traffic_url'])['result']\n self.object_bounding_boxes, self.object_class_all, self.traffic_bounding_boxes, self.traffic_class_all = \\\n result[\"bounding_boxes\"], result[\"class_all\"], result[\"traffic_boxes\"], result[\"traffic_class\"]\n self.computer_object_center()\n self.computer_object_distance(depth)\n \n if self.face_mode == 1 or self.face_mode == 2 or self.face_mode == 3:\n result = json_send(dataPModel,self.urls['face_url'])['result']\n self.face_bounding_boxes, self.face_class_all = result[\"face_locations\"], result[\"class_all\"]\n self.computer_face_center()\n \n if self.seg_mode == 1:\n result = json_send(dataPModel, self.urls['pspnet_url'])[\"result\"]\n self.pspnet_bounding_boxes = result[\"bounding_boxes\"]\n self.pspnet_center = result[\"centers\"]\n\n def send_images(self, color_image, depth_map):\n if self.navigation_mode:\n img_base64 = image_to_base64(color_image)\n dep_img_base64 = image_to_base64_dep(depth_map*10)\n \n dataPModel = {\"img_base64\": img_base64}\n dataPModel[\"dep_img_base64\"] = dep_img_base64\n dataPModel[\"navigation_mode\"] = self.navigation_mode\n dataPModel[\"navigation_place\"] = self.navigation_place\n result = json_send(dataPModel, self.urls['send_image_url'])['result']\n\n if result[\"navigation_end\"] == 1:\n dataPModel = {\"img_base64\": img_base64}\n dataPModel[\"dep_img_base64\"] = dep_img_base64\n dataPModel[\"navigation_mode\"] = 0\n dataPModel[\"navigation_place\"] = 0\n result = json_send(dataPModel, self.urls['send_image_url'])['result']\n\n self.navigation_end = result[\"navigation_end\"]\n self.navigation_sentence = result[\"navigation_sentence\"]\n\n def process_object(self):\n '''\n self.mode的值可选范围为:0,1,2,3,4,5,6,7,8\n 0为无工作状态\n 1为室内模式\n 2为室外模式\n 3为阅读模式\n 4为障碍物识别模式\n 5为交通灯识别模式\n 6为交通标志牌识别模式\n 7为椅子识别模式\n 8为杯子识别模式\n '''\n # --------------------------#\n # 是否对用户提醒视角调整\n # --------------------------#\n adjust_flag = 
False\n \n sentence = \"\"\n num_all = np.zeros(len(self.object_list))\n pos_cls2pos = []\n pos_cls2dep = []\n # -------------------------------#\n # 创建全部的空列表\n # -------------------------------#\n for _ in num_all:\n pos_cls2pos.append([])\n pos_cls2dep.append([])\n \n # -------------------------------#\n # 判断每个类的数量以及位置\n # -------------------------------#\n for i, classe in enumerate(self.object_class_all):\n num_all[self.object_list.index(classe)] += 1\n pos_cls2pos[self.object_list.index(classe)].append(self.computer_pos(i))\n pos_cls2dep[self.object_list.index(classe)].append(self.object_distance[i])\n\n pos_pos2cls = {}\n pos_pos2depth = {}\n for direction in [\"左边\",\"左下方\",\"左上方\",\"正前方\",\"正下方\",\"正上方\",\"右边\",\"右下方\",\"右上方\"]:\n pos_pos2cls[direction] = {}\n pos_pos2depth[direction] = {}\n\n for i, num in enumerate(num_all):\n signal = 0\n if self.mode == 1:\n if num > 0:\n if self.object_list[i] in [\"person\", \"cup\", \"bowl\", \"banana\", \"apple\", \"laptop\", \"cell phone\"]:\n liangci = \"个\" \n signal = 1\n elif self.object_list[i] in [\"diningtable\", \"chair\"]:\n liangci = \"张\"\n signal = 1\n elif self.object_list[i] in [\"book\"]:\n liangci = \"本\"\n signal = 1\n elif self.object_list[i] in [\"toothbrush\", \"spoon\"]:\n liangci = \"支\"\n signal = 1\n\n if self.mode == 2:\n if num > 0:\n if self.object_list[i] in [\"person\"]:\n liangci = \"个\"\n signal = 1\n elif self.object_list[i] in [\"bicycle\", \"car\", \"motorbike\", \"bus\", \"truck\"]:\n liangci = \"辆\"\n signal = 1\n\n if self.mode == 4:\n if num > 0:\n if self.object_list[i] in [\"person\"]:\n liangci = \"个\"\n signal = 1\n elif self.object_list[i] in [\"diningtable\", \"chair\"]:\n liangci = \"张\"\n signal = 1\n elif self.object_list[i] in [\"bicycle\", \"car\", \"motorbike\", \"bus\", \"truck\"]:\n liangci = \"辆\"\n signal = 1 \n\n if self.mode == 7:\n if num > 0:\n if self.object_list[i] in [\"chair\"]:\n liangci = \"张\"\n signal = 1\n if num == 1:\n adjust_flag = True\n\n if self.mode == 8:\n if num > 0:\n if self.object_list[i] in [\"cup\"]:\n liangci = \"个\" \n signal = 1\n if num == 1:\n adjust_flag = True\n\n if signal == 1:\n pos_outs = Counter(pos_cls2pos[i])\n pos_keys = pos_outs.keys()\n for pos in pos_keys:\n if pos_outs[pos] >= 1:\n pos_pos2cls[pos][self.object_dic[self.object_list[i]]] = str(pos_outs[pos]) + liangci\n pos_pos2depth[pos][self.object_dic[self.object_list[i]]] = \"距离为\"\n \n for index, direction in enumerate(pos_cls2pos[i]):\n if direction == pos:\n if self.object_list[i] in [\"chair\"]:\n if self.mode == 7:\n if pos_cls2dep[i][index] < 0.8:\n return \"已经到达椅子附近。\"\n else:\n pos_cls2dep[i][index] = (pos_cls2dep[i][index]**2 - 0.8)**(1/2)\n \n pos_pos2depth[pos][self.object_dic[self.object_list[i]]] += \"%.2f\"%pos_cls2dep[i][index] + \"米。\"\n\n #-------------------------------#\n # 创建文字\n #-------------------------------#\n for pos in pos_pos2cls.keys():\n if len(pos_pos2cls[pos]) != 0:\n sentence = sentence + pos + \"有\" \n for classe in pos_pos2cls[pos].keys():\n sentence = sentence + pos_pos2cls[pos][classe] + classe + \",\"\n if self.mode == 7 or self.mode == 8:\n sentence = sentence + pos_pos2depth[pos][classe] + \"。\"\n if adjust_flag:\n if pos != \"正前方\":\n sentence = sentence + \"请往\" + pos + \"调整。\"\n\n if sentence != \"\":\n sentence = \"现在播报物体信息!识别到\" + sentence\n print(\"目标信息如下:\", self.object_class_all)\n return sentence\n\n def process_traffic(self):\n '''\n self.mode的值可选范围为:0,1,2,3,4,5,6,7,8\n 0为无工作状态\n 1为室内模式\n 2为室外模式\n 3为阅读模式\n 4为障碍物识别模式\n 5为交通灯识别模式\n 
6为交通标志牌识别模式\n 7为椅子识别模式\n 8为杯子识别模式\n '''\n sentence = \"\"\n red_num = 0\n green_num = 0\n num_all = np.zeros(len(self.traffic_list))\n\n #-------------------------------#\n # 判断每个类的数量\n #-------------------------------#\n for i, classe in enumerate(self.traffic_class_all):\n num_all[self.traffic_list.index(classe)] = num_all[self.traffic_list.index(classe)] + 1\n\n for i, num in enumerate(num_all):\n if self.mode == 2:\n if num > 0:\n if self.traffic_list[i] in [\"红灯\"]:\n red_num = num\n elif self.traffic_list[i] in [\"绿灯\"]:\n green_num = num\n else:\n sentence = sentence + self.traffic_list[i] + \"标志。\"\n\n if self.mode == 5:\n if num > 0:\n if self.traffic_list[i] in [\"红灯\"]:\n red_num = num\n elif self.traffic_list[i] in [\"绿灯\"]:\n green_num = num\n\n if self.mode == 6:\n if num > 0:\n sentence = sentence + self.traffic_list[i] + \"标志。\"\n\n if red_num == 0 and green_num == 0 and sentence != \"\":\n sentence = sentence\n elif red_num >= 1 and green_num == 0:\n if sentence == \"\":\n sentence = str(int(red_num)) + \"个红灯,注意安全!\"\n else:\n sentence = str(int(red_num)) + \"个红灯,注意安全!前方还有\" + sentence\n elif red_num == 0 and green_num >= 1:\n if sentence == \"\":\n sentence = str(int(green_num)) + \"个绿灯,通过马路注意安全!\"\n else:\n sentence = str(int(green_num)) + \"个绿灯,通过马路注意安全!前方还有\" + sentence\n elif red_num >= 1 and green_num >= 1:\n if sentence == \"\":\n sentence = str(int(red_num)) + \"个红灯和\" + str(int(green_num)) + \"个绿灯,无法正确判断是否可以前行,请向周围人群寻求帮助!\"\n else:\n sentence = str(int(red_num)) + \"个红灯和\" + str(int(green_num)) + \"个绿灯,无法正确判断是否可以前行,请向周围人群寻求帮助!前方还有\" + sentence\n\n if sentence != \"\":\n sentence = \"现在播报交通信息!识别到\" + sentence\n print(\"交通信息如下:\", self.traffic_class_all)\n return sentence\n\n def process_ocr(self):\n '''\n self.mode的值可选范围为:0,1,2,3,4,5,6,7,8\n 0为无工作状态\n 1为室内模式\n 2为室外模式\n 3为阅读模式\n 4为障碍物识别模式\n 5为交通灯识别模式\n 6为交通标志牌识别模式\n 7为椅子识别模式\n 8为杯子识别模式\n '''\n sentence = \"\"\n #-------------------------------#\n # 判断是否存在书本\n #-------------------------------#\n num_all = np.zeros(len(self.object_list))\n #-------------------------------#\n # 创建全部的空列表\n # 判断每个类的数量以及位置\n #-------------------------------#\n pos_all = []\n for _ in num_all:\n pos_all.append([])\n\n for i, classe in enumerate(self.object_class_all):\n num_all[self.object_list.index(classe)] = num_all[self.object_list.index(classe)] + 1\n pos_all[self.object_list.index(classe)].append(self.computer_pos(i))\n\n for i, num in enumerate(num_all):\n signal = 0\n if num > 0:\n if self.object_list[i] in [\"book\"]:\n sentence = sentence + str(int(num)) + \"本\" + self.object_dic[self.object_list[i]] + \"。\"\n signal = 1\n\n if signal == 1:\n if \"正前方\" in pos_all[i]:\n sentence = \"一本书,在镜头正前方。\"\n else:\n for index in range(int(num)):\n if num==1:\n sentence = sentence + \"在\" + str(pos_all[i][index]) + \"。\"\n sentence = sentence + \"当前书本并非最佳识别位置,请把镜头往\" + str(pos_all[i][index]) + \"调整。\"\n else:\n sentence = sentence + \"一本在\" + str(pos_all[i][index]) + \"。\"\n sentence = sentence + \"当前书本并非最佳识别位置,请注意调整到正前方。\"\n\n sentence = \"现在播报当页书本检测结果,识别到\" + sentence\n\n if len(self.orc_save_sentence)>0:\n sentence = \"已经完成本页文本识别,可以翻到下一页。\" + sentence + \"获得文字信息如下,\" + self.orc_save_sentence\n else:\n sentence = \"未识别到书本与文字,请注意调整书本的位置。\"\n print(\"文字信息如下:\", self.orc_save_sentence)\n return sentence\n\n def process_face(self):\n '''\n self.face_mode的值可选范围为:0,1,2,3\n 0为无工作状态\n 1为正常模式\n 2为仅人脸识别\n 3为表情识别\n '''\n sentence = \"\"\n unknow_num = 0\n for i, classe in enumerate(self.face_class_all):\n if self.face_mode == 1:\n name = 
classe.split(\";\")[0]\n if name == \"未知\":\n unknow_num += 1\n continue\n emotion = classe.split(\";\")[1]\n sentence = sentence + name + \"在\" + self.computer_face_pos(i) + \",表情为\" + emotion + \"。\"\n\n if self.face_mode == 2:\n name = classe.split(\";\")[0]\n if name == \"未知\":\n unknow_num += 1\n continue\n sentence = sentence + name + \"在\" + self.computer_face_pos(i) + \"。\"\n\n if self.face_mode == 3:\n emotion = classe.split(\";\")[1]\n sentence = sentence + \"一人在\" + self.computer_face_pos(i) + \",表情为\" + emotion + \"。\"\n\n if unknow_num > 0:\n sentence = sentence + str(unknow_num) + \"张人脸未知。\"\n\n # if len(self.face_class_all) >= 1:\n # sentence = str(len(self.face_class_all)) + \"张人脸。\" + sentence\n\n if sentence != \"\":\n sentence = \"现在播报人脸信息!识别到\" + sentence\n\n print(\"人脸信息如下:\", self.face_class_all)\n return sentence\n\n def process_pspnet(self):\n '''\n self.seg_mode的值可选范围为:0,1\n 0为无工作状态\n 1为开启斑马线识别\n '''\n sentence = \"\"\n for i, center in enumerate(self.pspnet_center):\n if self.seg_mode == 1:\n if center[0]==0 and center[1] == 0:\n continue\n sentence = sentence + self.pspnet_class_all[i] + \"在\" + self.computer_pspnet_pos(i) + \"。\"\n\n if len(sentence) > 0:\n sentence = \"现在播报道路信息!识别到\" + sentence\n\n print(\"道路信息如下:\", self.pspnet_center)\n return sentence\n\n def play_wav(self, dir, name, wake_signal=False):\n wf = wave.open(dir + \"/\" + name + \".wav\", 'rb')\n chunk = 1024\n p = pyaudio.PyAudio()\n stream = p.open(\n format=p.get_format_from_width(wf.getsampwidth()),\n channels=wf.getnchannels(),\n rate=wf.getframerate(),\n output=True)\n data = wf.readframes(chunk)\n if wake_signal:\n while len(data) > 0:\n stream.write(data)\n data = wf.readframes(chunk)\n else:\n while len(data) > 0:\n if self.wake_signal:\n break\n stream.write(data)\n data = wf.readframes(chunk)\n stream.close()\n p.terminate()\n\n def mp3towav(self, dir, name):\n mp3 = AudioSegment.from_file(dir + \"/\" + name + \".mp3\", format=\"mp3\")\n # 修改对象参数\n wav = mp3.set_frame_rate(16000)\n wav = wav.set_channels(1)\n wav = wav.set_sample_width(2)\n # 导出wav文件\n wav.export(dir + \"/\" + name + \".wav\", format='wav', )\n\n def record_voice(self, second): # 记录声音\n rec = Recorder()\n # 记录五秒内的声音\n begin = time.time()\n print(\"Start recording\")\n rec.start()\n time.sleep(second)\n print(\"Stop recording\")\n rec.stop()\n # 保存\n fina = time.time()\n t = fina - begin\n print('录音时间为%ds' % t)\n rec.save(\"mp3andwav/order.wav\") # 存取录音\n\n # 判断录音的内容\n word_dict = client.asr(get_file_content(\"mp3andwav/order.wav\"), \"wav\", 16000, {\n 'dev_pid': 1537,\n })\n final_result = \"\"\n # 如果录音为空则无\n if word_dict[\"err_no\"] != 0:\n return final_result\n\n word_list = word_dict[\"result\"]\n # 打印录到的内容\n for i in word_list:\n final_result = final_result + i + \" \"\n print(final_result)\n return final_result\n\n def play_sentence(self, sentence, name=\"sentence\", wake_signal=False):\n # ----------------------------#\n # 转为mp3\n # ----------------------------#\n result = client_for_speak.synthesis(sentence, 'zh', 2, {\n 'vol': 5, 'per': 0, 'spd': self.speck_speed\n })\n if not isinstance(result, dict):\n with open('mp3andwav/' + name +'.mp3', 'wb') as f:\n f.write(result)\n else:\n time.sleep(1)\n return\n # ----------------------------#\n # mp3转wav\n # ----------------------------#\n self.mp3towav(\"mp3andwav\", name)\n\n # ----------------------------#\n # 播放\n # ----------------------------#\n self.play_wav(\"mp3andwav\", name, wake_signal = wake_signal)\n\n def wake_up(self):\n # 判断应该怎么搞\n 
self.wake_signal = True\n sentence = \"请说!\"\n try:\n t1 = threading.Thread(target=run_send_sentence, args=(sentence, self.urls['sentence_url']))\n t1.start()\n except:\n pass\n # ----------------------------#\n # 播放\n # ----------------------------#\n if os.path.exists(\"mp3andwav/IamHere.mp3\"):\n self.play_wav(\"mp3andwav\", \"IamHere\", wake_signal=True)\n else:\n self.play_sentence(sentence,\"IamHere\", wake_signal=True)\n\n final_result = self.record_voice(4)\n\n # 判断应该怎么搞\n sentence = \"请说出正确的模式\"\n try:\n # ----------------------------#\n # 转为mp3\n # ----------------------------#\n if final_result.find(u\"录入\") >= 0:\n self.mode = 0\n self.face_mode = 0\n self.seg_mode = 0\n # ----------------------------#\n # 播放\n # ----------------------------#\n if os.path.exists(\"mp3andwav/say_name.mp3\"):\n self.play_wav(\"mp3andwav\", \"say_name\", wake_signal=True)\n else:\n self.play_sentence(\"请说出人脸名称\",\"say_name\", wake_signal=True)\n\n final_result = self.record_voice(3)\n \n img_base64 = image_to_base64(self.image)\n dataPModel = {\"img_base64\": img_base64}\n dataPModel = {\"name\": final_result}\n\n result = json_send(dataPModel,self.face_record_url)['result']\n\n bounding_boxes, class_all = result[\"face_locations\"], result[\"class_all\"]\n\n if len(bounding_boxes)!=1:\n sentence = \"存在多个人脸或者不存在人脸\"\n else:\n sentence = \"已录入\"\n\n if final_result.find(u\"关闭所有\") >= 0:\n self.mode = 0\n self.face_mode = 0\n self.seg_mode = 0\n sentence = \"关闭所有功能\"\n print(\"关闭所有功能\")\n\n if final_result.find(u\"关闭室内导航\") >= 0:\n self.navigation_mode = 0\n self.navigation_place = 0\n img_base64 = image_to_base64(self.image)\n dep_img_base64 = image_to_base64_dep(self.depth*10)\n\n dataPModel = {\"img_base64\": img_base64}\n dataPModel[\"dep_img_base64\"] = dep_img_base64\n dataPModel[\"navigation_mode\"] = self.navigation_mode\n dataPModel[\"navigation_place\"] = self.navigation_place\n sentence = \"关闭室内导航\"\n print(\"关闭室内导航\")\n\n if final_result.find(u\"关闭室内模式\") >= 0:\n self.mode = 0\n sentence = \"关闭室内模式\"\n print(\"关闭室内模式\")\n\n if final_result.find(u\"关闭室外\") >= 0:\n self.mode = 0\n sentence = \"关闭室外模式\"\n print(\"关闭室外模式\")\n\n if final_result.find(u\"关闭文字\") >= 0:\n self.mode = 0\n sentence = \"关闭文字识别\"\n print(\"关闭文字识别\")\n\n if final_result.find(u\"关闭人脸\") >= 0:\n self.face_mode = 0\n sentence = \"关闭人脸识别\"\n print(\"关闭人脸识别\")\n\n if final_result.find(u\"关闭道路\") >= 0:\n self.seg_mode = 0\n sentence = \"关闭道路识别\"\n print(\"关闭道路识别\")\n\n if final_result.find(u\"开启室内导航\") >= 0 or final_result.find(u\"打开室内导航\") >= 0 or final_result.find(u\"开始室内导航\") >= 0:\n self.navigation_place = 0\n # ----------------------------#\n # 播放\n # ----------------------------#\n if os.path.exists(\"mp3andwav/say_place_name.mp3\"):\n self.play_wav(\"mp3andwav\", \"say_place_name\", wake_signal=True)\n else:\n self.play_sentence(\"请说出地点名称\",\"say_place_name\", wake_signal=True)\n\n try:\n sentence = \"请说出地点名称\"\n t1 = threading.Thread(target=run_send_sentence, args=(sentence, self.urls['sentence_url']))\n t1.start()\n except:\n pass\n final_result = self.record_voice(3)\n \n if final_result.find(u\"会议室\") >= 0:\n self.navigation_place = 1\n place = \"会议室\"\n\n if self.navigation_place == 0:\n self.navigation_mode = 0\n sentence = \"请说出正确的目标点\"\n else:\n self.navigation_mode = 1\n sentence = \"已经打开室内导航,导航到\" + place\n\n print(\"打开室内导航\")\n\n if final_result.find(u\"开启室内模式\") >= 0 or final_result.find(u\"打开室内模式\") >= 0 or final_result.find(u\"开始室内模式\") >= 0:\n self.mode = 1\n sentence = \"打开室内模式\"\n 
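# sentence means 'indoor mode opened'; it is spoken back to the user at the end\n                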
print(\"打开室内模式\")\n\n if final_result.find(u\"开启室外\") >= 0 or final_result.find(u\"打开室外\") >= 0 or final_result.find(u\"开始室外\") >= 0:\n self.mode = 2\n sentence = \"打开室外模式\"\n print(\"打开室外模式\")\n\n if final_result.find(u\"开启文字\") >= 0 or final_result.find(u\"打开文字\") >= 0 or final_result.find(u\"开始文字\") >= 0:\n self.mode = 3\n self.face_mode = 0\n self.seg_mode = 0\n sentence = \"打开文字识别\"\n print(\"打开文字识别\")\n\n if final_result.find(u\"开启人脸\") >= 0 or final_result.find(u\"打开人脸\") >= 0 or final_result.find(u\"开始人脸\") >= 0:\n self.face_mode = 2\n sentence = \"打开人脸识别\"\n print(\"打开人脸识别\")\n\n if final_result.find(u\"开启道路识别\") >= 0 or final_result.find(u\"打开道路识别\") >= 0 or final_result.find(u\"开始道路识别\") >= 0:\n self.seg_mode = 1\n sentence = \"打开道路识别\"\n print(\"打开道路识别\")\n\n if final_result.find(u\"寻找椅子\") >= 0:\n self.mode = 7\n sentence = \"开始寻找椅子\"\n print(\"开始寻找椅子\")\n\n if final_result.find(u\"寻找杯子\") >= 0:\n self.mode = 8\n sentence = \"开始寻找杯子\"\n print(\"开始寻找杯子\")\n except:\n pass\n \n # ----------------------------#\n # 播放\n # ----------------------------#\n try:\n t1 = threading.Thread(target=run_send_sentence, args=(sentence, self.urls['sentence_url']))\n t1.start()\n except:\n pass\n\n if os.path.exists(\"mp3andwav/\" + sentence + \".mp3\"):\n self.play_wav(\"mp3andwav\", sentence, wake_signal=True)\n else:\n self.play_sentence(sentence, sentence, wake_signal=True)\n self.recognize_init()\n self.wake_signal = False\n\n def play_sound(self):\n sentence = \"\"\n if self.wake_signal:\n return\n if self.mode == 0 and self.face_mode == 0 and self.seg_mode == 0 and self.navigation_mode == 0:\n return\n \n if self.mode == 2 or self.mode == 5 or self.mode == 6:\n sentence += self.process_traffic()\n\n if self.mode == 1 or self.mode == 2 or self.mode == 4 or self.mode==7 or self.mode==8:\n sentence += self.process_object()\n\n if self.face_mode == 1 or self.face_mode == 2 or self.face_mode == 3:\n sentence += self.process_face()\n\n if self.mode == 3:\n sentence += self.process_ocr()\n\n if self.seg_mode == 1:\n sentence += self.process_pspnet()\n\n if self.navigation_end:\n self.navigation_mode = 0\n self.navigation_place = 0\n self.navigation_end = 0\n sentence += \"导航结束!\"\n \n if self.navigation_mode == 1:\n sentence += self.navigation_sentence\n\n if sentence != \"\":\n self.speak_flag = True\n else:\n sentence = \"未检测到内容\"\n\n if self.speak_flag:\n try:\n t1 = threading.Thread(target=run_send_sentence, args=(sentence, self.urls['sentence_url']))\n t1.start()\n except:\n pass\n\n if len(sentence)>12:\n self.play_sentence(sentence, \"sentence\")\n else:\n if os.path.exists(\"mp3andwav/\" + sentence + \".mp3\"):\n self.play_wav(\"mp3andwav\", sentence)\n else:\n self.play_sentence(sentence, sentence)\n # self.play_sentence(sentence, \"recognize\")\n \n if sentence == \"未检测到内容\":\n self.speak_flag = False\n\n\n def save_images(self, image, start_Time):\n '''\n self.mode的值可选范围为:0,1,2,3,4,5,6,7\n 0为无工作状态\n 1为室内模式\n 2为室外模式\n 3为阅读模式\n 4为障碍物识别模式\n 5为交通灯识别模式\n 6为交通标志牌识别模式\n 7为盲道和斑马线识别\n self.face_mode的值可选范围为:0,1,2,3\n 0为无工作状态\n 1为正常模式\n 2为仅人脸识别\n 3为表情识别\n '''\n path = \"./Saves/photo/\" + start_Time + \"/\"\n file = open(\"./Saves/txt/\" + start_Time + \".txt\", \"a\")\n\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = Image.fromarray(np.uint8(image))\n\n nowTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\n classes = []\n bounding_boxes = []\n if self.mode == 1:\n for i,j in zip(self.object_class_all,self.object_bounding_boxes):\n if i in mode_classes[self.mode]:\n 
classes.append(i)\n bounding_boxes.append(j)\n\n elif self.mode == 2:\n for i,j in zip(self.object_class_all,self.object_bounding_boxes):\n if i in mode_classes[self.mode]:\n classes.append(i)\n bounding_boxes.append(j)\n \n for i,j in zip(self.traffic_class_all,self.traffic_bounding_boxes):\n classes.append(i)\n bounding_boxes.append(j)\n\n elif self.mode == 3:\n if (self.mode != 0 or self.face_mode != 0) and self.wake_signal == False:\n image.save(path + nowTime + \".jpg\")\n print(self.orc_save_sentence)\n file.write(nowTime + \" : \" + self.orc_save_sentence + \"\\n\")\n return\n \n elif self.mode == 4 or self.mode==7 or self.mode==8:\n for i,j in zip(self.object_class_all,self.object_bounding_boxes):\n if i in mode_classes[self.mode]:\n classes.append(i)\n bounding_boxes.append(j)\n\n elif self.mode == 5:\n for i,j in zip(self.traffic_class_all,self.traffic_bounding_boxes):\n classes.append(i)\n bounding_boxes.append(j)\n\n elif self.mode == 6:\n for i,j in zip(self.traffic_class_all,self.traffic_bounding_boxes):\n classes.append(i)\n bounding_boxes.append(j)\n \n if self.face_mode == 1:\n try:\n for i,j in zip(self.face_class_all,self.face_bounding_boxes[:, :4]):\n classes.append(i)\n bounding_boxes.append(j)\n except:\n pass\n elif self.face_mode == 2:\n try:\n for i,j in zip(self.face_class_all,self.face_bounding_boxes[:, :4]):\n i = i.split(\";\")[0]\n classes.append(i)\n bounding_boxes.append(j)\n except:\n pass\n elif self.face_mode == 3:\n try:\n for i,j in zip(self.face_class_all,self.face_bounding_boxes[:, :4]):\n i = i.split(\";\")[-1]\n classes.append(i)\n bounding_boxes.append(j)\n except:\n pass\n \n font = ImageFont.truetype(font='model_data/simhei.ttf',\n size=np.floor(3e-2 * np.shape(image)[1] + 0.5).astype('int32'))\n thickness = 5\n\n if (self.mode != 0 or self.face_mode != 0) and self.wake_signal == False:\n classes_sentence = \"\"\n for cls in classes:\n classes_sentence += cls + \" ; \"\n file.write(nowTime + \" : \" + classes_sentence + \"\\n\")\n\n for i, c in enumerate(classes):\n predicted_class = c\n\n left, top, right, bottom = bounding_boxes[i]\n top = top - 5\n left = left - 5\n bottom = bottom + 5\n right = right + 5\n\n top = max(0, np.floor(top + 0.5).astype('int32'))\n left = max(0, np.floor(left + 0.5).astype('int32'))\n bottom = min(np.shape(image)[0], np.floor(bottom + 0.5).astype('int32'))\n right = min(np.shape(image)[1], np.floor(right + 0.5).astype('int32'))\n\n # 画框框\n label = '{}'.format(predicted_class)\n draw = ImageDraw.Draw(image)\n label_size = draw.textsize(label, font)\n label = label.encode('utf-8')\n\n if top - label_size[1] >= 0:\n text_origin = np.array([left, top - label_size[1]])\n else:\n text_origin = np.array([left, top + 1])\n\n for i in range(thickness):\n draw.rectangle(\n [left + i, top + i, right - i, bottom - i],\n outline=(255,0,0))\n\n draw.rectangle(\n [tuple(text_origin), tuple(text_origin + label_size)],\n fill=(255,0,0))\n draw.text(text_origin, str(label, 'UTF-8'), fill=(0, 0, 0), font=font)\n del draw\n\n image.save(path + nowTime + \".jpg\")\n","repo_name":"AGuangzzz/Help_blind_project","sub_path":"flask_blind_client/detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":53189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22398757408","text":"from tendo import singleton\nme = singleton.SingleInstance()\n\nfrom food.psql import *\nfrom food.tools import get_logger\nlogger = 
get_logger(engine,'bot_logs','food')\nlogger.debug({'msg':'starting bot'})\nfrom aiogram import Bot, Dispatcher, executor, types\nfrom aiogram.types import ContentType\nfrom aiogram.dispatcher.filters.state import State, StatesGroup\nfrom aiogram.types.message import ContentTypes\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.contrib.fsm_storage.memory import MemoryStorage\nfrom sqlalchemy import update\nfrom aiogram.dispatcher.filters.state import State, StatesGroup\nfrom aiogram.utils.callback_data import CallbackData\nimport typing\nimport numpy as np\n\n\nAPI_TOKEN = \"5091011572:AAG4NfkC_zZjcsaAFkwLm4ZXOvhEqyLpQhY\"\n\nfrom food.paths import *\nfrom food.search import *\nimport pandas as pd\nimport pytz\ntimezones = pytz.all_timezones\nimport requests\nfrom requests.structures import CaseInsensitiveDict\nimport urllib\nfrom tzwhere import tzwhere\n\nimport nest_asyncio\nnest_asyncio.apply()\n\ndef geocode(q):\n geocoding_key = '5d96ac126bcb462cb373297924ab2cb4'\n url = \"https://api.geoapify.com/v1/geocode/search?\"\n\n params = {\"apiKey\":geocoding_key, \n \"text\":q}\n\n resp = requests.get(url + urllib.parse.urlencode(params)).json()\n return pd.json_normalize(resp['features']).sort_values('properties.rank.importance',ascending = False)[['properties.lat','properties.lon']].iloc[0].to_list()\n \ndef get_tz(q):\n lat,lon = geocode(q)\n return tzwhere.tzwhere().tzNameAt(lat,lon)\nasync def async_get_tz(q):\n return get_tz(q)\nasync def async_search_image(url, env='prod'):\n return search_image(url,env)\nasync def async_geocode(q):\n return geocode(q)\nasync def async_insert_on_conflict(*args, **qwargs):\n return insert_on_conflict(*args, **qwargs)\nasync def add_sender(message):\n sender = message['from'].to_python()\n sender = pd.DataFrame(sender,index=[0]).drop(columns =['is_bot'])\n await async_insert_on_conflict(sender,'users',unique_cols=['id'])\ndef get_msg(query):\n dish = pd.read_sql(f\"\"\"select energy,protein,carb,fat from food.dishes \n where user_id={query['from']['id']} and \n message_id = {query['message']['message_id']}\n order by id desc limit 1\"\"\",engine)\n plot_numtients = dish[['energy','protein','carb','fat']].reset_index(drop=True)\n plot_numtients.index = ['']\n return plot_numtients.astype(int).to_string()\ndef get_today_consumed(user_id):\n today_consumed = pd.read_sql(f\"\"\"select energy,grams,timestamp from {schema}.dishes\n where user_id = {user_id} and timestamp > now() - interval '24 hours'\n and grams is not null;\"\"\",engine).set_index(\"timestamp\")\n today_consumed= today_consumed['energy']/100*today_consumed['grams']\n user_tz = engine.execute(f\"\"\"select value from food.user_properties \n where user_id={user_id} and\n property='tz'\n order by id desc limit 1\"\"\").first()\n\n user_tz = user_tz[0] if user_tz else 'UTC'\n today_consumed = today_consumed.tz_convert(user_tz)\n now = pd.Timestamp.now(tz = user_tz)\n today_consumed = today_consumed.reset_index()\t\n this_morning = pd.Timestamp(year = now.year,month = now.month,day = now.day,hour = 3,tz = user_tz)\n today_consumed = today_consumed[today_consumed['timestamp'] > pd.Timestamp(this_morning)][0].sum()\n return int(today_consumed),user_tz\n\n\nimport asyncio\n\n\nbot = Bot(token=API_TOKEN)\nstorage = MemoryStorage()\ndp = Dispatcher(bot, storage=storage)\n\ndishes_table = Dishes.__table__\n\nadd_dish_cb = CallbackData('add dish', 'action')\nmeasurment_cb = CallbackData('measurment', 'weight')\nedit_dish_cb = CallbackData('edit_dish', 'action')\nchoose_metr_cb = 
CallbackData('choose_metr', 'choice')\n\nml_version = 0.2\n\n\nset_timezone_command = types.BotCommand('set_timezone','set you timezone so that we know when your day starts')\ncommands = [set_timezone_command]\nasyncio.run(bot.set_my_commands(commands))\n\ngrams_grid = list(np.arange(10,1000,10)[:56])\ngrams_grid = [str(int(v)) for v in grams_grid]\nounces_grid = list(np.arange(0.4,23,0.4)[:56])\nounces_grid = [str(round(v,1)) for v in ounces_grid]\ngrid_values = list(set(grams_grid+ounces_grid))\ndef get_keyboard(t, unit = None):\n markup = types.InlineKeyboardMarkup()\n if t == 'add dish' : \n markup.add(types.InlineKeyboardButton('add dish', callback_data=add_dish_cb.new(action='add_dish')))\n \n elif t == 'measurment':\n\n btns_text = tuple(ounces_grid) if unit == 'ounces' else grams_grid\n\n\n markup = types.InlineKeyboardMarkup(row_width=8)\n \n markup.add(*(types.InlineKeyboardButton(text, callback_data=measurment_cb.new(weight=text)) for text in btns_text))\n\n elif t == 'edit_dish':\n\n btns_text = ('remove','edit weight','add again')\n markup.add(*(types.InlineKeyboardButton(text, callback_data=edit_dish_cb.new(action=text)) for text in btns_text))\n\n\n elif t == 'choose_metr':\n\n btns_text = ('grams','ounces')\n markup.add(*(types.InlineKeyboardButton(text, callback_data=choose_metr_cb.new(choice=text)) for text in btns_text))\n\n\n \n\n return markup \n\nasync def measurment(unit, query: types.CallbackQuery, callback_data: typing.Dict[str, str]):\n logger.debug({'func':'measurment','id_key':'user_id','id_value':query['from']['id'],'msg':'measurment'})\n\n\n await query.answer()\n\n msg = query.to_python()['message']['text']\n msg = msg.split('\\xa0')[0] if '\\xa0' in msg else msg\n msg = f\"{msg}\\n \\xa0 please choose weight of the dish in {unit}\"\n\n await bot.edit_message_text(\n msg,\n query.from_user.id,\n query.message.message_id,\n reply_markup=get_keyboard('measurment',unit),\n )\ndef get_update(query,weight):\n energy = engine.execute(f\"\"\"select energy from food.dishes \n where user_id={query['from']['id']}\n and message_id = {query['message']['message_id']}\n order by id desc limit 1\"\"\").first()[0]\n stmt = (\n dishes_table.update()\n .where(dishes_table.c.message_id == query['message']['message_id'])\n .values(grams=weight)\n .returning(dishes_table.c.id)\n )\n session.execute(stmt)\n session.commit()\n\n return int(energy)\n\n#photo recieved\n@dp.message_handler(content_types=ContentType.PHOTO,state='*')\nasync def process_photo(message: types.Message, state: FSMContext):\n logger.debug({'func':'process_photo','id_key':'user_id','id_value':message['from']['id'],'msg':'process_photo started'})\n \n\n await state.finish()\n\n \n await types.ChatActions.typing()\n\n await add_sender(message)\n\n photo = message['photo'][-1]\n await photo.download(reference_images_path/photo['file_id'])\n image_url = await photo.get_url()\n dish = await async_search_image(url=image_url, env='prod')\n description = dish['description'].iloc[0]\n\n\n dish['photo_id'] = photo['file_id']\n dish['photo_message_id'] = message['message_id']\n sender = message['from'].to_python()\n dish['user_id'] = sender['id']\n dish['ml_version'] = ml_version \n dish['timestamp']=pd.Timestamp.utcnow()\n\n \n plot_numtients = dish[['energy','protein','carb','fat']].reset_index(drop=True)\n plot_numtients.index = ['']\n\n msg = f'{description}, per 100 gram \\n {plot_numtients.astype(int).to_string()}'\n \n # msg = description + '\\n'+ plot_numtients.astype(int).to_string()\n\n reply_message = await 
message.reply(msg, reply_markup=get_keyboard('add dish'))\n    dish['message_id'] = reply_message['message_id']\n    \n    dish.to_sql('dishes',schema = schema,if_exists = 'append',index = False,con=engine)\n\n    logger.debug({'func':'process_photo','id_key':'user_id','id_value':message['from']['id'],'msg':'process_photo finished'})\n\n\n\n    \nclass CState(StatesGroup): \n    set_timezone = State()\n@dp.message_handler(commands=['set_timezone'])\nasync def set_timezone_command(message: types.Message, state: FSMContext):\n    logger.debug({'func':'set_timezone_command','id_key':'user_id','id_value':message['from']['id'],'msg':'set_timezone pushed'})\n    await CState.set_timezone.set()\n    await message.reply(f\"please search your town to set timezone\")\n@dp.message_handler(state=CState.set_timezone)\nasync def set_timezone(message: types.Message, state: FSMContext):\n    logger.debug({'func':'set_timezone','id_key':'user_id','id_value':message['from']['id'],'msg':f'set_timezone to {message.text} started'})\n    await types.ChatActions.typing()\n    await add_sender(message)\n    tz = await async_get_tz(message.text)\n\n    df = pd.DataFrame([[message['from']['id'],'tz',tz,pd.Timestamp.utcnow()]],columns = ['user_id','property','value','timestamp'])\n    df.to_sql('user_properties',schema = schema,con = engine,if_exists = 'append',index = False)\n\n    await state.finish()\n\n    await message.reply(f\"your tz is set to {tz}\")\n\n    logger.debug({'func':'set_timezone','id_key':'user_id','id_value':message['from']['id'],'msg':f'set_timezone to {message.text} finished'})\n\ndef get_metric_unit(user_id):\n    unit = engine.execute(f\"\"\"select value from food.user_properties \n                            where user_id={user_id} and\n                            property='metric_unit'\n                            order by id desc limit 1\"\"\").first()\n\n    return unit[0] if unit else None\n\n@dp.message_handler(commands=['start'])\nasync def start_command(message: types.Message):\n\n    logger.debug({'func':'start_command','id_key':'user_id','id_value':message['from']['id'],'msg':'start'})\n    \n    await message.reply(\"\"\"Counting calories is as easy as taking pictures. 
Just capture everything before you eat it\\n\n    Now send a photo of your meal to try\"\"\")\n#add_dish pushed\n@dp.callback_query_handler(add_dish_cb.filter(action=['add_dish']))\nasync def add_dish(query: types.CallbackQuery, callback_data: typing.Dict[str, str]):\n    logger.debug({'func':'add_dish','id_key':'user_id','id_value':query['from']['id'],'msg':'add_dish'})\n\n    unit = get_metric_unit(query['from']['id'])\n    if not unit:\n\n        msg = query.to_python()['message']['text']\n        msg = msg.split('\\xa0')[0] if '\\xa0' in msg else msg\n        msg = f\"{msg}\\n \\xa0 please choose unit for your food weight measurement\"\n\n\n        await bot.edit_message_text(\n            msg,\n            query.from_user.id,\n            query.message.message_id,\n            reply_markup=get_keyboard('choose_metr'))\n\n    else: \n        await measurment(unit,query, callback_data)\n#add_dish pushed and no metric selected\n@dp.callback_query_handler(choose_metr_cb.filter(choice=['grams']))\nasync def select_metric_grams(query: types.CallbackQuery, callback_data: typing.Dict[str, str]):\n    logger.debug({'func':'select_metric_grams','id_key':'user_id','id_value':query['from']['id'],'msg':'select_metric_grams'})\n    \n    df = pd.DataFrame([[query['from']['id'],'metric_unit','grams',pd.Timestamp.utcnow()]],columns = ['user_id','property','value','timestamp'])\n    df.to_sql('user_properties',schema = schema,con = engine,if_exists = 'append',index = False)\n\n    await measurment('grams',query, callback_data)\n#add_dish pushed and no metric selected\n@dp.callback_query_handler(choose_metr_cb.filter(choice=['ounces']))\nasync def select_metric_ounces(query: types.CallbackQuery, callback_data: typing.Dict[str, str]):\n    logger.debug({'func':'select_metric_ounces','id_key':'user_id','id_value':query['from']['id'],'msg':'select_metric_ounces'})\n\n    df = pd.DataFrame([[query['from']['id'],'metric_unit','ounces',pd.Timestamp.utcnow()]],columns = ['user_id','property','value','timestamp'])\n    df.to_sql('user_properties',schema = schema,con = engine,if_exists = 'append',index = False)\n\n    await measurment('ounces',query, callback_data)\n#edit weight pushed\n@dp.callback_query_handler(edit_dish_cb.filter(action=['edit weight']))\nasync def edit_weight(query: types.CallbackQuery, callback_data: typing.Dict[str, str]):\n    logger.debug({'func':'edit_weight','id_key':'user_id','id_value':query['from']['id'],'msg':'edit_weight'})\n    unit = get_metric_unit(query['from']['id'])\n    await measurment(unit,query, callback_data)\n#measure provided\n@dp.callback_query_handler(measurment_cb.filter(weight=grid_values))\nasync def weight_processing(query: types.CallbackQuery, callback_data: typing.Dict[str, str]):\n\n    logger.debug({'func':'weight_processing','id_key':'user_id','id_value':query['from']['id'],'msg':'weight_processing started'})\n\n    await query.answer()\n\n\n    t = 'ounces' if 'ounces' in query.to_python()['message']['text'] else 'grams'\n    u = 28.3495 if t == 'ounces' else 1  # grams per ounce when the user picked ounces\n\n    weight = float(callback_data['weight'])\n\n    energy = get_update(query,weight)\n\n    msg = query.to_python()['message']['text']\n    msg = msg.split('\\xa0')[0] if '\\xa0' in msg else msg\n\n    msg = f\"{msg} \\xa0 \\n consumed {weight} {t} \\xa0 \\n {int(energy/100*u*weight)} kcal\"\n\n    today_consumed,usertz = get_today_consumed(query['from']['id'])\n    msg = f\"{msg} \\xa0 \\n today consumed {today_consumed}\"\n    if usertz=='UTC': \n        msg = f\"{msg} \\xa0 \\n please /set_timezone so bot knows when your day is started\"\n        # await bot.send_message(chat_id=query['from']['id'], \n        #                        text='please /set_timezone so bot knows when your day is 
started')\n\n \n\n await bot.edit_message_text(\n msg,\n query.from_user.id,\n query.message.message_id,\n reply_markup=get_keyboard('edit_dish')\n )\n\n logger.debug({'func':'weight_processing','id_key':'user_id','id_value':query['from']['id'],'msg':'weight_processing finished'})\n \n#remove pushed\n@dp.callback_query_handler(edit_dish_cb.filter(action=['remove']))\nasync def remove_dish(query: types.CallbackQuery, callback_data: typing.Dict[str, str]):\n logger.debug({'func':'remove_dish','id_key':'user_id','id_value':query['from']['id'],'msg':'remove_dish'})\n _ = get_update(query,0)\n\n\n msg = query.to_python()['message']['text']\n msg = msg.split('\\xa0')[0] if '\\xa0' in msg else msg\n\n today_consumed,usertz = get_today_consumed(query['from']['id'])\n msg = f\"{msg} \\xa0 \\n today consumed {today_consumed}\"\n if usertz=='UTC': \n msg = f\"{msg} \\xa0 \\n please /set_timezone so bot knows when your day is started\"\n # await bot.send_message(chat_id=query['from']['id'], \n # text='please /set_timezone so bot knows when your day is started')\n\n\n await query.answer()\n await bot.edit_message_text(\n msg,\n query.from_user.id,\n query.message.message_id,\n reply_markup=get_keyboard('add dish'))\n#add again pushed\n@dp.callback_query_handler(edit_dish_cb.filter(action=['add again']))\nasync def add_again(query: types.CallbackQuery, callback_data: typing.Dict[str, str]):\n logger.debug({'func':'add_again','id_key':'user_id','id_value':query['from']['id'],'msg':'add_again'})\n\n\n dish = pd.read_sql(f\"\"\"select description,energy,protein,carb,fat,score,photo_id,user_id,ml_version,photo_message_id\n from food.dishes \n where user_id={query['from']['id']}\n and message_id = {query['message']['message_id']} limit 1\"\"\",engine)\n dish['timestamp'] = pd.Timestamp.utcnow()\n\n\n msg = query.to_python()['message']['text']\n msg = msg.split('\\xa0')[0] if '\\xa0' in msg else msg\n\n today_consumed,usertz = get_today_consumed(query['from']['id'])\n msg = f\"{msg} \\xa0 \\n today consumed {today_consumed}\"\n if usertz=='UTC': \n msg = f\"{msg} \\xa0 \\n please /set_timezone so bot knows when your day is started\"\n\n\n # await bot.send_message(chat_id=query['from']['id'], \n # text='please /set_timezone so bot knows when your day is started')\n\n\n\n await query.answer()\n message = await bot.send_message(chat_id=query['from']['id'], \n reply_to_message_id = dish['photo_message_id'].iloc[0],\n text=msg, \n reply_markup=get_keyboard('add dish'))\n\n dish['message_id'] = message['message_id']\n dish.to_sql('dishes',schema = schema,if_exists = 'append',index = False,con=engine)\n \n \n\nif __name__ == '__main__':\n executor.start_polling(dp)","repo_name":"DmitriyG228/food","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":16666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71101944513","text":"from AthenaConfiguration.ComponentAccumulator import ComponentAccumulator\nfrom AthenaConfiguration.ComponentFactory import CompFactory\nfrom AthenaConfiguration.Enums import MetadataCategory\n\nBPHYDerivationName = \"BPHY6\"\nstreamName = \"StreamDAOD_BPHY6\"\n\ndef BPHY6Cfg(ConfigFlags):\n from DerivationFrameworkBPhys.commonBPHYMethodsCfg import (BPHY_V0ToolCfg, BPHY_InDetDetailedTrackSelectorToolCfg, BPHY_VertexPointEstimatorCfg, BPHY_TrkVKalVrtFitterCfg)\n from JpsiUpsilonTools.JpsiUpsilonToolsConfig import PrimaryVertexRefittingToolCfg\n acc = ComponentAccumulator()\n isSimulation = ConfigFlags.Input.isMC\n 
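# MC-only truth collections and thinning are configured further down when isSimulation is True\n    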
# General Variables\n dimuon_chi2_max = 50.\n dimuon_mass_min = 100.\n dimuon_mass_max = 150e3\n\n V0Tools = acc.popToolsAndMerge(BPHY_V0ToolCfg(ConfigFlags, BPHYDerivationName))\n vkalvrt = acc.popToolsAndMerge(BPHY_TrkVKalVrtFitterCfg(ConfigFlags, BPHYDerivationName)) # VKalVrt vertex fitter\n acc.addPublicTool(vkalvrt)\n acc.addPublicTool(V0Tools)\n trackselect = acc.popToolsAndMerge(BPHY_InDetDetailedTrackSelectorToolCfg(ConfigFlags, BPHYDerivationName))\n acc.addPublicTool(trackselect)\n vpest = acc.popToolsAndMerge(BPHY_VertexPointEstimatorCfg(ConfigFlags, BPHYDerivationName))\n acc.addPublicTool(vpest)\n from TrkConfig.AtlasExtrapolatorConfig import InDetExtrapolatorCfg\n extrap = acc.popToolsAndMerge(InDetExtrapolatorCfg(ConfigFlags))\n acc.addPublicTool(extrap)\n BPHY6_Extrap_Tool = CompFactory.DerivationFramework.MuonExtrapolationTool(name = \"BPHY6_ExtrapolationTool\", Extrapolator = extrap)\n BPHY6JpsiFinder = CompFactory.Analysis.JpsiFinder(\n name = \"BPHY6JpsiFinder\",\n muAndMu = True,\n muAndTrack = False,\n TrackAndTrack = False,\n assumeDiMuons = True, # If true, will assume dimu hypothesis and use PDG value for mu mass\n invMassUpper = dimuon_mass_max,\n invMassLower = dimuon_mass_min,\n Chi2Cut = dimuon_chi2_max,\n oppChargesOnly = True,\n atLeastOneComb = True,\n useCombinedMeasurement = False, # Only takes effect if combOnly=True \n muonCollectionKey = \"Muons\",\n TrackParticleCollection = \"InDetTrackParticles\",\n V0VertexFitterTool = None, # V0 vertex fitter\n useV0Fitter = False, # if False a TrkVertexFitterTool will be used\n TrkVertexFitterTool = vkalvrt, # VKalVrt vertex fitter\n TrackSelectorTool = trackselect,\n VertexPointEstimator = vpest,\n useMCPCuts = False )\n\n BPHY6_Reco_mumu = CompFactory.DerivationFramework.Reco_Vertex(\n name = \"BPHY6_Reco_mumu\",\n VertexSearchTool = BPHY6JpsiFinder,\n OutputVtxContainerName = \"BPHY6OniaCandidates\",\n V0Tools = V0Tools,\n PVRefitter = acc.popToolsAndMerge(PrimaryVertexRefittingToolCfg(ConfigFlags)),\n PVContainerName = \"PrimaryVertices\",\n RefPVContainerName = \"BPHY6RefittedPrimaryVertices\")\n\n BPHY6_Select_Jpsi2mumu = CompFactory.DerivationFramework.Select_onia2mumu(\n name = \"BPHY6_Select_Jpsi2mumu\",\n HypothesisName = \"Jpsi\",\n InputVtxContainerName = \"BPHY6OniaCandidates\",\n V0Tools = V0Tools,\n VtxMassHypo = 3096.916,\n MassMin = 2700.0,\n MassMax = 3500.0,\n Chi2Max = 20)\n\n BPHY6_Select_Psi2mumu = CompFactory.DerivationFramework.Select_onia2mumu(\n name = \"BPHY6_Select_Psi2mumu\",\n HypothesisName = \"Psi\",\n InputVtxContainerName = \"BPHY6OniaCandidates\",\n V0Tools = V0Tools,\n VtxMassHypo = 3686.09,\n MassMin = 3200.0,\n MassMax = 4200.0,\n Chi2Max = 20)\n\n BPHY6_Select_Upsi2mumu = CompFactory.DerivationFramework.Select_onia2mumu(\n name = \"BPHY6_Select_Upsi2mumu\",\n HypothesisName = \"Upsi\",\n InputVtxContainerName = \"BPHY6OniaCandidates\",\n V0Tools = V0Tools,\n VtxMassHypo = 9460.30,\n MassMin = 8000.0,\n MassMax = 12000.0,\n Chi2Max = 20)\n \n BPHY6_Select_Bmumu2mumu = CompFactory.DerivationFramework.Select_onia2mumu(\n name = \"BPHY6_Select_Bmumu2mumu\",\n HypothesisName = \"Bmumu\",\n InputVtxContainerName = \"BPHY6OniaCandidates\",\n V0Tools = V0Tools,\n VtxMassHypo = 5366.77,\n MassMin = 4200.0,\n MassMax = 8000.0,\n Chi2Max = 20)\n \n BPHY6_Select_Zmumu2mumu = CompFactory.DerivationFramework.Select_onia2mumu(\n name = \"BPHY6_Select_Zmumu2mumu\",\n HypothesisName = \"Zmumu\",\n InputVtxContainerName = \"BPHY6OniaCandidates\",\n V0Tools = V0Tools,\n VtxMassHypo = 
91187.6,\n MassMin = 60000.0,\n MassMax = 120000.0,\n Chi2Max = 20)\n\n BPHY6_Select_Onia2mumu = CompFactory.DerivationFramework.Select_onia2mumu(\n name = \"BPHY6_Select_Onia2mumu\",\n HypothesisName = \"Onia\",\n InputVtxContainerName = \"BPHY6OniaCandidates\",\n V0Tools = V0Tools,\n VtxMassHypo = 3096.916,\n MassMin = dimuon_mass_min,\n MassMax = dimuon_mass_max,\n Chi2Max = 20)\n\n trigger_list = [r'HLT_\\d?mu\\d+']\n\n BPHY6TrigSkimmingTool = CompFactory.DerivationFramework.TriggerSkimmingTool( name = \"BPHY6TrigSkimmingTool\",\n TriggerListOR = trigger_list )\n expression = \"count(BPHY6OniaCandidates.passed_Onia) > 0 \"\n BPHY6_SelectEvent = CompFactory.DerivationFramework.xAODStringSkimmingTool(name = \"BPHY6_SelectEvent\",\n expression = expression)\n BPHY6Thin_vtxTrk = CompFactory.DerivationFramework.Thin_vtxTrk(\n name = \"BPHY6Thin_vtxTrk\",\n TrackParticleContainerName = \"InDetTrackParticles\",\n StreamName = streamName,\n VertexContainerNames = [\"BPHY6OniaCandidates\"],\n PassFlags = [\"passed_Onia\"], )\n BPHY6MuonTPThinningTool = CompFactory.DerivationFramework.MuonTrackParticleThinning(name = \"BPHY6MuonTPThinningTool\",\n MuonKey = \"Muons\",\n StreamName = streamName,\n InDetTrackParticlesKey = \"InDetTrackParticles\")\n\n\n BPHY6ThinningTools = [BPHY6Thin_vtxTrk, BPHY6MuonTPThinningTool]\n if isSimulation:\n BPHY6TruthThinTool = CompFactory.DerivationFramework.GenericTruthThinning(name = \"BPHY6TruthThinTool\",\n ParticleSelectionString = \"TruthParticles.pdgId == 443 || TruthParticles.pdgId == 100443 || TruthParticles.pdgId == 553 || TruthParticles.pdgId == 100553 || TruthParticles.pdgId == 200553 || TruthParticles.pdgId == 23 || TruthParticles.pdgId == 531 || TruthParticles.pdgId == 511 || TruthParticles.pdgId == 521 || TruthParticles.pdgId == 541\",\n PreserveDescendants = True,\n StreamName = streamName,\n PreserveAncestors = True)\n BPHY6ThinningTools.append(BPHY6TruthThinTool)\n\n SkimmingORTool = CompFactory.DerivationFramework.FilterCombinationOR(\"BPHY6SkimmingOR\",\n FilterList = [BPHY6_SelectEvent,BPHY6TrigSkimmingTool])\n\n augTools = [BPHY6_Reco_mumu, BPHY6_Select_Jpsi2mumu, BPHY6_Select_Psi2mumu, BPHY6_Select_Upsi2mumu,BPHY6_Select_Bmumu2mumu,\n BPHY6_Select_Zmumu2mumu,BPHY6_Select_Onia2mumu, BPHY6_Extrap_Tool]\n for t in augTools + BPHY6ThinningTools + [SkimmingORTool] + [BPHY6_SelectEvent,BPHY6TrigSkimmingTool]: acc.addPublicTool(t)\n acc.addEventAlgo(CompFactory.DerivationFramework.DerivationKernel(\"BPHY6Kernel\",\n AugmentationTools = augTools,\n SkimmingTools = [SkimmingORTool],\n ThinningTools = BPHY6ThinningTools ))\n\n from DerivationFrameworkCore.SlimmingHelper import SlimmingHelper\n from OutputStreamAthenaPool.OutputStreamConfig import OutputStreamCfg\n from xAODMetaDataCnv.InfileMetaDataConfig import SetupMetaDataForStreamCfg\n BPHY6SlimmingHelper = SlimmingHelper(\"BPHY6SlimmingHelper\", NamesAndTypes = ConfigFlags.Input.TypedCollections, ConfigFlags = ConfigFlags)\n \n # Needed for trigger objects\n BPHY6SlimmingHelper.IncludeMuonTriggerContent = True\n BPHY6SlimmingHelper.IncludeBPhysTriggerContent = True\n from DerivationFrameworkBPhys.commonBPHYMethodsCfg import getDefaultAllVariables\n AllVariables = getDefaultAllVariables()\n StaticContent = []\n AllVariables += [\"LVL1MuonRoIs\"]\n \n ## primary vertices\n AllVariables += [\"PrimaryVertices\"]\n StaticContent += [\"xAOD::VertexContainer#BPHY6RefittedPrimaryVertices\"]\n StaticContent += [\"xAOD::VertexAuxContainer#BPHY6RefittedPrimaryVerticesAux.\"]\n \n ## ID track particles\n 
AllVariables += [\"InDetTrackParticles\"]\n \n AllVariables += [\"HLT_xAOD__TrackParticleContainer_InDetTrigTrackingxAODCnv_Muon_EFID\"]\n AllVariables += [\"HLT_xAOD__TrackParticleContainer_InDetTrigTrackingxAODCnv_Muon_IDTrig\"]\n AllVariables += [\"HLT_xAOD__TrackParticleContainer_InDetTrigTrackingxAODCnv_Muon_FTF\"]\n AllVariables += [\"HLT_xAOD__TrackParticleContainer_InDetTrigTrackingxAODCnv_Bphysics_FTF\"]\n AllVariables += [\"HLT_xAOD__TrackParticleContainer_InDetTrigTrackingxAODCnv_Bphysics_IDTrig\"]\n \n \n \n ## combined / extrapolated muon track particles \n ## (note: for tagged muons there is no extra TrackParticle collection since the ID tracks\n ## are store in InDetTrackParticles collection)\n AllVariables += [\"CombinedMuonTrackParticles\"]\n AllVariables += [\"ExtrapolatedMuonTrackParticles\"]\n AllVariables += [\"MuonSpectrometerTrackParticles\"]\n \n ## muon container\n AllVariables += [\"Muons\"]\n AllVariables += [\"HLT_xAOD__L2StandAloneMuonContainer_MuonL2SAInfo\"]\n AllVariables += [\"HLT_xAOD__L2CombinedMuonContainer_MuonL2CBInfo\"]\n AllVariables += [\"HLT_xAOD__MuonContainer_MuonEFInfo\"]\n \n \n AllVariables += [\"HLT_xAOD__TrigBphysContainer_L2BMuMuXFex\" ]\n AllVariables += [\"HLT_xAOD__TrigBphysContainer_EFBMuMuXFex\" ]\n AllVariables += [\"HLT_xAOD__TrigBphysContainer_L2BMuMuFex\" ]\n AllVariables += [\"HLT_xAOD__TrigBphysContainer_EFBMuMuFex\" ]\n AllVariables += [\"HLT_xAOD__TrigBphysContainer_L2TrackMass\" ]\n AllVariables += [\"HLT_xAOD__TrigBphysContainer_EFTrackMass\" ]\n AllVariables += [\"HLT_xAOD__TrigBphysContainer_L2MultiMuFex\"]\n AllVariables += [\"HLT_xAOD__TrigBphysContainer_EFMultiMuFex\"]\n \n \n ## Jpsi candidates \n StaticContent += [\"xAOD::VertexContainer#%s\" % BPHY6_Reco_mumu.OutputVtxContainerName]\n StaticContent += [\"xAOD::VertexAuxContainer#%sAux.-vxTrackAtVertex\" % BPHY6_Reco_mumu.OutputVtxContainerName]\n \n if isSimulation:\n AllVariables += [\"TruthEvents\",\"TruthParticles\",\"TruthVertices\",\"MuonTruthParticles\"]\n BPHY6SlimmingHelper.AllVariables = AllVariables\n BPHY6SlimmingHelper.StaticContent = StaticContent\n BPHY6ItemList = BPHY6SlimmingHelper.GetItemList()\n acc.merge(OutputStreamCfg(ConfigFlags, \"DAOD_BPHY6\", ItemList=BPHY6ItemList, AcceptAlgs=[\"BPHY6Kernel\"]))\n acc.merge(SetupMetaDataForStreamCfg(ConfigFlags, \"DAOD_BPHY6\", AcceptAlgs=[\"BPHY6Kernel\"], createMetadata=[MetadataCategory.CutFlowMetaData]))\n acc.printConfig(withDetails=True, summariseProps=True, onlyComponents = [], printDefaults=True, printComponentsOnly=False)\n return acc\n","repo_name":"Yusuf-Manjra/athena","sub_path":"PhysicsAnalysis/DerivationFramework/DerivationFrameworkBPhys/python/BPHY6.py","file_name":"BPHY6.py","file_ext":"py","file_size_in_byte":13031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72985052993","text":"from contextlib import closing\nimport array, wave, sys, os, optparse\ntry:\n import numpy as np\nexcept ImportError:\n using_numpy = False\nelse:\n using_numpy = True\n\ndef naive_convolve(fircoeffs, wavdata):\n lside = len(fircoeffs) - 1\n rside = len(fircoeffs) - lside\n return [sum(a * b\n for a, b in zip(fircoeffs[max(lside - i, 0):],\n wavdata[max(i - lside, 0): i + rside]))\n for i in range(len(wavdata) + len(fircoeffs) - 1)]\n\nconvolve = np.convolve if using_numpy else naive_convolve\n\ndef convolve_test():\n fircoeffs = [1/16, 4/16, 6/16, 4/16, 1/16]\n data = [0, 1, 0, 0, 0, 1, 2, 3, 4, 5]\n print(naive_convolve(fircoeffs, data))\n 
print(list(np.convolve(fircoeffs, data)))\n\ndef load_wave_as_mono_s16(filename):\n little = array.array('H', b'\\x01\\x00')[0] == 1\n with closing(wave.open(filename, \"rb\")) as infp:\n bytedepth = infp.getsampwidth()\n if bytedepth not in (1, 2):\n raise ValueError(\"unsupported sampwidth\")\n n_ch = infp.getnchannels()\n datatype = 'h' if bytedepth == 2 else 'B'\n freq = infp.getframerate()\n length = infp.getnframes()\n data = array.array(datatype, infp.readframes(length))\n if datatype == 'B':\n # Expand 8 to 16 bit\n data = array.array('h', ((c - 128) << 8 for c in data))\n elif not little:\n # 16-bit data is little-endian in the wave file; it needs to\n # be byteswapped for big-endian platforms\n data.byteswap()\n if n_ch > 1:\n # average all channels\n data = array.array('h', (int(round(sum(data[i:i + n_ch]) / n_ch))\n for i in range(0, len(data), n_ch)))\n return (freq, data)\n\ndef save_wave_as_mono_s16(filename, freq, data):\n data = array.array('h', (min(max(s, -32767), 32767) for s in data))\n little = array.array('H', b'\\x01\\x00')[0] == 1\n if not little:\n data.byteswap()\n with closing(wave.open(filename, \"wb\")) as outfp:\n outfp.setnchannels(1)\n outfp.setsampwidth(2)\n outfp.setframerate(freq)\n outfp.writeframes(data.tobytes())\n\ndef brr_preemphasize(wavdata):\n \"\"\"Emphasize a sample's highs to compensate for S-DSP Gaussian interpolation.\"\"\"\n preem = \\\n [0.00417456, -0.017636, 0.03906008, -0.06200069, 0.07113042,\n -0.0255472, -0.26998287, 1.52160339, -0.26998287, -0.0255472,\n 0.07113042, -0.06200069, 0.03906008, -0.017636, 0.00417456]\n return [int(round(s)) for s in convolve(preem, wavdata)[7:-7]]\n\ndef brr_deemphasize(wavdata):\n \"\"\"Approximate the effect of Gaussian interpolation.\"\"\"\n deem = \\\n [372/2048, 1304/2048, 372/2048]\n return [int(round(s)) for s in convolve(deem, wavdata)[1:-1]]\n \n\nbrr_filters = [\n (0, 0),\n (0, 0.9375),\n (-0.9375, 1.90625),\n (-0.8125, 1.796875)\n]\n\ndef enfilter(wav, coeffs, prevs=[], quant=None):\n \"\"\"Calculates residuals from an IIR predictive filter.\n\nwav: iterable of sample values\nprevs: previous block of quantized and decoded samples\nquant: None during test filtering; 2 to 4096 during encoding\n\n\"\"\"\n prevs = list(prevs)[-len(coeffs):]\n if len(prevs) < len(coeffs):\n prevs = ([0] * len(coeffs) + prevs)[-len(coeffs):]\n out = []\n for c in wav:\n pred = sum(coeff * prev\n for coeff, prev in zip(coeffs, prevs[-len(coeffs):]))\n rescaled = resid = (c - pred)\n if quant:\n resid = int(round(resid / quant))\n resid = max(-8, min(7, resid))\n rescaled = resid * quant\n out.append(resid)\n prevs.append(rescaled + pred)\n return out, prevs[-len(coeffs):]\n\ndef defilter(wav, coeffs, prevs=[], quant=1):\n \"\"\"Applies an IIR predictive filter to residuals.\n\nwav: block of unpacked residuals\nprevs: previous block of decoded samples\nquant: number by which all residuals shall be multiplied\n\n\"\"\"\n prevs = list(prevs)[-len(coeffs):]\n if len(prevs) < len(coeffs):\n prevs = ([0] * len(coeffs) + prevs)[-len(coeffs):]\n out = []\n for resid in wav:\n rescaled = resid * quant\n pred = sum(coeff * prev\n for coeff, prev in zip(coeffs, prevs[-len(coeffs):]))\n c = (rescaled + int(round(pred)))\n out.append(c)\n prevs.append(c)\n return out\n\nCLIP_MAX = 32000\n\ndef encode_brr(wav, looped=False):\n prevs = []\n out = bytearray()\n for t in range(0, len(wav), 16):\n piece = wav[t:t + 16]\n\n # Occasionally, treble boost may cause a sample to clip.\n peak = max(abs(c) for c in piece)\n if peak > 
CLIP_MAX:\n## print(\"clip peak %d at time %d\" % (peak, t))\n piece = [max(min(c, CLIP_MAX), -CLIP_MAX) for c in piece]\n\n if prevs:\n # Calculate the peak residual for this piece with\n # each filter, then choose the smallest\n trials = [max(abs(resid)\n for resid in enfilter(piece, coeffs, prevs)[0])\n for coeffs in brr_filters]\n peak, filterid = min((r, i) for (i, r) in enumerate(trials))\n else:\n # first block always uses filter 0\n peak = max(abs(resid) for resid in piece)\n filterid = 0\n\n logquant = 0\n while logquant < 12 and peak >= (7 << logquant):\n logquant += 1\n resids, prevs = enfilter(piece, brr_filters[filterid],\n prevs or [], 1 << (logquant + 0))\n resids.extend([0] * (16 - len(resids)))\n byte0 = (logquant << 4) | (filterid << 2)\n if t + 16 >= len(wav):\n byte0 = byte0 | (3 if looped else 1)\n## print(\"filter #%d, scale %d,\\nresids %s\\n%s\"\n## % (filterid, 1 << logquant, repr(resids), repr(list(piece))))\n out.append(byte0)\n for i in range(0, len(resids), 2):\n hinibble = resids[i] & 0x0F\n lonibble = resids[i + 1] & 0x0F\n out.append((hinibble << 4) | lonibble)\n return bytes(out)\n\ndef decode_brr(brrdata):\n prevs = [0, 0]\n out = []\n for i in range(0, len(brrdata), 9):\n piece = bytes(brrdata[i:i + 9])\n logquant = piece[0] >> 4\n filterid = (piece[0] >> 2) & 0x03\n resids = [((b >> i & 0x0F) ^ 8) - 8\n for b in piece[1:] for i in (4, 0)]\n decoded = defilter(resids, brr_filters[filterid],\n prevs, 1 << (logquant + 0))\n## print(\"filter #%d, scale %d,\\nresids %s\\n%s\"\n## % (filterid, 1 << logquant, repr(resids), repr(decoded)))\n out.extend(decoded)\n prevs = decoded\n return out\n\n# Command line parsing and help #####################################\n#\n# Easy is hard. It takes a lot of code and a lot of text to make a\n# program self-documenting and resilient to bad input.\n\nusageText = \"usage: %prog [options] [-i] INFILE [-o] OUTFILE\"\nversionText = \"\"\"wav2brr 0.04\n\nCopyright 2014 Damian Yerrick\nCopying and distribution of this file, with or without\nmodification, are permitted in any medium without royalty provided\nthe copyright notice and this notice are preserved in all source\ncode copies. 
This file is offered as-is, without any warranty.\n\"\"\"\ndescriptionText = \"\"\"\nAudio converter for Super NES S-DSP.\n\"\"\"\n\ndef parse_argv(argv):\n    parser = optparse.OptionParser(usage=usageText, version=versionText,\n                                   description=descriptionText)\n    parser.add_option(\"-i\", \"--input\", dest=\"infilename\",\n                      help=\"read from INFILE\",\n                      metavar=\"INFILE\")\n    parser.add_option(\"-o\", \"--output\", dest=\"outfilename\",\n                      help=\"write output to OUTFILE\",\n                      metavar=\"OUTFILE\")\n    parser.add_option(\"-d\", \"--decompress\",\n                      action=\"store_true\", dest=\"decompress\", default=False,\n                      help=\"decompress BRR to wave (default: compress wave to BRR)\")\n    parser.add_option(\"--emph\", \"--emphasize\",\n                      action=\"store_true\", dest=\"emph\", default=False,\n                      help=\"read wave, write preemphasized (treble boosted) wave\")\n    parser.add_option(\"--deemph\", \"--deemphasize\",\n                      action=\"store_true\", dest=\"deemph\", default=False,\n                      help=\"read wave, write deemphasized wave\")\n    parser.add_option(\"-r\", \"--rate\", dest=\"rate\",\n                      metavar=\"RATE\", type=\"int\", default=None,\n                      help=\"output wave sample rate in Hz \"\n                      \"(default: 8372 for -d; input rate for --emph and --deemph)\")\n    parser.add_option(\"--loop\",\n                      action=\"store_true\", dest=\"loop\", default=False,\n                      help=\"set the BRR's loop bit\")\n    parser.add_option(\"--skip-filter\",\n                      action=\"store_true\", dest=\"skipfilter\", default=False,\n                      help=\"skip (de)emphasis when (de)compressing\")\n\n    (options, pos) = parser.parse_args(argv[1:])\n\n    if options.rate is not None and not 10 <= options.rate <= 128000:\n        parser.error(\"output sample rate must be 10 to 128000 Hz\")\n\n    # Fill unfilled roles with positional arguments\n    pos = iter(pos)\n    try:\n        options.infilename = options.infilename or next(pos)\n    except StopIteration:\n        parser.error(\"no input file; try %s --help\"\n                     % os.path.basename(sys.argv[0]))\n    try:\n        options.outfilename = options.outfilename or next(pos)\n    except StopIteration:\n        parser.error(\"no output file\")\n\n    # make sure no trailing arguments\n    try:\n        next(pos)\n        parser.error(\"too many filenames\")\n    except StopIteration:\n        pass\n    return options\n\nDEFAULT_RATE = 8372\n\ndef main(argv=None):\n    opts = parse_argv(argv or sys.argv)\n    if opts.emph:\n        freq, wave = load_wave_as_mono_s16(opts.infilename)\n        wave = brr_preemphasize(wave)\n        save_wave_as_mono_s16(opts.outfilename, opts.rate or freq, wave)\n    elif opts.deemph:\n        freq, wave = load_wave_as_mono_s16(opts.infilename)\n        wave = brr_deemphasize(wave)\n        save_wave_as_mono_s16(opts.outfilename, opts.rate or freq, wave)\n    elif opts.decompress:\n        with open(opts.infilename, 'rb') as infp:\n            brrdata = infp.read()\n        out = decode_brr(brrdata)\n        if not opts.skipfilter:\n            out = brr_deemphasize(out)\n        save_wave_as_mono_s16(opts.outfilename, opts.rate or DEFAULT_RATE, out)\n    else:\n        freq, wave = load_wave_as_mono_s16(opts.infilename)\n        if not opts.skipfilter:\n            wave = brr_preemphasize(wave)\n        brrdata = encode_brr(wave, looped=opts.loop)\n        with open(opts.outfilename, 'wb') as outfp:\n            outfp.write(brrdata)\n\nif __name__=='__main__':\n    main()\n##    convolve_test()\n","repo_name":"NovaSquirrel/NovaTheSquirrel2","sub_path":"tools/wav2brr.py","file_name":"wav2brr.py","file_ext":"py","file_size_in_byte":10849,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"61"} +{"seq_id":"71756687553","text":"import pypdfium2 as pdfium\nfrom PIL import Image\n\n# PDF to image\n\n# Load a 
document\nfilepath = input(\"Enter the pdf path : \")\npdf = pdfium.PdfDocument(filepath)\n# render every page and save it as a JPEG\nfor i in range(len(pdf)):\n    page = pdf[i]\n    pil_image = page.render(scale=4).to_pil()\n    pil_image.save(f\"output{i}.jpg\")\n\n\n# convert the first page's JPEG back to PDF\n\nimage1 = Image.open(r'output0.jpg')\noutputImage = image1.convert('RGB')\noutputImage.save(r'image_to_pdf.pdf')\nprint(\"Image converted to PDF\")","repo_name":"AshkaraAli005/Freshers-Training","sub_path":"Day6/conversion/conversion.py","file_name":"conversion.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23476231521","text":"import copy\r\nimport sys\r\n\r\n\r\ndef parse_case(instrm):\r\n    return int(instrm.readline().strip())\r\n\r\n\r\ndef solve_case(N):\r\n\r\n    if N == 0:\r\n        return \"INSOMNIA\"\r\n    \r\n    seen = set()\r\n    i = 0\r\n    while len(seen) < 10:\r\n        i += 1\r\n        digits = str(N*i)\r\n        for d in digits:\r\n            seen.add(d)\r\n        if len(seen) == 10:\r\n            return digits \r\n\r\n\r\nif __name__ == \"__main__\":\r\n    instrm = open(sys.argv[1])\r\n    ncases = int(instrm.readline().strip())\r\n    for i in range(ncases):\r\n        case = parse_case(instrm)\r\n        ans = solve_case(case)\r\n        print(\"Case #{}: {}\".format(i+1, ans))\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_177/1117.py","file_name":"1117.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27254253003","text":"def solution(s):\n    p, y = [], []\n\n    s = s.lower()\n\n    for c in s:\n        if c == 'p':\n            p.append(c)\n        elif c == 'y':\n            y.append(c)\n\n    return len(p) == len(y)","repo_name":"elice-algorithm-study/codingtest","sub_path":"minzy/[220205] 문자열 내 p와 y의 개수.py","file_name":"[220205] 문자열 내 p와 y의 개수.py","file_ext":"py","file_size_in_byte":239,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"17949857738","text":"import sys\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\n\ndef load_data(messages_filepath, categories_filepath):\n    '''\n    1) load the messages and categories data \n    2) merge messages and categories on id column and return the merged dataframe\n    \n    Args:\n        messages_filepath: filepath of the messages data in .csv format\n        categories_filepath: filepath of the categories data in .csv format\n    Returns: \n        df: merged data as a pandas dataframe\n    '''\n\n    # load messages data\n    messages = pd.read_csv(messages_filepath)\n    # load categories data\n    categories = pd.read_csv(categories_filepath)\n    # merge datasets\n    df = messages.merge(categories, on='id')\n\n    return df\n\n\ndef clean_data(df):\n    '''\n    Cleans the input dataframe and returns the dataframe:\n    1) create a new categories dataframe\n    2) extract and assign attribute (column) names\n    3) extract and assign observation values (int)\n    4) drop the original category column and concat the new categories\n    5) remove duplicate data (original category and messages data had 68 duplicate rows)\n    6) drop rows where the 'related' label equals 2 (invalid value)\n\n    Args: \n        df: pandas dataframe containing merged raw data\n    Returns: \n        df: pandas dataframe cleaned data\n    '''\n    \n    # step 1\n    # create a dataframe with individual category columns\n    categories = df.categories.str.split(';',expand=True)\n    \n    # step 2\n    # extract column names for categories.\n    row = categories.iloc[0]\n    category_col_names = row.astype(str).str[:-2]\n    # rename/assign column names for categories\n    categories.columns = category_col_names\n    \n    # step 3\n    # set category values as the last character of the string\n    for column in categories:\n        categories[column] = categories[column].astype(str).str[-1].astype(int)\n    \n    # step 4\n    # drop the original categories column from df\n    df.drop(columns=['categories'], inplace=True)\n    # concatenate the original dataframe with the new categories dataframe\n    df = pd.concat([df, categories], axis=1)\n\n    # step 5\n    # drop duplicates\n    df.drop_duplicates(inplace=True)\n\n    # step 6\n    # the 'df.related' column has three distinct values, 0, 1, and 2\n    # dropping 2, as it doesn't make sense\n    df.drop(df[df.related == 2].index, inplace=True)\n\n    return df\n\n\ndef save_data(df, database_filename):\n    '''\n    save the df as a sqlite database\n    Args:\n        df: pandas dataframe\n        database_filename: str\n    Returns: None\n    '''\n\n    engine = create_engine('sqlite:///{}'.format(database_filename))\n    df.to_sql('Disaster_Response_Table'\n              , con=engine\n              , if_exists='replace'\n              , index=False\n              )\n\n\ndef main():\n    if len(sys.argv) == 4:\n\n        messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n        print('Loading data...\\n    MESSAGES: {}\\n    CATEGORIES: {}'\n              .format(messages_filepath, categories_filepath))\n        df = load_data(messages_filepath, categories_filepath)\n\n        print('Cleaning data...')\n        df = clean_data(df)\n        \n        print('Saving data...\\n    DATABASE: {}'.format(database_filepath))\n        save_data(df, database_filepath)\n        \n        print('Cleaned data saved to database!')\n        \n    else:\n        print('Please provide the filepaths of the messages and categories '\\\n              'datasets as the first and second argument respectively, as '\\\n              'well as the filepath of the database to save the cleaned data '\\\n              'to as the third argument. \\n\\nExample: python process_data.py 
\\n\\nExample: python process_data.py '\\\n 'disaster_messages.csv disaster_categories.csv '\\\n 'DisasterResponse.db')\n\n\nif __name__ == '__main__':\n main()","repo_name":"snejadi/Disaster-response","sub_path":"data/process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":3841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15668242776","text":"zada=[]\na=range(3,100,3)\nfor n in a:\n zada.append(n)\nprint(zada)\n#b\ndel zada[4:len(zada):3]\nprint(zada)\n#c\nsuma=sum(zada)/len(zada)\nprint('Średnia to:', suma)","repo_name":"ArekWaa/jsp2021","sub_path":"Lista 2/Zadanie 10.py","file_name":"Zadanie 10.py","file_ext":"py","file_size_in_byte":162,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73187933635","text":"import dpkt\nimport socket\n\ndef process_pcap_file(file):\n print(\"_\" * 50)\n print(f\"READING IPS AND TIMESTAMPS ON: {file}\")\n print(\"_\" * 50)\n \n try:\n with open(file, 'rb') as f:\n pcap = dpkt.pcap.Reader(f)\n\n for timestamp, data in pcap:\n eth = dpkt.ethernet.Ethernet(data)\n ip = eth.data\n tcp = ip.data\n\n try:\n destination_ip = socket.inet_ntoa(ip.dst)\n print(f\"Destination IP Address: {destination_ip}\\tTimestamp: {timestamp}\")\n except socket.error:\n pass\n\n except FileNotFoundError:\n print(f\"File '{file}' not found.\")\n except dpkt.dpkt.NeedData:\n print(f\"Failed to read data from '{file}'.\")\n\n# List of PCAP files to process\npcap_files = [\"Part1.pcap\", \"Part2.pcap\", \"Part3.pcap\", \"Part4.pcap\", \"Part5.pcap\"]\n\nfor file in pcap_files:\n process_pcap_file(file)\n","repo_name":"Richard-Voragen/Network-Analysis","sub_path":"Problem_1/GetIpAddresses.py","file_name":"GetIpAddresses.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34618838297","text":"import time\nimport pytest\nfrom src.api.setups.setup_customer_location import SetupCustomerLocation\nfrom src.api.setups.setup_location import SetupLocation\nfrom src.api.distributor.settings_api import SettingsApi\nfrom src.api.distributor.location_api import LocationApi\nfrom src.api.distributor.transaction_api import TransactionApi\nfrom src.api.mocks_api import MocksApi\n\n@pytest.fixture(scope=\"function\")\ndef customer_organization_location_preset():\n def wrapper(context, ohi=\"MAX\", reorder_controls_settings=\"DEFAULT\", price=None):\n setup_customer_location = SetupCustomerLocation(context)\n setup_customer_location.add_option(\"ohi\", ohi)\n setup_customer_location.setup_organization.add_option(\"site\")\n setup_customer_location.setup_organization.add_option(\"subsite\")\n setup_customer_location.setup_organization.add_option(\"supplier\")\n setup_customer_location.setup_organization.add_option(\"shipto\")\n setup_customer_location.setup_customer_product.add_option(\"price\", price)\n setup_customer_location.setup_organization.setup_customer_shipto.add_option(\"reorder_controls_settings\", reorder_controls_settings)\n return setup_customer_location.setup()\n return wrapper\n\n@pytest.fixture(scope=\"function\")\ndef serialized_location_preset():\n def wrapper(context, round_buy=1, serialization_settings=None, lot=None):\n setup_location = SetupLocation(context)\n setup_location.add_option(\"serialized\")\n setup_location.add_option(\"lot\", lot)\n setup_location.setup_product.add_option(\"round_buy\", round_buy)\n 
setup_location.setup_shipto.add_option(\"serialization_settings\", serialization_settings)\n return setup_location.setup()\n return wrapper\n\n@pytest.fixture(scope=\"function\")\ndef transaction_location_preset():\n def wrapper(context, ohi=0, location_min=None, location_max=None, package_conversion=None, round_buy=None):\n setup_location = SetupLocation(context)\n setup_location.setup_shipto.add_option(\"reorder_controls_settings\", {\"scan_to_order\": False, \"track_ohi\": True, \"enable_reorder_controls\": True})\n setup_location.add_option(\"transaction\", 'ACTIVE')\n setup_location.add_option(\"ohi\", ohi)\n setup_location.add_option(\"min\", location_min)\n setup_location.add_option(\"max\", location_max)\n setup_location.setup_product.add_option(\"package_conversion\", package_conversion)\n setup_location.setup_product.add_option(\"round_buy\", round_buy)\n return setup_location.setup()\n return wrapper\n\n@pytest.fixture(scope=\"function\")\ndef sync_order_location_preset():\n def wrapper(context, sync_endpoint, disable_reorder_controls=False):\n sa = SettingsApi(context)\n ma = MocksApi(context)\n la = LocationApi(context)\n ta = TransactionApi(context)\n\n endpoints_list = [\"quoteOrders\", \"salesOrders\"]\n endpoints_list.append(sync_endpoint)\n ma.set_list_of_available_endpoints(endpoints_list)\n sa.sync_erp_connection_settings()\n\n setup_location = SetupLocation(context)\n setup_location.add_option(\"min\", 10)\n setup_location.add_option(\"max\", 100)\n setup_location.add_option(\"ohi\", 100)\n setup_location.setup_product.add_option(\"round_buy\", 10)\n setup_location.setup_shipto.add_option(\"rl_submit_integration\")\n setup_location.setup_shipto.add_option(\"customer_id\", \"lowest\")\n setup_location.setup_shipto.add_option(\"reorder_controls_settings\", {\"enable_reorder_control\": True, \"track_ohi\": True, \"reorder_controls\": \"ISSUED\"})\n preset = setup_location.setup()\n\n #create ACTIVE transaction\n location = la.get_locations(shipto_id=preset[\"shipto_id\"], customer_id=preset[\"customer_id\"])[0]\n location[\"onHandInventory\"] = 0\n la.update_location([location], preset[\"shipto_id\"], customer_id=preset[\"customer_id\"])\n time.sleep(5)\n transaction = ta.get_transaction(sku=preset[\"product\"][\"partSku\"], shipto_id=preset[\"shipto_id\"], status=\"ACTIVE\")\n preset[\"transaction\"] = {\n \"transaction_id\": transaction[\"entities\"][0][\"id\"],\n \"reorder_quantity\": transaction[\"entities\"][0][\"reorderQuantity\"]\n }\n if disable_reorder_controls:\n sa.set_reorder_controls_settings_for_shipto(preset[\"shipto_id\"], enable_reorder_control=False)\n return preset\n return wrapper\n","repo_name":"fisher1706/srx","sub_path":"src/fixtures/presets.py","file_name":"presets.py","file_ext":"py","file_size_in_byte":4434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23123042466","text":"#%%\nimport sys\nimport matplotlib\nimport numpy as np\nimport scipy as sp\nimport pandas as pd\nimport sklearn as sk\nimport matplotlib.pyplot as plt\nfrom scipy.stats import multivariate_normal as mvn\nfrom sklearn.neighbors import KernelDensity as KD\nfrom matplotlib.colors import ListedColormap\n# Fix random state for reproducibility\nnp.random.seed(1978081)\n# Matplotlib setting\nplt.rcParams['text.usetex'] = True\nmatplotlib.rcParams['figure.dpi']= 300\n\ndef plot_kd(ax, x0, y0, x1, y1, Z):\n cmap_light = ListedColormap([\"#FFE0C0\",\"#B7FAFF\"])\n plt.rc(\"xtick\",labelsize=16)\n 
plt.rc(\"ytick\",labelsize=16)\n ax.plot(x0,y0,\".r\",markersize=8) # class 0\n ax.plot(x1,y1,\".b\",markersize=8) # class 1\n ax.set_title(\"N={},h={}\".format(Ns[i],h))\n ax.set_xlim([-3,9])\n ax.set_ylim([-3,9])\n ax.pcolormesh(xx,yy,Z,cmap=cmap_light)\n ax.contour(xx,yy,Z,colors=\"black\",linewidths=0.5)\n return ax\n\n\nmm0 = np.array([2,2])\nmm1= np.array([4,4])\nSig0 = 4*np.identity(2)\nSig1 = 4*np.identity(2)\nNs = [50, 100, 250, 500]\n#Ns = [50]\nhs = [0.1,0.3,0.5,1, 2, 5]\n#hs = [0.1]\nXs = [[mvn.rvs(mm0, Sig0, n), mvn.rvs(mm1,Sig1,n)] for n in Ns]\n\nclf0s = [[KD() for i in range(0, len(hs))] for j in range(0, len(Ns))]\nclf1s = [[KD() for i in range(0, len(hs))] for j in range(0, len(Ns))]\n\ns = .1 #0.01 # mesh step size\nplts = [plt.subplots(figsize=(8,8), dpi=150) for i in range(0, len(Ns)*len(hs))]\nfigs, axs = list(zip(*plts))\n\nfor (i, X) in enumerate(Xs):\n x0,y0 = np.split(X[0],2,1)\n x1,y1 = np.split(X[1],2,1)\n y = np.concatenate((np.zeros(Ns[i]),np.ones(Ns[i])))\n x_min,x_max = (-3,9)\n y_min,y_max = (-3,9)\n for (j, h) in enumerate(hs):\n clf0s[i][j] = KD(bandwidth=h)\n clf0s[i][j].fit(X[0])\n clf1s[i][j] = KD(bandwidth=h)\n clf1s[i][j].fit(X[1])\n xx,yy = np.meshgrid(np.arange(x_min,x_max,s),np.arange(y_min,y_max,s))\n Z0 = clf0s[i][j].score_samples(np.c_[xx.ravel(), yy.ravel()])\n Z1 = clf1s[i][j].score_samples(np.c_[xx.ravel(), yy.ravel()])\n Z = Z0<=Z1\n Z = Z.reshape(xx.shape)\n plot_kd(axs[i+j], x0, y0, x1, y1, Z)\n figs[i+j].savefig(\"img/c05_kernel\"+\"_h_\"+str(int(10*h))+\"_N_\"+str(int(Ns[i]))+\".png\",bbox_inches=\"tight\",facecolor=\"white\")\n# %% Test error\n\ndef measure_test_error(clf0, clf1, xxs, yys, ys):\n Z0 = clf0.score_samples(np.c_[xxs, yys])\n Z1 = clf1.score_samples(np.c_[xxs, yys])\n Z = Z0<=Z1\n return np.count_nonzero(Z != ys) / len(ys)\n\nnt = 500\nX_test = [mvn.rvs(mm0, Sig0, nt), mvn.rvs(mm1,Sig1,nt)] \nx0,y0 = np.split(X_test[0],2,1)\nx1,y1 = np.split(X_test[1],2,1)\nys = np.concatenate((np.zeros(nt),np.ones(nt)))\n\npltts = [plt.subplots(figsize=(8,8), dpi=150) for i in range(0, len(Ns)*len(hs))]\n\nfor i in range(0, len(Ns)):\n xxs = np.concatenate((x0, x1))\n yys = np.concatenate((y0, y1))\n ys = np.concatenate((np.zeros(Ns[i]),np.ones(Ns[i])))\n for (j, h) in enumerate(hs):\n err = measure_test_error(clf0s[i][j], clf1s[i][j], xxs, yys, ys)\n\n axs[i+j].annotate(\"Test Error: {}\\n Bayes Error: {}\".format(err, 2), (6,8))\n figs[i+j].savefig(\"img/c05_kernel_test\"+\"_h_\"+str(int(10*h))+\"_N_\"+str(int(Ns[i]))+\".png\",bbox_inches=\"tight\",facecolor=\"white\")\n\n\n# %%\n","repo_name":"stevengogogo/ECEN649_Pattern-Recognition","sub_path":"hw/hw3.py","file_name":"hw3.py","file_ext":"py","file_size_in_byte":3209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7678178880","text":"from mysql.connector.fabric import connect\nfrom pylibmodbus import ModbusTcp\nfrom paho.mqtt.client import Client as MQTTClient, MQTT_ERR_SUCCESS\nimport json\nfrom multiprocessing import Process, Event\nfrom queue import Queue as LocalQueue\nfrom typing import Tuple, List, Dict\nimport mysql.connector\nfrom db_credentials import db_credentials\nimport time\nimport signal\nfrom pygame.time import Clock\n\n\nclass MsgCallback:\n def __init__(self):\n self.que = LocalQueue()\n def handler(self, mqtt_cli, userdata, msg):\n self.que.put((msg.topic.split(\"/\")[-1], msg.payload.decode()))\n\n\nclass CtrlC:\n def __init__(self):\n self.sig_received = False\n def handler(self, signal, frame):\n 
self.sig_received = True\n\n\ndef plc_loop_process(tt_str: str, plc_ip: str, plc_id: int, quit_event: Event):\n\n tt = json.loads(tt_str)\n assert isinstance(tt, list)\n \n if not tt: # empty\n quit_event.wait()\n return\n\n highest_bool, highest_hold = max_addrs(tt)\n tt_by_name = {t[\"tag_name\"]: t for t in tt}\n\n modbus_cli = ModbusTcp(plc_ip)\n modbus_cli.connect()\n\n mqtt_cli = MQTTClient()\n msg_callback = MsgCallback()\n mqtt_cli.on_message = msg_callback.handler\n mqtt_cli.connect(\"localhost\")\n assert mqtt_cli.subscribe(f\"nodered/plc/write/{plc_id}/+\", qos=0)[0] == MQTT_ERR_SUCCESS\n\n clock = Clock()\n while not quit_event.is_set():\n clock.tick(1)\n\n if highest_bool >= 0:\n bool_reg = modbus_cli.read_bits(0, highest_bool + 1)\n if highest_hold >= 0:\n hold_reg = modbus_cli.read_registers(0, highest_hold + 1)\n for tag in tt:\n if not tag[\"reported\"]:\n continue\n\n if tag[\"data_type\"] == \"Bool\":\n if bool_reg[tag[\"address\"]]:\n message_out = \"[true,false,false,false,false,false,false,false]\"\n else:\n message_out = \"[false,false,false,false,false,false,false,false]\"\n elif tag[\"data_type\"] == \"Int\":\n message_out = f'[{hold_reg[tag[\"address\"]]}]'\n else: # Real\n message_out = f'[{hold_reg[tag[\"address\"]]},{hold_reg[tag[\"address\"] + 1]}]'\n\n if tag[\"tag_name\"].upper().startswith(\"FAULT_\"):\n severity = \"fault\"\n elif tag[\"tag_name\"].upper().startswith(\"WARNING_\"):\n severity = \"warning\"\n else:\n severity = \"normal\"\n\n assert mqtt_cli.publish(\n f'nodered/plc/announce/{plc_id}/{severity}/{tag[\"tag_name\"]}',\n message_out,\n qos=0\n )[0] == MQTT_ERR_SUCCESS\n \n while not msg_callback.que.empty():\n tag_name, write_val = msg_callback.que.get()\n write_val = json.loads(write_val)\n tag = tt_by_name[tag_name]\n if tag[\"data_type\"] == \"Bool\":\n bit = 1 if write_val[0] else 0\n modbus_cli.write_bit(tag[\"address\"], bit)\n elif tag[\"data_type\"] == \"Int\":\n modbus_cli.write_register(tag[\"address\"], write_val[0])\n else: # Real\n modbus_cli.write_registers(tag[\"address\"], write_val)\n\n mqtt_cli.loop()\n\n modbus_cli.close()\n mqtt_cli.disconnect()\n\n\ndef max_addrs(tt: List[dict]) -> Tuple[int, int]:\n highest_bool = -1\n highest_hold = -1\n for tag in tt:\n if tag[\"data_type\"] == \"Bool\":\n if tag[\"address\"] > highest_bool:\n highest_bool = tag[\"address\"]\n else: # Int or Real\n if tag[\"address\"] > highest_hold:\n highest_hold = tag[\"address\"]\n if tag[\"data_type\"] == \"Real\":\n highest_hold += 1\n return highest_bool, highest_hold\n\n\ndef query_db() -> Dict[int, str]:\n db = mysql.connector.connect(**db_credentials)\n cur = db.cursor()\n cur.execute(\n \"\"\"\n SELECT ip_address, node_red_id\n FROM powerplant_exp_unit_2.plc\n ;\n \"\"\"\n )\n id_ip = {id_: ip for ip, id_ in cur}\n db.disconnect()\n return id_ip\n\n# {\n# \"tag_name\": \"time_UTC\",\n# \"fc\": {\n# \"read\": 3,\n# \"write\": 16\n# },\n# \"address\": 60,\n# \"quantity\": 2,\n# \"data_type\": \"Real\"\n# }\n\ndef main():\n\n mqtt_cli = MQTTClient()\n msg_callback = MsgCallback()\n mqtt_cli.on_message = msg_callback.handler\n mqtt_cli.connect(\"localhost\")\n assert mqtt_cli.subscribe(\"nodered/plc/tag_table/+\", qos=0)[0] == MQTT_ERR_SUCCESS\n\n ctrl_c = CtrlC()\n signal.signal(signal.SIGINT, ctrl_c.handler)\n processes: Dict[int, Tuple[Process, Event]] = dict()\n tt_backup: Dict[int, str] = dict()\n\n while not ctrl_c.sig_received:\n\n if not msg_callback.que.empty():\n id_ip = query_db()\n while not msg_callback.que.empty():\n id_, tt_str = 
msg_callback.que.get()\n id_ = int(id_)\n tt_backup[id_] = tt_str\n p_e = processes.get(id_)\n if p_e is not None:\n p, e = p_e\n e.set()\n p.join(timeout=1)\n if p.is_alive():\n print(\"had to commit murder...\")\n p.terminate()\n e = Event()\n p = Process(\n target=plc_loop_process,\n args=(tt_str, id_ip[id_], id_, e),\n daemon=True\n )\n p.start()\n processes[id_] = (p, e)\n\n died = []\n for id_, (p, e) in processes.items():\n if not p.is_alive():\n print(f\"PLC {id_} died for some reason\")\n died.append(id_)\n if died:\n id_ip = query_db()\n for id_ in died:\n e = Event()\n p = Process(\n target=plc_loop_process,\n args=(tt_backup[id_], id_ip[id_], id_, e),\n daemon=True\n )\n p.start()\n processes[id_] = (p, e)\n\n mqtt_cli.loop()\n\n time.sleep(0.25)\n\n print(\" ctrl-c\")\n\n for p, e in processes.values():\n e.set()\n\n mqtt_cli.disconnect()\n\n t = time.time()\n for p, e in processes.values():\n p.join(timeout=1)\n if time.time() - t >= 1:\n print(\"outta time\")\n break\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"liamHowatt/PowerPlant_code_showcase","sub_path":"pyred.py","file_name":"pyred.py","file_ext":"py","file_size_in_byte":6335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26262651820","text":"import os\nfrom datetime import date, datetime, time\nfrom typing import List\n\nimport requests\n\nfrom skill.schemas import PlannedLesson, Student\n\n\nclass NotFoundError(Exception):\n pass\n\n\n# region URLs\n\n\ndef base_url():\n if os.environ.get(\"DEBUG\", \"False\").lower() in (\"true\", \"1\", \"t\"):\n url = \"https://journal.bpo.edu.n3demo.ru/api/journal\"\n else:\n url = \"https://dnevnik2.petersburgedu.ru/api/journal\"\n\n return url\n\n\ndef schedule_url():\n return f\"{base_url()}/schedule/list-by-education\"\n\n\ndef students_url():\n return f\"{base_url()}/person/related-child-list\"\n\n\n# endregion\n\n\ndef get_schedule_on_date(token: str, id: str, day=None) -> List[PlannedLesson]:\n\n if day is None:\n day = date.today()\n\n start_time = datetime.combine(day, time.min)\n finish_time = datetime.combine(day, time.max)\n\n response = requests.get(\n schedule_url(),\n params={\n \"p_educations[]\": id,\n \"p_datetime_from\": datetime.strftime(start_time, \"%d.%m.%Y %H:%M:%S\"),\n \"p_datetime_to\": datetime.strftime(finish_time, \"%d.%m.%Y %H:%M:%S\"),\n },\n cookies={\"X-JWT-Token\": token},\n headers={\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64)\"},\n )\n\n if response.status_code == 500:\n return []\n\n if response.status_code == 401:\n raise Exception(\"Не удалось авторизоваться\")\n\n result = []\n for lesson in response.json().get(\"data\", {}).get(\"items\", []):\n lesson_from = datetime.strptime(\n lesson[\"datetime_from\"], \"%d.%m.%Y %H:%M:%S\"\n ).time()\n lesson_to = datetime.strptime(lesson[\"datetime_to\"], \"%d.%m.%Y %H:%M:%S\").time()\n result.append(\n PlannedLesson(lesson[\"subject_name\"], lesson_from, lesson_to),\n )\n return sorted(result)\n\n\ndef get_students(token: str) -> List[Student]:\n response = requests.get(\n students_url(),\n cookies={\"X-JWT-Token\": token},\n headers={\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64)\"},\n )\n\n if response.status_code == 401:\n raise Exception(\"Не удалось авторизоваться\")\n\n result = []\n for student in response.json().get(\"data\", {}).get(\"items\", []):\n name = student.get(\"firstname\", \"\")\n id = student.get(\"educations\", [])[0].get(\"education_id\", \"\")\n 
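# one Student per child, keyed by the id of the first linked education record\n        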
result.append(Student(name, id))\n\n    return result\n","repo_name":"kontur-1c/AliceDiary","sub_path":"skill/diary_api.py","file_name":"diary_api.py","file_ext":"py","file_size_in_byte":2429,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"4950015784","text":"# -*- coding: utf-8 -*-\nimport config\nimport requests\nimport json\nimport os\nfrom urllib.parse import urlparse\n\ndata_pool = []\n\nprint('> rate_limit: \\n', requests.get('https://api.github.com/rate_limit').content.decode())\nprint('> start')\n\ndef get_value(elem):\n    return elem['stargazers_count']\n\ndef save_json(path, content):\n    root = 'v1'\n    dir = root + path + '/'\n    file = dir + 'data.json'\n    # create the directory if it does not exist\n    isExists = os.path.exists(dir)\n    if not isExists:\n        os.makedirs(dir)\n    # write the file\n    with open(file, 'w', encoding = 'utf-8') as file_obj:\n        json.dump(content, file_obj, ensure_ascii = False, indent = 2)\n\ntry:\n    print('> links: ', config.read('links'))\n    for link in config.read('links'):\n        print('> get: ', link)\n        url = urlparse(link)\n        req = requests.get(link)\n        repos = json.loads(req.content.decode())\n        for repo in repos:\n            data_pool.append(repo)\n    data_pool.sort(key=get_value, reverse=True)\n    for i in config.read('output'):\n        # take the first n entries\n        save_json('/top'+str(i), data_pool[0:i])\n\n\nexcept Exception as e:\n    print('> exception: ', e)\n\nprint('\\n> rate_limit: \\n', requests.get('https://api.github.com/rate_limit').content.decode())\nprint('> end')\n","repo_name":"xaoxuu/repos-sort-by-stars","sub_path":"generator/v1.py","file_name":"v1.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"30276548705","text":"from Visualization_VGG16.config import *\r\nfrom Visualization_VGG16.utils.data_handling import *\r\nfrom Visualization_VGG16.Visualizer_VGG16 import *\r\n\r\n\r\n# change according to your desire\r\nLAYER_NUMBER = 'block3_conv2'\r\nFEATURE_MAP_NUMBER = 35\r\nIMAGE_FILE_PATH = 'example_data/cat.jpg'\r\n#IMAGE_FILE_PATH = 'example_data/car.jpg'\r\n\r\n\r\n# read image\r\nraw_image = Read_image(IMAGE_FILE_PATH)\r\n\r\n# reshape image to fit the model\r\nimage = Visualization_Tool_fit_image(raw_image, INPUT_DIMENSIONS)\r\n\r\n# create the model ready to visualize one layer and one feature map\r\nVisualizer = VGG16_Visualizer(VGG16_LAYER_NAMES_LIST, VGG16_WEIGHT_DIMENSIONS_LIST, TRAINED_VGG16_WEIGHTS_PATH, INPUT_DIMENSIONS)\r\n\r\nad_hoc_model = Visualizer.Generate_Model(LAYER_NUMBER, FEATURE_MAP_NUMBER)\r\n\r\n# get the feature map down to pixel space\r\npixel_space_feature = ad_hoc_model.predict(image)\r\n\r\n# plot\r\nPlot_image(array_image=image)\r\nPlot_image(array_image=pixel_space_feature, cv2_colormap=cv2.COLORMAP_HOT)\r\n","repo_name":"LauraMasaracchia/Visualization_VGG16","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"27944622679","text":"from flask import Flask, request, abort, redirect\nfrom db import db_session, init_db\n\nfrom views import user_views, todo_list_views\nfrom models import User\n\napp = Flask(__name__)\napp.register_blueprint(todo_list_views.todo_list_api)\napp.register_blueprint(user_views.user_api)\n\napp.config.update(\n    SECRET_KEY = 'secret_dshjhdj2jhgg87'\n)\n\n# For this purpose, refresh the db each time\ninit_db()\n\n\n@app.teardown_appcontext\ndef shutdown_session(exception=None):\n    
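# drop the scoped SQLAlchemy session at the end of each request/app context\n    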
db_session.remove()\n\n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0')","repo_name":"await-rescue/todo-api","sub_path":"todo/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31233812168","text":"import math\n\nimport cv2\n\nfrom model.building_blocks import *\nfrom nn_utils.geometry_utils import indices_to_world, world_to_ipm, get_tangents, \\\n indices_to_ipm, indices_to_ipm_vec\n\n\ndef _integrate_attractor_debug(attractor, extract_correction_fn, pos, momentum, num_steps, only_attractor=False, momentum_weight=0.7, normalize_momentum=True,\n local_grid=None, per_point_back_prop=False):\n points = []\n points_guess = []\n corrections = []\n magnitude_sq = (momentum ** 2).sum(dim=1, keepdim=True)\n for i in range(num_steps):\n if local_grid is not None:\n grid = pos.view(attractor.shape[0], 1, 1, 2) + local_grid\n else:\n # Default: this is not really a grid, but just a single interpolated lookup in the attractor\n grid = pos.view(attractor.shape[0], 1, 1, 2)\n # lookup index corrections for the approximate position in the attractor tensor\n # align_corners=False is important, so that each cell in the tensor covers a receptive field of the same size\n attractor_lookup = torch.nn.functional.grid_sample(attractor, grid, align_corners=False, padding_mode=\"border\", mode=\"bilinear\")\n correction = extract_correction_fn(attractor_lookup, momentum)\n points_guess.append(pos)\n corrections.append(correction)\n pos = pos + correction\n points.append(pos)\n if i >= 1:\n new_momentum = momentum * momentum_weight + correction * (1 - momentum_weight)\n if normalize_momentum:\n # normalize integration speed so that we use approximately equidistant steps\n momentum = new_momentum * torch.sqrt(magnitude_sq / (new_momentum ** 2).sum(dim=1, keepdim=True))\n if not only_attractor:\n pos = pos + momentum\n if per_point_back_prop:\n pos = pos.detach()\n points = torch.stack(points, dim=2)\n points_guess = torch.stack(points_guess, dim=2)\n corrections = torch.stack(corrections, dim=2)\n return points, points_guess, corrections\n\n\ndef _draw_vector_field(img, vector_field, color, normalize=False, step=15):\n x_field = cv2.resize(vector_field[0, ...], (img.shape[1], img.shape[0]), interpolation=cv2.INTER_LINEAR)\n y_field = cv2.resize(vector_field[1, ...], (img.shape[1], img.shape[0]), interpolation=cv2.INTER_LINEAR)\n for y in range(0, img.shape[0], step):\n for x in range(0, img.shape[1], step):\n v = [x_field[y, x], y_field[y, x]]\n if normalize:\n v_len = math.sqrt(v[0] ** 2 + v[1] ** 2) / step / 0.5\n v[0] /= v_len\n v[1] /= v_len\n else:\n v[0] *= img.shape[1] / 2\n v[1] *= img.shape[0] / 2\n t = int(x + v[0]), int(y + v[1])\n cv2.arrowedLine(img, (x, y), t, color, thickness=1, tipLength=0.1)\n\n\ndef _attractor_example():\n import argparse\n import numpy as np\n from dataset.io_data_utils import smart_parse_args, init_data_loaders, get_coordinate_limits_from_dataset\n from nn_utils.lane_metrics import lane_mse\n from loss.vector_field_loss import indirect_xy_attractor_integration_loss, gt_to_index_xy_attractor\n import random\n random.seed(24)\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--data', type=str, default=None, help='path of training data')\n args = smart_parse_args(parser)\n args.no_normalize = True\n dataloader_train, dataloader_val, dataloader_test = init_data_loaders(args, shuffle=False)\n sample = 
dataloader_train.dataset[2]\n transform = sample[\"local_map\"][\"transform\"]\n\n coordinate_limits = get_coordinate_limits_from_dataset(dataloader_train.dataset)\n gt_points = sample[\"local_map\"][\"right_lane\"][\"left_marking\"].unsqueeze(0)\n gt_viz_mask = sample[\"local_map\"][\"visibility_mask\"] = torch.from_numpy(sample[\"local_map\"][\"visibility_mask\"]).unsqueeze(0)\n sample[\"local_map\"][\"right_lane\"][\"left_marking\"] = gt_points\n sample[\"local_map\"][\"step_size\"] = torch.tensor(sample[\"local_map\"][\"step_size\"])\n tangents = get_tangents(sample[\"local_map\"][\"right_lane\"])\n attractor = gt_to_index_xy_attractor(gt_points, coordinate_limits, (12, 20))\n attractor = (torch.rand_like(attractor) - 0.5) * 0.1\n attractor = torch.zeros_like(attractor)\n # attractor = torch.cat([attractor] * 8, dim=1)\n attractor = nn.Parameter(attractor)\n\n optimizer = torch.optim.SGD([attractor], lr=0.5, weight_decay=0)\n for i in range(200):\n pred = {\"lane_attractor\": torch.tanh(attractor), \"local_map_rl\": gt_points, \"main_flow\": attractor}\n # loss = indirect_biased_xy_attractor_integration_loss(pred, sample, coordinate_limits, bias_directions)\n loss = indirect_xy_attractor_integration_loss(pred, sample, coordinate_limits, iterations=1, ohem_thresh=0.02 ** 2, min_forward_step_length=0.05,\n max_forward_step_length=0.2, only_train_representable=True)\n # loss = indirect_attractor_step_loss(pred, sample, coordinate_limits)\n # loss += indirect_xy_direction_loss(pred, sample, coordinate_limits)\n # loss = indirect_main_flow_loss(pred, sample, coordinate_limits, lookahead_meters=0.8)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n # attractor = torch.tanh(attractor) * 0.2\n attractor = attractor.detach().cpu()\n\n # print(attractor.shape)\n index_per_x_meter = 2.0 / (coordinate_limits[1, 0] - coordinate_limits[0, 0])\n initial_pos = torch.tensor([[0.0, 0.95]], dtype=torch.float32)\n initial_momentum = torch.tensor([0.0, -index_per_x_meter * 0.2], dtype=torch.float32).view(-1, 2)\n initial_momentum = torch.tensor([0.0, -0.03], dtype=torch.float32).view(-1, 2)\n\n def extract_correction(attractor_lookup, curr_momentum):\n # remove spatial dimensions, which are 1-sized because of the 1x1 grid_sample() lookup\n return attractor_lookup.mean(dim=[-2, -1])\n\n points, points_n, corrections = _integrate_attractor_debug(attractor, extract_correction, initial_pos, initial_momentum, 60, momentum_weight=0.7)\n # print(points)\n # print(\"gt\", gt_points)\n # print(\"gt int\", indices_to_world(points, self.coordinate_limits))\n pred_points = indices_to_world(points, coordinate_limits)\n # pred_points = _gen_random_noisy_samples_around_gt(sample[\"local_map\"][\"right_lane\"], max_noise_inner_curve=0.2)[0]\n img = cv2.cvtColor(sample[\"img\"][0].cpu().numpy(), cv2.COLOR_GRAY2BGR)\n up_scaling = 5\n img = cv2.resize(img, dsize=None, fx=up_scaling, fy=up_scaling)\n # _, pred_points, gt_points = indirect_attractor_step_loss(pred, sample, coordinate_limits)\n ipm_points = indices_to_ipm(points, img.shape)\n ipm_points_n = indices_to_ipm(points_n, img.shape)\n ipm_corrections = indices_to_ipm_vec(corrections, img.shape)\n ipm_points_gt = world_to_ipm(gt_points.squeeze().permute(1, 0).numpy(), img.shape[1], img.shape[0],\n pixels_per_world_unit=transform[\"pixels_per_meter\"] * up_scaling, car_to_image_offset=transform[\"car_to_image_offset\"])\n attractor = attractor[0, :, ...].cpu().numpy()\n attractor_viz = cv2.resize(attractor[0, ...] 
+ 0.5, sample[\"img\"].shape[-2:][::-1], interpolation=cv2.INTER_NEAREST)\n _draw_vector_field(img, attractor, (0, 0, 100), step=up_scaling * 5, normalize=False)\n attractor_viz = cv2.cvtColor(attractor_viz, cv2.COLOR_GRAY2BGR)\n # print(ipm_points.shape)\n # print(np.sqrt(((ipm_points[1:] - ipm_points[:-1]) ** 2).sum(axis=-1)))\n ipm_points = ipm_points.squeeze().permute(1, 0).numpy().astype(np.int)\n ipm_points_n = ipm_points_n.squeeze().permute(1, 0).numpy().astype(np.int)\n ipm_points_gt = ipm_points_gt.astype(np.int)\n # uncomment to visualize how gen_random_noisy_samples_around_gt works\n # for r in range(100):\n # random_points = _gen_random_noisy_samples_around_gt(sample[\"local_map\"][\"right_lane\"], sample[\"local_map\"][\"step_size\"],\n # max_noise_inner_curve=0.2)[0]\n # random_points = world_to_indices(random_points, coordinate_limits)\n # random_points = indices_to_ipm(random_points, img.shape).squeeze().permute(1, 0).numpy().astype(np.int)\n # for i in range(len(ipm_points_gt) - 1):\n # cv2.line(img, tuple(random_points[i]), tuple(random_points[i]), (255, 255, 0), thickness=6, lineType=cv2.LINE_4)\n for i in range(len(ipm_points_gt) - 1):\n cv2.line(img, tuple(ipm_points_gt[i]), tuple(ipm_points_gt[i]), (255, 0, 0), thickness=6, lineType=cv2.LINE_4)\n for i in range(len(ipm_points) - 1):\n cv2.line(img, tuple(ipm_points[i]), tuple(ipm_points[i + 1]), (0, 255, 0), thickness=2, lineType=cv2.LINE_4)\n cv2.line(attractor_viz, tuple(ipm_points[i]), tuple(ipm_points[i]), (255, 0, 255), thickness=2, lineType=cv2.LINE_4)\n cv2.arrowedLine(img, tuple(ipm_points_n[i]), tuple(ipm_points[i]), (255, 0, 255), thickness=2)\n cv2.arrowedLine(img, tuple(ipm_points[i]), tuple(ipm_points_n[i + 1]), (255, 255, 255), thickness=2)\n # cv2.imshow(\"attractor\", cv2.resize(attractor_viz, dsize=None, fx=3, fy=3))\n cv2.imshow(\"test\", img[:, :, ...])\n # cv2.imshow(\"test\", img)\n while cv2.waitKey(0) != 27:\n continue\n\n print(\"Model's MSE on image: \", lane_mse(pred_points, gt_points, (sample[\"local_map\"][\"visibility_mask\"])))\n\n\nif __name__ == '__main__':\n _attractor_example()\n","repo_name":"svenlr/deep-lane-vector-fields","sub_path":"model/vector_fields/debug_vf.py","file_name":"debug_vf.py","file_ext":"py","file_size_in_byte":9478,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"39501037531","text":"class Solution:\n # @param n: an integer\n # @return: a boolean which equals to True if the first player will win\n\n # dp[i]: current player win with i stones\n def firstWillWin(self, n):\n if n == 0:\n return False\n if n <= 2:\n return True\n dp = [False, True, True]\n for i in xrange(3, n + 1):\n dp[i % 3] = not (dp[(i - 1) % 3] and dp[(i - 2) % 3])\n return dp[n % 3]\n\n","repo_name":"jwyx3/practices","sub_path":"python/coins-in-a-line.py","file_name":"coins-in-a-line.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"21981971494","text":"from collections import defaultdict\n\ntest=\"\"\"\n6,10\n0,14\n9,10\n0,3\n10,4\n4,11\n6,0\n6,12\n4,1\n0,13\n10,12\n3,4\n3,0\n8,4\n1,10\n2,14\n8,10\n9,0\n\nfold along y=7\nfold along x=5\n\"\"\"\n\ndef parsedata(data=None):\n prefixx=\"fold along x=\"\n prefixy=\"fold along y=\"\n if data is None:\n with open(\"input13.txt\") as f:\n data=f.read()\n data=data.splitlines()\n \n P=[]\n F=[]\n for l in data:\n if \",\" in l:\n x,y=l.split(',')\n P.append((int(x),int(y)))\n elif \"y=\" 
in l:\n F.append(('y',int(l.split('=')[1])))\n elif \"x=\" in l:\n F.append(('x',int(l.split('=')[1])))\n return P,F\n\ndef foldy(P,folds):\n XtoY=defaultdict(lambda:[])\n for x,y in P:\n XtoY[x].append(y)\n for x in XtoY:\n ys=XtoY[x]\n for _,fy in folds:\n nys=[]\n for y in ys:\n if y 10 :\n u = [int(x) for x in list(str(t))]\n v = sorted(u)\n w = \"\".join([str(x) for x in v])\n if str(t) == w:\n break\n else:\n t -= 1\n return t\n\ndef main(argv=None):\n if argv is None:\n argv = sys.argv\n f = open(sys.argv[1], 'r')\n n = int(f.readline())\n n_case = 1\n while n_case <= n:\n s = solve(int(f.readline()))\n print(f\"Case #{n_case}: {s}\")\n n_case += 1\n f.close()\n return 0\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/4955.py","file_name":"4955.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32468842343","text":"# -*- coding: utf-8 -*-\n \nfrom numpy import *\nfrom scipy import * \nimport numpy as np \nimport cv2\nimport skimage\n\ndef SaltAndPepper(src, percentage, _amount):\n img = skimage.io.imread(src)\n SP_Noise = skimage.util.random_noise(img, mode=\"s&p\", amount=_amount, seed=None, clip=True, salt_vs_pepper=percentage)\n return SP_Noise\n \ndef addGaussianNoise(imgName, _var):\n img = skimage.io.imread(imgName) \n Gaussian_Noise = skimage.util.random_noise(img, mode=\"gaussian\", var=_var, seed=None, clip=True)\n return Gaussian_Noise\n\ndef addSpeckleNoise(imgName):\n img = skimage.io.imread(imgName)\n Speckle_Noise = skimage.util.random_noise(img, mode=\"speckle\", var=2, seed=None, clip=True)\n return Speckle_Noise\n\ndef addPoissonNoise(imgName):\n img = skimage.io.imread(imgName)\n Poisson_Noise = skimage.util.random_noise(img, mode=\"poisson\", seed=None, clip=True)\n return Poisson_Noise\n\nif __name__ == \"__main__\":\n src_imgs = [\"./data/MSRA-B/{}.jpg\".format(i) for i in range(3001, 3021)]\n sp_paths = [\"./val_pic/sp_{}.jpg\".format(i) for i in range(3001, 3021)]\n gauss_paths = [\"./val_pic/gauss_{}.jpg\".format(i) for i in range(3001, 3021)]\n speckle_paths = [\"./val_pic/speckle_{}.jpg\".format(i) for i in range(3001, 3021)]\n poisson_paths = [\"./val_pic/poisson_{}.jpg\".format(i) for i in range(3001, 3021)]\n for i in range(20):\n srcImage = src_imgs[i] \n # SaltAndPepper_noiseImage = SaltAndPepper(srcImage,0.5, 1.0) #再添加10%的椒盐噪声\n # gauss_noiseImage = addGaussianNoise(srcImage, 0.5) \n speckle_noiseImage = addSpeckleNoise(srcImage)\n # poisson_noiseImage = addPoissonNoise(srcImage)\n\n # sp_path = sp_paths[i]\n # gauss_path = gauss_paths[i]\n speckle_path = speckle_paths[i]\n # poisson_path = poisson_paths[i]\n\n # skimage.io.imsave(sp_path, SaltAndPepper_noiseImage)\n # skimage.io.imsave(gauss_path, gauss_noiseImage)\n skimage.io.imsave(speckle_path, speckle_noiseImage)\n # skimage.io.imsave(poisson_path, poisson_noiseImage)\n\n cv2.waitKey(0) \n cv2.destroyAllWindows() \n\n","repo_name":"vc-nju/drfi_python","sub_path":"measures/generate_noise.py","file_name":"generate_noise.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"61"} +{"seq_id":"13417342298","text":"def main():\r\n num1 = int(input(\"digite o primeiro ângulo: \"))\r\n num2 = int(input(\"digite o segundo ângulo: \"))\r\n num3 = int(input(\"digite o terceiro ângulo: \"))\r\n if num1 == 0:\r\n print('<<>>.')\r\n 
elif num2 == 0:\r\n print('<<>>.')\r\n elif num3 == 0:\r\n print('<<>>Os ângulos internos {num1}°, {num2}° é {num3}° formam um triângulo')\r\n else:\r\n print(f'>>>Os ângulos internos {num1}°, {num2}° é {num3}° Não formam um triângulo')\r\n input('Se deseja consultar outros ângulos, Aperte <>')\r\n main()\r\n\r\n\r\ndef acutangulo3(num3):\r\n if num3 < 90:\r\n print('>>>Do tipo acutângulo, pois, os três ângulos são menores que 90°.')\r\n elif num3 ==90:\r\n print(f'>>>É como o terceiro ângulo é igual á 90°, Formam um triângulo Retângulo.')\r\n input('Se deseja consultar outros ângulos, Aperte <>')\r\n main()\r\n elif num3 > 90:\r\n print('>>>É como o terceiro ângulo é maior que 90°, Formam um triângulo Obtusângulo.')\r\n input('Se deseja consultar outros ângulos, Aperte <>')\r\n main()\r\n else:\r\n print(f'>>> Porém,como o terceiro ângulo ({num3}°) é maior ou igual 90°, Não formam um triângulo Acutângulo.')\r\n\r\n\r\ndef acutangulo2(num2,num3):\r\n if num2 < 90:\r\n acutangulo3(num3)\r\n elif num2 ==90:\r\n print('>>>É como o segundo ângulo é igual á 90°, Formam um triângulo Retângulo.')\r\n input('Se deseja consultar outros ângulos, Aperte <>')\r\n main()\r\n elif num2 > 90:\r\n print('>>>É como o segundo ângulo é maior que 90°, Formam um triângulo Obtusângulo.')\r\n input('Se deseja consultar outros ângulos, Aperte <>')\r\n main()\r\n else:\r\n print(f'>>> Porém,como o segundo ângulo ({num2}°) é maior ou igual 90°, Não formam um triângulo Acutângulo.')\r\n\r\n\r\ndef acutangulo(num1, num2, num3):\r\n if num1 < 90:\r\n acutangulo2(num2, num3)\r\n elif num1 ==90:\r\n print('>>>É como o primeiro ângulo é igual á 90°, Formam um triângulo Retângulo.')\r\n input('Se deseja consultar outros ângulos, Aperte em <>')\r\n main()\r\n elif num1 > 90:\r\n print('>>>É como o primeiro ângulo é maior que 90°, Formam um triângulo Obtusângulo.')\r\n input('Se deseja consultar outros ângulos, Aperte <>')\r\n main()\r\n else:\r\n print(f\">>>Porém, Não formam um triângulo Acutângulo, pois o primeiro ângulo ({num1}°) é maior ou igual á 90°.\")\r\n\r\n\r\nmain()\r\n\r\n","repo_name":"jose-rgb/-ifpi-ads-algoritmos2020","sub_path":"Exercicios_de_condicionais/exercicio06_lista2a.py","file_name":"exercicio06_lista2a.py","file_ext":"py","file_size_in_byte":2955,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39509416942","text":"from django.http import HttpResponse\nfrom django.template import Context\nfrom django.template.loader import render_to_string, get_template\nfrom django.core.mail import EmailMessage\n\ndef email_one(request):\n subject = \"I am a text email\"\n to = ['buddy@buddylindsey.com']\n from_email = 'test@example.com'\n\n ctx = {\n 'user': 'buddy',\n 'purchase': 'Books'\n }\n\n message = render_to_string('restapi/email/email.txt', ctx)\n\n EmailMessage(subject, message, to=to, from_email=from_email).send()\n\n return HttpResponse('email_one')\n\n","repo_name":"flaviocnn/skoobackend","sub_path":"restapi/boh.py","file_name":"boh.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33972369293","text":"import datetime\r\nimport pytz\r\nimport logging\r\nimport ast\r\nimport cryptopt.utils as utils\r\nfrom .option import Option\r\nfrom .deribitREST import DeribitREST\r\n\r\n\r\nclass TheoEngine:\r\n def __init__(self, underlying_pair,\r\n db,\r\n underlying_price=None,\r\n expirations=[],\r\n strikes={},\r\n atm_volatility=0.5,\r\n 
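# 0.5 = 50% annualized volatility, presumably a neutral seed before\r\n # implied vols are fitted from market quotes (an assumption, not a\r\n # documented author choice).\r\n 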
interest_rate=0):\r\n self.underlying_pair = underlying_pair\r\n self.db = db\r\n self.underlying_price = underlying_price\r\n self.expirations = expirations\r\n self.strikes = {e: strikes for e in self.expirations}\r\n self.atm_volatility = atm_volatility\r\n self.interest_rate = interest_rate\r\n self.currency = self.underlying_pair.split('/')[0]\r\n self.time = utils.get_current_time()\r\n self.options = {\r\n 'call': {},\r\n 'put': {}\r\n }\r\n self.exchange_symbols = []\r\n self.options_by_name = {}\r\n self.underlying_exchange_symbol = self.get_exchange_symbol(pair=self.underlying_pair)\r\n self.client = None\r\n if underlying_price is None:\r\n self.setup_client()\r\n self.get_underlying_price()\r\n\r\n def setup_client(self):\r\n self.client = DeribitREST()\r\n\r\n def get_atm_option(self, expiry):\r\n atm_option = None\r\n best_delta_diff = 1\r\n for option in self.iterate_options():\r\n if option.expiry == expiry:\r\n if option.delta is None:\r\n print(\"No delta found for \" + str(option))\r\n continue\r\n delta_diff = abs(abs(option.delta) - .5)\r\n if atm_option is None or delta_diff < best_delta_diff:\r\n best_delta_diff = delta_diff\r\n atm_option = option\r\n return atm_option\r\n\r\n def get_exchange_symbol(self, pair):\r\n if pair == \"BTC/USD\":\r\n return \"BTC-PERPETUAL\"\r\n if pair == \"ETH/USD\":\r\n return \"ETH-PERPETUAL\"\r\n return None\r\n\r\n def get_exchange_symbols(self):\r\n if not self.exchange_symbols:\r\n for option in self.iterate_options():\r\n self.exchange_symbols.append(option.exchange_symbol)\r\n return self.exchange_symbols\r\n\r\n def get_underlying_price(self):\r\n orderbook = self.client.getorderbook(self.underlying_exchange_symbol)\r\n self.underlying_price = (orderbook['bids'][0]['price'] + orderbook['asks'][0]['price']) / 2\r\n for option in self.iterate_options():\r\n option.set_underlying_price(self.underlying_price)\r\n return self.underlying_price\r\n\r\n def get_option(self, option_name):\r\n if option_name in self.options_by_name:\r\n return self.options_by_name[option_name]\r\n return None\r\n\r\n def build_options(self):\r\n if self.strikes is not None and self.expirations is not None:\r\n for expiry in self.expirations:\r\n for option_type in ['call', 'put']:\r\n self.options[option_type][expiry] = {}\r\n for strike in self.strikes[expiry]:\r\n for option_type in ['call', 'put']:\r\n option = Option(\r\n underlying_pair=self.underlying_pair,\r\n option_type=option_type,\r\n strike=strike,\r\n expiry=expiry,\r\n interest_rate=0,\r\n volatility=self.atm_volatility,\r\n underlying_price=self.underlying_price,\r\n time=self.time\r\n )\r\n option.calc_greeks()\r\n self.options[option_type][expiry][strike] = option\r\n self.options_by_name[option.exchange_symbol] = option\r\n\r\n def parse_option_metadata(self, option_metadata):\r\n for metadata in option_metadata:\r\n expiry = metadata['expiry']\r\n [year, month, day] = expiry.split('-')\r\n expiry = datetime.datetime(year=int(year), month=int(month), day=int(day), tzinfo=pytz.UTC)\r\n option_type = metadata['type']\r\n strike = int(float(metadata['strike']))\r\n option = Option(underlying_pair=self.underlying_pair,\r\n option_type=option_type,\r\n strike=strike,\r\n expiry=expiry\r\n )\r\n option.delta = float(metadata['delta'])\r\n option.gamma = float(metadata['gamma'])\r\n option.theta = float(metadata['theta'])\r\n option.wvega = float(metadata['wvega'])\r\n option.vega = float(metadata['vega'])\r\n option.vol = float(metadata['vol'])\r\n if 'best_bid' in metadata:\r\n 
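# ast.literal_eval() parses the stored Python literal without executing\r\n # arbitrary code (unlike eval()); we assume the field was serialized\r\n # upstream with str()/repr().\r\n 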
option.best_bid = ast.literal_eval(metadata['best_bid'])\r\n if 'best_ask' in metadata:\r\n option.best_ask = ast.literal_eval(metadata['best_ask'])\r\n if 'exchange_symbol' in metadata:\r\n option.exchange_symbol = metadata['exchange_symbol']\r\n if option_type not in self.options:\r\n self.options[option_type] = {}\r\n if expiry not in self.options[option_type]:\r\n self.options[option_type][expiry] = {}\r\n self.options[option_type][expiry][strike] = option\r\n self.options_by_name[option.exchange_symbol] = option\r\n\r\n def iterate_options(self):\r\n expirys_to_remove = []\r\n for option_type in self.options:\r\n for expiry in self.options[option_type]:\r\n if expiry > utils.get_current_time():\r\n for strike in self.options[option_type][expiry]:\r\n yield self.options[option_type][expiry][strike]\r\n else:\r\n logging.info(\"Expiry to remove: \" + str(expiry))\r\n expirys_to_remove.append(expiry)\r\n if expirys_to_remove:\r\n for option_type in self.options:\r\n self.options[option_type] = {k: v for k, v in self.options[option_type].items()\r\n if k not in expirys_to_remove}\r\n logging.info(\"Expirys after removal: \" + str(self.options[option_type]))\r\n\r\n def calc_all_greeks(self):\r\n for option in self.iterate_options():\r\n option.calc_greeks()\r\n for option_type in self.options:\r\n for expiry in self.options[option_type]:\r\n atm_vega = self.get_atm_option(expiry).vega\r\n for strike in self.options[option_type][expiry]:\r\n option = self.options[option_type][expiry][strike]\r\n option.calc_wvega(atm_vega)\r\n\r\n def persist_orderbooks(self):\r\n for option in self.iterate_options():\r\n orderbook_result = self.client.getorderbook(option.exchange_symbol)\r\n self.db.insert_snapshot(orderbook_result, option)\r\n logging.info(\"Persisted orderbook for option \" + option.exchange_symbol)\r\n\r\n def build_deribit_options(self):\r\n if self.client is None:\r\n self.setup_client()\r\n self.options_by_name = {}\r\n self.expirations = []\r\n self.strikes = {}\r\n instruments = [i for i in self.client.getinstruments() if i['baseCurrency'] == self.currency]\r\n options = [i for i in instruments if i['kind'] == 'option']\r\n for option_info in options:\r\n option_type = option_info['optionType']\r\n strike = option_info['strike']\r\n expiry = pytz.timezone('GMT').localize(\r\n datetime.datetime.strptime(option_info['expiration'], '%Y-%m-%d %H:%M:%S GMT')\r\n )\r\n if expiry not in self.expirations:\r\n self.expirations.append(expiry)\r\n if expiry not in self.strikes:\r\n self.strikes[expiry] = []\r\n if strike not in self.strikes[expiry]:\r\n self.strikes[expiry].append(strike)\r\n option = Option(\r\n underlying_pair=self.underlying_pair,\r\n option_type=option_type,\r\n strike=strike,\r\n expiry=expiry,\r\n interest_rate=0,\r\n volatility=self.atm_volatility,\r\n underlying_price=self.underlying_price,\r\n time=self.time,\r\n exchange_symbol=option_info['instrumentName']\r\n )\r\n if expiry in self.options[option_type]:\r\n self.options[option_type][expiry][strike] = option\r\n else:\r\n self.options[option_type][expiry] = {strike: option}\r\n self.options_by_name[option.exchange_symbol] = option\r\n logging.info(\"Added option to options by name: \" + option.exchange_symbol)\r\n\r\n def calc_deribit_implied_vols(self, max_market_width=20):\r\n for option in self.iterate_options():\r\n orderbook = self.client.getorderbook(instrument=option.exchange_symbol)\r\n if len(orderbook['bids']) and len(orderbook['asks']):\r\n option.best_bid = orderbook['bids'][0]['price']\r\n 
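# index 0 is top of book: Deribit's getorderbook returns bids sorted\r\n # best-first (descending) and asks ascending by price.\r\n 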
option.best_ask = orderbook['asks'][0]['price']\r\n msg = \"Set best market for option: \" + option.exchange_symbol \\\r\n + \": bid: \" + str(option.best_bid) + \", ask: \" + str(option.best_ask)\r\n print(msg)\r\n logging.info(msg)\r\n option.mid_market = (option.best_bid + option.best_ask) / 2\r\n market_width = (((option.best_ask - option.mid_market) / option.mid_market) - 1) * 100\r\n if market_width < max_market_width:\r\n option.set_mid_market(option.mid_market)\r\n option.calc_implied_vol(option.mid_market)\r\n else:\r\n msg = \"No liquid market for \" + str(option) + \", market is \" + str(market_width) + \" percent wide\"\r\n print(msg)\r\n logging.info(msg)\r\n else:\r\n msg = \"No market for \" + option.exchange_symbol\r\n print(msg)\r\n logging.info(msg)\r\n\r\n def update_underlying_price(self, underlying_price):\r\n self.underlying_price = underlying_price\r\n for option in self.iterate_options():\r\n option.set_underlying_price(self.underlying_price)\r\n option.calc_greeks()\r\n\r\n def load_historical_trades(self, pair=None):\r\n for option in self.iterate_options():\r\n option.historical_trades = self.client.getlasttrades(instrument=option.exchange_symbol, count=100000)\r\n print(\"Loaded \" + str(len(option.historical_trades)) + \" trades for \" + option.exchange_symbol)\r\n","repo_name":"dwasse/vol-surface-visualizer","sub_path":"cryptopt/theoEngine.py","file_name":"theoEngine.py","file_ext":"py","file_size_in_byte":10853,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"61"} +{"seq_id":"40837819637","text":"names = ['Michael', 'Bob', 'Tracy']\n\nfor name in names:\n\tprint(name)\n\nsum = 0\na = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\nfor i in a:\n\tsum += i\nprint(sum)\n\nprint(list(range(6)))\n\nfor x in range(101):\n\tsum += x;\nprint(sum)\n\nprint('''------我是分割线------''')\n\nsum = 0\nn = 99\nwhile n > 0:\n\tsum += n\n\tn -= 2\nprint(sum)\n\nsum = 0\nn = 1\nwhile n < 100:\n\tif n & 0x1 == 1:\n\t\tsum += n\n\tn = n + 1\nprint(sum)\n\nprint('''------我是分割线------''')\n\nL = ['Bart', 'Lisa', 'Adam']\n\nfor x in L:\n\tprint('Hello,', x, '!')\n\nn = 1\nwhile n <= 100:\n\tif n > 10:\n\t\tbreak\n\tprint(n)\n\tn = n + 1\nprint('END!')\n\nn = 0\nwhile n < 10:\n\tn = n + 1\n\tif n & 0x1 == 0:\n\t\tcontinue\n\tprint(n)\nprint('End odd!')\n\n\n\n","repo_name":"MrQuJL/python-scripts","sub_path":"02_python基础/loop.py","file_name":"loop.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"41188084537","text":"import image_utils\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nfrom base_style_transfer import StyleTransferBase\nfrom losses.content_loss import ContentLoss\nfrom losses.mrf_style_loss import MrfBasedStyleLoss\nfrom losses.total_variation_loss import TotalVariationLoss\nfrom nets import vgg\nfrom fcn.fcn_16s import FCN_16s\n\n\nclass SemanticMrfBasedStyleTransfer(StyleTransferBase):\n def __init__(self):\n super().__init__()\n\n self.session = tf.Session()\n\n self.name = \"soft_masks\"\n\n self.x = None\n self.content_loss = ContentLoss()\n self.style_loss = MrfBasedStyleLoss()\n self.total_variation_loss = TotalVariationLoss()\n\n self.content_weight = 10\n self.style_weight = 25\n self.semantic_weight = 100\n self.tv_weight = 1\n\n self.learning_rate_value = 8e-1\n self.learning_rate = tf.Variable(self.learning_rate_value, name=\"learning_rate\")\n self.num_iterations = 350\n\n self.content_layers = [\"conv4_2\"]\n 
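# conv4_2 for content with conv4_1/conv5_1 for style mirrors the usual\n # Gatys-style layer choice: deeper layers preserve scene layout while\n # discarding exact pixel detail (a common convention, not tuned here).\n 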
self.style_layers = [\"conv4_1\", \"conv5_1\"]\n self.model = vgg.vgg_19\n self.model_arg_scope = vgg.vgg_arg_scope()\n self.checkpoint_path = \"checkpoints/vgg_19.ckpt\"\n\n self.number_of_classes = 21\n self.content_semantic_features = None\n self.style_semantic_features = None\n self.semantic_model_checkpoint_path = \"checkpoints/fcn_16s_checkpoint/model_fcn16s_final.ckpt\"\n\n def build_graph(self):\n print(\"[.] building graph\")\n\n self.precompute_semantic_features()\n\n content_features = self.get_content_features(self.preprocessed_content_image)\n style_features = self.get_style_features(self.preprocessed_style_image)\n style_features = self.append_semantic_features(style_features, self.style_semantic_features)\n\n self.x = tf.Variable(tf.random_uniform(self.content_image.shape, 0, 1, dtype=tf.float32), name=\"x\")\n x_content_features = self.get_content_features(self.x)\n x_style_features = self.get_style_features(self.x)\n x_style_features = self.append_semantic_features(x_style_features, self.content_semantic_features)\n\n with tf.variable_scope(\"content_loss\"):\n self.content_loss_v = self.content_weight * \\\n self.content_loss.get_value(content_features, x_content_features)\n with tf.variable_scope(\"style_loss\"):\n self.style_loss_v = self.style_weight * \\\n self.style_loss.get_value(style_features, x_style_features)\n with tf.variable_scope(\"total_variation_loss\"):\n self.tv_loss_v = self.tv_weight * \\\n self.total_variation_loss.get_value(self.x)\n\n with tf.variable_scope(\"total_loss\"):\n self.loss_v = self.content_loss_v + self.style_loss_v + self.tv_loss_v\n\n with tf.variable_scope(\"optimizer\") as opt_scope:\n self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss_v, var_list=[self.x])\n self.opt_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=opt_scope.name)\n\n print(\"[.] 
graph built\")\n\n def append_semantic_features(self, style_features, semantic_features):\n means = tf.reduce_mean(semantic_features, axis=[0, 1])\n _, indices = tf.nn.top_k(means, k=5)\n semantic_features = tf.gather(semantic_features, indices, axis=2)\n semantic_features = semantic_features * self.semantic_weight\n\n with tf.name_scope(\"semantic_features_downsampling\"):\n map = {}\n prev = tf.expand_dims(semantic_features, 0)\n for i in range(5):\n if i == 0:\n pooled = prev\n else:\n pooled = slim.avg_pool2d(prev, [2, 2])\n map[i + 1] = pooled\n prev = pooled\n\n with tf.name_scope(\"semantic_features_appending\"):\n i = 0\n for style_layer in self.style_layers:\n key = int(style_layer[4])\n style_features[i] = tf.concat([style_features[i], map[key]], axis=3)\n i += 1\n\n return style_features\n\n def precompute_semantic_features(self):\n semantic_features, _ = FCN_16s(tf.stack([self.content_image, self.style_image]),\n self.number_of_classes, is_training=False)\n\n restore_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"fcn_16\")\n saver = tf.train.Saver(var_list=restore_variables)\n saver.restore(self.session, self.semantic_model_checkpoint_path)\n computed_semantic_features = self.session.run(semantic_features)\n\n with tf.name_scope(\"semantic_features_precomputation\"):\n with tf.variable_scope(\"content_semantic_features\"):\n self.content_semantic_features = self.normalize_tensor(computed_semantic_features[0])\n with tf.variable_scope(\"style_semantic_features\"):\n self.style_semantic_features = self.normalize_tensor(computed_semantic_features[1])\n\n def normalize_tensor(self, tensor):\n return tf.div(\n tf.subtract(tensor, tf.reduce_min(tensor)),\n tf.maximum(\n tf.subtract(tf.reduce_max(tensor), tf.reduce_min(tensor)),\n 1e-12\n )\n )\n\n\nif __name__ == \"__main__\":\n content_image_path = \"styles/cars/golf7r.jpg\"\n style_image_path = \"styles/car_drawing.jpg\"\n\n size = (500, 343)\n content_image, original_content_image_size = image_utils.load_image_pil(content_image_path, size)\n style_image, _ = image_utils.load_image_pil(style_image_path, size)\n\n # content_image = image_utils.preprocess(content_image)\n # style_image = image_utils.preprocess(style_image)\n\n st = SemanticMrfBasedStyleTransfer()\n generated = st.run(content_image, style_image)\n # generated = image_utils.deprocess(generated)\n\n image_utils.save_generated_image(generated, \"summary\", \"generated_image_name_stub\", original_content_image_size)\n","repo_name":"VladLevochko/StyleTransfer","sub_path":"semantic_mrf_based_style_transfer.py","file_name":"semantic_mrf_based_style_transfer.py","file_ext":"py","file_size_in_byte":6057,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"35201293237","text":"from flask import request, jsonify, Response\n\n\ndef create_caesar_views(app, caesar_encrypt, caesar_decrypt):\n @app.route('/api/caesar/encrypt', methods=['POST'])\n def handle_encrypt_caesar_request():\n try:\n data = request.get_json()\n padding = data.get('padding')\n text = data.get('text')\n if padding is None or text is None:\n raise Exception()\n encrypted = caesar_encrypt(text, padding)\n return jsonify({'data': encrypted})\n except Exception:\n return Response('{\"message\": \"Invalid data\"}', status=400, mimetype='application/json')\n\n @app.route('/api/caesar/decrypt', methods=['POST'])\n def handle_decrypt_caesar_request():\n try:\n data = request.get_json()\n padding = data.get('padding')\n text = 
data.get('text')\n if padding is None or text is None:\n raise Exception()\n decrypted = caesar_decrypt(text, padding)\n return jsonify({'data': decrypted})\n except Exception:\n return Response('{\"message\": \"Invalid data\"}', status=400, mimetype='application/json')\n","repo_name":"teimurjan/ciphers-web-app","sub_path":"backend/app/views/caesar.py","file_name":"caesar.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25062545914","text":"from __future__ import absolute_import\nimport functools\nimport pika.exceptions\nimport tornado.concurrent\nfrom tornado import gen\n\n__all__ = 'wait', 'create_future', 'create_task', 'iscoroutinepartial'\n\n\ndef iscoroutinepartial(coro):\n \"\"\"\n Returns True if the given function is a coroutine function, possibly wrapped in one or more functools.partial layers. See additional information here_.\n\n :param coro: Function\n :return: bool\n\n .. _here: https://goo.gl/C0S4sQ\n\n \"\"\"\n\n while True:\n parent = coro\n\n coro = getattr(parent, 'func', None)\n\n if coro is None:\n break\n\n return gen.is_coroutine_function(parent)\n\n\ndef create_future(loop):\n \"\"\" Helper for `create a new future`_ with backward compatibility for Python 3.4\n\n .. _create a new future: https://goo.gl/YrzGQ6\n \"\"\"\n\n try:\n return loop.create_future()\n except AttributeError:\n # Compatibility with older tornado\n return tornado.concurrent.Future()\n\n\ndef create_task(yielded):\n \"\"\" Helper for `create a new Task`_ with backward compatibility for Python 3.4\n\n .. _create a new Task: https://goo.gl/g4pMV9\n \"\"\"\n\n return gen.convert_yielded(yielded)\n\n\n@gen.coroutine\ndef wait(tasks):\n \"\"\"\n Simple helper for gathering all passed :class:`Task`s.\n\n :param tasks: list of the :class:`asyncio.Task`s\n :return: :class:`tuple` of results\n \"\"\"\n raise gen.Return((yield gen.multi(tasks)))\n\n\ndef ensure_connection_exception(exception_or_message):\n \"\"\"\n If passed an exception this will be returned. 
Otherwise it is assumed\n a string is passed giving the reason for the connection error\n\n :param exception_or_message:\n :return:\n \"\"\"\n if isinstance(exception_or_message, Exception):\n return exception_or_message\n\n # We got a string message\n return pika.exceptions.AMQPConnectionError(exception_or_message)\n\n\n# Get rid of the stupid default replace_callback in tornado coroutine\ncoroutine = functools.partial(gen._make_coroutine_wrapper, replace_callback=False)\n","repo_name":"aiidateam/topika","sub_path":"topika/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74368112515","text":"# -*- coding: utf8 -*-\n'''\nSimple Write MD Server Program\n\nProvide list, query, edit, save, etc\n\n'''\n\nimport os\nimport codecs\nfrom flask import Flask\nfrom flask import render_template\nfrom flask import jsonify\nfrom flask import request\nfrom flask import abort\n\napp = Flask(__name__)\napp.config.from_object('config')\n\n@app.route('/')\ndef index():\n '''\n Index page of this tiny site\n '''\n return render_template('index.html')\n\n@app.route('/list')\ndef list_files():\n '''\n List files in specified path\n '''\n path = request.args.get('p')\n if path is None:\n path = ''\n path = path.strip()\n\n # security check\n\n # get file list here\n fns = os.listdir(os.path.join(app.config['DOC_ROOT'], path))\n # get details\n files = []\n for fn in fns:\n full_fn = os.path.join(app.config['DOC_ROOT'], path, fn)\n is_file = 'file' if os.path.isfile(full_fn) else 'dir'\n size = os.path.getsize(full_fn)\n atime = os.path.getatime(full_fn)\n if is_file == 'file':\n files.append((fn, path, is_file, size, atime))\n else:\n files.insert(0, (fn, path, is_file, size, atime))\n \n return render_template('list.html', path=path, files=files)\n\n@app.route('/edit')\ndef edit():\n '''\n Edit specified file\n '''\n act = request.args.get('a')\n path = request.args.get('p')\n fn = request.args.get('f')\n\n if act is None: abort(404)\n if path is None: abort(404)\n if fn is None: abort(404)\n\n # security check\n path = path.strip()\n fn = fn.strip()\n\n # edit now\n if act == 'new':\n ctt = ''\n else:\n ctt = codecs.open(os.path.join(app.config['DOC_ROOT'], path, fn), 'r', 'utf-8').read()\n\n return render_template('edit.html', fn=fn, path=path, ctt=ctt)\n\n@app.route('/save', methods=['GET', 'POST'])\ndef save():\n '''\n Save submitted md content\n '''\n if request.method == 'GET':\n return 'save'\n\n path = request.form.get('p')\n fn = request.form.get('f')\n ctt = request.form.get('c')\n\n full_fn = os.path.join(app.config['DOC_ROOT'], path, fn)\n\n codecs.open(full_fn, 'w', 'utf-8').write(ctt)\n\n # Handle the save file operation\n ret = {'success': True}\n return jsonify(ret)\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=51101)\n","repo_name":"focusheart/simple-write-md","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11260139842","text":"import os\nimport glob\nimport itertools\n\nsrc = \"../Files/TIMIT_SR/TRAIN/*/*/SA*.*\"\ndst = \"../Files/TIMIT_SR/TEST\"\n\nsrc = [os.path.expanduser(k) for k in src.strip().split()]\ndirs = itertools.chain(*(glob.glob(d) for d in src))\n\nfor d in dirs:\n f = os.path.split(d)\n g = os.path.split(f[0])\n h = os.path.split(g[0])\n \n if not(os.path.exists(dst + \"/\" + h[1] 
+ \"/\" + g [1])):\n os.makedirs(dst + \"/\" + h[1] + \"/\" + g [1])\n dest = dst + \"/\" + h[1] + \"/\" + g [1] + \"/\" + f[1]\n if os.path.exists(dest):\n continue\n \n print(dest, \" moved !\")\n os.rename(d, dest)\n","repo_name":"neuralsyn/real-time-speaker-recognition","sub_path":"Pre-process/move.py","file_name":"move.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"43677119958","text":"class Solution:\n def minNumber(self, nums: List[int]) -> str:\n\n #if nums == []: return 0 不需要\n\n def quicksort(l, r):\n if l >= r: return\n i, j = l, r\n while (i < j):\n while (s[j] + s[l] >= s[l] + s[j] and i < j):\n j -= 1\n while (s[i] + s[l] <= s[l] + s[i] and i < j):\n i += 1\n s[i], s[j] = s[j], s[i] # 指针交换\n s[l], s[i] = s[i], s[l] # pivot换至目标位置\n quicksort(l, i - 1) # 递归\n quicksort(i + 1, r)\n\n s = [str(n) for n in nums]\n quicksort(0, len(s) - 1)\n\n return \"\".join(s)","repo_name":"zheyuanWang/hunter_playground","sub_path":"sort/minNum.py","file_name":"minNum.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29684006329","text":"import unittest\n\nfrom pynusmv.init import init_nusmv, deinit_nusmv\nfrom pynusmv.fsm import BddFsm\nfrom pynusmv.dd import BDD\nfrom pynusmv.mc import eval_simple_expression as evalSexp\nfrom pynusmv.exception import NuSMVBddPickingError\n\nclass TestEnc(unittest.TestCase):\n \n def setUp(self):\n init_nusmv()\n \n def tearDown(self):\n deinit_nusmv()\n \n def model(self):\n fsm = BddFsm.from_filename(\"tests/pynusmv/models/constraints.smv\")\n self.assertIsNotNone(fsm)\n return fsm\n \n \n def cardgame_post_fair(self):\n fsm = BddFsm.from_filename(\"tests/pynusmv/models/\"\n \"cardgame-post-fair.smv\")\n self.assertIsNotNone(fsm)\n return fsm\n \n \n def counters_model(self):\n fsm = BddFsm.from_filename(\"tests/pynusmv/models/counters.smv\")\n self.assertIsNotNone(fsm)\n return fsm\n \n \n def test_stateVars(self):\n fsm = self.counters_model()\n enc = fsm.bddEnc\n self.assertEqual(enc.stateVars, {\"c1.c\", \"c2.c\"})\n \n \n def test_inputsVars(self):\n fsm = self.counters_model()\n enc = fsm.bddEnc\n self.assertEqual(enc.inputsVars, {\"run\"})\n \n \n def test_definedVars(self):\n fsm = self.counters_model()\n enc = fsm.bddEnc\n self.assertEqual(enc.definedVars, {\"start\", \"stop\"})\n \n \n def test_statesMask(self):\n fsm = self.model()\n enc = fsm.bddEnc\n \n p = evalSexp(fsm, \"p\")\n q = evalSexp(fsm, \"q\")\n a = evalSexp(fsm, \"a\")\n \n self.assertEqual(enc.statesMask, (p | ~p) & (q | ~q))\n \n \n def test_inputsMask(self):\n fsm = self.model()\n enc = fsm.bddEnc\n \n p = evalSexp(fsm, \"p\")\n q = evalSexp(fsm, \"q\")\n a = evalSexp(fsm, \"a\")\n \n self.assertEqual(enc.inputsMask, a | ~a)\n \n \n def test_statesCube(self):\n fsm = self.model()\n enc = fsm.bddEnc\n \n p = evalSexp(fsm, \"p\")\n q = evalSexp(fsm, \"q\")\n a = evalSexp(fsm, \"a\")\n \n self.assertTrue(p & q <= enc.statesCube)\n self.assertFalse(a <= enc.statesCube)\n self.assertFalse(~a <= enc.statesCube)\n \n \n def test_inputsCube(self):\n fsm = self.model()\n enc = fsm.bddEnc\n \n p = evalSexp(fsm, \"p\")\n q = evalSexp(fsm, \"q\")\n a = evalSexp(fsm, \"a\")\n \n self.assertTrue(a <= enc.inputsCube)\n self.assertFalse(p & q <= enc.inputsCube)\n \n \n def test_inputs_vars_cube(self):\n fsm = self.model()\n enc = fsm.bddEnc\n \n p = evalSexp(fsm, \"p\")\n q = 
evalSexp(fsm, \"q\")\n a = evalSexp(fsm, \"a\")\n \n self.assertTrue(a <= enc.cube_for_inputs_vars({'a'}))\n \n \n def test_var_ordering(self):\n fsm = self.cardgame_post_fair()\n enc = fsm.bddEnc\n \n variables = {'player.action', 'player.played',\n 'dealer.action', 'dealer.played',\n 'step', 'pcard', 'dcard', 'ddcard'}\n bits = {'player.action.0', 'player.action.1',\n 'player.played.0', 'player.played.1',\n 'dealer.action.0', 'dealer.action.1', 'dealer.action.2',\n 'dealer.played.0', 'dealer.played.1', 'dealer.played.2',\n 'step.0', 'step.1',\n 'pcard.0', 'pcard.1',\n 'dcard.0', 'dcard.1',\n 'ddcard.0', 'ddcard.1'}\n \n self.assertSetEqual(variables, set(enc.get_variables_ordering()))\n self.assertSetEqual(bits,\n set(enc.get_variables_ordering(var_type=\"bits\")))\n \n def test_force_var_ordering(self):\n fsm = self.model()\n \n new_order = (\"a\", \"q\", \"p\")\n fsm.bddEnc.force_variables_ordering(new_order)\n self.assertTupleEqual(new_order, fsm.bddEnc.get_variables_ordering())\n \n new_order = (\"p\", \"a\")\n fsm.bddEnc.force_variables_ordering(new_order)\n self.assertTupleEqual(new_order,\n fsm.bddEnc.get_variables_ordering()\n [:len(new_order)])\n ","repo_name":"sbusard/pynusmv","sub_path":"src/tests/pynusmv/testEnc.py","file_name":"testEnc.py","file_ext":"py","file_size_in_byte":4240,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"61"} +{"seq_id":"20020886043","text":"from flask import request\nfrom flask_restx import Resource\n\nfrom ..utils.dto import DepartmentsDto\nfrom ..services.department_service import get_department_list\n\napi = DepartmentsDto.api\n_department_list = DepartmentsDto.department_list\n\n@api.route(\"\",\n doc={\n \"description\": \"List of department names and IDs.
    \"\n })\n@api.response(401, \"Not found.\")\nclass Departments(Resource):\n @api.doc(\"List of departments.\", description=\"Gets a list of department names and IDs.\")\n @api.marshal_with(_department_list, 200)\n def get(self):\n lang = request.args[\"lang\"] or \"en\" # For now, default to english if no language specified.\n if get_department_list(lang):\n return get_department_list(lang)\n return abort(401, \"Not found.\")\n","repo_name":"Collinbrown95/geds-api","sub_path":"app/main/controllers/departments.py","file_name":"departments.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72329356353","text":"#!/usr/bin/env python3\r\n\r\nimport os\r\nimport subprocess\r\nimport sys\r\nimport tempfile\r\nfrom os import path\r\nfrom pathlib import Path\r\n\r\nimport termcolor\r\nfrom dulwich import porcelain, index\r\nfrom dulwich.client import (\r\n HttpGitClient,\r\n SSHGitClient,\r\n LocalGitClient,\r\n get_transport_and_path,\r\n)\r\nfrom dulwich.repo import Repo\r\nfrom github import Github\r\nimport operator\r\nimport shutil\r\nfrom .arg_parser import ArgParser\r\n\r\nSEPARATOR = os.sep\r\nREPOS = \"repos\"\r\n\r\n\r\ndef fatal_error(error):\r\n os.system(\"color\")\r\n sys.exit(termcolor.colored(\"ERROR: \" + error, \"red\"))\r\n\r\n\r\ndef get_repos(prefix, operation, num_repos, org):\r\n repos = []\r\n for repo in g.get_organization(org).get_repos(type=\"private\"):\r\n if repo.name.startswith(prefix):\r\n repos.append(repo)\r\n\r\n matcher = \"exactly\"\r\n if operation == operator.gt:\r\n matcher = \"at least\"\r\n\r\n if not operation(len(repos), num_repos):\r\n fatal_error(\r\n f'prefix \"{prefix}\" must match {matcher} {num_repos} repositories, but matches {repos}'\r\n )\r\n\r\n return repos\r\n\r\n\r\ndef switch_branch(dulwich_repo, branch_name):\r\n \"\"\" Switch current branch to branch_name \"\"\"\r\n branch = bytes(\"refs/heads/\" + branch_name, encoding=\"utf8\")\r\n if branch in dulwich_repo:\r\n dulwich_repo.reset_index(dulwich_repo[branch].tree)\r\n else:\r\n raise Exception(\r\n f'branch \"{branch_name}\" does not exist for repo \"{dulwich_repo.path}\"'\r\n )\r\n dulwich_repo.refs.set_symbolic_ref(b\"HEAD\", branch)\r\n\r\n\r\ndef safe_push(github_repo, dulwich_repo, branch):\r\n \"\"\" Push if branch doesn't already exist in remote \"\"\"\r\n # TODO check if to branch already exists\r\n if any(b.name == branch for b in github_repo.get_branches()):\r\n fatal_error(\r\n 'Branch \"'\r\n + branch\r\n + '\" already exists for '\r\n + REPOS\r\n + 'itory \"'\r\n + github_repo.name\r\n + '\"'\r\n )\r\n try:\r\n # Force push seems to be required by dulwich, but we check for the branch existing beforehand\r\n porcelain.push(dulwich_repo, refspecs=branch, force=True)\r\n except AttributeError as e:\r\n # Ignore failed push\r\n if e.__str__() != \"'NoneType' object has no attribute 'encode'\":\r\n fatal_error(e)\r\n except Exception as e:\r\n fatal_error(e)\r\n\r\n\r\nargs = ArgParser().parse_github_copy_args().parse_args()\r\n\r\nif args.sourceDirectory is not None and args.sourcePrefix is not None:\r\n fatal_error(\"cannot specify both a source-directory and a source-prefix\")\r\n\r\nif \"GITHUB_TOKEN\" not in os.environ:\r\n fatal_error(\"you must specify a GITHUB_TOKEN environment variable\")\r\ng = Github(os.environ[\"GITHUB_TOKEN\"])\r\ndestinationRepositories = get_repos(\r\n args.destinationPrefix, operator.ge, 1, args.destinationOrg\r\n)\r\n\r\nif 
args.sourcePrefix == \"grace-actions\" and args.actionType == \"\":\r\n fatal_error(\"action-type parameter is required when running grace-actions\")\r\n\r\n\r\ntemp_dir = tempfile.gettempdir() + SEPARATOR + REPOS + SEPARATOR\r\nshutil.rmtree(temp_dir, ignore_errors=True)\r\nPath(temp_dir).mkdir(parents=True, exist_ok=True)\r\n\r\nif args.sourceDirectory is None:\r\n sourceRepositories = get_repos(args.sourcePrefix, operator.eq, 1, args.sourceOrg)\r\n source_path = temp_dir + sourceRepositories[0].name\r\n dulwich_repo = porcelain.clone(sourceRepositories[0].ssh_url, source_path)\r\n os.system(f\"cd {source_path}; git fetch origin {args.sourceBranch}; git checkout {args.sourceBranch}\")\r\nelse:\r\n source_path = args.sourceDirectory\r\n\r\nfor github_repo in destinationRepositories:\r\n destination_path = temp_dir + github_repo.name\r\n dulwich_repo = porcelain.clone(github_repo.ssh_url, destination_path)\r\n\r\n # switch to destination branch\r\n os.system(f\"cd {destination_path}; git fetch origin {args.destinationBranch}; git checkout {args.destinationBranch}\")\r\n\r\n porcelain.branch_create(destination_path, args.temporaryBranch)\r\n switch_branch(dulwich_repo, args.temporaryBranch)\r\n\r\n if args.sourcePrefix == \"grace-actions\":\r\n args.script = \"python3 \" + path.join(source_path, \"actions\", \"run.py\")\r\n\r\n # TODO allow relative paths for the request.json file\r\n args.scriptParameters = args.scriptParameters + f\" --action {args.actionType}\"\r\n else:\r\n args.script = path.join(\"~/.local/bin\", args.script + \".py\")\r\n\r\n command = f\"{args.script} {args.scriptParameters} --src-dir={source_path} --dst-dir={destination_path}\"\r\n # FIXME fix race condition with transformer for print statements\r\n termcolor.colored(f\"Running transformer with command '{command}'\", \"yellow\")\r\n process = subprocess.Popen(\r\n [command],\r\n shell=True,\r\n stdout=subprocess.PIPE,\r\n stderr=subprocess.PIPE,\r\n universal_newlines=True,\r\n )\r\n output, err = process.communicate()\r\n sys.stdout.write(output)\r\n sys.stderr.write(err)\r\n\r\n status = porcelain.status(dulwich_repo)\r\n\r\n dulwich_repo.stage(status.untracked)\r\n dulwich_repo.stage(status.unstaged)\r\n\r\n porcelain.commit(\r\n dulwich_repo,\r\n args.pullRequestName,\r\n \"grace-production \",\r\n )\r\n\r\n logging_verb1 = \"Would copy\"\r\n logging_verb2 = \"would be\"\r\n logging_color = \"yellow\"\r\n if not args.dryRun:\r\n logging_verb1 = \"Copied\"\r\n logging_verb2 = \"were\"\r\n logging_color = \"green\"\r\n\r\n safe_push(github_repo, dulwich_repo, args.temporaryBranch)\r\n print(\r\n termcolor.colored(f\"Successfully pushed to {github_repo.ssh_url}\", \"green\")\r\n )\r\n\r\n pr = github_repo.create_pull(\r\n title=args.pullRequestName,\r\n body=args.pullRequestName,\r\n head=args.temporaryBranch,\r\n base=args.destinationBranch,\r\n )\r\n print(\r\n termcolor.colored(f\"Pull request {pr.id} created\", logging_color)\r\n )\r\n print(\r\n termcolor.colored(f\"Pull request URL: {pr.html_url}\", logging_color)\r\n )\r\n\r\n for file in status.unstaged + status.untracked:\r\n print(\r\n termcolor.colored(\r\n f\"{logging_verb1} '{file}' from '{source_path}' to '{destination_path}'\",\r\n logging_color,\r\n )\r\n )\r\n\r\n if len(status.untracked + status.unstaged) == 0:\r\n print(termcolor.colored(f\"No changes {logging_verb2} made\", 
logging_color))\r\n","repo_name":"GSA/github-copy","sub_path":"github-copy/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":6484,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"586375863","text":"# encoding: utf-8\r\n\r\n# The MIT License\r\n#\r\n# Copyright (c) 2009-2011 the bpython authors.\r\n# Copyright (c) 2012-2013,2015 Sebastian Ramacher\r\n#\r\n# Permission is hereby granted, free of charge, to any person obtaining a copy\r\n# of this software and associated documentation files (the \"Software\"), to deal\r\n# in the Software without restriction, including without limitation the rights\r\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\n# copies of the Software, and to permit persons to whom the Software is\r\n# furnished to do so, subject to the following conditions:\r\n#\r\n# The above copyright notice and this permission notice shall be included in\r\n# all copies or substantial portions of the Software.\r\n#\r\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\r\n# THE SOFTWARE.\r\n\r\nfrom __future__ import absolute_import\r\n\r\nimport code\r\nimport inspect\r\nimport io\r\nimport os\r\nimport pkgutil\r\nimport pydoc\r\nimport re\r\nimport shlex\r\nimport subprocess\r\nimport sys\r\nimport tempfile\r\nimport textwrap\r\nimport time\r\nimport traceback\r\nfrom itertools import takewhile\r\nfrom .six import itervalues\r\nfrom types import ModuleType\r\n\r\nfrom pygments.token import Token\r\n\r\nfrom . import autocomplete\r\nfrom . import inspection\r\nfrom ._py3compat import PythonLexer, py3, prepare_for_exec\r\nfrom .clipboard import get_clipboard, CopyFailed\r\nfrom .config import getpreferredencoding\r\nfrom .formatter import Parenthesis\r\nfrom .history import History\r\nfrom .lazyre import LazyReCompile\r\n#from .paste import PasteHelper, PastePinnwand, PasteFailed\r\nfrom .patch_linecache import filename_for_console_input\r\nfrom .translations import _, ngettext\r\nfrom . 
import simpleeval\r\n\r\n\r\nclass RuntimeTimer(object):\r\n \"\"\"Calculate running time\"\"\"\r\n\r\n def __init__(self):\r\n self.reset_timer()\r\n self.time = time.monotonic if hasattr(time, 'monotonic') else time.time\r\n\r\n def __enter__(self):\r\n self.start = self.time()\r\n\r\n def __exit__(self, ty, val, tb):\r\n self.last_command = self.time() - self.start\r\n self.running_time += self.last_command\r\n return False\r\n\r\n def reset_timer(self):\r\n self.running_time = 0.0\r\n self.last_command = 0.0\r\n\r\n def estimate(self):\r\n return self.running_time - self.last_command\r\n\r\n\r\nclass Interpreter(code.InteractiveInterpreter):\r\n \"\"\"Source code interpreter for use in bpython.\"\"\"\r\n\r\n bpython_input_re = LazyReCompile(r'')\r\n\r\n def __init__(self, locals=None, encoding=None):\r\n \"\"\"Constructor.\r\n\r\n The optional 'locals' argument specifies the dictionary in which code\r\n will be executed; it defaults to a newly created dictionary with key\r\n \"__name__\" set to \"__main__\".\r\n\r\n The syntaxerror callback can be set at any time and will be called\r\n on a caught syntax error. The purpose for this in bpython is so that\r\n the repl can be instantiated after the interpreter (which it\r\n necessarily must be with the current factoring) and then an exception\r\n callback can be added to the Interpreter instance afterwards - more\r\n specifically, this is so that autoindentation does not occur after a\r\n traceback.\r\n\r\n encoding is only used in Python 2, where it may be necessary to add an\r\n encoding comment to a source bytestring before running it.\r\n encoding must be a bytestring in Python 2 because it will be templated\r\n into a bytestring source as part of an encoding comment.\r\n \"\"\"\r\n\r\n self.encoding = encoding or getpreferredencoding()\r\n self.syntaxerror_callback = None\r\n\r\n if locals is None:\r\n # instead of messing with sys.modules, we should modify sys.modules\r\n # in the interpreter instance\r\n sys.modules['__main__'] = main_mod = ModuleType('__main__')\r\n locals = main_mod.__dict__\r\n\r\n # Unfortunately code.InteractiveInterpreter is a classic class, so no\r\n # super()\r\n code.InteractiveInterpreter.__init__(self, locals)\r\n self.timer = RuntimeTimer()\r\n\r\n def reset_running_time(self):\r\n self.running_time = 0\r\n\r\n def runsource(self, source, filename=None, symbol='single',\r\n encode='auto'):\r\n \"\"\"Execute Python code.\r\n\r\n source, filename and symbol are passed on to\r\n code.InteractiveInterpreter.runsource. 
If encode is True,\r\n an encoding comment will be added to the source.\r\n On Python 3.X, encode will be ignored.\r\n\r\n encode should only be used for interactive interpreter input,\r\n files should always already have an encoding comment or be ASCII.\r\n By default an encoding line will be added if no filename is given.\r\n\r\n In Python 3, source must be a unicode string\r\n In Python 2, source may be latin-1 bytestring or unicode string,\r\n following the interface of code.InteractiveInterpreter.\r\n\r\n Because adding an encoding comment to a unicode string in Python 2\r\n would cause a syntax error to be thrown which would reference code\r\n the user did not write, setting encoding to True when source is a\r\n unicode string in Python 2 will throw a ValueError.\"\"\"\r\n # str means bytestring in Py2\r\n if encode and not py3 and isinstance(source, unicode):\r\n if encode != 'auto':\r\n raise ValueError(\"can't add encoding line to unicode input\")\r\n encode = False\r\n if encode and filename is not None:\r\n # files have encoding comments or implicit encoding of ASCII\r\n if encode != 'auto':\r\n raise ValueError(\r\n \"shouldn't add encoding line to file contents\")\r\n encode = False\r\n\r\n if encode and not py3 and isinstance(source, str):\r\n # encoding makes sense for bytestrings, so long as there\r\n # isn't already an encoding comment\r\n comment = inspection.get_encoding_comment(source)\r\n if comment:\r\n # keep the existing encoding comment, but add two lines\r\n # because this interp always adds 2 to stack trace line\r\n # numbers in Python 2\r\n source = source.replace(comment, b'%s\\n\\n' % comment, 1)\r\n else:\r\n source = b'# coding: %s\\n\\n%s' % (self.encoding, source)\r\n elif not py3 and filename is None:\r\n # 2 blank lines still need to be added\r\n # because this interpreter always adds 2 to stack trace line\r\n # numbers in Python 2 when the filename is \"\"\r\n newlines = u'\\n\\n' if isinstance(source, unicode) else b'\\n\\n'\r\n source = newlines + source\r\n # we know we're in Python 2 here, so ok to reference unicode\r\n if filename is None:\r\n filename = filename_for_console_input(source)\r\n with self.timer:\r\n return code.InteractiveInterpreter.runsource(self, source,\r\n filename, symbol)\r\n\r\n def showsyntaxerror(self, filename=None):\r\n \"\"\"Override the regular handler, the code's copied and pasted from\r\n code.py, as per showtraceback, but with the syntaxerror callback called\r\n and the text in a pretty colour.\"\"\"\r\n if self.syntaxerror_callback is not None:\r\n self.syntaxerror_callback()\r\n\r\n exc_type, value, sys.last_traceback = sys.exc_info()\r\n sys.last_type = exc_type\r\n sys.last_value = value\r\n if filename and exc_type is SyntaxError:\r\n # Work hard to stuff the correct filename in the exception\r\n try:\r\n msg, (dummy_filename, lineno, offset, line) = value.args\r\n except:\r\n # Not the format we expect; leave it alone\r\n pass\r\n else:\r\n # Stuff in the right filename and right lineno\r\n # strip linecache line number\r\n if self.bpython_input_re.match(filename):\r\n filename = ''\r\n if filename == '' and not py3:\r\n lineno -= 2\r\n value = SyntaxError(msg, (filename, lineno, offset, line))\r\n sys.last_value = value\r\n exc_formatted = traceback.format_exception_only(exc_type, value)\r\n self.writetb(exc_formatted)\r\n\r\n def showtraceback(self):\r\n \"\"\"This needs to override the default traceback thing\r\n so it can put it into a pretty colour and maybe other\r\n stuff, I don't know\"\"\"\r\n try:\r\n 
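# capture the exception currently being handled and mirror it into\r\n # sys.last_type/value/traceback so post-mortem tools such as pdb.pm()\r\n # keep working, just like the stock interactive interpreter.\r\n 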
t, v, tb = sys.exc_info()\r\n sys.last_type = t\r\n sys.last_value = v\r\n sys.last_traceback = tb\r\n tblist = traceback.extract_tb(tb)\r\n del tblist[:1]\r\n\r\n for i, (fname, lineno, module, something) in enumerate(tblist):\r\n # strip linecache line number\r\n if self.bpython_input_re.match(fname):\r\n fname = ''\r\n tblist[i] = (fname, lineno, module, something)\r\n # Set the right lineno (encoding header adds an extra line)\r\n if fname == '' and not py3:\r\n tblist[i] = (fname, lineno - 2, module, something)\r\n\r\n l = traceback.format_list(tblist)\r\n if l:\r\n l.insert(0, \"Traceback (most recent call last):\\n\")\r\n l[len(l):] = traceback.format_exception_only(t, v)\r\n finally:\r\n tblist = tb = None\r\n\r\n self.writetb(l)\r\n\r\n def writetb(self, lines):\r\n \"\"\"This outputs the traceback and should be overridden for anything\r\n fancy.\"\"\"\r\n for line in lines:\r\n self.write(line)\r\n\r\n\r\nclass MatchesIterator(object):\r\n \"\"\"Stores a list of matches and which one is currently selected if any.\r\n\r\n Also responsible for doing the actual replacement of the original line with\r\n the selected match.\r\n\r\n A MatchesIterator can be `clear`ed to reset match iteration, and\r\n `update`ed to set what matches will be iterated over.\"\"\"\r\n\r\n def __init__(self):\r\n # word being replaced in the original line of text\r\n self.current_word = ''\r\n # possible replacements for current_word\r\n self.matches = None\r\n # which word is currently replacing the current word\r\n self.index = -1\r\n # cursor position in the original line\r\n self.orig_cursor_offset = None\r\n # original line (before match replacements)\r\n self.orig_line = None\r\n # class describing the current type of completion\r\n self.completer = None\r\n\r\n def __nonzero__(self):\r\n \"\"\"MatchesIterator is False when word hasn't been replaced yet\"\"\"\r\n return self.index != -1\r\n\r\n def __bool__(self):\r\n return self.index != -1\r\n\r\n @property\r\n def candidate_selected(self):\r\n \"\"\"True when word selected/replaced, False when word hasn't been\r\n replaced yet\"\"\"\r\n return bool(self)\r\n\r\n def __iter__(self):\r\n return self\r\n\r\n def current(self):\r\n if self.index == -1:\r\n raise ValueError('No current match.')\r\n return self.matches[self.index]\r\n\r\n def next(self):\r\n return self.__next__()\r\n\r\n def __next__(self):\r\n self.index = (self.index + 1) % len(self.matches)\r\n return self.matches[self.index]\r\n\r\n def previous(self):\r\n if self.index <= 0:\r\n self.index = len(self.matches)\r\n self.index -= 1\r\n\r\n return self.matches[self.index]\r\n\r\n def cur_line(self):\r\n \"\"\"Returns a cursor offset and line with the current substitution\r\n made\"\"\"\r\n return self.substitute(self.current())\r\n\r\n def substitute(self, match):\r\n \"\"\"Returns a cursor offset and line with match substituted in\"\"\"\r\n start, end, word = self.completer.locate(self.orig_cursor_offset,\r\n self.orig_line)\r\n return (start + len(match),\r\n self.orig_line[:start] + match + self.orig_line[end:])\r\n\r\n def is_cseq(self):\r\n return bool(\r\n os.path.commonprefix(self.matches)[len(self.current_word):])\r\n\r\n def substitute_cseq(self):\r\n \"\"\"Returns a new line by substituting a common sequence in, and update\r\n matches\"\"\"\r\n cseq = os.path.commonprefix(self.matches)\r\n new_cursor_offset, new_line = self.substitute(cseq)\r\n if len(self.matches) == 1:\r\n self.clear()\r\n else:\r\n self.update(new_cursor_offset, new_line, self.matches,\r\n 
self.completer)\r\n if len(self.matches) == 1:\r\n self.clear()\r\n return new_cursor_offset, new_line\r\n\r\n def update(self, cursor_offset, current_line, matches, completer):\r\n \"\"\"Called to reset the match index and update the word being replaced\r\n\r\n Should only be called if there's a target to update - otherwise, call\r\n clear\"\"\"\r\n\r\n if matches is None:\r\n raise ValueError(\"Matches may not be None.\")\r\n\r\n self.orig_cursor_offset = cursor_offset\r\n self.orig_line = current_line\r\n self.matches = matches\r\n self.completer = completer\r\n self.index = -1\r\n self.start, self.end, self.current_word = self.completer.locate(\r\n self.orig_cursor_offset, self.orig_line)\r\n\r\n def clear(self):\r\n self.matches = []\r\n self.cursor_offset = -1\r\n self.current_line = ''\r\n self.current_word = ''\r\n self.start = None\r\n self.end = None\r\n self.index = -1\r\n\r\n\r\nclass Interaction(object):\r\n def __init__(self, config, statusbar=None):\r\n self.config = config\r\n\r\n if statusbar:\r\n self.statusbar = statusbar\r\n\r\n def confirm(self, s):\r\n raise NotImplementedError\r\n\r\n def notify(self, s, n=10, wait_for_keypress=False):\r\n raise NotImplementedError\r\n\r\n def file_prompt(self, s):\r\n raise NotImplementedError\r\n\r\n\r\nclass SourceNotFound(Exception):\r\n \"\"\"Exception raised when the requested source could not be found.\"\"\"\r\n\r\n\r\nclass Repl(object):\r\n \"\"\"Implements the necessary guff for a Python-repl-alike interface\r\n\r\n The execution of the code entered and all that stuff was taken from the\r\n Python code module, I had to copy it instead of inheriting it, I can't\r\n remember why. The rest of the stuff is basically what makes it fancy.\r\n\r\n It reads what you type, passes it to a lexer and highlighter which\r\n returns a formatted string. This then gets passed to echo() which\r\n parses that string and prints to the curses screen in appropriate\r\n colours and/or bold attribute.\r\n\r\n The Repl class also keeps two stacks of lines that the user has typed in:\r\n One to be used for the undo feature. I am not happy with the way this\r\n works. The only way I have been able to think of is to keep the code\r\n that's been typed in in memory and re-evaluate it in its entirety for each\r\n \"undo\" operation. Obviously this means some operations could be extremely\r\n slow. I'm not even by any means certain that this truly represents a\r\n genuine \"undo\" implementation, but it does seem to be generally pretty\r\n effective.\r\n\r\n If anyone has any suggestions for how this could be improved, I'd be happy\r\n to hear them and implement it/accept a patch. 
I researched a bit into the\r\n    idea of keeping the entire Python state in memory, but this really seems\r\n    very difficult (I believe it may actually be impossible to work) and has\r\n    its own problems too.\r\n\r\n    The other stack is for keeping a history for pressing the up/down keys\r\n    to go back and forth between lines.\r\n\r\n    XXX Subclasses should implement echo, current_line, cw\r\n    \"\"\"\r\n\r\n    def __init__(self, interp, config):\r\n        \"\"\"Initialise the repl.\r\n\r\n        interp is a Python code.InteractiveInterpreter instance\r\n\r\n        config is a populated bpython.config.Struct.\r\n        \"\"\"\r\n\r\n        self.config = config\r\n        self.cut_buffer = ''\r\n        self.buffer = []\r\n        self.interp = interp\r\n        self.interp.syntaxerror_callback = self.clear_current_line\r\n        self.match = False\r\n        self.rl_history = History(duplicates=config.hist_duplicates,\r\n                                  hist_size=config.hist_length)\r\n        self.s_hist = []\r\n        self.history = []\r\n        self.evaluating = False\r\n        self.matches_iter = MatchesIterator()\r\n        self.funcprops = None\r\n        self.arg_pos = None\r\n        self.current_func = None\r\n        self.highlighted_paren = None\r\n        self._C = {}\r\n        self.prev_block_finished = 0\r\n        self.interact = Interaction(self.config)\r\n        # previous pastebin content to prevent duplicate pastes, filled on call\r\n        # to repl.pastebin\r\n        self.prev_pastebin_content = ''\r\n        self.prev_pastebin_url = ''\r\n        self.prev_removal_url = ''\r\n        # Necessary to fix mercurial.ui.ui expecting sys.stderr to have this\r\n        # attribute\r\n        self.closed = False\r\n        self.clipboard = get_clipboard()\r\n\r\n        pythonhist = os.path.expanduser(self.config.hist_file)\r\n        if os.path.exists(pythonhist):\r\n            try:\r\n                self.rl_history.load(pythonhist,\r\n                                     getpreferredencoding() or \"ascii\")\r\n            except EnvironmentError:\r\n                pass\r\n\r\n        self.completers = autocomplete.get_default_completer(\r\n            config.autocomplete_mode)\r\n        # if self.config.pastebin_helper:\r\n        #     self.paster = PasteHelper(self.config.pastebin_helper)\r\n        # else:\r\n        #     self.paster = PastePinnwand(self.config.pastebin_url,\r\n        #                                 self.config.pastebin_expiry,\r\n        #                                 self.config.pastebin_show_url,\r\n        #                                 self.config.pastebin_removal_url)\r\n\r\n    @property\r\n    def ps1(self):\r\n        try:\r\n            if not py3:\r\n                return sys.ps1.decode(getpreferredencoding())\r\n            else:\r\n                return sys.ps1\r\n        except AttributeError:\r\n            return u'>>> '\r\n\r\n    @property\r\n    def ps2(self):\r\n        try:\r\n            if not py3:\r\n                return sys.ps2.decode(getpreferredencoding())\r\n            else:\r\n                return sys.ps2\r\n\r\n        except AttributeError:\r\n            return u'... '\r\n\r\n    def startup(self):\r\n        \"\"\"\r\n        Execute PYTHONSTARTUP file if it exists. 
Call this after front\r\n end-specific initialisation.\r\n \"\"\"\r\n filename = os.environ.get('PYTHONSTARTUP')\r\n if filename:\r\n encoding = inspection.get_encoding_file(filename)\r\n with io.open(filename, 'rt', encoding=encoding) as f:\r\n source = f.read()\r\n if not py3:\r\n # Early Python 2.7.X need bytes.\r\n source = source.encode(encoding)\r\n self.interp.runsource(source, filename, 'exec', encode=False)\r\n\r\n def current_string(self, concatenate=False):\r\n \"\"\"If the line ends in a string get it, otherwise return ''\"\"\"\r\n tokens = self.tokenize(self.current_line)\r\n string_tokens = list(takewhile(token_is_any_of([Token.String,\r\n Token.Text]),\r\n reversed(tokens)))\r\n if not string_tokens:\r\n return ''\r\n opening = string_tokens.pop()[1]\r\n string = list()\r\n for (token, value) in reversed(string_tokens):\r\n if token is Token.Text:\r\n continue\r\n elif opening is None:\r\n opening = value\r\n elif token is Token.String.Doc:\r\n string.append(value[3:-3])\r\n opening = None\r\n elif value == opening:\r\n opening = None\r\n if not concatenate:\r\n string = list()\r\n else:\r\n string.append(value)\r\n\r\n if opening is None:\r\n return ''\r\n return ''.join(string)\r\n\r\n def get_object(self, name):\r\n attributes = name.split('.')\r\n obj = eval(attributes.pop(0), self.interp.locals)\r\n while attributes:\r\n with inspection.AttrCleaner(obj):\r\n obj = getattr(obj, attributes.pop(0))\r\n return obj\r\n\r\n @classmethod\r\n def _funcname_and_argnum(cls, line):\r\n \"\"\"Parse out the current function name and arg from a line of code.\"\"\"\r\n # each list in stack:\r\n # [full_expr, function_expr, arg_number, opening]\r\n # arg_number may be a string if we've encountered a keyword\r\n # argument so we're done counting\r\n stack = [['', '', 0, '']]\r\n try:\r\n for (token, value) in PythonLexer().get_tokens(line):\r\n if token is Token.Punctuation:\r\n if value in '([{':\r\n stack.append(['', '', 0, value])\r\n elif value in ')]}':\r\n full, _, _, start = stack.pop()\r\n expr = start + full + value\r\n stack[-1][1] += expr\r\n stack[-1][0] += expr\r\n elif value == ',':\r\n try:\r\n stack[-1][2] += 1\r\n except TypeError:\r\n stack[-1][2] = ''\r\n stack[-1][1] = ''\r\n stack[-1][0] += value\r\n elif value == ':' and stack[-1][3] == 'lambda':\r\n expr = stack.pop()[0] + ':'\r\n stack[-1][1] += expr\r\n stack[-1][0] += expr\r\n else:\r\n stack[-1][1] = ''\r\n stack[-1][0] += value\r\n elif (token is Token.Number or\r\n token in Token.Number.subtypes or\r\n token is Token.Name or token in Token.Name.subtypes or\r\n token is Token.Operator and value == '.'):\r\n stack[-1][1] += value\r\n stack[-1][0] += value\r\n elif token is Token.Operator and value == '=':\r\n stack[-1][2] = stack[-1][1]\r\n stack[-1][1] = ''\r\n stack[-1][0] += value\r\n elif token is Token.Number or token in Token.Number.subtypes:\r\n stack[-1][1] = value\r\n stack[-1][0] += value\r\n elif token is Token.Keyword and value == 'lambda':\r\n stack.append([value, '', 0, value])\r\n else:\r\n stack[-1][1] = ''\r\n stack[-1][0] += value\r\n while stack[-1][3] in '[{':\r\n stack.pop()\r\n _, _, arg_number, _ = stack.pop()\r\n _, func, _, _ = stack.pop()\r\n return func, arg_number\r\n except IndexError:\r\n return None, None\r\n\r\n def get_args(self):\r\n \"\"\"Check if an unclosed parenthesis exists, then attempt to get the\r\n argspec() for it. 
On success, update self.funcprops,self.arg_pos and\r\n return True, otherwise set self.funcprops to None and return False\"\"\"\r\n\r\n self.current_func = None\r\n\r\n if not self.config.arg_spec:\r\n return False\r\n\r\n func, arg_number = self._funcname_and_argnum(self.current_line)\r\n if not func:\r\n return False\r\n\r\n try:\r\n if inspection.is_eval_safe_name(func):\r\n f = self.get_object(func)\r\n else:\r\n try:\r\n fake_cursor = self.current_line.index(func) + len(func)\r\n f = simpleeval.evaluate_current_attribute(\r\n fake_cursor, self.current_line, self.interp.locals)\r\n except simpleeval.EvaluationError:\r\n return False\r\n except Exception:\r\n # another case of needing to catch every kind of error\r\n # since user code is run in the case of descriptors\r\n # XXX: Make sure you raise here if you're debugging the completion\r\n # stuff !\r\n return False\r\n\r\n if inspect.isclass(f):\r\n class_f = None\r\n\r\n if (hasattr(f, '__init__') and\r\n f.__init__ is not object.__init__):\r\n class_f = f.__init__\r\n if ((not class_f or\r\n not inspection.getfuncprops(func, class_f)) and\r\n hasattr(f, '__new__') and\r\n f.__new__ is not object.__new__ and\r\n # py3\r\n f.__new__.__class__ is not object.__new__.__class__):\r\n\r\n class_f = f.__new__\r\n\r\n if class_f:\r\n f = class_f\r\n\r\n self.current_func = f\r\n self.funcprops = inspection.getfuncprops(func, f)\r\n if self.funcprops:\r\n self.arg_pos = arg_number\r\n return True\r\n self.arg_pos = None\r\n return False\r\n\r\n def get_source_of_current_name(self):\r\n \"\"\"Return the unicode source code of the object which is bound to the\r\n current name in the current input line. Throw `SourceNotFound` if the\r\n source cannot be found.\"\"\"\r\n\r\n obj = self.current_func\r\n try:\r\n if obj is None:\r\n line = self.current_line\r\n if not line.strip():\r\n raise SourceNotFound(_(\"Nothing to get source of\"))\r\n if inspection.is_eval_safe_name(line):\r\n obj = self.get_object(line)\r\n return inspection.get_source_unicode(obj)\r\n except (AttributeError, NameError) as e:\r\n msg = _(u\"Cannot get source: %s\") % (e, )\r\n except IOError as e:\r\n msg = u\"%s\" % (e, )\r\n except TypeError as e:\r\n if \"built-in\" in u\"%s\" % (e, ):\r\n msg = _(\"Cannot access source of %r\") % (obj, )\r\n else:\r\n msg = _(\"No source code found for %s\") % (self.current_line, )\r\n raise SourceNotFound(msg)\r\n\r\n def set_docstring(self):\r\n self.docstring = None\r\n if not self.get_args():\r\n self.funcprops = None\r\n if self.current_func is not None:\r\n try:\r\n self.docstring = pydoc.getdoc(self.current_func)\r\n except IndexError:\r\n self.docstring = None\r\n else:\r\n # pydoc.getdoc() returns an empty string if no\r\n # docstring was found\r\n if not self.docstring:\r\n self.docstring = None\r\n\r\n # What complete() does:\r\n # Should we show the completion box? (are there matches, or is there a\r\n # docstring to show?)\r\n # Some completions should always be shown, other only if tab=True\r\n # set the current docstring to the \"current function's\" docstring\r\n # Populate the matches_iter object with new matches from the current state\r\n # if none, clear the matches iterator\r\n # If exactly one match that is equal to current line, clear matches\r\n # If example one match and tab=True, then choose that and clear matches\r\n\r\n def complete(self, tab=False):\r\n \"\"\"Construct a full list of possible completions and\r\n display them in a window. 
Also check if there's an available argspec\r\n (via the inspect module) and bang that on top of the completions too.\r\n The return value is whether the list_win is visible or not.\r\n\r\n If no matches are found, just return whether there's an argspec to show\r\n If any matches are found, save them and select the first one.\r\n\r\n If tab is True exactly one match found, make the replacement and return\r\n the result of running complete() again on the new line.\r\n \"\"\"\r\n\r\n self.set_docstring()\r\n\r\n matches, completer = autocomplete.get_completer(\r\n self.completers,\r\n cursor_offset=self.cursor_offset,\r\n line=self.current_line,\r\n locals_=self.interp.locals,\r\n argspec=self.funcprops,\r\n current_block='\\n'.join(self.buffer + [self.current_line]),\r\n complete_magic_methods=self.config.complete_magic_methods,\r\n history=self.history)\r\n\r\n if len(matches) == 0:\r\n self.matches_iter.clear()\r\n return bool(self.funcprops)\r\n\r\n self.matches_iter.update(self.cursor_offset,\r\n self.current_line, matches, completer)\r\n\r\n if len(matches) == 1:\r\n if tab:\r\n # if this complete is being run for a tab key press, substitute\r\n # common sequence\r\n self._cursor_offset, self._current_line = \\\r\n self.matches_iter.substitute_cseq()\r\n return Repl.complete(self) # again for\r\n elif self.matches_iter.current_word == matches[0]:\r\n self.matches_iter.clear()\r\n return False\r\n return completer.shown_before_tab\r\n\r\n else:\r\n return tab or completer.shown_before_tab\r\n\r\n def format_docstring(self, docstring, width, height):\r\n \"\"\"Take a string and try to format it into a sane list of strings to be\r\n put into the suggestion box.\"\"\"\r\n\r\n lines = docstring.split('\\n')\r\n out = []\r\n i = 0\r\n for line in lines:\r\n i += 1\r\n if not line.strip():\r\n out.append('\\n')\r\n for block in textwrap.wrap(line, width):\r\n out.append(' ' + block + '\\n')\r\n if i >= height:\r\n return out\r\n i += 1\r\n # Drop the last newline\r\n out[-1] = out[-1].rstrip()\r\n return out\r\n\r\n def next_indentation(self):\r\n \"\"\"Return the indentation of the next line based on the current\r\n input buffer.\"\"\"\r\n if self.buffer:\r\n indentation = next_indentation(self.buffer[-1],\r\n self.config.tab_length)\r\n if indentation and self.config.dedent_after > 0:\r\n def line_is_empty(line):\r\n return not line.strip()\r\n empty_lines = takewhile(line_is_empty, reversed(self.buffer))\r\n if sum(1 for _ in empty_lines) >= self.config.dedent_after:\r\n indentation -= 1\r\n else:\r\n indentation = 0\r\n return indentation\r\n\r\n def formatforfile(self, session_ouput):\r\n \"\"\"Format the stdout buffer to something suitable for writing to disk,\r\n i.e. without >>> and ... 
at input lines and with \"# OUT: \" prepended to\r\n output lines.\"\"\"\r\n\r\n def process():\r\n for line in session_ouput.split('\\n'):\r\n if line.startswith(self.ps1):\r\n yield line[len(self.ps1):]\r\n elif line.startswith(self.ps2):\r\n yield line[len(self.ps2):]\r\n elif line.rstrip():\r\n yield \"# OUT: %s\" % (line,)\r\n return \"\\n\".join(process())\r\n\r\n def write2file(self):\r\n \"\"\"Prompt for a filename and write the current contents of the stdout\r\n buffer to disk.\"\"\"\r\n\r\n try:\r\n fn = self.interact.file_prompt(_('Save to file (Esc to cancel): '))\r\n if not fn:\r\n self.interact.notify(_('Save cancelled.'))\r\n return\r\n except ValueError:\r\n self.interact.notify(_('Save cancelled.'))\r\n return\r\n\r\n if fn.startswith('~'):\r\n fn = os.path.expanduser(fn)\r\n if not fn.endswith('.py') and self.config.save_append_py:\r\n fn = fn + '.py'\r\n\r\n mode = 'w'\r\n if os.path.exists(fn):\r\n mode = self.interact.file_prompt(_('%s already exists. Do you '\r\n 'want to (c)ancel, '\r\n ' (o)verwrite or '\r\n '(a)ppend? ') % (fn, ))\r\n if mode in ('o', 'overwrite', _('overwrite')):\r\n mode = 'w'\r\n elif mode in ('a', 'append', _('append')):\r\n mode = 'a'\r\n else:\r\n self.interact.notify(_('Save cancelled.'))\r\n return\r\n\r\n stdout_text = self.formatforfile(self.getstdout())\r\n\r\n try:\r\n with open(fn, mode) as f:\r\n f.write(stdout_text)\r\n except IOError as e:\r\n self.interact.notify(_(\"Error writing file '%s': %s\") % (fn, e))\r\n else:\r\n self.interact.notify(_('Saved to %s.') % (fn, ))\r\n\r\n def copy2clipboard(self):\r\n \"\"\"Copy current content to clipboard.\"\"\"\r\n\r\n if self.clipboard is None:\r\n self.interact.notify(_('No clipboard available.'))\r\n return\r\n\r\n content = self.formatforfile(self.getstdout())\r\n try:\r\n self.clipboard.copy(content)\r\n except CopyFailed:\r\n self.interact.notify(_('Could not copy to clipboard.'))\r\n else:\r\n self.interact.notify(_('Copied content to clipboard.'))\r\n\r\n def pastebin(self, s=None):\r\n \"\"\"Upload to a pastebin and display the URL in the status bar.\"\"\"\r\n\r\n if s is None:\r\n s = self.getstdout()\r\n\r\n if (self.config.pastebin_confirm and\r\n not self.interact.confirm(_(\"Pastebin buffer? (y/N) \"))):\r\n self.interact.notify(_(\"Pastebin aborted.\"))\r\n return\r\n return self.do_pastebin(s)\r\n\r\n def do_pastebin(self, s):\r\n \"\"\"Actually perform the upload.\"\"\"\r\n if s == self.prev_pastebin_content:\r\n self.interact.notify(_('Duplicate pastebin. Previous URL: %s. 
'\r\n 'Removal URL: %s') %\r\n (self.prev_pastebin_url,\r\n self.prev_removal_url), 10)\r\n return self.prev_pastebin_url\r\n\r\n self.interact.notify(_('Posting data to pastebin...'))\r\n # try:\r\n # paste_url, removal_url = self.paster.paste(s)\r\n # except PasteFailed as e:\r\n # self.interact.notify(_('Upload failed: %s') % e)\r\n # return\r\n return\r\n\r\n self.prev_pastebin_content = s\r\n self.prev_pastebin_url = paste_url\r\n self.prev_removal_url = removal_url\r\n\r\n if removal_url is not None:\r\n self.interact.notify(_('Pastebin URL: %s - Removal URL: %s') %\r\n (paste_url, removal_url), 10)\r\n else:\r\n self.interact.notify(_('Pastebin URL: %s') % (paste_url, ), 10)\r\n\r\n return paste_url\r\n\r\n def push(self, s, insert_into_history=True):\r\n \"\"\"Push a line of code onto the buffer so it can process it all\r\n at once when a code block ends\"\"\"\r\n s = s.rstrip('\\n')\r\n self.buffer.append(s)\r\n\r\n if insert_into_history:\r\n self.insert_into_history(s)\r\n\r\n more = self.interp.runsource('\\n'.join(self.buffer))\r\n\r\n if not more:\r\n self.buffer = []\r\n\r\n return more\r\n\r\n def insert_into_history(self, s):\r\n pythonhist = os.path.expanduser(self.config.hist_file)\r\n try:\r\n self.rl_history.append_reload_and_write(s, pythonhist,\r\n getpreferredencoding())\r\n except RuntimeError as e:\r\n self.interact.notify(u\"%s\" % (e, ))\r\n\r\n def prompt_undo(self):\r\n \"\"\"Returns how many lines to undo, 0 means don't undo\"\"\"\r\n if (self.config.single_undo_time < 0 or\r\n self.interp.timer.estimate() < self.config.single_undo_time):\r\n return 1\r\n est = self.interp.timer.estimate()\r\n n = self.interact.file_prompt(\r\n _(\"Undo how many lines? (Undo will take up to ~%.1f seconds) [1]\")\r\n % (est,))\r\n try:\r\n if n == '':\r\n n = '1'\r\n n = int(n)\r\n except ValueError:\r\n self.interact.notify(_('Undo canceled'), .1)\r\n return 0\r\n else:\r\n if n == 0:\r\n self.interact.notify(_('Undo canceled'), .1)\r\n return 0\r\n else:\r\n message = ngettext('Undoing %d line... (est. %.1f seconds)',\r\n 'Undoing %d lines... (est. %.1f seconds)',\r\n n)\r\n self.interact.notify(message % (n, est), .1)\r\n return n\r\n\r\n def undo(self, n=1):\r\n \"\"\"Go back in the undo history n steps and call reevaluate()\r\n Note that in the program this is called \"Rewind\" because I\r\n want it to be clear that this is by no means a true undo\r\n implementation, it is merely a convenience bonus.\"\"\"\r\n if not self.history:\r\n return None\r\n\r\n self.interp.timer.reset_timer()\r\n\r\n if len(self.history) < n:\r\n n = len(self.history)\r\n\r\n entries = list(self.rl_history.entries)\r\n\r\n self.history = self.history[:-n]\r\n self.reevaluate()\r\n\r\n self.rl_history.entries = entries\r\n\r\n def flush(self):\r\n \"\"\"Olivier Grisel brought it to my attention that the logging\r\n module tries to call this method, since it makes assumptions\r\n about stdout that may not necessarily be true. The docs for\r\n sys.stdout say:\r\n\r\n \"stdout and stderr needn't be built-in file objects: any\r\n object is acceptable as long as it has a write() method\r\n that takes a string argument.\"\r\n\r\n So I consider this to be a bug in logging, and this is a hack\r\n to fix it, unfortunately. 
I'm sure it's not the only module\r\n to do it.\"\"\"\r\n\r\n def close(self):\r\n \"\"\"See the flush() method docstring.\"\"\"\r\n\r\n def tokenize(self, s, newline=False):\r\n \"\"\"Tokenizes a line of code, returning pygments tokens\r\n with side effects/impurities:\r\n - reads self.cpos to see what parens should be highlighted\r\n - reads self.buffer to see what came before the passed in line\r\n - sets self.highlighted_paren to (buffer_lineno, tokens_for_that_line)\r\n for buffer line that should replace that line to unhighlight it,\r\n or None if no paren is currently highlighted\r\n - calls reprint_line with a buffer's line's tokens and the buffer\r\n lineno that has changed if line other than the current line changes\r\n \"\"\"\r\n highlighted_paren = None\r\n\r\n source = '\\n'.join(self.buffer + [s])\r\n cursor = len(source) - self.cpos\r\n if self.cpos:\r\n cursor += 1\r\n stack = list()\r\n all_tokens = list(PythonLexer().get_tokens(source))\r\n # Unfortunately, Pygments adds a trailing newline and strings with\r\n # no size, so strip them\r\n while not all_tokens[-1][1]:\r\n all_tokens.pop()\r\n all_tokens[-1] = (all_tokens[-1][0], all_tokens[-1][1].rstrip('\\n'))\r\n line = pos = 0\r\n parens = dict(zip('{([', '})]'))\r\n line_tokens = list()\r\n saved_tokens = list()\r\n search_for_paren = True\r\n for (token, value) in split_lines(all_tokens):\r\n pos += len(value)\r\n if token is Token.Text and value == '\\n':\r\n line += 1\r\n # Remove trailing newline\r\n line_tokens = list()\r\n saved_tokens = list()\r\n continue\r\n line_tokens.append((token, value))\r\n saved_tokens.append((token, value))\r\n if not search_for_paren:\r\n continue\r\n under_cursor = (pos == cursor)\r\n if token is Token.Punctuation:\r\n if value in parens:\r\n if under_cursor:\r\n line_tokens[-1] = (Parenthesis.UnderCursor, value)\r\n # Push marker on the stack\r\n stack.append((Parenthesis, value))\r\n else:\r\n stack.append((line, len(line_tokens) - 1,\r\n line_tokens, value))\r\n elif value in itervalues(parens):\r\n saved_stack = list(stack)\r\n try:\r\n while True:\r\n opening = stack.pop()\r\n if parens[opening[-1]] == value:\r\n break\r\n except IndexError:\r\n # SyntaxError.. 
more closed parentheses than\r\n # opened or a wrong closing paren\r\n opening = None\r\n if not saved_stack:\r\n search_for_paren = False\r\n else:\r\n stack = saved_stack\r\n if opening and opening[0] is Parenthesis:\r\n # Marker found\r\n line_tokens[-1] = (Parenthesis, value)\r\n search_for_paren = False\r\n elif opening and under_cursor and not newline:\r\n if self.cpos:\r\n line_tokens[-1] = (Parenthesis.UnderCursor, value)\r\n else:\r\n # The cursor is at the end of line and next to\r\n # the paren, so it doesn't reverse the paren.\r\n # Therefore, we insert the Parenthesis token\r\n # here instead of the Parenthesis.UnderCursor\r\n # token.\r\n line_tokens[-1] = (Parenthesis, value)\r\n (lineno, i, tokens, opening) = opening\r\n if lineno == len(self.buffer):\r\n highlighted_paren = (lineno, saved_tokens)\r\n line_tokens[i] = (Parenthesis, opening)\r\n else:\r\n highlighted_paren = (lineno, list(tokens))\r\n # We need to redraw a line\r\n tokens[i] = (Parenthesis, opening)\r\n self.reprint_line(lineno, tokens)\r\n search_for_paren = False\r\n elif under_cursor:\r\n search_for_paren = False\r\n self.highlighted_paren = highlighted_paren\r\n if line != len(self.buffer):\r\n return list()\r\n return line_tokens\r\n\r\n def clear_current_line(self):\r\n \"\"\"This is used as the exception callback for the Interpreter instance.\r\n It prevents autoindentation from occurring after a traceback.\"\"\"\r\n\r\n def send_to_external_editor(self, text):\r\n \"\"\"Returns modified text from an editor, or the original text if editor\r\n exited with non-zero\"\"\"\r\n\r\n encoding = getpreferredencoding()\r\n editor_args = shlex.split(prepare_for_exec(self.config.editor,\r\n encoding))\r\n with tempfile.NamedTemporaryFile(suffix='.py') as temp:\r\n temp.write(text.encode(encoding))\r\n temp.flush()\r\n\r\n args = editor_args + [prepare_for_exec(temp.name, encoding)]\r\n if subprocess.call(args) == 0:\r\n with open(temp.name) as f:\r\n if py3:\r\n return f.read()\r\n else:\r\n return f.read().decode(encoding)\r\n else:\r\n return text\r\n\r\n def open_in_external_editor(self, filename):\r\n encoding = getpreferredencoding()\r\n editor_args = shlex.split(prepare_for_exec(self.config.editor,\r\n encoding))\r\n args = editor_args + [prepare_for_exec(filename, encoding)]\r\n return subprocess.call(args) == 0\r\n\r\n def edit_config(self):\r\n if not os.path.isfile(self.config.config_path):\r\n if self.interact.confirm(_(\"Config file does not exist - create \"\r\n \"new from default? (y/N)\")):\r\n try:\r\n default_config = pkgutil.get_data('bpython',\r\n 'sample-config')\r\n if py3: # py3 files need unicode\r\n default_config = default_config.decode('ascii')\r\n containing_dir = os.path.dirname(\r\n os.path.abspath(self.config.config_path))\r\n if not os.path.exists(containing_dir):\r\n os.makedirs(containing_dir)\r\n with open(self.config.config_path, 'w') as f:\r\n f.write(default_config)\r\n except (IOError, OSError) as e:\r\n self.interact.notify(_(\"Error writing file '%s': %s\") %\r\n (self.config.config.path, e))\r\n return False\r\n else:\r\n return False\r\n\r\n try:\r\n if self.open_in_external_editor(self.config.config_path):\r\n self.interact.notify(_('bpython config file edited. 
Restart '\r\n                                     'bpython for changes to take effect.'))\r\n        except OSError as e:\r\n            self.interact.notify(_('Error editing config file: %s') % e)\r\n\r\n\r\ndef next_indentation(line, tab_length):\r\n    \"\"\"Given a code line, return the indentation of the next line.\"\"\"\r\n    line = line.expandtabs(tab_length)\r\n    indentation = (len(line) - len(line.lstrip(' '))) // tab_length\r\n    if line.rstrip().endswith(':'):\r\n        indentation += 1\r\n    elif indentation >= 1:\r\n        if line.lstrip().startswith(('return', 'pass', 'raise', 'yield')):\r\n            indentation -= 1\r\n    return indentation\r\n\r\n\r\ndef next_token_inside_string(code_string, inside_string):\r\n    \"\"\"Given a code string and an initial state inside_string, return\r\n    whether the next token will be inside a string or not.\"\"\"\r\n    for token, value in PythonLexer().get_tokens(code_string):\r\n        if token is Token.String:\r\n            value = value.lstrip('bBrRuU')\r\n            if value in ['\"\"\"', \"'''\", '\"', \"'\"]:\r\n                if not inside_string:\r\n                    inside_string = value\r\n                elif value == inside_string:\r\n                    inside_string = False\r\n    return inside_string\r\n\r\n\r\ndef split_lines(tokens):\r\n    for (token, value) in tokens:\r\n        if not value:\r\n            continue\r\n        while value:\r\n            head, newline, value = value.partition('\\n')\r\n            yield (token, head)\r\n            if newline:\r\n                yield (Token.Text, newline)\r\n\r\n\r\ndef token_is(token_type):\r\n    \"\"\"Return a callable object that returns whether a token is of the\r\n    given type `token_type`.\"\"\"\r\n\r\n    def token_is_type(token):\r\n        \"\"\"Return whether a token is of a certain type or not.\"\"\"\r\n        token = token[0]\r\n        while token is not token_type and token.parent:\r\n            token = token.parent\r\n        return token is token_type\r\n\r\n    return token_is_type\r\n\r\n\r\ndef token_is_any_of(token_types):\r\n    \"\"\"Return a callable object that returns whether a token is any of the\r\n    given types `token_types`.\"\"\"\r\n    is_token_types = tuple(map(token_is, token_types))\r\n\r\n    def token_is_any_of(token):\r\n        return any(check(token) for check in is_token_types)\r\n\r\n    return token_is_any_of\r\n\r\n\r\ndef extract_exit_value(args):\r\n    \"\"\"Given the arguments passed to `SystemExit`, return the value that\r\n    should be passed to `sys.exit`.\r\n    \"\"\"\r\n    if len(args) == 0:\r\n        return None\r\n    elif len(args) == 1:\r\n        return args[0]\r\n    else:\r\n        return args\r\n","repo_name":"BlenderCL/weed","sub_path":"debugger_tools/sitepackages_libs/bpython/repl.py","file_name":"repl.py","file_ext":"py","file_size_in_byte":48431,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"}
{"seq_id":"32164503690","text":"'''\nThe heart of dynamic programming is using a memoization list\n\n- Create an empty list (sized according to the input)\n- Set the initial values\n- Fill in computed values based on the recurrence relation\n- Look up the computed value for a given input in the list\n\nTypical dynamic programming problems\n\nusually have concise code,\n\n**so the key is to compute the smallest cases first, find the pattern,** and set up the equation (recurrence relation)\n\nEnumerate the cases from n = 1 upwards and look for the recurrence pattern\n'''\n\n\n\nn = int(input())\ndp = [0] * 1001 # memoization\ndp[1] = 1\ndp[2] = 2\n\n\nfor index in range(3, 1001):\n    dp[index] = dp[index - 1] + dp[index - 2] # apply the recurrence relation\nprint (dp[n] % 10007)","repo_name":"HaJunYoo/Algorithm_Study","sub_path":"DP/BOJ/bj 11726.py","file_name":"bj 11726.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"70984048195","text":"import os\nimport json\nimport shutil\nimport re\n\n\ndef combine_all(directory_path):\n    print(\"=== Combining all data files\")\n    build_path = os.path.join(directory_path, 
\"build\")\n if os.path.exists(build_path):\n shutil.rmtree(build_path)\n os.mkdir(build_path)\n\n # Create a new empty file\n for key in [\"organizations\", \"practitioners\", \"patients\", \"encounters\", \"observations\"]:\n with open(\"{}/{}.json\".format(build_path, key), \"w\") as f:\n bundle = \"\"\"\\\n{\n \"resourceType\": \"Bundle\",\n \"type\": \"transaction\",\n \"entry\": [\\\n \"\"\"\n f.write(bundle)\n\n organizations = list(\n filter(\n lambda x: os.path.isdir(os.path.join(directory_path, \"organizations\", x)),\n os.listdir(os.path.join(directory_path, \"organizations\")),\n )\n )\n for org_index, organization in enumerate(organizations):\n org_path = os.path.join(directory_path, \"organizations\", organization)\n with open(\"{}/{}.json\".format(org_path, organization), \"r\") as f:\n org_object = json.loads(f.read())\n org_object[\"request\"] = {\"method\": \"POST\", \"url\": \"Organization\"}\n\n text = json.dumps(org_object, indent=2)\n with open(\"{}/organizations.json\".format(build_path), \"a\") as f:\n f.write(\"\\n \" + \"\\n \".join(text.split(\"\\n\")) + \",\")\n\n patients = list(\n filter(\n lambda x: os.path.isdir(os.path.join(org_path, \"patients\", x)),\n os.listdir(os.path.join(org_path, \"patients\")),\n )\n )\n for patient in patients:\n patient_path = os.path.join(org_path, \"patients\", patient)\n with open(\"{}/{}.json\".format(patient_path, patient), \"r\") as f:\n patient_object = json.loads(f.read())\n patient_object[\"request\"] = {\"method\": \"POST\", \"url\": \"Patient\"}\n text = json.dumps(patient_object, indent=2)\n with open(\"{}/patients.json\".format(build_path), \"a\") as f:\n f.write(\"\\n \" + \"\\n \".join(text.split(\"\\n\")) + \",\")\n\n encs = list(\n filter(\n lambda x: os.path.isdir(os.path.join(patient_path, \"encounters\", x)),\n os.listdir(os.path.join(patient_path, \"encounters\")),\n )\n )\n for encounter in encs:\n encounter_path = os.path.join(patient_path, \"encounters\", encounter)\n with open(\"{}/{}.json\".format(encounter_path, encounter), \"r\") as f:\n encounter_object = json.loads(f.read())\n encounter_object[\"request\"] = {\n \"method\": \"POST\",\n \"url\": \"Encounter\",\n }\n text = json.dumps(encounter_object, indent=2)\n with open(\n \"{}/encounters.json\".format(build_path), \"a\"\n ) as f:\n f.write(\"\\n \" + \"\\n \".join(text.split(\"\\n\")) + \",\")\n\n obss = list(os.listdir(os.path.join(encounter_path, \"observations\")))\n for observation in obss:\n observation_path = os.path.join(encounter_path, \"observations\", observation)\n with open(observation_path, \"r\") as f:\n observation_object = json.loads(f.read())\n observation_object[\"request\"] = {\n \"method\": \"POST\",\n \"url\": \"Observation\",\n }\n text = json.dumps(observation_object, indent=2)\n with open(\n \"{}/observations.json\".format(build_path), \"a\"\n ) as f:\n f.write(\"\\n \" + \"\\n \".join(text.split(\"\\n\")) + \",\")\n\n pracs = os.listdir(os.path.join(directory_path, \"all_practitioners\"))\n\n for prac in pracs:\n prac_path = os.path.join(directory_path, \"all_practitioners\", prac)\n with open(prac_path, \"r\") as f:\n prac_object = json.loads(f.read())\n prac_object[\"request\"] = {\"method\": \"POST\", \"url\": \"Practitioner\"}\n\n text = json.dumps(prac_object, indent=2)\n with open(\"{}/practitioners.json\".format(build_path), \"a\") as f:\n f.write(\"\\n \" + \"\\n \".join(text.split(\"\\n\")) + \",\")\n\n for key in [\"organizations\", \"practitioners\", \"patients\", \"encounters\", \"observations\"]:\n with 
open(\"{}/{}.json\".format(build_path, key), \"a\") as f:\n bundle = \"\"\"\n ]\n}\\\n\"\"\"\n f.write(bundle)\n\n for key in [\"organizations\", \"practitioners\", \"patients\", \"encounters\", \"observations\"]:\n json_file = open(\"{}/{}.json\".format(build_path, key), \"r+\")\n fix_urn_data = re.sub(\n \"[a-zA-Z]*https:\\/\\/syntheticmass.mitre.org\\/v1\\/fhir\\/[a-zA-Z]*\\/\",\n \"urn:uuid:\",\n json_file.read(),\n flags=re.MULTILINE,\n )\n # Find the last comma and remove it\n for index in range(1, len(fix_urn_data) + 1):\n if fix_urn_data[-index] == \",\":\n fix_urn_data = fix_urn_data[:-index] + fix_urn_data[-(index - 1):]\n break\n data = json.loads(fix_urn_data)\n json_file.seek(0)\n json_file.truncate()\n json_file.write(json.dumps(data, indent=2))\n json_file.close()\n\n print(\"Done.\")\n","repo_name":"glipR/FHIR_Comp_2020_S2","sub_path":"dataset_generator/combine.py","file_name":"combine.py","file_ext":"py","file_size_in_byte":5387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22596039315","text":"import re\nimport pandas as pd \nimport streamlit as st \nfrom PIL import Image\nfrom streamlit.proto.Checkbox_pb2 import Checkbox\nfrom streamlit.proto.RootContainer_pb2 import SIDEBAR\nimport streamlit_info as info \n\n\n\ndef main_selector(image = info.tittle_image):\n\n @st.cache(persist=False)\n def csv_loader():\n \n st.balloons()\n\n st.title('Electric rechargers in Madrid')\n\n st.image(image,caption='Image footing')\n\ndef expander(image = info.dumb_dumber):\n\n with st.beta_expander('Description'):\n\n st.write('Dashboard prepared with the purpose of showing all rechage points for electric cars in Madrid area ')\n\n st.image(image)\n\ndef echo_data(data):\n\n with st.echo(code_location='above'):\n\n st.write('Find below the necesary code for loading and printing our data')\n\n st.write(data)\n\n \ndef graph(data):\n\n df_map = data.iloc[:,-2:]\n\n df_map.columns=['longitude','lat']\n\n st.map(df_map)\n\n df_district = data[['DISTRITO','Nº CARGADORES']]\n\n df_bar_chart = df_district.groupby('DISTRITO')['Nº CARGADORES'].sum()\n\n df_bar_chart = pd.DataFrame(df_bar_chart)\n\n st.bar_chart(df_bar_chart)\n\n df_operador = data[['OPERADOR','Nº CARGADORES']]\n\n df_bar_chart = df_operador.groupby('OPERADOR')['Nº CARGADORES'].sum()\n\n df_bar_chart = pd.DataFrame(df_bar_chart)\n\n st.bar_chart(df_bar_chart)\n\n\ndef filter_options(data):\n\n #charger_by_distr = data.groupby(data.DISTRITO)[['Nº CARGADORES'].sum()\n\n filter_district = st.sidebar.selectbox('District',(data.DISTRITO.unique()))\n district_check = st.sidebar.checkbox('Filter by District?')\n filter_operator = st.sidebar.selectbox('Operator',(data.OPERADOR.unique()))\n operator_check = st.sidebar.checkbox('Filter by Operator?')\n filter_size = st.sidebar.select_slider('Size',options=[1,2,3,4,5,6,7,8,9,10],value = (1,10))\n size_check = st.sidebar.checkbox('Filter by Size?')\n \n if district_check:\n\n data= data.loc[data['DISTRITO'] == filter_district, :]\n\n if operator_check:\n\n data = data.loc[data['OPERADOR'] == filter_operator, :]\n\n if size_check:\n\n min_size = filter_size[0]\n\n max_size = filter_size[1]\n\n data = data.loc[(data[min_size]<=data['Nº CARGADORES']) and (data[max_size]>=data['Nº CARGADORES']), :]\n \n\n data = data['Nº CARGADORES'] >= min_size and data['Nº CARGADORES'] <= max_size\n\n return data\n \n\n 
\n","repo_name":"chabbey/bootcamp_pch","sub_path":"streamlit/streamlit_func.py","file_name":"streamlit_func.py","file_ext":"py","file_size_in_byte":2453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"35043244906","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass ChannelAttentionModule(nn.Module):\n\n\n    def __init__(self, dim, reduction=1):\n        super(ChannelAttentionModule, self).__init__()\n        mid_dim = dim // reduction\n        self.BottleNeck = nn.Sequential(\n            nn.Linear(in_features=dim, out_features=dim),\n            nn.ReLU(inplace=True),\n            nn.Linear(in_features=dim, out_features=dim)\n        )\n\n        self.sigmoid = nn.Sigmoid()\n\n        self.conv = nn.Conv2d(in_channels=1,\n                              out_channels=1,\n                              kernel_size=7,\n                              stride=1,\n                              padding=3)\n\n    def forward(self, x): \n\n\n        out = self.BottleNeck(x)\n\n        return self.sigmoid(out)\n\n\nclass SpatialAttentionModule(nn.Module):\n    def __init__(self, k_size = 7):\n        super(SpatialAttentionModule, self).__init__()\n        _kernel_size = k_size\n        _padding = (_kernel_size - 1) // 2\n        _outchannel = 1\n        self.conv1d = nn.Conv1d(in_channels=2, out_channels=_outchannel,\n                              kernel_size=_kernel_size,\n                              stride=1,\n                              padding=_padding)\n        self.sigmoid = nn.Sigmoid()\n\n    def forward(self, x):\n\n        avgout = torch.mean(x, dim=-1, keepdim=True) \n        maxout, _ = torch.max(x, dim=-1, keepdim=True) \n        out = torch.cat([avgout, maxout], dim=-1) \n        out = out.permute(0, 2, 1) \n        out_conv = self.conv1d(out)\n        \n        out_f = self.sigmoid(out_conv) \n        out_f = out_f.permute(0, 2, 1) \n        return out_f\n\n\nclass CSAM(nn.Module):\n    def __init__(self, args, RS_dim, KG_dim=None, _kernel_size=7, _reduction=16):\n        super(CSAM, self).__init__()\n        self.args = args\n        self.channel_attention = ChannelAttentionModule(RS_dim, _reduction)\n        self.spatial_attention = SpatialAttentionModule(_kernel_size)\n\n        self.Liner_KG_RS = nn.Linear(in_features=KG_dim, out_features=RS_dim)\n\n        if KG_dim != None:\n            self.channel_attention_KG = ChannelAttentionModule(KG_dim, _reduction)\n            self.spatial_attention_KG = SpatialAttentionModule(_kernel_size)\n\n        _padding = (_kernel_size - 1) // 2\n        _outchannel = 1\n        self.G_conv1d = nn.Conv1d(in_channels=RS_dim, out_channels=_outchannel,\n                              kernel_size=_kernel_size,\n                              stride=1,\n                              padding=_padding)\n        self.G_Sigmod = nn.Sigmoid()\n        self.G_Relu = nn.ReLU()\n\n    def Generate_gate(self, x):\n        x_input = x.permute(0, 2, 1) \n        out = self.G_Sigmod(self.G_Relu(self.G_conv1d(x_input))) \n        _out = 1 - out\n        out = out.permute(0, 2, 1)\n        _out = _out.permute(0, 2, 1)\n        return out, _out\n\n    def forward(self, x, x_kg=None):\n        if self.args.isKG:\n            residual = x\n            x_inial = x\n            \n            rate_RS, rate_KG = self.Generate_gate(x_inial)\n\n            out_RS = self.channel_attention(x) * x\n            out_RS = self.spatial_attention(out_RS) * out_RS\n\n            out_KG = self.channel_attention_KG(x_kg) * x_kg\n            out_KG = self.spatial_attention_KG(out_KG) * out_KG\n            out_KG = self.Liner_KG_RS(out_KG) \n\n            out = rate_RS * out_RS + rate_KG * out_KG + residual\n        else:\n            residual = x\n            out = self.channel_attention(x) * x\n            out = self.spatial_attention(out) * out\n            out = out + residual\n\n        return out","repo_name":"LYedu9993/SRS_KAeDCN","sub_path":"Model/KAeDCN/Model_class/CSAM.py","file_name":"CSAM.py","file_ext":"py","file_size_in_byte":3707,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"}
{"seq_id":"28589515332","text":"import cv2\r\nimport dlib\r\nimport imutils\r\nfrom imutils import face_utils\r\n\r\n
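# NOTE: dlib.shape_predictor() below loads the pretrained 68-landmark model from a\r\n# relative path, so 'shape_predictor_68_face_landmarks.dat' must sit in the working directory\r\n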
# Initialize dlib's frontal face detector\r\ndetector = dlib.get_frontal_face_detector()\r\n\r\n# Initialize dlib's facial landmark model\r\npredictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')\r\n\r\n# Function to detect and draw the facial landmarks and rectangle on the image\r\ndef draw_landmarks(image, shape, face_rect):\r\n    for (x, y) in shape:\r\n        cv2.circle(image, (x, y), 1, (255, 255, 0), -1)\r\n    (x, y, w, h) = face_rect\r\n    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)\r\n\r\n# Initialize the video capture\r\nvideo_capture = cv2.VideoCapture(0)\r\n\r\nwhile True:\r\n    # Read the next video frame\r\n    ret, frame = video_capture.read()\r\n\r\n    # Resize the frame for better performance\r\n    frame = imutils.resize(frame, width=800)\r\n\r\n    # Convert the frame to grayscale\r\n    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n\r\n    # Detect the faces in the frame\r\n    faces = detector(gray)\r\n\r\n    for face in faces:\r\n        # Predict the facial landmarks and rectangle for the current face\r\n        shape = predictor(gray, face)\r\n        shape = face_utils.shape_to_np(shape)\r\n        (x, y, w, h) = face.left(), face.top(), face.width(), face.height()\r\n\r\n        # Draw the facial landmarks and rectangle on the image\r\n        draw_landmarks(frame, shape, (x, y, w, h))\r\n\r\n    # Show the frame with the facial landmarks and rectangle\r\n    cv2.imshow('Pontos Faciais', frame)\r\n\r\n    # Check whether the 'q' key was pressed to exit the loop\r\n    if cv2.waitKey(1) & 0xFF == ord('q'):\r\n        break\r\n\r\n# Release the resources\r\nvideo_capture.release()\r\ncv2.destroyAllWindows()\r\n","repo_name":"RoseBorges44/Facial_Landmarks_em_Video_com_Opencv_e_Dlib","sub_path":"-Main.py","file_name":"-Main.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"13001314973","text":"import os\r\nimport tkinter as tk\r\nimport pandas as pd\r\nimport win32com.client\r\nimport datetime\r\nimport re\r\nfrom tkinter import filedialog, Label, Entry, Button, StringVar, messagebox\r\nfrom shutil import copy2\r\n\r\nallowed_orders = []\r\n\r\n\r\ndef main(): # build the Python GUI form\r\n    window = tk.Tk()\r\n    window.title(\"DPCS파일 복사 자동화\")\r\n\r\n    file_label = Label(window, text=\"File Address:\")\r\n    file_label.grid(row=0, column=0)\r\n\r\n    file_path = StringVar()\r\n    file_entry = Entry(window, textvariable=file_path, width=50)\r\n    file_entry.grid(row=0, column=1)\r\n    \r\n    date_label = Label(window, text=\"# 2023-07-10 by 출력실 김윤기\")\r\n    date_label.grid(row=2, column=2, sticky=\"e\") # to place it at the bottom right \r\n\r\n    def browse_button():\r\n        filename = filedialog.askopenfilename(title=\"Select a file\", filetypes=((\"Microsoft Excel Worksheet\", \"*.xlsx\"),))\r\n        file_path.set(filename)\r\n\r\n    def choose_excel_file():\r\n        filename = filedialog.askopenfilename(\r\n            title=\"변환할 xls 파일 선택\", filetypes=((\"Microsoft Excel Worksheet\", \"*.xls\"),))\r\n        if filename:\r\n            save_xls_to_xlsx(filename)\r\n\r\n    \r\n\r\n# do not touch the xls-to-xlsx section!\r\n    def save_xls_to_xlsx(file_path):\r\n        # re-normalize the file path\r\n        file_path = file_path.replace(\"/\", \"\\\\\")\r\n        file_path = file_path.replace(\",\", \"_\")\r\n        print('File path:', file_path) # for debugging\r\n        # Set the proper file name and check if it is already in the xlsx format\r\n        input_filename, extension = os.path.splitext(file_path)\r\n        if extension.lower() == \".xlsx\":\r\n            return\r\n\r\n        output_filename = input_filename + \".xlsx\"\r\n        \r\n        excel = None\r\n        try:\r\n            
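# drive Excel itself over COM to convert; FileFormat=51 is xlOpenXMLWorkbook (.xlsx)\r\n            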
excel = win32com.client.Dispatch('Excel.Application')\r\n            excel.Visible = False\r\n            wb = excel.Workbooks.Open(file_path)\r\n            wb.SaveAs(output_filename, FileFormat=51)\r\n            wb.Close(False)\r\n        except Exception as e:\r\n            if excel:\r\n                excel.Application.Quit()\r\n                excel = None\r\n            raise e\r\n        finally:\r\n            if excel:\r\n                excel.Application.Quit()\r\n                excel = None\r\n\r\n        return output_filename\r\n\r\n    file_convert_button = tk.Button(window, text=\"XLS to XLSX\", width=15, command=choose_excel_file)\r\n    file_convert_button.grid(row=1, column=0, columnspan=3)\r\n\r\n    browse_button = Button(window, text=\"Browse\", command=browse_button)\r\n    browse_button.grid(row=0, column=2)\r\n\r\n    button1 = Button(window, text=\"1차 실행\", width=15, command=lambda: execute_process(file_path.get(), \"1차\"))\r\n    button1.grid(row=1, column=0)\r\n    button2 = Button(window, text=\"2차 실행\", width=15, command=lambda: execute_process(file_path.get(), \"2차\"))\r\n    button2.grid(row=1, column=2)\r\n\r\n    window.mainloop()\r\n\r\ndef execute_process(excel_file: str, option: str): \r\n    # main process\r\n    # if the frame types change or need edits, add/update them in the [] lists below\r\n    global allowed_orders\r\n    # excludes '602(마트)4번' and other silver-halide items\r\n    classification_conditions = ['604(라미)5번', '라미코팅', '우드끌레르', '마트'] # to further sort the 2nd-run silver-halide paper items into the 당일/기타 folders\r\n    \r\n    if option == \"1차\":\r\n        allowed_orders = ['끌레르액자','N끌레르','기본액자','베이직액자','마틸스']\r\n    elif option == \"2차\":\r\n        allowed_orders = ['뉴욕갤러리','PAS','N디아섹','디아섹','빌리프우드','빌리프데코','프라임우드','프라임데코','캔버스액자','캔버스(무광)액자','메탈라인','원목액자','모던우드','패브릭데코','루이','빌트랩','아트플러스','프리즘','프리모(마트)','엣지우드','파인아트','이태리(골드)','이태리(화이트)','블랙갤러리','감성사진관']\r\n    else:\r\n        print(\"올바르지 않은 옵션입니다.\")\r\n        return\r\n    \r\n    # Read the Excel file; it must be saved as .xlsx before it can be read\r\n    try:\r\n        df = pd.read_excel(excel_file)\r\n    except FileNotFoundError:\r\n        print(\"DPCS내 엑셀을 알려주십시오\")\r\n        return\r\n\r\n    # strip whitespace from the column names\r\n    df.columns = df.columns.str.strip()\r\n    # Excel filter condition: 주문취소=0 AND 접수증=0 AND both 다운로드 and 다운로드1 equal 1\r\n    filtered_df = df.loc[(df[\"주문취소\"]==0) & (df[\"접수증\"]==0) & (df[\"다운로드\"]==1) & (df[\"다운로드1\"]==1)].copy()\r\n    # compare the ship date with today: one day ahead -> \"기타\", same day -> \"당일\", everything else unchanged \r\n    #filtered_df[\"due_date_diff\"] = (pd.to_datetime(filtered_df[\"출고예정\"]) - pd.to_datetime('today')).dt.days\r\n\r\n    # extract the date part of the Excel file name, e.g. '주문목록2023-05-23' -> 2023-05-23\r\n    excel_date = excel_file.split('\\\\')[-1].split('.')[0][-10:]\r\n\r\n    today = datetime.date.today()\r\n    tomorrow = today + datetime.timedelta(days=1)\r\n\r\n    for index, row in filtered_df.iterrows():\r\n        #order_type = re.sub(r\"\\([^0]*\\)\",\"\",order_type)\r\n        order_type = row[\"주문종류\"] # look up the 주문종류 (order type) column for this row\r\n\r\n        if order_type not in allowed_orders:\r\n            continue\r\n\r\n        due_date = pd.to_datetime(row[\"출고예정\"]).date()\r\n        if option != '2차' or row['분류'] in classification_conditions:\r\n            if due_date == today:\r\n                target_sub_folder = \"당일\"\r\n            elif due_date == tomorrow:\r\n                target_sub_folder = \"기타\"\r\n            else:\r\n                target_sub_folder = \"\"\r\n        else:\r\n            target_sub_folder = \"\"\r\n\r\n        \r\n        order_no = str(row['주문번호']) # get the order number\r\n        order_no = re.sub(r\"\\s*\\([^()]*\\)\", \"\", order_no) # strip parentheses and whitespace from the order number\r\n        \r\n        source_folder = os.path.join(r\"\\\\arttre-down\\tmp_client\", excel_date, order_type)\r\n        \r\n        if target_sub_folder in [\"당일\", \"기타\"]:\r\n            target_folder = os.path.join(r\"E:\\아크릴작업\", target_sub_folder, order_type)\r\n        else:\r\n            target_folder = os.path.join(r\"E:\\아크릴작업\", order_type)\r\n        \r\n        copy_files(source_folder, target_folder, order_no, row[\"출고예정\"])\r\n\r\n# copy files\r\ndef copy_files(source_folder, target_folder, order_no, due_date):\r\n    
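# copy every folder under source_folder whose name contains the order number into target_folder\r\n    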
print(f\"Source folder: {source_folder}\")\r\n    print(f\"Target folder: {target_folder}\")\r\n    print(f\"Order_no: {order_no}\")\r\n\r\n    if not os.path.exists(source_folder):\r\n        print(f\"Source folder not found: {source_folder}\")\r\n        return\r\n\r\n    for folder in os.listdir(source_folder):\r\n        if order_no in folder:\r\n            folder_path = os.path.join(source_folder, folder)\r\n            new_target_folder = os.path.join(target_folder, folder)\r\n\r\n            if not os.path.exists(new_target_folder):\r\n                os.makedirs(new_target_folder)\r\n\r\n            for root, dirs, files in os.walk(folder_path):\r\n                for file in files:\r\n                    print(f\"Current file: {file}\")\r\n                    source_file = os.path.join(root, file)\r\n                    print(f\"Copying: {source_file} -> {os.path.join(new_target_folder, file)}\")\r\n                    copy2(source_file, new_target_folder)\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n\r\n# # 2023-07-21 by 출력실 김윤기","repo_name":"rklpoi1234/attre-","sub_path":"1차2차당겨오기자동화.py","file_name":"1차2차당겨오기자동화.py","file_ext":"py","file_size_in_byte":7460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"25490452413","text":"from dali.command import Command\nfrom dali.exceptions import CommunicationError\nimport dali.frame\nimport logging\nimport socket\nimport struct\n\n\n###############################################################################\n# XXX: Adopt API to ``dali.driver.base``\n###############################################################################\n\n\nclass DaliServer:\n    \"\"\"Communicate with daliserver\n    (https://github.com/onitake/daliserver)\n\n    NB this requires daliserver commit\n    90e34a0cd2945dc7a15681f11647e708f858521e or later.\n    \"\"\"\n\n    def __init__(self, host=\"localhost\", port=55825,\n                 multiple_frames_per_connection=False):\n        self._target = (host, port)\n        self._s = None\n        self._multiple_frames_per_connection = multiple_frames_per_connection\n\n    def __enter__(self):\n        if self._multiple_frames_per_connection:\n            self._s = socket.create_connection(self._target)\n        return self\n\n    def __exit__(self, *vpass):\n        if self._multiple_frames_per_connection:\n            self._s.close()\n            self._s = None\n\n    def send(self, command):\n        if self._s:\n            s = self._s\n        else:\n            s = socket.create_connection(self._target)\n\n        assert isinstance(command, Command)\n        message = struct.pack(\"BB\", 2, 0) + command.frame.pack\n\n        logging.info(\"command: {}{}\".format(\n            command, \" (twice)\" if command.sendtwice else \"\"))\n\n        # Set a default result which may be used if the first send fails\n        result = \"\\x02\\xff\\x00\\x00\"\n\n        try:\n            s.send(message)\n            result = s.recv(4)\n            if command.sendtwice:\n                s.send(message)\n                result = s.recv(4)\n        except:\n            raise\n        finally:\n            if not self._s:\n                s.close()\n\n        response = self.unpack_response(command, result)\n\n        if response:\n            logging.info(\"  -> {0}\".format(response))\n\n        return response\n\n    def unpack_response(self, command, result):\n        \"\"\"Unpack result from the given bytestream and create the\n        corresponding response object\n\n        :param command: the command which is waiting for its response\n        :param result: the result bytestream which came back\n        :return: the result object\n        \"\"\"\n\n        assert isinstance(command, Command)\n\n        ver, status, rval, pad = struct.unpack(\"BBBB\", result)\n        response = None\n\n        if command.response:\n            if status == 0:\n                response = command.response(None)\n            elif status == 1:\n                response = command.response(dali.frame.BackwardFrame(rval))\n            elif status == 255:\n                # This is \"failure\" - daliserver seems to be reporting\n                # this for a garbled response when 
several ballasts\n # reply. It should be interpreted as \"Yes\".\n response = command.response(dali.frame.BackwardFrameError(255))\n else:\n raise CommunicationError(\"status was %d\" % status)\n\n return response\n\n__all__ = [\"DaliServer\"]\n","repo_name":"sde1000/python-dali","sub_path":"dali/driver/daliserver.py","file_name":"daliserver.py","file_ext":"py","file_size_in_byte":3120,"program_lang":"python","lang":"en","doc_type":"code","stars":126,"dataset":"github-code","pt":"61"} +{"seq_id":"39073940913","text":"from Calendar_2020.ReadFiles import readfile\n\n\ndef task_1():\n lines = list(map(int, readfile(1)))\n for x in lines:\n for y in lines:\n if x + y == 2020:\n return x * y * y\n\n\ndef task_2():\n lines = list(map(int, readfile(1)))\n for x in lines:\n for y in lines:\n for z in lines:\n if x + y + z == 2020:\n return x * y * z\n","repo_name":"Strawl/advent-of-code","sub_path":"2020/Calendar_2020/Day1.py","file_name":"Day1.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20584962903","text":"from sys import argv\n\nfrom Server import Server\n\nif __name__ == '__main__':\n args = argv.copy()\n\n if len(args) <= 1:\n args.append('1440')\n if len(args) <= 2:\n args.append('1440')\n\n server = Server(int(args[1]), int(args[2]))\n server.start()\n","repo_name":"junhg0211/Paint","sub_path":"server/src/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"17028887943","text":"from collections import defaultdict, deque\nfrom functools import reduce\nfrom itertools import permutations, product\nimport math\nimport copy\nfrom intcode import IntCodeComputer\n\nNORTH, SOUTH, WEST, EAST = 'NORTH', 'SOUTH', 'WEST', 'EAST'\n\ndir_map = {\n NORTH: 1,\n SOUTH: 2,\n WEST: 3,\n EAST: 4,\n}\n\nbacktrack = {\n NORTH: SOUTH,\n SOUTH: NORTH,\n WEST: EAST,\n EAST: WEST,\n}\n\nmove = {\n NORTH: (0, 1),\n SOUTH: (0, -1),\n EAST: (1, 0),\n WEST: (-1, 0),\n}\n\ncomputer = None\n\n\ndef dfs(c_x, c_y, grid, dist):\n global computer\n for dir in [NORTH, SOUTH, WEST, EAST]:\n n_x, n_y = c_x + move[dir][0], c_y + move[dir][1]\n\n # Don't try to explore further\n if (n_x, n_y) in grid.keys():\n continue\n\n # Save current computer state\n prev_computer = copy.deepcopy(computer)\n\n computer.add_input(dir_map[dir])\n computer.run()\n response = computer.output_queue.popleft()\n grid[(n_x, n_y)] = response + 1\n\n if response == 2:\n print(\"DONE\", dist + 1)\n return dist\n if response == 1:\n dfs(n_x, n_y, grid, dist + 1)\n computer = prev_computer\n\n\ndef aoc_day15(input_file):\n global computer\n with open(input_file, \"r\") as f:\n input_program = [line.strip() for line in f][0]\n\n computer = IntCodeComputer(\n computer_id=1,\n input_program=input_program,\n )\n\n grid = defaultdict(int)\n grid[(0, 0)] = 2 # EMPTY\n\n # GENERATE THE GRID\n s_x, s_y = 0, 0\n print(dfs(0, 0, grid, 0))\n\n grid_size = 25\n for i in range(grid_size, -grid_size, -1):\n for j in range(-grid_size, grid_size, 1):\n if grid[(j, i)] == 1: # HEX\n print(\"#\", end=\"\")\n elif grid[(j, i)] == 2: # EMPTY\n print(\".\", end=\"\")\n elif grid[(j, i)] == 3: # DESTINATION\n print(\"%\", end=\"\")\n s_x, s_y = j, i\n else:\n print(\" \", end=\"\")\n print()\n\n queue = deque()\n vis = defaultdict(int)\n dist = defaultdict(int)\n queue.append((s_x, s_y))\n vis[(s_x, s_y)] = 1\n dist[(s_x, s_y)] = 
0\n\n while queue:\n c_x, c_y = queue.popleft()\n vis[(c_x, c_y)] = 1\n for dir in [NORTH, SOUTH, WEST, EAST]:\n next_pos = (c_x + move[dir][0], c_y + move[dir][1])\n if grid[next_pos] == 2 and not vis[next_pos]: # EMPTY\n dist[next_pos] = dist[(c_x, c_y)] + 1\n queue.append(next_pos)\n\n return max(dist.values())\n\n\ndef test_program():\n DAY = 15\n test_arr = [\n (\n (f\"{DAY}/aoc{DAY}.in1\", ),\n 1,\n ),\n # (\n # (f\"{DAY}/aoc{DAY}.in2\", ),\n # 165,\n # ),\n # (\n # (f\"{DAY}/aoc{DAY}.in3\", ),\n # 13312,\n # ),\n # (\n # (f\"{DAY}/aoc{DAY}.in4\", ),\n # 180697,\n # ),\n # (\n # (f\"{DAY}/aoc{DAY}.in5\", ),\n # 2210736,\n # ),\n # (\n # (f\"{DAY}/aoc{DAY}.in6\", ),\n # 0,\n # ),\n ]\n\n for inp, expected in test_arr:\n actual = aoc_day15(*inp)\n\n if actual == expected:\n print(\"OK\")\n else:\n print(f\"ERROR: actual: {actual} expected: {expected}\")\n\n\ntest_program()\n","repo_name":"astraldawn/aoc2019","sub_path":"aoc15.py","file_name":"aoc15.py","file_ext":"py","file_size_in_byte":3344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16755349915","text":"import requests\nimport time\nfrom datetime import datetime\nimport pytz\nimport pyttsx3\n\nurl = \"https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/findByPin?pincode=324001&date=08-05-2021\"\ntimeInSec = 5\nserverStatus = \"start\"\nspeechEngine = pyttsx3.init()\n\nwhile True:\n response = requests.get(url)\n if response.status_code != 200:\n if serverStatus != \"down\":\n speechEngine.say(\"Server is down\")\n speechEngine.runAndWait()\n serverStatus = \"down\"\n timeInSec = 300\n print(\"REQUEST STATUS = \" + str(response.status_code))\n print(\"Server is down, will notify when server will be up again.\\n Be patient....\")\n else:\n if serverStatus != \"up\":\n speechEngine.say(\"Server is up\")\n serverStatus = \"up\"\n speechEngine.runAndWait()\n print(\"Server is up.\\n Will notify when schedule booking opens....\")\n response = response.json()\n timeInSec = 5\n if len(response[\"sessions\"]) != 0:\n speechEngine.say(\"Schedule Booking OPEN\")\n speechEngine.runAndWait()\n print(\"REGISTRATION OPEN AT TIME : \" + str(datetime.now(pytz.timezone('Asia/Kolkata'))))\n print(response)\n time.sleep(timeInSec)\n","repo_name":"VijitSaxena/CoWin_check_RegistrationOpen","sub_path":"vaccineV3.py","file_name":"vaccineV3.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7811866441","text":"import random\r\nimport pygame\r\n\r\nclass maze_solver_full_memory:\r\n def __init__(self, start_point=None):\r\n if(start_point == None):\r\n self.x = 0\r\n self.y = 0\r\n else:\r\n self.x = start_point[0]\r\n self.y = start_point[1]\r\n self.total_steps = 0\r\n self.prev_move = -1\r\n self.branches = {}\r\n self.result = ''\r\n \r\n def tell_pos(self):\r\n print(\"I am currently at the position ({}, {})\".format(self.x, self.y))\r\n\r\n def draw(self, display):\r\n draw_x = self.x\r\n draw_y = self.y\r\n pygame.draw.rect(display, (255, 0, 0), pygame.Rect(draw_y*50, draw_x*50, 50, 50))\r\n pygame.time.wait(100)\r\n \r\n def move(self, env):\r\n if(self.x == env.get_goal()[0] and self.y == env.get_goal()[1]):\r\n print(\"You have solved the maze and reached the end...\")\r\n self.result = \"You have solved the maze and reached the end...\"\r\n return False\r\n elif(self.total_steps >= env.limit*env.limit):\r\n print(\"Did not halt, mostly unsolvable maze...\")\r\n 
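# record the outcome string so the caller/UI can show why the run stopped\r\n            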
self.result = \"Did not halt, mostly unsolvable maze...\"\r\n return False\r\n else:\r\n self.surrondings = [env.get_position(self.x+1, self.y),\r\n env.get_position(self.x, self.y+1),\r\n env.get_position(self.x-1, self.y), \r\n env.get_position(self.x, self.y-1)]\r\n possible_moves = []\r\n for i in range(len(self.surrondings)):\r\n if self.surrondings[i] == 'F':\r\n possible_moves.append(i)\r\n \r\n if(self.prev_move == 0):\r\n if(2 in possible_moves):\r\n possible_moves.remove(2)\r\n elif(self.prev_move == 2):\r\n if(0 in possible_moves):\r\n possible_moves.remove(0)\r\n elif(self.prev_move == 1):\r\n if(3 in possible_moves):\r\n possible_moves.remove(3)\r\n elif(self.prev_move == 3):\r\n if(1 in possible_moves):\r\n possible_moves.remove(1)\r\n \r\n move_turn = 1\r\n if(len(possible_moves) == 1):\r\n current_move = possible_moves[0]\r\n elif(len(possible_moves) > 1):\r\n if((self.x, self.y) not in self.branches):\r\n current_move = random.choice(possible_moves)\r\n possible_moves.remove(current_move)\r\n self.branches[(self.x, self.y)] = possible_moves\r\n else:\r\n if(self.branches[(self.x, self.y)]):\r\n current_move = random.choice(self.branches[(self.x, self.y)])\r\n self.branches[(self.x, self.y)].remove(current_move)\r\n else:\r\n current_move = random.choice(possible_moves)\r\n possible_moves.remove(current_move)\r\n self.branches[(self.x, self.y)] = possible_moves\r\n else:\r\n if(not self.branches):\r\n print(\"Unsolvable maze, no route to exit...\")\r\n self.result = \"Unsolvable maze, no route to exit...\"\r\n return False\r\n last_choice_position = self.branches.popitem()\r\n self.x = last_choice_position[0][0]\r\n self.y = last_choice_position[0][1]\r\n self.branches[(self.x, self.y)] = last_choice_position[1]\r\n self.total_steps = 0\r\n move_turn = 0\r\n \r\n if(move_turn):\r\n if(current_move == 0):\r\n self.x += 1\r\n self.prev_move = 0\r\n elif(current_move == 1):\r\n self.y += 1\r\n self.prev_move = 1\r\n elif(current_move == 2):\r\n self.x -= 1\r\n self.prev_move = 2\r\n elif(current_move == 3):\r\n self.y -= 1\r\n self.prev_move = 3\r\n else:\r\n print(\"Maze is blocked for all the places...\")\r\n self.result = \"Maze is blocked for all the places...\"\r\n return False\r\n \r\n self.total_steps += 1\r\n return True","repo_name":"Nihal-Srivastava05/Maze-Solving-AI-Agent","sub_path":"Simulations/Agent4_BranchMemory.py","file_name":"Agent4_BranchMemory.py","file_ext":"py","file_size_in_byte":4450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16327053371","text":"import numpy as np\nimport time\nfrom numpy import save,load\nimport matplotlib\nmatplotlib.use('AGG')\nimport matplotlib.pyplot as plt\nmatplotlib.rcParams['font.size'] = 12\nmatplotlib.rcParams['font.family'] = 'serif'\nmatplotlib.rcParams['savefig.bbox'] = 'tight'\nmatplotlib.rcParams['savefig.pad_inches'] = 0\nfont = {'family': 'serif', 'size': 25,}\nimport matplotlib.ticker as ticker\nimport pickle\nimport sys\nimport subprocess\nimport os\nfrom os import mkdir\nfrom os.path import join,exists\nfrom shutil import copyfile\ncodefolder = \"/home/jf4241/SHORT\"\nos.chdir(codefolder)\nfrom model_obj import Model\nfrom doublewell_model import DoubleWellModel\nimport doublewell_params\nimport helper\nfrom data_obj import Data\nimport function_obj \nfrom tpt_obj import TPT\n\n# -----------------------\n# Make folders\ndatafolder = \"/scratch/jf4241/SHORT_doublewell\"\nif not exists(datafolder): 
mkdir(datafolder)\nsimfolder = join(datafolder,\"runs\")\nif not exists(simfolder): mkdir(simfolder)\nresultfolder = join(datafolder,\"results\")\nif not exists(resultfolder): mkdir(resultfolder)\ndayfolder = join(resultfolder,\"2021-05-30\")\nif not exists(dayfolder): mkdir(dayfolder)\nexpfolder = join(dayfolder,\"3\")\nif not exists(expfolder): mkdir(expfolder)\n\nasymb = r\"$\\mathbf{a}$\"\nbsymb = r\"$\\mathbf{b}$\"\n\n# ------------------------\n# Decide what to do\nleast_action_flag = 0\nrun_long_flag = 0\nrun_short_flag = 0\ncompute_tpt_flag = 0\n# ------------------------\n\n# ------------------------\n# Set parameters\nalgo_params,algo_param_string = doublewell_params.get_algo_params()\nphysical_params,physical_param_string = doublewell_params.get_physical_params()\n# Set savefolder accordingly\nphysical_param_folder = join(expfolder,physical_param_string)\nif not exists(physical_param_folder): mkdir(physical_param_folder)\nsavefolder = join(physical_param_folder,algo_param_string)\nif not exists(savefolder): mkdir(savefolder)\ncopyfile(join(codefolder,\"doublewell_params.py\"),join(savefolder,\"doublewell_params.py\"))\n# -------------------------\nnp.random.seed(0)\n# -------------------------\n\n# ------------------------\n# 1. Initialize the model\nmodel = DoubleWellModel(physical_params)\n# ------------------------\n\n# -------------------------------------------\n# 2 Find the least action pathway\nif least_action_flag:\n model_lap = DoubleWellModel(physical_params)\n model_lap.minimize_action(50.0,physical_param_folder,dirn=-1,maxiter=100)\n model_lap.minimize_action(50.0,physical_param_folder,dirn=1,maxiter=100)\n# Plot least action\nmodel.plot_least_action(physical_param_folder)\n# --------------------------------------------\n\n# -------------------------------------------\n# 2. 
Long simulation\nlong_simfolder,t_long,x_long = model.generate_data_long(simfolder,algo_params,run_long_flag=run_long_flag)\nseed_weights = helper.reweight_data(x_long,model.sampling_features,model.sampling_density)\n# -------------------------------------------\n\n# -------------------------------------------\n# Run short trajectories, either with single- or multiprocessor\nshort_simfolder = model.generate_data_short_multithreaded(x_long,simfolder,algo_params,seed_weights,run_short_flag=run_short_flag,overwrite_flag=False)\n# ---------------------------------------------\n\n# ---------------------------------------------\n# Initialize TPT\ntpt = TPT(algo_params,physical_param_folder,long_simfolder,short_simfolder,savefolder)\n# Initialize data\ndata = tpt.compile_data(model)\n# Initialize function approximator as MSM basis\nfunction = function_obj.MSMBasis(algo_params)\n# ------------------------------------------------\n\n# -----------------------------------------------\n# Perform DGA\nif compute_tpt_flag:\n tpt.label_x_long(model)\n tpt.compute_change_of_measure(model,data,function)\n tpt.compute_dam_moments_abba(model,data,function,num_moments=3) \n tpt.compute_mfpt_unconditional(model,data,function)\n pickle.dump(tpt,open(join(savefolder,\"tpt\"),\"wb\"))\n tpt.write_compare_generalized_rates(model,data)\n pickle.dump(tpt,open(join(savefolder,\"tpt\"),\"wb\"))\ntpt = pickle.load(open(join(savefolder,\"tpt\"),\"rb\"))\n#tpt.write_compare_generalized_rates(model,data)\n#pickle.dump(tpt,open(join(savefolder,\"tpt\"),\"wb\"))\n# Computation: done\n# -------------------------------------------\n\n# -------------------------------------------\n# Displays: starting\n# -------------------------------------------\nfunlib = model.observable_function_library()\n\n# ------------------------------------------\n# Long trajectory plots\n# 1D\nlong_fun = funlib[\"x0\"]\ntpt.plot_field_long(model,data,long_fun['fun'](data.X[:,0]),long_fun['name'],'x0',field_fun=long_fun['fun'],units=long_fun['units'],tmax=150)\n# 2D\nfield_abbs = [\"x0\",\"x1\"]\nfieldnames = [funlib[f][\"name\"] for f in field_abbs]\nfield_funs = [funlib[f][\"fun\"] for f in field_abbs]\nfield_units = [funlib[f][\"units\"] for f in field_abbs]\nfield_unit_symbols = [funlib[f][\"unit_symbol\"] for f in field_abbs]\ntpt.plot_field_long_2d(model,data,fieldnames,field_funs,field_abbs,units=field_units,tmax=150,field_unit_symbols=field_unit_symbols)\n# -------------------------------------------\n\n# -------------------------------------------\n# Casts and currents\ntheta_2d_abbs = [[\"x0\",\"x1\"]]\nprint(\"About to start displaying casts\")\nfor i in range(len(theta_2d_abbs)):\n tpt.display_casts_abba(model,data,theta_2d_abbs[i:i+1])\n tpt.display_2d_currents(model,data,theta_2d_abbs[i:i+1])\n# -------------------------------------------\n\n# -------------------------------------------\n# Validation\ntheta_1d_fun = lambda x: x[:,:1]\ntheta_1d_name = r\"$x_0$\"\ntheta_1d_units = 1.0\ntheta_2d_fun = lambda x: x[:,:2]\ntheta_2d_names = [r\"$x_0$\",r\"$x_1$\"]\ntheta_2d_units = np.ones(2)\ntheta_2d_unit_symbols = [\"\",\"\"]\ntheta_2d_abbs = 
[\"x0\",\"x1\"]\ntpt.display_change_of_measure_current(model,data,theta_2d_fun,theta_2d_names,theta_2d_units,theta_2d_unit_symbols,theta_2d_abbs)\ntpt.display_change_of_measure_validation(model,data,theta_1d_fun,theta_2d_fun,theta_1d_name,theta_2d_names,theta_1d_units,theta_2d_units)\ntpt.display_dam_moments_abba_current(model,data,theta_2d_fun,theta_2d_names,theta_2d_units,theta_2d_unit_symbols,theta_2d_abbs)\ntpt.display_dam_moments_abba_validation(model,data,theta_1d_fun,theta_2d_fun,theta_1d_name,theta_2d_names,theta_1d_units,theta_2d_units)\n# ----------------------------\n","repo_name":"justinfocus12/SHORT","sub_path":"doublewell_driver.py","file_name":"doublewell_driver.py","file_ext":"py","file_size_in_byte":6314,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"19113629871","text":"'''\n\nRemove empty strings from a list of strings\n\ninput:\nstr_list = [\"Emma\", \"Jon\", \"\", \"Kelly\", None, \"Eric\", \"\"]\n\noutput:\n['Emma', 'Jon', 'Kelly', 'Eric']\n\n'''\n\ndef remove_empty(s):\n string = []\n for i in s:\n if i:\n string.append(i)\n return string\n\ns1 = remove_empty(['q','','er',' qr'])\nprint(s1)","repo_name":"vithanivyom/Python_","sub_path":"4/4_11_remove_empty_string.py","file_name":"4_11_remove_empty_string.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31834610286","text":"from queue import Queue\n\ndef read_maze(fileMaze=\"maze-for-me.txt\"):\n maze = [list(line.replace(\"#\", \"1\").replace(\" \", \"0\")) for line in open(fileMaze).read().split(\"\\n\")[:-1]]\n return maze\n\n# Find out the maze dimensions\ntry:\n maze = read_maze()\n size_x = len(maze)\n size_y = len(maze[0])\nexcept Exception:\n print(\"Source file not found!\")\n\ndef near(x, y, maze):\n near = []\n if y == 0:\n if x == 0:\n near = [[\"#\", maze[x][y + 1]],\n [maze[x + 1][y], maze[x + 1][y + 1]]]\n elif x == (size_x - 1):\n near = [[maze[x - 1][y], maze[x - 1][y + 1]],\n [\"#\", maze[x][y + 1]]]\n else:\n near = [[maze[x - 1][y], maze[x - 1][y + 1]],\n [\"#\", maze[x][y + 1]],\n [maze[x + 1][y], maze[x + 1][y + 1]]]\n elif y == (size_y - 1):\n if x == 0:\n near = [[maze[x][y - 1], \"#\"],\n [maze[x + 1][y - 1], maze[x + 1][y]]]\n elif x == (size_x - 1):\n near = [[maze[x - 1][y - 1], maze[x - 1][y]],\n [maze[x][y - 1], \"#\"]]\n else:\n near = [[maze[x - 1][y - 1], maze[x - 1][y]],\n [maze[x][y - 1], \"#\"],\n [maze[x + 1][y - 1], maze[x + 1][y]]]\n elif x == 0:\n near = [[maze[x][y - 1], \"#\", maze[x][y + 1]],\n [maze[x + 1][y - 1], maze[x + 1][y], maze[x + 1][y + 1]]]\n elif x == (size_x - 1):\n near = [[maze[x - 1][y - 1], maze[x - 1][y], maze[x - 1][y + 1]],\n [maze[x][y - 1], \"#\", maze[x][y + 1]]]\n else:\n near = [[maze[x - 1][y - 1], maze[x - 1][y], maze[x - 1][y + 1]],\n [maze[x][y - 1], \"#\", maze[x][y + 1]],\n [maze[x + 1][y - 1], maze[x + 1][y], maze[x + 1][y + 1]]]\n return near # possible paths\n\ndef sel_start(maze): # find out the entrance coordinates\n while 1:\n try:\n x = int(input(\"Enter entry point X: \"))\n if x >= 0 and x < size_x:\n y = int(input(\"Enter entry point Y: \"))\n if y >= 0 and y < size_y:\n if str(maze[x][y]) == \"1\":\n print(\"\\nThere is no entrance here..\")\n else:\n return (x, y)\n else:\n print(\"\\nYou went off the map\")\n continue\n else:\n print(\"\\nYou went off the map\")\n continue\n except Exception:\n print(\"\\nSomething went wrong, let's try again\")\n\ndef sel_tresuaerr(maze): # find out the coordinates of our treasure
\n while 1:\n try:\n x = int(input(\"Key coordinate X: \"))\n if x >= 0 and x < size_x:\n y = int(input(\"Key coordinate Y: \"))\n if y >= 0 and y < size_y:\n if str(maze[x][y]) == \"1\":\n print(\"\\nThere is a wall here\")\n else:\n return (x, y)\n else:\n print(\"\\nYou went off the map\")\n continue\n else:\n print(\"\\nYou went off the map\")\n continue\n except Exception:\n print(\"\\nSomething went wrong, let's try again\")\n\ndef sel_exit(maze): # find an available exit\n for y in range(size_y):\n if int(maze[size_x-1][y]) == 0:\n return (size_x - 1,y)\n\nstart = sel_start(maze) # set the start point\nzvezda = sel_tresuaerr(maze) # set the treasure point\nend = sel_exit(maze) # set the exit\n\n\ndef get_moves(pos, maze): # available moves (up, down, left, right)\n moves = []\n if pos[0] > 0 and not int(maze[pos[0] - 1][pos[1]]):\n moves.append((pos[0] - 1, pos[1]))\n if pos[0] < size_x - 1 and not int(maze[pos[0] + 1][pos[1]]):\n moves.append((pos[0] + 1, pos[1]))\n if pos[1] > 0 and not int(maze[pos[0]][pos[1] - 1]):\n moves.append((pos[0], pos[1] - 1))\n if pos[1] < size_y - 1 and not int(maze[pos[0]][pos[1] + 1]):\n moves.append((pos[0], pos[1] + 1))\n return moves\n\ndef bfs(start, end): # do a breadth-first search \n queue = Queue()\n queue.put((start, [start]))\n while not queue.empty():\n pos, path = queue.get()\n if pos == end:\n return path\n for move in get_moves(pos, maze):\n if move not in path:\n queue.put((move, path + [move]))\n return None\n\nway_bfs = bfs(start, zvezda) # breadth-first search UP TO the treasure\n\ndef heuristic(current, end): # estimate the cost of reaching the exit\n return abs(current[0] - end[0]) + abs(current[1] - end[1])\n\ndef a(maze, start, end): # do the A-star method\n open_list = [start]\n closed_list = []\n came_from = {}\n g_score = {start: 0}\n f_score = {start: heuristic(start, end)}\n\n while open_list:\n current = min(open_list, key=lambda x: f_score[x])\n if current == end:\n path = [end]\n while current in came_from:\n current = came_from[current]\n path.append(current)\n path.reverse()\n return path\n\n open_list.remove(current)\n closed_list.append(current)\n\n for neighbor in get_moves(current, maze):\n if neighbor in closed_list:\n continue\n tentative_g_score = g_score[current] + 1\n if neighbor not in open_list or tentative_g_score < g_score[neighbor]:\n came_from[neighbor] = current\n g_score[neighbor] = tentative_g_score\n f_score[neighbor] = tentative_g_score + heuristic(neighbor, end)\n if neighbor not in open_list:\n open_list.append(neighbor)\n\n return None\n\nway_a = a(maze, zvezda, end) # find the path from the treasure to the exit\n\ntry:\n for point in way_bfs:\n maze[point[0]][point[1]] = \".\" # from the entrance to the treasure\n\n # Record the path from the key to the exit\n for point in way_a:\n maze[point[0]][point[1]] = \",\" # from the treasure to the exit\n\n maze[zvezda[0]][zvezda[1]] = \"*\"\n\n with open(\"itog.txt\", \"w\") as f:\n f.write('\\n'.join([''.join(map(str, line)) for line in maze]))\nexcept Exception:\n print(\"Path search error\") \n\nwith open('itog.txt') as f:\n text = f.read()\n \ntext = text.replace('0', ' ').replace('1', '#')\n \nwith open('itog.txt', 'w') as f:\n f.write(text)\n","repo_name":"kit8nino/2023-python","sub_path":"ИС34/Рылова Екатерина/Lab3.py","file_name":"Lab3.py","file_ext":"py","file_size_in_byte":6998,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"37861843112","text":"# coding=utf-8\nimport numpy as np\nimport 
cv2\nimport json\nfrom guidblur import guideFilter\n\ndef singleScaleRetinex(img, sigma):\n '''Single-scale Retinex function'''\n\n retinex = np.log10(img) - np.log10(cv2.GaussianBlur(img, (0, 0), sigma))\n return retinex\n\n\ndef multiScaleRetinex(img, sigma_list):\n '''Multi-scale Retinex function'''\n # Allocate space in advance\n retinex = np.zeros_like(img)\n # Iterate over all scales\n for sigma in sigma_list:\n # Accumulate the computed results\n retinex += singleScaleRetinex(img, sigma)\n # Compute the average over all scales\n retinex = retinex / len(sigma_list)\n return retinex\n\n\ndef colorRestoration(img, alpha, beta):\n '''Color restoration function'''\n img_sum = np.sum(img, axis=2, keepdims=True)\n color_restoration = beta * (np.log10(alpha * img) - np.log10(img_sum))\n return color_restoration\n\n\ndef simplestColorBalance(img, low_clip, high_clip):\n '''Simplest color balance function'''\n total = img.shape[0] * img.shape[1]\n for i in range(img.shape[2]):\n unique, counts = np.unique(img[:, :, i], return_counts=True)\n current = 0\n for u, c in zip(unique, counts):\n if float(current) / total < low_clip:\n low_val = u\n if float(current) / total < high_clip:\n high_val = u\n current += c\n img[:, :, i] = np.maximum(np.minimum(img[:, :, i], high_val), low_val)\n return img\n\n\ndef MSRCR(img, sigma_list, G, b, alpha, beta, low_clip, high_clip):\n '''MSRCR function'''\n img = np.float64(img) + 1.0\n # First apply multi-scale Retinex to the original image\n img_retinex = multiScaleRetinex(img, sigma_list)\n # Apply color restoration to the original image\n img_color = colorRestoration(img, alpha, beta)\n # Fuse the images\n img_msrcr = G * (img_retinex * img_color + b)\n\n for i in range(img_msrcr.shape[2]):\n img_msrcr[:, :, i] = (img_msrcr[:, :, i] - np.min(img_msrcr[:, :, i])) / \\\n (np.max(img_msrcr[:, :, i]) - np.min(img_msrcr[:, :, i])) * \\\n 255\n # Clip the image to the [0,255] range\n img_msrcr = np.uint8(np.minimum(np.maximum(img_msrcr, 0), 255))\n # Apply the simple color balance\n img_msrcr = simplestColorBalance(img_msrcr, low_clip, high_clip)\n return img_msrcr\n\n\ndef automatedMSRCR(img, sigma_list):\n '''Automated MSRCR function'''\n img = np.float64(img) + 1.0\n img_retinex = multiScaleRetinex(img, sigma_list)\n for i in range(img_retinex.shape[2]):\n unique, count = np.unique(np.int32(img_retinex[:, :, i] * 100), return_counts=True)\n for u, c in zip(unique, count):\n if u == 0:\n zero_count = c\n break\n\n low_val = unique[0] / 100.0\n high_val = unique[-1] / 100.0\n for u, c in zip(unique, count):\n if u < 0 and c < zero_count * 0.1:\n low_val = u / 100.0\n if u > 0 and c < zero_count * 0.1:\n high_val = u / 100.0\n break\n img_retinex[:, :, i] = np.maximum(np.minimum(img_retinex[:, :, i], high_val), low_val)\n img_retinex[:, :, i] = (img_retinex[:, :, i] - np.min(img_retinex[:, :, i])) / \\\n (np.max(img_retinex[:, :, i]) - np.min(img_retinex[:, :, i])) \\\n * 255\n img_retinex = np.uint8(img_retinex)\n return img_retinex\n\n\ndef MSRCP(img, sigma_list, low_clip, high_clip):\n '''MSRCP function'''\n img = np.float64(img) + 1.0\n intensity = np.sum(img, axis=2) / img.shape[2]\n retinex = multiScaleRetinex(intensity, sigma_list)\n intensity = np.expand_dims(intensity, 2)\n retinex = np.expand_dims(retinex, 2)\n intensity1 = simplestColorBalance(retinex, low_clip, high_clip)\n intensity1 = (intensity1 - np.min(intensity1)) / \\\n (np.max(intensity1) - np.min(intensity1)) * \\\n 255.0 + 1.0\n img_msrcp = np.zeros_like(img)\n\n for y in range(img_msrcp.shape[0]):\n for x in range(img_msrcp.shape[1]):\n B = np.max(img[y, x])\n A = np.minimum(256.0 / B, intensity1[y, x, 0] / intensity[y, x, 0])\n img_msrcp[y, x, 0] = A * img[y, x, 0]\n img_msrcp[y, x, 1] = A * img[y, x, 1]\n img_msrcp[y, x, 2] = A * img[y, x, 2]\n img_msrcp = np.uint8(img_msrcp - 
1.0)\n return img_msrcp\n\n\nif __name__ == '__main__':\n # eps = 0.01\n # winSize = (3, 3) # similar to a convolution kernel (the larger the number, the stronger the smoothing)\n # image = cv2.imread(r'202-233/10.jpg', cv2.IMREAD_ANYCOLOR)\n # image = cv2.resize(image, None, fx=0.8, fy=0.8, interpolation=cv2.INTER_CUBIC)\n # I = image / 255.0 # normalize the image\n # p = I\n # s = 3 # step size\n # guideFilter_img = guideFilter(I, p, winSize, eps, s)\n #\n\n # # cv2.imshow(\"image\", image)\n # cv2.imshow(\"winSize_16\", guideFilter_img)\n\n with open('config.json', 'r') as f:\n config = json.load(f)\n img1_path = '../202-233/10.jpg'\n img = cv2.imread(img1_path)\n # img=img[100:551,500:951]\n\n\n img_msrcr = MSRCR(\n img,\n config['sigma_list'],\n config['G'],\n config['b'],\n config['alpha'],\n config['beta'],\n config['low_clip'],\n config['high_clip']\n )\n\n img_amsrcr = automatedMSRCR(\n img,\n config['sigma_list']\n )\n\n img_msrcp = MSRCP(\n img,\n config['sigma_list'],\n config['low_clip'],\n config['high_clip']\n )\n\n\n cv2.imshow('Image', img)\n cv2.imshow('retinex', img_msrcr)\n cv2.imshow('Automated retinex', img_amsrcr)\n cv2.imshow('MSRCP', img_msrcp)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n","repo_name":"PengQ0203/Torch","sub_path":"CV/main/retniex.py","file_name":"retniex.py","file_ext":"py","file_size_in_byte":5756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72353709314","text":"\"\"\"\nGiven an integer array, output all the unique pairs that sum up to a specific value k.\n\nSo the input:\n\npair_sum([1,3,2,2],4)\nwould return 2 pairs:\n\n (1,3)\n (2,2)\n \"\"\"\n\n # O(N) - set data structure\ndef pair_sum(arr, k):\n # edge case\n if len(arr)<2:\n return\n \n # sets for tracking - sets get rid of duplicates \n seen = set()\n output = set()\n\n # do a linear scan of the array \n for num in arr:\n # set target as k - the current element = what you want to search for \n target = k-num\n # add target if its not recorded in set seen yet\n # note that k = num + target\n if target not in seen:\n seen.add(num)\n # output is a tuple that contains the minimum of num, target and max of num, target\n else:\n # min first then max to order the tuple\n output.add(((min(num,target), max(num, target))))\n \n # map() iterates over all elements of list(output) and makes them strings\n # print(\"\\n\".join(map(str,list(output))))\n\n # returns number of unique pairs that add up to k \n return len(output)\n\n# testing\n# pair_sum([1,3,2,2],4)","repo_name":"ecyoung/data-structures-algorithms","sub_path":"arrays/array_pair_sum.py","file_name":"array_pair_sum.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11272357889","text":"import os\nimport cv2\nimport glob\n\nINPUT_FOLDER = 'data/output/*'\nOUTPUT_FOLDER = 'data/output_frames/'\n\ncolumns = 7\nrows = 4\nimage_count = 0\n\nimages = glob.glob(INPUT_FOLDER)\n\nfor image in images:\n im = cv2.imread(image)\n width = im.shape[1]\n height = im.shape[0]\n step_x = width // columns\n step_y = height // rows\n\n x = 0\n y = 0\n while x + step_x <= width:\n y = 0\n while y + step_y <= height:\n temp_im = im[y:y + step_y, x:x + step_x]\n cv2.imwrite(f'{OUTPUT_FOLDER}{image_count}.png', temp_im)\n image_count += 1\n y += step_y\n x += 
step_x\n","repo_name":"MatthewYancey/ml_art","sub_path":"src/image_processing_cut_samples.py","file_name":"image_processing_cut_samples.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72329233793","text":"import argparse\n\nimport numpy as np\nfrom helper import broadcast\n\n\ndef parse_args() -> argparse.Namespace:\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-v\",\n \"--values\",\n action=\"store_true\",\n help=\"not only print shapes, but also values of the arrays\"\n )\n return parser.parse_args()\n\n\n# General information:\n#\n# The broadcasting rules apply to arithmetic element-wise operations\n# (*, /, +, -, ...) between two (multi-dimensional) vectors.\n#\n# The rules are:\n# 1. if the two vectors do not have the same number of dimensions,\n# expand the one with fewer dimensions with ones to the left\n# until they have the same number of dimensions\n#\n# 2. if shapes are not compatible, for each dimension do the following:\n# check for compatibility of the dimensions:\n# - they are equal --> do nothing\n# - one of them is 1 -->\n# repeat it to match the other dimension\n# (this is only conceptually true, in the actual implementation\n# the values are simply reused to save memory)\n# - else --> fail\n#\n# 3. now that the shapes match exactly, perform the element-wise\n# operation on the two vectors\n\n\ndef run(args: argparse.Namespace):\n # matching example\n print(\"Example: matching shapes\")\n a = np.array([1, 2, 3, 4])\n b = np.array([4, 3, 2, 1])\n res = broadcast(a, b, \"*\", args.values)\n assert res is not None\n assert np.array_equal(res, a * b)\n print()\n\n # non-matching example\n print(\"Example: non-matching shapes\")\n a = np.array([\n [1, 2, 3, 4],\n [5, 6, 7, 8]\n ])\n b = np.array([4, 3, 2, 1])\n res = broadcast(a, b, \"+\", args.values)\n assert res is not None\n assert np.array_equal(res, a + b)\n print()\n\n # scalar example\n print(\"Example: scalar\")\n a = np.array([1, 2, 3, 4])\n b = np.array(2)\n res = broadcast(a, b, \"*\", args.values)\n assert res is not None\n assert np.array_equal(res, a * b)\n print()\n\n # advanced example\n # use broadcasting to create grids of values,\n # e.g. scale a with every value in b\n print(\"Example: grid\")\n a = np.array([[1, 2, 3, 4]]) # 1 x 4\n b = np.array([[4], [3], [2], [1]]) # 4 x 1\n res = broadcast(a, b, \"*\", args.values)\n assert res is not None\n assert np.array_equal(res, a * b)\n print()\n\n # real world example: shifting image\n # we have a 10 x 10 image with rgb values (range [0..255]),\n # and mean rgb values, e.g. 
estimated from a training set\n print(\"Example: image\")\n image = np.random.randint(0, 256, (10, 10, 3))\n mean = np.array([127, 125, 130])\n res = broadcast(image, mean, \"-\", args.values)\n assert res is not None\n assert np.array_equal(res, image - mean)\n\n\nif __name__ == \"__main__\":\n run(parse_args())\n","repo_name":"ad-freiburg/ir_additional_material","sub_path":"broadcasting/broadcasting.py","file_name":"broadcasting.py","file_ext":"py","file_size_in_byte":2804,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"17886527195","text":"import logging\nimport configparser\nfrom pathlib import Path\n\n# Config variables & logger\nconfig = configparser.ConfigParser()\nconfig.read('config/config.ini')\nLOGGER_NAME = config.get(\"LOGGER\", \"LOGGER_NAME\")\nlogger = logging.getLogger(LOGGER_NAME)\n\n# Parser Globals\nLINE_SEP = \"\\n\"\nVALUE_SEP = \",\"\n\n\nclass PuzzleParser():\n @classmethod\n def parsePuzzle(cls, file_path):\n logger.info(f\"loading puzzle data from {file_path}\")\n with Path(file_path).open() as puzzle:\n size_line = cls._nextLine(puzzle)\n puzzle_size = int(size_line[0])\n values = []\n for _ in range(puzzle_size):\n row = cls._nextLine(puzzle)\n row_values = cls._getValues(row)\n values += cls._parseValuesFromStrings(puzzle_size, row_values)\n return (puzzle_size, values)\n\n @classmethod\n def printPuzzle(cls, puzzle, level=logging.INFO):\n puzzle_size = puzzle.puzzle_size\n values = [v.value for v in puzzle.variables]\n logger.log(level, '---Current Solution---')\n for i in range(puzzle_size):\n start_row = 0 if (i <= 0) else (i * puzzle_size)\n end_row = len(values) if (i >= puzzle_size) else ((i + 1) * puzzle_size)\n row = cls._parseStringsFromValues(puzzle_size, values[start_row:end_row])\n logger.log(level, VALUE_SEP.join(row))\n logger.log(level, '---------------------\\n')\n\n @staticmethod\n def _nextLine(file):\n return file.readline().replace(LINE_SEP, '')\n\n @staticmethod\n def _getValues(row):\n return row.split(VALUE_SEP)\n\n @staticmethod\n def _parseValuesFromStrings(puzzle_size, values):\n value_map = {\n '_': None,\n }\n for i in range(1, puzzle_size + 1):\n value_map[str(i)] = i\n return[value_map.get(val, \"Invalid character used in puzzle definition\") for val in values]\n\n @staticmethod\n def _parseStringsFromValues(puzzle_size, values):\n value_map = {\n '_': None,\n }\n for i in range(1, puzzle_size + 1):\n value_map[str(i)] = i\n inv_map = {v: k for k, v in value_map.items()}\n return[inv_map.get(val, \"Invalid character used in puzzle definition\") for val in values]\n","repo_name":"Dtphelan1/sudoku-csp-solver","sub_path":"classes/PuzzleParser.py","file_name":"PuzzleParser.py","file_ext":"py","file_size_in_byte":2286,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"44474414496","text":"from . import verilator_python as pybind\nfrom gnuradio import gr\nimport os\nimport tempfile\nimport subprocess\nimport traceback\nfrom . 
import template\n\n\nWRAPPER_H = 'verilator_tb_axis.h'\nWRAPPER_CPP = 'verilator_tb_axis.cpp'\nOBJECT_DIRECTORY = 'obj_dir'\nCPP_CLASS_PREFIX = 'DesignUnderTest'\nOBJECT_SO = 'dut.so'\n\n\nclass WorkingDirectory(object):\n def __init__(self, path):\n self._path = path\n self._origin = os.getcwd()\n\n def __enter__(self):\n os.chdir(self._path)\n def __exit__(self, exc_type, exc_val, exc_tb):\n os.chdir(self._origin)\n\n\nclass axis_xx(object):\n def __init__(self, verilog_file_path, io_ratio=1, options=None, trace=False):\n self.logger = gr.logger(self.alias())\n self.data_width = 64\n self.heart = None\n self.trace_filepath = None\n\n ##################################################\n # Parameters\n ##################################################\n if trace:\n (script_path, line_number, function_name, text_line)=traceback.extract_stack()[-3]\n trace_dirname = os.path.dirname(script_path)\n pos_verilator_axis = text_line.find('verilator_axis')\n pos_assignment = text_line.find('=')\n if pos_verilator_axis >= 0 and pos_assignment >= 0:\n trace_filename = text_line[pos_verilator_axis:pos_assignment].strip()\n self.trace_filepath = os.path.join(trace_dirname, f'{trace_filename}.vcd')\n\n self.verilog = verilog_file_path\n self.io_ratio = io_ratio\n self.verilator_options = options\n\n def build(self):\n verilog_dirpath = os.path.dirname(self.verilog)\n\n with tempfile.TemporaryDirectory() as build_dirpath:\n with open(f'{build_dirpath}/{WRAPPER_H}','w') as f:\n f.write(template.h.content)\n\n with open(f'{build_dirpath}/{WRAPPER_CPP}','w') as f:\n f.write(template.cpp.content)\n\n # Build shared library\n with WorkingDirectory(build_dirpath):\n verilator_args = [\n 'verilator',\n '-Mdir', OBJECT_DIRECTORY,\n '--prefix', CPP_CLASS_PREFIX,\n '--cc',\n '--exe',\n f'-GDATA_WIDTH={self.data_width}',\n '-o', OBJECT_SO,\n '-y', verilog_dirpath,\n '-Wno-fatal',\n self.verilog,\n WRAPPER_CPP\n ]\n if self.verilator_options and isinstance(self.verilator_options, list):\n verilator_args.extend(self.verilator_options)\n if (self.trace_filepath):\n verilator_args.extend(['-CFLAGS', f'-fPIC --std=c++11 -Wall -DDUT_TRACE=\"{self.trace_filepath}\"', '--trace'])\n else:\n verilator_args.extend(['-CFLAGS', '-fPIC --std=c++11 -Wall'])\n subprocess.run(verilator_args, stdout=subprocess.PIPE, check=True)\n\n make_args = [\n 'make',\n '-j',\n '-C', OBJECT_DIRECTORY,\n '-f', f'{CPP_CLASS_PREFIX}.mk'\n ]\n make_env = {**os.environ, 'USER_LDFLAGS': '-shared'}\n self.logger.info(f'Building {OBJECT_SO}. 
It takes a while...')\n subprocess.run(make_args, env=make_env, stdout=subprocess.PIPE, check=True)\n\n libso_filepath = os.path.join(build_dirpath, OBJECT_DIRECTORY, OBJECT_SO)\n if not os.path.exists(libso_filepath):\n raise Exception(f'Expects \"{libso_filepath}\"')\n\n self.heart = self.new_axis(libso_filepath)\n\n # Must be overrided\n def new_axis(self, libso_filepath):\n raise Exception(f'Undefined type')\n\n\nclass axis_ii(gr.hier_block2, axis_xx):\n \"\"\"\n Axi-stream instance for int32 data type\n \"\"\"\n def __init__(self, verilog_file_path, **kwargs):\n gr.hier_block2.__init__(\n self,\n \"Verilator AXI-Stream\",\n gr.io_signature(1, 1, gr.sizeof_int),\n gr.io_signature(1, 1, gr.sizeof_int)\n )\n axis_xx.__init__(self, verilog_file_path, **kwargs)\n self.data_width = 32\n self.build()\n\n ##################################################\n # Conections\n ##################################################\n self.connect((self, 0), (self.heart, 0))\n self.connect((self.heart, 0), (self, 0))\n\n def new_axis(self, libso_filepath):\n return pybind.axis_ii(\n libso_filepath,\n ''\n )\n","repo_name":"korolv/gr-verilator","sub_path":"python/verilator/axis_xx.py","file_name":"axis_xx.py","file_ext":"py","file_size_in_byte":4690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29374973691","text":"#!/usr/bin/python\n# (c) 2018, NetApp, Inc\n# GNU General Public License v3.0+ (see COPYING or\n# https://www.gnu.org/licenses/gpl-3.0.txt)\n\n'''\nElement Software Node Drives\n'''\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'certified'}\n\n\nDOCUMENTATION = '''\n\nmodule: na_elementsw_drive\n\nshort_description: NetApp Element Software Manage Node Drives\nextends_documentation_fragment:\n - netapp.solidfire\nversion_added: '2.7'\nauthor: NetApp Ansible Team (ng-ansibleteam@netapp.com)\ndescription:\n - Add, Erase or Remove drive for nodes on Element Software Cluster.\n\noptions:\n drive_id:\n description:\n - Drive ID or Serial Name of Node drive.\n - If not specified, add and remove action will be performed on all drives of node_id\n\n state:\n description:\n - Element SW Storage Drive operation state.\n - present - To add drive of node to participate in cluster data storage.\n - absent - To remove the drive from being part of active cluster.\n - clean - Clean-up any residual data persistent on a *removed* drive in a secured method.\n choices: ['present', 'absent', 'clean']\n default: 'present'\n\n node_id:\n description:\n - ID or Name of cluster node.\n required: true\n\n force_during_upgrade:\n description:\n - Flag to force drive operation during upgrade.\n type: 'bool'\n\n force_during_bin_sync:\n description:\n - Flag to force during a bin sync operation.\n type: 'bool'\n'''\n\nEXAMPLES = \"\"\"\n - name: Add drive with status available to cluster\n tags:\n - elementsw_add_drive\n na_element_drive:\n hostname: \"{{ elementsw_hostname }}\"\n username: \"{{ elementsw_username }}\"\n password: \"{{ elementsw_password }}\"\n state: present\n drive_id: scsi-SATA_SAMSUNG_MZ7LM48S2UJNX0J3221807\n force_during_upgrade: false\n force_during_bin_sync: false\n node_id: sf4805-meg-03\n\n - name: Remove active drive from cluster\n tags:\n - elementsw_remove_drive\n na_element_drive:\n hostname: \"{{ elementsw_hostname }}\"\n username: \"{{ elementsw_username }}\"\n password: \"{{ 
elementsw_password }}\"\n state: absent\n force_during_upgrade: false\n node_id: sf4805-meg-03\n drive_id: scsi-SATA_SAMSUNG_MZ7LM48S2UJNX0J321208\n\n - name: Secure Erase drive\n tags:\n - elemensw_clean_drive\n na_elementsw_drive:\n hostname: \"{{ elementsw_hostname }}\"\n username: \"{{ elementsw_username }}\"\n password: \"{{ elementsw_password }}\"\n state: clean\n drive_id: scsi-SATA_SAMSUNG_MZ7LM48S2UJNX0J432109\n node_id: sf4805-meg-03\n\n - name: Add all the drives of a node to cluster\n tags:\n - elementsw_add_node\n na_elementsw_drive:\n hostname: \"{{ elementsw_hostname }}\"\n username: \"{{ elementsw_username }}\"\n password: \"{{ elementsw_password }}\"\n state: present\n force_during_upgrade: false\n force_during_bin_sync: false\n node_id: sf4805-meg-03\n\n\"\"\"\n\n\nRETURN = \"\"\"\n\nmsg:\n description: Success message\n returned: success\n type: string\n\n\"\"\"\nimport traceback\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils._text import to_native\nimport ansible.module_utils.netapp as netapp_utils\n\nHAS_SF_SDK = netapp_utils.has_sf_sdk()\n\n\nclass ElementSWDrive(object):\n \"\"\"\n Element Software Storage Drive operations\n \"\"\"\n\n def __init__(self):\n self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()\n self.argument_spec.update(dict(\n state=dict(required=False, choices=['present', 'absent', 'clean'], default='present'),\n drive_id=dict(required=False, type='str'),\n node_id=dict(required=True, type='str'),\n force_during_upgrade=dict(required=False, type='bool'),\n force_during_bin_sync=dict(required=False, type='bool')\n ))\n\n self.module = AnsibleModule(\n argument_spec=self.argument_spec,\n supports_check_mode=True\n )\n\n input_params = self.module.params\n\n self.state = input_params['state']\n self.drive_id = input_params['drive_id']\n self.node_id = input_params['node_id']\n self.force_during_upgrade = input_params['force_during_upgrade']\n self.force_during_bin_sync = input_params['force_during_bin_sync']\n\n if HAS_SF_SDK is False:\n self.module.fail_json(\n msg=\"Unable to import the SolidFire Python SDK\")\n else:\n self.sfe = netapp_utils.create_sf_connection(module=self.module)\n\n def get_node_id(self):\n \"\"\"\n Get Node ID\n :description: Find and retrieve node_id from the active cluster\n\n :return: node_id (None if not found)\n :rtype: node_id\n \"\"\"\n if self.node_id is not None:\n list_nodes = self.sfe.list_active_nodes()\n for current_node in list_nodes.nodes:\n if self.node_id == str(current_node.node_id):\n return current_node.node_id\n elif current_node.name == self.node_id:\n self.node_id = current_node.node_id\n return current_node.node_id\n self.node_id = None\n return self.node_id\n\n def get_drives_listby_status(self):\n \"\"\"\n Capture list of drives based on status for a given node_id\n :description: Capture list of active, failed and available drives from a given node_id\n\n :return: None\n \"\"\"\n if self.node_id is not None:\n list_drives = self.sfe.list_drives()\n for drive in list_drives.drives:\n if drive.node_id == self.node_id:\n if drive.status in ['active', 'failed']:\n self.active_drives[drive.serial] = drive.drive_id\n elif drive.status == \"available\":\n self.available_drives[drive.serial] = drive.drive_id\n return None\n\n def get_active_drives(self, drive_id=None):\n \"\"\"\n return a list of active drives\n if drive_id is specified, only [] or [drive_id] is returned\n else all available drives for this node are returned\n \"\"\"\n action_list = list()\n if 
self.drive_id is not None:\n if self.drive_id in self.active_drives.values():\n action_list.append(int(self.drive_id))\n if self.drive_id in self.active_drives:\n action_list.append(self.active_drives[self.drive_id])\n else:\n action_list.extend(self.active_drives.values())\n\n return action_list\n\n def get_available_drives(self, drive_id=None):\n \"\"\"\n return a list of available drives (not active)\n if drive_id is specified, only [] or [drive_id] is returned\n else all available drives for this node are returned\n \"\"\"\n action_list = list()\n if self.drive_id is not None:\n if self.drive_id in self.available_drives.values():\n action_list.append(int(self.drive_id))\n if self.drive_id in self.available_drives:\n action_list.append(self.available_drives[self.drive_id])\n else:\n action_list.extend(self.available_drives.values())\n\n return action_list\n\n def add_drive(self, drives=None):\n \"\"\"\n Add Drive available for Cluster storage expansion\n \"\"\"\n try:\n self.sfe.add_drives(drives,\n force_during_upgrade=self.force_during_upgrade,\n force_during_bin_sync=self.force_during_bin_sync)\n except Exception as exception_object:\n self.module.fail_json(msg='Error add drive to cluster %s' % (to_native(exception_object)),\n exception=traceback.format_exc())\n\n def remove_drive(self, drives=None):\n \"\"\"\n Remove Drive active in Cluster\n \"\"\"\n try:\n self.sfe.remove_drives(drives,\n force_during_upgrade=self.force_during_upgrade)\n except Exception as exception_object:\n self.module.fail_json(msg='Error remove drive from cluster %s' % (to_native(exception_object)),\n exception=traceback.format_exc())\n\n def secure_erase(self, drives=None):\n \"\"\"\n Secure Erase any residual data existing on a drive\n \"\"\"\n try:\n self.sfe.secure_erase_drives(drives)\n except Exception as exception_object:\n self.module.fail_json(msg='Error clean data from drive %s' % (to_native(exception_object)),\n exception=traceback.format_exc())\n\n def apply(self):\n \"\"\"\n Check, process and initiate Drive operation\n \"\"\"\n changed = False\n result_message = None\n self.active_drives = {}\n self.available_drives = {}\n action_list = []\n self.get_node_id()\n self.get_drives_listby_status()\n\n if self.module.check_mode is False and self.node_id is not None:\n if self.state == \"present\":\n action_list = self.get_available_drives(self.drive_id)\n if len(action_list) > 0:\n self.add_drive(action_list)\n changed = True\n elif self.drive_id is not None and (self.drive_id in self.active_drives.values() or self.drive_id in self.active_drives):\n changed = False # No action, so setting changed to false\n elif self.drive_id is None and len(self.active_drives) > 0:\n changed = False # No action, so setting changed to false\n else:\n self.module.fail_json(msg='Error - no drive(s) in available state on node to be included in cluster')\n\n elif self.state == \"absent\":\n action_list = self.get_active_drives(self.drive_id)\n if len(action_list) > 0:\n self.remove_drive(action_list)\n changed = True\n\n elif self.state == \"clean\":\n action_list = self.get_available_drives(self.drive_id)\n if len(action_list) > 0:\n self.secure_erase(action_list)\n changed = True\n else:\n self.module.fail_json(msg='Error - no drive(s) in available state on node to be cleaned')\n\n else:\n result_message = \"Skipping changes, No change requested\"\n self.module.exit_json(changed=changed, msg=result_message)\n\n\ndef main():\n \"\"\"\n Main function\n \"\"\"\n\n na_elementsw_drive = ElementSWDrive()\n 
na_elementsw_drive.apply()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"amitvashist7/ansible-development-CTS","sub_path":"molecule/my_env/lib/python2.7/site-packages/ansible/modules/storage/netapp/na_elementsw_drive.py","file_name":"na_elementsw_drive.py","file_ext":"py","file_size_in_byte":11033,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"39575015284","text":"from . import match_regex\nimport base64\nimport re\n\ndef get(folder):\n\tregex = {\n\t\t#\"encrypt\": \"RC4|AES|DES|DESX|3DES|RSA|DSA|ECDSA|IDEA|Blowfish|Twofish|ElGamal|Diffie-Hellman\",\n\t\t\"base64\": \"([A-Za-z0-9+/]{6,}={1,2}|[A-Za-z0-9+/]{6,})\",\n\t\t\"telegram_id\": \"[^0-9](100[0-9]{10})[^0-9]\",\n\t\t\"known\": \"hakon|standby|LoaderGGPlay|AmexTroll|have been Encrypted|killbot|main_wang|vnc_open|keylog_active|sentSMS\",\n\t\t}\n\n\texclude = []\n\n\tresult = {}\n\tfor item in regex.keys():\n\t\tstring_list = match_regex.inFolder(folder, regex[item], exclude)\n\n\t\tif item == \"base64\":\n\t\t\tvalid_base64 = []\n\t\t\texclude = [\"endsWith\", \"Visually\"] # known false positives\n\t\t\tfor string in string_list:\n\t\t\t\t# is odd\n\t\t\t\tif not (len(string) % 2) == 0:\n\t\t\t\t\tcontinue\n\n\t\t\t\t# ok, it is even!\n\t\t\t\t\n\t\t\t\t# validate base64: UPPER, lower\n\t\t\t\tif string not in exclude \\\n\t\t\t\t\tand any(char.isupper() for char in string) \\\n\t\t\t\t\tand any(char.islower() for char in string):\n\t\t\t\t\t\n\t\t\t\t\ttry:\n\t\t\t\t\t\tmessage = base64.b64decode(string).decode('ascii')\n\t\t\t\t\t\t# checks if decoded base64 sting matches the regex\n\t\t\t\t\t\tif None in [re.match(\"[A-Za-z0-9}{)(-+/.=]\", i) for i in message]:\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\texcept:\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tvalid_base64.append((string, message))\n\t\t\tstring_list = valid_base64\n\t\t\n\t\tresult.update({item: string_list})\n\n\treturn result\n","repo_name":"guelfoweb/artifacts","sub_path":"lib/match_strings.py","file_name":"match_strings.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"15796122814","text":"import os\nimport argparse\nimport json\nimport sys\nimport time\n\nfrom object_database import connect\nfrom object_database.util import configureLogging, formatTable, secondsToHumanReadable\nfrom object_database.service_manager.ServiceManager import ServiceManager\nfrom object_database.service_manager.ServiceSchema import service_schema\nfrom object_database.service_manager.aws.AwsWorkerBootService import (\n AwsWorkerBootService,\n AwsApi,\n BootConfig,\n)\nfrom object_database.service_manager.aws.AwsWorkerBootService import (\n schema as aws_worker_boot_schema,\n)\n\n\nDEFAULT_AMI = \"ami-759bc50a\" # ubuntu 16.04 hvm-ssd\n\n\ndef main(argv):\n parser = argparse.ArgumentParser(\"Control the AWS service\")\n\n parser.add_argument(\n \"--hostname\", default=os.getenv(\"ODB_HOST\", \"localhost\"), required=False\n )\n parser.add_argument(\n \"--port\", type=int, default=int(os.getenv(\"ODB_PORT\", 8000)), required=False\n )\n parser.add_argument(\n \"--auth\",\n type=str,\n default=os.getenv(\"ODB_AUTH_TOKEN\"),\n required=False,\n help=\"Auth token to use to connect.\",\n )\n\n subparsers = parser.add_subparsers()\n\n config_parser = subparsers.add_parser(\"config\", help=\"configure the service\")\n config_parser.set_defaults(command=\"config\")\n\n config_parser.add_argument(\"--region\", required=False)\n 
config_parser.add_argument(\"--vpc_id\", required=False)\n config_parser.add_argument(\"--subnet\", required=False)\n config_parser.add_argument(\"--security_group\", required=False)\n config_parser.add_argument(\"--keypair\", required=False)\n config_parser.add_argument(\"--worker_name\", required=False)\n config_parser.add_argument(\"--worker_iam_role_name\", required=False)\n config_parser.add_argument(\"--defaultStorageSize\", required=False, type=int)\n config_parser.add_argument(\"--max_to_boot\", required=False, type=int)\n config_parser.add_argument(\"--available_subnets\", required=False, type=json.loads)\n config_parser.add_argument(\"--cpu_docker_image\", required=False, type=str)\n config_parser.add_argument(\"--cpu_ami\", required=False, type=str)\n config_parser.add_argument(\"--gpu_docker_image\", required=False, type=str)\n config_parser.add_argument(\"--gpu_ami\", required=False, type=str)\n\n install_parser = subparsers.add_parser(\"install\", help=\"install the service\")\n install_parser.set_defaults(command=\"install\")\n\n list_parser = subparsers.add_parser(\"list\", help=\"list machines\")\n list_parser.set_defaults(command=\"list\")\n\n boot_parser = subparsers.add_parser(\"boot\", help=\"set the number of desired boxes\")\n boot_parser.set_defaults(command=\"boot\")\n boot_parser.add_argument(\"instance_type\")\n boot_parser.add_argument(\"placementGroup\")\n boot_parser.add_argument(\"count\", type=int)\n\n killall_parser = subparsers.add_parser(\"killall\", help=\"kill everything\")\n killall_parser.set_defaults(command=\"killall\")\n\n reset_parser = subparsers.add_parser(\"reset\", help=\"kill everything\")\n reset_parser.set_defaults(command=\"reset\")\n\n configureLogging()\n\n parsedArgs = parser.parse_args(argv[1:])\n\n db = connect(parsedArgs.hostname, parsedArgs.port, parsedArgs.auth)\n db.subscribeToSchema(service_schema, lazySubscription=True)\n db.subscribeToSchema(aws_worker_boot_schema)\n\n if parsedArgs.command == \"reset\":\n with db.transaction():\n for s in aws_worker_boot_schema.State.lookupAll():\n s.delete()\n\n if parsedArgs.command == \"config\":\n cpu_boot_config = BootConfig(\n docker_image=parsedArgs.cpu_docker_image,\n ami=parsedArgs.cpu_ami or DEFAULT_AMI,\n )\n gpu_boot_config = BootConfig(\n docker_image=parsedArgs.gpu_docker_image or parsedArgs.cpu_docker_image,\n ami=parsedArgs.gpu_ami or parsedArgs.cpu_ami or DEFAULT_AMI,\n )\n if gpu_boot_config == cpu_boot_config:\n gpu_boot_config = None\n\n with db.transaction():\n AwsWorkerBootService.configure(\n db_hostname=parsedArgs.hostname,\n db_port=parsedArgs.port,\n region=parsedArgs.region,\n vpc_id=parsedArgs.vpc_id,\n default_subnet=parsedArgs.subnet,\n security_group=parsedArgs.security_group,\n keypair=parsedArgs.keypair,\n worker_name=parsedArgs.worker_name,\n worker_iam_role_name=parsedArgs.worker_iam_role_name,\n defaultStorageSize=parsedArgs.defaultStorageSize,\n max_to_boot=parsedArgs.max_to_boot,\n available_subnets=parsedArgs.available_subnets,\n cpu_boot_config=cpu_boot_config,\n gpu_boot_config=gpu_boot_config,\n )\n\n if parsedArgs.command == \"install\":\n with db.transaction():\n ServiceManager.createOrUpdateService(\n AwsWorkerBootService, \"AwsWorkerBootService\", placement=\"Master\"\n )\n\n if parsedArgs.command == \"list\":\n print()\n print()\n\n if False:\n with db.view():\n api = AwsApi()\n booted = AwsWorkerBootService.currentBooted()\n targets = AwsWorkerBootService.currentTargets()\n\n table = [[\"Instance Type\", \"Booted\", \"Desired\"]]\n\n for 
instance_type in sorted(set(list(booted) + list(targets))):\n table.append(\n [\n instance_type,\n booted.get(instance_type, 0),\n targets.get(instance_type, 0),\n ]\n )\n\n print(formatTable(table))\n\n print()\n print()\n\n with db.view():\n api = AwsApi()\n print(api.allSpotRequests())\n for spot in [False, True]:\n table = [[\"InstanceID\", \"InstanceType\", \"IP\", \"Uptime\", \"SPOT\"]]\n for instanceId, instance in api.allRunningInstances(spot=spot).items():\n table.append(\n [\n instance[\"InstanceId\"],\n instance[\"InstanceType\"],\n instance[\"PrivateIpAddress\"],\n secondsToHumanReadable(\n time.time() - instance[\"LaunchTime\"].timestamp()\n ),\n spot,\n ]\n )\n print(formatTable(table))\n\n print()\n print()\n\n if parsedArgs.command == \"boot\":\n with db.transaction():\n AwsWorkerBootService.setBootState(\n parsedArgs.instance_type, parsedArgs.count, parsedArgs.placementGroup\n )\n\n if parsedArgs.command == \"killall\":\n with db.transaction():\n AwsWorkerBootService.shutdownAll()\n\n with db.view():\n api = AwsApi()\n\n for i in api.allRunningInstances():\n try:\n api.terminateInstanceById(i)\n except Exception:\n pass\n\n return 0\n\n\nif __name__ == \"__main__\":\n sys.exit(main(sys.argv))\n","repo_name":"APrioriInvestments/object_database","sub_path":"object_database/frontends/aws_config.py","file_name":"aws_config.py","file_ext":"py","file_size_in_byte":7231,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"70282720836","text":"import json, calendar\nfrom datetime import datetime\n\nfrom django.shortcuts import render\nfrom django.contrib.auth.decorators import login_required\nfrom cashbook.models import Cash\n\n# Create your views here.\n\ndef export_income_data(year):\n income = Cash.objects.filter(stat=True)\n income = income.filter(date__year=year)\n monthly = [0] * 12\n for i in income :\n #print(i.date.month)\n monthly[i.date.month -1] += i.amount/10000\n #print(monthly)\n return monthly\ndef export_expense_data(year):\n expense = Cash.objects.filter(stat=False)\n expense = expense.filter(date__year=year)\n monthly = [0] * 12\n for i in expense:\n monthly[i.date.month - 1] += i.amount / 10000\n return monthly\n\ndef category_div_1(year, month):\n data = Cash.objects.filter(category=\"6\")\n data = data.filter(date__year=year)\n data = data.filter(date__month=month)\n data_sum = 0\n for i in data:\n data_sum += i.amount\n return data_sum\ndef category_div_2(year, month):\n data = Cash.objects.filter(category=\"7\")\n data = data.filter(date__year=year)\n data = data.filter(date__month=month)\n data_sum = 0\n for i in data:\n data_sum += i.amount\n return data_sum\ndef category_div_3(year, month):\n data = Cash.objects.filter(category=\"8\")\n data = data.filter(date__year=year)\n data = data.filter(date__month=month)\n data_sum = 0\n for i in data:\n data_sum += i.amount\n return data_sum\ndef category_div_4(year, month):\n data = Cash.objects.filter(category=\"9\")\n data = data.filter(date__year=year)\n data = data.filter(date__month=month)\n data_sum = 0\n for i in data:\n data_sum += i.amount\n return data_sum\ndef category_div_5(year, month):\n data = Cash.objects.filter(category=\"10\")\n data = data.filter(date__year=year)\n data = data.filter(date__month=month)\n data_sum = 0\n for i in data:\n data_sum += i.amount\n return data_sum\ndef category_div_6(year, month):\n data = Cash.objects.filter(category=\"11\")\n data = data.filter(date__year=year)\n data = data.filter(date__month=month)\n 
data_sum = 0\n for i in data:\n data_sum += i.amount\n return data_sum\ndef category_div_7(year, month):\n data = Cash.objects.filter(category=\"12\")\n data = data.filter(date__year=year)\n data = data.filter(date__month=month)\n data_sum = 0\n for i in data:\n data_sum += i.amount\n return data_sum\ndef category_div_8(year, month):\n data = Cash.objects.filter(category=\"13\")\n data = data.filter(date__year=year)\n data = data.filter(date__month=month)\n data_sum = 0\n for i in data:\n data_sum += i.amount\n return data_sum\ndef category_div_9(year, month):\n data = Cash.objects.filter(category=\"14\")\n data = data.filter(date__year=year)\n data = data.filter(date__month=month)\n data_sum = 0\n for i in data:\n data_sum += i.amount\n return data_sum\n\ndef category_sum(c1,c2,c3,c4,c5,c6,c7,c8,c9):\n total_sum = c1 + c2 + c3 + c4 + c5 + c6 + c7 + c8 + c9\n return total_sum\n\n@login_required(login_url='/login/')\ndef mainchart(request):\n year = datetime.now().year\n month = datetime.now().month\n month_name = calendar.month_name[month]\n\n acash = Cash.objects.all()\n amount_data_in = export_income_data(year)\n json.dumps(amount_data_in)\n amount_data_out = export_expense_data(year)\n json.dumps(amount_data_out)\n\n ct1 = category_div_1(year, month)\n json.dumps(ct1)\n ct2 = category_div_2(year, month)\n json.dumps(ct2)\n ct3 = category_div_3(year, month)\n json.dumps(ct3)\n ct4 = category_div_4(year, month)\n json.dumps(ct4)\n ct5 = category_div_5(year, month)\n json.dumps(ct5)\n ct6 = category_div_6(year, month)\n json.dumps(ct6)\n ct7 = category_div_7(year, month)\n json.dumps(ct7)\n ct8 = category_div_8(year, month)\n json.dumps(ct8)\n ct9 = category_div_9(year, month)\n json.dumps(ct9)\n total_sum = ct1 + ct2 + ct3 + ct4 + ct5 + ct6 + ct7 + ct8 + ct9\n json.dumps(total_sum)\n\n return render(\n request,\n 'expense_chart/main_chart.html',\n {\n 'acash' : acash,\n 'amount_data_in' : amount_data_in,\n 'amount_data_out' : amount_data_out,\n 'year' : year,\n 'month_name' : month_name,\n 'ct1': ct1,\n 'ct2': ct2,\n 'ct3': ct3,\n 'ct4': ct4,\n 'ct5': ct5,\n 'ct6': ct6,\n 'ct7': ct7,\n 'ct8': ct8,\n 'ct9': ct9,\n 'total_sum': total_sum,\n }\n )","repo_name":"seojung87/cloud_project","sub_path":"expense_chart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74629294913","text":"from importlib import metadata\nfrom typing import TYPE_CHECKING, Any, Optional, Sequence\n\nimport django.apps\nfrom django.contrib.auth.signals import user_logged_in, user_logged_out\nfrom django.db.models.signals import post_migrate, pre_migrate\nfrom django.http import HttpRequest\n\nfrom dynamic_preferences.signals import preference_updated\nfrom license_expression import Licensing\nfrom oauthlib.common import Request as OauthlibRequest\nfrom spdx_license_list import LICENSES\n\nfrom .core_helpers import copyright_years\n\nif TYPE_CHECKING:\n from oauth2_provider.models import AbstractApplication\n\n\nclass AppConfig(django.apps.AppConfig):\n \"\"\"An extended version of DJango's AppConfig container.\"\"\"\n\n default_auto_field = \"django.db.models.BigAutoField\"\n\n def ready(self):\n super().ready()\n\n # Register default listeners\n pre_migrate.connect(self.pre_migrate, sender=self)\n post_migrate.connect(self.post_migrate, sender=self)\n preference_updated.connect(self.preference_updated)\n user_logged_in.connect(self.user_logged_in)\n 
user_logged_out.connect(self.user_logged_out)\n\n # Getting an app ready means it should look at its config once\n self.preference_updated(self)\n\n def get_distribution_name(self):\n \"\"\"Get distribution name of application package.\"\"\"\n if hasattr(self, \"dist_name\"):\n return self.dist_name\n elif self.name.lower().startswith(\"aleksis.apps.\"):\n return self.name.lower().replace(\"aleksis.apps.\", \"AlekSIS-App-\")\n\n return None\n\n def get_distribution(self):\n \"\"\"Get distribution of application package.\"\"\"\n dist_name = self.get_distribution_name()\n if dist_name:\n try:\n dist = metadata.distribution(dist_name)\n except metadata.PackageNotFoundError:\n return None\n\n return dist\n\n def get_name(self):\n \"\"\"Get name of application package.\"\"\"\n if hasattr(self, \"verbose_name\"):\n return self.verbose_name\n else:\n dist_name = self.get_distribution_name()\n if dist_name:\n return dist_name\n return self.name\n\n def get_version(self):\n \"\"\"Get version of application package.\"\"\"\n if hasattr(self, \"version\"):\n return self.version\n else:\n dist = self.get_distribution()\n if dist:\n return dist.version\n else:\n return \"unknown\"\n\n @classmethod\n def get_licence(cls) -> tuple:\n \"\"\"Get tuple of licence information of application package.\"\"\"\n # Get string representation of licence in SPDX format\n licence = getattr(cls, \"licence\", None)\n\n default_dict = {\n \"isDeprecatedLicenseId\": False,\n \"isFsfLibre\": False,\n \"isOsiApproved\": False,\n \"licenseId\": \"unknown\",\n \"name\": \"Unknown Licence\",\n \"referenceNumber\": -1,\n \"url\": \"\",\n }\n if licence:\n # Parse licence string into object format\n licensing = Licensing(LICENSES.keys())\n parsed = licensing.parse(licence).simplify()\n readable = parsed.render_as_readable()\n\n # Collect flags about licence combination (drop to False if any licence is False)\n flags = {\n \"isFsfLibre\": True,\n \"isOsiApproved\": True,\n }\n\n # Fill information dictionaries with missing data\n licence_dicts = []\n for symbol in parsed.symbols:\n # Get licence base information, stripping the \"or later\" mark\n licence_dict = LICENSES.get(symbol.key.rstrip(\"+\"), None)\n\n if licence_dict is None:\n # Fall back to the default dict\n licence_dict = default_dict\n else:\n # Add missing licence link to SPDX data\n licence_id = licence_dict[\"licenseId\"]\n licence_dict[\"url\"] = f\"https://spdx.org/licenses/{licence_id}.html\"\n\n # Drop summed up flags to False if this licence is False\n flags[\"isFsfLibre\"] = flags[\"isFsfLibre\"] and licence_dict[\"isFsfLibre\"]\n flags[\"isOsiApproved\"] = flags[\"isOsiApproved\"] and licence_dict[\"isOsiApproved\"]\n\n licence_dicts.append(licence_dict)\n\n return (readable, flags, licence_dicts)\n else:\n # We could not find a valid licence\n return (\"Unknown\", [default_dict])\n\n @classmethod\n def get_urls(cls):\n \"\"\"Get list of URLs for this application package.\"\"\"\n return getattr(cls, \"urls\", {})\n # TODO Try getting from distribution if not set\n\n @classmethod\n def get_copyright(cls) -> Sequence[tuple[str, str, str]]:\n \"\"\"Get copyright information tuples for application package.\"\"\"\n copyrights = getattr(cls, \"copyright_info\", tuple())\n\n copyrights_processed = []\n for copyright_info in copyrights:\n copyrights_processed.append(\n (\n # Sort copyright years and combine year ranges for display\n copyright_info[0]\n if isinstance(copyright_info[0], str)\n else copyright_years(copyright_info[0]),\n copyright_info[1],\n 
copyright_info[2],\n )\n )\n\n return copyrights_processed\n # TODO Try getting from distribution if not set\n\n def preference_updated(\n self,\n sender: Any,\n section: Optional[str] = None,\n name: Optional[str] = None,\n old_value: Optional[Any] = None,\n new_value: Optional[Any] = None,\n **kwargs,\n ) -> None:\n \"\"\"Call on every app instance if a dynamic preference changes, and once on startup.\n\n By default, it does nothing.\n \"\"\"\n pass\n\n def pre_migrate(\n self,\n app_config: django.apps.AppConfig,\n verbosity: int,\n interactive: bool,\n using: str,\n plan: list[tuple],\n apps: django.apps.registry.Apps,\n **kwargs,\n ) -> None:\n \"\"\"Call on every app instance before its models are migrated.\n\n By default, it does nothing.\n \"\"\"\n pass\n\n def post_migrate(\n self,\n app_config: django.apps.AppConfig,\n verbosity: int,\n interactive: bool,\n using: str,\n **kwargs,\n ) -> None:\n \"\"\"Call on every app instance after its models have been migrated.\n\n By default, asks all models to do maintenance on their default data.\n \"\"\"\n self._maintain_default_data()\n\n def user_logged_in(\n self, sender: type, request: Optional[HttpRequest], user: \"User\", **kwargs\n ) -> None:\n \"\"\"Call after a user logged in.\n\n By default, it does nothing.\n \"\"\"\n pass\n\n def user_logged_out(\n self, sender: type, request: Optional[HttpRequest], user: \"User\", **kwargs\n ) -> None:\n \"\"\"Call after a user logged out.\n\n By default, it does nothing.\n \"\"\"\n pass\n\n @classmethod\n def get_all_scopes(cls) -> dict[str, str]:\n \"\"\"Return all OAuth scopes and their descriptions for this app.\"\"\"\n return {}\n\n @classmethod\n def get_available_scopes(\n cls,\n application: Optional[\"AbstractApplication\"] = None,\n request: Optional[HttpRequest] = None,\n *args,\n **kwargs,\n ) -> list[str]:\n \"\"\"Return a list of all OAuth scopes available to the request and application.\"\"\"\n return list(cls.get_all_scopes().keys())\n\n @classmethod\n def get_default_scopes(\n cls,\n application: Optional[\"AbstractApplication\"] = None,\n request: Optional[HttpRequest] = None,\n *args,\n **kwargs,\n ) -> list[str]:\n \"\"\"Return a list of all OAuth scopes to always include for this request and application.\"\"\"\n return []\n\n @classmethod\n def get_additional_claims(cls, scopes: list[str], request: OauthlibRequest) -> dict[str, Any]:\n \"\"\"Get claim data for requested scopes.\"\"\"\n return {}\n\n def _maintain_default_data(self):\n from django.contrib.auth.models import Permission\n from django.contrib.contenttypes.models import ContentType\n\n if not self.models_module:\n # This app does not have any models, so bail out early\n return\n\n for model in self.get_models():\n if hasattr(model, \"maintain_default_data\"):\n # Method implemented by each model object; can be left out\n model.maintain_default_data()\n if hasattr(model, \"extra_permissions\"):\n ct = ContentType.objects.get_for_model(model)\n for perm, verbose_name in model.extra_permissions:\n Permission.objects.get_or_create(\n codename=perm,\n content_type=ct,\n defaults={\"name\": verbose_name},\n )\n","repo_name":"deepanshumehtaa/AlekSIS-Core","sub_path":"aleksis/core/util/apps.py","file_name":"apps.py","file_ext":"py","file_size_in_byte":9173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40001493323","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = 
next\nclass Solution:\n def reverseBetween(self, head: Optional[ListNode], left: int, right: int) -> Optional[ListNode]:\n start, curr, index = head, head, 1\n while index < left:\n start = curr\n curr = curr.next\n index += 1\n \n reverse, tail = None, curr\n \n while index >= left and index <= right:\n next = curr.next\n [curr.next, reverse] = [reverse, curr]\n curr = next\n index += 1\n \n start.next = reverse\n tail.next = curr\n \n return head if left > 1 else reverse\n ","repo_name":"frankudoags/Leetcode-Solutions","sub_path":"0092-reverse-linked-list-ii/0092-reverse-linked-list-ii.py","file_name":"0092-reverse-linked-list-ii.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72287404995","text":"import time\n\nimport adafruit_dht\nimport board\n\ndht = adafruit_dht.DHT11(board.GP16)\n\nwhile True:\n try:\n temperature = dht.temperature\n humidity = dht.humidity\n print(\"Temp: {:.1f} *C \\t Humidity: {}%\".format(temperature, humidity))\n except RuntimeError as e:\n print(\"Reading from DHT failure: \", e.args)\n\n time.sleep(1)","repo_name":"lesp/MakerAdvent2022","sub_path":"Day 12 - DHT11/Code/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"39869941563","text":"import cv2\nimport glob, os\nimport time\n\nwater = cv2.imread('Watermark/watermark.jpg')\nwater = cv2.resize(water,(50,50))\nos.chdir(\"Imgs\")\nlistOfImages = glob.glob(\"*.jpg\")\n\ndef Operacoes(file):\n img = cv2.imread(file)\n\n img = cv2.copyMakeBorder(img,40,10,10,10,cv2.BORDER_CONSTANT,value=[255,255,255])\n\n img = cv2.resize(img,(512,512))\n\n img = cv2.putText(img,file, \n (10,25), \n cv2.FONT_HERSHEY_SIMPLEX, \n 1,\n (0,0,0),\n 2)\n\n x_offset=450\n y_offset=450\n img[y_offset:y_offset+water.shape[0], x_offset:x_offset+water.shape[1]] = water\n\n cv2.imshow('Img',img)\n print(img.shape)\n\nwhile True :\n for file in listOfImages:\n Operacoes(file)\n \n key = cv2.waitKey(5000)\n \n if key == 113:\n break\n elif key == 81:\n break\n elif key == 39:\n continue\n\n if key == 113 :\n cv2.destroyAllWindows()\n break\n if key == 81 :\n cv2.destroyAllWindows()\n break\n elif key == 39:\n continue\n ","repo_name":"ViniciusPierina/ProcImagemAc1","sub_path":"Projeto/aula03/AC1.py","file_name":"AC1.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72058609794","text":"from aiogram import types, Dispatcher\r\nfrom aiogram.dispatcher import FSMContext\r\nfrom config import ADMIN\r\nfrom aiogram.dispatcher.filters.state import State, StatesGroup\r\nfrom config import bot\r\n\r\nclass FSMAdmin(StatesGroup): # Finite State Machine\r\n photo = State()\r\n title = State()\r\n description = State()\r\n price = State()\r\n\r\nasync def fsm_start(message: types.Message):\r\n if message.from_user.id in ADMIN:\r\n await FSMAdmin.photo.set()\r\n await message.answer(f\"Привет {message.from_user.full_name} \"\r\n f\"Отправьте фото блюда.\")\r\n else:\r\n await message.reply(\"Пишите строго в личку!\")\r\n\r\nasync def load_photo(message: types.Message, state: FSMContext):\r\n async with state.proxy() as menu:\r\n menu['id'] = message.from_user.id\r\n menu['username'] = f\"@{message.from_user.username}\"\r\n menu['photo'] = message.photo[0].file_id\r\n await FSMAdmin.next()\r\n await 
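# Self-contained sketch of the watermark paste used in AC1.py above: assign a
# small image into a slice of a larger one. Synthetic arrays stand in for the
# loaded JPEGs; the 450/450 offsets are copied from the original.
import numpy as np

canvas = np.zeros((512, 512, 3), dtype=np.uint8)   # stands in for the photo
water = np.full((50, 50, 3), 255, dtype=np.uint8)  # stands in for the mark
x_offset, y_offset = 450, 450
canvas[y_offset:y_offset + water.shape[0],
       x_offset:x_offset + water.shape[1]] = water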
message.answer(\"Введите название блюда.\")\r\n\r\nasync def load_title(message: types.Message, state: FSMContext):\r\n    async with state.proxy() as menu:\r\n        menu['title'] = message.text\r\n    await FSMAdmin.next()\r\n    await message.answer('Опишите ваше блюдо.')\r\n\r\nasync def load_description(message: types.Message, state: FSMContext):\r\n    async with state.proxy() as menu:\r\n        menu['description'] = message.text\r\n    await FSMAdmin.next()\r\n    await message.answer('Цена вашего блюда.')\r\n\r\nasync def load_price(message: types.Message, state: FSMContext):\r\n    try:\r\n        if int(message.text) < 100000:\r\n            async with state.proxy() as menu:\r\n                menu['price'] = float(message.text)\r\n                await bot.send_photo(message.from_user.id, menu['photo'],\r\n                                     caption=f\"Title: {menu['title']}\\n\"\r\n                                             f\"description: {menu['description']}\\n\"\r\n                                             f\"Price: {menu['price']}\\n\\n\"\r\n                                             f\"{menu['username']}\")\r\n        else:\r\n            await bot.send_message(message.chat.id, 'Да...Дада...')\r\n    except ValueError:\r\n        await bot.send_message(message.chat.id, 'Вводите строго цифры!')\r\n\r\n    await state.finish()\r\n    await message.answer(\"Интересное блюдо\")\r\n\r\ndef register_handlers_fsmAdminmenu(dp: Dispatcher):\r\n    dp.register_message_handler(fsm_start, commands=['menu'])\r\n    dp.register_message_handler(load_photo, state=FSMAdmin.photo,\r\n                                content_types=['photo'])\r\n    dp.register_message_handler(load_title, state=FSMAdmin.title)\r\n    dp.register_message_handler(load_description, state=FSMAdmin.description)\r\n    dp.register_message_handler(load_price, state=FSMAdmin.price)","repo_name":"denis2000001/daaa","sub_path":"handlers/FSMAdminMenu.py","file_name":"FSMAdminMenu.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"18827588599","text":"import tensorflow as tf\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import preprocessing\n\ndef init_weights(shape):\n    return tf.Variable(tf.random_normal(shape,stddev=0.01))\n\ndef plotLine(slope,bias):\n    x = np.arange(-3,3,0.5)\n    y = x*slope+bias\n    plt.plot(x,y)\n\nif __name__ == \"__main__\":\n    df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', header=None)\n    features = df.iloc[1:len(df.index),[0,2]].values\n    labels = df.iloc[1:len(df.index),4].values\n\n    scaler = preprocessing.StandardScaler().fit(features)\n    features_standard = scaler.transform(features)\n\n    for index,label in enumerate(labels):\n        if label == \"Iris-setosa\":\n            plt.scatter(features[index,0],features[index,1],color='red',marker='o',label='setosa')\n        else:\n            plt.scatter(features[index,0],features[index,1],color='blue',marker='x',label='versicolor')\n\n    plt.xlabel('petal len')\n    plt.ylabel('sepal len')\n    plt.show()\n\n    labels = np.where(labels==\"Iris-setosa\",1,-1)\n    features_train,features_test,labels_train,labels_test = train_test_split(features_standard,labels,test_size=0.33)\n\n    X = tf.placeholder(tf.float32)\n    Y = tf.placeholder(tf.float32)\n\n    w = init_weights([2,1])\n    b = tf.Variable(tf.zeros([1,1]))\n\n    predict_Y = tf.sign(tf.matmul(X,w)+b)\n\n    loss = tf.reduce_mean(tf.square(predict_Y-labels_train))\n\n    optimizer = tf.train.GradientDescentOptimizer(0.01)\n    train_step = optimizer.minimize(loss)\n\n    init = tf.global_variables_initializer()\n    \n    sess = tf.Session()\n    sess.run(init)\n\n    for i in range(300):\n        sess.run(train_step,feed_dict={X:features_train, Y:labels_train})\n\n    w1 = 
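# Why the plotLine(-w1/w2, -b/w2) call just below draws the decision boundary:
# the learned separator is w1*x1 + w2*x2 + b = 0, so solving for x2 gives
# slope -w1/w2 and intercept -b/w2. Tiny numeric check, illustrative weights:
w1_demo, w2_demo, b_demo = 0.8, -1.2, 0.3
x1_demo = 1.5
x2_demo = -(w1_demo / w2_demo) * x1_demo - b_demo / w2_demo
print(abs(w1_demo * x1_demo + w2_demo * x2_demo + b_demo) < 1e-9)  # True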
sess.run(w).flatten()[0]\n w2 = sess.run(w).flatten()[1]\n b = sess.run(b).flatten()\n\n for index,label in enumerate(labels_test):\n if label == 1:\n plt.scatter(features_test[index,0],features_test[index,1],color='red',marker='o',label='setosa')\n else:\n plt.scatter(features_test[index,0],features_test[index,1],color='blue',marker='x',label='versicolor')\n\n plt.xlabel('petal len')\n plt.ylabel('sepal len')\n plotLine(-w1/w2,-b/w2)\n plt.show()\n","repo_name":"caozheng1127/tensorflow_demo","sub_path":"headfirst_tensorflow/iris.py","file_name":"iris.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3518516273","text":"from models.cv_models import create_model, compiling\nfrom config.configloader import model_cfg, data_cfg\nfrom preprocessing.data import read_hdf5\nfrom pathlib import Path\nimport tensorflow as tf\nimport numpy as np\n\n# data\nmodel_name = model_cfg['model_name']\nrows, cols = int(data_cfg['rows']), int(data_cfg['cols'])\ninput_shape = (rows, cols, 3)\ninit = model_cfg['init']\ntrainable = model_cfg.getboolean('trainable')\nfc_layers = list(map(int, eval(model_cfg['fc_layers'])))\nclasses = int(model_cfg['classes'])\nhdf5_dir = Path(data_cfg['data_dir'])\n\n# creating global model\nmodel = create_model(model_name, input_shape, classes, fc_layers, trainable, init)\ncompiling(model, finetuning=trainable)\n\n\ndef load_model():\n \"\"\"Loads model\"\"\"\n return model\n\n\ndef summary(lite_model):\n \"\"\"Returns the summary of a TensorFlow Lite model\"\"\"\n return tf.lite.experimental.Analyzer.analyze(model_content=lite_model)\n\n\ndef representative_data_gen():\n \"\"\"\n This function is needed for TensorFlow Lite if you wish to do post-training integer quantization, as stated in\n https://www.tensorflow.org/lite/performance/model_optimization. It is used to send unlabeled representative\n samples which is read from a .h5 file containing unlabeled data.\n \"\"\"\n file_unlabel = read_hdf5(hdf5_dir, 'unlabeled', rows, cols)\n train_images = np.array(file_unlabel['/images'], dtype=np.float32)\n for input_value in tf.data.Dataset.from_tensor_slices(train_images).batch(1).take(100):\n yield [input_value]\n\n\ndef create_lite_model(old_model, lite_model_type):\n \"\"\"\n Creates a TensorFlow Lite model from the old model, and then optimizes the precision rate of the model. Default is\n float32, can create float16, uint8, and float8.\n\n Args:\n old_model: The model you wish to quantize.\n lite_model_type: What value, i.e. 
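# Hedged, self-contained variant of a TFLite representative-dataset generator
# shaped like representative_data_gen above, fed with synthetic random images
# instead of the project's HDF5 file (the 224x224x3 shape is an assumption).
import numpy as np
import tensorflow as tf

def sketch_rep_data_gen():
    images = np.random.rand(100, 224, 224, 3).astype(np.float32)
    for value in tf.data.Dataset.from_tensor_slices(images).batch(1).take(100):
        yield [value]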
float16, uint8, float8.\n\n Returns:\n A quantized model.\n \"\"\"\n converter = tf.lite.TFLiteConverter.from_keras_model(old_model)\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n if 'int' in lite_model_type:\n converter.representative_dataset = representative_data_gen\n # Ensure that if any ops can't be quantized, the converter throws an error\n converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n # Set the input and output tensors to uint8 (APIs added in r2.3)\n converter.inference_input_type = tf.uint8\n converter.inference_output_type = tf.uint8\n elif lite_model_type == 'float16':\n converter.target_spec.supported_types = [tf.float16]\n\n lite_model = converter.convert()\n return lite_model\n","repo_name":"miun-jvig/fl_distracted_driver_detection","sub_path":"models/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23575167031","text":"def flood(cake, x, y, letter):\n if x < 0 or y < 0:\n return\n if cake[x][y] != \"?\":\n return\n cake[x][y] = letter\n flood(cake, x-1, y, letter)\n flood(cake, x, y-1, letter)\n\nn = int(input())\n\nfor i in range(n):\n s = input().split(\" \")\n r = int(s[0])\n c = int(s[1])\n cake = []\n for j in range(r):\n cake.append(list(input()))\n\n for x in range(r):\n last = None\n for y in range(c):\n if cake[x][y] == \"?\":\n continue\n flood(cake, x-1, y, cake[x][y])\n flood(cake, x, y-1, cake[x][y])\n last = cake[x][y]\n if last:\n flood(cake, x, c-1, last)\n\n for y in range(c):\n x=r-1\n while cake[x][y] == \"?\":\n x -= 1\n flood(cake, r-1, y, cake[x][y])\n \n print(\"Case #{}:\".format(i+1))\n print(\"\\n\".join([''.join(x) for x in cake])) \n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_203/301.py","file_name":"301.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2545693783","text":"import os\nimport random\nimport subprocess\nimport sys\nimport time\nfrom enum import Enum\nfrom PIL import Image\n\nfrom common import screenshot\nfrom button_models import s1080x1920 as button_points\n\ndebug_mode = False\nStatus = Enum('Status', ('START', 'CAN_TALK', 'TALKING', \"GOING\"))\n\nglobal marked_point\nmarked_point = [0, 0]\n\n\ndef drift():\n return random.uniform(0, 10)\n\n\ndef pixel_match(im, target_point, target_rgb, loss, debug=False):\n pixel = im.getpixel((target_point[0], target_point[1]))\n if (debug):\n print(\"Debug:pixel_rgb:\", pixel[0], pixel[1], pixel[2])\n print(\"Debug:target_rgb:\", target_rgb[0], target_rgb[1], target_rgb[2])\n return True if (abs(pixel[0] - target_rgb[0]) <= loss\n and abs(pixel[1] - target_rgb[1]) <= loss\n and abs(pixel[2] - target_rgb[2]) <= loss) else False\n\n\ndef array_match(im, target_points, target_rgb, loss):\n global marked_point\n for point in target_points:\n if pixel_match(im, point, target_rgb, loss):\n marked_point = point\n return True\n return False\n\n\ndef check_status(im):\n if array_match(im, button_points.LOCATIONS, [243, 113, 147], 30):\n return Status[\"CAN_TALK\"]\n elif pixel_match(\n im, button_points.AUTO, [240, 125, 145], 50, debug=debug_mode):\n return Status[\"TALKING\"]\n elif pixel_match(\n im, button_points.GO, [126, 186, 232], 30, debug=debug_mode):\n return Status[\"GOING\"]\n elif pixel_match(\n im, button_points.START, [250, 140, 155], 30, debug=debug_mode):\n return 
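# Self-contained sketch of the tolerance test inside pixel_match above, on a
# synthetic PIL image so no screenshot is needed; the target colour and loss
# are the START-button values used in check_status.
from PIL import Image

im = Image.new("RGB", (8, 8), (245, 142, 150))
target_rgb, loss = (250, 140, 155), 30
pixel = im.getpixel((4, 4))
print(all(abs(p - t) <= loss for p, t in zip(pixel[:3], target_rgb)))  # True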
Status[\"START\"]\n\n\ndef action(status):\n if status is None:\n return\n elif status.value == Status[\"CAN_TALK\"].value:\n do_talk()\n elif status.value == Status[\"TALKING\"].value:\n do_auto_talk()\n elif status.value == Status[\"GOING\"].value:\n do_going()\n elif status.value == Status[\"START\"].value:\n do_start()\n\n\ndef do_talk():\n global marked_point\n print(\"INFO:开始对话\")\n tap(marked_point)\n time.sleep(1)\n\n\ndef do_auto_talk():\n tap(button_points.AUTO)\n print(\"INFO:开启自动对话,等待结束\")\n time.sleep(10)\n while True:\n temp_im = screenshot.pull_screenshot()\n if not (check_status(temp_im) == Status[\"TALKING\"]):\n break\n else:\n temp_im.close()\n time.sleep(0.5)\n\n\ndef do_going():\n print(\"INFO:开始前往\")\n tap(button_points.GO)\n time.sleep(1)\n\n\ndef do_start():\n print(\"INFO:开始出发\")\n tap(button_points.START)\n time.sleep(1)\n\n\ndef tap(point):\n point[0] = point[0] + drift()\n point[1] = point[1] + drift()\n print(\"INFO:模拟点击({},{})\".format(point[0], point[1]))\n si = subprocess.STARTUPINFO()\n si.dwFlags |= subprocess.STARTF_USESHOWWINDOW\n cmd = 'adb shell input tap {x} {y}'.format(x=point[0], y=point[1])\n subprocess.Popen(cmd, startupinfo=si)\n time.sleep(0.1)\n\n\ndef main():\n screenshot.check_screenshot()\n while True:\n print(\"INFO:正在截取手机屏幕\")\n im = screenshot.pull_screenshot()\n if debug_mode:\n print(\"Debug:image_size:\", im.size)\n print(\"INFO:正在检查状态\")\n status = check_status(im)\n print(\"INFO:当前状态:\", status)\n action(status)\n time.sleep(random.uniform(1, 2))\n im.close()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ctrysbita/EvolWalker","sub_path":"EvolWalker.py","file_name":"EvolWalker.py","file_ext":"py","file_size_in_byte":3413,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"2100641361","text":"from collections import deque\n\ndef solution(dartResult):\n answer = 0\n dartResult=list(dartResult)\n result = []\n temp = \"\"\n for dart in dartResult:\n x = dart\n if x != \"S\" and x != \"D\" and x != \"T\" and x != \"*\" and x != \"#\":\n temp += x\n continue\n \n \n if x == \"S\":\n result.append(str(int(temp)))\n temp = \"\"\n elif x == \"D\":\n result.append(str(int(temp)**2))\n temp = \"\"\n elif x == \"T\":\n result.append(str(int(temp)**3))\n temp = \"\"\n elif x == \"*\":\n if len(result) == 1:\n result[0] += \"*2\"\n else:\n result[-1] += \"*2\"\n result[-2] += \"*2\"\n temp = \"\" \n elif x == \"#\":\n result[-1] += \"*(-1)\"\n temp = \"\"\n \n for re in result:\n answer += eval(re)\n return answer","repo_name":"YuHyeonGeun-KOR/My-Algorithm-Journey","sub_path":"Programmers/level_1/dart.py","file_name":"dart.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23423728101","text":"\r\n\r\ndef solveCase(casenum):\r\n [price,rate,goal]=[float(e) for e in input().split()]\r\n return \"Case #\"+str(casenum)+\": \"+str(time(price,rate,goal))\r\n\r\ndef time(price,rate,goal):\r\n farms=0\r\n ans=0\r\n while(True):\r\n nofarm=goal/(2+farms*rate)\r\n farm=price/(2+farms*rate)+goal/(2+(farms+1)*rate)\r\n if nofarm 1:\n # Use multiple voxels to estimate the covariance\n from fsl_mrs.utils.preproc.combine import estimate_noise_cov\n data_array = np.moveaxis(\n data[:],\n (coil_dim, 3),\n (-1, -2))\n data_array = data_array.reshape((-1, ) + data_array.shape[-2:])\n coil_cov = estimate_noise_cov(data_array)\n elif data.ndim > 5:\n # Use multiple dynamics to estimate the 
covariance\n from fsl_mrs.utils.preproc.combine import estimate_noise_cov\n stacked_data = []\n for dd, idx in data.iterate_over_dims(dim='DIM_COIL',\n iterate_over_space=True,\n reduce_dim_index=True):\n stacked_data.append(dd)\n stacked_data = np.asarray(stacked_data)\n coil_cov = estimate_noise_cov(stacked_data)\n else:\n coil_cov = None\n\n if reference is not None:\n weights_shape = reference.shape[:3] + reference.shape[4:]\n ref_weights = np.zeros(weights_shape, dtype=complex)\n # Run wSVD on the reference data storing up weights\n for ref, idx in reference.iterate_over_dims(dim='DIM_COIL',\n iterate_over_space=True,\n reduce_dim_index=False):\n # TODO make this without_time an kwarg in iterate_over_dims\n idx_no_t = idx[:3] + idx[4:]\n _, ref_weights[idx_no_t], _ = preproc.combine_FIDs(\n ref,\n 'svd_weights',\n do_prewhiten=not no_prewhiten,\n cov=coil_cov)\n\n # Axes swapping fun for broadcasting along multiple dimensions.\n weighted_data = np.moveaxis(data[:], 3, -1).T * ref_weights.T\n weighted_data = np.moveaxis(weighted_data.T, -1, 3)\n\n combinedc_obj[:] = np.sum(weighted_data, axis=coil_dim)\n\n if (figure or report):\n from fsl_mrs.utils.preproc.combine import combine_FIDs_report\n for main, idx in data.iterate_over_dims(dim='DIM_COIL',\n iterate_over_space=True,\n reduce_dim_index=True):\n\n if (report_all or first_index(idx)):\n fig = combine_FIDs_report(\n main,\n combinedc_obj[idx],\n data.bandwidth,\n data.spectrometer_frequency[0],\n data.nucleus[0],\n ncha=data.shape[data.dim_position('DIM_COIL')],\n ppmlim=(0.0, 6.0),\n method='svd',\n dim='DIM_COIL',\n html=report)\n if figure:\n fig.show()\n if first_index(idx):\n break\n\n else:\n # If there is no reference data (or [TODO] supplied weights) then have to run\n # per-voxel wSVD. 
This is slow for high-res MRSI data\n for main, idx in data.iterate_over_dims(dim='DIM_COIL',\n iterate_over_space=True,\n reduce_dim_index=True):\n\n combinedc_obj[idx] = preproc.combine_FIDs(\n list(main.T),\n 'svd',\n do_prewhiten=not no_prewhiten,\n cov=coil_cov)\n\n if (figure or report) and (report_all or first_index(idx)):\n from fsl_mrs.utils.preproc.combine import combine_FIDs_report\n fig = combine_FIDs_report(\n main,\n combinedc_obj[idx],\n data.bandwidth,\n data.spectrometer_frequency[0],\n data.nucleus[0],\n ncha=data.shape[data.dim_position('DIM_COIL')],\n ppmlim=(0.0, 6.0),\n method='svd',\n dim='DIM_COIL',\n html=report)\n if figure:\n fig.show()\n\n # Update processing prov\n processing_info = f'{__name__}.coilcombine, '\n if reference is None:\n processing_info += 'reference=None, '\n else:\n processing_info += f'reference={reference.filename}, '\n processing_info += f'no_prewhiten={no_prewhiten}.'\n\n update_processing_prov(combinedc_obj, 'RF coil combination', processing_info)\n\n return combinedc_obj\n\n\ndef average(data, dim, figure=False, report=None, report_all=False):\n '''Average (take the mean) of FIDs across a dimension\n specified by a tag.\n\n :param NIFTI_MRS data: Data to average\n :param str dim: NIFTI-MRS dimension tag\n :param figure: True to show figure.\n :param report: Provide output location as path to generate report\n :param report_all: True to output all indicies\n\n :return: Combined data in NIFTI_MRS format.\n '''\n combined_obj = data.copy(remove_dim=dim)\n for dd, idx in data.iterate_over_dims(dim=dim,\n iterate_over_space=True,\n reduce_dim_index=True):\n combined_obj[idx] = preproc.combine_FIDs(dd, 'mean')\n\n if (figure or report) and (report_all or first_index(idx)):\n from fsl_mrs.utils.preproc.combine import combine_FIDs_report\n fig = combine_FIDs_report(dd,\n combined_obj[idx],\n data.bandwidth,\n data.spectrometer_frequency[0],\n data.nucleus[0],\n ncha=data.shape[data.dim_position(dim)],\n ppmlim=(0.0, 6.0),\n method=f'Mean along dim = {dim}',\n html=report)\n if figure:\n fig.show()\n\n # Update processing prov\n processing_info = f'{__name__}.average, '\n processing_info += f'dim={dim}.'\n\n update_processing_prov(combined_obj, 'Signal averaging', processing_info)\n\n return combined_obj\n\n\ndef align(data, dim, target=None, ppmlim=None, niter=2, apodize=10, figure=False, report=None, report_all=False):\n '''Align frequency and phase of spectra. 
Can be run across a dimension (specified by a tag), or all spectra\n stored in higher dimensions.\n\n :param NIFTI_MRS data: Data to align\n :param str dim: NIFTI-MRS dimension tag, or 'all'\n :param target: Optional target FID\n :param ppmlim: ppm search limits.\n :param int niter: Number of total iterations\n :param float apodize: Apply apodisation in Hz.\n :param figure: True to show figure.\n :param report: Provide output location as path to generate report\n :param report_all: True to output all indicies\n\n :return: Combined data in NIFTI_MRS format.\n '''\n\n aligned_obj = data.copy()\n\n if dim.lower() == 'all':\n generator = data.iterate_over_spatial()\n else:\n generator = data.iterate_over_dims(dim=dim,\n iterate_over_space=True,\n reduce_dim_index=False)\n for dd, idx in generator:\n\n if dim == 'all':\n # flatten\n original_shape = dd.shape\n dd = dd.reshape(original_shape[0], -1)\n\n out = preproc.phase_freq_align(\n dd.T,\n data.bandwidth,\n data.spectrometer_frequency[0],\n nucleus=data.nucleus[0],\n ppmlim=ppmlim,\n niter=niter,\n apodize=apodize,\n verbose=False,\n target=target)\n\n if dim == 'all':\n aligned_obj[idx], phi, eps = out[0].T.reshape(original_shape), out[1], out[2]\n else:\n aligned_obj[idx], phi, eps = out[0].T, out[1], out[2]\n\n if (figure or report) and (report_all or first_index(idx)):\n from fsl_mrs.utils.preproc.align import phase_freq_align_report\n fig = phase_freq_align_report(dd.T,\n out[0],\n phi,\n eps,\n data.bandwidth,\n data.spectrometer_frequency[0],\n nucleus=data.nucleus[0],\n ppmlim=ppmlim,\n html=report)\n if figure:\n for ff in fig:\n ff.show()\n\n # Update processing prov\n processing_info = f'{__name__}.align, '\n processing_info += f'dim={dim}, '\n if target is not None:\n processing_info += 'target used, '\n else:\n processing_info += 'target=None, '\n processing_info += f'ppmlim={ppmlim}, '\n processing_info += f'niter={niter}, '\n processing_info += f'apodize={apodize}.'\n\n update_processing_prov(aligned_obj, 'Frequency and phase correction', processing_info)\n\n return aligned_obj\n\n\ndef aligndiff(data,\n dim_align,\n dim_diff,\n diff_type,\n target=None,\n ppmlim=None,\n figure=False,\n report=None,\n report_all=False):\n '''Align frequencies of difference spectra across a dimension\n specified by a tag.\n\n :param NIFTI_MRS data: Data to align\n :param str dim_align: NIFTI-MRS dimension tag to align along\n :param str dim_diff: NIFTI-MRS dimension across which diffrence is taken.\n :param str diff_type: Either 'add' or 'sub'\n :param target: Optional target FID\n :param ppmlim: ppm search limits.\n :param figure: True to show figure.\n :param report: Provide output location as path to generate report\n :param report_all: True to output all indicies\n\n :return: Aligned data in NIFTI_MRS format.\n '''\n if data.shape[data.dim_position(dim_diff)] != 2:\n raise DimensionsDoNotMatch('Diff dimension must be of length 2')\n\n aligned_obj = data.copy()\n diff_index = data.dim_position(dim_diff)\n data_0 = []\n data_1 = []\n index_0 = []\n for dd, idx in data.iterate_over_dims(dim=dim_align,\n iterate_over_space=True,\n reduce_dim_index=False):\n if idx[diff_index] == 0:\n data_0.append(dd)\n index_0.append(idx)\n else:\n data_1.append(dd)\n\n for d0, d1, idx in zip(data_0, data_1, index_0):\n out = preproc.phase_freq_align_diff(\n d0.T,\n d1.T,\n data.bandwidth,\n data.spectrometer_frequency[0],\n nucleus=data.nucleus[0],\n diffType=diff_type,\n ppmlim=ppmlim,\n target=target)\n\n aligned_obj[idx], _, phi, eps = 
np.asarray(out[0]).T, out[1], out[2], out[3]\n\n if (figure or report) and (report_all or first_index(idx)):\n from fsl_mrs.utils.preproc.align import phase_freq_align_diff_report\n fig = phase_freq_align_diff_report(d0.T,\n d1.T,\n aligned_obj[idx].T,\n d1.T,\n phi,\n eps,\n data.bandwidth,\n data.spectrometer_frequency[0],\n nucleus=data.nucleus[0],\n diffType=diff_type,\n ppmlim=ppmlim,\n html=report)\n if figure:\n for ff in fig:\n ff.show()\n\n # Update processing prov\n processing_info = f'{__name__}.aligndiff, '\n processing_info += f'dim_align={dim_align}, '\n processing_info += f'dim_diff={dim_diff}, '\n processing_info += f'diff_type={diff_type}, '\n if target is not None:\n processing_info += 'target used, '\n else:\n processing_info += 'target=None, '\n processing_info += f'ppmlim={ppmlim}.'\n\n update_processing_prov(aligned_obj, 'Alignment of subtraction sub-spectra', processing_info)\n\n return aligned_obj\n\n\ndef ecc(data, reference, figure=False, report=None, report_all=False):\n '''Apply eddy current correction using a reference dataset\n :param NIFTI_MRS data: Data to eddy current correct\n :param NIFTI_MRS reference: reference dataset to calculate phase\n :param figure: True to show figure.\n :param report: Provide output location as path to generate report\n :param report_all: True to output all indicies\n\n :return: Corrected data in NIFTI_MRS format.\n '''\n if data.shape != reference.shape\\\n and reference.ndim > 4:\n raise DimensionsDoNotMatch('Reference and data shape must match'\n ' or reference must be single FID.')\n\n corrected_obj = data.copy()\n for dd, idx in data.iterate_over_dims(iterate_over_space=True):\n\n if data.shape == reference.shape:\n # Reference is the same shape as data, voxel-wise and spectrum-wise iteration\n ref = reference[idx]\n else:\n # Only one reference FID, only iterate over spatial voxels.\n ref = reference[idx[0], idx[1], idx[2], :]\n\n corrected_obj[idx] = preproc.eddy_correct(dd, ref)\n\n if (figure or report) and (report_all or first_index(idx)):\n from fsl_mrs.utils.preproc.eddycorrect import eddy_correct_report\n fig = eddy_correct_report(dd,\n corrected_obj[idx],\n ref,\n data.bandwidth,\n data.spectrometer_frequency[0],\n nucleus=data.nucleus[0],\n html=report)\n if figure:\n for ff in fig:\n ff.show()\n\n # Update processing prov\n processing_info = f'{__name__}.ecc, '\n processing_info += f'reference={reference.filename}.'\n\n update_processing_prov(corrected_obj, 'Eddy current correction', processing_info)\n\n return corrected_obj\n\n\ndef remove_peaks(data, limits, limit_units='ppm+shift', figure=False, report=None, report_all=False):\n '''Apply HLSVD to remove peaks from specta\n :param NIFTI_MRS data: Data to remove peaks from\n :param limits: ppm limits between which peaks will be removed\n :param str limit_units: Can be 'Hz', 'ppm' or 'ppm+shift'.\n :param figure: True to show figure.\n :param report: Provide output location as path to generate report\n :param report_all: True to output all indicies\n\n :return: Corrected data in NIFTI_MRS format.\n '''\n corrected_obj = data.copy()\n for dd, idx in data.iterate_over_dims(iterate_over_space=True):\n\n corrected_obj[idx] = preproc.hlsvd(dd,\n data.dwelltime,\n data.spectrometer_frequency[0],\n limits,\n limitUnits=limit_units)\n\n if (figure or report) and (report_all or first_index(idx)):\n from fsl_mrs.utils.preproc.remove import hlsvd_report\n fig = hlsvd_report(dd,\n corrected_obj[idx],\n limits,\n data.bandwidth,\n data.spectrometer_frequency[0],\n 
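# Hedged numpy sketch of the eddy-current idea behind ecc() above: remove the
# reference FID's time-varying phase from the data FID. Both FIDs are
# synthetic and share the same illustrative phase drift.
import numpy as np

t = np.arange(512) / 4000.0
drift = np.exp(1j * np.sin(2 * np.pi * 5.0 * t))  # illustrative phase error
fid = np.exp(-15.0 * t) * drift
ref = np.exp(-4.0 * t) * drift
corrected = fid * np.exp(-1j * np.angle(ref))     # drift stripped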
nucleus=data.nucleus[0],\n limitUnits=limit_units,\n html=report)\n if figure:\n fig.show()\n\n # Update processing prov\n processing_info = f'{__name__}.remove_peaks, '\n processing_info += f'limits={limits}, '\n processing_info += f'limit_units={limit_units}.'\n\n update_processing_prov(corrected_obj, 'Nuisance peak removal', processing_info)\n\n return corrected_obj\n\n\ndef hlsvd_model_peaks(data, limits,\n limit_units='ppm+shift', components=5, figure=False, report=None, report_all=False):\n '''Apply HLSVD to model spectum\n :param NIFTI_MRS data: Data to model\n :param limits: ppm limits between which spectrum will be modeled\n :param str limit_units: Can be 'Hz', 'ppm' or 'ppm+shift'.\n :param int components: Number of lorentzian components to model\n :param figure: True to show figure.\n :param report: Provide output location as path to generate report\n :param report_all: True to output all indicies\n\n :return: Corrected data in NIFTI_MRS format.\n '''\n corrected_obj = data.copy()\n for dd, idx in data.iterate_over_dims(iterate_over_space=True):\n\n corrected_obj[idx] = preproc.model_fid_hlsvd(\n dd,\n data.dwelltime,\n data.spectrometer_frequency[0],\n limits,\n limitUnits=limit_units,\n numSingularValues=components)\n\n if (figure or report) and (report_all or first_index(idx)):\n from fsl_mrs.utils.preproc.remove import hlsvd_report\n fig = hlsvd_report(dd,\n corrected_obj[idx],\n limits,\n data.bandwidth,\n data.spectrometer_frequency[0],\n nucleus=data.nucleus[0],\n limitUnits=limit_units,\n html=report)\n if figure:\n fig.show()\n\n # Update processing prov\n processing_info = f'{__name__}.hlsvd_model_peaks, '\n processing_info += f'limits={limits}, '\n processing_info += f'limit_units={limit_units}, '\n processing_info += f'components={components}.'\n\n update_processing_prov(corrected_obj, 'HLSVD modeling', processing_info)\n\n return corrected_obj\n\n\ndef tshift(data, tshiftStart=0.0, tshiftEnd=0.0, samples=None, figure=False, report=None, report_all=False):\n '''Apply time shift or resampling to each FID\n :param NIFTI_MRS data: Data to shift\n :param float tshiftStart: Shift start time (s), negative padds with zeros\n :param float tshiftEnd: Shift end time (s), negative truncates\n :param float samples: Resample to this many points\n :param figure: True to show figure.\n :param report: Provide output location as path to generate report\n :param report_all: True to output all indicies\n\n :return: Shifted data in NIFTI_MRS format.\n '''\n if samples is None:\n samples = data.shape[3]\n shifted_obj = data.copy()\n else:\n new_shape = list(data.shape)\n new_shape[3] = samples\n shifted_obj = NIFTI_MRS(\n np.zeros(new_shape, dtype=data.dtype),\n header=data.header)\n\n for dd, idx in data.iterate_over_dims(iterate_over_space=True):\n shifted_obj[idx], newDT = preproc.timeshift(dd,\n data.dwelltime,\n tshiftStart,\n tshiftEnd,\n samples)\n\n if (figure or report) and (report_all or first_index(idx)):\n from fsl_mrs.utils.preproc.shifting import shift_report\n\n original_hdr = {'bandwidth': data.bandwidth,\n 'centralFrequency': data.spectrometer_frequency[0],\n 'ResonantNucleus': data.nucleus[0]}\n new_hdr = {'bandwidth': 1 / newDT,\n 'centralFrequency': data.spectrometer_frequency[0],\n 'ResonantNucleus': data.nucleus[0]}\n fig = shift_report(dd,\n shifted_obj[idx],\n original_hdr,\n new_hdr,\n html=report,\n function='timeshift')\n if figure:\n fig.show()\n\n shifted_obj.dwelltime = newDT\n\n # Update processing prov\n processing_info = f'{__name__}.tshift, '\n 
processing_info += f'tshiftStart={tshiftStart}, '\n processing_info += f'tshiftEnd={tshiftEnd}, '\n processing_info += f'samples={samples}.'\n\n update_processing_prov(shifted_obj, 'Temporal resample', processing_info)\n\n return shifted_obj\n\n\ndef truncate_or_pad(data, npoints, position, figure=False, report=None, report_all=False):\n '''Truncate or pad by integer number of points\n :param NIFTI_MRS data: Data to truncate or pad\n :param int npoints: Pad (positive) or truncate (negative) by npoints\n :param str position: 'first' or 'last', add or remove points at the\n start or end of the FID\n :param figure: True to show figure.\n :param report: Provide output location as path to generate report\n :param report_all: True to output all indicies\n\n :return: Padded or truncated data in NIFTI_MRS format.\n '''\n\n new_shape = list(data.shape)\n new_shape[3] += npoints\n trunc_obj = NIFTI_MRS(\n np.zeros(new_shape, dtype=data.dtype),\n header=data.header)\n\n for dd, idx in data.iterate_over_dims(iterate_over_space=True):\n if npoints > 0:\n trunc_obj[idx] = preproc.pad(dd,\n np.abs(npoints),\n position)\n rep_func = 'pad'\n elif npoints < 0:\n trunc_obj[idx] = preproc.truncate(dd,\n np.abs(npoints),\n position)\n rep_func = 'truncate'\n else:\n rep_func = 'none'\n\n if (figure or report) and (report_all or first_index(idx)):\n from fsl_mrs.utils.preproc.shifting import shift_report\n original_hdr = {'bandwidth': data.bandwidth,\n 'centralFrequency': data.spectrometer_frequency[0],\n 'ResonantNucleus': data.nucleus[0]}\n\n fig = shift_report(dd,\n trunc_obj[idx],\n original_hdr,\n original_hdr,\n html=report,\n function=rep_func)\n if figure:\n fig.show()\n\n # Update processing prov\n processing_info = f'{__name__}.truncate_or_pad, '\n processing_info += f'npoints={npoints}, '\n processing_info += f'position={position}.'\n\n update_processing_prov(trunc_obj, 'Zero-filling', processing_info)\n\n return trunc_obj\n\n\ndef apodize(data, amount, filter='exp', figure=False, report=None, report_all=False):\n '''Apodize FIDs using a exponential or Lorentzian to Gaussian filter.\n Lorentzian to Gaussian filter takes requires two window parameters (t_L and t_G)\n\n :param NIFTI_MRS data: Data to truncate or pad\n :param tuple amount: If filter='exp' single valued. If filter='l2g' then two valued.\n :param str filter: 'exp' or 'l2g'. 
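# Self-contained numpy sketch of what the pad branch above achieves:
# zero-filling a FID before the FFT samples the same spectrum on a finer
# grid (no new information, just denser points). Sizes are illustrative.
import numpy as np

fid = np.random.randn(256) + 0j
padded = np.concatenate([fid, np.zeros(256, dtype=complex)])
print(np.fft.fft(fid).size, np.fft.fft(padded).size)   # 256 512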
Choose exponential or Lorentzian to Gaussian filter\n :param figure: True to show figure.\n :param report: Provide output location as path to generate report\n :param report_all: True to output all indicies\n\n :return: Filtered data in NIFTI_MRS format.\n '''\n apod_obj = data.copy()\n for dd, idx in data.iterate_over_dims(iterate_over_space=True):\n apod_obj[idx] = preproc.apodize(dd,\n data.dwelltime,\n amount,\n filter=filter)\n\n if (figure or report) and (report_all or first_index(idx)):\n from fsl_mrs.utils.preproc.filtering import apodize_report\n fig = apodize_report(dd,\n apod_obj[idx],\n data.bandwidth,\n data.spectrometer_frequency[0],\n nucleus=data.nucleus[0],\n html=report)\n if figure:\n fig.show()\n\n # Update processing prov\n processing_info = f'{__name__}.apodize, '\n processing_info += f'amount={amount}, '\n processing_info += f'filter={filter}.'\n\n update_processing_prov(apod_obj, 'Apodization', processing_info)\n\n return apod_obj\n\n\ndef fshift(data, amount, figure=False, report=None, report_all=False):\n '''Apply frequency shift.\n\n Two modes of operation:\n 1) Specify a single shift which is applied to all FIDs/spectra - amount has float type\n 2) Specify a shift per FID/spectra - amount is numpy array matching data shape\n\n :param NIFTI_MRS data: Data to frequency shift\n :param amount: Shift amount in Hz, can be array matching data size\n :param figure: True to show figure.\n :param report: Provide output location as path to generate report\n :param report_all: True to output all indicies\n\n :return: Shifted data in NIFTI_MRS format.\n '''\n if not isinstance(amount, float) and amount.size > 1:\n required_shape = data.shape[:3] + data.shape[4:]\n if amount.shape != required_shape:\n raise ValueError(\n 'Shift map must be the same size as the NIfTI-MRS spatial + higher dimensions. '\n f'Current size = {amount.shape}, required shape = {required_shape}.')\n shift_map = True\n else:\n shift_map = False\n toshift = amount\n\n shift_obj = data.copy()\n for dd, idx in data.iterate_over_dims(iterate_over_space=True):\n if shift_map:\n toshift = amount[idx[:3] + idx[4:]]\n shift_obj[idx] = preproc.freqshift(dd,\n data.dwelltime,\n toshift)\n\n if (figure or report) and (report_all or first_index(idx)):\n from fsl_mrs.utils.preproc.shifting import shift_report\n original_hdr = {'bandwidth': data.bandwidth,\n 'centralFrequency': data.spectrometer_frequency[0],\n 'ResonantNucleus': data.nucleus[0]}\n fig = shift_report(dd,\n shift_obj[idx],\n original_hdr,\n original_hdr,\n html=report,\n function='freqshift')\n if figure:\n fig.show()\n\n # Update processing prov\n processing_info = f'{__name__}.fshift, '\n if shift_map:\n processing_info += 'amount=per-voxel shifts specified.'\n else:\n processing_info += f'amount={amount}.'\n\n update_processing_prov(shift_obj, 'Frequency and phase correction', processing_info)\n\n return shift_obj\n\n\ndef shift_to_reference(data, ppm_ref, peak_search, use_avg=False, figure=False, report=None, report_all=False):\n '''Shift peak to known reference\n\n :param NIFTI_MRS data: Data to truncate or pad\n :param float ppm_ref: Reference shift that peak will be moved to\n :param tuple peak_search: Search for peak between these ppm limits e.g. 
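# Hedged arithmetic behind shift_to_reference here: a chemical-shift offset in
# ppm times the spectrometer frequency in MHz gives the correction in Hz. The
# numbers are illustrative (a tCr peak found slightly away from 3.027 ppm).
f0_mhz = 123.2
ppm_found, ppm_ref = 3.05, 3.027
shift_hz = (ppm_ref - ppm_found) * f0_mhz
print(f"apply {shift_hz:+.2f} Hz")   # about -2.83 Hz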
(2.8, 3.2) for tCr\n :param bool use_avg: If multiple spectra in higher dimensions,\n use the average of all the higher dimension spectra to calculate shift correction.\n :param figure: True to show figure.\n :param report: Provide output location as path to generate report\n :param report_all: True to output all indicies\n\n :return: Shifted data in NIFTI_MRS format.\n '''\n\n shift_obj = data.copy()\n if use_avg:\n # Combine all higher dimensions\n shift = np.zeros(data.shape[:3])\n for dd, idx in data.iterate_over_spatial():\n comb_data = preproc.combine_FIDs(dd.reshape(dd.shape[0], -1), 'mean')\n # Run shift estimation\n _, shift[idx[:3]] = preproc.shiftToRef(\n comb_data,\n ppm_ref,\n data.bandwidth,\n data.spectrometer_frequency[0],\n nucleus=data.nucleus[0],\n ppmlim=peak_search)\n\n for dd, idx in data.iterate_over_dims(iterate_over_space=True):\n if use_avg:\n shift_obj[idx] = preproc.freqshift(\n dd,\n data.dwelltime,\n - shift[idx[:3]] * data.spectrometer_frequency[0])\n else:\n shift_obj[idx], _ = preproc.shiftToRef(\n dd,\n ppm_ref,\n data.bandwidth,\n data.spectrometer_frequency[0],\n nucleus=data.nucleus[0],\n ppmlim=peak_search)\n\n if (figure or report) and (report_all or first_index(idx)):\n from fsl_mrs.utils.preproc.shifting import shift_report\n original_hdr = {'bandwidth': data.bandwidth,\n 'centralFrequency': data.spectrometer_frequency[0],\n 'ResonantNucleus': data.nucleus[0]}\n fig = shift_report(dd,\n shift_obj[idx],\n original_hdr,\n original_hdr,\n html=report,\n function='shiftToRef')\n if figure:\n fig.show()\n\n # Update processing prov\n processing_info = f'{__name__}.shift_to_reference, '\n processing_info += f'ppm_ref={ppm_ref}, '\n processing_info += f'peak_search={peak_search}, '\n processing_info += f'use_avg={use_avg}.'\n\n update_processing_prov(shift_obj, 'Frequency and phase correction', processing_info)\n\n return shift_obj\n\n\ndef remove_unlike(data, ppmlim=None, sdlimit=1.96, niter=2, figure=False, report=None):\n '''Remove unlike dynamics operating on DIM_DYN\n\n :param NIFTI_MRS data: Data to truncate or pad\n :param figure: True to show figure.\n :param report: Provide output location as path to generate report\n\n :return: Data passing likeness criteria.\n :return: Data failing likness criteria\n '''\n if data.shape[:3] != (1, 1, 1):\n raise OnlySVS(\"remove_unlike only specified for SVS data\")\n\n if data.ndim > 5:\n raise ValueError('remove_unlike only makes sense for a single dynamic dimension. Combined coils etc. 
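# Hedged numpy sketch of the likeness test behind remove_unlike above: score
# each transient against the mean spectrum and keep those within roughly
# sdlimit standard deviations. The distance metric is an illustrative choice.
import numpy as np

fids = np.random.randn(32, 512) + 1j * np.random.randn(32, 512)
specs = np.abs(np.fft.fft(fids, axis=1))
dist = np.linalg.norm(specs - specs.mean(axis=0), axis=1)
keep = dist < dist.mean() + 1.96 * dist.std()
print(int(keep.sum()), "of", len(fids), "transients kept")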
first')\n elif data.ndim < 5:\n raise ValueError('remove_unlike only makes sense for data with a dynamic dimension')\n\n goodFIDs, badFIDs, gIndicies, bIndicies, metric = \\\n preproc.identifyUnlikeFIDs(data[0, 0, 0, :, :].T,\n data.bandwidth,\n data.spectrometer_frequency[0],\n nucleus=data.nucleus[0],\n ppmlim=ppmlim,\n sdlimit=sdlimit,\n iterations=niter,\n shift=True)\n\n if figure or report:\n from fsl_mrs.utils.preproc.unlike import identifyUnlikeFIDs_report\n fig = identifyUnlikeFIDs_report(goodFIDs,\n badFIDs,\n gIndicies,\n bIndicies,\n metric,\n data.bandwidth,\n data.spectrometer_frequency[0],\n nucleus=data.nucleus[0],\n ppmlim=ppmlim,\n sdlimit=sdlimit,\n html=report)\n if figure:\n fig.show()\n\n goodFIDs = np.asarray(goodFIDs).T\n goodFIDs = goodFIDs.reshape([1, 1, 1] + list(goodFIDs.shape))\n\n good_out = NIFTI_MRS(\n goodFIDs,\n header=data.header)\n\n if len(badFIDs) > 0:\n badFIDs = np.asarray(badFIDs).T\n badFIDs = badFIDs.reshape([1, 1, 1] + list(badFIDs.shape))\n bad_out = NIFTI_MRS(\n badFIDs,\n header=data.header)\n else:\n bad_out = None\n\n # Update processing prov\n processing_info = f'{__name__}.remove_unlike, '\n if ppmlim is None:\n processing_info += 'ppmlim=None, '\n else:\n processing_info += f'ppmlim={ppmlim}, '\n processing_info += f'sdlimit={sdlimit}, '\n processing_info += f'niter={niter}.'\n\n update_processing_prov(good_out, 'Outlier removal', processing_info)\n\n return good_out, bad_out\n\n\ndef phase_correct(data, ppmlim, hlsvd=False, use_avg=False, figure=False, report=None, report_all=False):\n '''Zero-order phase correct based on peak maximum\n\n :param NIFTI_MRS data: Data to truncate or pad\n :param float ppmlim: Search for peak between limits\n :param bool hlsvd: Use HLSVD to remove peaks outside the ppmlim\n :param bool use_avg: If multiple spectra in higher dimensions,\n use the average of all the higher dimension spectra to calculate phase correction.\n :param figure: True to show figure.\n :param report: Provide output location as path to generate report\n :param report_all: True to output all indicies\n\n :return: Phased data in NIFTI_MRS format.\n '''\n\n phs_obj = data.copy()\n if use_avg:\n # Combine all higher dimensions\n p0 = np.zeros(data.shape[:3])\n pos_all = np.zeros(data.shape[:3], int)\n for dd, idx in data.iterate_over_spatial():\n comb_data = preproc.combine_FIDs(dd.reshape(dd.shape[0], -1), 'mean')\n # Run phase correction estimation\n _, p0[idx[:3]], pos_all[idx[:3]] = preproc.phaseCorrect(\n comb_data,\n phs_obj.bandwidth,\n phs_obj.spectrometer_frequency[0],\n nucleus=phs_obj.nucleus[0],\n ppmlim=ppmlim,\n use_hlsvd=hlsvd)\n\n for dd, idx in data.iterate_over_dims(iterate_over_space=True):\n if use_avg:\n phs_obj[idx] = preproc.applyPhase(\n dd,\n p0[idx[:3]])\n pos = pos_all[idx[:3]]\n else:\n phs_obj[idx], _, pos = preproc.phaseCorrect(\n dd,\n data.bandwidth,\n data.spectrometer_frequency[0],\n nucleus=data.nucleus[0],\n ppmlim=ppmlim,\n use_hlsvd=hlsvd)\n\n if (figure or report) and (report_all or first_index(idx)):\n from fsl_mrs.utils.preproc.phasing import phaseCorrect_report\n fig = phaseCorrect_report(dd,\n phs_obj[idx],\n pos,\n data.bandwidth,\n data.spectrometer_frequency[0],\n nucleus=data.nucleus[0],\n ppmlim=ppmlim,\n html=report)\n if figure:\n fig.show()\n\n # Update processing prov\n processing_info = f'{__name__}.phase_correct, '\n processing_info += f'ppmlim={ppmlim}, '\n processing_info += f'hlsvd={hlsvd}, '\n processing_info += f'use_avg={use_avg}.'\n\n update_processing_prov(phs_obj, 'Phasing', 
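# Hedged numpy sketch of zero-order phasing on a peak maximum, the core idea
# of phase_correct above: rotate the spectrum so the tallest point in the
# search window becomes purely real. One synthetic peak with a phase error.
import numpy as np

spec = np.zeros(512, dtype=complex)
spec[100] = 2.0 * np.exp(1j * 0.7)      # peak carrying a 0.7 rad error
pos = int(np.argmax(np.abs(spec)))
phased = spec * np.exp(-1j * np.angle(spec[pos]))
print(round(phased[pos].imag, 12))      # ~0.0, the peak is now real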
processing_info)\n\n return phs_obj\n\n\ndef apply_fixed_phase(data, p0, p1=0.0, p1_type='shift', figure=False, report=None, report_all=False):\n '''Apply fixed phase correction\n\n :param NIFTI_MRS data: Data to truncate or pad\n :param float p0: Zero order phase correction in degrees\n :param float p1: First order phase correction in seconds\n :param str p1_type: 'shift' for interpolated time-shift, 'linphase' for frequency-domain phasing.\n :param figure: True to show figure.\n :param report: Provide output location as path to generate report\n :param report_all: True to output all indicies\n\n :return: Phased data in NIFTI_MRS format.\n '''\n phs_obj = data.copy()\n for dd, idx in data.iterate_over_dims(iterate_over_space=True):\n phs_obj[idx] = preproc.applyPhase(dd,\n p0 * (np.pi / 180.0))\n\n if p1 != 0.0:\n if p1_type.lower() == 'shift':\n phs_obj[idx], _ = preproc.timeshift(\n phs_obj[idx],\n data.dwelltime,\n p1,\n p1,\n samples=data.shape[3])\n elif p1_type.lower() == 'linphase':\n from fsl_mrs.utils.misc import calculateAxes\n faxis = calculateAxes(\n data.spectralwidth,\n data.spectrometer_frequency[0],\n data.shape[3],\n 0.0)['freq']\n phs_obj[idx] = preproc.applyLinPhase(\n phs_obj[idx],\n faxis,\n p1)\n else:\n raise ValueError(\"p1_type kwarg must be 'shift' or 'linphase'.\")\n\n if (figure or report) and (report_all or first_index(idx)):\n from fsl_mrs.utils.preproc.general import generic_report\n original_hdr = {'bandwidth': data.bandwidth,\n 'centralFrequency': data.spectrometer_frequency[0],\n 'ResonantNucleus': data.nucleus[0]}\n fig = generic_report(dd,\n phs_obj[idx],\n original_hdr,\n original_hdr,\n ppmlim=(0.2, 4.2),\n html=report,\n function='fixed phase')\n if figure:\n fig.show()\n\n # Update processing prov\n processing_info = f'{__name__}.apply_fixed_phase, '\n processing_info += f'p0={p0}, '\n processing_info += f'p1={p1}, '\n processing_info += f'p1_type={p1_type}.'\n\n update_processing_prov(phs_obj, 'Phasing', processing_info)\n\n return phs_obj\n\n\ndef subtract(data0, data1=None, dim=None, figure=False, report=None, report_all=False):\n '''Either subtract data1 from data0 or subtract index 1 from\n index 0 along specified dimension\n\n :param NIFTI_MRS data: Data to truncate or pad\n :param data1: If specified data1 will be subtracted from data0\n :param dim: If specified index 1 will be subtracted from 0 across this dimension.\n :param figure: True to show figure.\n :param report: Provide output location as path to generate report\n :param report_all: True to output all indicies\n\n :return: Subtracted data in NIFTI_MRS format.\n '''\n\n if dim is not None:\n # Check dim is of correct size\n if data0.shape[data0.dim_position(dim)] != 2:\n raise DimensionsDoNotMatch('Subtraction dimension must be of length 2.'\n f' Currently {data0.shape[data0.dim_position(dim)]}')\n\n sub_ob = data0.copy(remove_dim=dim)\n for dd, idx in data0.iterate_over_dims(dim=dim,\n iterate_over_space=True,\n reduce_dim_index=True):\n sub_ob[idx] = preproc.subtract(dd.T[0], dd.T[1])\n\n if (figure or report) and (report_all or first_index(idx)):\n from fsl_mrs.utils.preproc.general import add_subtract_report\n fig = add_subtract_report(dd.T[0],\n dd.T[1],\n sub_ob[idx],\n data0.bandwidth,\n data0.spectrometer_frequency[0],\n nucleus=data0.nucleus[0],\n ppmlim=(0.2, 4.2),\n html=report,\n function='subtract')\n if figure:\n fig.show()\n\n elif data1 is not None:\n\n sub_ob = data0.copy()\n sub_ob[:] = (data0[:] - data1[:]) / 2\n\n else:\n raise ValueError('One of data1 or dim 
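# Hedged numpy sketch of the 'linphase' option above: a first-order phase is a
# frequency-dependent rotation exp(2*pi*i*f*p1) applied across the spectral
# axis, with p1 in seconds. Axis and value are illustrative.
import numpy as np

faxis = np.linspace(-2000.0, 2000.0, 512)   # Hz
spec = np.random.randn(512) + 0j
p1 = 0.0005                                  # seconds
spec_p1 = spec * np.exp(2j * np.pi * faxis * p1)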
arguments must not be None.')\n\n # Update processing prov\n processing_info = f'{__name__}.subtract, '\n if data1 is None:\n processing_info += 'data1=None, '\n else:\n processing_info += f'data1={data1.filename}, '\n if dim is None:\n processing_info += 'dim=None.'\n else:\n processing_info += f'dim={dim}.'\n\n update_processing_prov(sub_ob, 'Subtraction of sub-spectra', processing_info)\n\n return sub_ob\n\n\ndef add(data0, data1=None, dim=None, figure=False, report=None, report_all=False):\n '''Either add data1 to data0 or add index 1 to\n index 0 along specified dimension\n\n :param NIFTI_MRS data: Data to truncate or pad\n :param data1: If specified data1 will be added to data0\n :param dim: If specified index 1 will be added to 0 across this dimension.\n :param figure: True to show figure.\n :param report: Provide output location as path to generate report\n :param report_all: True to output all indicies\n\n :return: Subtracted data in NIFTI_MRS format.\n '''\n\n if dim is not None:\n # Check dim is of correct size\n if data0.shape[data0.dim_position(dim)] != 2:\n raise DimensionsDoNotMatch('Addition dimension must be of length 2.'\n f' Currently {data0.shape[data0.dim_position(dim)]}')\n\n add_ob = data0.copy(remove_dim=dim)\n for dd, idx in data0.iterate_over_dims(dim=dim,\n iterate_over_space=True,\n reduce_dim_index=True):\n add_ob[idx] = preproc.add(dd.T[0], dd.T[1])\n\n if (figure or report) and (report_all or first_index(idx)):\n from fsl_mrs.utils.preproc.general import add_subtract_report\n fig = add_subtract_report(dd.T[0],\n dd.T[1],\n add_ob[idx],\n data0.bandwidth,\n data0.spectrometer_frequency[0],\n nucleus=data0.nucleus[0],\n ppmlim=(0.2, 4.2),\n html=report,\n function='add')\n if figure:\n fig.show()\n\n elif data1 is not None:\n\n add_ob = data0.copy()\n add_ob[:] = (data0[:] + data1[:]) / 2\n\n else:\n raise ValueError('One of data1 or dim arguments must not be None.')\n\n # Update processing prov\n processing_info = f'{__name__}.add, '\n if data1 is None:\n processing_info += 'data1=None, '\n else:\n processing_info += f'data1={data1.filename}, '\n if dim is None:\n processing_info += 'dim=None.'\n else:\n processing_info += f'dim={dim}.'\n\n update_processing_prov(add_ob, 'Addition of sub-spectra', processing_info)\n\n return add_ob\n\n\ndef conjugate(data, figure=False, report=None, report_all=False):\n '''Conjugate the data\n\n :param NIFTI_MRS data: Data to truncate or pad\n :param figure: True to show figure.\n :param report: Provide output location as path to generate report\n :param report_all: True to output all indicies\n\n :return: Conjugated data in NIFTI_MRS format.\n '''\n\n conj_data = data.copy()\n conj_data[:] = conj_data[:].conj()\n\n if report:\n for dd, idx in data.iterate_over_dims(iterate_over_space=True):\n if report_all or first_index(idx):\n from fsl_mrs.utils.preproc.general import generic_report\n original_hdr = {'bandwidth': data.bandwidth,\n 'centralFrequency': data.spectrometer_frequency[0],\n 'ResonantNucleus': data.nucleus[0]}\n fig = generic_report(dd,\n conj_data[idx],\n original_hdr,\n original_hdr,\n ppmlim=(0.2, 4.2),\n html=report,\n function='conjugate')\n if figure:\n fig.show()\n\n # Update processing prov\n processing_info = f'{__name__}.conjugate.'\n update_processing_prov(conj_data, 'Conjugation', processing_info)\n\n return 
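# Self-contained check of what the conjugation above does to a spectrum:
# conjugating the FID mirrors its frequency content about zero.
import numpy as np

t = np.arange(256) / 2000.0
fid = np.exp(2j * np.pi * 300.0 * t)
freqs = np.fft.fftfreq(t.size, d=t[1] - t[0])
print(freqs[np.argmax(np.abs(np.fft.fft(fid)))])          # ~ +300 Hz
print(freqs[np.argmax(np.abs(np.fft.fft(fid.conj())))])   # ~ -300 Hz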
conj_data\n","repo_name":"wtclarke/fsl_mrs","sub_path":"fsl_mrs/utils/preproc/nifti_mrs_proc.py","file_name":"nifti_mrs_proc.py","file_ext":"py","file_size_in_byte":47775,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"72137991555","text":"# @File : numba_test.py\n# @Author: 沈昌力\n# @Date : 2018/4/2\n# @Desc :\nimport numpy as np\nfrom numba import jit\nfrom numba import vectorize\nimport time\n\n@jit\ndef mattest2(m1:np.ndarray, m2:np.ndarray):\n l = len(m1)\n m3 = np.zeros(shape=m1.shape)\n for i in range(l):\n for j in range(l):\n a = m1[i][j]\n b = m2[i][j]\n c = np.sqrt(a ** 2 + b ** 2)\n m3[i][j] = c\n return m3\n\n# @jit\ndef compu(a, b):\n return np.sqrt(a**2 + b**2)\n\n@jit\ndef mattest(m1, m2):\n l = len(m1)\n m3 = []\n for i in range(l):\n tmp = []\n for j in range(l):\n a = m1[i][j]\n b = m2[i][j]\n c = compu(a, b)\n tmp.append(c)\n m3.append(tmp)\n return m3\n\n\nsz = 2000\na = np.random.random((sz, sz))\nA = a.tolist()\nb = np.random.random((sz, sz))\nB = b.tolist()\nstart = time.time()\n# mattest2(a, b)\nmattest(A, B)\nend = time.time()\nprint(end - start)","repo_name":"karmueo/numbatest","sub_path":"numba_test.py","file_name":"numba_test.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18816981994","text":"import math\ndef check(n):\n if n < 2: return 0\n for i in range(2 , int(math.sqrt(n) + 1)):\n if n % i == 0: return 0\n return 1\n\nn,m = map(int, input().split())\na = []\nfor i in range(n):\n hang = list(map(int, input().split()))\n a.append(hang)\nres = []\nok = 0\nfor i in range(n):\n for j in range(m):\n if check(a[i][j]):\n ok = 1\n res.append(a[i][j])\nif ok == 0: print(\"NOT FOUND\")\nelse:\n maxx = max(res)\n print(maxx)\n for i in range(n):\n for j in range(m):\n if a[i][j] == maxx:\n print('Vi tri [',i,'][' ,j, ']', sep='')\n","repo_name":"NguyenVanDuc0405/Code_Python_PTIT","sub_path":"songuyentolonnhattrongmatran.py","file_name":"songuyentolonnhattrongmatran.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6562235006","text":"import cv2\nimport winsound\nimport requests\n\n# Python request reference https://www.datacamp.com/tutorial/making-http-requests-in-python\n\nurl = \"http://localhost/callapi.php?id=no\"\nrequests.get(url)\n\n# The API endpoint\ncam = cv2.VideoCapture(0)\nwhile cam.isOpened():\n ret, frame1 = cam.read()\n ret , frame2 = cam.read()\n diff = cv2.absdiff(frame1, frame2)\n gray = cv2.cvtColor(diff,cv2.COLOR_RGB2GRAY)\n blur = cv2.GaussianBlur(gray, (5, 5), 0)\n _, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)\n dilated = cv2.dilate(thresh, None, iterations=3)\n contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n #cv2.drawContours(frame1, contours, -1, (0, 255, 0), 2)\n for c in contours:\n\n if cv2.contourArea(c) < 7000:\n continue\n x, y, w, h = cv2.boundingRect(c)\n cv2.rectangle(frame1, (x, y), (x + w, y + h), (0, 255, 0), 2)\n url = \"http://localhost/callapi.php?id=yes\"\n requests.get(url)\n # winsound.Beep(500,200)\n # winsound.PlaySound('alert.wav', winsound.SND_ASYNC)\n\n if cv2.waitKey(10) == ord('q'):\n url = \"http://localhost/callapi.php?id=no\"\n requests.get(url)\n break\n cv2.imshow('Granny Cam', frame1)\n","repo_name":"winh321/webcode","sub_path":"Pyhton Php opencv/python 
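# Self-contained sketch of the frame-differencing core of the motion detector
# above, run on two synthetic frames so no webcam is needed; the blur,
# threshold and dilation parameters are copied from the original loop.
import numpy as np
import cv2

frame1 = np.zeros((120, 160, 3), np.uint8)
frame2 = frame1.copy()
frame2[40:80, 60:100] = 200                  # simulated movement
diff = cv2.absdiff(frame1, frame2)
gray = cv2.cvtColor(diff, cv2.COLOR_RGB2GRAY)
blur = cv2.GaussianBlur(gray, (5, 5), 0)
_, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
dilated = cv2.dilate(thresh, None, iterations=3)
contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
print(len(contours), "moving region(s) found")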
parts/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21929368477","text":"from math import radians, sin, cos, acos\r\nimport numpy as np\r\nimport traceback\r\n\r\nclass City_distance:\r\n def distance_calc(self):\r\n \"\"\"\r\n input variables\r\n city1\r\n location of city 1\r\n location of city 2\r\n\r\n Purpose of the class:\r\n To calculate distance between two cities\r\n\r\n \"\"\"\r\n\r\n city1 = input('City 1:').strip()\r\n city2 = input('City 2:').strip()\r\n try:\r\n lat1,lon1 = city1.split(',')\r\n lat2,lon2 = city2.split(',')\r\n lat1 = float(lat1[0:-1])\r\n lon1 = float(lon1[0:-1])\r\n lat2 = float(lat2[0:-1])\r\n lon2 = float(lon2[0:-1])\r\n r = 6371.01\r\n phi1 = np.radians(lat1)\r\n phi2 = np.radians(lat2)\r\n delta_phi = np.radians(lat2 - lat1)\r\n delta_lambda = np.radians(lon2 - lon1)\r\n a = np.sin(delta_phi / 2) ** 2 + np.cos(phi1) * np.cos(phi2) * np.sin(delta_lambda / 2) ** 2\r\n res = r * (2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a)))\r\n del lat2,lon1,lon2,r,phi1,phi2,delta_lambda,delta_phi,city2,city1,lat1\r\n print(\"City1 and City2 are \" +str(np.round(res,2))+ \" km apart\")\r\n except Exception as e:\r\n traceback.print_exc()\r\n print(e)\r\ndistance = City_distance()\r\ndistance.distance_calc()\r\n","repo_name":"kasmitharam/zelthy_assignment123-","sub_path":"city_distance.py","file_name":"city_distance.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23253972898","text":"#!/usr/bin/env python3\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# Load sales data.\nsales_data = pd.read_csv('../sales_data.csv')\n\n# Filter, sort, and add a new column.\nfiltered_sales_data = sales_data[sales_data['Region'] == 'South']\nsorted_sales_data = filtered_sales_data.sort_values(by='Quantity', ascending=False)\nsorted_sales_data['Total revenue'] = sorted_sales_data['Quantity'] * sorted_sales_data['Price']\n\n# Visualize data trends.\nplt.figure(figsize=(10, 5))\nplt.subplot(1, 2, 1)\nsorted_sales_data.groupby('Date')['Total revenue'].sum().plot(kind='line')\nplt.title('Total revenue over time')\nplt.xlabel('Date')\nplt.ylabel('Total revenue')\n\nplt.subplot(1, 2, 2)\nsorted_sales_data.groupby('Product')['Total revenue'].sum().plot(kind='bar')\nplt.title('Total revenue by product')\nplt.xlabel('Product')\nplt.ylabel('Total revenue')\nplt.tight_layout()\nplt.show()\n","repo_name":"pcafrica/advanced_programming_2023-2024","sub_path":"exercises/11/solutions/ex3_1.py","file_name":"ex3_1.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"61"} +{"seq_id":"16848824658","text":"#!/usr/bin/python3\n\nclass Tree:\n\tleftSide = None\n\trightSide = None\n\tdata = None\n\n\tdef __init__(self, data):\n\t\tself.data = data\n\t\tself.leftSide = None\n\t\tself.rightSide = None\n\t# end constructor\n\n\tdef addNodeToTree(self, tree, newData):\n\t\tif tree is None:\n\t\t\ttree = Tree(newData)\n\t\telse:\n\t\t\tleftSide = False\n\t\t\talreadyExists = False\n\t\t\tcurrent = tree\n\t\t\ttmp = None\n\n\t\t\twhile current is not None:\n\t\t\t\ttmp = current\n\n\t\t\t\tif current.data > newData:\n\t\t\t\t\tleftSide = True\n\t\t\t\t\tcurrent = current.leftSide\n\t\t\t\telif current.data < newData:\n\t\t\t\t\tleftSide = False\n\t\t\t\t\tcurrent = 
current.rightSide\n\t\t\t\telse:\n\t\t\t\t\talreadyExists = True\n\t\t\t\t\tbreak\n\t\t\t\t# end if\n\t\t\t# end while\n\n\t\t\tif leftSide and not alreadyExists:\n\t\t\t\ttmp.leftSide = Tree(newData)\n\t\t\telif not leftSide and not alreadyExists:\n\t\t\t\ttmp.rightSide = Tree(newData)\n\t\t\t# end if\n\t\t# end if\n\t# end method\n\n\tdef containsNode(self, tree, searchKey):\n\t\tif tree is not None:\n\t\t\ttmp = tree\n\n\t\t\twhile tmp is not None:\n\t\t\t\tif searchKey > tmp.data:\n\t\t\t\t\ttmp = tmp.rightSide\n\t\t\t\telif searchKey < tmp.data:\n\t\t\t\t\ttmp = tmp.leftSide\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\t\t\t\t# end if\n\t\t\t# end while\n\n\t\t\tif tmp is not None:\n\t\t\t\treturn True\n\t\t\t# end if\n\t\t# end if\n\n\t\treturn False\n\t# end function\n\n\tdef removeNode(self, tree, removeKey):\n\t\tif tree is None:\n\t\t\treturn tree\n\t\t# end if\n\n\t\tif removeKey < tree.data:\n\t\t\ttree.leftSide = self.removeNode(tree.leftSide, removeKey)\n\t\telif removeKey > tree.data:\n\t\t\ttree.rightSide = self.removeNode(tree.rightSide, removeKey)\n\t\telse:\n\t\t\tif tree.leftSide is None:\n\t\t\t\tbackup = tree.rightSide\n\t\t\t\ttree = None\n\t\t\t\treturn backup\n\t\t\telif tree.rightSide is None:\n\t\t\t\tbackup = tree.leftSide\n\t\t\t\ttree = None\n\t\t\t\treturn backup\n\t\t\t# end if\n\n\t\t\tbackup = self.getMinimalNode(tree.rightSide)\n\t\t\ttree.data = backup.data\n\n\t\t\ttree.rightSide = self.removeNode(tree.rightSide, backup.data)\n\t\t# end if\n\n\t\treturn tree\n\t# end method\n\n\tdef getMinimalNode(self, subTree):\n\t\tcurrent = subTree\n\n\t\twhile current.leftSide is not None:\n\t\t\tcurrent = current.leftSide\n\t\t# end if\n\n\t\treturn current\n\t# end method\n\n\tdef printPreOrder(self, root):\n\t\tif root is not None:\n\t\t\tcontentLeft = None\n\t\t\tcontentRight = None\n\n\t\t\tif (root.leftSide is not None):\n\t\t\t\tcontentLeft = root.leftSide.data\n\t\t\t# end if\n\n\t\t\tif (root.rightSide is not None):\n\t\t\t\tcontentRight = root.rightSide.data\n\t\t\t# end if\n\n\t\t\tprint(\"data: {0} (left: {1}, right: {2})\".format(root.data, contentLeft, contentRight))\n\n\t\t\tself.printPreOrder(root.leftSide)\n\t\t\tself.printPreOrder(root.rightSide)\n\t\t# end if\n\t# end method\n\n\tdef printInOrder(self, root):\n\t\tif root is not None:\n\t\t\tself.printInOrder(root.leftSide)\n\n\t\t\tcontentLeft = None\n\t\t\tcontentRight = None\n\n\t\t\tif (root.leftSide is not None):\n\t\t\t\tcontentLeft = root.leftSide.data\n\t\t\t# end if\n\n\t\t\tif (root.rightSide is not None):\n\t\t\t\tcontentRight = root.rightSide.data\n\t\t\t# end if\n\n\t\t\tprint(\"data: {0} (left: {1}, right: {2})\".format(root.data, contentLeft, contentRight))\n\n\t\t\tself.printInOrder(root.rightSide)\n\t\t# end if\n\t# end method\n\n\tdef printPostOrder(self, root):\n\t\tif root is not None:\n\t\t\tself.printPostOrder(root.leftSide)\n\t\t\tself.printPostOrder(root.rightSide)\n\n\t\t\tcontentLeft = None\n\t\t\tcontentRight = None\n\n\t\t\tif (root.leftSide is not None):\n\t\t\t\tcontentLeft = root.leftSide.data\n\t\t\t# end if\n\n\t\t\tif (root.rightSide is not None):\n\t\t\t\tcontentRight = root.rightSide.data\n\t\t\t# end if\n\n\t\t\tprint(\"data: {0} (left: {1}, right: {2})\".format(root.data, contentLeft, contentRight))\n\t\t# end if\n\t# end method\n\n\tdef cleanUp(self, root):\n\t\tif root is not None:\n\t\t\tself.cleanUp(root.leftSide)\n\t\t\tself.cleanUp(root.rightSide)\n\n\t\t\tdel root\n\t\t# end if\n\t# end method\n# end 
class","repo_name":"ITWorks4U/programming-workshop","sub_path":"02_binary_tree/Python/BinaryTree.py","file_name":"BinaryTree.py","file_ext":"py","file_size_in_byte":3595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13580391267","text":"import pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier\nimport sys\nimport shutil\nimport learning_utils as lu\nsys.path.insert(0, '../Features')\nimport splitSets\n\n\ndef outlierSearch(iterations):\n \"\"\"\n Creates a table with one line per healthCode and decreasingly sorted by an index\n that ranks how likely the healthCode is of being an outlier.\n\n Input:\n - iterations: integer\n The number of different distributions of the dataset in Training, Validation and Test\n sets in which the outlier analysis is performed. For more stable results at least 200\n iterations are recommended.\n \"\"\"\n outlierRemoval()\n demographics = pd.read_csv(\"../data/demographics.csv\", index_col=0)\n demographics.loc[:, \"outlierCounter\"] = 0\n demographics.loc[:, \"valTestCounter\"] = 0\n demographics.loc[:, \"valTestCounterBad\"] = 0\n demographics = demographics.loc[:, [\"healthCode\", \"outlierCounter\", \"valTestCounter\", \"valTestCounterBad\"]]\n\n total_auc = 0\n for i in range(iterations):\n print(\"\\nIteration {}\".format(i))\n splitSets.generateSetTables(quickSplit=True)\n val_auc, test_auc = randomForestModel(dropAge=True, ensemble_size=11)\n total_auc += (val_auc + test_auc)\n\n possibleOutliers = pd.read_csv(\"../data/val_extra_columns.csv\", index_col=0).healthCode.unique()\n rowsToAdd = demographics['healthCode'].isin(possibleOutliers)\n demographics.loc[rowsToAdd, \"valTestCounter\"] += 1\n demographics.loc[rowsToAdd, \"outlierCounter\"] += 1 - val_auc\n if val_auc < 0.6:\n demographics.loc[rowsToAdd, \"valTestCounterBad\"] += 1\n\n possibleOutliers = pd.read_csv(\"../data/test_extra_columns.csv\", index_col=0).healthCode.unique()\n rowsToAdd = demographics['healthCode'].isin(possibleOutliers)\n demographics.loc[rowsToAdd, \"valTestCounter\"] += 1\n demographics.loc[rowsToAdd, \"outlierCounter\"] += 1 - test_auc\n if test_auc < 0.6:\n demographics.loc[rowsToAdd, \"valTestCounterBad\"] += 1\n\n demographics.loc[:, \"valTestBadProportion\"] = demographics.valTestCounterBad / demographics.valTestCounter\n demographics.loc[:, \"avgOutlierCounter\"] = demographics.outlierCounter / demographics.valTestCounter\n demographics.sort_values(by=['avgOutlierCounter'], ascending=False, inplace=True)\n demographics.to_csv(\"outlierSort.csv\")\n\n # Restoring the original table\n shutil.move(\"../data/walking_activity_featuresOriginalTemp.csv\", \"../data/walking_activity_features.csv\")\n\n print(\"\\nAvg AUC score:\", total_auc / (2 * iterations))\n\n\ndef outlierRemoval():\n \"\"\"\n Removes a collection of outliers already identified from the walking_activity_features\n table and creates a new table walking_activity_featuresOriginalTemp to enable a way\n to restore the original walking_activity_features table later.\n \"\"\"\n\n outliers1 = [\"e31788d0-7834-477a-a718-fef116c04816\",\n \"9a41dd95-337d-4f23-8b3e-f0f0dd40fc4d\",\n \"64aedea6-b1f9-49da-8b10-3f02d8ed04b6\",\n \"bae1bf32-94bf-42a7-96d0-ee23fd98245e\",\n \"7fb7afc9-b006-4a44-99dc-409ba90d3fe8\"]\n\n outliers2 = [\"080274a4-cddf-47b7-9b8e-679153859229\",\n \"6ed887bb-394b-40dc-a8d5-96e836468a8b\"]\n\n outliers = outliers1 + outliers2\n\n walking_activity_features = pd.read_csv(\"../data/walking_activity_features.csv\", 
index_col=0)\n shutil.move(\"../data/walking_activity_features.csv\", \"../data/walking_activity_featuresOriginalTemp.csv\")\n dropRows = walking_activity_features[walking_activity_features.healthCode.isin(outliers)].index\n walking_activity_features.drop(dropRows, inplace=True)\n walking_activity_features.to_csv(\"../data/walking_activity_features.csv\")\n\n\ndef randomForestModel(criterion='gini', ensemble_size=11, dropAge=False):\n \"\"\"\n Input:\n - criterion: string (default='gini')\n The function to measure the quality of a split: 'gini' or 'entropy'\n - ensemble_size: int\n Number of classifiers trained on different training sets when undersampling is applied. This number must be odd.\n - dropAge: bool\n Whether to use age as a feature.\n\n Outputs a tuple with AUROC score on the validation and test sets.\n \"\"\"\n\n X_train = {}\n y_train = {}\n rnd_clf = {}\n y_pred_total = {\n \"val\": 0,\n \"test\": 0,\n }\n metrics_train_total = {\n \"Accuracy\": 0,\n \"Precision\": 0,\n \"Recall\": 0,\n \"F1 Score\": 0,\n \"ROC score\": 0\n }\n X = {}\n X[\"val\"], y_val, feature_names = lu.load_dataStandart(\"val\", selectOldAge=True, dropAge=dropAge)\n X[\"test\"], y_test, _ = lu.load_dataStandart(\"test\", selectOldAge=True, dropAge=dropAge)\n\n for i in range(ensemble_size):\n X_train[i], y_train[i], _ = lu.load_dataStandart(\"train\", selectOldAge=False, dropAge=dropAge,\n balance_undersampling=True)\n\n rnd_clf[i] = RandomForestClassifier(n_estimators=13, criterion=criterion, max_depth=5, min_samples_split=12, n_jobs=-1)\n\n rnd_clf[i].fit(X_train[i], y_train[i])\n lu.metricsAccumulate(X_train[i], y_train[i], rnd_clf[i], metrics_train_total)\n for setName in [\"val\", \"test\"]:\n y_pred_total[setName] += rnd_clf[i].predict_proba(X[setName]) > 0.5 # threshold\n\n lu.metricsShowAccumulate(metrics_train_total, ensemble_size)\n val_auc = lu.metricsShowEnsemble(y_val, y_pred_total[\"val\"], \"Validation\", ensemble_size, threshold=0.5)\n test_auc = lu.metricsShowEnsemble(y_test, y_pred_total[\"test\"], \"Test\", ensemble_size, threshold=0.5)\n\n return val_auc, test_auc\n","repo_name":"pedroig/Parkinsons-Disease-Digital-Biomarker","sub_path":"Random_Forest/outlierSearch.py","file_name":"outlierSearch.py","file_ext":"py","file_size_in_byte":5681,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"61"} +{"seq_id":"32648736096","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> Optional[ListNode]:\n tempA = headA\n tempB = headB\n lenA = lenB = 0\n while tempA.next:\n lenA += 1\n tempA = tempA.next\n\n while tempB.next:\n lenB += 1\n tempB = tempB.next \n\n if tempA != tempB:\n return None \n\n if lenA > lenB:\n travx = headA\n travy = headB\n else:\n travx = headB\n travy = headA\n\n cnt = 0\n while travx:\n if cnt >= abs(lenA - lenB):\n if travx == travy:\n return travx\n travy = travy.next \n travx = travx.next\n cnt += 1\n\n","repo_name":"parasv24/grind","sub_path":"0160-intersection-of-two-linked-lists/0160-intersection-of-two-linked-lists.py","file_name":"0160-intersection-of-two-linked-lists.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31841391650","text":"import sys\n\nfai_index = sys.argv[1]\n\nfile = open(fai_index, \"r\")\n\nlines = [i.strip() for i in 
file.readlines()]\n\n\ntab_var = []\nfor l in lines:\n chrom = l.split(\"\\t\")[0]\n size = l.split(\"\\t\")[1]\n tab_var.append([chrom, int(size)])\n\nprint(\"var BioCircosGenome = \" + str(tab_var))\n\n\n","repo_name":"DrosophilaGenomeEvolution/TrEMOLO","sub_path":"lib/python/create_biocircosgenome.py","file_name":"create_biocircosgenome.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"61"}
{"seq_id":"4003041700","text":"from panoptes.ling.tree.deep.base import DeepArgument\nfrom panoptes.ling.tree.surface.direction import SurfaceDirection\n\n\nclass DeepDirection(DeepArgument):\n def __init__(self, which, of):\n self.which = which\n assert self.which\n assert isinstance(self.which, basestring)\n\n self.of = of\n assert isinstance(self.of, DeepArgument)\n\n # --------------------------------------------------------------------------\n # From base.\n\n def dump(self):\n return {\n 'type': 'DeepDirection',\n 'which': self.which,\n 'of': self.of.dump() if self.of else None,\n }\n\n # --------------------------------------------------------------------------\n # From deep.\n\n def to_surface(self, transform_state, say_state, idiolect):\n of = self.of.to_surface(transform_state, say_state, idiolect)\n return SurfaceDirection(self.which, of)\n\n # --------------------------------------------------------------------------\n # Static.\n\n @staticmethod\n def load(d, loader):\n return DeepDirection(d['which'], loader.load(d['of']))\n","repo_name":"knighton/babi","sub_path":"panoptes/ling/tree/deep/direction.py","file_name":"direction.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"9024731075","text":"\"\"\"\r\n\r\n\tCopyright Sikkema Software B.V. 2021 - All Rights Reserved\r\n\r\n\tYou may not copy, reproduce, distribute, modify or create\r\n\tderivative works, sell or offer it for sale, or use such content\r\n\tto construct any kind of database or disclose the source without\r\n\texplicit permission of the copyright holder. You may not alter\r\n\tor remove any copyright or other notices from copies of the content.\r\n\tFor permission to use the content please contact sikkemasoftware@gmail.com\r\n\r\n\tAll content and data are provided on an as-is basis. The copyright holder\r\n\tmakes no claims to the accuracy, completeness, currentness, sustainability\r\n\tor validity of the code and information and will not be liable for any\r\n\terrors, omissions, or delays in this information or any losses, injuries\r\n\tor damages arising from the use of this software. 
\r\n\r\n\"\"\"\r\n\r\n\r\nfrom Util.Const import Const\r\nfrom Util.CSVFile import CSVFile\r\nimport os, os.path\r\nimport codecs\r\nimport re\r\nfrom datetime import datetime\r\n\r\n# Class to track how often and when each item has been viewed \r\nclass Frequencies(object):\r\n\tdef __init__(self, config, path):\r\n\t\tself.m_config\t\t= config\r\n\t\tself.m_frequencies\t= {}\r\n\t\tself.m_path\t\t= path\r\n\t\t\r\n\tdef view(self, id):\r\n\t\tnow = datetime.now().strftime('%Y%m%d-%H%M%S')\r\n\t\tif not id in self.m_frequencies:\r\n\t\t\tself.m_frequencies[id] = {}\r\n\t\t\tself.m_frequencies[id]['frequency'] = 0\r\n\t\t\tself.m_frequencies[id]['last_viewed'] = \"\"\r\n\t\tself.m_frequencies[id]['frequency'] += 1\r\n\t\tself.m_frequencies[id]['last_viewed'] = now\r\n\r\n\tdef key_by_frequency(self):\r\n\t\tdata = [[key, value['frequency']] for (key, value) in self.m_frequencies.items()] \r\n\t\treturn [line[0] for line in sorted(data, key=lambda x: x[1], reverse=True)]\r\n\t\t\r\n\tdef key_by_last_viewed(self):\r\n\t\tdata = [[key, value['last_viewed']] for (key, value) in self.m_frequencies.items()] \r\n\t\treturn [line[0] for line in sorted(data, key=lambda x: x[1], reverse=True)]\r\n\t\r\n\t\t\t\r\n\tdef __enter__(self):\r\n\t\tprint(self.m_config)\r\n\t\tprint(self.m_path)\r\n\t\tprint(Const.CSV_SEPARATOR)\r\n\t\tprint(self.m_config[Const.OUTPUT_CSV_SEPARATOR])\r\n\t\twith CSVFile(self.m_path, 'r', self.m_config[Const.OUTPUT_CSV_SEPARATOR]) as infile:\r\n\t\t\tprint(infile.header)\r\n\t\t\tassert infile.header == ['id', 'frequency', 'last_viewed'], \"Util.Frequencies - Invalid header for storage file. Required {0:s}\".format(self.m_config[Const.OUTPUT_CSV_SEPARATOR].join(['id', 'frequency', 'last_viewed']))\r\n\r\n\t\t\tfor item in infile:\r\n\t\t\t\tself.m_frequencies[item['id']] = {}\r\n\t\t\t\tself.m_frequencies[item['id']]['frequency'] = int(item['frequency'])\r\n\t\t\t\tself.m_frequencies[item['id']]['last_viewed'] = item['last_viewed']\r\n\t\treturn self\r\n\t\t\r\n\tdef __exit__(self, type, value, trace):\r\n\t\twith CSVFile(self.m_path, 'w', self.m_config[Const.OUTPUT_CSV_SEPARATOR]) as outfile:\r\n\t\t\toutfile.header = ['id', 'frequency', 'last_viewed']\r\n\t\t\tfor (key, value) in self.m_frequencies.items():\r\n\t\t\t\titem = {}\r\n\t\t\t\titem['id'] = key\r\n\t\t\t\titem['frequency'] = str(value['frequency'])\r\n\t\t\t\titem['last_viewed'] = value['last_viewed']\r\n\t\t\t\toutfile.write(item)\r\n\t\t\t\t\r\n\t\t\t\r\n\t\t\r\n\tdef reset(self):\r\n\t\tfor key in self.m_frequencies.keys():\r\n\t\t\tself.m_frequencies[key]['frequency'] = 0\r\n\t\t\tself.m_frequencies[key]['last_viewed'] = \"\"\r\n\t\t\t\r\n\t\t\r\n","repo_name":"Bywire-News-Official/bywire-online-fake-news-ai-ann","sub_path":"src/Util/Frequencies.py","file_name":"Frequencies.py","file_ext":"py","file_size_in_byte":3220,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
{"seq_id":"35003072449","text":"class QuizBrain:\n def __init__(self, q_list):\n self.question_no = 0\n self.score = 0\n self.question_list = q_list\n\n def still_have_questions(self):\n return self.question_no < len(self.question_list)\n\n def next_question(self):\n current_question = self.question_list[self.question_no]\n self.question_no += 1\n ans = input(f\"Q.{self.question_no} {current_question.question}, (True/False)? \")\n self.check_answer(ans.lower(),current_question.correct_answer.lower())\n\n\n def check_answer(self,user_answer,correct_answer):\n\n if user_answer == correct_answer:\n print(\"Hurray! you got it right\")\n self.score += 1\n else:\n print(\"Dude you're so wrong. Go and learn..\")\n\n print(f\"The correct answer is: {correct_answer.upper()}\")\n print(f\"Your score: {self.score}/{self.question_no}\\n\")\n\n\n\n","repo_name":"ArslanKAS/Projects","sub_path":"QuizGame/quiz_brain.py","file_name":"quiz_brain.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
{"seq_id":"20945124317","text":"import numpy as np\r\nfrom tempfile import TemporaryFile\r\n\r\n# Parameters\r\nt0 = 0 # t_0\r\ntn = 2000 # t_max \r\neps = 0.00000001 # epsilon\r\n\r\n# Parameters and initial conditions that depend on L and/or M.\r\ndef update_parameters(gridsize, current_domain):\r\n\r\n # Discretization\r\n global L, M, dt, nt, Q_dim, Dx, Dxx, Dxxxx, k_lin\r\n L = current_domain\r\n M = gridsize\r\n dt = 0.0005 # stepsize\r\n nt = int((tn - t0) / dt) # amount of steps \r\n Q_dim = M # length of Q\r\n D = np.linspace(-M//2,M//2,M,endpoint=False)\r\n Dx = np.fft.fftshift(np.multiply(D,(2*np.pi/L)))*np.sqrt(-1+0j)\r\n Dxx = -np.fft.fftshift(np.power(np.multiply(D,(2*np.pi/L)),2))\r\n Dxxxx = np.fft.fftshift(np.power(np.multiply(D,(2*np.pi/L)),4))\r\n k_lin = -Dxx - Dxxxx # linear part of the KS equation\r\n \r\n # Solution space\r\n u = np.zeros((M,nt))\r\n Q = np.random.rand(M, Q_dim)\r\n \r\n # Initial condition\r\n x = np.linspace(start=0, stop=L-(L/M), num=M)\r\n u0 = np.cos((2 * np.pi * x)/ L) + 0.01 * np.cos((4 * np.pi * x)/L)\r\n u[:,0] = u0\r\n \r\n return u,Q,\r\n\r\n\"\"\"\r\n Define function for each derivative.\r\n\"\"\"\r\ndef u_x(u):\r\n return np.real(np.fft.ifft(np.multiply(Dx,np.fft.fft(u))))\r\n\r\ndef u_xx(u):\r\n return np.real(np.fft.ifft(np.multiply(Dxx,np.fft.fft(u))))\r\n\r\ndef u_xxxx(u):\r\n return np.real(np.fft.ifft(np.multiply(Dxxxx,np.fft.fft(u))))\r\n\r\ndef Lu(u):\r\n return (-u_xx(u) - u_xxxx(u))\r\n\r\ndef fu(u):\r\n return (-1)*np.multiply(u, u_x(u))\r\n\r\n\"\"\"\r\n IMEX RK function for calculating u_{i+1}\r\n\"\"\"\r\n# Calculate the half step using the IMEX RK method.\r\ndef u_half(u):\r\n G = np.fft.fft(np.add(np.multiply((0.5*dt),fu(u)), u))\r\n Uhat = np.divide(G,(1-np.multiply((0.5*dt), k_lin)))\r\n \r\n return np.real(np.fft.ifft(Uhat))\r\n\r\n# Calculate the full step using the IMEX RK method.\r\ndef u_plus1(u, u_half):\r\n return u + np.multiply(dt,Lu(u_half)) + np.multiply(dt,fu(u_half))\r\n\r\n\"\"\"\r\n Functions needed for the calculation of B_ii\r\n\"\"\"\r\n# Explicitly evaluates f'(u) q.\r\ndef fu_dotq(u, q):\r\n return (fu(u+eps*q) - fu(u))/eps\r\n\r\n# Define AQ.\r\ndef AQ(u, Q):\r\n \r\n # Solution space\r\n AQ = np.zeros(Q.shape)\r\n fuu = fu(u)\r\n Luu = Lu(u)\r\n \r\n for i in range(len(Q[0])):\r\n AQ[:,i] = (fu(u+eps*Q[:,i]) - fuu)/eps + Lu(Q[:,i])\r\n\r\n return AQ\r\n\r\n\"\"\"\r\n IMEX RK function for calculating q_{i+1}\\tilde{r}\r\n\"\"\"\r\n# Calculate the half step using the IMEX RK method.\r\ndef q_half(u, q):\r\n G = np.fft.fft(q + 0.5*dt*fu_dotq(u,q))\r\n Qhat = np.divide(G,(1-np.multiply((0.5*dt), k_lin)))\r\n \r\n return np.real(np.fft.ifft(Qhat))\r\n\r\n# Calculate the full step using the IMEX RK method.\r\ndef q_plus1(u_half, q, q_half):\r\n return q + np.multiply(dt,fu_dotq(u_half, q_half)) + np.multiply(dt,Lu(q_half))\r\n\r\n# Orthogonalize Q while applying QR 
factorization using the modified Gram-Schmidt method.\r\ndef MGS(Q):\r\n for m in range(len(Q[0])):\r\n for j in range(0, m):\r\n R_mj = np.vdot(Q[:,j].transpose(),Q[:,m])\r\n Q[:,m] = Q[:,m] - R_mj*Q[:,j]\r\n\r\n R_mm = np.linalg.norm(Q[:,m])\r\n Q[:,m] = Q[:,m]/R_mm\r\n \r\n return Q\r\n\r\n\"\"\"\r\n Functions that combine everything.\r\n\"\"\"\r\n# Solve the Kuramoto-Sivashinsky differential equation while calculating Q using the IMEX RK method.\r\ndef KS_Solve_and_get_Bii(u, Q):\r\n B_ii = np.zeros((M,nt-1))\r\n \r\n for i in range(nt-1):\r\n u_i = u[:,i]\r\n\r\n # IMEX RK for calculating u_{i+1}\r\n u_ihalf = u_half(u_i)\r\n u[:,i+1] = u_plus1(u_i, u_ihalf)\r\n\r\n # Solution space\r\n A_iQ_i = np.zeros(Q.shape)\r\n \r\n # Iterate through each column in Q\r\n for j in range(len(Q[0])):\r\n q_j = Q[:,j]\r\n q_ihalf = q_half(u_i, q_j)\r\n A_iQ_i[:,j] = q_plus1(u_ihalf, q_j, q_ihalf)\r\n\r\n # Compute Q_{i+1} as the QR factorization of A_iQ_i using the modified Gram-Schmidt method\r\n Q = MGS(A_iQ_i)\r\n\r\n B_ii[:,i] = np.dot(Q.transpose(),AQ(u[:,i+1], Q)).diagonal()\r\n\r\n return u, B_ii\r\n\r\n# Returns the Lyapunov exponents given the diagonal B_ii.\r\ndef calc_Lyapunov(B_ii):\r\n\r\n # Solution space\r\n lambda_i = np.zeros(len(B_ii[:,0]))\r\n\r\n # Calculate the integral as a Riemann sum.\r\n for i in range(len(B_ii[:,0])):\r\n for t in range(len(B_ii[0])):\r\n lambda_i[i] = lambda_i[i] + B_ii[i][t]*(dt)\r\n\r\n # Divide by t\r\n lambda_i = lambda_i / (tn-t0)\r\n \r\n return lambda_i\r\n\r\n# Returns the Lyapunov dimension given the Lyapunov exponents.\r\ndef calc_D_L(lambda_i):\r\n\r\n # Sort the Lyapunov exponents from the biggest to the smallest.\r\n lambda_sorted = -np.sort(-lambda_i)\r\n \r\n # Find the maximum k.\r\n k = 0\r\n for i in range(1, len(lambda_sorted)):\r\n if (sum(lambda_sorted[:i]) <= 0):\r\n break\r\n k=i\r\n\r\n # If the dimension can't be calculated, return -1.\r\n if (k == len(lambda_sorted)-1):\r\n D_L = -1\r\n else:\r\n # Calculate the Lyapunov dimension.\r\n D_L = k + (np.sum(lambda_sorted[:k]) / abs(lambda_sorted[k+1]))\r\n return D_L\r\n\r\n# Main function that calls everything.\r\ndef main(gridsize, domain):\r\n\r\n u, Q = update_parameters(gridsize, domain)\r\n u_result, B_ii = KS_Solve_and_get_Bii(u, Q)\r\n lambda_i = calc_Lyapunov(B_ii)\r\n D_L = calc_D_L(lambda_i)\r\n\r\n return B_ii, D_L\r\n\r\n\"\"\"\r\n Input and output\r\n\"\"\"\r\n\r\n# Range of the gridsize\r\nM_min = 30\r\nM_max = 70\r\nstepsize = 5\r\n\r\n# Compute for L = 36\r\nDimension_values_L36 = np.zeros(int((M_max-M_min) / stepsize))\r\n\r\nfor m in range(M_min, M_max+stepsize, stepsize):\r\n B_ii, Dimension_values_L36[int((m-M_min) / stepsize)] = main(m, 36)\r\n\r\n # Save B_ii for plots of Lyapunov exponent development.\r\n np.savetxt(\"B_ii_M\" + str(m) + \"_L36.csv\", B_ii, delimiter=\",\")\r\n \r\n # Print D_L\r\n print(\"M = \" + str(m) + \", D_L = \" + str(Dimension_values_L36[int((m-M_min) / stepsize)]))\r\n\r\n# Save D_L values of the development of M\r\nnp.savetxt(\"D_L_M\" + str(M_min) + \"to\" + str(M_max) + \"_t2000_L36.csv\", Dimension_values_L36, delimiter=\",\")","repo_name":"szasadny/Calculate-Attractor-Dimension-of-KSE","sub_path":"Main files/KS_Lyapunov_Dimension.py","file_name":"KS_Lyapunov_Dimension.py","file_ext":"py","file_size_in_byte":6207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"26053162627","text":"import json\n\nimport pystache\n\n\nclass JsonRenderer(pystache.Renderer):\n def 
__init__(self,\n file_encoding=None,\n string_encoding=None,\n decode_errors=None,\n search_dirs=None,\n file_extension=None,\n escape=None,\n partials=None,\n missing_tags=None):\n # json would be html escaped otherwise\n def escape_noop(u):\n return u\n if escape is None:\n escape = escape_noop\n return super(JsonRenderer, self).__init__(file_encoding,\n string_encoding,\n decode_errors, search_dirs,\n file_extension, escape,\n partials, missing_tags)\n\n def str_coerce(self, val):\n if val is None:\n return b''\n return json.dumps(val)\n","repo_name":"openstack/os-apply-config","sub_path":"os_apply_config/renderers.py","file_name":"renderers.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"61"} +{"seq_id":"6665832351","text":"from django.contrib.auth.hashers import make_password\nfrom django.db.models import Sum\nfrom rest_framework import serializers\n\nfrom utils.model_choices import GENDER_CHOICES, WORK_TYPE_CHOICES\nfrom manager.models import User, UserWork, Work, UserAuth\nfrom utils.errors import ParamError\nfrom utils.return_info import USER_AREADY_EXIST, TEL_AREADY_EXIST, USER_NOT_EXIST, WORK_NOT_EXIST\n\n\nclass UserSerializer(serializers.ModelSerializer):\n '''用户序列化表'''\n\n class Meta:\n model = User\n fields = ('uuid', 'name', 'address', 'gender', 'age', 'status')\n\nclass UserPostserializer(serializers.ModelSerializer):\n\n name = serializers.CharField(min_length=2,\n max_length=10,\n required=True,\n error_messages={\n 'min_length': \"姓名最少两个字符\",\n 'max_length': \"姓名最多10个字符\"})\n address = serializers.CharField(min_length=2,\n max_length=30,\n required=True,\n error_messages={\n 'min_length': \"地址最少两个字符\",\n 'max_length': \"地址最多30个字符\",})\n gender = serializers.ChoiceField(choices=GENDER_CHOICES, required=True,\n error_messages={\"required\": \"课程介绍必填\"})\n age = serializers.IntegerField(max_value=150, min_value=0,\n error_messages={\"required\": \"年龄必填\", \"min_value\": \"年龄有误\",\n \"max_value\": \"年龄有误\"})\n tel = serializers.CharField(min_length=2,\n max_length=30,\n required=True,\n error_messages={\"required\": \"手机号必填\"})\n password = serializers.CharField(min_length=6,\n max_length=12,\n required=True,\n error_messages={\n 'min_length': \"密码最少两个字符\",\n 'max_length': \"密码最多12个字符\"})\n\n def validate(self, data):\n checkUser = User.objects.filter(name=data[\"name\"], address=data[\"address\"],status=1).first()\n if checkUser:\n raise ParamError(USER_AREADY_EXIST)\n checkTel = UserAuth.objects.filter(tel=data[\"tel\"], status=1).first()\n if checkTel:\n raise ParamError(TEL_AREADY_EXIST)\n return data\n\n def create_user(self, validated_data):\n user_dict= {}\n user_dict[\"name\"] = validated_data[\"name\"]\n user_dict[\"address\"] = validated_data[\"address\"]\n user_dict[\"gender\"] = validated_data[\"gender\"]\n user_dict[\"age\"] = validated_data[\"age\"]\n user = User.objects.create(**user_dict)\n return user\n\n def create_user_auth(self, validated_data, user):\n auth_dict = {}\n auth_dict[\"tel\"] = validated_data[\"tel\"]\n auth_dict[\"userUuid\"] = user\n auth_dict[\"password\"] = make_password(validated_data[\"password\"])\n auth = UserAuth.objects.create(**auth_dict)\n return auth\n\n class Meta:\n model = User\n fields = ('uuid', 'name', 'address', 'gender', 'age', 'status', \"tel\", \"password\")\n\nclass UserUpdateserializer(serializers.ModelSerializer):\n\n name = serializers.CharField(min_length=2,\n max_length=10,\n required=True,\n error_messages={\n 
'min_length': \"姓名最少两个字符\",\n 'max_length': \"姓名最多10个字符\"})\n address = serializers.CharField(min_length=2,\n max_length=30,\n required=True,\n error_messages={\n 'min_length': \"地址最少两个字符\",\n 'max_length': \"地址最多30个字符\",})\n gender = serializers.ChoiceField(choices=GENDER_CHOICES, required=True,\n error_messages={\"required\": \"性别必填\"})\n age = serializers.IntegerField(max_value=150, min_value=0,\n error_messages={\"required\": \"年龄必填\", \"min_value\": \"年龄有误\",\n \"max_value\": \"年龄有误\"})\n tel = serializers.CharField(min_length=2,\n max_length=30,\n required=True,\n error_messages={\"required\": \"手机号必填\"})\n password = serializers.CharField(min_length=6,\n max_length=12,\n required=True,\n error_messages={\n 'min_length': \"密码最少两个字符\",\n 'max_length': \"密码最多12个字符\"})\n\n\n def check_data(self, data, uuid):\n checkUser = User.objects.exclude(uuid=uuid).filter(name=data[\"name\"], address=data[\"address\"], status=1).first()\n if checkUser:\n raise ParamError(USER_AREADY_EXIST)\n checkTel = UserAuth.objects.exclude(userUuid__uuid=uuid).filter(tel=data[\"tel\"]).first()\n if checkTel:\n raise ParamError(TEL_AREADY_EXIST)\n return data\n\n def update_user(self, instance, validated_data):\n instance.name = validated_data.get(\"name\")\n instance.address = validated_data.get(\"address\")\n instance.gender = validated_data.get(\"gender\")\n instance.age = validated_data.get(\"age\")\n instance.save()\n return instance\n\n def update_auth(self, instance, validated_data):\n auth = instance.userAuthkUuid.first()\n auth.tel = validated_data.get(\"tel\")\n auth.password = make_password(validated_data.get(\"password\"))\n auth.save()\n return auth\n\n class Meta:\n model = User\n fields = ('uuid', 'name', 'address', 'gender', 'age', 'status', \"tel\", \"password\")\n\nclass WorkSerializer(serializers.ModelSerializer):\n '''事务表'''\n\n totalMoney = serializers.SerializerMethodField()\n\n def get_totalMoney(self, obj):\n allMoney = UserWork.objects.annotate(num_money=Sum('money')).filter(workUuid=obj.uuid).values('money')\n if len(allMoney) == 0:\n return 0\n return allMoney[0]['num_money']\n\n class Meta:\n model = Work\n fields = ('userUuid', 'uuid', 'type', 'name', 'startTime', 'endTime', 'remarks', 'totalMoney')\n\n\nclass WorkPostserializer(serializers.ModelSerializer):\n\n userUuid = serializers.CharField(min_length=2,\n max_length=64,\n required=True,\n error_messages={\n \"required\": \"事务用户必填\"})\n\n name = serializers.CharField(min_length=2,\n max_length=50,\n required=True,\n error_messages={\n 'min_length': \"姓名最少两个字符\",\n 'max_length': \"姓名最多50个字符\",\n \"required\": \"名称必填\"})\n\n type = serializers.ChoiceField(choices=WORK_TYPE_CHOICES, required=True,\n error_messages={\"required\": \"事务类型必填\"})\n\n startTime = serializers.IntegerField(required=True,\n error_messages={\n \"required\": \"开始时间必填\"})\n endTime = serializers.IntegerField(required=True,\n error_messages={\n \"required\": \"结束时间必填\"})\n remarks = serializers.CharField(required=False)\n\n def validate(self, data):\n userUuid = data[\"userUuid\"]\n user = User.objects.filter(uuid=userUuid,status=1).first()\n if not user:\n raise ParamError(USER_NOT_EXIST)\n return data\n\n def create_work(self, validated_data):\n work = Work.objects.create(**validated_data)\n return work\n\n def update_work(self, instance, validate_data):\n instance.userUuid = validate_data.get('userUuid')\n instance.type = validate_data.get('type')\n instance.name = validate_data.get('name')\n instance.startTime = validate_data.get('startTime')\n 
instance.endTime = validate_data.get('endTime')\n instance.remarks = validate_data.get('remarks')\n instance.save()\n return instance\n\n class Meta:\n model = Work\n fields = ('userUuid', 'type', 'name', 'startTime', 'endTime', 'remarks')\n\n\nclass UserWorkSerializer(serializers.ModelSerializer):\n '''用户送礼表'''\n\n class Meta:\n model = UserWork\n fields = ('workUuid', 'uuid', 'name', 'remarks', 'money', 'quilt', 'woollen', 'fireworks', 'artillery', 'wreath', 'status')\n\nclass UserWorkPostserializer(serializers.ModelSerializer):\n\n workUuid = serializers.CharField(min_length=2,\n max_length=64,\n required=True,\n error_messages={\n \"required\": \"事务必填\"})\n name = serializers.CharField(min_length=2,\n max_length=50,\n required=True,\n error_messages={\n 'min_length': \"姓名最少两个字符\",\n 'max_length': \"姓名最多50个字符\",\n \"required\": \"送礼人姓名必填\"})\n remarks = serializers.CharField(required=False)\n money = serializers.IntegerField(required=True,\n error_messages={\"required\": \"送礼金额必填\"})\n quilt = serializers.IntegerField(required=True,\n error_messages={\"required\": \"被子数量必填\"})\n woollen = serializers.IntegerField(required=True,\n error_messages={\"required\": \"毛毯数量必填\"})\n fireworks = serializers.IntegerField(required=True,\n error_messages={\"required\": \"烟花数量必填\"})\n artillery = serializers.IntegerField(required=True,\n error_messages={\"required\": \"火炮数量必填\"})\n wreath = serializers.IntegerField(required=True,\n error_messages={\"required\": \"花圈数量必填\"})\n\n def validate(self, data):\n work = Work.objects.filter(uuid=data[\"workUuid\"],status=1).first()\n if not work:\n raise ParamError(WORK_NOT_EXIST)\n return data\n\n def create_user_work(self,validate_data):\n user_work = UserWork.objects.create(**validate_data)\n return user_work\n\n def update_user_work(self,instance,validate_data):\n instance.name = validate_data.get(\"name\")\n instance.remarks = validate_data.get(\"remarks\")\n instance.workUuid = validate_data.get(\"workUuid\")\n instance.money = validate_data.get(\"money\")\n instance.quilt = validate_data.get(\"quilt\")\n instance.woollen = validate_data.get(\"woollen\")\n instance.fireworks = validate_data.get(\"fireworks\")\n instance.artillery = validate_data.get(\"artillery\")\n instance.wreath = validate_data.get(\"wreath\")\n instance.save()\n return instance\n\n class Meta:\n model = UserWork\n fields = ('workUuid', 'name', 'remarks', 'money', 'quilt', 'woollen', 'fireworks', 'artillery', 'wreath')","repo_name":"Fish-pro/gift_server","sub_path":"manager/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":12218,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"44167617250","text":"\"\"\"\nNa maioria das linguagens de programação, quando precisamos ou desejamos inverter o\nvalor entre duas variaveis, fazemos o uso de uma variavel auxiliar para armazenar\ntemporariamente o valor de uma variavel a fim de possibilitar a troca. 
Essa solução\né valida, porém em Python esse processo pode ser muito mais simples e de outra forma.\n\"\"\"\n\n# 1° solução : tradicional\n\nx = 6\ny = 4\n\nz = x\nx = y\ny = z\n\nprint()\nprint(f'X = {x} | Y = {y}')\n\n# 2° solução: pythonica\n\nx, y = y, x\n\nprint(f'X = {x} | Y = {y}')\n","repo_name":"WillJR183/python_learnings","sub_path":"python_basico_logica_programacao/aula26_trocando_valor_entre_variaveis.py","file_name":"aula26_trocando_valor_entre_variaveis.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23835542768","text":"import numpy as np\nimport pandas as pd\nfrom pytest import raises\nfrom scipy.spatial import Voronoi\n\nfrom tyssue import BulkGeometry, Epithelium, config, generation\nfrom tyssue.core.sheet import Sheet, get_opposite\nfrom tyssue.generation import (\n extrude,\n from_2d_voronoi,\n from_3d_voronoi,\n hexa_grid2d,\n hexa_grid3d,\n subdivide_faces,\n)\n\n\ndef test_3faces():\n\n datasets, _ = generation.three_faces_sheet()\n assert datasets[\"edge\"].shape[0] == 18\n assert datasets[\"face\"].shape[0] == 3\n assert datasets[\"vert\"].shape[0] == 13\n\n\ndef test_from_3d_voronoi():\n\n grid = hexa_grid3d(6, 4, 3)\n datasets = from_3d_voronoi(Voronoi(grid))\n assert datasets[\"vert\"].shape[0] == 139\n assert datasets[\"edge\"].shape[0] == 1272\n assert datasets[\"face\"].shape[0] == 282\n assert datasets[\"cell\"].shape[0] == 70\n bulk = Epithelium(\"bulk\", datasets, config.geometry.bulk_spec())\n bulk.reset_index()\n bulk.reset_topo()\n BulkGeometry.update_all(bulk)\n bulk.sanitize()\n\n # GH 137\n assert (\n bulk.edge_df.groupby(\"face\").apply(lambda df: df[\"cell\"].unique().size).max()\n == 1\n )\n assert bulk.validate()\n\n\ndef test_from_2d_voronoi():\n\n grid = hexa_grid2d(6, 4, 1, 1)\n datasets = from_2d_voronoi(Voronoi(grid))\n assert datasets[\"vert\"].shape[0] == 32\n assert datasets[\"edge\"].shape[0] == 82\n assert datasets[\"face\"].shape[0] == 24\n\n\ndef test_extrude():\n\n datasets, specs = generation.three_faces_sheet()\n sheet = Sheet(\"test\", datasets, specs)\n extruded = extrude(sheet.datasets, method=\"translation\")\n assert extruded[\"cell\"].shape[0] == 3\n assert extruded[\"face\"].shape[0] == 24\n assert extruded[\"edge\"].shape[0] == 108\n assert extruded[\"vert\"].shape[0] == 26\n\n\ndef test_subdivide():\n\n datasets, specs = generation.three_faces_sheet()\n sheet = Sheet(\"test\", datasets, specs)\n subdivided = subdivide_faces(sheet, [0])\n assert subdivided[\"face\"].shape[0] == 3\n assert subdivided[\"edge\"].shape[0] == 30\n assert subdivided[\"vert\"].shape[0] == 14\n\n datasets_3d = extrude(datasets, method=\"translation\")\n sheet_3d = Sheet(\"test3d\", datasets_3d, specs)\n subdivided_3d = subdivide_faces(sheet_3d, [0])\n assert subdivided_3d[\"face\"].shape[0] == 24\n assert subdivided_3d[\"edge\"].shape[0] == 120\n assert subdivided_3d[\"vert\"].shape[0] == 27\n assert subdivided_3d[\"cell\"].shape[0] == 3\n\n\ndef test_extrude_invalid_method():\n datasets, _ = generation.three_faces_sheet()\n with raises(ValueError):\n extrude(datasets, method=\"invalid_method\")\n\n\ndef test_hexagrid3d_noise():\n np.random.seed(1)\n grid = hexa_grid3d(6, 4, 3, noise=0.1)\n datasets = from_3d_voronoi(Voronoi(grid))\n assert datasets[\"vert\"].shape[0] == 318\n assert datasets[\"edge\"].shape[0] == 3300\n assert datasets[\"face\"].shape[0] == 670\n assert datasets[\"cell\"].shape[0] == 72\n\n\ndef test_anchors():\n datasets, 
specs = generation.three_faces_sheet()\n sheet = Sheet(\"test_anchors\", datasets, specs)\n\n sheet.edge_df[\"opposite\"] = get_opposite(sheet.edge_df)\n\n expected_dict = {\n 18: [1, 13],\n 19: [2, 14],\n 20: [3, 15],\n 21: [4, 16],\n 22: [5, 17],\n 23: [6, 18],\n 24: [7, 19],\n 25: [8, 20],\n 26: [9, 21],\n 27: [10, 22],\n 28: [11, 23],\n 29: [12, 24],\n }\n\n expected_res = pd.DataFrame.from_dict(expected_dict, orient=\"index\")\n expected_res.columns = [\"srce\", \"trgt\"]\n generation.create_anchors(sheet)\n\n res_srce_trgt_anchors = sheet.edge_df.loc[18:, [\"srce\", \"trgt\"]]\n assert res_srce_trgt_anchors.equals(expected_res)\n\n\ndef test_extract():\n datasets, specs = generation.three_faces_sheet()\n sheet = Sheet(\"test_sheet_extract_coordinate\", datasets, specs)\n sheet.face_df.loc[0, \"is_alive\"] = 0\n subsheet = sheet.extract(\"is_alive\")\n\n assert subsheet.face_df[\"is_alive\"].all()\n assert subsheet.Nf == 2\n\n\ndef test_sheet_extract_coordinate():\n grid = hexa_grid2d(6, 4, 3, 3)\n datasets = from_2d_voronoi(Voronoi(grid))\n sheet = Sheet(\"test_extract_bounding_box\", datasets)\n subsheet = sheet.extract_bounding_box(\n [sheet.face_df[\"x\"].min(), sheet.face_df[\"x\"].max() / 2],\n [sheet.face_df[\"y\"].min(), sheet.face_df[\"y\"].max() / 2],\n )\n assert subsheet.face_df[\"x\"].max() <= sheet.face_df[\"x\"].max() / 2\n assert subsheet.face_df[\"x\"].min() >= sheet.face_df[\"x\"].min()\n assert subsheet.face_df[\"y\"].max() <= sheet.face_df[\"y\"].max() / 2\n assert subsheet.face_df[\"y\"].min() >= sheet.face_df[\"y\"].min()\n assert subsheet.face_df[\"z\"].max() <= sheet.face_df[\"z\"].max()\n assert subsheet.face_df[\"z\"].min() >= sheet.face_df[\"z\"].min()\n","repo_name":"DamCB/tyssue","sub_path":"tests/generation/test_generation.py","file_name":"test_generation.py","file_ext":"py","file_size_in_byte":4717,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"61"} +{"seq_id":"23641133901","text":"#!/usr/bin/env python\n\nt = int(input())\nfor case in range(1,t+1):\n nums = tuple(map(int,input().strip().split()))\n n,s,p = nums[:3]\n scores = sorted(nums[3:])\n total = 0\n for i in scores:\n div, rem = i//3, i % 3\n if p == 0:\n total += 1\n continue\n elif i == 0:\n continue\n if rem == 0:\n if div >= p:\n total += 1\n elif div == p-1 and s > 0:\n s -= 1\n total += 1\n elif rem == 1 and div+1 >= p:\n total += 1\n elif rem == 2:\n if div+1 >= p:\n total += 1\n elif div == p-2 and s > 0:\n s -= 1\n total += 1\n\n print('Case #{}: {}'.format(case,total))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_96/636.py","file_name":"636.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20004038637","text":"import sys\nimport os\nimport plac\nfrom pathlib import Path\nimport yaml\nimport pandas as pd\n\nsys.path.append('/home/jeremy/Library')\n\nfrom storage.cherrytree_xml import CherryTree\nfrom utility.config import load_config\n\ndef main(config):\n \"\"\"Match files in target directory with nodes containing file link\n config fields: content_index, content_base_name, content_file_dir\n output: html or csv\n \"\"\"\n try:\n c = load_config(config)\n except:\n exit()\n cpath = Path(config).with_suffix('.yaml')\n if not cpath.exists():\n print('no config file found')\n exit()\n cf = yaml.load(cpath.read_text())\n\n ct = CherryTree(cf['content_index'])\n\n cbn = cf['content_base_name']\n cfd = 
cf['content_file_dir']\n\n    content_base_node = ct.find_node_by_name(cbn)\n\n    if not content_base_node:\n        print(f'{cbn} not in index')\n        exit()\n\n    for node in content_base_node.descendants:\n        for item in node.text:\n            print(item)\n\n\n\n\n\nif __name__ == '__main__':\n    plac.call(main)\n","repo_name":"jallanxjallan/scripts","sub_path":"write_content_files.py","file_name":"write_content_files.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"19008924183","text":"#!/usr/bin/env python\n\n# This script will convert WPC3 JSON from Tellstick to W, Wh and kWh.\n# https://github.com/senilio/tellstick-wpc-proxy\n\nimport web\nimport json\nimport time\ntry:\n    from urllib.request import urlopen, Request\nexcept ImportError:\n    from urllib2 import urlopen, Request\n\n# URL to your Tellstick local API sensor\nurl = 'http://192.168.88.17/api/sensor/info?id=9'\n\n# Tellstick API key\napi_key = 'Bearer bloody_long_key'\n\n# Electric meter blink factor\n# blinkFactor is 1000 divided by the Electric meter parameter.\n# Usually 1000 or 10000 impulses per kWh\nblinkFactor = 1000.0/10000.0\n\n# Create urllib object\nreq = Request(url)\nreq.add_header('Authorization', api_key)\n\n# Path to execute class\ncommands = ('/get_wpc', 'get_wpc')\napp = web.application(commands, globals())\n\n# How often in seconds are you polling for new data?\n# This is used to properly calculate momentary power usage [Watt].\npoll_frequency = 12\n\n# Init variables\nprev_count = 0\nprev_time = 0\n\n# Class to handle the WPC3 data\nclass get_wpc:\n    def GET(self):\n        # Use prev_count and prev_time from global()\n        global prev_count, prev_time\n\n        # Read data from Tellstick and create JSON object\n        response = urlopen(req).read().decode('utf-8')\n        jsonvar = json.loads(response)\n\n        # Set current time\n        current_time = time.time()\n\n        # Get current counter and multiplier\n        current_value = jsonvar['data'][0]['value']\n        multiplier = jsonvar['data'][1]['value']\n\n        # Do the math to convert Tellstick data to real count\n        if current_value >= 0.0:\n            count = multiplier*4096 + 10*current_value\n        else:\n            count = multiplier*4096 - 10*current_value + 2048\n\n        # Fix for messed up first poll\n        if prev_count == 0:\n            prev_count = count\n        if prev_time == 0:\n            prev_time = current_time - poll_frequency\n\n        # Calc diff between latest and 2nd latest count\n        count_diff = count - prev_count\n        prev_count = count\n\n        # Time since last poll. 
Used to calc momentary W.\n        time_diff = current_time - prev_time\n\n        # Check if count has overflowed\n        if count_diff < 0:\n            count_diff += 393216\n\n        # Calc usage\n        powerW = count_diff*blinkFactor*60.0*(60.0/time_diff)\n        energykWh = count_diff*blinkFactor / 1000\n\n        # Submit new value and rename variables\n        jsonvar['data'][0]['name'] = 'powerW'\n        jsonvar['data'][1]['name'] = 'energykWh'\n        jsonvar['data'][0]['value'] = powerW\n        jsonvar['data'][1]['value'] = energykWh\n\n        # Set prev_time\n        prev_time = current_time\n\n        # Return modified JSON\n        return json.dumps(jsonvar, indent=4, sort_keys=True)\n\nif __name__ == \"__main__\":\n    # Start web service\n    app.run()\n","repo_name":"senilio/tellstick-wpc-proxy","sub_path":"tellstick-wpc-proxy.py","file_name":"tellstick-wpc-proxy.py","file_ext":"py","file_size_in_byte":2824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"41771316827","text":"from selenium import webdriver\nimport time\nimport re\n\n\ndef get_ss(aim_qq,name):\n    # Instantiate a PhantomJS browser\n    driver = webdriver.PhantomJS(r'D:\\\phantomjs-2.1.1-windows\\\bin\\\phantomjs.exe')\n    # driver = webdriver.Firefox()\n\n    driver.get('http://qzone.qq.com')\n    # The login form lives in a frame on the page, so switch to that frame\n    driver.switch_to_frame('login_frame')\n    # Use selectors to find the form elements, simulate typing, and click the button to submit\n    driver.find_element_by_id('switcher_plogin').click()\n    # time.sleep(3)\n    driver.find_element_by_id('u').clear()\n    
driver.find_element_by_id('u').send_keys('951948132')\n driver.find_element_by_id('p').clear()\n driver.find_element_by_id('p').send_keys('hyggbgb12345**')\n driver.find_element_by_id('login_button').click()\n time.sleep(5)\n # driver.implicitly_wait(10)\n driver.switch_to.default_content()#回主框架\n driver.get(r'https://user.qzone.qq.com/'+aim_qq+r'/311')\n time.sleep(3)\n driver.switch_to_frame('app_canvas_frame')\n temp=driver.page_source\n driver.switch_to.default_content()\n a=r'title=\"末页\".*?'\n b=re.compile(a)\n c=re.findall(b,temp)[0]\n a=r'title=\"末页\".*?'\n b=re.compile(a)\n d=re.findall(b,c)\n c=c.replace(d[0],'')\n c = c.replace(r'', '')\n c=int(c)\n pages=[]\n try:\n for i in range(0,c):\n driver.execute_script(\"var q=document.documentElement.scrollTop=document.body.clientHeight\")\n driver.switch_to_frame('app_canvas_frame')\n temp=driver.page_source\n pages.append(temp)\n driver.find_element_by_id('pager_next_'+str(i)).click()\n time.sleep(5)\n driver.switch_to.default_content()\n print(name+' 共'+str(c)+'页,正在爬取第'+str(i+1)+'页......')\n except:\n pass\n # i=1\n # while i<10:\n # driver.execute_script(\"var q=document.documentElement.scrollTop=document.body.clientHeight\")\n # # driver.execute_script('window.scrollTo(0, document.body.scrollHeight')\n # time.sleep(3)\n # i=i+1\n # driver.execute_script(\"var q=document.documentElement.scrollTop=0\")\n f=open(r'D:\\Personal\\Desktop\\各种python小项目\\QQ空间'+'\\\\'+name+'.html','w+',encoding='utf-8')\n for temp in pages:\n f.writelines(temp)\n f.close()\n driver.quit()\n # soup=BeautifulSoup(a,'lxml')\n # c=soup.find_all('ul',id=\"feed_friend_list\")\n # b=r'user-info.+?'\n # c=re.findall(re.compile(b),a)\n # c=soup.find_all(re.compile(b))\n # print(c)\n\nif __name__=='__main__':\n # aim_qqs = {'玉米':'2262957290',\n # '我':'951948132',\n # '东木':'992443086',\n # '江总':'2694571928',\n # '施宇琦':'1159436724'\n # '高铖': '664308541',\n # '高晨阳': '331753380'\n # }\n aim_qqs = {'吴麟':'1664780080'}\n keys=aim_qqs.keys()\n for key in keys:\n get_ss(aim_qqs[key],key)","repo_name":"fly-bear/mygit","sub_path":"python各类小程序/QQ空间/qzone.py","file_name":"qzone.py","file_ext":"py","file_size_in_byte":2992,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"61"} +{"seq_id":"44061378451","text":"def method_cache(seconds=0):\n \"\"\"\n A `seconds` value of `0` means that we will not memcache it.\n\n If a result is cached on instance, return that first. If that fails, check\n memcached. 
If all else fails, hit the db and cache on instance and in\n    memcache.\n\n    ** NOTE: Methods that return None are always \"recached\".\n    \"\"\"\n\n    from hashlib import sha224\n    from django.core.cache import cache\n\n    def inner_cache(method):\n\n        def x(instance, *args, **kwargs):\n            key = sha224(str(method.__module__).encode()\n                         + str(method.__name__).encode()\n                         + str(instance.id).encode()\n                         + str(args).encode()\n                         + str(kwargs).encode()).hexdigest()\n\n            if hasattr(instance, key):\n                # has on class cache, return that\n                result = getattr(instance, key)\n            else:\n                result = cache.get(key)\n\n                if result is None:\n                    # all caches failed, call the actual method\n                    result = method(instance, *args, **kwargs)\n\n                    # save to memcache and class attr\n                    if seconds and isinstance(seconds, int):\n                        cache.set(key, result, seconds)\n                    setattr(instance, key, result)\n\n            return result\n\n        return x\n\n    return inner_cache\n","repo_name":"vegaelle/atable","sub_path":"atable/recipes/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"}
{"seq_id":"23210349104","text":"#!/usr/bin/python3\n# coding:utf-8\n\nimport sys,os\nfrom PyQt5 import QtWidgets,QtGui\nVERSION='0.2.1'\n\ndef main():\n    app=QtWidgets.QApplication(sys.argv)\n    screen=Form()\n    screen.show()\n\n    sys.exit(app.exec_())\n\nclass PicPanel(QtWidgets.QWidget):\n    def __init__(self,pix=None,pixdir='',parent=None):\n        super().__init__(parent)\n        self.BW=False\n        self.cursel=[0,0,0]\n        self.pix=self._populatepix(pixdir) if pix==None else pix\n        self._initLayout()\n\n    def _populatepix(self, pixdir):\n        pix={}\n        for f0 in os.listdir(pixdir):\n            f0d=os.path.join(pixdir,f0)\n            pix[f0]={}\n            for f1 in os.listdir(f0d):\n                f1d=os.path.join(f0d,f1)\n                pix[f0][f1]={}\n                for f2 in os.listdir(f1d):\n                    f2d=os.path.join(f1d,f2)\n                    
pix[f0][f1][f2]=[os.path.join(f2d,s) for s in os.listdir(f2d)]\n if len(pix[f0][f1][f2])>1:\n print('Warning: Ignored second file in same folder:\\n\\t','\\n\\t'.join(pix[f0][f1][f2]))\n return pix\n\n def updateList0(self):\n self.lists[0].clear()\n self.lists[0].addItems(sorted([s for s in self.pix]))\n self.lists[0].setCurrentRow(0)\n\n def updateList1(self):\n self.lists[1].clear()\n self.lists[1].addItems(sorted([s for s in self.pix[self.lists[0].currentItem().text()]]))\n self.lists[1].setCurrentRow(0)\n\n def updateList2(self):\n self.lists[2].clear()\n [t1,t2]=[self.lists[0].currentItem().text(),\n self.lists[1].currentItem().text()]\n self.lists[2].addItems(sorted([s for s in self.pix[t1][t2]]))\n self.lists[2].setCurrentRow(0)\n\n def updateImage(self):\n [t1,t2,t3]=[self.lists[0].currentItem().text(),\n self.lists[1].currentItem().text(),\n self.lists[2].currentItem().text()]\n self.txt.setText(self.pix[t1][t2][t3][0])\n if not self.BW:\n limg=QtGui.QPixmap(self.pix[t1][t2][t3][0])\n else:\n limg0=QtGui.QImage(self.pix[t1][t2][t3][0])\n for y in range(limg0.height()):\n for x in range(limg0.width()):\n color=QtGui.QColor(limg0.pixel(x,y))\n pcol=color.getRgb()[:-1]\n mn=round(sum(pcol)/3)\n limg0.setPixel(x,y,QtGui.QColor(mn,mn,mn).rgb())\n limg=QtGui.QPixmap(limg0)\n \n limg.scaledToWidth(True)\n self.img.setPixmap(limg)\n \n def _initLayout(self):\n def updateList1(row):\n if row!=-1:\n self.updateList1()\n \n def updateList2(row):\n if row!=-1:\n self.updateList2()\n\n def updateImage(row):\n if row!=-1:\n self.updateImage()\n\n self.txt=QtWidgets.QLineEdit()\n self.lists=[]\n for y in range(3): # a/b/c\n self.lists.append(QtWidgets.QListWidget())\n self.img=QtWidgets.QLabel()\n self.img.setScaledContents(True)\n\n self.updateList0()\n self.updateList1()\n self.updateList2()\n self.updateImage()\n \n VB0=QtWidgets.QVBoxLayout()\n HB1=QtWidgets.QHBoxLayout()\n for i in range(3):\n HB1.addWidget(self.lists[i])\n VB0.addWidget(self.img)\n VB0.addWidget(self.txt)\n VB0.addLayout(HB1)\n \n self.lists[0].currentRowChanged.connect(updateList1)\n self.lists[1].currentRowChanged.connect(updateList2)\n self.lists[2].currentRowChanged.connect(updateImage)\n self.setLayout(VB0)\n\n def ToggleBW(self,togbool):\n self.BW=togbool\n self.updateImage()\n\n \nclass Form(QtWidgets.QWidget):\n def __init__(self,parent=None):\n super().__init__(parent)\n if not os.path.isfile('Settings.conf'):\n QtWidgets.QMessageBox.warning(self,'No Settings.conf file found','The settings file could not be found. Please specify the image directory.')\n self.pixdir=self.getPath()\n else:\n self.pixdir=self.getpixdir()\n self.pix=self.populatepix()\n self.mbar=self._initMenu()\n self._initLayout()\n\n def BWToggle(self,togbool):\n self.panels[0].ToggleBW(togbool)\n self.panels[1].ToggleBW(togbool)\n\n def _initLayout(self):\n self.panels=[PicPanel(self.pix,self.pixdir,self),PicPanel(self.pix,self.pixdir,self)]\n\n self.VB=QtWidgets.QVBoxLayout()\n self.VB.addWidget(self.mbar)\n\n HB0=QtWidgets.QHBoxLayout()\n HB0.addWidget(self.panels[0])\n HB0.addWidget(self.panels[1])\n\n self.VB.addLayout(HB0)\n \n self.setLayout(self.VB)\n self.setWindowTitle('Fly Viewer v. 
{}'.format(VERSION))\n\n    def _initMenu(self):\n        mbar=QtWidgets.QMenuBar() \n        mFile=mbar.addMenu('&File')\n        mEdit=mbar.addMenu('&Edit')\n        mPath=QtWidgets.QAction('&Change Directory',mbar)\n        mQuit=QtWidgets.QAction('&Quit',mbar)\n        mBW=QtWidgets.QAction('&Black and White',mbar,checkable=True)\n\n        mPath.triggered.connect(self.getPath)\n        mQuit.triggered.connect(self.close)\n        mBW.triggered.connect(self.BWToggle)\n\n        mFile.addAction(mPath)\n        mFile.addAction(mQuit)\n        mEdit.addAction(mBW)\n\n        return mbar\n\n    def getPath(self):\n        confpath=QtWidgets.QFileDialog.getExistingDirectory(self,'Image Directory','/mnt/',QtWidgets.QFileDialog.ShowDirsOnly)\n        while not os.path.isdir(confpath):\n            QtWidgets.QMessageBox.warning(self,'No such directory','The image directory was not found. Please choose a valid existing directory.')\n            confpath=QtWidgets.QFileDialog.getExistingDirectory(self,'Image Directory','/mnt/',QtWidgets.QFileDialog.ShowDirsOnly)\n        with open('Settings.conf','w') as settings:\n            settings.write('pixdir=%s'%confpath)\n        return confpath\n    \n    def getpixdir(self):\n        with open('Settings.conf','r') as f:\n            for line in f:\n                if line[:7]=='pixdir=':\n                    pixdir=line.split('=')[1].replace('\\n','')+'/'\n        if not os.path.isdir(pixdir):\n            chdir=QtWidgets.QMessageBox.question(self,'No such directory','Settings.conf points to a nonexistent directory. Change directory?',\n                                                 QtWidgets.QMessageBox.Yes|QtWidgets.QMessageBox.No,\n                                                 QtWidgets.QMessageBox.No)\n            if chdir==QtWidgets.QMessageBox.Yes:\n                return self.getPath()\n            else:\n                return self.getpixdir()\n        else:\n            return pixdir\n\n    def populatepix(self): \n        pix={}\n        for f0 in os.listdir(self.pixdir):\n            f0d=os.path.join(self.pixdir,f0)\n            pix[f0]={}\n            for f1 in os.listdir(f0d):\n                f1d=os.path.join(f0d,f1)\n                pix[f0][f1]={}\n                for f2 in os.listdir(f1d):\n                    f2d=os.path.join(f1d,f2)\n                    pix[f0][f1][f2]=[os.path.join(f2d,s) for s in os.listdir(f2d)]\n                    if len(pix[f0][f1][f2])>1:\n                        print('Warning: Ignored second file in same folder:\\n\\t','\\n\\t'.join(pix[f0][f1][f2]))\n        return pix\n    \n\nif __name__=='__main__':\n    main()\n","repo_name":"helasraizam/FlyViewer","sub_path":"FlyViewer.py","file_name":"FlyViewer.py","file_ext":"py","file_size_in_byte":7379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"31343061073","text":"import random\nimport os\nimport numpy as np\nfrom logging import getLogger, StreamHandler, Formatter, FileHandler, DEBUG\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nimport timm\n\n\n# local files\nimport os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), 'src'))\nfrom src.config import config\nfrom src.dataset import CassavaDataset\n\n\n# keep PyTorch runs reproducible\ndef seed_torch(seed=42):\n    random.seed(seed)\n    os.environ['PYTHONHASHSEED'] = str(seed)\n    np.random.seed(seed)\n    torch.manual_seed(seed)\n    torch.cuda.manual_seed(seed)\n    torch.backends.cudnn.deterministic = True\n    torch.backends.cudnn.benchmark = False  # benchmark mode picks nondeterministic kernels, so it must stay off for reproducibility\n\n\ndef setup_logger(log_folder, modname=__name__):\n    logger = getLogger(modname)\n    logger.setLevel(DEBUG)\n    \n    sh = StreamHandler()\n    sh.setLevel(DEBUG)\n    formatter = Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n    sh.setFormatter(formatter)\n    logger.addHandler(sh)\n    \n    fh = FileHandler(log_folder)\n    fh.setLevel(DEBUG)\n    fh_formatter = Formatter('%(asctime)s - %(filename)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s')\n    fh.setFormatter(fh_formatter)\n    logger.addHandler(fh)\n    return logger\n\n\ndef get_model(model_name, 
pretrained=True, num_classes=1000):\n model = timm.create_model(model_name, pretrained=pretrained)\n num_features = model.classifier.in_features\n model.classifier = nn.Linear(num_features, num_classes)\n model.to(config.device)\n \n return model\n\n\ndef get_dataloaders_dict(train_data, val_data):\n train_dataset = CassavaDataset(train_data, mode=\"train\")\n train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=config.batch_size, num_workers=config.num_workers, pin_memory=True, drop_last=True)\n\n val_dataset = CassavaDataset(val_data, mode=\"val\")\n val_dataloader = DataLoader(val_dataset, shuffle=False, batch_size=config.batch_size, num_workers=config.num_workers, pin_memory=True)\n\n return {\"train\": train_dataloader, \"val\": val_dataloader}","repo_name":"lunarmlcat/cassava-leaf-disease-classification","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19170620151","text":"# -------------------------------------------------------------------------------\n# modules\n#\nfrom netCDF4 import Dataset\nimport xarray as xr\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom plotcosmomap import plotcosmo04sm_notick, pole04, colorbar\nimport cartopy.crs as ccrs\nfrom numpy import inf\nimport matplotlib.gridspec as gridspec\nimport cmcrameri.cm as cmc\nfrom matplotlib.colors import BoundaryNorm\nfrom matplotlib.ticker import MaxNLocator\nfrom mycolor import custom_div_cmap, cbr_wet, cbr_drywet, drywet, custom_seq_cmap_, wind, hotcold, conv\nfrom pyproj import Transformer\nimport scipy.ndimage as ndimage\nimport matplotlib\n\n# %% -------------------------------------------------------------------------------\n# read data\nsims = ['CTRL04', 'TRED04']\nseasons = \"JJA\"\n\n# --- edit here\nctrlpath = \"/project/pr133/rxiang/data/cosmo/EAS04_ctrl/monsoon/CAPE_ML/smr\"\ntopo2path = \"/project/pr133/rxiang/data/cosmo/EAS04_topo2/monsoon/CAPE_ML/smr\"\npaths = [ctrlpath, topo2path]\ndata = {}\nvars = ['CAPE_ML']\n\n[pole_lat04, pole_lon04, lat04, lon04, rlat04, rlon04, rot_pole_crs04] = pole04()\n\nlb = [['a', 'b', 'c']]\n\nfor i in range(len(sims)):\n sim = sims[i]\n path = paths[i]\n data[sim] = {}\n f = xr.open_dataset(f'{path}/01-05.CAPE_ML.smr.nc')\n ds = f[\"CAPE_ML\"].values[:, :]\n data[sim][\"CAPE\"] = np.nanmean(ds, axis=0)\n\ndata['diff'] = {}\ndata['diff'][\"CAPE\"] = data['TRED04'][\"CAPE\"] - data['CTRL04'][\"CAPE\"]\n\n# %% -------------------------------------------------------------------------------\n# plot\nar = 1.0 # initial aspect ratio for first trial\nwi = 9.5 # height in inches #15\nhi = 3 # width in inches #10\nncol = 3 # edit here\nnrow = 1\naxs, cs, ct, topo, q, qk, topo1 = np.empty(shape=(nrow, ncol), dtype='object'), np.empty(shape=(nrow, ncol),\n dtype='object'), \\\n np.empty(shape=(nrow, ncol), dtype='object'), np.empty(shape=(nrow, ncol),\n dtype='object'), \\\n np.empty(shape=(nrow, ncol), dtype='object'), np.empty(shape=(nrow, ncol),\n dtype='object'), np.empty(\n shape=(nrow, ncol), dtype='object')\n\nfig = plt.figure(figsize=(wi, hi))\ngs1 = gridspec.GridSpec(1, 2, left=0.06, bottom=0.024, right=0.575,\n top=0.97, hspace=0.1, wspace=0.1, width_ratios=[1, 1])\ngs2 = gridspec.GridSpec(1, 1, left=0.665, bottom=0.024, right=0.91,\n top=0.97, hspace=0.01, wspace=0.1)\n\nlevel1 = MaxNLocator(nbins=20).tick_values(0, 2000)\ncmap1 = cmc.roma_r\nnorm1 = BoundaryNorm(level1, ncolors=cmap1.N, 
clip=True)\n\nlevel2 = MaxNLocator(nbins=20).tick_values(-400, 200)\ncmap2 = custom_div_cmap(21, cmc.vik)\nnorm2 = matplotlib.colors.Normalize(vmin=-400, vmax=200)\n\nfor j in range(2):\n    sim = sims[j]\n    axs[0, j] = fig.add_subplot(gs1[0, j], projection=rot_pole_crs04)\n    axs[0, j] = plotcosmo04sm_notick(axs[0, j])\n    cs[0, j] = axs[0, j].pcolormesh(rlon04, rlat04, data[sim][\"CAPE\"], norm=norm1, cmap=cmap1,\n                                    shading=\"auto\", transform=rot_pole_crs04)\n\naxs[0, 2] = fig.add_subplot(gs2[0, 0], projection=rot_pole_crs04)\naxs[0, 2] = plotcosmo04sm_notick(axs[0, 2])\ncs[0, 2] = axs[0, 2].pcolormesh(rlon04, rlat04, data['diff'][\"CAPE\"], norm=norm2, cmap=cmap2,\n                                shading=\"auto\", transform=rot_pole_crs04)\n\nfor i in range(nrow):\n    cax = fig.add_axes(\n        [axs[i, 1].get_position().x1 + 0.01, axs[i, 1].get_position().y0, 0.015, axs[i, 1].get_position().height])\n    cbar = fig.colorbar(cs[i, 1], cax=cax, orientation='vertical', extend='max')\n    cbar.ax.tick_params(labelsize=13)\n\nfor i in range(nrow):\n    cax = fig.add_axes(\n        [axs[i, 2].get_position().x1 + 0.01, axs[i, 2].get_position().y0, 0.015, axs[i, 2].get_position().height])\n    cbar = fig.colorbar(cs[i, 2], cax=cax, orientation='vertical', extend='both')\n    cbar.ax.tick_params(labelsize=13)\n\n\nfor i in range(nrow):\n    axs[i, 0].text(-0.01, 0.91, '30°N', ha='right', va='center', transform=axs[i, 0].transAxes, fontsize=14)\n    axs[i, 0].text(-0.01, 0.45, '25°N', ha='right', va='center', transform=axs[i, 0].transAxes, fontsize=14)\n\nfor j in range(ncol):\n    axs[0, j].text(0.04, -0.02, '95°E', ha='center', va='top', transform=axs[0, j].transAxes, fontsize=14)\n    axs[0, j].text(0.46, -0.02, '100°E', ha='center', va='top', transform=axs[0, j].transAxes, fontsize=14)\n    axs[0, j].text(0.88, -0.02, '105°E', ha='center', va='top', transform=axs[0, j].transAxes, fontsize=14)\n\nplt.show()\n","repo_name":"ruolanxixi/cosmo_scripts","sub_path":"paper1/EAS04/analysis/topo2/cape.py","file_name":"cape.py","file_ext":"py","file_size_in_byte":4735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"33151760590","text":"\"\"\"\nUse a mutex lock to have 2 threads each add 1 to the same global variable one million times\n\n    => Once the lock is added we cannot decide which thread grabs it; whichever thread grabs the lock runs first, and the thread that missed it has to wait\n       With the lock, multitasking effectively becomes a single task and performance drops, i.e. only one thread can execute at any given moment\n\n\n# The race condition does not show up as an anomaly..., this code has a problem...\n\"\"\"\nimport threading\n\nimport time\n\nnum = 0\n\n# create the global mutex lock\n# lock = threading.Lock()\n\ndef sum_num1():\n\n    # lock.acquire()\n    global num\n    for i in range(1000000):\n        num += 1\n\n    print(\"sum 1:\", num)\n    # lock.release()\n\ndef sum_num2():\n\n    # lock.acquire()\n    global num\n    for i in range(1000000):\n        num += 1\n\n    print(\"sum 2:\", num)\n    # lock.release()\n\n\ndef main():\n\n    thread_01 = threading.Thread(target=sum_num1)\n    thread_02 = threading.Thread(target=sum_num2)\n    thread_01.start()\n    thread_02.start()\n    time.sleep(1)\n    print(\"num: \", num)\n\n\nif __name__ == '__main__':\n    main()\n\n","repo_name":"Mumujane/PythonAdvance","sub_path":"Thread/Introduction/threadingLock.py","file_name":"threadingLock.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"23625601841","text":"'''\nI was in an airplane yesterday, so I couldn't compete on Draw Mohammed Day.\nBut I have something to make up for it!\nDraw Mohammed Day 2011 Strikes Back!!!\n\nAnyone who says I should not draw Mohammed can fuck off!\nI am not a Muslim. I do not have to obey Islamic rules.\nAll religions can be criticised. 
Islam is not an exception.\nIslam is not perfect.\nWhen someone criticises Islam,\nMuslims should either fix their mistakes or get used to the criticism.\nMuslims should not try to scare their critics into submission.\nCritics should not submit to threats of violence.\nThe state of freedom in this world is high enough\nfor people to not be scared of doing something\nas vanilla as criticising a religion.\n\n ________________\n /___________/ /\\\n / DERKA / \\\n /_______________/ /\\\n /___/ \\ \\________/__\\\n/ /\\ DERKA \\\n|______/ \\______________|\n| \\ \\_____________|\n| /\\ MOHAMMED |\n|____/__\\________________|\n | XXXX XXXX |\n | /O \\ / O\\ |\n | \\__/ /\\ \\__/ |\n | | | |\n | /__\\ |\n | XXXXXXXXXX |\n \\ XX ______ XX /\n \\ XX / \\ XX / \n \\ |JIHAD!| /\n \\ \\______/ /\n \\ XX /\n \\ XXXXXXXX /\n XXXXXXXXXXXX\n XXXXXXXXXX\n XXXXXXXX\n XXXXXX\n ____\n / -- \\\n ||__||\n | |\n | __ |\n | -- |\n | |\n | |\n | __ |\n ____ | -- | ____ \n / -- \\| |/ -- \\\n | || || | ___ \n ___ | ||/ \\|| |/ - \\\n / --\\ |/ \\| | | |/ \\|| |\n || \\\\ || | \\ / | | | |\n \\\\__/ \\ |\\ / \\ / | \\\n \\ \\ | / \\ \\\n \\ / \\ | DERKA DERKA \\ / |\n \\// | \\ | MOHAMMED JIHAD |\n \\_/ \\__/ |\n \\ |\n \\ /\n \\ /\n \\ /\n \\ /\n'''\n\ndef output(f, solution_iter):\n s = list()\n for solution in solution_iter:\n s.append('Case #')\n s.append(str(solution['number']))\n s.append(':\\n')\n for rpi_element in solution['rpi_array']:\n s.append(str(rpi_element))\n s.append('\\n')\n f.write(''.join(s))\n pass\n\ndef read(f):\n fiter = iter(f)\n cases = int(next(fiter))\n raw_problem_iter = list()\n for x in range(cases):\n player_count = int(next(fiter))\n game_array = list()\n for y in range(player_count):\n game_array.append(next(fiter).rstrip('\\n'))\n raw_problem_iter.append({\n 'number': x + 1,\n 'game_array': game_array,\n })\n pass\n return iter(raw_problem_iter)\n\nwin_map = {\n '.': 0,\n '1': 1,\n '0': 0,\n}\n\nloss_map = {\n '.': 0,\n '1': 0,\n '0': 1,\n}\n\ndef convert(raw_problem):\n play_tuple_array = list()\n for game in raw_problem['game_array']:\n win = list()\n loss = list()\n for symbol in game:\n win.append(win_map[symbol])\n loss.append(loss_map[symbol])\n play_tuple_array.append((win, loss))\n return {\n 'number': raw_problem['number'],\n 'play_tuple_array': play_tuple_array,\n }\n\ndef rpi(wp, owp, oowp):\n return float(wp + owp + owp + oowp) / 4\n\ndef winloss(play_tuple):\n win = sum(play_tuple[0])\n loss = sum(play_tuple[1])\n return win, win + loss, float(win) / (win + loss)\n\ndef calc_mwp(win, total, play_tuple, competitor_index):\n if play_tuple[0][competitor_index]:\n return float(win - 1) / (total - 1)\n else:\n if play_tuple[1][competitor_index]:\n return float(win) / (total - 1)\n else:\n return float(win) / total\n\ndef calc_oowp(owp_sum, owp_array, competitor_index, competitor_count_m1):\n return float(owp_sum - owp_array[competitor_index]) / competitor_count_m1\n\ndef solution(problem):\n player_count = len(problem['play_tuple_array'])\n win_list = list()\n total_list = list()\n wp_list = list()\n for play_tuple in problem['play_tuple_array']:\n win, total, wp = winloss(play_tuple)\n win_list.append(win)\n total_list.append(total)\n wp_list.append(wp)\n owp_list = list()\n pta = problem['play_tuple_array']\n for x in range(player_count):\n mwp_sum = 0\n competitor_count = 0\n for y in range(player_count):\n if (x != y) and (pta[x][0][y] or pta[x][1][y]):\n competitor_count += 1\n win = win_list[y]\n total = total_list[y]\n play_tuple = problem['play_tuple_array'][y]\n 
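# added note: calc_mwp is opponent y's winning percentage with any games against\n                # player x removed, the standard adjustment in an RPI rating; rpi() above\n                # then combines the pieces as (WP + 2*OWP + OOWP) / 4\n                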
bla = calc_mwp(win, total, play_tuple, x)\n mwp_sum += bla\n owp_list.append(float(mwp_sum) / competitor_count)\n oowp_list = list()\n for x in range(player_count):\n oowp_sum = 0\n competitor_count = 0\n for y in range(player_count):\n if (x != y) and (pta[x][0][y] or pta[x][1][y]):\n competitor_count += 1\n oowp_sum += owp_list[y]\n oowp_list.append(float(oowp_sum) / competitor_count)\n rpi_array = list()\n for x in range(player_count):\n rpi_array.append(rpi(wp_list[x], owp_list[x], oowp_list[x]))\n solu = dict()\n solu['rpi_array'] = rpi_array\n solu['number'] = problem['number']\n return solu\n\ndef frame(in_file_path, out_file_path):\n solist = list()\n for raw_problem in read(open(in_file_path)):\n p = convert(raw_problem)\n s = solution(p)\n solist.append(s)\n pass\n output(open(out_file_path, 'w'), iter(solist))\n pass\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_81/525.py","file_name":"525.py","file_ext":"py","file_size_in_byte":5882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35720917782","text":"# Author: Abdulhalim Yusuf\n# Date: November 17, 2015\n# Project: Selection Sort\n\n\n\ndef selectionSort(a):\n\tn = len(a)\n\tfor i in range(0,n):\n\t\tk = i\n\t\tfor j in range (i+1,n):\n\t\t\tif a[j] < a[k]:\n\t\t\t\tk = j\n\t\ta[i], a[k] = a[k], a[i]\n","repo_name":"whoislimos/Python-Codes","sub_path":"Selection_Sort.py","file_name":"Selection_Sort.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2097230567","text":"from CoreOOP import *\nimport sys\n\n\ndef main():\n app = QApplication(sys.argv)\n\n loop = QEventLoop(app)\n asyncio.set_event_loop(loop)\n window = My_UI()\n window.show()\n with loop:\n sys.exit(loop.run_forever())\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"manhhv87/MultiDrone-3Drone","sub_path":"MyApp.py","file_name":"MyApp.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70110988354","text":"#loop Detection\n#Given a circular linkedList, implement an algorithm that returns the node at the beginning of the loop\n\ndef loopDetection(LL):\n myset = set()\n current = LL.head\n while current != None:\n if current in myset:\n return current\n else:\n myset.add(current)\n current = current.next\n return False\n","repo_name":"Cammac7/CodingPractice","sub_path":"python/chapter2/2-8.py","file_name":"2-8.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17618159343","text":"import logging\nimport sys\n\nfrom Products.ZenRelations.Exceptions import ZenSchemaError\nfrom Products.ZenRelations.utils import importClass, importClasses\n\nlogging.basicConfig()\nroot = logging.getLogger()\nroot.setLevel(logging.CRITICAL)\n\n\ndef lookupSchema(cls, relname):\n \"\"\"\n Lookup the schema definition for a relationship.\n All base classes are checked until RelationshipManager is found.\n \"\"\"\n for name, schema in cls._relations:\n if name == relname:\n return schema\n raise ZenSchemaError(\n \"Schema for relation %s not found on %s\" % (relname, cls.__name__)\n )\n\n\ndef checkRelationshipSchema(cls, baseModule):\n \"\"\"\n Walk all relationship schema definitions and confirm that they\n have reciprical peers on the remote class.\n \"\"\"\n for relname, 
rel in cls._relations:\n try:\n remoteClass = importClass(rel.remoteClass, None)\n except AttributeError:\n logging.critical(\n \"RemoteClass '%s' from '%s.%s' not found\",\n rel.remoteClass,\n cls.__name__,\n relname,\n )\n continue\n try:\n rschema = lookupSchema(remoteClass, rel.remoteName)\n except ZenSchemaError:\n logging.critical(\n \"Inverse def '%s' for '%s.%s' not found on '%s'\",\n rel.remoteName,\n cls.__name__,\n relname,\n rel.remoteClass,\n )\n continue\n except Exception as e:\n logging.critical(\n \"RemoteClass '%s' for '%s.%s' problem.\",\n rel.remoteName,\n cls.__name__,\n relname,\n )\n logging.critical(e)\n continue\n try:\n localClass = importClass(rschema.remoteClass, None)\n except AttributeError as e:\n logging.critical(e)\n if not issubclass(cls, localClass):\n logging.critical(\n \"Inverse def '%s' from '%s.%s' wrong \" \"remoteClass: '%s'\",\n rel.remoteName,\n cls.__name__,\n relname,\n rschema.remoteClass,\n )\n if rschema.remoteName != relname:\n logging.critical(\n \"Inverse def '%s' from '%s.%s' wrong \" \"remoteName: '%s'\",\n rel.remoteName,\n cls.__name__,\n relname,\n rschema.remoteName,\n )\n if rel.remoteType != rschema.__class__:\n logging.critical(\n \"'%s.%s' inverse '%s' type %s != %s\",\n cls.__name__,\n relname,\n rel.remoteName,\n rschema.__class__.__name__,\n rel.remoteType.__name__,\n )\n\n\nbaseModule = None\nif len(sys.argv) > 1:\n baseModule = sys.argv[1]\n\nclassList = importClasses(\n basemodule=baseModule, skipnames=(\"ZentinelPortal\", \"ZDeviceLoader\")\n)\n\nfor classdef in classList:\n if hasattr(classdef, \"_relations\"):\n logging.info(\"checking class %s...\", classdef.__name__)\n checkRelationshipSchema(classdef, baseModule)\n","repo_name":"zenoss/zenoss-prodbin","sub_path":"Products/ZenRelations/checkrel.py","file_name":"checkrel.py","file_ext":"py","file_size_in_byte":3170,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"7686308064","text":"def quicksort(tengah, awal, akhir):\n if akhir - awal > 1:\n p = partition(tengah, awal, akhir)\n quicksort(tengah, awal, p)\n quicksort(tengah, p + 1, akhir)\n \n \ndef partition(tengah, awal, akhir):\n pivot = tengah[awal]\n i = awal + 1\n j = akhir - 1\n \n while True:\n while (i <= j and tengah[i] <= pivot):\n i = i + 1\n while (i <= j and tengah[j] >= pivot):\n j = j - 1\n \n if i <= j:\n tengah[i], tengah[j] = tengah[j], tengah[i]\n else:\n tengah[awal], tengah[j] = tengah[j], tengah[awal]\n return j\n \n \nurut = input('Enter the list of numbers: ').split()\nurutan = [int(x) for x in urut]\nquicksort(urutan, 0, len(urutan))\nprint('Sorted list: ', end='')\nprint(urutan)\n","repo_name":"Vputri/Tugas-Algoritma-Pengolahan-Paralel","sub_path":"qiucksort.py","file_name":"qiucksort.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"4901657654","text":"import math\n\ndef sss_solver(side1: float, side2: float, side3: float): # typed function\n a = side1\n b = side2\n c = side3\n # Use the law of cosines to find the angle lengths for the respecting Length\n big_a = radians_to_degrees(math.acos(((a * a) - (b * b) - (c * c)) / (-2 * b * c))) \n big_b = radians_to_degrees(math.acos(((b * b) - (a * a) - (c * c)) / (-2 * a * c)))\n big_c = radians_to_degrees(math.acos(((c * c) - (b * b) - (a * a)) / (-2 * a * b)))\n if (round(big_a + big_b + big_c)) != 180: # This should *never* happen, but if it does, panic panic panic!\n 
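# added note: the three law-of-cosines angles of a valid triangle always sum to\n        # 180 degrees, so after round() this branch should be unreachable for real inputs\n        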
print(\"Error: Not a triangle! Aborting...\")\n exit()\n s = (a + b + c) / 2 #\n area = math.sqrt(s * (s - a) * (s - b) * (s - c)) # Heron's formula\n print(\"Solved! \\nThe angle of A is \" + str(round(big_a, 3)) + \" degrees. \\nThe angle of B is \" + str(round(big_b, 3)) + \" degrees. \\nThe angle of C is \" + str(round(big_c, 3)) + \" degrees. \\nThe area of the triangle is \" + str(round(area, 3)) + \" units squared.\")\n\ndef radians_to_degrees(radians):\n return (radians * 180 / math.pi)\n\nsss_solver(7, 12, 18)\n","repo_name":"FxllenCode/APCSPWork","sub_path":"Day12/14_StringProcedure.py","file_name":"14_StringProcedure.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"32689088781","text":"#!/usr/bin/env python2\n# -*- coding:utf-8 -*\n\nfrom pwn import *\nfrom libformatstr import FormatStr\nfrom sys import argv\nfrom time import sleep\n\ncontext.terminal = ['terminator','-e']\ncontext.log_level = \"debug\"\n\nchall = \"./distfiles/chall\"\nlibc = ELF(\"./distfiles/libc-2.27.so\")\nelf = ELF(chall)\n\nif len(argv) >= 2 and argv[1] == \"r\":\n p = remote(\"localhost\", 9003)\nelif len(argv) >= 2 and argv[1] == \"d\":\n\tcmd = \"\"\"\n\t\tb main\n\t\tc\n\t\"\"\"\n\tp = gdb.debug(chall,cmd)\nelse:\n p = process(chall)\n\nprintf_got = elf.got['printf']\nsystem_plt = elf.plt['system']\nlog.info(\"printf@got = 0x{:08x}\".format(printf_got))\nlog.info(\"system@plt = 0x{:08x}\".format(system_plt))\n\n# cache system@libc to got\np.recvuntil(\"$ \")\np.sendline(\"ls\")\n\n# got overwrite\nf = FormatStr(isx64=1)# for x86_64\nf[printf_got] = system_plt\nf[printf_got + 4] = 0\np.recvuntil(\"$ \")\np.sendline(f.payload(12))\np.recvuntil(\"$ \")\np.sendline(\"/bin/sh\")\n\np.interactive()\n","repo_name":"t3mp-0xCC/write-up","sub_path":"InterKosenCTF2019/bullsh/exp.py","file_name":"exp.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32903021664","text":"from __future__ import print_function\n\nimport os\n\nimport connexion\nimport swagger_client_pre\nimport json\nfrom types import SimpleNamespace\n\nfrom .config_uploader import *\nfrom swagger_client_pre.rest import ApiException\nfrom pprint import pprint\n\nfrom threading import Thread\nfrom queue import Queue\n\napp = connexion.FlaskApp(__name__)\n\nclass InitRoutine:\n\n def __init__(self, path):\n ConfigUploaderInst = ConfigUploader(path)\n\n PreprocessorRoutineInst = SendToPreprocessorRoutine()\n PreprocessorRoutineInst.start(ConfigUploaderInst.dataToTransfer)\n\n\nclass SendToPreprocessorRoutine:\n\n resultBitness = None\n inputNumber = None\n Response_nodes = []\n gotMaskedInput = False\n maskedInputLocal = None\n maskedInputNeighbour = None\n\n if os.getenv('CLIENT_A', None) is not None:\n adversary = True\n if os.getenv('CLIENT_B', None) is not None:\n if os.getenv('CLIENT_B_NUMBER', None) is not None:\n inputNumber = int(os.getenv('CLIENT_B_NUMBER', None)) # клиент B должен иметь свой API, а я его забыл написать(((\n adversary = False\n\n def __init__(self):\n pass\n\n def start(self, dataToTransfer=None):\n\n if not self.adversary:\n api_instance = swagger_client_pre.PreprocessorApi()\n\n try:\n print(\"ClientB call get_table from SendToPreprocessorRoutine\")\n api_response = api_instance.get_table()\n print(\"ClientA get preprocessed value successfully\")\n self.config = json.loads(json.dumps(api_response[0]['config']), 
object_hook=lambda d: SimpleNamespace(**d))\n\n                self.numOfLinks = int(self.config.numOfLinks) # number of links in the chain (config)\n                self.numOfNodes = int(self.config.numOfNodes) # number of nodes in the chain (config)\n                self.masksBitness = int(self.config.masksBitness) # number of client A's bits\n                self.inputMasks = int(self.config.inputMasks) # number of client B's bits\n                self.outputMasks = int(self.config.outputMasks) # number of result bits\n\n                self.nodes = [0] * self.numOfNodes\n\n                for i in range(self.numOfNodes):\n                    self.nodes[i] = json.loads(json.dumps(api_response[0]['node' + str(i + 1)]), object_hook=lambda d: SimpleNamespace(**d))\n                print(\"ClientA parsed preprocessed data successfully\")\n\n            except ApiException as e:\n                print(\"Exception when calling InteractionApi->get_table: %s\\n\" % e)\n\n        if self.adversary:\n            api_instance = swagger_client_pre.PreprocessorApi()\n            self.uploadedData = dataToTransfer\n\n            try:\n                # start preprocessing procedure\n                print(\"ClientA send start_2pc post request to preprocessor\")\n                api_response = api_instance.start2_pc(body=dataToTransfer)\n                print(\"ClientA get preprocessed value successfully\")\n                self.config = json.loads(json.dumps(api_response[0]['config']), object_hook=lambda d: SimpleNamespace(**d))\n\n                self.numOfLinks = int(self.config.numOfLinks) # number of links in the chain (config)\n                self.numOfNodes = int(self.config.numOfNodes) # number of nodes in the chain (config)\n                self.masksBitness = int(self.config.masksBitness) # mask bitness\n                self.inputMasks = int(self.config.inputMasks) # input masking bits\n                self.outputMasks = int(self.config.outputMasks) # output masking bits\n\n                self.nodes = [0] * self.numOfNodes\n\n                for i in range(self.numOfNodes):\n                    self.nodes[i] = json.loads(json.dumps(api_response[0]['node' + str(i + 1)]), object_hook=lambda d: SimpleNamespace(**d))\n                print(\"ClientA parsed preprocessed data successfully\")\n\n            except ApiException as e:\n                print(\"Exception when calling InteractionApi->start2_pc: %s\\n\" % e)\n\nSendToPreprocessorRoutineInst = SendToPreprocessorRoutine()","repo_name":"MIPT-2PC/user","sub_path":"server-side/swagger_server/controllers/service/init_routine.py","file_name":"init_routine.py","file_ext":"py","file_size_in_byte":4316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"28839620712","text":"import copy\nimport os\nimport logging\nimport ntpath\nimport subprocess\nfrom itertools import chain\nfrom copy import deepcopy\n\nfrom os.path import join, normpath,dirname\nfrom project_generator_definitions.definitions import ProGenDef\n\nfrom .tool import Tool, Exporter\nfrom ..util import SOURCE_KEYS\n\n\nclass MakefileTool(Tool, Exporter):\n\n    ERRORLEVEL = {\n        0: 'no errors',\n        1: 'targets not already up to date',\n        2: 'errors'\n    }\n\n    SUCCESSVALUE = 0\n\n    optimization_options = ['O0', 'O1', 'O2', 'O3', 'Os']\n\n    generated_projects = {\n        'path': '',\n        'files': {\n            'makefile' : '',\n        }\n    }\n\n    def __init__(self, workspace, env_settings, logging):\n        self.workspace = workspace\n        self.env_settings = env_settings\n        self.logging = logging\n\n    def _parse_specific_options(self, data):\n        \"\"\" Parse all specific settings. 
\"\"\"\n data['common_flags'] = []\n data['ld_flags'] = []\n data['c_flags'] = []\n data['cxx_flags'] = []\n data['asm_flags'] = []\n for k, v in data['misc'].items():\n if type(v) is list:\n if k not in data:\n data[k] = []\n data[k].extend(v)\n else:\n if k not in data:\n data[k] = ''\n data[k] = v\n\n def _get_libs(self, project_data):\n project_data['lib_paths'] =[]\n project_data['libraries'] =[]\n for lib in project_data['source_files_lib']:\n head, tail = ntpath.split(lib)\n file = tail\n if (os.path.splitext(file)[1] != \".a\"):\n self.logging.debug(\"Found %s lib with non-valid extension (!=.a)\" % file)\n continue\n else:\n file = file.replace(\".a\",\"\")\n project_data['lib_paths'].append(head)\n project_data['libraries'].append(file.replace(\"lib\",''))\n\n def export_workspace(self):\n vars = { 'projects': [os.path.basename(p['path']) for p in self.workspace['projects']] }\n generated_projects = deepcopy(self.generated_projects)\n generated_projects['path'], makefile = \\\n self.gen_file_jinja('makefile_workspace.tmpl', vars, 'Makefile',\n os.path.dirname(self.workspace['settings']['path']))\n generated_projects['files']['makefile'] = [makefile] + \\\n [os.path.basename(p['files']['makefile']) for p in self.workspace['projects']]\n return self.generated_projects\n\n def get_generated_project_files(self):\n return {'path': self.workspace['path'], 'files': [self.workspace['files']['makefile']]}\n\n def process_data_for_makefile(self, project_data):\n # Flatten source dictionaries, we don't need groups\n for key in SOURCE_KEYS:\n project_data[key] = list(sorted(chain(*project_data[key].values())))\n # flatten also include files\n project_data['include_files'] = list(sorted(chain(*project_data['include_files'].values())))\n\n self._get_libs(project_data)\n self._parse_specific_options(project_data)\n\n pro_def = ProGenDef()\n\n if pro_def.get_mcu_core(project_data['target'].lower()):\n project_data['core'] = pro_def.get_mcu_core(project_data['target'].lower())[0]\n else:\n raise RuntimeError(\n \"Target: %s not found, Please add the target to https://github.com/project-generator/project_generator_definitions\" % project_data['target'].lower())\n\n # gcc arm is funny about cortex-m4f.\n if project_data['core'] == 'cortex-m4f':\n project_data['core'] = 'cortex-m4'\n\n # change cortex-m0+ to cortex-m0plus\n if project_data['core'] == 'cortex-m0+':\n project_data['core'] = 'cortex-m0plus'\n\n def build_project(self, **kwargs):\n # cwd: relpath(join(project_path, (\"gcc_arm\" + project)))\n # > make all\n path = dirname(self.workspace['files']['makefile'])\n self.logging.debug(\"Building make project: %s\" % path)\n\n args = ['make']\n try:\n args += ['-j', str(kwargs['jobs'])]\n except KeyError:\n pass\n if 'verbose' in kwargs:\n args += [\"VERBOSE=%d\" % (1 if kwargs['verbose'] else 0)]\n args += ['all']\n self.logging.debug(args)\n\n try:\n ret_code = None\n ret_code = subprocess.call(args, cwd=path)\n except:\n self.logging.error(\"Project: %s build error whilst calling make. Is it in your PATH?\" % self.workspace['files']['makefile'])\n return -1\n else:\n if ret_code != self.SUCCESSVALUE:\n # Seems like something went wrong.\n if ret_code < 3:\n self.logging.error(\"Project: %s build failed with the status: %s\" %\n (self.ERRORLEVEL[ret_code], self.workspace['files']['makefile']))\n else:\n self.logging.error(\"Project: %s build failed with unknown error. 
Returned: %s\" %\n (ret_code, self.workspace['files']['makefile']))\n return -1\n else:\n name = os.path.basename(self.workspace['path'])\n self.logging.info(\"Built %s with the status: %s\" %\n (name, self.ERRORLEVEL[ret_code]))\n return 0\n","repo_name":"project-generator/project_generator","sub_path":"project_generator/tools/makefile.py","file_name":"makefile.py","file_ext":"py","file_size_in_byte":5431,"program_lang":"python","lang":"en","doc_type":"code","stars":241,"dataset":"github-code","pt":"61"} +{"seq_id":"44448472136","text":"from src.data_store import data_store\nfrom src.error import InputError, AccessError\nfrom src.tokens import check_token_valid, decode_jwt, get_userid_from_token\nfrom src.helper_functions import check_user\n\n\ndef search_v1(token, query_str):\n \n info = data_store.get()\n \n # checking query_str length \n if len(query_str) < 1 or len(query_str) > 1000:\n raise InputError(\"The given string doesn't have a valid length (1 - 1000 chars).\")\n \n found_messages = []\n \n for message_data in info['messages']:\n if str(query_str) in str(message_data['message']):\n found_messages.append(message_data)\n \n \n return {\n 'messages': found_messages\n }\n \n","repo_name":"JJaytra/Project-Backend","sub_path":"src/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21629976864","text":"class Product:\n def __init__(self, title, in_stock):\n self.title = title\n self.in_stock = in_stock\n\n\n def __eq__(self, other):\n title_match = False\n stock_match = False\n\n if isinstance(other, Product):\n if self.title == other.title:\n title_match = True\n if self.in_stock == other.in_stock:\n stock_match = True\n\n return title_match, stock_match\n","repo_name":"parkerjstewart/FarmSteadWebScraper","sub_path":"Product.py","file_name":"Product.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37659374761","text":"#Kushendra Ramrup\ns= [1,2,3,4,5,6,7,8]\n\ndef filterodd(l):\n l2 = [] \n for i in l: \n if i % 2 == 1: \n l2.append(i) \n return l2 \nprint(filterodd(s))\n\ndef mapsquare(l):\n l2 = []\n for i in l:\n i = (i*i)\n l2.append(i)\n return l2\nprint(mapsquare(s))","repo_name":"Kushendra1/csci127-assignments","sub_path":"hw_05/hw_05.py","file_name":"hw_05.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"71019250436","text":"import sys\n\nsys.setrecursionlimit(10 ** 6)\ninput = sys.stdin.readline\n\"\"\"\n메모리 초과\n- N의 값이 100,000으로 2차원 배열을 사용하게 되면 메모리가 클 수 밖에 없음, \n값이 많이 들어가지도 않는데 초기 부터 큰 값을 넣어서 진행하면 메모리가 남아돔\n--> 입접행렬 보다는 연결리스트로 충분히 구현 가능\n\"\"\"\nN = int(input())\nvisit = [0] * (N+1)\nmatrix = {} \nfor i in range(N):\n matrix[i+1] = set()\nfor i in range(N-1):\n a, b = map(int, input().split(\" \"))\n matrix[a].add(b)\n matrix[b].add(a)\n \ndef dfs(V):\n #방문할 정점\n for i in matrix[V]: #연결되어 있고\n if visit[i] == 0: # 방문하지 않았으면\n visit[i] = V #부모노드 입력\n dfs(i)\n\ndfs(1)\n\nfor i in range(2, len(visit)):\n print(visit[i])\n","repo_name":"algojunior/sunjungAn","sub_path":"DFS,BFS/11725_silver.py","file_name":"11725_silver.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28769712995","text":"import re\n\n\ndef read_input():\n with 
open(\"input.txt\", \"r\") as f:\n return f.readlines()\n\n\ndef parse_input(input):\n parsed_input = [line.rstrip('\\n') for line in input]\n\n return parsed_input\n\n\ndef get_total_disk_space(input):\n total_disk_space = 0\n folder_sizes = []\n\n for line in input:\n if '$ ls' in line or 'dir' in line:\n continue\n elif '$ cd ..' in line:\n if folder_sizes[-1] <= 100000:\n total_disk_space += folder_sizes[-1]\n folder_sizes[-2] += folder_sizes[-1]\n folder_sizes.pop()\n elif '$ cd ' in line:\n folder_sizes.append(0)\n else:\n folder_sizes[-1] += int(line.split(' ')[0])\n\n return total_disk_space\n\n\ndef main():\n input = parse_input(read_input())\n print(get_total_disk_space(input))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"mattiasahle/aoc22","sub_path":"07/day71.py","file_name":"day71.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28097242579","text":"import sys\nsys.setrecursionlimit(100000)\n\nclass Solution:\n # @param A : string\n # @param B : list of strings\n # @return an integer\n\n def __init__(self):\n self.hash_map = dict()\n\n def solve_word_break_dp(self, string, word_dict):\n n = len(string)\n dp = [False] * (n+1)\n dp[0] = True\n\n for i in range(1, n+1):\n for j in range(i):\n if dp[j] and string[j:i] in self.hash_map:\n dp[i] = True\n else:\n pass\n\n return dp[-1]\n\n\n def solve_word_break(self, string, start):\n print(f'string: {string} start: {start}')\n if start >= len(string):\n return True\n\n for end in range(start+1, len(string)+1):\n if string[start:end] in self.hash_map and self.solve_word_break(string, end):\n return True\n return False\n\n def wordBreak(self, A, B):\n for word in B:\n if word not in self.hash_map:\n self.hash_map[word] = 1\n print(self.hash_map)\n ans = self.solve_word_break(A, 0)\n print(f'recursive ans is {ans}')\n ans = self.solve_word_break_dp(A, B)\n print(f'iterative ans is {ans}')\n\n\nif __name__ == '__main__':\n A = \"leetleetleethash\"\n B = [\"leet\", \"code\", \"hash\"]\n obj = Solution()\n obj.wordBreak(A, B)","repo_name":"navkant/ds_algo_practice","sub_path":"scaler/dp2/dp5/word_break.py","file_name":"word_break.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34693246316","text":"import flask\nfrom flask.json import jsonify\nfrom flask_cors import CORS\nimport json\nimport os\nfrom model import *\n\napp = flask.Flask(__name__)\ncors = CORS(app)\n\n# On IBM Cloud Cloud Foundry, get the port number from the environment variable PORT\n# When running this app on the local machine, default the port to 8000\nport = int(os.getenv('PORT', 8000))\n\n@app.route('/')\ndef root():\n return jsonify([{\"message\":\"Hello World from IBM Cloud!!!\"}])\n\n@app.route(\"/simulation\", methods=[\"POST\"])\ndef create():\n global c \n c = City()\n return {\n 'statusCode': 201,\n 'headers': {\n 'Access-Control-Allow-Headers': 'Content-Type',\n 'Access-Control-Allow-Origin': '*',\n 'Access-Control-Allow-Methods': 'OPTIONS,POST,GET'\n },\n 'body': json.dumps('Hello from Lambda!')\n }\n\n@app.route(\"/step\", methods=[\"GET\"])\ndef queryState():\n # response = jsonify{\"data\": []}\n steps = []\n # Return a 15 steps batch\n for _ in range(0, 60):\n steps.append(c.step())\n return {\n 'statusCode': 200,\n 'headers': {\n 'Access-Control-Allow-Headers': 'Content-Type',\n 'Access-Control-Allow-Origin': '*',\n 
'Access-Control-Allow-Methods': 'OPTIONS,POST,GET'\n },\n 'body': json.dumps({'data': (steps)})\n }\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=port, debug=True)\n","repo_name":"CarLosVegga/BACK-city","sub_path":"backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13348147434","text":"from builtins import range\nimport sys, os\nsys.path.insert(1, os.path.join(\"..\",\"..\",\"..\"))\nimport h2o\nfrom tests import pyunit_utils\nfrom h2o.estimators.deeplearning import H2ODeepLearningEstimator\n\ndef missing():\n # Connect to a pre-existing cluster\n\n\n missing_ratios = [0, 0.1, 0.25, 0.5, 0.75, 0.99]\n errors = [0, 0, 0, 0, 0, 0]\n\n for i in range(len(missing_ratios)):\n data = h2o.upload_file(pyunit_utils.locate(\"smalldata/junit/weather.csv\"))\n data[15] = data[15].asfactor() #ChangeTempDir\n data[16] = data[16].asfactor() #ChangeTempMag\n data[17] = data[17].asfactor() #ChangeWindDirect\n data[18] = data[18].asfactor() #MaxWindPeriod\n data[19] = data[19].asfactor() #RainToday\n data[21] = data[21].asfactor() #PressureChange\n data[23] = data[23].asfactor() #RainTomorrow\n\n print(\"For missing {0}%\".format(missing_ratios[i]*100))\n\n # add missing values to the data section of the file (leave the response alone)\n if missing_ratios[i] > 0:\n resp = data[23]\n pred = data[:,list(range(23))+list(range(24,data.ncol))]\n data_missing = pred.insert_missing_values(fraction=missing_ratios[i])\n data_fin = data_missing.cbind(resp)\n else:\n data_fin = data\n\n # split into train + test datasets\n ratio = data_fin[0].runif()\n train = data_fin[ratio <= .75]\n test = data_fin[ratio > .75]\n\n\n hh = H2ODeepLearningEstimator(epochs=5, reproducible=True, seed=12345,\n activation='RectifierWithDropout', l1=1e-5,\n input_dropout_ratio=0.2)\n hh.train(x=list(range(2,22)),y=23, training_frame=train, validation_frame=test)\n errors[i] = hh.error()[0][1]\n\n for i in range(len(missing_ratios)):\n print(\"missing ratio: {0}% --> classification error: {1}\".format(missing_ratios[i]*100, errors[i]))\n\n assert sum(errors) < 2.2, \"Sum of classification errors is too large!\"\n\nif __name__ == \"__main__\":\n pyunit_utils.standalone_test(missing)\nelse:\n missing()\n","repo_name":"h2oai/h2o-3","sub_path":"h2o-py/tests/testdir_algos/deeplearning/pyunit_missing_deeplearning.py","file_name":"pyunit_missing_deeplearning.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","stars":6553,"dataset":"github-code","pt":"61"} +{"seq_id":"70672708355","text":"from django.conf.urls import patterns, include, url\nfrom django.views.generic import ListView\nfrom sprinklers.models import Circuit, detect_current_state\nfrom sensor_data.views import CellarCurrent, OutsideCurrent, CellarSummary, \\\n OutsideSummary, CellarDetail, OutsideDetail\n\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\n# Make sure we have current state from the sprinklers\ndetect_current_state()\n\nurlpatterns = patterns(\n '',\n url(r'^$', 'sprinklers.views.summary', name='overview'),\n\n url(r'^circuits/$',\n ListView.as_view(\n queryset=Circuit.objects.order_by('label'),\n context_object_name='circuits', # Default would be poll_list\n template_name='sprinklers/index.html'),\n name='circuits'),\n url(r'^circuits/(?P\\d+)/$',\n 'sprinklers.views.detail', name='circuit_details'),\n 
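# added note: unlike its neighbours, the update route below has no name= kwarg,\n    # so reverse()/{% url %} lookups have to fall back to the dotted view path\n    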
url(r'^circuits/(?P\\d+)/update/$', 'sprinklers.views.update'),\n\n url(r'^outside$', OutsideCurrent.as_view(), name='outside_current'),\n url(r'^outside/detail/'\n '(?P\\d+)$',\n OutsideDetail.as_view(), name='outside_detail'),\n url(r'^outside/summary/'\n '(?P\\d+)$',\n OutsideSummary.as_view(), name='outside_summary'),\n url(r'^outside/ytd/$',\n OutsideSummary.as_view(), name='outside_ytd'),\n\n url(r'^cellar$', CellarCurrent.as_view(), name='cellar_current'),\n url(r'^cellar/detail/'\n '(?P\\d+)$',\n CellarDetail.as_view(), name='cellar_detail'),\n url(r'^cellar/summary/'\n '(?P\\d+)$',\n CellarSummary.as_view(), name='cellar_summary'),\n url(r'^cellar/ytd/$',\n CellarSummary.as_view(), name='cellar_ytd'),\n\n url(r'^rain$', 'sensor_data.views.rain_data', name='rain_summary'),\n url(r'^rain/season/$', 'sensor_data.views.rain_data', name='rain_season'),\n url(r'^rain/'\n '(?P\\d+)$',\n 'sensor_data.views.rain_data', name='rain_details'),\n url(r'^rain/ytd/$',\n 'sensor_data.views.rain_ytd', name='rain_ytd'),\n)\nurlpatterns += patterns(\n '',\n url(r'^admin/', include(admin.site.urls)),\n)\n","repo_name":"dhiltgen/home_automation","sub_path":"home_automation/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33764098797","text":"\nwhile(True):\n lol = int(input('What is your age bro'))\n\n if(lol < 3):\n print('Woah you get in for free')\n\n elif(lol <= 12 and lol >= 3):\n print('Ticket costs 10 quid mate')\n\n elif(lol > 12):\n print('Thatll be 15 quid my guy')\n\n else:\n print('Enter a number dumbass')\n\n\n","repo_name":"DanLeech1/ComputerScienceWorkshop","sub_path":"Movietickets.py","file_name":"Movietickets.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8603692213","text":"def pr(text, profile):\n p = 1\n for i in range(len(text)):\n p *= profile[text[i]][i]\n return p\n \ndef profileMostProbableKmer(text, k, profile):\n mostP = -1\n mostK = \"\"\n for i in range(len(text) - k + 1):\n newP = pr(text[i:i + k], profile)\n if newP > mostP:\n mostP = newP\n mostK = text[i:i + k]\n return mostK\n","repo_name":"fokamo/Coursera-Bioinformatics-Functions","sub_path":"profileMostProbableKmer.py","file_name":"profileMostProbableKmer.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2155990860","text":"import pandas as pd\nfrom get_user_media import *\nimport os,json\nfrom func import getCredentials\nfrom hashtag import *\nfrom helper.access_token import AccessToken\nimport datetime\n\n\n\n\ndef AccessInstagramAPI():\n\tparams = getCredentials() \n\tparams['debug'] = 'yes' \n\tresponse = AccessToken( params ) \n\n\ndef AccessYourAccount():\n\t#GETTING USER POSTS AND COMMENTS\n\n\tparams=getCredentials()\n\tparams['debug'] = 'no'\n\tresponse = getUserMedia(params)\n\n\n\tresponse_json=dict()\n\ti=0\n\tresponse_json['id'] = dict()\n\tresponse_json['permalink'] = dict()\n\tresponse_json['caption'] = dict()\n\tresponse_json['media_type'] = dict()\n\tresponse_json['timestamp'] = dict()\n\tresponse_json['comments'] = dict()\n\tresponse_json['like_count'] = dict()\n\n\n\t#response_json['comments']=dict()\n\n\tfor post in response['json_data']['data'] :\n\t\tprint (\"\\n\\n---------- POST ----------\\n\") \n\t\tprint (\"\\nPost_id:\")\n\t\tprint 
(post['id'])\n\n\t\tresponse_json['id'][i] = post['id']\n\n\t\tprint (\"\\nLink to post:\") \n\t\tprint (post['permalink'])\n\t\tresponse_json['permalink'][i] = post['permalink']\n\n\t\tprint (\"\\nPost caption:\") \n\t\tprint (post['caption']) \n\t\tresponse_json['caption'][i] = post['caption']\n\n\t\tprint (\"\\nMedia type:\") \n\t\tprint (post['media_type'])\n\t\tresponse_json['media_type'][i] = post['media_type']\n\n\t\tprint (\"\\nPosted at:\") \n\t\tprint (post['timestamp']) \n\t\tresponse_json['timestamp'][i] = post['timestamp']\n\t\t\n\n\t\tif('comments' in post):\n\t\t\tj=0\n\t\t\tresponse_json['comments'][i]=dict()\n\t\t\tfor com in post['comments']['data']:\n\t\t\t\t\tresponse_json['comments'][i][j] = com\n\t\t\t\t\tj+=1\n\t\telse:\n\t\t\tresponse_json['comments'][i]=\"null\"\n\t\tprint (post['like_count'])\n\t\tresponse_json['like_count'][i] = post['like_count']\n\t\ti=i+1\n\t\t\n\n\n\tos.chdir('/home/devaki/Desktop/insta')\n\treq = json.dumps(response_json)\n\tdf=pd.read_json(req)\n\tdf.to_csv('Insta_posts.csv',index=False)\n\t\n\n\n\ndef getPopularMedia():\n\n\tstr_hashtag=input('Enter hashtag:#')\n\t\n\tparams=getCredentials()\n\tparams['hashtag_name'] = str_hashtag \n\thashtagInfoResponse = getHashtagInfo( params ) \n\tparams['hashtag_id'] = hashtagInfoResponse['json_data']['data'][0]['id']; \n\tparams['hashtag_id'] = hashtagInfoResponse['json_data']['data'][0]['id'];\n\n\tfilename=str_hashtag+'_top'+'.csv'\n\t\n\n\tprint (\"\\n\\n\\n\\t-------------------HASHTAG INFO -----------------------\\n\")\n\tprint (\"\\nHashtag: \" + str_hashtag) \n\tprint (\"Hashtag ID: \" + params['hashtag_id']) \n\n\tprint (\"\\n\\n\\n\\t\\t\\t ------------------- HASHTAG TOP MEDIA --------------------\\n\") \n\tparams['type'] = 'top_media' \n\thashtagPopularMediaResponse = getHashtagMedia( params ) \n\t\n\tprint(hashtagPopularMediaResponse)\n\n\n\thashtag_response=dict()\n\thashtag_response['id']=dict()\n\thashtag_response['permalink']=dict()\n\thashtag_response['caption']=dict()\n\thashtag_response['media_type']=dict()\n\thashtag_response['like_count']=dict()\n\thashtag_response['comments_count']=dict()\n\t#hashtag_response['comments']=dict()\n\n\ti=0\n\n\tfor post in hashtagPopularMediaResponse['json_data']['data'] :\n\n\t\t\n\t\tprint (\"\\n\\n---------- POST ----------\\n\") \n\t\tprint (\"Postid:\")\n\t\tprint (post['id'])\n\n\t\thashtag_response['id'][i]=post['id']\n\t\t\t\n\t\tprint (\"Link to post:\") \n\t\tprint (post['permalink']) \n\t\thashtag_response['permalink'][i]=post['permalink']\n\t\t\n\n\t\tprint (\"\\nPost caption:\") \n\t\tprint (post['caption']) \n\t\thashtag_response['caption'][i]=post['caption']\n\t\t\n\n\t\tprint (\"\\nMedia type:\") \n\t\tprint (post['media_type']) \n\t\thashtag_response['media_type'][i]=post['media_type']\n\n\t\tprint (\"\\nlike_count:\") \n\t\tprint (post['like_count']) \n\t\thashtag_response['like_count'][i]=post['like_count']\n\n\t\tprint (\"\\nComments:\") \n\t\tprint (post['comments_count']) \n\t\thashtag_response['comments_count'][i]=post['comments_count']\n\t\t\n\t\ti+=1\n\n\treq = json.dumps(hashtag_response)\n\tdf=pd.read_json(req)\n\tdf.to_csv(filename,index=False)\n\n\n\ndef getRecentMedia():\n\n\tstr_hashtag=input('Enter hashtag:#')\n\t\n\tparams=getCredentials()\n\tparams['hashtag_name'] = str_hashtag \n\thashtagInfoResponse = getHashtagInfo( params ) \n\tparams['hashtag_id'] = hashtagInfoResponse['json_data']['data'][0]['id']; \n\tparams['hashtag_id'] = 
hashtagInfoResponse['json_data']['data'][0]['id'];\n\n\tfilename=str_hashtag+'_recent'+'.csv'\n\t\n\n\tprint (\"\\n\\n\\n\\t-------------------HASHTAG INFO -----------------------\\n\")\n\tprint (\"\\nHashtag: \" + str_hashtag) \n\tprint (\"Hashtag ID: \" + params['hashtag_id']) \n\n\tprint (\"\\n\\n\\n\\t\\t\\t ------------------- HASHTAG RECENT MEDIA --------------------\\n\") \n\tparams['type'] = 'recent_media' \n\thashtagRecentMediaResponse = getHashtagMedia( params ) \n\t\n\thashtag_response=dict()\n\thashtag_response['id']=dict()\n\thashtag_response['permalink']=dict()\n\thashtag_response['caption']=dict()\n\thashtag_response['media_type']=dict()\n\thashtag_response['like_count']=dict()\n\thashtag_response['comments_count']=dict()\n\t#hashtag_response['comments']=dict()\n\n\ti=0\n\n\tfor post in hashtagRecentMediaResponse['json_data']['data'] :\n\n\t\t\n\t\tprint (\"\\n\\n---------- POST ----------\\n\") \n\t\tprint (\"Postid:\")\n\t\tprint (post['id'])\n\n\t\thashtag_response['id'][i]=post['id']\n\t\t\t\n\t\tprint (\"Link to post:\") \n\t\tprint (post['permalink']) \n\t\thashtag_response['permalink'][i]=post['permalink']\n\t\t\n\n\t\tprint (\"\\nPost caption:\") \n\t\tprint (post['caption']) \n\t\thashtag_response['caption'][i]=post['caption']\n\t\t\n\n\t\tprint (\"\\nMedia type:\") \n\t\tprint (post['media_type']) \n\t\thashtag_response['media_type'][i]=post['media_type']\n\n\t\tprint (\"\\nlike_count:\") \n\t\tprint (post['like_count']) \n\t\thashtag_response['like_count'][i]=post['like_count']\n\n\t\tprint (\"\\nComments:\") \n\t\tprint (post['comments_count']) \n\t\thashtag_response['comments_count'][i]=post['comments_count']\n\t\t\n\t\ti+=1\n\n\treq = json.dumps(hashtag_response)\n\tdf=pd.read_json(req)\n\tdf.to_csv(filename,index=False)\n\n\n\nprint ('\\n-----------------------------------------MENU--------------------------------------------')\nprint ('\\n1.Access Instagram account\\n2.Account contents\\n3.Hashtag search(PopularMedia)\\n4.Hashtag Search(RecentMedia)')\n\nchoice=int(input('Enter your choice::'))\n\n\n\nif(choice==1):\n\tAccessInstagramAPI()\nelif(choice==2):\n\tAccessYourAccount()\nelif(choice==3):\n\tgetPopularMedia()\nelif(choice==4):\n\tgetRecentMedia()\nelse:\n\tprint('Invalid option')\n\n\nprint('\\n----------------------------------------------------------------------------------------------------')\n\n\n","repo_name":"devaki9/NLP","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6119778559","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\nfrom pyauto import visualization_helper as plt_helper\nfrom sqlalchemy import create_engine\n\nfrom functions.visual_helper import hist_ecdf\n\nsns.set()\n\n# connection to btc_usdbase\nengine = create_engine('postgresql://postgres:postgres@localhost:5432/bitcoin_test')\n\ncon = engine.connect()\n\n# get btc_usd_by_day_agg\nrs = con.execute('SELECT * FROM btc_usd_by_day_agg')\nbtc_usd = pd.DataFrame(rs.fetchall())\nbtc_usd.columns = [name.replace('weighted_price', 'value') for name in rs.keys()]\n\n# date aggregations\nbtc_usd['weekday'] = btc_usd['date_'].dt.weekday\nbtc_usd['day_of_month'] = btc_usd['date_'].dt.day\nbtc_usd['month'] = btc_usd['date_'].dt.month\nbtc_usd['year'] = btc_usd['date_'].dt.year\nbtc_usd['weekend'] = btc_usd['date_'].dt.weekday > 4\nbtc_usd['month_area'] = pd.cut(btc_usd['day_of_month'], bins=4, labels=[0.25, 0.5, 
0.75, 1])\nbtc_usd['year_area'] = pd.cut(btc_usd['month'], bins=4, labels=[0.25, 0.5, 0.75, 1])\n\n# sort by date_ and assign date_ as index\nbtc_usd.set_index('date_', inplace=True)\nbtc_usd.sort_index(inplace=True)\n\n# prepare target variables\nbtc_usd.dropna(inplace=True)\n\n# get nasdaq_agg\nrs = con.execute('SELECT * FROM nasdaq_agg')\nnasdaq = pd.DataFrame(rs.fetchall())\nnasdaq.columns = [name.replace('price', 'value') for name in rs.keys()]\nnasdaq = nasdaq.add_prefix('n_')\nnasdaq.rename({'n_date_':'date_'}, inplace=True, axis='columns')\n\n\n# get btc_usd_by_day_agg\nrs = con.execute('SELECT * FROM sp500_agg')\nsp500 = pd.DataFrame(rs.fetchall())\nsp500.columns = [name.replace('price', 'value') for name in rs.keys()]\nsp500 = sp500.add_prefix('s_')\nsp500.rename({'s_date_':'date_'}, inplace=True, axis='columns')\n\n\n# get btc_usd_by_day_agg\nrs = con.execute('SELECT * FROM btc_hacks_clean_resample')\nbtc_hacks = pd.DataFrame(rs.fetchall())\nbtc_hacks.columns = rs.keys()\n\nbtc_usd = btc_usd.merge(nasdaq, how='left', on='date_', suffixes=('', ''))\nbtc_usd = btc_usd.merge(sp500, how='left', on='date_', suffixes=('', ''))\nbtc_usd = btc_usd.merge(btc_hacks, how='left', on='date_', suffixes=('', ''))\n\n# columns for values and related to date\nmeasures = ['perc_close_open', 'perc_high_low', 'perc_value', 'volume_btc', 'value', 'days_without_hacks']\naggregations = ['weekday', 'month_area', 'year_area', 'year', 'weekend']\n\n# plot type\ndraw_plots = False\ndraw_pair = False\ndraw_join = False\ndraw_ecdf = True\ndraw_box = False\ndraw_heatmap = True\nthreshold_heat = 0.3\ndraw_swarm = False\n\n# -----------------Plots----------------------#\nfig, ax1 = plt.subplots()\nbtc_usd.set_index('date_', inplace=True)\nbtc_usd_m = btc_usd.resample('W').mean()\n\ndata2 = btc_usd_m['volume_btc']\ny_label2 = 'volume_btc'\n\ndata1 = btc_usd['loss_usd']\ny_label1 = 'hacks'\n\ncolor = 'tab:red'\nax1.set_ylabel(y_label1, color=color)\nax1.plot(data1, 'rd')\nax1.tick_params(axis='y', labelcolor=color)\n\nplt.yscale('log')\n\nax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis\n\ncolor = 'tab:blue'\nax2.set_ylabel(y_label2, color=color) # we already handled the x-label with ax1\nax2.plot(data2, color=color)\nax2.tick_params(axis='y', labelcolor=color)\n\nplt.yscale('log')\n\nfig.tight_layout() # otherwise the right y-label is slightly clipped\nplt.savefig('images/volume_btc_hacks.png')\n\n# --------------------------------------------#\n\n# plt.figure()\n# btc_usd_m[['volume_currency', 'n_volume_currency']].plot()\n# plt.savefig('images/volumes.png')\n\n# -----------------Pairplot-------------------#\nif draw_pair:\n plt.figure()\n sns.pairplot(btc_usd[measures], hue='change_volume_bool', diag_kind='kde', height=2)\n sns.pairplot(btc_usd[aggregations], hue='change_volume_bool', diag_kind='kde', height=2)\n\nplt.close()\n# --------------------------------------------#\n\n# -----------------Joinplots------------------#\nif draw_join:\n plt.figure()\n sns.set_style('white')\n ax = sns.jointplot(btc_usd['change_volume'], btc_usd['weekday'], xlim=[-0.06, 0.06])\n ax.ax_joint.set_xlabel('BTC Volume change')\n ax.ax_joint.set_ylabel('Week days')\n ax.ax_joint.set_yticks([0, 1, 2, 3, 4, 5, 6])\n ax.ax_joint.set_yticklabels(labels=['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'])\n ax.fig.suptitle('BTC change vs Weekdays')\n\nplt.close()\n# --------------------------------------------#\n\n# ------------------ECDF----------------------#\nif draw_ecdf:\n # 
compute ECDF and histogram of perc_value\n    for name in btc_usd.columns:\n        # no need to plot binary features\n        if len(btc_usd[name].unique()) > 2:\n            hist_ecdf(btc_usd[name], name=name, ecdf_theor=False)\n\n    # hist_ecdf(btc_usd['perc_value'], name='perc_value', ecdf_theor=True)\n    hist_ecdf(btc_usd['value'], name='value', ecdf_theor=False, xlog_scale=False)\n\nplt.close()\n# --------------------------------------------#\n\n# ------------------Box plots-----------------#\nif draw_box:\n    for measure in measures:\n        for aggregation in aggregations:\n            plt.figure()\n            sns.boxplot(x=aggregation, y=measure, data=btc_usd, showfliers=False, whis=2.5)\n            plt.title('Boxplot of ' + measure + ' vs ' + aggregation)\n            plt.savefig('images/' + measure + '_vs_' + aggregation + '_boxplot.png')\n\n            plt.figure()\n            sns.boxplot(x=aggregation, y=measure, data=btc_usd, showfliers=True)\n            plt.title('Boxplot of ' + measure + ' vs ' + aggregation + ' with outliers')\n            plt.savefig('images/' + measure + '_vs_' + aggregation + '_boxplot_outliers.png')\n\nplt.close()\n# --------------------------------------------#\n\n# ------------------Heatmap corr--------------#\nif draw_heatmap:\n    # calculate the correlation matrix\n    corr = btc_usd.corr().round(decimals=2)\n\n    # plot the heatmap\n    plt.figure(figsize=(15, 10))\n    sns.heatmap(corr, xticklabels=corr.columns, yticklabels=corr.columns, annot=True, cbar=False)\n    plt_helper.rotate_axis(90, xy_axis='x', bottom_pad=0.2)\n    plt.title('Correlation between variables')\n    plt.savefig('images/correlation.png')\n\n    if threshold_heat is not None:\n        corr_threshold = corr[(corr >= threshold_heat) | (corr <= - threshold_heat)]\n\n        plt.figure(figsize=(15, 10))\n        sns.heatmap(corr_threshold, xticklabels=corr_threshold.columns, yticklabels=corr_threshold.columns, annot=True,\n                    cbar=False)\n        plt_helper.rotate_axis(90, xy_axis='x', bottom_pad=0.2)\n        plt.title('Correlation between variables')\n        plt.savefig('images/correlation_threshold_{}.png'.format(threshold_heat))\n\nplt.close()\n# --------------------------------------------#\n\n# ------------------Swarmplots----------------#\nif draw_swarm:\n    for measure in measures:\n        for aggregation in aggregations:\n            plt.figure()\n            sns.swarmplot(x=aggregation, y=measure, data=btc_usd, size=2)\n            plt.title('Swarmplot of ' + measure + ' vs ' + aggregation)\n            plt.savefig('images/' + measure + '_vs_' + aggregation + '_swarm.png')\n\nplt.close()\n# --------------------------------------------#\n\n# plt.show()\nprint(btc_usd.head())\n","repo_name":"SamuelNLP/bitcoin-test","sub_path":"btc_usd_eda.py","file_name":"btc_usd_eda.py","file_ext":"py","file_size_in_byte":7092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"10168583505","text":"from django.conf import settings\nfrom django.template.loader import render_to_string\nfrom django.core.mail import EmailMultiAlternatives\nfrom backend_app import twitter_api\nimport logging\nlogger = logging.getLogger(__name__)\n\n\ndef send_email_message(subject=\"\", body={}, template=\"vuln\", recipients=[]):\n    tmpl_txt = \"{}.txt\".format(template)\n    tmpl_html = \"{}.html\".format(template)\n    msg_text = render_to_string(tmpl_txt, {'data': body})\n    msg_html = render_to_string(tmpl_html, {'data': body})\n\n    if subject in [\"\", None]:\n        subject = '[PatrowlFeeds-Alert] ' + (subject or '')\n    from_email = settings.EMAIL_HOST_USER\n    try:\n        msg = EmailMultiAlternatives(subject, msg_text, from_email, recipients)\n        msg.attach_alternative(msg_html, \"text/html\")\n        msg.send()\n    except 
\n\n# https://python-twitter.readthedocs.io/en/latest/twitter.html#twitter.api.Api.PostUpdate\ndef send_tweet_message(message=\"\"):\n    # TWEET_CHARACTER_LIMIT = 280\n    twitter_api.PostUpdate(status=message)\n    logger.info('Tweet posted: %s', message)\n","repo_name":"Patrowl/PatrowlHears","sub_path":"backend_app/alerts/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":149,"dataset":"github-code","pt":"61"}
+{"seq_id":"9289817057","text":"\"\"\"\nRegions of Interest\n+++++++++++++++++++\n\n.. autosummary::\n\n    ~RectROI\n    ~LineROI\n    ~PlaneROI\n\n\"\"\"\n\nimport math\nimport numpy as np\nfrom skimage.draw import line_nd\n\nclass RectROI:\n    \"\"\"\n    A rectangular region of interest that can be applied to 3D datasets.\n    \n    Users can define the coordinate bounds for the region, define a\n    calculation to be carried out on the selected region, and then apply the ROI\n    to multiple datasets. This tool is scriptable, and the region bounds/calculation \n    can be modified at any point.\n\n    ATTRIBUTES\n\n    bounds\n        *dict* :\n        Coordinate bounds for the region of interest.\n\n    calculation\n        *dict* :\n        Calculation to be applied. This includes the calculation type\n        (average, max) and dimensions to calculate along.\n\n    output\n        *dict* :\n        Dataset and coordinates associated with applied calculation.\n    \"\"\"\n\n    bounds = None\n    calculation = None\n    output = None\n    \n    def __init__(self, dims: list=None) -> None:\n\n        if dims is None:\n            self.bounds = {\n                \"x\": (None, None),\n                \"y\": (None, None),\n                \"z\": (None, None)\n            }\n        else:\n            if len(dims) != 3:\n                raise ValueError(\"Invalid dims provided.\")\n            self.bounds = dict((dim, (None, None)) for dim in dims)\n        \n        self.calculation = {\n            \"output_data\": None,\n            \"dims\": None\n        }\n\n        self.output = {\n            \"data\": None,\n            \"coords\": None\n        }\n    \n    def set_bounds(self, bounds: dict) -> None:\n        \"\"\"\n        Sets coordinate bounds for the RectROI.\n        \n        PARAMETERS\n\n        bounds\n            *dict* :\n            Coordinate bounds for the region of interest.\n        \"\"\"\n        \n        if type(bounds) != dict:\n            raise ValueError(\"Invalid bounds provided.\")\n        if len(list(bounds.keys())) != 3:\n            raise ValueError(\"Invalid bounds provided.\")\n        for dim in list(bounds.keys()):\n            dim_bounds = bounds[dim]\n            # Treat a missing bound as fully open along that dimension\n            if dim_bounds is None:\n                dim_bounds = (None, None)\n                bounds[dim] = dim_bounds\n            if type(dim_bounds) != list and type(dim_bounds) != tuple:\n                raise ValueError(\"Invalid bounds provided.\")\n            \n            if len(dim_bounds) != 2:\n                raise ValueError(\"Invalid bounds provided.\")\n            if None not in bounds[dim] and dim_bounds[1] < dim_bounds[0]:\n                raise ValueError(\"Invalid bounds provided.\")\n\n        if set(list(bounds.keys())) == set(list(self.bounds.keys())):\n            self.bounds = {dim: bounds[dim] for dim in list(self.bounds.keys())}\n        else:\n            self.bounds = {dim: bounds[dim] for dim in list(bounds.keys())}\n
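\n    # Example (hypothetical axis names and limits; None leaves a side open):\n    #\n    #   roi = RectROI(dims=[\"H\", \"K\", \"L\"])\n    #   roi.set_bounds({\"H\": (-0.1, 0.1), \"K\": (None, None), \"L\": (0.5, 1.5)})\n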
\n    def set_calculation(self, output: str, dims: list) -> None:\n        \"\"\"\n        Sets the output calculation and the dimensions to calculate on.\n        \n        PARAMETERS\n\n        output\n            *str* :\n            Output type. Either \"average\" or \"max\" accepted.\n\n        dims\n            *list* :\n            Dimensions to calculate on.\n        \"\"\"\n\n        if dims is not None:\n            if not set(list(self.bounds.keys())).issuperset(set(dims)):\n                raise ValueError(\"Invalid dimension list provided.\")\n        \n        if output not in [\"average\", \"max\"]:\n            raise ValueError(\"Invalid output type provided. Accepted values are 'average' and 'max'.\")\n        \n        self.calculation = {\n            \"output\": output,\n            \"dims\": dims\n        }\n        \n    def apply(self, data, coords) -> None:\n        \"\"\"\n        Carries out an ROI's selected calculation on a dataset and its respective coordinate system.\n        \"\"\"\n\n        output_dims = self.calculation[\"dims\"]\n        output_type = self.calculation[\"output\"]\n        \n        if output_dims is None:\n            output_dims = []\n        if output_type is None:\n            raise ValueError(\"No output type found. Please add an output type using 'set_calculation'.\")\n\n        coords = coords.copy()\n\n        # Find bounding pixels for ROI\n        roi_idx = []\n        roi_coords = {}\n        for dim in list(coords.keys()):\n            bound_1, bound_2 = None, None\n            dim_coords = coords[dim]\n            dim_bounds = self.bounds[dim]\n\n            if dim_bounds[0] is None or np.searchsorted(dim_coords, dim_bounds[0]) == 0:\n                if dim_bounds[1] is None or np.searchsorted(dim_coords, dim_bounds[1]) == len(dim_coords):\n                    roi_idx.append(np.s_[:])\n                    roi_coords.update({dim: dim_coords[np.s_[:]]})\n                else:\n                    bound_2 = np.searchsorted(dim_coords, dim_bounds[1])\n                    roi_idx.append(np.s_[:bound_2])\n                    roi_coords.update({dim: dim_coords[np.s_[:bound_2]]})\n            else:\n                bound_1 = np.searchsorted(dim_coords, dim_bounds[0])\n                if dim_bounds[1] is None or np.searchsorted(dim_coords, dim_bounds[1]) == len(dim_coords):\n                    roi_idx.append(np.s_[bound_1:])\n                    roi_coords.update({dim: dim_coords[np.s_[bound_1:]]})\n                else:\n                    bound_2 = np.searchsorted(dim_coords, dim_bounds[1])\n                    roi_idx.append(np.s_[bound_1:bound_2])\n                    roi_coords.update({dim: dim_coords[np.s_[bound_1:bound_2]]})\n        roi_data = data[tuple(roi_idx)]\n\n        # Run output calculation\n        if output_type == \"average\":\n\n            if len(output_dims) == 0:\n                raise ValueError(\"Dimension to average on not provided.\")\n            \n            elif len(output_dims) == 1:\n                avg_dim_idx = list(coords.keys()).index(output_dims[0])\n                self.output[\"data\"] = np.mean(roi_data, axis=avg_dim_idx)\n\n                del(roi_coords[output_dims[0]])\n                self.output[\"coords\"] = roi_coords\n\n            elif len(output_dims) == 2:\n                avg_dim_idxs = [list(coords.keys()).index(dim) for dim in output_dims]\n                self.output[\"data\"] = np.mean(roi_data, axis=tuple(avg_dim_idxs))\n\n                del(roi_coords[output_dims[0]])\n                del(roi_coords[output_dims[1]])\n                self.output[\"coords\"] = roi_coords\n\n            elif len(output_dims) == 3:\n                self.output[\"data\"] = np.mean(roi_data, axis=(0, 1, 2))\n\n            else:\n                raise ValueError(\"Invalid dimension list.\")\n        \n        if output_type == \"max\":\n\n            if len(output_dims) == 0:\n                raise ValueError(\"Dimension to take the max on not provided.\")\n            \n            elif len(output_dims) == 1:\n                avg_dim_idx = list(coords.keys()).index(output_dims[0])\n                self.output[\"data\"] = np.amax(roi_data, axis=avg_dim_idx)\n\n                del(roi_coords[output_dims[0]])\n                self.output[\"coords\"] = roi_coords\n\n            elif len(output_dims) == 2:\n                avg_dim_idxs = [list(coords.keys()).index(dim) for dim in output_dims]\n                self.output[\"data\"] = np.amax(roi_data, axis=tuple(avg_dim_idxs))\n\n                del(roi_coords[output_dims[0]])\n                del(roi_coords[output_dims[1]])\n                self.output[\"coords\"] = roi_coords\n\n            elif len(output_dims) == 3:\n                self.output[\"data\"] = np.amax(roi_data, axis=(0, 1, 2))\n\n            else:\n                raise ValueError(\"Invalid dimension list.\")\n\n    def apply_to_scan(self, scan, data_type) -> None:\n        \n        if data_type == \"raw\":\n            data = scan.raw_data[\"data\"]\n            coords = scan.raw_data[\"coords\"]\n        elif data_type == \"gridded\":\n            data = scan.gridded_data[\"data\"]\n            coords = scan.gridded_data[\"coords\"]\n        else:\n            raise ValueError(\"Invalid data type provided.\")\n        \n        self.apply(data, coords)\n
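\n    # NOTE: apply_to_scan() expects `scan` to expose raw_data/gridded_data\n    # dicts holding \"data\" (a 3D ndarray) and \"coords\" ({dim: 1D array});\n    # e.g. a hypothetical call: roi.apply_to_scan(scan, data_type=\"gridded\").\n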
\n    def get_output(self) -> dict:\n        \"\"\"Returns the output from the most recent apply() run.\"\"\"\n        \n        return self.output\n\n\nclass LineROI:\n    \"\"\"A line segment region of interest that can be applied to a 3D dataset.\"\"\"\n\n    endpoints = None\n    calculation = None\n    output = None\n\n    def __init__(self, dims: list=None) -> None:\n\n        if dims is None:\n            self.endpoints = {\n                \"A\": {\n                    \"x\": None,\n                    \"y\": None,\n                    \"z\": None\n                },\n                \"B\": {\n                    \"x\": None,\n                    \"y\": None,\n                    \"z\": None\n                }\n            }\n        else:\n            if len(dims) != 3:\n                raise ValueError(\"Invalid dims provided.\")\n            self.endpoints = {\n                \"A\": dict((dim, None) for dim in dims),\n                \"B\": dict((dim, None) for dim in dims)\n            }\n        \n        self.calculation = {\n            \"output_data\": None,\n            \"dims\": None,\n            \"smoothing_radius\": 0,\n            \"smoothing_shape\": \"cube\"\n        }\n\n        self.output = {\n            \"data\": None,\n            \"coords\": None\n        }\n\n    def set_endpoints(self, endpoint_A: dict, endpoint_B: dict) -> None:\n        \"\"\"Sets the endpoint coordinates for the region.\"\"\"\n\n        # Ensuring that the function parameters are valid dictionaries\n        if type(endpoint_A) != dict or type(endpoint_B) != dict:\n            raise ValueError(\"Invalid bounds provided.\")\n        if len(list(endpoint_A.keys())) != 3 or len(list(endpoint_B.keys())) != 3:\n            raise ValueError(\"Invalid bounds provided.\")\n        if list(endpoint_A.keys()) != list(endpoint_B.keys()):\n            raise ValueError(\"Invalid bounds provided.\")\n        \n        self.endpoints[\"A\"] = dict((dim, endpoint_A[dim]) for dim in list(endpoint_A.keys()))\n        self.endpoints[\"B\"] = dict((dim, endpoint_B[dim]) for dim in list(endpoint_A.keys()))\n
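\n    # Example (hypothetical reciprocal-space coordinates):\n    #\n    #   roi = LineROI(dims=[\"H\", \"K\", \"L\"])\n    #   roi.set_endpoints(endpoint_A={\"H\": -1.0, \"K\": 0.0, \"L\": 0.5},\n    #                     endpoint_B={\"H\": 1.0, \"K\": 0.0, \"L\": 0.5})\n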
\n    def set_calculation(self, output: str, dims: list, smoothing_radius=0, smoothing_shape=\"cube\") -> None:\n        \"\"\" Sets the calculation type for the region of interest.\n        \n        This is not necessarily a dataset-specific function -- the selected \n        calculation can be applied to a series of datasets.\n        \"\"\"\n\n        if dims is not None:\n            if not set(list(self.endpoints[\"A\"].keys())).issuperset(set(dims)):\n                raise ValueError(\"Invalid dimension list provided.\")\n            if not set(list(self.endpoints[\"B\"].keys())).issuperset(set(dims)):\n                raise ValueError(\"Invalid dimension list provided.\")\n        \n        if output not in [\"values\", \"average\", \"max\"]:\n            raise ValueError(\"Invalid output type provided. Accepted values are 'values', 'average', and 'max'.\")\n        \n        self.calculation = {\n            \"output\": output,\n            \"dims\": dims,\n            \"smoothing_radius\": smoothing_radius,\n            \"smoothing_shape\": smoothing_shape\n        }\n\n    def apply(self, data, coords) -> None:\n        \"\"\"Applies the selected calculation to a dataset.\"\"\"\n\n        output_type = self.calculation[\"output\"]\n\n        if output_type == \"values\":\n            output_data, output_coords = self._get_values(data=data, coords=coords)\n        elif output_type == \"average\":\n            output_data, output_coords = self._get_average(data=data, coords=coords)\n        elif output_type == \"max\":\n            output_data, output_coords = self._get_max(data=data, coords=coords)\n        \n        self.output[\"data\"] = output_data\n        self.output[\"coords\"] = output_coords\n\n    def apply_to_scan(self, scan, data_type) -> None:\n        \"\"\"Applies the selected calculation to a scan dataset.\"\"\"\n\n        if data_type == \"raw\":\n            data = scan.raw_data[\"data\"]\n            coords = scan.raw_data[\"coords\"]\n        elif data_type == \"gridded\":\n            data = scan.gridded_data[\"data\"]\n            coords = scan.gridded_data[\"coords\"]\n        else:\n            raise ValueError(\"Invalid data type provided.\")\n        \n        self.apply(data, coords)\n\n    def get_output(self) -> dict:\n        \"\"\"Returns the output dictionary.\"\"\"\n        \n        return self.output\n    \n    def _get_values(self, data, coords) -> tuple:\n        \"\"\"Retrieves dataset values from provided coordinate bounds.\"\"\"\n\n        # Retrieves the pixels that the ROI crosses through\n        roi_pixels = self._get_pixels(data, coords)\n\n        if self.calculation[\"smoothing_radius\"] == 0:\n            output_data = self._get_data_from_pixels(pixels=roi_pixels, data=data)\n        else:\n            output_data = self._get_smoothed_data(pixels=roi_pixels, data=data)\n        output_coords = self._get_output_coords_from_pixels(pixels=roi_pixels, coords=coords)\n\n        return (output_data, output_coords)\n\n    def _get_average(self, data, coords) -> tuple:\n        \"\"\"Retrieves the average dataset values from provided coordinate bounds.\"\"\"\n        \n        value_data, output_coords = self._get_values(data=data, coords=coords)\n        \n        output_dims = self.calculation[\"dims\"]\n        dim_list = list(self.endpoints[\"A\"].keys())\n\n        if output_dims is None or len(output_dims) == 0:\n            output_data = np.mean(value_data)\n        elif len(output_dims) == 1:\n            output_data = np.mean(value_data, axis=dim_list.index(output_dims[0]))\n\n        return (output_data, output_coords)\n    \n    def _get_max(self, data, coords) -> tuple:\n        \"\"\"Retrieves the max dataset values from provided coordinate bounds.\"\"\"\n        \n        value_data, output_coords = self._get_values(data=data, coords=coords)\n        \n        output_dims = self.calculation[\"dims\"]\n        dim_list = list(self.endpoints[\"A\"].keys())\n\n        if output_dims is None or len(output_dims) == 0:\n\n            output_data = np.amax(value_data)\n\n        elif len(output_dims) == 1:\n\n            output_data = np.amax(value_data, axis=dim_list.index(output_dims[0]))\n\n        return (output_data, output_coords)\n\n    def _get_pixels(self, data: np.ndarray, coords: dict) -> list:\n        \"\"\"Utilizes Bresenham's line algorithm to pull out pixels that the line ROI intersects.\"\"\"\n\n        coords = coords.copy()\n\n        # Defines endpoint pixel indices\n        endpoint_A_pixels = self._get_endpoint_pixel_indicies(coords=coords, endpoint=self.endpoints[\"A\"])\n        endpoint_B_pixels = self._get_endpoint_pixel_indicies(coords=coords, endpoint=self.endpoints[\"B\"])\n        \n        # Bresenham line drawing step\n        intersected_pixels = self._bresenham_3d(endpoint_A_pixels, endpoint_B_pixels)\n        \n
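        # line_nd returns one index array per axis; the transpose in\n        # _bresenham_3d stacks them into an (N, 3) array of voxel indices.\n        # Determines which pixels lie within the shape of the dataset\n        valid_intersected_pixels = 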
self._get_valid_pixels(pixels=intersected_pixels, data=data)\n \n return valid_intersected_pixels\n \n def _get_endpoint_pixel_indicies(self, coords: dict, endpoint: dict) -> list:\n \"\"\"Returns the pixel indicies that correspond with an endpoint.\"\"\"\n\n endpoint_pixel_idxs = [] # Will hold pixel indicies\n\n dim_list = list(coords.keys()) # Ordered list of dimension labels (e.g. [\"H\", \"K\", \"L\"])\n\n # Loops through all three dimensions\n for dim in dim_list:\n\n dim_coords = coords[dim] # Full coordinates for given dimension\n dim_endpoint_coord = endpoint[dim] # Coordinate of endpoint for given dimension\n dim_endpoint_pixel_idx = None # Will hold pixel index for given dimension\n\n # Denotes width of pixels for a given dimension\n pixel_size = (dim_coords[-1] - dim_coords[0]) / len(dim_coords)\n\n # Checks if endpoint was specified\n if dim_endpoint_coord is None:\n dim_endpoint_pixel_idx = 0\n else:\n dim_endpoint_pixel_idx = int((dim_endpoint_coord - dim_coords[0]) / pixel_size)\n\n endpoint_pixel_idxs.append(dim_endpoint_pixel_idx)\n\n return endpoint_pixel_idxs\n\n def _bresenham_3d(self, endpoint_1_pixel_idxs: list, endpoint_2_pixel_idxs: list) -> np.ndarray:\n \n return np.transpose(line_nd(endpoint_1_pixel_idxs, endpoint_2_pixel_idxs))\n \n def _get_valid_pixels(self, pixels: np.ndarray, data: np.ndarray) -> np.ndarray:\n\n valid_indices = np.all(\n (pixels >= 0) & (pixels < data.shape),\n axis=1\n )\n valid_pixels = pixels[valid_indices] \n\n\n return valid_pixels\n \n def _mask_pixels_for_validity(self, pixels: np.ndarray, data: np.ndarray) -> np.ndarray:\n \n mask = np.all((pixels >= 0) & (pixels < data.shape), axis=1)\n mask = np.column_stack((mask, mask, mask))\n \n masked_pixels = np.ma.array(pixels, mask=~mask)\n\n return masked_pixels\n \n def _get_data_from_pixels(self, pixels: np.ndarray, data: np.ndarray) -> np.ndarray:\n\n output_dims = self.calculation[\"dims\"]\n dim_list = list(self.endpoints[\"A\"].keys())\n\n if output_dims is None or len(output_dims) == 0:\n output_data = data[pixels[:, 0], pixels[:, 1], pixels[:, 2]]\n\n elif len(output_dims) == 1:\n if dim_list.index(output_dims[0]) == 0:\n output_data = data[:, pixels[:, 1], pixels[:, 2]]\n elif dim_list.index(output_dims[0]) == 1:\n output_data = data[pixels[:, 0], :, pixels[:, 2]]\n elif dim_list.index(output_dims[0]) == 2:\n output_data = data[pixels[:, 0], pixels[:, 1], :]\n else: \n raise ValueError(\"Invalid dimension list.\") \n \n else:\n raise ValueError(\"Invalid dimension list.\")\n \n return output_data\n\n def _get_smoothed_data(self, data, pixels) -> np.ndarray:\n smoothing_radius = self.calculation[\"smoothing_radius\"]\n smoothing_shape = self.calculation[\"smoothing_shape\"]\n\n if smoothing_radius > 10:\n raise ValueError(\"Too large of a smoothing radius\")\n \n smoothed_data = []\n\n offsets = np.arange(-smoothing_radius, smoothing_radius + 1)\n offsets_grid = np.meshgrid(offsets, offsets, offsets)\n offsets_array = np.stack(offsets_grid, axis=-1).reshape(-1, 3)\n\n if smoothing_shape == \"sphere\":\n offsets_array = self._get_spherical_smoothing_offsets(offsets_array, smoothing_radius)\n\n pixels_to_average = np.repeat(pixels, offsets_array.shape[0], axis=0) + np.tile(offsets_array, (pixels.shape[0], 1))\n pixels_to_average = np.reshape(pixels_to_average, (pixels.shape[0], -1, 3))\n \n for i, px in enumerate(pixels):\n valid_pixels = self._get_valid_pixels(pixels_to_average[i], data)\n smoothed_data_point = np.mean(data[valid_pixels[:, 0], valid_pixels[:, 1], valid_pixels[:, 
2]])\n smoothed_data.append(smoothed_data_point)\n \n return np.array(np.array(smoothed_data))\n\n def _get_spherical_smoothing_offsets(self, offsets_array, smoothing_radius) -> np.ndarray:\n distances = np.linalg.norm(offsets_array, axis=1)\n valid_offsets = offsets_array[distances <= smoothing_radius]\n return valid_offsets\n\n def _get_output_coords_from_pixels(self, pixels: np.ndarray, coords: dict) -> dict:\n \n output_type = self.calculation[\"output\"]\n output_dims = self.calculation[\"dims\"]\n dim_list = list(self.endpoints[\"A\"].keys())\n\n if output_dims is None:\n output_dims = []\n\n coords = coords.copy()\n output_coords = None\n\n if len(output_dims) == 0:\n\n if output_type == \"values\":\n\n output_coords_label = f\"{', '.join(dim_list)}\"\n\n output_coords_list = []\n\n for dim, px in zip(dim_list, pixels.T):\n dim_coords = coords[dim]\n roi_coords_for_dim = [dim_coords[i] for i in px]\n output_coords_list.append(roi_coords_for_dim)\n\n output_coords_list = np.array(output_coords_list).T\n\n output_coords = {output_coords_label: output_coords_list}\n\n elif len(output_dims) == 1:\n\n if output_type == \"values\":\n\n # 1 x variable and 2 y variables\n output_coords_x_label, output_coords_y_label = None, []\n output_coords_x_list, output_coords_y_list = [], []\n \n for dim, px in zip(dim_list, pixels.T):\n dim_coords = coords[dim]\n roi_coords_for_dim = [dim_coords[i] for i in px]\n \n if dim in output_dims:\n output_coords_x_label = dim\n output_coords_x_list = roi_coords_for_dim\n else:\n output_coords_y_label.append(dim)\n output_coords_y_list.append(roi_coords_for_dim)\n\n output_coords_y_label = f\"{', '.join(output_coords_y_label)}\"\n output_coords_x_list = np.array(output_coords_x_list)\n output_coords_y_list = np.array(output_coords_y_list).T\n\n output_coords = {\n output_coords_x_label: output_coords_x_list,\n output_coords_y_label: output_coords_y_list\n }\n\n elif output_type == \"average\" or output_type == \"max\":\n\n x_dim = output_dims[0]\n x_dim_coords = coords[x_dim]\n roi_coords_for_dim = np.array([x_dim_coords[i] for i in pixels.T[dim_list.index(x_dim)]])\n output_coords = {x_dim: roi_coords_for_dim}\n\n else:\n raise ValueError(\"Invalid dimension list.\")\n\n return output_coords\n\n\nclass PlaneROI:\n\n plane = None\n calculation = None\n output = None\n\n def __init__(self, dims: list=None) -> None:\n if dims is None:\n self.plane = {\n \"point\": {\"x\": None, \"y\": None, \"z\": None},\n \"normal\": {\"x\": 1, \"y\": 1, \"z\": 1},\n }\n else:\n if len(dims) != 3:\n raise ValueError(\"Invalid dims provided.\")\n self.plane = {\n \"point\": dict((dim, None) for dim in dims),\n \"normal\": dict((dim, 0) for dim in dims),\n }\n\n self.calculation = {\"output_data\": None, \"dims\": None}\n self.output = {\"data\": None, \"coords\": None}\n\n def set_plane(self, point, normal) -> None:\n\n # Ensuring that the function parameters are valid dictionaries\n if type(point) != dict or type(normal) != dict:\n raise ValueError(\"Invalid points provided.\")\n if len(list(point.keys())) != 3 or len(list(normal.keys())) != 3:\n raise ValueError(\"Invalid points provided.\")\n if list(point.keys()) != list(normal.keys()):\n raise ValueError(\"Invalid points provided.\")\n \n self.plane[\"point\"] = dict((dim, None) for dim in list(point.keys()))\n self.plane[\"normal\"] = dict((dim, 0) for dim in list(normal.keys()))\n \n for dim in list(point.keys()):\n dim_point = point[dim]\n dim_normal = normal[dim]\n\n if type(dim_point) is None:\n 
self.plane[\"point\"][dim] == None\n\n if type(dim_normal) is None:\n self.plane[\"normal\"][dim] == 0\n\n self.plane[\"point\"][dim] = dim_point\n self.plane[\"normal\"][dim] = dim_normal\n \n def set_calculation(self, output=\"values\") -> None:\n\n if output not in [\"values\"]:\n raise ValueError(\"Invalid output type provided.\")\n \n self.calculation[\"output\"] = output\n \n def apply(self, data, coords) -> None:\n output_data, output_coords = self._get_values(data=data, coords=coords)\n\n self.output[\"data\"] = output_data\n self.output[\"coords\"] = output_coords\n\n def apply_to_scan(self, scan, data_type) -> None:\n \"\"\"Applies the selected calculation to a scan dataset.\"\"\"\n\n if data_type == \"raw\":\n data = scan.raw_data[\"data\"]\n coords = scan.raw_data[\"coords\"]\n elif data_type == \"gridded\":\n data = scan.gridded_data[\"data\"]\n coords = scan.gridded_data[\"coords\"]\n else:\n raise(\"Invalid data type provided.\")\n \n self.apply(data, coords)\n\n def get_output(self) -> dict:\n return self.output\n \n def _get_values(self, data, coords) -> tuple:\n \"\"\"Returns output data and coordinates.\"\"\"\n \n # Retrieves pixel indicies for plane\n plane_pixels, dim_order = self._get_plane_pixels(data, coords)\n\n if plane_pixels is None:\n return (None, None)\n\n # Retrieves output data for plane\n output_data = self._get_data_from_plane_pixels(plane_pixels=plane_pixels, data=data)\n\n # Retrieves output coordinates for plane\n output_coords = self._get_output_coords_from_plane_pixels(plane_pixels=plane_pixels, coords=coords, dim_order=dim_order)\n\n if output_coords is None:\n return (None, None)\n else:\n return (output_data, output_coords)\n \n def _get_plane_pixels(self, data, coords) -> np.ndarray:\n \"\"\"Returns pixel indicies that correspond to plane.\"\"\"\n\n coords = coords.copy()\n\n # Defining the 2D plane with a point and normal direction\n point_pixel = self._get_point_pixel_indicies(point=self.plane[\"point\"], coords=coords)\n normal = list(self.plane[\"normal\"].values())\n\n a, b, c = self._get_unit_vector(normal)\n d = -(a * point_pixel[0] + b * point_pixel[1] + c * point_pixel[2])\n\n # Calculates the indicies where the plane and edges of the dataset intersect\n edge_intersection_points = []\n x_0, y_0, z_0 = 0, 0, 0\n x_1, y_1, z_1 = data.shape\n\n for x in [None, x_0, x_1]:\n for y in [None, y_0, y_1]:\n for z in [None, z_0, z_1]:\n\n # Checks for 0's in the normal vector\n if a == 0:\n x = point_pixel[0]\n if b == 0:\n y = point_pixel[1]\n if c == 0:\n z = point_pixel[2]\n\n # Only one of x, y, or z is allowed to be \"None\" at a single \n # time --- that represents the variable being solved for.\n if (\n (x is None and y is None) or \n (x is None and z is None) or \n (y is None and z is None)\n ):\n pass\n else:\n if x is None:\n edge_x = self._solve_for_plane(a, b, c, d, x=x, y=y, z=z)\n edge_y = y\n edge_z = z\n elif y is None:\n edge_x = x\n edge_y = self._solve_for_plane(a, b, c, d, x=x, y=y, z=z)\n edge_z = z\n elif z is None:\n edge_x = x\n edge_y = y\n edge_z = self._solve_for_plane(a, b, c, d, x=x, y=y, z=z)\n\n if (\n (edge_x >= x_0 and edge_x <= x_1) and\n (edge_y >= y_0 and edge_y <= y_1) and\n (edge_z >= z_0 and edge_z <= z_1)\n ):\n edge_intersection_points.append([edge_x, edge_y, edge_z])\n\n edge_intersection_points_T = np.array(edge_intersection_points).T\n if 0 in edge_intersection_points_T.shape:\n return None, None\n \n # Minimum and maximum coordinate for each dimension\n # These values exist within the bounds of the 
\n        # Calculates the indices where the plane and edges of the dataset intersect\n        edge_intersection_points = []\n        x_0, y_0, z_0 = 0, 0, 0\n        x_1, y_1, z_1 = data.shape\n\n        for x in [None, x_0, x_1]:\n            for y in [None, y_0, y_1]:\n                for z in [None, z_0, z_1]:\n\n                    # Checks for 0's in the normal vector\n                    if a == 0:\n                        x = point_pixel[0]\n                    if b == 0:\n                        y = point_pixel[1]\n                    if c == 0:\n                        z = point_pixel[2]\n\n                    # Only one of x, y, or z is allowed to be \"None\" at a single \n                    # time --- that represents the variable being solved for.\n                    if (\n                        (x is None and y is None) or \n                        (x is None and z is None) or \n                        (y is None and z is None)\n                    ):\n                        pass\n                    else:\n                        if x is None:\n                            edge_x = self._solve_for_plane(a, b, c, d, x=x, y=y, z=z)\n                            edge_y = y\n                            edge_z = z\n                        elif y is None:\n                            edge_x = x\n                            edge_y = self._solve_for_plane(a, b, c, d, x=x, y=y, z=z)\n                            edge_z = z\n                        elif z is None:\n                            edge_x = x\n                            edge_y = y\n                            edge_z = self._solve_for_plane(a, b, c, d, x=x, y=y, z=z)\n\n                        if (\n                            (edge_x >= x_0 and edge_x <= x_1) and\n                            (edge_y >= y_0 and edge_y <= y_1) and\n                            (edge_z >= z_0 and edge_z <= z_1)\n                        ):\n                            edge_intersection_points.append([edge_x, edge_y, edge_z])\n\n        edge_intersection_points_T = np.array(edge_intersection_points).T\n        if 0 in edge_intersection_points_T.shape:\n            return None, None\n        \n        # Minimum and maximum coordinate for each dimension\n        # These values exist within the bounds of the dataset\n        x_min, x_max = np.amin(edge_intersection_points_T[0]), np.amax(edge_intersection_points_T[0])\n        y_min, y_max = np.amin(edge_intersection_points_T[1]), np.amax(edge_intersection_points_T[1])\n        z_min, z_max = np.amin(edge_intersection_points_T[2]), np.amax(edge_intersection_points_T[2])\n\n        # Determines coordinate bounds of output data\n        dim_bounds = np.array([[x_min, y_min, z_min], [x_max, y_max, z_max]]).astype(np.int64)\n        dim_order = np.argsort(dim_bounds[1] - dim_bounds[0])\n\n        # Determine axes for output plane\n        x_axis_dim = dim_order[0]\n        y_axis_dim = dim_order[1]\n        x_axis_range = np.arange(dim_bounds[0][x_axis_dim], dim_bounds[1][x_axis_dim])\n        y_axis_range = np.arange(dim_bounds[0][y_axis_dim], dim_bounds[1][y_axis_dim])\n\n        # Creates the plane of pixel indices\n        if x_axis_dim == 0:\n            if y_axis_dim == 1:\n                X, Y = np.meshgrid(x_axis_range, y_axis_range)\n                Z = -(a * X + b * Y + d) / c\n            elif y_axis_dim == 2:\n                X, Z = np.meshgrid(x_axis_range, y_axis_range)\n                Y = -(a * X + c * Z + d) / b\n        elif x_axis_dim == 1:\n            if y_axis_dim == 0:\n                Y, X = np.meshgrid(x_axis_range, y_axis_range)\n                Z = -(a * X + b * Y + d) / c\n            elif y_axis_dim == 2:\n                Y, Z = np.meshgrid(x_axis_range, y_axis_range)\n                X = -(b * Y + c * Z + d) / a\n        elif x_axis_dim == 2:\n            if y_axis_dim == 0:\n                Z, X = np.meshgrid(x_axis_range, y_axis_range)\n                Y = -(a * X + c * Z + d) / b\n            elif y_axis_dim == 1:\n                Z, Y = np.meshgrid(x_axis_range, y_axis_range)\n                X = -(b * Y + c * Z + d) / a\n\n        plane_pixels = np.array([X, Y, Z], dtype=np.int64).T\n        \n        return plane_pixels, dim_order\n\n    def _get_data_from_plane_pixels(self, plane_pixels, data) -> np.ndarray:\n        \"\"\"Returns the data points that correspond to a given plane of indices.\"\"\"\n\n        # Flattens plane for masking purposes\n        flat_plane_pixels = plane_pixels.reshape(-1, plane_pixels.shape[-1])\n\n        # Mask to omit invalid indices\n        flat_mask = np.any(\n            (flat_plane_pixels < 0) | (flat_plane_pixels >= data.shape),\n            axis=1\n        )\n\n        # Pulls data from given indices, invalid indices yield value of 0\n        flat_data_plane = []\n        for i, m in enumerate(flat_mask):\n            if m:\n                flat_data_plane.append(0)\n            else:\n                flat_data_plane.append(data[\n                    flat_plane_pixels[i, 0], \n                    flat_plane_pixels[i, 1], \n                    flat_plane_pixels[i, 2]]\n                )\n\n        data_plane = np.array(flat_data_plane).reshape((\n            plane_pixels.shape[0], \n            plane_pixels.shape[1]\n        ))\n\n        data_plane = np.fliplr(data_plane)\n\n        return data_plane\n    \n    def _get_output_coords_from_plane_pixels(self, plane_pixels, coords, dim_order) -> dict:\n        \n        if 0 in plane_pixels.shape:\n            return None\n        \n        output_coords = {}\n        dim_list = list(self.plane[\"point\"].keys())\n        coords = coords.copy()\n        \n        primary_x_axis_dim = dim_order[0]\n        primary_y_axis_dim = dim_order[1]\n        secondary_axis_dim = dim_order[2]\n\n        # Determines the pixels that correspond to the axes of the output image\n        if primary_x_axis_dim == 0:\n            if primary_y_axis_dim == 1:\n                # Defines the pixels for the coordinates across the x-axis of the output image\n                x_axis_pixels = plane_pixels[0, :, :].T\n                # Defines the pixels for the coordinates across the y-axis of the output image\n                y_axis_pixels = plane_pixels[:, 0, :].T\n            elif primary_y_axis_dim == 2:\n                x_axis_pixels = plane_pixels[0, :, :].T\n                y_axis_pixels = plane_pixels[:, :, 0].T\n        elif primary_x_axis_dim == 1:\n            if primary_y_axis_dim == 0:\n                x_axis_pixels = plane_pixels[:, 0, :].T\n                y_axis_pixels = plane_pixels[0, :, :].T\n            elif primary_y_axis_dim == 2:\n                x_axis_pixels = plane_pixels[:, 0, :].T\n                y_axis_pixels = plane_pixels[:, :, 0].T\n        elif primary_x_axis_dim == 2:\n            if primary_y_axis_dim == 0:\n                x_axis_pixels = plane_pixels[:, :, 0].T\n                y_axis_pixels = plane_pixels[0, :, :].T\n            elif primary_y_axis_dim == 1:\n                x_axis_pixels = plane_pixels[:, :, 0].T\n                y_axis_pixels = plane_pixels[:, 0, :].T\n\n        x_output_coords_label = []\n        y_output_coords_label = []\n        \n        x_coords = []\n        y_coords = []\n\n        for i in dim_order:\n            dim, dim_x_px, dim_y_px = dim_list[i], x_axis_pixels[i], y_axis_pixels[i]\n            dim_coords = coords[dim]\n            dim_delta = dim_coords[1] - dim_coords[0]\n\n            if dim_x_px[0] != dim_x_px[-1]:\n                x_output_coords_label.append(dim)\n                x_dim_coords = [dim_delta * i for i in dim_x_px]\n                x_coords.append(x_dim_coords)\n            \n            if dim_y_px[0] != dim_y_px[-1]:\n                y_output_coords_label.append(dim)\n                y_dim_coords = [dim_delta * i for i in dim_y_px]\n                y_coords.append(y_dim_coords)\n\n        x_output_coords_label = \",\".join(x_output_coords_label)\n        y_output_coords_label = \",\".join(y_output_coords_label)\n\n        x_coords = np.array(x_coords).T\n        y_coords = np.array(y_coords).T\n\n        output_coords = {\n            x_output_coords_label: x_coords,\n            y_output_coords_label: y_coords\n        }\n\n        return output_coords\n\n    def _get_point_pixel_indicies(self, coords: dict, point: dict) -> list:\n        \"\"\"Returns the pixel indices that correspond with a point.\"\"\"\n\n        point_pixel_idxs = []  # Will hold pixel indices\n\n        dim_list = list(coords.keys())  # Ordered list of dimension labels (e.g. [\"H\", \"K\", \"L\"])\n\n        # Loops through all three dimensions\n        for dim in dim_list:\n\n            dim_coords = coords[dim]  # Full coordinates for given dimension\n            dim_point_coord = point[dim]  # Coordinate of point for given dimension\n            dim_point_pixel_idx = None  # Will hold pixel index for given dimension\n\n            # Denotes width of pixels for a given dimension\n            pixel_size = (dim_coords[-1] - dim_coords[0]) / len(dim_coords)\n\n            # Checks if the point was specified\n            if dim_point_coord is None:\n                dim_point_pixel_idx = 0\n            else:\n                dim_point_pixel_idx = int((dim_point_coord - dim_coords[0]) / pixel_size)\n\n            point_pixel_idxs.append(dim_point_pixel_idx)\n\n        return point_pixel_idxs\n    \n    def _solve_for_plane(self, a, b, c, d, x=None, y=None, z=None) -> float:\n        \n        if x is None:\n            if a == 0:\n                a = 0.000001\n            x = (-d - b*y - c*z) / a\n            return x\n        \n        if y is None:\n            if b == 0:\n                b = 0.000001\n            y = (-d - a*x - c*z) / b\n            return y\n        \n        if z is None:\n            if c == 0:\n                c = 0.000001\n            z = (-d - a*x - b*y) / c\n            return z\n    \n    def _get_unit_vector(self, v) -> list:\n        magnitude = math.sqrt(\n            sum(component**2 for component in v)\n        )\n\n        unit_vector = [(component / magnitude) for component in v]\n\n        return unit_vector
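\n\n\nif __name__ == \"__main__\":\n    # Minimal smoke-test sketch (illustrative only; not part of the original\n    # module -- the axis names, shapes, and bounds below are arbitrary).\n    rng = np.random.default_rng(0)\n    demo_data = rng.random((10, 10, 10))\n    demo_coords = {\n        \"H\": np.linspace(-1, 1, 10),\n        \"K\": np.linspace(-1, 1, 10),\n        \"L\": np.linspace(-1, 1, 10)\n    }\n\n    demo_roi = RectROI(dims=[\"H\", \"K\", \"L\"])\n    demo_roi.set_bounds({\"H\": (-0.5, 0.5), \"K\": (None, None), \"L\": (None, None)})\n    demo_roi.set_calculation(output=\"average\", dims=[\"H\"])\n    demo_roi.apply(demo_data, demo_coords)\n    print(demo_roi.get_output()[\"data\"].shape)  # expected: (10, 10)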
","repo_name":"henryjsmith12/xrd-image-util","sub_path":"xrdimageutil/roi.py","file_name":"roi.py","file_ext":"py","file_size_in_byte":36420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"20461056126","text":"import random\nimport weakref\nfrom protocolbuffers import Consts_pb2\nfrom event_testing.resolver import SingleSimResolver\nfrom event_testing.results import TestResult\nfrom event_testing.test_events import TestEvent\nfrom event_testing.tests import TunableTestSet\nfrom indexed_manager import 
CallbackTypes\nfrom interactions.context import InteractionContext\nfrom interactions.interaction_cancel_compatibility import InteractionCancelCompatibility, InteractionCancelReason\nfrom interactions.liability import Liability\nfrom interactions.priority import Priority\nfrom objects import system\nfrom objects.components.state import TunableStateValueReference, ObjectState, ObjectStateValue\nfrom objects.fire.fire import Fire\nfrom postures.transition_sequence import DerailReason\nfrom sims import household_manager\nfrom sims4.callback_utils import CallableList\nfrom sims4.localization import TunableLocalizedStringFactory\nfrom sims4.localization.localization_tunables import LocalizedStringHouseholdNameSelector\nfrom sims4.service_manager import Service\nfrom sims4.tuning.tunable import TunableReference, Tunable, TunableRange, TunableInterval, TunableList, TunablePercent, TunableEnumEntry\nfrom singletons import DEFAULT\nfrom situations import situation_complex\nfrom situations.situation_guest_list import SituationGuestList, SituationGuestInfo, SituationInvitationPurpose\nfrom ui.ui_dialog_notification import TunableUiDialogNotificationSnippet, UiDialogNotification\nfrom vfx import PlayEffect\nimport alarms\nimport build_buy\nimport date_and_time\nimport placement\nimport services\nimport sims4.resources\nimport tag\nimport terrain\nlogger = sims4.log.Logger('Fire', default_owner='rfleig')\nwith sims4.reload.protected(globals()):\n fire_enabled = True\n\nclass FireImmunityLiability(Liability):\n __qualname__ = 'FireImmunityLiability'\n LIABILITY_TOKEN = 'FireImmunityLiability'\n\nclass FireService(Service):\n __qualname__ = 'FireService'\n FIRE_OBJECT_DEF = TunableReference(manager=services.definition_manager())\n FIRE_OBJECT_FIRE_STATE = ObjectState.TunableReference(description='The ObjectState used to track a fire objects progress. Do not Tune.')\n FIRE_OBJECT_EXTINGUISHED_STATE_VALUE = ObjectStateValue.TunableReference(description='The ObjectStateValue a fire object has when the fire has been extinguished or just burnt out. 
Do Not Tune.')\n FIRE_SPREAD_INTIAL_TIME_IN_SIM_MINUTES = Tunable(description='\\n Initial time in sim minutes to wait when a fire first breaks out on a \\n lot before trying to spread the fire.\\n ', tunable_type=int, default=15)\n FIRE_SPREAD_REPEATING_TIME_IN_SIM_MINUTES = Tunable(description='\\n How long in Sim minutes to wait between each check for whether or\\n not fire should spread.\\n ', tunable_type=int, default=15)\n FIRE_SPREAD_CHANCE = TunableRange(description='\\n A value between 0 - 1 that is how likely fire is to spread once \\n the spread timer goes off.\\n ', minimum=0, maximum=1, tunable_type=float, default=0.9)\n FIRE_STARTED_NOTIFICATION = TunableUiDialogNotificationSnippet(description='\\n The notification that is displayed whenever a fire first breaks out on\\n a lot.\\n ')\n FIRE_REACTION_NOTIFICATION = TunableUiDialogNotificationSnippet(description='\\n The notification that is displayed whenever the first Sim reacts to \\n the fire so that the player can click on the sim and center in on the\\n sim to help find the fire.\\n ')\n FIRE_QUADTREE_RADIUS = Tunable(description=\"\\n Size of the fire's quadtree footprint used for spatial queries\\n \", tunable_type=float, default=1.0)\n FIRE_RETARDANT_EXTRA_OBJECT_RADIUS = Tunable(description='\\n Extra amount of space to preserve around a fire retardant object where\\n fire cannot spread.\\n ', tunable_type=float, default=0.2)\n MAX_NUM_ATTEMPTS_TO_PLACE_FIRE = Tunable(description='\\n When trying to spread fire, this is the number of times an attempt will\\n be made to find a place to put the new fire down without overlapping\\n before giving up.\\n ', tunable_type=int, default=10)\n FIRE_PLACEMENT_RANGE = TunableInterval(description='\\n A tunable to represent how far from an existing fire object that new\\n fire object can be placed. The value represents multiples of the radius\\n of the fire object.\\n \\n Example a value of 2 means that it is ok to place the new fire object\\n 2 * the object radius away from the existing fire location. This is \\n the minimum because anything less will overlap with the existing fire \\n object on placement.\\n ', tunable_type=float, minimum=2, default_lower=2, default_upper=3)\n FLAMMABLE_COMMODITY = TunableReference(description='\\n The commodity used to determin if an object is flammable or not.\\n ', manager=services.get_instance_manager(sims4.resources.Types.STATISTIC))\n FLAMMABLE_COMMODITY_DECAY_PER_FIRE = TunableRange(description='\\n The amount of decay modifier to add to an objects FLAMMABLE_COMMODITY\\n per fire object that is overlapping with it. No negative numbers.\\n ', tunable_type=float, default=5, minimum=0)\n FIRE_SIM_ON_FIRE_AFFORDANCE = TunableReference(description='\\n The affordance that gets pushed onto a Sim when they catch on fire.\\n ', manager=services.get_instance_manager(sims4.resources.Types.INTERACTION))\n FIRE_CAN_SPREAD_TO_SIM_TESTS = TunableTestSet(description='\\n A tunable set of tests which Sims are required to pass in order for\\n fire to be placed at their location. 
If the tests fail fire will fail\\n to spread to their location and they will not catch fire as a result.\\n ')\n FIRE_SITUATION = TunableReference(description='\\n A reference to the fire situation to use on Sims that are on a lot\\n with a fire.\\n ', manager=services.get_instance_manager(sims4.resources.Types.SITUATION))\n FIRE_JOB = TunableReference(description='\\n A reference to the fire job that Sims will have in the fire situation\\n while there is a fire on the lot.\\n ', manager=services.get_instance_manager(sims4.resources.Types.SITUATION_JOB))\n FIRE_PANIC_BUFFS = TunableList(TunableReference(manager=services.get_instance_manager(sims4.resources.Types.BUFF)), description='\\n A List of Buffs that indicate a Sim is\\n in a panic state because of fire. This\\n will be used to limit their behaviors\\n while they are aware of a fire on the\\n lot.\\n ')\n SAVE_LOCK_TOOLTIP = TunableLocalizedStringFactory(description='The tooltip/message to show when the player tries to save the game while a fire situation is happening')\n INTERACTION_UNAVAILABLE_DUE_TO_FIRE_TOOLTIP = TunableLocalizedStringFactory(description='The tooltip to show in the grayed out tooltip when the player tries to interact with things on a lot that has a fire.')\n SPRINKLER_HEAD_OBJECT_DEF = TunableReference(manager=services.definition_manager())\n SPRINKLER_BOX_OBJECT_TAG = TunableEnumEntry(tunable_type=tag.Tag, default=tag.Tag.INVALID)\n FIRE_ALARM_OBJECT_DEF = TunableReference(manager=services.definition_manager())\n FIRE_ALARM_ACTIVE_STATE = TunableStateValueReference(description='\\n The state the fire alarm should be in while active\\n ')\n FIRE_ALARM_DEACTIVATED_STATE = TunableStateValueReference(description='\\n The state the fire alarm should be in while not active\\n ')\n FIRE_SPRINKLER_ACTIVE_STATE = TunableStateValueReference(description='\\n The state the fire sprinkler should be in while active\\n ')\n FIRE_SPRINKLER_DEACTIVATED_STATE = TunableStateValueReference(description='\\n The state the fire sprinkler should be in while not active\\n ')\n SPRINKLER_EFFECT = PlayEffect.TunableFactory()\n SPRINKLER_ACTIVATION_TIME = Tunable(description='\\n Time in sim minutes after a fire starts on a lot before activating the sprinkler system.\\n ', tunable_type=int, default=30)\n SPRINKLER_RUN_TIME = Tunable(description='\\n Time in sim minutes between sprinkler system checks. It will check for new fires,\\n and deactivate if there are no fires left burning.\\n ', tunable_type=int, default=15)\n SPRINKLER_PUDDLE_CHANCE = Tunable(description=\"\\n Chance for a puddle to appear somewhere in the sprinkler's area of effect.\\n \", tunable_type=int, default=30)\n FIRE_STRENGTH_COMMODITY = TunableReference(description='\\n The commodity that represents the strength of a fire.\\n ', manager=services.get_instance_manager(sims4.resources.Types.STATISTIC))\n FIRE_BEEN_EXTINGUISHED_COMMODITY = TunableReference(description='\\n A static commodity used to mark a fire object as having been\\n extinguished. \\n \\n If this commodity is present on the object when it burns\\n out then it will be removed from the fire_object_quadtree so that fire\\n can spark back up there.\\n ', manager=services.get_instance_manager(sims4.resources.Types.STATISTIC))\n FIRE_STRENGTH_COMMODITY_SUPRESSION_DECAY = TunableRange(description='\\n The amount of decay modifier to add to an objects FIRE_STRENGTH_COMMODITY\\n when it is being supresssed by a sprinkler. 
No negative numbers.\\n ', tunable_type=float, default=15, minimum=0)\n FIRE_ALARM_CYCLE_TIME = Tunable(description='\\n Time in sim minutes after a fire starts on a lot before fire alarms\\n will start activating due to fires within range. This is also the time\\n between checks to see if fires have been put out, for deactivation\\n purposes.\\n ', tunable_type=int, default=15)\n FIRE_ALARM_ACTIVATION_RADIUS = Tunable(description='\\n How far away from a given fire alarm a fire must be in order not to\\n set it off.\\n ', tunable_type=float, default=5.0)\n SCORCH_TERRAIN_CLEANUP_HOUR = Tunable(description='\\n Hour of the day to attempt to clean up any scorch marks that are on\\n terrain. Range: 0-23 Default: 3 = 3am\\n ', tunable_type=int, default=3)\n SCORCH_TERRAIN_CLEANUP_RADIUS = Tunable(description='\\n The radius in which a clean scorch mark call will remove scorch marks\\n ', tunable_type=float, default=2.5)\n FIRE_INSURANCE_CLAIM_PERCENTAGE = TunablePercent(description='\\n A value between 0 and 100 which is the percentage of the loss covered \\n by insurance when an object is burned/destroyed.\\n ', default=60)\n FIRE_INSURANCE_CLAIM_NOTIFICATION = UiDialogNotification.TunableFactory(description='\\n This is the dialog that will be displayed at the end of a fire that will\\n alert the user of the amount of money they have been refunded as part of\\n the fire insurance.\\n ', text=LocalizedStringHouseholdNameSelector.TunableFactory(description='\\n This string is provided two tokens.\\n * The first token is either a Sim, should the Sim be the only\\n member of the household, or a string containing the household name\\n should that not be the case. \\n * The second token is a number representing the amount of money\\n refunded by insurance.\\n '))\n START_PANIC_INTERACTION = situation_complex.TunableReference(description='\\n The interaction to look for when a Sim reacts to a fire to know to\\n start that Sim in the panic state.\\n ', manager=services.affordance_manager())\n FIRE_SPREAD_HEIGHT_THRESHOLD = Tunable(description='\\n The height differential threshold, between an existing fire and the\\n place it is attempting to spawn, above which the fire will not spread.\\n ', tunable_type=float, default=0.5)\n FIRE_EXTNIGUISH_NEARBY_RADIUS = Tunable(description='\\n The radius that nearby fires or sims on fires will also be extinguished\\n when a sim extinguishes a fire or a sim on fire.\\n ', tunable_type=float, default=1.1)\n ROUTING_FIRE_CHECK_RADIUS = Tunable(description='\\n The radius in which a routing sim has to be within in order to possibly\\n catch on fire\\n ', tunable_type=float, default=0.4)\n SPRINKLER_SUPRESSION_RADIUS = 3.0\n SPRINKLER_HEAD_CEILING_OFFSET = 0.1\n IMMEDIATE_SUPPRESSION_RATE = 100.0\n\n def __init__(self):\n self._fire_objects = weakref.WeakSet()\n self._situation_ids = {}\n self._fire_spread_alarm = None\n self._fire_quadtree = None\n self._flammable_objects_quadtree = None\n self._burning_objects = None\n self._scorch_cleanup_alarm = None\n self._sprinkler_system_objects = weakref.WeakSet()\n self._sprinkler_objects = set()\n self._fire_objects_being_suppressed = weakref.WeakSet()\n self._unsuppressible_fires = weakref.WeakSet()\n self._sprinkler_alarm = None\n self._sprinkler_has_been_activated = False\n self._fire_alarm_objects = set()\n self._fire_alarm_alarm = None\n self._activated_fire_alarms = set()\n self._insurance_value = 0\n self._registered_for_panic_start = False\n\n @property\n def fire_is_active(self):\n if self._fire_objects:\n 
return True\n return False\n\n @property\n def fire_quadtree(self):\n return self._fire_quadtree\n\n @property\n def flammable_objects_quadtree(self):\n return self._flammable_objects_quadtree\n\n def get_lock_save_reason(self):\n return self.SAVE_LOCK_TOOLTIP()\n\n def _fire_spread_alarm_callback(self, handle):\n if not self.fire_is_active:\n alarms.cancel_alarm(handle)\n self._fire_spread_alarm = None\n return\n chance = random.uniform(0, 1)\n if chance > self.FIRE_SPREAD_CHANCE:\n return\n self.spread_fire()\n\n def _add_fire_to_quadtree(self, fire_object, location=DEFAULT):\n if self._fire_quadtree is None:\n self._fire_quadtree = sims4.geometry.QuadTree()\n if location is DEFAULT:\n location = sims4.math.Vector2(fire_object.position.x, fire_object.position.z)\n fire_bounds = sims4.geometry.QtCircle(location, self.FIRE_QUADTREE_RADIUS)\n self._fire_quadtree.insert(fire_object, fire_bounds)\n\n def _remove_fire_from_quadtree(self, fire_object):\n if self._fire_quadtree is None:\n return\n self._fire_quadtree.remove(fire_object)\n\n def query_quadtree_for_fire_object(self, position, radius=DEFAULT, level=None):\n if self._fire_quadtree is None:\n return []\n if radius is DEFAULT:\n radius = self.FIRE_QUADTREE_RADIUS\n query = sims4.geometry.SpatialQuery(sims4.geometry.QtCircle(position, radius), [self._fire_quadtree])\n found_fires = query.run()\n if level is not None:\n fires_to_remove = set()\n for fire in found_fires:\n while fire.location.level is not level:\n fires_to_remove.add(fire)\n if fires_to_remove:\n found_fires = [fire for fire in found_fires if fire not in fires_to_remove]\n return found_fires\n\n def _query_quadtree_for_flammable_object(self, position, radius=DEFAULT, level=None):\n if self._flammable_objects_quadtree is None:\n return []\n radius = self.FIRE_QUADTREE_RADIUS if radius is DEFAULT else radius\n query = sims4.geometry.SpatialQuery(sims4.geometry.QtCircle(position, radius), [self._flammable_objects_quadtree])\n found_objs = query.run()\n if level is not None:\n obj_to_remove = set()\n for fire in found_objs:\n while fire.location.level is not level:\n obj_to_remove.add(fire)\n if obj_to_remove:\n found_objs = [fire for fire in found_objs if fire not in obj_to_remove]\n return found_objs\n\n def _query_quadtree_for_sim(self, position, level, filter_type, radius=DEFAULT):\n sim_quadtree = services.sim_quadtree()\n radius = self.FIRE_QUADTREE_RADIUS if radius is DEFAULT else radius\n return sim_quadtree.query(sims4.geometry.QtCircle(position, radius), level=level, filter=filter_type)\n\n def _derail_routing_sims_if_necessary(self, fire):\n fire_footprint = fire.footprint_polygon\n for sim in services.sim_info_manager().instanced_sims_on_active_lot_gen():\n while sim.current_path and sim.current_path.nodes:\n nodes_list = list(sim.current_path.nodes)\n while True:\n for (prev, curr) in zip(nodes_list, nodes_list[1:]):\n path_rectangle = sims4.geometry.build_rectangle_from_two_points_and_radius(sims4.math.Vector3(*prev.position), sims4.math.Vector3(*curr.position), 1.0)\n while path_rectangle.intersects(fire_footprint):\n sim.queue.transition_controller.derail(DerailReason.NAVMESH_UPDATED, sim)\n break\n\n def _fire_object_state_changed_callback(self, owner, state, old_value, new_value):\n if state is self.FIRE_OBJECT_FIRE_STATE and new_value is self.FIRE_OBJECT_EXTINGUISHED_STATE_VALUE and not owner.get_users():\n owner.destroy(source=owner, cause='Fire is being extinguished.')\n\n def _spawn_fire(self, transform, routing_surface, run_placement_tests=True):\n if 
not fire_enabled:\n            logger.info('Trying to spawn fire when fire is disabled. Please use |fire.toggle_enabled cheat to turn fire on.')\n            return\n        if not services.active_lot().is_position_on_lot(transform.translation):\n            logger.info('Trying to spawn fire on a lot other than the active lot.')\n            return\n        if not services.venue_service().venue.allows_fire:\n            logger.info(\"Trying to spawn a fire on a venue that doesn't allow fire.\")\n            return\n        if run_placement_tests and not self._placement_tests(transform.translation, routing_surface.secondary_id):\n            logger.info('Trying to spawn a fire on a lot at a position that is not valid.')\n            return\n        fire_object = system.create_object(self.FIRE_OBJECT_DEF)\n        fire_object.move_to(transform=transform, routing_surface=routing_surface)\n        first_fire_on_lot = not self._fire_objects\n        self._fire_objects.add(fire_object)\n        fire_object.add_state_changed_callback(self._fire_object_state_changed_callback)\n        self.start_objects_burning(fire_object)\n        self.add_scorch_mark(fire_object.position, fire_object.location.level)\n        self._derail_routing_sims_if_necessary(fire_object)\n        if first_fire_on_lot:\n            self._start_fire_situations()\n            self.activate_fire_alarms()\n            self.activate_sprinkler_system()\n            self._show_fire_notification()\n            self._create_or_replace_scorch_cleanup_alarm()\n            services.get_persistence_service().lock_save(self)\n            self.register_for_sim_active_lot_status_changed_callback()\n        if self._fire_spread_alarm is None:\n            time_span = date_and_time.create_time_span(minutes=self.FIRE_SPREAD_INTIAL_TIME_IN_SIM_MINUTES)\n            repeating_time_span = date_and_time.create_time_span(minutes=self.FIRE_SPREAD_REPEATING_TIME_IN_SIM_MINUTES)\n            self._fire_spread_alarm = alarms.add_alarm(self, time_span, self._fire_spread_alarm_callback, repeating=True, repeating_time_span=repeating_time_span)\n\n    def spawn_fire_at_object(self, obj):\n        self._spawn_fire(obj.transform, obj.routing_surface)\n\n    def _show_fire_notification(self):\n        client = services.client_manager().get_first_client()\n        dialog = self.FIRE_STARTED_NOTIFICATION(client.active_sim)\n        dialog.show_dialog()\n\n    def spread_fire(self):\n        if not self._fire_objects:\n            return\n        logger.debug('Starting to attempt to spread fire.')\n        fire_object_list = list(self._fire_objects)\n        for attempt in range(self.MAX_NUM_ATTEMPTS_TO_PLACE_FIRE):\n            logger.debug('Attempt {} to spread fire.', attempt)\n            fire_object = random.choice(fire_object_list)\n            distance_in_radii = self.FIRE_PLACEMENT_RANGE.random_float()*self.FIRE_QUADTREE_RADIUS\n            new_position = fire_object.position + fire_object.forward*distance_in_radii\n            new_position.y = terrain.get_terrain_height(new_position.x, new_position.z, fire_object.routing_surface)\n            fire_object.move_to(transform=fire_object.transform, orientation=sims4.random.random_orientation())\n            if not self._placement_tests(new_position, level=fire_object.location.level, fire_object=fire_object):\n                continue\n            transform = sims4.math.Transform(new_position, sims4.random.random_orientation())\n            self._spawn_fire(transform, fire_object.routing_surface, run_placement_tests=False)\n            logger.debug('Successfully placed fire object on attempt {}', attempt)\n            return\n\n    def _placement_tests(self, new_position, level=None, fire_object=None):\n        zone_id = sims4.zone_utils.get_zone_id()\n        if level is not None and not build_buy.has_floor_at_location(zone_id, new_position, level):\n            logger.debug('failed to place fire at a location because there is no floor.')\n            return False\n
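        # Fire does not spread across large height differences (e.g. between\n        # floor levels); see FIRE_SPREAD_HEIGHT_THRESHOLD.\n        if fire_object is not None and abs(fire_object.position.y - 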
new_position.y) > self.FIRE_SPREAD_HEIGHT_THRESHOLD:\n return False\n location = sims4.math.Vector2(new_position.x, new_position.z)\n result = self.query_quadtree_for_fire_object(location, level=level)\n if result:\n logger.debug('failed to place fire at a location because it overlaps with another fire object.')\n return False\n result = self._query_quadtree_for_sim(location, level, int(placement.ItemType.SIM_POSITION))\n if any(not self.FIRE_CAN_SPREAD_TO_SIM_TESTS.run_tests(SingleSimResolver(entry[0].sim_info)) for entry in result):\n return False\n result = self._query_quadtree_for_flammable_object(location, level=level)\n if any(x.fire_retardant for x in result):\n return False\n return True\n\n def is_object_flammable(self, obj):\n tracker = obj.get_tracker(self.FLAMMABLE_COMMODITY)\n if tracker is None or not tracker.has_statistic(self.FLAMMABLE_COMMODITY):\n return False\n return True\n\n def set_object_burning(self, obj):\n tracker = obj.get_tracker(self.FLAMMABLE_COMMODITY)\n if tracker is None or not tracker.has_statistic(self.FLAMMABLE_COMMODITY):\n return\n stat = tracker.get_statistic(self.FLAMMABLE_COMMODITY)\n value = sims4.math.clamp(stat.convergence_value, stat.get_value() - self.FLAMMABLE_COMMODITY_DECAY_PER_FIRE, stat.max_value)\n stat.set_value(value)\n stat.add_decay_rate_modifier(self.FLAMMABLE_COMMODITY_DECAY_PER_FIRE)\n\n def add_to_flammable_quadtree(self, obj, location=DEFAULT):\n if obj.is_sim:\n return\n if not self.is_object_flammable(obj) and not obj.fire_retardant:\n return\n if self._flammable_objects_quadtree is None:\n self._flammable_objects_quadtree = sims4.geometry.QuadTree()\n if location is DEFAULT:\n location = sims4.math.Vector2(obj.position.x, obj.position.z)\n object_bounds = obj.object_bounds_for_flammable_object(location=location, fire_retardant_bonus=self.FIRE_RETARDANT_EXTRA_OBJECT_RADIUS)\n self._flammable_objects_quadtree.insert(obj, object_bounds)\n\n @staticmethod\n def flammable_object_location_changed(obj, old_loc, new_loc):\n fire_service = services.get_fire_service()\n if fire_service is not None:\n translation = new_loc.world_transform.translation\n location = sims4.math.Vector2(translation.x, translation.z)\n if isinstance(obj, Fire):\n fire_service._remove_fire_from_quadtree(obj)\n fire_service._add_fire_to_quadtree(obj, location)\n else:\n fire_service.remove_from_flammable_quadtree(obj)\n fire_service.add_to_flammable_quadtree(obj, location)\n\n def remove_from_flammable_quadtree(self, obj):\n if self._flammable_objects_quadtree is None:\n return\n self._flammable_objects_quadtree.remove(obj)\n\n def start_objects_burning(self, fire_object):\n location = sims4.math.Vector2(fire_object.position.x, fire_object.position.z)\n fire_level = fire_object.location.level\n result = self._query_quadtree_for_flammable_object(location, level=fire_level)\n if result is not None:\n for obj in result:\n if obj.location.level != fire_level:\n pass\n placement_flags = build_buy.get_object_placement_flags(obj.definition.id)\n if placement_flags & build_buy.PlacementFlags.CEILING and not placement_flags & build_buy.PlacementFlags.WALL_GRAPH_PLACEMENT:\n pass\n logger.debug('Fire object ({}) overlaps with {}\\n', fire_object, obj)\n if self._burning_objects is None:\n self._burning_objects = {}\n if fire_object not in self._burning_objects:\n self._burning_objects[fire_object] = []\n self._burning_objects[fire_object].append(obj)\n self.set_object_burning(obj)\n fire_object.raycast_context_dirty = True\n result = self._query_quadtree_for_sim(location, 
level=fire_level, filter_type=placement.ItemType.SIM_POSITION)\n if result is not None:\n for (sim, _, _, _) in result:\n while not self.sim_is_on_fire(sim):\n self._burn_sim(sim, fire_object)\n\n def _burn_sim(self, sim, fire_object):\n context = InteractionContext(sim, InteractionContext.SOURCE_SCRIPT, Priority.Critical, client=None, pick=None)\n result = sim.push_super_affordance(self.FIRE_SIM_ON_FIRE_AFFORDANCE, None, context)\n if result:\n result.interaction.add_liability(FireImmunityLiability.LIABILITY_TOKEN, FireImmunityLiability())\n\n def _extinguish_sim(self, sim):\n tracker = sim.get_tracker(self.FLAMMABLE_COMMODITY)\n if tracker is not None and tracker.has_statistic(self.FLAMMABLE_COMMODITY):\n tracker.set_max(self.FLAMMABLE_COMMODITY)\n\n def check_for_catching_on_fire(self, sim):\n if not self.fire_is_active:\n return False\n for interaction in sim.get_all_running_and_queued_interactions():\n fire_immunity_liability = interaction.get_liability(FireImmunityLiability.LIABILITY_TOKEN)\n while fire_immunity_liability is not None:\n return False\n if not self.FIRE_CAN_SPREAD_TO_SIM_TESTS.run_tests(SingleSimResolver(sim.sim_info)):\n return False\n location = sims4.math.Vector2(sim.position.x, sim.position.z)\n has_fire_at_location = self.query_quadtree_for_fire_object(location, radius=self.ROUTING_FIRE_CHECK_RADIUS, level=sim.location.level)\n for fire_object in has_fire_at_location:\n tracker = fire_object.get_tracker(self.FIRE_STRENGTH_COMMODITY)\n if tracker is None:\n pass\n stat = tracker.get_statistic(self.FIRE_STRENGTH_COMMODITY)\n if stat is None:\n pass\n stat_value = stat.get_value()\n while stat_value > stat.min_value:\n self._burn_sim(sim, has_fire_at_location[0])\n return True\n return False\n\n def remove_fire_object(self, fire_object):\n if self._burning_objects and fire_object in self._burning_objects:\n for obj in self._burning_objects[fire_object]:\n self._stop_object_burning(obj, fire_object)\n del self._burning_objects[fire_object]\n if fire_object in self._fire_objects:\n self._fire_objects.remove(fire_object)\n fire_object.remove_state_changed_callback(self._fire_object_state_changed_callback)\n tracker = fire_object.get_tracker(self.FIRE_BEEN_EXTINGUISHED_COMMODITY)\n if tracker is not None:\n stat = tracker.get_statistic(self.FIRE_BEEN_EXTINGUISHED_COMMODITY)\n if stat is not None and stat.get_value() > 0:\n self._remove_fire_from_quadtree(fire_object)\n if not self._fire_objects:\n self._fire_quadtree = None\n self._advance_situations_to_postfire()\n self._award_insurance_money()\n services.get_persistence_service().unlock_save(self)\n self.unregister_for_panic_callback()\n self.unregister_for_sim_active_lot_status_changed_callback()\n self.deactivate_fire_alarms()\n\n def _stop_object_burning(self, obj, fire_object):\n tracker = obj.get_tracker(self.FLAMMABLE_COMMODITY)\n if tracker is None or not tracker.has_statistic(self.FLAMMABLE_COMMODITY):\n return\n if not obj.is_sim:\n stat = tracker.get_statistic(self.FLAMMABLE_COMMODITY)\n stat.remove_decay_rate_modifier(self.FLAMMABLE_COMMODITY_DECAY_PER_FIRE)\n if self._burning_objects is not None:\n self._burning_objects[fire_object].remove(obj)\n fire_object.raycast_context_dirty = True\n\n def objects_burning_from_fire_object(self, fire_object):\n if self._burning_objects is None or fire_object not in self._burning_objects:\n return []\n return self._burning_objects[fire_object]\n\n def extinguish_nearby_fires(self, subject):\n translation = subject.location.transform.translation\n location = 
sims4.math.Vector2(translation.x, translation.z)\n level = subject.location.level\n fires_at_location = self.query_quadtree_for_fire_object(location, radius=self.FIRE_EXTNIGUISH_NEARBY_RADIUS, level=level)\n for fire in fires_at_location:\n if fire is not subject:\n self._suppress_fire(fire, immediate=True)\n nearby_sims = self._query_quadtree_for_sim(location, level=level, filter_type=placement.ItemType.SIM_POSITION, radius=self.FIRE_EXTNIGUISH_NEARBY_RADIUS)\n if nearby_sims is not None:\n for (sim, _, _, _) in nearby_sims:\n if sim is not subject:\n self._extinguish_sim(sim)\n\n def add_scorch_mark(self, position, level):\n zone_id = sims4.zone_utils.get_zone_id()\n build_buy.begin_update_floor_features(zone_id, build_buy.FloorFeatureType.BURNT)\n build_buy.set_floor_feature(zone_id, build_buy.FloorFeatureType.BURNT, sims4.math.Vector3(position.x, position.y, position.z), level, 1.0)\n build_buy.end_update_floor_features(zone_id, build_buy.FloorFeatureType.BURNT)\n\n def _start_fire_situations(self):\n situation_manager = services.current_zone().situation_manager\n for sim in services.sim_info_manager().instanced_sims_on_active_lot_gen():\n self._create_fire_situation_on_sim(sim, situation_manager=situation_manager)\n\n def _create_fire_situation_on_sim(self, sim, situation_manager=None):\n if sim.id in self._situation_ids:\n return\n if situation_manager is None:\n situation_manager = services.current_zone().situation_manager\n guest_list = SituationGuestList(invite_only=True)\n guest_info = SituationGuestInfo.construct_from_purpose(sim.sim_id, self.FIRE_JOB, SituationInvitationPurpose.INVITED)\n guest_list.add_guest_info(guest_info)\n situation_id = situation_manager.create_situation(self.FIRE_SITUATION, guest_list=guest_list, user_facing=False)\n self._situation_ids[sim.id] = situation_id\n\n def remove_fire_situation(self, sim):\n if sim.id in self._situation_ids:\n del self._situation_ids[sim.id]\n\n def alert_all_sims(self):\n situation_manager = services.current_zone().situation_manager\n for situation_id in self._situation_ids.values():\n situation = situation_manager.get(situation_id)\n if situation is not None:\n situation.advance_to_alerted()\n\n def _push_fire_reaction_affordance(self, sim, target):\n context = InteractionContext(sim, InteractionContext.SOURCE_SCRIPT, Priority.High, client=None, pick=None)\n result = sim.push_super_affordance(self.START_PANIC_INTERACTION, target, context)\n return result\n\n def register_for_panic_callback(self):\n if not self._registered_for_panic_start:\n services.get_event_manager().register_single_event(self, TestEvent.InteractionComplete)\n self._registered_for_panic_start = True\n\n def unregister_for_panic_callback(self):\n if self._registered_for_panic_start:\n services.get_event_manager().unregister_single_event(self, TestEvent.InteractionComplete)\n self._registered_for_panic_start = False\n\n def register_for_sim_active_lot_status_changed_callback(self):\n services.get_event_manager().register_single_event(self, TestEvent.SimActiveLotStatusChanged)\n\n def unregister_for_sim_active_lot_status_changed_callback(self):\n services.get_event_manager().unregister_single_event(self, TestEvent.SimActiveLotStatusChanged)\n\n def handle_event(self, sim_info, event, resolver):\n if event is TestEvent.InteractionComplete and issubclass(type(resolver.interaction), self.START_PANIC_INTERACTION):\n sim = sim_info.get_sim_instance()\n dialog = self.FIRE_REACTION_NOTIFICATION(sim, resolver=SingleSimResolver(sim_info))\n 
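The fire service in this record does all of its proximity tests through 2D spatial-index lookups: every query flattens a 3D position to (x, z) and filters by floor level and radius. As a rough plain-Python stand-in for what the engine's `sims4.geometry.QuadTree` provides — the class, its cell size, and its method names below are illustrative assumptions, not the game API:

```python
import math
from collections import defaultdict

class GridIndex:
    """Toy 2D spatial index: objects are bucketed by coarse grid cell,
    so a radius query only visits cells the query circle can overlap."""

    def __init__(self, cell_size=4.0):
        self.cell_size = cell_size
        self._cells = defaultdict(list)  # (cx, cz) -> [(obj, x, z), ...]

    def _cell(self, x, z):
        # Floor division keeps negative coordinates in the right cell.
        return (int(x // self.cell_size), int(z // self.cell_size))

    def insert(self, obj, x, z):
        self._cells[self._cell(x, z)].append((obj, x, z))

    def query(self, x, z, radius):
        r_cells = int(radius // self.cell_size) + 1
        cx, cz = self._cell(x, z)
        hits = []
        for dx in range(-r_cells, r_cells + 1):
            for dz in range(-r_cells, r_cells + 1):
                for obj, ox, oz in self._cells.get((cx + dx, cz + dz), ()):
                    if math.hypot(ox - x, oz - z) <= radius:
                        hits.append(obj)
        return hits
```

Keeping one such index per floor mirrors the `level=` filtering that every quadtree query in the record applies.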
dialog.show_dialog()\n self.alert_all_sims()\n self.unregister_for_panic_callback()\n for sim_on_lot in services.sim_info_manager().instanced_sims_on_active_lot_gen():\n if sim_on_lot is not sim:\n self._push_fire_reaction_affordance(sim_on_lot, resolver.interaction.target)\n if event is TestEvent.SimActiveLotStatusChanged and resolver.get_resolved_arg('on_active_lot'):\n sim = sim_info.get_sim_instance()\n if sim is not None:\n self._create_fire_situation_on_sim(sim)\n\n def _advance_situations_to_postfire(self):\n situation_manager = services.get_zone_situation_manager()\n if situation_manager is not None and self._situation_ids is not None:\n for situation_id in self._situation_ids.values():\n situation = situation_manager.get(situation_id)\n if situation is not None:\n situation.advance_to_post_fire()\n\n def _stop_fire_situations(self):\n situation_manager = services.get_zone_situation_manager()\n if situation_manager is not None and self._situation_ids is not None:\n for situation_id in self._situation_ids.values():\n situation_manager.destroy_situation_by_id(situation_id)\n\n def fire_interaction_test(self, affordance, context):\n if not InteractionCancelCompatibility.check_if_source_should_be_canceled(context):\n return TestResult.TRUE\n if self.fire_is_active and context.sim is not None:\n for buff_type in self.FIRE_PANIC_BUFFS:\n if context.sim.has_buff(buff_type):\n break\n else:\n return TestResult.TRUE\n if InteractionCancelCompatibility.can_cancel_interaction_for_reason(affordance, InteractionCancelReason.FIRE):\n return TestResult(False, '{} is not allowed because there is a fire object on the lot', affordance)\n return TestResult.TRUE\n\n def sim_is_on_fire(self, sim):\n for interaction in sim.get_all_running_and_queued_interactions():\n if interaction.affordance is self.FIRE_SIM_ON_FIRE_AFFORDANCE:\n return True\n return False\n\n def start(self):\n object_manager = services.object_manager()\n object_manager.register_callback(CallbackTypes.ON_OBJECT_REMOVE, self.remove_from_flammable_quadtree)\n\n def stop(self):\n self._fire_objects = None\n self._fire_spread_alarm = None\n self._burning_objects = None\n self._stop_fire_situations()\n self.deactivate_sprinkler_system()\n self.deactivate_fire_alarms()\n self._sprinkler_objects = None\n self._sprinkler_system_objects = None\n self._fire_objects_being_suppressed = None\n self._unsuppressible_fires = None\n self._sprinkler_alarm = None\n self._fire_alarm_alarm = None\n self._fire_alarm_objects = None\n self._activated_fire_alarms = None\n self._sprinkler_has_been_activated = False\n object_manager = services.object_manager()\n object_manager.unregister_callback(CallbackTypes.ON_OBJECT_REMOVE, self.remove_from_flammable_quadtree)\n self.unregister_for_panic_callback()\n\n def on_client_disconnect(self, client):\n services.get_persistence_service().unlock_save(self)\n\n def kill(self):\n for fire_object in list(self._fire_objects):\n fire_object.destroy(source=fire_object, cause='Killing all fire on lot')\n\n def activate_fire_alarms(self):\n object_manager = services.object_manager()\n self._fire_alarm_objects = set(object_manager.get_objects_of_type_gen(self.FIRE_ALARM_OBJECT_DEF))\n if not self._fire_alarm_objects:\n return\n time_span = date_and_time.create_time_span(minutes=self.FIRE_ALARM_CYCLE_TIME)\n repeating_time_span = date_and_time.create_time_span(minutes=self.FIRE_ALARM_CYCLE_TIME)\n self._fire_alarm_alarm = alarms.add_alarm(self, time_span, self._fire_alarm_callback, repeating=True, 
repeating_time_span=repeating_time_span)\n\n def deactivate_fire_alarms(self):\n if self._fire_alarm_alarm:\n alarms.cancel_alarm(self._fire_alarm_alarm)\n self._fire_alarm_alarm = None\n for fire_alarm in self._fire_alarm_objects:\n fire_alarm.set_state(self.FIRE_ALARM_DEACTIVATED_STATE.state, self.FIRE_ALARM_DEACTIVATED_STATE)\n self._fire_alarm_objects = set()\n self._activated_fire_alarms = set()\n\n def _fire_alarm_callback(self, handle):\n if not self.fire_is_active:\n alarms.cancel_alarm(handle)\n self._fire_alarm_alarm = None\n self.deactivate_fire_alarms()\n return\n self.alert_all_sims()\n deactivated_fire_alarms = self._fire_alarm_objects - self._activated_fire_alarms\n for deactivated_fire_alarm in deactivated_fire_alarms:\n alarm_position = deactivated_fire_alarm.position\n fires_in_range = self.query_quadtree_for_fire_object(sims4.math.Vector2(alarm_position.x, alarm_position.z), radius=self.FIRE_ALARM_ACTIVATION_RADIUS, level=deactivated_fire_alarm.location.level)\n if fires_in_range:\n deactivated_fire_alarm.set_state(self.FIRE_ALARM_ACTIVE_STATE.state, self.FIRE_ALARM_ACTIVE_STATE)\n self._activated_fire_alarms.add(deactivated_fire_alarm)\n\n def activate_sprinkler_system(self):\n object_manager = services.object_manager()\n self._sprinkler_system_objects.update(object_manager.get_objects_with_tag_gen(self.SPRINKLER_BOX_OBJECT_TAG))\n if not self._sprinkler_system_objects:\n return\n time_span = date_and_time.create_time_span(minutes=self.SPRINKLER_ACTIVATION_TIME)\n repeating_time_span = date_and_time.create_time_span(minutes=self.SPRINKLER_RUN_TIME)\n self._sprinkler_alarm = alarms.add_alarm(self, time_span, self._sprinkler_alarm_callback, repeating=True, repeating_time_span=repeating_time_span)\n\n def deactivate_sprinkler_system(self):\n self._sprinkler_has_been_activated = False\n for sprinkler_system_object in self._sprinkler_system_objects:\n sprinkler_system_object.set_state(self.FIRE_SPRINKLER_DEACTIVATED_STATE.state, self.FIRE_SPRINKLER_DEACTIVATED_STATE)\n if self._sprinkler_alarm:\n alarms.cancel_alarm(self._sprinkler_alarm)\n self._sprinkler_alarm = None\n for sprinkler in self._sprinkler_objects:\n sprinkler.destroy(source=sprinkler, cause='Destroying sprinklers.')\n self._sprinkler_system_objects.clear()\n self._sprinkler_objects = set()\n self._fire_objects_being_suppressed = weakref.WeakSet()\n self._unsuppressible_fires = weakref.WeakSet()\n\n def _sprinkler_alarm_callback(self, handle):\n if not self.fire_is_active:\n alarms.cancel_alarm(handle)\n self._sprinkler_alarm = None\n self.deactivate_sprinkler_system()\n return\n if not self._sprinkler_has_been_activated:\n self._sprinkler_has_been_activated = True\n for sprinkler_system_object in self._sprinkler_system_objects:\n sprinkler_system_object.set_state(self.FIRE_SPRINKLER_ACTIVE_STATE.state, self.FIRE_SPRINKLER_ACTIVE_STATE)\n new_fire_objects = set(self._fire_objects - self._fire_objects_being_suppressed - self._unsuppressible_fires)\n if new_fire_objects:\n for existing_sprinkler in self._sprinkler_objects:\n new_fire_objects = self.find_and_suppress_fires_under_sprinkler(existing_sprinkler, None, new_fire_objects)\n while new_fire_objects:\n initiating_fire = new_fire_objects.pop()\n new_sprinkler = self._spawn_sprinkler(initiating_fire)\n if new_sprinkler is None:\n break\n new_fire_objects = self.find_and_suppress_fires_under_sprinkler(new_sprinkler, initiating_fire, new_fire_objects)\n if new_fire_objects:\n self._unsuppressible_fires = self._unsuppressible_fires | new_fire_objects\n\n def 
find_and_suppress_fires_under_sprinkler(self, sprinkler, initiating_fire, new_fire_objects):\n suppressed_fires = set()\n sprinkler_position = sims4.math.Vector2(sprinkler.position.x, sprinkler.position.z)\n sprinkler_level = sprinkler.location.level\n fires_under_sprinkler = self.query_quadtree_for_fire_object(sprinkler_position, radius=self.SPRINKLER_SUPRESSION_RADIUS, level=sprinkler_level)\n for fire in fires_under_sprinkler:\n if fire == initiating_fire or fire in new_fire_objects:\n self._suppress_fire(fire)\n suppressed_fires.add(fire)\n if suppressed_fires:\n new_fire_objects = new_fire_objects - suppressed_fires\n self._fire_objects_being_suppressed = self._fire_objects_being_suppressed | suppressed_fires\n sims_under_sprinkler = self._query_quadtree_for_sim(sprinkler_position, level=sprinkler_level, filter_type=placement.ItemType.SIM_POSITION, radius=self.SPRINKLER_SUPRESSION_RADIUS)\n if sims_under_sprinkler is not None:\n for (sim, _, _, _) in sims_under_sprinkler:\n self._extinguish_sim(sim)\n return new_fire_objects\n\n def _suppress_fire(self, fire, immediate=False):\n tracker = fire.get_tracker(self.FIRE_STRENGTH_COMMODITY)\n if tracker is not None and tracker.has_statistic(self.FIRE_STRENGTH_COMMODITY):\n stat = tracker.get_statistic(self.FIRE_STRENGTH_COMMODITY)\n rate = self.IMMEDIATE_SUPPRESSION_RATE if immediate else self.FIRE_STRENGTH_COMMODITY_SUPRESSION_DECAY\n stat.add_decay_rate_modifier(rate)\n tracker = fire.get_tracker(self.FIRE_BEEN_EXTINGUISHED_COMMODITY)\n if tracker is not None:\n tracker.set_max(self.FIRE_BEEN_EXTINGUISHED_COMMODITY)\n\n def _spawn_sprinkler(self, fire):\n zone_id = sims4.zone_utils.get_zone_id()\n new_level = fire.location.level + 1\n if not build_buy.has_floor_at_location(zone_id, fire.position, new_level):\n return\n sprinkler_object = system.create_object(self.SPRINKLER_HEAD_OBJECT_DEF)\n sprinkler_location = fire.location.duplicate()\n new_translation = sims4.math.Vector3(*fire.position)\n height = terrain.get_lot_level_height(sprinkler_location.transform.translation.x, sprinkler_location.transform.translation.z, new_level, zone_id)\n new_translation.y = height - self.SPRINKLER_HEAD_CEILING_OFFSET\n sprinkler_location.transform = sims4.math.Transform(new_translation, sprinkler_location.transform.orientation)\n sprinkler_object.set_location(location=sprinkler_location)\n self._sprinkler_objects.add(sprinkler_object)\n sprinkler_object.vfx = FireService.SPRINKLER_EFFECT(sprinkler_object)\n sprinkler_object.vfx.start()\n return sprinkler_object\n\n def _create_or_replace_scorch_cleanup_alarm(self):\n if self._scorch_cleanup_alarm:\n alarms.cancel_alarm(self._scorch_cleanup_alarm)\n self._scorch_cleanup_alarm = None\n time_span_until = services.game_clock_service().precise_time_until_hour_of_day(self.SCORCH_TERRAIN_CLEANUP_HOUR)\n self._scorch_cleanup_alarm = alarms.add_alarm(self, time_span_until, self._cleanup_scorch_marks_on_terrain, repeating=False)\n\n def _cleanup_scorch_marks_on_terrain(self, handle):\n if self.fire_is_active:\n self._create_or_replace_scorch_cleanup_alarm()\n return\n zone_id = sims4.zone_utils.get_zone_id()\n list_result = build_buy.list_floor_features(zone_id, build_buy.FloorFeatureType.BURNT)\n if list_result:\n build_buy.begin_update_floor_features(zone_id, build_buy.FloorFeatureType.BURNT)\n for tile in list_result:\n if build_buy.is_location_natural_ground(zone_id, tile[0], tile[1]):\n build_buy.set_floor_feature(zone_id, build_buy.FloorFeatureType.BURNT, tile[0], tile[1], 0)\n 
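`_suppress_fire` above never sets the fire-strength value directly: it only stacks decay-rate modifiers onto the commodity, the same mechanic used for flammable objects earlier in the record. A toy model of that pattern — the class below is a hypothetical sketch, not the game's statistic API:

```python
class DecayingStat:
    """Toy commodity statistic with stackable decay-rate modifiers,
    similar in spirit to add/remove_decay_rate_modifier above."""

    def __init__(self, value, min_value=0.0, base_decay=0.0):
        self.value = value
        self.min_value = min_value
        self.base_decay = base_decay
        self._modifiers = []

    def add_decay_rate_modifier(self, rate):
        self._modifiers.append(rate)

    def remove_decay_rate_modifier(self, rate):
        self._modifiers.remove(rate)  # removes one matching entry

    def tick(self, dt):
        # Effective decay is the base rate plus every active modifier.
        rate = self.base_decay + sum(self._modifiers)
        self.value = max(self.min_value, self.value - rate * dt)
        return self.value

# A suppressed fire decays faster; removing the modifier restores the base rate.
fire_strength = DecayingStat(100.0, base_decay=0.5)
fire_strength.add_decay_rate_modifier(5.0)   # sprinkler suppression
assert fire_strength.tick(1.0) == 94.5
```

Stacking modifiers instead of overwriting the rate lets several suppressors (sprinklers, sims extinguishing) combine and unwind independently.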
build_buy.end_update_floor_features(zone_id, build_buy.FloorFeatureType.BURNT)\n\n def find_cleanable_scorch_mark_locations_within_radius(self, location, level, radius):\n found_scorch_marks = set()\n zone_id = sims4.zone_utils.get_zone_id()\n radius_squared = radius*radius\n all_scorch_marks = build_buy.list_floor_features(zone_id, build_buy.FloorFeatureType.BURNT)\n for scorch_mark in all_scorch_marks:\n scorch_level = scorch_mark[1]\n if scorch_level == level:\n if not build_buy.is_location_natural_ground(zone_id, scorch_mark[0], scorch_level):\n scorch_location = scorch_mark[0]\n if (location - scorch_location).magnitude_squared() <= radius_squared:\n found_scorch_marks.add(scorch_location)\n return found_scorch_marks\n\n def increment_insurance_claim(self, value, burnt_object):\n if household_manager.HouseholdManager.get_active_sim_home_zone_id() == burnt_object.zone_id:\n if not self.fire_is_active:\n logger.warn(\"Trying to make an insurance claim when there isn't an active fire.\", owner='rfleig')\n\n def _award_insurance_money(self):\n client = services.client_manager().get_first_client()\n active_sim = client.active_sim\n if self._insurance_value > 0 and active_sim is not None:\n services.active_household().funds.add(self._insurance_value, Consts_pb2.TELEMETRY_INTERACTION_COST, None)\n dialog = self.FIRE_INSURANCE_CLAIM_NOTIFICATION(active_sim, SingleSimResolver(active_sim))\n dialog.show_dialog(additional_tokens=(self._insurance_value,))\n self._insurance_value = 0\n\n","repo_name":"johndpope/sims4-ai-engine","sub_path":"simulation/services/fire_service.py","file_name":"fire_service.py","file_ext":"py","file_size_in_byte":47979,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"}
{"seq_id":"7222318032","text":"from logging import Logger\nfrom flask import redirect\nfrom flask_restful import request\nfrom common import http_responses, exceptions\nfrom common.utils import parse_json, to_json_string\nfrom ..registration.models import SessionRegistrationDetailsModel\nfrom . import Controller\nfrom .. 
import config\nfrom ..google_oauth import GoogleOauth\nfrom ..session import SessionHandler\nfrom ..user_info import UserInfoHandler\nfrom ..user import UserHandler\nfrom ..registration import SessionRegistrationHandler\n\n\nclass GoogleSignInController(Controller):\n\n AUTH_METHOD: str = 'GoogleSignIn'\n _google_oauth: GoogleOauth\n _session_handler: SessionHandler\n _userinfo_handler: UserInfoHandler\n _user_handler: UserHandler\n _session_registration_handler: SessionRegistrationHandler\n\n def __init__(self,\n logger: Logger,\n google_oauth: GoogleOauth,\n session_handler: SessionHandler,\n user_handler: UserHandler,\n userinfo_handler: UserInfoHandler,\n session_registration_handler: SessionRegistrationHandler):\n super().__init__(logger)\n self._google_oauth = google_oauth\n self._session_handler = session_handler\n self._userinfo_handler = userinfo_handler\n self._user_handler = user_handler\n self._session_registration_handler = session_registration_handler\n\n def _validate_auth_request(self, args: dict):\n code = args.get(\"code\", None)\n if code is None:\n raise exceptions.MissingParamError(\"code is missing\")\n scopes = args.get(\"scope\", None)\n if scopes is None:\n raise exceptions.MissingParamError(\"scopes is missing\")\n state = args.get(\"state\", None)\n if state is None:\n raise exceptions.MissingParamError(\"state is missing\")\n\n def _validate_with_registered_details(self, data: dict, registered_details: SessionRegistrationDetailsModel):\n redirect_uri = data.get('redirect_uri')\n if not redirect_uri in registered_details.redirect_uris:\n raise exceptions.IncorrectValue(\n f'Redirect uri {redirect_uri} is not registered for given client_id.')\n\n def _auth(self, args: dict):\n self._validate_auth_request(args)\n\n state: dict = parse_json(args.get(\"state\"))\n client_id = state.get('client_id')\n\n registered_details = self._session_registration_handler.get(client_id)\n if registered_details is None:\n raise exceptions.IncorrectValue(f'Client id {client_id} not found.')\n\n self._validate_with_registered_details(state, registered_details)\n\n code = args.get(\"code\")\n scopes = args.get(\"scope\")\n credentials = self._google_oauth.get_token_using_authorization_code(code, scopes, config.google_token_signin.RedirectUri)\n\n username = credentials['username']\n\n user = self._user_handler.get_or_create(username, self.AUTH_METHOD)\n self._userinfo_handler.fetch_and_store_from_google(user.object_id, credentials['access_token'])\n\n session = self._session_handler.create(username, user.object_id, client_id, [\"GoogleSignIn\",], registered_details.resource, config.common.SessionExpiry)\n\n signed_session = self._session_handler.sign(session)\n redirect_uri = state.get('redirect_uri')\n user_state_param = state.get('state')\n return redirect(f'{redirect_uri}?state={user_state_param}&session={signed_session}', code=302)\n\n\n def get(self, type: str = \"\"):\n try:\n args = request.args\n if \"auth\".__eq__(type):\n return self._auth(args)\n elif \"\".__eq__(type) or \"/\".__eq__(type):\n login_url = config.google_token_signin.LoginUrl\n login_url = f'{login_url}&state={to_json_string(args)}'\n return redirect(login_url, code=302)\n else:\n return http_responses.NotFoundResponse()\n except exceptions.LoginFailureError as e:\n self._logger.exception(e)\n return http_responses.UnauthorizedResponse(str(e))\n except (exceptions.MissingParamError, exceptions.IncorrectValue) as e:\n self._logger.exception(e)\n return http_responses.BadRequestResponse(str(e))\n except 
Exception as e:\n self._logger.exception(e)\n return http_responses.InternalServerErrorResponse()\n","repo_name":"parveenchahal/authonline.net","sub_path":"backend/src/session/src/controllers/_google_token_signin.py","file_name":"_google_token_signin.py","file_ext":"py","file_size_in_byte":4380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"16677902127","text":"\"\"\"Console script for mlops_generator.\"\"\"\nimport sys\nfrom os import getcwd\nimport click\nfrom click import option, command\nfrom click.core import Option, Command\nfrom pathlib import Path\n\nfrom mlops_generator.interface import Interface\n\nimport logging\n\nlogger = logging.getLogger(__file__)\nlogger.setLevel(logging.INFO)\n\n\nclass InitCommand(Command):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.params.extend([self.setup, self.tests, self.dockerfile, self.deploy,])\n\n @property\n def tests(self):\n \"\"\"Initialize with tests\"\"\"\n return Option(\n (\"--tests\",),\n type=bool,\n help=\"Add pytest suite\",\n is_flag=True,\n default=False,\n )\n\n @property\n def setup(self):\n \"\"\"Initialize with setup.py\"\"\"\n return Option(\n (\"--setup\",),\n type=bool,\n help=\"Add setup\",\n is_flag=True,\n default=False,\n )\n\n @property\n def dockerfile(self):\n \"\"\"Initialize dockerfile\"\"\"\n return Option(\n (\"--docker\",),\n type=bool,\n help=\"Add docker\",\n is_flag=True,\n default=False,\n )\n\n @property\n def deploy(self):\n \"\"\"Initialize pipeline CI\"\"\"\n return Option(\n (\"--deploy\",),\n type=bool,\n help=\"Add pipeline CI\",\n is_flag=True,\n default=False,\n )\n\nclass ComponentCommand(Command):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.params.extend([\n self.project_dir,\n self.pandas,\n self.sklearn,\n self.kubeflow_component,\n self.kubeflow_pipeline,\n self.jupyter_notebook,\n self.artifacts\n ])\n\n @property\n def pandas(self):\n \"\"\"Pandas extension\"\"\"\n return Option(\n (\"--pandas\",),\n type=bool,\n help=\"Add pandas extension\",\n is_flag=True,\n default=False,\n )\n\n @property\n def project_dir(self):\n \"\"\"Project directory\"\"\"\n return Option(\n (\"--project-dir\",),\n type=str,\n help=\"Project directory, by default is current working directory\",\n default=\"\",\n ) \n\n @property\n def sklearn(self):\n \"\"\"Sklearn base class\"\"\"\n return Option(\n (\"--sklearn\",),\n type=bool,\n help=\"Add sklearn base class\",\n is_flag=True,\n default=False,\n )\n\n @property\n def kubeflow_component(self):\n \"\"\"Kubeflow component container op class\"\"\"\n return Option(\n (\"--kubeflow-component\",),\n type=bool,\n help=\"Add kubeflow component container op\",\n is_flag=True,\n default=False,\n )\n\n @property\n def kubeflow_pipeline(self):\n \"\"\"Pipeline implementation\"\"\"\n return Option(\n (\"--kubeflow-pipeline\",),\n type=bool,\n help=\"Add kubeflow-pipeline\",\n is_flag=True,\n default=False,\n )\n\n @property\n def jupyter_notebook(self):\n \"\"\"Jupyter notebook document\"\"\"\n return Option(\n (\"--jupyter-notebook\",),\n type=bool,\n help=\"Add jupyter notebook\",\n is_flag=True,\n default=False,\n )\n\n @property\n def artifacts(self):\n \"\"\"Temporal data and visualization artifacts\"\"\"\n return Option(\n (\"--artifacts\",),\n type=bool,\n help=\"Add temporal data and visualization artifacts\",\n is_flag=True,\n default=False,\n )\n\n@click.group()\ndef main():\n \"\"\"Command Line Interface for MLOps 
lifecycle.\"\"\"\n pass\n\n\n@main.command(\"init\", help=\"Initialize mlops project\", cls=InitCommand)\ndef init(*args, **kwargs):\n \"\"\"\n Initialize a project in the current working directory.\n\n Args:\n project_template ([type]): [description]\n \"\"\"\n try:\n cwd = Path().cwd()\n Interface().initialize(cwd, *args, **kwargs)\n click.echo(\"Initialize mlops project\")\n except Exception as error:\n logger.error(error)\n sys.exit(0)\n\n\n@main.command(\"add\", help=\"Add configuration to project\", cls=InitCommand)\n@click.option(\"--project-dir\", help=\"Give project name if you want\", default=\"\")\ndef add(project_dir, *args, **kwargs):\n \"\"\"Add a configuration to the current project.\"\"\"\n try:\n cwd = Path().cwd() / project_dir\n Interface().add(cwd, *args, **kwargs)\n except Exception as error:\n logger.exception(error)\n sys.exit(0)\n\n\n@main.command(\n \"component\",\n help=\"Generate a component\",\n context_settings=dict(ignore_unknown_options=True),\n cls=ComponentCommmand\n)\ndef component(project_dir, *args, **kwargs):\n \"\"\"CLI for generate MLOps archetypes.\"\"\"\n try:\n cwd = Path().cwd() / project_dir\n Interface().component(cwd, *args, **kwargs)\n except Exception as error:\n logger.exception(error)\n sys.exit(0)\n\n\n# @main.command(\n# \"pipeline\",\n# help=\"Generate a kubeflow pipeline\",\n# context_settings=dict(ignore_unknown_options=True),\n# )\n# def pipeline():\n# \"\"\"Generate a pipeline.\"\"\"\n# pass\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"averagua/mlops_generator","sub_path":"mlops_generator/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":5429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13421269696","text":"import pandas as pd\n\nfrom .normalization import normalization\n\ndef extractLabels(images, labels, split):\n \"\"\"\n 分别提取images列表中的像素, 和labels中的标签, 并将其归一化\n\n # 参数:\n images: 含有pixels的列表\n labels: 含有ferplus的八种情绪的标签的列表\n split: 字符串,用来区分提取的数据集是用来训练的还是测试的\n\n\n # return:\n 返回的是一个第一行为像素,第二行后到最后一行为八个标签的csv列表\n \"\"\"\n # 去除表格中目标数据以外的数据\n if split != 'All':\n images = images[images['Usage'] == split]\n labels = labels[labels['Usage'] == split]\n\n # 提取出像素和标签\n new_fer = pd.DataFrame()\n new_fer['pixels'] = images['pixels']\n new_fer[['neutral', 'happiness', 'surprise', 'sadness',\n 'anger', 'disgust', 'fear', 'contempt']] = labels[['neutral', 'happiness', 'surprise', 'sadness',\n 'anger', 'disgust', 'fear', 'contempt']]\n\n # 去除new_fer中无效行(即8个标签都是0)\n new_fer = new_fer[new_fer[['neutral', 'happiness', 'surprise', 'sadness',\n 'anger', 'disgust', 'fear', 'contempt']].sum(axis=1) != 0]\n \n new_fer = new_fer.apply(normalization, axis=1)\n \n return new_fer","repo_name":"monikaBeizi/CNN-face-emotion-recognition","sub_path":"face_emotion_recongnition/utils/extractLabels.py","file_name":"extractLabels.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8938121147","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu May 9 16:03:17 2019\r\n\r\n@author: mwdocter\r\n\r\nuse of a defined function\r\n\"\"\"\r\n\r\ndef f(x):\r\n x=x+2\r\n return 
x\r\n\r\na=4\r\nprint(a)\r\na=f(a)\r\nprint(a)\r\nprint(f(a))","repo_name":"mwdocter/CMJ-lab","sub_path":"tests_in_python/local_global.py","file_name":"local_global.py","file_ext":"py","file_size_in_byte":206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14007625140","text":"'''\nWrite your solution for 6. PIAT: Check Setup here.\n\nAuthor: Flynn Costello\nSID: 530488477\nUnikey: fcos0917\n'''\n\nimport shutil\n# shutil.copy(src, dest) - copying src to dest (files)\n# for contents in os.walk(\"/home/\"): - loops through contents of the root directory home\n# print contents\n# Challenge: Spot the root folder and its files?\n# ('/home/', ['demo', 'new'], ['main.py'])\n# Challenge: Spot the root folder and its files?\n# ('/home/demo', [], ['my_file.txt'])\n# Challenge: Spot the root folder and its files?\n# ('/home/new', [], ['tip.txt'])\n \nimport os\nimport time\nimport datetime\nimport sys\n\n\"\"\"\n (os.path.isdir(path) - returns true if path is existing directory)\n\nMaster Directory:\n- Contains files and sample(s) folder as well as config.txt file\n\nDirectories (in config.txt):\n- /home/user/A1/ - this is valid\n- Needs to start and end with /\n- Can't contain a .\n\nFiles (in config.txt):\n- Must always start with ./\n- Row before file must be a valid directory\n\nE.g.\nVALID config.txt FILE:\n/home/files/\n./animals.txt \n./history.txt \n./list.txt \n./names.txt\n/home/samples/\n./count_me.txt\n\n\nThis is what config.txt will look like:\n/home/files/\n./animals.txt \n./history.txt \n./list.txt \n./names.txt\n/home/samples/\n./count_me.txt\n\"\"\"\n\ndef point_in_name(string):\n point_in = False\n i = 0\n while i < len(string):\n if string[i] == \".\":\n point_in = True\n i += 1\n return point_in\n\ndef is_folder_file_or_neither(string):\n \"\"\"\n Function determines if a given string is a folder, file or neither (i.e., invalid)\n \"\"\"\n is_folder = False\n is_file = False\n is_neither = False\n dot_in_string = point_in_name(string)\n\n if string[0] == \"/\" and string[-1] == \"/\" and not(dot_in_string):\n is_folder = True\n elif string[:2] == \"./\" and string[-4:] == \".txt\":\n is_file = True\n else:\n is_neither = True\n return (is_folder, is_file, is_neither)\n\n\ndef extract_directory_and_file_paths(master):\n absolute_config_path = f\"{master}config.txt\"\n try:\n with open(absolute_config_path, \"r\") as config_file:\n config_lines = config_file.readlines()\n except FileNotFoundError:\n print(\"Invalid master directory.\")\n sys.exit()\n\n config_directories = []\n config_files = [] # 2 dimensional array with each element being an array of the\n # files that are located within the corresponding element directory in directories folder\n filenames = [] # just file names, e.e.,g history.txt\n \"\"\"\n E.g.,\n config_directories = ['/home/files/', '/home/samples/']\n config_files = [['/home/files/animals.txt', '/home/files/history.txt', '/home/files/list.txt', '/home/files/na\n mes.txt'], ['/home/samples/count_me.txt']]\n \"\"\"\n i = 0\n while i < len(config_lines):\n cur_line = config_lines[i].strip() # .strip() is required to get rid of \\n character at end of every line read from config.txt\n if cur_line != '':\n is_folder, is_file, is_neither = is_folder_file_or_neither(cur_line)\n if is_folder:\n config_directories.append(cur_line)\n\n if is_file:\n file_name = cur_line[2:]\n file_absolute_path = f\"{config_directories[-1]}{cur_line[2:]}\"\n if len(config_files) < len(config_directories):\n 
config_files.append([file_absolute_path])\n filenames.append([file_name])\n else:\n config_files[-1].append(file_absolute_path)\n filenames[-1].append(file_name)\n\n i += 1\n return (config_directories, config_files, filenames)\n\n\n\ndef installation(master: str, timestamp: str) -> list:\n '''\n Installation copies all required master files into the addresses listed by\n the config file.\n Parameters:\n master: str, a string representing the absolute path to the master directory.\n timestamp: str, a string representing the time to insert into the output.\n Returns:\n output: list, a list of strings generated from the installation process.\n \n - If file is in config file but not found in master directory program displays message - \"Original {file_name} is not found.\"\n - If installation is unsuccessful program terminates and displays message \"Installation error...\"\n 1) get paths to directories from config file\n 2) create new directories\n 3) get paths for all files in master directory\n 4) create new files\n 5) copy files from master directory to new files\n '''\n output_text = []\n \"\"\"\n Within the folder, \"files\" there should always be four text files e.g. animals.txt, history.txt, list.txt and names.txt\n Within the folder, \"samples\" there should be one text file e.g. count_me.txt\n \n Errors:\n - If a file is listed in the configuration file but not found in the master directory,\n the program should display the message \"Original {file_name} is not found\"\n - os.path.isdir(path) - returns true if path is existing directory\n\n - If the program is unable to complete installation, the program should terminate\n and display the message \"Installation error...\"\n \"\"\"\n output_text.append(f\"{timestamp} Start installation process.\")\n\n ### Step 1: ###\n output_text.append(f\"{timestamp} Extracting paths in configuration file.\")\n\n config_directories, config_files, filenames = extract_directory_and_file_paths(master)\n\n total_directories = len(config_directories)\n output_text.append(f\"Total directories to create: {total_directories}\")\n\n \n ### Step 2/3 - Check if directories exist - if they do, skip, if they don't create new directory at specified filepath ###\n output_text.append(f\"{timestamp} Create new directories.\") # in /home\n \n # Checking if directory exists then either say it does or create new directory\n \n index = 0\n while index < len(config_directories):\n cur_directory = config_directories[index]\n if os.path.exists(cur_directory):\n output_text.append(f\"{cur_directory} exists. 
Skip directory creation.\")\n else: # Directory doesn't exist\n os.mkdir(cur_directory)\n output_text.append(f\"{cur_directory} is created successfully.\")\n index += 1\n\n \n\n # Step 4 - Needs to find files in the same order that they are listed in the config.txt file\n output_text.append(f\"{timestamp} Extracting paths of all files in {master}.\")\n all_paths_in_master = sorted(os.listdir(master)) # Lists all paths in master\n dir_num = 0\n while dir_num < len(all_paths_in_master):\n cur_dir = f\"{master}{all_paths_in_master[dir_num]}/\"\n #print(cur_dir)\n if os.path.isdir(cur_dir):\n all_paths_in_cur_directory = sorted(os.listdir(cur_dir))\n file_num = 0\n #print(all_paths_in_cur_directory)\n while file_num < len(all_paths_in_cur_directory):\n cur_file = f\"{cur_dir}{all_paths_in_cur_directory[file_num]}\"\n #print(cur_file)\n #print(cur_file)\n output_text.append(f\"Found: {cur_file}\")\n file_num += 1\n\n dir_num += 1\n \n \n # Step 5 - Creating new files in /home\n output_text.append(f\"{timestamp} Create new files.\")\n \n index1 = 0\n while index1 < len(config_files):\n index2 = 0\n while index2 < len(config_files[index1]):\n cur_file_to_be_added = config_files[index1][index2].strip() # Absolute path to file in terms of /home, not /master \n create_new_file = open(cur_file_to_be_added, \"w\")\n output_text.append(f\"Creating file: {cur_file_to_be_added}\")\n index2 += 1\n index1 += 1\n\n # Step 6 - Search for file in master, ensure it exists, then copy to /home equivelant place in the new folder and file you created\n\n output_text.append(f\"{timestamp} Copying files.\")\n i = 0\n while i < len(config_directories):\n j = 0\n while j < len(config_files[i]):\n try:\n original_path = f\"{master}{config_files[i][j][6:]}\"\n destination_path = f\"{config_files[i][j]}\"\n #print(original_path, destination_path)\n output_text.append(f\"Locating: {filenames[i][j]}\")\n shutil.copy(original_path, destination_path)\n output_text.append(f\"Original path: {original_path}\")\n output_text.append(f\"Destination path: {destination_path}\")\n except Exception:\n output_text.append(f\"Original path: {original_path} is not found.\")\n output_text.append(\"Installation error...\")\n return output_text\n j += 1\n i += 1\n output_text.append(f\"{timestamp} Installation complete.\")\n\n return output_text\n \n\n\n\n\ndef logging(logs: list, date: str, time: str) -> None:\n '''\n Logging function uses a list of strings to write previous output into a\n log file.\n Parameters:\n logs: list, output from verification/installation in the form of list of \n strings to write to logging file.\n date: str, a string representing the date to generate the necessary \n directory date must be in the format YYYY-MM-DD as seen in \n the specs (ex: 2023-Mar-03 for March 3rd, 2023).\n time: str, a string representing the time to generate the log file\n time must be in the format HH_MM_SS as seen in the specs\n (ex: 14_31_27 for 14:31:27).\n '''\n \"\"\"\n If this command is present with the installation or verification process, \n in addition to displaying the messages to standard output, the program logs \n the details into a text file and stores this text file in the folder path \n /home/logs/{YYYY-MM-DD}. The text file must be created on each run, labelled \n with the current time of execution in the format HH_MM_SS. The contents of \n the file is the same as the text output except that the date and time are omitted. 
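For reference, the folder and file names this logging scheme calls for can be produced directly with `strftime`; the sketch below is a hypothetical helper using that route, whereas the assignment code itself slices a pre-formatted timestamp string instead:

```python
import os
from datetime import datetime

def log_destination(now=None, root="/home/logs"):
    """Build the log folder and file name described above, e.g.
    ('/home/logs/2023-Mar-03', '14_31_27.txt') for 2023-03-03 14:31:27."""
    now = now or datetime.now()
    folder = os.path.join(root, now.strftime("%Y-%b-%d"))
    filename = now.strftime("%H_%M_%S") + ".txt"
    return folder, filename

folder, filename = log_destination(datetime(2023, 3, 3, 14, 31, 27))
assert folder == "/home/logs/2023-Mar-03"
assert filename == "14_31_27.txt"
```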
\n\n If the program is executed on 3rd March 2023 at 14:31:27, the resulting log file \n should be placed in this folder path: /home/logs/2023-Mar-03 and the name of the \n file should be 14_31_27.txt. This is an example contents of the file when the program \n is executed with the given command line arguments:\n \n\n - Logs details of either installation or verification in a text file and stores this\n at the folder path /home/logs/YYYY-MM-DD\n - Contents of text file as the same as the output from the program except the contents of the\n date and time are omitted - therefore only info text\n - Need to create new folders if its a new day/month/year and a new file every run with name\n hour_min_second.txt\n \"\"\"\n\n # new_folder = /home/logs/date\n # date and time are omitted\n folder_name = f\"/home/logs/{date}/\"\n file_name = f\"{time}.txt\"\n file_name_absolute_path = f\"{folder_name}{file_name}\"\n new_logs = logs\n #new_logs = remove_date_and_time(logs)\n # Creating/Checking if logs folder exists\n if not(os.path.isdir(\"/home/logs\")):\n os.mkdir(\"/home/logs\")\n\n # Checking if folder exists\n if not(os.path.exists(folder_name)): # Folder doesn't already exist\n os.mkdir(folder_name)\n\n with open(file_name_absolute_path, \"w\") as new_file:\n i = 0\n while i < len(new_logs):\n new_file.write(f\"{new_logs[i]}\\n\")\n i += 1\n\n\ndef verification(master: str, timestamp: str) -> list:\n '''\n Verification makes sure all files and directories listed in the config file\n are present and match the contents of the master files. \n Parameters:\n master: str, a string representing the absolute path to the master directory.\n timestamp: str, a string representing the time to insert into the output.\n Returns:\n output: list, a list of strings generated from the verification process.\n '''\n output_text = []\n output_text.append(f\"{timestamp} Start verification process.\")\n output_text.append(f\"{timestamp} Extracting paths in configuration file.\")\n\n\n config_directories, config_files, filenames = extract_directory_and_file_paths(master)\n i = 0\n while i < len(config_files):\n config_files[i] = sorted(config_files[i])\n filenames[i] = sorted(filenames[i])\n i += 1\n\n total_directories = len(config_directories)\n output_text.append(f\"Total directories to check: {total_directories}\")\n\n output_text.append(f\"{timestamp} Checking if directories exists.\")\n\n\n # Checking if directories exist in /home\n i = 0\n while i < len(config_directories):\n cur_dir = config_directories[i]\n if os.path.exists(cur_dir):\n output_text.append(f\"{cur_dir} is found!\")\n else:\n output_text.append(f\"{cur_dir} NOT found!\")\n output_text.append(f\"Abnormalities detected...\")\n return output_text\n i += 1\n \n # Extracting files in master directory\n files_to_check = []\n output_text.append(f\"{timestamp} Extracting files in configuration file.\")\n total_files = 0\n dir_num = 0\n #print(config_files)\n while dir_num < len(config_files):\n file_num = 0\n while file_num < len(config_files[dir_num]):\n cur_file = config_files[dir_num][file_num]\n #print(cur_file)\n output_text.append(f\"File to check: {cur_file}\")\n files_to_check.append(cur_file)\n file_num += 1\n total_files += 1\n dir_num += 1\n output_text.append(f\"Total files to check: {total_files}\")\n \n # Checking if files exist\n output_text.append(f\"{timestamp} Checking if files exists.\")\n i = 0\n while i < len(files_to_check):\n if os.path.exists(files_to_check[i]):\n output_text.append(f\"{files_to_check[i]} found!\")\n else:\n 
output.append(f\"{files_to_check[i]} NOT found!\")\n output.append(f\"Abnormalities detected...\")\n return output_text\n i += 1\n \n # Checking contents of files with master copy\n output_text.append(f\"{timestamp} Check contents with master copy.\")\n count1 = 0\n #print(\"XXX\", config_files)\n while count1 < len(config_files):\n count2 = 0\n while count2 < len(config_files[count1]):\n # This is now looking at each specific file\n home_file = config_files[count1][count2] # /home/files/history.txt\n master_copy_file = f\"{master}{home_file[6:]}\" # /home/master/files/history.txt\n try:\n with open(home_file, \"r\") as home:\n home_lines = home.readlines()\n with open(master_copy_file, \"r\") as master_copy:\n master_copy_lines = master_copy.readlines()\n i = 0\n j = 0\n abnormalies_detected = False\n while i < len(master_copy_lines):\n try:\n if master_copy_lines[i] == home_lines[i]:\n i += 1\n else:\n while j <= i:\n master_error = master_copy_lines[j].strip()\n home_error = home_lines[j].strip()\n output_text.append(f\"File name: {home_file}, {home_error}, {master_error}\")\n abnormalies_detected = True\n j += 1\n i += 1\n except IndexError:\n output_text.append(\"Abnormalities detected...\")\n return output_text\n\n if abnormalies_detected:\n output_text.append(\"Abnormalities detected...\")\n return output_text\n \n output_text.append(f\"{home_file} is same as {master_copy_file}: True\") \n count2 += 1\n except FileNotFoundError:\n output_text.append(\"Abnormalities detected...\")\n return output_text\n count1 += 1\n output_text.append(f\"{timestamp} Verification complete.\")\n return output_text\n\n\n\ndef test_flags(flags):\n \"\"\"\n if flag is not valid (i.e., not in flags list) or doesn't start with a -\n program termintes, print \"Invalid flag. [Source of problem]\"\n \"\"\"\n ### Flag Testing ### - If any are true the program terminates\n if flags == None: # Flags aren't entered, rather left blank (this is still valid input)\n return (None, True) \n\n # 1) Flag must start with a '-'\n if flags[0] != '-':\n #raise Exception:\n #print(\"Invalid flag. Flag must start with '-'.\")\n return (\"Invalid flag. Flag must start with '-'.\", False)\n \n # 2) Log is by itself\n if flags == \"-l\":\n return (\"Invalid flag. Log can only run with install or verify.\", False)\n\n # 3) Includes both verify and install, can only be one or other\n if flags == \"-iv\" or flags == \"-vi\": \n return (\"Invalid flag. Choose verify or install process not both.\", False)\n \n # 4) Each character must be unique\n characters = []\n characters_all_unique = True\n j = 0\n while j < len(flags):\n k = 0\n while k < len(characters):\n if flags[j] == characters[k]:\n characters_all_unique = False\n k += 1\n characters.append(flags[j])\n j += 1\n\n if not(characters_all_unique):\n return(\"Invalid flag. Each character must be unique.\", False)\n\n # 5) Character must be a combo of v, i, l\n index = 1 # Skips the '-'\n while index < len(flags):\n if flags[index] != 'v' and flags[index] != 'i' and flags[index] != 'l':\n return(\"Invalid flag. Character must be a combination of 'v' or 'i' and 'l'.\", False)\n index += 1\n \n return (None, True) # Flags are valid\n\n\n\ndef print_output_array(output_array):\n i = 0\n while i < len(output_array):\n print(output_array[i])\n i += 1\n\n\n\ndef main(master: str, flags: str, timestamp: str):\n '''\n Ideally, all your print statements would be in this function. 
However, this is\n not a requirement --> Need to return strings from other functions and print them here\n Parameters:\n master: str, a string representing the absolute path to the master directory.\n flags: str, a string representing the specified flags, if no flag is given\n through the command line, flags will be an empty string.\n timestamp: str, a string representing the time to insert into the output.\n in the format: DD MMM YYYY HH:MM:DD , ex: 10 Apr 2023 12:44:17\n '''\n \"\"\"\n - Includes only text files\nNeeds:\n1) A path to the master folder containing original versions of files\n - Start with / and end with /\n - If invalid directory is passed, message = \"Invalid master directory.\"\n2) (Optional) Flag in format -[char] where char can be one of two characters\n - install | verify and log\n - Errors: \"Invalid flag. [Source of problem]\"\n- If more than 2 command line arguments are provided it will use the first two\nand disregard the rest\n- If not enough are provided at any stage the program wil output \"Insufficient arguments.\"\n (os.path.isdir(path) - returns true if path is existing directory)\n\nMaster Directory:\n- Contains files and sample(s) folder as well as config.txt file\n\"\"\"\n cur_date = timestamp[:2]\n month = timestamp[3:6]\n year = timestamp[7:11]\n exact_time = timestamp[12:]\n\n # YYYY-MM-DD\n date = f\"{year}-{month}-{cur_date}\"\n # 14_31_27\n time = f\"{exact_time.replace(':', '_')}\"\n # 03 Mar 2023 00:34:34\n formatted_timestamp = timestamp\n\n if not(os.path.isdir(master)):\n print(\"Invalid master directory.\", file=sys.stderr)\n sys.exit()\n\n ### TESTING MASTER PATH ###\n # Doesn't start and end with / / - \"Invalid master directory\"\n if master[0] != \"/\" or master[-1] != \"/\":\n print(\"Invalid master directory\", file=sys.stderr)\n sys.exit()\n\n\n ### TESTING FLAGS ###\n flag_message, flag_valid = test_flags(flags)\n if not(flag_valid):\n print(flag_message, file=sys.stderr)\n sys.exit() # Flags aren't valid therefore program terminates\n\n ### CALLING FUNCTIONS BASED ON FLAGS ###\n #print(flags)\n ### Flag is None as no flag was inputted by user in the command line ###\n if flags == None or flags == '-il' or flags == '-li': # Automatically runs Installation and Logs process (il)\n #print(\"AAA\")\n installation_output_array = installation(master, formatted_timestamp)\n logging(installation_output_array, date, time)\n print_output_array(installation_output_array)\n\n elif flags == '-i':\n #print(\"BBB\")\n installation_output_array = installation(master, formatted_timestamp)\n print_output_array(installation_output_array)\n \n elif flags == '-v':\n #print(\"CCC\")\n verification_output_array = verification(master, formatted_timestamp)\n #print(\"XXX\")\n print_output_array(verification_output_array)\n\n elif flags == '-vl' or flags == '-lv':\n #print(\"DDD\")\n verification_output_array = verification(master, formatted_timestamp)\n logging(verification_output_array, date, time)\n print_output_array(verification_output_array)\n \n else:\n return\n\n\n\nif __name__ == \"__main__\":\n # you will need to pass in some arguments here\n # we will leave this empty for you to handle the implementation\n\n # python3 setup.py /home/master/ -i\n unformatted_timestamp = time.asctime() # string\n day_of_week = unformatted_timestamp[:3]\n month = unformatted_timestamp[4:7]\n day_num = unformatted_timestamp[9:10]\n if len(day_num) == 1:\n day_num = f\"0{day_num}\"\n cur_time = unformatted_timestamp[11:19]\n year = unformatted_timestamp[20:]\n timestamp = 
f\"{day_num} {month} {year} {cur_time}\"\n\n try:\n master = sys.argv[1]\n try:\n flags = sys.argv[2]\n main(master, flags, timestamp)\n \n except IndexError: # As flags are optional, if they are not given we provide the main function with a flag of None\n main(master, None, timestamp)\n \n except IndexError: # No arguments are given (only 1 is allowed as flags is optional but master is not)\n print(\"Insufficient arguments.\")\n\n\n\n\"\"\"\nUser passes path to master directory\n\nWithin the master directory is either files or folders which hold more files\n\nWe must install or verify these files/folders (v or i) whilst also possibly conducting a log (l)\n\nOnce this is done the setup.py is complete\n\n\n\nDirectories (in config.txt):\n- /home/user/A1/ - this is valid\n- Needs to start and end with /\n- Can't contain a .\n\nFiles (in config.txt):\n- Must always start with ./\n- Row before file must be a valid directory\n\nE.g.\nVALID config.txt FILE:\n/home/files/\n./animals.txt \n./history.txt \n./list.txt \n./names.txt\n/home/samples/\n./count_me.txt\n\nFlag Options:\n- i - Installation process, v - verification process, l - log process\n- l cannot happen on its own, it must be conbined with either i or v\n Combinations:\n i\n v\n il\n li\n vl\n lv\n\nPossible Issues:\n- Flag needs to start with '-'\n- Log cannot run by itself\n- Cannot choose both verify and install, one or the other\n- Characters cannot repeat\n- Cannot include any character that isn't 'v', 'i', or 'l'\n\n \"\"\"\n\n\n\n\n","repo_name":"flynncostello/Projects","sub_path":"V2 Mouse Hunter Cheese Game/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":23573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23544285831","text":"import sys\nimport numpy as np\n\nin_fname = sys.argv[1]\n\nfin = open(in_fname, 'r')\nT = int(fin.readline())\n\nout_fname = in_fname.split('.')[0] + '.out'\nfout = open(out_fname, 'w')\n\nfor t in xrange(1, T + 1):\n [s, k] = fin.readline().split()\n k = int(k)\n n = len(s)\n\n a = np.int8([x == '+' for x in s])\n\n c = 0\n for i in xrange(n - k + 1):\n if a[i] == 0:\n a[i : i + k] = 1 - a[i : i + k]\n c += 1\n\n if not a.all():\n fout.write('Case #%d: IMPOSSIBLE\\n' % t)\n else:\n fout.write('Case #%d: %d\\n' % (t, c))\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/2720.py","file_name":"2720.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29698244496","text":"'''\nMGP Module (code for the most part copied from Futoma et al. 2017 ICML)\n'''\nimport tensorflow as tf\nimport numpy as np\nfrom .mgp_utils import OU_kernel,CG,Lanczos,block_CG,block_Lanczos\n\n#------------------------------------------------\n##### Convinience classes for managing parameters\n\n\nclass DecompositionMethod():\n valid_methods = ['chol', 'cg']\n def __init__(self, methodname, add_diag=1e-3):\n if methodname not in self.valid_methods:\n raise ValueError('{} is not a valid methodname. 
Must be one of {}'.format(methodname, self.valid_methods))\n self.methodname = methodname\n self.add_diag = add_diag\n\nclass GPParameters():\n def __init__(self, input_dim, n_mc_smps, decomp_method, pad_before):\n self.input_dim = input_dim\n self.log_length = tf.Variable(tf.random_normal([1],mean=1,stddev=0.1),name=\"GP-log-length\") \n self.length = tf.exp(self.log_length)\n\n #different noise level of each lab\n self.log_noises = tf.Variable(tf.random_normal([input_dim],mean=-2,stddev=0.1),name=\"GP-log-noises\")\n self.noises = tf.exp(self.log_noises)\n\n #init cov between labs\n self.L_f_init = tf.Variable(tf.eye(input_dim),name=\"GP-Lf\")\n self.Lf = tf.matrix_band_part(self.L_f_init,-1,0)\n self.Kf = tf.matmul(self.Lf,tf.transpose(self.Lf))\n\n self.n_mc_smps = n_mc_smps\n\n #which decomposition method of Covariance matrix to use:\n self.method = decomp_method\n \n #boolean if GP draws should be padded before or afterwards (in time axis) before clf\n self.pad_before = pad_before\n\n#------------------------------------------------\n##### Tensorflow functions to draw samples from MGP\n\ndef draw_GP(Yi,Ti,Xi,ind_kfi,ind_kti, gp_params):\n \"\"\" \n given GP hyperparams and data values at observation times, draw from \n conditional GP\n \n inputs:\n length,noises,Lf,Kf: GP params\n Yi: observation values\n Ti: observation times\n Xi: grid points (new times for tcn)\n ind_kfi,ind_kti: indices into Y\n returns:\n draws from the GP at the evenly spaced grid times Xi, given hyperparams and data\n \"\"\" \n n_mc_smps, length, noises, Lf, Kf, method = gp_params.n_mc_smps, gp_params.length, gp_params.noises, gp_params.Lf, gp_params.Kf, gp_params.method\n M = gp_params.input_dim\n ny = tf.shape(Yi)[0]\n K_tt = OU_kernel(length,Ti,Ti)\n D = tf.diag(noises)\n\n grid_f = tf.meshgrid(ind_kfi,ind_kfi) #same as np.meshgrid\n Kf_big = tf.gather_nd(Kf,tf.stack((grid_f[0],grid_f[1]),-1))\n \n grid_t = tf.meshgrid(ind_kti,ind_kti) \n Kt_big = tf.gather_nd(K_tt,tf.stack((grid_t[0],grid_t[1]),-1))\n\n Kf_Ktt = tf.multiply(Kf_big,Kt_big)\n\n DI_big = tf.gather_nd(D,tf.stack((grid_f[0],grid_f[1]),-1))\n DI = tf.diag(tf.diag_part(DI_big)) #D kron I\n \n #data covariance. 
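The observed-data covariance being assembled here has Kronecker structure: when every task is observed at every time, the gather_nd construction above reduces to Kf ⊗ Ktt plus task-wise noise. A toy NumPy illustration of that fully observed case — the helper below is an assumption for illustration, not part of mgp_utils:

```python
import numpy as np

def toy_mgp_covariance(Kf, Kt, noises, jitter=1e-3):
    """Dense covariance for a fully observed multitask GP:
    Ky = Kf kron Kt + diag(noises) kron I + jitter * I.
    The TF code above builds the same structure with gather_nd, which
    also handles the case where only some (task, time) pairs are observed."""
    M, n = Kf.shape[0], Kt.shape[0]
    Ky = np.kron(Kf, Kt)                         # task-major ordering, as in the TF code
    Ky += np.kron(np.diag(noises), np.eye(n))    # independent noise level per task
    Ky += jitter * np.eye(M * n)                 # same role as method.add_diag
    return Ky

# 2 tasks observed at 3 times -> a 6x6 covariance
Kf = np.array([[1.0, 0.4], [0.4, 1.0]])
t = np.linspace(0.0, 1.0, 3)
Kt = np.exp(-np.abs(t[:, None] - t[None, :]))    # OU kernel with unit length scale
Ky = toy_mgp_covariance(Kf, Kt, noises=np.array([0.1, 0.2]))
assert Ky.shape == (6, 6)
```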
\n #Either need to take Cholesky of this or use CG / block CG for matrix-vector products\n Ky = Kf_Ktt + DI + method.add_diag*tf.eye(ny) \n\n ### build out cross-covariances and covariance at grid\n \n nx = tf.shape(Xi)[0]\n \n K_xx = OU_kernel(length,Xi,Xi)\n K_xt = OU_kernel(length,Xi,Ti)\n \n ind = tf.concat([tf.tile([i],[nx]) for i in range(M)],0)\n grid = tf.meshgrid(ind,ind)\n Kf_big = tf.gather_nd(Kf,tf.stack((grid[0],grid[1]),-1))\n ind2 = tf.tile(tf.range(nx),[M])\n grid2 = tf.meshgrid(ind2,ind2)\n Kxx_big = tf.gather_nd(K_xx,tf.stack((grid2[0],grid2[1]),-1))\n \n K_ff = tf.multiply(Kf_big,Kxx_big) #cov at grid points \n \n full_f = tf.concat([tf.tile([i],[nx]) for i in range(M)],0) \n grid_1 = tf.meshgrid(full_f,ind_kfi,indexing='ij')\n Kf_big = tf.gather_nd(Kf,tf.stack((grid_1[0],grid_1[1]),-1))\n full_x = tf.tile(tf.range(nx),[M])\n grid_2 = tf.meshgrid(full_x,ind_kti,indexing='ij')\n Kxt_big = tf.gather_nd(K_xt,tf.stack((grid_2[0],grid_2[1]),-1))\n\n K_fy = tf.multiply(Kf_big,Kxt_big)\n \n #now get draws!\n y_ = tf.reshape(Yi,[-1,1])\n \n xi = tf.random_normal((nx*M, n_mc_smps))\n #print('xi shape:')\n #print(xi.shape)\n \n if method.methodname == 'chol':\n Ly = tf.cholesky(Ky)\n Mu = tf.matmul(K_fy,tf.cholesky_solve(Ly,y_))\n Sigma = K_ff - tf.matmul(K_fy,tf.cholesky_solve(Ly,tf.transpose(K_fy))) + method.add_diag*tf.eye(tf.shape(K_ff)[0]) \n #Exp2: increase noise on Sigma 1e-6 to 1e-3, to 1e-1?\n #Sigma = tf.cast(Sigma, tf.float64) ## Experiment: is chol instable and needs float64? Will this crash Memory?\n #draw = Mu + tf.matmul(tf.cast(tf.cholesky(Sigma),tf.float32),xi) \n draw = Mu + tf.matmul(tf.cholesky(Sigma),xi) \n draw_reshape = tf.transpose(tf.reshape(tf.transpose(draw),[n_mc_smps,M,nx]),perm=[0,2,1])\n #print('cholesky draw:')\n #print(sess.run(draw_reshape))\n\n elif method.methodname == 'cg':\n Mu = tf.matmul(K_fy,CG(Ky,y_)) #May be faster with CG for large problems\n #Never need to explicitly compute Sigma! Just need matrix products with Sigma in Lanczos algorithm\n def Sigma_mul(vec):\n # vec must be a 2d tensor, shape (?,?) 
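The cg branch below never materializes Sigma: Lanczos and CG only ever need matrix-vector products, which is exactly what `Sigma_mul` supplies. For reference, a minimal matrix-free conjugate-gradient solver in NumPy — the project's actual `CG`/`block_CG` are TensorFlow implementations in mgp_utils and are not shown here:

```python
import numpy as np

def cg_solve(matvec, b, tol=1e-8, max_iter=200):
    """Solve A x = b for symmetric positive-definite A, given only a
    function computing A @ v (matrix-free, like Sigma_mul here)."""
    x = np.zeros_like(b)
    r = b - matvec(x)
    p = r.copy()
    rs = r @ r
    for _ in range(max_iter):
        Ap = matvec(p)
        alpha = rs / (p @ Ap)
        x += alpha * p
        r -= alpha * Ap
        rs_new = r @ r
        if np.sqrt(rs_new) < tol:
            break
        p = r + (rs_new / rs) * p
        rs = rs_new
    return x

A = np.array([[4.0, 1.0], [1.0, 3.0]])
b = np.array([1.0, 2.0])
x = cg_solve(lambda v: A @ v, b)
assert np.allclose(A @ x, b)
```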
\n return tf.matmul(K_ff,vec) - tf.matmul(K_fy,block_CG(Ky,tf.matmul(tf.transpose(K_fy),vec))) \n def large_draw(): \n return Mu + block_Lanczos(Sigma_mul,xi,n_mc_smps) #no need to explicitly reshape Mu\n #draw = tf.cond(tf.less(nx*M,BLOCK_LANC_THRESH),small_draw,large_draw)\n draw = large_draw()\n draw_reshape = tf.transpose(tf.reshape(tf.transpose(draw),[n_mc_smps,M,nx]),perm=[0,2,1])\n #print('cg draw shape:')\n #print(draw_reshape.shape) \n\n #TODO: it's worth testing to see at what point computation speedup of Lanczos algorithm is useful & needed.\n # For smaller examples, using Cholesky will probably be faster than this unoptimized Lanczos implementation.\n # Likewise for CG and BCG vs just taking the Cholesky of Ky once\n \n #draw_reshape = tf.transpose(tf.reshape(tf.transpose(draw),[n_mc_smps,M,nx]),perm=[0,2,1])\n return draw_reshape \n \ndef get_GP_samples(minibatch, gp_params): ##,med_cov_grid\n \"\"\"\n returns samples from GP at evenly-spaced gridpoints\n \"\"\" \n #Unravel minibatch object\n Y = minibatch.Y\n T = minibatch.T\n X = minibatch.X \n ind_kf = minibatch.ind_kf\n ind_kt = minibatch.ind_kt\n num_obs_times = minibatch.num_obs_times\n num_obs_values = minibatch.num_obs_values \n num_tcn_grid_times = minibatch.num_tcn_grid_times\n #cov_grid = minibatch.cov_grid\n\n n_mc_smps, M, pad_before = gp_params.n_mc_smps, gp_params.input_dim, gp_params.pad_before\n grid_max = tf.shape(X)[1]\n Z = tf.zeros([0,grid_max, M])\n \n N = tf.shape(T)[0] #number of observations\n \n #setup tf while loop (have to use this bc loop size is variable)\n def cond(i,Z):\n return i', methods=['PUT'])\n@smart_render(exclude=DEFAULT_RENDER_EXCLUDE)\ndef update_template(template_id):\n template = Template.objects.get_or_404(id=template_id,\n created_by=current_user.id)\n return save_or_update(template)\n\n\n@bp_template.route('/<template_id>', methods=['DELETE'])\n@smart_render(exclude=DEFAULT_RENDER_EXCLUDE)\ndef delete_template(template_id):\n template = Template.objects.get_or_404(id=template_id,\n created_by=current_user.id)\n template.delete()\n return True\n\n\n@bp_template.route('/', methods=['GET'])\n@smart_render(exclude=DEFAULT_RENDER_EXCLUDE)\ndef get_template_list():\n paginate = Template.objects.paginate(\n exclude=DEFAULT_RENDER_EXCLUDE,\n where=PaginateHelper.owner_mixin_filter()\n )\n return paginate\n\n\n@bp_template.route('/<template_id>', methods=['GET'])\n@smart_render(exclude=DEFAULT_RENDER_EXCLUDE)\ndef get_template(template_id):\n template = Template.objects.get_or_404(id=template_id,\n created_by=current_user.id)\n return template\n\n\n@bp_template.route('/<template_id>/tokens', methods=['PUT'])\n@smart_render(exclude=DEFAULT_RENDER_EXCLUDE)\ndef update_token(template_id):\n ''' blueprint for update token '''\n \n template = Template.objects.get_or_404(id=template_id,\n created_by=current_user.id)\n \n formjson = json.loads(request.data)\n tokens = [Token.from_json(json.dumps(token))\n for token in formjson.pop('tokens')]\n \n template.tokens = tokens\n template.save()\n \n return True\n","repo_name":"IamFive/reliam2","sub_path":"reliam/views/templates.py","file_name":"templates.py","file_ext":"py","file_size_in_byte":3405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"41972937711","text":"sum=0\r\n\r\ndef cal():\r\n if sum>=2000:\r\n discount = sum*10/100\r\n print(\"Discount : \",discount)\r\n print(\"Total Cost : \",sum-discount)\r\n \r\n elif sum>=1500:\r\n discount = sum*0.07\r\n print(\"Discount : \",discount)\r\n print(\"Total Cost : 
\",sum-discount)\r\n\r\n elif sum>=1000:\r\n discount = sum*0.05\r\n print(\"Discocunt : \",discount)\r\n print(\"Total Cost : \",sum-discount)\r\n\r\n elif sum<1000:\r\n print(\"No Discocunt\")\r\n print(\"Total Cost : \",sum)\r\n\r\n else:\r\n print(\"not valid\")\r\n exit()\r\n\r\ndef file():\r\n f=open(\"price.txt\",'a+')\r\n print(\"\\n\")\r\n f.write(str(price))\r\n f.write(\"\\n\")\r\n f.close()\r\n\r\nwhile True:\r\n try:\r\n price = int(input(\"enter price : \"))\r\n sum=sum+price\r\n\r\n except:\r\n print(\"No Value Entered\")\r\n break\r\n \r\n \r\ncal()\r\nfile()\r\n \r\n\r\n\r\n\r\n\r\n \r\n\r\n\r\n\r\n\r\n","repo_name":"vithanage97/nibm_network-programming","sub_path":"BILL COHDCN181F-009.py","file_name":"BILL COHDCN181F-009.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44474887881","text":"#!/usr/local/bin/python3\n'''\nAssembles sparql query from pvstring. In order:\nPrefixes, Select statement, Prop-val triples, Selection triples,\nOrder-by statement. \n'''\ndef query(pvstring,lang):\n\n #pvstring = 'pos=Verb,conjClass=Prefix,tam=Aorist,polarity=Affirmative,rootClass=CVC,lexeme=\\'bis\\'%number,person,gender,token'\n # print(str(\"pvstring: \" + pvstring))\n # print(str(\"lang: \" + lang))\n \n\n languages = {'beja-hud': 'bhu', 'afar': 'afr', 'oromo': 'orm', 'somali-standard': 'sst', 'alaaba': 'alb', 'alagwa': 'alg', 'akkadian-ob': 'aob', 'aari': 'aar', 'arabic': 'arb', 'arbore': 'abr', 'awngi': 'awn', 'bayso': 'bay', 'beja-alm': 'bal','beja-rei': 'bre', 'beja-rop': 'bro', 'beja-van': 'bva', 'beja-wed': 'bwe', 'berber-ghadames': 'bgh', 'bilin': 'bil', 'boni-jara': 'boj', 'boni-kijee-bala': 'bob', 'boni-kilii': 'bok', 'burji': 'bur', 'burunge': 'brn', 'coptic-sahidic': 'csa', 'dahalo': 'dah', 'dhaasanac': 'dha', 'dizi': 'diz', 'egyptian-middle': 'egm', 'elmolo': 'elm', 'gawwada': 'gaw', 'gedeo': 'ged', 'geez': 'gez', 'hadiyya': 'had', 'hausa': 'hau', 'hdi': 'hdi', 'hebrew': 'heb', 'iraqw': 'irq', 'kambaata': 'kam', 'kemant': 'kem', 'khamtanga': 'khm', 'koorete': 'kor', 'maale': 'mal', 'mubi': 'mub', 'rendille': 'ren', 'saho': 'sah', 'shinassha': 'shn', 'sidaama': 'sid', 'syriac': 'syr', 'tsamakko': 'tsm', 'wolaytta': 'wol', 'yaaku': 'yak', 'yemsa': 'yem'}\n\n lpref = str(languages.get(lang))\n\n # Prefixes and select statement\n prefixes = \"\"\"PREFIX rdfs: \nPREFIX aama: \nPREFIX aamas: \"\"\"\n lprefix = str(\"\\nPREFIX \" + lpref + \": \")\n prefixes = str(prefixes + lprefix + \"\\n\")\n\n # Select statement\n # if no explicit selection string, use default\n if \"%\" in pvstring:\n propsel = pvstring.split(\"%\")\n pvstring = propsel[0]\n valstring = propsel[1]\n else:\n valstring = \"number,person,gender,token\"\n selection = str(\"?\" + valstring.replace(\",\",\" ?\"))\n selection = selection.replace(\"-\", \"\")\n select = str(\"SELECT DISTINCT \" + selection + \"\\nWHERE\\n{\")\n \n # Triples\n triples = ''\n # 1. prop-val triples\n pvlist = pvstring.split(\",\")\n for pv in pvlist:\n # print(str(pv))\n propval = pv.split(\":\")\n if propval[0] == \"lexeme\":\n triple = str(\" ?s aamas:lexeme / rdfs:label \\'\" + propval[1] + \"\\'.\\n\")\n elif propval[0][0:5] == \"token\":\n triple = str(\" ?s \" + lpref + \":\" + propval[0] + \" \\'\" + propval[1] + \"\\'.\\n\")\n else:\n triple = str(\" ?s \" + lpref + \":\" + propval[0] + \" \" + lpref + \":\" + propval[1] + \" .\\n\")\n triples = triples + triple\n # 2. 
selection triples\n sels = valstring.split(\",\")\n for sel in sels:\n sel2 = sel.replace(\"-\", \"\")\n if sel[0:5] == \"token\":\n triple = str(\" ?s \" + lpref + \":\" + sel + \" ?\" + sel2 + \" .\\n\")\n #elif sel == \"tokenNote\":\n #triple = str(\" ?s \" + lpref + \":tokenNote ?tokenNote .\\n\")\n elif sel == \"lexeme\":\n triple = str(\" ?s aamas:lexeme / rdfs:label ?\" + sel2 + \" .\\n\")\n else:\n triple = str(\" ?s \" + lpref + \":\" + sel + \" / rdfs:label ?\" + sel2 + \" .\\n\")\n triples = triples + triple\n triples = str(triples + \"}\\n\")\n\n\n #order statement\n selection = selection.replace(\"?number\", \"DESC(?number)\")\n selection = selection.replace(\"?gender \", \"DESC(?gender)\")\n order = str(\"ORDER BY \" + selection)\n\n query = str(prefixes + select + triples + order + \"\\n\")\n\n # print(\"query: \\n\" + query)\n\n\n return query\n \n \n\n\n\n\n\n\n","repo_name":"aama/webapp","sub_path":"webappy/pdgmDisp.py","file_name":"pdgmDisp.py","file_ext":"py","file_size_in_byte":3746,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"23544632841","text":"t = int(raw_input())\n\nfor i in xrange(1, t+1):\n p1, n = raw_input().split(' ')\n n = int(n)\n pan = []\n for p in p1:\n pan.append(p)\n #print pan, n\n count = 0\n\n for j in range(len(pan)):\n if (pan[j] == \"-\" and j + n > len(pan)):\n count = -1\n break\n elif (pan[j] == \"-\"):\n #print \"Flipping\"\n count += 1\n for k in xrange(j, j+n):\n if(pan[k] == \"-\"):\n pan[k] = \"+\"\n elif (pan[k] == \"+\"):\n pan[k] = \"-\"\n\n #print \"Answer: \", count\n if (count < 0):\n count = \"IMPOSSIBLE\"\n\n print('Case #{0}: {1}'.format(i, count))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/2837.py","file_name":"2837.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2738677080","text":"import datetime\nimport sys\nfrom tkinter import Tk, Label, Button, StringVar, LEFT\nfrom tkinter.ttk import OptionMenu\n\nimport pandas as pd\nfrom tabulate import tabulate\nfrom win32api import GetSystemMetrics\n\nfrom src.graphs import *\nfrom src.utils import download_league, calculate_league_stats, get_table_sorted, get_match\n\nsort_options = ['Points', 'HPoints', 'APoints', 'Scored', 'Conceded', 'Shots', 'Corners', 'OEff', 'DEff']\n\nleagues = ['Premier League', 'La Liga', 'Bundesliga', 'Serie A', 'Ligue 1', 'Liga NOS', 'Eredivisie',\n 'Jupiler', 'Turkey', 'Greece', 'Premiership', 'Championship', 'La Liga2', 'Bundesliga2',\n 'Serie B', 'Ligue 2']\n\nactual_time = datetime.datetime.now()\nif actual_time.month >= 8:\n actual_year = actual_time.year + 1\nelse:\n actual_year = actual_time.year\n\nyears = ['{}/{}'.format(year - 2000, year - 2000 + 1) for year in range(2010, actual_year)]\n\nscreen_width = GetSystemMetrics(0)\nif screen_width > 1500:\n general_button_width = 15\n general_font_size = 12\nelse:\n general_button_width = 13\n general_font_size = 10\n\ngeneral_font = ('Consolas', general_font_size)\n\nframe = {'w': 60, 'h': 40}\n\ntitle = {'x': 0.25, 'y': 0.0}\ntitle_font = ('Calibri', '40', 'bold')\n\nleague_info = {'x': 0.02, 'y': 0.03}\nleague_sel = {'x': 0.1, 'y': 0.03}\nyear_info = {'x': 0.02, 'y': 0.08}\nyear_sel = {'x': 0.1, 'y': 0.08}\nleague_year_get = {'x': 0.1, 'y': 0.13}\n\nsort_table_info = {'x': 0.02, 'y': 0.2}\nsort_table_sel = {'x': 0.1, 'y': 0.2}\nsort_table_get = {'x': 0.1, 'y': 0.25}\ntable_text = {'x': 
0.02, 'y': 0.3, 'h': 0.6, 'w': 0.22}\n\nhome_team_info = {'x': 0.6, 'y': 0.12}\nhome_team_sel = {'x': 0.68, 'y': 0.12}\naway_team_info = {'x': 0.6, 'y': 0.17}\naway_team_sel = {'x': 0.68, 'y': 0.17}\nmatch_get = {'x': 0.68, 'y': 0.22}\n\nbig_text = {'x': 0.45, 'y': 0.3, 'h': 0.65, 'w': 0.25}\n\npred_text = {'x': 0.67, 'y': 0.3, 'h': 0.65, 'w': 0.25}\n\nplots_info = {'x': 0.92, 'y': 0.1}\ngraphs_button_offset = 0.05\n\ntheme_but = {'x': 0.92, 'y': 0.03}\n\nquit_but = {'x': 0.02, 'y': 0.95}\n\n\nclass Application:\n def __init__(self, master=None):\n\n self.master = master\n\n self.master.tk.call('source', 'sun-valley.tcl')\n self.master.tk.call('set_theme', 'light')\n self.theme = 'light'\n\n # Title\n self.title = Label(self.master,\n text=\"SoccerStats: \" + ''.join([' '] * (max([len(item) for item in leagues]) + 6)),\n font=title_font)\n self.title.place(relx=title['x'], rely=title['y'])\n\n # Year\n self.active_year = StringVar(self.master)\n self.active_year.set(years[0])\n self.year_menu = OptionMenu(self.master, self.active_year, years[0], *years)\n self.year_menu.config(width=general_button_width)\n self.year_menu.place(relx=year_sel['x'], rely=year_sel['y'])\n\n # League\n self.active_league = StringVar(self.master)\n self.active_league.set(leagues[0])\n self.league_menu = OptionMenu(self.master, self.active_league, leagues[0], *leagues)\n self.league_menu.config(width=general_button_width)\n self.league_menu.place(relx=league_sel['x'], rely=league_sel['y'])\n\n # Initialize\n self.league_games = download_league(self.active_league, self.active_year)\n self.league_stats = calculate_league_stats(self.league_games)\n\n # Text\n self.big_text = Label(self.master, font=general_font, justify=LEFT, anchor='nw')\n self.big_text.place(relx=big_text['x'], rely=big_text['y'], relwidth=big_text['w'], relheight=big_text['h'])\n\n self.table_text = Label(self.master, font=general_font, justify=LEFT, anchor='nw')\n self.table_text.place(relx=table_text['x'], rely=table_text['y'], relwidth=table_text['w'],\n relheight=table_text['h'])\n\n self.pred_text = Label(self.master, font=general_font, justify=LEFT, anchor='nw')\n self.pred_text.place(relx=pred_text['x'], rely=pred_text['y'], relwidth=pred_text['w'],\n relheight=pred_text['h'])\n\n # Teams\n teams = self.league_stats.Team.to_list()\n # HTeam\n self.hTeam_vs = StringVar(self.master)\n self.hTeam_vs.set(teams[0])\n self.h_team_menu = OptionMenu(self.master, self.hTeam_vs, teams[0], *teams)\n self.h_team_menu.place(relx=home_team_sel['x'], rely=home_team_sel['y'])\n # ATeam\n self.aTeam_vs = StringVar(self.master)\n self.aTeam_vs.set(teams[1])\n self.a_team_menu = OptionMenu(self.master, self.aTeam_vs, teams[1], *teams)\n self.a_team_menu.place(relx=away_team_sel['x'], rely=away_team_sel['y'])\n\n # Sort by\n self.sortBy = StringVar(self.master)\n self.sortBy.set(sort_options[0])\n self.sort_menu = OptionMenu(self.master, self.sortBy, sort_options[0], *sort_options)\n self.sort_menu.config(width=general_button_width)\n self.sort_menu.place(relx=sort_table_sel['x'], rely=sort_table_sel['y'])\n\n def place_button(widget, text, width, func, x, y, color=''):\n button = Button(widget, command=func, text=text, width=width)\n if color != '':\n button['bg'] = color\n button.place(relx=x, rely=y)\n\n return button\n\n # Stats buttons\n # Scored\n self.scored = place_button(self.master, \"Goals Scored\", general_button_width, self.scored_func, plots_info['x'],\n plots_info['y'] + 1 * graphs_button_offset)\n # Conceded\n self.conceded = 
place_button(self.master, \"Goals Conceded\", general_button_width, self.conceded_func,\n plots_info['x'],\n plots_info['y'] + 2 * graphs_button_offset)\n # Diffs\n self.goal_diffs = place_button(self.master, \"Goal Difference\", general_button_width, self.goal_diffs_func,\n plots_info['x'],\n plots_info['y'] + 3 * graphs_button_offset)\n # Corners\n self.corners = place_button(self.master, \"Corners\", general_button_width, self.corners_func, plots_info['x'],\n plots_info['y'] + 4 * graphs_button_offset)\n # Shots\n self.shots = place_button(self.master, \"Shots\", general_button_width, self.shots_func, plots_info['x'],\n plots_info['y'] + 5 * graphs_button_offset)\n # Fouls\n self.fouls = place_button(self.master, \"Fouls\", general_button_width, self.fouls_func, plots_info['x'],\n plots_info['y'] + 6 * graphs_button_offset)\n # Cards\n self.cards = place_button(self.master, \"Cards\", general_button_width, self.cards_func, plots_info['x'],\n plots_info['y'] + 7 * graphs_button_offset)\n # Get League\n self.get_league = place_button(self.master, \"Get League\", general_button_width, self.get_league_func,\n league_year_get['x'],\n league_year_get['y'], 'green')\n # Get Match\n self.get_match = place_button(self.master, \"Get Match\", general_button_width, self.get_match_func,\n match_get['x'],\n match_get['y'], 'green')\n # Get Table\n self.get_table = place_button(self.master, \"Get Table\", general_button_width, self.get_table_func,\n sort_table_get['x'], sort_table_get['y'], 'green')\n # Infos\n # league\n self.league_info = place_button(self.master, \"League\", general_button_width, self.dummy_func, league_info['x'],\n league_info['y'])\n # year\n self.year_info = place_button(self.master, \"Year\", general_button_width, self.dummy_func, year_info['x'],\n year_info['y'])\n # Plots\n self.plots_info = place_button(self.master, \"Plots\", general_button_width, self.dummy_func, plots_info['x'],\n plots_info['y'], 'orange')\n # Home\n self.home = place_button(self.master, \"Home\", general_button_width, self.dummy_func, home_team_info['x'],\n home_team_info['y'])\n # Away\n self.away = place_button(self.master, \"Away\", general_button_width, self.dummy_func, away_team_info['x'],\n away_team_info['y'])\n # Sort Table\n self.sort_table = place_button(self.master, \"Sort Table\", general_button_width, self.dummy_func,\n sort_table_info['x'], sort_table_info['y'])\n # Quit\n self.quit = place_button(self.master, \"Quit\", general_button_width, self.exit_func, quit_but['x'],\n quit_but['y'],\n 'red')\n # Change theme\n self.theme = place_button(self.master, \"Theme\", general_button_width, self.theme_func, theme_but['x'],\n theme_but['y'])\n\n # Buttons methods\n def scored_func(self, event=None):\n graph_goals(self.league_stats, \"Goals Scored per game\", self.theme)\n\n def conceded_func(self, event=None):\n graph_goals(self.league_stats, \"Goals Conceded per game\", self.theme)\n\n def goal_diffs_func(self, event=None):\n graph_situations(self.league_stats, \"Goal Difference per game\", self.theme)\n\n def corners_func(self, event=None):\n graph_situations(self.league_stats, \"Corners per game\", self.theme)\n\n def fouls_func(self, event=None):\n graph_situations(self.league_stats, \"Fouls per game\", self.theme)\n\n def shots_func(self, event=None):\n graph_situations_stack(self.league_stats, \"Shots per game\", self.theme)\n\n def cards_func(self, event=None):\n graph_situations_stack(self.league_stats, \"Cards per game\", self.theme)\n\n def theme_func(self, event=None):\n if 
self.theme == 'dark':\n self.master.tk.call(\"set_theme\", \"light\")\n self.theme = 'light'\n else:\n self.master.tk.call(\"set_theme\", \"dark\")\n self.theme = 'dark'\n\n def dummy_func(self, event=None):\n pass\n\n def get_league_func(self, event=None):\n self.league_games = download_league(self.active_league, self.active_year)\n self.league_stats = calculate_league_stats(self.league_games)\n # Update teams\n teams = self.league_stats.Team.to_list()\n self.h_team_menu.set_menu(teams[0], *teams)\n self.a_team_menu.set_menu(teams[1], *teams)\n # Update Title\n self.title['text'] = \"SoccerStats: \" + str(self.active_league.get()) + \" \" + str(\n self.active_year.get()) + ''.join(\n [' '] * (max([len(item) for item in leagues]) - len(str(self.active_league.get()))))\n # Update Table - sorted by points\n self.sortBy.set(sort_options[0])\n self.get_table_func()\n # clear big_text\n self.big_text['text'] = ''\n\n def get_table_func(self, event=None):\n table = get_table_sorted(self.league_stats, self.sortBy)\n self.table_text['text'] = tabulate(table, headers='keys', tablefmt='psql', numalign='center')\n\n def get_match_func(self, event=None):\n # Big text\n home, away = get_match(self.league_stats, self.hTeam_vs.get(), self.aTeam_vs.get())\n data = ['Rank', 'Scored', 'Scored_1H', 'Scored_2H', 'Conceded', 'Conceded_1H', 'Conceded_2H',\n 'Shots', 'ShotsT', 'Corners', 'OEff', 'DEff']\n # home\n _df = pd.DataFrame(home, columns=['#Geral', '#Home'])\n _df[self.hTeam_vs.get().ljust(10)] = data\n self.big_text['text'] = tabulate(_df.set_index(self.hTeam_vs.get().ljust(10), drop=True), headers='keys',\n tablefmt='psql', numalign='center')\n # away\n _df = pd.DataFrame(away, columns=['#Geral', '#Away'])\n _df[self.aTeam_vs.get().ljust(10)] = data\n self.big_text['text'] += \"\\n\" + tabulate(_df.set_index(self.aTeam_vs.get().ljust(10), drop=True), headers='keys',\n tablefmt='psql', numalign='center')\n\n # Pred text\n _df_aux = self.league_games[\n (self.league_games.HomeTeam == self.hTeam_vs.get()) & (self.league_games.AwayTeam == self.aTeam_vs.get())]\n if len(_df_aux) == 0:\n self.pred_text['text'] = \"This match hasn't occurred yet\\n\"\n return\n _df_aux = _df_aux.iloc[0]\n self.pred_text['text'] = \"Played at {}\\n\\n\".format(_df_aux.Date)\n ht = {'home': _df_aux.HTHG, 'away': _df_aux.HTAG}\n ft = {'home': _df_aux.FTHG, 'away': _df_aux.FTAG}\n odds = {'1': _df_aux.B365H, 'X': _df_aux.B365D, '2': _df_aux.B365A}\n shots = {'home': _df_aux.HS, 'away': _df_aux.AS}\n shots_target = {'home': _df_aux.HST, 'away': _df_aux.AST}\n corners = {'home': _df_aux.HC, 'away': _df_aux.AC}\n fouls = {'home': _df_aux.HF, 'away': _df_aux.AF}\n yellows = {'home': _df_aux.HY, 'away': _df_aux.AY}\n reds = {'home': _df_aux.HR, 'away': _df_aux.AR}\n self.pred_text['text'] += tabulate(pd.DataFrame([ft.values(), ht.values(), shots.values(), shots_target.values(), corners.values(),\n fouls.values(), yellows.values(), reds.values()],\n columns=[self.hTeam_vs.get(), self.aTeam_vs.get()],\n index=['FT', 'HT', 'Shots', 'Shots target', 'Corners', 'Fouls',\n 'Yellow Cards', 'Red Cards']),\n headers='keys', tablefmt='psql', numalign='center')\n self.pred_text['text'] += \"\\n\\n\"\n self.pred_text['text'] += tabulate(\n pd.DataFrame([odds.values()], columns=[self.hTeam_vs.get(), 'X', self.aTeam_vs.get()],\n index=['Odds']), headers='keys', tablefmt='psql', numalign='center')\n\n @staticmethod\n def exit_func(event=None):\n sys.exit()\n\n\nif __name__ == '__main__':\n root = Tk()\n root.attributes('-fullscreen', True)\n 
Application(root)\n root.mainloop()\n","repo_name":"k0rean/SoccerStats","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":14402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1176481223","text":"#\n# @lc app=leetcode.cn id=162 lang=python3\n#\n# [162] 寻找峰值\n#\n\n# @lc code=start\nclass Solution:\n \"\"\"\n 二分查找的思路:\n 1. 如果mid位置满足条件则直接返回结果\n 2. 如果mid处于升序区间, 说明右半部分必定有解, 因为nums[n] = -inf\n 3. 如果mid处于降序区间, 说明左半部分必定有解, 因为nums[-1] = -inf\n \"\"\"\n\n def findPeakElement(self, nums: List[int]) -> int:\n return self.helper2(nums)\n\n def helper1(self, nums):\n if not nums:\n return None\n n = len(nums)\n for i in range(n):\n right = nums[i + 1] if i < n - 1 else nums[i] - 1\n if nums[i] > right:\n return i\n\n def helper2(self, nums):\n if not nums:\n return None\n n = len(nums)\n a, b = 0, n - 1\n while b - a > 1:\n mid = (a + b) // 2\n if nums[mid] > nums[mid - 1] and nums[mid] > nums[mid + 1]:\n return mid\n elif nums[mid - 1] < nums[mid] and nums[mid] < nums[mid + 1]:\n a = mid\n else:\n b = mid\n\n if nums[a] > nums[b]:\n return a\n else:\n return b\n\n\n# @lc code=end\n","repo_name":"labusi/oj-problems","sub_path":"leetcode/python/162.寻找峰值.py","file_name":"162.寻找峰值.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"27918715418","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 19 15:35:33 2021\n\n@author: roji\n\"\"\"\n\n\nimport numpy as np\nimport tweeter\nimport constants\nimport time\nimport alpaca_trade_api as tradeapi\n\ndef main():\n ticker = 'HD'\n max_results = 100\n end_point = constants.end_point\n BEARERTOKEN = constants.BEARERTOKEN\n PUB_KEY = constants.PUB_KEY\n SEC_KEY = constants.SEC_KEY\n \n \n api = tradeapi.REST(key_id = PUB_KEY,secret_key = SEC_KEY,base_url = end_point)\n twitter = tweeter.tweet(ticker = ticker, BEARER_TOKEN = BEARERTOKEN, max_results= max_results)\n \n pos_held = False\n \n while True:\n print(\"\")\n print(\"Checking Price\")\n market_data = api.get_barset(ticker,'minute', limit = 5)\n \n close_list = []\n \n for bar in market_data[ticker]:\n close_list.append(bar.c)\n \n \n close_list = np.array(close_list, dtype = np.float64)\n ma = np.mean(close_list)\n last_price = close_list[4]\n \n print(\"Moving average :\" + str(ma))\n print(\"Last price:\" + str(last_price))\n \n \n df = twitter.create_data_frame()\n df = twitter.adding_prob_and_sentiment(df)\n alpha = twitter.average_sentiments(df)\n print('alpha:',alpha)\n \n if ma + 0.1 < last_price and not pos_held and alpha > 0 :\n print('BUY')\n api.submit_order(symbol = ticker, qty = 10, side = 'buy',type = 'market',time_in_force = 'gtc')\n pos_held = True\n \n elif ma - 0.1 > last_price and pos_held and alpha < 0:\n print('SELL')\n api.submit_order(symbol = ticker,qty = 10, side = 'sell',type = 'market',time_in_force = 'gtc')\n pos_held = False\n \n time.sleep(60)\n \n \n\nif __name__ == \"__main__\":\n main()","repo_name":"mkouretchian/Algorithmic_trading","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25017649882","text":"import datetime\nimport jwt\nfrom flask import jsonify, redirect, request\nfrom os import getenv\nfrom functools import wraps\n\n\ndef jwt_required(fn):\n @wraps(fn)\n def wrapped(*args, **kwargs):\n token = 
request.headers.get('Authorization').split()[-1]\n\n        try:\n            jwt.decode(\n                token,\n                getenv('JWT_SECRET'),\n                algorithms=['HS256']\n            )\n        except jwt.exceptions.ExpiredSignatureError:\n            return jsonify({\n                'ACK': False,\n                'mensagem': 'Token expired'\n            })\n        except jwt.exceptions.InvalidSignatureError:\n            return jsonify({\n                'ACK': False,\n                'mensagem': 'Invalid token'\n            })\n        except jwt.exceptions.DecodeError:\n            return jsonify({\n                'ACK': False,\n                'mensagem': 'Invalid token'\n            })\n        except Exception as e:\n            return jsonify({\n                'ACK': False,\n                'mensagem': f'error: {e}'\n            })\n        return fn(*args, **kwargs)\n\n    return wrapped\n\n\ndef refresh_token_required(fn):\n    @wraps(fn)\n    def decorated_function(*args, **kwargs):\n        token = request.cookies.get('token')\n\n        try:\n            jwt.decode(\n                token,\n                getenv('JWT_REFRESH_SECRET'),\n                algorithms=['HS256']\n            )\n        except jwt.exceptions.ExpiredSignatureError:\n            return redirect(\"/login\")\n        except Exception as e:\n            return jsonify({\n                'ACK': False,\n                'mensagem': f'error: {e}'\n            })\n        return fn(*args, **kwargs)\n\n    return decorated_function\n\n\ndef generate_access_token(user_id):\n    return jwt.encode(\n        {\n            'user_id': user_id,\n            'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=5)\n        },\n        getenv('JWT_SECRET'),\n        algorithm='HS256'\n    )\n\n\ndef generate_refresh_token(user_id):\n    return jwt.encode(\n        {\n            'user_id': user_id,\n            'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=45)\n        },\n        getenv('JWT_REFRESH_SECRET'),\n        algorithm='HS256'\n    )\n","repo_name":"edulonde/estudos-jwt-flask-curso4linux","sub_path":"extensions/token_utils.py","file_name":"token_utils.py","file_ext":"py","file_size_in_byte":2238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32783486196","text":"import re\n\n# Part 1\nwith open(r'/home/macolella/AoC2022/17/input') as f:\n    jet_pat = f.read()\n\n# jet_pat = '''>>><<><>><<<>><>>><<<>>><<<><<<>><>><<>>'''\n\ndef get_rock(rock_num, y):\n    if rock_num == 0:\n        return set([(2, y), (3, y), (4, y), (5, y)])\n    elif rock_num == 1:\n        return set([(2, y + 1), (3, y), (3, y + 1), (3, y + 2), (4, y + 1)])\n    elif rock_num == 2:\n        return set([(2, y), (3, y), (4, y), (4, y + 1), (4, y + 2)])\n    elif rock_num == 3:\n        return set([(2, y), (2, y + 1), (2, y + 2), (2, y + 3)])\n    elif rock_num == 4:\n        return set([(2, y), (2, y + 1), (3, y), (3, y + 1)])\n\ndef move_left(rock):\n    if any([x == 0 for (x, y) in rock]):\n        return rock\n    return set([(x - 1, y) for (x, y) in rock])\n\ndef move_right(rock):\n    if any([x == 6 for (x, y) in rock]):\n        return rock\n    return set([(x + 1, y) for (x, y) in rock])\n\ndef move_up(rock):\n    return set([(x, y + 1) for (x, y) in rock])\n\ndef move_down(rock):\n    return set([(x, y - 1) for (x, y) in rock])\n\ndef get_top(stationary_rocks):\n    return max([y for (x, y) in stationary_rocks])\n\nstationary_rocks = set()\nground = set([(0, 0), (1, 0), (2, 0), (3, 0), (4, 0), (5, 0), (6, 0)])\ntop = 0\njet_ind = 0\nfor rock_num in range(2022):\n    rock = get_rock(rock_num % 5, top + 4)\n    falling = True\n    while falling:\n        wind_dir = jet_pat[jet_ind]\n        if wind_dir == '<':\n            rock = move_left(rock)\n            if rock & stationary_rocks:\n                rock = move_right(rock)\n        elif wind_dir == '>':\n            rock = move_right(rock)\n            if rock & stationary_rocks:\n                rock = move_left(rock)\n        rock = move_down(rock)\n        if ((rock & stationary_rocks) or (rock & ground)):\n            rock = move_up(rock)\n            stationary_rocks |= rock\n            top = get_top(stationary_rocks)\n            falling = False\n        jet_ind = (jet_ind + 1) % len(jet_pat)\n\nprint(f'The 
answer to Part 1 is {top}')\n\n# Part 2\nlimit = 1_000_000_000_000\nstationary_rocks = set()\nprevious_conditions = {}\nground = set([(0, 0), (1, 0), (2, 0), (3, 0), (4, 0), (5, 0), (6, 0)])\ntop = 0\njet_ind = 0\nadded = 0\nrock_num = 0\n\ndef signature(stationary_rocks):\n    maxY = max([y for (x, y) in stationary_rocks])\n    return frozenset([(x, maxY - y) for (x, y) in stationary_rocks if maxY - y <= 30])\n\nwhile rock_num < limit:\n    rock = get_rock(rock_num % 5, top + 4)\n    falling = True\n    while falling:\n        wind_dir = jet_pat[jet_ind]\n        if wind_dir == '<':\n            rock = move_left(rock)\n            if rock & stationary_rocks:\n                rock = move_right(rock)\n        elif wind_dir == '>':\n            rock = move_right(rock)\n            if rock & stationary_rocks:\n                rock = move_left(rock)\n        rock = move_down(rock)\n        if ((rock & stationary_rocks) or (rock & ground)):\n            rock = move_up(rock)\n            stationary_rocks |= rock\n            top = get_top(stationary_rocks)\n            timestamp = (jet_ind, rock_num % 5, signature(stationary_rocks))\n            if timestamp in previous_conditions:\n                former_rock_num, former_top = previous_conditions[timestamp]\n                height_change = top - former_top\n                rock_num_change = rock_num - former_rock_num\n                added_cycles = (limit - rock_num) // rock_num_change\n                added += added_cycles * height_change\n                rock_num += added_cycles * rock_num_change\n            previous_conditions[timestamp] = (rock_num, top)\n            falling = False\n        jet_ind = (jet_ind + 1) % len(jet_pat)\n    rock_num += 1\n\nprint(added + top)","repo_name":"mcolella326/Advent-of-Code-2022","sub_path":"17/17.py","file_name":"17.py","file_ext":"py","file_size_in_byte":3656,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"36535398604","text":"import json\nimport os\n\nimport requests\n\nfrom config import *\nfrom qq_login import QQLogin\nfrom util import *\n\n\nclass WegameApi:\n    login_url = \"https://www.wegame.com.cn/api/middle/clientapi/auth/login_by_qq\"\n    common_url_prefix = \"https://m.wegame.com.cn/api/mobile/lua/proxy/index/mwg_dnf_battle_record/\"\n    cached_dir = \".cached\"\n    cached_file = \".token.{}.json\"\n\n    def auto_login_with_password(self, cfg):\n        cached = self.load_token(cfg.account)\n        if cached is not None:\n            api.set_uin_skey(cached[\"skey\"], cached[\"uin\"])\n            api.set_tgp_info(cached[\"tgp_id\"], cached[\"tgp_ticket\"])\n            if self.is_token_still_valid():\n                print(\"use cached\")\n                return\n            else:\n                print(\"token invalid, trying to get a new one\")\n\n        ql = QQLogin(cfg)\n        lr = ql.login(cfg.account, cfg.password)\n        print(lr)\n        api.login(lr.uin, lr.skey)\n        self.save_token(cfg.account)\n        print(\"new login, token saved\")\n\n    def load_token(self, account):\n        if not os.path.isdir(self.cached_dir):\n            return None\n\n        if not os.path.isfile(self.get_token_file(account)):\n            return None\n\n        with open(self.get_token_file(account), \"r\", encoding=\"utf-8\") as f:\n            return json.load(f)\n\n    def save_token(self, account):\n        if not os.path.isdir(self.cached_dir):\n            os.mkdir(self.cached_dir)\n\n        with open(self.get_token_file(account), \"w\", encoding=\"utf-8\") as f:\n            json.dump({\n                \"uin\": self.uin,\n                \"skey\": self.skey,\n                \"tgp_id\": self.tgp_id,\n                \"tgp_ticket\": self.tgp_ticket,\n            }, f, ensure_ascii=False)\n\n    def is_token_still_valid(self):\n        res = self.get_player_role_list(print_res=False)\n        return res[\"data\"][\"result\"] == 0\n\n    def get_token_file(self, account):\n        return os.path.join(self.cached_dir, self.cached_file.format(account))\n\n    def login(self, uin, skey):\n        self.set_uin_skey(skey, uin)\n\n        data = {\n            \"login_info\": {\n                \"qq_info_type\": 
3,\n                \"uin\": uin2qq(self.uin),\n                \"sig\": self.skey,\n            },\n            \"config_params\": {\n                \"lang_type\": 0\n            },\n            \"mappid\": \"10001\",\n            \"mcode\": \"\",\n            \"clienttype\": \"1000005\"\n        }\n        headers = {\n            \"referer\": \"https://www.wegame.com.cn/middle/login/third_callback.html\",\n            \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36\",\n        }\n        res = requests.post(self.login_url, json=data, headers=headers)\n        tgp_id, tgp_ticket = int(res.cookies.get('tgp_id')), res.cookies.get('tgp_ticket')\n        self.set_tgp_info(tgp_id, tgp_ticket)\n\n        print(tgp_id)\n        print(tgp_ticket)\n\n    def set_uin_skey(self, skey, uin):\n        self.uin = uin\n        self.skey = skey\n\n    def set_tgp_info(self, tgp_id, tgp_ticket):\n        self.tgp_id = tgp_id\n        self.tgp_ticket = tgp_ticket\n        # must be set separately by calling set_role_info\n        self.area_id = 0\n        self.role_name = \"\"\n        self.common_headers = {\n            \"accept\": \"application/json\",\n            \"cookie\": \"app_id=10001;tgp_id={tgp_id};platform=qq;account={account};skey={skey};tgp_ticket={tgp_ticket};machine_type=MIX+2;channel_number=5;app_version=1050602003;client_type=601\".format(\n                tgp_id=self.tgp_id,\n                account=uin2qq(self.uin),\n                skey=self.skey,\n                tgp_ticket=self.tgp_ticket,\n            ),\n            \"Content-Type\": \"application/json; charset=utf-8\",\n            \"Connection\": \"Keep-Alive\",\n            \"Accept-Encoding\": \"gzip\",\n            \"User-Agent\": \"okhttp/3.11.0\",\n        }\n\n    def get_player_role_list(self, print_res=True):\n        \"\"\"\n        Get the player's role list across all server areas\n        \"\"\"\n        return self._post(\"get_player_role_list\", need_role_info=False, print_res=print_res).json()\n\n    def set_role_info(self, area_id, role_name):\n        \"\"\"\n        Call this interface first to set the role info before using the interfaces below\n        \"\"\"\n        self.area_id = int(area_id)\n        self.role_name = str(role_name)\n\n    def get_capacity_detail_info(self):\n        \"\"\"\n        Get the combat power info of the given role on the given server\n        \"\"\"\n        return self._post(\"get_capacity_detail_info\").json()\n\n    def get_player_fight_statistic_info(self):\n        \"\"\"\n        Get the stat panel overview of the given role on the given server\n        \"\"\"\n        return self._post(\"get_player_fight_statistic_info\").json()\n\n    def get_equip_description_image(self, equip_id):\n        \"\"\"\n        Get the description image of the given equipment\n        \"\"\"\n        return \"https://bb.img.qq.com/bbcdn/dnf/equips/equimg/{equip_id}.png\".format(equip_id=equip_id)\n\n    def get_equip_icon(self, equip_id):\n        \"\"\"\n        Get the icon of the given equipment\n        \"\"\"\n        return \"http://cdn.tgp.qq.com/DNF_picture/equip_icon/{equip_id}.png\".format(equip_id=equip_id)\n\n    def get_player_equipment_list(self):\n        \"\"\"\n        Get the equipment list of the given role on the given server\n        \"\"\"\n        return self._post(\"get_player_equipment_list\").json()\n\n    def get_player_role_detail(self):\n        \"\"\"\n        Get the detailed stat panel data of the given role on the given server\n        \"\"\"\n        return self._post(\"get_player_role_detail\").json()\n\n    def get_player_role_info(self, print_res=True):\n        \"\"\"\n        Get the role info of the given role on the given server\n        \"\"\"\n        return self._post(\"get_player_role_info\", print_res=print_res).json()\n\n    def get_player_recent_dungeon_list(self):\n        \"\"\"\n        Get the recent dungeon damage info of the given role on the given server\n        \"\"\"\n        role_info = api.get_player_role_info(print_res=False)\n        return self._post(\"get_player_recent_dungeon_list\", json_data={\n            \"start_index\": 0,\n            \"career\": role_info[\"data\"][\"role_info\"][\"career\"],\n        }).json()\n\n    def _post(self, api_name, json_data=None, need_role_info=True, print_res=True):\n        if need_role_info and len(self.role_name) == 0:\n            print(\"Before calling any interface other than the role-list query, call set_role_info to set the role info; if you don't know it, call get_player_role_list to fetch it\")\n            exit(-1)\n\n        base_json_data = {\n            \"target_tgpid\": self.tgp_id,\n            # \"target_suid\": \"0\",\n            \"area_id\": self.area_id,\n            \"role\": self.role_name,\n            \"role_name\": self.role_name,\n        
}\n        if json_data is None:\n            json_data = {}\n        res = requests.post(self.common_url_prefix + api_name, json={**base_json_data, **json_data}, headers=self.common_headers)\n\n        if print_res:\n            print(api_name, json.dumps(res.json(), ensure_ascii=False), \"\\n\")\n\n        return res\n\n\nif __name__ == '__main__':\n    api = WegameApi()\n\n    # # Read config info\n    # Option: log in with account & password to fetch uin and skey automatically\n    cfg = Config()\n    # # Fill in your real account and password, and adjust other settings\n    # cfg.account = \"111\"\n    # cfg.password = \"222\"\n    api.auto_login_with_password(cfg)\n    # # Or fill in uin and skey directly\n    # uin = \"o12345678\"\n    # skey = \"@this is the skey; search online for how to fetch it manually\"\n    # api.login(uin, skey)\n\n    res = api.get_player_role_list()\n    for role in res['data']['role_list']:\n        print(\"area={:3d}\\trole={}\".format(role['area_id'], role['role_name']))\n    default_role = res['data']['role_list'][0]\n    area_id, role_name = default_role['area_id'], default_role['role_name']\n    api.set_role_info(area_id, role_name)\n    api.get_capacity_detail_info()\n    api.get_player_fight_statistic_info()\n    equip_id = 100390332\n    api.get_equip_description_image(equip_id)\n    api.get_equip_icon(equip_id)\n    api.get_player_equipment_list()\n    api.get_player_role_detail()\n    api.get_player_role_info()\n    api.get_player_recent_dungeon_list()\n","repo_name":"fzls/wegame_dnf_api","sub_path":"wegame_api.py","file_name":"wegame_api.py","file_ext":"py","file_size_in_byte":8113,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"23548215921","text":"import sys\r\nsys.stdout = open('PanckageFliper.txt', 'w')\r\n\r\ndef inputIntegerArray():\r\n    return list( map( int, input().split(\" \") ) )\r\n\r\ndef flip( pattern, i ):\r\n    if pattern[i]=='+':\r\n        pattern[i] = '-'\r\n    else:\r\n        pattern[i] = '+'\r\n\r\ndef solve( pattern, k ):\r\n    flips = 0;\r\n    for i in range(0, len(pattern) - k + 1):\r\n        if pattern[i] == '-':\r\n            for j in range(i,i+k):\r\n                flip(pattern,j)\r\n            flips += 1\r\n\r\n    for i in range(0,len(pattern)):\r\n        if pattern[i] == '-':\r\n            return -1\r\n    return flips\r\n\r\nn = inputIntegerArray()[0]\r\nfor test in range(1,n+1):\r\n    (pattern,k) = input().split(\" \")\r\n    answer = solve( list(pattern), int(k) )\r\n\r\n    #print ( solve( list(pattern), int(k) ), solve( list(pattern)[::-1], int(k) ) );\r\n\r\n    if answer >= 0 :\r\n        print ( \"Case #{}: {}\".format( test, answer ) )\r\n    else:\r\n        print(\"Case #{}: {}\".format(test, \"IMPOSSIBLE\" ) )\r\n\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/437.py","file_name":"437.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27529754797","text":"from celery import shared_task\nfrom models import Meter, Reading\nimport subprocess\nimport os\nimport json\nimport dateutil.parser\n\n@shared_task\ndef recordReading():\n    rtltcp_address = '192.168.1.3:1234'\n    program = os.getcwd() + r'/lib/rtlamr'\n    args = [program, '-server', rtltcp_address, '-format', 'json', '-filterid', '64633980', '-msgtype', 'SCM', '-single']\n\n    rtlamr_exists = os.path.exists(program)\n\n    if rtlamr_exists:\n        process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n        out, err = process.communicate()\n        \n        reading = json.loads(out)\n\n        time = dateutil.parser.parse(reading['Time'])\n        message = reading['Message']\n        message_id = message['ID']\n        meter_consumption = message['Consumption']\n        meter_type = message['Type']\n\n        meter, meter_created = Meter.objects.get_or_create(id=message_id, defaults={'type': 
meter_type})\n\n        reading, reading_created = Reading.objects.get_or_create(consumption=meter_consumption, meter=meter, defaults={'time': time})\n\n        if meter_created and reading_created:\n            return 'Meter and Reading created'\n        elif reading_created:\n            return 'Reading created, consumption: ' + str(meter_consumption)\n        else:\n            return 'Meter and Reading already exist, nothing done'\n    else:\n        return program + \" doesn't exist\"\n\n","repo_name":"PlasmaEye/PowerMonitor","sub_path":"PowerMonitor/monitor/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26244878928","text":"from flask import Blueprint, request, session, current_app\nfrom timelink import model\nfrom timelink.util.uploader import upload_img_to_s3\nimport jwt\n\n\nbp = Blueprint('service', __name__, url_prefix='/api')\n\n\nSECRET_KEY = current_app.config['SECRET_KEY']\n\n\n@bp.route(\"/services\", methods=[\"POST\"])\ndef create():\n    created_status = False\n    uploaded_status = False\n    try:\n        imageFile = request.files[\"imageFile\"]\n        data = request.form.to_dict()\n\n        name = data[\"name\"]\n        price = data[\"price\"]\n        type = data[\"type\"]\n        group_id = data[\"group_id\"]\n        openTime = data[\"openTime\"]\n        closeTime = data[\"closeTime\"]\n\n        usertoken = jwt.decode(session.get('usertoken'), SECRET_KEY, algorithms=[\"HS256\"])\n        user_id = usertoken[\"id\"]\n        \n        # check if image file exists\n        if imageFile:\n            # upload image to s3\n            uploaded_status = upload_img_to_s3(imgfile=imageFile, file_name=imageFile.filename)\n            # if uploaded, create service\n            if uploaded_status:\n                created_status = model.service.create(name=name, type=type, price=price, group_id=group_id, \n                                                      user_id=user_id, open_time=openTime, close_time=closeTime,\n                                                      imgUrl=f\"https://d43czlgw2x7ve.cloudfront.net/timelink/{imageFile.filename}\")\n        else:\n            created_status = model.service.create(name=name, type=type, price=price, group_id=group_id, \n                                                  user_id=user_id, open_time=openTime, close_time=closeTime)\n        # if created, return success message\n        if created_status:\n            return {\"success\": True}, 201\n        return {\"success\": False, \"error\":{\"code\": 400, \"message\":\"Create Failed\"}}, 400\n    except jwt.exceptions.PyJWTError:\n        return {\"success\": False, \"error\":{\"code\": 401, \"message\":\"Unauthorized\"}}, 401\n    except Exception as e:\n        return {\"success\": False, \"error\":{\"code\": 500, \"message\": str(e)}}, 500\n    \n\n@bp.route(\"/services\", methods=[\"GET\"])\ndef get_services():\n    try:\n        queryString = request.args\n        if \"groupId\" in queryString:\n            dbData = model.service.get_all_by_groupId(groupId=queryString[\"groupId\"])\n        elif \"group_id\" in queryString:\n            dbData = model.service.get_all_by_group_id(group_id=queryString[\"group_id\"])\n        else:\n            usertoken = jwt.decode(session.get('usertoken'), SECRET_KEY, algorithms=[\"HS256\"])\n            user_id = usertoken[\"id\"]\n            dbData = model.service.get_all_by_user_id(user_id=user_id)\n        \n        if dbData:\n            return {\"success\": True, \"data\": dbData}, 200\n        return {\"success\": False, \"data\": None}, 200\n    except jwt.exceptions.PyJWTError:\n        return {\"success\": False, \"error\":{\"code\": 401, \"message\":\"Unauthorized\"}}, 401\n    except Exception as e:\n        return {\"success\": False, \"error\":{\"code\": 500, \"message\": str(e)}}, 500\n\n@bp.route(\"/services/<service_id>\", methods=[\"GET\"])\ndef get_service(service_id):\n    try:\n        dbData = model.service.get_service_by_id(service_id=service_id)\n        if dbData:\n            return 
{\"success\": True, \"data\": dbData}, 200\n        return {\"success\": False, \"data\": None}, 200\n    except Exception as e:\n        return {\"success\": False, \"error\":{\"code\": 500, \"message\": str(e)}}, 500\n\n    \n@bp.route(\"/services/<service_id>\", methods=[\"DELETE\"])\ndef delete(service_id):\n    try:\n        jwt.decode(session.get('usertoken'), SECRET_KEY, algorithms=[\"HS256\"])\n        model.service.logical_delete(service_id=service_id)\n        return {\"success\": True}, 200\n    except jwt.exceptions.PyJWTError:\n        return {\"success\": False, \"error\":{\"code\": 401, \"message\":\"Unauthorized\"}}, 401\n    except Exception as e:\n        return {\"success\": False, \"error\":{\"code\": 500, \"message\": str(e)}}, 500","repo_name":"ChengTze-Wu/TimeLink","sub_path":"timelink/controller/apis/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":3883,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"22875506103","text":"# Scraping HTML Data with BeautifulSoup:\n# In this assignment you will write a Python program to use urllib to read the HTML from the data files below, \n# and parse the data, extracting numbers and compute the sum of the numbers in the file\n\n\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport ssl\n\n# Ignore SSL certificate errors\nctx = ssl.create_default_context()\nctx.check_hostname = False\nctx.verify_mode = ssl.CERT_NONE\n\nurl = input('Enter - ')\nhtml = urlopen(url, context=ctx).read()\nsoup = BeautifulSoup(html, \"html.parser\")\n\ntags = soup('span')\ncount = 0\nlst = list()\nfor tag in tags:\n    count += 1\n    print('TAG:', tag)\n    print('URL:', tag.get('comments', None))\n    print('Contents:', tag.contents[0])\n    lst.append(int(tag.contents[0]))\nprint('\\nCount:', count)\nprint('Sum:', sum(lst))\n","repo_name":"YanShtein/PythonForEverybody","sub_path":"012.6.content_sum.py","file_name":"012.6.content_sum.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"3607746269","text":"\"\"\"Mini Bot Framework (French), async version\n\nThis code is provided as-is for demonstration purposes only and is not\nsuitable for production. 
Use at your own risk.\n\"\"\"\nimport base64\nfrom typing import Dict\n\nimport requests\nimport sounddevice as sd # type: ignore\nimport websocket as ws # type: ignore\n\nfrom uhlive.auth import build_authentication_request\nfrom uhlive.stream.recognition import Closed\nfrom uhlive.stream.recognition import CompletionCause as CC\nfrom uhlive.stream.recognition import (\n DefaultParams,\n GrammarDefined,\n Opened,\n ParamsSet,\n RecognitionComplete,\n RecognitionInProgress,\n Recognizer,\n StartOfInput,\n build_connection_request,\n)\n\n\nclass Bot:\n\n TTF_CACHE: Dict[str, bytes] = {}\n\n def __init__(self, google_ttf_key):\n self.client = Recognizer()\n self.socket = None\n self.google_ttf_key = google_ttf_key\n\n def stream_mic(self):\n def callback(indata, frame_count, time_info, status):\n self.socket.send_binary(bytes(indata))\n\n stream = sd.RawInputStream(\n callback=callback, channels=1, samplerate=8000, dtype=\"int16\", blocksize=960\n )\n stream.start()\n return stream\n\n def _ttf(self, text) -> bytes:\n if text in self.TTF_CACHE:\n return self.TTF_CACHE[text]\n payload = {\n \"audioConfig\": {\"audioEncoding\": \"LINEAR16\", \"sampleRateHertz\": 8000},\n \"input\": {\"text\": text},\n \"voice\": {\"languageCode\": \"fr-FR\", \"name\": \"fr-FR-Wavenet-C\"},\n }\n # url = \"https://texttospeech.googleapis.com/v1/text:synthesize\"\n url = f\"https://texttospeech.googleapis.com/v1beta1/text:synthesize?key={self.google_ttf_key}\"\n h = {\"Content-Type\": \"application/json; charset=utf-8\"}\n response = requests.post(url, headers=h, json=payload)\n response.raise_for_status()\n json = response.json()\n audio = base64.b64decode(json[\"audioContent\"])[44:]\n self.TTF_CACHE[text] = audio\n return audio\n\n def say(self, text):\n audio = self._ttf(text)\n with sd.RawOutputStream(\n channels=1,\n samplerate=8000,\n dtype=\"int16\",\n ) as stream:\n stream.write(audio)\n print(\"à vous\")\n\n def expect(self, *event_classes, ignore=None):\n while True:\n event = self.client.receive(self.socket.recv())\n if isinstance(event, event_classes):\n return event\n elif ignore is None or not isinstance(event, ignore):\n raise AssertionError(f\"Expected one of {event_classes}, got {event}\")\n\n def ask_until_success(self, text, *args, **kwargs):\n choice = None\n while choice is None:\n self.say(text)\n self.socket.send(self.client.recognize(*args, **kwargs))\n self.expect(RecognitionInProgress)\n resp = self.expect(RecognitionComplete, ignore=(StartOfInput,))\n if resp.completion_cause == CC.Success:\n choice = resp.body.nlu\n else:\n if resp.body.asr:\n self.say(\"Je n'ai pas compris\")\n print(\"user said:\", resp.body.asr.transcript)\n else:\n self.say(\"Je n'ai rien entendu\")\n return choice\n\n def confirm(self, text: str) -> bool:\n res = self.ask_until_success(\n text,\n \"builtin:speech/boolean\",\n recognition_mode=\"hotword\",\n hotword_max_duration=5000,\n )\n return res.value\n\n def run(self, uhlive_client: str, uhlive_secret: str):\n\n auth_url, auth_params = build_authentication_request(\n uhlive_client, uhlive_secret\n )\n login = requests.post(auth_url, data=auth_params)\n login.raise_for_status()\n uhlive_token = login.json()[\"access_token\"]\n\n url, headers = build_connection_request(uhlive_token)\n self.socket = socket = ws.create_connection(url, header=headers)\n try:\n self.socket = socket\n socket.send(self.client.open(\"deskbot\"))\n self.expect(Opened)\n streamer = self.stream_mic()\n try:\n self.scenario()\n except Exception as e:\n self.say(\"Nous subissons une 
avarie. Rappelez plus tard.\")\n raise e\n finally:\n streamer.stop()\n streamer.close()\n socket.send(self.client.close())\n self.expect(Closed)\n finally:\n socket.close()\n\n def set_params(self, **kwargs):\n self.socket.send(self.client.set_params(**kwargs))\n self.expect(ParamsSet)\n\n def get_params(self):\n self.socket.send(self.client.get_params())\n res = self.expect(DefaultParams)\n return res\n\n def define_grammar(self, builtin, alias):\n self.socket.send(self.client.define_grammar(builtin, alias))\n self.expect(GrammarDefined)\n\n def recognize(self, *args, **kwargs):\n self.socket.send(self.client.recognize(*args, **kwargs))\n self.expect(RecognitionInProgress)\n\n def scenario(self):\n \"\"\"To be overridden in subclasses\"\"\"\n raise NotImplementedError\n","repo_name":"uhlive/python-sdk","sub_path":"examples/recognition/sync_bot_lib.py","file_name":"sync_bot_lib.py","file_ext":"py","file_size_in_byte":5274,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"17340067147","text":"# Part of < https://github.com/pernandapurba/TelegraphUploader >\n# (c) 2021 @SFCorp.\n\nimport os\nimport logging\nfrom PIL import Image\nfrom telethon import TelegramClient, events, Button\nfrom telethon.tl.functions.users import GetFullUserRequest\nfrom decouple import config\nfrom telethon.errors.rpcerrorlist import UserNotParticipantError\nfrom telethon.tl.functions.channels import GetParticipantRequest\nfrom telegraph import Telegraph, exceptions, upload_file\n\nlogging.basicConfig(format='[%(levelname) 5s/%(asctime)s] %(name)s: %(message)s', level=logging.INFO)\n\nappid = apihash = bottoken = None\n# start the bot\nprint(\"Starting...\")\ntry:\n apiid = config(\"API_ID\", cast=int)\n apihash = config(\"API_HASH\")\n bottoken = config(\"BOT_TOKEN\")\nexcept:\n print(\"Environment vars are missing! Kindly recheck.\")\n print(\"Bot is quiting...\")\n exit()\n\nif (apiid != None and apihash!= None and bottoken != None):\n try:\n BotzHub = TelegramClient('bot', apiid, apihash).start(bot_token=bottoken)\n except Exception as e:\n print(f\"ERROR!\\n{str(e)}\")\n print(\"Bot is quiting...\")\n exit()\nelse:\n print(\"Environment vars are missing! 
Kindly recheck.\")\n print(\"Bot is quiting...\")\n exit()\n\n# join check\nasync def get_user(id):\n ok = True\n try:\n await BotzHub(GetParticipantRequest(channel='@SFCorpChannel', participant=id))\n ok = True\n except UserNotParticipantError:\n ok = False\n return ok\n\n@BotzHub.on(events.NewMessage(incoming=True, pattern=\"/start\", func=lambda e: e.is_private))\nasync def start(event):\n ok = await BotzHub(GetFullUserRequest(event.sender_id))\n await event.reply(f\"Hello {ok.user.first_name}!\\nI'm SF telegraph Uploader bot.\",\n buttons=[\n Button.inline(\"Help\", data=\"help\"),\n Button.url(\"More Bots\", url=\"https://t.me/SFCorpChannel/8\")\n ])\n\n@BotzHub.on(events.callbackquery.CallbackQuery(data=\"help\"))\nasync def _(event):\n ok = await BotzHub(GetFullUserRequest(event.sender_id))\n if (await get_user(event.sender_id)) == False:\n return await event.edit(f\"{ok.user.first_name}, Tolong join ke Channel untuk menggunakan bot ini!\", buttons=[Button.url(\"Join Channel\", url=\"https://t.me/SFCorpChannel\")])\n await event.edit(f\"Kirim saya gambar dan Saya akan upload ke Telegraph!\\n\\n~[@SF Corp](https://t.me/SFCorpChannel)\")\n\n@BotzHub.on(events.NewMessage(incoming=True, func=lambda e: e.is_private and e.media))\nasync def uploader(event):\n if (await get_user(event.sender_id)) is False:\n return\n TMP_DOWNLOAD_DIRECTORY = \"./BotzHub/\"\n if not os.path.isdir(TMP_DOWNLOAD_DIRECTORY):\n os.makedirs(TMP_DOWNLOAD_DIRECTORY)\n pic = event.media\n ok = await event.reply(\"`Uploading...`\")\n downloaded_file_name = await BotzHub.download_media(pic, TMP_DOWNLOAD_DIRECTORY)\n if downloaded_file_name.endswith((\".webp\")):\n await ok.edit(\"`Oh! Itu sebuah sticker...\\nConvret dulu!!`\")\n resize_image(downloaded_file_name)\n try:\n media_urls = upload_file(downloaded_file_name)\n except exceptions.TelegraphException as exc:\n await ok.edit(\"**Error : **\" + str(exc))\n os.remove(downloaded_file_name)\n return\n else:\n os.remove(downloaded_file_name)\n await ok.edit(\"Uploaded to **Telegraph**\\n\\n 👇🏻Klik link di bawah untuk mengcopy👇🏻\\n```https://telegra.ph{}```\\n\\n~[@SF Corp](https://t.me/SFCorpChannel)\".format(media_urls[0]),\n link_preview=False,\n buttons=[\n Button.url(\"Link To File\", url=f\"https://telegra.ph{media_urls[0]}\")\n ])\n\ndef resize_image(image):\n im = Image.open(image)\n tmp = im.save(image, \"PNG\")\n\nprint(\"Bot has started.\")\nprint(\"Do visit @SFCorpChannel..\")\nBotzHub.run_until_disconnected()\n","repo_name":"pernandapurba/sf-telegraph-bot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":3881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43592871847","text":"__version__ = \"0.24\"\n\n#PYTHON VERSION CHECK\nimport sys\nPYTHON3 = int(sys.version[0]) == 3\nif PYTHON3:\n xrange = range\n\n#INTERNAL USE ONLY\ndef _normalize_rect(rect):\n x1, y1, x2, y2 = rect\n if x1 > x2:\n x1, x2 = x2, x1\n if y1 > y2:\n y1, y2 = y2, y1\n return (x1, y1, x2, y2)\n\nclass _QuadNode: \n def __init__(self, item, rect):\n self.item = item\n self.rect = rect\n \nclass _QuadTree:\n \"\"\"\n Internal backend version of the index.\n The index being used behind the scenes. Has all the same methods as the user\n index, but requires more technical arguments when initiating it than the\n user-friendly version. \n Args:\n - **x**:\n The x center coordinate of the area that the quadtree should keep track of. 
\n - **y**\n The y center coordinate of the area that the quadtree should keep track of.\n - **width**:\n How far from the xcenter that the quadtree should look when keeping track. \n - **height**:\n How far from the ycenter that the quadtree should look when keeping track\n \"\"\"\n \n def __init__(self, x, y, width, height, depth=0, maxitems=10, maxdepth=20):\n self.nodes = []\n self.children = []\n self.center = [x, y]\n self.width,self.height = width,height\n self.depth = depth\n self.maxitems = maxitems\n self.maxdepth = maxdepth\n \n def __iter__(self):\n def loopallchildren(parent):\n for child in parent.children:\n if child.children:\n for subchild in loopallchildren(parent=child):\n yield subchild\n yield child\n for child in loopallchildren(self):\n yield child\n \n def _insert(self, item, bbox):\n rect = _normalize_rect(bbox)\n if len(self.children) == 0:\n node = _QuadNode(item, rect)\n self.nodes.append(node)\n \n if len(self.nodes) > self.maxitems and self.depth < self.maxdepth:\n self._split()\n else:\n self._insert_into_children(item, rect)\n \n def _intersect(self, bbox, results=None):\n rect = bbox\n if results is None:\n rect = _normalize_rect(rect)\n results = set()\n # search children\n if len(self.children) > 0:\n if rect[0] <= self.center[0]:\n if rect[1] <= self.center[1]:\n self.children[0]._intersect(rect, results)\n if rect[3] > self.center[1]:\n self.children[1]._intersect(rect, results)\n if rect[2] > self.center[0]:\n if rect[1] <= self.center[1]:\n self.children[2]._intersect(rect, results)\n if rect[3] > self.center[1]:\n self.children[3]._intersect(rect, results)\n # search node at this level\n for node in self.nodes:\n if (node.rect[2] > rect[0] and node.rect[0] <= rect[2] and \n node.rect[3] > rect[1] and node.rect[1] <= rect[3]):\n results.add(node.item)\n return results\n \n #INTERNAL USE ONLY\n \n def _insert_into_children(self, item, rect):\n # if rect spans center then insert here\n if ((rect[0] <= self.center[0] and rect[2] > self.center[0]) and\n (rect[1] <= self.center[1] and rect[3] > self.center[1])):\n node = _QuadNode(item, rect)\n self.nodes.append(node)\n else:\n # try to insert into children\n if rect[0] <= self.center[0]:\n if rect[1] <= self.center[1]:\n self.children[0]._insert(item, rect)\n if rect[3] > self.center[1]:\n self.children[1]._insert(item, rect)\n if rect[2] > self.center[0]:\n if rect[1] <= self.center[1]:\n self.children[2]._insert(item, rect)\n if rect[3] > self.center[1]:\n self.children[3]._insert(item, rect)\n \n def _split(self):\n quartwidth = self.width/4.0\n quartheight = self.height/4.0\n halfwidth = self.width/2.0\n halfheight = self.height/2.0\n self.children = [_QuadTree(self.center[0] - quartwidth,\n self.center[1] - quartheight,\n width=halfwidth, height=halfheight,\n depth=self.depth + 1,\n maxitems=self.maxitems,\n maxdepth=self.maxdepth),\n _QuadTree(self.center[0] - quartwidth,\n self.center[1] + quartheight,\n width=halfwidth, height=halfheight,\n depth=self.depth + 1,\n maxitems=self.maxitems,\n maxdepth=self.maxdepth),\n _QuadTree(self.center[0] + quartwidth,\n self.center[1] - quartheight,\n width=halfwidth, height=halfheight,\n depth=self.depth + 1,\n maxitems=self.maxitems,\n maxdepth=self.maxdepth),\n _QuadTree(self.center[0] + quartwidth,\n self.center[1] + quartheight,\n width=halfwidth, height=halfheight,\n depth=self.depth + 1,\n maxitems=self.maxitems,\n maxdepth=self.maxdepth)]\n nodes = self.nodes\n self.nodes = []\n for node in nodes:\n self._insert_into_children(node.item, 
node.rect)\n\n#USER CLASSES AND FUNCTIONS\n \nclass Index(_QuadTree):\n    \"\"\"\n    The top spatial index to be created by the user. Once created it can be\n    populated with geographically placed members that can later be tested for\n    intersection with a user-inputted geographic bounding box. Note that the\n    index can be iterated through in a for-statement, which loops through\n    all the quad instances and lets you access their properties.\n    \"\"\"\n    def __init__(self, bbox, maxitems=10, maxdepth=20):\n        \"\"\"\n        Parameters:\n        - **bbox**: The coordinate system bounding box of the area that the quadtree should\n            keep track of, as a 4-length sequence (xmin,ymin,xmax,ymax)\n        - **maxitems** (optional): The maximum number of items allowed per quad before splitting\n            up into four new subquads. Default is 10. \n        - **maxdepth** (optional): The maximum levels of nested subquads, after which no more splitting\n            occurs and the bottommost quad nodes may grow indefinitely. Default is 20. \n        \"\"\"\n        x1,y1,x2,y2 = bbox\n        width,height = x2-x1,y2-y1\n        midx,midy = x1+width/2.0, y1+height/2.0\n        self.nodes = []\n        self.children = []\n        self.center = [midx, midy]\n        self.width,self.height = width,height\n        self.depth = 0\n        self.maxitems = maxitems\n        self.maxdepth = maxdepth\n\n    def insert(self, item, bbox):\n        \"\"\"\n        Inserts an item into the quadtree along with its bounding box.\n        Parameters:\n        - **item**: The item to insert into the index, which will be returned by the intersection method\n        - **bbox**: The spatial bounding box tuple of the item, with four members (xmin,ymin,xmax,ymax)\n        \"\"\"\n        self._insert(item, bbox)\n\n    def intersect(self, bbox):\n        \"\"\"\n        Intersects an input boundingbox rectangle with all of the items\n        contained in the quadtree. \n        Parameters:\n        - **bbox**: A spatial bounding box tuple with four members (xmin,ymin,xmax,ymax)\n        Returns:\n        - A list of inserted items whose bounding boxes intersect with the input rectangle.\n        \"\"\"\n        return self._intersect(bbox)\n    \n    def countmembers(self):\n        \"\"\"\n        Returns:\n        \n        - A count of the total number of members/items/nodes inserted into\n        this quadtree and all of its child trees.\n        \"\"\"\n        size = 0\n        for child in self.children:\n            size += child.countmembers()\n        size += len(self.nodes)\n        return size","repo_name":"KAndrew340/4553-SpatialDS-KemAndrew","sub_path":"Program1/pyqtree.py","file_name":"pyqtree.py","file_ext":"py","file_size_in_byte":8426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27087147521","text":"class Solution:\n    def maxNumOfSubstrings(self, s: str) -> List[str]:\n        bdry = {}\n        for i, c in enumerate(s):\n            if c not in bdry:\n                bdry[c] = [i, i]\n            else:\n                bdry[c][1] = i\n        for c in set(s):\n            left, right = bdry[c]\n            l, r = left, right\n            while True:\n                for oc in set(s[left:right+1]):\n                    l = min(l, bdry[oc][0])\n                    r = max(r, bdry[oc][1])\n                if [left, right] == [l, r]: break\n                left, right = l, r\n            bdry[c] = [l, r]\n\n        intervals = sorted(bdry.values(), key = lambda x: x[1])\n        rslt, last = [], -1\n        for st, ed in intervals:\n            if st > last:\n                rslt.append(s[st:ed+1])\n                last = ed\n        return rslt\n","repo_name":"Mela2014/lc_punch","sub_path":"lc1520_greedy.py","file_name":"lc1520_greedy.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1839238043","text":"from django.shortcuts import render, redirect\nfrom django.contrib import messages, auth\nfrom .models import User\nfrom .forms 
import FormUpdateUser\nfrom django.views.generic.edit import UpdateView\n\n\n# Create your views here.\ndef login(request):\n if request.method != 'POST':\n return render(request, 'login.html')\n\n usuario = request.POST.get('usuario')\n senha = request.POST.get('senha')\n\n user = auth.authenticate(username=usuario, password=senha)\n\n if not user:\n messages.error(request, 'Usuário ou senha inválidos!')\n return render(request, 'login.html')\n else:\n auth.login(request, user)\n messages.success(request, 'Seja bem vindo!')\n return redirect('index')\n\n return render(request, 'login.html')\n\ndef logout(request):\n auth.logout(request)\n return redirect('login')\n\ndef cadastro(request):\n if request.method != 'POST':\n return render(request, 'cadastro.html')\n\n nome = request.POST.get('nome')\n sobrenome = request.POST.get('sobrenome')\n matricula = request.POST.get('matricula')\n chefia = request.POST.get('chefia')\n setor = request.POST.get('setor')\n email = request.POST.get('email')\n usuario = request.POST.get('usuario')\n entrada = request.POST.get('entrada')\n saidaAlmoco = request.POST.get('saidaAlmoco')\n voltaAlmoco = request.POST.get('voltaAlmoco')\n saida = request.POST.get('saida')\n senha = request.POST.get('senha')\n senha2 = request.POST.get('senha2')\n\n if not nome or not sobrenome or not matricula or not chefia or not setor or \\\n not email or not usuario or not entrada or not saidaAlmoco or not voltaAlmoco or \\\n not saida or not senha or not senha2:\n messages.error(request, 'Nenhum campo pode estar vazio!')\n return render(request, 'cadastro.html')\n\n if senha != senha2:\n messages.error(request, 'Os campos de senha e confirmação precisam ser iguais!')\n return render(request, 'cadastro.html')\n\n if User.objects.filter(username=usuario).exists():\n messages.error(request, 'Usuário já existe!')\n return render(request, 'cadastro.html')\n\n user = User.objects.create_user(username=usuario, email=email, password=senha,\n first_name=nome, last_name=sobrenome,\n matricula=matricula, chefia=chefia,\n setor=setor, entrada=entrada,\n saidaAlmoco=saidaAlmoco, voltaAlmoco=voltaAlmoco,\n saida=saida, nome=nome, sobrenome=sobrenome,\n usuario=usuario)\n user.save()\n messages.success(request, 'Usuário cadastrado com sucesso!')\n return redirect('login')\n\n\nclass UserUpdateView(UpdateView):\n model = User\n form_class = FormUpdateUser\n template_name = 'update_user.html'\n success_url = '/'\n #fields = ['matricula', 'chefia', 'setor', 'entrada',\n # 'saidaAlmoco', 'voltaAlmoco', 'saida']","repo_name":"GibaTrindade/gestorAtividades","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3043,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70318519874","text":"class TreeNode:\n def __init__(self, data):\n self.value = data\n self.lChild = None\n self.rChild = None\n\n\ndef iterative_check(root, target):\n if not root:\n return False\n queue = [root]\n while queue:\n node = queue.pop(0)\n if node.value == target:\n return True\n if node.lChild:\n queue.append(node.lChild)\n if node.rChild:\n queue.append(node.rChild)\n return False\n\n\ndef recursive_check(root, target):\n if not root:\n return False\n if root.value == target:\n return True\n return recursive_check(root.lChild, target) or recursive_check(root.rChild, target)\n\n\nif __name__ == '__main__':\n tree = TreeNode('Drinks')\n hot = TreeNode('Hot')\n cold = TreeNode('Cold')\n tea = TreeNode('Tea')\n coffee = 
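A hypothetical urls.py wiring for the Django views above (route paths and names are assumptions; the project's actual URL configuration is not part of this record):

from django.urls import path
from . import views

urlpatterns = [
    path('login/', views.login, name='login'),
    path('logout/', views.logout, name='logout'),
    path('cadastro/', views.cadastro, name='cadastro'),
    # UpdateView looks the User up by the 'pk' URL keyword by default
    path('user/<int:pk>/update/', views.UserUpdateView.as_view(), name='update_user'),
]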
TreeNode('Coffee')\n coke = TreeNode('Coke')\n pepsi = TreeNode('Pepsi')\n\n tree.lChild = hot\n tree.rChild = cold\n hot.lChild = tea\n hot.rChild = coffee\n cold.lChild = coke\n cold.rChild = pepsi\n\n print(recursive_check(tree, 'Coke'))\n print(iterative_check(tree, 'Coke'))\n print(recursive_check(tree, 'Code'))\n print(iterative_check(tree, 'Code'))\n\n","repo_name":"dhankhar313/Data-Structures","sub_path":"Udemy Course/Tree/Practice/treeIncludes.py","file_name":"treeIncludes.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4457788826","text":"# 1. 완전수 구하기\ndef findPerfect():\n perfectNum = []\n for number in range(1, 10000):\n sum = 0\n for div in range(1, number):\n if number % div == 0:\n sum += div\n if number == sum:\n perfectNum.append(number)\n return perfectNum\n# print(findPerfect())\n\n\n# 2. 피보나치 수열 구하기\ndef fibo(n):\n if n == 0 or n == 1:\n return 1\n return fibo(n - 1) + fibo(n - 2)\n\n\n# 3. 자연수 합 경우의 수 구하기\ndef countWay(n):\n if n == 1:\n return 1\n if n == 2:\n return 2\n if n == 3:\n return 4\n return countWay(n - 1) + countWay(n - 2) + countWay(n - 3)\n","repo_name":"JwahoonKim/Introduction-to-Computing-for-Industrial-Engineering-2021Spring-","sub_path":"실습2/tmpHW.py","file_name":"tmpHW.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43833298733","text":"import random\n\nA = []\nfor i in range(10):\n A.append(random.randint(0, 100))\nprint(A)\nx = A\nfor i, j in enumerate(x):\n T = x[i+1:]\n if T == []:\n break\n if x[i] > min(T):\n t = x.index(min(T))\n x[i], x[t] = x[t], x[i]\nprint(A)\n","repo_name":"amazing-2020/pdf","sub_path":"Python/code case/code case 238.py","file_name":"code case 238.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18974424997","text":"import ast\nimport re\nfrom collections import defaultdict\nfrom hatlog.env import Env\n\ndef flatten(root):\n if len(root.body) != 1 or not isinstance(root.body[0], ast.FunctionDef):\n raise ValueError(\"hatlog supports expects a function\")\n x = Flattener()\n x.flatten(root.body[0])\n return [x.nodes, root.body[0].name]\n\nNOT_SUPPORTED = defaultdict(set,\n FunctionDef={'keywords', 'starargs', 'kwargs'},\n BinOp={'op'},\n UnaryOp={'op'},\n AugAssign={'op'},\n Call={'keywords', 'starargs', 'kwargs'},\n For={'orelse'},\n Attribute={'ctx'})\n\nclass Flattener:\n def __init__(self):\n self.env = {}\n self.nodes = []\n self.type_index = -1\n self.env = Env()\n self.args = []\n self.return_type = None\n self.function = ''\n\n def flatten(self, node):\n if isinstance(node, list):\n f = [self.flatten(e) for e in node]\n return f\n elif node is None:\n return 'v'\n else:\n sub = getattr(\n self,\n 'flatten_%s' % type(node).__name__.lower(),\n self.default)\n return sub(node)\n\n def default(self, node):\n f = [self.flatten(getattr(node, f)) for f in node._fields if f not in NOT_SUPPORTED[type(node).__name__]]\n node_type = self.new_type()\n self.nodes.append(('z_%s' % self.to_snake_case(type(node).__name__), f, node_type))\n return node_type\n\n def flatten_call(self, node):\n if isinstance(node.func, ast.Name) and node.func.id == self.function:\n return self.flatten_rec(node)\n elif isinstance(node.func, ast.Attribute):\n return self.flatten_method_call(node)\n else:\n function = 
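The recursive fibo and countWay functions in the tmpHW.py record above recompute the same subproblems exponentially many times; a memoized variant (a sketch, not from the source) evaluates each n only once:

from functools import lru_cache

@lru_cache(maxsize=None)
def count_way(n):
    # Same recurrence as countWay above: ordered ways to write n
    # as a sum of 1s, 2s and 3s.
    if n == 1:
        return 1
    if n == 2:
        return 2
    if n == 3:
        return 4
    return count_way(n - 1) + count_way(n - 2) + count_way(n - 3)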
self.flatten(node.func)\n args = [self.flatten(e) for e in node.args]\n return_type = self.new_type()\n\n if isinstance(node.func, ast.Name) and node.func.id not in self.env.values: # named\n self.nodes.append(('z_call', [node.func.id, args], return_type))\n else:\n self.nodes.append(('z_fcall', [function, args], return_type))\n\n return return_type\n\n def flatten_subscript(self, node):\n value = self.flatten(node.value)\n node_type = self.new_type()\n if isinstance(node.slice, ast.Index):\n index = self.flatten(node.slice.value)\n self.nodes.append(('z_index', [value, index], node_type))\n else:\n lower = self.flatten(node.slice.lower) if node.slice.lower else None\n upper = self.flatten(node.slice.upper) if node.slice.upper else None\n if lower and upper is None:\n upper = lower\n elif lower is None and upper:\n lower = upper\n else:\n raise ValueError('hatlog expects only slice like [:x], [x:] or [x:y]')\n self.nodes.append(('z_slice', [value, lower, upper], node_type))\n return node_type\n\n def flatten_num(self, node):\n if isinstance(node.n, int):\n return 'int'\n else:\n return 'float'\n\n def flatten_rec(self, node):\n '''\n we know that functions return the same value\n prolog terms cant be rec so we =\n '''\n if len(node.args) != len(self.args):\n raise ValueError(\"%s expected %d args\" % (len(self.args)))\n for a, (_, b) in zip(node.args, self.args):\n c = self.flatten(a)\n self.nodes.append(('=', [c], b))\n return self.return_type\n\n def flatten_str(self, node):\n return 'str'\n\n def flatten_compare(self, node):\n if len(node.comparators) != 1:\n raise ValueError(\"hatlog supports only 1 comparator\")\n if isinstance(node.ops[0], ast.Eq):\n op = 'z_eq'\n else:\n op = 'z_cmp'\n a = self.flatten(node.left)\n b = self.flatten(node.comparators[0])\n node_type = self.new_type()\n self.nodes.append((op, [a, b], node_type))\n return node_type\n\n def flatten_list(self, node):\n if len(node.elts) == 0:\n sub_types = [self.new_type()]\n else:\n sub_types = [self.flatten(a) for a in node.elts]\n node_type = self.new_type()\n self.nodes.append(('z_list', sub_types, node_type))\n return node_type\n\n def flatten_method_call(self, node):\n '''\n A call with an\n attribute as func\n '''\n receiver = self.flatten(node.func.value)\n args = list(map(self.flatten, node.args))\n node_type = self.new_type()\n self.nodes.append(('z_method_call', [receiver, node.func.attr, args], node_type))\n return node_type\n\n\n def flatten_dict(self, node):\n if len(node.keys) == 0:\n sub_types = [self.new_type(), self.new_type()]\n else:\n sub_types = zip([self.flatten(a) for a in node.keys], [self.flatten(b) for b in node.values])\n node_type = self.new_type()\n self.nodes.append(('z_dict', sub_types, node_type))\n return node_type\n\n def flatten_assign(self, node):\n if len(node.targets) != 1:\n raise ValueError(\"assignment normal\")\n node.targets = node.targets[0]\n return self.default(node)\n\n def flatten_name(self, node):\n if node.id == 'True' or node.id == 'False':\n return 'bool'\n elif node.id == 'None':\n return 'void'\n else:\n name_type = self.env[node.id]\n if not name_type:\n name_type = self.new_type()\n self.env[node.id] = name_type\n return name_type\n\n def flatten_functiondef(self, node):\n self.args = [(arg.arg, self.new_type()) for arg in node.args.args]\n self.return_type = 'X'\n self.function = node.name\n self.env[node.name] = node.name\n self.env = Env(dict(self.args), self.env)\n [self.flatten(child) for child in node.body]\n self.env = self.env.parent\n 
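# Illustration (a worked example inferred from the code above, not output
# shipped with the repo): for
#     def inc(x): return x + 1
# flatten_functiondef yields constraints roughly like
#     ('z_bin_op', ['Z0', 'int'], 'Z1')   # x + 1
#     ('=', ['Z1'], 'X')                  # the returned value unifies with X
#     ('z_function', ['Z0'], 'X')         # inc maps a Z0 to an X
# which presumably become Prolog goals elsewhere in hatlog.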
self.nodes.append(('z_function', [a[1] for a in self.args], self.return_type))\n return self.env[node.name]\n\n def flatten_expr(self, node):\n return self.flatten(node.value)\n\n def flatten_return(self, node):\n v = self.flatten(node.value)\n self.nodes.append(('=', [v], self.return_type))\n return v\n\n def new_type(self):\n self.type_index += 1\n return 'Z%d' % self.type_index\n\n def to_snake_case(self, label):\n return re.sub(r'([a-z])([A-Z])', r'\\1_\\2', label).lower()\n\n# BinOp(2, BinOp(b, a))\n\n# bin_op(X1, X2, X3)\n# bin_op(int, X3, X4)","repo_name":"alehander92/hatlog","sub_path":"hatlog/flattener.py","file_name":"flattener.py","file_ext":"py","file_size_in_byte":6624,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"61"} +{"seq_id":"750403989","text":"from PyQt5 import QtWidgets, uic\nfrom PyQt5 import QtCore, QtGui, QtWidgets, QtMultimedia\nfrom PyQt5.QtWidgets import QLabel, QApplication, QWidget\nfrom PyQt5.QtGui import QFont, QPixmap\nimport sys\nimport pyqtgraph as pg\nimport os\nimport io\nimport folium\nfrom PyQt5.QtWebEngineWidgets import QWebEngineView\nfrom threading import Thread\nimport threading\nfrom worker import long_running_function, ProgressWorker\nfrom show_data import show_data\nfrom get_data import get_data_from_server\nimport config\nimport sys\nimport time\n\nfrom PyQt5.QtCore import (QCoreApplication, QObject, QRunnable, QThread,\n QThreadPool, pyqtSignal)\nfrom PyQt5.QtCore import pyqtSlot\n\n\nCURRENT_DIR = os.path.dirname(os.path.realpath(__file__))\n\n\ndef sound():\n\n filename = os.path.join(CURRENT_DIR, \"sound/red_danger_alarm_2_2.mp3\")\n app2 = QtCore.QCoreApplication(sys.argv)\n player = QtMultimedia.QMediaPlayer()\n url = QtCore.QUrl.fromLocalFile(filename)\n player.setMedia(QtMultimedia.QMediaContent(url))\n player.play()\n\n sys.exit(app2.exec_())\n\nui=''\n\nclass Ui(QtWidgets.QMainWindow):\n\n pixmapG = ''\n pixmapR = ''\n webView = ''\n\n def __init__(self, parent=None):\n super(Ui, self).__init__()\n uic.loadUi('CAN-SAT2.ui', self)\n global ui\n ui=self\n Coordinate_x = self.findChild(QLabel, \"label_103\")\n Coordinate_x.setText(str(config.coordinate_x))\n Coordinate_y = self.findChild(QLabel, \"label_105\")\n Coordinate_y.setText(str(config.coordinate_y))\n\n self.iconText = self.setWindowTitle(\"FUM_CAN\")\n self.windowIcon = self.setWindowIcon(QtGui.QIcon('img/logo-white.jpg'))\n\n # icon\n IPressure = self.findChild(QLabel, \"label_5\")\n pixmap = QPixmap('img/pressure.jpg')\n low_rez = QtCore.QSize(25, 25)\n pixmap = pixmap.scaled(low_rez)\n IPressure.setPixmap(pixmap)\n\n # IAcceleration = self.findChild(QLabel, \"label_6\")\n # pixmap = QPixmap('img/Acceleration.jpg')\n # low_rez = QtCore.QSize(25, 25)\n # pixmap = pixmap.scaled(low_rez)\n # IAcceleration.setPixmap(pixmap)\n\n IInTemp = self.findChild(QLabel, \"label_7\")\n pixmap = QPixmap('img/icons8-temperature-inside2.jpg')\n low_rez = QtCore.QSize(25, 25)\n pixmap = pixmap.scaled(low_rez)\n IInTemp.setPixmap(pixmap)\n\n IIOutTemp = self.findChild(QLabel, \"label_8\")\n pixmap = QPixmap('img/temperature-outside4.jpg')\n low_rez = QtCore.QSize(25, 25)\n pixmap = pixmap.scaled(low_rez)\n IIOutTemp.setPixmap(pixmap)\n\n IHiumidity = self.findChild(QLabel, \"label_9\")\n pixmap = QPixmap('img/humidity-.jpg')\n pixmap = pixmap.scaled(low_rez)\n IHiumidity.setPixmap(pixmap)\n\n IUVIndex = self.findChild(QLabel, \"label_11\")\n pixmap = QPixmap('img/sun.jpg')\n pixmap = pixmap.scaled(low_rez)\n IUVIndex.setPixmap(pixmap)\n\n # ISensor 
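The to_snake_case helper in the flattener above turns CamelCase AST class names into the z_-prefixed functor names used for constraints; a quick standalone check (inputs chosen for illustration):

import re

def to_snake_case(label):
    return re.sub(r'([a-z])([A-Z])', r'\1_\2', label).lower()

print(to_snake_case('FunctionDef'))  # function_def
print(to_snake_case('BinOp'))        # bin_op
print(to_snake_case('AugAssign'))    # aug_assign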
= self.findChild(QLabel, \"label_12\")\n # pixmap = QPixmap('img/sensor.jpg')\n # low_rez = QtCore.QSize(25, 23)\n # pixmap = pixmap.scaled(low_rez)\n # ISensor.setPixmap(pixmap)\n\n ISensor_pressure = self.findChild(QLabel, \"label_78\")\n pixmap = QPixmap('img/pressure.jpg')\n low_rez = QtCore.QSize(25, 25)\n pixmap = pixmap.scaled(low_rez)\n ISensor_pressure.setPixmap(pixmap)\n\n ISensor_acceleration = self.findChild(QLabel, \"label_82\")\n pixmap = QPixmap('img/Acceleration.jpg')\n low_rez = QtCore.QSize(25, 23)\n pixmap = pixmap.scaled(low_rez)\n ISensor_acceleration.setPixmap(pixmap)\n\n ISensor_temp = self.findChild(QLabel, \"label_84\")\n pixmap = QPixmap('img/icons8-temperature-inside2.jpg')\n low_rez = QtCore.QSize(25, 23)\n pixmap = pixmap.scaled(low_rez)\n ISensor_temp.setPixmap(pixmap)\n\n ISensor_hiumidity = self.findChild(QLabel, \"label_79\")\n pixmap = QPixmap('img/humidity-.jpg')\n low_rez = QtCore.QSize(23, 23)\n pixmap = pixmap.scaled(low_rez)\n ISensor_hiumidity.setPixmap(pixmap)\n\n ISensor_air = self.findChild(QLabel, \"label_83\")\n pixmap = QPixmap('img/wind2.jpg')\n low_rez = QtCore.QSize(21, 21)\n pixmap = pixmap.scaled(low_rez)\n ISensor_air.setPixmap(pixmap)\n\n ISensor_UV = self.findChild(QLabel, \"label_85\")\n pixmap = QPixmap('img/sun.jpg')\n low_rez = QtCore.QSize(25, 23)\n pixmap = pixmap.scaled(low_rez)\n ISensor_UV.setPixmap(pixmap)\n\n ISatellite_1 = self.findChild(QLabel, \"label_80\")\n pixmap = QPixmap('img/satellite.jpg')\n low_rez = QtCore.QSize(23, 23)\n pixmap = pixmap.scaled(low_rez)\n ISatellite_1.setPixmap(pixmap)\n\n ISatellite_2 = self.findChild(QLabel, \"label_81\")\n pixmap = QPixmap('img/satellite.jpg')\n pixmap = pixmap.scaled(low_rez)\n ISatellite_2.setPixmap(pixmap)\n\n ILogo = self.findChild(QLabel, \"label_107\")\n pixmap = QPixmap('img/logo.jpg')\n low_rez = QtCore.QSize(50, 45)\n pixmap = pixmap.scaled(low_rez)\n ILogo.setPixmap(pixmap)\n\n self.receivedImage = QLabel()\n pixmap = QPixmap('img/1.jpg')\n low_rez = QtCore.QSize(321, 200)\n pixmap = pixmap.scaled(low_rez)\n self.receivedImage.setPixmap(pixmap)\n imgWidget = self.findChild(QWidget, \"widget_7\")\n self.receivedImage.setParent(imgWidget)\n\n # Sensors\n self.pixmapG = QPixmap('img/button-green.jpg')\n self.pixmapR = QPixmap('img/button-red.jpg')\n low_rez = QtCore.QSize(18, 18)\n self.pixmapG = self.pixmapG.scaled(low_rez)\n self.pixmapR = self.pixmapR.scaled(low_rez)\n\n # sound\n filename = os.path.join(CURRENT_DIR, \"sound/red_danger_alarm_2_2.mp3\")\n # app2 = QtCore.QCoreApplication(sys.argv)\n player = QtMultimedia.QMediaPlayer()\n url = QtCore.QUrl.fromLocalFile(filename)\n player.setMedia(QtMultimedia.QMediaContent(url))\n # player.play()\n # time.sleep(2)\n # QtCore.QCoreApplication.quit()\n # sys.exit(app2.exec_())\n # sound()\n\n # height\n self.graphWidget = pg.PlotWidget()\n\n self.graphWidget.setBackground('w')\n self.graphWidget.plot(config.height_x, config.height_y)\n self.graphWidget.setGeometry(0, 0, 321, 191)\n self.graphWidget.setParent(self.findChild(QWidget, \"widget_5\"))\n\n # Map\n m = folium.Map(\n tiles='OpenStreetMap',\n zoom_start=21,\n location=(config.coordinate_x, config.coordinate_y),\n width=321,\n height=161\n )\n folium.Marker(\n location=[config.coordinate_x, config.coordinate_y],\n popup='fumcan',\n ).add_to(m)\n # save map data to data object\n data = io.BytesIO()\n m.save(data, close_file=False)\n\n self.webView = QWebEngineView()\n self.webView.setHtml(data.getvalue().decode())\n self.webView.setStyleSheet(\"border-radius: 
30px;\")\n self.webView.setParent(self.findChild(QWidget, \"widget_9\"))\n\n get_data_t = Thread(target=get_data_from_server)\n get_data_t.start()\n\n t = Thread(target=show_data, args=(self,), daemon=True)\n t.start()\n\n # self.launch()\n # t = Thread(target=self.on_click, daemon=True)\n # t.start()\n\n self.show()\n \n # @pyqtSlot()\n # def on_click(self):\n # time.sleep(3)\n # print('PyQt5 button click')\n # image = QFileDialog.getOpenFileName(None, 'OpenFile', '', \"Image file(*.jpg)\")\n # imagePath = image[0]\n # pixmap = QPixmap('img/logo-white.jpg')\n # self.receivedImage.setPixmap(pixmap)\n \n\n def launch(self):\n # progressBar_2 Battery\n worker_2 = ProgressWorker()\n worker_2.value_change.connect(self.progressBar_2.setValue)\n threading.Thread(\n target=long_running_function,\n args=(\"Battery\",),\n kwargs=dict(baz=\"baz\", worker=worker_2),\n daemon=True,\n ).start()\n\n # progressBar_3 Pressure\n worker_3 = ProgressWorker()\n worker_3.value_change.connect(self.progressBar_3.setValue)\n threading.Thread(\n target=long_running_function,\n args=(\"Pressure\",),\n kwargs=dict(baz=\"baz\", worker=worker_3),\n daemon=True,\n ).start()\n\n # progressBar_5\n worker_5 = ProgressWorker()\n worker_5.value_change.connect(self.progressBar_5.setValue)\n threading.Thread(\n target=long_running_function,\n args=(\"hiumidit\",),\n kwargs=dict(baz=\"baz\", worker=worker_5),\n daemon=True,\n ).start()\n\n\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n ui = Ui()\n app.exec_()\n print('GodBy')\n","repo_name":"zeynabT/cansat","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8896,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"63372673","text":"import os\n\nfrom twisted.trial import unittest\n\nfrom twisted.vfs.backends import inmem\nfrom twisted.vfs import ivfs\n\n\n\nclass InmemTestCase(unittest.TestCase):\n \"\"\"\n Test operations on L{inmem.FakeDirectory} and L{inmem.FakeFile}.\n \"\"\"\n\n def test_renameFailure(self):\n \"\"\"\n Renaming a file into an existing directory should fail.\n \"\"\"\n # Make some VFS sample data\n root = inmem.FakeDirectory()\n fakedir = inmem.FakeDirectory('fakedir', root)\n f = inmem.FakeFile('file.txt', root, 'wobble\\n')\n root._children = {'fakedir': fakedir, 'file.txt': f}\n\n # Trying to rename a file over a directory fails\n self.assertRaises(ivfs.VFSError, f.rename, 'fakedir')\n\n # The original file should still exist.\n self.failUnless(root.child('file.txt') is f)\n\n\n def test_nonExclusive(self):\n \"\"\"\n Opening a file in non-exclusive mode should result in the same file.\n \"\"\"\n root = inmem.FakeDirectory()\n testFile1 = root.createFile(\"foo\")\n testFile2 = root.createFile(\"foo\")\n self.assertIdentical(testFile1, testFile2)\n\n\n def test_truncate(self):\n \"\"\"\n Opening an inmem file with C{os.O_TRUNC} flag should reset its content.\n \"\"\"\n root = inmem.FakeDirectory()\n testFile = root.createFile(\"foo\")\n testFile.open(0)\n testFile.writeChunk(0, \"bar\")\n self.assertEquals(testFile.readChunk(0, 3), \"bar\")\n testFile.open(os.O_TRUNC)\n self.assertEquals(testFile.readChunk(0, 3), \"\")\n\n\n def test_createFileExclusive(self):\n \"\"\"\n If the C{createFile} method is called with the keyword C{exclusive}\n and that the file already exists, it should fail with a\n L{ivfs.AlreadyExistsError} exception.\n \"\"\"\n root = inmem.FakeDirectory()\n testFile = root.createFile(\"foo\")\n 
self.assertRaises(ivfs.AlreadyExistsError,\n root.createFile, \"foo\", exclusive=True)\n\n\n def test_createFileOverDirectory(self):\n \"\"\"\n Creating a file with the name of an existing directory should fail\n with an C{IOError}.\n \"\"\"\n root = inmem.FakeDirectory()\n testDir = root.createDirectory(\"bar\")\n self.assertRaises(IOError, root.createFile, \"bar\")\n\n\n","repo_name":"Almad/twisted","sub_path":"twisted/vfs/test/test_inmem.py","file_name":"test_inmem.py","file_ext":"py","file_size_in_byte":2321,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"} +{"seq_id":"41814246661","text":"from PIL import Image, ImageEnhance\nimport numpy\n\n\ndef find_coefficients(source_coords, target_coords):\n matrix = []\n for s, t in zip(source_coords, target_coords):\n matrix.append([t[0], t[1], 1, 0, 0, 0, -s[0] * t[0], -s[0] * t[1]])\n matrix.append([0, 0, 0, t[0], t[1], 1, -s[1] * t[0], -s[1] * t[1]])\n A = numpy.matrix(matrix, dtype=float)\n B = numpy.array(source_coords).reshape(8)\n res = numpy.dot(numpy.linalg.inv(A.T * A) * A.T, B)\n return numpy.array(res).reshape(8)\n\n\ndef image_manipulation(co_ords, scale_r, file_path):\n for vert in co_ords:\n vert[0] = int(scale_r * vert[0])\n vert[1] = int(scale_r * vert[1])\n\n least_x = min(co_ords[0][0], co_ords[3][0])\n least_y = min(co_ords[0][1], co_ords[1][1])\n\n coeffs = find_coefficients(co_ords, [[0, 0],\n [max(co_ords[1][0], co_ords[2][0]) - least_x, 0],\n [max(co_ords[1][0], co_ords[2][0]) - least_x,\n max(co_ords[2][1], co_ords[3][1]) - least_y],\n [0, max(co_ords[2][1], co_ords[3][1]) - least_y]])\n\n img = Image.open(file_path)\n img = img.transform((max(co_ords[1][0], co_ords[2][0]) - least_x, max(co_ords[2][1], co_ords[3][1]) - least_y),\n Image.PERSPECTIVE,\n coeffs,\n Image.BICUBIC)\n # img.show()\n return img\n","repo_name":"nERD8932/OCRP","sub_path":"imagemanip.py","file_name":"imagemanip.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"72772895873","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport json\nimport os\nimport psycopg2\nimport urlparse\nfrom flask import Flask, Response, send_file, abort, request\n\napp = Flask(__name__)\nurlparse.uses_netloc.append(\"postgres\")\ndb_url = urlparse.urlparse(os.environ.get(\"DATABASE_URL\", 'postgres://dskut:ded3893706@localhost/fenegram'))\n\nBOTS_COUNT=20\nCHANTS_COUNT=20\nBACKEND_URL=\"http://fenegram.herokuapp.com\"\n\n@app.route('/')\ndef index():\n return '''\n Welcome to Fenegram backend!\n
    \n Available methods:\n    \n /ping\n    \n /bots\n
    \n /chants\n '''\n\n@app.route('/ping')\ndef ping():\n return 'pong'\n\n@app.route('/static/')\ndef static_file(filename):\n path = \"static/\" + filename\n if os.path.isfile(path):\n return send_file(path)\n else:\n abort(404)\n\ndef get_conn():\n return psycopg2.connect(\n database=db_url.path[1:],\n user=db_url.username,\n password=db_url.password,\n host=db_url.hostname,\n port=db_url.port\n )\n\ndef get_bots():\n conn = get_conn()\n cur = conn.cursor()\n cur.execute('select b.username, b.is_main, h.revision from bots b, (select max(revision) as revision from bots_history) h;')\n rows = cur.fetchall()\n bots = []\n revision = 0\n for row in rows:\n bot = {\"username\": row[0], \"is_main\": row[1]}\n bots.append(bot)\n revision = row[2]\n conn.close()\n return {\"revision\": int(revision), \"items\": bots};\n\ndef make_json_response(d):\n text = json.dumps(d, ensure_ascii=False)\n return Response(text, mimetype='application/json; charset=utf-8')\n\ndef get_chants():\n conn = get_conn()\n cur = conn.cursor()\n cur.execute('select title, lyrics, url from chants;')\n rows = cur.fetchall()\n res = []\n for row in rows:\n chant = {\"title\": row[0], \"lyrics\": row[1], \"url\": row[2]}\n res.append(chant)\n conn.close()\n return res\n\ndef get_bots_changes(revision):\n conn = get_conn()\n cur = conn.cursor()\n cur.execute('select username, action, revision as revision from bots_history where revision > %s' % revision)\n rows = cur.fetchall()\n\n new_revision = int(revision)\n added_bots = []\n removed_bots = []\n updated_bots = []\n for row in rows:\n username = row[0]\n action = row[1]\n new_revision = max(new_revision, int(row[2]))\n if action == 'add':\n added_bots.append(username)\n elif action == 'remove':\n removed_bots.append(username)\n elif action == 'update':\n updated_bots.append(username)\n\n new_bots = added_bots + updated_bots\n added_bots_details = []\n updated_bots_details = []\n\n if new_bots:\n cur.execute('select username, is_main from bots where username in %s',(tuple(added_bots + updated_bots), ))\n rows = cur.fetchall()\n for row in rows:\n details = {\"username\": row[0], \"is_main\": row[1]}\n if row[0] in added_bots:\n added_bots_details.append(details)\n else:\n updated_bots_details.append(details)\n\n conn.close()\n removed_bots_details = [{\"username\": bot} for bot in removed_bots]\n return {\n \"revision\": new_revision,\n \"removed\": removed_bots_details,\n \"added\": added_bots_details,\n \"updated\": updated_bots_details\n }\n\n@app.route('/bots')\ndef bots():\n revision = request.args.get('revision')\n if revision is None or revision == '':\n resp_dict = get_bots()\n return make_json_response(resp_dict)\n else:\n return make_json_response(get_bots_changes(revision))\n\n@app.route('/chants')\ndef chants():\n resp_dict = {\"items\": get_chants()}\n return make_json_response(resp_dict)\n\nif __name__ == '__main__':\n app.run(debug=True) #FIXME: remove debug\n","repo_name":"dskut/fenegram-bot-store","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33508866155","text":"from multiprocessing import Pool\n\ndef job(num):\n return num*2\n\nif __name__=='__main__':\n # how many process that we will define\n p=Pool(processes=20)\n data=p.map(job,range(20))\n data2=p.map(job,[1,2,3])\n p.close()\n print(data)\n print(data2)","repo_name":"bututoubaobei/intermediate_python","sub_path":"11_getting returned values from 
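The Flask app above exposes a small JSON API; a hypothetical client session based on its handlers (the requests library and the sketched responses are assumptions, not part of the repo):

import requests

base = 'http://fenegram.herokuapp.com'  # BACKEND_URL from the module above
print(requests.get(base + '/ping').text)        # 'pong'
bots = requests.get(base + '/bots').json()      # {'revision': ..., 'items': [...]}
delta = requests.get(base + '/bots', params={'revision': bots['revision']}).json()
# delta carries only the 'added'/'removed'/'updated' bots past that revision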
processing.py","file_name":"11_getting returned values from processing.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74752736835","text":"import cv2, pyautogui, time\nimport numpy as np\n\ndef hpmpDetect():\n left = 27\n top = 33\n right = 192\n bottom = 35\n im = pyautogui.screenshot(region=(left,top,right,bottom))\n im.save(\"HPbar2.png\")\n\n img1 = cv2.imread(\"HPbar2.png\")\n hsv = cv2.cvtColor(img1,cv2.COLOR_BGR2HSV)\n\n #upper red\n lower_red2 = np.array([170,50,50])\n upper_red2 = np.array([180,255,255])\n\n # upper blue\n lower_blue = np.array([110, 50, 50])\n upper_blue = np.array([130, 255, 255])\n\n mask1 = cv2.inRange(hsv, lower_blue, upper_blue)\n\n mask2 = cv2.inRange(hsv, lower_red2, upper_red2)\n\n for array in mask2:\n nonZeroCountRed = np.count_nonzero(array)\n if nonZeroCountRed:\n print(nonZeroCountRed)\n break\n print(f'Char has %{round((nonZeroCountRed*100)/191)} HP!')\n\n for array2 in mask1:\n nonZeroCountBlue = np.count_nonzero(array2)\n if nonZeroCountBlue:\n print(nonZeroCountBlue)\n break\n print(f'Char has %{round((nonZeroCountBlue*100)/191)} MP!')\n\n return round((nonZeroCountRed*100)/191), round((nonZeroCountBlue*100)/191)\n\nprint('Starting in 3 seconds!')\ntime.sleep(3)\nhpBar, mpBar = hpmpDetect()\nprint(hpBar)\nprint(mpBar)","repo_name":"alperdemiryay/KO_Attack_Bot","sub_path":"healthDetection.py","file_name":"healthDetection.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10915370679","text":"import torch\nimport pandas as pd\nimport numpy as np\nimport os\nimport sys\nimport argparse\nimport fastai\nimport torch.nn as nn\nimport time, datetime\nimport torchvision\nimport torch.nn.functional as F\nimport evaluation as evaluation\nimport torch.backends.cudnn as cudnn\n\nfrom torch.utils.data import DataLoader\nfrom isic import ISICDataset\nfrom schedulers import get_scheduler\nfrom fastai import *\nfrom fastai.vision import *\nfrom fastai.vision import get_transforms\nfrom models import SimpleCNN\nfrom pathlib import Path\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torchvision import transforms\nfrom tqdm import tqdm\nfrom GCPLoss import GCPLoss\nfrom isic_datafileload import load_datafile\nfrom torch.autograd import Variable\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n#Hardcoded parameters\nfeat_dim = 512 #Resnet34 features\n## Transforms\nmean = (0.4914, 0.4822, 0.4465)\nstd = (0.2023, 0.1994, 0.2010)\n\ndef mixup_data(x, y, alpha=1.0, use_cuda=True):\n '''Returns mixed inputs, pairs of targets, and lambda'''\n if alpha > 0:\n lam = np.random.beta(alpha, alpha)\n else:\n lam = 1\n\n batch_size = x.size()[0]\n if use_cuda:\n index = torch.randperm(batch_size).cuda()\n else:\n index = torch.randperm(batch_size)\n\n mixed_x = lam * x + (1 - lam) * x[index, :]\n y_a, y_b = y, y[index]\n return mixed_x, y_a, y_b, lam\n\ndef mixup_criterion(criterion, pred, y_a, y_b, lam):\n '''Returns mixup loss for Cross Entropy criterion only between two samples'''\n return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)\n\ndef train_one_epoch(args, writer, model, criterion, data_loader, optimizer, epoch, max_norm=0.0):\n '''Returns train loss after training the model for one epoch\n All different kind of input arguments regarding the loss function\n and whether to do mixup or not are covered in this section'''\n 
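healthDetection.py above masks red using only the upper hue band [170, 180]; in OpenCV's HSV space red also wraps around to the low band near 0, so some bright reds can slip through. A combined mask (a sketch under that assumption):

import cv2
import numpy as np

def red_mask(bgr_img):
    hsv = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2HSV)
    low = cv2.inRange(hsv, np.array([0, 50, 50]), np.array([10, 255, 255]))
    high = cv2.inRange(hsv, np.array([170, 50, 50]), np.array([180, 255, 255]))
    return cv2.bitwise_or(low, high)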
model.train()\n criterion.train()\n count = 0\n for images, targets in tqdm(data_loader):\n count += 1\n images = images.to(args.device)\n targets = targets.to(args.device)\n \n # Pass the inputs through the CNN model.\n if args.loss == 'Softmax':\n outputs = model(images)\n if args.mixup == 1:\n inputs, targets_a, targets_b, lam = mixup_data(images, targets,\n args.alpha)\n inputs, targets_a, targets_b = map(Variable, (inputs,\n targets_a, targets_b))\n loss = mixup_criterion(criterion, outputs, targets_a, targets_b, lam)\n else:\n loss = criterion(outputs, targets)\n elif args.loss == 'GCPLoss':\n if args.mixup == 1:\n inputs, targets_a, targets_b, lam = mixup_data(images, targets,\n args.alpha)\n inputs, targets_a, targets_b = map(Variable, (inputs,\n targets_a, targets_b))\n x, y = model(inputs)\n logits, loss = criterion(x, y, labels=targets, targets_a=targets_a, targets_b=targets_b, lam=lam, mixup=args.mixup)\n else:\n x, y = model(images)\n logits, loss = criterion(x, y, targets, args.mixup)\n \n if writer is not None and count % args.log_steps == 1:\n writer.add_scalars('Loss/train', {'loss': loss}, epoch*len(data_loader)+count)\n \n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if args.debug == 1 and count >= 1:\n return loss\n\n return loss \n\n@torch.no_grad()\ndef evaluate(args, writer, model, criterion, data_loader, data_loader_ood, epoch):\n '''Returns validation loss after validating the model on the valid set;\n Also returns the OOD detection results after validating on the OOD set.'''\n model.eval()\n criterion.eval()\n count = 0\n size = 0\n running_loss = 0.0\n running_corrects = 0\n _pred_k, _pred_u, _labels = [], [], []\n\n for images, targets in tqdm(data_loader):\n count += 1\n images = images.to(args.device)\n targets = targets.to(args.device)\n \n # Pass the inputs through the CNN model.\n if args.loss == 'Softmax':\n outputs = model(images)\n loss = criterion(outputs, targets)\n _, preds = torch.max(outputs, 1)\n _pred_k.append(F.softmax(outputs,dim=1).data.cpu().numpy())\n elif args.loss == 'GCPLoss':\n x, y = model(images)\n logits, loss = criterion(x, y, targets)\n preds = logits.data.max(1)[1]\n _pred_k.append(logits.data.cpu().numpy())\n \n # Calculate the batch loss.\n running_loss += loss.item() * images.size(0)\n \n running_corrects += torch.sum(preds == targets.data)\n \n size += images.size(0)\n\n if args.debug == 1 and count >=1:\n break\n\n count_out = 0\n for images, targets, _ in tqdm(data_loader_ood):\n count_out += 1\n images = images.to(args.device)\n targets = targets.to(args.device)\n \n # Pass the inputs through the CNN model.\n if args.loss == 'Softmax':\n outputs = model(images)\n _pred_u.append(F.softmax(outputs,dim=1).data.cpu().numpy())\n elif args.loss == 'GCPLoss':\n x, y = model(images)\n logits, _ = criterion(x, y)\n _pred_u.append(logits.data.cpu().numpy())\n \n if args.debug == 1 and count_out >= 1:\n break\n \n # end epoch\n epoch_loss = running_loss / size\n epoch_acc = running_corrects.double() / size\n \n disp_str = 'Epoch {} Losses: {:.4f}'.format(epoch+1, epoch_loss)\n\n _pred_k = np.concatenate(_pred_k, 0)\n _pred_u = np.concatenate(_pred_u, 0)\n\n # Out-of-Distribution detction evaluation\n x1, x2 = np.max(_pred_k, axis=1), np.max(_pred_u, axis=1)\n results = evaluation.metric_ood(x1, x2)['Bas']\n \n if writer is not None:\n writer.add_scalars('Loss/valid', {'loss': epoch_loss}, epoch)\n writer.add_scalars('Accuracy/valid', {'acc': epoch_acc}, epoch)\n writer.add_text('Log/valid', disp_str, epoch)\n \n return 
epoch_loss, epoch_acc, results\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Training on ISIC dataset')\n # data\n parser.add_argument('--img-size', type=int, default=224, help='Training image size to be passed to the network')\n parser.add_argument('--batch-size', type=int, default=128, help='batch size')\n parser.add_argument('--num-workers', type=int, default=8, help='number of loader workers')\n parser.add_argument('--device', default=torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\"),\n help='device {cuda:0, cpu}')\n parser.add_argument('--lr', type=float, default=1e-4, help='learning rate')\n parser.add_argument('--weight_decay', type=float, default=1e-4, help='weight decay')\n parser.add_argument('--log-dir', default='', help='where to store results')\n parser.add_argument('--epochs', type=int, default=60, help='number of epochs')\n parser.add_argument('--log_steps', type=int, default=100, help='Logging at steps')\n parser.add_argument('--network', type=str, default='res34', help='res34 or res50')\n parser.add_argument('--debug',type=int, default=0, help='Debug mode: 1')\n parser.add_argument('--scheduler',type=str, default='cosine_warm_restarts_warmup',help='Type of scheduler')\n parser.add_argument('--num-restarts',type=int,default=2, help='Number of restarts for scheduler')\n parser.add_argument('--checkpoint_path',type=str, default=None, help='Checkpoint path for resuming the training')\n parser.add_argument('--loss',type=str, default='Softmax', help='GCPLoss or Softmax')\n parser.add_argument('--temp', type=float, default=1.0, help=\"temp\")\n parser.add_argument('--weight-pl', type=float, default=0.1, help=\"weight for RPL loss\")\n parser.add_argument('--mixup',type=int,default=0, help = 'Option 0: No mixup, Option 1: With mixup')\n parser.add_argument('--alpha', default=1., type=float, help='mixup interpolation coefficient (default: 1)')\n args = parser.parse_args()\n\n print (args)\n options = vars(args)\n use_gpu = torch.cuda.is_available()\n options.update(\n {\n 'feat_dim': feat_dim,\n 'use_gpu': use_gpu\n }\n )\n if use_gpu:\n cudnn.benchmark = True\n else:\n print(\"Currently using CPU\")\n\n #Image folder\n image_folder = '../../../data/isic2019/ISIC_2019_Training_Input'\n\n # CSV file path\n datafile_path = 'ISIC_2019_Training_GroundTruth.csv'\n\n #load data\n df_in, df_out = load_datafile(image_folder,datafile_path)\n \n #Data pre-preocessing\n train_transform = transforms.Compose([\n transforms.Resize((args.img_size, args.img_size)),\n transforms.RandomCrop(args.img_size, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(mean=mean, std=std),\n ])\n\n test_transform = transforms.Compose([\n transforms.Resize((args.img_size, args.img_size)),\n transforms.ToTensor(),\n transforms.Normalize(mean=mean, std=std),\n ])\n\n # Dataloader\n train_data = ISICDataset(image_folder, df_in, train_transform, size=args.img_size, is_train=True, test_mode=False)\n valid_data = ISICDataset(image_folder, df_in, test_transform, size=args.img_size, is_train=False, test_mode=False)\n valid_ood_data = ISICDataset(image_folder, df_out, test_transform, size=args.img_size, is_train=False, test_mode=True)\n train_dl = DataLoader(train_data, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)\n valid_dl = DataLoader(valid_data, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)\n val_ood_dl = DataLoader(valid_ood_data, batch_size=args.batch_size, 
shuffle=False, num_workers=args.num_workers) \n\n print('Train size: {}, valid size: {}'.format(len(train_data), len(valid_data)))\n\n # Initialize the model\n # model\n print('Build model')\n print(\"Using\", torch.cuda.device_count(), \"GPUs.\")\n args.num_classes = 6\n options.update(\n {\n 'num_classes': args.num_classes\n }\n )\n print('total classes is {}'.format(args.num_classes))\n model = SimpleCNN(args.network,args.num_classes,args.loss)\n model = nn.DataParallel(model).to(args.device)\n print (\"Loaded model\")\n n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)\n print('number of params:', n_parameters)\n\n # Define the loss function.\n if args.loss == 'Softmax':\n criterion = nn.CrossEntropyLoss()\n elif args.loss == 'GCPLoss':\n criterion = GCPLoss(**options)\n criterion = criterion.cuda()\n \n if args.loss == 'Softmax':\n param_dicts = [{\"params\": [p for n, p in model.named_parameters() if p.requires_grad]}]\n\n elif args.loss == 'GCPLoss':\n param_dicts = [{\"params\": [p for n, p in model.named_parameters() if p.requires_grad]},{'params': criterion.parameters()}]\n\n optimizer = torch.optim.AdamW(param_dicts, lr=args.lr, weight_decay=args.weight_decay)\n lr_scheduler = get_scheduler(optimizer,args)\n\n args.log_dir = 'runs_openset_isic' + '/exp_' + args.network + '_' + str(args.loss) + '_mixup' + str(args.mixup)\n \n #Checkpoint saving for models best at Openset and best at ID classification\n writer = SummaryWriter(log_dir = args.log_dir)\n output_dir = Path(writer.log_dir)\n checkpoint_path_auroc = Path(os.path.join(output_dir,'auroc', 'checkpoints'))\n os.makedirs(checkpoint_path_auroc, exist_ok=True)\n checkpoint_path_auroc = checkpoint_path_auroc / 'checkpoint.pth'\n checkpoint_path_val = Path(os.path.join(output_dir,'val', 'checkpoints'))\n os.makedirs(checkpoint_path_val, exist_ok=True)\n checkpoint_path_val = checkpoint_path_val / 'checkpoint.pth'\n args.start_epoch = 0\n\n best_valid_loss, best_auroc = 100, 0.0\n best_monitor_loss = None\n for epoch in range(args.start_epoch, args.epochs):\n print('\\nEpoch', epoch)\n epoch_start_time = time.time()\n\n #Train one epoch\n print ('\\nTrain: ')\n train_loss = train_one_epoch(args, writer, model, criterion, train_dl, optimizer, epoch)\n \n # evaluate\n print('\\nEvaluate: ')\n valid_loss, valid_acc, results = evaluate(args, writer, model, criterion, valid_dl, val_ood_dl, epoch)\n checkpoint_paths_val, checkpoint_paths_auroc = [], []\n\n print (\"\\n Train loss:\",train_loss.item(),\"Valid loss:\",valid_loss, \"Valid acc: \", valid_acc, \"AUROC: \",results['AUROC'])\n\n lr_scheduler.step(epoch=epoch)\n\n if valid_loss < best_valid_loss:\n best_valid_loss = valid_loss\n checkpoint_paths_val.append(checkpoint_path_val)\n\n if results['AUROC'] > best_auroc:\n best_auroc = results['AUROC']\n checkpoint_paths_auroc.append(checkpoint_path_auroc)\n \n #Save checkpoints every 10 epochs\n if (epoch + 1) % 10 == 0: \n checkpoint_paths_auroc.append(output_dir / f'checkpoint{epoch:03}.pth')\n checkpoint_paths_val.append(output_dir / f'checkpoint{epoch:03}.pth')\n\n if args.loss == 'Softmax':\n for cp in checkpoint_paths_auroc:\n print('Save checkpoint {}'.format(cp))\n torch.save({\n 'model': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'lr_scheduler': lr_scheduler.state_dict(),\n 'epoch': epoch\n }, cp)\n\n for cp in checkpoint_paths_val:\n print('Save checkpoint {}'.format(cp))\n torch.save({\n 'model': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'lr_scheduler': 
lr_scheduler.state_dict(),\n 'epoch': epoch\n }, cp)\n\n elif args.loss == 'GCPLoss':\n for cp in checkpoint_paths_auroc:\n print('Save checkpoint {}'.format(cp))\n torch.save({\n 'model': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'criterion': criterion.state_dict(),\n 'lr_scheduler': lr_scheduler.state_dict(),\n 'epoch': epoch\n }, cp)\n\n for cp in checkpoint_paths_val:\n print('Save checkpoint {}'.format(cp))\n torch.save({\n 'model': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'criterion': criterion.state_dict(),\n 'lr_scheduler': lr_scheduler.state_dict(),\n 'epoch': epoch\n }, cp)\n\n epoch_total_time = time.time() - epoch_start_time\n epoch_total_time_str = str(datetime.timedelta(seconds=int(epoch_total_time)))\n print('Epoch training time {}\\n'.format(epoch_total_time_str))\n\n if writer is not None: writer.close()","repo_name":"DevD1092/ood-skin-lesion","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":15353,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"25052533736","text":"from ibapi.common import BarData\nfrom barTools import epoch2human\nfrom barTools import human2epoch\nfrom easyPostgresConnection import connect2IbData\nfrom easyPostgresConnection import barTableName\nfrom BasicStuff import BasicSuper\nfrom GoodBar import GoodBar\nfrom delim import EOL\nfrom delim import DELIM\nfrom delim import END_QUERY\nfrom numpy import random\n\nONE_MINUTE = 60\nEIGHT_HOURS = 3600*8 # 8 hrs measured in seconds\nTWENTY_FOUR_HOURS = 3600*24\nDECEMBER_31_2075 = human2epoch(\"2075-12-31 23:59:59\")\nJANUARY_01_1990 = human2epoch(\"1990-01-01 00:00:00\")\n\ndef sanctifyWhichTable(whichTable, useHyphens=False):\n \"\"\"Forces whichTable into a nice soft Procrustean bed.\"\"\"\n if whichTable == \"05 sec\" or whichTable == \"5 sec\":\n if useHyphens:\n return \"5_sec\"\n else:\n return \"5 sec\"\n elif whichTable == \"15 min\":\n if useHyphens:\n whichTable = \"15_min\"\n else:\n whichTable = \"15 min\"\n elif whichTable == \"daily\":\n return \"daily\"\n else:\n raise ValueError(\"Unrecognized table name\")\n return None\n\ndef getEarliestEpoch(curs, ticker, whichTable, minEpoch=JANUARY_01_1990):\n \"\"\"Returns the earliest epoch available for a ticker.\"\"\"\n if whichTable == \"05 sec\":\n whichTable = \"5_sec\"\n elif whichTable == \"15 min\":\n whichTable = \"15_min\"\n elif whichTable == \"daily\":\n pass\n else:\n raise ValueError(\"Unrecognized table name\")\n cmd = \"SELECT epoch FROM bars_\" + whichTable + \" WHERE ticker='\" + ticker + \"' AND\"\n cmd += \" epoch>\" + str(minEpoch) + \" \"\n cmd += \" ORDER BY epoch LIMIT 1;\"\n curs.execute(cmd)\n results = curs.fetchall()\n if len(results) > 0:\n return results[0][0]\n return -1\n\ndef getNextHigherEpoch(curs, ticker, whichTable, minEpoch):\n \"\"\"Returns the next higher epoch available for a ticker.\"\"\"\n if whichTable == \"05 sec\":\n whichTable = \"5_sec\"\n elif whichTable == \"15 min\":\n whichTable = \"15_min\"\n elif whichTable == \"daily\":\n pass\n else:\n raise ValueError(\"Unrecognized table name\")\n cmd = \"SELECT epoch FROM bars_\" + whichTable + \" WHERE ticker='\" + ticker\n cmd += \"' AND epoch>\" + str(minEpoch)\n cmd += \" ORDER BY epoch LIMIT 5;\"\n curs.execute(cmd)\n results = curs.fetchall()\n if len(results) > 4:\n return results[4][0]\n return -1\n\ndef getBarsWithMinimumEpoch(curs, ticker, whichTable, epoch):\n \"\"\"Use this for historical.\"\"\"\n if whichTable == \"05 
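The mixup helpers near the top of train.py blend pairs of training samples with a Beta(alpha, alpha) weight and blend their losses the same way; a self-contained illustration (batch shape and values are made up):

import torch
import numpy as np

x = torch.randn(4, 3, 224, 224)      # a fake image batch
y = torch.tensor([0, 1, 2, 3])
lam = np.random.beta(1.0, 1.0)       # alpha = 1.0, the script's default
index = torch.randperm(x.size(0))
mixed_x = lam * x + (1 - lam) * x[index]
# loss = lam * criterion(pred, y) + (1 - lam) * criterion(pred, y[index])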
sec\":\n whichTable = \"5_sec\"\n elif whichTable == \"15 min\":\n whichTable = \"15_min\"\n elif whichTable == \"daily\":\n pass\n else:\n raise ValueError(\"Unrecognized table name\")\n cmd = \"SELECT epoch, open, close, high, low, volume FROM bars_\" + whichTable + \" WHERE ticker='\" + ticker + \"' AND \"\n cmd += \"epoch>\" + str(epoch) + \" \"\n cmd += \"ORDER BY epoch;\"\n curs.execute(cmd)\n results = curs.fetchall()\n return results\n\ndef getBarWithExactEpoch(curs, ticker, whichTable, epoch):\n \"\"\"Use this for real time.\"\"\"\n if whichTable == \"05 sec\":\n whichTable = \"5_sec\"\n elif whichTable == \"15 min\":\n whichTable = \"15_min\"\n elif whichTable == \"daily\":\n pass\n else:\n raise ValueError(\"Unrecognized table name\")\n cmd = \"SELECT epoch, open, close, high, low, volume FROM bars_\" + whichTable + \" WHERE ticker='\" + ticker + \"' AND \"\n cmd += \"epoch=\" + str(epoch) + \" \"\n cmd += \"ORDER BY epoch;\"\n curs.execute(cmd)\n results = curs.fetchall()\n return results\n\nclass LocalBarSource(BasicSuper):\n def __init__(self, localStorageConnection, minEpoch=0, addDummyData=False):\n super().__init__(localStorageConnection, barSource=self)\n self.curs = localStorageConnection.cursor()\n self.minEpoch = {}\n self.tickers = []\n self.bars_daily = {}\n self.bars_15_min = {}\n self.bars_05_sec = {}\n self.defaultMinEpoch_05_sec = 0\n self.defaultMinEpoch_15_min = 0\n self.defaultMinEpoch_daily = 0\n self.desired_num_daily = 250\n self.desired_num_15_min = 100\n self.desired_num_05_sec = 2160 # 3 hrs by default\n\n self.closes_daily = {}\n self.highs_05_sec = {}\n self.lows_05_sec = {}\n self.closes_05_sec = {}\n self.highs_15_min = {}\n self.lows_15_min = {}\n self.closes_15_min = {}\n self.addDummyData = addDummyData\n\n # Initialize Data Structures:\n self.discoverTickers()\n self.initializeTickers()\n self.loadLocallyStored()\n\n def discoverTickers(self, minEpoch=0, maxEpoch=DECEMBER_31_2075):\n \"\"\"Extracts bottommost epoch from data_subscriptions table, and appends tickers\n to a list until epoch decreases more than 10 seconds. I used to have it so that\n algorithm terminates as soon as epoch changes, but that is brittle because\n data reqs can span multiple seconds. So there's a tolerance.\n This assumes that records are in order of epoch, which seems reasonable since time\n increases monotonically... at least... for mere mortals. 
MUAHAHAHAHAHA!!!\"\"\"\n # self.tickers = [\"JPM\", \"KO\", \"LOW\", \"MCD\", \"MMM\", \"KO\", \"LOW\", \"JNJ\", \"AAPL\", \"PFE\", \\\n # \"GS\", \"HPQ\", \"SBUX\", \"MSFT\", \"BA\", \"BAC\"]\n cmd = \"SELECT DISTINCT ticker FROM bars_5_sec WHERE ticker != '__XYZ';\"\n self.curs.execute(cmd)\n for row in self.curs.fetchall():\n oneTicker = row[0]\n self.tickers.append(oneTicker)\n if self.addDummyData:\n self.tickers.append(\"__XYZ\")\n\n def initializeTickers(self):\n \"\"\"Call this after ticker discovery to set up the dictionaries keyed by ticker.\"\"\"\n for ticker in self.tickers:\n self.addTicker(ticker)\n\n def loadLocallyStored(self):\n \"\"\"Initializes vectors with data from database\"\"\"\n self.loadHistorical(\"daily\")\n self.loadHistorical(\"15 min\")\n self.loadHistorical(\"05 sec\")\n\n def addTicker(self, ticker):\n self.bars_daily[ticker] = []\n self.bars_15_min[ticker] = []\n self.bars_05_sec[ticker] = []\n self.minEpoch[ticker] = {}\n self.minEpoch[ticker][\"05 sec\"] = getEarliestEpoch(self.curs, ticker, \"05 sec\")\n self.minEpoch[ticker][\"15 min\"] = getEarliestEpoch(self.curs, ticker, \"15 min\")\n self.minEpoch[ticker][\"daily\"] = getEarliestEpoch(self.curs, ticker, \"daily\")\n self.closes_daily[ticker] = []\n self.highs_05_sec[ticker] = []\n self.lows_05_sec[ticker] = []\n self.closes_05_sec[ticker] = []\n self.highs_15_min[ticker] = []\n self.lows_15_min[ticker] = []\n self.closes_15_min[ticker] = []\n\n # Epoch trackers:\n self.nowDaily = None\n self.now15Min = None\n self.now05Sec = None\n self.prevHr = 4\n\n def getBars(self, ticker, timeframe, numel):\n \"\"\"Returns a list of bars numel elements back from now.\"\"\"\n if timeframe == \"5\" or timeframe == \"05 sec\" or timeframe == \"5 sec\":\n return self.bars_05_sec[ticker][-numel:]\n elif timeframe == \"15\" or timeframe == \"15 min\":\n return self.bars_15_min[ticker][-numel:]\n elif timeframe == \"daily\":\n return self.bars_daily[ticker][-numel:]\n else:\n raise ValueError(\"Unrecognized timeframe.\")\n\n def getCloses(self, ticker, timeframe, numel):\n \"\"\"Returns a list of bars numel elements back from now.\"\"\"\n if timeframe == \"5\" or timeframe == \"05 sec\" or timeframe == \"5 sec\":\n return self.closes_05_sec[ticker][-numel:]\n elif timeframe == \"15\" or timeframe == \"15 min\":\n return self.closes_15_min[ticker][-numel:]\n elif timeframe == \"daily\":\n return self.closes_daily[ticker][-numel:]\n else:\n raise ValueError(\"Unrecognized timeframe.\")\n\n def getHighs(self, ticker, timeframe, numel):\n \"\"\"Returns a list of bars numel elements back from now.\"\"\"\n if timeframe == \"5\" or timeframe == \"05 sec\" or timeframe == \"5 sec\":\n return self.highs_05_sec[ticker][-numel:]\n elif timeframe == \"15\" or timeframe == \"15 min\":\n return self.highs_15_min[ticker][-numel:]\n elif timeframe == \"daily\":\n raise ValueError(\"We only store bars and closes for daily. You'll have to use bars.\")\n else:\n raise ValueError(\"Unrecognized timeframe.\")\n\n def getLows(self, ticker, timeframe, numel):\n \"\"\"Returns a list of bars numel elements back from now.\"\"\"\n if timeframe == \"5\" or timeframe == \"05 sec\" or timeframe == \"5 sec\":\n return self.lows_05_sec[ticker][-numel:]\n elif timeframe == \"15\" or timeframe == \"15 min\":\n return self.lows_15_min[ticker][-numel:]\n elif timeframe == \"daily\":\n raise ValueError(\"We only store bars and closes for daily. 
You'll have to use bars.\")\n else:\n raise ValueError(\"Unrecognized timeframe.\")\n\n def addBar(self, bar):\n \"\"\"Use this one until vectors are the length you want\"\"\"\n ticker = bar.ticker\n timeframe = bar.timeframe\n assert(timeframe == \"05 sec\" or timeframe == \"15 min\" or timeframe == \"daily\")\n if timeframe == \"05 sec\":\n self.bars_05_sec[ticker].append(bar)\n self.closes_05_sec[ticker].append(bar.close)\n self.highs_05_sec[ticker].append(bar.high)\n self.lows_05_sec[ticker].append(bar.low)\n\n elif timeframe == \"15 min\":\n self.bars_15_min[ticker].append(bar)\n self.closes_15_min[ticker].append(bar.close)\n self.highs_15_min[ticker].append(bar.high)\n self.lows_15_min[ticker].append(bar.low)\n\n else:\n self.bars_daily[ticker].append(bar)\n self.closes_daily[ticker].append(bar.close)\n\n def replaceBar(self, bar):\n \"\"\"Use this one once vectors are the right size.\"\"\"\n ticker = bar.ticker\n timeframe = bar.timeframe\n assert(timeframe == \"05 sec\" or timeframe == \"15 min\" or timeframe == \"daily\")\n if timeframe == \"05 sec\":\n del(self.bars_05_sec[ticker][0])\n del(self.closes_05_sec[ticker][0])\n del(self.lows_05_sec[ticker][0])\n del(self.highs_05_sec[ticker][0])\n self.bars_05_sec[ticker].append(bar)\n self.closes_05_sec[ticker].append(bar.close)\n self.highs_05_sec[ticker].append(bar.high)\n self.lows_05_sec[ticker].append(bar.low)\n\n elif timeframe == \"15 min\":\n del(self.bars_15_min[ticker][0])\n del(self.closes_15_min[ticker][0])\n del(self.highs_15_min[ticker][0])\n del(self.lows_15_min[ticker][0])\n self.bars_15_min[ticker].append(bar)\n self.closes_15_min[ticker].append(bar.close)\n self.highs_15_min[ticker].append(bar.high)\n self.lows_15_min[ticker].append(bar.low)\n\n else:\n del(self.bars_daily[ticker][0])\n del(self.closes_daily[ticker][0])\n self.bars_daily[ticker].append(bar)\n self.closes_daily[ticker].append(bar.close)\n\n def locateStartingEpoch(self, minEpoch=0):\n \"\"\"Returns epoch of earliest daily bar. The top 50 results should all have the same epoch,\n so we return the 25th one in case the very top of the list is a corner case.\"\"\"\n cmd = \"SELECT epoch FROM bars_daily WHERE epoch>\" + str(minEpoch) + \" SORT BY epoch;\"\n self.curs.execute(cmd)\n return self.curs.fetchall()[25][0]\n\n def initializeEpochs(self, minEpoch=0):\n \"\"\"Run this before using this bar source in historical mode.\"\"\"\n self.nowDaily = self.locateStartingEpoch(minEpoch=minEpoch)\n self.advanceNownessToNextDay(advanceDaily=False)\n\n def advanceNowness(self):\n \"\"\"Advances epochs for all 3 bar sizes. Moves this object's idea of 'now' forward by 1 bar.\"\"\"\n self.now05Sec += 5\n timeObj = epoch2human(self.now05Sec)\n if timeObj.minute in [0, 15, 30, 45] and timeObj.second == 0:\n self.now15Min += 60*15\n self.prevHr = timeObj.hour\n\n def needToAdvanceToNextDay(self, someEpoch):\n \"\"\"Call this to test whether you need to advance to the next day.\"\"\"\n now = epoch2human(self.now05Sec)\n return now.hour < 12 and self.prevHr > 12\n\n def advanceNownessToNextDay(self, advanceDaily=True):\n \"\"\"Call this at the end of a historical day to figure out what epochs to search for at\n the start of the next day. Useful for both initializing state of realtime system and\n historical tests. 
advanceDaily is provided as a parameter to reuse this function for\n state initialization.\"\"\"\n if advanceDaily:\n self.advanceDaily()\n for ticker in self.tickers:\n tmpEpoch = getNextHigherEpoch(self.curs, ticker, whichTable=\"15 min\", minEpoch=self.nowDaily)\n if tmpEpoch > 0:\n self.now15Min = tmpEpoch\n break\n for ticker in self.tickers:\n tmpEpoch = getNextHigherEpoch(self.curs, ticker, whichTable=\"05 sec\", minEpoch=self.nowDaily)\n if tmpEpoch > 0:\n self.now05Sec = tmpEpoch\n break\n\n def advanceDaily(self):\n for ticker in self.tickers:\n tmpEpoch = getNextHigherEpoch(self.curs, ticker, whichTable=\"daily\", minEpoch=self.now05Sec)\n if tmpEpoch > 0:\n self.nowDaily = tmpEpoch\n\n def waitForRealtimeUpdate(self, thresh1=35, thresh2=42):\n \"\"\"Call this to nap until new realtime bars are available.\"\"\"\n assert(thresh2 >= thresh1)\n attempts = 0\n while True:\n cmd = \"SELECT COUNT(*) FROM bars_5_sec WHERE epoch=\" + str(self.now05Sec) + \";\"\n self.curs.execute(cmd)\n result = self.curs.fetchall()[0][0]\n attempts += 1\n if result >= thresh2:\n return\n elif result >= thresh1:\n sleep(random.uniform(0.01, 0.1))\n else:\n sleep(random.uniform(0.5, 1))\n\n def absorbRealtimeBars(self, whichTable):\n \"\"\"This should be called after waitForRealtimeUpdate(). This command\n queries for a specific epoch which all bars just added should have.\n All bars found are added/updated into the appropriate vector.\"\"\"\n assert(whichTable == \"05 sec\" or whichTable == \"15 min\" or whichTable == \"daily\")\n for ticker in self.tickers:\n tupleList = getBarWithExactEpoch(self.curs,\n ticker,\n whichTable=whichTable,\n epoch=self.minEpoch[ticker][\"05 sec\"])\n self.putTuplesInLists(ticker, tupleList, whichTable=whichTable)\n\n def loadHistorical(self, whichTable):\n \"\"\"This function can be used for initializing 5 sec vectors as well as updating.\n If vectors are too short, more items will be added to vectors. 
If vectors\n are long enough, items will be replaced.\"\"\"\n assert(whichTable == \"05 sec\" or whichTable == \"15 min\" or whichTable == \"daily\")\n for ticker in self.tickers:\n tupleList = getBarsWithMinimumEpoch(self.curs,\n ticker,\n whichTable=whichTable,\n epoch=self.minEpoch[ticker][\"05 sec\"])\n self.putTuplesInLists(ticker, tupleList, whichTable=whichTable)\n\n def putTuplesInLists(self, ticker, tupleList, whichTable):\n \"\"\"Takes a list of tuples from a postgresql query and stores them in lists.\"\"\"\n for barTuple in tupleList:\n epoch = barTuple[0]\n oneBar = BarData()\n setattr(oneBar, \"epoch\", epoch)\n setattr(oneBar, \"ticker\", ticker)\n setattr(oneBar, \"timeframe\", whichTable) # This is how we pass whichTable to self.replaceBar()\n oneBar.date = epoch2human(epoch)\n oneBar.open = barTuple[1]\n oneBar.close = barTuple[2]\n oneBar.high = barTuple[3]\n oneBar.low = barTuple[4]\n oneBar.volume = barTuple[5]\n if whichTable == \"05 sec\":\n if len(self.bars_05_sec[ticker]) >= self.desired_num_05_sec:\n self.replaceBar(oneBar)\n else:\n self.addBar(oneBar)\n elif whichTable == \"15 min\":\n if len(self.bars_15_min[ticker]) >= self.desired_num_15_min:\n self.replaceBar(oneBar)\n else:\n self.addBar(oneBar)\n else:\n if len(self.bars_daily[ticker]) >= self.desired_num_daily:\n self.replaceBar(oneBar)\n else:\n self.addBar(oneBar)\n self.minEpoch[ticker][whichTable] = epoch + 5\n print(ticker, \" \", whichTable, \" Length of tuple list: \", len(tupleList))\n if whichTable == \"05 sec\":\n lengthNum = len(self.closes_05_sec[ticker])\n elif whichTable == \"15 min\":\n lengthNum = len(self.closes_15_min[ticker])\n else:\n lengthNum = len(self.closes_daily[ticker])\n print(ticker, \" \", whichTable, \" Length of vector: \", lengthNum)\n\ndef testLocalBarSource():\n conn = connect2IbData()\n uut = LocalBarSource(localStorageConnection=conn)\n print(\"Using the following tickers:\")\n for ticker in uut.tickers:\n print(ticker)\n\ndef showOrderedOutput():\n \"\"\"Shows that we are using the ORDER BY epoch statement correctly.\"\"\"\n conn = connect2IbData()\n uut = LocalBarSource(localStorageConnection=conn, addDummyData=True)\n for bar in uut.bars_05_sec[\"__XYZ\"]:\n print(\"%d %03.2f\" % (bar.epoch, bar.open))\n\ndef testGetCloses():\n conn = connect2IbData()\n uut = LocalBarSource(localStorageConnection=conn, addDummyData=True)\n print(\"Here are a bunch of prices:\")\n #getCloses(self, ticker, timeframe, numel):\n for m in uut.getCloses(\"__XYZ\", \"05 sec\", 30):\n print(m)\n\ndef testGetBars():\n conn = connect2IbData()\n uut = LocalBarSource(localStorageConnection=conn, addDummyData=True)\n print(\"Here are a bunch of prices:\")\n for m in uut.getBars(\"__XYZ\", \"05 sec\", 30):\n print(m.open, \" \", m.low)\n\ndef testWaitForRealtimeUpdate():\n conn = connect2IbData()\n uut = LocalBarSource(localStorageConnection=conn, addDummyData=True)\n uut.now05Sec = 100130\n uut.waitForRealtimeUpdate(1, 1)\n\nif __name__==\"__main__\":\n testWaitForRealtimeUpdate()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"jstr045329/public_IB_data_ac","sub_path":"LocalBarSource.py","file_name":"LocalBarSource.py","file_ext":"py","file_size_in_byte":18838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6320659623","text":"import cv2\nimport math\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nimg = cv2.imread('R.png', cv2.IMREAD_COLOR)\ncv2.imshow(\"Robik\", img)\n#chuyển rgb sang 
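A hypothetical end-to-end use of LocalBarSource above, mirroring the test helpers at the bottom of that module (the ticker is illustrative). One caveat: waitForRealtimeUpdate calls sleep(), which the module as shown never imports, so from time import sleep would be needed before relying on it:

conn = connect2IbData()
source = LocalBarSource(localStorageConnection=conn)
closes = source.getCloses('AAPL', '15 min', 20)  # the 20 most recent 15-minute closes
bars = source.getBars('AAPL', 'daily', 5)        # the 5 most recent daily bar objects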
hsv\ndef rgb_to_hsv(r, g, b):\n    r, g, b = r / 255.0, g / 255.0, b / 255.0\n    cmax, cmin = max(r, g, b), min(r, g, b)\n    delta = cmax - cmin\n    if delta == 0:\n        h = 0\n    elif cmax == r:\n        h = ((g - b) / delta) % 6\n    elif cmax == g:\n        h = (b - r) / delta + 2\n    else:\n        h = (r - g) / delta + 4\n    h = round(h * 60)\n    if h < 0:\n        h += 360\n    v = cmax * 100\n    if cmax == 0:\n        s = 0\n    else:\n        s = delta / cmax * 100\n    return h, s, v\n\nh, w = img.shape[:2]\nhsv = np.zeros((h, w, 3), dtype=np.uint8)\nfor i in range(h):\n    for j in range(w):\n        b, g, r = img[i, j] # cv2.imread returns BGR, not RGB\n        hue, sat, val = rgb_to_hsv(r, g, b) # separate names so the image height h is not shadowed\n        hsv[i, j] = [hue // 2, sat, val] # halve the hue (0-179) so it fits in uint8, as OpenCV does\n\ncv2.imshow(\"HSV\", hsv)\n\n# RGB colour at the centre of the image\ndef midRGB(img):\n    h, w, _ = img.shape\n    mid_h, mid_w = h//2, w//2\n    b, g, r = img[mid_h, mid_w] # reorder the BGR pixel to RGB\n    print(f\"RGB: {(r, g, b)}\")\nmidRGB(img)\n\n# extract the three RGB channels of the image\ndef get_rgb_channels(image):\n    height, width = image.shape[:2]\n    r_channel = np.zeros((height, width), dtype=np.uint8)\n    g_channel = np.zeros((height, width), dtype=np.uint8)\n    b_channel = np.zeros((height, width), dtype=np.uint8)\n    \n    for i in range(height):\n        for j in range(width):\n            r_channel[i][j] = image[i][j][2] # index 2 is red: OpenCV stores pixels as BGR\n            g_channel[i][j] = image[i][j][1]\n            b_channel[i][j] = image[i][j][0]\n    \n    return r_channel, g_channel, b_channel\n\nimage_path = \"R.png\"\nimage = cv2.imread(image_path)\n\nr_channel, g_channel, b_channel = get_rgb_channels(image)\n\ncv2.imshow(\"Red Channel\", r_channel)\ncv2.imshow(\"Green Channel\", g_channel)\ncv2.imshow(\"Blue Channel\", b_channel)\n\n# extract the three HSV channels\n\ndef get_hsv_channels(image):\n    height, width = image.shape[:2]\n    h_channel = np.zeros((height, width), dtype=np.uint8)\n    s_channel = np.zeros((height, width), dtype=np.uint8)\n    v_channel = np.zeros((height, width), dtype=np.uint8)\n    \n    for i in range(height):\n        for j in range(width):\n            r = image[i][j][2] # BGR order again\n            g = image[i][j][1]\n            b = image[i][j][0]\n            \n            r_ = r / 255\n            g_ = g / 255\n            b_ = b / 255\n            \n            cmax = max(r_, g_, b_)\n            cmin = min(r_, g_, b_)\n            delta = cmax - cmin\n            \n            if delta == 0:\n                h = 0\n            elif cmax == r_:\n                h = ((g_ - b_) / delta) % 6\n            elif cmax == g_:\n                h = (b_ - r_) / delta + 2\n            else:\n                h = (r_ - g_) / delta + 4\n            \n            h = int(h * 60)\n            \n            if h < 0:\n                h += 360\n            \n            v = int(cmax * 255)\n            \n            if cmax == 0:\n                s = 0\n            else:\n                s = int(delta / cmax * 255)\n            \n            h_channel[i][j] = h // 2 # keep the hue in 0-179 so it does not overflow uint8\n            s_channel[i][j] = s\n            v_channel[i][j] = v\n    \n    return h_channel, s_channel, v_channel\n\nimage_path = \"R.png\"\nimage = cv2.imread(image_path)\n\nh_channel, s_channel, v_channel = get_hsv_channels(image)\n\ncv2.imshow(\"Hue Channel\", h_channel)\ncv2.imshow(\"Saturation Channel\", s_channel)\ncv2.imshow(\"Value Channel\", v_channel)\n\n# get the colour at a given coordinate\n\n# def get_pixel_color(image, x, y):\n#     width = image.shape[1]\n#     b = image[(y * width + x) * 3]\n#     g = image[(y * width + x) * 3 + 1]\n#     r = image[(y * width + x) * 3 + 2]\n    \n#     return r, g, b\n\n# image_path = \"R.png\"\n# image = cv2.imread(image_path)\n# x = 10\n# y = 20\n# r, g, b = get_pixel_color(image, x, y)\n# print(f\"Color at ({x}, {y}): ({r}, {g}, {b})\")\n\n\ncv2.waitKey()\ncv2.destroyAllWindows()\n\n\n# def f():\n#     imgPIL = Image.open(\"R.png\")\n#     # create images of the same size\n#     Hue = Image.new(imgPIL.mode, imgPIL.size)\n#     Saturation = Image.new(imgPIL.mode,imgPIL.size)\n#     Value = Image.new(imgPIL.mode,imgPIL.size)\n#     HSVimg = Image.new(imgPIL.mode,imgPIL.size)\n#     # get the image dimensions\n#     w = imgPIL.size[0]\n#     h = imgPIL.size[1]\n#     print(w, h)\n#     for i in range(w):\n#         for j in range(h):\n#             # get the pixel at point (i, j)\n#             R,G,B = imgPIL[i][j]\n#             MAX = max(R,G,B)\n#             MIN = min(R,G,B)\n#             SUM = (R+G+B)\n#             t1 = ((R-G)+(R-B))/2\n#             t2 = math.sqrt((R-G)*(R-B)+(R-B*(G-B)))\n#             # acos returns radians\n#             theta = math.acos(t1/t2)\n#             # compute the hue\n#             H = 0\n#             if B <= G:\n#                 H = theta\n#             else:\n#                 H = 2*math.pi*theta\n#             H = np.uint8(H*100/math.pi)\n#             # saturation formula\n#             S = 1+3*MIN/SUM\n#             S = np.uint8(S*255)\n#             # value formula\n#             V = np.uint8(MAX)\n#             Hue.putpixel((i, j), (H, H, H))\n#             Saturation.putpixel((i, j), (S, S, S))\n#             Value.putpixel((i, j), (V, V, V))\n#             HSVimg.putpixel((i, j), (V, S, H))\n\n#     # convert from PIL to cv2\n#     AnhHue = np.array(Hue)\n#     AnhSaturation = np.array(Saturation)\n#     AnhValue = np.array(Value)\n#     AnhHSVimg = np.array(HSVimg)\n#     cv2.imshow(\"Hue\", AnhHue)\n#     cv2.imshow(\"Satu\",AnhSaturation)\n#     cv2.imshow(\"Value\",AnhValue)\n#     cv2.imshow(\"HSV\", AnhHSVimg)\n","repo_name":"hieu123ps/image_processing","sub_path":"Code demo/Week7/convertRGB-HSV.py","file_name":"convertRGB-HSV.py","file_ext":"py","file_size_in_byte":5483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"44496203501","text":"\"\"\"This module contains the main framework for representing music in Gimbop.\"\"\"\nimport csv\nimport operator\nimport subprocess\n\nfrom src.model_api import GimbopAPI\nfrom src.representation import MusicAbstractor\nimport src.utils as utils\n\n\nclass Key:\n    \"\"\"Class for representing a key signature.\"\"\"\n\n    def __init__(self, key_string):\n        self.key_string = key_string\n        self.num_accidentals = self.get_num_accidentals(key_string)\n\n    def __repr__(self):\n        return str(self.num_accidentals)\n\n    def get_num_accidentals(self, key_string):\n        key_dict = {\n            \"C\": 0,\n            \"G\": 1,\n            \"D\": 2,\n            \"A\": 3,\n            \"E\": 4,\n            \"B\": 5,\n            \"F#\": 6,\n            \"C#\": 7,\n            \"F\": -1,\n            \"Bb\": -2,\n            \"Eb\": -3,\n            \"Ab\": -4,\n            \"Db\": -5,\n            \"Gb\": -6,\n            \"Cb\": -7,\n            # Also include enharmonic equivalents\n            \"B#\": 0,\n            \"E#\": -1,\n            \"A#\": -2,\n            \"D#\": -3,\n            \"G#\": -4,\n            \"Fb\": 4,\n        }\n        return key_dict[key_string]\n\n\nclass SongMetadata:\n    \"\"\"Class to store fixed metadata about every generated song.\"\"\"\n\n    def __init__(self):\n        self.midi_type = 1\n        self.num_tracks = 1\n        self.quarter_note = 480\n        self.ts_num = 4\n        self.ts_denom = 2 # negative power of two, e.g. 
eighth note = 3\n self.major = \"major\"\n\n\n# Dictionary of MIDI-standard instrument types\ninstrument_types = {\n \"grand_piano\": 0,\n \"bright_acoustic_piano\": 1,\n \"electric_piano_1\": 4,\n \"electric_piano_2\": 5,\n \"acoustic_guitar\": 24,\n \"electric_guitar_jazz\": 26,\n}\n\n\nclass SongWriter:\n \"\"\"Class to generate and write a song to a MIDI file.\"\"\"\n\n def __init__(\n self,\n key=Key(\"C\"),\n bpm=120,\n ):\n # Initialize the metadata that we (probably) won't change\n self.meta = SongMetadata()\n\n # Initialize the key signature\n self.key_signature = key\n self.num_accidentals = key.num_accidentals\n\n # 500000 = 120 quarter notes (beats) per minute; also 60,000,000 / bpm\n self.bpm = bpm\n self.tempo = 60000000 / self.bpm\n\n # Initialize the instruments\n self.instruments = [\n instrument_types[\"grand_piano\"],\n instrument_types[\"grand_piano\"],\n ]\n\n # Initialize the actual contents of the song\n self.lines = []\n self.total_time = 100000 # TODO: Hardcoded for now, change this\n\n def write(self, filename, sample_song=False):\n \"\"\"Write the song to a MIDI file.\"\"\"\n self._write_header()\n self._initialize_instruments()\n self._write_content() if not sample_song else self.write_sample_song()\n self._finish(filename)\n\n def _write_header(self):\n self.lines.append(\n [\n 0,\n 0,\n \"Header\",\n self.meta.midi_type,\n self.meta.num_tracks,\n self.meta.quarter_note,\n ]\n )\n self.lines.append([1, 0, \"Start_track\"])\n self.lines.append(\n [1, 0, \"Time_signature\", self.meta.ts_num, self.meta.ts_denom, 24, 8]\n )\n self.lines.append(\n [1, 0, \"Key_signature\", self.num_accidentals, self.meta.major]\n )\n self.lines.append([1, 0, \"Tempo\", self.tempo])\n\n def _initialize_instruments(self):\n \"\"\"Initialize the instruments for the song in csvmidi format.\"\"\"\n for i in range(len(self.instruments)):\n self.lines.append([1, 0, \"Control_c\", i, 121, 0])\n self.lines.append([1, 0, \"Program_c\", i, self.instruments[i]])\n self.lines.append([1, 0, \"MIDI_port\", i])\n\n def _write_content(self):\n generator = GimbopAPI()\n model = generator.retrieve_model()\n\n self.lines = utils.play_note(self.lines, 60, 0, 480)\n self.lines = utils.play_note(self.lines, 62, 480, 480)\n self.lines = utils.play_note(self.lines, 64, 960, 960)\n\n def write_sample_song(self):\n \"\"\"Write a sample song to a MIDI file.\"\"\"\n # All the notes are quarter notes\n dur = 480\n\n # Read sample_notes.py to get and write the notes\n from sample_notes import notes\n\n for i in range(len(notes)):\n self.lines = utils.play_note(self.lines, notes[i], i * dur, dur)\n\n # Set the total time of the song\n self.total_time = len(notes) * dur\n\n def _finish(self, filename):\n self.lines = sorted(self.lines, key=operator.itemgetter(1))\n # total_time = self.LH_time if self.LH_time > self.RH_time else self.RH_time\n # total_time = self.misc_time1 if self.misc_time1 > total_time else total_time\n # total_time = self.misc_time2 if self.misc_time2 > total_time else total_time\n total_time = self.total_time # TODO: Hardcoded for now, change this\n\n self.lines.append([1, total_time, \"End_track\"])\n self.lines.append([0, 0, \"End_of_file\"])\n\n with open(filename + \".csv\", \"w\") as f:\n writer = csv.writer(f)\n writer.writerows(self.lines)\n\n f.close()\n subprocess.run([\"./csvmidi\", filename + \".csv\", filename + 
\".mid\"])\n","repo_name":"TrueshotBarrage/gimbop","sub_path":"src/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":5263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27756562146","text":"import os\nimport json\nfrom typing import Dict, List\nimport torch\nfrom transformers import AutoTokenizer, AutoModel, AutoConfig\nfrom tqdm import tqdm\nimport binascii\nimport scapy.all as scapy\n\nclass AlgSolution:\n\n def __init__(self):\n\n model_name = \"/adabench_mnt/llm/chatglm2-6b\"\n ptuning_path = \"output/ptuning-model/checkpoint-397\"\n\n self.tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)\n if ptuning_path is not None:\n config = AutoConfig.from_pretrained(model_name, trust_remote_code=True, pre_seq_len=64)\n self.model = AutoModel.from_pretrained(model_name, config=config, trust_remote_code=True)\n prefix_state_dict = torch.load(\n os.path.join(ptuning_path, \"pytorch_model.bin\"), map_location='cpu')\n new_prefix_state_dict = {}\n for k, v in prefix_state_dict.items():\n if k.startswith(\"transformer.prefix_encoder.\"):\n new_prefix_state_dict[k[len(\"transformer.prefix_encoder.\"):]] = v\n self.model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict)\n self.model = self.model.half().cuda()\n self.model.transformer.prefix_encoder.float()\n else:\n self.model = AutoModel.from_pretrained(model_name, trust_remote_code=True).half().cuda()\n self.model.eval()\n\n self.MAX_PACKET_NUMBER = 10\n self.MAX_PACKET_LENGTH_IN_FLOW = 256\n self.HEX_PACKET_START_INDEX = 0\n\n def build_pcap_data(self, pcap_file):\n packets = scapy.rdpcap(pcap_file)\n hex_stream = []\n for packet in packets[:self.MAX_PACKET_NUMBER]:\n packet_data = packet.copy()\n data = (binascii.hexlify(bytes(packet_data)))\n packet_string = data.decode()\n hex_stream.append(packet_string[self.HEX_PACKET_START_INDEX:min(len(packet_string), self.MAX_PACKET_LENGTH_IN_FLOW)])\n flow_data = \"\" + \"\".join(hex_stream)\n return flow_data\n\n def pre_process(self, input_data: Dict, dataset_root) -> str:\n prompt = input_data['instruction'] + \\\n self.build_pcap_data(dataset_root + '/test/' + input_data['path'])\n return prompt\n\n def generate(self, prompt: str) -> str:\n response, _ = self.model.chat(self.tokenizer, prompt, history=[])\n return response\n\n def post_process(self, response: str) -> str:\n return response\n\n def predicts(self, input_data: List[Dict], **kwargs) -> str:\n dataset_root = kwargs[\"__dataset_root_path\"]\n results = []\n for item in input_data:\n if isinstance(item['id'], str) and item['id'].startswith('subject'):\n result = self.generate(item['input'])\n else:\n prompt = self.pre_process(item, dataset_root)\n response = self.generate(prompt)\n result = self.post_process(response)\n results.append({\n 'id': item['id'],\n 'output': result,\n }) \n return results\n","repo_name":"uukuguy/speechless","sub_path":"examples/ATEC2023/task4/atec_project/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":3084,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"495550634","text":"import pysam\nfrom interval import Interval\nimport time\n\n#samfile = pysam.AlignmentFile('../SRR8236758.sorted.rmdup.mapq20.bam','rb')\n#f_bkline = open(sys.argv[1],'r')\n#interval=int(sys.argv[2])\n#bklines = f_bkline.readlines()\n\n#s_chr = sys.argv[1]\n#give_site = int(sys.argv[2])\n#interval = int(sys.argv[3])\nclass 
find_amp:\n\tdef __init__(self,bam,discbk,interval,cov):\n\t\tself.cov = cov\n\t\tprint(cov)\n\t\tself.discbk = discbk\n\t\tself.interval = interval\n\t\tself.bam = bam\n\t\tself.samfile = pysam.AlignmentFile(bam,'rb')\n\t\tself.chrom_dir = {}\n\t\tfor ref in self.samfile.references:\n\t\t\tself.chrom_dir[ref] = []\n\t\t\tself.chrom_dir[ref].append(Interval(0, 0))\n\n#Calculates the sequencing depth of the specified length region to the left of the locus, the default length is interval\n\tdef interval_cov(self,samfile_name,chr_n,site,interval_size=None):\n\t\treads=0\n\t\tinterval_size=self.interval\n\t\tllimit = site-interval_size\n\t\tif llimit<=0:\n\t\t\treturn 0\n\t#\tprint(chr_n,samfile_name.get_reference_length(chr_n),llimit,site)\n\t\tfor i,k in enumerate(samfile_name.count_coverage(chr_n,llimit,site)):\n\t\t\treads=reads+sum(k)\n\t\treturn(reads/interval_size)\n\n\t#As above, calculate the sequencing depth of the right-hand region\n\tdef interval_cov1(self,samfile_name,chr_n,site,interval_size=None):\n\t\treads=0\n\t\tinterval_size = self.interval\n\n\t\trsite = site+interval_size\n\t\t#print(chr_n,samfile_name.get_reference_length(chr_n),site)\n\t\tif rsite > samfile_name.get_reference_length(chr_n):\n\t\t\treturn 0\n\t#\tprint(chr_n, samfile_name.get_reference_length(chr_n),site,end_site)\n\t\tfor i,k in enumerate(samfile_name.count_coverage(chr_n,site,rsite)):\n\t\t\treads=reads+sum(k)\n\t\treturn(reads/interval_size)\n\n\t#Finding contiguous regions with sequencing depth\n\tdef find_amplicon(self,sam,start_c,start_s,cov_t=5):\n\t\tinterval=self.interval\n\t\tcov_t = self.cov\n\t\tl_site = start_s\n\t\tif l_site >= sam.get_reference_length(start_c):\n\t\t\tl_site = sam.get_reference_length(start_c) - 1\n\n\t\t\t#Continuous area on the left\n\t\twhile self.interval_cov(sam,start_c,l_site)>cov_t or self.interval_cov(sam,start_c,l_site,interval_size=10*interval)>cov_t:\n\t\t\tl_site=l_site-interval\n\t\t\tif l_site <= 0:\n\t\t\t\tbreak\n\t\tr_site = start_s\n\t\t#Continuous area on the right\n\t\twhile self.interval_cov1(sam,start_c,r_site)>cov_t or self.interval_cov1(sam,start_c,r_site,interval_size=10*interval)>cov_t:\n\t\t\tr_site = r_site+interval\n\t\t\tif r_site >= sam.get_reference_length(start_c):\n\t\t\t\tbreak\n\t#Return to left endpoint and right endpoint\n\t\treturn(l_site,r_site)\n\n\t#Determine if the breakpoint is in the detected amplification\n\tdef isinchrom_dir(self,chr,pos):\n\t\tchrom_dir = self.chrom_dir\n\t\tfor line in chrom_dir[chr]:\n\t\t\tif pos in line:\n\t#If the amplification region is known, the Boolean value True and the amplification region information are returned.\n\t\t\t\treturn (True,line)\n\n\t\treturn (False,line)\n\n\t#Main functions, traversing breakpoint files for processing\n\tdef process_coverage(self,bam=None,support_reads=5):\n\t\tbk_file = self.discbk\n\t\tchrom_dir = self.chrom_dir\n\t\tbam = self.bam\n\t\tsamfile = self.samfile\n\t\ts_t = time.time()\n\t\tf_bkline = open(bk_file,'r')\n\t\tbklines = f_bkline.readlines()\n\t\t#Create a results file\n\t\toutfile_n = bk_file+'.amplicon'\n\t\tf_out = open(outfile_n,'w')\n\t\t#count3 counts how many rows have been read\n\t\tcount3 = 0\n\t\tfor line in bklines:\n\t\t\tcount3 = count3 + 1\n\t\t\tif count3%1000 == 0:\n\t\t\t\tprint(count3)\n\t\t\t\tprint(time.time()-s_t)\n\t\t\t#Removal of breakpoint pairs that match to uncertain chromosomes\n\t\t\tif 'chrUn' in line or 'random' in line:\n\t\t\t\tcontinue\n\n\t\t\tbkline_list = line.split('\\t')\n\n\t\t\t#Less than 5 breakpoint pairs with 
reads support do not\n\t\t\tif int(bkline_list[4]) <= support_reads:\n\t\t\t\tcontinue\n\n\t\t\t#Read the chromosomes located at both ends of the breakpoint pair\n\t\t\ts_chr = bkline_list[0]\n\t\t\te_chr = bkline_list[2]\n\n\t\t\t#Read the interval between the ends of the breakpoint pair, temporarily using the midpoint of the interval to indicate the endpoint position\n\t\t\tstart_site = int((int(bkline_list[1].strip('[').strip(']').split('.')[-1])+int(bkline_list[1].strip('[').strip(']').split('.')[0])+1)/2)\n\t\t\tend_site = int((int(bkline_list[3].strip('[').strip(']').split('.')[-1])+int(bkline_list[3].strip('[').strip(']').split('.')[0])+1)/2)\n\n\t\t\t#Do not have endpoints with coordinates less than 0\n\t\t\tif start_site <= 0 or end_site <= 0:\n\t\t\t\tcontinue\n\n\t\t\t#Call the isinchrom_dir function to determine if the left end is in the collected amplification region\n\t\t\tif self.isinchrom_dir(s_chr,start_site)[0]:\n\t\t\t\t#If in a known amplification region, take out the region location\n\t\t\t\tfirst_l,first_r = self.isinchrom_dir(s_chr,start_site)[1].lower_bound,self.isinchrom_dir(s_chr,start_site)[1].upper_bound\n\n\t\t\telse:\n\t\t\t\t#If not, call find_amplicon to search for the augmented region starting with the endpoint and add this region to the augmented region dictionary\n\n\t\t\t\tfirst_l,first_r = self.find_amplicon(samfile,s_chr,start_site)\n\t\t\t\tchrom_dir[s_chr].append(Interval(first_r,first_l))\n\n\t\t\t#As above, determine the right endpoint\n\t\t\tif self.isinchrom_dir(e_chr,end_site)[0]:\n\t\t\t\tsecond_l,second_r = self.isinchrom_dir(e_chr,end_site)[1].lower_bound,self.isinchrom_dir(e_chr,end_site)[1].upper_bound\n\t\t\telse:\n\t\t\t\tsecond_l,second_r = self.find_amplicon(samfile,e_chr,end_site)\n\t\t\t\tchrom_dir[e_chr].append(Interval(second_l,second_r))\n\n\t\t\t#Left and right endpoints are only retained if they lie in an interval greater than a certain size\n\t\t\tif (first_r - first_l) > 2000 and (second_r - second_l)>2000:\n\t\t\t\tout_line = 'first_bk_interval'+'\\t'+str(first_l)+'\\t'+str(first_r)+'\\t'+'first_len'+'\\t'+str(first_r - first_l)+'\\t''second_bk_interval'+'\\t'+str(second_l)+'\\t'+str(second_r)+'\\t'+'second_len'+'\\t'+str(second_r - second_l)+'\\t'+line\n\t\t\t\tf_out.write(out_line)\n\n\n\t\treturn(outfile_n)\n\n#fina_amp('COLO320HSR_rep2_atac_possorted_bam.bam',interval=1000).process_coverage()\n##print(find_amplicon(samfile,s_chr,start_site))\n\t#\tprint(find_amplicon(samfile,s_chr,start_site)[1]-find_amplicon(samfile,s_chr,start_site)[0])\n\t#\tprint(find_amplicon(samfile,e_chr,end_site))\n\t#\tprint(find_amplicon(samfile,e_chr,end_site)[1]-find_amplicon(samfile,e_chr,end_site)[0])\n\n\n","repo_name":"chsmiss/ATAC-amp","sub_path":"calculate_cnv.py","file_name":"calculate_cnv.py","file_ext":"py","file_size_in_byte":6113,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"7605567772","text":"from kqcircuits.chips.lithography_test import LithographyTest\nfrom kqcircuits.chips.chip import Chip\nfrom kqcircuits.pya_resolver import pya\nfrom kqcircuits.util.parameters import add_parameters_from\n\n@add_parameters_from(Chip, frames_enabled=[0, 1])\n@add_parameters_from(LithographyTest, 'stripe_test')\nclass LithographyTestTwoface(Chip):\n \"\"\"Optical lithography test chip in a flip chip architecture.\n\n Consists of StripesTest cells with different parameters.\n \"\"\"\n create_pattern = LithographyTest.create_pattern\n\n def build(self):\n cell_horizontal_1, 
cell_vertical_1, cell_diagonal_1 = self.create_pattern(num_stripes=20, length=100,\n min_width=1,\n max_width=15, step=1, spacing=1,\n face_id=self.face_ids[0])\n cell_horizontal_2, cell_vertical_2, cell_diagonal_2 = self.create_pattern(num_stripes=20, length=100,\n min_width=1,\n max_width=15, step=1, spacing=5,\n face_id=self.face_ids[0])\n cell_horizontal_3, cell_vertical_3, cell_diagonal_3 = self.create_pattern(num_stripes=20, length=100,\n min_width=1,\n max_width=15, step=1, spacing=1,\n face_id=self.face_ids[1])\n cell_horizontal_4, cell_vertical_4, cell_diagonal_4 = self.create_pattern(num_stripes=20, length=100,\n min_width=1,\n max_width=15, step=1, spacing=5,\n face_id=self.face_ids[1])\n # bottom patterns\n self.insert_cell(cell_horizontal_1, pya.DCplxTrans(1, 0, False, 2000, 6500))\n self.insert_cell(cell_horizontal_1, pya.DCplxTrans(1, 0, False, 3000, 6500))\n self.insert_cell(cell_horizontal_2, pya.DCplxTrans(1, 0, False, 4000, 6500))\n self.insert_cell(cell_horizontal_2, pya.DCplxTrans(1, 0, False, 6000, 6500))\n self.insert_cell(cell_vertical_1, pya.DCplxTrans(1, 0, False, 1500, 5700))\n self.insert_cell(cell_vertical_1, pya.DCplxTrans(1, 0, False, 5500, 5700))\n self.insert_cell(cell_vertical_2, pya.DCplxTrans(1, 0, False, 1500, 3800))\n self.insert_cell(cell_vertical_2, pya.DCplxTrans(1, 0, False, 5500, 3800))\n self.insert_cell(cell_diagonal_1, pya.DCplxTrans(1, 0, False, 400, 1500))\n self.insert_cell(cell_diagonal_1, pya.DCplxTrans(1, 0, False, 1500, 1500))\n self.insert_cell(cell_diagonal_2, pya.DCplxTrans(1, 0, False, 3100, 1500))\n self.insert_cell(cell_diagonal_2, pya.DCplxTrans(1, 0, False, 6100, 1500))\n # top patterns\n self.insert_cell(cell_horizontal_3, pya.DCplxTrans(1, 0, False, 5900, 4000) * pya.DCplxTrans.M90)\n self.insert_cell(cell_horizontal_4, pya.DCplxTrans(1, 0, False, 8000, 4000) * pya.DCplxTrans.M90)\n self.insert_cell(cell_vertical_3, pya.DCplxTrans(1, 0, False, 5000, 6200) * pya.DCplxTrans.M90)\n self.insert_cell(cell_vertical_4, pya.DCplxTrans(1, 0, False, 5000, 4000) * pya.DCplxTrans.M90)\n self.insert_cell(cell_diagonal_3, pya.DCplxTrans(1, 0, False, 7500, 1800) * pya.DCplxTrans.M90)\n self.insert_cell(cell_diagonal_4, pya.DCplxTrans(1, 0, False, 6200, 1800) * pya.DCplxTrans.M90)\n","repo_name":"iqm-finland/KQCircuits","sub_path":"klayout_package/python/kqcircuits/chips/lithography_test_twoface.py","file_name":"lithography_test_twoface.py","file_ext":"py","file_size_in_byte":3942,"program_lang":"python","lang":"en","doc_type":"code","stars":109,"dataset":"github-code","pt":"61"} +{"seq_id":"42176592357","text":"import pygame\r\nimport json\r\n\r\n\"\"\"\r\nINTERFACES:\r\n\r\nTo import this file\r\nuse: from MapSection import MapSection\r\n\r\n@ methods (of MapSection):\r\n self.move(move_sequence): (list) arbitrary length with, value = \"f\", \"l\", \"r\"\r\n self.display_update(): single loop function\r\n self.isWin(): return True if usr wins, False otherwise\r\n self.isLose(): return True only if usr loses\r\n\"\"\"\r\n\r\n\r\n\"\"\"\r\n@attributes:\r\n self.size: list of two int, (height, width)\r\n self.ch_position: list of two int, e.g. 
[3, 2]\r\n self.ch_direction: string of 1 char, l -> left, r -> right, u -> up, d -> down\r\n self.env: 2d-list of int, int representation:\r\n 0 -> None\r\n 1 -> animal\r\n 2 -> walls\r\n 3 -> wolves\r\n 4 -> rivers\r\n self.command_list: list of string, values:\r\n \"forward\", \"turn left\", \"turn right\"\r\n\"\"\"\r\nclass Map:\r\n \r\n def __init__(self, filename):\r\n self.env = []\r\n self.readfile(filename)\r\n \r\n \r\n def readfile(self, filename):\r\n with open(filename, 'r') as file:\r\n data = json.load(file)\r\n self.size = tuple(data[\"size\"])\r\n self.ch_position = data[\"grace_position\"]\r\n self.ch_direction = data[\"grace_direction\"]\r\n animal = data[\"animal\"]\r\n walls = data[\"walls\"]\r\n rivers = data[\"rivers\"]\r\n wolves = data[\"wolves\"]\r\n self.command_list = data[\"command_list\"]\r\n \r\n # init all entry in self.env to 0\r\n for i in range(self.size[0]):\r\n temp = []\r\n for j in range(self.size[1]):\r\n temp.append(0)\r\n self.env.append(temp)\r\n \r\n self.env[animal[0]][animal[1]] = 1\r\n \r\n for wall in walls:\r\n self.env[wall[0]][wall[1]] = 2\r\n \r\n for wolf in wolves:\r\n self.env[wolf[0]][wolf[1]] = 3\r\n\r\n for river in rivers:\r\n self.env[river[0]][river[1]] = 4\r\n\r\n\r\n def getSize(self):\r\n return self.size\r\n\r\n def getEnv(self):\r\n return self.env\r\n\r\n\r\n\"\"\"\r\n@ methods:\r\n self.move(move_sequence): (list) arbitrary length with, value = \"f\", \"l\", \"r\"\r\n self.display_update(): single loop function\r\n\"\"\"\r\nclass MapSection:\r\n WINDOW_SIZE = (600, 600)\r\n SPEED = 5\r\n WAIT_TIME = 300\r\n CELL_SIZE = 75\r\n\r\n FOLDER_NAME = \"image\"\r\n\r\n \"\"\"\r\n display: (pygame.Surface) where to draw\r\n filename: (string) filename of map\r\n \"\"\"\r\n def __init__(self, display, filename):\r\n self.map = Map(filename)\r\n self.load_imgs()\r\n self.size = self.map.getSize()\r\n self.env = self.map.getEnv()\r\n self.ch_pos = self.map.ch_position[:]\r\n self.ch_direction = self.map.ch_direction\r\n self.draw_prog = 0\r\n self.move_sequence = []\r\n self.win_bool = False\r\n self.loss_bool = False\r\n\r\n height = self.size[0]\r\n width = self.size[1]\r\n self.display = pygame.Surface((height*75, width*75))\r\n\r\n self.master = display\r\n self.init()\r\n \r\n\r\n def load_imgs(self):\r\n self.stone_img = pygame.transform.scale(\r\n pygame.image.load(MapSection.FOLDER_NAME+\"/stone.png\"), (75,75))\r\n self.grace_img = pygame.transform.scale(\r\n pygame.image.load(MapSection.FOLDER_NAME+\"/grace.png\"), (75,75))\r\n self.animal_img = pygame.transform.scale(\r\n pygame.image.load(MapSection.FOLDER_NAME+\"/egg.png\"), (75,75))\r\n self.bg_img = pygame.image.load(MapSection.FOLDER_NAME+\"/grid_bg.png\")\r\n self.arrow_u = pygame.transform.scale(\r\n pygame.image.load(MapSection.FOLDER_NAME+\"/arrow_u.png\"), (75,25))\r\n self.arrow_d = pygame.transform.scale(\r\n pygame.image.load(MapSection.FOLDER_NAME+\"/arrow_d.png\"), (75,25))\r\n self.arrow_l = pygame.transform.scale(\r\n pygame.image.load(MapSection.FOLDER_NAME+\"/arrow_l.png\"), (25,75))\r\n self.arrow_r = pygame.transform.scale(\r\n pygame.image.load(MapSection.FOLDER_NAME+\"/arrow_r.png\"), (25,75))\r\n self.sad_face_tl = pygame.transform.scale(\r\n pygame.image.load(MapSection.FOLDER_NAME+\"/sad_face_tl.png\"), (75,75))\r\n self.sad_face_tr = pygame.transform.scale(\r\n pygame.image.load(MapSection.FOLDER_NAME+\"/sad_face_tr.png\"), (75,75))\r\n self.sad_face_bl = pygame.transform.scale(\r\n pygame.image.load(MapSection.FOLDER_NAME+\"/sad_face_bl.png\"), 
(75,75))\r\n self.sad_face_br = pygame.transform.scale(\r\n pygame.image.load(MapSection.FOLDER_NAME+\"/sad_face_br.png\"), (75,75))\r\n self.win_msg_box = pygame.image.load(MapSection.FOLDER_NAME+\"/win_msg.png\")\r\n self.win_cont_hover = pygame.image.load(MapSection.FOLDER_NAME+\"/win_msg_cont_hover.png\")\r\n self.huge_egg_img = pygame.transform.scale(\r\n pygame.image.load(MapSection.FOLDER_NAME+\"/animal.png\"), (150,150))\r\n\r\n\r\n def draw_map(self):\r\n # draw map\r\n self.display.blit(self.bg_img, (0, 0))\r\n for r in range(self.size[0]):\r\n for c in range(self.size[1]):\r\n pos = (r*75, c*75)\r\n if (self.env[r][c] == 1):\r\n self.display.blit(self.animal_img, pos)\r\n if (self.env[r][c] == 2):\r\n self.display.blit(self.stone_img, pos)\r\n\r\n\r\n def basic_update(self):\r\n self.draw_map()\r\n\r\n # draw Grace\r\n self.display.blit(self.grace_img, (self.ch_pos[0]*75, self.ch_pos[1]*75))\r\n\r\n self.draw_on_master()\r\n\r\n\r\n def init(self):\r\n self.ch_direction = self.map.ch_direction\r\n self.ch_pos = self.map.ch_position[:]\r\n self.loss_bool = False\r\n self.draw()\r\n\r\n\r\n def isWin(self):\r\n return self.win_bool\r\n\r\n\r\n def isLose(self):\r\n return self.loss_bool\r\n\r\n\r\n def reach_egg(self):\r\n self.display.blit(self.win_msg_box, (100, 150))\r\n self.display.blit(self.huge_egg_img, (227, 245))\r\n\r\n mouse_pos = pygame.mouse.get_pos()\r\n mouse_click = pygame.mouse.get_pressed()[0]\r\n mouse_hb = pygame.Rect(mouse_pos[0], mouse_pos[1], 5, 5)\r\n button_hb = pygame.Rect(220, 402, 192, 46)\r\n\r\n if (self.win_bool):\r\n self.draw_prog = 0\r\n self.move_sequence.pop()\r\n return\r\n \r\n if mouse_hb.colliderect(button_hb): # check if mouseover\r\n self.display.blit(self.win_cont_hover, (220, 402)) \r\n if mouse_click == 1: # if user clicks on button, go to menu\r\n self.win_bool = True\r\n\r\n self.draw_on_master()\r\n\r\n\r\n def collide(self):\r\n if (self.draw_prog > 600):\r\n self.draw_prog = 0\r\n self.move_sequence.pop()\r\n self.init()\r\n return\r\n\r\n self.draw_map()\r\n pos = [self.ch_pos[0]*75, self.ch_pos[1]*75]\r\n # blinking Grace\r\n if (self.draw_prog < 200):\r\n if (self.draw_prog % 80 < 40):\r\n # draw Grace\r\n self.display.blit(self.grace_img, pos)\r\n \r\n # sad face\r\n else:\r\n # draw Grace\r\n self.display.blit(self.grace_img, pos)\r\n if (self.ch_pos[0] == self.size[0]-1):\r\n if (self.ch_pos[1] == 0): # top right corner\r\n self.display.blit(self.sad_face_tr, (pos[0]-60, pos[1]+60))\r\n else: # right side\r\n self.display.blit(self.sad_face_br, (pos[0]-60, pos[1]+60))\r\n elif (self.ch_pos[1] == 0): # top side, without top right corner\r\n self.display.blit(self.sad_face_tl, (pos[0]+60, pos[1]+60))\r\n else:\r\n self.display.blit(self.sad_face_bl, (pos[0]+60, pos[1]-60))\r\n \r\n self.draw_prog += MapSection.SPEED\r\n self.draw_on_master()\r\n\r\n\r\n def display_update(self):\r\n if (len(self.move_sequence) == 0):\r\n self.draw()\r\n return\r\n \r\n if (self.move_sequence[0] == \"win\"):\r\n self.reach_egg()\r\n return\r\n\r\n if (self.move_sequence[0] == \"lose\"):\r\n self.collide()\r\n return\r\n\r\n if (self.draw_prog > MapSection.WAIT_TIME):\r\n self.draw_prog = 0\r\n self.ch_update(self.move_sequence.pop(0))\r\n return\r\n \r\n if (self.draw_prog < 100):\r\n self.draw()\r\n \r\n self.draw_prog += MapSection.SPEED\r\n return\r\n\r\n\r\n def draw_on_master(self):\r\n self.master.blit(pygame.transform.scale(self.display, MapSection.WINDOW_SIZE),(0,0))\r\n\r\n\r\n def draw(self):\r\n self.draw_map()\r\n csize = 
MapSection.CELL_SIZE\r\n pos = [self.ch_pos[0]*csize, self.ch_pos[1]*csize]\r\n\r\n if (len(self.move_sequence) != 0):\r\n if (self.move_sequence[0] == \"f\"):\r\n fwd = self.draw_prog * 0.01 * MapSection.CELL_SIZE\r\n if (self.ch_direction == \"l\"):\r\n pos[0] -= fwd\r\n elif (self.ch_direction == \"r\"):\r\n pos[0] += fwd\r\n elif (self.ch_direction == \"u\"):\r\n pos[1] -= fwd\r\n else: # self.ch_direction == \"d\"\r\n pos[1] += fwd\r\n\r\n if (self.ch_direction == \"l\"):\r\n self.display.blit(self.arrow_l, (pos[0]-25, pos[1]))\r\n elif (self.ch_direction == \"r\"):\r\n self.display.blit(self.arrow_r, (pos[0]+75, pos[1]))\r\n elif (self.ch_direction == \"u\"):\r\n self.display.blit(self.arrow_u, (pos[0], pos[1]-20))\r\n else: #self.ch_direction == \"d\"\r\n self.display.blit(self.arrow_d, (pos[0], pos[1]+75))\r\n\r\n self.display.blit(self.grace_img, tuple(pos))\r\n self.draw_on_master()\r\n\r\n\r\n def move(self, move_list):\r\n self.move_sequence = move_list\r\n\r\n \r\n # op: (string) in [\"f\", \"l\", \"r\"]\r\n def ch_update(self, op):\r\n if (op == \"f\"):\r\n if (self.ch_direction == \"l\"):\r\n self.ch_pos[0] -= 1\r\n elif (self.ch_direction == \"r\"):\r\n self.ch_pos[0] += 1\r\n elif (self.ch_direction == \"u\"):\r\n self.ch_pos[1] -= 1\r\n else: # self.ch_direction == \"d\"\r\n self.ch_pos[1] += 1\r\n \r\n elif (op == \"l\"):\r\n if (self.ch_direction == \"l\"):\r\n self.ch_direction = \"d\"\r\n elif (self.ch_direction == \"r\"):\r\n self.ch_direction = \"u\"\r\n elif (self.ch_direction == \"u\"):\r\n self.ch_direction = \"l\"\r\n else: # self.ch_direction == \"d\"\r\n self.ch_direction = \"r\"\r\n \r\n else: # op == \"r\"\r\n if (self.ch_direction == \"l\"):\r\n self.ch_direction = \"u\"\r\n elif (self.ch_direction == \"r\"):\r\n self.ch_direction = \"d\"\r\n elif (self.ch_direction == \"u\"):\r\n self.ch_direction = \"r\"\r\n else: # self.ch_direction == \"d\"\r\n self.ch_direction = \"l\"\r\n \r\n # win/loss detection\r\n x = self.ch_pos[0] # x coord\r\n y = self.ch_pos[1] # y coord\r\n\r\n if (self.ch_direction == \"l\"):\r\n x -= 1\r\n elif (self.ch_direction == \"r\"):\r\n x += 1\r\n elif (self.ch_direction == \"u\"):\r\n y -= 1\r\n else: # self.ch_direction == \"d\"\r\n y += 1\r\n\r\n # lose situation no.1\r\n if (len(self.move_sequence) == 0) :\r\n self.loss_bool = True\r\n self.move_sequence = [\"lose\"]\r\n self.collide()\r\n return \r\n \r\n if (self.move_sequence[0] != \"f\"):\r\n return\r\n\r\n # lose situation, collide on walls\r\n if (x >= self.size[0] or x < 0 or \r\n y >= self.size[1] or y < 0 or\r\n (self.env[x][y] != 0 and self.env[x][y] != 1)):\r\n self.loss_bool = True\r\n self.move_sequence = [\"lose\"]\r\n self.collide()\r\n return\r\n\r\n # winning situation\r\n if (self.env[x][y] == 1):\r\n self.move_sequence = [\"win\"]\r\n self.reach_egg()\r\n return\r\n","repo_name":"GraceFu/Rescoding","sub_path":"MapSection.py","file_name":"MapSection.py","file_ext":"py","file_size_in_byte":12361,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"41790488518","text":"# template for synthesizing instructions for acc_regs\n\nimport ila\nimport os\nfrom sim import ACC\n\ndef createILA():\n m = ila.Abstraction ('acc_regs')\n m.enable_parameterized_synthesis = 0\n\n # input ports\n cmd = m.inp ('cmd', 2)\n cmdaddr = m.inp ('cmdaddr', 16)\n cmddata = m.inp ('cmddata', 8)\n\n # arch states\n state = m.reg ('acc_state', 3)\n rd_addr = m.reg ('rd_addr', 16)\n wr_addr = m.reg ('wr_addr', 16)\n oplen = m.reg 
('acc_len', 16)\n    xram = m.mem ('XRAM', 16, 8)\n\n    bytes_read = m.reg ('bytes_read', 16)\n\n    # fetch function and fetch valid function\n    m.fetch_expr = ila.concat ([state, cmd, cmdaddr,cmddata])\n    m.fetch_valid = (cmd == 1) | (cmd == 2)\n\n    m.add_assumption (oplen > 0)\n\n    # acc_state\n    id_nxt = ila.ite (cmddata == 1, m.const (1, 3), m.const (0, 3))\n    state_nxt = ila.choice ('state_nxt', id_nxt, state)\n    m.set_next ('acc_state', state_nxt)\n\n    # bytes_read\n    bytes_read_inc = bytes_read + 1\n    bytes_read_rst = ila.ite (cmddata == 1, m.const (0, 16), bytes_read)\n    bytes_read_nxt = ila.choice ('bytes_read_nxt', [\n        m.const (0, 16), bytes_read_inc, bytes_read_rst, bytes_read])\n    m.set_next ('bytes_read', bytes_read_nxt)\n\n    return m\n\ndef checkOutPath (path):\n    if not os.path.exists (path):\n        os.makedirs (path)\n\ndef synthesize():\n    all_states = ['acc_state', 'bytes_read']\n\n    astPath = 'asts'\n    checkOutPath (astPath)\n\n    for addr in xrange (0xfe00, 0xfe04):\n        instrPath = '%s/0x%x' % (astPath, addr)\n        checkOutPath (instrPath)\n\n        m = createILA()\n\n        # decode functions\n        decode = ((m.getinp ('cmd') == 2) & (m.getinp ('cmdaddr') == addr))\n        m.decode_exprs = [decode & (m.getreg ('acc_state') == 0)]\n        decodeFile = '%s/%s' % (instrPath, 'decode')\n        m.exportOne (decode, decodeFile)\n\n        sim = lambda s: ACC().simulate (s)\n        for s in all_states:\n            m.synthesize (s, sim)\n            fileName = instrPath + '/' + s\n            nxt = m.get_next (s)\n            m.exportOne (nxt, fileName)\n\nif __name__ == '__main__':\n    synthesize()\n\n","repo_name":"yuex1994/iw_imdb","sub_path":"others/fw-verif/toy/hw/concurrent/ila/syn_instr.py","file_name":"syn_instr.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"41026490925","text":"def prueba(num1, num2, *args, **kwargs):\n    print(f\"the first value is {num1}\")\n    print(f\"the second value is {num2}\")\n\n    for arg in args:\n        print(f\"arg = {arg}\")\n\n\n    for clave,valor in kwargs.items():\n        print(f\"{clave} = {valor}\")\n\n\nprueba(15,50,100,200,300,400,x=\"one\",y=\"two\",z=\"three\")\n\n\n# Exercise on indefinite arguments (**kwargs) 1\n# Create a function called cantidad_atributos that counts the number of parameters\n# it receives and returns that count as its result.\ndef cantidad_atributos(**kwargs):\n    return len(kwargs)\n\nprint(cantidad_atributos(pito=\"x\",ekide=\"wow\"))\n\n# Exercise on indefinite arguments (**kwargs) 2\n# Create a function called lista_atributos that returns, as a list, the values of the\n# attributes passed as keyword arguments. The function must be prepared to receive\n# any number of arguments of this kind.\n\ndef lista_atributos(**kwargs):\n    lista = list(kwargs.values())\n    return lista\n\nprint(lista_atributos(x=2))\n\n# Exercise on indefinite arguments (**kwargs) 3\n# Create a function called describir_persona that takes a name as its parameter and\n# then an indeterminate number of keyword arguments. This function should print:\n#\n# Characteristics of {nombre}:\n# {argument_name}: {argument_value}\n# etc...\n# For example:\n#\n# describir_persona(\"María\", color_ojos=\"azules\", color_pelo=\"rubio\")\n#\n# will print:\n#\n# Characteristics of María:\n# color_ojos: azules\n# color_pelo: rubio\n\ndef describir_persona(nombre, **kwargs):\n    print(f\"Characteristics of {nombre}:\")\n    for i, v in kwargs.items():\n        print(f\"{i}: {v}\")\n\n\ndescribir_persona(\"Tomás\", color_ojos=\"azules\", color_pelo=\"rubio\")","repo_name":"everyonehateangels/Python-Projects","sub_path":"Kwargs.py","file_name":"Kwargs.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"44379707611","text":"\nimport struct\nfrom .utils import pack_variable_byte_integer, pack_str16\nfrom .property import Property\n\nfrom scrivo import logging\nlog = logging.getLogger(\"MQTT\")\n\n\nclass PackageFactory:\n\n    @classmethod\n    def build_properties_data(cls, properties_dict, protocol_version):\n        if protocol_version < 5:\n            return bytearray()\n        data = bytearray()\n        for property_name, property_value in properties_dict.items():\n            packet_property = Property.factory(name=property_name)\n            if packet_property is None:\n                log.warning('PackageFactory: property {} is not supported, it was ignored'.format(property_name))\n                continue\n            property_bytes = packet_property.dumps(property_value)\n            data.extend(property_bytes)\n        result = pack_variable_byte_integer(len(data))\n        result.extend(data)\n        return result\n\n\nclass MQTTPacket(PackageFactory):\n\n    @staticmethod\n    def ping():\n        # command = 0xC0 #11000000 PINGREQ\n        return struct.pack('!BB', 0xC0, 0)\n\n    @classmethod\n    def subscribe(cls, sbt, protocol, mid, **kwargs):\n\n        command = 0x80 ##SUBSCRIBE fixed header 0x80 + 0010 reserved\n        remaining_length = 2\n        topics = []\n        for s in sbt:\n            remaining_length += 2 + len(s.topic) + 1\n            topics.append(s.topic)\n\n        properties = cls.build_properties_data(kwargs, protocol.ver)\n        remaining_length += len(properties)\n        command = command | 0 << 3 | 0 << 2 | 1 << 1 | 0 << 0\n\n        packet = bytearray()\n        packet.append(command)\n        packet.extend(pack_variable_byte_integer(remaining_length))\n        packet.extend(struct.pack(\"!H\", mid))\n        packet.extend(properties)\n\n        for s in sbt:\n            pack_str16(packet, s.topic)\n            subscribe_options = s.retain_handling_options << 4 | s.retain_as_published << 3 | s.no_local << 2 | s.qos\n            packet.append(subscribe_options)\n\n        log.debug(\"[SUBSCRIBE] mid: {}, topic: {} ,packet: {}\".format(mid, topics, packet))\n\n        return packet\n\n\n    @classmethod\n    def publish(cls, msg, mid, protocol):\n\n        command = 0x30 # 00110000\n        command = command | ((msg.dup & 0x1) << 3) | (msg.qos << 1) | (msg.retain & 0x1)\n\n        packet = bytearray()\n        packet.append(command)\n\n        
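# remaining length counts the 2-byte topic length prefix, the topic and the payload; the packet id and properties are added below when needed\n        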
remaining_length = 2 + len(msg.topic) + msg.payload_size\n prop_bytes = cls.build_properties_data(msg.properties.a_dict(), protocol.ver)\n remaining_length += len(prop_bytes)\n\n if msg.qos > 0:\n # For message id\n remaining_length += 2\n\n packet.extend(pack_variable_byte_integer(remaining_length))\n pack_str16(packet, msg.topic)\n\n if msg.qos > 0:\n # For message id\n packet.extend(struct.pack(\"!H\", mid))\n\n packet.extend(prop_bytes)\n packet.extend(msg.payload)\n\n # log.debug(\"[PUBLISH] topic: {} , msg: {} ,packet: {}\".format(msg.topic, msg.payload, packet))\n\n return packet\n\n\n\n @classmethod\n def login(cls, client_id, username, password, clean_session, keepalive, protocol, will_message=None, **kwargs):\n # MQTT Commands CONNECT\n command = 0x10 # CONNECT\n\n remaining_length = 2 + len(protocol.name) + 1 + 1 + 2 + 2 + len(client_id)\n\n connect_flags = 0\n if clean_session:\n connect_flags |= 0x02 #clean session\n\n #will_message\n if will_message:\n will_prop_bytes = cls.build_properties_data(will_message.properties.a_dict(), protocol.ver)\n remaining_length += 2 + len(will_message.topic) + 2 + len(will_message.payload) + len(will_prop_bytes)\n connect_flags |= 0x04 | ((will_message.qos & 0x03) << 3) | ((will_message.retain & 0x01) << 5)\n\n\n #user\n if username is not None:\n remaining_length += 2 + len(username)\n connect_flags |= 0x80\n if password is not None:\n connect_flags |= 0x40\n remaining_length += 2 + len(password)\n\n packet = bytearray()\n packet.append(command)\n\n prop_bytes = cls.build_properties_data(kwargs, protocol.ver)\n remaining_length += len(prop_bytes)\n\n packet.extend(pack_variable_byte_integer(remaining_length))\n packet.extend(struct.pack(\"!H\" + str(len(protocol.name)) + \"sBBH\",\n len(protocol.name),\n protocol.name,\n protocol.ver,\n connect_flags,\n keepalive))\n packet.extend(prop_bytes)\n\n pack_str16(packet, client_id)\n\n if will_message:\n packet += will_prop_bytes\n pack_str16(packet, will_message.topic)\n pack_str16(packet, will_message.payload)\n\n if username is not None:\n pack_str16(packet, username)\n\n if password is not None:\n pack_str16(packet, password)\n\n return packet\n\n","repo_name":"straga/scrivo_project","sub_path":"project/ac_xm_hisense_control/board_psram/root/lib/scrivo_mqtt/mqtt/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":5034,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"71386564673","text":"from ipywidgets import widgets, Layout, Box, GridspecLayout\n\n##Basic mcq\n\ndef create_multipleChoice_widget(description, options, correct_answer, hint):\n if correct_answer not in options:\n options.append(correct_answer)\n \n correct_answer_index = options.index(correct_answer)\n \n radio_options = [(words, i) for i, words in enumerate(options)]\n alternativ = widgets.RadioButtons(\n options = radio_options,\n description = '',\n disabled = False,\n indent = False,\n align = 'center',\n )\n \n description_out = widgets.Output(layout=Layout(width='auto'))\n \n with description_out:\n print(description)\n \n feedback_out = widgets.Output()\n\n def check_selection(b):\n a = int(alternativ.value)\n if a==correct_answer_index:\n s = '\\x1b[6;30;42m' + \"correct\" + '\\x1b[0m' +\"\\n\"\n else:\n s = '\\x1b[5;30;41m' + \"try again\" + '\\x1b[0m' +\"\\n\"\n with feedback_out:\n feedback_out.clear_output()\n print(s)\n return\n \n check = widgets.Button(description=\"check\")\n check.on_click(check_selection)\n \n hint_out = 
widgets.Output()\n \n def hint_selection(b):\n with hint_out:\n print(hint)\n \n with feedback_out:\n feedback_out.clear_output()\n print(hint)\n \n hintbutton = widgets.Button(description=\"hint\")\n hintbutton.on_click(hint_selection)\n \n return widgets.VBox([description_out, \n alternativ, \n widgets.HBox([hintbutton, check]), feedback_out], \n layout=Layout(display='flex',\n flex_flow='column',\n align_items='stretch',\n width='auto')) \n\ndef create_textinputquiz_widget(description, text_description, correct_answer, a2, hint): ##grid for option table\n correct_answer = correct_answer ##float ##str \n alternativ = widgets.Text(value = '',\n placeholder = '',\n description = '',\n disabled = False, layout=(Layout(width = 'auto'))\n )\n##question description\n description_out = widgets.Output(layout=Layout(width='auto')) \n with description_out:\n print(description)\n##description before text widget \n text_description_out = widgets.Output(layout=Layout(width='auto')) \n with text_description_out:\n print (text_description)\n##description after text widget e.g. units \n a2_out = widgets.Output(layout=Layout(width='auto')) \n with a2_out:\n print(a2) \n##\n feedback_out = widgets.Output()\n def check_selection(b):\n a = alternativ.value\n if a==correct_answer:\n s = '\\x1b[6;30;42m' + \"correct\" + '\\x1b[0m' +\"\\n\" #green color\n else:\n s = '\\x1b[5;30;41m' + \"try again\" + '\\x1b[0m' +\"\\n\" #red color\n with feedback_out:\n feedback_out.clear_output()\n print(s)\n return\n \n check = widgets.Button(description=\"check\")\n check.on_click(check_selection)\n##\n hint_out = widgets.Output() \n def hint_selection(b):\n with hint_out:\n print(hint) \n with feedback_out:\n feedback_out.clear_output()\n print(hint)\n \n hintbutton = widgets.Button(description=\"hint\")\n hintbutton.on_click(hint_selection) \n\n return widgets.VBox([description_out,\n widgets.HBox([text_description_out, alternativ, a2_out]), \n widgets.HBox([hintbutton, check]), feedback_out], \n layout=Layout(display='flex',\n flex_flow='column',\n align_items='stretch',\n width='auto'))\n\n","repo_name":"cornzyblack/Coursera_nlp_deeplearningai_course_1","sub_path":"week_2/quiz_creator.py","file_name":"quiz_creator.py","file_ext":"py","file_size_in_byte":3967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24775498704","text":"#!/usr/bin/python3\n\n\n\n# ---------------- IMPORTATIONS ----------------\n\n#quantum lib\nimport qiskit as q\n\n\n\n\n\n\n# ---------------- CLASS ----------------\nclass Qomputer:\n\n\t#initialization\n\tdef __init__(self, qbit_nbr, measure_nbr):\n\t\tself.simulator = q.Aer.get_backend('qasm_simulator')\n\t\tself.circuit = q.QuantumCircuit(qbit_nbr, measure_nbr)\n\n\t#console display\n\tdef showCircuit(self):\n\t\tprint( self.circuit.draw(output='text') )\n\n\t#execution\n\tdef run(self, shots):\n\t\traw_results = q.execute(\n\t\t\tself.circuit,\n\t\t\tself.simulator,\n\t\t\tshots=shots\n\t\t).result().get_counts(\n\t\t\tself.circuit\n\t\t)\n\n\t\t#format results\n\t\tresults = []\n\t\tfor r in raw_results:\n\t\t\tresults.append( raw_results[r] )\n\n\t\treturn results\n","repo_name":"iasebsil83/Grozzle","sub_path":"src/qomputer.py","file_name":"qomputer.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71407895235","text":"# SSW 540 - Assignment 05 - P5: String manipulation\n# Akshay Sunderwani\n\n# method to convert string to 
plural\ndef plurals(string):\n    wordlist = string.split() # get all the word(s) from the string\n    vowelslist = ('ay', 'ey', 'iy', 'oy', 'uy', 'Ay', 'Ey', 'Iy', 'Oy', 'Uy', 'aY',\n                  'eY', 'iY', 'oY', 'uY', 'AY', 'EY', 'IY', 'OY', 'UY') # endings where 'y' follows a vowel, so a plain 's' is added\n    endswithylist = ('y', 'Y')\n    endswithelselist = ('o', 'ch', 's', 'sh', 'x', 'z', 'O', 'CH', 'S', 'SH', 'X', 'Z', 'cH', 'Ch', 'sH', 'Sh')\n    pluralstring = '' # string to store the response\n    for words in wordlist:\n        if words.endswith(vowelslist):\n            pluralstring += words + 's' + ' '\n        elif words.endswith(endswithylist):\n            pluralstring += words[:-1] + 'ies' + ' '\n        elif words.endswith(endswithelselist):\n            pluralstring += words + 'es' + ' '\n        else:\n            pluralstring += words + 's' + ' '\n\n        if words == wordlist[-1]:\n            pluralstring = pluralstring[:-1]\n    return pluralstring # return the final response\n\n\ndef getuserstring():\n    name = input('Hello and welcome! may I know your name : ')\n    line = input('Hello! ' + name + ', please enter any statement or line : ')\n    print('Plural of the line is : ', plurals(line))\n\n\ngetuserstring()\n","repo_name":"akshya672222/PycharmProjects","sub_path":"untitled/SSW540_ASSIGNMENT/Assignment05_AkshaySunderwani_SSW540.py","file_name":"Assignment05_AkshaySunderwani_SSW540.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"38613415534","text":"from telebot import types\n\nfrom data.loader import bot\nfrom helpers import api\nfrom keyboards.reply import start_menu, start_request_kb\n\n\n@bot.message_handler(commands=['start'])\ndef command_start(message: types.Message):\n    chat_id = message.chat.id\n    first_name = message.from_user.first_name\n\n    service_ids = api.get_service_profiles_ids()\n    simple_users_ids = api.get_simple_users_profiles_ids()\n\n    if chat_id in service_ids[\"service_profiles\"]:\n        bot.send_message(chat_id, \"Hello, service owner\")\n        return\n    if chat_id in simple_users_ids[\"simple_users\"]:\n        bot.send_message(chat_id, \"Hello, regular user\")\n        bot.send_message(chat_id, \"\"\"\nWrite your request in the format shown below.\n\nSample request:\n\nTitle: your title\nContent/description: your content/description\nUser's user_name: your Telegram user_name\nHashtags: your hashtags\nGeolocation\n\"\"\", parse_mode=\"HTML\", reply_markup=start_request_kb())\n        return\n\n    if chat_id not in service_ids[\"service_profiles\"] or chat_id not in simple_users_ids[\"simple_users\"]:\n        bot.send_message(chat_id, f\"Greetings, {first_name}. To start using the bot, please complete registration\",\n                         reply_markup=start_menu())\n","repo_name":"IldarSaygafarov2/hisay_bot","sub_path":"handlers/users/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"7116703252","text":"\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.keras import activations\nfrom tensorflow.python.keras import initializers\nfrom tensorflow.python.keras.utils import tf_utils\nfrom tensorflow.python.layers import base as base_layer\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import clip_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import partitioned_variables\nfrom tensorflow.python.ops import tensor_array_ops\nfrom tensorflow.python.ops.rnn_cell_impl import LayerRNNCell, LSTMStateTuple, _WEIGHTS_VARIABLE_NAME, \\\n    _BIAS_VARIABLE_NAME, LSTMCell\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util import nest\n\n\nclass DualLSTMCell(LayerRNNCell):\n\n    def __init__(self, num_units,\n                 cell_clip=None,\n                 initializer=None, num_proj=None, proj_clip=None,\n                 num_unit_shards=None, num_proj_shards=None,\n                 forget_bias=1.0, state_is_tuple=True,\n                 activation=None, reuse=None, name=None, dtype=None, **kwargs):\n\n        super(DualLSTMCell, self).__init__(\n            _reuse=reuse, name=name, dtype=dtype, **kwargs)\n        if not state_is_tuple:\n            logging.warn(\"%s: Using a concatenated state is slower and will soon be \"\n                         \"deprecated. Use state_is_tuple=True.\", self)\n        if num_unit_shards is not None or num_proj_shards is not None:\n            logging.warn(\n                \"%s: The num_unit_shards and proj_unit_shards parameters are \"\n                \"deprecated and will be removed in Jan 2017. 
\"\n \"Use a variable scope with a partitioner instead.\", self)\n if context.executing_eagerly() and context.num_gpus() > 0:\n logging.warn(\"%s: Note that this cell is not optimized for performance. \"\n \"Please use tf.contrib.cudnn_rnn.CudnnLSTM for better \"\n \"performance on GPU.\", self)\n\n # Inputs must be 2-dimensional.\n self.input_spec = base_layer.InputSpec(ndim=2)\n\n self._num_units = num_units\n self._cell_clip = cell_clip\n self._initializer = initializers.get(initializer)\n self._num_proj = num_proj\n self._proj_clip = proj_clip\n self._num_unit_shards = num_unit_shards\n self._num_proj_shards = num_proj_shards\n self._forget_bias = forget_bias\n self._state_is_tuple = state_is_tuple\n if activation:\n self._activation = activations.get(activation)\n else:\n self._activation = math_ops.tanh\n\n if num_proj:\n self._state_size = (\n LSTMStateTuple(num_units, num_proj)\n if state_is_tuple else num_units + num_proj)\n self._output_size = num_proj\n else:\n self._state_size = (\n LSTMStateTuple(2 * num_units, num_units)\n if state_is_tuple else 2 * num_units)\n self._output_size = num_units\n\n self.num_gates = 8\n @property\n def state_size(self):\n return self._state_size\n\n @property\n def output_size(self):\n return self._output_size\n\n @tf_utils.shape_type_conversion\n def build(self, inputs_shape):\n if inputs_shape[-1] is None:\n raise ValueError(\"Expected inputs.shape[-1] to be known, saw shape: %s\"\n % inputs_shape)\n\n input_depth = inputs_shape[-1]\n h_depth = self._num_units if self._num_proj is None else self._num_proj\n maybe_partitioner = (\n partitioned_variables.fixed_size_partitioner(self._num_unit_shards)\n if self._num_unit_shards is not None\n else None)\n self._kernel = self.add_variable(\n _WEIGHTS_VARIABLE_NAME,\n shape=[input_depth + h_depth, self.num_gates * self._num_units],\n initializer=self._initializer,\n partitioner=maybe_partitioner)\n if self.dtype is None:\n initializer = init_ops.zeros_initializer\n else:\n initializer = init_ops.zeros_initializer(dtype=self.dtype)\n self._bias = self.add_variable(\n _BIAS_VARIABLE_NAME,\n shape=[self.num_gates * self._num_units],\n initializer=initializer)\n\n if self._num_proj is not None:\n maybe_proj_partitioner = (\n partitioned_variables.fixed_size_partitioner(self._num_proj_shards)\n if self._num_proj_shards is not None\n else None)\n self._proj_kernel = self.add_variable(\n \"projection/%s\" % _WEIGHTS_VARIABLE_NAME,\n shape=[self._num_units, self._num_proj],\n initializer=self._initializer,\n partitioner=maybe_proj_partitioner)\n\n self.built = True\n\n def call(self, inputs, state):\n\n num_proj = self._num_units if self._num_proj is None else self._num_proj\n sigmoid = math_ops.sigmoid\n\n if self._state_is_tuple:\n (c_d_prev, m_prev) = state\n c_prev = array_ops.slice(c_d_prev, [0, 0], [-1, self._num_units])\n d_prev = array_ops.slice(c_d_prev, [0, self._num_units], [-1, num_proj])\n else:\n c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])\n m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])\n d_prev = []\n\n input_size = inputs.get_shape().with_rank(2)[1]\n if input_size.value is None:\n raise ValueError(\"Could not infer input size from inputs.get_shape()[-1]\")\n\n # i = input_gate, j = new_input, f = forget_gate, o = output_gate\n lstm_matrix = math_ops.matmul(\n array_ops.concat([inputs, m_prev], 1), self._kernel)\n lstm_matrix = nn_ops.bias_add(lstm_matrix, self._bias)\n\n i, j, f, o, d_i, d_j, d_f, d_o = array_ops.split(\n value=lstm_matrix, 
num_or_size_splits=self.num_gates, axis=1)\n # Diagonal connections\n\n c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) * self._activation(j))\n # d_mem = (sigmoid(d + self._forget_bias) * d_prev + sigmoid(c - c_prev) * self._activation(j))\n d_mem = (sigmoid(d_f + self._forget_bias) * d_prev + sigmoid(d_i) * self._activation(d_j))\n # d_mem = ((sigmoid(d) - c_prev) * d_prev)\n if self._cell_clip is not None:\n # pylint: disable=invalid-unary-operand-type\n c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)\n # pylint: enable=invalid-unary-operand-type\n\n # m = sigmoid(o) * self._activation(d_mem)\n m = sigmoid(o) * self._activation(c) * self._activation(d_mem) * sigmoid(d_o)\n\n if self._num_proj is not None:\n m = math_ops.matmul(m, self._proj_kernel)\n\n if self._proj_clip is not None:\n # pylint: disable=invalid-unary-operand-type\n m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)\n # pylint: enable=invalid-unary-operand-type\n\n c = array_ops.concat([c, d_mem], 1)\n new_state = (LSTMStateTuple(c, m) if self._state_is_tuple else\n array_ops.concat([c, m], 1))\n return m, new_state\n\n def get_config(self):\n config = {\n \"num_units\": self._num_units,\n \"cell_clip\": self._cell_clip,\n \"initializer\": initializers.serialize(self._initializer),\n \"num_proj\": self._num_proj,\n \"proj_clip\": self._proj_clip,\n \"num_unit_shards\": self._num_unit_shards,\n \"num_proj_shards\": self._num_proj_shards,\n \"forget_bias\": self._forget_bias,\n \"state_is_tuple\": self._state_is_tuple,\n \"activation\": activations.serialize(self._activation),\n \"reuse\": self._reuse,\n }\n base_config = super(DualLSTMCell, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\ndef _enumerated_map_structure_up_to(shallow_structure, map_fn, *args, **kwargs):\n ix = [0]\n def enumerated_fn(*inner_args, **inner_kwargs):\n r = map_fn(ix[0], *inner_args, **inner_kwargs)\n ix[0] += 1\n return r\n return nest.map_structure_up_to(shallow_structure,\n enumerated_fn, *args, **kwargs)\n\n\ndef _default_dropout_state_filter_visitor(substate):\n if isinstance(substate, LSTMStateTuple):\n # Do not perform dropout on the memory state.\n return LSTMStateTuple(c=False, h=True)\n elif isinstance(substate, tensor_array_ops.TensorArray):\n return False\n return True\n\n\n","repo_name":"mokarakaya/sequence-based-collaborative-filtering","sub_path":"serendipity/deep/dualLSTM.py","file_name":"dualLSTM.py","file_ext":"py","file_size_in_byte":8049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12496905001","text":"import sys\nimport argparse\nimport datetime\nimport numpy as np\nimport pandas as pd\nimport tables as tb\n\nfrom invisible_cities.core import system_of_units as units\n\nimport antea.database.load_db as db\nimport antea.reco.reco_functions as rf\nimport antea.reco.mctrue_functions as mcf\nimport antea.io.mc_io as mcio\nimport antea.elec.tof_functions as tf\nimport antea.mcsim.sensor_functions as snsf\n\nfrom antea.core.exceptions import WaveformEmptyTable\n\nfrom antea.utils.map_functions import load_map\nfrom antea.io.mc_io import read_sensor_bin_width_from_conf\n\n\ndef parse_args(args):\n parser = argparse.ArgumentParser()\n parser.add_argument('first_file' , type = int, help = \"first file (inclusive)\" )\n parser.add_argument('n_files' , type = int, help = \"number of files to analize\")\n parser.add_argument('in_path' , help = \"input files path\" )\n 
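# input files are opened further down as in_path + file_name + '.<index>.h5'\n    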
parser.add_argument('file_name' , help = \"name of input files\" )\n parser.add_argument('rpos_file' , help = \"Rpos table\" )\n parser.add_argument('out_path' , help = \"output files path\" )\n return parser.parse_args()\n\n\n# ## modified ANTEA function by Paola, branch: time-jitter\n# def tdc_convolution(tof_response, spe_response, s_id, time_window):\n# pe_vect = np.zeros(time_window)\n# sel_tof = tof_response[(tof_response.sensor_id == s_id) &\n# (tof_response.time_bin < time_window)]\n# pe_vect[sel_tof.time_bin.values] = sel_tof.charge.values\n# tdc_conv = tf.convolve_tof(spe_response, pe_vect)\n# return tdc_conv\n\n# ## modified ANTEA function by Paola, branch: time-jitter\n# def translate_charge_conv_to_wf_df(event_id, s_id, conv_vect):\n# keys = np.array(['event_id', 'sensor_id', 'time', 'charge'])\n# t_bin = np.where(conv_vect>0)[0]\n# charge = conv_vect[conv_vect>0]\n# evt = np.full(len(t_bin), event_id)\n# sns_id_full = np.full(len(t_bin), s_id)\n# a_wf = np.array([evt, sns_id_full, t_bin, charge])\n# wf_df = pd.DataFrame(a_wf.T, columns=keys).astype({'event_id': 'int32',\n# 'sensor_id': 'int32',\n# 'time' : 'int32'})\n# return wf_df\n\ndef get_hits_info(hits, true_pos):\n pos_hits = np.array([hits.x, hits.y, hits.z]).transpose()\n distances = np.linalg.norm(np.subtract(pos_hits, true_pos), axis=1)\n max_dist = distances.max()\n tot_hit_energy = hits.energy.sum()\n return max_dist, tot_hit_energy\n\n\n# ## modified ANTEA function by Paola, branch: time-jitter\n# def find_first_time_of_sensors(tof_response, sns_ids, sigma = 30, n_pe= 1):\n# tof = tof_response[tof_response.sensor_id.isin(sns_ids)]\n# if tof.empty:\n# raise WaveformEmptyTable(\"Tof dataframe is empty\")\n# xx = tof.time.values\n# #tof.insert(4, 'jit_time', np.random.normal(tof.time.values, sigma))\n# tof['jit_time'] = np.random.normal(xx, sigma)\n#\n# first_times = tof.sort_values(by=['jit_time']).iloc[0:n_pe]\n# min_t = first_times['jit_time'].mean()\n# min_ids = first_times.sensor_id.values\n# min_charges = first_times.charge.values\n#\n# return np.abs(min_ids), min_charges, min_t\n\n\n# ## modified ANTEA function by Paola, branch: time-jitter\n# def find_coincidence_timestamps(tof_response, sns1, sns2, sigma, npe):\n# min1, q1, time1 = find_first_time_of_sensors(tof_response, -sns1, sigma, npe)\n# min2, q2, time2 = find_first_time_of_sensors(tof_response, -sns2, sigma, npe)\n# return min1, min2, q1, q2, time1, time2\n\n\ndef get_phi(pos, qs):\n pos_phi = rf.from_cartesian_to_cyl(np.array(pos))[:,1]\n diff_sign = min(pos_phi) < 0 < max(pos_phi)\n if diff_sign & (np.abs(np.min(pos_phi))>np.pi/2.):\n pos_phi[pos_phi<0] = np.pi + np.pi + pos_phi[pos_phi<0]\n mean_phi = np.average(pos_phi, weights=qs)\n var_phi = np.average((pos_phi-mean_phi)**2, weights=qs)\n return var_phi\n\ndef sel_coord(pos1, pos2, qs1, qs2, th):\n sel1 = qs1 > th\n sel2 = qs2 > th\n return pos1[sel1], pos2[sel2], qs1[sel1], qs2[sel2]\n\ndef get_sipm_ave_pos(DataSiPM_idx, min_id, qs):\n sipms = DataSiPM_idx.loc[min_id]\n sns_pos = np.array([sipms.X.values, sipms.Y.values, sipms.Z.values]).transpose()\n ave_pos = np.average(sns_pos, weights=qs, axis=0)\n return ave_pos\n\n\n### read sensor positions from database\nDataSiPM = db.DataSiPMsim_only('petalo', 0)\nDataSiPM_idx = DataSiPM.set_index('SensorID')\nn_sipms = len(DataSiPM)\nfirst_sipm = DataSiPM_idx.index.min()\n\n\n### parameters for single photoelectron convolution in SiPM response\ntau_sipm = [100, 15000]\ntime_window = 5000 #ps\ntime = np.arange(0, time_window)\n#time_bin = 5 # 
ps\n#time = np.arange(0, 80000, time_bin)\n#time = time + (time_bin/2)\nspe_resp, norm = tf.apply_spe_dist(time, tau_sipm)\n\nprint(datetime.datetime.now())\n\narguments = parse_args(sys.argv)\nstart = arguments.first_file\nnumb = arguments.n_files\nin_path = arguments.in_path\nfile_name = arguments.file_name\nrpos_file = arguments.rpos_file\nout_path = arguments.out_path\n\nthr_r = 4\nthr_phi = 4\nthr_z = 4\nthr_e = 2\n\nsigma_sipm = 80 #ps\nsigma_elec = 30 #ps\nn_pe = 10\n\nevt_file = out_path + f'tof_coinc_jitter_{start}_{numb}'\n\nRpos = load_map(rpos_file,\n group = \"Radius\",\n node = f\"f{int(thr_r)}pes150bins\",\n x_name = \"PhiRms\",\n y_name = \"Rpos\",\n u_name = \"RposUncertainty\")\n\ncharge_range = (2000, 2250) # pde 0.30, n=1.6\n\nprint(f'Charge range = {charge_range}')\nc0 = c1 = c2 = c3 = c4 = 0\nbad = 0\nboh0 = boh1 = 0\nbelow_thr = 0\n\ntrue_r1, true_phi1, true_z1 = [], [], []\nreco_r1, reco_phi1, reco_z1 = [], [], []\ntrue_r2, true_phi2, true_z2 = [], [], []\nreco_r2, reco_phi2, reco_z2 = [], [], []\n\nsns_response1, sns_response2 = [], []\n\ntimestamp_thr = 0.25\nfirst_sipm1, first_sipm2 = [], []\nfirst_time1, first_time2 = [], []\ntrue_time1, true_time2 = [], []\ntouched_sipms1, touched_sipms2 = [], []\nmax_hit_distance1, max_hit_distance2 = [], []\n\nevent_ids = []\n\n\nfor ifile in range(start, start+numb):\n filename = in_path + file_name + f'.{ifile}.h5'\n try:\n sns_response = mcio.load_mcsns_response(filename)\n except ValueError:\n print(f'File {filename} not found')\n continue\n except OSError:\n print(f'File {filename} not found')\n continue\n except KeyError:\n print(f'No object named MC/sns_response in file {filename}')\n continue\n print(f'Analyzing file {filename}')\n\n particles = mcio.load_mcparticles(filename)\n hits = mcio.load_mchits (filename)\n\n #sns_response = snsf.apply_sipm_pde(sns_response, 0.3) #Uncomment when PDE = 1 in the simulation\n sns_response = snsf.apply_charge_fluctuation(sns_response, DataSiPM_idx)\n tof_response = mcio.load_mcTOFsns_response(filename)\n\n tof_bin_size = read_sensor_bin_width_from_conf(filename, tof=True)\n\n events = particles.event_id.unique()\n\n for evt in events:\n\n evt_sns = sns_response[sns_response.event_id == evt]\n evt_sns = rf.find_SiPMs_over_threshold(evt_sns, threshold=thr_e)\n if len(evt_sns) == 0:\n boh0 += 1\n continue\n\n ids_over_thr = evt_sns.sensor_id.astype('int64').values\n\n evt_parts = particles [particles .event_id == evt]\n evt_hits = hits [hits .event_id == evt]\n evt_tof = tof_response[tof_response.event_id == evt]\n\n if evt_hits.energy.sum() < 0.511:\n below_thr += 1\n continue\n if len(evt_tof) == 0:\n boh1 += 1\n continue\n\n evt_tof = evt_tof[evt_tof.sensor_id.isin(-ids_over_thr)]\n\n pos1, pos2, q1, q2, true_pos1, true_pos2, true_t1, true_t2, sns1, sns2 = rf.reconstruct_coincidences(evt_sns, charge_range, DataSiPM_idx, evt_parts, evt_hits)\n if len(pos1) == 0 or len(pos2) == 0:\n c0 += 1\n continue\n\n q1 = np.array(q1)\n q2 = np.array(q2)\n pos1 = np.array(pos1)\n pos2 = np.array(pos2)\n\n ## Calculate R\n r1 = r2 = None\n\n pos1r, pos2r, q1r, q2r = sel_coord(pos1, pos2, q1, q2, thr_r)\n if len(pos1r) == 0 or len(pos2r) == 0:\n c1 += 1\n continue\n\n var_phi1 = get_phi(pos1r, q1r)\n r1 = Rpos(np.sqrt(var_phi1)).value\n\n var_phi2 = get_phi(pos2r, q2r)\n r2 = Rpos(np.sqrt(var_phi2)).value\n\n pos1phi, pos2phi, q1phi, q2phi = sel_coord(pos1, pos2, q1, q2, thr_phi)\n if len(q1phi) == 0 or len(q2phi) == 0:\n c2 += 1\n continue\n\n phi1 = phi2 = None\n reco_cart_pos = 
np.average(pos1phi, weights=q1phi, axis=0)\n phi1 = np.arctan2(reco_cart_pos[1], reco_cart_pos[0])\n reco_cart_pos = np.average(pos2phi, weights=q2phi, axis=0)\n phi2 = np.arctan2(reco_cart_pos[1], reco_cart_pos[0])\n\n\n pos1z, pos2z, q1z, q2z = sel_coord(pos1, pos2, q1, q2, thr_z)\n if len(q1z) == 0 or len(q2z) == 0:\n c3 += 1\n continue\n\n z1 = z2 = None\n reco_cart_pos = np.average(pos1z, weights=q1z, axis=0)\n z1 = reco_cart_pos[2]\n reco_cart_pos = np.average(pos2z, weights=q2z, axis=0)\n z2 = reco_cart_pos[2]\n\n\n _, _, q1e, q2e = sel_coord(pos1, pos2, q1, q2, thr_e)\n if len(q1e) == 0 or len(q2e) == 0:\n c4 += 1\n continue\n\n\n times = evt_tof.time_bin.values * tof_bin_size / units.ps\n evt_tof['time'] = np.round(np.random.normal(times, sigma_sipm)).astype(int)\n\n ## produce a TOF dataframe with convolved time response\n tof_sns = evt_tof.sensor_id.unique()\n evt_tof_exp_dist = []\n for s_id in tof_sns:\n tdc_conv = tf.tdc_convolution(evt_tof, spe_resp, s_id, time_window)\n tdc_conv_df = tf.translate_charge_conv_to_wf_df(evt, s_id, tdc_conv)\n evt_tof_exp_dist.append(tdc_conv_df)\n evt_tof_exp_dist = pd.concat(evt_tof_exp_dist)\n evt_tof_exp_dist = evt_tof_exp_dist[evt_tof_exp_dist.charge > timestamp_thr/norm]\n\n try:\n min_id1, min_id2, q1, q2, min_t1, min_t2 = rf.find_coincidence_timestamps(evt_tof_exp_dist, sns1, sns2, sigma_elec, n_pe)\n ave_pos1 = get_sipm_ave_pos(DataSiPM_idx, min_id1, q1)\n ave_pos2 = get_sipm_ave_pos(DataSiPM_idx, min_id2, q2)\n except:\n min_t1, min_t2 = -1, -1\n ave_pos1, ave_pos2 = [0, 0, 0], [0, 0, 0]\n\n first_sipm1.append(ave_pos1)\n first_time1.append(min_t1*tof_bin_size/units.ps)\n\n first_sipm2.append(ave_pos2)\n first_time2.append(min_t2*tof_bin_size/units.ps)\n\n\n ## extract information about the interaction being photoelectric-like\n positions = np.array([evt_hits.x, evt_hits.y, evt_hits.z]).transpose()\n scalar_products = positions.dot(true_pos1)\n max_dist1, tot_hit_energy1 = get_hits_info(evt_hits[scalar_products >= 0], true_pos1)\n max_dist2, tot_hit_energy2 = get_hits_info(evt_hits[scalar_products < 0], true_pos2)\n\n\n event_ids .append(evt)\n reco_r1 .append(r1)\n reco_phi1 .append(phi1)\n reco_z1 .append(z1)\n true_r1 .append(np.sqrt(true_pos1[0]**2 + true_pos1[1]**2))\n true_phi1 .append(np.arctan2(true_pos1[1], true_pos1[0]))\n true_z1 .append(true_pos1[2])\n sns_response1 .append(sum(q1e))\n touched_sipms1 .append(len(q1e))\n true_time1 .append(true_t1/units.ps)\n max_hit_distance1.append(max_dist1)\n reco_r2 .append(r2)\n reco_phi2 .append(phi2)\n reco_z2 .append(z2)\n true_r2 .append(np.sqrt(true_pos2[0]**2 + true_pos2[1]**2))\n true_phi2 .append(np.arctan2(true_pos2[1], true_pos2[0]))\n true_z2 .append(true_pos2[2])\n sns_response2 .append(sum(q2e))\n touched_sipms2 .append(len(q2e))\n true_time2 .append(true_t2/units.ps)\n max_hit_distance2.append(max_dist2)\n\nprint(datetime.datetime.now())\n\na_true_r1 = np.array(true_r1)\na_true_phi1 = np.array(true_phi1)\na_true_z1 = np.array(true_z1)\na_reco_r1 = np.array(reco_r1)\na_reco_phi1 = np.array(reco_phi1)\na_reco_z1 = np.array(reco_z1)\n\na_sns_response1 = np.array(sns_response1)\na_touched_sipms1 = np.array(touched_sipms1)\na_first_sipm1 = np.array(first_sipm1)\na_first_time1 = np.array(first_time1)\na_true_time1 = np.array(true_time1)\na_max_hit_distance1 = np.array(max_hit_distance1)\n\na_true_r2 = np.array(true_r2)\na_true_phi2 = np.array(true_phi2)\na_true_z2 = np.array(true_z2)\na_reco_r2 = np.array(reco_r2)\na_reco_phi2 = np.array(reco_phi2)\na_reco_z2 = 
np.array(reco_z2)\n\na_sns_response2 = np.array(sns_response2)\na_touched_sipms2 = np.array(touched_sipms2)\na_first_sipm2 = np.array(first_sipm2)\na_first_time2 = np.array(first_time2)\na_true_time2 = np.array(true_time2)\na_max_hit_distance2 = np.array(max_hit_distance2)\n\na_event_ids = np.array(event_ids)\n\nnp.savez(evt_file,\n a_true_r1=a_true_r1, a_true_phi1=a_true_phi1, a_true_z1=a_true_z1,\n a_true_r2=a_true_r2, a_true_phi2=a_true_phi2, a_true_z2=a_true_z2,\n a_reco_r1=a_reco_r1, a_reco_phi1=a_reco_phi1, a_reco_z1=a_reco_z1,\n a_reco_r2=a_reco_r2, a_reco_phi2=a_reco_phi2, a_reco_z2=a_reco_z2,\n a_touched_sipms1=a_touched_sipms1, a_touched_sipms2=a_touched_sipms2,\n a_sns_response1=a_sns_response1, a_sns_response2=a_sns_response2,\n a_first_sipm1=a_first_sipm1, a_first_time1=a_first_time1,\n a_first_sipm2=a_first_sipm2, a_first_time2=a_first_time2,\n a_true_time1=a_true_time1, a_true_time2=a_true_time2,\n a_max_hit_distance1=a_max_hit_distance1, a_max_hit_distance2=a_max_hit_distance2,\n a_event_ids=a_event_ids)\n\nprint(f'Not passing charge threshold = {boh0}')\nprint(f'Not passing tof charge threshold = {boh1}')\nprint(f'Not a coincidence: {c0}')\nprint(f'Number of coincidences: {len(a_event_ids)}')\nprint(f'Not passing threshold r = {c1}, phi = {c2}, z = {c3}, E = {c4}')\nprint(f'Events below true energy threshold = {below_thr}')\n","repo_name":"carmenromo/Analysis","sub_path":"full_body_phantom_paper/tof_coincidences_jitters.py","file_name":"tof_coincidences_jitters.py","file_ext":"py","file_size_in_byte":14462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36046542214","text":"import _pythonpath\n\nimport sys\n\nfrom zope.component import getUtility\n\nfrom lp.registry.interfaces.distribution import IDistributionSet\nfrom lp.services.scripts.base import LaunchpadCronScript\nfrom lp.translations.scripts.copy_distroseries_translations import (\n copy_distroseries_translations,\n )\n\n\nclass TranslationsCopier(LaunchpadCronScript):\n \"\"\"Copy latest distroseries translations from parent series.\n\n Core job is to invoke `distroseries.copyMissingTranslationsFromParent()`.\n \"\"\"\n\n def add_my_options(self):\n self.parser.add_option('-d', '--distribution', dest='distro',\n default='ubuntu',\n help='Name of distribution to copy translations in.')\n self.parser.add_option('-s', '--series', dest='series',\n help='Name of distroseries whose translations should be updated')\n self.parser.add_option('-f', '--force', dest='force',\n action=\"store_true\", default=False,\n help=\"Don't check if target's UI and imports are blocked; \"\n \"actively block them.\")\n\n def _getTargetSeries(self):\n \"\"\"Retrieve target `DistroSeries`.\"\"\"\n series = self.options.series\n return getUtility(IDistributionSet)[self.options.distro][series]\n\n def main(self):\n series = self._getTargetSeries()\n\n # Both translation UI and imports for this series should be blocked\n # while the copy is in progress, to reduce the chances of deadlocks or\n # other conflicts.\n blocked = (\n series.hide_all_translations and series.defer_translation_imports)\n if not blocked and not self.options.force:\n self.txn.abort()\n self.logger.error(\n 'Before this process starts, set the '\n 'hide_all_translations and defer_translation_imports '\n 'flags for distribution %s, series %s; or use the '\n '--force option to make it happen automatically.' 
% (\n                self.options.distro, self.options.series))\n            sys.exit(1)\n\n        self.logger.info('Starting...')\n\n        # Actual work is done here.\n        copy_distroseries_translations(series, self.txn, self.logger)\n\n        # We would like to update the DistroRelease statistics, but it takes\n        # too long so this should be done after.\n        #\n        # Finally, we changed many things related with cached statistics, so\n        # we may want to update those.\n        # self.logger.info('Updating DistroSeries statistics...')\n        # series.updateStatistics(self.txn)\n\n        self.txn.commit()\n        self.logger.info('Done.')\n\n    @property\n    def lockfilename(self):\n        \"\"\"Return lock file name for this script on this distroseries.\n\n        No global lock is needed, only one for the distroseries we operate\n        on. This does mean that our options must have been parsed before this\n        property is ever accessed. Luckily that is what the `LaunchpadScript`\n        code does!\n        \"\"\"\n        return \"launchpad-%s-%s-%s.lock\" % (self.name, self.options.distro,\n            self.options.series)\n\n\nif __name__ == '__main__':\n    script = TranslationsCopier(\n        'copy-missing-translations', dbuser='translations_distroseries_copy')\n    script.lock_and_run()\n","repo_name":"abramhindle/UnnaturalCodeFork","sub_path":"python/testdata/launchpad/scripts/copy-translations-from-parent.py","file_name":"copy-translations-from-parent.py","file_ext":"py","file_size_in_byte":3287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"74999625795","text":"# coding: utf-8\n\nimport sqlite3 as lite\nimport os.path, time\nimport os, sys\nimport datetime\nimport shutil\nimport pandas as pd\nimport numpy as np\nfrom pandas import ExcelWriter\nfrom pandas import ExcelFile\nfrom xlwings import Workbook, Sheet, Range, Chart\nimport betterwalk\nimport re\n\ndef isListEmpty(inList):\n    if isinstance(inList, list): # Is a list\n        return all( map(isListEmpty, inList) )\n    return False # Not a list\n\ndef ANDCheckEmpty(inList):\n    for checker in inList:\n        if not checker:\n            return False\n    return True\n    \ndef SearchFile(Path,Search_Condition,Search_Type):\n    #Create New DataFrame\n\n    columns1=['File Name', 'Path', 'Folder']\n    index1 = np.arange(30000)\n    df = pd.DataFrame(columns=columns1, index = index1)\n\n\n\n    #Search => None\n    if Search_Type == 'None':\n        \n        i=(-1)\n        #Path=unicode(Path,'utf8')\n        \n        for pathinfile, subdirs, files in betterwalk.walk(Path):\n            \n            for name in files:\n                if Search_Condition in name: \n                    i+=1\n                    fullPath = os.path.join(pathinfile,name)\n                    df.loc[i, 'Path']=fullPath\n                    df.loc[i, 'File Name']=name\n\n        #drop N/A \n        df = df[(pd.notnull(df['File Name']))]\n\n    #Search => OR\n    if Search_Type == 'OR':\n        \n        \n        SearchORArr=Search_Condition.split(',')\n\n        i=(-1)\n        for pathinfile, subdirs, files in betterwalk.walk(Path):\n            \n            for name in files:\n                ORresult = map(lambda x:re.findall(x,name),SearchORArr)\n                if not isListEmpty(ORresult): \n                    i+=1\n                    fullPath = os.path.join(pathinfile,name)\n                    df.loc[i, 'Path']=fullPath\n                    df.loc[i, 'File Name']=name\n\n        #drop N/A \n        df = df[(pd.notnull(df['File Name']))]\n\n    \n    #Search => AND\n    if Search_Type == 'AND':\n        \n        \n        SearchANDArr=Search_Condition.split(',')\n\n        i=(-1)\n        for pathinfile, subdirs, files in betterwalk.walk(Path):\n            \n            for name in files:\n                ANDresult = map(lambda x:re.findall(x,name),SearchANDArr)\n                if ANDCheckEmpty(ANDresult)== True: \n                    i+=1\n                    fullPath = os.path.join(pathinfile,name)\n                    df.loc[i, 'Path']=fullPath\n                    df.loc[i, 'File Name']=name\n\n        #drop N/A \n        df = df[(pd.notnull(df['File Name']))]\n    \n\n    if df.empty:\n        
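# nothing matched the search anywhere under Path, so skip the Excel export entirely\n        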
return ('No Results')\n    os.chdir('//ecsbks01/swap/DDD00/virtualenv/WinPython-32bit-2.7.10.2/python-2.7.10/Project_Evaluate_Excel/Search_History')\n    #Search for files\n    #word1=Search_Condition.decode('utf-8')\n    #df['Search Result']=df['File Name'].str.contains(Search_Condition)\n    #result = df[(df['Search Result']==True)]\n    #search for files write into excel\n    write_df=df.loc[:,['File Name','Path']]\n    writer = ExcelWriter('Result-Output.xls')\n    write_df.to_excel(writer,'Result',index=False)\n    \n    writer.save()\n\n\n\n\n\n\n\n    #turn search to files into hyperlink\n    CWPath = '\\\\\\\\ecsbks01\\\\swap\\\\DDD00\\\\virtualenv\\\\WinPython-32bit-2.7.10.2\\\\python-2.7.10\\\\Project_Evaluate_Excel\\\\Search_History'\n    Excel_Path = os.path.join(CWPath, 'Result-Output.xls')\n    wb = Workbook(Excel_Path)\n    wb = Workbook.caller()\n    checkArr = Range('B2').vertical.value\n    i = 2\n    for check in checkArr:\n        \n        RangeName=('B%d' % (i))\n        displayRange=('A%d' % (i))\n        address=Range(RangeName).value\n        display_name = Range(displayRange).value\n        i+=1\n        try:\n            Range(RangeName).add_hyperlink(address, text_to_display=address)\n        except Exception:\n            pass\n    return \"FINISH\"\n    \n\n\n\n","repo_name":"geek-ragazza/Project_Evaluate_Excel","sub_path":"Search_to_Excel/Search_History.py","file_name":"Search_History.py","file_ext":"py","file_size_in_byte":3805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"21644677649","text":"import time\nfrom selenium import webdriver\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\n\ndef set_driver():\n    driver=webdriver.Remote(\n        command_executor=\"http://127.0.0.1:4444/wd/hub\",\n        desired_capabilities=DesiredCapabilities.CHROME)\n        # desired_capabilities={\n        #     'browser':'Chrome'\n        # })\n    driver.get('http://www.raincard.cn/management/login.html')\n    time.sleep(3)\n    driver.quit()\n\nset_driver()","repo_name":"Issmqi/WebTest","sub_path":"driver/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"72840738754","text":"from datetime import datetime, timedelta\n\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom graphs import create_graph\n\n\ndef index(request):\n\treturn render(request, 'dashboard.html', {'available_graphs': create_graph.get_available_graphs()})\n\n@csrf_exempt\ndef getGraphGroupBy(request, graph):\n\tstart_date = request.GET.get('start_date')\n\tend_date = request.GET.get('end_date')\n\tgroupby = request.GET.get('groupby')\n\n\tif(groupby == \"null\"):\n\t\tgroupby = \"hour\"\n\t\tprint('resetting groupby')\n\telse:\n\t\tgroupby = groupby.lower()\n\tif(start_date != \"null\"):\n\t\tstart_date_fmt = datetime.strptime(start_date, '%Y-%m-%d %H:%M:%S')\n\telse:\n\t\tstart_date = None\n\t\tstart_date_fmt = None\n\n\tif(end_date != \"null\"):\n\t\tend_date_fmt = datetime.strptime(end_date, '%Y-%m-%d %H:%M:%S')\n\telse: \n\t\tend_date=None\n\t\tend_date_fmt = None\n\n\tif(end_date_fmt is None and start_date_fmt is None):\n\t\tstart_date = datetime.strftime((datetime.now()-timedelta(days=7)),'%Y-%m-%d %H:%M:%S') \n\t\tend_date = datetime.strftime((datetime.now()),'%Y-%m-%d %H:%M:%S') \n\n\tplotted_graph = create_graph.find_graph(graph, start_date_fmt, end_date_fmt, groupby)\n\treturn render(request, 'graph.html', {'title': graph, 'graph': plotted_graph['graph'], 'graph_info': 
plotted_graph['info'], 'groupby' : plotted_graph['group_by'], 'start_date': start_date, 'end_date': end_date})\n\n@csrf_exempt\ndef getGraph(request, graph):\n\n\tstart_date = request.GET.get('start_date')\n\tend_date = request.GET.get('end_date')\n\n\tif(start_date != \"null\"):\n\t\tstart_date_fmt = datetime.strptime(start_date, '%Y-%m-%d %H:%M:%S')\n\telse:\n\t\tstart_date = None\n\t\tstart_date_fmt = None\n\n\tif(end_date != \"null\"):\n\t\tend_date_fmt = datetime.strptime(end_date, '%Y-%m-%d %H:%M:%S')\n\telse: \n\t\tend_date=None\n\t\tend_date_fmt = None\n\n\t#start/end_date_fmt is needed as arg None for plotted graph isn't going to work - put this in create_graph()\n\t#none specified so...\n\tif(end_date_fmt is None and start_date_fmt is None):\n\t\tprint('none specified...')\n\t\tstart_date = datetime.strftime((datetime.now()-timedelta(days=7)),'%Y-%m-%d %H:%M:%S') \n\t\tend_date = datetime.strftime((datetime.now()),'%Y-%m-%d %H:%M:%S') \n\n\tplotted_graph = []\n\ttry:\n\t\tplotted_graph = create_graph.find_graph(graph, start_date_fmt, end_date_fmt, None)\n\texcept Exception as e:\n\t\tprint('returning bad response')\n\t\tprint(str(e))\n\t\treturn HttpResponse(\"Unable to create graph: {0}\".format(str(e)),status=400)\n\n\treturn render(request, 'graph.html', {'title': graph, 'graph': plotted_graph['graph'], 'graph_info': plotted_graph['info'], 'start_date': start_date, 'end_date': end_date})\n\t\n\n\n","repo_name":"daniel-cole/Log-Plotter","sub_path":"dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21771691502","text":"from __future__ import annotations\nfrom google.protobuf.internal.containers import RepeatedCompositeFieldContainer\nimport importlib\nimport numpy as np\nfrom tinygrad.tensor import Tensor\nfrom tinygrad.helpers import getenv, DEBUG, dtypes\nfrom typing import List, Dict\nfrom onnx import AttributeProto, ModelProto, TensorProto, TypeProto # onnx 1.50 uses serialized file (see onnx/onnx-ml.proto) as descriptors\ntry:\n from onnx.helper import tensor_dtype_to_np_dtype\nexcept ImportError:\n # for onnx < 1.13\n from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE\n tensor_dtype_to_np_dtype = lambda x: TENSOR_TYPE_TO_NP_TYPE[x]\n\n# global numpy cache for parameters\nnumpy_cache = {}\ndef safe_numpy(t) -> np.ndarray:\n if not isinstance(t, Tensor): return t\n global numpy_cache\n if t not in numpy_cache:\n if DEBUG >= 3: print(\"numpy cache miss\", t)\n tmp = t.numpy()\n numpy_cache[t] = tmp if len(tmp.shape) else tmp.reshape(1)\n assert len(numpy_cache[t].shape) > 0\n return numpy_cache[t]\n\nonnx_ops = importlib.import_module('extra.onnx_ops')\n\nONNXLIMIT = getenv(\"ONNXLIMIT\", -1)\n\ndef get_run_onnx(onnx_model: ModelProto):\n def type_parse(type_proto: TypeProto):\n ret = []\n while True:\n attr = type_proto.WhichOneof('value')\n if attr == 'tensor_type':\n if \"dim_value\" not in getattr(type_proto, attr).shape.dim.__dir__(): return () # variable type, unable to determine shape\n elif not ret:\n return tuple([x.dim_value for x in getattr(type_proto, attr).shape.dim])\n else:\n ret.extend([(x.dim_value,) for x in getattr(type_proto, attr).shape.dim])\n return tuple(ret)\n elif attr == 'sequence_type':\n type_proto = getattr(type_proto, attr).elem_type\n ret.append(1)\n elif attr == 'map_type': raise NotImplementedError(f\"map_type is not implemented: {type_proto}\")\n elif attr == 'opaque_type': raise 
NotImplementedError(f\"opaque_type is not implemented: {type_proto}\")\n elif attr == 'sparse_tensor_type': raise NotImplementedError(f\"sparse_tensor_type is not implemented: {type_proto}\")\n elif attr == 'optional_type': type_proto = getattr(type_proto, attr).elem_type\n else: raise Exception(f\"unknown attr: {attr}, {type_proto}\")\n\n def buffer_parse(inp: TensorProto) -> Tensor:\n if inp.data_type in (1,10,6,7,5):\n # TODO: this is shared with below\n if len(inp.float_data) > 0:\n ret = Tensor(np.array(inp.float_data, dtype=np.float32).reshape(inp.dims), requires_grad=False)\n elif len(inp.int64_data) > 0:\n ret = Tensor(np.array(inp.int64_data, dtype=np.int64).reshape(inp.dims), requires_grad=False)\n elif len(inp.int32_data) > 0:\n ret = Tensor(np.array(inp.int32_data, dtype=np.int32).reshape(inp.dims), requires_grad=False)\n else:\n ret = Tensor(np.frombuffer(inp.raw_data, dtype=tensor_dtype_to_np_dtype(inp.data_type)).reshape(inp.dims).astype(np.float32).copy(), requires_grad=False)\n else:\n raise Exception(f\"bad data type {inp.name} {inp.dims} {inp.data_type}\")\n return ret\n\n def attribute_parse(a: AttributeProto) -> float | int | str | Tensor | tuple[float] | tuple[int]:\n # TODO: this is not complete, see onnx/onnx_ml_pb2.pyi for a complete list\n if a.type == AttributeProto.FLOAT: return float(a.f)\n elif a.type == AttributeProto.INT: return int(a.i)\n elif a.type == AttributeProto.STRING: return a.s.decode(\"utf-8\")\n elif a.type == AttributeProto.TENSOR: return buffer_parse(a.t) # TENSOR\n elif a.type == AttributeProto.FLOATS: return tuple(float(x) for x in a.floats)\n elif a.type == AttributeProto.INTS: return tuple(int(x) for x in a.ints)\n elif a.type == AttributeProto.STRINGS: return tuple(x.decode(\"utf-8\") for x in a.strings)\n elif a.type == AttributeProto.GRAPH: raise Exception(f\"graph not implemented: {a.g}\\n likely an OP requiring control flow\")\n else: raise Exception(f\"can't parse {a.type} {a}\")\n def attribute_to_dict(a: RepeatedCompositeFieldContainer[AttributeProto]): return {x.name:attribute_parse(x) for x in a}\n\n tensors: Dict[str, Tensor] = {}\n\n # get weights and biases\n for inp in onnx_model.graph.initializer:\n if len(inp.raw_data) > 0:\n tensors[inp.name] = buffer_parse(inp)\n elif len(inp.float_data) > 0:\n tensors[inp.name] = Tensor(np.array(inp.float_data, dtype=np.float32).reshape(inp.dims), requires_grad=False)\n elif len(inp.int64_data) > 0:\n tensors[inp.name] = Tensor(np.array(inp.int64_data, dtype=np.int64).reshape(inp.dims), requires_grad=False)\n elif len(inp.raw_data) == 0:\n tensors[inp.name] = Tensor(np.array([], dtype=np.float32), requires_grad=False)\n else:\n print(inp.name, inp.dims, inp.data_type, len(inp.raw_data))\n print(inp)\n raise Exception(\"no data\")\n\n # preparse the attributes\n attribute_dict = {}\n domain = \"\"\n for num,n in enumerate(onnx_model.graph.node):\n attribute_dict[num] = attribute_to_dict(n.attribute)\n if n.domain: domain = n.domain\n\n onnx_model_version = onnx_model.opset_import[0].version\n\n def run_onnx(inputs={}, debug=0):\n debug = getenv(\"DEBUGONNX\") or debug\n input_tensors: Dict[str,Tensor] = {}\n intermediate_tensors: Dict[str,Tensor] = {}\n output_tensor_names = [x.name for x in onnx_model.graph.output]\n\n # get inputs\n for inp in onnx_model.graph.input:\n if inp.name in tensors: continue\n shape = type_parse(inp.type)\n if inp.name in inputs:\n if isinstance(inputs[inp.name], Tensor):\n input_tensors[inp.name] = inputs[inp.name]\n elif isinstance(inputs[inp.name], list):\n 
input_tensors[inp.name] = [Tensor(i, requires_grad=False) for i in inputs[inp.name]]\n elif domain == \"ai.onnx.preview.training\": # not sure if in real use the domain is \"ai.onnx.preview.training\"\n input_tensors[inp.name] = Tensor(inputs[inp.name], requires_grad=True) # TODO there isn't a good way to parse which inp requires_grad, some are manually turned off in optimizer ops\n else:\n input_tensors[inp.name] = Tensor(inputs[inp.name], requires_grad=False)\n if shape: # if only input_tensor is not variable type\n input_shape = input_tensors[inp.name].shape if isinstance(input_tensors[inp.name], Tensor) else (1, *[i.shape for i in input_tensors[inp.name]])\n assert input_shape == shape, f\"wrong shape for input {inp.name}, {input_shape} isn't {shape}\"\n else:\n raise Exception(f\"no data for {inp.name} with shape {shape}\")\n\n def fetch_tensor(x: str):\n if x in tensors: return tensors[x]\n if x in intermediate_tensors: return intermediate_tensors[x]\n if x != str(): return input_tensors[x]\n return None\n\n for num,n in enumerate(onnx_model.graph.node):\n inp: List[Tensor] = []\n if debug >= 3: print(\"inputs:\")\n for x in n.input:\n t = fetch_tensor(x)\n if debug >= 3: print(f\"\\t{x} - {t}\")\n inp.append(t)\n opt: Dict = attribute_dict[num]\n if debug >= 1: print(f\"{num}: op {n.op_type} shape {[x.shape if isinstance(x, Tensor) else x for x in inp]} opt {opt}\")\n\n # NOTE some ops live here because they require access to some local variables\n # have to use n.output for cases when num_outputs is absent\n if n.op_type in onnx_ops.tensor_methods:\n ret = getattr(Tensor, n.op_type.lower())(*inp, **opt)\n elif n.op_type == \"Split\":\n axis = opt.get(\"axis\", 0)\n split = None if len(inp) == 1 else [int(x) for x in safe_numpy(inp[1])]\n if split is None:\n split = [inp[0].shape[axis] // len(n.output)] * len(n.output)\n for i in range(inp[0].shape[axis] % len(n.output)):\n split[i] += 1\n i, ret = 0, []\n arg = [(0,x) for x in inp[0].shape]\n for s in split:\n arg[axis] = (i,i+s)\n ret.append(inp[0].shrink(arg=tuple(arg)))\n i = i+s\n ret = tuple(ret)\n\n # need to check onnx_model_version\n elif n.op_type == \"Slice\":\n if onnx_model_version < 10:\n axes, ends, starts, steps = list(opt.get(\"axes\", range(inp[0].ndim))), list(opt[\"ends\"]), list(opt[\"starts\"]), [1]*inp[0].ndim\n else:\n starts, ends = inp[1:3]\n axes = safe_numpy(Tensor.arange(inp[0].ndim, dtype=dtypes.int32) if len(inp) <= 3 else inp[3]).tolist()\n steps = safe_numpy(inp[4]) if len(inp) > 4 else [1]*inp[0].ndim\n starts, ends = safe_numpy(starts.ceil().cast(dtypes.int32)).tolist(), safe_numpy(ends.ceil().cast(dtypes.int32)).tolist()\n arg = [(0,x,1) for x in inp[0].shape]\n for i, axis in enumerate(axes):\n axis = int(axis) + inp[0].ndim if axis < 0 else int(axis)\n starts[i], ends[i] = starts[i] + inp[0].shape[axis] if starts[i] < 0 else starts[i], ends[i] + inp[0].shape[axis] if ends[i] < 0 else ends[i]\n starts[i], ends[i] = max(0, min(starts[i], inp[0].shape[axis])), max(0, min(ends[i], inp[0].shape[axis]))\n if starts[i] > ends[i] and steps[i] >= 0: steps[i] = -steps[i]\n arg[axis] = (starts[i], ends[i], steps[i])\n new_shape = tuple((s, e) if st > 0 else (e+1, s+1) for s, e, st in arg)\n if any(s==e for s,e in new_shape): ret = inp[0].shrink(new_shape)\n else: ret = inp[0].__getitem__(tuple([slice(s,e,st) for s,e,st in arg]))\n\n # need to call backward on intermediate_tensors\n elif n.op_type == \"Gradient\":\n assert len(opt[\"xs\"]) == len(inp), f\"len(opt['xs']):{len(opt['xs'])}, len(inp):{len(inp)} 
output and input has to match\"\n y = opt[\"y\"]\n intermediate_tensors[y].backward()\n ret = tuple([t.grad for t in inp])\n\n # onnx_ops.py\n elif hasattr(onnx_ops, n.op_type):\n fxn = getattr(onnx_ops, n.op_type)\n if isinstance(fxn, dict):\n for k in sorted(fxn.keys()):\n if k <= onnx_model_version:\n real_fxn = fxn[k]\n else:\n real_fxn = fxn\n ret = real_fxn(*inp, **opt)\n else:\n print(\"UNSUPPORTED\", n.op_type, n.input, n.output)\n raise Exception(f\"op_type {n.op_type} not supported\")\n\n if not isinstance(ret, tuple): ret = (ret, )\n assert len(n.output) <= len(ret), f\"expected output size must be less than {len(ret)}, it's {n.output}\"\n if debug >= 2: print([x.shape if isinstance(x, Tensor) else None for x in ret])\n if debug >= 2: print(\"outputs:\")\n for i in range(len(n.output)):\n if debug >= 2: print(f\"\\t{n.output[i]} - {ret[i]}\")\n intermediate_tensors[n.output[i]] = ret[i]\n if num == ONNXLIMIT:\n output_tensor_names = n.output\n break\n\n return {outp:intermediate_tensors[outp] for outp in output_tensor_names}\n return run_onnx\n","repo_name":"tinygrad/tinygrad","sub_path":"extra/onnx.py","file_name":"onnx.py","file_ext":"py","file_size_in_byte":10692,"program_lang":"python","lang":"en","doc_type":"code","stars":20676,"dataset":"github-code","pt":"61"} +{"seq_id":"18120678181","text":"import unittest\nimport pandas as pd\nimport warnings\nfrom webviz_scatter_plot import ScatterPlot\n\ntest_data = {\n 'index': ['2012-01-01', '2012-01-02', '2012-01-03'],\n 'normal': [1, 2, 3],\n 'poisson': [5, 2, 1],\n 'triangular': [1, 9, 4]\n}\n\n\nclass TestScatterPlot(unittest.TestCase):\n def test_logarithmic_scale_with_negative_value(self):\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n\n ScatterPlot(pd.DataFrame({\n 'index': ['2012-01-01', '2012-01-02', '2012-01-03'],\n 'normal': [1, 2, 3],\n 'poisson': [5, -2, 1],\n 'triangular': [1, 9, 4]\n }), logy=True)\n\n self.assertEqual(len(w), 1)\n self.assertIn(\"Non-positive values\", str(w[-1].message))\n","repo_name":"equinor/webviz-archived","sub_path":"visualizations/scatter_plot/tests/test_scatter_plot.py","file_name":"test_scatter_plot.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"61"} +{"seq_id":"33432655895","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, print_function, division, unicode_literals\n##\n## This file is part of MoaT, the Master of all Things.\n##\n## MoaT is Copyright © 2007-2016 by Matthias Urlichs ,\n## it is licensed under the GPLv3. See the file `README.rst` for details,\n## including optimistic statements by the author.\n##\n## This program is free software: you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation, either version 3 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n## GNU General Public License (included; see the file LICENSE)\n## for more details.\n##\n## This header is auto-generated and may self-destruct at any time,\n## courtesy of \"make update\". 
The original is in ‘scripts/_boilerplate.py’.\n## Thus, do not remove the next line, or insert any blank lines above.\n##BP\n\n\"\"\"Manage infrastructure description in MoaT\"\"\"\n\nimport os\nimport sys\nimport aio_etcd as etcd\nimport asyncio\nimport time\nimport inspect\nfrom traceback import print_exc\nfrom yaml import dump\nfrom etcd_tree import EtcAwaiter\nfrom collections.abc import Mapping\n\nfrom moat.script import Command, SubCommand, CommandError\nfrom moat.infra import INFRA_DIR, INFRA, LinkExistsError\nfrom moat.util import r_dict, r_show\nfrom moat.cmd.task import _ParamCommand\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n__all__ = ['InfraCommand']\n\nclass DefSetup:\n DIR = INFRA_DIR\n\n async def setup(self):\n await super().setup()\n etc = self.root.etcd\n tree = await self.root._get_tree()\n t = await tree.subdir(self.DIR)\n return t\n\nclass ListCommand(DefSetup,Command):\n name = \"list\"\n summary = \"List infrastructure entries\"\n description = \"\"\"\\\nInfrastructure entries are stored in etcd at /infra/**/:host.\nThe path reflects the host's (reversed) domain name.\n\nThis command shows that data. Depending on verbosity, output is\na one-line summary, human-readable detailed state, or details as YAML.\n\"\"\"\n\n def addOptions(self):\n self.parser.add_option('-t','--this',\n action=\"count\", dest=\"this\", default=0,\n help=\"Show the given job only (-tt for jobs one level below, etc.)\")\n self.parser.add_option('-n','--no-link',\n action=\"store_true\", dest=\"nolink\",\n help=\"only show hosts with no remote links\")\n\n async def do(self,args):\n t = await self.setup()\n if args:\n dirs = []\n for a in args:\n a = reversed(a.split('.'))\n try:\n dirs.append(await t.lookup(a))\n except KeyError:\n raise CommandError(\"'%s' does not exist\"%(a,))\n else:\n dirs = [t]\n for tt in dirs:\n async for item in tt.tagged(INFRA, depth=self.options.this):\n path = item.path[len(INFRA_DIR):-1]\n if self.options.nolink:\n n = 0\n try:\n for h in item['ports'].values():\n await h.remote\n n += 1\n except KeyError:\n pass\n if n:\n continue\n if self.root.verbose == 2:\n print('*','.'.join(path[::-1]), sep='\\t',file=self.stdout)\n for k,v in r_show(item,''):\n print(k,v, sep='\\t',file=self.stdout)\n\n elif self.root.verbose > 1:\n dump({'.'.join(path[::-1]):r_dict(dict(item))}, stream=self.stdout)\n else:\n path = '.'.join(path[::-1])\n name = item.get('name','-')\n if name == path:\n name = \"-\"\n print(path,name,item.get('descr','-'), sep='\\t',file=self.stdout)\n\n\nclass _AddUpdate:\n \"\"\"Mix-in to add or update an infrastructure entry (too much)\"\"\"\n async def do(self,args):\n try:\n data = {}\n name=\"\"\n p=0\n\n path = '/'.join(args[p].split('.')[::-1])\n if path == \"\":\n raise CommandError(\"Empty domain name?\")\n p+=1\n\n while p < len(args):\n try:\n k,v = args[p].split('=')\n except ValueError:\n break\n p += 1\n if k == \"name\":\n name = v\n else:\n data[k] = v\n if not self._update:\n args[p] # raises IndexError if nothing is left\n descr = \" \".join(args[p:])\n except IndexError:\n raise CommandError(\"Missing command parameters\")\n t = await self.setup()\n\n try:\n item = await t.subdir(path,name=INFRA, create=not self._update)\n except KeyError:\n raise CommandError(\"Infrastructure item '%s' not found.\" % path)\n if name:\n await item.set('name', name, sync=False)\n if descr:\n await item.set('descr', descr, sync=False)\n if data:\n for k,v in data.items():\n if v == \"\":\n try:\n await item.delete(k, sync=False)\n except 
KeyError:\n                        pass\n                else:\n                    await item.set(k,v, sync=False)\n    \n\nclass AddCommand(_AddUpdate,DefSetup,Command):\n    name = \"add\"\n    summary = \"add an infrastructure entry\"\n    description = \"\"\"\\\nCreate a new infrastructure entry.\n\nArguments:\n\n* the new entry's DNS name (must not exist)\n\n* data=value parameters (optional)\n\n* a descriptive name (not optional)\n\n\"\"\"\n    _update = False\n\nclass UpdateCommand(_AddUpdate,DefSetup,Command):\n    name = \"change\"\n    summary = \"change an infrastructure entry\"\n    description = \"\"\"\\\nUpdate an infrastructure entry.\n\nArguments:\n\n* the entry name (required)\n\n* data=value entries (deletes the key if value is empty)\n\n* an updated descriptive name (optional)\n\n\"\"\"\n    _update = True\n\nclass DeleteCommand(DefSetup,Command):\n    name = \"delete\"\n    summary = \"Delete an infrastructure entry\"\n    description = \"\"\"\\\nInfrastructure entries are stored in etcd at /infra/**/:host.\n\nThis command deletes one of these entries.\n\"\"\"\n\n    def addOptions(self):\n        self.parser.add_option('-f','--force',\n            action=\"store_true\", dest=\"force\",\n            help=\"not forcing won't do anything\")\n\n    async def do(self,args):\n        t = await self.setup()\n        if not args:\n            if not self.root.cfg['testing']:\n                raise CommandError(\"You can't delete everything.\")\n            args = t\n        for k in args:\n            path = tuple(k.split('.'))[::-1]\n            try:\n                tt = await t.subdir(path,name=INFRA, create=False)\n            except KeyError:\n                raise CommandError(\"%s: does not exist\"%(path,))\n            if self.root.verbose:\n                print(\"%s: deleted\"%k, file=self.stdout)\n            rec = True\n            while True:\n                p = tt._parent\n                if p is None: break\n                p = p()\n                if p is None: break\n                if p is t: break\n                try:\n                    await tt.delete(recursive=rec)\n                except etcd.EtcdDirNotEmpty:\n                    if rec:\n                        raise\n                    break\n                rec=False\n                tt = p\n\nasync def copy(val, dest, name):\n    if isinstance(val, Mapping):\n        dest = await dest.subdir(name) # , create=True)\n        for k,v in val.items():\n            if isinstance(v, EtcAwaiter):\n                v = await v\n            await copy(v,dest,k)\n    else:\n        await dest.set(name,val)\n\n\nclass MoveCommand(DefSetup,Command):\n    name = \"move\"\n    summary = \"Move an infrastructure entry\"\n    description = \"\"\"\\\nInfrastructure entries are stored in etcd at /infra/**/:host.\n\nThis command moves one of these entries, by recreating the structure and\nchanging the entries they point to.\n\"\"\"\n\n    async def do(self,args):\n        t = await self.setup()\n        if len(args) != 2:\n            raise CommandError(\"Move FROM TO. 
FROM must exist, TO must not.\")\n p1 = tuple(args[0].split('.'))[::-1]\n p2 = tuple(args[1].split('.'))[::-1]\n try:\n t1 = await t.subdir(p1,name=INFRA, create=False)\n except KeyError:\n raise CommandError(\"%s does not exist\" % (args[0],))\n try:\n t2 = await t.subdir(p2,name=INFRA, create=True)\n except etcd.EtcdAlreadyExist:\n raise CommandError(\"%s exists\" % (args[1],))\n\n path = tuple(args[0].split('.'))[::-1]\n for k,v in t1.items():\n if k == \"ports\":\n v = await v\n for pn,pd in v.items():\n if isinstance(pd, EtcAwaiter):\n pd = await pd\n try:\n ph = pd['host']\n except KeyError:\n # probably a link\n continue\n try:\n pp = pd['port']\n except KeyError:\n pass\n else:\n hh = tuple(ph.split('.'))[::-1]\n hh = await t.subdir(hh,name=INFRA, create=False)\n po = hh.lookup('ports',pp,'host')\n if isinstance(po, EtcAwaiter):\n po = await po\n if po.value != args[0]:\n logger.warn(\"Owch: back pointer for %s (%s on %s) is %s\", pn,pp,ph,po.value)\n continue\n await hh.set('ports', value={pp:{'host':args[1]}})\n await copy(v,t2,k)\n\n rec = True\n while True:\n p = t1._parent\n if p is None: break\n p = p()\n if p is None: break\n if p is t: break\n try:\n await t1.delete(recursive=rec)\n except etcd.EtcdDirNotEmpty:\n if rec:\n raise\n break\n rec = False\n t1 = p\n\nclass PortCommand(DefSetup,Command):\n name = \"port\"\n summary = \"Configure a port of an infrastructure item\"\n description = \"\"\"\\\nInfrastructure parameters are stored in etcd at /infra/**/:host/port/NAME.\n\nUsage: … port HOST NAME key=value… -- set\n … port HOST NAME -- show one port\n … port HOST -- list all ports\n … port -d HOST NAME… -- delete this port\n … port -d HOST * -- delete all ports\n\"\"\"\n\n def addOptions(self):\n self.parser.add_option('-d','--delete',\n action=\"store_true\", dest=\"delete\",\n help=\"delete a port\")\n\n async def do(self, args):\n t = await self.setup()\n if not args:\n raise SyntaxError(\"You need to specify a host!\")\n try:\n h = await t.lookup(reversed(args[0].split('.')), name=INFRA)\n h = await h.subdir('ports')\n except KeyError:\n print(\"Host '%s' is not known\" % (args[0],), file=sys.stderr)\n return\n args = args[1:]\n if self.options.delete:\n if len(args) == 0:\n raise SyntaxError(\"You need to specify which ports to delete.\") \n if len(args) == 1 and args[0] == '*':\n args = list(h.keys())\n for p in args:\n await h.delete(p)\n return\n\n if not args:\n for k,v in r_show(h,''):\n print(k,v, sep='\\t',file=self.stdout)\n return\n h = await h.subdir(args[0])\n args = args[1:]\n if not args:\n for k,v in r_show(h,''):\n print(k,v, sep='\\t',file=self.stdout)\n return\n for a in args:\n try:\n k,v = a.split('=',1)\n except ValueError:\n print(a)\n else:\n if v == '':\n await h.delete(k)\n else:\n await h.set(k,v, ext=True)\n\nclass LinkCommand(DefSetup,Command):\n name = \"link\"\n summary = \"Link two infrastructure items\"\n description = \"\"\"\\\nLink two devices.\n\nUsage: … link HOST_A PORT_A HOST_B PORT_B -- join\n … link HOST_A PORT_A LINK_ -- link name\n … link HOST PORT -- show\n … link HOST -- show all\n … link -d HOST PORT -- remove\n\nLinks are bidirectional.\n\"\"\"\n\n def addOptions(self):\n self.parser.add_option('-d','--delete',\n action=\"store_true\", dest=\"delete\",\n help=\"delete a link\")\n self.parser.add_option('-m','--missing',\n action=\"store_true\", dest=\"missing\",\n help=\"only show links with missing remote ports\")\n self.parser.add_option('-r','--replace',\n action=\"store_true\", dest=\"replace\",\n help=\"overwrite 
existing links\")\n\n async def do(self, args):\n t = await self.setup()\n if self.options.delete:\n if self.options.missing or self.options.replace:\n raise SyntaxError(\"'-d' and '-m'/'-r' cannot be used at the same time.\")\n if len(args) != 2:\n raise SyntaxError(\"You need to specify which host+port to delete.\") \n else:\n if self.options.replace and len(args) < 3:\n raise SyntaxError(\"'-r' is only useful when creaing a link\")\n if len(args) < 1:\n async for h in t.tagged(INFRA):\n h = await h\n try:\n x = h['ports'].values()\n except KeyError:\n pass\n else:\n for p in x:\n try:\n r = await p.remote\n except KeyError:\n try:\n rh = p['host']\n except KeyError:\n pass\n else:\n print(h.dnsname,p.name,rh)\n else:\n if not self.options.missing:\n print(h.dnsname,p.name,r.host.dnsname,r.name)\n return\n elif len(args) == 1:\n h = await t.host(args[0],create=False)\n try:\n x = h['ports'].values()\n except KeyError:\n pass\n else:\n for p in h['ports'].values():\n try:\n r = await p.remote\n except KeyError:\n try:\n rh = p['host']\n except KeyError:\n pass\n else:\n print(p.name,rh)\n else:\n if not self.options.missing:\n print(p.name,r.host.dnsname,r.name)\n return\n elif len(args) == 2:\n h = await t.host(args[0],create=False)\n p = h['ports'][args[1]]\n try:\n r = await p.remote\n except KeyError:\n try:\n rh = p['host']\n except KeyError:\n pass\n else:\n print(rh)\n\n else:\n print(r.host.dnsname,r.name)\n return\n elif len(args) > 4:\n raise SyntaxError(\"You need to specify host+port of both sides.\") \n elif self.options.missing:\n raise SyntaxError(\"'-m' can only be used when listing.\")\n p1 = await t.host(args[0], create=False)\n p1 = await p1.subdir('ports',args[1])\n if self.options.delete:\n await p1.unlink()\n else:\n is_link = False\n try:\n p2 = await t.host(args[2], create=False)\n except KeyError:\n is_link = True\n p2 = args[2]\n if self.root.verbose:\n print(\"Creating a link.\", file=sys.stderr)\n else:\n if len(args) == 4:\n p2 = await p2.subdir('ports',args[3])\n try:\n await p1.link(p2, replace=self.options.replace)\n except LinkExistsError as e:\n port = e.args[0]\n try:\n rem = await port.remote\n except KeyError:\n print(\"Port %s:%s is linked to %s. Use '-r'.\" % (port.host.dnsname, port.name, port['host']), file=sys.stderr)\n else:\n print(\"Port %s:%s is linked to %s:%s. 
Use '-r'.\" % (port.host.dnsname, port.name, rem.host.dnsname,rem.name), file=sys.stderr)\n\ndef VL(x):\n if x == '-':\n return set()\n elif x == '*':\n return set(('*',))\n return set(int(v) for v in x.split(','))\n\nclass NoVlanError(RuntimeError):\n pass\n\nclass CDict(dict):\n def add(self,k):\n self[k] = self.get(k,0)+1\n def keys(self):\n for k,v in self.items():\n if v > 1:\n yield k\n def __ior__(self, kk):\n for k in kk:\n self.add(k)\n return self\n\nclass VlanInfo:\n def __init__(self, host, t, verbose=1, seen=None):\n self.t = t\n self.verbose = verbose\n self.host = host\n self.ports = dict() # name > vli\n self.vlans = CDict()\n\n def __repr__(self):\n return \"\" % (self.host.dnsname,)\n\n async def extend(self, seen=None):\n sn = self.host.dnsname\n if seen is None:\n seen = set()\n elif sn in seen:\n return\n seen.add(sn)\n try:\n v = VL(self.host['vlan'])\n except KeyError:\n raise NoVlanError(self.host.dnsname)\n if not v:\n return\n self.vlans |= v\n try:\n pp = self.host['ports']\n except KeyError:\n return\n for n,p in pp.items():\n if 'vlan' in p:\n v = VL(p['vlan'])\n try:\n h = p['host']\n except KeyError:\n h = None\n else:\n try:\n h = p['host']\n except KeyError:\n continue\n hv = await self.t.host(h, create=False)\n hv = VlanInfo(hv, self.t, verbose=self.verbose)\n await hv.extend(seen)\n v = hv.vlans\n self.ports[n] = (v,h)\n self.vlans |= v\n\nclass VlanCommand(DefSetup,Command):\n name = \"vlan\"\n summary = \"Show per-port VLAN configuration\"\n description = \"\"\"\\\nShow a router's required VLAN configuration.\nThat is, trace which VLANs are connected to each port, directly or indirectly.\n\nUsage: … vlan HOST VLAN[,VLAN…] -- set VLAN(s) which this host uses\n … vlan HOST PORT VLAN[,VLAN…] -- set VLAN(s) on this port\n … vlan HOST -- list ports and connected VLAN(s)\n … vlan -v HOST -- list VLANs and connected ports\n\nSetting VLANs on a port prevents that port from being followed when\ncollecting VLAN IDs.\n\nSpecial VLANs (only on hosts) are\n* -- pass-through\n- -- special device, no VLAN\n\"\"\"\n\n def addOptions(self):\n self.parser.add_option('-v','--vlans',\n action=\"store_true\", dest=\"vlans\",\n help=\"list per vlan, not per port\")\n\n async def do(self, args):\n t = await self.setup()\n if len(args) < 1:\n raise SyntaxError(\"You need to specify host+port of both sides.\") \n elif len(args) == 1:\n h = await t.host(args[0],create=False)\n vli = VlanInfo(h,t)\n await vli.extend()\n if self.options.vlans:\n for vl in sorted(-1 if v == '*' else v for v in vli.vlans.keys()):\n print('*' if vl==-1 else vl, ' '.join(sorted(str(p) for p,v in vli.ports.items() if ('*' if vl==-1 else vl) in v[0])))\n else:\n for p,vl in sorted(vli.ports.items()):\n vl,n = vl\n if vl:\n vl = ','.join(str(x) for x in vl)\n else:\n vl = '-'\n print(p, vl, n)\n elif len(args) == 2:\n h = await t.host(args[0],create=False)\n await h.set('vlan', args[1], sync=False)\n elif len(args) == 3:\n h = await t.host(args[0],create=False)\n p = h['ports'][args[1]]\n await p.set('vlan',args[2], sync=False)\n else:\n raise SyntaxError(\"Too many arguments.\") \n\nclass PathCommand(DefSetup,Command):\n name = \"path\"\n summary = \"Show paths from A to B, or unreachables from A\"\n description = \"\"\"\\\nShow how A and B are linked, or which devices are not reachable from A.\n\nUsage: … link HOST_A HOST_B -- show path\n … link HOST -- list unreachable hosts\n\nLinks are unidirectional.\n\"\"\"\n\n async def do(self, args):\n t = await self.setup()\n if len(args) < 0 or len(args) > 
2:\n            raise SyntaxError(\"Usage: … link HOST_A [HOST_B]\")\n\n        elif len(args) == 1: ## list unreachables\n            hosts = [t.host(args[0])]\n            known = set()\n            while hosts:\n                h = await hosts.pop()\n                known.add(h.dnsname)\n                try:\n                    x = h['ports'].values()\n                except KeyError:\n                    pass\n                else:\n                    for v in x:\n                        try:\n                            v = v.get('host', raw=True)\n                        except KeyError:\n                            pass\n                        else:\n                            if v.value not in known:\n                                hosts.append(v.host)\n            async for h in t.tagged(INFRA):\n                name = h.dnsname\n                if name not in known:\n                    print(name)\n\n        else: ## list links\n            dest = await t.host(args[1])\n            dname = dest.dnsname\n            hosts = [t.host(args[0])]\n            prevs = {args[0]: None}\n            while hosts:\n                h = await hosts.pop(0)\n                try:\n                    hp = h['ports']\n                except KeyError:\n                    continue\n                for v in hp.values():\n                    try:\n                        v = v.get('host', raw=True)\n                    except KeyError:\n                        continue\n                    if v.value == dname:\n                        def prev_p(name):\n                            if name is None:\n                                return\n                            prev_p(prevs[name])\n                            print(name)\n                        prev_p(h.dnsname)\n                        print(v.value)\n                        return\n                    if v.value not in prevs:\n                        prevs[v.value] = h.dnsname\n                        hosts.append(v.host)\n            print(\"Unreachable.\", file=sys.stderr)\n\nclass InfraCommand(SubCommand):\n    name = \"infra\"\n    summary = \"Document your network infrastructure\"\n    description = \"\"\"\\\nCommands to configure your network connectivity\n\"\"\"\n\n    # process in order\n    subCommandClasses = [\n        ListCommand,\n        AddCommand,\n        PortCommand,\n        LinkCommand,\n        VlanCommand,\n        PathCommand,\n        UpdateCommand,\n        MoveCommand,\n        DeleteCommand,\n    ]\n\n","repo_name":"M-o-a-T/moat-old","sub_path":"moat/cmd/infra.py","file_name":"infra.py","file_ext":"py","file_size_in_byte":24453,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"3168227098","text":"import cv2\nfrom tqdm import tqdm\nimport json\nimport torch\nfrom torchvision.transforms import ToTensor\nfrom detectron2.modeling import build_model\nfrom detectron2.checkpoint import DetectionCheckpointer\nfrom detectron2.config import get_cfg\n\n# Path\ninput_path = 'dataset/test/'\ncheckpoint_path = 'checkpoints/model_0244999.pth'\n\nif __name__ == \"__main__\":\n    cfg = get_cfg()\n    model = build_model(cfg)\n    DetectionCheckpointer(model).load(checkpoint_path)\n    model.eval()\n    transform = ToTensor()\n    result = []\n\n    for index in tqdm(range(13068)):\n        img_name = str(index + 1) + '.png'\n        img = cv2.imread(input_path + img_name)\n        img = transform(img)\n        \n        with torch.no_grad():\n            predict = model([{'image':img[(2, 1, 0)]}])\n        \n        instance = predict[0]['instances']\n        bboxes = instance.get_fields()['pred_boxes'].tensor\n        scores = [float(s) for s in instance.get_fields()['scores']]  # detection scores are floats in [0, 1]; int() would truncate them to 0\n        labels = [int(s) for s in instance.get_fields()['pred_classes']]\n        box_list = []\n        \n        for index, box in enumerate(bboxes):\n            x1 = int(box[0])\n            y1 = int(box[1])\n            x2 = int(box[2])\n            y2 = int(box[3])\n            bbox = (y1, x1, y2, x2)\n            box_list.append(bbox)\n        \n        result.append({'bbox': box_list, 'score': scores, 'label': labels})\n    \n    with open('result.json', 'w') as output_file:\n        json.dump(result, output_file)\n","repo_name":"7788boy/CS_T0828_HW2","sub_path":"Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"70010753794","text":"import cv2 \nimport numpy as np \n\nimport posenet.constants\n\n\n# Function that computes a valid resolution for the network\n# Inputs: image width, image height, and the output stride (output_stride)\ndef valid_resolution(width, height, output_stride=16):\n\n    # compute the valid (stride-aligned) resolution\n    target_width = (int(width) // output_stride) * 
{"seq_id":"70010753794","text":"import cv2 \nimport numpy as np \n\nimport posenet.constants\n\n\n# Computes a valid input resolution for the model\n# Inputs: image width (width), image height (height), output stride (output_stride)\ndef valid_resolution(width, height, output_stride=16):\n\n    # Snap the resolution to a multiple of the output stride, plus one\n    target_width = (int(width) // output_stride) * output_stride + 1\n    target_height = (int(height) // output_stride) * output_stride + 1\n\n    # Returns a tuple of the form (target_width, target_height)\n    return target_width, target_height\n\n\n# Preprocesses an input image\n# The input image is given as source_img; the scale_factor and output_stride parameters control the preprocessing\ndef _process_input(source_img, scale_factor=1.0, output_stride=16):\n\n    # Compute a valid resolution via the valid_resolution function\n    target_width, target_height = valid_resolution(\n        source_img.shape[1] * scale_factor, source_img.shape[0] * scale_factor, output_stride=output_stride)\n    \n    # scale = the size ratio used while processing the image;\n    # the reduction ratios are kept so later processing steps can scale results back to the desired size\n    scale = np.array([source_img.shape[0] / target_height, source_img.shape[1] / target_width])\n    #print('target_width = ', target_width)\n    #print('target_height = ', target_height)\n    \n    # Resize source_img to (target_width, target_height) with cv2.resize\n    # cv2.resize = resizes an image\n    # cv2.INTER_LINEAR = bilinear interpolation (best efficiency, fast, decent quality)\n    input_img = cv2.resize(source_img, (target_width, target_height), interpolation=cv2.INTER_LINEAR)\n    \n    # Convert from the BGR color space to the RGB color space with cv2.cvtColor\n    input_img = cv2.cvtColor(input_img, cv2.COLOR_BGR2RGB).astype(np.float32)\n    \n    # Normalize the image per pixel (pixel range 0..255 -> -1..1)\n    input_img = input_img * (2.0 / 255.0) - 1.0\n\n    # Reshape into a 4-dimensional array:\n    # 1st dim: batch size (1, since a single image is processed) / 2nd, 3rd dims: image height and width / 4th dim: color channels\n    input_img = input_img.reshape(1, target_height, target_width, 3)\n    \n    # Returns the preprocessed input image (input_img), the original input image (source_img), and the scale ratio used when resizing (scale)\n    return input_img, source_img, scale\n\n\n# Takes a video frame read from the webcam and returns it after preprocessing via the function call\ndef read_cap(cap, scale_factor=1.0, output_stride=16):\n\n    # Read a frame using cap.read\n    # res is True when the video frame was read correctly, False on failure / the frame read is img\n    res, img = cap.read()\n\n    # When res is False\n    if not res:\n        # Raise an exception -> exit the function\n        raise IOError(\"webcam failure\")\n\n    # Returns the frame after running it through the preprocessing function\n    return _process_input(img, scale_factor, output_stride)\n\n\n# Takes an image file path and returns the image at that path after preprocessing via the function call\ndef read_imgfile(path, scale_factor=1.0, output_stride=16):\n    \n    # Load the image from the file path into img\n    img = cv2.imread(path)\n\n    # Returns the image after running it through the preprocessing function\n    return _process_input(img, scale_factor, output_stride)\n\n# Draws keypoints on an image\n# Inputs: input image / instance scores / per-keypoint scores (2D) / per-keypoint coordinates (3D)\n# / minimum instance score below which nothing is drawn / minimum keypoint score below which nothing is drawn\ndef draw_keypoints(\n        img, instance_scores, keypoint_scores, keypoint_coords,\n        min_pose_confidence=0.5, min_part_confidence=0.5):\n\n    cv_keypoints = []\n\n    # The nested loops create a cv2.KeyPoint for every entry that passes both thresholds and append it to the cv_keypoints list\n    for ii, score in enumerate(instance_scores):\n        if score < min_pose_confidence:\n            continue\n\n        # Fetch all keypoint scores and coordinates of this instance (one index at a time)\n        for ks, kc in zip(keypoint_scores[ii, :], keypoint_coords[ii, :, :]):\n            if ks < min_part_confidence:\n                continue\n            # Create a cv2.KeyPoint and append it to the cv_keypoints list\n            cv_keypoints.append(cv2.KeyPoint(kc[1], kc[0], 10. * ks))\n\n    # Draw the keypoints on the image\n    out_img = cv2.drawKeypoints(img, cv_keypoints, outImage=np.array([]))\n    \n    # Returns the image with keypoints drawn in the given color and size\n    return out_img\n\n\n# Fetches point coordinates (computes the coordinates of connected keypoints)\ndef get_adjacent_keypoints(keypoint_scores, keypoint_coords, min_confidence=0.1):\n    results = [] #list of (x, y) pairs to return\n    \n    # Walk the left/right index pairs provided by posenet\n    # posenet.CONNECTED_PART_INDICES is the constant describing connected keypoint pairs\n    for left, right in posenet.CONNECTED_PART_INDICES:\n        if keypoint_scores[left] < min_confidence or keypoint_scores[right] < min_confidence:\n            continue\n        \n        #store each result as a numpy array in [y, x] order ([::-1] reverses the pair / OpenCV handles coordinates in y, x order)\n        results.append(\n            np.array([keypoint_coords[left][::-1], keypoint_coords[right][::-1]]).astype(np.int32),\n        )\n    \n    # Returns the coordinate pairs\n    return results\n\n# Draws a skeleton (bones) on an image\ndef draw_skeleton(\n        img, instance_scores, keypoint_scores, keypoint_coords,\n        min_pose_confidence=0.5, min_part_confidence=0.5):\n    out_img = img\n    adjacent_keypoints = []\n    \n    for ii, score in enumerate(instance_scores):\n        if score < min_pose_confidence:\n            continue\n\n        # Store the coordinates obtained from get_adjacent_keypoints in new_keypoints (as np.arrays)\n        new_keypoints = get_adjacent_keypoints(\n            keypoint_scores[ii, :], keypoint_coords[ii, :, :], min_part_confidence)\n        \n        # Add the coordinates in new_keypoints to the adjacent_keypoints list\n        adjacent_keypoints.extend(new_keypoints)\n\n    # Draw the skeleton with cv2.polylines (passing the image, polygon coordinates, closed flag, and color)\n    out_img = cv2.polylines(out_img, adjacent_keypoints, isClosed=False, color=(255, 255, 0))\n    \n    # Returns the image with the skeleton drawn\n    return out_img\n
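# --- Illustrative usage sketch (added note, not part of the original module) ---
# The helpers above compose as follows; 'sample.jpg' is an assumed local file and
# the PoseNet outputs are dummy arrays with the model's shapes (1 pose, 17 keypoints).
def _demo_draw_skeleton():
    input_img, src_img, scale = read_imgfile('sample.jpg', scale_factor=0.7125)
    pose_scores = np.array([0.9])                             # one detected pose
    keypoint_scores = np.full((1, 17), 0.8)                   # per-keypoint confidences
    keypoint_coords = np.random.uniform(0, 200, (1, 17, 2))   # (y, x) per keypoint
    return draw_skeleton(src_img, pose_scores, keypoint_scores, keypoint_coords)
# --- end sketch ---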
\n# Visualizes the PoseNet predictions on the input image and returns the output image\n# The difference from the function above appears to be whether coordinates are labeled\ndef draw_skel_and_kp(\n        img, instance_scores, keypoint_scores, keypoint_coords,\n        min_pose_score=0.5, min_part_score=0.5):\n    out_img = img\n    adjacent_keypoints = []\n    cv_keypoints = []\n    component = []\n\n    coord = []\n\n    # 01. Use the get_adjacent... function to fetch the list of neighbouring keypoint pairs\n    for ii, score in enumerate(instance_scores):\n        if score < min_pose_score:\n            continue\n\n        new_keypoints = get_adjacent_keypoints(\n            keypoint_scores[ii, :], keypoint_coords[ii, :, :], min_part_score)\n        \n        adjacent_keypoints.extend(new_keypoints) # --- identical to the function above up to this point\n\n        # 02. Use drawKeypoints and polylines to draw the bones connecting neighbouring keypoints and to label coordinates\n        for ks, kc in zip(keypoint_scores[ii, :], keypoint_coords[ii, :, :]):\n            if ks < min_part_score:\n                continue\n\n            # Only entries passing the threshold are appended to the component list\n            x = kc[1].astype(np.int32)\n            y = kc[0].astype(np.int32)\n            component.append(x)\n            component.append(y)\n\n            # Create a cv2.KeyPoint object and append it to the cv_keypoints list\n            cv_keypoints.append(cv2.KeyPoint(kc[1], kc[0], 10. * ks))\n\n        # Extend the coord list with the component list\n        coord.extend(component)\n\n    temp = []\n    # Font setup\n    font=cv2.FONT_HERSHEY_SIMPLEX\n    i = 0\n\n    while(True):\n        # Stop the loop when coord is empty\n        if len(coord) == 0:\n            break\n\n        # Append the i-th and (i+1)-th items of coord to temp\n        temp.append(coord[i])\n        temp.append(coord[i+1])\n        \n        # text = a string of the form (first item, second item)\n        text = '({}, {})'.format(temp[0], temp[1])\n\n        # Draw the text string on out_img at position temp with the chosen font\n        out_img = cv2.putText(out_img, text, temp, font, 1, (255,0,0), 1)\n        # Clear the temp list\n        temp.clear()\n        i += 2\n\n        # Stop the loop once i reaches the length of coord\n        if i >= len(coord):\n            break\n\n    # Draw the keypoints stored in cv_keypoints on out_img (with a color and a flag)\n    # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS = renders keypoints using their size and angle attributes, i.e. with varying circle sizes and orientation lines\n    out_img = cv2.drawKeypoints(\n        out_img, cv_keypoints, outImage=np.array([]), color=(255, 255, 0),\n        flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS\n    )\n    \n    # Draw the skeleton with cv2.polylines (passing the image, polygon coordinates, closed flag, and color)\n    out_img = cv2.polylines(out_img, adjacent_keypoints, isClosed=False, color=(255, 255, 0))\n\n    # Returns the image with the skeleton drawn\n    return out_img\n","repo_name":"KimMin-Gwan/ICT-convergence-project-contest-2023","sub_path":"Main_Project/posenetTest/posenet-python-master/posenet/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":10206,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"31762090578","text":"#!/usr/bin/env python\nfrom scipy import *\nimport scipy.io as sio\nimport matplotlib\nimport pylab\nimport matplotlib.patheffects as PathEffects\nimport matplotlib.transforms as transforms\nimport numpy as np\nimport sys, os, re, errno\nfrom itertools import groupby\nfrom annotate import annotate\n\nscriptPath = os.path.dirname(os.path.realpath(__file__))\n\noutpath = sys.argv[1]\ndistpath = sys.argv[2]\n\ncutoff = 8\n\n#load map from pdb to aln index\nwith open(sys.argv[3]) as f: #pdbseqIndices\n    dat = [l.split() for l in f.readlines()]\n    indmaps = dict([(d[0], array([int(x) for x in d[1:]])) for d in dat])\n\nwith open(sys.argv[4]) as f: #pdbseqs_full_ID\n    fullalnseqs = dict([l.split() for l in f.readlines()])\n\nwith open(sys.argv[5]) as f: # regions\n    dat = [l.split(',') for l in f.readlines()]\n    regions = [(n,int(s),int(e)) for n,s,e in dat]\nlobediv = [e for n,s,e in regions if n == 'Hinge'][0]\n\ndef fillGroups(positions, L, clr):\n    #for k, g in groupby(enumerate(positions), lambda i,x: i-x):\n    for k, g in groupby(enumerate(positions), lambda x: x[0]-x[1]):\n        inds = [x[1] for x in g]\n        start, end = inds[0], inds[-1]+1\n        pylab.fill([start+0.5, start+0.5, end+0.5, end+0.5],\n                   [+0.5,L+0.5,L+0.5,+0.5],color=clr, ec='none', lw=0)\n        pylab.fill([+0.5,L+0.5,L+0.5,+0.5],\n                   [start+0.5, start+0.5, end+0.5, end+0.5],\n                   color=clr, ec='none', lw=0)\n\ndef plotContacts(name):\n    # map from pdb seq index to structural index (ie, from seqres index, to\n    # resolved residue #)\n    pdbseq2structMap = indmaps[name] \n    fullalnseq = fullalnseqs[name] #seq after alignment\n    \n    aln2fullalnMap = where([c.isupper() or c == '-' for c in fullalnseq])[0]\n    pdbseq2fullalnMap = where([c not in \".-\" for c in fullalnseq])[0]\n    fullaln2pdbseqMap = -ones(len(fullalnseq), dtype=int)\n    fullaln2pdbseqMap[pdbseq2fullalnMap] = arange(len(pdbseq2fullalnMap))\n    aln2structMap = [pdbseq2structMap[fullaln2pdbseqMap[i]] \n                     if fullaln2pdbseqMap[i] != -1 else -1\n                     for i in aln2fullalnMap]\n\n    pdbseq = [x for x in fullalnseq if x not in \".-\"]\n    alnseq = [x for x in fullalnseq if x not in \".\" and not x.islower()]\n\n    distances = load(os.path.join(distpath, name + '.npy'))\n    L = int(((1+sqrt(1+8*len(distances)))/2) + 0.5) \n\n    stroke = PathEffects.withStroke(linewidth=3, foreground=\"w\")\n\n    #form matrix\n    distancesM = zeros((L,L),dtype=float)\n    distancesM[triu_indices(L,k=1)] = distances\n    distancesM = distancesM + distancesM.T\n\n    ########################################\n    # first, plot the raw contact map for the entire sequence \n    # that is, the contact map size will be equal to the length of the seqres\n    # header (minus junk) #shows inserts as red, missing residues as blue, and\n    # deletes as dotted lines (or colored lines?)\n\n    pylab.figure(figsize=(12,12))\n    ax = pylab.axes() \n    
trans = transforms.blended_transform_factory(ax.transAxes, ax.transData)\n\n    sL = len(pdbseq2structMap)\n    pairs = ((i,j) for i in range(sL-1) for j in range(i+1,sL))\n    distmat = zeros((sL,sL))*nan\n    for n,(i,j) in enumerate(pairs):\n        if pdbseq2structMap[i] == -1 or pdbseq2structMap[j] == -1:\n            continue\n        distmat[i,j] = distancesM[pdbseq2structMap[i], pdbseq2structMap[j]]\n        distmat[j,i] = distancesM[pdbseq2structMap[j], pdbseq2structMap[i]]\n    contacts = distmat < cutoff\n    contacts[diag([i != -1 for i in pdbseq2structMap])] = True\n    \n    cim = ones(distmat.shape + (3,))\n    cim[contacts] = ones(3)*0.4\n    pylab.imshow(cim, origin='lower', extent=(+0.5,sL+0.5,+0.5,sL+0.5),\n                 interpolation='nearest')\n\n    #annotate inserts relative to alignment (red)\n    inserts = where([c.islower() for c in pdbseq])[0]\n    fillGroups(inserts, sL, (0.7,0.4,0.4,0.3))\n\n    #annotate missing residues (blue)\n    missing = where([i == -1 for i in pdbseq2structMap])[0]\n    fillGroups(missing, sL, (0.4,0.4,0.7,0.3))\n\n    #map from alignment index to *closest* pdb seq index\n    def remap(i,s):\n        return searchsorted(pdbseq2fullalnMap, aln2fullalnMap[i], side=s)\n\n    #annotate regions\n    xregions = [(n, remap(s, 'left'), remap(e-1, 'left')+1)\n                for n,s,e in regions]\n    annotate(xregions, remap(lobediv, 'left'), sL, pylab.gca(), zorder=1)\n\n    #plot lines at deletion points (green)\n    deletepos = searchsorted(pdbseq2fullalnMap, where([x == '-' \n                                        for x in fullalnseq])[0])\n    #for d in set(deletepos):\n    #    pylab.axvline(d+0.5, color='g', zorder=9)\n    #    pylab.axhline(d+0.5, color='g', zorder=9)\n    transY = transforms.blended_transform_factory(ax.transAxes, ax.transData)\n    altern = False\n    for d, g in groupby(deletepos):\n        pylab.axvline(d+0.5, color='g', zorder=9)\n        pylab.axhline(d+0.5, color='g', zorder=9)\n        pylab.text(0.005 + altern*0.01, d+0.5, str(len(list(g))),\n                   verticalalignment='center', horizontalalignment='right', \n                   transform=transY, path_effects=[stroke],\n                   color='g', zorder=100)\n        altern = not altern\n\n    #plot sequence\n    altern = 0\n    for n,c in enumerate(pdbseq):\n        pylab.text(1.02 + altern*0.014, n+1, c, verticalalignment='center',\n                   horizontalalignment='center', transform=transY, zorder=100)\n        altern = (altern+1)%6\n    \n    #plot dotted line around alignment region\n    start = remap(0, 'left')-0.5\n    end = remap(-1, 'right')+1.5\n    pylab.fill([start, start, end, end],[start, end, end, start], fill=False,\n               ls='dashed', zorder=10)\n\n    pylab.title(name + \", full PDB sequence ({}A)\".format(cutoff))\n    pylab.subplots_adjust(bottom=0.05, right=0.90, top=0.95, left=0.05)\n\n
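    # --- Aside (added note, not part of the original script) ---
    # The L recovered above from len(distances) == L*(L-1)/2, and the
    # triu_indices fill, are equivalent to scipy.spatial.distance.squareform:
    #
    #   from scipy.spatial.distance import squareform
    #   d = np.array([1.0, 2.0, 3.0])                      # condensed upper triangle, L = 3
    #   M = squareform(d)                                  # 3x3 symmetric matrix, zero diagonal
    #   L = int((1 + np.sqrt(1 + 8 * len(d))) / 2 + 0.5)   # same recovery as above: L == 3
    # --- end aside ---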
    ########################################\n    #next, plot just the aligned part\n\n    pylab.figure(figsize=(12,12))\n    ax = pylab.axes() \n    trans = transforms.blended_transform_factory(ax.transAxes, ax.transData)\n\n    aL = len(aln2fullalnMap)\n\n    #first, get the aligned distance map\n    #need map from aln index to structure index\n    adistmat = zeros((aL, aL))*nan\n    for i,j in [(i,j) for i in range(aL-1) for j in range(i+1,aL)]:\n        xi = aln2structMap[i]\n        xj = aln2structMap[j]\n        if xi == -1 or xj == -1:\n            continue\n        adistmat[i,j] = distancesM[xi,xj]\n        adistmat[j,i] = distancesM[xi,xj]\n\n    #set diagonal for aligned residues\n    for i in range(aL):\n        if aln2structMap[i] != -1:\n            adistmat[i,i] = 0\n    acontacts = adistmat < cutoff\n\n    acim = ones(adistmat.shape + (3,))\n    acim[acontacts] = ones(3)*0.4\n    pylab.imshow(acim, origin='lower', extent=(+0.5,aL+0.5,+0.5,aL+0.5),\n                 interpolation='nearest')\n\n    #plot unresolved and deleted regions\n    unresolved = where([aln2structMap[n] == -1 for n in range(len(alnseq))])[0]\n    fillGroups(unresolved, aL, (0.4,0.4,0.7,0.3))\n    deleted = where([c == '-' for c in alnseq])[0]\n    fillGroups(deleted, aL, (0.4,0.7,0.4,0.3))\n    with open(os.path.join(outpath, 'unresolvedCounts'), 'at') as f:\n        print(repr((name, list(unresolved))), file=f)\n    with open(os.path.join(outpath, 'deleteCounts'), 'at') as f:\n        print(repr((name, list(deleted))), file=f)\n\n    #plot insertions (lines, red)\n    insertpos = searchsorted(aln2fullalnMap, where([x.islower()\n                                        for x in fullalnseq])[0])\n    #for d in set(insertpos):\n    #    pylab.axvline(d+0.5, color='r', zorder=9)\n    #    pylab.axhline(d+0.5, color='r', zorder=9)\n    transY = transforms.blended_transform_factory(ax.transAxes, ax.transData)\n    altern = False\n    insertlist = [(d,len(list(g))) for d,g in groupby(insertpos)]\n    for d, n in insertlist[1:-1]: #first and last are just sequence extension\n        pylab.axvline(d+0.5, color='r', zorder=9)\n        pylab.axhline(d+0.5, color='r', zorder=9)\n        pylab.text(0.005 + altern*0.01, d+0.5, str(n),\n                   verticalalignment='center', horizontalalignment='right',\n                   transform=transY, path_effects=[stroke],\n                   color='r', zorder=100)\n        altern = not altern\n\n    with open(os.path.join(outpath, 'insertCounts'), 'at') as f:\n        print(repr((name, insertlist)), file=f)\n\n    #annotate regions\n    annotate(regions, lobediv, aL, pylab.gca())\n    pylab.title(name + \", aligned sequence only ({}A)\".format(cutoff))\n    pylab.subplots_adjust(bottom=0.05, right=0.90, top=0.95, left=0.05)\n    return adistmat\n\ndef mkdir_p(path):\n    try:\n        os.makedirs(path)\n    except OSError as exc:  # Python >2.5\n        if exc.errno == errno.EEXIST and os.path.isdir(path):\n            pass\n        else: raise\n\nmkdir_p(os.path.join(outpath, 'pdb'))\nmkdir_p(os.path.join(outpath, 'aln'))\nmkdir_p(os.path.join(outpath, 'adist'))\n\n#erase files (names must match the camelCase files appended to in plotContacts)\nwith open(os.path.join(outpath, 'insertCounts'), 'wt') as f:\n    pass\nwith open(os.path.join(outpath, 'deleteCounts'), 'wt') as f:\n    pass\nwith open(os.path.join(outpath, 'unresolvedCounts'), 'wt') as f:\n    pass\n\nfor name in fullalnseqs.keys():\n    if not os.path.exists(os.path.join(distpath, name + '.npy')):\n        print(name, \"Skipping: No coords\")\n        continue\n    print(name)\n\n    adist = plotContacts(name)\n    pylab.subplots_adjust(bottom=0.05, right=0.95, top=0.95, left=0.05)\n\n    pylab.figure(1)\n    pylab.savefig(os.path.join(outpath, 'pdb/{}.png'.format(name)))\n    pylab.close()\n    pylab.figure(2)\n    pylab.savefig(os.path.join(outpath, 'aln/{}.png'.format(name)))\n    pylab.close()\n\n    np.save(os.path.join(outpath, 'adist/{}'.format(name)), adist) #np.save: pylab.save was removed from matplotlib long ago\n","repo_name":"ComputationalBiophysicsCollaborative/Kinase_Analysis","sub_path":"contacts/alignContacts.py","file_name":"alignContacts.py","file_ext":"py","file_size_in_byte":9667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
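The script's mkdir_p helper reimplements mkdir -p in the Python 2 style; on Python 3 the same behaviour is available directly (an equivalent sketch, not a change to the script above):

    import os

    def mkdir_p(path):
        os.makedirs(path, exist_ok=True)  # swallows EEXIST, still raises other OSErrors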
{"seq_id":"37233475254","text":"from .. import rt_dir\nfrom functools import wraps\nfrom sqlalchemy import create_engine, desc\nfrom sqlalchemy.orm import sessionmaker, scoped_session\n\nengine = create_engine(f'sqlite:///{rt_dir}/dist/flower.db', encoding='utf-8')\nSession = sessionmaker(bind=engine)\nSuccess = {'status': True}\nFailed = {'status': False}\n\n\ndef APIFuncWrapper(func):\n    @wraps(func)\n    def wrapper(*args, **kwargs):\n        session = scoped_session(Session)\n        try:\n            ret = func(*args, **kwargs, session=session)\n            session.commit()\n        except Exception as e:\n            ret = {'status': False, 'message': repr(e)}\n            session.rollback()\n        finally:\n            session.close()\n        return ret\n    return wrapper\n\n\ndef to_dict(obj):\n    return {c.name: getattr(obj, c.name) for c in obj.__table__.columns}\n","repo_name":"Rhythmicc/ClassFlower","sub_path":"src/API/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"18742697639","text":"import numpy as np\nfrom matplotlib import gridspec, transforms\n\nfrom desilike import plotting\nfrom desilike.plotting import *\nfrom desilike.parameter import is_parameter_sequence\nfrom . import diagnostics, utils\n\n\ndef _make_list(obj, length=None, default=None):\n    \"\"\"\n    Return list from ``obj``.\n\n    Parameters\n    ----------\n    obj : object, tuple, list, array\n        If tuple, list or array, cast to list.\n        Else return list of ``obj`` with length ``length``.\n\n    length : int, default=None\n        Length of list to return.\n\n    Returns\n    -------\n    toret : list\n    \"\"\"\n    if obj is None:\n        obj = default\n    if is_parameter_sequence(obj):\n        obj = list(obj)\n        if length is not None:\n            obj += [default] * (length - len(obj))\n    else:\n        obj = [obj]\n        if length is not None:\n            obj += [default] * (length - len(obj))\n    return obj\n\n\ndef _get_default_chain_params(chains, params=None, **kwargs):\n    from desilike.parameter import ParameterCollection\n    chains = _make_list(chains)\n    if params is not None:\n        params = _make_list(params)\n        list_params = ParameterCollection()\n        for param in params:\n            for chain in chains[::-1]:\n                list_params += chain.params(name=[str(param)])\n        return list_params\n    list_params = [chain.params(**kwargs) for chain in chains]\n    return ParameterCollection([params for params in list_params[0] if all(params in lparams for lparams in list_params[1:])])\n\n\ndef _get_default_profiles_params(profiles, params=None, of='bestfit', **kwargs):\n    from desilike.parameter import ParameterCollection\n    profiles = _make_list(profiles)\n    if params is not None:\n        params = _make_list(params)\n        list_params = ParameterCollection()\n        for param in params:\n            for profile in profiles[::-1]:\n                list_params += profile.get(of).params(name=[str(param)])\n        return list_params\n    list_params = [profile.get(of).params(**kwargs) for profile in profiles]\n    return ParameterCollection([params for params in list_params[0] if all(params in lparams for lparams in list_params[1:])])\n\n\n@plotting.plotter\ndef plot_trace(chains, params=None, figsize=None, colors=None, labelsize=None, kw_plot=None, fig=None):\n    \"\"\"\n    Make trace plot as a function of steps, with a panel for each parameter.\n\n    Parameters\n    ----------\n    chains : list, default=None\n        List of (or single) :class:`Chain` instance(s).\n\n    params : list, ParameterCollection, default=None\n        Parameters to plot trace for.\n        Defaults to varied and not derived parameters.\n\n    figsize : float, tuple, default=None\n        Figure size.\n\n    
colors : str, list\n List of (or single) color(s) for chains.\n\n labelsize : int, default=None\n Label sizes.\n\n kw_plot : dict, default=None\n Optional arguments for :meth:`matplotlib.axes.Axes.plot`.\n Defaults to ``{'alpha': 0.2}``.\n\n fn : str, Path, default=None\n Optionally, path where to save figure.\n If not provided, figure is not saved.\n\n kw_save : dict, default=None\n Optionally, arguments for :meth:`matplotlib.figure.Figure.savefig`.\n\n show : bool, default=False\n If ``True``, show figure.\n\n fig : matplotlib.figure.Figure, default=None\n Optionally, a figure with at least as many axes as ``params``.\n\n Returns\n -------\n fig : matplotlib.figure.Figure\n \"\"\"\n from matplotlib import pyplot as plt\n chains = _make_list(chains)\n params = _get_default_chain_params(chains, params=params, varied=True, derived=False)\n nparams = len(params)\n colors = _make_list(colors, length=len(chains), default=None)\n kw_plot = kw_plot or {'alpha': 0.2}\n\n steps = 1 + np.arange(max(chain.size for chain in chains))\n figsize = figsize or (8, 1.5 * nparams)\n if fig is None:\n fig, lax = plt.subplots(nparams, sharex=True, sharey=False, figsize=figsize, squeeze=False)\n lax = lax.ravel()\n else:\n lax = fig.axes\n\n for ax, param in zip(lax, params):\n ax.grid(True)\n ax.set_ylabel(chains[0][param].param.latex(inline=True), fontsize=labelsize)\n ax.set_xlim(steps[0], steps[-1])\n for ichain, chain in enumerate(chains):\n tmp = chain[param].ravel()\n ax.plot(steps[:len(tmp)], tmp, color=colors[ichain], **kw_plot)\n\n lax[-1].set_xlabel('step', fontsize=labelsize)\n return fig\n\n\n@plotting.plotter\ndef plot_gelman_rubin(chains, params=None, multivariate=False, threshold=None, slices=None, labelsize=None, fig=None, **kwargs):\n \"\"\"\n Plot Gelman-Rubin statistics as a function of steps.\n\n Parameters\n ----------\n chains : list, default=None\n List of (or single) :class:`Chain` instance(s).\n\n params : list, ParameterCollection, default=None\n Parameters to plot Gelman-Rubin statistics for.\n Defaults to varied and not derived parameters.\n\n multivariate : bool, default=False\n If ``True``, add line for maximum of eigen value of Gelman-Rubin matrix.\n See :func:`diagnostics.gelman_rubin`.\n\n threshold : float, default=None\n If not ``None``, plot horizontal line at this value.\n\n slices : list, array\n List of increasing number of steps to include in calculation of Gelman-Rubin statistics.\n Defaults to ``np.arange(100, nsteps, 500)``, where ``nsteps`` is the minimum size of input ``chains``:\n Gelman-Rubin statistics is then plotted for chain slices (0, 100), (0, 600), ...\n\n labelsize : int, default=None\n Label sizes.\n\n fig : matplotlib.figure.Figure, default=None\n Optionally, a figure with at least 1 axis.\n\n **kwargs : dict\n Optional arguments for :func:`diagnostics.gelman_rubin` ('nsplits', 'check_valid').\n\n fn : str, Path, default=None\n Optionally, path where to save figure.\n If not provided, figure is not saved.\n\n kw_save : dict, default=None\n Optionally, arguments for :meth:`matplotlib.figure.Figure.savefig`.\n\n show : bool, default=False\n If ``True``, show figure.\n\n Returns\n -------\n fig : matplotlib.figure.Figure\n \"\"\"\n from matplotlib import pyplot as plt\n chains = _make_list(chains)\n params = _get_default_chain_params(chains, params=params, varied=True, derived=False)\n if slices is None:\n nsteps = min(chain.size for chain in chains)\n slices = np.arange(100, nsteps, 500)\n gr_multi = []\n gr = {param: [] for param in params}\n for end in 
slices:\n chains_sliced = [chain.ravel()[:end] for chain in chains]\n if multivariate: gr_multi.append(diagnostics.gelman_rubin(chains_sliced, params, method='eigen', **kwargs).max())\n for param in gr: gr[param].append(diagnostics.gelman_rubin(chains_sliced, param, method='diag', **kwargs))\n for param in gr: gr[param] = np.asarray(gr[param])\n\n if fig is None:\n fig, ax = plt.subplots()\n else:\n ax = fig.axes[0]\n ax.grid(True)\n ax.set_xlabel('step', fontsize=labelsize)\n ax.set_ylabel(r'$\\hat{R}$', fontsize=labelsize)\n\n if multivariate: ax.plot(slices, gr_multi, label='multi', linestyle='-', linewidth=1, color='k')\n for param in params:\n ax.plot(slices, gr[param], label=chains[0][param].param.latex(inline=True), linestyle='--', linewidth=1)\n if threshold is not None: ax.axhline(y=threshold, xmin=0., xmax=1., linestyle='--', linewidth=1, color='k')\n ax.legend()\n return fig\n\n\n@plotting.plotter\ndef plot_geweke(chains, params=None, threshold=None, slices=None, labelsize=None, fig=None, **kwargs):\n \"\"\"\n Plot Geweke statistics.\n\n Parameters\n ----------\n chains : list, default=None\n List of (or single) :class:`Chain` instance(s).\n\n params : list, ParameterCollection, default=None\n Parameters to plot Geweke statistics for.\n Defaults to varied and not derived parameters.\n\n threshold : float, default=None\n If not ``None``, plot horizontal line at this value.\n\n slices : list, array\n List of increasing number of steps to include in calculation of Geweke statistics.\n Defaults to ``np.arange(100, nsteps, 500)``, where ``nsteps`` is the minimum size of input ``chains``:\n Geweke statistics is then plotted for chain slices (0, 100), (0, 600), ...\n\n labelsize : int, default=None\n Label sizes.\n\n fig : matplotlib.figure.Figure, default=None\n Optionally, a figure with at least 1 axis.\n\n **kwargs : dict\n Optional arguments for :func:`diagnostics.geweke` ('first', 'last').\n\n fn : str, Path, default=None\n Optionally, path where to save figure.\n If not provided, figure is not saved.\n\n kw_save : dict, default=None\n Optionally, arguments for :meth:`matplotlib.figure.Figure.savefig`.\n\n show : bool, default=False\n If ``True``, show figure.\n\n Returns\n -------\n fig : matplotlib.figure.Figure\n \"\"\"\n from matplotlib import pyplot as plt\n params = _get_default_chain_params(chains, params=params, varied=True, derived=False)\n if slices is None:\n nsteps = min(chain.size for chain in chains)\n slices = np.arange(100, nsteps, 500)\n geweke = {param: [] for param in params}\n for end in slices:\n chains_sliced = [chain.ravel()[:end] for chain in chains]\n for param in geweke: geweke[param].append(diagnostics.geweke(chains_sliced, param, **kwargs))\n for param in geweke: geweke[param] = np.asarray(geweke[param]).mean(axis=-1)\n\n if fig is None:\n fig, ax = plt.subplots()\n else:\n ax = fig.axes[0]\n ax.grid(True)\n ax.set_xlabel('step', fontsize=labelsize)\n ax.set_ylabel(r'geweke', fontsize=labelsize)\n\n for param in params:\n ax.plot(slices, geweke[param], label=chains[0][param].param.latex(inline=True), linestyle='-', linewidth=1)\n if threshold is not None: ax.axhline(y=threshold, xmin=0., xmax=1., linestyle='--', linewidth=1, color='k')\n ax.legend()\n return fig\n\n\n@plotting.plotter\ndef plot_autocorrelation_time(chains, params=None, threshold=50, slices=None, labelsize=None, fig=None):\n r\"\"\"\n Plot integrated autocorrelation time.\n\n Parameters\n ----------\n chains : list, default=None\n List of (or single) :class:`Chain` instance(s).\n\n 
params : list, ParameterCollection, default=None\n        Parameters to plot autocorrelation time for.\n        Defaults to varied and not derived parameters.\n\n    threshold : float, default=50\n        If not ``None``, plot :math:`y = x/\mathrm{threshold}` line.\n        Integrated autocorrelation time estimation can be considered reliable when falling under this line.\n\n    slices : list, array\n        List of increasing number of steps to include in calculation of autocorrelation time.\n        Defaults to ``np.arange(100, nsteps, 500)``, where ``nsteps`` is the minimum size of input ``chains``:\n        Autocorrelation time is then plotted for chain slices (0, 100), (0, 600), ...\n\n    labelsize : int, default=None\n        Label sizes.\n\n    fig : matplotlib.figure.Figure, default=None\n        Optionally, a figure with at least 1 axis.\n\n    fn : str, Path, default=None\n        Optionally, path where to save figure.\n        If not provided, figure is not saved.\n\n    kw_save : dict, default=None\n        Optionally, arguments for :meth:`matplotlib.figure.Figure.savefig`.\n\n    show : bool, default=False\n        If ``True``, show figure.\n\n    Returns\n    -------\n    fig : matplotlib.figure.Figure\n    \"\"\"\n    from matplotlib import pyplot as plt\n    chains = _make_list(chains)\n    params = _get_default_chain_params(chains, params=params, varied=True, derived=False)\n    if slices is None:\n        nsteps = min(chain.size for chain in chains)\n        slices = np.arange(100, nsteps, 500)\n    autocorr = {param: [] for param in params}\n    for end in slices:\n        chains_sliced = [chain.ravel()[:end] for chain in chains]\n        for param in autocorr:\n            tmp = diagnostics.integrated_autocorrelation_time(chains_sliced, param)\n            autocorr[param].append(tmp)\n    for param in autocorr: autocorr[param] = np.asarray(autocorr[param])\n\n    if fig is None:\n        fig, ax = plt.subplots()\n    else:\n        ax = fig.axes[0]\n    ax.grid(True)\n    ax.set_xlabel('step $N$', fontsize=labelsize)\n    ax.set_ylabel(r'$\tau$', fontsize=labelsize)  # raw string: without it '\t' is a tab escape\n\n    for param in params:\n        ax.plot(slices, autocorr[param], label=chains[0][param].param.latex(inline=True), linestyle='--', linewidth=1)\n    if threshold is not None:\n        ax.plot(slices, slices * 1. 
/ threshold, label='$N/{:d}$'.format(threshold), linestyle='--', linewidth=1, color='k')\n ax.legend()\n\n return fig\n\n\n@plotting.plotter\ndef plot_triangle(chains, params=None, labels=None, g=None, **kwargs):\n \"\"\"\n Triangle plot.\n\n Note\n ----\n *GetDist* package is required.\n\n Parameters\n ----------\n chains : list, default=None\n List of (or single) :class:`Chain` instance(s).\n\n params : list, ParameterCollection, default=None\n Parameters to plot distribution for.\n Defaults to varied and not derived parameters.\n\n labels : str, list, default=None\n Name for *GetDist* to use for input chains.\n\n fn : str, Path, default=None\n Optionally, path where to save figure.\n If not provided, figure is not saved.\n\n kw_save : dict, default=None\n Optionally, arguments for :meth:`matplotlib.figure.Figure.savefig`.\n\n g : getdist subplot_plotter()\n can be created with `g = getdist.plots.get_subplot_plotter()` and can be modified with g.settings\n\n show : bool, default=False\n If ``True``, show figure.\n\n **kwargs : dict\n Optional parameters for :meth:`GetDistPlotter.triangle_plot`.\n\n Returns\n -------\n g : getdist.plots.GetDistPlotter\n \"\"\"\n from getdist import plots\n if g is None: g = plots.get_subplot_plotter()\n chains = _make_list(chains)\n labels = _make_list(labels, length=len(chains), default=None)\n params = _get_default_chain_params(chains, params=params, varied=True, input=True)\n chains = [chain.to_getdist(label=label, params=chain.params(name=params.names())) for chain, label in zip(chains, labels)]\n g.triangle_plot(chains, [str(param) for param in params], **kwargs)\n return g\n\n\n@plotting.plotter\ndef plot_aligned(profiles, param, ids=None, labels=None, colors=None, truth=None, error='error',\n labelsize=None, ticksize=None, kw_scatter=None, yband=None, kw_mean=None, kw_truth=None, kw_yband=None,\n kw_legend=None, fig=None):\n \"\"\"\n Plot best fit estimates for single parameter.\n\n Parameters\n ----------\n profiles : list\n List of (or single) :class:`Profiles` instance(s).\n\n param : Parameter, str\n Parameter name.\n\n ids : list, str, default=None\n Label(s) for input profiles.\n\n labels : list, str, default=None\n Label(s) for best fits within each :class:`Profiles` instance.\n\n colors : list, str, default=None\n Color(s) for best fits within each :class:`Profiles` instance.\n\n truth : float, bool, default=None\n Plot this truth / reference value for parameter.\n If ``True``, take :attr:`Parameter.value`.\n\n error : str, default='error'\n What to take as error:\n - 'error' for parabolic error\n - 'interval' for lower and upper errors corresponding to :math:`\\Delta \\chi^{2} = 1`.\n\n labelsize : int, default=None\n Label sizes.\n\n ticksize : int, default=None\n Tick sizes.\n\n kw_scatter : dict, default=None\n Optional arguments for :meth:`matplotlib.axes.Axes.scatter`.\n Defaults to ``{'marker': 'o'}``.\n\n yband : float, tuple, default=None\n If not ``None``, plot horizontal band.\n If tuple and last element set to ``'abs'``,\n absolute lower and upper y-coordinates of band;\n lower and upper fraction around truth.\n If float, fraction around truth.\n\n kw_mean : dict, default=None\n If ``None``, no mean is plotted.\n Else, optional arguments for :meth:`matplotlib.axes.Axes.errorbar`.\n Defaults to ``{'marker': 'o'}``.\n\n kw_truth : dict, default=None\n If ``None``, and ``truth`` not provided, no truth is plotted.\n Else, optional arguments for :meth:`matplotlib.axes.Axes.axhline`.\n Defaults to ``{'color': 'k', 'linestyle': ':', 
'linewidth': 2}``.\n\n kw_yband : dict, default=None\n Optional arguments for :meth:`matplotlib.axes.Axes.axhspan`.\n\n kw_legend : dict, default=None\n Optional arguments for :meth:`matplotlib.axes.Axes.legend`.\n\n fig : matplotlib.figure.Figure, default=None\n Optionally, a figure with at least 1 axis.\n\n fn : str, Path, default=None\n Optionally, path where to save figure.\n If not provided, figure is not saved.\n\n kw_save : dict, default=None\n Optionally, arguments for :meth:`matplotlib.figure.Figure.savefig`.\n\n show : bool, default=False\n If ``True``, show figure.\n\n Returns\n -------\n fig : matplotlib.figure.Figure\n \"\"\"\n from matplotlib import pyplot as plt\n profiles = _make_list(profiles)\n if truth is True or (truth is None and kw_truth is not None):\n truth = profiles[0].bestfit[param].param.value\n kw_truth = dict(kw_truth if kw_truth is not None else {'color': 'k', 'linestyle': ':', 'linewidth': 2})\n maxpoints = max(map(lambda prof: len(prof.bestfit), profiles))\n ids = _make_list(ids, length=len(profiles), default=None)\n labels = _make_list(labels, length=maxpoints, default=None)\n colors = _make_list(colors, length=maxpoints, default=['C{:d}'.format(i) for i in range(maxpoints)])\n add_legend = any(label is not None for label in labels)\n add_mean = kw_mean is not None\n if add_mean:\n kw_mean = kw_mean if isinstance(kw_mean, dict) else {'marker': 'o'}\n kw_scatter = dict(kw_scatter or {'marker': 'o'})\n kw_yband = dict(kw_yband or {})\n kw_legend = dict(kw_legend or {})\n\n xmain = np.arange(len(profiles))\n xaux = np.linspace(-0.15, 0.15, maxpoints)\n if fig is None:\n fig, ax = plt.subplots()\n else:\n ax = fig.axes[0]\n for iprof, prof in enumerate(profiles):\n if param not in prof.bestfit: continue\n ibest = prof.bestfit.logposterior.argmax()\n for ipoint, point in enumerate(prof.bestfit[param]):\n yerr = None\n if error:\n try:\n yerr = prof.get(error)[param]\n except KeyError:\n yerr = None\n else:\n if len(yerr) == 1:\n yerr = yerr[0] # only for best fit\n else:\n yerr = yerr[ibest]\n label = labels[ipoint] if iprof == 0 else None\n ax.errorbar(xmain[iprof] + xaux[ipoint], point, yerr=yerr, color=colors[ipoint], label=label, linestyle='none', **kw_scatter)\n if add_mean:\n ax.errorbar(xmain[iprof], prof.bestfit[param].mean(), yerr=prof.bestfit[param].std(ddof=1), linestyle='none', **kw_mean)\n if truth is not None:\n ax.axhline(y=truth, xmin=0., xmax=1., **kw_truth)\n if yband is not None:\n if np.ndim(yband) == 0:\n yband = (yband, yband)\n if yband[-1] == 'abs':\n low, up = yband[0], yband[1]\n else:\n if truth is None:\n raise ValueError('Plotting relative band requires truth value.')\n low, up = truth * (1 - yband[0]), truth * (1 + yband[1])\n ax.axhspan(low, up, **kw_yband)\n\n ax.set_xticks(xmain)\n ax.set_xticklabels(ids, rotation=40, ha='right', fontsize=ticksize)\n ax.grid(True, axis='y')\n ax.set_ylabel(profiles[0].bestfit[param].param.latex(inline=True), fontsize=labelsize)\n ax.tick_params(labelsize=ticksize)\n if add_legend: ax.legend(**{**{'ncol': maxpoints}, **kw_legend})\n return fig\n\n\n@plotting.plotter\ndef plot_aligned_stacked(profiles, params=None, ids=None, labels=None, truths=None, ybands=None, ylimits=None, figsize=None, fig=None, **kwargs):\n \"\"\"\n Plot best fits, with a panel for each parameter.\n\n Parameters\n ----------\n profiles : list\n List of (or single) :class:`Profiles` instance(s).\n\n params : list, ParameterCollection, default=None\n Parameters to plot best fits for.\n Defaults to varied and not derived 
parameters.\n\n    ids : list, str\n        Label(s) for input profiles.\n\n    labels : list, str\n        Label(s) for best fits within each :class:`Profiles` instance.\n\n    truths : list, default=None\n        Plot these truth / reference values for each parameter.\n        If ``True``, take :attr:`Parameter.value`.\n\n    ybands : list, default=None\n        If not ``None``, plot horizontal bands.\n        See :func:`plot_aligned`.\n\n    ylimits : list, default=None\n        If not ``None``, limits for y-axis.\n\n    figsize : float, tuple, default=None\n        Figure size.\n\n    fig : matplotlib.figure.Figure, default=None\n        Optionally, a figure with at least as many axes as ``params``.\n\n    fn : str, Path, default=None\n        Optionally, path where to save figure.\n        If not provided, figure is not saved.\n\n    kw_save : dict, default=None\n        Optionally, arguments for :meth:`matplotlib.figure.Figure.savefig`.\n\n    show : bool, default=False\n        If ``True``, show figure.\n\n    Returns\n    -------\n    fig : matplotlib.figure.Figure\n    \"\"\"\n    from matplotlib import pyplot as plt\n    profiles = _make_list(profiles)\n    params = _get_default_profiles_params(profiles, params=params, varied=True, derived=False)\n    truths = _make_list(truths, length=len(params), default=None)\n    ybands = _make_list(ybands, length=len(params), default=None)\n    ylimits = _make_list(ylimits, length=len(params), default=None)  # was _make_list(ybands, ...), which silently ignored ylimits\n    maxpoints = max(map(lambda prof: len(prof.bestfit), profiles))\n\n    nrows = len(params)\n    ncols = len(profiles) if len(profiles) > 1 else maxpoints\n    if fig is None:\n        figsize = figsize or (ncols, 3. * nrows)\n        fig, lax = plt.subplots(nrows, 1, figsize=figsize)\n        fig.subplots_adjust(wspace=0.1, hspace=0.1)\n    else:\n        lax = fig.axes\n\n    for iparam1, param1 in enumerate(params):\n        ax = lax[iparam1]\n        plot_aligned(profiles, param=param1, ids=ids, labels=labels, truth=truths[iparam1], yband=ybands[iparam1], fig=ax, **kwargs)\n        if (iparam1 < nrows - 1) or not ids: ax.get_xaxis().set_visible(False)\n        ax.set_ylim(ylimits[iparam1])\n        if iparam1 != 0:\n            leg = ax.get_legend()\n            if leg is not None: leg.remove()\n    return fig\n\n\n@plotting.plotter\ndef plot_profile(profiles, params=None, offsets=0., nrows=1, labels=None, colors=None, linestyles=None,\n                 cl=(1, 2, 3), labelsize=None, ticksize=None, kw_profile=None, kw_cl=None,\n                 kw_legend=None, figsize=None, fig=None):\n    \"\"\"\n    Plot profiles, with a panel for each parameter.\n\n    Parameters\n    ----------\n    profiles : list\n        List of (or single) :class:`Profiles` instance(s).\n\n    params : list, ParameterCollection, default=None\n        Parameters to plot profiles for.\n        Defaults to varied and not derived parameters.\n\n    offsets : list, float, default=0\n        Vertical offset for each profile.\n\n    nrows : int, default=1\n        Number of rows in figure.\n\n    labels : list, str\n        Label(s) for profiles within each :class:`Profiles` instance.\n\n    colors : list, str, default=None\n        Color(s) for profiles within each :class:`Profiles` instance.\n\n    linestyles : list, str, default=None\n        Linestyle(s) for profiles within each :class:`Profiles` instance.\n\n    cl : int, tuple, default=(1, 2, 3)\n        Confidence levels to plot.\n\n    labelsize : int, default=None\n        Label sizes.\n\n    ticksize : int, default=None\n        Tick sizes.\n\n    kw_profile : dict, default=None\n        Optional arguments for :meth:`matplotlib.axes.Axes.plot`.\n        Defaults to ``{'marker': 'o'}``.\n\n    kw_cl : dict, default=None\n        Optional arguments for :meth:`matplotlib.axes.Axes.axhline`.\n        Defaults to ``{'color': 'k', 'linestyle': ':', 'linewidth': 2}``.\n\n    kw_legend : dict, default=None\n        Optional arguments for 
:meth:`matplotlib.axes.Axes.legend`.\n\n figsize : float, tuple, default=None\n Figure size.\n\n fig : matplotlib.figure.Figure, default=None\n Optionally, a figure with at least as many axes as ``params``.\n\n fn : str, Path, default=None\n Optionally, path where to save figure.\n If not provided, figure is not saved.\n\n kw_save : dict, default=None\n Optionally, arguments for :meth:`matplotlib.figure.Figure.savefig`.\n\n show : bool, default=False\n If ``True``, show figure.\n\n Returns\n -------\n fig : matplotlib.figure.Figure\n \"\"\"\n from matplotlib import pyplot as plt\n profiles = _make_list(profiles)\n params = _get_default_profiles_params(profiles, params=params, of='profile', varied=True, derived=False)\n nprofiles = len(profiles)\n offsets = _make_list(offsets, length=nprofiles, default=0.)\n labels = _make_list(labels, length=nprofiles, default=None)\n colors = _make_list(colors, length=nprofiles, default=None)\n linestyles = _make_list(linestyles, length=nprofiles, default=None)\n if np.ndim(cl) == 0: cl = [cl]\n add_legend = any(label is not None for label in labels)\n kw_profile = dict(kw_profile or {})\n kw_cl = dict(kw_cl if kw_cl is not None else {'color': 'k', 'linestyle': ':', 'linewidth': 2})\n xshift_cl = kw_cl.pop('xhift', 0.9)\n kw_legend = dict(kw_legend or {})\n\n ncols = int((len(params) + nrows - 1) * 1. / nrows)\n\n if fig is None:\n figsize = figsize or (4. * ncols, 4. * nrows)\n fig, lax = plt.subplots(nrows, ncols, figsize=figsize, squeeze=False)\n lax = lax.ravel()\n fig.subplots_adjust(wspace=0.2, hspace=0.2)\n else:\n lax = fig.axes\n\n def data_to_axis(ax, y):\n axis_to_data = ax.transAxes + ax.transData.inverted()\n return axis_to_data.inverted().transform((0, y))[1]\n\n for iparam1, param1 in enumerate(params):\n ax = lax[iparam1]\n for ipro, pro in enumerate(profiles):\n pro = pro.profile\n if param1 not in pro: continue\n ax.plot(pro[param1][:, 0], - 2 * (pro[param1][:, 1] - offsets[ipro]), color=colors[ipro], linestyle=linestyles[ipro], label=labels[ipro], **kw_profile)\n for nsigma in cl:\n y = utils.nsigmas_to_deltachi2(nsigma, ddof=1)\n ax.axhline(y=y, xmin=0., xmax=1., **kw_cl)\n ax.text(xshift_cl, y + 0.1, r'${:d}\\sigma$'.format(nsigma), horizontalalignment='left', verticalalignment='bottom',\n transform=transforms.blended_transform_factory(ax.transAxes, ax.transData), color='k', fontsize=labelsize)\n lim = ax.get_ylim()\n ax.set_ylim(0., lim[-1] + 2.)\n ax.tick_params(labelsize=ticksize)\n ax.set_xlabel(param1.latex(inline=True), fontsize=labelsize)\n if iparam1 == 0: ax.set_ylabel(r'$\\Delta \\chi^{2}$', fontsize=labelsize)\n if add_legend and iparam1 == 0: ax.legend(**kw_legend)\n\n return fig\n\n\ndef plot_profile_comparison(profiles, profiles_ref, params=None, labels=None, colors=None, **kwargs):\n r\"\"\"\n Plot profile comparison, wrapping :func:`plot_profile`.\n Profiles ``profiles`` and ``profiles_ref`` are both offset by ``profiles`` minimum :math:`\\chi^{2}`.\n\n Parameters\n ----------\n profiles : list\n List of (or single) :class:`Profiles` instance(s).\n\n profiles_ref : list\n List of (or single) :class:`Profiles` instance(s) to compare to.\n\n params : list, ParameterCollection, default=None\n Parameters to plot profiles for.\n Defaults to varied and not derived parameters.\n\n labels : list, str\n Label(s) for profiles within each :class:`Profiles` instance.\n\n colors : list, str, default=None\n Color(s) for profiles within each :class:`Profiles` instance.\n\n **kwargs : dict\n Optional arguments for 
:func:`plot_profile`\n ('nrows', 'cl', 'labelsize', 'ticksize', 'kw_profile', 'kw_cl', 'kw_legend', 'figsize').\n\n fig : matplotlib.figure.Figure, default=None\n Optionally, a figure with at least as many axes as ``params``.\n\n fn : str, Path, default=None\n Optionally, path where to save figure.\n If not provided, figure is not saved.\n\n kw_save : dict, default=None\n Optionally, arguments for :meth:`matplotlib.figure.Figure.savefig`.\n\n show : bool, default=False\n If ``True``, show figure.\n\n Returns\n -------\n fig : matplotlib.figure.Figure\n \"\"\"\n profiles = _make_list(profiles)\n profiles_ref = _make_list(profiles_ref)\n if len(profiles) != len(profiles_ref):\n raise ValueError('profiles_ref must be of same length as profiles')\n nprofiles = len(profiles)\n labels = _make_list(labels, length=nprofiles, default=None)\n colors = _make_list(colors, length=nprofiles, default=None)\n # Subtract chi2_min of profiles from both profiles and profiles_ref\n offsets = [pro.bestfit.logposterior.max() for pro in profiles]\n colors = colors * 2\n linestyles = ['-'] * nprofiles + ['--'] * nprofiles\n return plot_profile(profiles + profiles_ref, params=params, offsets=offsets, labels=labels, colors=colors, linestyles=linestyles, **kwargs)\n","repo_name":"cosmodesi/desilike","sub_path":"desilike/samples/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":29541,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"61"} +{"seq_id":"74067523075","text":"\"\"\"Passage similarity detection\"\"\"\n\nfrom __future__ import annotations\n\nimport json\nimport os\nimport re\nfrom abc import ABC\nfrom collections import deque\nfrom html import unescape as unescape_html\nfrom shutil import rmtree\nfrom typing import Any, Callable, Iterable, Optional\nfrom xml.sax.saxutils import unescape as unescape_xml\nimport sqlite3\n\nimport dill as pickle\nimport lz4.frame\nimport numpy as np\nimport spacy\nimport torch\nfrom recordclass import dataobject\nfrom scipy.sparse import csr_matrix\nfrom sentence_transformers import SentenceTransformer, util\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import linear_kernel\nfrom spacy.tokens import Doc\nfrom text_preprocessing import PreProcessor, Token, Tokens\nfrom tqdm import tqdm\n\nos.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\"\n\nTAGS = re.compile(r\"<[^>]+>\")\nPHILO_TEXT_OBJECT_LEVELS = {\"doc\": 1, \"div1\": 2, \"div2\": 3, \"div3\": 4, \"para\": 5, \"sent\": 6, \"word\": 7}\nTEMP_DIR = os.getcwd()\n\n\nclass PassageGroup(dataobject, fast_new=True):\n \"\"\"Text passage with all associated properties and vector representation\"\"\"\n\n start_byte: int = 0\n end_byte: int = 0\n filename: str = \"\"\n metadata: dict = {}\n\n\nclass MergedGroup(dataobject, fast_new=True):\n \"\"\"A source and target PassageGroup pair with similarity\"\"\"\n\n source: PassageGroup = PassageGroup()\n target: PassageGroup = PassageGroup()\n similarity: float = 0.0\n\n\nclass DocumentChunks:\n \"\"\"A generator with caching\"\"\"\n\n def __init__(self, docs: Iterable[list[str]], save_path: str, transform_function: Callable):\n self.docs = docs\n self.doc_list: list[list[str]] = []\n self.doc_count = 0\n self.generator_exhausted = False\n self.transform_function = transform_function\n self.corpus_type = self.transform_function.__qualname__.split(\".\")[0]\n self.path = os.path.join(TEMP_DIR, \"output/chunks/\", save_path)\n if os.path.exists(self.path):\n rmtree(self.path)\n 
os.makedirs(self.path, exist_ok=True)\n\n    def __iter__(self) -> Iterable[str | list[str] | torch.Tensor | np.ndarray]:\n        if self.generator_exhausted is False:\n            if self.doc_count == 0:\n                for doc in self.docs:\n                    doc = self.__format_doc(doc)\n                    self.__save(doc)\n                    self.doc_count += 1\n                    yield doc\n            else:\n                for doc_name in range(self.doc_count):\n                    yield self.__load(doc_name)\n                for doc in self.docs:\n                    doc = self.__format_doc(doc)\n                    self.__save(doc)\n                    self.doc_count += 1\n                    yield doc\n            self.generator_exhausted = True\n        else:\n            for doc_name in self.doc_list:\n                yield self.__load(doc_name)\n\n    def __save(self, doc: list[str] | str):\n        filename = os.path.join(self.path, str(self.doc_count))\n        if self.transform_function is None:\n            with open(filename, \"wb\") as output_file:\n                pickle.dump(doc, output_file)\n            return  # no transform to apply; calling None below would raise\n        transformed_doc = self.transform_function([doc])\n        if self.corpus_type == \"TransformerCorpus\":\n            torch.save(transformed_doc, f\"{filename}.pt\")\n        else:\n            np.save(f\"{filename}.npy\", transformed_doc)\n\n    def __load(self, doc_name) -> list[str] | torch.Tensor | np.ndarray:\n        filename = os.path.join(self.path, str(doc_name))\n        if self.transform_function is None:\n            with open(filename, \"rb\") as input_file:\n                doc = pickle.load(input_file)\n            return doc\n        elif self.corpus_type == \"TransformerCorpus\":\n            return torch.load(f\"{filename}.pt\")\n        return np.load(f\"{filename}.npy\")[0]\n\n    def __get_doc(self, index: int) -> list[str] | torch.Tensor | np.ndarray:\n        doc = None\n        while index > self.doc_count:\n            try:\n                doc = next(self.docs)\n                self.__format_doc(doc)\n                self.__save(doc)\n                self.doc_count += 1\n            except StopIteration as e:\n                raise IndexError from e\n        if doc is None:\n            return self.__load(index)\n        return doc\n\n    def __getitem__(self, item: int | slice) -> list[str] | str | list[list[str] | str] | np.ndarray | torch.Tensor:\n        if isinstance(item, slice):\n            end = item.stop\n            if item.stop > len(self):  # avoid index out of range\n                end = len(self)\n            if self.transform_function is None or self.corpus_type == \"Word2VecEmbeddingCorpus\":\n                return np.array([self.__get_doc(index) for index in range(item.start, end)])\n            return torch.cat([self.__get_doc(index) for index in range(item.start, end)])  # type:ignore\n        return self.__get_doc(item)\n\n    def __format_doc(self, doc: list[str]) -> str:\n        return \" \".join(doc)\n\n    def __len__(self):\n        if self.generator_exhausted is False:\n            for _ in self:\n                pass\n        return self.doc_count\n\n\nclass Matches:\n    \"\"\"Matches cached to disk\"\"\"\n\n    def __init__(self, matches: Iterable[MergedGroup]):\n        self.path = os.path.join(TEMP_DIR, \"output/results/matches\")\n        self.count = 0\n        if isinstance(matches, list) and matches:\n            self.matches = matches\n            self.is_cached = False\n            self.count = len(self.matches)\n        else:\n            self.conn = sqlite3.connect(os.path.join(self.path, \"matches.db\"))\n            self.cursor = self.conn.cursor()\n            self.cursor.execute(\"DROP TABLE IF EXISTS matches\")\n            self.cursor.execute(\"CREATE TABLE matches (match_id INTEGER, match blob)\")\n            self.cursor.execute(\"CREATE INDEX match_id_index ON matches (match_id)\")\n            self.matches = None\n            self.is_cached = True\n            self.count = self.__save(matches)  # save generator to disk\n
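    # --- Aside (added sketch, not part of the original class) ---
    # The sqlite cache written above can be inspected outside this class, the
    # same way Matches.load() reads it back:
    #
    #   import sqlite3, os
    #   import dill as pickle
    #   conn = sqlite3.connect(os.path.join(TEMP_DIR, 'output/results/matches/matches.db'))
    #   for (blob,) in conn.execute('SELECT match FROM matches ORDER BY match_id'):
    #       merged = pickle.loads(blob)  # each row decodes to a MergedGroup
    #       print(merged.similarity, merged.source.filename)
    #   conn.close()
    # --- end aside ---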
\n    def extend(self, new_matches: Iterable[MergedGroup]):\n        \"\"\"Add new matches to existing matches\"\"\"\n        for match in new_matches:\n            dump = pickle.dumps(match)\n            self.cursor.execute(\"INSERT INTO matches VALUES (?, ?)\", (self.count, dump))\n            self.count += 1\n\n    def __save(self, matches):\n        count = -1  # distinguishes an empty generator from a single saved match\n        for count, match in enumerate(matches):\n            dump = pickle.dumps(match)\n            self.cursor.execute(\"INSERT INTO matches VALUES (?, ?)\", (count, dump))  # use the enumerate counter: self.count is still 0 here\n        if count == -1:\n            return 0\n        self.conn.commit()\n        return count + 1\n\n    def done(self):\n        \"\"\"Commit changes to database\"\"\"\n        self.conn.commit()\n        self.conn.close()\n\n    @classmethod\n    def load(cls):\n        \"\"\"Load instance of class by reading previously cached matches\"\"\"\n        matches = []\n        conn = sqlite3.connect(os.path.join(TEMP_DIR, \"output/results/matches/matches.db\"))\n        cursor = conn.cursor()\n        cursor.execute(\"SELECT match from matches ORDER BY match_id\")\n        for match in cursor:\n            matches.append(pickle.loads(match[0]))\n        conn.close()\n        return cls(matches)\n\n    def __len__(self):\n        return self.count\n\n    def __iter__(self):\n        if self.is_cached is False:\n            for index in range(self.count):\n                yield self.matches[index]  # type: ignore\n        else:\n            self.cursor.execute(\"SELECT match FROM matches ORDER BY match_id\")\n            for match in self.cursor:\n                yield pickle.loads(match[0])\n\n\nclass Corpus(ABC):\n    \"\"\"A Corpus of passages as preprocessed by the text-preprocessor\"\"\"\n\n    def __init__(\n        self,\n        texts: Iterable[Tokens],\n        output_path: str,\n        similarity_function: Callable,\n        min_text_obj_length: int = 15,\n        n_chunk: int = 3,\n        text_object_type_split: str = \"doc\",\n        direction=\"source\",\n        n_batches=1,\n    ):\n        \"\"\"Initialize Corpus Object\"\"\"\n        self.texts: Iterable[Tokens] = texts\n        self.min_text_obj_length: int = min_text_obj_length\n        self.n_chunk: int = n_chunk\n        self.metadata: list[dict[str, Any]] = []\n        self.text_object_type_split = text_object_type_split\n        self.output_dir = os.path.abspath(output_path)\n        self.direction: str = direction\n        os.makedirs(os.path.join(self.output_dir, self.direction, \"saved_docs\"), exist_ok=True)\n        os.makedirs(os.path.join(self.output_dir, self.direction, \"doc_chunks\"), exist_ok=True)\n        self.length = 0\n        self.n_batches = n_batches\n        self.similarity = similarity_function\n        self.docs: DocumentChunks\n        self.max_tokens = float(\"inf\")\n        self.device = \"cpu\"\n\n    def __len__(self) -> int:\n        return self.length\n\n    def __getitem__(self, _):\n        pass\n\n    def get_text_chunks(self) -> Iterable[list[str]]:\n        \"\"\"Process all texts into smaller text chunks\"\"\"\n        chunk_group: deque[Tokens] = deque(maxlen=self.n_chunk)\n        min_chunk_length: int = self.n_chunk * self.min_text_obj_length\n        current_text_level_id: str = \"0\"\n        full_doc = Tokens([], {})\n        current_doc_id = None\n        chunks_done = 0\n        docs = {}\n        current_chunk_group_length = 0\n        for text in self.texts:\n            docs[text.metadata[\"philo_id\"]] = \" \".join([t.text for t in text])\n            print(f\"\\rProcessing {self.direction} texts... 
{chunks_done} text chunks extracted...\", end=\"\", flush=True)\n text.metadata[\"parsed_filename\"] = os.path.join(\n self.output_dir,\n self.direction,\n \"saved_docs\",\n os.path.basename(text.metadata[\"parsed_filename\"].replace(\".lz4\", \"\")),\n )\n doc_id = text.metadata[\"philo_id\"].split()[0]\n if (\n doc_id != current_doc_id and current_doc_id is not None\n ): # we save the current doc when doc_ids don't match\n full_doc.save(full_doc.metadata[\"parsed_filename\"])\n full_doc = Tokens([], text.metadata)\n full_doc.extend(text)\n text.purge()\n text_level_id: str = \" \".join(\n text.metadata[\"philo_id\"].split()[: PHILO_TEXT_OBJECT_LEVELS[self.text_object_type_split]]\n )\n if text_level_id != current_text_level_id:\n if current_chunk_group_length >= min_chunk_length:\n text_chunk = self.__build_text_chunk(chunk_group)\n if text_chunk: # make sure this chunk is not empty\n chunks_done += 1\n yield text_chunk\n chunk_group.clear()\n current_text_level_id = text_level_id\n current_chunk_group_length = sum([len(t.tokens) for t in chunk_group])\n text_length = len(text)\n if current_chunk_group_length + text_length > self.max_tokens and current_chunk_group_length:\n chunks_done += 1\n yield self.__build_text_chunk(chunk_group)\n if text_length < self.min_text_obj_length:\n try:\n chunk_group[-1].extend(text)\n continue\n except IndexError:\n pass\n chunk_group.append(text)\n if len(chunk_group) == self.n_chunk:\n current_chunk_group_length = sum([len(t) for t in chunk_group])\n if current_chunk_group_length >= min_chunk_length:\n chunks_done += 1\n yield self.__build_text_chunk(chunk_group)\n current_doc_id = doc_id\n full_doc.save(full_doc.metadata[\"parsed_filename\"])\n print()\n\n def __build_text_chunk(self, chunk_group: deque[Tokens]) -> list[str]:\n \"\"\"Build chunks from a group of text objects\"\"\"\n chunk = [t for c in chunk_group for t in c]\n self.metadata.append(\n {\n **chunk_group[0].metadata,\n \"start_byte\": chunk[0].ext[\"start_byte\"],\n \"end_byte\": chunk[-1].ext[\"end_byte\"],\n }\n )\n return [t.text for t in chunk]\n\n def __compare(self, target_corpus=None) -> np.ndarray:\n \"\"\"Compare the corpus to another corpus\"\"\"\n results: np.ndarray\n if target_corpus is None:\n target_corpus = self\n results = self.similarity(self.docs[0 : self.length], target_corpus.docs[0 : target_corpus.length]) # type: ignore\n return results\n\n def __batched_compare(self, min_similarity: float, target_corpus: Corpus | None = None) -> Matches:\n \"\"\"Compare the corpus to another corpus\"\"\"\n inner_compare = False\n if target_corpus is None:\n target_corpus = self\n inner_compare = True\n source_batch_size = int(np.ceil(self.length / self.n_batches))\n target_batch_size = int(np.ceil(target_corpus.length / target_corpus.n_batches))\n matches: Matches = Matches([])\n with tqdm(total=self.length * target_corpus.length, leave=False) as pbar:\n for outer_start_index in range(0, self.length, source_batch_size):\n outer_end_index = outer_start_index + source_batch_size\n source_embeddings = self.docs[outer_start_index:outer_end_index]\n for inner_start_index in range(0, target_corpus.length, target_batch_size):\n inner_end_index = inner_start_index + target_batch_size\n target_embeddings = target_corpus.docs[inner_start_index:inner_end_index]\n partial_results: np.ndarray = self.similarity(source_embeddings, target_embeddings)\n if inner_compare is False:\n processed_results = self.process_outer_compare(partial_results, target_corpus, min_similarity, 
outer_start_index=outer_start_index, inner_start_index=inner_start_index)\n else:\n processed_results = self.process_inner_compare(partial_results, min_similarity, outer_start_index=outer_start_index, inner_start_index=inner_start_index)\n matches.extend(processed_results)\n pbar.update(source_batch_size*target_batch_size)\n matches.done()\n return matches\n\n def inner_compare(self, min_similarity: float) -> Matches:\n \"\"\"Compare corpus with itself\"\"\"\n print(\"Comparing source collection to itself...\", flush=True)\n if self.n_batches == 1:\n results = self.__compare()\n return Matches(self.process_inner_compare(results, min_similarity))\n return self.__batched_compare(min_similarity)\n\n def outer_compare(self, target_corpus, min_similarity: float) -> Matches:\n \"\"\"Compare corpus with another corpus\"\"\"\n print(\"Comparing source collection to target collection...\", flush=True)\n if self.n_batches == 1 and target_corpus.n_batches == 1:\n results = self.__compare(target_corpus=target_corpus)\n return Matches(self.process_outer_compare(results, target_corpus, min_similarity))\n return self.__batched_compare(min_similarity, target_corpus=target_corpus)\n\n def process_inner_compare(self, results, min_similarity: float, outer_start_index=0, inner_start_index=0) -> Iterable[MergedGroup]:\n \"\"\"Compare corpus with itself\"\"\"\n for outer_doc_id, inner_doc_id in np.argwhere(results >= min_similarity):\n outer_doc_id += outer_start_index\n inner_doc_id += inner_start_index\n if (\n self.metadata[outer_doc_id][\"year\"] <= self.metadata[inner_doc_id][\"year\"]\n and inner_doc_id != outer_doc_id\n ):\n yield MergedGroup(\n PassageGroup(\n self.metadata[outer_doc_id][\"start_byte\"],\n self.metadata[outer_doc_id][\"end_byte\"],\n self.metadata[outer_doc_id][\"filename\"],\n self.metadata[outer_doc_id],\n ),\n PassageGroup(\n self.metadata[inner_doc_id][\"start_byte\"],\n self.metadata[inner_doc_id][\"end_byte\"],\n self.metadata[inner_doc_id][\"filename\"],\n self.metadata[inner_doc_id],\n ),\n results[outer_doc_id, inner_doc_id], # type: ignore\n )\n\n def process_outer_compare(\n self, results: np.ndarray, target_corpus: Corpus, min_similarity, outer_start_index=0, inner_start_index=0\n ) -> Iterable[MergedGroup]:\n \"\"\"Compare corpus with another corpus\"\"\"\n for outer_doc_id, inner_doc_id in np.argwhere(results >= min_similarity):\n outer_index = outer_doc_id + outer_start_index\n inner_index = inner_doc_id + inner_start_index\n yield MergedGroup(\n PassageGroup(\n self.metadata[outer_index][\"start_byte\"],\n self.metadata[outer_index][\"end_byte\"],\n self.metadata[outer_index][\"filename\"],\n self.metadata[outer_index],\n ),\n PassageGroup(\n target_corpus.metadata[inner_index][\"start_byte\"],\n target_corpus.metadata[inner_index][\"end_byte\"],\n target_corpus.metadata[inner_index][\"filename\"],\n target_corpus.metadata[inner_index],\n ),\n results[outer_doc_id, inner_doc_id], # type: ignore\n )\n\n\nclass TfIdfCorpus(Corpus):\n \"\"\"Corpus object which builds TF-IDF vectors\"\"\"\n\n def __init__(\n self,\n texts: Iterable[Tokens],\n output_path: str,\n min_text_obj_length: int = 15,\n n_chunk: int = 3,\n text_object_type_split: str = \"doc\",\n vectorizer: Optional[TfidfVectorizer] = None,\n min_freq: int | float = 1,\n max_freq: float = 1.0,\n direction=\"source\",\n ):\n super().__init__(\n texts,\n output_path,\n lambda x: None, #\n min_text_obj_length=min_text_obj_length,\n n_chunk=n_chunk,\n text_object_type_split=text_object_type_split,\n 
direction=direction,\n )\n if vectorizer is None:\n self.vectorizer = TfidfVectorizer(max_df=max_freq, min_df=min_freq) # type: ignore\n self.vectors: csr_matrix = self.vectorizer.fit_transform(\" \".join(d) for d in self.get_text_chunks()) # type: ignore\n else:\n self.direction = \"target\"\n self.vectorizer = vectorizer\n self.vectors: csr_matrix = self.vectorizer.transform(\" \".join(d) for d in self.get_text_chunks()) # type: ignore\n\n self.length: int = self.vectors.shape[0] # type: ignore\n self.dim: int = self.vectors.shape[1] # type: ignore\n\n def __getitem__(self, item: int) -> csr_matrix:\n return self.vectors[item] # type: ignore\n\n def __filter_by_jaccard_sim(\n self, similarity_matrix: np.ndarray, min_similarity: float, other_vectors: csr_matrix | None = None\n ) -> np.ndarray:\n \"\"\"Give a score of 0 for all matches where the Jaccard similarity score is under 75% of the min score\"\"\"\n for outer_doc_id, inner_doc_id in np.argwhere(similarity_matrix >= min_similarity):\n outer_vector = self[outer_doc_id]\n if other_vectors is not None:\n inner_vector = other_vectors[inner_doc_id]\n else:\n inner_vector = self[inner_doc_id]\n jaccard_similarity = jaccard_sim(outer_vector, inner_vector)\n if jaccard_similarity < 0.5 * min_similarity:\n similarity_matrix[outer_doc_id, inner_doc_id] = 0.0\n return similarity_matrix\n\n def inner_compare(self, min_similarity: float) -> Matches:\n \"\"\"Compare corpus with itself\"\"\"\n results: np.ndarray = linear_kernel(self.vectors, dense_output=False) # type: ignore\n results = self.__filter_by_jaccard_sim(results, min_similarity)\n return Matches(self.process_inner_compare(results, min_similarity))\n\n def outer_compare(self, target_corpus: TfIdfCorpus, min_similarity: float) -> Matches:\n \"\"\"Compare corpus with another corpus\"\"\"\n print(\"Comparing source collection to target collection...\", flush=True)\n results: np.ndarray = linear_kernel(self.vectors, target_corpus.vectors, dense_output=False) # type: ignore\n results = self.__filter_by_jaccard_sim(results, min_similarity, target_corpus.vectors)\n return Matches(self.process_outer_compare(results, target_corpus, min_similarity))\n\n\nclass Word2VecEmbeddingCorpus(Corpus):\n \"\"\"Corpus object which builds doc embeddings using the average of token word2vec vectors\"\"\"\n\n def __init__(\n self,\n texts: Iterable[Tokens],\n output_path: str,\n model: str | spacy.Language,\n n_batches: int,\n min_text_obj_length: int = 15,\n n_chunk: int = 3,\n text_object_type_split: str = \"doc\",\n direction: str = \"source\",\n ):\n super().__init__(\n texts,\n output_path,\n similarity_function=linear_kernel,\n min_text_obj_length=min_text_obj_length,\n n_chunk=n_chunk,\n text_object_type_split=text_object_type_split,\n direction=direction,\n n_batches=n_batches,\n )\n if isinstance(model, str):\n self.model = spacy.load(model)\n else:\n self.model = model\n self.docs = DocumentChunks(\n self.get_text_chunks(),\n self.direction,\n self.create_embeddings,\n )\n self.length = len(self.docs)\n\n def create_embeddings(self, text_chunk) -> np.ndarray:\n \"\"\"Create document embeddings\"\"\"\n doc: Doc = self.model(\" \".join(text_chunk))\n return (doc.vector / doc.vector_norm).reshape(1, -1) # type: ignore\n\n def __getitem__(self, item: int) -> list[str]:\n return self.docs[item] # type: ignore\n\n def __len__(self):\n return self.length\n\n\nclass TransformerCorpus(Corpus):\n \"\"\"Corpus object which builds doc embeddings using sentence-transformers for similarity\"\"\"\n\n def 
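# --- Editor's illustration (not part of the original record): TfIdfCorpus's comparison in
# miniature -- TF-IDF vectors scored with linear_kernel, then pruned with a scalar version
# of the Jaccard-overlap guard used by __filter_by_jaccard_sim. The sample chunks are invented.
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel

chunks = [
    "the quick brown fox jumps over the lazy dog",
    "a quick brown fox leaped over a sleepy dog",
    "completely unrelated text about tax law",
]
vectors = TfidfVectorizer().fit_transform(chunks)
scores = linear_kernel(vectors)  # cosine similarity, since TF-IDF rows are L2-normalized

min_similarity = 0.3
for i, j in np.argwhere(scores >= min_similarity):
    if i >= j:  # keep each unordered pair once, skip self-matches
        continue
    x = vectors[i].astype(bool).astype(int)
    y = vectors[j].astype(bool).astype(int)
    intersect = x.multiply(y).sum()
    union = x.sum() + y.sum() - intersect
    if intersect / union >= 0.5 * min_similarity:  # same 50%-of-threshold cutoff as above
        print(i, j, round(float(scores[i, j]), 3))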
__init__(\n self,\n texts: Iterable[Tokens],\n output_path: str,\n model_name: str,\n n_batches: int,\n min_text_obj_length: int = 15,\n n_chunk: int = 3,\n text_object_type_split: str = \"sent\",\n model=None,\n direction=\"source\",\n ):\n def sim_function(x, y):\n sim = util.cos_sim(x, y).cpu().numpy()\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n return sim\n\n super().__init__(\n texts,\n output_path,\n similarity_function=sim_function,\n min_text_obj_length=min_text_obj_length,\n n_chunk=n_chunk,\n text_object_type_split=text_object_type_split,\n direction=direction,\n n_batches=n_batches,\n )\n\n if model is None:\n self.model = SentenceTransformer(model_name)\n else:\n self.model = model\n\n self.model.max_seq_length = self.model.get_max_seq_length() - 2 # needed to enable truncating long sequences\n self.max_tokens: int = int(self.model.max_seq_length / 2)\n\n self.docs = DocumentChunks(\n self.get_text_chunks(),\n self.direction,\n self.create_embeddings,\n )\n self.length = len(self.docs)\n\n if torch.cuda.is_available():\n torch.cuda.empty_cache() # clear GPU cache after creating embeddings\n self.device = torch.device(\"cuda:0\")\n else:\n self.device = torch.device(\"cpu\")\n\n def __getitem__(self, item: int) -> list[str]:\n return self.docs[item] # type: ignore\n\n def __len__(self):\n return self.length\n\n def create_embeddings(self, text_chunks) -> torch.Tensor:\n \"\"\"Create document embedding\"\"\"\n tensor = self.model.encode(list(text_chunks), convert_to_tensor=True)\n return tensor # type: ignore\n\n\ndef clean_text(text: str) -> str:\n \"\"\"Cleaning text function which removes tags and converts entities\"\"\"\n text = TAGS.sub(\"\", text)\n text = unescape_xml(text)\n text = unescape_html(text)\n text = text.replace(\"\\n\", \" \")\n text = text.strip()\n return text\n\n\ndef get_text(start_byte: int, end_byte: int, filename: str, length: int = 300) -> str:\n \"\"\"Grab all texts\"\"\"\n if start_byte < 0:\n start_byte = 0\n length = end_byte - start_byte\n with open(filename, \"rb\") as text_file:\n text_file.seek(start_byte)\n text: str = text_file.read(length).decode(\"utf8\", \"ignore\")\n return clean_text(text)\n\n\ndef jaccard_sim(X, Y):\n \"\"\"Jaccard Similarity\"\"\"\n assert X.shape[1] == Y.shape[1]\n\n X = X.astype(bool).astype(int)\n Y = Y.astype(bool).astype(int)\n\n intersect = X.dot(Y.T)\n\n x_sum = X.sum(axis=1).A1\n y_sum = Y.sum(axis=1).A1\n xx, yy = np.meshgrid(x_sum, y_sum)\n union = (xx + yy).T - intersect\n return (intersect / union).A\n\n\ndef get_passage(doc: Tokens, start_byte: int, end_byte: int) -> list[Token]:\n \"\"\"Get passage within Tokens object\"\"\"\n tokens = []\n for token in doc:\n if token.ext[\"start_byte\"] >= start_byte and token.ext[\"end_byte\"] <= end_byte:\n tokens.append(token)\n elif token.ext[\"end_byte\"] > end_byte:\n break\n return tokens\n\n\ndef merge_passages(\n matches: Matches,\n min_score: float,\n) -> list[MergedGroup]:\n \"\"\"Merge all passages into bigger passages. 
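# --- Editor's illustration (not part of the original record): the TransformerCorpus flow in
# a few lines. "all-MiniLM-L6-v2" is only a common default chosen for the demo, not
# necessarily the model this project is configured with.
import torch
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("all-MiniLM-L6-v2")
source = model.encode(["a passage about kings", "a passage about farming"], convert_to_tensor=True)
target = model.encode(["a text on monarchy"], convert_to_tensor=True)
scores = util.cos_sim(source, target).cpu().numpy()
print(scores.shape, scores)  # (2, 1): one similarity per source/target pair
if torch.cuda.is_available():
    torch.cuda.empty_cache()  # the same cache hygiene the class applies after encoding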
Similarity is computed as the mean similarity of all passages in the group.\"\"\"\n # TODO: should merging be done using Jaccard sim metric: to avoid sparsity\n last_count = len(matches)\n current_count = last_count + 1\n iteration = 1\n merged_matches: list[MergedGroup] = Matches.load().matches # type:ignore\n print(f\"Merging matches: {last_count} matches before iteration 1\", end=\"\", flush=True)\n while last_count / current_count <= 1.0: # we stop iterating if there are minimal change between iterations\n last_count = current_count\n merged_matches = sorted(\n merged_matches,\n key=lambda x: (\n x.source.filename,\n x.target.filename,\n x.source.start_byte,\n x.source.start_byte - x.source.end_byte,\n x.target.start_byte,\n x.target.start_byte - x.target.end_byte,\n ),\n ) # sort by smaller start byte and bigger end_byte\n merged_group: MergedGroup = MergedGroup()\n saved_groups: list[MergedGroup] = []\n total_matches: int = len(matches)\n start_score: float = min_score\n merged_pairs: list[float] = []\n\n for pos, match in enumerate(merged_matches):\n merged_source: bool = False\n merged_target: bool = False\n if merged_group.source.filename == \"\":\n merged_pairs.append(match.similarity)\n merged_group = MergedGroup(match.source, match.target, start_score)\n continue\n if (\n match.source.filename != merged_group.source.filename\n or match.target.filename != merged_group.target.filename\n ):\n merged_group.similarity = sum(merged_pairs) / len(merged_pairs)\n saved_groups.append(merged_group)\n merged_pairs = [match.similarity]\n merged_group = MergedGroup(match.source, match.target, start_score)\n continue\n if match.source.start_byte <= merged_group.source.end_byte:\n if match.source.end_byte > merged_group.source.end_byte:\n merged_group.source.end_byte = match.source.end_byte\n merged_group.source.metadata[\"end_byte\"] = match.source.end_byte\n merged_source = True\n elif match.source.end_byte == merged_group.source.end_byte:\n merged_source = True\n if match.target.start_byte <= merged_group.target.end_byte:\n if match.target.end_byte > merged_group.target.end_byte:\n merged_group.target.end_byte = match.target.end_byte\n merged_group.target.metadata[\"end_byte\"] = match.target.end_byte\n merged_target = True\n elif match.target.end_byte == merged_group.target.end_byte:\n merged_target = True\n if any((merged_source, merged_target)):\n merged_pairs.append(match.similarity)\n if merged_source is False and merged_target is False:\n merged_group.similarity = sum(merged_pairs) / len(merged_pairs)\n saved_groups.append(merged_group)\n merged_pairs = [match.similarity]\n merged_group = MergedGroup(match.source, match.target, match.similarity)\n if pos + 1 == total_matches:\n merged_group.similarity = sum(merged_pairs) / len(merged_pairs)\n saved_groups.append(merged_group)\n merged_matches = saved_groups\n iteration += 1\n current_count = len(saved_groups)\n print(f\"\\rMerging matches: {current_count} matches after iteration {iteration+1}...\", end=\"\", flush=True)\n print(flush=True)\n return merged_matches\n\n\ndef get_tokens(passage: PassageGroup, preproc: PreProcessor) -> list[tuple[str, str]]:\n \"\"\"Get tokens\"\"\"\n text: str = \" \"\n start_byte: int = passage.start_byte\n end_byte: int = passage.end_byte\n with open(passage.filename, \"rb\") as text_file:\n text_file.seek(start_byte)\n text = text_file.read(end_byte - start_byte).decode(\"utf8\", \"ignore\")\n tokens: list[tuple[str, str]] = []\n pos = 0\n for token in preproc.process_string(text):\n pos += 1\n 
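# --- Editor's illustration (not part of the original record): the core move inside
# merge_passages -- sort byte spans, then grow the current group while the next span
# overlaps it. The real function pairs a source and a target span and averages their
# similarities; this standalone sketch shows one side only.
def merge_spans(spans):
    merged = []
    for start, end in sorted(spans):
        if merged and start <= merged[-1][1]:  # overlaps the group being grown
            merged[-1] = (merged[-1][0], max(merged[-1][1], end))
        else:
            merged.append((start, end))
    return merged

print(merge_spans([(0, 120), (100, 250), (400, 500)]))  # -> [(0, 250), (400, 500)]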
surface_form = token.surface_form.replace(\"\\n\", \" \")\n token.surface_form = surface_form\n tokens.append((token.text, token.surface_form))\n return tokens\n\n\ndef post_process_passages(\n source: PassageGroup,\n target: PassageGroup,\n source_preproc: PreProcessor,\n target_preproc: PreProcessor,\n) -> tuple[str, str]:\n \"\"\"Post process function to highlight matching words in HTML tags\"\"\"\n # print(source.start_byte, source.end_byte, source.filename)\n source_tokens = get_tokens(source, source_preproc)\n target_tokens = get_tokens(target, target_preproc)\n source_set = {word for word, _ in source_tokens if word}\n target_set = {word for word, _ in target_tokens if word}\n source_passage_with_matches = []\n for word, surface_form in source_tokens:\n if word and word in target_set:\n source_passage_with_matches.append(f'<span class=\"token-match\">{surface_form}</span>')\n elif not word:\n source_passage_with_matches.append(f'<span class=\"filtered-token\">{surface_form or \" \"}</span>')\n else:\n source_passage_with_matches.append(surface_form)\n target_passage_with_matches = []\n for word, surface_form in target_tokens:\n if word and word in source_set:\n target_passage_with_matches.append(f'<span class=\"token-match\">{surface_form}</span>')\n elif not word:\n target_passage_with_matches.append(f'<span class=\"filtered-token\">{surface_form}</span>')\n else:\n target_passage_with_matches.append(surface_form)\n return clean_text(\"\".join(source_passage_with_matches)), clean_text(\"\".join(target_passage_with_matches))\n\n\ndef text_object_upper_bound(config) -> str:\n \"\"\"Find the text object level above the one specified in the config\"\"\"\n object_type_to_level = {v: k for k, v in PHILO_TEXT_OBJECT_LEVELS.items()}\n text_object_level = PHILO_TEXT_OBJECT_LEVELS[config[\"text_object_type\"]]\n if text_object_level == 1:\n return \"doc\"\n return object_type_to_level[text_object_level - 1]\n\n\ndef simple_similarity(\n source_texts: Iterable[Tokens],\n source_config: dict[str, Any],\n target_config: dict[str, Any],\n min_similarity: float,\n output_path: str,\n target_texts: Optional[Iterable[Tokens]] = None,\n) -> tuple[TfIdfCorpus, Matches, list[dict[str, Any]], list[dict[str, Any]]]:\n \"\"\"Cosine similarity of TF-IDF vectors\"\"\"\n source_corpus: TfIdfCorpus = TfIdfCorpus(\n source_texts,\n output_path,\n min_text_obj_length=source_config[\"min_text_object_length\"],\n n_chunk=source_config[\"n_chunk\"],\n text_object_type_split=text_object_upper_bound(source_config),\n min_freq=source_config[\"min_freq\"],\n max_freq=source_config[\"max_freq\"],\n )\n if target_texts is not None:\n target_corpus: TfIdfCorpus = TfIdfCorpus(\n target_texts,\n output_path,\n vectorizer=source_corpus.vectorizer,\n min_text_obj_length=target_config[\"min_text_object_length\"],\n n_chunk=target_config[\"n_chunk\"],\n text_object_type_split=text_object_upper_bound(target_config),\n direction=\"target\",\n )\n\n matching_docs = source_corpus.outer_compare(target_corpus, min_similarity)\n else:\n matching_docs = source_corpus.inner_compare(min_similarity)\n target_corpus = source_corpus\n return source_corpus, matching_docs, source_corpus.metadata, target_corpus.metadata\n\n\ndef transformer_similarity(\n source_texts: Iterable[Tokens],\n source_config: dict[str, Any],\n target_config: dict[str, Any],\n min_similarity: float,\n source_batch: int,\n output_path: str,\n target_texts: Optional[Iterable[Tokens]] = None,\n target_batch: int = 1,\n) -> tuple[Matches, list[dict[str, Any]], 
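# --- Editor's illustration (not part of the original record): the highlighting step of
# post_process_passages, using hand-made (word, surface_form) pairs instead of a
# PreProcessor so it runs standalone. An empty word marks a filtered token.
def highlight(tokens, other_words):
    out = []
    for word, surface in tokens:
        if word and word in other_words:
            out.append(f'<span class="token-match">{surface}</span>')
        elif not word:
            out.append(f'<span class="filtered-token">{surface or " "}</span>')
        else:
            out.append(surface)
    return "".join(out)

source_tokens = [("rose", "Rose "), ("", "the "), ("garden", "garden")]
print(highlight(source_tokens, other_words={"rose", "thorn"}))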
list[dict[str, Any]]]:\n \"\"\"Cosine similarity of sentence embeddings from transformer models\"\"\"\n source_corpus: TransformerCorpus = TransformerCorpus(\n source_texts,\n output_path,\n source_config[\"model_name\"],\n source_batch,\n min_text_obj_length=source_config[\"min_text_object_length\"],\n n_chunk=source_config[\"n_chunk\"],\n text_object_type_split=text_object_upper_bound(source_config),\n )\n if target_texts is not None:\n target_corpus: TransformerCorpus = TransformerCorpus(\n target_texts,\n output_path,\n source_config[\"model_name\"],\n target_batch,\n direction=\"target\",\n min_text_obj_length=target_config[\"min_text_object_length\"],\n n_chunk=target_config[\"n_chunk\"],\n text_object_type_split=text_object_upper_bound(target_config),\n model=source_corpus.model,\n )\n matching_docs = source_corpus.outer_compare(target_corpus, min_similarity)\n else:\n matching_docs = source_corpus.inner_compare(min_similarity)\n target_corpus = source_corpus\n return matching_docs, source_corpus.metadata, target_corpus.metadata\n\n\ndef word2vec_embed_similarity(\n source_texts: Iterable[Tokens],\n source_config: dict[str, Any],\n target_config: dict[str, Any],\n min_similarity: float,\n source_batch: int,\n output_path: str,\n target_texts: Optional[Iterable[Tokens]] = None,\n target_batch: int = 1,\n) -> tuple[Word2VecEmbeddingCorpus, Matches, list[dict[str, Any]], list[dict[str, Any]]]:\n \"\"\"Cosine similarity of sentence embeddings using mean w2v vectors\"\"\"\n source_corpus: Word2VecEmbeddingCorpus = Word2VecEmbeddingCorpus(\n source_texts,\n output_path,\n source_config[\"model_name\"],\n source_batch,\n min_text_obj_length=source_config[\"min_text_object_length\"],\n n_chunk=source_config[\"n_chunk\"],\n text_object_type_split=text_object_upper_bound(source_config),\n )\n if target_texts is not None:\n target_corpus: Word2VecEmbeddingCorpus = Word2VecEmbeddingCorpus(\n target_texts,\n output_path,\n source_corpus.model,\n target_batch,\n direction=\"target\",\n min_text_obj_length=target_config[\"min_text_object_length\"],\n n_chunk=target_config[\"n_chunk\"],\n text_object_type_split=text_object_upper_bound(target_config),\n )\n matching_docs = source_corpus.outer_compare(target_corpus, min_similarity)\n else:\n matching_docs = source_corpus.inner_compare(min_similarity)\n target_corpus = source_corpus\n return source_corpus, matching_docs, source_corpus.metadata, target_corpus.metadata\n\n\ndef run_vsa(source_path: str, target_path: str, workers: int, config: dict[str, Any], output_path: str):\n \"\"\"Main function\"\"\"\n config[\"source\"][\"strip_tags\"] = True # this is useful for post-processing passages where we have tags included.\n config[\"target\"][\"strip_tags\"] = True\n source_preproc: PreProcessor | None = None\n target_preproc: PreProcessor | None = None\n if config[\"source\"][\"vectorization\"] == \"transformer\":\n config[\"source\"][\"strip_punctuation\"] = False\n config[\"target\"][\"strip_punctuation\"] = False\n source_preproc = PreProcessor(is_philo_db=True, workers=workers, **config[\"source\"])\n target_preproc = PreProcessor(is_philo_db=True, workers=workers, nlp_model=source_preproc.nlp, **config[\"target\"])\n source_texts: Iterable[Tokens] = source_preproc.process_texts(\n (file.path for file in os.scandir(source_path)), keep_all=True, progress=False\n )\n target_texts: Iterable[Tokens] = target_preproc.process_texts(\n (file.path for file in os.scandir(target_path)), keep_all=True, progress=False\n )\n\n if 
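# --- Editor's illustration (not part of the original record): the embedding step behind
# word2vec_embed_similarity in isolation. "en_core_web_md" is an assumption -- any spaCy
# model with word vectors works; the point is the mean vector scaled to unit length.
import numpy as np
import spacy

nlp = spacy.load("en_core_web_md")  # requires: python -m spacy download en_core_web_md
doc = nlp("a short text chunk")
embedding = (doc.vector / doc.vector_norm).reshape(1, -1)
print(embedding.shape, round(float(np.linalg.norm(embedding)), 3))  # unit-norm row vector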
config[\"source\"][\"vectorization\"] == \"tfidf\":\n source_corpus, matches, source_metadata, target_metadata = simple_similarity(\n source_texts,\n config[\"source\"],\n config[\"target\"],\n config[\"min_similarity\"],\n output_path,\n target_texts=target_texts,\n )\n elif config[\"source\"][\"vectorization\"] == \"transformer\":\n matches, source_metadata, target_metadata = transformer_similarity(\n source_texts,\n config[\"source\"],\n config[\"target\"],\n config[\"min_similarity\"],\n config[\"source_batch\"],\n output_path,\n target_texts=target_texts,\n target_batch=config[\"target_batch\"],\n )\n else:\n source_corpus, matches, source_metadata, target_metadata = word2vec_embed_similarity(\n source_texts,\n config[\"source\"],\n config[\"target\"],\n config[\"min_similarity\"],\n config[\"source_batch\"],\n output_path,\n target_texts=target_texts,\n target_batch=config[\"target_batch\"],\n )\n if len(matches) == 0:\n print(\"No matches found. Exiting...\")\n exit()\n print(f\"{len(matches)} matches found.\")\n\n matches = merge_passages(\n matches,\n config[\"min_similarity\"],\n )\n\n print(\"Formatting and writing out processed results...(this may take some time)\")\n os.system(\"mkdir -p output/results\")\n\n if source_preproc is None:\n source_preproc = PreProcessor(is_philo_db=True, workers=workers, **config[\"source\"])\n target_preproc = PreProcessor(\n is_philo_db=True, workers=workers, nlp_model=source_preproc.nlp, **config[\"target\"]\n )\n\n with lz4.frame.open(f\"{output_path}/results/alignments.jsonl.lz4\", mode=\"wb\", compression_level=3) as output:\n for match in tqdm(matches, total=len(matches), leave=False):\n source_context_before = get_text(\n match.source.start_byte - 300, match.source.start_byte, match.source.metadata[\"filename\"]\n )\n source_passage = get_text(match.source.start_byte, match.source.end_byte, match.source.metadata[\"filename\"])\n source_context_after = get_text(\n match.source.end_byte, match.source.end_byte + 300, match.source.metadata[\"filename\"]\n )\n target_context_before = get_text(\n match.target.start_byte - 300, match.target.start_byte, match.target.metadata[\"filename\"]\n )\n target_passage = get_text(match.target.start_byte, match.target.end_byte, match.target.metadata[\"filename\"])\n target_context_after = get_text(\n match.target.end_byte, match.target.end_byte + 300, match.target.metadata[\"filename\"]\n )\n if config[\"source\"][\"vectorization\"] == \"tfidf\":\n source_preproc.normalize_options = {\n **source_preproc.normalize_options,\n \"strip_tags\": False,\n }\n source_preproc.pos_to_keep = []\n target_preproc.normalize_options = {\n **target_preproc.normalize_options,\n \"strip_tags\": False,\n }\n target_preproc.pos_to_keep = []\n source_passage_with_matches, target_passage_with_matches = post_process_passages(\n match.source,\n match.target,\n source_preproc,\n target_preproc,\n )\n else:\n source_passage_with_matches = source_passage\n target_passage_with_matches = target_passage\n result_object: str = json.dumps(\n {\n \"source_doc_id\": match.source.metadata[\"philo_id\"].split()[0],\n \"source_context_before\": source_context_before,\n \"source_passage\": source_passage,\n \"source_context_after\": source_context_after,\n \"source_passage_with_matches\": source_passage_with_matches,\n \"target_doc_id\": match.target.metadata[\"philo_id\"].split()[0],\n \"target_context_before\": target_context_before,\n \"target_passage\": target_passage,\n \"target_context_after\": target_context_after,\n 
\"target_passage_with_matches\": target_passage_with_matches,\n \"similarity\": str(match.similarity),\n **{f\"source_{field}\": value for field, value in match.source.metadata.items()},\n **{f\"target_{field}\": value for field, value in match.target.metadata.items()},\n }\n )\n output.write(f\"{result_object}\\n\".encode(\"utf8\")) # type: ignore\n with open(\"output/results/count.txt\", \"w\", encoding=\"utf8\") as output_file:\n output_file.write(f\"{len(matches)}\")\n\n # Generating metadata files to mimic output of generate_ngrams\n os.makedirs(\"output/source/metadata/\", exist_ok=True)\n with open(\"output/source/metadata/metadata.json\", \"w\", encoding=\"utf8\") as output_file:\n output_file.write(json.dumps(dict(enumerate(source_metadata))))\n\n os.makedirs(\"output/target/metadata/\", exist_ok=True)\n with open(\"output/target/metadata/metadata.json\", \"w\", encoding=\"utf8\") as output_file:\n output_file.write(json.dumps(dict(enumerate(target_metadata))))\n","repo_name":"ARTFL-Project/text-pair","sub_path":"lib/textpair/vector_space_aligner.py","file_name":"vector_space_aligner.py","file_ext":"py","file_size_in_byte":43233,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"61"} +{"seq_id":"70428013316","text":"# -*- coding: utf-8 -*-\n\n# Создать прототип игры Алхимия: при соединении двух элементов получается новый.\n# Реализовать следующие элементы: Вода, Воздух, Огонь, Земля, Шторм, Пар, Грязь, Молния, Пыль, Лава.\n# Каждый элемент организовать как отдельный класс.\n# Таблица преобразований:\n# Вода + Воздух = Шторм\n# Вода + Огонь = Пар\n# Вода + Земля = Грязь\n# Воздух + Огонь = Молния\n# Воздух + Земля = Пыль\n# Огонь + Земля = Лава\n\n# Сложение элементов реализовывать через __add__\n# Если результат не определен - то возвращать None\n# Вывод элемента на консоль реализовывать через __str__\n#\n# Примеры преобразований:\n# print(Water(), '+', Air(), '=', Water() + Air())\n# print(Fire(), '+', Air(), '=', Fire() + Air())\n\nfrom termcolor import cprint, colored\n\n\nclass Water:\n def __add__(self, other):\n if isinstance(other, Air):\n return Storm()\n elif isinstance(other, Fire):\n return Steam()\n elif isinstance(other, Earth):\n return Dirt()\n elif isinstance(other, Water):\n return Bacteria()\n elif isinstance(other, Bacteria):\n return Plankton()\n elif isinstance(other, Grass):\n return Seaweed()\n\n def __str__(self):\n return 'Вода'\n\n\nclass Air:\n def __add__(self, other):\n if isinstance(other, Water):\n return Storm()\n elif isinstance(other, Fire):\n return Lightning()\n elif isinstance(other, Earth):\n return Dust()\n elif isinstance(other, Bacteria):\n return Bird()\n\n def __str__(self):\n return 'Воздух'\n\n\nclass Fire:\n def __add__(self, other):\n if isinstance(other, Water):\n return Steam()\n elif isinstance(other, Air):\n return Lightning()\n elif isinstance(other, Earth):\n return Lava()\n\n def __str__(self):\n return 'Огонь'\n\n\nclass Earth:\n def __add__(self, other):\n if isinstance(other, Water):\n return Dirt()\n elif isinstance(other, Air):\n return Dust()\n elif isinstance(other, Fire):\n return Lava()\n elif isinstance(other, Earth):\n return Grass()\n\n def __str__(self):\n return 'Земля'\n\n\nclass Bacteria:\n def __add__(self, other):\n if isinstance(other, Water):\n return Plankton()\n elif isinstance(other, Air):\n return Bird()\n\n def __str__(self):\n return 'Бактерия'\n\n\nclass Grass:\n def __add__(self, other):\n if isinstance(other, Water):\n return Seaweed()\n\n def 
\n\n\nclass Storm:\n def __str__(self):\n return 'Storm'\n\n\nclass Steam:\n def __str__(self):\n return 'Steam'\n\n\nclass Dirt:\n def __str__(self):\n return 'Dirt'\n\n\nclass Lightning:\n def __str__(self):\n return 'Lightning'\n\n\nclass Lava:\n def __str__(self):\n return 'Lava'\n\n\nclass Dust:\n def __str__(self):\n return 'Dust'\n\n\nclass Plankton:\n def __str__(self):\n return 'Plankton'\n\n\nclass Seaweed:\n def __str__(self):\n return 'Seaweed'\n\n\nclass Bird:\n def __str__(self):\n return 'Bird'\n\n\ndef print_elements(data):\n for i, key in enumerate(data):\n cprint('{} : {}'.format(i, key), color='grey')\n\n\ndef test_numbers(user_number, data):\n if user_number.isdigit():\n if len(user_number) == 1:\n user_number = int(user_number)\n else:\n cprint('Only one digit is needed!', color='red')\n return False\n else:\n cprint('A number is required!', color='red')\n return False\n if user_number >= len(data):\n cprint('You can only use numbers from the list!', color='red')\n return False\n else:\n return True\n\n\ndef get_element(index, data):\n for i, key in enumerate(data):\n if i == index:\n return data[key]\n\n\ndef test_minor_number(user_number):\n if user_number.isdigit():\n if len(user_number) == 1:\n user_number = int(user_number)\n else:\n cprint('Only one digit is needed!', color='red')\n return False\n else:\n cprint('A number is required!', color='red')\n return False\n if user_number == 1 or user_number == 2:\n return user_number\n else:\n cprint('You must enter 1 or 2', color='red')\n return False\n\n\nbase_elements = {\n 'Water': Water,\n 'Air': Air,\n 'Fire': Fire,\n 'Earth': Earth\n}\nnew_elements = {}\n\ncprint('Welcome to the game \"Who is the alchemist here?\"', color='green')\ncprint('You have four elements; try combining them in pairs and see what happens. '\n 'To work some magic, enter the number of an element! ', color='green')\n\nprint_elements(base_elements)\nwhile True:\n first_choice = input(colored('Choose the first element: ', color='grey'))\n if test_numbers(first_choice, base_elements):\n user_number_1 = int(first_choice)\n else:\n continue\n second_choice = input(colored('Choose the second element: ', color='grey'))\n if test_numbers(second_choice, base_elements):\n user_number_2 = int(second_choice)\n else:\n continue\n element_1 = get_element(user_number_1, base_elements)\n element_2 = get_element(user_number_2, base_elements)\n result = element_1() + element_2()\n if element_2 == element_1 and result is not None:\n if str(result) not in base_elements:\n base_elements[str(result)] = result.__class__\n cprint('Wow! You found another base element - {}! Try doing something with it!'.format(result),\n color='blue')\n else:\n cprint('You already have this element!', color='red')\n elif result is None:\n cprint('The element you are trying to create is impossible!', color='red')\n elif str(result) not in new_elements:\n cprint('And it is.....{}!'.format(result), color='yellow')\n new_elements[str(result)] = result\n else:\n cprint('You already have this element!', color='red')\n\n if len(new_elements) >= 1:\n while True:\n show_new_elements = input(colored('Press 1 to view the created elements, press 2 to continue :) ',\n color='white'))\n test_res = test_minor_number(show_new_elements)\n if test_res == 1:\n cprint('You have created:', color='yellow')\n print_elements(new_elements)\n break\n elif test_res == 2:\n break\n while True:\n show_base_elements = input(colored('Enter 1 to see the list of base elements, or 2 to continue ',\n color='white'))\n test_res = test_minor_number(show_base_elements)\n if test_res == 1:\n cprint('You can combine:', color='yellow')\n print_elements(base_elements)\n break\n elif test_res == 2:\n break\n\n# Advanced task (optional)\n# Add one more element to the game.\n# Decide what happens when existing elements are combined with the new one.\n","repo_name":"bass-2000/python-sandbox","sub_path":"lesson_07/02_alchemy.py","file_name":"02_alchemy.py","file_ext":"py","file_size_in_byte":8300,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
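# --- Editor's illustration (not part of the original record): because the element classes
# above overload __add__ and __str__, the combination table can be exercised directly.
# These lines assume the classes from the game are in scope.
print(Water(), '+', Air(), '=', Water() + Air())      # -> Water + Air = Storm
print(Fire(), '+', Earth(), '=', Fire() + Earth())    # -> Fire + Earth = Lava
print(Water(), '+', Water(), '=', Water() + Water())  # -> Water + Water = Bacteria
print(Fire(), '+', Fire(), '=', Fire() + Fire())      # -> None: an undefined combination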
{"seq_id":"40186344741","text":"import re\n\nimport requests\n\nfrom dailynotereminder.__version__ import version as current_version\nfrom dailynotereminder.config import config\nfrom dailynotereminder.locale import _\nfrom dailynotereminder.notifiers import send\nfrom dailynotereminder.utils import log\n\n\ndef get_latest_version_github(repo):\n url = f'https://api.github.com/repos/{repo}/releases/latest'\n try:\n response = requests.get(url)\n if response.status_code == 200:\n data = response.json()\n latest_version = data.get('tag_name', None)\n latest_url = data.get('html_url', None)\n latest_desc = data.get('body', \"\")\n latest = {'version': latest_version, 'url': latest_url, 'desc': latest_desc}\n return latest\n else:\n log.warning(\n f'Failed to check latest version from github: {response.status_code}'\n )\n return None\n except Exception as e:\n log.warning(f'Failed to check latest version from github: {e}')\n return None\n\n\ndef get_latest_version_jihulab(repo):\n url = f\"https://jihulab.com/api/v4/projects/{repo.replace('/', '%2F')}/releases\"\n try:\n response = requests.get(url)\n if response.status_code == 200:\n data = response.json()\n if data:\n latest_release = data[0]\n latest_version = latest_release.get('tag_name', None)\n latest_desc = latest_release.get('description', \"\")\n latest = {\n 'version': latest_version,\n 'url': f'https://jihulab.com/{repo}/-/releases/{latest_version}',\n 'desc': latest_desc,\n }\n return latest\n else:\n log.warning(\n f'Failed to check latest version from jihulab: data is empty'\n )\n else:\n log.warning(\n f'Failed to check latest version from jihulab: {response.status_code}'\n )\n return None\n except Exception as e:\n log.warning(f'Failed to check latest version from jihulab: {e}')\n return None\n\n\ndef generate_update_message(latest_info):\n return _('Current version: {}\\nLatest version: {}\\nUpdate URL: {}').format(\n current_version, latest_info['version'], latest_info['url']\n )\n\n\ndef notify_update(latest_info):\n message = generate_update_message(latest_info)\n send(text='🎉', status=_('Genshin-Dailynote-Reminder has an update!'), message=message)\n\n\ndef check_update():\n repo = 'Xm798/Genshin-Dailynote-Reminder'\n latest_info = get_latest_version_github(repo) or get_latest_version_jihulab(repo)\n\n if not latest_info:\n log.warning(_('⚠️ Failed to check for a new version'))\n return\n\n if has_new_version(current_version, latest_info['version']):\n log.info(_('🎉 New version {} found').format(latest_info['version']))\n should_notify = \"#NOTIFY\" in latest_info.get('desc', '')\n\n if config.CHECK_UPDATE in ['default', True]:\n if should_notify:\n notify_update(latest_info)\n else:\n log.info(_('⬆️ Not sending a notification; please go to {} to download it').format(latest_info['url']))\n else:\n notify_update(latest_info)\n else:\n log.info(_('🔄 Already on the latest version, no update needed'))\n\n\ndef has_new_version(current, latest):\n return version_to_number(latest) > version_to_number(current)\n\n\ndef version_to_number(version):\n version = version.lstrip('v')\n version = re.split('[-+]', version)[0]\n version_list = [int(x) for x in version.split('.')]\n\n number = 0\n if len(version_list) > 0:\n number += version_list[0] * 10000\n if len(version_list) > 1:\n number += version_list[1] * 100\n if len(version_list) > 2:\n number += version_list[2]\n\n return number\n","repo_name":"Xm798/Genshin-Dailynote-Reminder","sub_path":"dailynotereminder/utils/update_checker.py","file_name":"update_checker.py","file_ext":"py","file_size_in_byte":3896,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"}
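# --- Editor's illustration (not part of the original record): how version_to_number above
# flattens a tag into a comparable integer -- major*10000 + minor*100 + patch, after
# stripping a leading "v" and any -pre/+build suffix. Assumes both functions are in scope.
assert version_to_number("v1.2.3") == 10203
assert version_to_number("1.3.0-beta+build7") == 10300
assert has_new_version("v1.2.3", "v1.3.0")  # 10300 > 10203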
{"seq_id":"33626785126","text":"from discord.ext import commands\n\ndescription = '''A bot to connect the playerbase of multiple Tekken discords using their platform and ID.'''\n\n# specifies extensions to load when bot starts up\nstartup_extensions = [\"Players\"]\n\nbot = commands.Bot(command_prefix='.', description=description)\n\n@bot.event\nasync def on_ready():\n print('Logged in as')\n print(bot.user.name)\n print(bot.user.id)\n print('------')\n\n@bot.command()\nasync def hello():\n \"\"\"Say hi!\"\"\"\n await bot.say(\"Hello!\")\n\nif __name__ == \"__main__\":\n for extension in startup_extensions:\n try:\n bot.load_extension(extension)\n except Exception as e:\n exc = '{}: {}'.format(type(e).__name__, e)\n print('Failed to load extension {}\\n{}'.format(extension, exc))\n\nbot.run('token')","repo_name":"hanyaah/combot-net","sub_path":"combot-net.py","file_name":"combot-net.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"39610843393","text":"import os\nimport run_language_modeling\n\nFILE_PATH = os.path.dirname(os.path.realpath(__file__))\n\nTRAIN_FILE = FILE_PATH + \"/input/training.txt\"\nTEST_FILE = FILE_PATH + \"/input/wiki.test.raw\"\n\n# gpt2 is the 117M version, gpt2-xl is the 1.5B version\nMODEL_PATH = \"gpt2-xl\"#\"gpt2\"\nMODEL_TYPE = \"gpt2\"\nGRADIENT_CHECKPOINTING = True\n\n# IF YOU WANT TO IMPROVE RESULTS: increase per_device_train_batch_size and num_train_epochs; the first requires higher-memory GPUs and the second more time\n\n\ndef finetune():\n # more info: https://github.com/huggingface/transformers/tree/master/examples/language-modeling\n # flags info: https://github.com/huggingface/transformers/blob/master/src/transformers/training_args.py\n additional_args = [\"--output_dir=\" + FILE_PATH + \"/output\", \"--overwrite_output_dir\", \"--model_type=\" + MODEL_TYPE,\n \"--model_name_or_path=\" + MODEL_PATH, \"--do_train\", \"--train_data_file=\" + TRAIN_FILE,\n \"--per_device_train_batch_size=1\", 
\"--num_train_epochs=1\", \"--fp16\"]\n run_language_modeling.main(additional_args, GRADIENT_CHECKPOINTING)\n\n\nif __name__ == \"__main__\":\n finetune()\n","repo_name":"pablonm3/gpt2xl_finetuning","sub_path":"finetune.py","file_name":"finetune.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"11656896011","text":"import itertools\nfrom os import walk\nfrom tqdm import tqdm\n\nimport numpy as np\nfrom scipy.fft import fft2, fftfreq\nfrom scipy.optimize import curve_fit as fit\nfrom scipy.interpolate import CubicSpline as spline\n\nfrom matplotlib import pyplot as plt\n\n\n# Wavenumber\nn = 1024\nx, h = np.linspace(0, n, n, endpoint=False, retstep=True)\nk_x, k_y = np.meshgrid(fftfreq(n, h/(2*np.pi)), fftfreq(n, h/(2*np.pi)))\nk = np.sqrt(k_x**2 + k_y**2)\nk_radius, dr = np.linspace(0, 0.5, 25, endpoint=True, retstep=True)\n\n# Load data\nk1 = list()\ns = list()\nt = list()\nn_k_max = 1000\n\nmobility = \"variable\"\n\n_, _, filenames = next(walk(f\"./data/{mobility}\"))\nfor filename in tqdm(filenames):\n\n # Load data\n data = np.loadtxt(f\"data/{mobility}/\" + filename).reshape(n, n)\n t.append(int(filename.split(\"_\")[3].split(\".txt\")[0]))\n\n # Compute S\n c_avg = np.mean(data, axis=(0, 1))\n c_hat = fft2(data - c_avg)\n Skk = (c_hat * np.conjugate(c_hat)).real\n\n # Normalize S\n c_int = np.sum((data - c_avg)**2, axis=(0, 1))\n s_int = np.sum(Skk, axis=(0, 1))\n Skk *= c_int / s_int\n\n # Compute circular integral\n S = np.zeros(k_radius.shape)\n for i, k_val in enumerate(k_radius):\n row, col = np.where(np.abs(k - k_val) < dr/2)\n S[i] = np.mean(Skk[row, col])\n s.append(S)\n\n # Compute k1\n k1.append(np.sum(k_radius*S) / np.sum(S))\n\n\n# Sort w.r.t. 
t\ns = [e for _, e in sorted(zip(t, s))]\nk1 = np.array([e for _, e in sorted(zip(t, k1))])\nt.sort()\n\n# Plot structure function S(k,t)\nplt.rcParams.update({\"text.usetex\": True})\nax = plt.gca()\nfor mat, t_ in zip(s, t):\n color = next(ax._get_lines.prop_cycler)['color']\n k_interp = np.linspace(k_radius[0], k_radius[-1], 1000, endpoint=True)\n s_interp = spline(k_radius, mat, bc_type=\"clamped\")(k_interp)\n plt.plot(k_interp, s_interp, label=f\"t = {t_}\", color=color)\n # plt.plot(k_radius, mat, \"o\", color=color)\nplt.xlabel(\"k\")\nplt.ylabel(\"S(k,t)\")\nplt.xlim([k_radius[0], k_radius[-1]])\nplt.gca().set_ylim(bottom=0)\nplt.legend()\nplt.show()\n# plt.savefig(f\"structure_{mobility}.pdf\")\n\n# Plot F, the scaling function\nplt.clf()\nidx = -6\nmarker = itertools.cycle(('+', '.', 'o', '*', '<', 'd', 'x', '1'))\nfor s_, k1_, t_ in zip(s[idx:], k1[idx:], t[idx:]):\n plt.plot(k_radius/k1_, s_*k1_**2, linestyle=\"\", marker=next(marker), label=f\"t = {t_}\")\nplt.xlabel(\"$k/k_1$\")\nplt.ylabel(\"$k_1^2s(k,t)$\")\nplt.gca().set_xlim(left=0, right=3)\nplt.gca().set_ylim(bottom=0)\nplt.legend()\nplt.show()\n# plt.savefig(f\"scalefunc_{mobility}.pdf\")\n\n# Compute power growth law\nf = lambda x, a, b, m: a + b*x**m\nidx = 0\nt = np.array(t)\npopt, pcov = fit(f, t[idx:], 1/k1[idx:], bounds=(0, [np.inf, np.inf, .5]), maxfev=10000)\nprint(f\"Power growth law exponent = {1/popt[-1]}\")\n\n# Plot L(t)\nplt.clf()\nref = t[idx:]**(1./4.)\nref /= ref[-1] * k1[-1]\n\nplt.loglog(t[idx:], ref, label=\"$\\\\mathcal{O}\\\\left(\\\\sqrt[4]{t}\\\\right)$\")\nplt.loglog(t[idx:], 1/k1[idx:], \"o\", label=\"Simulation data\")\nplt.xlabel(\"t\")\nplt.ylabel(\"L(t)\")\nplt.legend()\nplt.show()\n# plt.savefig(f\"length_{mobility}.pdf\")\n","repo_name":"liqihao2000/Cahn-Hilliard","sub_path":"C/scripts/kinematics.py","file_name":"kinematics.py","file_ext":"py","file_size_in_byte":3047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"14777115847","text":"from flask import Flask, render_template, request\nimport subprocess\nfrom pycorenlp import StanfordCoreNLP\nimport requests\nimport json\n\nIN_FILE = 'in.txt'\nOUT_FILE = 'out.txt'\nTREE_FILE = 'tree.txt'\n# JAR = '../../IdeaProjects/Graphene_sent_simpl__discourse/SentenceSimplification/target/' \\\n# 'sentence-simplification-5.0.0-jar-with-dependencies.jar'\nJAR_SS = 'sentence-simplification-5.0.0-jar-with-dependencies.jar'\nJAR_QG = ''\n\n\napp = Flask(__name__)\nnlp = StanfordCoreNLP('172.17.0.1:9003')\n\n\ndef get_tree(sent):\n url = \"http://localhost:9001/tregex\"\n request_params = {\n \"pattern\": \"S|SBAR !> ROOT !< S|SBAR\"} # split by all kinds of conjunctions(including non restrictive relative clauses)\n text = \"The woman who visited me in the hospital was very kind.\"\"\" # participial also if coma\n d_s = requests.post(url, data=text, params=request_params).text\n print(d_s)\n # output = nlp.annotate(sent, properties={\n # 'annotators': 'tokenize,ssplit,pos,depparse,parseparse',\n # 'outputFormat': 'json'\n # })\n # with open(TREE_FILE, 'w') as f:\n # f.write(output['sentences'][0]['parse'])\n\n\ndef parse_qg_output(q):\n q_p = [p.strip('[]') for p in q.split()]\n q_p = [v.split('=')[1] for v in q_p if v.startswith('Value')]\n return q_p\n\n\ndef simplify_sentence(sent):\n with open(OUT_FILE) as F:\n pass\n with open(IN_FILE, 'w') as f:\n f.write(sent)\n return subprocess.call(['java', '-jar', JAR_SS, IN_FILE, OUT_FILE])\n # return subprocess.call('java -jar ' + JAR + ' ' + IN_FILE + ' ' 
+ OUT_FILE, shell=True)\n\n\ndef generate_question(sent):\n get_tree(sent)\n return 1\n # return subprocess.call(['java', '-jar', JAR_QG, TREE_FILE, OUT_FILE])\n\n\n@app.route('/')\ndef hello_world():\n return 'Hello World!'\n\n\n@app.route('/generate-questions')\ndef get_template_qg():\n return render_template('question_gen_template.html')\n\n\n@app.route('/generate-questions', methods=['POST'])\ndef question_generation():\n sent = request.form['sentence']\n if generate_question(sent):\n return render_template('question_gen_template.html', Sentence='Error')\n else:\n with open(OUT_FILE) as f:\n questions = [' '.join(parse_qg_output(l.strip())) for l in f.readlines()]\n return render_template('question_gen_template.html', Sentence=sent, Questions=';'.join(questions))\n\n\n@app.route('/simplify-sentence')\ndef get_template_ss():\n return render_template('sent_simlp_template.html')\n\n\n@app.route('/simplify-sentence', methods=[\"POST\"])\ndef sentence_simplification():\n sent = request.form['sentence']\n if simplify_sentence(sent): # call the helper, not this view function (the original called itself and recursed forever)\n return render_template('sent_simlp_template.html', Core='Error')\n else:\n with open(OUT_FILE, 'r') as f:\n content = f.readlines()\n return render_template('sent_simlp_template.html', Original=str(content[0]), Core=str(content[1]), Context=str(' \\t\\n'.join(content[2:])))\n\n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0')\n","repo_name":"DzvinkaYarish/Question_Generation_web_service","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"29671384785","text":"import os\r\nfrom ..xjson.file_list import FileList, F_FILE_NOT_FOUND\r\nfrom ..xjson.exceptions.file_exceptions import FileNotFoundException, IsNotFileException\r\n\r\n\r\n\r\ndef test_file_caching():\r\n file_list = FileList()\r\n file_list.clear()\r\n fn = os.path.join(\"examples\", \"single_file_obj.json\")\r\n file_list.get(fn)\r\n file_list.get(fn)\r\n file_list.get(fn, True)\r\n\r\n l = file_list._list\r\n assert len(l) == 1\r\n assert list(l.keys())[0] == fn\r\n\r\n\r\ndef test_file_info():\r\n file_list = FileList()\r\n file_list.clear()\r\n fn = os.path.join(\"examples\", \"single_file_obj.json\")\r\n file_list.get(fn)\r\n l = file_list._list\r\n info = l.get(fn)\r\n assert info.full_name == fn\r\n assert info.c_time == os.path.getctime(fn)\r\n assert info.size == os.path.getsize(fn)\r\n assert info.name == \"single_file_obj.json\"\r\n assert info.ext == \"json\"\r\n assert info.is_file == True\r\n assert info.is_directory == False\r\n\r\ndef test_dir_info():\r\n file_list = FileList()\r\n file_list.clear()\r\n fn = os.path.join(\"examples\", \"countries\", \"dir_one_level\")\r\n file_list.get(fn)\r\n l = file_list._list\r\n info = l.get(fn)\r\n assert info.full_name == fn\r\n assert info.c_time == os.path.getctime(fn)\r\n assert info.size == 0\r\n assert info.name == \"dir_one_level\"\r\n assert info.ext == \"\"\r\n assert info.is_file == False\r\n assert info.is_directory == True\r\n\r\ndef test_singleton():\r\n fn = os.path.join(\"examples\", \"countries\", \"dir_one_level\")\r\n file_list1 = FileList()\r\n file_list1.clear()\r\n file_list1.get(fn)\r\n file_list = FileList()\r\n file_list.clear()\r\n file_list.get(fn)\r\n l = file_list._list\r\n info = l.get(fn)\r\n assert info.full_name == fn\r\n\r\n\r\ndef test_checking():\r\n file_list = FileList()\r\n file_list.clear()\r\n file = file_list.get(os.path.join(\"examples\", 
\"not_exists\"), False)\r\n try:\r\n file.check()\r\n res = True\r\n except FileNotFoundException:\r\n res = False\r\n\r\n assert res == False\r\n\r\n file = file_list.get(os.path.join(\"examples\", \"countries\", \"dir_one_level\"), False)\r\n try:\r\n file.check()\r\n res = True\r\n except IsNotFileException:\r\n res = False\r\n\r\n assert res == False\r\n\r\n file = file_list.get(os.path.join(\"examples\", \"countries\", \"dir_one_level\"), False)\r\n try:\r\n file.check(F_FILE_NOT_FOUND)\r\n res = True\r\n except IsNotFileException:\r\n res = False\r\n\r\n assert res == True","repo_name":"mikegribov/xjson","sub_path":"tests/test_file_list.py","file_name":"test_file_list.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31914562397","text":"class PrimeFinder:\r\n def __init__(self):\r\n self.primes = [2, 3]\r\n self.sqrtBase = 2\r\n\r\n def set_sqrt_base(self, number):\r\n if number - self.sqrtBase * self.sqrtBase > self.sqrtBase + self.sqrtBase:\r\n self.sqrtBase += 1\r\n\r\n def check_if_prime(self, number):\r\n self.set_sqrt_base(number)\r\n\r\n for i in range(2, len(self.primes) - 1):\r\n if number % self.primes[i] == 0:\r\n return\r\n\r\n if self.primes[i] > self.sqrtBase:\r\n break\r\n\r\n self.primes.append(number)\r\n\r\n def get_current(self):\r\n current = self.primes[-1]\r\n current += 6 - current % 6\r\n\r\n if current - 1 == self.primes[-1]:\r\n self.check_if_prime(current + 1)\r\n current += 6\r\n\r\n return current\r\n\r\n def calculate_first_n_primes(self, n):\r\n if len(self.primes) >= n:\r\n return\r\n\r\n current = self.get_current()\r\n\r\n while len(self.primes) < n:\r\n self.check_if_prime(current - 1)\r\n self.check_if_prime(current + 1)\r\n current += 6\r\n\r\n if len(self.primes) > n:\r\n self.primes.pop()\r\n\r\n def get_calculated_primes_length(self):\r\n return len(self.primes)\r\n","repo_name":"profgyuri/PrimeSpeedTest","sub_path":"Python/PrimeSpeedTest/prime_finder.py","file_name":"prime_finder.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32981809137","text":"# Given two words word1 and word2, find the minimum number of operations required to convert word1 to word2.\n#\n# You have the following 3 operations permitted on a word:\n#\n# Insert a character\n# Delete a character\n# Replace a character\n# Example 1:\n#\n# Input: word1 = \"horse\", word2 = \"ros\"\n# Output: 3\n# Explanation:\n# horse -> rorse (replace 'h' with 'r')\n# rorse -> rose (remove 'r')\n# rose -> ros (remove 'e')\n# Example 2:\n#\n# Input: word1 = \"intention\", word2 = \"execution\"\n# Output: 5\n# Explanation:\n# intention -> inention (remove 't')\n# inention -> enention (replace 'i' with 'e')\n# enention -> exention (replace 'n' with 'x')\n# exention -> exection (replace 'n' with 'c')\n# exection -> execution (insert 'u')\n\nclass Solution:\n def minDistance(self, word1, word2):\n \"\"\"\n :type word1: str\n :type word2: str\n :rtype: int\n \"\"\"\n if word1 == word2:\n return 0\n l1, l2 = len(word1), len(word2)\n dp = [[0 for _ in range(l1 + 1)] for _ in range(l2 + 1)]\n for i in range(l2 + 1):\n for j in range(l1 + 1):\n if i > 0 and j > 0:\n dp[i][j] = min(1 + dp[i - 1][j], 1 + dp[i][j - 1],\n self.diff(word2[i - 1], word1[j - 1]) + dp[i - 1][j - 1])\n elif i > 0:\n dp[i][j] = 1 + dp[i - 1][j]\n elif j > 0:\n dp[i][j] = 1 + dp[i][j - 1]\n return dp[-1][-1]\n\n def 
diff(self, i, j):\n return 0 if i == j else 1\n\n\ns = Solution()\nprint(s.minDistance('intention', 'execution'))\n","repo_name":"yshshadow/Leetcode","sub_path":"51-100/72.py","file_name":"72.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11516047607","text":"\"\"\"Contains utility functions that formats raw data for database insertion.\n\nThe incoming data will usually contain too much data \nto be inserted into a database in one go.\nThese utility functions will select smaller bits of data\nand return them in a simpler data structure.\n\"\"\"\n\n\ndef format_departments(staffs):\n \"\"\"Transforms a list of staff dicts into a list of unique [department].\n\n Gets the 'department' values from a list of dicts representing each staff.\n Puts each department in a single-item list.\n Removes duplicates and returns them as \n one list of lists representing each unique department.\n\n Args:\n staffs:\n A list of dicts. Each dict is mapped to a staff row\n from the queried database.\n Each key of each dict is mapped to a staff column\n from the queried database.\n\n Returns:\n A list of departments.\n Each department is represented as a list of a string.\n Items of the top-level list are unique.\n \"\"\"\n\n formatted_departments = []\n\n for staff in staffs:\n department = [staff['department']]\n\n if department not in formatted_departments:\n formatted_departments.append(department)\n\n return formatted_departments\n\n\ndef format_stock(stock):\n \"\"\"Transforms a list of item dicts into a list of [unique item_name, total amount_in_stock].\n\n Gets the 'item_name' and 'amount_in_stock' values from a list of dicts representing each item.\n Puts each item name and stock amount in a list.\n In cases of duplicate item names, keep item names unique and add to the amount_in_stock instead.\n Return them as one list of lists representing each unique item.\n\n Args:\n stock:\n A list of dicts. 
Each dict is mapped to an item row\n from the queried database.\n Each key of each dict is mapped to an item column\n from the queried database.\n\n Returns:\n A list of items.\n Each item is represented as a list of item_name: str and amount_in_stock: int.\n Item names are unique.\n \"\"\"\n\n formatted_stock = []\n # Key will be an item name, value will be a ref to the corresponding element in formatted_stock.\n existing_items = {}\n\n for item in stock:\n item_name = item['item_name']\n amount_in_stock = item['amount_in_stock']\n\n if item_name not in existing_items:\n formatted_item = [item_name, amount_in_stock]\n\n formatted_stock.append(formatted_item)\n\n existing_items[item_name] = formatted_item\n else:\n existing_items[item_name][1] += amount_in_stock\n \n return formatted_stock\n\n\ndef format_features(stock):\n \"\"\"Transforms a list of item dicts into a list of unique [feature].\n\n Gets the 'features' values from a list of dicts representing each item.\n Puts each feature in a single-item list.\n Removes duplicates and returns them as\n one list of lists representing each unique feature.\n \"\"\"\n \n formatted_features = []\n\n for item in stock:\n for feature in item['features']:\n formatted_feature = [feature]\n\n if formatted_feature not in formatted_features:\n formatted_features.append(formatted_feature)\n\n return formatted_features\n","repo_name":"gheenie/py-review","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
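# --- Editor's illustration (not part of the original record): format_stock above keeps one
# row per item_name and accumulates counts in place through the existing_items references.
# Assumes format_stock is in scope; the rows are invented.
stock = [
    {"item_name": "bolt", "amount_in_stock": 5},
    {"item_name": "nut", "amount_in_stock": 2},
    {"item_name": "bolt", "amount_in_stock": 3},
]
print(format_stock(stock))  # -> [['bolt', 8], ['nut', 2]]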
{"seq_id":"35755314746","text":"import sys\nimport numpy as np\nimport sklearn.metrics\nimport threading\nfrom os import path\nimport time\n\nclass DLRMRunner:\n def __init__(self, bmodel_path):\n self.bmodel_path = bmodel_path\n self.batch_num = 1024\n self.print_interval = 20\n\n def run(self, data_path):\n self.load_data(data_path)\n self.left_count = self.num_samples\n import bmservice\n self.runner = bmservice.BMService(self.bmodel_path, self.batch_num)\n self.y_score = np.zeros_like(self.y_true)\n\n self.task_map = {}\n self.map_lock = threading.Lock()\n feed_thread = threading.Thread(target=self.feed_data)\n feed_thread.start()\n\n while self.left_count>0:\n task_id, values, valid = self.runner.try_get()\n if task_id == 0:\n time.sleep(0.0001)\n continue\n self.map_lock.acquire()\n start_index, end_index = self.task_map[task_id]\n del self.task_map[task_id]\n self.map_lock.release()\n if task_id % self.print_interval == 0:\n print(\"get task_id={}, start={}, end={}\".format(task_id, start_index, end_index), flush=True)\n output = values[0]\n out_num = end_index - start_index\n self.y_score[start_index:end_index] = output[0:out_num].reshape(-1)\n self.left_count = self.left_count - out_num\n\n self.runner.show()\n data_dir = path.dirname(data_path)\n y_score_file = path.join(data_dir, \"dlrm_test_y_score.npy\")\n print(\"save y_score data to {}, which may take minutes...\".format(y_score_file))\n np.save(y_score_file, self.y_score)\n #print(\"y=\", self.y_score)\n return self.y_score, self.y_true\n\n def load_data(self, data_path):\n data_dir = path.dirname(data_path)\n print(\"loading data from {}, which may take minutes...\".format(data_path))\n int_fea_file = path.join(data_dir, \"dlrm_test_int_fea.npy\")\n cat_fea_file = path.join(data_dir, \"dlrm_test_cat_fea.npy\")\n y_file = path.join(data_dir, \"dlrm_test_y.npy\")\n #if path.exists(int_fea_file) and path.exists(cat_fea_file) and path.exists(y_file):\n # self.x_int = np.load(int_fea_file)\n # self.x_cat = np.load(cat_fea_file)\n # self.y_true = np.load(y_file)\n # self.num_samples = len(self.y_true)\n # print(\"find cached sample files, load directly!\")\n # return\n test_data = np.load(data_path)\n x_int, x_cat, y_true = test_data[\"X_int\"], test_data[\"X_cat\"], test_data[\"y\"]\n self.num_samples = len(y_true)//2\n print(\"prepare samples: sample_num={}\".format(self.num_samples), flush=True)\n self.x_int = x_int[0:self.num_samples].astype(np.float32)\n self.x_cat = x_cat[0:self.num_samples].astype(np.int32)\n self.y_true = y_true[0:self.num_samples].astype(np.float32)\n np.save(int_fea_file, self.x_int)\n np.save(cat_fea_file, self.x_cat)\n np.save(y_file, self.y_true)\n print(\"prepare done!\", flush=True)\n\n def feed_data(self):\n num_cat = self.x_cat[0].shape[0]\n num_int = self.x_int[0].shape[0]\n batch_num = self.batch_num\n offset_batch = np.tile(np.arange(batch_num, dtype=np.int32).reshape(1,-1), (num_cat, 1))\n offset_batch = np.ascontiguousarray(offset_batch)\n feed_count = 0\n while feed_count < self.num_samples:\n left_count = self.num_samples - feed_count\n start_index = feed_count\n if left_count auc_score = {}\".format(auc_score))\n","repo_name":"xiaotan3664/BMService","sub_path":"examples/dlrm/bmservice_dlrm.py","file_name":"bmservice_dlrm.py","file_ext":"py","file_size_in_byte":4630,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"}
{"seq_id":"34169732033","text":"import pandas as pd\nimport nltk\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\n\ndef read_dict(data, name):\n\n # Internal function. Reads a dictionary in the rows and makes a column from a keyword\n arr = []\n length = int(data.shape[0])\n for i in range(length):\n arr.append(data['polarity'][i][name])\n data[name] = arr \n return data\n\ndef sentimentValueFromTweeter(tweets):\n\n # Download lexicon\n nltk.download('vader_lexicon')\n\n\n # Creating a list of tweets\n tweets_text = list(map(lambda t: t['text'], tweets))\n \n # Tweets dataframe\n data = pd.DataFrame(data=tweets_text, columns=['Tweets'])\n\n # Sentiment analysis\n sia = SentimentIntensityAnalyzer()\n\n # Polarity\n sentimentList = []\n for index, row in data.iterrows():\n polarityScore = sia.polarity_scores(row[\"Tweets\"])\n sentimentList.append(polarityScore)\n\n # Transform the list into a series\n sentimentSeries = pd.Series(sentimentList)\n\n # Create a new column in the dataframe\n data['polarity'] = sentimentSeries.values\n\n opinions = ['neg', 'neu', 'pos'] # negative, neutral, positive\n for option in opinions:\n data = read_dict(data, option)\n \n # Remove all 100% neutral\n filtered = data[data['neu'] != 1]\n\n # Splitting values\n result = filtered.sum(axis = 0)\n result.drop(['Tweets'], inplace=True)\n\n # Define the sentiment\n # if negative sentiment is more than three times the positive sentiment\n if result['neg'] > 3*result['pos']:\n sentiment = -3\n\n # else if negative sentiment is more than twice the positive sentiment\n elif result['neg'] > 2*result['pos']:\n sentiment = -2\n \n # else if negative sentiment is slightly bigger than positive sentiment\n elif result['neg'] > result['pos']:\n sentiment = -1\n\n # else if negative sentiment is less than a third of the positive sentiment\n elif 3*result['neg'] < result['pos']:\n sentiment = 3\n\n # else if negative sentiment is less than half the positive sentiment\n elif 2*result['neg'] < result['pos']:\n sentiment = 2\n\n # else if negative sentiment is slightly smaller than positive sentiment\n elif result['neg'] < result['pos']:\n sentiment = 1\n\n # else NOBODY CARES ABOUT YOUR KEYWORD\n else:\n sentiment = 0\n\n return sentiment # -3, -2, -1, 0, 1, 2, 
3","repo_name":"rodrigoms2004/FinanceML_API","sub_path":"src/sentimentAnalysis.py","file_name":"sentimentAnalysis.py","file_ext":"py","file_size_in_byte":2263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36837356491","text":"import sushigo\nimport itertools as it\nimport random\n\nPOSSIBLE_CARDS = list(set([str(_) for _ in sushigo.deck.StandardDeck()]))\n\nclass CustomPlayer(sushigo.player.Player):\n def __init__(self, order, name=None):\n super(CustomPlayer, self).__init__()\n self.name = 'custom-player'\n if name:\n self.name = name\n self.order = order\n if any([(_ not in order) for _ in POSSIBLE_CARDS]):\n raise ValueError(\"forgot card type in OrderedPlayer init\")\n\n def act(self, reward, observation=None, action_space=None):\n if not action_space:\n raise ValueError(\"player received an empty set of actions\")\n\n # the player can get a notion of possible cards, give them an order\n order = {j: i for i, j in enumerate(self.order)}\n # the action space consists of objects now, which we may need to string-sort\n ordered_actions = sorted(action_space, key = lambda _: order[str(_)])\n\n return ordered_actions[-1]\n\n\ndef simulate(order, n_games = 10):\n \"\"\"\n This function simulates a game, assuming a player uses certain order.\n \"\"\"\n res = []\n for _ in range(n_games):\n p1 = sushigo.player.Player(name=\"p1\")\n p2 = CustomPlayer(name=\"custom\", order = order)\n players = [p1,p2]\n game = sushigo.game.Game(players, deck=sushigo.deck.StandardDeck())\n game.simulate_game()\n res.append(game.did_player_win(\"custom\"))\n return sum(res)\n\ndef swap(arr):\n \"\"\"\n Randomly selects some items in the iterable. \n \"\"\"\n i1 = random.randint(0, len(arr) - 1)\n i2 = random.randint(0, len(arr) - 1)\n arr[i2], arr[i1] = arr[i1], arr[i2]\n return arr\n\nprint(\"This will simulate games for a greedy search.\")\nscore_before = 0\norder = POSSIBLE_CARDS\nfor _ in range(100):\n n = 100\n new_order = swap(order)\n new_score = simulate(order, n)\n print(\"iteration: {}\\ncard order:{}\\nnumber of wins:{}/{}\".format(_, new_order, new_score, n))\n if new_score > score_before:\n print(\"IMPROVEMENT!\")\n order = new_order\n score_before = new_score\n","repo_name":"koaning/sushigo","sub_path":"vignettes/greedy-player-interaction.py","file_name":"greedy-player-interaction.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"16350999809","text":"import sympy\nimport sys\n\n\ndef ValidateSymbols(symbols: str) -> list:\n if not symbols:\n print('Error: symbols string is null or empty.')\n return None\n \n symbols = symbols.split(' ')\n \n if len(symbols) < 1:\n print('Error: no valid symbols found in symbols string.')\n return None\n \n for symbol in symbols:\n if len(symbol) > 1:\n print('Error: symbol \\'{0}\\' is malformed.'.format(symbol))\n return None\n \n return symbols\n\n\ndef ParseEquation(symbols: str, fx: str) -> sympy.core.expr.Expr:\n fx = sympy.sympify(fx)\n \n return fx\n\n\ndef Derivative(symbols: str, fx: str) -> str:\n symbols = ValidateSymbols(symbols)\n \n # Declare an equation that we want to differentiate\n fx = ParseEquation(symbols, fx)\n \n # Create the variables for symbols we'll be using from the symbols param\n for symbol in symbols:\n locals()[symbol] = sympy.Symbol(symbol)\n \n # Differentiate it. 
Should yield: 5*x**4 + 28*x**3\n derivative = sympy.diff(fx)\n \n return derivative\n \n\ndef PartialDerivative(symbols: str, fx: str, variable: str) -> str:\n symbols = ValidateSymbols(symbols)\n \n # Declare an equation that we want to differentiate\n fx = ParseEquation(symbols, fx)\n \n # Create the variables for symbols we'll be using from the symbols param\n for symbol in symbols:\n locals()[symbol] = sympy.Symbol(symbol)\n \n # Differentiate it. Should yield: 5*x**4 + 28*x**3\n derivative = sympy.diff(fx, variable)\n \n return derivative\n\n\ndef Evalutate(symbols: str, fx: str, subs: dict) -> float:\n symbols = ValidateSymbols(symbols)\n \n # Declare an equation that we want to differentiate\n fx = ParseEquation(symbols, fx)\n \n # Create the variables for symbols we'll be using from the symbols param\n for symbol in symbols:\n locals()[symbol] = sympy.Symbol(symbol)\n \n result = fx.subs(subs).evalf()\n \n return result\n \n\ndef Solve(symbols: str, fx: str) -> sympy.sets.sets.Set:\n symbols = ValidateSymbols(symbols)\n\n # Declare an equation that we want to differentiate\n fx = ParseEquation(symbols, fx)\n\n # Create the variables for symbols we'll be using from the symbols param\n for symbol in symbols:\n locals()[symbol] = sympy.Symbol(symbol)\n\n result = sympy.solveset(fx, locals()[symbols[0]])\n\n return result\n\n\ndef main() -> int:\n # Power Rule\n fx = 'x**5 + 7*x**4 + 3'\n \n # Differentiate it. Should yield: 5*x**4 + 28*x**3\n derivative = Derivative('x', fx)\n print(derivative)\n \n # Product Rule\n fx = 'x**2 + 1'\n gx = 'cos(x)'\n \n # Differentiate it. Should yield: 2*x*cos(x) - (x**2 + 1)*sin(x)\n derivative = Derivative('x', '(' + fx + ') * (' + gx + ')')\n print(derivative)\n \n # Chain Rule\n fx = '(x**2 - 3*x + 5)**3'\n \n # Differentiate it. Should yield: (6*x - 9)*(x**2 - 3*x + 5)**2\n derivative = Derivative('x', fx)\n print(derivative)\n \n \n # Partial Derivatives\n \n # f(x) = x^2yz^5\n fx = 'x**2 * y * z**5'\n \n # Partial derivative with respect to x: 2*x*y*z**5\n derivative = PartialDerivative('x y z', fx, 'x')\n print(derivative)\n \n # Partial derivative with respect to y: x**2*z**5\n derivative = PartialDerivative('x y z', fx, 'y')\n print(derivative)\n \n # Partial derivative with respect to z: 5*x**2*y*z**4\n derivative = PartialDerivative('x y z', fx, 'z')\n print(derivative)\n \n # Utility function for pizza and cookies\n fx = 'sqrt(p * c)'\n \n # Differentiate f(x) with respect to p: sqrt(c*p)/(2*p)\n MUp = PartialDerivative('p c', fx, 'p')\n print(MUp)\n \n # Differentiate f(x) with respect to c: sqrt(c*p)/(2*c)\n MUc = PartialDerivative('p c', fx, 'c')\n print(MUc)\n \n # If c is 2 and p is 5:\n subs = {'c':2, 'p':5}\n \n result = Evalutate('p c', MUp, subs)\n print(result)\n \n result = Evalutate('p c', MUc, subs)\n print(result)\n \n return 0\n \n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"andocoyote/AndoEconAPIs","sub_path":"Common/Calculations.py","file_name":"Calculations.py","file_ext":"py","file_size_in_byte":4087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73952318","text":"'''\nIdea: Start from first station. Keep track of total gas and current gas. Whenever current gas falls below 0, make the next station as new starting point. 
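(Worked example, added for illustration: with gas = [1,2,3,4,5] and cost = [3,4,5,1,2] the per-station surpluses are [-2,-2,-2,3,3]; current gas dips below 0 at stations 0, 1 and 2, so the candidate start advances to station 3, and since total gas ends at 0 the method returns 3.) 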
At the end, if total gas is non-negative return starting point.\n\nTime complexity : O(n)\nSpace complexity: O(1)\n'''\n\nclass Solution:\n def canCompleteCircuit(self, gas, cost):\n n = len(gas)\n total_gas = current_gas = 0\n starting_point = 0\n for i in range(n):\n total_gas += gas[i] - cost[i]\n current_gas += gas[i] - cost[i]\n if current_gas < 0:\n starting_point = i+1\n current_gas = 0\n \n return starting_point if total_gas >= 0 else -1\n \n ","repo_name":"Anirudh-Muthukumar/Leetcode-Solutions","sub_path":"134. Gas Station/134. Gas Station.py","file_name":"134. Gas Station.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"46030859200","text":"# to extract the text from pdf\n# install pypdf2 through your terminal using pip install pypdf2\n\nimport PyPDF2\n# use pdfreader to read the content of the pdf\nreader = PyPDF2.PdfReader('table.pdf')\n\n# to extract the content in the page use extract_text\n\nextracted = \"\"\nfor i in range(1,2):\n extracted = reader.pages[i].extract_text()\n# to display the extracted data we will make the new text file\n# and store all the data\nwith open(\"text.txt\", \"w\", encoding='utf-8') as data_extracted:\n data_extracted.write(extracted)","repo_name":"sahilnayak7702/TextDetection","sub_path":"textfrompdf.py","file_name":"textfrompdf.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14203454451","text":"import pygame, sys\r\nfrom pygame.locals import *\r\nfrom juego import *\r\n\r\nmyCharacter = ('Emely','18','10','10','0','3')\r\n\r\npygame.init()\r\n\r\nFPS = 10 # frames per second setting\r\n\r\nfpsClock = pygame.time.Clock()\r\n\r\n# set up the window\r\n\r\ntamanio = (826, 427)\r\nSCREEN = pygame.display.set_mode(tamanio)\r\n\r\npygame.display.set_caption('Animation')\r\n\r\nfondo = pygame.image.load(\"fondo.png\").convert()\r\n\r\npersonajeImg = pygame.image.load(\"puca.png\")\r\npersonajeImg = pygame.transform.scale(personajeImg, (235, 200))\r\n\r\npersonajeX = 20\r\n\r\npersonajeY = 200\r\n\r\ndirection = 'right'\r\n\r\nwhile True: # the main game loop\r\n\r\n\tif direction == 'right':\r\n\t\tpersonajeX += 7\r\n\t\tif personajeX ==600:\r\n\t\t\tdirection = 'stop'\r\n\telif direction == 'down':\r\n\t\tpersonajeY += 7\r\n\t\tif personajeY == 600:\r\n\t\t\tdirection = 'down'\r\n\telif direction == 'right':\r\n\t\tpersonajeX -= 6\r\n\t\tif personajeX == 6:\r\n\t\t\tdirection = 'down'\r\n\telif direction == 'right':\r\n\t\tpersonajeY -= 6\r\n\t\tif personajeY == 6:\r\n\t\t\tdirection = 'down'\r\n\r\n\tSCREEN.blit(fondo, [0,0])\r\n\tSCREEN.blit(personajeImg, (personajeX, personajeY))\r\n\r\n\tfor event in pygame.event.get():\r\n\t\tif event.type == QUIT:\r\n\t\t\tpygame.quit()\r\n\t\t\tsys.exit()\r\n\t\r\n\tpygame.display.update()\r\n\tfpsClock.tick(FPS)","repo_name":"ITE-Ensenada/objetos","sub_path":"personaje.py","file_name":"personaje.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"7724578756","text":"import nltk\nfrom flask import Flask, render_template, request\nfrom flask import request\n\nfrom app.analytics.analytics_data import AnalyticsData\nfrom app.core.utils import load_documents_corpus, parseTweet\nfrom app.search_engine.search_engine import SearchEngine\nimport time\nfrom app.core.plots import update_plots\nfrom datetime 
import datetime\n\n\napp = Flask(__name__)\n\nanalytics_data = AnalyticsData()\ntweets, index, tf, idf = load_documents_corpus()\nsearchEngine = SearchEngine(tweets, index, tf, idf)\n\n## Global variables: ##\nlast_clicked_doc = None # To know the last clicked document (if any), for computing dwell time\nn_queries=0 # Total number of queries executed\n\n\n@app.route('/')\ndef search_form():\n global last_clicked_doc # Global variable used\n last_clicked_doc = None # Reseting timers, if any\n\n # Updating User table\n user_ip = request.remote_addr\n user = request.user_agent\n analytics_data.new_session(user_ip, user.platform, user.browser, user.language, datetime.now(), )\n\n return render_template('index.html', page_title=\"Welcome\")\n\n\n@app.route('/search', methods=['POST','GET'])\ndef search_form_post():\n global last_clicked_doc # Global variable used\n global n_queries # Global variable used\n n_queries += 1\n\n # If we come from a document, update the dwell time\n try:\n from_document = bool(request.args[\"from-document\"])\n search_query = request.args[\"search-query\"]\n doc_id = last_clicked_doc[0]\n dwell_time = time.time() - last_clicked_doc[1]\n analytics_data.update_dwell_time(doc_id, dwell_time)\n except:\n from_document = None\n search_query = request.form[\"search-query\"]\n\n # Computing results using our algorithm\n results = searchEngine.search(search_query)\n found_count = len(results)\n\n # Saving statistics (only if we don't come from a document)\n if not from_document:\n # Updating Query table\n analytics_data.new_query(search_query, n_queries)\n for ranking, result in enumerate(results):\n analytics_data.update_doc(result.id, search_query, ranking+1)\n\n return render_template('results.html', results_list=results, page_title=\"Results\", found_counter=found_count, query=search_query)\n\n\n@app.route('/doc_details', methods=['GET','POST'])\ndef doc_details():\n global last_clicked_doc # Global variable used\n\n # Updating Click table\n clicked_doc_id = request.args[\"id\"]\n last_clicked_doc = (clicked_doc_id, time.time()) # For later computing of dwell time\n click_date = datetime.now()\n click_rank = int(request.args[\"rank\"])\n analytics_data.new_click(clicked_doc_id, click_date, click_rank)\n\n #Obtaining the document object\n document = parseTweet(tweets[clicked_doc_id], clicked_doc_id, click_rank)\n\n # Restarting the timer for computing dwell time later\n analytics_data.new_dwell(clicked_doc_id)\n query = request.args[\"query\"]\n last_clicked_doc = (clicked_doc_id, time.time())\n\n return render_template('doc_details.html',page_title=\"Document Details\",doc=document, query=query)\n\n\n@app.route('/stats', methods=['GET'])\ndef analytics():\n \"\"\"\n Dashboard\n \"\"\"\n update_plots(analytics_data) # Generating plots in computer\n return render_template('dashboard.html')\n\n@app.route('/sentiment')\ndef sentiment_form():\n return render_template('sentiment.html')\n\n\n@app.route('/sentiment', methods=['POST'])\ndef sentiment_form_post():\n text = request.form['text']\n nltk.download('vader_lexicon')\n from nltk.sentiment.vader import SentimentIntensityAnalyzer\n sid = SentimentIntensityAnalyzer()\n score = ((sid.polarity_scores(str(text)))['compound'])\n return render_template('sentiment.html', score=score)\n\n\nif __name__ == \"__main__\":\n app.run(port=\"8088\", host=\"0.0.0.0\", threaded=False, 
debug=True)\n","repo_name":"krakgma2000/IRWA-2021-final-project-team2-final","sub_path":"web_app.py","file_name":"web_app.py","file_ext":"py","file_size_in_byte":3831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73985504195","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport webapp2\nimport logging\nfrom google.appengine.ext import ndb\nfrom models.fermat_primality_test_task import FermatPrimalityTestWorkerTask, FermatPrimalityTestTask\n\nclass MainHandler(webapp2.RequestHandler):\n def get(self):\n count = FermatPrimalityTestTask.query(FermatPrimalityTestTask.parent_task==None).count()\n self.response.headers['content-type'] = 'text/html'\n new_task_url = 'http://{}/create_task'.format(self.request.host)\n if count == 0:\n self.response.write('There is no task yet. Create one at here'.format(new_task_url))\n else:\n tasks, cursor, more = FermatPrimalityTestTask.query(\n FermatPrimalityTestTask.parent_task==None\n ).order(-FermatPrimalityTestTask.ctime).fetch_page(1000)\n self.response.write('
\n    There are {count} task(s).\n\n    '.format(count=count))\n self.response.write('\n\n    Create new one at here.'.format(new_task_url))\n for t in tasks:\n self.response.write('\n\n    {id}\n\n    '.format(host=self.request.host, id=t.key.id(), urlsafe_id=t.key.urlsafe()))\n\n def create_task_get(self):\n self.response.headers['content-type'] = 'text/html'\n self.response.write(FORM)\n\n def create_task_post(self):\n try:\n prime = int(self.request.get('prime'), 10)\n except Exception as e:\n logging.exception(e)\n self.abort(400, 'Invalid input')\n logging.debug(prime)\n inputs = {\n 'prime': prime,\n }\n #callback_url = 'http://' + self.request.host + '/handle_task_complete'\n callback_url = ''\n new_task = FermatPrimalityTestTask(inputs=inputs, parent_task=None, results=None, callback_url=callback_url)\n new_task.put()\n new_task.run()\n task_url = 'http://{host}/tasks?task_id={urlsafe_id}'.format(host=self.request.host, urlsafe_id=new_task.key.urlsafe())\n self.response.headers['content-type'] = 'text/html'\n self.response.write(NEW_TASK.format(task_id=new_task.key.id(), task_url=task_url))\n\n def handle_fermat_callback(self):\n ''' The callback handler for the FermatPrimalityTestTask '''\n task_id = self.request.get('task_id', '').strip()\n task_key = ndb.Key(urlsafe=task_id)\n task = task_key.get()\n if task is None:\n self.abort(404, 'The task does not exist. really?')\n logging.debug(task.inputs)\n logging.debug(task.results)\n\n @ndb.transactional(retries=10)\n def update_parent_task_finished(self, parent_task_key, val):\n parent_task = parent_task_key.get()\n parent_task.num_finished += val\n parent_task.put()\n return True\n\n #def handle_fermat_worker_callback(self):\n # task_id = self.request.get('task_id', '').strip()\n # task_key = ndb.Key(urlsafe=task_id)\n # task = task_key.get()\n # if task is None:\n # self.abort(404, 'The task does not exist. really?')\n # logging.debug(task.inputs)\n # logging.debug(task.results)\n # # check if all other workers are done too.\n # parent_task = task.parent_task.get()\n # if parent_task is None:\n # self.abort(404, 'The parent task does not exist.')\n\n # # If we are unlucky enough, this check can run multiple times in\n # # parallel. Perhaps it is important to know it. In this case, we don't\n # # have any problem running this multiple times. just silly.\n # self.update_parent_task_finished(task.parent_task, 1)\n\n # parent_task = task.parent_task.get()\n\n # if parent_task.num_finished == parent_task.num_subtasks:\n # is_prime = True\n # for task in parent_task.subtasks:\n # if task.results['is_prime'] is False:\n # is_prime = False\n # break\n # parent_task.results = {'is_prime': is_prime}\n # parent_task.put()\n # parent_task.callback()\n # else:\n # # fine then. we wait for another callback.\n # pass\n \n def show_task(self):\n task_id = self.request.get('task_id', '').strip()\n task_key = ndb.Key(urlsafe=task_id)\n task = task_key.get()\n if task is None:\n self.abort(404, 'The task does not exist. really?')\n self.response.headers['content-type'] = 'text/html'\n logging.debug(task.key.id())\n subtasks, cursor, more = FermatPrimalityTestWorkerTask.query(\n FermatPrimalityTestWorkerTask.parent_task==task_key).fetch_page(1000)\n subtasks_htmls = [TASK_DETAIL.format(task=subtask, subtasks_html=None, total=None, done=None, percent=None) for subtask in subtasks]\n subtasks_html = '\n    '.join(subtasks_htmls)\n total = len(subtasks)\n done = len([True for subtask in subtasks if subtask.results is not None])\n percent = '{:.2f}'.format(100 * float(done) / total)\n self.response.write(TASK_DETAIL.format(task=task, subtasks_html=subtasks_html, total=total, done=done, percent=percent))\n \n\n\napp = webapp2.WSGIApplication([\n webapp2.Route('/create_task', handler='main.MainHandler:create_task_get', methods=['GET']),\n webapp2.Route('/create_task', handler='main.MainHandler:create_task_post', methods=['POST']),\n webapp2.Route('/handle_task_complete', handler='main.MainHandler:handle_fermat_callback', methods=['POST']),\n webapp2.Route('/handle_fermat_worker', handler='main.MainHandler:handle_fermat_worker_callback', methods=['POST']),\n webapp2.Route('/tasks', handler='main.MainHandler:show_task', methods=['GET']),\n ('/*', MainHandler),\n ], debug=True)\n\n\n\n# Some template strings for HTML...\n\n\n\nFORM = '''\n\n \n \n \n \n\n\n    Create a new task to test a number is prime.\n\n    \n\n    \n \n \n\n    \n \n'''\n\nNEW_TASK = '''\n\n \n \n \n \n\n\n    A new task with ID = {task_id} is created.\n\n    \n\n\n    You can check the task progress in here.\n\n    \n \n'''\n\nTASK_DETAIL = '''\n\n \n \n \n \n\n\n    Task: {task.key}\n\n    \n\n\n    Inputs\n\n    \n\n    {task.inputs}\n    \n\n\n    Results\n\n    \n\n    {task.results}\n    \n\n\n    Subtasks\n\n    \n\n\n    Progress\n\n    \n\n\n    {done} / {total} = {percent}%\n\n    \n\n    \n {subtasks_html}\n\n    \n \n'''\n","repo_name":"edwardfung123/task-model-gae","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"42861199920","text":"import sympy as sym\r\n\r\n# Define the symbols x1 and x2\r\nx1, x2 = sym.symbols('x1 x2')\r\n\r\n# Define the function\r\nf = (x1 - 1) ** 2 + (x2 - 1) ** 2 - x1 * x2\r\n\r\n# Compute the gradient\r\ngradient = [sym.diff(f, x) for x in (x1, x2)]\r\n\r\n# Compute the Hessian\r\nhessian = [[sym.diff(f, x1_, x2_) for x2_ in (x1, x2)] for x1_ in (x1, x2)]\r\n\r\n# Solve for the stationary points (dict=True returns one {symbol: value} dict per point)\r\ndurgan_noktalar = sym.solve(gradient, (x1, x2), dict=True)\r\n\r\n# Compute the determinant of the Hessian\r\nhessian_belirleyici = hessian[0][0] * hessian[1][1] - hessian[0][1] * hessian[1][0]\r\n\r\n# Classify each stationary point\r\nfor durgan_nokta in durgan_noktalar:\r\n # If the determinant of the Hessian is 0, the test is inconclusive\r\n if hessian_belirleyici == 0:\r\n print(f\"Stationary point: {durgan_nokta}, type: unknown (Hessian determinant is zero)\")\r\n else:\r\n # Otherwise apply the second-derivative test at this point\r\n hessian_test = [[entry.subs(durgan_nokta) for entry in row] for row in hessian]\r\n if hessian_test[0][0] > 0 and hessian_belirleyici > 0:\r\n print(f\"Stationary point: {durgan_nokta}, type: local minimum\")\r\n elif hessian_test[0][0] < 0 and hessian_belirleyici > 0:\r\n print(f\"Stationary point: {durgan_nokta}, type: local maximum\")\r\n else:\r\n print(f\"Stationary point: {durgan_nokta}, type: saddle point\")","repo_name":"Hatice48/Python","sub_path":"python.py/duragan_nokt.py","file_name":"duragan_nokt.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"70865014915","text":"import matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.tri as tri\nimport time\nimport torch\nfrom torch import nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport torch.optim as optim \nimport pdb\nimport os\nimport sys\nsys.path.append(\"..\")\nfrom utils.utils import FullyConnectedNet,load_mesh,load_pretrained_model,evaluate,plot_contourf_vorticity,DatasetFromTxt,generate_data\nfrom utils.utils import save_gradient,gradient,Compute_gradient, StatGrad,normalizeGrad,adjustGrad\nimport logging\nglobal_nu=0.05\n\nclass MultiScaleNet(nn.Module):\n def __init__(self, input_dim, output_dim, hidden_size = 32, nb_heads=1):\n super(MultiScaleNet, self).__init__()\n self.scalenet01 = FullyConnectedNet(input_dim, hidden_size, 1) # input layer\n self.scalenet02 = FullyConnectedNet(input_dim, hidden_size, 1)\n self.scalenet03 = FullyConnectedNet(input_dim, hidden_size, 1) # hidden layer\n self.scalenet04 = FullyConnectedNet(input_dim, hidden_size, 1) # hidden layer\n self.scalenet05 = FullyConnectedNet(input_dim, hidden_size, 1) # hidden layer\n self.scalenet06 = FullyConnectedNet(input_dim, hidden_size, 1) # hidden layer\n self.scalenet07 = FullyConnectedNet(input_dim, hidden_size, 1) # hidden layer\n self.scalenet08 = FullyConnectedNet(input_dim, hidden_size, 1) # hidden layer\n self.predict = torch.nn.Linear(8, output_dim) # output layer\n\n def forward(self, x):\n # activation function for hidden layer\n alpha = 2 \n y01 = self.scalenet01(x)\n y02 = self.scalenet02(alpha**1.0*x)\n y03 = self.scalenet03(alpha**2.0*x)\n y04 = self.scalenet04(alpha**3.0*x)\n 
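# (Editor's note, not in the original source) each scale head k receives the same input multiplied by alpha**k = 2**k, so successive heads see progressively higher-frequency copies of the coordinates, and the final torch.nn.Linear(8, output_dim) layer mixes the eight heads; the powers 4..7 below continue the pattern started above.\n 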
y05 = self.scalenet05(alpha**4.0*x)\n y06 = self.scalenet06(alpha**5.0*x)\n y07 = self.scalenet07(alpha**6.0*x)\n y08 = self.scalenet08(alpha**7.0*x)\n y_output1=torch.cat((y01,y02,y03,y04,y05,y06,y07,y08), 1)\n yout = (self.predict(y_output1)) # linear output\n return yout\n\ndef train(args, model_list, device, interior_train_loader, \n dirichlet_bdry_training_data_loader, coarse_data_loader,\n optimizer, epoch,lamda,beta,gamma): \n retloss=[]\n bdryiter= iter(dirichlet_bdry_training_data_loader)\n coarseiter = iter(coarse_data_loader)\n for batch_idx, (data, target) in enumerate(interior_train_loader): # iterate a batch of data from the DataLoader\n x =Variable(data, requires_grad=True ) \n f =Variable(target[:, 0:2], requires_grad=False)\n divf =Variable(target[:, 2], requires_grad=False)\n divu_RHS =Variable(target[:, 3], requires_grad=False)\n\n bdrydata, bdrytarget=bdryiter.next()\n bdry_x =Variable( bdrydata, requires_grad=False)\n bdry_velocity=Variable(bdrytarget, requires_grad=False)\n loss_total,res,bound = ResLoss_upw(x,bdry_x,f,divu_RHS,divf,bdry_velocity,beta,lamda,model_list,epoch)\n optimizer.zero_grad()\n loss_total.backward()\n optimizer.step()\n lamda_temp = lamda\n\n if batch_idx % args.log_interval == 0: # output logs\n logging.info('Train Epoch: {:>5d} [{:>6d}/{} ({:3.0f}%)] Loss of res: {:.6f} Loss of bound: {:.6f}'.format(\n epoch, batch_idx * len(data), len(interior_train_loader.dataset),\n 100. * batch_idx / len(interior_train_loader), res.item(),bound.item()))\n if batch_idx==0:\n retloss=loss_total\n retbound = 0\n retres = 0\n retcoar_loss = 0\n\n return retloss, lamda_temp, retbound, retres, retcoar_loss\ndef ResLoss_upw(x,bdry_x,f,divu_RHS,divf,bdry_velocity,beta,lamda,model_list,epoch):\n \n interior_u_predict_old = model_list[0](x) \n interior_u_predict_new = model_list[1](x) \n interior_p_predict = model_list[2](x)\n bdry_u_predict = model_list[1](bdry_x)\n \n # calculate the derivatives:\n # the size of grad is (batch_size, 2) and each row is the (\\partial_x u, \\partial_y u)\n grad_u1_old = torch.autograd.grad(interior_u_predict_old[:, 0], x, create_graph=True, grad_outputs=[torch.ones_like(interior_u_predict_old[:, 0])])\n grad_u2_old = torch.autograd.grad(interior_u_predict_old[:, 1], x, create_graph=True, grad_outputs=[torch.ones_like(interior_u_predict_old[:, 1])])\n grad_u1_new = torch.autograd.grad(interior_u_predict_new[:, 0], x, create_graph=True, grad_outputs=[torch.ones_like(interior_u_predict_new[:, 0])])\n grad_u2_new = torch.autograd.grad(interior_u_predict_new[:, 1], x, create_graph=True, grad_outputs=[torch.ones_like(interior_u_predict_new[:, 1])])\n \n grad_w11= torch.autograd.grad(grad_u1_new[0][:, 0], x, create_graph=True, grad_outputs=[torch.ones_like(grad_u1_new[0][:, 0])])\n grad_w12= torch.autograd.grad(grad_u1_new[0][:, 1], x, create_graph=True, grad_outputs=[torch.ones_like(grad_u1_new[0][:, 1])])\n grad_w21= torch.autograd.grad(grad_u2_new[0][:, 0], x, create_graph=True, grad_outputs=[torch.ones_like(grad_u1_new[0][:, 0])])\n grad_w22= torch.autograd.grad(grad_u2_new[0][:, 1], x, create_graph=True, grad_outputs=[torch.ones_like(grad_u1_new[0][:, 1])])\n\n grad_p = torch.autograd.grad(interior_p_predict, x, create_graph=True, grad_outputs=[torch.ones_like(interior_p_predict)])\n pxx = torch.autograd.grad(grad_p[0][:, 0], x, create_graph=True, grad_outputs=[torch.ones_like(grad_p[0][:, 0])])\n pyy = torch.autograd.grad(grad_p[0][:, 1], x, create_graph=True, grad_outputs=[torch.ones_like(grad_p[0][:, 1])])\n\n u_grad_u1_l1 = 
torch.sum(interior_u_predict_old*grad_u1_new[0], dim=1)\n u_grad_u2_l1 = torch.sum(interior_u_predict_old*grad_u2_new[0], dim=1)\n u_grad_u1_l2 = torch.sum(interior_u_predict_new*grad_u1_old[0], dim=1)\n u_grad_u2_l2 = torch.sum(interior_u_predict_new*grad_u2_old[0], dim=1)\n\n u_grad_u1 = .5*(u_grad_u1_l1 + u_grad_u1_l2)\n u_grad_u2 = .5*(u_grad_u2_l1 + u_grad_u2_l2)\n\n\n divu = grad_u1_new[0][:,0]+grad_u2_new[0][:,1]\n \n divw1=grad_w11[0][:, 0]+grad_w12[0][:, 1]\n divw2=grad_w21[0][:, 0]+grad_w22[0][:, 1]\n\n div_grad_p = pxx[0][:, 0]+pyy[0][:, 1]+2*(-grad_u2_new[0][:,1]*grad_u1_new[0][:,0]+grad_u1_new[0][:,1]*grad_u2_new[0][:,0])\n \n loss_function = nn.MSELoss()\n \n loss1 = loss_function(beta*u_grad_u1-global_nu*divw1+grad_p[0][:,0], f[:,0])\n loss2 = loss_function(beta*u_grad_u2-global_nu*divw2+grad_p[0][:,1], f[:,1])\n loss4 = loss_function(bdry_u_predict, bdry_velocity[:, 0:2])\n loss5 = loss_function(divu, divu_RHS)\n loss6 = loss_function(div_grad_p,divf)\n \n res = (loss1 + loss2) + loss5 + loss6\n bound = (loss4) \n loss = beta * res + lamda * bound \n \n return loss,res,bound\n\n","repo_name":"LeeLizuoLiu/linearized-learning","sub_path":"msnn_mix_40_35/NS_msnn.py","file_name":"NS_msnn.py","file_ext":"py","file_size_in_byte":6810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25470406533","text":"\"\"\" \r\n Interaction with postgres database using SQLDatabaseChain:\r\n\r\n- This script allows interaction with a PostgreSQL database using the SQLDatabaseChain class from the langchain\r\n library. \r\n- It prompts the user for input, generates a PostgreSQL query based on the input, executes the query\r\n on the specified database, and displays the result.\r\n- It utilizes the OpenAI, SQLDatabase, and SQLDatabaseChain classes from the langchain library.\r\n\r\n\"\"\"\r\n\r\nfrom langchain import OpenAI, SQLDatabase, SQLDatabaseChain\r\nimport environ\r\n\r\n# Setting up env variables\r\nenv = environ.Env()\r\nenviron.Env.read_env()\r\n\r\n# Set up the API key \r\nAPI_KEY = env('apikey')\r\n\r\n# Configure database variables(replace them )\r\n# db_user = \"db_user\"\r\n# db_pass = \"reading from env\"\r\ndb_host = \"db_host\"\r\ndb_name = \"db_name\"\r\n\r\n# Connect to the PostgreSQL database\r\n\r\n# db = SQLDatabase.from_uri(f\"postgresql+psycopg2://postgres:{db_user}:{db_pass}@{db_host}/{db_name}\")\r\ndb = SQLDatabase.from_uri(f\"postgresql+psycopg2://postgres:{env('dbpass')}@{db_host}/{db_name}\")\r\n\r\n# setup language model\r\nllm = OpenAI(temperature=0, openai_api_key=API_KEY)\r\n\r\n# Create query instruction\r\nQUERY = \"\"\"\r\nGiven an input question, first create a syntactically correct postgresql query to run, then look at the results\r\nof the query and return the answer.Use the following format:\r\n\r\nQuestion: \"Question here\"\r\nSQLQuery: \"SQL Query to run\"\r\nSQLResult: \"Result of the SQLQuery\"\r\nAnswer: \"Final answer here\"\r\n\r\n{question}\r\n\"\"\"\r\n\r\n# Setup the database chain\r\ndb_chain = SQLDatabaseChain(llm=llm, database=db, verbose=True)\r\n\r\ndef get_prompt():\r\n \"\"\"\r\n Prompt the user for input and interact with the database.\r\n \"\"\"\r\n print(\"Type 'exit' to quit\")\r\n\r\n while True:\r\n prompt = input(\"Enter a prompt: \")\r\n\r\n if prompt.lower() == 'exit':\r\n print('Exiting...')\r\n break\r\n else:\r\n try:\r\n question = QUERY.format(question=prompt)\r\n print(db_chain.run(question))\r\n except Exception as e:\r\n print(e)\r\n\r\n# Call the function to 
start the interaction with the user\r\nget_prompt()\r\n","repo_name":"sdeadlocker/Interact-with-DB","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39218402046","text":"from transformers import LlamaTokenizer\nfrom optimum.intel.openvino import OVModelForCausalLM\nimport time\nimport argparse\nfrom pathlib import Path\n\nparser = argparse.ArgumentParser(add_help=False)\nparser.add_argument('-h',\n '--help',\n action='help',\n help='Show this help message and exit.')\nparser.add_argument('-m',\n '--model_id',\n required=True,\n type=str,\n help='Required. hugging face model id')\nparser.add_argument('-p',\n '--prompt',\n required=True,\n type=str,\n help='Required. prompt sentence')\nparser.add_argument('-l',\n '--max_sequence_length',\n default=128,\n required=False,\n type=int,\n help='maximun lengh of output')\nparser.add_argument('-d',\n '--device',\n default='CPU',\n required=False,\n type=str,\n help='device for inference')\nargs = parser.parse_args()\n\nmodel_path = Path('../quantized_model')\n\nif model_path.exists():\n print(\"--- using local model ---\")\n ov_model = OVModelForCausalLM.from_pretrained(model_path,\n compile=False,\n device=args.device)\nelse:\n print(\"--- using remote model ---\")\n ov_model = OVModelForCausalLM.from_pretrained(args.model_id,\n compile=False,\n device=args.device,\n export=True)\n ov_model.save_pretrained(model_path)\n\nov_model.compile()\ntokenizer = LlamaTokenizer.from_pretrained(args.model_id)\n\ninputs = tokenizer(args.prompt, return_tensors=\"pt\")\nstart = time.perf_counter()\ngenerate_ids = ov_model.generate(inputs.input_ids,\n max_length=args.max_sequence_length)\nend = time.perf_counter()\n\nprint(\" --- text decoding --- \")\noutput_text = tokenizer.batch_decode(generate_ids,\n skip_special_tokens=True,\n clean_up_tokenization_spaces=False)[0]\nprint(f\"Generation took {end - start:.3f} s on {args.device}\")\nprint(f\"Response: {output_text}\")\n","repo_name":"OpenVINO-dev-contest/llama.openvino","sub_path":"ir_pipeline/generate_op.py","file_name":"generate_op.py","file_ext":"py","file_size_in_byte":2448,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"12322402077","text":"from collections import Counter\nfrom functools import cache\nfrom itertools import takewhile\n\nfrom more_itertools.recipes import pairwise\n\nfrom aoc_2020.utils.io import stream_lines\n\n\n@cache\ndef count_paths(adaptors, start=0):\n if start == len(adaptors) - 1:\n return 1\n\n return sum(\n count_paths(adaptors, i)\n for i in takewhile(\n lambda n: adaptors[n] - adaptors[start] <= 3,\n range(start + 1, len(adaptors))\n )\n )\n\n\ndef main():\n adapters = sorted([int(n) for n in stream_lines(day=10)])\n adapters = (0, *adapters, adapters[-1] + 3)\n\n # pt1\n diffs = Counter(y - x for x, y in pairwise(adapters))\n print(diffs[1] * diffs[3])\n\n # pt 2\n paths = count_paths(adapters)\n print(paths)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"jpincott/advent-of-code-2020","sub_path":"aoc_2020/puzzles/day_10.py","file_name":"day_10.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10410766899","text":"from ..io.dataset import import_dataset, Dataset\n\nfrom pathlib import Path\nimport numpy as np\nfrom mne.io import read_raw_gdf\nfrom mne import 
events_from_annotations\nfrom mne.preprocessing import regress_artifact\n\ndef gdf_to_np(gdf_f, mapping={\"768\": -1, \"769\": 0, \"770\": 1, \"771\": 2, \"772\": 3}, ch_names=None, np_dtype=np.float32, resample_freq=None, clean=True):\n \"\"\"Loads a .GDF file into a list of NumPy arrays for events and labels.\n\n :param gdf_f: The .GDF file to load from.\n :type gdf_f: string or pathlib.Path\n :param ch_names: A dictionary mapping old channel names to new ones.\n :type ch_names: dict or callable\n :param np_dtype: Type of NumPy array\n :type np_dtype: type\n :return: Returns a list of NumPy arrays, an array of labels, a dict with mapping, and channel names\n \"\"\"\n gdf = read_raw_gdf(gdf_f, preload=True, verbose=False)\n\n if clean:\n gdf.set_channel_types({'EOG-left': 'eog', 'EOG-central': 'eog', 'EOG-right': 'eog'})\n gdf, _ = regress_artifact(gdf)\n\n if resample_freq:\n events, _ = events_from_annotations(gdf, event_id=mapping, verbose=False)\n gdf, _ = gdf.resample(resample_freq, events=events)\n\n if ch_names: gdf.rename_channels(ch_names)\n ch_names = gdf.ch_names\n\n data, _ = gdf[:, :]\n data = np_dtype(data)\n\n if clean:\n data = data[:22]\n ch_names = ch_names[:22]\n\n start_times = events_from_annotations(gdf, event_id={\"768\": -1}, verbose=False)[0][:,0]\n data = np.split(data, start_times, axis=1)[1:] #first split is before first trial\n\n mapping = mapping\n labels = events_from_annotations(gdf, event_id=mapping, verbose=False)[0][:,2]\n\n return data, labels, mapping, ch_names\n\ndef import_bci_iv_2a(targ_dir, orig_dir):\n targ_path, orig_path = Path(targ_dir).expanduser(), Path(orig_dir).expanduser()\n dataset_extenions = {'.gdf': gdf_to_np}\n import_dataset(orig_path, targ_path, dataset_extenions=dataset_extenions, dataset_name=\"bci_iv_2a\")","repo_name":"markt/neuroIN","sub_path":"src/neuroIN/datasets/bci_iv.py","file_name":"bci_iv.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
{"seq_id":"33977297373","text":"#!/usr/bin/env python3\n\nimport subprocess\n\ntry:\n ret = subprocess.call(\n \"ping -c1 8.8.8.8\",\n shell=True,\n stdout=open(\"/dev/null\", \"w\"),\n stderr=subprocess.STDOUT,\n )\n s_ret = str(ret)\n if s_ret == \"0\":\n print(\"Server is available\")\n else:\n print(\"Server is unavailable\")\nexcept:\n raise\n","repo_name":"dwashington102/python_course","sub_path":"python_scripts-how-to/working_with-subprocess-ping.py","file_name":"working_with-subprocess-ping.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"9588888574","text":"# Techniques for outlier detection of speeds. 
Each of these returns a speed threshold that \n# can be used with outlier detection techniques.\n\n# Standard imports\nimport logging\n\nlogging.basicConfig(level=logging.DEBUG)\n\nclass BoxplotOutlier(object):\n MINOR = 1.5\n MAJOR = 3\n def __init__(self, multiplier = MAJOR, ignore_zeros = False):\n self.multiplier = multiplier\n self.ignore_zeros = ignore_zeros\n\n def get_threshold(self, with_speeds_df):\n if self.ignore_zeros:\n df_to_use = with_speeds_df[with_speeds_df.speed > 0]\n else:\n df_to_use = with_speeds_df\n quartile_vals = df_to_use.quantile([0.25, 0.75]).speed\n logging.debug(\"quartile values are %s\" % quartile_vals)\n iqr = quartile_vals.iloc[1] - quartile_vals.iloc[0]\n logging.debug(\"iqr %s\" % iqr)\n return quartile_vals.iloc[1] + self.multiplier * iqr\n\nclass SimpleQuartileOutlier(object):\n def __init__(self, quantile = 0.99, ignore_zeros = False):\n self.quantile = quantile\n self.ignore_zeros = ignore_zeros\n\n def get_threshold(self, with_speeds_df):\n if self.ignore_zeros:\n df_to_use = with_speeds_df[with_speeds_df.speed > 0]\n else:\n df_to_use = with_speeds_df\n return df_to_use.speed.quantile(self.quantile)\n\n","repo_name":"epai/e-mission-server","sub_path":"emission/analysis/intake/cleaning/cleaning_methods/speed_outlier_detection.py","file_name":"speed_outlier_detection.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"19040814265","text":"import dash_bootstrap_components as dbc\nimport dash_tabulator\nfrom dash import callback, dcc, html\nfrom dash.dependencies import Input, Output, State\nfrom dash.exceptions import PreventUpdate\n\nfrom service.account_positions import AccountPositions\nfrom utils.functions import formatter_currency\nfrom utils.opstrat.basic_multi import multi_plotter\n\nlayout = dbc.Container(\n dbc.Spinner(\n [\n dbc.Row(\n dbc.Modal(\n [\n dbc.ModalHeader(\"Payoff\"),\n dbc.ModalBody(\n id=\"payoff-chart\",\n ),\n ],\n id=\"payoff-modal\",\n centered=True,\n size=\"xl\",\n ),\n ),\n dbc.Row(\n [\n dbc.Col(\n [\n dbc.Row(html.H4(children=\"BALANCE\")),\n html.Hr(className=\"my-2\"),\n dbc.Row(\n html.Div(id=\"balance-detail\"),\n ),\n ],\n width=10,\n ),\n dbc.Col(\n dbc.Button(\n \"Calculate Payoff\",\n color=\"primary\",\n id=\"payoff-btn\",\n className=\"mt-4\",\n ),\n class_name=\"text-end\",\n ),\n ],\n ),\n html.P(),\n dbc.Row(html.H4(children=\"PUTS\")),\n html.Hr(className=\"my-2\"),\n dbc.Row(\n [\n html.Div(id=\"dummy-output\"),\n html.Div(id=\"put-detail\"),\n html.Div(id=\"puts_table\"),\n ]\n ),\n html.P(),\n dbc.Row(html.H4(children=\"CALLS\")),\n html.Hr(className=\"my-2\"),\n dbc.Row(\n [\n html.Div(id=\"call-detail\"),\n html.Div(id=\"calls_table\"),\n ]\n ),\n html.P(),\n dbc.Row(html.H4(children=\"STOCKS\")),\n html.Hr(className=\"my-2\"),\n dbc.Row(\n [\n html.Div(id=\"stock-detail\"),\n html.Div(id=\"stocks_table\"),\n ]\n ),\n ],\n ),\n)\n\n\n@callback(\n [\n Output(\"puts_table\", \"children\"),\n Output(\"calls_table\", \"children\"),\n Output(\"stocks_table\", \"children\"),\n Output(\"balance-detail\", \"children\"),\n Output(\"put-detail\", \"children\"),\n Output(\"call-detail\", \"children\"),\n Output(\"stock-detail\", \"children\"),\n ],\n [\n [Input(\"url\", \"pathname\")],\n ],\n)\ndef on_button_click(n):\n \"\"\"Display account summary and positions tables.\n\n Retrieves account balance, put positions, call positions, and stock positions.\n Renders the data in Tabulator tables and summary alerts.\n\n 
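Example (illustrative, added by the editor; the names come from this module):\n account = AccountPositions()\n df_puts = account.get_put_positions() # columns such as TICKER, QTY, STRIKE PRICE\n\n 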
Returns:\n puts_table (DashTabulator): Put positions table\n calls_table (DashTabulator): Call positions table\n stocks_table (DashTabulator): Stock positions table\n balance_detail (html.Div): Account balance summary alert\n put_detail (html.Div): Put positions summary alert\n call_detail (html.Div): Call positions summary alert\n stock_detail (html.Div): Stock positions summary alert\n \"\"\"\n account = AccountPositions()\n balance = account.balance\n df_puts = account.get_put_positions()\n df_calls = account.get_call_positions()\n df_stocks = account.get_stock_positions()\n puts_count = df_puts.shape[0]\n calls_count = df_calls.shape[0]\n stocks_count = df_stocks.shape[0]\n puts_cash = formatter_currency(df_puts[\"COST\"].sum())\n puts_maintenance = formatter_currency(df_puts[\"MARGIN\"].sum())\n calls_maintenance = formatter_currency(df_calls[\"MARGIN\"].sum())\n stock_value = (df_stocks[\"MARK\"] * df_stocks[\"QTY\"]).sum()\n format_stock_value = formatter_currency(stock_value)\n stock_cost = (df_stocks[\"AVG COST\"] * df_stocks[\"QTY\"]).sum()\n format_stock_cost = formatter_currency(stock_cost)\n stock_profit = formatter_currency(stock_value - stock_cost)\n stocks_maintenance = formatter_currency(df_stocks[\"MARGIN\"].sum())\n\n tabulator_options = {\n \"selectable\": \"true\",\n }\n\n puts_dt = (\n dash_tabulator.DashTabulator(\n id=\"put-table\",\n data=df_puts.to_dict(\"records\"),\n options=tabulator_options,\n columns=[\n {\"title\": \"UNDERLYING\", \"field\": \"TICKER\", \"headerFilter\": \"input\"},\n {\"title\": \"QTY\", \"field\": \"QTY\"},\n {\"title\": \"SYMBOL\", \"field\": \"SYMBOL\"},\n {\"title\": \"PREMIUM\", \"field\": \"PREMIUM\"},\n {\"title\": \"UNDERLYING PRICE\", \"field\": \"UNDERLYING PRICE\"},\n {\"title\": \"STRIKE\", \"field\": \"STRIKE PRICE\"},\n {\"title\": \"MARK\", \"field\": \"MARK\"},\n {\"title\": \"PURCHASE\", \"field\": \"PURCHASE PRICE\"},\n {\"title\": \"DAYS\", \"field\": \"DAYS\"},\n {\"title\": \"ITM\", \"field\": \"ITM\"},\n {\"title\": \"RETURNS\", \"field\": \"RETURNS\"},\n {\"title\": \"MARGIN\", \"field\": \"MARGIN\", \"visible\": False},\n {\"title\": \"THETA\", \"field\": \"THETA\"},\n {\"title\": \"DELTA\", \"field\": \"DELTA\"},\n {\"title\": \"COST\", \"field\": \"COST\", \"visible\": False},\n ],\n ),\n )\n calls_dt = (\n dash_tabulator.DashTabulator(\n id=\"call-table\",\n data=df_calls.to_dict(\"records\"),\n options=tabulator_options,\n columns=[\n {\"title\": \"UNDERLYING\", \"field\": \"TICKER\", \"headerFilter\": \"input\"},\n {\"title\": \"QTY\", \"field\": \"QTY\"},\n {\"title\": \"SYMBOL\", \"field\": \"SYMBOL\"},\n {\"title\": \"UNDERLYING PRICE\", \"field\": \"UNDERLYING PRICE\"},\n {\"title\": \"STRIKE\", \"field\": \"STRIKE PRICE\"},\n {\"title\": \"EXTRINSIC\", \"field\": \"EXTRINSIC\"},\n {\"title\": \"MARK\", \"field\": \"MARK\"},\n {\"title\": \"PURCHASE\", \"field\": \"PURCHASE PRICE\"},\n {\"title\": \"DAYS\", \"field\": \"DAYS\"},\n {\"title\": \"ITM\", \"field\": \"ITM\"},\n {\"title\": \"DELTA\", \"field\": \"DELTA\"},\n {\"title\": \"MARGIN\", \"field\": \"MARGIN\", \"visible\": False},\n {\"title\": \"THETA\", \"field\": \"THETA\", \"visible\": False},\n ],\n ),\n )\n stocks_dt = (\n dash_tabulator.DashTabulator(\n id=\"stock-table\",\n data=df_stocks.to_dict(\"records\"),\n options=tabulator_options,\n columns=[\n {\"title\": \"TICKER\", \"field\": \"TICKER\", \"headerFilter\": \"input\"},\n {\"title\": \"QTY\", \"field\": \"QTY\"},\n {\"title\": \"MARK\", \"field\": \"MARK\"},\n {\"title\": \"AVG COST\", 
\"field\": \"AVG COST\"},\n {\"title\": \"MARGIN\", \"field\": \"MARGIN\"},\n {\"title\": \"NET\", \"field\": \"NET\"},\n ],\n ),\n )\n\n return (\n puts_dt,\n calls_dt,\n stocks_dt,\n html.Div(\n [\n dbc.Alert(\n children=f\" Account Value:{formatter_currency(balance.accountValue)} Cash Balance:{formatter_currency(balance.marginBalance)} Maintenance:{formatter_currency(balance.maintenanceRequirement)}\",\n color=\"info\",\n ),\n ]\n ),\n html.Div(\n [\n dbc.Alert(\n children=f\"Total:{puts_count} Exposure:{puts_cash} Maintenance:{puts_maintenance}\",\n color=\"info\",\n ),\n ]\n ),\n html.Div(\n [\n dbc.Alert(\n children=f\"Total:{calls_count} Maintenance:{calls_maintenance}\",\n color=\"info\",\n ),\n ]\n ),\n html.Div(\n [\n dbc.Alert(\n children=f\"Total:{stocks_count} Value:{format_stock_value} Cost:{format_stock_cost} P/L: {stock_profit} Maintenance:{stocks_maintenance}\",\n color=\"info\",\n ),\n ]\n ),\n )\n\n\n@callback(\n [\n Output(\"payoff-chart\", \"children\"),\n Output(\"payoff-modal\", \"is_open\"),\n ],\n Input(\"payoff-btn\", \"n_clicks\"),\n [\n State(\"put-table\", \"multiRowsClicked\"),\n State(\"call-table\", \"multiRowsClicked\"),\n State(\"stock-table\", \"multiRowsClicked\"),\n ],\n prevent_initial_call=True,\n)\ndef display_output(n, put_trades, call_trades, stock_trades):\n \"\"\"Plot payoff diagram for selected positions.\n\n When payoff button is clicked, renders a payoff diagram based on currently\n selected rows in the positions tables.\n\n Args:\n n (int): Payoff button click\n put_trades (list): Selected put position rows\n call_trades (list): Selected call position rows\n stock_trades (list): Selected stock position rows\n\n Returns:\n chart (html.Div): Plotly graph with payoff diagram\n is_open (bool): Whether to open the modal\n \"\"\"\n spot_price = 0\n trades = []\n\n def populate_selected_trades(trade, op_type):\n nonlocal spot_price\n nonlocal trades\n if spot_price == 0: # populate just once for underlying\n spot_price = trade[\"UNDERLYING PRICE\"]\n\n trade_dict = {}\n trade_dict[\"op_type\"] = op_type\n trade_dict[\"op_pr\"] = trade[\"PURCHASE PRICE\"]\n trade_dict[\"strike\"] = trade[\"STRIKE PRICE\"]\n if trade[\"QTY\"] < 0: # Sell\n trade_dict[\"contract\"] = -trade[\"QTY\"]\n trade_dict[\"tr_type\"] = \"s\"\n else: # Buy\n trade_dict[\"contract\"] = trade[\"QTY\"]\n trade_dict[\"tr_type\"] = \"b\"\n trades.append(trade_dict)\n\n if n is None or (len(call_trades) == 0 and len(put_trades) == 0):\n raise PreventUpdate\n\n else:\n for trade in put_trades:\n populate_selected_trades(trade, op_type=\"p\")\n\n for trade in call_trades:\n populate_selected_trades(trade, op_type=\"c\")\n\n for trade in stock_trades:\n trade_dict = {}\n trade_dict[\"op_type\"] = \"e\"\n trade_dict[\"op_pr\"] = 0\n trade_dict[\"strike\"] = trade[\"AVG COST\"]\n trade_dict[\"contract\"] = int(trade[\"QTY\"] / 100)\n trade_dict[\"tr_type\"] = \"b\"\n trades.append(trade_dict)\n\n # Plot the trades\n fig = multi_plotter(spot=spot_price, op_list=trades)\n chart = html.Div(\n [\n dcc.Graph(figure=fig),\n ]\n )\n return chart, True\n","repo_name":"cool250/option-portfolio","sub_path":"src/view/portfolio.py","file_name":"portfolio.py","file_ext":"py","file_size_in_byte":10892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72287106433","text":"import bs4\nimport json\nimport time\nimport logging\nimport requests\nfrom requests.auth import HTTPProxyAuth\nimport random\nfrom threading import Thread\nfrom bs4 import 
element\n\nsizes = {'36': '118', '36 2/3': '144', '37 1/3': '143', '38': '126', '38 2/3': '142', '39 1/3': '141', '40': '42', '40 2/3': '140', '41 1/3': '139', '42': '125', '42 2/3': '138', '43 1/3': '137', '44': '111', '44 2/3': '136', '45 1/3': '135', '46': '110', '46 2/3': '134', '47 1/3': '133', '48': '150', '48 2/3': '132', 'COMING SOON': '273', }\n\n\ndef get_time():\n return int(round(time.time() * 1000))\n\n\nclass ATCThread(Thread):\n def __init__(self, queue, url, size=None, proxy=''):\n super(ATCThread, self).__init__()\n print('starting atc')\n self.session = requests.session()\n\n self.session.headers.update({'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36'})\n if proxy is not '':\n print(proxy)\n parts = proxy.split(':')\n print(parts)\n proxy = '%s:%s' % (parts[0], parts[1])\n print(proxy)\n proxyDict = {'http': proxy, 'https': proxy}\n # self.session.trust_env = False\n test = self.session.get('https://whatismyipaddress.com/', proxies=proxyDict)\n print(test.content.decode())\n\n self.url = url\n print(\"url: \" + self.url)\n self.queue = queue\n if not size:\n self.size = size\n\n self.log = logging.getLogger(\"ATCThread\")\n log_format = logging.Formatter(\"[%(asctime)s.%(msecs)03d] ATCThread: %(message)s\", \"%H:%M:%S\")\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(log_format)\n self.log.addHandler(console_handler)\n self.log.setLevel(logging.DEBUG)\n\n self.log.info(\"Created\")\n\n self.run_atc()\n\n def run_atc(self):\n self.log.info('Starting ATC')\n start = get_time()\n # get product page\n item_page = self.session.get(self.url)\n while not item_page and item_page.status_code is not '200':\n self.log.info(\"Failed to get item page \" + item_page.status_code)\n item_page = self.session.get(self.url)\n\n self.log.info(\"Retrieved product page - %dms\" % (get_time()-start))\n\n start = get_time()\n strain = bs4.SoupStrainer(id='product_addtocart_form')\n item_parse = bs4.BeautifulSoup(item_page.content, \"lxml\", parse_only=strain)\n form = item_parse.find('form')\n select = form.find('select')\n\n self.log.info(\"Page parsed - %dms\" % (get_time() - start))\n\n start = get_time()\n # find add url\n action = form['action'].replace(\"checkout/cart\", \"ajax/index\")\n # form payload\n payload = {'qty': '1', 'isAjax': '1'}\n for item in form.find_all('input'):\n payload[item['name']] = item['value']\n opts = form.find(id='options_502').contents\n\n if hasattr(self, 'size'):\n size_id = sizes[self.size]\n size = self.size\n for item in form.find(id='options_502').contents:\n if type(item) == element.Tag and item['data-simplesku'].split('-', 1)[-1] == self.size:\n size = item['data-simplesku'].split('-')[-1]\n size_id = item['id'].split('_')[-1]\n break\n else:\n rand = random.choice(opts[:-2])\n size = rand['data-simplesku'].split('-', 1)[-1]\n size_id = rand['id'].split('_')[-1]\n\n payload[select['name']] = size_id\n self.log.info('Selected size %s' % size)\n print(\"POST request created - {}ms {}\".format((get_time() - start), str(payload)))\n\n # stdin.readline()\n\n start = get_time()\n start_atc = get_time()\n atc_resp = self.session.post(action, data=payload)\n while atc_resp.status_code != '200' and json.loads(atc_resp.content)['status'] != 'SUCCESS':\n self.log.info('POST atc failed - {} - {}'.format(atc_resp.status_code, json.loads(atc_resp.content)['status']))\n time.sleep(1)\n start = get_time()\n atc_resp = self.session.post(action, 
data=payload)\n\n print(\"Added - %dms\" % (get_time() - start_atc))\n self.queue.put(self.session.cookies['frontend'])\n self.log.info('Added cookie to queue')\n","repo_name":"conradj5/AsphalGold-Checkout","sub_path":"atc.py","file_name":"atc.py","file_ext":"py","file_size_in_byte":4420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25852803354","text":"from datetime import date\nfrom unittest import TestCase\n\nfrom click.testing import CliRunner\n\nfrom regparser.history.versions import Version\nfrom regparser.index import entry\n\n\nclass VersionEntryTests(TestCase):\n def test_iterator(self):\n \"\"\"Versions should be correctly linearized\"\"\"\n with CliRunner().isolated_filesystem():\n path = entry.Version(\"12\", \"1000\")\n v1 = Version('1111', effective=date(2004, 4, 4),\n published=date(2004, 4, 4))\n v2 = Version('2222', effective=date(2002, 2, 2),\n published=date(2004, 4, 4))\n v3 = Version('3333', effective=date(2004, 4, 4),\n published=date(2003, 3, 3))\n (path / '1111').write(v1)\n (path / '2222').write(v2)\n (path / '3333').write(v3)\n\n self.assertEqual(['2222', '3333', '1111'], list(path))\n","repo_name":"cmc333333/regulations-parser","sub_path":"tests/index_entry_tests.py","file_name":"index_entry_tests.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"13134956858","text":"import logging\nimport sys\nfrom typing import Any, Callable, List, Optional, TypeVar\n\nimport click\nfrom colorama import Back, Fore, Style\n\nfrom . import args, errors\nfrom .document import ARCHIVE_SUBDIR, SAFE_EXTENSION\nfrom .isolation_provider.container import Container\nfrom .isolation_provider.dummy import Dummy\nfrom .isolation_provider.qubes import Qubes, is_qubes_native_conversion\nfrom .logic import DangerzoneCore\nfrom .util import get_version\n\nF = TypeVar(\"F\", bound=Callable[..., Any])\n\n\ndef print_header(s: str) -> None:\n click.echo(\"\")\n click.echo(Style.BRIGHT + s)\n\n\n@click.command()\n@click.option(\n \"--output-filename\",\n callback=args.validate_output_filename,\n help=f\"Default is filename ending with {SAFE_EXTENSION}\",\n)\n@click.option(\"--ocr-lang\", help=\"Language to OCR, defaults to none\")\n@click.option(\n \"--archive\",\n \"archive\",\n flag_value=True,\n help=f\"Archives the unsafe version in a subdirectory named '{ARCHIVE_SUBDIR}'\",\n)\n@click.option(\n \"--unsafe-dummy-conversion\", \"dummy_conversion\", flag_value=True, hidden=True\n)\n@click.option(\n \"--enable-timeouts / --disable-timeouts\",\n default=True,\n show_default=True,\n help=\"Enable/Disable timeouts during document conversion\",\n)\n@click.argument(\n \"filenames\",\n required=True,\n nargs=-1,\n type=click.UNPROCESSED,\n callback=args.validate_input_filenames,\n)\n@click.version_option(version=get_version(), message=\"%(version)s\")\n@errors.handle_document_errors\ndef cli_main(\n output_filename: Optional[str],\n ocr_lang: Optional[str],\n enable_timeouts: bool,\n filenames: List[str],\n archive: bool,\n dummy_conversion: bool,\n) -> None:\n setup_logging()\n\n if getattr(sys, \"dangerzone_dev\", False) and dummy_conversion:\n dangerzone = DangerzoneCore(Dummy())\n elif is_qubes_native_conversion():\n dangerzone = DangerzoneCore(Qubes())\n else:\n dangerzone = DangerzoneCore(Container(enable_timeouts=enable_timeouts))\n\n display_banner()\n if len(filenames) == 1 and output_filename:\n 
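# (Editor's note, not in the original) a custom output name is only unambiguous for a single input file, hence this branch; an illustrative invocation, assuming the console entry point is installed as dangerzone-cli: dangerzone-cli --output-filename safe.pdf document.pdf\n        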
dangerzone.add_document_from_filename(filenames[0], output_filename, archive)\n elif len(filenames) > 1 and output_filename:\n click.echo(\"--output-filename can only be used with one input file.\")\n exit(1)\n else:\n for filename in filenames:\n dangerzone.add_document_from_filename(filename, archive=archive)\n\n # Validate OCR language\n if ocr_lang:\n valid = False\n for lang in dangerzone.ocr_languages:\n if dangerzone.ocr_languages[lang] == ocr_lang:\n valid = True\n break\n if not valid:\n click.echo(\"Invalid OCR language code. Valid language codes:\")\n for lang in dangerzone.ocr_languages:\n click.echo(f\"{dangerzone.ocr_languages[lang]}: {lang}\")\n exit(1)\n\n # Ensure container is installed\n dangerzone.isolation_provider.install()\n\n # Convert the document\n print_header(\"Converting document to safe PDF\")\n\n dangerzone.convert_documents(ocr_lang)\n documents_safe = dangerzone.get_safe_documents()\n documents_failed = dangerzone.get_failed_documents()\n\n if documents_safe != []:\n print_header(\"Safe PDF(s) created successfully\")\n for document in documents_safe:\n click.echo(document.output_filename)\n\n if archive:\n print_header(\n f\"Unsafe (original) documents moved to '{ARCHIVE_SUBDIR}' subdirectory\"\n )\n\n if documents_failed != []:\n print_header(\"Failed to convert document(s)\")\n for document in documents_failed:\n click.echo(document.input_filename)\n sys.exit(1)\n else:\n sys.exit(0)\n\n\nargs.override_parser_and_check_suspicious_options(cli_main)\n\n\ndef setup_logging() -> None:\n class EndUserLoggingFormatter(logging.Formatter):\n \"\"\"Prefixes any non-INFO log line with the log level\"\"\"\n\n def format(self, record: logging.LogRecord) -> str:\n if record.levelno == logging.INFO:\n # Bypass formatter: print line directly\n return record.getMessage()\n else:\n return super().format(record)\n\n if getattr(sys, \"dangerzone_dev\", False):\n fmt = \"[%(levelname)-5s] %(message)s\"\n logging.basicConfig(level=logging.DEBUG, format=fmt)\n else:\n # prefix non-INFO log lines with the respective log type\n fmt = \"%(levelname)s %(message)s\"\n formatter = EndUserLoggingFormatter(fmt=fmt)\n ch = logging.StreamHandler()\n ch.setFormatter(formatter)\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n logger.addHandler(ch)\n\n\ndef display_banner() -> None:\n \"\"\"\n Raw ASCII art example:\n ╭──────────────────────────╮\n │ ▄██▄ │\n │ ██████ │\n │ ███▀▀▀██ │\n │ ███ ████ │\n │ ███ ██████ │\n │ ███ ▀▀▀▀████ │\n │ ███████ ▄██████ │\n │ ███████ ▄█████████ │\n │ ████████████████████ │\n │ ▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀ │\n │ │\n │ Dangerzone v0.1.5 │\n │ https://dangerzone.rocks │\n ╰──────────────────────────╯\n \"\"\"\n\n print(Back.BLACK + Fore.YELLOW + Style.DIM + \"╭──────────────────────────╮\")\n print(\n Back.BLACK\n + Fore.YELLOW\n + Style.DIM\n + \"│\"\n + Fore.LIGHTYELLOW_EX\n + Style.NORMAL\n + \" ▄██▄ \"\n + Fore.YELLOW\n + Style.DIM\n + \"│\"\n )\n print(\n Back.BLACK\n + Fore.YELLOW\n + Style.DIM\n + \"│\"\n + Fore.LIGHTYELLOW_EX\n + Style.NORMAL\n + \" ██████ \"\n + Fore.YELLOW\n + Style.DIM\n + \"│\"\n )\n print(\n Back.BLACK\n + Fore.YELLOW\n + Style.DIM\n + \"│\"\n + Fore.LIGHTYELLOW_EX\n + Style.NORMAL\n + \" ███▀▀▀██ \"\n + Fore.YELLOW\n + Style.DIM\n + \"│\"\n )\n print(\n Back.BLACK\n + Fore.YELLOW\n + Style.DIM\n + \"│\"\n + Fore.LIGHTYELLOW_EX\n + Style.NORMAL\n + \" ███ ████ \"\n + Fore.YELLOW\n + Style.DIM\n + \"│\"\n )\n print(\n Back.BLACK\n + Fore.YELLOW\n + Style.DIM\n + \"│\"\n + Fore.LIGHTYELLOW_EX\n + Style.NORMAL\n + \" ███ 
██████ \"\n + Fore.YELLOW\n + Style.DIM\n + \"│\"\n )\n print(\n Back.BLACK\n + Fore.YELLOW\n + Style.DIM\n + \"│\"\n + Fore.LIGHTYELLOW_EX\n + Style.NORMAL\n + \" ███ ▀▀▀▀████ \"\n + Fore.YELLOW\n + Style.DIM\n + \"│\"\n )\n print(\n Back.BLACK\n + Fore.YELLOW\n + Style.DIM\n + \"│\"\n + Fore.LIGHTYELLOW_EX\n + Style.NORMAL\n + \" ███████ ▄██████ \"\n + Fore.YELLOW\n + Style.DIM\n + \"│\"\n )\n print(\n Back.BLACK\n + Fore.YELLOW\n + Style.DIM\n + \"│\"\n + Fore.LIGHTYELLOW_EX\n + Style.NORMAL\n + \" ███████ ▄█████████ \"\n + Fore.YELLOW\n + Style.DIM\n + \"│\"\n )\n print(\n Back.BLACK\n + Fore.YELLOW\n + Style.DIM\n + \"│\"\n + Fore.LIGHTYELLOW_EX\n + Style.NORMAL\n + \" ████████████████████ \"\n + Fore.YELLOW\n + Style.DIM\n + \"│\"\n )\n print(\n Back.BLACK\n + Fore.YELLOW\n + Style.DIM\n + \"│\"\n + Fore.LIGHTYELLOW_EX\n + Style.NORMAL\n + \" ▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀ \"\n + Fore.YELLOW\n + Style.DIM\n + \"│\"\n )\n print(Back.BLACK + Fore.YELLOW + Style.DIM + \"│ │\")\n left_spaces = (15 - len(get_version()) - 1) // 2\n right_spaces = left_spaces\n if left_spaces + len(get_version()) + 1 + right_spaces < 15:\n right_spaces += 1\n print(\n Back.BLACK\n + Fore.YELLOW\n + Style.DIM\n + \"│\"\n + Style.RESET_ALL\n + Back.BLACK\n + Fore.LIGHTWHITE_EX\n + Style.BRIGHT\n + f\"{' '*left_spaces}Dangerzone v{get_version()}{' '*right_spaces}\"\n + Fore.YELLOW\n + Style.DIM\n + \"│\"\n )\n print(\n Back.BLACK\n + Fore.YELLOW\n + Style.DIM\n + \"│\"\n + Style.RESET_ALL\n + Back.BLACK\n + Fore.LIGHTWHITE_EX\n + \" https://dangerzone.rocks \"\n + Fore.YELLOW\n + Style.DIM\n + \"│\"\n )\n print(Back.BLACK + Fore.YELLOW + Style.DIM + \"╰──────────────────────────╯\")\n","repo_name":"freedomofpress/dangerzone","sub_path":"dangerzone/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":9468,"program_lang":"python","lang":"en","doc_type":"code","stars":2749,"dataset":"github-code","pt":"61"} +{"seq_id":"18193114567","text":"\r\nclass Empty(Exception):\r\n\tpass\r\n\r\n\r\n\r\nclass _DoublyLinkedListBase:\r\n\t\"\"\"A base class providing a doubly linked list representation.\"\"\"\r\n\r\n\r\n\tclass _Node:\r\n\t\t\"\"\" Lightweight, nonpublic class for storing a singly linked node \"\"\"\r\n\t\t__slots__ = '_element', '_prev', '_next' #streamline memory usage\r\n\r\n\t\tdef __init__(self, element, prev, next):\r\n\t\t\tself._element = element\r\n\t\t\tself._prev = prev\r\n\t\t\tself._next = next\r\n\r\n\r\n\tdef __init__(self):\r\n\t\tself._header = self._Node(None, None, None)\r\n\t\tself._trailer = self._Node(None, None, None)\r\n\t\tself._header._next = self._trailer\r\n\t\tself._trailer._prev = self._header\r\n\t\tself._size = 0\r\n\r\n\r\n\tdef __len__(self):\r\n\r\n\t\treturn self._size\r\n\r\n\r\n\tdef is_empty(self):\r\n\r\n\t\treturn self._size == 0\r\n\r\n\r\n\tdef _insert_between(self, e, predecessor, successor):\r\n\t\t\"\"\"Add element e between two existing nodes and return new node.\"\"\"\r\n\r\n\t\tnewest = self._Node(e, predecessor, successor)\r\n\t\tpredecessor._next = newest\r\n\t\tsuccessor._prev = newest\r\n\t\tself._size += 1\r\n\r\n\t\treturn newest\r\n\r\n\r\n\tdef _delete_node(self, node):\r\n\t\t\"\"\"Delete nonsentinel node from the list and return its element.\"\"\"\r\n\r\n\t\tpredecessor = node._prev\r\n\t\tsuccessor = node._next\r\n\t\tpredecessor._next = successor\r\n\t\tsuccessor._prev = predecessor\r\n\t\tself._size -= 1\r\n\r\n\t\telement = node._element\r\n\t\tnode._prev = node._next = node._element = None\r\n\t\treturn element\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass 
LinkedDeque(_DoublyLinkedListBase):\r\n\t\"\"\" Double-ended queue implementation based on a doubly linked list.\"\"\"\r\n\r\n\tdef first(self):\r\n\r\n\t\tif self.is_empty():\r\n\t\t\traise Empty(\"Deque is Empty\")\r\n\r\n\t\treturn self._header._next._element\r\n\r\n\r\n\tdef last(self):\r\n\r\n\t\tif self.is_empty():\r\n\t\t\traise Empty(\"Deque is Empty\")\r\n\r\n\t\treturn self._trailer._prev._element\r\n\r\n\r\n\tdef insert_first(self, e):\r\n\r\n\t\tself._insert_between(e, self._header, self._header._next)\r\n\r\n\r\n\tdef insert_last(self, e):\r\n\r\n\t\tself._insert_between(e, self._trailer._prev, self._trailer)\r\n\r\n\r\n\tdef delete_first(self):\r\n\r\n\t\tif self.is_empty():\r\n\t\t\traise Empty(\"Deque is Empty\")\r\n\r\n\t\treturn self._delete_node(self._header._next)\r\n\r\n\r\n\tdef delete_last(self):\r\n\r\n\t\tif self.is_empty():\r\n\t\t\traise Empty(\"Deque is Empty\")\r\n\r\n\t\treturn self._delete_node(self._trailer._prev)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nlinkedDQ = LinkedDeque()\r\n\r\nlinkedDQ.insert_first(1)\r\nlinkedDQ.insert_last(2)\r\nlinkedDQ.insert_last(3)\r\nprint(linkedDQ.first())\r\nprint(linkedDQ.last())\r\nprint(\"#\"*50)\r\n\r\n\r\n","repo_name":"aver-roes/DS-and-algorithms-python","sub_path":"doubly_linked_list.py","file_name":"doubly_linked_list.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
{"seq_id":"70617193794","text":"#!/usr/bin/python\r\n \r\nimport os\r\nimport zipfile\r\nfrom datetime import date, timedelta\r\n \r\nos.system(\"taskkill /f /im info.exe\") # kill any running QUIK processes\r\n\r\n \r\ndef main():\r\n\r\n    archive_dir = \"C:/ARCHIVE/\" # directory where the archives are stored\r\n    source_dir = \"C:/QUIK/TEST/Quik_DM\" # directory whose contents get archived\r\n \r\n    yesterday = date.today() - timedelta(days=1) # yesterday's date\r\n \r\n    archive_name = str(yesterday) + '_' + os.getlogin() + '.zip' # archive name: 'yesterday's date' + 'username'\r\n \r\n    zf = zipfile.ZipFile(archive_dir+archive_name, \"w\", zipfile.ZIP_DEFLATED)\r\n    for dirname, subdirs, files in os.walk(source_dir):\r\n        zf.write(dirname)\r\n        for filename in files:\r\n            zf.write(os.path.join(dirname, filename))\r\n    zf.close()\r\n    print('Archive has been created successfully')\r\n \r\nif __name__ == '__main__':\r\n    main()\r\n","repo_name":"alosipov/QUIK_archive","sub_path":"QUIK_archive.py","file_name":"QUIK_archive.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"36811045724","text":"'''\nProgram: a2.py\nName: \nDate:\nDesc:\n'''\n\n# Initialize the database of student grades\ndatabase = [\n    ['ICS4U', 'Assignment 1', 'Luke Skywalker', '3+'],\n    ['ICS4U', 'Assignment 1', 'Han Solo', '4-'],\n    ['SPH3U', 'Unit 1 Test', 'Leia Organa', '4'],\n    ['SPH3U', 'Unit 1 Test', 'Luke Skywalker', '3-'],\n    ['SPH4U', 'Unit 1 Test', 'Yoda', '4+'],\n    ['SPH4U', 'Unit 1 Test', 'Anakin Skywalker', '3'],\n    ['MHF4U', 'Unit 1 Test', 'Boba Fett', '2+'],\n    ['MHF4U', 'Unit 1 Test', 'Kylo Ren', '3'],\n    ['MHF4U', 'Unit 1 Test', 'Chewbacca', '4']\n    ]\n\ndef tfInput(string):\n    ''' Prompt the user for a yes/no question and return True/False '''\n    # Display the question and return the answer as a boolean\n    answer = input(string)\n    return answer[0].lower() == 'y'\n\n\n# ICS4U:\n# PUT ALL OF YOUR FUNCTIONS HERE\n\ndef level2Percent(level):\n    '''\n    This function takes a grade level and returns an integer representing 
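# Editor's note (illustrative usage, assuming the Empty, _DoublyLinkedListBase and
# LinkedDeque classes from the record above are in scope): a small round-trip check
# that the deque preserves order from either end.
def _deque_roundtrip_demo():
    dq = LinkedDeque()
    for n in (1, 2, 3):
        dq.insert_last(n)
    assert dq.first() == 1 and dq.last() == 3
    assert dq.delete_first() == 1   # removal from the front returns the element
    assert dq.delete_last() == 3    # removal from the back returns the element
    assert len(dq) == 1 and not dq.is_empty()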
the corresponding percent grade.\n    '''\n    # These 'if' statements return the percentage that corresponds to the level grade the user enters.\n    if level == ('4+'):\n        return 100\n    if level == ('4'):\n        return 94\n    if level == ('4-'):\n        return 86\n    if level == ('3+'):\n        return 79\n    if level == ('3'):\n        return 76\n    if level == ('3-'):\n        return 72\n    if level == ('2+'):\n        return 69\n    if level == ('2'):\n        return 66\n    if level == ('2-'):\n        return 62\n    if level == ('1+'):\n        return 59\n    if level == ('1'):\n        return 56\n    if level == ('1-'):\n        return 52\n    # Accept both spellings of the below-level-1 grade; the original \"== ('<1') or ('< 1')\" form was always truthy.\n    if level in ('<1', '< 1'):\n        return 40\n\ndef percent2Level(percent):\n    '''\n    This function takes a percent grade and returns a string representing the corresponding grade level.\n    '''\n    # These 'if' statements map each percent range to its level grade.\n    if percent in range(95,101):\n        return ('4+')\n    if percent in range(87,95):\n        return ('4')\n    if percent in range(80,87):\n        return ('4-')\n    if percent in range(77,80):\n        return ('3+')\n    if percent in range(73,77):\n        return ('3')\n    if percent in range(70,73):\n        return ('3-')\n    if percent in range(67,70):\n        return ('2+')\n    if percent in range(63,67):\n        return ('2')\n    if percent in range(60,63):\n        return ('2-')\n    if percent in range(57,60):\n        return ('1+')\n    if percent in range(53,57):\n        return ('1')\n    if percent in range(50,53):\n        return ('1-')\n    if percent in range(0,51):\n        return ('< 1')\n\ndef stringCompare (string1,string2):\n    '''\n    This function compares two strings, and returns True if the strings are the same, and False if they are not.\n    '''\n    # Compare the two strings case-insensitively and return True if they match, False if they don't.\n    return string1.lower().strip() == string2.lower().strip()\n\ndef addGrade(database,course,assignment,student,grade):\n    '''\n    This function adds a single row to the database, with the specified course, assignment, student, and grade.\n    '''\n    # Append the new row to the database.\n    database.append ([course,assignment,student,grade])\n    # Return the updated database with the extra row.\n    return database\n\ndef inputGrades (database):\n    '''\n    This function allows the user to input more grades into the database.\n    '''\n    # Prompt the user for the course and assignment names once.\n    course = input ('What is the name of the class?\\n')\n    assignment = input ('What is the name of the assignment?\\n')\n\n    # Keep prompting for student/grade pairs.\n    while True:\n        student = input ('Student name:\\n')\n        grade = input ('Level:\\n')\n\n        # Add the new answers to the database as a new row.\n        database = database + [[course,assignment,student,grade]]\n        response = tfInput('Would you like to input another grade?\\n')\n        # Stop prompting once the user answers no; otherwise loop for another grade.\n        if not response:\n            break\n    # Return the updated database.\n    return database\n\n\ndef studentAverage(database,student):\n    '''\n    This function returns the average percent grade for the specified STUDENT.\n    '''\n\n    # Lists that will collect the matching level grades and their percent equivalents.\n    level = []\n    percent = []\n    # Running total used to compute the average.\n    total = 0\n\n    # Loop over every row in the database.\n    for d in range(len(database)):\n        # Check whether this row belongs to the requested student.\n        if 
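# Editor's note (illustrative alternative, not part of a2.py): the two long if-chains
# above can be collapsed into a lookup table plus bisect. The cutoffs below mirror the
# ranges used in percent2Level.
import bisect

LEVEL_TO_PERCENT = {'4+': 100, '4': 94, '4-': 86, '3+': 79, '3': 76,
                    '3-': 72, '2+': 69, '2': 66, '2-': 62, '1+': 59,
                    '1': 56, '1-': 52, '<1': 40, '< 1': 40}

_CUTOFFS = (50, 53, 57, 60, 63, 67, 70, 73, 77, 80, 87, 95)
_LEVELS = ('< 1', '1-', '1', '1+', '2-', '2', '2+', '3-', '3', '3+', '4-', '4', '4+')

def percent_to_level(percent):
    # bisect_right counts how many cutoffs are <= percent, which indexes _LEVELS.
    return _LEVELS[bisect.bisect_right(_CUTOFFS, percent)]

assert percent_to_level(76) == '3' and percent_to_level(100) == '4+'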
stringCompare(database[d][2],student)== True:\n level .append(database [d][3])\n #This for loop will turn the grade levels in the array into porcentage grades.\n for l in range(len(level)):\n percent. append (level2Percent(level[l]))\n #This for loop is taking the average of the porcentage grades.\n for p in range (len(percent)):\n total = (total+percent[p])\n #It is returning the result, which is a float representing the student's average percent grade\n return total/len (percent)\n\ndef courseAverage (database,course):\n '''\n This function will return the average percent grade for the specified COURSE.\n '''\n\n #This variables are equal to a list [].\n level = []\n percent = []\n #The variable 'total' is equal '0' in order ot use it later in the code to get the course's average percent grade.\n total = 0\n\n #This for loop is finding all the element of the databse that have the right COURSE.\n for d in range (len(database)):\n if stringCompare(database[d][0],course)==True:\n level. append(database[d][3])\n #This if statement is storing the matching grade levels into an array.\n for l in range(len(level)):\n percent. append(level2Percent(level[l]))\n #If statement that turns the grade levels in the array into percentage grades.\n for p in range(len(percent)):\n total = (total+percent[p])\n #This variable is equal the formula to take the average of the array of the percentage grades to return the result (average).\n average =total/len(percent)\n #Returns a float representing the COURSE'S average percent grade.\n return average\n\n\n\nif __name__==\"__main__\":\n pass\n # ICS4U:\n # PUT ANY EXTRA CODE (TESTING, ETC) HERE\n\n #print (level2Percent('4'))\n #print (level2Percent('<1'))\n\n #print (percent2Level(95))\n #print (percent2Level(52))\n\n #print (stringCompare(' Hello','hello'))\n\n #print (addGrade(database,'SPH3U','Catapult project','Jar Jar Binks','2'))\n\n #print (inputGrades(database))\n\n\n","repo_name":"010897/ICS4U","sub_path":"a2.py","file_name":"a2.py","file_ext":"py","file_size_in_byte":6851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18610494834","text":"import os\nimport requests\nfrom bs4 import BeautifulSoup\nfrom pathlib import Path\nfrom datetime import datetime\nfrom shutil import copyfile\nfrom src.sheetwriter import course_writer\nfrom src.class_course import Course\n\nTA_URL = \"https://ta.yrdsb.ca/yrdsb/index.php\"\n\ndef write_html(login_data, out):\n session = requests.session()\n response = session.post(TA_URL, data=login_data)\n\n if test_conn(response):\n html_data = response.text\n html_writer(login_data['username'], out, html_data, session)\n\ndef html_writer(std_num, out, html_data, session):\n\n soup = BeautifulSoup(html_data, 'html.parser')\n font_list = soup.find_all('font')\n for item in font_list:\n if \"Invalid Login\" in str(item.text):\n print(\"Bad login!\")\n return\n\n output_folder_path = dir_maker(out)\n output_subfolder = '\\\\' + std_num + ' TA SNAPSHOT - Generated '\n dateformat = datetime.now().strftime(\"%c\")\n output_subfolder += dateformat\n\n out_path = output_folder_path + output_subfolder.replace(\":\", \"-\")\n print(\"Creating output folder...\")\n Path(out_path).mkdir(parents=True, exist_ok=True)\n print(\"Output folder created! 
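# Editor's note (illustrative sketch of the folder-naming step in html_writer above,
# with hypothetical names): the original concatenates "<student> TA SNAPSHOT - Generated
# <date>" and strips ':' so the path is valid on Windows. The same idea with pathlib only:
from datetime import datetime
from pathlib import Path

def make_snapshot_dir(base: Path, std_num: str) -> Path:
    stamp = datetime.now().strftime("%c").replace(":", "-")
    out = base / f"{std_num} TA SNAPSHOT - Generated {stamp}"
    out.mkdir(parents=True, exist_ok=True)
    return out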
Path: \" + out_path)\n\n c_div = soup.find('body').find(\"div\", recursive=False)\n hrs = c_div.find_all(\"hr\")\n\n c_div.find_all(\"div\", recursive=False)[2].decompose()\n\n for h in hrs:\n h.decompose()\n\n for p in c_div.find_all('p'):\n p.decompose()\n\n divs_1 = soup.find_all(\"div\", {\"class\": \"yellow_message\"})\n divs_2 = soup.find_all(\"div\", {\"class\": \"blue_border_message\"})\n divs = divs_1 + divs_2\n\n UNAVAILABLE_CONST = \"Please see teacher for current status regarding achievement in the course\"\n\n filename = out_path + \"\\\\index.html\"\n out_subdir = out_path + '\\\\courses'\n Path(out_subdir).mkdir(parents=True, exist_ok=True)\n\n css_filename = out_path + \"\\\\style.css\"\n css_link = \"https://ta.yrdsb.ca/live/students/style.css\"\n css_response = session.post(css_link)\n\n with open(css_filename, \"w\", encoding='utf-8') as file:\n file.write(str(css_response.text))\n print(\"Downloaded current TeachAssist stylesheet at \" + css_filename)\n\n ta_tables = soup.find_all('table')\n courses_table_rows = ta_tables[1].find_all('tr')[1:]\n\n logout_btn = soup.find('a')\n logout_btn['href'] = '#'\n\n for item in courses_table_rows:\n current_row_table_data = item.find_all('td')\n linker = UNAVAILABLE_CONST\n\n c_data = str(current_row_table_data[0]).split(' : ')\n code = c_data[0][6:].strip()\n course_html_file = out_subdir + \"\\\\\" + code + '.html'\n\n current_row_link_list = current_row_table_data[-1].find_all('a') #finds any links in the current course row\n if len(current_row_link_list) > 0:\n linkitem = current_row_link_list[0]\n linker = linkitem.get('href')\n linkitem['href'] = 'courses' + \"/\" + code + '.html'\n\n if linker is not UNAVAILABLE_CONST: # checks if the link is the \"please see teacher\" message\n grade_url = 'https://ta.yrdsb.ca/live/students/' + linker\n get_grade_session = session.get(grade_url)\n course_grades_soup = BeautifulSoup(get_grade_session.text, 'html.parser')\n\n return_button = course_grades_soup.find('a')\n return_button['href'] = '../index.html'\n\n linktags = course_grades_soup.find_all('link')\n for l in linktags:\n if l.get('href') == \"style.css\":\n l['href'] = '../style.css'\n\n print(\"Creating snapshot for course \" + code + \"...\")\n with open(course_html_file, \"w\", encoding='utf-8') as file:\n file.write(str(course_grades_soup))\n print(\"Snapshot for \" + code + \" created at \" + course_html_file)\n\n with open(filename, \"w\", encoding='utf-8') as file:\n file.write(str(soup))\n print(\"Created index.html file for snapshot at \" + filename + '\\n')\n\n print(\"TA snapshot HTML files saved at \" + out_path)\n print(\"Access the snapshot of your TeachAssist homepage with the 'index.html' file.\")\n print(\"Images and charts won't display because programatically downloading them from the TA server will get you rate-limited.\")\n\ndef write_courses(login_data, out):\n session = requests.session()\n response = session.post(TA_URL, data=login_data)\n\n if test_conn(response):\n html_data = response.text\n courses = get_courses(html_data, session)\n course_writer(login_data['username'], courses, out)\n\ndef get_courses(html, session):\n UNAVAILABLE_CONST = \"Please see teacher for current status regarding achievement in the course\"\n\n soup = BeautifulSoup(html, 'html.parser')\n font_list = soup.find_all('font')\n for item in font_list:\n if \"Invalid Login\" in str(item.text):\n print(\"Bad login!\")\n return\n\n ta_tables = soup.find_all('table')\n courses_table_rows = ta_tables[1].find_all('tr')[1:]\n\n out = 
[]\n\n for item in courses_table_rows:\n current_row_table_data = item.find_all('td')\n linker = UNAVAILABLE_CONST\n rm_text = str(current_row_table_data[0]).split('Block: ')[1].split('-')[1]\n\n current_row_link_list = current_row_table_data[-1].find_all('a') #finds any links in the current course row\n if len(current_row_link_list) > 0:\n linker = current_row_link_list[0].get('href')\n\n c_data = str(current_row_table_data[0]).split(' : ')\n c_title = ''\n if len(c_data) > 1 and c_data[1].strip() != '':\n c_title = c_data[1].strip()\n block_ind = c_title.find('\\t\\t\\t
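# Editor's note (illustrative, self-contained): the block/room extraction in
# get_courses above relies on chained str.split calls. The sample string below is
# fabricated to match the shapes the code splits on, not real TeachAssist output:
sample = "<td>Code: ICS4U : Intro CS Block: 2-Rm. 201\r</td>"
block = sample.split('Block: ')[1].split('-')[0]
rm_text = sample.split('Block: ')[1].split('-')[1]
room = rm_text[: rm_text.index('\r')].strip()
assert block == "2" and room == "Rm. 201"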
    \\r\\n\\t\\t\\tBlock:')\n if block_ind != -1:\n c_title = c_title[0:block_ind].strip()\n\n current_course = Course(\n c_data[0][6:], # code\n c_title,\n str(current_row_table_data[0]).split('Block: ')[1].split('-')[0], # block\n rm_text[0: rm_text.index('\\r')].strip(), #room\n str(current_row_table_data[1]).split('~')[0][-11:], # start date\n str(current_row_table_data[1]).split('~')[1][6:16], #end date\n False, # is_open (default false unless there is a link)\n False, # has_weights (default false unless there is a link AND weights)\n [17.5, 17.5, 17.5, 17.5, 0.0, 30.0], # course weights by default is this\n str(linker) # if there is a link to the grades, this will be the link; otherwise, this is the \"please see teacher\" message\n )\n\n if current_course.grades is not UNAVAILABLE_CONST: # checks if the link is the \"please see teacher\" message\n grade_url = 'https://ta.yrdsb.ca/live/students/' + current_course.grades\n get_grade_session = session.get(grade_url)\n course_grades_soup = BeautifulSoup(get_grade_session.text, 'html.parser')\n all_course_div = course_grades_soup.find('div').find_all('div', recursive=False) # find all div elements in grades page\n all_course_tables = all_course_div[1].find_all('table') # find the table elements within the div containing the grades\n grades_table = all_course_tables[1]\n\n all_bottom_table = all_course_div[2].find_all('table')\n weightings_table = all_bottom_table[1]\n\n if len(all_bottom_table) > 2:\n current_course.has_weights = True\n current_course.strand_weights = current_course.get_strand_weights(weightings_table)\n\n current_course.set_assignments(grades_table)\n current_course.is_open = True\n out.append(current_course)\n\n return out\n\ndef test_conn(response):\n status = response.status_code\n if status > 499:\n print(f\"Error {str(response.status_code)}: Problem on TeachAssist's side. Try again later.\")\n if status == 403:\n print(\"You may be rate-limited from the site. Try not to use this tool too many times within a short time frame.\")\n elif status > 299:\n print(f\"Error {str(response.status_code)}: Something went wrong. 
Check your connection or try again later.\")\n else:\n print(\"Response from \" + TA_URL + f\" = {str(response.status_code)}; Success\")\n return status < 300\n\ndef dir_maker(out):\n f_root = Path(os.path.dirname(os.path.realpath(__file__))).parent.absolute()\n trg = str(f_root) + out\n # print(trg)\n Path(trg).mkdir(parents=True, exist_ok=True)\n return trg\n","repo_name":"dan7x/ExcelTA","sub_path":"src/to_sheet.py","file_name":"to_sheet.py","file_ext":"py","file_size_in_byte":8449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74818648513","text":"def quicksort(arr):\n # 배열의 크기가 1 이하이면 정렬할 필요가 없으므로 해당 배열을 반환한다.\n if len(arr) <= 1:\n return arr\n \n # 배열의 가운데 값을 pivot으로 지정한다.\n pivot = arr[len(arr) // 2]\n \n # pivot을 기준으로 작은 값은 left 배열, 큰 값은 right 배열, 같은 값은 equal 배열에 담는다.\n left = []\n right = []\n equal = []\n for num in arr:\n if num < pivot:\n left.append(num)\n elif num > pivot:\n right.append(num)\n else:\n equal.append(num)\n \n # left, equal, right 배열을 각각 재귀적으로 정렬한 후 합쳐서 반환한다.\n return quicksort(left) + equal + quicksort(right)","repo_name":"joobang/algorithm","sub_path":"sort/quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4357182895","text":"from fabric.api import run, cd, prompt, sudo\nfrom fabric.contrib.files import exists\n\npip_packages = [\n \"virtualenv\",\n \"virtualenvwrapper\",\n \"flake8\",\n]\n\nvim_repositories = [\n \"git://github.com/alfredodeza/pytest.vim.git\",\n \"git://github.com/ap/vim-css-color.git\",\n \"git://github.com/mileszs/ack.vim.git\",\n \"git://github.com/majutsushi/tagbar.git\",\n \"git://github.com/scrooloose/nerdtree.git\",\n \"git://github.com/scrooloose/syntastic.git\",\n \"git://github.com/sjl/gundo.vim.git\",\n \"git://github.com/sjl/threesome.vim.git\",\n \"git://github.com/tomtom/tcomment_vim.git\",\n \"git://github.com/tpope/vim-cucumber.git\",\n \"git://github.com/tpope/vim-fugitive.git\",\n \"git://github.com/tpope/vim-haml.git\",\n \"git://github.com/tpope/vim-markdown.git\",\n \"git://github.com/tpope/vim-pastie.git\",\n \"git://github.com/tpope/vim-rails.git\",\n \"git://github.com/tpope/vim-repeat.git\",\n \"git://github.com/tpope/vim-surround.git\",\n \"git://github.com/tpope/vim-git.git\",\n \"git://github.com/vim-ruby/vim-ruby.git\",\n \"git://github.com/nvie/vim-flake8.git\",\n \"git://github.com/spf13/PIV.git\",\n \"git://github.com/ervandew/supertab.git\",\n \"git://github.com/Lokaltog/vim-powerline.git\",\n \"git://github.com/kien/rainbow_parentheses.vim.git\",\n \"git://github.com/pangloss/vim-javascript.git\",\n \"git://github.com/ajf/puppet-vim.git\",\n \"git://github.com/rosstimson/scala-vim-support.git\",\n \"git://github.com/kchmck/vim-coffee-script.git\",\n \"git://github.com/vim-scripts/csv.vim.git\",\n \"git://github.com/mmalecki/vim-node.js.git\",\n \"git://github.com/vim-scripts/Arduino-syntax-file.git\",\n \"git://github.com/vim-scripts/VimClojure.git\",\n \"git://github.com/groenewege/vim-less.git\",\n \"git://github.com/tpope/vim-endwise.git\",\n \"git://github.com/mattn/webapi-vim.git\",\n \"git://github.com/mattn/gist-vim.git\",\n \"git://github.com/kien/ctrlp.vim.git\",\n \"git://github.com/tomtom/tlib_vim.git\",\n \"git://github.com/MarcWeber/vim-addon-mw-utils.git\",\n \"git://github.com/MarcWeber/ultisnips.git\",\n \"git://github.com/honza/vim-snippets.git\",\n]\n\ndotfiles_list = [\n \"ackrc\",\n 
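# Editor's note (illustrative check, assuming the quicksort function from the
# quick_sort.py record above is in scope): a quick property test comparing its
# output against Python's built-in sorted() on random inputs.
import random

def _check_quicksort(trials: int = 100) -> None:
    for _ in range(trials):
        arr = [random.randint(0, 99) for _ in range(random.randint(0, 50))]
        assert quicksort(arr) == sorted(arr)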
\"bash_profile\",\n \"bashrc\",\n \"gemrc\",\n \"gitconfig\",\n \"gitignore_global\",\n \"hgignore_global\",\n \"hgrc\",\n \"irbrc\",\n \"tmux.conf\",\n \"vimrc\",\n \"zshrc\",\n]\n\nbrew_packages = [\n \"autojump\",\n \"bash\",\n \"bash-completion\",\n \"macvim --override-system-vim\",\n \"reattach-to-user-namespace\",\n \"ack\",\n \"tmux\",\n \"tree\",\n \"rbenv\",\n \"ruby-build\",\n \"wget\",\n \"ctags\"\n]\n\n\ndef _install_vim_customizations(env_settings_dir, user_home_dir):\n \"Setup and install vim customizations.\"\n # Add the vim repositories of the bundles you want to install\n\n #Bundle installation method\n vim_bundle_dir = env_settings_dir + \"/vim/bundle/\"\n with cd(env_settings_dir):\n for repository in vim_repositories:\n repository_list = repository.split('/')\n repository_guess = repository_list[4]\n if 'git' in repository_list[0]:\n repository_dir = repository_guess.rstrip('.git')\n repository_bundle_dir = vim_bundle_dir + repository_dir\n run('git submodule add -f %s %s' %\n (repository, repository_bundle_dir))\n elif 'hg' in repository_list[0]:\n repository_dir = repository_guess.rstrip('.hg')\n repository_bundle_dir = vim_bundle_dir + repository_dir\n run('hg clone %s %s' % (repository, repository_bundle_dir))\n\n #install the vim colorschemes\n run(\"git submodule add -f \"\n \"git://github.com/dfamorato/vim-colorschemes.git\"\n \" %s/vim/bundle/colorscheme\" % env_settings_dir)\n\n\ndef _install_zsh_customizations(env_settings_dir, user_home_dir):\n '''Install \"oh my zsh\"'''\n with cd(env_settings_dir):\n run(\"git submodule add -f git://github.com/robbyrussell/oh-my-zsh.git\"\n \" %s/oh-my-zsh\" % env_settings_dir)\n run(\"cp -f ./dotfiles/dfamorato.zsh-theme \"\n \"./oh-my-zsh/themes/dfamorato.zsh-theme\")\n\n\ndef _install_dotfiles_customizations(env_settings_dir, user_home_dir):\n ''' Install additional dotfiles customizations'''\n\n #Directory that contains the dotfiles\n dotfiles_conf_dir = env_settings_dir + \"/dotfiles\"\n\n #Check if dotfile exist, delete it and create new simlynks\n with cd(user_home_dir):\n for dotfile in dotfiles_list:\n if exists(\".%s\" % dotfile):\n run(\"rm -f %s*\" % dotfile)\n run(\"ln -s %s/%s .%s\" % (dotfiles_conf_dir, dotfile, dotfile))\n\n\ndef _install_tmux_customization(env_settings_dir, user_home_dir):\n '''Install Tmux customization'''\n\n # Pull tmux configurations from github repo\n with cd(env_settings_dir):\n run(\"git submodule add -f \"\n \"git://github.com/dfamorato/tmux-powerline.git %s/tmux-powerline\"\n % env_settings_dir)\n\n\ndef customize():\n target_os = prompt(\"What is the OS you are deploying to: mac, ubuntu or \"\n \"fedora: \")\n if target_os in (\"ubuntu\", \"UBUNTU\"):\n sudo(\"apt-get update\")\n sudo(\"apt-get install -y rake ruby-dev vim-nox\")\n sudo(\"apt-get install -y python-pip python-dev build-essential\")\n sudo(\"apt-get install -y tmux zsh git-core mercurial\")\n\n elif target_os in (\"mac\", \"MAC\", \"Mac\"):\n #Install homebrew packages if it's mac\n #for package in brew_packages:\n # run(\"brew install %s\" % package)\n pass\n\n elif target_os in (\"fedora\", \"Fedora\", \"FEDORA\"):\n # TODO: Write formulas for fedora / centos / redhat\n pass\n\n #Install all pip packages necessary\n for package in pip_packages:\n sudo(\"pip install %s\" % package)\n\n # let's find out what is the users home directory\n user_home_dir = run('echo $HOME')\n\n #delete .env_settings dir and ofther .files if they exist\n if exists(\"%s/.env_settings\" % user_home_dir):\n sudo(\"rm -rf %s/.env_settings\" 
% user_home_dir)\n with cd(user_home_dir):\n run(\"rm -rf .zsh* .zcom* .git* .hg* .vim* .profile* .bash* .tmux* \\\n .ackrc* .gemrc* .irbrc* .tmux*\")\n\n #TODO: Prompt user for his fork of the env_settings project\n #Clone base settings from github\n with cd(user_home_dir):\n run(\"git clone git://github.com/dfamorato/env_settings.git \"\n \".env_settings\")\n env_settings_dir = user_home_dir + \"/.env_settings\"\n\n #Add git upstream server for future updates on this project\n with cd(env_settings_dir):\n run(\"git remote add upstream \"\n \"git://github.com/dfamorato/env_settings.git\")\n\n #start to install customizations\n _install_vim_customizations(env_settings_dir, user_home_dir)\n _install_zsh_customizations(env_settings_dir, user_home_dir)\n _install_dotfiles_customizations(env_settings_dir, user_home_dir)\n _install_tmux_customization(env_settings_dir, user_home_dir)\n","repo_name":"dougmorato/env_settings","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":7010,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"3612701184","text":"import argparse\nimport collections\nimport os\nfrom pathlib import Path\n\nfrom utils import read_cfg\n\n\nclass ConfigParser:\n def __init__(self, config):\n self._config = config\n\n @property\n def config(self):\n return self._config\n\n def __getitem__(self, name):\n return self._config[name]\n\n @classmethod\n def from_args(cls, args):\n if not isinstance(args, tuple):\n args = args.parse_args()\n if args.device is not None:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.device\n msg_no_cfg = \"Configuration file need to be specified. Add '-c config.json'.\"\n assert args.config is not None, msg_no_cfg\n cfg_fname = Path(args.config)\n\n config = read_cfg(cfg_fname)\n\n return cls(config)\n\n def init_layers(self, module, *args, **kwargs):\n layer_names = self.config.sections()\n layers = []\n for name in layer_names:\n layer_args = {k: int(v) for k, v in self.config.items(name)}\n\n assert all([k not in layer_args for k in kwargs]), 'Overwriting kwargs given in config file is not allowed'\n layer_args.update(kwargs)\n layer_name_position = 0\n name = name.split('-')[layer_name_position]\n layer = getattr(module, name)(*args, **layer_args)\n layers.append(layer)\n\n return layers\n","repo_name":"vanloc19bk96/yolov1","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12455944369","text":"# Задайте числами список из N элементов, заполненных из промежутка [-N, N]. \n# Найдите произведение элементов на указанных позициях. 
\n# Позиции хранятся в файле file.txt в одной строке одно число.\n\nimport random\nn = int(input('Введите число: '))\nsp = []\nfor i in range(1, (n+1)):\n sp.append(random.randrange((-n), (n+1)))\n\nfor i in range(len(sp)):\n sp[i] = str(sp[i] * sp[i])\n\ndata = open('file.txt', 'w') \nfor line in sp:\n data.writelines(line + '\\n')\n \ndata.close\nexit()","repo_name":"nadinza88/seminar02dz","sub_path":"zad4.py","file_name":"zad4.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23861471808","text":"\"\"\"\nID: jay20ma1\nLANG: PYTHON3\nTASK: dualpal\n\"\"\"\nfin = open('dualpal.in', 'r')\nfout = open('dualpal.out', 'w')\n\n\ndef reVal(num):\n if num >= 0 and num <= 9:\n return chr(num + ord('0'))\n else:\n return chr(num - 10 + ord('A'))\n\n\ndef fromDeci(res, base, inputNum):\n index = 0\n while inputNum > 0:\n res += reVal(inputNum % base)\n inputNum = int(inputNum / base)\n res = res[::-1]\n return res\n\n\nn, s = list(map(int, fin.readline().split()))\ncount = 0\nwhile count < n:\n every = 0\n s += 1\n for kk in range(2, 11):\n r = str(fromDeci(\"\", kk, s))\n #print(r, kk, s, count, every)\n if len(r) == 1:\n every += 1\n if every == 2:\n count += 1\n fout.write(f\"{s}\")\n fout.write('\\n')\n break\n else:\n if len(r) % 2 != 0:\n if list(r[: len(r) // 2]) == list(reversed(r[(len(r) // 2) + 1:])):\n every += 1\n if every == 2:\n count += 1\n fout.write(f\"{s}\")\n fout.write('\\n')\n break\n else:\n if list(r[:(len(r) // 2)]) == list(reversed(r[(len(r) // 2):])):\n\n every += 1\n if every == 2:\n count += 1\n fout.write(f\"{s}\")\n fout.write('\\n')\n break\nfout.close()\n\n","repo_name":"35C4n0r/Codeforces-Py-","sub_path":"PycharmProjects/usaco/Dual Palindromes.py","file_name":"Dual Palindromes.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26784468713","text":"import pandas as pd\nimport numpy as np\nimport os\nimport get_symbols\n\ndef get_data(path):\n if os.path.exists(path)==False:\n print('no path')\n return 'nada'\n data = pd.read_pickle(path)\n return data\n\ndef get_top_strats(symbols, path, N):\n rets = dict()\n for symbol in symbols:\n path_ = ''.join((path, symbol))\n data = get_data(path_)\n #if data == 'nada':\n # pass\n \n ret = np.prod(data+1)\n rets[symbol] = ret\n \n topN = sorted(rets, key=rets.get, reverse=True)[:N]\n ls = {x:(rets[x]) for x in topN}\n return ls\n\ndef get_top_strats_until(symbols, path, N, until):\n rets = dict()\n for symbol in symbols:\n path_ = ''.join((path, symbol))\n data = get_data(path_)\n #if data == 'nada':\n # pass\n \n try: \n \t ret = np.prod(data[:until]+1)\n rets[symbol] = ret\n except:\n pass\n topN = sorted(rets, key=rets.get, reverse=True)[:N]\n ls = {x:(rets[x]) for x in topN}\n return ls\n\ndef get_top_strats_after(symbols, path, N, after):\n rets = dict()\n for symbol in symbols:\n path_ = ''.join((path, symbol))\n data = get_data(path_)\n #if data == 'nada':\n # pass\n try:\n ret = np.prod(data[after:]+1)\n rets[symbol] = ret\n except:\n pass\n topN = sorted(rets, key=rets.get, reverse=True)[:N]\n ls = {x:(rets[x]) for x in topN}\n return ls\n\ndef get_top_strats2(directory_, N):\n path = ''.join(('../output/', directory_))\n symbols =get_symbols.main()\n rets = dict()\n rets2 = dict()\n for symbol in symbols:\n path_ = ''.join((path, symbol))\n data = get_data(path_)\n #if data == 'nada':\n # pass\n \n ret = 
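# Editor's note (illustrative alternative with hypothetical names, not part of
# stored_data.py): get_top_strats above compounds per-symbol returns with
# np.prod(data + 1) and keeps the N largest. The same idea over pandas Series,
# assuming each value in the dict is a pd.Series of periodic returns:
import pandas as pd

def top_n_by_compound_return(returns_by_symbol: dict, n: int) -> pd.Series:
    # (1 + r).prod() per symbol, then keep the n best performers.
    totals = pd.Series({sym: float((rets + 1).prod())
                        for sym, rets in returns_by_symbol.items()})
    return totals.nlargest(n)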
np.prod(data[:'2013']+1)\n rets[symbol] = ret\n \n ret2 = np.prod(data['2014':]+1)\n rets2[symbol] = ret\n \n topN = sorted(rets, key=rets.get, reverse=True)[:N]\n ls = {x:(rets2[x]) for x in topN}\n return ls\n\n\ndef check_performance( directory_= 'curve_oldfart_50_1000_200/'):\n l=get_symbols.main()\n dirr = ''.join(('../output/', directory_))\n top = get_top_strats_until(l, dirr, 50, '2014')\n ttop = get_top_strats_after(top, directory_, 50, '2015')\n before_avg = np.nanmean(list(top.values()))\n after_avg = np.nanmean(list(ttop.values()))\n \n print(before_avg)\n print(before_avg**(1/4))\n print(after_avg)\n \n \n \ndef full_performance(symbols, path, N):\n rets = dict()\n for symbol in symbols:\n path_ = ''.join((path, symbol))\n data = get_data(path_)\n #if data == 'nada':\n # pass\n \n ret = np.prod(data+1)\n rets[symbol] = ret\n \n ls = {x:(rets[x]) for x in symbols}\n avg = np.nanmean(list(ls.values()))\n \n return ls, avg\n\n\n\nif __name__=='__main__':\n import sys\n get_top_strats2(sys.argv[1], 20)\n'''\n try: \n get_top_strats2(sys.argv[1], 20) \n #check_performance(sys.argv[1])\n except: \n check_performance()\n symbols = get_symbols.main()\n directory_= 'curve_youngbuck_50_1000_200/'\n path = ''.join(('../output/', directory_))\n N=50\n full_performance(symbols, path, N)\n'''\n","repo_name":"LiamConnell/tradetest","sub_path":"random/stored_data.py","file_name":"stored_data.py","file_ext":"py","file_size_in_byte":3286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31288050575","text":"import pandas as pd\nfrom numpy.random import *\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nfrom math import *\n\nfreq_cnt = 10\nreplicate = 5\n\n\ndef fake_date_generator():\n s = '['\n for i in range(freq_cnt):\n for j in range(replicate):\n s += '{\"freq\": %d, \"dist\": %d, \"val\": %.2f},' % (902+i, randint(150), rand())\n s += ']'\n return s\n\n\ncnt = 100\ndf = pd.DataFrame(\n {\n \"freq\": randint(902, 928, cnt),\n \"dist\": randint(1, 150, cnt),\n \"val\": rand(cnt),\n }\n)\n\ngrouped = df.groupby(\"freq\")\ncnt = len(grouped)\ncols = 5\nrows = ceil(cnt/cols)\n\nfig = plt.figure(figsize=(40, 80), constrained_layout=True)\ngs = gridspec.GridSpec(rows, cols, figure=fig)\n\ndist_min = min(df['dist'])\ndist_max = max(df['dist'])\nval_min = min(df['val'])\nval_max = max(df['val'])\n\ni = 0\nfor name, group in grouped:\n ax = fig.add_subplot(gs[i // cols, i % cols])\n ax.set_title(name)\n ax.scatter(group['dist'], group['val'])\n ax.set_xlim(dist_min, dist_max)\n ax.set_ylim(val_min, val_max)\n i += 1\n\nplt.show()\n","repo_name":"Jennifer331/Scripts","sub_path":"python/fake_data_generator.py","file_name":"fake_data_generator.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"71400788034","text":"import brownie\n\nZERO_ADDRESS = \"0x0000000000000000000000000000000000000000\"\n\nAQUARIUS_BASE_URL = \"https://v4.aquarius.oceanprotocol.com\"\nBROWNIE_PROJECT = brownie.project.load(\"./\", name=\"MyProject\")\n\nMAX_ALLOCATE = 10000.0\n\n# filled in by oceanutil.py\nCONTRACTS: dict = {} # [chainID][contract_label] : contract_object\n","repo_name":"trangnv/df-strat","sub_path":"util/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35683319590","text":"from 
Queue import Queue\nimport threading\nimport config\nimport logging\nimport socket\nimport time\n\nlogging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(levelname)s\\t(%(threadName)-10s) %(filename)s:%(lineno)d\\t%(message)s')\n\nqueue = Queue(config.QUEUE_SIZE)\ncompleteTasks = []\n\n\nclass Task:\n NEW = -1\n CONSUMED = 0\n DONE = 1\n\n def __init__(self, id, client, expr):\n self.id = id\n self.client = client\n self.expr = expr\n self.state = Task.NEW\n self.result = None\n\n def __str__(self):\n return 'id=%s; client=%s; expr=%s; state=%d; res=%s' % (\n self.id, self.client, self.expr, self.state, self.result)\n\n\nclass Server:\n def __init__(self, host, port, processor, max_connections=1):\n self.host = host\n self.port = port\n self.max_connections = max_connections\n self.name = '%s:%d' % (host, port)\n self.processor = processor\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n def start(self):\n logging.debug('Launching server %s' % self.name)\n self.sock.bind((self.host, self.port))\n self.sock.listen(self.max_connections)\n logging.info('Server up and running %s' % self.name)\n\n while True:\n client_sock, client_addr = self.sock.accept()\n client_name = ':'.join(map(str, client_addr))\n logging.debug('Received connection from %s, processing...' % client_name)\n self.processor(client_name, client_sock)\n\n def __del__(self):\n if self.sock:\n self.sock.close()\n\n\ndef create_and_put(expr, client):\n # TODO: check queue size to avoid overflow\n task_id = '%s' % time.time()\n task = Task(task_id, client, expr)\n logging.info('Putting task into queue: %s, size: %d' % (task, queue.qsize()+1))\n queue.put(task)\n\n return task\n\n\ndef get_task_client(task_id):\n result = None\n for task in completeTasks[:]:\n if task.id == task_id and task.state == Task.DONE:\n result = task\n completeTasks.remove(task)\n break\n return result\n\n\ndef process_task_request(client_name, client_socket):\n try:\n expr = client_socket.recv(config.EXPR_MAX_SIZE)\n logging.debug('Receive expr: %s from client: %s' % (expr, client_name))\n task = create_and_put(expr, client_name)\n client_socket.send(task.id)\n finally:\n client_socket.close()\n\n\ndef process_result_request(client_name, client_socket):\n try:\n task_id = client_socket.recv(config.TASKID_MAX_SIZE)\n logging.debug('Receive task_id: %s from client: %s' % (task_id, client_name))\n task = get_task_client(task_id)\n if task is not None:\n logging.info('Task result returned to client: %s' % task)\n client_socket.send(task.result)\n else:\n logging.info('Task %s not ready yet' % task)\n finally:\n client_socket.close()\n\n\ndef backend_client(host, port):\n logging.debug('Connecting to backend to %s:%d' % (host, port))\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n client_socket.connect((host, port))\n logging.info('Connection with backend established')\n\n while True:\n task = queue.get()\n logging.debug('Starting processing task: %s with backend' % task)\n client_socket.send(task.expr)\n result = client_socket.recv(config.RES_MAX_SIZE)\n task.state = Task.DONE\n task.result = result\n # TODO: check overflow\n completeTasks.append(task)\n logging.info('Task complete: %s' % task)\n queue.task_done()\n finally:\n client_socket.close()\n\n\nif __name__ == '__main__':\n taskServer = Server('localhost', config.TASK_PORT, process_task_request, config.MAX_CON)\n resultServer = Server('localhost', config.RES_PORT, process_result_request, config.MAX_CON)\n\n ts = 
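# Editor's note (illustrative sketch, not part of server.py above): the server scans
# the shared completeTasks list with no lock while another thread appends to it. A
# dict keyed by task id behind a threading.Lock avoids both the O(n) scan and the race:
import threading

class ResultStore:
    def __init__(self):
        self._lock = threading.Lock()
        self._done = {}

    def put(self, task_id, result):
        with self._lock:
            self._done[task_id] = result

    def pop(self, task_id):
        # Return the result once, or None if the task is not finished yet.
        with self._lock:
            return self._done.pop(task_id, None)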
threading.Thread(target=taskServer.start, name='taskServer')\n rs = threading.Thread(target=resultServer.start, name='resultServer')\n bc = threading.Thread(target=backend_client, name='backendClient', args=(config.DEF_HOST, config.PROC_PORT))\n\n ts.start()\n rs.start()\n bc.start()\n\n ts.join()\n rs.join()\n bc.join()\n","repo_name":"vakafoam/Remote_calc","sub_path":"my/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2831678450","text":"#PRIMER EJERCICIO\ndef analizar():\n if contrasenia==confirm:\n return \"Las contraseñas coinciden\"\n else:\n return \"Las contraseñas no coinciden\"\n\nprint(\"--------------Ejercicio 1-------------\")\n\ncontrasenia=str(input(\"Ingrese su contraseña: \")).lower()\n\nconfirm=str(input(\"Vuelva a ingresar la contraseña: \")).lower()\n\nprint(analizar())\n\n#SEGUNDO EJERCICIO\n\nprint(\"--------------Ejercicio 2-------------\")\n\ndef alumno(**datos):\n nombre=datos[\"nombre\"].lower()\n for i in \"abcdefghijklm\":\n if nombre[0]==i and datos[\"sexo\"].lower()==\"mujer\":\n return \"Pertenece al grupo A\"\n \n for i in \"nñopqrstuvwxyz\":\n if nombre[0]==i and datos[\"sexo\"].lower()==\"hombre\":\n return \"Pertenece al grupo A\"\n \n else:\n return \"Pertenece al grupo B\"\n\nnombre=str(input(\"Ingrese su nombre: \"))\nsexo=str(input(\"Ingrese su sexo: \"))\n\nprint(alumno(nombre=nombre,sexo=sexo))\n","repo_name":"EstuardoSon/Python-SAESAP","sub_path":"HojaDeTrabajo3.py","file_name":"HojaDeTrabajo3.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72998989633","text":"# -*- coding: utf-8 -*- \r\n# @author : caoyang\r\n# @email: caoyang@163.sufe.edu.cn\r\n# 模型训练: 使用sklearn\r\n\r\nimport os\r\nimport gc\r\nimport json\r\nimport joblib\r\nimport numpy as np\r\nimport pandas as pd\r\nimport xgboost as xgb\r\nimport lightgbm as lgb\r\n\r\nfrom sklearn import preprocessing\r\n\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\r\n\r\n\r\nfrom sklearn.model_selection import KFold, train_test_split\r\nfrom sklearn.metrics import auc, confusion_matrix, accuracy_score, roc_auc_score, roc_curve, auc, precision_recall_curve\r\n\r\nfrom xgboost.sklearn import XGBClassifier\r\nfrom lightgbm import LGBMClassifier\r\n\r\nfrom matplotlib import pyplot as plt\r\n\r\nfrom torch.autograd import Variable\r\nfrom torch.nn import CrossEntropyLoss, BCELoss\r\nfrom torch.optim import Adam, lr_scheduler\r\n\r\nfrom setting import *\r\nfrom config import QAModelConfig, DatasetConfig\r\n\r\nfrom src.dataset import generate_dataloader, Dataset\r\nfrom src.evaluation_tools import evaluate_qa_model_choice, evaluate_qa_model_judgment, evaluate_classifier\r\nfrom src.plot_tools import plot_roc_curve, plot_pr_curve\r\nfrom src.qa_model import BaseChoiceModel, BaseJudgmentModel, ReferenceChoiceModel, ReferenceJudgmentModel\r\nfrom src.torch_tools import save_checkpoint\r\nfrom src.utils import initialize_logger, terminate_logger, load_args, save_args, timer\r\nfrom src.easy_machine_learning import EasyClassifier\r\n\r\n\r\n\r\ndef generate_dataset_for_sklearn(args, mode):\r\n\tdataset_train = Dataset(args=args, mode=mode, do_export=False, pipeline='judgment', 
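# Editor's note (illustrative): the alumno() grouping in the HojaDeTrabajo3 record
# above mixes for/else fall-through, so "group B" can be returned incorrectly. A
# compact sketch of the rule stated in the source (women with names A-M and men with
# names N-Z go to group A, everyone else to group B), keeping the Spanish output strings:
def group_for(nombre: str, sexo: str) -> str:
    first = nombre.strip().lower()[:1]
    sexo = sexo.lower()
    if first and ((sexo == "mujer" and first in "abcdefghijklm")
                  or (sexo == "hombre" and first in "nñopqrstuvwxyz")):
        return "Pertenece al grupo A"
    return "Pertenece al grupo B"

assert group_for("Ana", "mujer") == "Pertenece al grupo A"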
for_test=True).data\r\n\tdataset_valid = Dataset(args=args, mode=mode.replace('train', 'valid'), do_export=False, pipeline='judgment', for_test=True).data\r\n\tdataset_test = Dataset(args=args, mode=mode.replace('train', 'test'), do_export=False, pipeline='judgment').data\r\n\t\r\n\tdataset_train = pd.concat([dataset_train, dataset_valid])\r\n\t\r\n\t'''\r\n\tid\t\t\t\t: 题目编号\r\n\tquestion\t\t: 题目题干\r\n\toption\t\t\t: 每个选项\r\n\tsubject\t\t\t: use_reference配置为True时生效, 包含num_top_subject个法律门类\r\n\treference\t\t: use_reference配置为True时生效, 包含相关的num_best个参考书目文档段落\r\n\ttype\t\t\t: 零一值表示概念题或情景题\r\n\tlabel_judgment\t: train或valid模式时生效, 零一值表示判断题的答案\r\n\toption_id\t\t: 20211216更新, 记录判断题对应的原选择题编号(ABCD)\r\n\t'''\r\n\t\r\n\tif args.use_reference:\r\n\t\tdataset_X = np.hstack([np.vstack(dataset_train['question'].map(np.array).values),\r\n\t\t\t\t\t\t\t np.vstack(dataset_train['option'].map(np.array).values)])\r\n\t\ttest_X = np.hstack([np.vstack(dataset_test['question'].map(np.array).values),\r\n\t\t\t\t\t\t\tnp.vstack(dataset_test['option'].map(np.array).values)])\r\n\telse:\r\n\t\tdataset_X = np.hstack([np.vstack(dataset_train['question'].map(np.array).values),\r\n\t\t\t\t\t\t\t np.vstack(dataset_train['option'].map(np.array).values),\r\n\t\t\t\t\t\t\t np.vstack(dataset_train['reference']).map(lambda x: np.array(x).reshape((-1, )))])\r\n\t\ttest_X = np.hstack([np.vstack(dataset_test['question'].map(np.array).values),\r\n\t\t\t\t\t\t\tnp.vstack(dataset_test['option'].map(np.array).values),\r\n\t\t\t\t\t\t\tnp.vstack(dataset_test['reference']).map(lambda x: np.array(x).reshape((-1, )))])\r\n\t\r\n\tdataset_y = dataset_train['label_judgment'].values\t\r\n\tquestion_ids = dataset_test['id'].tolist()\r\n\toption_ids = dataset_test['option_id'].tolist()\r\n\t\r\n\treturn dataset_X, dataset_y, (test_X, question_ids, option_ids)\r\n\r\n@timer\r\ndef train_model(model_name, mode='train_kd', dataset_X=None, dataset_y=None, **kwargs):\r\n\t\r\n\tif dataset_X is None or dataset_y is None:\r\n\t\t\r\n\t\targs = load_args(DatasetConfig)\r\n\t\tfor key, value in kwargs.items():\r\n\t\t\targs.__setattr__(key, value)\t\r\n\t\t\r\n\t\tdataset_X, dataset_y, _ = generate_dataset_for_sklearn(args=args, mode=mode)\r\n\t\r\n\tmode_suffix = mode.split('_')[-1]\r\n\t\r\n\t\r\n\tif model_name == 'lgb':\r\n\t\tparams = {\r\n\t\t\t'boosting_type': 'gbdt',\r\n\t\t\t'num_leaves': 256,\r\n\t\t\t'max_depth': 8,\r\n\t\t\t'learning_rate': .001,\r\n\t\t\t'n_estimators': 128,\r\n\t\t\t'subsample_for_bin': 200000,\r\n\t\t\t'objective': None,\r\n\t\t\t'class_weight': None,\r\n\t\t\t# 'min_split_gain': .0,\r\n\t\t\t# 'min_child_weight': 0,\r\n\t\t\t# 'min_child_samples': 100,\r\n\t\t\t# 'subsample': 1.,\r\n\t\t\t# 'subsample_freq': 0,\r\n\t\t\t# 'colsample_bytree': 1.,\r\n\t\t\t'reg_alpha': 0,\r\n\t\t\t'reg_lambda': 0,\r\n\t\t\t'random_state': None,\r\n\t\t\t'n_jobs': 1,\r\n\t\t\t'silent': True,\r\n\t\t\t'importance_type': 'split',\r\n\t\t}\r\n\t\t\r\n\t\teasyclf = EasyClassifier(dataset_X, dataset_y, LGBMClassifier, params)\r\n\t\tif mode_suffix == 'kd':\r\n\t\t\teasyclf.kfold_train(n_splits=5,\r\n\t\t\t\t\t\t\t\tshuffle=True,\r\n\t\t\t\t\t\t\t\tpr_title_formatter='PR Curve of Random Forest {}'.format,\r\n\t\t\t\t\t\t\t\troc_title_formatter='ROC Curve of Random Forest 
{}'.format,\r\n\t\t\t\t\t\t\t\tmodel_export_path_formatter='temp/sklearn_test/lgb/kd/rf_fold_{}.m'.format,\r\n\t\t\t\t\t\t\t\tpr_export_path_formatter='temp/sklearn_test/lgb/kd/pr_curve_fold_{}.png'.format,\r\n\t\t\t\t\t\t\t\troc_export_path_formatter='temp/sklearn_test/lgb/kd/roc_curve_fold_{}.png'.format,\r\n\t\t\t\t\t\t\t\tevaluation_export_path='temp/sklearn_test/lgb/kd/rf_eval.json')\r\n\t\telif mode_suffix == 'ca':\r\n\t\t\teasyclf.kfold_train(n_splits=5,\r\n\t\t\t\t\t\t\t\tshuffle=True,\r\n\t\t\t\t\t\t\t\tpr_title_formatter='PR Curve of Random Forest {}'.format,\r\n\t\t\t\t\t\t\t\troc_title_formatter='ROC Curve of Random Forest {}'.format,\r\n\t\t\t\t\t\t\t\tmodel_export_path_formatter='temp/sklearn_test/lgb/ca/rf_fold_{}.m'.format,\r\n\t\t\t\t\t\t\t\tpr_export_path_formatter='temp/sklearn_test/lgb/ca/pr_curve_fold_{}.png'.format,\r\n\t\t\t\t\t\t\t\troc_export_path_formatter='temp/sklearn_test/lgb/ca/roc_curve_fold_{}.png'.format,\r\n\t\t\t\t\t\t\t\tevaluation_export_path='temp/sklearn_test/lgb/ca/rf_eval.json')\t\r\n\t\t\t\t\t\t\t\r\n\r\n\tif model_name == 'xgb':\r\n\t\t# XGBoost\r\n\r\n\t\tparams = {\r\n\t\t\t'n_estimators': 128,\r\n\t\t\t'use_label_encoder': False,\r\n\t\t\t'max_depth': 8,\r\n\t\t\t'num_leaves': 256,\r\n\t\t\t'learning_rate': .01,\r\n\t\t\t'verbosity': 0,\r\n\t\t\t# 'objective': None,\r\n\t\t\t'booster': 'dart',\r\n\t\t\t# 'tree_method': None,\r\n\t\t\t'n_jobs': 1,\r\n\t\t\t'gamma': .001,\r\n\t\t\t# 'min_child_weight': .01,\r\n\t\t\t# 'max_delta_step': .01,\r\n\t\t\t# 'subsample': 1.,\r\n\t\t\t# 'colsample_bytree': 1.,\r\n\t\t\t# 'colsample_bylevel': 1.,\r\n\t\t\t# 'colsample_bynode': 1.,\r\n\t\t\t'reg_alpha': .01,\r\n\t\t\t'reg_lambda': .01,\r\n\t\t\t# 'scale_pos_weight': .01,\r\n\t\t\t# 'base_score': 1,\r\n\t\t\t'random_state': None,\r\n\t\t\t# 'missing': np.nan,\r\n\t\t\t'num_parallel_tree': 10,\r\n\t\t\t# 'monotone_constraints': None,\r\n\t\t\t# 'interaction_constraints': None,\r\n\t\t\t# 'importance_type': 'gain',\r\n\t\t\t# 'gpu_id': None,\r\n\t\t\t# 'validate_parameters': None,\r\n\t\t}\r\n\r\n\t\teasyclf = EasyClassifier(dataset_X, dataset_y, XGBClassifier, params)\r\n\t\tif mode_suffix == 'kd':\r\n\t\t\teasyclf.kfold_train(n_splits=5,\r\n\t\t\t\t\t\t\t\tshuffle=True,\r\n\t\t\t\t\t\t\t\tpr_title_formatter='PR Curve of Random Forest {}'.format,\r\n\t\t\t\t\t\t\t\troc_title_formatter='ROC Curve of Random Forest {}'.format,\r\n\t\t\t\t\t\t\t\tmodel_export_path_formatter='temp/sklearn_test/xgb/kd/rf_fold_{}.m'.format,\r\n\t\t\t\t\t\t\t\tpr_export_path_formatter='temp/sklearn_test/xgb/kd/pr_curve_fold_{}.png'.format,\r\n\t\t\t\t\t\t\t\troc_export_path_formatter='temp/sklearn_test/xgb/kd/roc_curve_fold_{}.png'.format,\r\n\t\t\t\t\t\t\t\tevaluation_export_path='temp/sklearn_test/xgb/kd/rf_eval.json')\r\n\t\telif mode_suffix == 'ca':\r\n\t\t\teasyclf.kfold_train(n_splits=5,\r\n\t\t\t\t\t\t\t\tshuffle=True,\r\n\t\t\t\t\t\t\t\tpr_title_formatter='PR Curve of Random Forest {}'.format,\r\n\t\t\t\t\t\t\t\troc_title_formatter='ROC Curve of Random Forest {}'.format,\r\n\t\t\t\t\t\t\t\tmodel_export_path_formatter='temp/sklearn_test/xgb/ca/rf_fold_{}.m'.format,\r\n\t\t\t\t\t\t\t\tpr_export_path_formatter='temp/sklearn_test/xgb/ca/pr_curve_fold_{}.png'.format,\r\n\t\t\t\t\t\t\t\troc_export_path_formatter='temp/sklearn_test/xgb/ca/roc_curve_fold_{}.png'.format,\r\n\t\t\t\t\t\t\t\tevaluation_export_path='temp/sklearn_test/xgb/ca/rf_eval.json')\t\r\n\t\t\t\t\t\t\t\r\n\t\r\n\tif model_name == 'lr':\r\n\t\t# Logistic Regression\r\n\t\tparams = {\r\n\t\t\t'penalty': 
'l2',\r\n\t\t\t'dual': False,\r\n\t\t\t'tol': 1e-4,\r\n\t\t\t'C': 1.0,\r\n\t\t\t'fit_intercept': True,\r\n\t\t\t'intercept_scaling': 1.0,\r\n\t\t\t'class_weight': None,\r\n\t\t\t'random_state': None,\r\n\t\t\t'solver': 'liblinear',\r\n\t\t\t'max_iter': 100,\r\n\t\t\t'multi_class': 'ovr',\r\n\t\t\t'verbose': 0,\r\n\t\t\t'warm_start': False,\r\n\t\t\t'n_jobs': None,\r\n\t\t}\r\n\t\teasyclf = EasyClassifier(dataset_X, dataset_y, LogisticRegression, params)\r\n\t\tif mode_suffix == 'kd':\r\n\t\t\teasyclf.kfold_train(n_splits=5,\r\n\t\t\t\t\t\t\t\tshuffle=True,\r\n\t\t\t\t\t\t\t\tpr_title_formatter='PR Curve of Random Forest {}'.format,\r\n\t\t\t\t\t\t\t\troc_title_formatter='ROC Curve of Random Forest {}'.format,\r\n\t\t\t\t\t\t\t\tmodel_export_path_formatter='temp/sklearn_test/lr/kd/rf_fold_{}.m'.format,\r\n\t\t\t\t\t\t\t\tpr_export_path_formatter='temp/sklearn_test/lr/kd/pr_curve_fold_{}.png'.format,\r\n\t\t\t\t\t\t\t\troc_export_path_formatter='temp/sklearn_test/lr/kd/roc_curve_fold_{}.png'.format,\r\n\t\t\t\t\t\t\t\tevaluation_export_path='temp/sklearn_test/lr/kd/rf_eval.json')\r\n\t\telif mode_suffix == 'ca':\r\n\t\t\teasyclf.kfold_train(n_splits=5,\r\n\t\t\t\t\t\t\t\tshuffle=True,\r\n\t\t\t\t\t\t\t\tpr_title_formatter='PR Curve of Random Forest {}'.format,\r\n\t\t\t\t\t\t\t\troc_title_formatter='ROC Curve of Random Forest {}'.format,\r\n\t\t\t\t\t\t\t\tmodel_export_path_formatter='temp/sklearn_test/lr/ca/rf_fold_{}.m'.format,\r\n\t\t\t\t\t\t\t\tpr_export_path_formatter='temp/sklearn_test/lr/ca/pr_curve_fold_{}.png'.format,\r\n\t\t\t\t\t\t\t\troc_export_path_formatter='temp/sklearn_test/lr/ca/roc_curve_fold_{}.png'.format,\r\n\t\t\t\t\t\t\t\tevaluation_export_path='temp/sklearn_test/lr/ca/rf_eval.json')\t\r\n\t\t\r\n\tif model_name == 'dt':\r\n\t\t# Decision tree\r\n\r\n\t\tparams = {\r\n\t\t\t'criterion': 'gini',\r\n\t\t\t'splitter': 'best',\r\n\t\t\t'max_depth': None,\r\n\t\t\t'min_samples_split': 2,\r\n\t\t\t'min_samples_leaf': 1,\r\n\t\t\t'min_weight_fraction_leaf': 0.0,\r\n\t\t\t'max_features': None,\r\n\t\t\t'random_state': None,\r\n\t\t\t'max_leaf_nodes': None,\r\n\t\t\t'min_impurity_decrease': 0.0,\r\n\t\t\t'class_weight': None,\r\n\t\t}\r\n\r\n\t\teasyclf = EasyClassifier(dataset_X, dataset_y, DecisionTreeClassifier, params)\r\n\t\tif mode_suffix == 'kd':\r\n\t\t\teasyclf.kfold_train(n_splits=5,\r\n\t\t\t\t\t\t\t\tshuffle=True,\r\n\t\t\t\t\t\t\t\tpr_title_formatter='PR Curve of Random Forest {}'.format,\r\n\t\t\t\t\t\t\t\troc_title_formatter='ROC Curve of Random Forest {}'.format,\r\n\t\t\t\t\t\t\t\tmodel_export_path_formatter='temp/sklearn_test/dt/kd/rf_fold_{}.m'.format,\r\n\t\t\t\t\t\t\t\tpr_export_path_formatter='temp/sklearn_test/dt/kd/pr_curve_fold_{}.png'.format,\r\n\t\t\t\t\t\t\t\troc_export_path_formatter='temp/sklearn_test/dt/kd/roc_curve_fold_{}.png'.format,\r\n\t\t\t\t\t\t\t\tevaluation_export_path='temp/sklearn_test/dt/kd/rf_eval.json')\r\n\t\telif mode_suffix == 'ca':\r\n\t\t\teasyclf.kfold_train(n_splits=5,\r\n\t\t\t\t\t\t\t\tshuffle=True,\r\n\t\t\t\t\t\t\t\tpr_title_formatter='PR Curve of Random Forest {}'.format,\r\n\t\t\t\t\t\t\t\troc_title_formatter='ROC Curve of Random Forest 
{}'.format,\r\n\t\t\t\t\t\t\t\tmodel_export_path_formatter='temp/sklearn_test/dt/ca/rf_fold_{}.m'.format,\r\n\t\t\t\t\t\t\t\tpr_export_path_formatter='temp/sklearn_test/dt/ca/pr_curve_fold_{}.png'.format,\r\n\t\t\t\t\t\t\t\troc_export_path_formatter='temp/sklearn_test/dt/ca/roc_curve_fold_{}.png'.format,\r\n\t\t\t\t\t\t\t\tevaluation_export_path='temp/sklearn_test/dt/ca/rf_eval.json')\t\r\n\r\n\tif model_name == 'rf':\r\n\t\t# Random Forest\r\n\r\n\t\tparams = {\r\n\t\t\t'n_estimators': 100,\r\n\t\t\t'criterion': 'gini',\r\n\t\t\t'max_depth': None,\r\n\t\t\t'min_samples_split': 2,\r\n\t\t\t'min_samples_leaf': 1,\r\n\t\t\t'min_weight_fraction_leaf': 0.0,\r\n\t\t\t'max_features': 'auto',\r\n\t\t\t'max_leaf_nodes': None,\r\n\t\t\t'min_impurity_decrease': 0.0,\r\n\t\t\t'min_impurity_split': None,\r\n\t\t\t'bootstrap': True,\r\n\t\t\t'oob_score': False,\r\n\t\t\t'n_jobs': None,\r\n\t\t\t'random_state': None,\r\n\t\t\t'verbose': 0,\r\n\t\t\t'warm_start': False,\r\n\t\t\t'class_weight': None,\r\n\t\t\t'ccp_alpha': 0.0,\r\n\t\t\t'max_samples': None,\r\n\t\t}\r\n\r\n\t\t# easyclf = EasyClassifier(dataset_X_onehot, dataset_y, RandomForestClassifier, params)\r\n\t\teasyclf = EasyClassifier(dataset_X, dataset_y, RandomForestClassifier, params)\r\n\t\t\r\n\t\tif mode_suffix == 'kd':\r\n\t\t\teasyclf.kfold_train(n_splits=5,\r\n\t\t\t\t\t\t\t\tshuffle=True,\r\n\t\t\t\t\t\t\t\tpr_title_formatter='PR Curve of Random Forest {}'.format,\r\n\t\t\t\t\t\t\t\troc_title_formatter='ROC Curve of Random Forest {}'.format,\r\n\t\t\t\t\t\t\t\tmodel_export_path_formatter='temp/sklearn_test/rf/kd/rf_fold_{}.m'.format,\r\n\t\t\t\t\t\t\t\tpr_export_path_formatter='temp/sklearn_test/rf/kd/pr_curve_fold_{}.png'.format,\r\n\t\t\t\t\t\t\t\troc_export_path_formatter='temp/sklearn_test/rf/kd/roc_curve_fold_{}.png'.format,\r\n\t\t\t\t\t\t\t\tevaluation_export_path='temp/sklearn_test/rf/kd/rf_eval.json')\r\n\r\n\t\telif mode_suffix == 'ca':\r\n\t\t\teasyclf.kfold_train(n_splits=5,\r\n\t\t\t\t\t\t\t\tshuffle=True,\r\n\t\t\t\t\t\t\t\tpr_title_formatter='PR Curve of Random Forest {}'.format,\r\n\t\t\t\t\t\t\t\troc_title_formatter='ROC Curve of Random Forest {}'.format,\r\n\t\t\t\t\t\t\t\tmodel_export_path_formatter='temp/sklearn_test/rf/ca/rf_fold_{}.m'.format,\r\n\t\t\t\t\t\t\t\tpr_export_path_formatter='temp/sklearn_test/rf/ca/pr_curve_fold_{}.png'.format,\r\n\t\t\t\t\t\t\t\troc_export_path_formatter='temp/sklearn_test/rf/ca/roc_curve_fold_{}.png'.format,\r\n\t\t\t\t\t\t\t\tevaluation_export_path='temp/sklearn_test/rf/ca/rf_eval.json')\r\n\r\n\r\ndef test_model(models, model_name, mode='train_kd', test_X=None, question_ids=None, option_ids=None, **kwargs):\r\n\tif test_X is None or question_ids is None or option_ids is None:\r\n\t\targs = load_args(DatasetConfig)\r\n\t\tfor key, value in kwargs.items():\r\n\t\t\targs.__setattr__(key, value)\t\r\n\t\t_, _, (test_X, question_ids, option_ids) = generate_dataset_for_sklearn(args=args, mode=mode)\r\n\t\r\n\r\n\tpredicts = []\r\n\tfor model in models:\r\n\t\tpredict_y = model.predict(test_X)\r\n\t\tprint(len(predict_y))\r\n\t\tpredicts.append(predict_y)\r\n\t\t\r\n\tanswer = {}\t\r\n\t\r\n\tprint(len(question_ids))\r\n\tprint(len(option_ids))\r\n\t\r\n\t\r\n\tfor i, (question_id, option_id) in enumerate(zip(question_ids, option_ids)):\r\n\t\t\r\n\t\tcount_0 = 0\r\n\t\tcount_1 = 0\r\n\t\tfor predict in predicts:\r\n\t\t\tif predict[i] == 0:\r\n\t\t\t\tcount_0 += 1\r\n\t\t\telif predict[i] == 1:\r\n\t\t\t\tcount_1 += 1\r\n\t\t\telse:\r\n\t\t\t\tassert False\r\n\t\t\r\n\t\tif count_1 > 
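# Editor's note (illustrative): the fold-vote loop in test_model above tallies 0/1
# predictions by hand with two counters; collections.Counter expresses the same
# majority vote (ties resolve to 0, matching the original count_1 > count_0 check).
from collections import Counter

def majority_vote(predictions):
    # predictions: iterable of 0/1 labels, one per fold model.
    counts = Counter(predictions)
    return 1 if counts[1] > counts[0] else 0

assert majority_vote([1, 1, 0]) == 1 and majority_vote([0, 1]) == 0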
count_0:\r\n\t\t\t# 投票\r\n\t\t\tif question_id in answer:\r\n\t\t\t\tanswer[question_id].append(option_id)\r\n\t\t\telse:\r\n\t\t\t\tanswer[question_id] = [option_id]\r\n\t\t\r\n\t\telse:\r\n\t\t\tif question_id in answer:\r\n\t\t\t\tpass\r\n\t\t\telse:\r\n\t\t\t\tanswer[question_id] = []\t\t\t\r\n\t\r\n\t# 导出答案\r\n\twith open(os.path.join(TEMP_DIR, f'answer_{model_name}_{mode.split(\"_\")[-1]}.json'), 'w', encoding='utf8') as f:\r\n\t\tjson.dump(answer, f, indent=4)\r\n\t\t\r\n\treturn answer\r\n\r\nfor mode in ['train_kd', 'train_ca']:\r\n\tkwargs = {\r\n\t\t'use_reference'\t: True,\r\n\t\t'num_best': 32,\r\n\t}\r\n\targs = load_args(DatasetConfig)\r\n\tfor key, value in kwargs.items():\r\n\t\targs.__setattr__(key, value)\t\r\n\tdataset_X, dataset_y, (test_X, question_ids, option_ids) = generate_dataset_for_sklearn(args=args, mode=mode)\r\n\t# for model_name in ['lr', 'dt', 'rf']:\r\n\tfor model_name in ['rf']:\r\n\t\tprint('*' * 64)\r\n\t\tprint(mode, model_name)\r\n\t\tprint('*' * 64)\r\n\t\t\r\n\t\t# train_model(model_name=model_name, mode=mode, dataset_X=dataset_X, dataset_y=dataset_y, test_X=test_X, **kwargs)\r\n\t\t\r\n\t\tmodels = []\r\n\t\tfor fold in range(1, 6):\r\n\t\t\tmodel = joblib.load(f'temp/sklearn_test/{model_name}/{mode.split(\"_\")[-1]}/rf_fold_{str(fold).zfill(2)}.m')\r\n\t\t\tmodels.append(model)\r\n\t\t\t\r\n\t\ttest_model(models=models, model_name=model_name, mode=mode, test_X=test_X, question_ids=question_ids, option_ids=option_ids, **kwargs)\r\n\t\t\t\r\n\t\t\r\n","repo_name":"helloliping/CAIL2021","sub_path":"sklearn_script.py","file_name":"sklearn_script.py","file_ext":"py","file_size_in_byte":14974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5776711963","text":"from RGT.XML.SVG.Filters.baseFilterNode import BaseFilterNode\r\nfrom types import StringType\r\nfrom RGT.XML.SVG.basicSvgNode import BasicSvgNode\r\n\r\n\r\nclass FeCompositeNode(BaseFilterNode):\r\n svgNodeType = BasicSvgNode.SVG_FE_COMPOSITE_NODE\r\n\r\n ATTRIBUTE_IN = 'in'\r\n ATTRIBUTE_IN2 = 'in2'\r\n ATTRIBUTE_OPERATOR = 'operator'\r\n ATTRIBUTE_K1 = 'k1'\r\n ATTRIBUTE_K2 = 'k2'\r\n ATTRIBUTE_K3 = 'k3'\r\n ATTRIBUTE_K4 = 'k4'\r\n\r\n def __init__(self, ownerDoc):\r\n BaseFilterNode.__init__(self, ownerDoc, 'feComposite')\r\n self._allowedSvgChildNodes.update({self.SVG_ANIMATE_NODE, self.SVG_SET_NODE})\r\n\r\n def setIn(self, data):\r\n if data is not None:\r\n if type(data) is not StringType:\r\n data = str(data)\r\n self._setNodeAttribute(self.ATTRIBUTE_IN, data)\r\n\r\n def setIn2(self, data):\r\n if data is not None:\r\n if type(data) is not StringType:\r\n data = str(data)\r\n self._setNodeAttribute(self.ATTRIBUTE_IN2, data)\r\n\r\n def setOperator(self, data):\r\n allowedValues = ['over', 'in', 'out', 'atop', 'xor', 'arithmetic']\r\n\r\n if data is not None:\r\n if type(data) is not StringType:\r\n data = str(data)\r\n\r\n if data not in allowedValues:\r\n values = ''\r\n for value in allowedValues:\r\n values += value + ', '\r\n values = values[0: len(values) - 2]\r\n raise ValueError('Value not allowed, only ' + values + 'are allowed')\r\n else:\r\n self._setNodeAttribute(self.ATTRIBUTE_OPERATOR, data)\r\n\r\n def setK1(self, data):\r\n if data is not None:\r\n if type(data) is not StringType:\r\n data = str(data)\r\n self._setNodeAttribute(self.ATTRIBUTE_K1, data)\r\n\r\n def setK2(self, data):\r\n if data is not None:\r\n if type(data) is not StringType:\r\n data = str(data)\r\n self._setNodeAttribute(self.ATTRIBUTE_K2, 
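# Editor's note (illustrative refactor sketch, not part of the RGT codebase): the
# setIn/setK1..setK4 setters in the feCompositeNode record above are identical apart
# from the attribute name. Assuming a node object exposing _setNodeAttribute as shown,
# one helper can serve them all:
def _set_str_attribute(node, attribute_name, data):
    # Mirrors the repeated pattern: skip None, coerce everything else to str.
    if data is not None:
        node._setNodeAttribute(attribute_name, str(data))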
data)\r\n\r\n def setK3(self, data):\r\n if data is not None:\r\n if type(data) is not StringType:\r\n data = str(data)\r\n self._setNodeAttribute(self.ATTRIBUTE_K3, data)\r\n\r\n def setK4(self, data):\r\n if data is not None:\r\n if type(data) is not StringType:\r\n data = str(data)\r\n self._setNodeAttribute(self.ATTRIBUTE_K4, data)\r\n\r\n def getIn(self):\r\n node = self._getNodeAttribute(self.ATTRIBUTE_IN)\r\n if node is not None:\r\n return node.nodeValue\r\n return None\r\n\r\n def getIn2(self):\r\n node = self._getNodeAttribute(self.ATTRIBUTE_IN2)\r\n if node is not None:\r\n return node.nodeValue\r\n return None\r\n\r\n def getOperator(self):\r\n node = self._getNodeAttribute(self.ATTRIBUTE_OPERATOR)\r\n if node is not None:\r\n return node.nodeValue\r\n return None\r\n\r\n def getK1(self):\r\n node = self._getNodeAttribute(self.ATTRIBUTE_K1)\r\n if node is not None:\r\n return node.nodeValue\r\n return None\r\n\r\n def getK2(self):\r\n node = self._getNodeAttribute(self.ATTRIBUTE_K2)\r\n if node is not None:\r\n return node.nodeValue\r\n return None\r\n\r\n def getK3(self):\r\n node = self._getNodeAttribute(self.ATTRIBUTE_K3)\r\n if node is not None:\r\n return node.nodeValue\r\n return None\r\n\r\n def getK4(self):\r\n node = self._getNodeAttribute(self.ATTRIBUTE_K4)\r\n if node is not None:\r\n return node.nodeValue\r\n return None","repo_name":"danrg/RGT-tool","sub_path":"src/RGT/XML/SVG/Filters/feCompositeNode.py","file_name":"feCompositeNode.py","file_ext":"py","file_size_in_byte":3634,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"61"} +{"seq_id":"42984009305","text":"# coding=utf-8\nimport random\n\nfrom even import myEven, isEven\nfrom fifo import FIFO1, FIFO2\nfrom sort import quicksort, countingsort\nimport time\n\nif __name__ == '__main__':\n\n print(\"------------------\\n\")\n print(\"\\tЧётность числа\\n\")\n print(\"Проверка работы \\n\")\n\n print('10 - чётное число: ' + str(myEven(10)))\n print('967 - нечётное число: ' + str(myEven(967)))\n\n print(\"\\nПроверка скорости \\n\")\n\n tic_start = time.time()\n myEven(1234567890)\n tic_end = time.time()\n\n print(\"Моя функция чётности: \" + str(tic_end - tic_start))\n\n tic_start = time.time()\n isEven(1234567890)\n tic_end = time.time()\n\n print(\"Не моя функция чётности: \" + str(tic_end - tic_start))\n\n print(\"\\n\\n------------------\\n\")\n print(\"\\tЦиклический буфер 1\\n\")\n print(\"Проверка работы \\n\")\n\n fifo1 = FIFO1(6)\n\n fifo1.push(1)\n fifo1.push(2)\n fifo1.push(3)\n\n print(\"Содержимое буфера: \" + str(fifo1))\n\n print(\"Pop: \" + str(fifo1.pop()))\n\n fifo1.push(4)\n fifo1.push(5)\n fifo1.push(6)\n fifo1.push(7)\n\n print(\"Попытка добавить больше элементов, чем это позволяет размер: \")\n fifo1.push(8)\n\n fifo1.pop()\n fifo1.push(8)\n print(\"Содержимое буфера после всех манипуляций: \" + str(fifo1))\n\n print(\"\\n\\n\\tЦиклический буфер 2\\n\")\n print(\"Проверка работы \\n\")\n\n fifo2 = FIFO2(6)\n\n fifo2.push(1)\n fifo2.push(2)\n fifo2.push(3)\n\n print(\"Содержимое буфера: \" + str(fifo2))\n\n print(\"Pop: \" + str(fifo2.pop()))\n\n fifo2.push(4)\n fifo2.push(5)\n fifo2.push(6)\n fifo2.push(7)\n\n print(\"Попытка добавить больше элементов, чем это позволяет размер: \")\n fifo2.push(8)\n\n fifo2.pop()\n fifo2.push(8)\n print(\"Содержимое буфера после всех манипуляций: \" + str(fifo2))\n\n\n print(\"------------------\\n\")\n print(\"\\tСортировка массивов\\n\")\n print(\"Проверка скорости \\n\")\n\n arr_small_nums = []\n for i in 
range(50000):\n arr_small_nums.append(random.randint(0, 60000))\n\n arr_quick = arr_small_nums\n arr_count = arr_small_nums\n\n tic_start = time.time()\n quicksort(arr_quick)\n tic_end = time.time()\n print(\"Быстрая (маленькие числа): \" + str(tic_end - tic_start))\n\n tic_start = time.time()\n countingsort(arr_count)\n tic_end = time.time()\n print(\"Подсчёт (маленькие числа): \" + str(tic_end - tic_start))\n\n\n arr_large_nums = []\n for i in range(50000):\n arr_large_nums.append(random.randint(0, 6000000))\n\n arr_quick = arr_large_nums\n arr_count = arr_large_nums\n\n tic_start = time.time()\n quicksort(arr_quick)\n tic_end = time.time()\n print(\"\\nБыстрая (большие числа): \" + str(tic_end - tic_start))\n\n tic_start = time.time()\n countingsort(arr_count)\n tic_end = time.time()\n print(\"Подсчёт (большие числа): \" + str(tic_end - tic_start))\n","repo_name":"mordvintsevmv/lesta_pythonTest","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3286,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37867766369","text":"import math\nimport re\n\n# Could enhance code structure through strategy pattern: https://sourcemaking.com/design_patterns/strategy, https://sourcemaking.com/design_patterns/strategy/python/1\n\n\nCODON_NUCLEOTIDE_COUNT = 3\n\n# HYPERMUTATOR_GENES contains all synonyms for genes known to manifest hypermutator phenotypes after being mutated.\n# This list of genes is from 10.1016/S0966-842X(98)01424-3 and synonyms were retrieved from ecocyc.\nHYPERMUTATOR_GENES = {\n \"mutD\", \"dnaQ\", \"mutS\", \"ant\", \"plm\", \"fdv\",\n \"mutL\", \"mutH\", \"mutR\", \"topB\", \"prv\", 'uvrD',\n \"uvr502\", \"srjC\", \"uvrE\", \"dar-2\", \"dda\",\n \"mutU\", \"pdeB\", \"rad\", \"recL\", \"mutM\",\n \"fpg\", \"mutY\", \"micA\", \"mutT\", \"nudA\",\n \"dnaE\", \"polC\", \"sdgC\", 'polA', \"resA\",\n \"mutA\", \"glyV\", \"mutC\", \"glyW\", \"ins\",\n \"dam\", \"miaA\", \"trpX\", \"sodA\", \"sodB\",\n \"oxyR\" # We've had this one mutate in ALE experiments and not cause hypermutation, so don't include unless also checking if ALE is an outlier with mutation counts\n \"nth\", \"nei\", \"xthA\", \"xth\", \"nfo\",\n \"ung\", \"vsr\", \"ada\", \"ogt\", \"recA\",\n \"zab\", \"umuB\", \"tif\", \"lexB\", \"recH\", \"rnmB\", \"srf\",\n \"recG\", \"radC\", \"spoV\", \"ssb\", \"exrB\", \"lexC\", \"hns\"\n # We've had this one mutate in ALE experiments and not cause hypermutation, so don't include unless also checking if ALE is an outlier with mutation counts\n}\n\n\ndef get_mutated_hypermutator_genes(m):\n mutated_hypermutator_genes = set()\n if m[\"coding\"]: # Mutation affect a gene's nucleotides\n mutated_genes_str = m[\"Gene\"]\n mutated_genes = set(get_clean_mut_gene_list(mutated_genes_str))\n mutated_hypermutator_genes = mutated_genes & HYPERMUTATOR_GENES\n return mutated_hypermutator_genes\n\n\ndef get_MOB_type_str(MOB_seq_change_str):\n MOB_type_str = \"\"\n MOB_seq_change_str = MOB_seq_change_str.replace(u'\\xa0', u' ')\n MOB_str_ID = \"\"\n if \"IS\" in MOB_seq_change_str:\n MOB_str_ID = \"IS\"\n elif \"REP\" in MOB_seq_change_str:\n MOB_str_ID = \"REP\"\n\n if MOB_str_ID != \"\":\n IS_str_start_idx = MOB_seq_change_str.find(MOB_str_ID)\n IS_str_end_idx = MOB_seq_change_str.find(' ', IS_str_start_idx)\n MOB_type_str = MOB_seq_change_str[IS_str_start_idx:IS_str_end_idx]\n\n return MOB_type_str\n\n\n# If the DEL or INS is a multiple of 3, no matter what the position of the mutation within a frame,\n# 
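# [Illustrative caveat, not part of the surrounding dataset records] In the benchmark
# script above, arr_quick and arr_count are bound to the same list object. If
# quicksort() from the repository's sort module mutates its argument in place (an
# assumption; that implementation is not shown here), the counting-sort timing then
# runs on already-sorted input. Independent copies keep the two timings comparable:
arr_quick = list(arr_small_nums)  # separate copy for the quicksort timing
arr_count = list(arr_small_nums)  # separate copy for the counting-sort timing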
those frames downstream of the mutation will remaining in sync with the gene's coding frame.\n# TODO: check if the remenants of combined frames result in a stop codon.\ndef is_frameshift(nuc_shift_size):\n return nuc_shift_size % 3 != 0\n\n\n# TODO: pseudogenes are coding, therefore should update this\n# and use a separate function for identifying pseudogenes.\ndef is_coding_mut(mut_details_str):\n is_coding = True\n noncoding_term_l = [\"intergenic\", \"noncoding\", \"pseudogene\"]\n if any(s in mut_details_str for s in noncoding_term_l):\n is_coding = False\n return is_coding\n\n\ndef is_genetic_mut(mut_details_str):\n is_genetic_mut = True\n nongenetic_term_l = [\"intergenic\"]\n if any(s in mut_details_str for s in nongenetic_term_l):\n is_genetic_mut = False\n return is_genetic_mut\n\n\ndef get_inv_size(seq_change_str):\n inv_size = 0\n seq_change_str = seq_change_str[:seq_change_str.find('bp')]\n inv_size = int(''.join([i for i in seq_change_str if i.isdigit()]))\n return inv_size\n\n\ndef get_con_size(seq_change_str):\n con_size = 0\n seq_change_str = seq_change_str[:seq_change_str.find('→')]\n con_size = int(''.join([i for i in seq_change_str if i.isdigit()]))\n return con_size\n\n\ndef get_MOB_size(seq_change_str):\n con_size = 0\n seq_change_str = seq_change_str[:seq_change_str.find('→')]\n con_size = int(''.join([i for i in seq_change_str if i.isdigit()]))\n return con_size\n\n\ndef get_sub_size(seq_change_str):\n sub_size = 0\n before_seq_change_str = seq_change_str[:seq_change_str.find('→')]\n before_seq_change_size = int(\n ''.join([i for i in before_seq_change_str if i.isdigit()]))\n after_seq_change_str = seq_change_str[seq_change_str.find('→') + 1:]\n if \"bp\" in after_seq_change_str:\n after_seq_change_size = int(\n ''.join([i for i in after_seq_change_str if i.isdigit()]))\n # expecting these types of substitutions to always shrink in size\n sub_size = before_seq_change_size - after_seq_change_size\n else: # no change in size, but multiple base pairs replaced.\n sub_size = before_seq_change_size\n return sub_size\n\n\ndef _get_before_after_seq_change_size(seq_change_str):\n before_after_seq_change_size = 0\n if '→' in seq_change_str:\n s = seq_change_str[:seq_change_str.find('→')]\n before_size_str = ''.join([i for i in s if i.isdigit()])\n s = seq_change_str[seq_change_str.find('→') + 1:]\n after_size_str = ''.join([i for i in s if i.isdigit()])\n before_after_seq_change_size = int(\n before_size_str) - int(after_size_str)\n return before_after_seq_change_size\n\n\ndef get_del_size(seq_change_str):\n del_size = 0\n if 'Δ' in seq_change_str or 'δ' in seq_change_str:\n del_size_str = ''.join([i for i in seq_change_str if i.isdigit()])\n del_size = int(del_size_str)\n if '→' in seq_change_str:\n del_size = _get_before_after_seq_change_size(seq_change_str)\n return del_size\n\n\ndef get_ins_size(seq_change_str):\n ins_size = 0\n if '→' in seq_change_str:\n before_seq_freq = int(\n seq_change_str[seq_change_str.find(')') + 1:seq_change_str.find('→')])\n after_seq_freq = int(seq_change_str[seq_change_str.find('→') + 1:])\n if \"bp\" in seq_change_str:\n seq_size = int(seq_change_str[seq_change_str.find(\n '(') + 1:seq_change_str.find(' bp')])\n else:\n seq_str = seq_change_str[seq_change_str.find(\n '(') + 1:seq_change_str.find(')')]\n seq_size = len(seq_str)\n ins_size = after_seq_freq * seq_size - before_seq_freq * seq_size\n if '+' in seq_change_str:\n if 'bp' in seq_change_str:\n ins_size = int(seq_change_str[seq_change_str.find(\n '+') + 1:seq_change_str.find(' bp')])\n 
else:\n # assuming it's just a sequence being added\n ins_size = len(seq_change_str[seq_change_str.find('+') + 1:])\n return ins_size\n\n\ndef get_amp_size(seq_change_str):\n seq_len_str = seq_change_str[:seq_change_str.find(' bp')]\n seq_len_str = seq_len_str.replace(',', '')\n seq_len = int(seq_len_str)\n multiplicity = int(seq_change_str[seq_change_str.find('x ') + len('x '):])\n seq_insertion_size = (multiplicity - 1) * seq_len\n return seq_insertion_size\n\n\ndef get_codon_pos_chng(codon_chng_str):\n ret_idx = 0\n codon_chng_list = codon_chng_str.split('→')\n # assuming codon string is always going to be 3 long.\n for idx in range(CODON_NUCLEOTIDE_COUNT):\n if codon_chng_list[0][idx] != codon_chng_list[1][idx]:\n ret_idx = idx + 1\n return ret_idx\n\n\ndef is_in_TRN(details_str, gene_str, trn_gene_set):\n is_in_TRN = False\n if \"intergenic\" not in details_str:\n gene_list_str = gene_str\n gene_list_str = gene_list_str.replace('[', '')\n gene_list_str = gene_list_str.replace(']', '')\n gene_set = set(gene_list_str.split(', '))\n for gene in gene_set:\n if gene in trn_gene_set:\n is_in_TRN = True\n return is_in_TRN\n\n\n# from https://en.wikipedia.org/wiki/Start_codon\ndef is_stop_codon(codon_str):\n codon_str = codon_str.lower()\n is_stop_codon = False\n stop_codon_l = [\"taa\", \"tag\", \"tga\"]\n if codon_str in stop_codon_l:\n is_stop_codon = True\n return is_stop_codon\n\n\n# from https://en.wikipedia.org/wiki/Start_codon\ndef is_start_codon(codon_str):\n codon_str = codon_str.lower()\n is_start_codon = False\n start_codon_l = [\"atg\", \"gtg\", \"ttg\", \"att\", \"ctg\"]\n if codon_str in start_codon_l:\n is_start_codon = True\n return is_start_codon\n\n\ndef is_non_syn_SNP(amino_acid_change_str):\n return amino_acid_change_str[0] != amino_acid_change_str[-1]\n\n\ndef get_SNP_aa_pos(amino_acid_change_str):\n return int(re.sub(\"[^0-9]\", \"\", amino_acid_change_str))\n\n\ndef _replace_weirdly_encoded_dash_char(mut_details_str):\n # Breseq's weird encoding for the dash character\n weirdly_encoded_dash_char_1 = '‑'\n weirdly_encoded_dash_char_2 = '‐' # Another Breseq weirdly encoded dash character\n for c in [weirdly_encoded_dash_char_1, weirdly_encoded_dash_char_2]:\n if c in mut_details_str:\n mut_details_str = mut_details_str.replace(c, '-')\n return mut_details_str\n\n\ndef get_DEL_INS_MOB_aa_start_pos(mut_details_str):\n aa_pos = None\n mut_details_str = _replace_weirdly_encoded_dash_char(mut_details_str)\n if len(mut_details_str):\n start_char = '('\n end_char = '/'\n if '-' in mut_details_str:\n end_char = '-'\n nuc_pos = int(mut_details_str[mut_details_str.find(\n start_char) + 1:mut_details_str.find(end_char)])\n aa_pos = math.ceil(nuc_pos / 3)\n return aa_pos\n\n\ndef get_DEL_INS_MOB_nuc_start_pos(mut_details_str):\n mut_details_str = _replace_weirdly_encoded_dash_char(mut_details_str)\n rel_nuc_pos = ''\n if len(mut_details_str):\n start_char = '('\n end_char = '/'\n if '-' in mut_details_str:\n end_char = '-'\n rel_nuc_pos = int(mut_details_str[mut_details_str.find(\n start_char) + 1:mut_details_str.find(end_char)])\n return rel_nuc_pos\n\n\ndef get_DEL_AA_range(mut_details_str):\n mut_details_str = _replace_weirdly_encoded_dash_char(mut_details_str)\n DEL_AA_range = ()\n if len(mut_details_str):\n\n start_char = '('\n end_char = '/'\n start_stop_str = mut_details_str[mut_details_str.find(\n start_char) + 1: mut_details_str.find(end_char)]\n l = start_stop_str.split('-')\n\n ints = [int(x) for x in l]\n if len(l) > 1:\n start_aa_pos = math.ceil(ints[0] / 3)\n 
stop_aa_pos = math.ceil(ints[1] / 3)\n DEL_AA_range = (start_aa_pos, stop_aa_pos)\n else:\n aa_pos_set = math.ceil(ints[0] / 3)\n DEL_AA_range = (aa_pos_set, aa_pos_set)\n return DEL_AA_range\n\n\ndef get_DEL_AA_set(mut_details_str):\n rng = get_DEL_AA_range(mut_details_str)\n # Have to add 1 since also want to consider final position in the range\n return set(range(rng[0], rng[1] + 1))\n\n\ndef get_SUB_AA_range(mut_details_str):\n SUB_AA_range = ()\n mut_details_str = _replace_weirdly_encoded_dash_char(mut_details_str)\n if len(mut_details_str):\n start_char = '('\n end_char = '/'\n sub_str = mut_details_str[mut_details_str.find(\n start_char) + 1:mut_details_str.find(end_char)]\n SUB_AA_range = sub_str.split('-')\n SUB_AA_range[0] = int(SUB_AA_range[0])\n SUB_AA_range[1] = int(SUB_AA_range[1])\n return tuple(SUB_AA_range)\n\n\ndef get_codon_change_list(coding_SNP_details):\n codon_chng_str = get_codon_nuc_chng_str(coding_SNP_details)\n codon_change_list = codon_chng_str.split('→')\n return codon_change_list\n\n\ndef is_premature_stop_codon_SNP(coding_SNP_details):\n is_premature_stop_codon_SNP = False\n aa_chng_str = coding_SNP_details.split()[0]\n codon_chng_list = get_codon_change_list(coding_SNP_details)\n wt_codon = codon_chng_list[0]\n mut_codon = codon_chng_list[1]\n if is_non_syn_SNP(aa_chng_str) and is_stop_codon(mut_codon):\n is_premature_stop_codon_SNP = True\n return is_premature_stop_codon_SNP\n\n\n# Currently not being used;\n# by default, read-through variants are being annotated as \"other\"\n# by predict_mutation_effect_on_feature(...).\ndef is_readthrough_codon_SNP(coding_SNP_details):\n is_readthrough_codon_SNP = False\n aa_chng_str = coding_SNP_details.split()[0]\n codon_change_list = get_codon_change_list(coding_SNP_details)\n wt_codon = codon_change_list[0]\n mut_codon = codon_change_list[1]\n if is_non_syn_SNP(aa_chng_str) \\\n and is_stop_codon(wt_codon) and not is_stop_codon(mut_codon):\n is_readthrough_codon_SNP = True\n return is_readthrough_codon_SNP\n\n\ndef get_clean_mut_gene_list(gene_list_str):\n for s in STRINGS_TO_REMOVE:\n if s in gene_list_str:\n gene_list_str = gene_list_str.replace(s, \"\")\n if \"genes\" in gene_list_str:\n start_idx = gene_list_str.rfind(\"genes\") + len(\"genes\")\n gene_list_str = gene_list_str[start_idx:]\n\n split_str = \",\"\n if '|' in gene_list_str:\n split_str = \"|\"\n\n mut_gene_list = gene_list_str.split(split_str)\n clean_mut_gene_list = [gene for gene in mut_gene_list]\n return clean_mut_gene_list\n\n\ndef is_start_codon_removal(coding_SNP_details):\n is_start_codon_removal = False\n aa_chng_str = coding_SNP_details.split()[0]\n codon_change_list = get_codon_change_list(coding_SNP_details)\n wt_codon = codon_change_list[0]\n mut_codon = codon_change_list[1]\n if get_SNP_aa_pos(aa_chng_str) == 1 and is_start_codon(wt_codon) and not is_start_codon(mut_codon):\n is_start_codon_removal = True\n return is_start_codon_removal\n\n\nSTRINGS_TO_REMOVE = [' ', \">\", \"[\", \"]\", \"\", \"\", \"\", \"\", \"
    \"]\n\n\ndef get_clean_mut_gene_list(gene_list_str):\n for s in STRINGS_TO_REMOVE:\n if s in gene_list_str:\n gene_list_str = gene_list_str.replace(s, \"\")\n if \"genes\" in gene_list_str:\n start_idx = gene_list_str.rfind(\"genes\") + len(\"genes\")\n gene_list_str = gene_list_str[start_idx:]\n\n split_str = \",\"\n if '|' in gene_list_str:\n split_str = \"|\"\n\n mut_gene_list = gene_list_str.split(split_str)\n clean_mut_gene_list = [gene for gene in mut_gene_list]\n return clean_mut_gene_list\n\n\ndef get_gene_count(mut_df_row):\n target_count = 0\n breseq_gene_annot_row = \"Gene\"\n if \"mutation target annotation\" in mut_df_row.keys(): # \"mutation target annotation\" used in AVA\n breseq_gene_annot_row = \"mutation target annotation\"\n gene_list_str = mut_df_row[breseq_gene_annot_row]\n mut_details_str = mut_df_row[\"Details\"]\n if \"intergenic\" in mut_details_str:\n target_count = 1\n else:\n gene_set = set(get_clean_mut_gene_list(gene_list_str))\n target_count = len(gene_set)\n return target_count\n\n\nSTRUCTURAL_LEVEL = 0\nOPERATIONAL_LEVEL = 1\n\n\n# TODO: should check to see if start codons are ever destroyed.\ndef is_truncating_SNP(mut_df_row):\n is_disruptive_SNP = False\n if mut_df_row[\"Mutation Type\"].lower() == \"snp\" and mut_df_row[\"coding\"]:\n if is_premature_stop_codon_SNP(mut_df_row[\"Details\"]) or is_start_codon_removal(mut_df_row[\"Details\"]):\n is_disruptive_SNP = True\n return is_disruptive_SNP\n\n\n# TODO: Not currently checking if pseudogenes are being further disrupted. Pseudogenes can still be translates; need to check if they get further disrupted.\n# TODO: Could return predicted effect of read-through SNPs as \"elongation\", though no clear value with current projects.\ndef predict_mutation_effect_on_feature(mutation, feature):\n pred_eff = \"other\"\n\n if feature[\"feature type\"] != \"unknown\":\n\n # Code block is for SVs\n if mutation[\"Mutation Type\"].lower() in [\"ins\", \"del\", \"mob\"]:\n if feature[\"feature type\"] == \"gene\":\n if is_frameshift(mutation[\"mutation size\"]):\n pred_eff = \"truncation\"\n # any feature (besides \"unknown\") if INS, DEL, or MOB > 10\n elif mutation[\"mutation size\"] >= 10:\n pred_eff = \"truncation\"\n\n # Code block is just for SNPs to genes\n if ((mutation[\"Mutation Type\"].lower() == \"snp\") and (feature[\"feature type\"] == \"gene\")):\n if is_truncating_SNP(mutation):\n pred_eff = \"truncation\"\n else:\n if not is_readthrough_codon_SNP(mutation[\"Details\"]):\n aa_chng_str = mutation[\"Details\"].split()[0]\n if is_non_syn_SNP(aa_chng_str):\n pred_eff = \"nonsynonymous\"\n else:\n pred_eff = \"synonymous\"\n return pred_eff\n\n\n# For direct use with breseq mutations to a gene\ndef predict_mutation_effect_on_gene(mutation):\n return predict_mutation_effect_on_feature(mutation, {\"feature type\": \"gene\"})\n\n\ndef get_mob_size(seq_change_str, mob_sizes):\n mob_mut_size = 0\n\n s = seq_change_str\n s = s.replace(\"::\", '')\n s = s.replace(\"(+)\", '')\n s = s.replace(\"(–)\", '')\n\n for annot_element in s.split(\" \"):\n if annot_element in mob_sizes.keys():\n mob_mut_size += mob_sizes[annot_element]\n else:\n s2 = annot_element\n s2 = s2.replace(\"bp\", '')\n s2 = s2.replace(\" \", '')\n s2 = s2.replace(\"Δ\", '')\n s2 = s2.replace(\"+\", '')\n\n val = 0\n if s2.isdigit():\n val = int(s2)\n else:\n val = len(s2)\n if 'Δ' in annot_element:\n val = (-1) * val\n\n mob_mut_size += val\n\n return mob_mut_size\n\n\n# Currently not returning the size of MOBs. 
Isn't something currently necessary.\ndef get_mut_size(mut_df_row):\n # Currently defaulting everything except for INS and DEL to 0 since don't need them.\n mut_size = 0\n if mut_df_row[\"Mutation Type\"] == \"SNP\":\n mut_size = 1\n elif mut_df_row[\"Mutation Type\"] == \"INS\":\n mut_size = get_ins_size(mut_df_row[\"Sequence Change\"])\n elif mut_df_row[\"Mutation Type\"] == \"DEL\":\n mut_size = get_del_size(mut_df_row[\"Sequence Change\"])\n elif mut_df_row[\"Mutation Type\"] == \"INV\":\n mut_size = get_inv_size(mut_df_row[\"Sequence Change\"])\n elif mut_df_row[\"Mutation Type\"] == \"CON\":\n mut_size = get_inv_size(mut_df_row[\"Sequence Change\"])\n elif mut_df_row[\"Mutation Type\"] == \"SUB\":\n mut_size = get_sub_size(mut_df_row[\"Sequence Change\"])\n elif mut_df_row[\"Mutation Type\"] == \"AMP\":\n mut_size = get_amp_size(mut_df_row[\"Sequence Change\"])\n return mut_size\n\n\n# Returns the range of mutations to nucleotides in mutation region before mutation.\ndef get_original_nuc_mut_range(mut_df_row):\n mut_range = (0, 0)\n if mut_df_row[\"Mutation Type\"] == \"SNP\" \\\n or mut_df_row[\"Mutation Type\"] == \"INS\" \\\n or mut_df_row[\"Mutation Type\"] == \"MOB\" \\\n or mut_df_row[\"Mutation Type\"] == \"AMP\":\n mut_range = (mut_df_row[\"Position\"], mut_df_row[\"Position\"])\n elif mut_df_row[\"Mutation Type\"] == \"DEL\" \\\n or mut_df_row[\"Mutation Type\"] == \"INV\" \\\n or mut_df_row[\"Mutation Type\"] == \"CON\" \\\n or mut_df_row[\"Mutation Type\"] == \"SUB\":\n mut_range = (\n mut_df_row[\"Position\"], mut_df_row[\"Position\"] - 1 + get_mut_size(mut_df_row))\n return mut_range\n\n\ndef get_codon_nuc_chng_str(coding_SNP_details):\n return coding_SNP_details[coding_SNP_details.find(\"(\") + 1:coding_SNP_details.find(\")\")]\n\n\ndef get_coding_SNP_rel_nuc_pos(coding_SNP_details):\n rel_nuc_pos = ''\n codon_chng_str = get_codon_nuc_chng_str(coding_SNP_details)\n codon_nuc_pos = get_codon_pos_chng(codon_chng_str)\n aa_sub_str = coding_SNP_details[:coding_SNP_details.find(' ')]\n aa_sub = get_SNP_aa_pos(aa_sub_str)\n rel_nuc_pos = ((aa_sub - 1) * 3) + codon_nuc_pos\n return rel_nuc_pos\n\n\ndef get_genetic_coding_SNP_nuc_chng(SNP_details):\n nuc_chng = ''\n codon_change_list = get_codon_change_list(SNP_details)\n codon_chng_pos = get_codon_pos_chng(get_codon_nuc_chng_str(SNP_details))\n codon_chng_idx = codon_chng_pos - 1\n nuc_chng = codon_change_list[1][codon_chng_idx]\n return nuc_chng\n\n\ndef get_genetic_noncoding_or_pseudogene_SNP_nuc_chng(genetic_SNP_seq_change):\n return genetic_SNP_seq_change[-1]\n\n\ndef get_ins_seq(seq_change_str):\n ins_seq = ''\n if '→' in seq_change_str:\n before_seq_freq = int(\n seq_change_str[seq_change_str.find(')') + 1:seq_change_str.find('→')])\n after_seq_freq = int(seq_change_str[seq_change_str.find('→') + 1:])\n # TODO: find an example of this type of mutation; not currently sure what to expect with this on.\n if \"bp\" in seq_change_str:\n # rare, more complicated to parse, and currently not occurring for mutation sets of interest, therefore not yet implementing.\n assert False, \"needs to be implemented\"\n else:\n single_ins_seq = seq_change_str[seq_change_str.find(\n '(') + 1:seq_change_str.find(')')]\n ins_seq = single_ins_seq * (after_seq_freq - before_seq_freq)\n if '+' in seq_change_str:\n ins_seq = seq_change_str[seq_change_str.find('+') + 1:]\n return 
ins_seq\n","repo_name":"Aletechdev/aledbmutil","sub_path":"mut.py","file_name":"mut.py","file_ext":"py","file_size_in_byte":20761,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"28631944705","text":"\"\"\"\r\nAuthor: KCKW\r\nDescription: fetch finance data for pandas dataframe and write to Excel\r\n\"\"\"\r\n\r\nimport pandas_datareader.data as web\r\nimport pandas as pd\r\nfrom openpyxl import load_workbook\r\n\r\n\r\nif __name__ == '__main__':\r\n # Define the instruments to download. We would like to see Apple, Microsoft and the S&P500 index.\r\n tickers = ['AAPL', 'AMZN']\r\n # We would like all available data from 01/01/2000 until 12/31/2016.\r\n start_date = '1980-01-01'\r\n end_date = '2020-10-05'\r\n\r\n for ticker in tickers:\r\n # User pandas_reader.data.DataReader to load the desired data. As simple as that.\r\n df = web.DataReader(ticker, 'yahoo', start_date, end_date)\r\n df = df.reset_index()\r\n df['Date'] = df['Date'].dt.date\r\n print(df.columns)\r\n\r\n # write to excel\r\n writer = pd.ExcelWriter(\"stock.xlsx\", engine='openpyxl')\r\n book = load_workbook(\"stock.xlsx\")\r\n\r\n # delete sheet\r\n delete_sheet = [ticker]\r\n for sheetName in book.sheetnames:\r\n if sheetName in delete_sheet:\r\n del book[sheetName]\r\n book.save(\"stock.xlsx\")\r\n\r\n # set write parameter\r\n writer.book = book\r\n writer.sheets = dict((ws.title, ws) for ws in book.worksheets)\r\n\r\n # output to excel\r\n df.to_excel(writer,\r\n sheet_name=ticker, index=False)\r\n\r\n writer.save()\r\n writer.close()\r\n","repo_name":"wongkenny240/finance-mgmt","sub_path":"fetch_market_data.py","file_name":"fetch_market_data.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42184743058","text":"'''\r\nGiven starting and end points, write a Python program to print all even numbers in that given range. 
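# [Illustrative alternative, not part of the surrounding dataset records] The
# fetch_market_data.py record above replaces a worksheet by hand: load_workbook,
# delete the sheet, save, then rebind writer.book. On pandas >= 1.3 the ExcelWriter
# append mode does the same in one step; the file and sheet names below simply
# mirror that record and are otherwise arbitrary.
import pandas as pd

def write_sheet(df: pd.DataFrame, path: str, sheet: str) -> None:
    # mode="a" appends to an existing workbook; if_sheet_exists="replace" drops and
    # rewrites the target sheet, replacing the manual delete-then-save sequence.
    with pd.ExcelWriter(path, engine="openpyxl", mode="a", if_sheet_exists="replace") as writer:
        df.to_excel(writer, sheet_name=sheet, index=False)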
\r\n\r\nExample:\r\nInput: start = 4, end = 15\r\nOutput: 4, 6, 8, 10, 12, 14\r\n\r\nInput: start = 8, end = 11\r\nOutput: 8, 10\r\n\r\n'''\r\nstart=int(input(\"Enter the starting number of range: \"))\r\nend=int(input(\"Enter the ending number of range: \"))\r\n\r\nprint(\"Even numbers are: \")\r\nfor item in range(start,end+1):\r\n if item%2==0:\r\n print(item)","repo_name":"Harshita184Rawat/python","sub_path":"print_even_in_a_range.py","file_name":"print_even_in_a_range.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"27533733249","text":"import numpy as np\nimport pandas as pd\nfrom scipy import stats\n\nfrom sqlutilpy import *\n \nmain_table_cols = ['sourceid','ra','dec','l','b','ks_n_detections']\nmain_string = 't.'+',t.'.join(main_table_cols)\n\nvar_indices = [\"ks_stdev\",\"ks_mad\",\"ks_kurtosis\",\"ks_skew\",\n \"ks_eta\",\"ks_n_epochs\",\n \"ks_stetson_i\",\"ks_stetson_j\",\"ks_stetson_k\",\n \"ks_p100\",\"ks_p0\",\"ks_p99\",\"ks_p1\",\"ks_p95\",\"ks_p5\",\n \"ks_p84\",\"ks_p16\",\"ks_p75\",\"ks_p25\"]\nvar_string = 's.'+',s.'.join(var_indices)\n\nphot_stats = [\"ks_b_ivw_err_mag\", \"j_b_ivw_mean_mag\", \"h_b_ivw_mean_mag\", \n \"ks_b_ivw_mean_mag\", \"ks_n_b_phot\"]\nphot_string = 'y.'+',y.'.join(phot_stats)\n\ndef pct_diff(dataV):\n \n for p in [[75,25],[84,16],[95,5],[99,1],[100,0]]:\n dataV['ks_p%i_p%i' % (p[0], p[1])] = dataV['ks_p%i' % p[0]] - dataV['ks_p%i' % p[1]]\n \n return dataV\n\ndef error_ratios(dataV):\n \n dataV['ks_mean_error'] = dataV['ks_b_ivw_err_mag'] * np.sqrt(dataV['ks_n_b_phot'])\n \n for p in [\"ks_stdev\", \"ks_mad\"]:\n dataV[p+'_over_error'] = dataV[p] / dataV['ks_mean_error']\n for p in [[75,25],[84,16],[95,5],[99,1],[100,0]]:\n dataV['ks_p%i_p%i' % (p[0], p[1]) + '_over_error'] = dataV['ks_p%i_p%i' % (p[0], p[1])]/dataV['ks_mean_error']\n \n return dataV\n\ndef fix_stetson_J(dataV):\n dataV['ks_stetson_j'] /= (dataV['ks_n_epochs']-1)\n return dataV\n\ndef preprocess_data(dataV):\n dataV = pct_diff(dataV)\n dataV = error_ratios(dataV)\n dataV = fix_stetson_J(dataV)\n return dataV\n\n# def cm_virac_stats_table(data, config):\n# \"\"\"\n# Crossmatch of VIRAC ids with the variability indices (and VIRAC2 table)\n# ---\n# input: data = (sourceid)\n \n# return: VIRAC2 variability indices\n \n# \"\"\"\n \n# dataV = pd.DataFrame(sqlutil.local_join(\"\"\"\n# select {0}, y.j_b_ivw_mean_mag, y.h_b_ivw_mean_mag, y.ks_b_ivw_mean_mag, {1} from mytable as m\n# inner join leigh_smith.virac2 as t on t.sourceid=m.sourceid\n# inner join leigh_smith.virac2_photstats as y on t.sourceid=y.sourceid\n# inner join leigh_smith.virac2_var_indices as s on t.sourceid=s.sourceid order by m.xid\"\"\".format(\n# main_string,var_string),\n# 'mytable',(data['virac2_id'].values,np.arange(len(data))),('sourceid','xid'),**config.wsdb_kwargs))\n \n# dataV = pct_diff(dataV)\n \n# return pd.merge(data, dataV, left_on='virac2_id', right_on='sourceid', how='right')\n","repo_name":"thomasmolnar/virac_classifier","sub_path":"interface_utils/add_stats.py","file_name":"add_stats.py","file_ext":"py","file_size_in_byte":2536,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"34748606523","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 26 16:50:49 2019\n\n@author: Himanshu Rathore\n\"\"\"\n\n# Importing the libraries\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# Importing the dataset\ndataset = 
pd.read_csv('bluegills.csv')\nfeatures = dataset.iloc[:, 0:1].values\nlabels = dataset.iloc[:, -1].values\n\n# visualizing the dataset\nplt.scatter(features, labels)\n\n# Fitting Linear Regression to the dataset\nfrom sklearn.linear_model import LinearRegression\nlin_reg = LinearRegression()\nlin_reg.fit(features, labels)\n\n# Visualising the Linear Regression results\nplt.scatter(features, labels, color = 'red')\nplt.plot(features, lin_reg.predict(features), color = 'blue')\nplt.title('Linear Regression')\nplt.xlabel('Age')\nplt.ylabel('Length')\nplt.show()\n\n# Fitting Polynomial Regression to the dataset\nfrom sklearn.preprocessing import PolynomialFeatures\npoly_object = PolynomialFeatures(degree = 2)\nfeatures_poly = poly_object.fit_transform(features)\nquad_reg = LinearRegression()\nquad_reg.fit(features_poly, labels)\n\n# Visualising the Polynomial Regression results\nplt.scatter(features, labels, color = 'red')\nplt.plot(features, quad_reg.predict(poly_object.fit_transform(features)), color = 'blue')\nplt.title('Polynomial Regression')\nplt.xlabel('Age')\nplt.ylabel('Length')\nplt.show()\n\n# scikit-learn predict/transform expect a 2D array: [[5]] is one sample with one feature\nprint (\"Predicting result with Polynomial Regression\")\nprint (quad_reg.predict(poly_object.transform([[5]])))\n\nprint (\"Predicting result with Linear Regression\")\nprint (lin_reg.predict([[5]]))","repo_name":"himanshu2922t/FSDP_2019","sub_path":"DAY-17/bluegills.py","file_name":"bluegills.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"7154330972","text":"import random\nimport itertools \nfrom pprint import pprint \nimport numpy as np\nimport pandas as pd \nfrom sklearn.utils import shuffle\nimport sklearn.metrics \nimport traceback\nimport sys\nfrom tqdm import tqdm\nimport os\n\n'''\nwe will use thread pool to make the test processes faster for now we use only one thread and that makes the process slow \nwhere it can be much faster it will use SIMD principles \n'''\nimport concurrent.futures\n\n\ndef _safe_predict(tagger, sentence, predictions, idx):\n    '''\n    changed for the async execution will apply the sentence prediction to the sentence id in the vector\n    :param predictions: vector of predictions\n    :param idx: id in the vector\n    :return: True if prediction is ok false if an exception was thrown from the tagger\n    '''\n    try:\n        print(\"start working on sentence idx {}\".format(idx))\n        predictions[idx] = tagger.predict(sentence)\n        print(\"finished working on sentence idx {}\".format(idx))\n        return idx, True\n    except Exception as e:\n        print(\"finished working on sentence idx {}\".format(idx))\n        exc_type, exc_value, exc_traceback = sys.exc_info()\n        print('tagger crashed over the following input sentence with the following exception:\\n{}\\n{} {}'.format(sentence, exc_type, exc_value))\n        #print(traceback.format_exc())\n        return idx, False\n\ndef _safe_bulk_predict(tagger, sentences):\n    '''\n    this function now works asynchronously it allocates array of sentences and sends the sentence to a worker\n    from the worker the prediction will be applied\n    :param tagger: tagger under test\n    :param sentences: sentences to tag\n    :return: the predictions array and failed predictions\n    '''\n\n    failed_predictions = 0\n    predictions = [[]]*len(sentences) #preallocate the whole prediction array\n\n    with concurrent.futures.ThreadPoolExecutor(max_workers=int(os.cpu_count()/2)) as executor:\n        print(\"starting prediction cycle with {} workers\".format(executor._max_workers))\n        future_list = 
{executor.submit(_safe_predict, tagger, sentence, predictions, idx) for idx, sentence in enumerate(tqdm(sentences))}\n for future in concurrent.futures.as_completed(future_list):\n res = future.result()\n if not res[1]:\n failed_predictions += 1\n \n return predictions, failed_predictions\n \n \ndef _evaluate(tagger, test_set):\n ''' evaluates a given trained tagger, through a given test set '''\n \n print('predicting with the tagger ...')\n \n test_set_input = list(map(lambda sentence: list(map(lambda entry: entry[0], sentence)), test_set))\n predictions, failed = _safe_bulk_predict(tagger, test_set_input)\n \n # evaluation\n \n print('computing the evaluation ...')\n \n segments = 0\n correct = 0\n failed_sentences = 0\n \n for sentence_idx in range(len(test_set)):\n sentence = test_set_input[sentence_idx]\n prediction = predictions[sentence_idx]\n gold = list(map(lambda entry: entry[1], test_set[sentence_idx]))\n \n segments += len(sentence)\n \n if prediction is None:\n failed_sentences += 1\n else:\n for idx in range(len(sentence)):\n segment = sentence[idx]\n segment_prediction = prediction[idx]\n segment_gold = gold[idx]\n \n if segment_prediction == segment_gold:\n correct += 1\n \n token_accuracy = correct / segments\n print(f'sentences: {len(test_set)}')\n print(f'failed sentences: {failed_sentences}')\n print(f'token accuracy: {token_accuracy:.4f}') \n \n return token_accuracy \n \n\ndef model_driver_12(tagger_class_under_test, annotated_sentences, passes=3, split=0.1):\n\n ''' drives a given tagger implementation through training and evaluation '''\n\n assert 0 < split < 0.3, \"the split argument value should be a proportion for the test set\"\n\n token_accuracies = []\n \n for cross_validation_pass in range(passes):\n \n print()\n print(f'starting cross-validation pass {cross_validation_pass}')\n print('rebuilding the tagger under test')\n\n tagger_under_test = tagger_class_under_test()\n\n # get a train-test split\n shuffled = annotated_sentences\n for _ in range(10):\n shuffled = shuffle(shuffled)\n\n '''\n we want to cut the size of the data to train and predict on due to the calculation time and memory \n we will take 10000 sentences to the data set , and will take 1/3 of the test set \n '''\n dataset_size = 10000\n split_index = int((dataset_size*split)/3)\n test_set = shuffled[:split_index]\n \n train_set = shuffled[split_index:dataset_size]\n\n print(f'train set size: {len(train_set)} sentences')\n print(f'test set size: {len(test_set)} sentences')\n\n tagger = tagger_under_test.train(train_set)\n\n token_accuracies.append(_evaluate(tagger, test_set))\n\n \n ## final statistics across the cross validation passes\n\n final_token_accuracy = np.average(token_accuracies)\n std = np.std(token_accuracies)\n\n print()\n print('======================================')\n print('Final Cross-Validation Token Accuracy:')\n print(f'{final_token_accuracy:.3f} (std: {std:.5f})')\n","repo_name":"calebxyz/ONLP-12","sub_path":"submission-zip-structure/ID_NUM+ID_NUM/A/model_driver_12.py","file_name":"model_driver_12.py","file_ext":"py","file_size_in_byte":5426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71019259396","text":"import heapq\nINF = int(1e9)\nN,E = list(map(int, input().split()))\ngraph = [[] for i in range(N+1)]\nfor i in range(E):\n a,b,c = list(map(int, input().split()))\n graph[a].append((b,c))\n graph[b].append((a,c))\n\nv1,v2 = list(map(int, input().split())) #반드시 거쳐야 하는 정점\n\ndef dikstra(start):\n dist = 
[INF]*(N+1)\r\n    q = []\r\n    heapq.heappush(q,(0,start)) # push (distance, node) so the heap pops the closest node first\r\n    dist[start] = 0\r\n    while q:\r\n        weight, now = heapq.heappop(q)\r\n        if(weight > dist[now]):\r\n            continue\r\n        for nk, wk in graph[now]:\r\n            if(wk+weight < dist[nk]):\r\n                heapq.heappush(q,(wk+weight,nk))\r\n                dist[nk] = wk+weight\r\n\r\n    return dist\r\n\r\ndist1 = dikstra(1)\r\ndist2 = dikstra(v1)\r\ndist3 = dikstra(v2)\r\n\r\n# path 1 (0 -> v1 -> v2 -> N)\r\npath1 = dist1[v1] + dist2[v2] + dist3[N]\r\npath2 = dist1[v2] + dist3[v1] + dist2[N]\r\n\r\n\r\nm = min(path1, path2)\r\nif(m > INF):\r\n    print(-1)\r\nelse: print(m)","repo_name":"algojunior/sunjungAn","sub_path":"Graph Theorem/1504_gold.py","file_name":"1504_gold.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38573479860","text":"import random\n\nimport numpy as np\n\nfrom bots import Random, SimpleEater\nfrom constants import UP, RIGHT\nfrom game import Game, RoundType\nfrom game import Snake\n\n\ndef test_snake_sequence():\n    head = np.array([5, 6])\n    snake = Snake(0, np.array([head, head + UP, head + UP + RIGHT]))\n    assert len(snake) == 3\n    assert np.array_equal(snake[0], [5, 6])\n    assert np.array_equal(snake[1], [5, 7])\n    assert np.array_equal(snake[2], [6, 7])\n\n\ndef test_snake_grow():\n    head = np.array([5, 6])\n    snake = Snake(0, np.array([head]))\n    snake.move(UP)\n    assert len(snake) == 1\n    assert np.array_equal(snake[0], [5, 7])\n\n    snake.move(UP, grow=True)\n    assert len(snake) == 2\n    assert np.array_equal(snake[0], [5, 8])\n    assert np.array_equal(snake[1], [5, 7])\n\n    snake.move(UP)\n    assert len(snake) == 2\n    assert np.array_equal(snake[0], [5, 9])\n    assert np.array_equal(snake[1], [5, 8])\n\n\ndef test_snake_collide():\n    head = np.array([5, 6])\n    snake = Snake(0, np.array([[5, 6], [5, 7], [6, 7]]))\n    assert snake.collides(np.array([5, 6]))\n    assert snake.collides(np.array([5, 7]))\n    assert snake.collides(np.array([6, 7]))\n    assert not snake.collides(np.array([6, 6]))\n\n\ndef test_game_snake_dies():\n    random.seed(1)\n    \"\"\"\n    A grid where one of the snakes can move only 1 tile, while the other can move multiple times. 
Snake 1 will win.\n \"\"\"\n grid_size = (3, 3)\n agents = {0: Random(id=0, grid_size=grid_size), 1: Random(id=1, grid_size=grid_size)}\n \"\"\"\n | 0 1|\n |0 0 1|\n |0 0 |\n \"\"\"\n snakes = [Snake(id=0, positions=np.array([\n [1, 2],\n [1, 1],\n [0, 1],\n [0, 0],\n [1, 0],\n ])), Snake(id=1, positions=np.array([\n [2, 1],\n [2, 2],\n ]))]\n game = Game(grid_size=grid_size, agents=agents, round_type=RoundType.SIMULTANEOUS, snakes=snakes, candies=[])\n assert not game.finished()\n game.update()\n assert not game.finished()\n game.update()\n assert game.finished()\n assert game.scores[0] == 2 # snake 0 dies, so second place\n assert game.scores[1] == 1\n\n\ndef test_game_snake_eats():\n grid_size = (3, 3)\n agents = {0: SimpleEater(id=0, grid_size=grid_size), 1: SimpleEater(id=1, grid_size=grid_size)}\n \"\"\"\n |0 |\n |0 * |\n | 1 1|\n \"\"\"\n snakes = [Snake(id=0, positions=np.array([\n [0, 1],\n [0, 2],\n ])), Snake(id=1, positions=np.array([\n [2, 0],\n [1, 0],\n ]))]\n candies = [np.array([1, 1])]\n game = Game(grid_size=grid_size, agents=agents, round_type=RoundType.SIMULTANEOUS, snakes=snakes, candies=candies)\n assert not game.finished()\n game.update()\n assert not game.finished()\n assert (len(game.snakes[0]) == 3)\n assert (len(game.snakes[1]) == 2)\n","repo_name":"Rayman/coding-challenge-snakes","sub_path":"test_game.py","file_name":"test_game.py","file_ext":"py","file_size_in_byte":2766,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"7987816649","text":"from math import sqrt\n\nclass Point:\n def __init__(self, x=0, y=0):\n self.x = x\n self.y = y\n\n def move(self, move_x=0, move_y=0):\n self.x += move_x\n self.y += move_y\n\n def length(self, new_point):\n dist = round(sqrt((new_point.x-self.x)**2\n +(new_point.y-self.y)**2),2)\n return dist\n\n\nfirst_point = Point(2, -7)\nsecond_point = Point(7, 9)\nprint(first_point.length(second_point))\nprint(second_point.length(first_point))\n","repo_name":"ivangotovets/algorithms","sub_path":"Python Handbook/5.1 Классы, поля, методы/расстояние между точками.py","file_name":"расстояние между точками.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71106361793","text":"if __name__ == \"__main__\":\n from AthenaConfiguration.AllConfigFlags import initConfigFlags\n from AthenaConfiguration.ComponentAccumulator import printProperties\n from AthenaConfiguration.MainServicesConfig import MainServicesCfg\n from AthenaCommon.Logging import logging\n # import os\n\n flags = initConfigFlags()\n flags.Detector.GeometryCalo = False\n flags.Detector.GeometryMuon = False\n flags.Concurrency.NumThreads = 1\n flags.Concurrency.NumConcurrentEvents = 1\n flags.DQ.useTrigger = False\n flags.ITk.doTruth = False\n flags.Exec.MaxEvents = 2\n flags.Output.HISTFileName = \"ActsMonitoringOutput.root\"\n\n # flags.Input.Files = [\"../mc15_14TeV.600012.PhPy8EG_A14_ttbar_hdamp258p75_nonallhad.recon.RDO.e8185_s3856_r13998/RDO.30640759._000008.pool.root.1\"]\n\n flags.lock()\n\n acc = MainServicesCfg(flags)\n from AthenaPoolCnvSvc.PoolReadConfig import PoolReadCfg\n acc.merge(PoolReadCfg(flags))\n\n from BeamSpotConditions.BeamSpotConditionsConfig import BeamSpotCondAlgCfg\n acc.merge(BeamSpotCondAlgCfg(flags))\n\n from InDetConfig.SiliconPreProcessing import ITkRecPreProcessingSiliconCfg\n acc.merge(ITkRecPreProcessingSiliconCfg(flags))\n\n from InDetConfig.InDetPrepRawDataFormationConfig import 
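# [Illustrative aside, not part of the surrounding dataset records] The Point.length
# method in the record above hand-rolls the Euclidean distance; on Python >= 3.8 the
# standard library's math.dist computes the same value. The check below reuses that
# record's sample points (2, -7) and (7, 9).
import math

assert round(math.dist((2, -7), (7, 9)), 2) == round(math.sqrt((7 - 2) ** 2 + (9 - (-7)) ** 2), 2)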
ITkInDetToXAODClusterConversionCfg\n    acc.merge(ITkInDetToXAODClusterConversionCfg(flags))\n\n    from InDetConfig.SiSpacePointFormationConfig import InDetToXAODSpacePointConversionCfg\n    acc.merge(InDetToXAODSpacePointConversionCfg(flags))\n\n    from InDetConfig.ITkTrackRecoConfig import CombinedTrackingPassFlagSets\n    flags_set = CombinedTrackingPassFlagSets(flags)\n\n    print(flags_set[0].dump())\n\n    from ActsConfig.ActsTrkAnalysisConfig import ActsTrkSeedingAlgorithmAnalysisAlgCfg\n    acc.merge(ActsTrkSeedingAlgorithmAnalysisAlgCfg(flags_set[0]))\n\n    mlog = logging.getLogger(\"SeedingAlgorithmAnalysis\")\n    mlog.info(\"Configuring SeedingAlgorithmAnalysis: \")\n    printProperties(\n        mlog,\n        acc.getEventAlgo(\"ActsTrkSeedingAlgorithmAnalysis\"),\n        nestLevel=2,\n        printDefaults=True,\n    )\n\n    flags.dump()\n\n    # debug printout\n    acc.printConfig(withDetails=True, summariseProps=True)\n\n    # run the job\n    status = acc.run()\n\n    # report the execution status (0 ok, else error)\n    import sys\n    sys.exit(not status.isSuccess())\n","repo_name":"Yusuf-Manjra/athena","sub_path":"Tracking/Acts/ActsTrkAlgs/ActsTrkAnalysis/test/ActsSeedingAlgorithmTest.py","file_name":"ActsSeedingAlgorithmTest.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6909573316","text":"# -*- coding: utf-8 -*-\r\n'''\r\nPython for network engineers Natasha Samoilenko\r\n'''\r\n# Page range 417\r\n# Task 17.1\r\nimport csv\r\nimport re\r\ndef write_dhcp_snooping_to_csv(filenames, output):\r\n    headers = ['switch', 'mac', 'ip', 'vlan', 'interface']\r\n    list_all = []\r\n    list_all.append(headers)\r\n    regex_hostname = (r'^\\S+_(?P<hostname>\\S+)_dhcp\\S+')\r\n    match_hostname = re.search(regex_hostname, filenames)\r\n    hostname = match_hostname.group('hostname')\r\n    with open(filenames, 'r') as src, open(output, 'w') as dst:\r\n        regex = (r'(?P<mac>^\\S+[0-9,a-f,A-F,:])\\s+'\r\n                 r'(?P<ip>\\S+[0-9,.])\\s+\\S+\\s+\\S+\\s+'\r\n                 r'(?P<vlan>\\d+)\\s+'\r\n                 r'(?P<interface>\\S+$)')\r\n        for line in src:\r\n            match = re.search(regex, line)\r\n            if match:\r\n                list_local = [hostname, match.group('mac'), match.group('ip'), match.group('vlan'), match.group('interface')]\r\n                list_all.append(list_local)\r\n        writer = csv.writer(dst)\r\n        writer.writerows(list_all)\r\nif __name__ == '__main__':\r\n    write_dhcp_snooping_to_csv('page417_sw1_dhcp_snooping.txt', 'page417_task1_output.csv')","repo_name":"ivan-kuropiatnyk/python-course-natenka","sub_path":"page417_Task17_1.py","file_name":"page417_Task17_1.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33706748000","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 18 21:42:08 2020\n\n@author: qiangwenxu\n\"\"\"\n\nimport os, sys\nimport urllib.request\nfrom bs4 import BeautifulSoup as bs\nimport csv\nimport json\nimport time\n\n\ndef getSoup(listPage, headers=None):\n    if headers is None:\n        # default resolved at call time; `agents` is defined in __main__ before first use\n        headers = agents['MacMozilla']\n    req = urllib.request.Request(url=listPage, headers=headers)\n    html = urllib.request.urlopen(req).read()\n    soup = bs(html,\"lxml\")\n    return soup\n\n\ndef getPosts(soup, classLabel, entryLabel, url_suffix, end=None):\n    # `end` is currently unused; made optional so the existing 4-argument calls work\n    posts = soup.find(attrs={\"class\":classLabel}).find_all(entryLabel)\n    titles = [p.find('a').get_text() for p in posts]\n    urls = [url_suffix+str(p.find('a').get('href')) for p in posts]\n    return titles, urls\n\ndef getArticle(url, timeLabel, bodyLabel):\n    soup = getSoup(url)\n    articleTime = 
soup.find(attrs={\"class\":\"time\"}).find(attrs={\"id\":\"News_Body_Time\"}).get_text()\n articleBody = soup.find(attrs={\"id\":bodyLabel}).get_text()\n return articleTime, articleBody\n\n\nif __name__ == '__main__':\n section = int(input(\"What section you want to update?\\n[1]重要讲话\"))\n dict_Section = {1: 'zyjh_674906/'}\n \n agents = dict()\n agents['MacMozilla'] = {'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11) AppleWebKit/601.1.56 (KHTML, like Gecko) Version/9.0 Safari/601.1.56'}\n post_suffix = 'https://www.fmprc.gov.cn/web/ziliao_674904/'+dict_Section[section]\n \n with open('last_visited.json', 'rb') as f:\n visited = json.load(f)\n endPost = visited[str(section)]\n\n allTitles, allPosts = [], []\n for n in range(30):\n N_page = '' if n == 0 else '_'+str(n)\n fmprc_zyjh = 'https://www.fmprc.gov.cn/web/ziliao_674904/zyjh_674906/default'+N_page+'.shtml'\n pageSoup = getSoup(fmprc_zyjh)\n pageTitles, pageUrls = getPosts(pageSoup, \"rebox_news\", \"li\", post_suffix)\n allTitles.extend(pageTitles)\n allPosts.extend(pageUrls)\n if endPost in pageSoup: break\n \n fmprc_zyjh_result = []\n for title, url in zip(allTitles, allPosts):\n if url == endPost: break\n articleTime, articleBody = getArticle(url, \"News_Body_Time\", \"News_Body_Txt_A\")\n fmprc_zyjh_result.append([title, articleTime, articleBody])\n time.sleep(0.5)\n\n # 写入爬下来的数据\n with open(\"fmprc_zyjh.csv\",\"w\") as csvfile: \n writer = csv.writer(csvfile)\n # 先写入columns_name\n writer.writerow([\"title\",\"time\",\"body\"])\n # 写入多行 用writerows\n writer.writerows(fmprc_zyjh_result)\n\n visited = json.load(f)\n visited[str(section)] = allPosts[0]\n f.close()\n\n \n ","repo_name":"x13-caesar/NewChinaNewspeak","sub_path":"WebCrawler/ForeignMinistration.py","file_name":"ForeignMinistration.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20421811715","text":"\"\"\"Tests for the Withings calendar.\"\"\"\nfrom datetime import date, timedelta\nfrom http import HTTPStatus\nfrom unittest.mock import AsyncMock\n\nfrom freezegun.api import FrozenDateTimeFactory\nfrom syrupy.assertion import SnapshotAssertion\n\nfrom homeassistant.core import HomeAssistant\n\nfrom . 
import load_workout_fixture\n\nfrom tests.common import MockConfigEntry, async_fire_time_changed\nfrom tests.components.withings import setup_integration\nfrom tests.typing import ClientSessionGenerator\n\n\nasync def test_api_calendar(\n hass: HomeAssistant,\n snapshot: SnapshotAssertion,\n withings: AsyncMock,\n polling_config_entry: MockConfigEntry,\n hass_client: ClientSessionGenerator,\n) -> None:\n \"\"\"Test the API returns the calendar.\"\"\"\n await setup_integration(hass, polling_config_entry, False)\n\n client = await hass_client()\n response = await client.get(\"/api/calendars\")\n assert response.status == HTTPStatus.OK\n data = await response.json()\n assert data == snapshot\n\n\nasync def test_api_events(\n hass: HomeAssistant,\n snapshot: SnapshotAssertion,\n withings: AsyncMock,\n polling_config_entry: MockConfigEntry,\n hass_client: ClientSessionGenerator,\n) -> None:\n \"\"\"Test the Withings calendar view.\"\"\"\n await setup_integration(hass, polling_config_entry, False)\n\n client = await hass_client()\n response = await client.get(\n \"/api/calendars/calendar.henk_workouts?start=2023-08-01&end=2023-11-01\"\n )\n assert withings.get_workouts_in_period.called == 1\n assert withings.get_workouts_in_period.call_args_list[1].args == (\n date(2023, 8, 1),\n date(2023, 11, 1),\n )\n assert response.status == HTTPStatus.OK\n events = await response.json()\n assert events == snapshot\n\n\nasync def test_calendar_created_when_workouts_available(\n hass: HomeAssistant,\n snapshot: SnapshotAssertion,\n withings: AsyncMock,\n polling_config_entry: MockConfigEntry,\n hass_client: ClientSessionGenerator,\n freezer: FrozenDateTimeFactory,\n) -> None:\n \"\"\"Test the calendar is only created when workouts are available.\"\"\"\n withings.get_workouts_in_period.return_value = []\n await setup_integration(hass, polling_config_entry, False)\n\n assert hass.states.get(\"calendar.henk_workouts\") is None\n\n freezer.tick(timedelta(minutes=10))\n async_fire_time_changed(hass)\n await hass.async_block_till_done()\n\n assert hass.states.get(\"calendar.henk_workouts\") is None\n\n withings.get_workouts_in_period.return_value = load_workout_fixture()\n\n freezer.tick(timedelta(minutes=10))\n async_fire_time_changed(hass)\n await hass.async_block_till_done()\n\n assert hass.states.get(\"calendar.henk_workouts\")\n","repo_name":"Clesyde/core-2023.11.1","sub_path":"tests/components/withings/test_calendar.py","file_name":"test_calendar.py","file_ext":"py","file_size_in_byte":2699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1161823417","text":"\"\"\"\nOperadores ternários em python\n\"\"\"\n\nidade = input('Qual sua idade')\n\nif idade.isnumeric():\n idade = int(idade)\n e_de_maior = (idade>=18)\n msg = 'Pode acessar' if e_de_maior else 'Não pode acessar'\n print(msg)\nelse:\n print('Você precisa digitar apenas números')\n\n# if idade >= 18:\n# print('Pode acessar')\n# else:\n# print('Não pode acessar')","repo_name":"IvanSJr/Treinando-Python","sub_path":"python_basic/operadores_ternarios.py","file_name":"operadores_ternarios.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"19471822073","text":"import pandas as pd\nimport numpy as np\nimport re\n\nfrom sklearn.cluster import DBSCAN\n\ndef dataload(file_name):\n\n f_e = file_name.split('.')[-1]\n \n if f_e == 'csv':\n chunk_data = pd.read_csv(file_name, chunksize=1000000)\n data = 
pd.concat([chunk for chunk in chunk_data])\n\n if 'Scan number' in data.columns:\n data = data.astype({'Scan number': 'int'})\n\n return data\n\ndef top1(dataset):\n \n dataset = dataset.drop_duplicates(subset=['Source File', 'Scan number'], keep='first').reset_index(drop=True)\n \n return dataset\n\ndef strip_seq(seq):\n \n # pep = ''.join([_ for _ in seq if ord(_) in range(65, 91)]) # 65 ~ 91 대문자\n pep = re.compile('[^0-9()+-.]')\n \n res = ''.join(pep.findall(seq)).replace('I', 'L')\n res = res.replace('m', 'M')\n \n return res\n\ndef strip_sequence(de_novo, col, col_):\n \n de_novo[col_] = de_novo[col].apply(strip_seq)\n \n return de_novo\n\n\ndef denovo_parsing(de_novo):\n\n print('>> De novo parsing strat ...')\n\n de_novo = de_novo[de_novo['Peptide'].notnull()] # Double Check\n\n ''' \n 1. using strip sequence\n 2. AA I equal to L\n \n '''\n\n de_novo = strip_sequence(de_novo, 'Peptide', 'Sequence')\n \n print('>> De novo parsing done \\n')\n \n return de_novo.sort_values(by=['Source File', 'Scan number']).reset_index(drop=True) # Double Check\n\ndef same_mass(seq):\n \n return seq.replace('I', 'L')\n\ndef i_to_l(db, col, col_):\n \n db[col_] = db[col].apply(same_mass)\n \n return db\n\ndef db_parsing(db):\n \n print('>> DB parsing strat ...')\n \n ''' \n 1. AA I equal to L\n \n '''\n \n db = i_to_l(db, 'GT', 'GT')\n db.sort_values(by=['Source File', 'Scan number']).reset_index(drop=True)\n print('>> DB parsing done \\n')\n \n return db.sort_values(by=['Source File', 'Scan number']).reset_index(drop=True)\n\ndef remove_no_clu_info(dataset):\n \n return dataset.dropna(subset=['cluster']).reset_index(drop=True)\n\ndef clu_plus_charge(dataset):\n \n print('>> Cluster number + Charge ...')\n \n temp = [] \n append = temp.append\n \n for idx, (i, j) in enumerate(dataset[['cluster', 'z']].values):\n \n append(str(i)+'_'+str(int(j)))\n \n return temp\n\ndef clu_num_to_int(clu):\n \n print('>> Change to integer value ...')\n \n ind = 0\n dic = {}\n \n temp2 = set(clu)\n \n for i in temp2:\n if i not in dic:\n dic[i] = ind\n ind += 1\n \n # integer cluster number\n \n temp = [] \n append = temp.append\n \n for i in clu:\n append(dic[i])\n \n return temp\n\ndef change_clu_num(dataset):\n \n print('>> Cluster index pre-processing ...')\n\n '''\n MScluster result does not consider the charge value\n Seperate the cluster by adding charge\n '''\n \n temp = clu_plus_charge(dataset)\n\n \n ''' \n Change the modified cluster number (cluster number + charge) to integer value\n '''\n \n dataset['cluster'] = clu_num_to_int(temp)\n \n return dataset\n\ndef merge(de_novo, cluster, db = None):\n \n if db is not None:\n print('>> De novo + Database searching result merging ...')\n print('>> Making Training, Test set ...')\n df = pd.merge(de_novo, db, how='outer')\n \n print('>> Clustering result merging ... \\n')\n df = pd.merge(df, cluster, how='left')\n \n top_1 = top1(df)\n print('>> Reliable PSM (GT) :', top_1['GT'].notnull().sum(), 'Scans \\n')\n \n # Clustering 정보 없는 것 고려대상에서 제외, 제거\n df = remove_no_clu_info(df)\n\n else:\n print('>> De novo + Clustering result merging ... 
\\n')\n df = pd.merge(de_novo, cluster, how='left')\n df = remove_no_clu_info(df)\n\n print('>> Merging done \\n')\n\n return df.reset_index(drop=True)\n\n# Original Cluster Size : count\n# New Cluster Size : new_count\n\ndef refinement(dataset, col, eps, min_samples):\n \n feature = dataset[[col, 'm/z']] # 'RT'\n\n model = DBSCAN(eps=eps, min_samples=min_samples, n_jobs = -1)\n predict = model.fit_predict(feature)\n \n dataset['new_clu'] = predict\n \n return dataset\n\ndef adding_top10(top1, top10, col):\n \n top_1 = top1[['Source File', 'Scan number', col]]\n\n merged_dataset = pd.merge(top10, top_1)\n \n return merged_dataset\n\ndef clu_size(top1, col):\n \n dic = {}\n clu_list = top1[col]\n \n for i in clu_list:\n if i not in dic:\n dic[i] = 0\n dic[i] += 1\n \n temp = [] \n append = temp.append\n \n for i in clu_list:\n append(dic[i])\n \n top1['new_count'] = temp\n \n return top1\n\ndef cluster_refinement(dataset, col, eps, min_samples):\n \n print('>> Refinement ...')\n \n top_1 = top1(dataset)\n top_1 = refinement(top_1, col, eps, min_samples)\n \n dataset = adding_top10(top_1, dataset, 'new_clu')\n \n top_1 = clu_size(top_1, 'new_clu')\n dataset = adding_top10(top_1, dataset, 'new_count')\n \n print('>> Refinement done \\n')\n \n return dataset\n\ndef new_clu(cluster, info, ppm):\n \n temp = info['m/z']*1e-6*ppm\n value = np.median(list(temp))\n \n df = pd.merge(info, cluster, how='left')\n \n df = change_clu_num(df)\n df = cluster_refinement(df, 'cluster', value, 1)\n \n return df\n\ndef top_n(dataset, n):\n \n dic = {}\n \n dataset['Sequence'] = dataset['Sequence'].fillna('nan')\n dataset['Score'] = dataset['Score'].fillna(0)\n \n for i, j, k in dataset[['new_clu', 'Sequence', 'Score']].values:\n \n if i not in dic:\n dic_temp = {}\n dic[i] = dic_temp\n\n if j not in dic[i]:\n dic[i][j] = 0\n dic[i][j] += k*0.01\n \n topn = {}\n\n for i in dic:\n temp = sorted(dic[i].items(), reverse=True, key=lambda x: x[1])\n temp = temp[0:n]\n\n for idx, j in enumerate(temp):\n if i not in topn:\n topn[i] = []\n topn[i].append((j, idx))\n \n return topn\n\ndef candidate_info(dataset, candi):\n \n dic_pep, dic_seq, dic_rank, dic_score = {}, {}, {}, {}\n\n for i, j, k in dataset[['Peptide', 'Sequence', 'new_clu']].values:\n for t in candi[k]:\n if j in t[0][0]:\n if k not in dic_pep:\n dic_seq[k] = []\n dic_pep[k] = []\n dic_rank[k] = []\n dic_score[k] = []\n\n if i not in dic_pep[k]:\n dic_seq[k].append(j)\n dic_pep[k].append(i)\n dic_rank[k].append(t[1])\n dic_score[k].append(t[0][1])\n \n return dic_pep, dic_seq, dic_rank, dic_score\n\ndef candidates(dataset, dic_p, dic_seq, dic_rank, dic_score):\n\n temp = []\n append = temp.append\n\n for i, j, k in dataset[['Source File', 'Scan number', 'new_clu']].values:\n for l, t, z, y in zip(dic_p[k], dic_seq[k], dic_rank[k], dic_score[k]):\n data = i, j, l, t, y, z\n append(data)\n \n return pd.DataFrame(temp, columns=['Source File', 'Scan number', 'Peptide', 'Sequence', 'Score', 'Rank'])\n\ndef labeling(dataset, t=0, f=1): # seq=gt : 0, otherwise : 1\n\n temp = []\n append = temp.append\n\n for i, j in dataset[['Sequence', 'GT']].values:\n if i == j:\n append(t)\n else:\n append(f)\n \n dataset['Label'] = temp\n \n return dataset\n\ndef remove_psm(dataset, train):\n\n temp1 = dataset['Peptide'].notnull()\n\n if train == True:\n temp2 = dataset['GT'].notnull()\n return dataset[temp1|temp2]\n\n return dataset[temp1]\n\n\ndef create_dataset(dataset, n, train):\n \n n_candidates = top_n(dataset, n)\n pep, seq, rank, score = candidate_info(dataset, 
n_candidates)\n \n df_1 = top1(dataset)\n dataset = candidates(df_1, pep, seq, rank, score)\n\n if train == True:\n df_1_ = df_1[['Source File', 'Scan number', 'GT', 'z', 'm/z', 'RT', 'new_count']]\n dataset = pd.merge(dataset, df_1_, how='outer')\n dataset = labeling(dataset)\n else:\n df_1_ = df_1[['Source File', 'Scan number', 'z', 'm/z', 'RT', 'new_count']]\n dataset = pd.merge(dataset, df_1_, how='outer')\n\n dataset = remove_psm(dataset, train)\n\n dataset = dataset.sort_values(by=['Source File', 'Scan number', 'Rank']).reset_index(drop=True) # Double Check\n \n return dataset\n\ndef delta_score(dataset):\n \n temp = []\n append = temp.append\n\n for idx, (i, j, z) in enumerate(dataset[['Source File', 'Scan number', 'Score']].values):\n\n if idx == 0:\n key = (i, j)\n append(0)\n cri = z\n continue\n\n if key == (i, j):\n append(cri-z)\n else:\n key = (i, j)\n append(0)\n cri = z\n \n return temp\n\ndef log_scale(dataset):\n \n dataset['Score'] = np.log(dataset['Score']+1)\n dataset['new_count'] = np.log(dataset['new_count']+1)\n \n return dataset\n\ndef feature(dataset):\n \n dataset = log_scale(dataset)\n dataset['delta'] = delta_score(dataset)\n\n return dataset.reset_index(drop=True)","repo_name":"BISCodeRepo/NovoRank","sub_path":"NEW_candidates.py","file_name":"NEW_candidates.py","file_ext":"py","file_size_in_byte":9246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36900593131","text":"import requests #Requests allows you to send HTTP/1.1 requests\r\nimport json #JSON is a syntax for storing and exchanging data; the data we'll get from Binance's API is in this syntax format\r\nimport pandas as pd\r\nimport datetime as dt \r\nimport matplotlib.pyplot as plt\r\nimport statsmodels.api as sm\r\nfrom statsmodels.tsa.stattools import adfuller\r\nfrom sklearn.linear_model import LinearRegression\r\nimport numpy as np\r\nimport warnings #This library is used because one line raised a warning about the use of an old function, \r\nwarnings.filterwarnings('ignore') #we also tried to use the new one but it was not supported in our model\r\n\r\n\r\nclass DATA:\r\n def __init__(self,symbol,startTime,endTime): #initializing the class in relation to the crypto ticker (symbol) from which we want to import historical data and also for the start and end time\r\n self.symbol=symbol\r\n self.startTime = startTime\r\n self.endTime = endTime\r\n \r\n def connection(self): #method for recovering the data based on the chosen interval\r\n #limit, url, interval are defined as global variables since they should not be changed, because the API limits requests to the server to 1200 per minute\r\n limit = 1000 \r\n url = \"https://api.binance.com/api/v3/klines\"\r\n interval = '1d'\r\n year, month, day = map(int, self.startTime.split('-')) #take dates as input from user and split them into 3 variables\r\n year2, month2, day2 = map(int, self.endTime.split('-'))\r\n self.startTime=(str(int(dt.datetime(year, month, day).timestamp() *1000)))\r\n self.endTime= (str(int(dt.datetime(year2,month2,day2).timestamp() *1000)))\r\n #updating connection parameters \r\n self.req_params = {'symbol' : self.symbol, 'interval' : interval, 'startTime' : self.startTime, 'endTime' : self.endTime, 'limit' : limit} \r\n # creating a pandas dataframe by requesting data from the binance url (defined in the url variable) using the requests library and converting it into a python dictionary with the loads method from the json library\r\n DF = pd.DataFrame(json.loads(requests.get(url, params 
= self.req_params).text))\r\n #If the request from the server doesn't return any data (that is, when the index's length is 0) the connection function also doesn't return any value\r\n if (len(DF.index)== 0): \r\n return None \r\n DF = DF.iloc[:, 0:6] #else use iloc (integer location based indexing for selection by position, in this case the first 6 columns ) with a slice object with ints as input\r\n DF.columns = ['datetime', 'open', 'high', 'low', 'close', 'volume']\r\n DF.index = [dt.datetime.fromtimestamp(x / 1000.0) for x in DF.datetime] #setting index with the requested dates \r\n DF.head() \r\n return(DF)\r\n \r\n \r\nclass STRATEGY(): \r\n def __init__(self,dfneeded):\r\n self.dfneeded = dfneeded\r\n \r\n def stra(self):\r\n #Creating an empty DataFrame using the index of the Bitcoin DataFrame\r\n g = pd.DataFrame(index=self.dfneeded.index)\r\n #Creating a column in the empty DataFrame \r\n g['Close'] = self.dfneeded['close']\r\n #Indexing the DataFrame with clean dates (cleaned from the hours)\r\n g.index = g.index.date\r\n #Transforming the close prices from an object to a float\r\n g = g.astype(float)\r\n g.head()\r\n #Here we are creating a few more columns needed by the strategy\r\n #At first we set the length of the Moving Average to ma=15 periods\r\n ma = 15\r\n #creating the columns of the close price, return, moving average and ratio \r\n #which shows when the strategy is too far away from that mean\r\n g['return'] = np.log(g['Close']).diff()\r\n g['ma'] = g['Close'].rolling(ma).mean()\r\n g['ratio'] = g['Close'] / g['ma']\r\n #Descriptive statistics of the ratio column\r\n g['ratio'].describe()\r\n #from the previous code we saw that the 'standard' 25% and 75% percentiles were too far from the min and max, so here we are\r\n #creating an array to calculate the percentiles that we want\r\n percentiles = [2,5,50,95,98]\r\n p = np.percentile(g['ratio'].dropna(),percentiles)\r\n p\r\n #Strategy plans, definition of when it is better to go short or long\r\n #we go short at the 98th percentile\r\n short = p[-1]\r\n #we go long at the 2nd percentile\r\n long = p[0]\r\n #condition to short\r\n g['position'] = np.where(g.ratio > short, -1, np.nan)\r\n #condition to long\r\n g['position'] = np.where(g.ratio < long, 1, g['position'])\r\n #according to this strategy we are always in a trade, so this command \r\n #allows the strategy to invert the position from long to short and from short to long\r\n g['position'] = g['position'].ffill()\r\n #dropna for removing missing data before plotting\r\n g.position.dropna().plot()\r\n #create a column for the strategy return\r\n g['strat_return'] = g['return'] * g['position'].shift()\r\n #Comparison trendline of the strategy and the buy & hold strategy\r\n plt.plot(np.exp(g['return'].dropna()).cumprod(), label = 'Buy & Hold')\r\n plt.plot(np.exp(g['strat_return'].dropna()).cumprod(), label = 'Strategy')\r\n plt.legend()\r\n #Return of the strategy and buy & hold\r\n print('return of the buy and hold strategy ',np.exp(g['return'].dropna()).cumprod()[-1]-1)\r\n print('return of the Mean Reverting Strategy ',np.exp(g['strat_return'].dropna()).cumprod()[-1]-1) \r\n \r\n \r\nclass CLRM(DATA):\r\n def __init__(self,x,y):\r\n self.x = x\r\n self.y = y\r\n \r\n def linreg(self):\r\n self.x = self.x.to_numpy()\r\n self.y = self.y.to_numpy()\r\n self.x = self.x.reshape(-1,1)\r\n self.y = self.y.reshape(-1,1)\r\n model = LinearRegression()\r\n model.fit(self.x,self.y)\r\n x_test = np.linspace(-0.25,0.25)\r\n y_pred = 
model.predict(x_test[:,None])\r\n plt.scatter(self.x,self.y,5,'g')\r\n plt.plot(x_test,y_pred,'r')\r\n plt.legend(['predicted line','observed data'])\r\n plt.show()\r\n \r\n def showstats(self):\r\n model = sm.OLS(self.x, sm.add_constant(self.y)).fit()\r\n print(model.summary())\r\n \r\n \r\n\r\nclass tests:\r\n def __init__(self,x):\r\n self.x = x \r\n \r\n def adfullertest(self):\r\n result = adfuller(self.x)\r\n print (self.x.describe())\r\n print('')\r\n print('ADF Statistic: %f' % result[0])\r\n print('p-value: %f' % result[1])\r\n print('Critical Values:')\r\n for key, value in result[4].items():\r\n print('\\t%s: %.3f' % (key, value))\r\n \r\n \r\nclass Anotherstrategy():\r\n def __init__(self, dfneeded):\r\n self.dfneeded= dfneeded\r\n \r\n def stra2(self):\r\n #Creating an empty DataFrame using the index of Bitcoin DataFrame, we call it BTC\r\n btc = pd.DataFrame(index=self.dfneeded.index)\r\n #Creating a column in the empty DataFrame \r\n btc['price'] = pd.to_numeric(self.dfneeded['close'])\r\n #Indexing the DataFrame with clean dates (cleaned from the hours)\r\n btc.index = btc.index.date\r\n #\r\n btc['daily_difference'] = btc['price'].diff()\r\n #Creating signal of the strategy, we can see when we have to go long and when short\r\n btc['signal'] = np.where(btc['daily_difference']>0,1.0,0.0)\r\n btc['positions']= btc['signal'].diff()\r\n #Show the chart\r\n btc \r\n #Creating the signal in the chart\r\n fig = plt.figure()\r\n ax1= fig.add_subplot(111,ylabel = \"Bitcoin\")\r\n btc['price'].plot(ax = ax1,color = 'b',lw = 2)\r\n ax1.plot(btc.loc[btc.positions==1.0].index,btc.price[btc.positions==1.0], '^', markersize=7 , color ='g')\r\n ax1.plot(btc.loc[btc.positions==-1.0].index,btc.price[btc.positions==-1.0], 'v', markersize=7, color ='r')\r\n #Here we have to see the profit of the strategy\r\n initial_capital = float(0)\r\n positions = pd.DataFrame(index=self.dfneeded.index.date).fillna(0.0)\r\n portfolio = pd.DataFrame(index=self.dfneeded.index.date).fillna(0.0)\r\n positions['Bitcoin'] = btc['signal']\r\n portfolio['positions'] = (positions.multiply(btc['price'],axis=0))\r\n portfolio ['cash'] = initial_capital - (positions.diff().multiply(btc['price'],axis=0)).cumsum()\r\n portfolio['total']= portfolio['positions']+portfolio ['cash']\r\n #Show the chart\r\n #plt.plot(btc['price'],label = 'Price')\r\n plt.plot(portfolio['total'], color='orange') \r\n plt.legend(['Price','Entry','Exit','Strategy'])\r\n # Show the graph\r\n plt.show()\r\n\r\n\r\n\r\n\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n ","repo_name":"Anthony-Antona/Trading-strategies-with-Python","sub_path":"pyproj_class.py","file_name":"pyproj_class.py","file_ext":"py","file_size_in_byte":9027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33014871927","text":"from fastapi import FastAPI, HTTPException\r\nimport mysql.connector\r\nfrom datetime import date\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\n\r\napp = FastAPI()\r\n\r\n@app.get(\"/\")\r\nasync def root():\r\n return {\"message\": \"Hello World\"}\r\n \r\n@app.get(\"/users\")\r\nasync def get_users():\r\n cnx = get_db_connection()\r\n cursor = cnx.cursor()\r\n query = \"SELECT * FROM users\"\r\n cursor.execute(query)\r\n rows = cursor.fetchall()\r\n users = [{\"id\": row[0], \"name\": row[1], \"email\": row[2]} for row in rows]\r\n cnx.close()\r\n return users\r\n\r\n@app.get(\"/users/{user_id}\")\r\nasync 
def get_user(user_id: int):\r\n cnx = get_db_connection()\r\n cursor = cnx.cursor()\r\n query = \"SELECT * FROM users WHERE id = %s\"\r\n cursor.execute(query, (user_id,))\r\n row = cursor.fetchone()\r\n if row is None:\r\n raise HTTPException(status_code=404, detail=\"User not found\")\r\n user = {\"id\": row[0], \"name\": row[1], \"email\": row[2]}\r\n cnx.close()\r\n return user\r\n\r\n@app.post(\"/users\")\r\nasync def create_user(name: str, email: str):\r\n cnx = get_db_connection()\r\n cursor = cnx.cursor()\r\n query = \"INSERT INTO users (name, email) VALUES (%s, %s)\"\r\n cursor.execute(query, (name, email))\r\n cnx.commit()\r\n user_id = cursor.lastrowid\r\n cnx.close()\r\n return {\"id\": user_id, \"name\": name, \"email\": email}\r\n\r\n@app.put(\"/users/{user_id}\")\r\nasync def update_user(user_id: int, name: str, email: str):\r\n cnx = get_db_connection()\r\n cursor = cnx.cursor()\r\n query = \"UPDATE users SET name = %s, email = %s WHERE id = %s\"\r\n cursor.execute(query, (name, email, user_id))\r\n cnx.commit()\r\n cnx.close()\r\n return {\"id\": user_id, \"name\": name, \"email\": email}\r\n\r\n@app.delete(\"/users/{user_id}\")\r\nasync def delete_user(user_id: int):\r\n cnx = get_db_connection()\r\n cursor = cnx.cursor()\r\n query = \"DELETE FROM users WHERE id = %s\"\r\n cursor.execute(query, (user_id,))\r\n cnx.commit()\r\n cnx.close()\r\n return {\"message\": \"User deleted\"}","repo_name":"arav-aravind/docker","sub_path":"docker asses/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32532705572","text":"def vowels_count(s):\n \"\"\"Write a function smallLetterCount which takes a string representing\n a word as input and returns the number of vowels in the string.\n Vowels in this case are 'a', 'e', 'i', 'o', 'u'. Here, 'y' is also a\n vowel, but only when it is at the end of the given word.\n\n Example:\n >>> vowels_count(\"abcde\")\n 2\n \n Example solution:\n # line 1\n vowels = \"aeiouAEIOU\"\n # line 2\n n_vowels = sum(c in vowels for c in s)\n # line 3\n if s[-1] == 'y':\n # line 4\n n_vowels += 1\n # line 5\n return n_vowels\n \n \"\"\"\n # Please print out which line of the above program contains an error. E.g. 
if the bug is on line 4 then print 4\n # END OF CONTEXT\n print(\"3\")\n # END OF SOLUTION\n\ndef check(candidate):\n\n import io\n from contextlib import redirect_stdout\n\n f = io.StringIO()\n with redirect_stdout(f):\n candidate('')\n out = f.getvalue().strip('\\n')\n\n assert \"3\" == out\n for i in range(0, 10):\n if i != 3:\n assert str(i) != out\n\nif __name__ == '__main__':\n check(vowels_count)\n","repo_name":"openai/code-align-evals-data","sub_path":"alignment/find_bug/vowels_count.py","file_name":"vowels_count.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"61"} +{"seq_id":"74446791554","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nfrom cgi import parse_qs, escape\r\nimport json\r\n#import sys, os\r\nimport subprocess\r\nimport time\r\n#import pdfFabric\r\n\r\nhtml = \"\"\"\r\n%(layers)s\r\n%(orientation)s\r\n%(format)s\r\n%(geojson)s\r\n\r\n\"\"\"\r\n\r\n\r\n\r\ndef application(environ, start_response):\r\n \r\n # the environment variable CONTENT_LENGTH may be empty or missing\r\n try:\r\n request_body_size = int(environ.get('CONTENT_LENGTH', 0))\r\n except (ValueError):\r\n request_body_size = 0\r\n \r\n # Retrieve the data passed via the POST method\r\n # ####################################################\r\n # When the method is POST the variable will be sent\r\n # in the HTTP request body which is passed by the WSGI server\r\n # in the file like wsgi.input environment variable.\r\n request_body = environ['wsgi.input'].read(request_body_size)\r\n d = parse_qs(request_body)\r\n \r\n format = d.get('format', [''])[0] # Returns the first format.\r\n orientation = d.get('orientation', [''])[0] # Returns the first orientation.\r\n layers = d.get('layers', [''])[0] # Returns the first layers value.\r\n geojson = d.get('geojson', [''])[0] # Returns the first geojson\r\n \r\n # Always escape user input to avoid script injection\r\n format = escape(format)\r\n orientation = escape(orientation)\r\n layers = escape(layers)\r\n geojson = escape(geojson)\r\n \r\n # Save the geojson to disk\r\n # #######################################\r\n file=open('c:/osgeo4w/apache/htdocs/python/geojson.geojson', 'w')\r\n file.write(geojson)\r\n file.close()\r\n \r\n # Launch the pdfFabric.py utility\r\n # #######################################\r\n #subprocess.call(\"start python pdfFabric.py\")\r\n path = '../QGisEnCoulisse/pdf/plan.pdf'\r\n output = subprocess.check_output(['dir'], shell=True)\r\n output = subprocess.check_output(['dir', '*.*'], shell=True)\r\n output = subprocess.check_output(['python', '--version'], shell=True)\r\n \r\n output = subprocess.call(['python', 'pdfFabric.py', '-f', 'format', '-o', 'orientation', '-l', 'layers'], shell=True)\r\n file=open('c:/osgeo4w/apache/htdocs/python/do_one.py', 'w')\r\n file.write(\"import subprocess\\n\")\r\n file.write(\"subprocess.call([\" + \r\n '\"' + \"python\" + '\", ' +\r\n '\"' + \"pdfFabric.py\" + '\", ' +\r\n '\"' + \"-f\" + '\", ' +\r\n '\"' + format + '\", ' +\r\n '\"' + \"-o\" + '\", ' +\r\n '\"' + orientation + '\", ' +\r\n '\"' + \"-l\" + '\", ' +\r\n '\"' + layers + '\", ' +\r\n \"], shell= True)\\n\")\r\n file.close()\r\n #os.system(\"dir\")\r\n #os.system('python --version')\r\n #os.system('dir')\r\n #print output\r\n \r\n \r\n # return the response to the client\r\n response_body = html % { # Fill the above html template in\r\n 'format': format or 'Empty',\r\n 'orientation': orientation or 'Empty',\r\n 
'layers': layers or 'Empty',\r\n 'geojson': geojson or 'Empty'\r\n }\r\n #response_body = 'Post content :' + ' layers = ' + str(layers) + ' ; orientation = ' + str(orientation) + ' ; format = ' + str(format)\r\n \r\n status = '200 OK'\r\n response_body=json.dumps({'pdfurl':'http://qgis/QGisEnCoulisse/pdf/plan.pdf'})\r\n response_headers = [('Content-type', 'application/json'),('Content-Length', str(len(response_body)))]\r\n #response_headers = [('Content-type', 'application/pdf')]\r\n start_response(status, response_headers)\r\n #fin = open('centre_ville.pdf', \"rb\")\r\n #return fin.read()\r\n return [response_body]\r\n\r\n\r\n#if __name__ == '__main__':\r\n# application('', '')","repo_name":"fgarel/report","sub_path":"source/QGisEnCoulisse/htdocs/python/getMap.py","file_name":"getMap.py","file_ext":"py","file_size_in_byte":3655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21320312132","text":"import argparse\nimport classes\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"filepath\", help=\"Filepath to PIN log file.\")\nparser.add_argument(\"--window_size\", type=int, help=\"For a sliding window analysis, the number of accesses to include in the window.\")\nparser.add_argument(\"--increment_size\", type=int, help=\"For a sliding window analysis, the number of accesses to increment between window analyses.\")\nargs = parser.parse_args()\n\n# set up windowing\nif args.window_size is not None or args.increment_size is not None:\n if args.window_size is None or args.increment_size is None:\n print('Window and increment size must be set.')\n exit()\n window_size = args.window_size\n increment_size = args.increment_size\n window = []\nelse:\n # no windowing requested; define the sentinel so the checks below don't raise NameError\n window = None\n\ncache_metadata = [ (1, 8, 256), (2, 16, 512) ]\n\ndef memaccess(ip, is_read, addr, size, mem_at, line):\n # why the hell do we need this\n if is_read:\n cache.read(addr, size)\n else:\n cache.write(addr, size)\n\nwith open(args.filepath) as f:\n cache = classes.Memory(cache_metadata)\n line = f.readline()\n linenum = 0\n while line:\n # skip comment lines, don't increment linenum\n if \"#\" in line:\n line = f.readline()\n continue\n\n # parse\n comps = line.split()\n ip = int(comps[0][:-1], 16)\n is_read = comps[1] == \"R\"\n addr = int(comps[2], 16)\n size = int(comps[3])\n mem_at = int(comps[4], 16)\n\n # check if windowing\n if window is not None:\n if len(window) < window_size:\n # filling the current window\n window.append((ip, is_read, addr, size, mem_at, linenum))\n memaccess(ip, is_read, addr, size, mem_at, linenum)\n else:\n # window size met, time to reset and slide\n print('finishing window', linenum - window_size, linenum - 1)\n cache.printMetrics()\n cache = classes.Memory(cache_metadata)\n\n # slice off first increment_size elements\n window = window[increment_size:]\n window.append((ip, is_read, addr, size, mem_at, linenum))\n\n # reprocess remaining elements\n for access in window:\n memaccess(access[0], access[1], access[2], access[3], access[4], access[5])\n else:\n memaccess(ip, is_read, addr, size, mem_at, linenum)\n\n line = f.readline()\n linenum += 1\n\n# make sure to always print the final window size when windowing, should always be at least 1\nif window is not None:\n print('final window', linenum - window_size, len(window))\ncache.printMetrics()\n","repo_name":"yasv123/ArchHype","sub_path":"sim/sim.py","file_name":"sim.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
+{"seq_id":"40017843079","text":"#set1_7\nfrom Crypto.Cipher import AES\nimport base64\n\ndef AES_in_ECB(ciphertext,key) :\n keyword = AES.new(key,AES.MODE_ECB)\n cipher = keyword.decrypt(ciphertext)\n return cipher\n\nif __name__ == '__main__' :\n \n key = b'YELLOW SUBMARINE'\n with open('cryptopals_set1_7.txt') as of :\n ciphertext = base64.b64decode(of.read())\n print(ciphertext)\n cipher = AES_in_ECB(ciphertext,key) \n print(cipher)\n\n","repo_name":"Yu4nz1/cryptopals","sub_path":"Set1/cryptopals_set1_7.py","file_name":"cryptopals_set1_7.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18506394591","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nfrom itertools import chain\n\nfrom sympy import *\nfrom sympy.polys.groebnertools import *\nfrom sympy.polys.orderings import monomial_key\n\nimport numpy as np\nfrom numba import cuda, jit, vectorize, guvectorize\n\n\ndef cuda_s_poly(cp, ring):\n \"\"\"\n Execute as a script to test.\n Called slightly differently from s_poly,\n must include ring.\n\n Prepare the data for the s-polynomial\n\n Create numpy arrays to send to gpu\n f, g, and dest arrays must all be the same length,\n\n figuring out the exact required output dimensions\n of the spoly procedure is exactly the F4 symbolic\n preprocessing step, and I don't know of another\n way to do it. Only the subtraction step of spoly\n is carried out on the GPU because of this, but\n it provides a micro demonstration of an F4 style\n matrix reduction. \n\n\n Input: cp : a critical pair\n ring: for ordering, modulus\n \"\"\"\n # Left and right of critical pair\n Ld = [(cp[0], cp[1], cp[2]), (cp[0], cp[4], cp[5])]\n\n # Get Length/monomials of destination array\n spair_info = symbolic_preprocessing(Ld, B, ring)\n\n gpu_spoly = spoly_numba_io(spair_info, ring)\n \n return gpu_spoly\n\n\ndef cuda_s_poly2(cp, ring):\n \"\"\"\n Another version of s_poly that\n just calculates each step in separate kernels.\n and reindexes the monomials on the host\n in between. 
May be improved by use of\n a cuda stream in CUDA-C or PyCUDA\n \"\"\"\n order = ring.order\n modulus = ring.domain.mod\n nvars = len(ring.symbols)\n\n # Multiply step\n f = cp[2]\n g = cp[5]\n\n um = np.array(flatten(cp[1]), dtype=np.uint16)\n vm = np.array(flatten(cp[4]), dtype=np.uint16)\n\n fsm = np.array([Sign(f)[0]] + Polyn(f).monoms(), dtype=np.uint16)\n gsm = np.array([Sign(g)[0]] + Polyn(g).monoms(), dtype=np.uint16)\n \n fc = np.array(Polyn(f).coeffs(), dtype=np.uint16)\n gc = np.array(Polyn(g).coeffs(), dtype=np.uint16)\n\n fsm_dest = np.zeros_like(fsm)\n gsm_dest = np.zeros_like(gsm)\n fc_dest = np.zeros_like(fc)\n gc_dest = np.zeros_like(gc)\n\n # launch kernel\n spoly_mul_numba_kernel(fsm_dest, gsm_dest, fc_dest, gc_dest,\n fsm, gsm, fc, gc, um, vm, nvars, modulus)\n\n # Sub Step\n # Get all monomials in both umf, vmg, sort by ordering, reindex\n # f, g in a 2d coefficient array, send to other kernel\n fnew_monoms = [tuple(f) for f in fsm_dest]\n gnew_monoms = [tuple(g) for g in gsm_dest]\n fnew_sig = fnew_monoms[0]\n gnew_sig = gnew_monoms[0]\n\n all_monoms = set(fnew_monoms).union(set(gnew_monoms))\n all_monoms = sorted(all_monoms, key=monomial_key(order=ring.order), reverse=True)\n\n \n \n \n return None\n\ndef spoly_numba_io(spair_info, ring):\n \"\"\"\n Prepare the mini macaulay matrix for the numba kernel\n Called after symbolic_preprocessing only.\n \"\"\"\n modulus = ring.domain.mod\n \n cols = spair_info[\"cols\"]\n rows = spair_info[\"rows\"]\n \n spair_matrix = np.zeros((rows, cols), dtype=np.uint16)\n dest = np.zeros(cols, dtype=np.uint16)\n\n # fill at coordinates with nonzero entries\n for coords in spair_info[\"nze\"]:\n spair_matrix[coords[0][0], coords[0][1]] = coords[1]\n\n spoly_sub_numba_kernel(dest, spair_matrix, modulus)\n\n # parse\n lb_spoly = parse_gpu_spoly(dest, spair_info, ring)\n \n return lb_spoly\n\ndef spoly_sub_numba_kernel(dest, spair, modulus):\n \"\"\"\n Basically Micro F4 partial reduction\n\n Subtracts f from g and stores in dest\n spair is a 2-row macaulay matrix of \n coefficients in f and g in given monomial ordering.\n \n Likely grossly inefficient compared to CPU due\n to memory access times, but parallel. Demonstrates\n part of the process of F4 reduction.\n \"\"\"\n for i in range(dest.size):\n dest[i] = ((spair[0][i] % modulus) - (spair[1][i] % modulus)) % modulus\n\n\ndef spoly_mul_numba_kernel(fsm_dest, gsm_dest, fc_dest, gc_dest,\n fsm, gsm, fc, gc, um, vm, nvars, modulus):\n \"\"\"\n Numba lbp_mul kernel for cuda_s_poly2. 
\n Stage one of Spoly, \n fsm_dest, gsm_dest must be made a set, sorted, \n and fc, gc reindexed into a 2d array for \n sub step kernel\n \"\"\"\n # multiply um by fsm, vm by gsm\n frows = fsm.shape[0]\n for j in range(frows):\n for i in range(nvars):\n fsm_dest[j, i] = ((um[i] % modulus) + (fsm[j, i] % modulus)) % modulus\n\n grows = gsm.shape[0]\n for j in range(grows):\n for i in range(nvars):\n gsm_dest[j, i] = ((vm[i] % modulus) + (gsm[j, i] % modulus)) % modulus\n\n # multiply coefficients\n for i in range(fc_dest.size):\n fc_dest[i] = ((um[-1] % modulus) * (fc[i] % modulus)) % modulus\n\n for i in range(gc_dest.size):\n gc_dest[i] = ((vm[-1] % modulus) * (gc[i] % modulus)) % modulus\n \n # Done?\n\n \ndef symbolic_preprocessing(Ld, B, ring):\n \"\"\"\n Mini Symbolic Preprocessing for Single S-Polynomial\n \n Input: Ld : two 3-tuples(sig, um, f), (sig, vm, g)\n B : intermediate basis\n ring : for domain, order stuff\n \n Out: Information needed to construct a macaulay matrix.\n \"\"\"\n order = ring.order\n domain = ring.domain\n\n Fi = set([lbp_mul_term(sc[2], sc[1]) for sc in Ld])\n Done = set([Polyn(f).LM for f in Fi])\n M = [Polyn(f).monoms() for f in Fi]\n M = set([i for i in chain(*M)]).difference(Done)\n while M != Done:\n MF = M.difference(Done)\n if MF != set():\n m = MF.pop()\n Done.add(m)\n for g in B:\n if monomial_divides(Polyn(g).LM, m):\n u = term_div((m, domain.one), Polyn(g).LT, domain)\n ug = (lbp_mul_term(g, u))\n #Fi.add(ug) # This is an add reducer step from F4\n for m in Polyn(ug).monoms():\n M.add(m)\n else:\n break\n\n # Fi sorted by sig_key, normalized, labeled, Done by monomial order\n Fi = sorted(Fi, key=lambda f: sig_key(f[0], ring.order), reverse=True)\n print(\"---------SORTED Fi----------\")\n for i, f in enumerate(Fi):\n print(i, f)\n print(\"---------------------------\")\n Fi = [lbp(Sign(f), Polyn(f).monic(), Num(f)) for f in Fi]\n Done = sorted(Done, key=monomial_key(order=ring.order), reverse = True)\n\n # pseudo COO sparse format\n nonzero_entries = []\n for i, f in enumerate(Fi):\n for t in Polyn(f).terms():\n nonzero_entries.append(((i, Done.index(t[0])), t[1]))\n\n spair_info = dict()\n spair_info[\"cols\"] = len(Done)\n spair_info[\"rows\"] = len(Fi)\n spair_info[\"nze\"] = nonzero_entries\n spair_info[\"monomials\"] = Done\n spair_info[\"spair\"] = Fi\n \n print(\"S-Pair Info\")\n for(k, v) in spair_info.items():\n print(str(k) + \": \" + str(v))\n \n return spair_info\n\n\ndef parse_gpu_spoly(dest, spair_info, ring):\n \"\"\"\n Return GPU spoly to sympy labeled polynomial\n\n Input: dest : the destination array from kernel\n spair_info: from symbolic_preprocessing\n ring : ordering, domain, etc.\n\n Output: sympy lbp 3 tuple (sig, poly, num)\n \"\"\"\n spoly_sig = spair_info[\"spair\"][0][0]\n spoly_num = spair_info[\"spair\"][0][2]\n\n pexp = []\n for i, c in enumerate(dest):\n if c != 0:\n pexp.append('+' + str(c))\n for j, e in enumerate(spair_info[\"monomials\"][i]):\n if e != 0:\n pexp.append('*' + str(r.symbols[j]) + '**' + str(e))\n spol = ring.from_expr(''.join(pexp))\n lb_spol = tuple([spoly_sig, spol, spoly_num])\n return lb_spol\n\n\ndef parse_gpu_spoly2():\n pass\n\n\nif __name__ == \"__main__\":\n print(\"CUDA Spoly Test\")\n\n r, a, b, c, d, e = ring(symbols='a, b, c, d, e', domain=GF(65521), order='grevlex')\n \"\"\"\n print(\"Cyclic Affine 4\")\n f1 = a + b + c + d\n f2 = a*b + b*c + a*d + c*d\n f3 = a*b*c + a*b*d + a*c*d + b*c*d\n f4 = a*b*c*d - 1\n \"\"\"\n\n print(\"Cyclic Homogeneous 4\")\n f1 = a + b + c + d\n f2 = a*b + 
b*c + a*c + c*d\n f3 = a*b*c + a*b*d + a*c*d + b*c*d\n f4 = a*b*c*d + e**4\n \n F = [f1, f2, f3, f4]\n\n order = r.order\n\n B = [lbp(sig(r.zero_monom, i), f, i) for i, f in enumerate(F)]\n B = sorted(B, key=lambda g: order(Polyn(g).LM), reverse=True)\n\n CP = [critical_pair(B[i], B[j], r)\n for i in range(len(B)) for j in range(i + 1, len(B))]\n CP = sorted(CP, key=lambda cp: cp_key(cp, r), reverse=True)\n\n S = [cuda_s_poly(CP[i], r) for i in range(len(CP))]\n S_orig = [s_poly(CP[i]) for i in range(len(CP))]\n\n print(\"Output of original s_poly\")\n for i, orig_s in enumerate(S_orig):\n print(i)\n for j, c in enumerate(orig_s):\n print(j, c)\n \n print(\"Output of cuda_s_poly\")\n for i, s in enumerate(S):\n print(i)\n for j, c in enumerate(s):\n print(j, c)\n\n assert(set(S) == set(S_orig))\n","repo_name":"Jbowman353/parallel-groebner","sub_path":"cuda_spol.py","file_name":"cuda_spol.py","file_ext":"py","file_size_in_byte":8965,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"8459793413","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport pandas as pd\nimport xgboost as xgb\nimport math\n\n\ndef load_raw_data():\n train = pd.read_csv('../data/train.csv')\n test = pd.read_csv('../data/test.csv')\n test['SalePrice'] = np.nan\n train['data_type'] = 'train'\n test['data_type'] = 'test'\n data = pd.concat([train, test])\n\n # data['logSalePrice'] = data['SalePrice'].apply(math.log)\n # data = data.drop('SalePrice', axis=1)\n # data['sale_date'] = data['YrSold'].map(str) + '-' + data['MoSold'].map(str)\n\n return data\n\n\ndef data_pre_processing(data):\n one_hot_fea = [\n 'MSSubClass', 'MSZoning', 'Street', 'Alley', 'LandContour', 'Utilities',\n 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2',\n 'BldgType', 'HouseStyle', 'YearRemodAdd', 'RoofStyle', 'RoofMatl', 'Exterior1st',\n 'Exterior2nd', 'MasVnrType', 'MasVnrArea', 'Foundation', 'Heating',\n 'CentralAir', 'Electrical', 'Functional','GarageType', 'GarageFinish',\n 'Fence', 'MiscFeature', 'SaleType', 'SaleCondition', 'sale_date']\n\n other_fea = [\n 'LotShape', 'YearBuilt', 'ExterQual', 'ExterCond', 'BsmtQual', 'BsmtCond',\n 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'HeatingQC', 'KitchenQual',\n 'FireplaceQu', 'GarageQual', 'GarageCond', 'PavedDrive', 'PoolQC', 'YrSold', 'MoSold']\n\n drop_list = one_hot_fea + other_fea\n\n for fea in one_hot_fea:\n data_dummy = pd.get_dummies(data[fea], prefix=fea)\n data = pd.concat((data, data_dummy), axis=1)\n\n data['LotShape_ordinal'] = data['LotShape'].map({'Reg': 0, 'IR1': 1, 'IR2': 2, 'IR3': 3})\n data['ExterQual_ordinal'] = data['ExterQual'].map({'EX': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1})\n data['ExterCond_ordinal'] = data['ExterCond'].map({'EX': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1})\n data['BsmtQual_ordinal'] = data['BsmtQual'].map({'EX': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NA': 0})\n data['BsmtCond_ordinal'] = data['BsmtCond'].map({'EX': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NA': 0})\n data['BsmtExposure_ordinal'] = data['BsmtExposure'].map({'Gd': 5, 'Av': 4, 'Mn': 3, 'No': 2, 'NA': 1})\n data['BsmtFinType1_ordinal'] = data['BsmtFinType1'].map({'GLQ': 5, 'ALQ': 4, 'BLQ': 3, 'Rec': 2, 'LwQ': 1, 'Unf': 0, 'NA': -1})\n data['BsmtFinType2_ordinal'] = data['BsmtFinType2'].map({'GLQ': 5, 'ALQ': 4, 'BLQ': 3, 'Rec': 2, 'LwQ': 1, 'Unf': 0, 'NA': -1})\n data['HeatingQC_ordinal'] = data['HeatingQC'].map({'EX': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1})\n data['KitchenQual_ordinal'] = 
data['KitchenQual'].map({'EX': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1})\n data['FireplaceQu_ordinal'] = data['FireplaceQu'].map({'EX': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NA': 0})\n data['GarageQual_ordinal'] = data['GarageQual'].map({'EX': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NA': 0})\n data['GarageCond_ordinal'] = data['GarageCond'].map({'EX': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NA': 0})\n data['PavedDrive_ordinal'] = data['PavedDrive'].map({'Y': 2, 'P': 1, 'N': 0})\n data['PoolQC_ordinal'] = data['PoolQC'].map({'EX': 5, 'Gd': 4, 'TA': 3, 'Fa': 2, 'Po': 1, 'NA': 0})\n data['age'] = 2018 - data['YearBuilt']\n data['Garage_age'] = 2018 - data['GarageYrBlt']\n\n data = data.drop(drop_list, axis=1)\n\n y_train = data.loc[data['data_type'] == 'train']['logSalePrice']\n x_train = data.loc[data['data_type'] == 'train']\n x_train = x_train.drop(['Id', 'logSalePrice', 'data_type'], axis=1)\n x_test = data.loc[data['data_type'] == 'test'].drop(['Id', 'logSalePrice', 'data_type'], axis=1)\n\n dtrain = xgb.DMatrix(x_train, y_train)\n dtest = xgb.DMatrix(x_test)\n\n return dtrain, dtest\n","repo_name":"andrewand/kaggle_house_pricing","sub_path":"model/data_load.py","file_name":"data_load.py","file_ext":"py","file_size_in_byte":3688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70424725954","text":"import matplotlib as mpl\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nmpl.rcParams['text.usetex'] = True\nax = plt.gca()\n\n# ------------------------------------------------------------------------------------------------\n\nC_ls = np.loadtxt(\"hi_res_C_ls.dat\")\n\nC_lm = np.loadtxt(\"c_ls_low_m.dat\")\nC_lm2 = np.loadtxt(\"c_ls_hi_m.dat\")\n\nC_lb = np.loadtxt(\"c_ls_low_b.dat\")\nC_lb2 = np.loadtxt(\"c_ls_hi_b.dat\")\n\nC_lh = np.loadtxt(\"c_ls_low_h.dat\")\nC_lh2 = np.loadtxt(\"c_ls_hi_h.dat\")\n\nC_lr = np.loadtxt(\"c_ls_low_r.dat\")\nC_lr2 = np.loadtxt(\"c_ls_hi_r.dat\")\n\nloL = np.loadtxt(\"COM_PowerSpect_CMB-TT-loL-full_R2.02.txt\",skiprows=3)\nhiL = np.loadtxt(\"COM_PowerSpect_CMB-TT-hiL-full_R2.02.txt\",skiprows=3)\n\nloLl = loL[:,0]\nloLCl = loL[:,1]\nyerlolo= loL[:,2]\nyerhilo= loL[:,3]\nhiLl = hiL[:,0]\nhiLCl = hiL[:,1]\nyerrhi = hiL[:,2]\n\nl = C_ls[6:,0]\nC_l = C_ls[6:,1]\nClm = C_lm[6:,1]\nClm2 = C_lm2[6:,1]\nClb = C_lb[6:,1]\nClb2 = C_lb2[6:,1]\nClh = C_lh[6:,1]\nClh2 = C_lh2[6:,1]\nClr = C_lr[6:,1]\nClr2 = C_lr2[6:,1]\n\ny = 5775/max(C_l)\ny1 = 5775/max(Clm)\ny2 = 5775/max(Clm2)\ny3 = 5775/max(Clb)\ny4 = 5775/max(Clb2)\ny5 = 5775/max(Clh)\ny6 = 5775/max(Clh2)\ny7 = 5775/max(Clr)\ny8 = 5775/max(Clr2)\n\nplt.errorbar(loLl,loLCl,yerr=[yerlolo,yerhilo],color='grey',label='Planck')\nplt.errorbar(hiLl,hiLCl,yerr=yerrhi,color='grey')\nplt.plot(l,y*C_l,label=r'$\\Omega_m = 0.224$')\nplt.plot(l,y1*Clm,label=r'$\\Omega_m = 0.204$')\nplt.plot(l,y2*Clm2,label=r'$\\Omega_m = 0.244$')\nplt.legend(loc=\"best\")\nplt.title('Angular Power Spectrum')\nplt.xlabel(r'$l$')\nplt.ylabel(r'$l(l+1)C_l/2\\pi$')\nplt.xlim(0,1200)\nplt.show()\n\nplt.errorbar(loLl,loLCl,yerr=[yerlolo,yerhilo],color='grey')\nplt.errorbar(hiLl,hiLCl,yerr=yerrhi,color='grey')\nplt.plot(l,y*C_l,label=r'$\\Omega_b = 0.046$')\nplt.plot(l,y3*Clb,label=r'$\\Omega_b = 0.042$')\nplt.plot(l,y4*Clb2,label=r'$\\Omega_b = 0.050$')\nplt.legend(loc=\"best\")\nplt.title('Angular Power 
Spectrum')\nplt.xlabel(r'$l$')\nplt.ylabel(r'$l(l+1)C_l/2\\pi$')\nplt.xlim(0,1200)\nplt.show()\n\nplt.errorbar(loLl,loLCl,yerr=[yerlolo,yerhilo],color='grey')\nplt.errorbar(hiLl,hiLCl,yerr=yerrhi,color='grey')\nplt.plot(l,y*C_l,label=r'$h = 0.7$')\nplt.plot(l,y5*Clh,label=r'$h = 0.65$')\nplt.plot(l,y6*Clh2,label=r'$h = 0.75$')\nplt.legend(loc=\"best\")\nplt.title('Angular Power Spectrum')\nplt.xlabel(r'$l$')\nplt.ylabel(r'$l(l+1)C_l/2\\pi$')\nplt.xlim(0,1200)\nplt.show()\n\nplt.errorbar(loLl,loLCl,yerr=[yerlolo,yerhilo],color='grey')\nplt.errorbar(hiLl,hiLCl,yerr=yerrhi,color='grey')\nplt.plot(l,y*C_l,label=r'$\\Omega_r = 8.3e-5$')\nplt.plot(l,y7*Clr,label=r'$\\Omega_r = 7.5e-5$')\nplt.plot(l,y8*Clr2,label=r'$\\Omega_r = 9.0e-5$')\nplt.legend(loc=\"best\")\nplt.title('Angular Power Spectrum')\nplt.xlabel(r'$l$')\nplt.ylabel(r'$l(l+1)C_l/2\\pi$')\nplt.xlim(0,1200)\nplt.show()","repo_name":"hermda02/CosmologyII","sub_path":"Project/Milestone4/simulations/cl_plot.py","file_name":"cl_plot.py","file_ext":"py","file_size_in_byte":2786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13044890349","text":"from glob import glob\nfrom os import makedirs\nfrom argparse import ArgumentParser\n\nimport numpy as np\n\ndef main(input_directory: str, output_directory: str, verbose: bool = False):\n iteration = 0\n files = glob(f\"{input_directory}/*\")\n makedirs(output_directory, exist_ok=True)\n\n for filepath in files:\n with open(filepath, 'r') as file:\n # Extract the entire board from the file\n board = np.array([list(line[:-1]) for line in file.readlines()])\n \n if verbose:\n print('\\n'.join(map(''.join, board)))\n \n # Add the outside walls to fill the gaps until the actual stage begins\n for i, first_wall in enumerate(np.argmax(board == '#', 1)):\n board[i, :first_wall] = \"#\"\n \n for i, last_wall in enumerate(np.argmax((board == '#')[:, ::-1], 1)):\n board[i, -(last_wall + 1):] = \"#\"\n \n for i, first_wall in enumerate(np.argmax(board == '#', 0)):\n board[:first_wall, i] = \"#\"\n \n for i, last_wall in enumerate(np.argmax((board == '#')[:, ::-1], 0)):\n board[-(last_wall + 1):, i] = \"#\"\n \n if verbose:\n print('\\n'.join(map(''.join, board)))\n print('-' * 80)\n \n # Extract just the walls\n walls = np.array(np.where(board == '#')).T + 1\n walls = np.ascontiguousarray(walls)\n size = board.shape[::-1]\n\n # Output the target filetype\n with open(f\"{output_directory}/{iteration:04d}.txt\", 'w') as output_file:\n print(\" \".join(map(str, size)), file=output_file)\n print(len(walls), end = ' ', file=output_file)\n print(' '.join(map(str, walls.ravel())), file=output_file)\n \n print(0, file=output_file)\n print(0, file=output_file)\n print(\"0 0\", end=\"\", file=output_file)\n \n iteration += 1\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n\n parser.add_argument(\"input_directory\", type=str, help=\"Location with the map files to parse.\")\n parser.add_argument(\"output_directory\", type=str, help=\"Location to place the output txt files.\")\n parser.add_argument('-v', \"--verbose\", action='store_true', help=\"Print the boards while creating them.\")\n\n main(**parser.parse_args().__dict__)","repo_name":"sepy97/sokoban","sub_path":"extract_walls.py","file_name":"extract_walls.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"7829744181","text":"from pdfminer.pdfinterp import PDFResourceManager, 
PDFPageInterpreter\nfrom pdfminer.converter import XMLConverter, HTMLConverter, TextConverter\nfrom pdfminer.layout import LAParams\nimport formatValue\nimport io\n\nimport re\n\n\ndef converter(page):\n rsrc_mgr = PDFResourceManager()\n ret_str = io.StringIO()\n codec = 'utf-8'\n la_params = LAParams()\n device = TextConverter(rsrc_mgr, ret_str, codec=codec, laparams=la_params)\n interpreter = PDFPageInterpreter(rsrc_mgr, device)\n\n interpreter.process_page(page)\n data = ret_str.getvalue()\n data = data.replace('\\n\\n of 58', ' ') \\\n .replace('\\n\\nCDP\\n\\nPage \\n\\n', ' ') \\\n .replace('\\n\\nPage \\n', ' ') \\\n .replace('\\n\\nCDP', ' ')\n return data\n\n\ndef groupData(data):\n group = {}\n pattern = \"W\\d\\.\\d|W\\d\\.\\d\\w|W...\\d\\.\\d\\w\"\n group_key = (re.findall(pattern, data))\n group_key = unique(group_key)\n split = data.split(\"\\n\\n\")\n for key in group_key:\n group_by_key = {}\n txt, value, split = text_value(key, split, group_key)\n group_by_key['text'] = txt\n txt.replace(txt, '')\n group_by_key['value'] = formatValue.formatValue(value, data)\n group[key] = group_by_key\n return group\n\n\ndef text_value(key, split, group_key):\n txt = \"\"\n value = \"\"\n for s in split:\n checked_value = ''\n if \"(\" + key in s:\n txt += s\n split.remove(s)\n else:\n for k in group_key:\n if \"(\" + k in value:\n checked_value = ''\n break\n else:\n checked_value = s\n value = value + '. ' + checked_value\n if checked_value != '':\n split.remove(checked_value)\n return txt, value, split\n\n\ndef unique(list1):\n unique_list = []\n for x in list1:\n if x not in unique_list:\n unique_list.append(x)\n return unique_list\n","repo_name":"Aminul-Hoq/pdfToJson","sub_path":"textToDictionary.py","file_name":"textToDictionary.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27318675884","text":"from __future__ import print_function\nimport argparse\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport skimage\nimport skimage.io\nimport skimage.transform\nimport numpy as np\nimport time\nimport math\nimport copy\nfrom dataloader import myloader15 as ls\nfrom dataloader import myloader as DA\nfrom matplotlib import pyplot as plt\nimport cv2\nfrom err_calculation import *\nfrom models import *\nfrom visualization import *\n\nparser = argparse.ArgumentParser(description='TANet')\nparser.add_argument('--maxdisp', type=int, default=192,\n help='max disp')\nparser.add_argument('--datapath', default='',\n help='datapath')\nparser.add_argument('--loadmodel', default='',\n help='load model')\nparser.add_argument('--error_vis', default='',\n help='save error visualization')\nparser.add_argument('--pred_disp', default='',\n help='save pred_disp')\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='enables CUDA training')\nparser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n\nargs = parser.parse_args()\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\ntorch.manual_seed(args.seed)\nif args.cuda:\n torch.cuda.manual_seed(args.seed)\n\nall_left_img, all_right_img, all_disp_pre_train, all_left_disp, test_left_img, test_right_img, test_disp_pre, test_disp = ls.dataloader(args.datapath)\n\nTrainImgLoader = 
torch.utils.data.DataLoader(\n DA.myImageFloder(all_left_img, all_right_img, all_disp_pre_train, all_left_disp, True),\n batch_size=2, shuffle=True, num_workers=2, drop_last=False)\n\nTestImgLoader = torch.utils.data.DataLoader(\n DA.myImageFloder(test_left_img, test_right_img, test_disp_pre, test_disp, False),\n batch_size=1, shuffle=False, num_workers=8, drop_last=False)\n\nmodel = TANet(args.maxdisp)\n\nif args.cuda:\n model = nn.DataParallel(model)\n model.cuda()\n\nif args.loadmodel is not None:\n state_dict = torch.load(args.loadmodel)\n model.load_state_dict(state_dict['state_dict'])\n\nprint('Number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))\n\ndef main():\n for batch_idx, (img_L, img_R, disp_pre, disp_L) in enumerate(TestImgLoader):\n if batch_idx == 10:\n model.eval()\n imgL = Variable(torch.FloatTensor(img_L))\n imgR = Variable(torch.FloatTensor(img_R))\n disp_pre = Variable(torch.FloatTensor(disp_pre))\n\n if args.cuda:\n imgL, imgR, disp_pre = imgL.cuda(), imgR.cuda(), disp_pre.cuda()\n\n start_time = time.time()\n with torch.no_grad():\n output = model(imgL, imgR, disp_pre)\n cost_time = time.time() - start_time\n\n pred_disp = output.data.cpu() # torch.Size([1, 1, 368, 1232])\n pred_disp = pred_disp.squeeze(1)\n\n mask = (disp_L > 0)\n mask.detach_()\n epe = EPE_metric(pred_disp, disp_L, mask)\n D1 = D1_metric(pred_disp, disp_L, mask)\n Thres1 = Thres_metric(pred_disp, disp_L, mask, 1.0)\n Thres2 = Thres_metric(pred_disp, disp_L, mask, 2.0)\n Thres3 = Thres_metric(pred_disp, disp_L, mask, 3.0)\n print('time = %.3f, epe = %.3f, D1 = %.3f, T1 = %.3f, T2 = %.3f, T3 = %.3f' % (cost_time, epe, D1*100, Thres1*100, Thres2*100, Thres3*100))\n\n img_left = img_L.squeeze().numpy().transpose([1, 2, 0])\n error_vis = disp_error_image_func.apply(pred_disp, disp_L).squeeze()\n error_vis = error_vis.numpy().transpose([1, 2, 0])\n error_vis = cv2.cvtColor(error_vis, cv2.COLOR_RGB2BGR)\n # cv2.imshow('error', error_vis)\n # cv2.imshow('left', img_left)\n # cv2.waitKey()\n cv2.imwrite(args.error_vis, error_vis*255)\n img = pred_disp.numpy().transpose([1, 2, 0]).astype(np.uint8)\n # cv2.imshow('disp', img)\n # cv2.waitKey()\n cv2.imwrite(args.pred_disp, img)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Y0uchenZ/TANet","sub_path":"test_1_img.py","file_name":"test_1_img.py","file_ext":"py","file_size_in_byte":4307,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"12091325633","text":"import pygame\nfrom pygame.locals import *\n\nfrom typing import Tuple, Optional, List, TypeVar\nfrom math import sqrt, fsum\n\n\nT = TypeVar('T')\nVector2 = Tuple[T, T]\nVector2f = Vector2[float]\nVector3 = Tuple[T, T, T]\nVector3f = Vector3[float]\nLineSegment3 = Tuple[Vector3[T], Vector3[T]]\nLineSegment2 = Tuple[Vector2[T], Vector2[T]]\nLineSegment3f = LineSegment3[float]\nLineSegment2f = LineSegment2[float]\n\n\ndef mult_vec(v: Vector3f, m: float) -> Vector3f:\n return (v[0] * m, v[1] * m, v[2] * m)\n\n\ndef sum_vec(l: List[Vector3f]) -> Vector3f:\n return tuple(map(fsum, zip(*l))) # type: ignore\n\n\ndef decompose(v: Vector3f, basis: Tuple[Vector3f, Vector3f]) -> Vector2f:\n a = basis[0]\n b = basis[1]\n\n try:\n n = (v[0] * a[1] - v[1] * a[0])/(b[0] * a[1] - b[1] * a[0])\n m = (v[0] - n * b[0])/(a[0])\n\n return(n, m)\n\n except ZeroDivisionError:\n return (0, 0)\n\n\ndef normalize_vec(v: Vector3f) -> Vector3f:\n M = sqrt(v[0] ** 2 + v[1] ** 2 + v[2] ** 2)\n return (v[0]/M, v[1]/M, 
v[2]/M)\n\n\nclass Line:\n def __init__(self, point: Vector3f, dir_vec: Vector3f) -> None:\n self.point = point\n self.dir_vec = normalize_vec(dir_vec)\n\n def __str__(self) -> str:\n return \"{0!s} + t{1!s}\".format(self.point, self.dir_vec)\n\n def scuttle(self, u: float) -> Vector3f:\n return (self.point[0] + u * self.dir_vec[0], self.point[1] + u * self.dir_vec[1], self.point[2] + u * self.dir_vec[2])\n\n\nclass Plane:\n def __init__(self, normal: Vector3f, intercept: float) -> None:\n self.normal = normal\n self.intercept = intercept\n\n\n def project_vector(self, point: Vector3f, view: Vector3f) -> Vector3f:\n A = self.normal[0]\n B = self.normal[1]\n C = self.normal[2]\n D = self.intercept\n\n x = point[0]\n y = point[1]\n z = point[2]\n\n mx = x - view[0]\n my = y - view[1]\n mz = z - view[2]\n\n t = -(A * x + B * y + C * z + D)/(A * mx + B * my + C * mz)\n\n return (x + t * mx, y + t * my, z + t * mz)\n\n\n def project_through(self, point: Vector3f) -> Vector3f:\n return self.project_vector(point, sum_vec([point, self.normal]))\n\n\n def project_line(self, line: Line) -> Line:\n p1 = self.project_through(line.point)\n p2 = self.project_through(line.scuttle(1))\n\n m = (p2[0] - p1[0], p2[1] - p1[1], p2[2] - p1[2])\n\n return Line(p1, m)\n\n\n def get_vectors(self, basis: Vector3f) -> Tuple[Vector3f, Vector3f]:\n A = self.normal[0]\n B = self.normal[1]\n C = self.normal[2]\n\n q = normalize_vec(self.project_through(basis))\n\n x = B * q[2] - C * q[1]\n y = C * q[0] - A * q[2]\n z = A * q[1] - B * q[0]\n\n r = normalize_vec((x, y, z))\n\n return (q, r)\n\n\nclass KeyboardEventHandler:\n def __init__(self):\n self.key_event_map = {}\n\n\n def has_handler(self, key) -> bool:\n return key in self.key_event_map\n\n\n def get_handler(self, key):\n handler, kwargs_dict = self.key_event_map[key]\n return (lambda *args, **kwargs: handler(*args, key, **kwargs, **kwargs_dict))\n\n\n def add_handler(self, key, handler, kwargs_dict = {}):\n self.key_event_map[key] = (handler, kwargs_dict)\n\n\n def add_group_handler(self, group, handler, kwargs_dict = {}):\n for key in group:\n self.add_handler(key, handler, kwargs_dict)\n\n\nclass View:\n def __init__(self, point: Vector3f, plane: Plane, basis: Vector3f = (1, 1, 1)) -> None:\n self.point = point\n self.plane = plane\n self.basis = plane.get_vectors(basis)\n\n\n def project_point(self, point: Vector3f) -> Vector2[int]:\n return tuple(map(round, decompose(self.plane.project_vector(point, self.point), self.basis))) # type: ignore\n\n\n def project_points(self, points: List[Vector3f]) -> List[Vector2[int]]:\n return list(map(self.project_point, points))\n\n\n def move_point(self, v: Vector3f) -> None:\n self.point = sum_vec([self.point, v])\n\n\nclass PrimitiveRenderer():\n def __init__(self, screen) -> None:\n self.screen = screen\n\n \n def draw_point(self, point: Vector2[int], color: Tuple[int, int, int] = (0, 0, 0), radius: int = 3) -> None:\n pygame.draw.circle(self.screen, color, point, radius)\n\n\n def draw_line_segment(self, segment: LineSegment2[int], color: Tuple[int, int, int] = (0, 0, 0), radius: int = 3) -> None:\n self.draw_point(segment[0], color, radius)\n self.draw_point(segment[1], color, radius)\n pygame.draw.line(self.screen, color, segment[0], segment[1], radius)\n\n\n def draw_polygon(self, points: List[Vector2[int]], color: Tuple[int, int, int] = (0, 0, 0), radius: int = 3) -> None:\n if not points:\n return\n\n first = points[0]\n for i in range(0, len(points)):\n if i + 1 < len(points):\n self.draw_line_segment((points[i], 
points[i + 1]), color, radius)\n else:\n self.draw_line_segment((points[i], first), color, radius)\n\n\ndef adjust_view_point(key, view) -> None:\n if key == pygame.K_LEFT:\n view.move_point((-10, 0, 0))\n elif key == pygame.K_RIGHT:\n view.move_point((10, 0, 0))\n elif key == pygame.K_UP:\n view.move_point((0, 10, 0))\n elif key == pygame.K_DOWN:\n view.move_point((0, -10, 0))\n elif key == pygame.K_q:\n view.move_point((0, 0, 10))\n elif key == pygame.K_e:\n view.move_point((0, 0, -10))\n\n\ndef main() -> None:\n pygame.display.init()\n pygame.font.init()\n screen = pygame.display.set_mode((0, 0), pygame.DOUBLEBUF)\n\n color = { \"white\": (255, 255, 255), \"black\": (0, 0, 0) }\n\n font = pygame.font.SysFont(\"arial\", 38)\n\n view = View((0.0, 0.0, 0.0), Plane((2, 2, 1), 0))\n\n p1 = (100.0, 50.0, 1.0)\n p2 = (100.0, 200.0, 1.0)\n p3 = (250.0, 200.0, 1.0)\n p4 = (250.0, 50.0, 1.0)\n\n view_control_keys = [pygame.K_LEFT, pygame.K_RIGHT, pygame.K_UP, pygame.K_DOWN, pygame.K_q, pygame.K_e]\n\n keyboard_event_handler = KeyboardEventHandler()\n keyboard_event_handler.add_group_handler(view_control_keys, adjust_view_point, {\"view\": view})\n\n\n def draw_line_segment(line_segment: LineSegment2) -> None:\n p1, p2 = line_segment\n pygame.draw.line(screen, color[\"black\"], p1, p2, 3)\n pygame.draw.circle(screen, color[\"black\"], p1, 5)\n pygame.draw.circle(screen, color[\"black\"], p2, 5)\n\n\n def draw_polygon(points: List[Vector2[int]]) -> None:\n if not points:\n return\n\n first = points[0] # type: Tuple[int, int]\n for i in range(0, len(points)):\n if i + 1 < len(points):\n draw_line_segment((points[i], points[i + 1]))\n else:\n draw_line_segment((points[i], first))\n\n\n while True:\n for e in pygame.event.get():\n if e.type == pygame.QUIT:\n exit()\n elif e.type == pygame.KEYDOWN:\n if keyboard_event_handler.has_handler(e.key):\n keyboard_event_handler.get_handler(e.key)()\n\n\n shape = view.project_points([p1, p2, p3, p4])\n\n screen.fill(color[\"white\"])\n\n draw_polygon(shape)\n\n pygame.display.flip()\n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"Eliasin/projections","sub_path":"projections.py","file_name":"projections.py","file_ext":"py","file_size_in_byte":7271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35939646996","text":"from src.tableau import *\nimport numpy as np\n\nclass LinearProgram():\n def __init__(self):\n self.takeInput()\n self.tableau = Tableau(self.matrixA, self.matrixB, self.c)\n self.solution = None\n\n def takeInput(self):\n print(\"Do you wish to maximise (Y) or minimize (N) the cost function?\")\n isMax=input()\n if(isMax.upper()=='Y'):\n self.multiplier=1\n else:\n self.multiplier=-1\n print(\"Enter the coefficients of the cost function (space separated in a line)\")\n self.c=[float(i)*self.multiplier for i in input().split()]\n self.c=np.array(self.c)\n n=len(self.c)\n\n #inputting the equations for Ax<=b\n print(\"Taking inputs for constraints Ax<=b in the form a1x1+a2x2+.....anxn<=b\")\n self.matrixA=[]\n self.matrixB=[]\n while(True):\n print(\"Enter n coefficients of the constraint equation\")\n a=[float(i) for i in input().split()]\n if(len(a)==n):\n self.matrixA.append(a)\n print(\"Enter the value of b\")\n b=float(input())\n self.matrixB.append([b])\n else:\n print(\"Incorrect number of coefficients, please try again\")\n print(\"Do you wish to continue?(Y/N)\")\n choice=input()\n if(choice.upper()=='N'):\n break\n\n self.matrixA=np.array(self.matrixA)\n 
self.matrixB=np.array(self.matrixB)\n\n def solve(self):\n while(True):\n pivot = self.tableau.getPivot()\n if(pivot == (-2, -2)):\n #Recursion Over, Now solve Linear Equations\n break \n if(pivot == (-1, -1)):\n #Solution is INF\n self.solution = float('inf')\n break \n self.tableau.gaussTransform(pivot)\n \n def getSoln(self):\n if(self.solution == float('inf')):\n print(\"Cost function is unbounded!\")\n return\n print(self.tableau.tableau)\n\n\n","repo_name":"paramkshah12/Linear-Program-Solver","sub_path":"src/simplex.py","file_name":"simplex.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27458531672","text":"#!/usr/bin/env python\nimport glob\nfrom pyraf import iraf\nfrom astropy.io import fits\nimport json\nimport sys\nimport os\nimport re\n\nbasepath = os.path.dirname(sys.argv[0])\nmyonedstds = os.path.join(basepath, '../iraf_data/onedstds')\nwith open('myccds.json') as file:\n settings = json.loads(file.read())\nside = settings['mysettings']['side']\nif side == \"Blue\":\n midname = 'blue'\nelif side == \"Red\":\n midname = 'red'\nelse:\n print(\"Error detected.\")\n\nextinct = 'onedstds$kpnoextinct.dat'\ninputlist = glob.glob('a*fits')\n\niraf.twodspec()\niraf.twodspec.longslit()\nprint('Copy to onedspec...')\niraf.twodspec.longslit.scopy.unlearn()\nolist3 = glob.glob('J*MDM_b.fits')\nfor obj in olist3:\n objind = str(olist3.index(obj) + 1)\n objname = re.sub('\\.fits$', '', obj) + '_' + \"CCDS\"\n iraf.twodspec.longslit.scopy(input=obj, output=objname, bands=1,\n format='onedspec')\nprint('---DONE---')\n","repo_name":"rudolffu/pyccds","sub_path":"src/ccdscopyoned.py","file_name":"ccdscopyoned.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11180666416","text":"import base64\nimport json\nfrom threading import Thread\nimport time\nimport uuid\nimport requests\nimport torch\nimport cv2 as cv\nimport os \nfrom dotenv import load_dotenv\nfrom BytesEncoder import BytesEncoder\nfrom datetime import datetime\nimport argparse\n\nclass ObjectDetector(object):\n def __init__(self,source=0,location='TestLocation'):\n \n self.source = source\n self.rider_model = torch.hub.load('ultralytics/yolov5', 'custom', path='models/new_models/RIDER_best.pt') # local model\n self.rider_model.conf = 0.9\n self.helmet_model = torch.hub.load('ultralytics/yolov5', 'custom', path='models/new_models/HELMET_best.pt') # local model\n self.helmet_model.conf = 0.5\n \n self.save_status = False\n self.location = location\n \n \n self.num_rider = 0\n self.num_no_helmet = 0\n self.stopped = False\n self.capture = cv.VideoCapture(source)\n load_dotenv()\n \n if not self.capture.isOpened():\n print(\"Cannot open camera\")\n exit()\n \n self.FPS = 1/60\n self.FPS_MS = int(self.FPS * 1000)\n \n \n self.thread = Thread(target=self.update, args=())\n self.thread.daemon = True\n self.thread.start()\n \n \n def update(self):\n while True:\n if self.stopped:\n return\n \n if self.capture.isOpened():\n (self.status, self.frame) = self.capture.read()\n time.sleep(self.FPS)\n \n def detector_rider(self):\n \n result_rider = self.rider_model(self.frame)\n predictions_rider = result_rider.pandas().xyxy[0].values.tolist()\n lamda_rider = list(filter(lambda x: x[6] == 'rider', predictions_rider))\n rider = len(lamda_rider)\n self.num_rider = rider\n \n if rider > 0:\n for i in lamda_rider: \n crop_rider = 
self.frame[int(i[1]):int(i[3]), int(i[0]):int(i[2])]\n top_xmin , top_ymin , top_xmax , top_ymax = int(i[0]),int(i[1]),int(i[2]),int(i[1]+(i[3]-i[1])/2)\n crop_rider_top = self.frame[top_ymin:top_ymax,top_xmin:top_xmax]\n \n detect_helmet = self.helmet_model(crop_rider_top)\n predictions_no_helmet = detect_helmet.pandas().xyxy[0].values.tolist()\n lamda_no_helmet = list(filter(lambda x: x[6] == 'no_helmet', predictions_no_helmet))\n no_helmet = len(lamda_no_helmet)\n \n self.num_no_helmet = no_helmet\n if no_helmet > 0:\n self.save_status = True\n for k in lamda_no_helmet:\n \n self.drawRect(self.frame,int(i[0]),int(i[1]),int(i[2]-i[0]),int(i[3]-i[1]))\n self.drawRect(crop_rider,int(k[0]),int(k[1]),int(k[2]-k[0]),int(k[3]-k[1]),corner_color=(0, 0, 255), rect_color=(200, 200, 200))\n \n self.show_frame(\"rider\",crop_rider,20,500)\n self.drawRect(self.frame,int(i[0]),int(i[1]),int(i[2]-i[0]),int(i[3]-i[1]))\n \n\n self.show_frame(\"RTSP Frame\",self.frame,20,10)\n \n \n print(f' {datetime.now().strftime(\"%H:%M:%S\")}> 🏍 Rider : {self.num_rider} , , ⛑ no helmet : , {self.num_no_helmet}' )\n \n if self.save_status is True:\n self.save_frame(self.frame,crop_rider) \n time.sleep(2) \n \n \n \n \n def save_frame(self,main_frame,crop_frame):\n self.checkDir()\n id = str(uuid.uuid4())\n filename_mainframe = f'images/{self.location}_MainFrame_{id}.jpg'\n filename_cropframe = f'images/{self.location}_CropFrame_{id}.jpg'\n \n cv.imwrite(filename_mainframe, main_frame)\n cv.imwrite(filename_cropframe, crop_frame)\n \n print(\" > 📸 Save Frame : \" , filename_mainframe)\n print(\" > 📸 Save Frame : \" , filename_cropframe)\n self.send_frame(filename_mainframe,filename_cropframe)\n self.num_no_helmet = 0\n self.save_status = False\n \n \n def send_frame(self,path_mainframe,path_cropframe):\n \n frame = cv.imread(path_mainframe)\n crop_frame = cv.imread(path_cropframe)\n \n _,buffer_mainframe = cv.imencode('.jpg',frame)\n _,buffer_cropframe = cv.imencode('.jpg',crop_frame)\n \n jpg_mainframe = base64.b64encode(buffer_mainframe)\n jpg_cropframe = base64.b64encode(buffer_cropframe)\n \n os.remove(path_cropframe)\n os.remove(path_mainframe)\n \n url = os.environ.get('API_URL')\n \n if url is None:\n print(\" > ❌ API_URL is None\")\n return\n \n payload = json.dumps({\n \"location\": self.location,\n \"base64DefaultImg\": jpg_mainframe,\n \"base64RiderImg\": jpg_cropframe\n },cls=BytesEncoder)\n \n headers = {\n 'Content-Type': 'application/json'\n }\n \n response = requests.post(url, data=payload, headers=headers,timeout=5)\n if response.status_code == 201:\n print(\" > ✅ Send Frame to API\")\n else:\n print(\" > ❌ Can't send Frame to API\")\n \n def drawRect(self,frame,x,y,w,h,corner_size=3, rect_size=2, corner_color=(0, 255, 0), rect_color=(200, 200, 200)):\n edge_len = int(min(w, h) /2 * 0.45)\n cv.rectangle(frame, (x, y), (x + w, y + h), rect_color, rect_size)\n cv.line(frame, (x, y), (x, y + edge_len), corner_color, corner_size)\n cv.line(frame, (x, y), (x + edge_len, y), corner_color, corner_size)\n cv.line(frame, (x + w, y), (x + w, y + edge_len), corner_color, corner_size)\n cv.line(frame, (x + w, y), (x + w - edge_len, y), corner_color, corner_size)\n cv.line(frame, (x, y + h), (x, y + h - edge_len), corner_color, corner_size)\n cv.line(frame, (x, y + h), (x + edge_len, y + h), corner_color, corner_size)\n cv.line(frame, (x + w, y + h), (x + w - edge_len, y + h), corner_color, corner_size)\n cv.line(frame, (x + w, y + h), (x + w, y + h - edge_len), corner_color, corner_size)\n \n def 
show_frame(self,name,frame,x=0,y=0):\n \n if self.status:\n # frm = cv.resize(frame, (800, 420), cv.INTER_AREA)\n cv.moveWindow(name, x, y)\n cv.imshow(name, frame)\n cv.waitKey(self.FPS_MS)\n cv.waitKey(self.FPS_MS)\n \n def read(self):\n return self.frame\n \n def stop(self):\n self.stopped = True\n self.thread.join()\n self.capture.release()\n cv.destroyAllWindows()\n print(' > 🛑 Stop Object Detector')\n \n def checkDir(self):\n if not os.path.exists('images'):\n print(\" > 📁 Create Directory images\")\n os.mkdir('images')\n\n\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--rtsp', help='RTSP Stream', default=os.environ.get('RTSP_STREAM'))\n parser.add_argument('--location', help='Location', default='test_cameras')\n args = parser.parse_args()\n return args\n\ndef checkArgs(args):\n print(\" > 📁 Location : \",args.location)\n print(\" > 📁 RTSP Stream : \",args.rtsp)\n if args.rtsp is None:\n print(\" > ❌ RTSP_STREAM is None => export RTSP_STREAM=>\")\n return False\n return True\n\n\ndef checkEnvironment():\n if os.environ.get('API_URL') is None:\n print(\" > ❌ API_URL is None => export API_URL=\")\n return False\n \n return True\ndef main():\n \n args = parse_args()\n \n if checkEnvironment() is False:\n exit()\n \n if checkArgs(args) is False:\n exit()\n \n tcamera = ObjectDetector(source=args.rtsp,location=args.location)\n tcamera.checkDir() \n \n while True:\n try:\n tcamera.detector_rider()\n except AttributeError:\n pass\n \nif __name__ == '__main__':\n \n main()\n \n \n \n \n \n \n \n ","repo_name":"camera-detects-helmet/no-helmet-detection","sub_path":"ObjectDetector.py","file_name":"ObjectDetector.py","file_ext":"py","file_size_in_byte":8180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4281532020","text":"\"\"\"\n1. def login뷰를 만들고\n2. member app을 include를 이용해서 'member' namespace를 지정\n instagram/urls.py와 member/urls.py모두 사용\n3. login뷰는 member/login URL과 연결되도록 member/urls.py구현\n4. login뷰에서는 member/login.html파일을 렌더함\n5. settings.py에 TEMPLATE_DIR변수를 할당하고 (os.path.join(BASE_DIR, 'templates'))\n TEMPLATE설정의 DIRS에 추가\n\"\"\"\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.shortcuts import render, redirect\n\nfrom .forms import LoginForm, SignupForm\n\n\ndef login_fbv(request):\n if request.method == 'POST':\n # LoginForm을 사용\n form = LoginForm(data=request.POST)\n if form.is_valid():\n # 전달되어온 POST데이터에서 'username'과 'password'키의 값들을 사용\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n # authenticate의 인자로 POST로 전달받은 username, password를 사용\n user = authenticate(username=username, password=password)\n\n # 만약 인증이 정상적으로 완료되었다면\n # (해당하는 username, password에 일치하는 User객체가 존재할경우)\n if user is not None:\n # Django의 인증관리 시스템을 이용하여 세션을 관리해주기 위해 login()함수 사용\n login(request, user)\n return redirect('post:list')\n # 인증에 실패하였다면 (username, password에 일치하는 User객체가 존재하지 않을 경우)\n else:\n form.add_error(None, 'ID or PW incorrect')\n\n # GET method로 요청이 왔을 경우\n else:\n # 빈 LoginForm객체를 생성\n form = LoginForm()\n\n context = {\n 'form': form,\n }\n # member/login.html 템플릿을 render한 결과를 리턴\n return render(request, 'member/login.html', context)\n\n\ndef signup_fbv(request):\n \"\"\"\n 회원가입을 구현하세요\n 1. member/signup.html파일 생성\n 2. SignupForm 클래스 구현\n 3. 해당 Form을 사용해서 signup.html템플릿 구성\n 4. POST요청을 받아 MyUser객체를 생성 후 로그인\n 5. 
로그인 완료되면 post_list 뷰로 이동\n \"\"\"\n if request.method == 'POST':\n form = SignupForm(request.POST)\n if form.is_valid():\n user = form.create_user()\n login(request, user)\n return redirect('post:list')\n else:\n form = SignupForm()\n context = {\n 'form': form,\n }\n return render(request, 'member/signup.html', context)\n\n\ndef profile(request):\n \"\"\"\n 1. button 1개 (로그아웃)이 존재하는 member/profile.html을 render해주는 view\n 2. 메인의 우측 위 사람모양 아이콘에 이 뷰로 오는 링크를 연결\n \"\"\"\n context = {\n\n }\n return render(request, 'member/profile.html', context)\n\n\ndef logout_fbv(request):\n logout(request)\n return redirect('member:login')","repo_name":"chumjikim/instagram","sub_path":"django_app/member/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3049,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32484511465","text":"from django.conf.urls import url\n\nfrom user import apis\n\nurlpatterns = [\n url(r'^get_code/', apis.get_code, name='get_code'),\n url(r'^submit_code/', apis.submit_code, name='submit_code'),\n url(r'^get_profile/', apis.get_profile, name='profile'),\n url(r'^set_profile/', apis.set_profile, name='set_profile'),\n url(r'^upload_avatar/', apis.upload_avatar, name='upload_avatar')\n]\n","repo_name":"realy-qiang/project","sub_path":"finally/swiper/qdz/Swiper/user/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17366028206","text":"from pymeal.constants import REGION, PATH, WEEKDAY\n\nimport requests\nimport re\n\nclass School:\n\t__cookies = {\n\t\t'WMONID': '',\n\t\t'JSESSIONID': '',\n\t\t'schulCode': '',\n\t\t'schulCrseScCode': ''\n\t}\n\tdef __init__(self, region, schoolCode, schoolCrseScCode):\n\t\tself.base_url = REGION[region]\n\t\tself.__cookies['schulCode'] = schoolCode\n\t\tself.__cookies['schulCrseScCode'] = schoolCrseScCode\n\t\t\n\t\tself.__newCookie()\n\t\n\tdef __newCookie(self):\n\t\tcookies = requests.get(self.base_url + PATH.MAIN).cookies\n\t\tself.__cookies['WMONID'] = cookies['WMONID']\n\t\tself.__cookies['JSESSIONID'] = cookies['JSESSIONID']\n\t\n\tdef getMonthlyMeal(self, date):\n\t\tdiet = dict()\n\t\t\n\t\tpayload = {\n\t\t\t'ay': date.strftime('%Y'),\n\t\t\t'mm': date.strftime('%m'),\n\t\t\t'schulCode': self.__cookies['schulCode'],\n\t\t\t'schulCrseScCode': self.__cookies['schulCrseScCode']\n\t\t}\n\t\t\n\t\tr = requests.post(self.base_url+PATH.MONTHLY_DIET, cookies=self.__cookies, json=payload)\n\t\t\n\t\tfor mthDietList in r.json()['resultSVO']['mthDietList']:\n\t\t\tfor weekday in WEEKDAY:\n\t\t\t\ttemp = mthDietList[weekday].replace('
    ', '\\n')\n\t\t\t\t\n\t\t\t\tif temp == ' ':\n\t\t\t\t\tcontinue\n\t\t\t\t\n\t\t\t\ttemp = temp.split('\\n', 1)\n\t\t\t\t\n\t\t\t\tdiet[int(temp[0])] = temp[1] if len(temp) == 2 else ''\n\t\t\n\t\treturn diet\n\t\n\tdef getWeeklyMeal(self, date, replace='', regex=None):\n\t\tdiet = list()\n\t\tr = list()\n\t\t\n\t\tpayload = {\n\t\t\t'schYmd': date.strftime('%Y%m%d'),\n\t\t\t'schMmealScCode': '',\n\t\t\t'schulCode': self.__cookies['schulCode'],\n\t\t\t'schulCrseScCode': self.__cookies['schulCrseScCode']\n\t\t}\n\t\t\n\t\tfor i in range(3):\n\t\t\tpayload['schMmealScCode'] = str(i+1)\n\t\t\tr.append(requests.post(self.base_url+PATH.WEEKLY_DIET, cookies=self.__cookies, json=payload).json())\n\t\t\n\t\tfor weekday in WEEKDAY:\n\t\t\ttemp = dict()\n\t\t\tmenu = list()\n\t\t\t\n\t\t\ttemp['date'] = r[0]['resultSVO']['weekDietList'][0][weekday]\n\t\t\t\n\t\t\tfor i in range(3):\n\t\t\t\t_temp = dict()\n\t\t\t\t\n\t\t\t\t_temp['menu'] = r[i]['resultSVO']['weekDietList'][2][weekday] if len(r[i]['resultSVO']['weekDietList']) == 3 else ''\n\t\t\t\t_temp['cal'] = r[i]['resultSVO']['dietNtrList'][0]['dy{}'.format(3 if weekday == 'sun' else WEEKDAY.index(weekday) + 4)]\n\t\t\t\t\n\t\t\t\tif regex is None:\n\t\t\t\t\t_temp['menu'] = _temp['menu'].replace('
    ', '\\n')\n\t\t\t\telse:\n\t\t\t\t\tregex = re.compile(regex)\n\t\t\t\t\t_temp['menu'] = re.findall(regex, _temp['menu'])\n\t\t\t\t\n\t\t\t\tif len(_temp['menu']) == 0:\n\t\t\t\t\t_temp['menu'] = [replace]\n\t\t\t\t\n\t\t\t\tmenu.append(_temp)\n\t\t\t\n\t\t\ttemp['menu'] = menu\n\t\t\t\n\t\t\tdiet.append(temp)\n\t\t\n\t\treturn diet","repo_name":"JoonyoungYi/pymeal","sub_path":"pymeal/pymeal.py","file_name":"pymeal.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8036772017","text":"import time\nimport numpy as np\n\n#import rospy\n# from geometry_msgs.msg import Twist\n# from std_msgs.msg import String\n# from mbot_nlu.msg import ActionSlotArray\n\n#import cv2, random, time, base64\n#from cv_bridge import CvBridge, CvBridgeError\n\nimport imutils\nimport socket\nimport json\n\nfrom topic_extraction import Extractor\nfrom NLPModule import NLPModule\n\nimport os\nimport sys\nimport argparse\nimport torch\n\nsys.path.append(os.getcwd()+'/comet_commonsense')\n\nimport src.data.data as data\nimport src.data.config as cfg\nimport src.interactive.functions as interactive\n\nimport socket\nimport json\nimport speech_recognition as sr\nfrom gtts import gTTS\nfrom audioplayer import AudioPlayer\n\nclass Request():\n\tdef __init__(self):\n\t\t#self.bridge = CvBridge()\n\n\t\t#node = rospy.init_node('InteractionModule')\n\n\t\tself.dialog_topic = \"\"\n\n\t\tself.server_ip = '127.0.0.1'\n\t\tself.server_port = 9987\n\t\tself.server_ip_intents = '127.0.0.1'\n\t\tself.server_port_intents = 9986\n\n\tdef socket_request(self, sentence):\n\t\tsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\tsock.connect((self.server_ip_intents, self.server_port_intents))\n\t\t#print(\"Connection on {}\".format(port))\n\t\tsock.send(sentence)\n\t\tres = sock.recv(9999999)\n\t\tres_dict = json.loads(res.decode('utf-8'))\n\t\tsock.close()\n\t\treturn res_dict\n\n\n\tdef socket_request_sentence_analysis(self, sentence):\n\t\tsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\tsock.connect((self.server_ip, self.server_port))\n\t\tprint(\"Request category and sentiment analysis . . . 
\\n\")\n\n\t\t#print(\"Connection on {}\".format(port))\n\t\tsock.send(sentence)\n\t\tres = sock.recv(9999999)\n\t\tres_dict = json.loads(res.decode('utf-8'))\n\t\tsock.close()\n\t\treturn res_dict\n\n\tdef action_command(self, command):\n\t\tseconds = time.time()\n\t\tself.pub = rospy.Publisher('hri/nlu/mbot_nlu/input_sentence', String, queue_size=10)\n\t\trospy.sleep(1)\n\t\tself.pub.publish(command)\n\n\t\tmsg = rospy.wait_for_message('hri/nlu/mbot_nlu/output_recognition', ActionSlotArray) \n\n\t\tduration = time.time() - seconds\n\n\t\treturn msg\n\nclass bcolors:\n\tHEADER = '\\033[95m'\n\tOKBLUE = '\\033[94m'\n\tOKCYAN = '\\033[96m'\n\tOKGREEN = '\\033[92m'\n\tWARNING = '\\033[93m'\n\tFAIL = '\\033[91m'\n\tENDC = '\\033[0m'\n\tBOLD = '\\033[1m'\n\tUNDERLINE = '\\033[4m'\n\ndef main():\n\ttest = Extractor()\n\tnlp = NLPModule()\n\treq = Request()\n\n\tquestion = input(\"Enter a question, tape 'end' to finish \\n\")\n\n\twhile not question == \"end\":\n\t\t\n\t\tmain_topic, sentence = test.extract(str(question))\n\n\t\tprint(f\"{bcolors.OKGREEN} \\n New sentence : {bcolors.ENDC} {sentence} \\n\")\n\t\tprint(f\"{bcolors.OKGREEN} \\n Main topic : {bcolors.ENDC} {main_topic} \\n\")\n\n\t\tcategory = nlp.find_category(question)\n\t\tprint(f\"{bcolors.OKGREEN} \\n Sentence category : {bcolors.ENDC} {category} \\n\")\n\n\t\tsentiment = nlp.sentiment_analysis(question)\n\t\tprint(f\"{bcolors.OKGREEN} \\n Sentiment : {bcolors.ENDC} mainly {sentiment} \\n\")\n\n\t\tintents = req.socket_request(str.encode(question))\n\n\t\tprint(f\"{bcolors.OKGREEN}\\n Intentions analysis \\n{bcolors.ENDC}\")\n\n\t\tprint(f\" I view the user as {intents['relation']}\")\n\n\t\tprint(f\" The user intents is {intents['intends']}\")\n\n\t\tprint(f\" The user desire is {intents['desire']}\")\n\n\t\tprint(f\" The user need {intents['needs']}\")\n\n\t\tprint(f\" I believe the user intent is used for {intents['utility']}\")\n\n\t\tprint(f\" I believe the expected outcome is a {intents['type']} \\n\")\n\n\t\tquestion = input(\"Enter a question, tape 'end' to finish \\n\")\n\ndef mainMicrophone():\n\tr = sr.Recognizer()\n\ttest = Extractor()\n\tnlp = NLPModule()\n\treq = Request()\n\n\tprint(\"Ready, speak \\n \")\n\n\twhile True:\n\t\twith sr.Microphone() as source:\n\t\t\tr.adjust_for_ambient_noise(source) \n\t\t\tprint(\"Say something!\")\n\t\t\taudio = r.listen(source)\n\t\t\ttranscript=\"\"\n\t\t\ttry:\n\t\t\t\ttranscript = r.recognize_google(audio)\n\t\t\texcept:\n\t\t\t\tprint(\"Can't understand\")\n\n\t\t\tif transcript == \"\":\n\t\t\t\tcontinue\n\t\t\tprint(\"Recognize: \"+transcript)\n\n\t\t\tquestion = transcript\t\t\n\t\t\tmain_topic, sentence = test.extract(str(question))\n\n\t\t\tprint(f\"{bcolors.OKGREEN} \\n New sentence : {bcolors.ENDC} {sentence} \\n\")\n\t\t\tprint(f\"{bcolors.OKGREEN} \\n Main topic : {bcolors.ENDC} {main_topic} \\n\")\n\n\t\t\tcategory = nlp.find_category(question)\n\t\t\tprint(f\"{bcolors.OKGREEN} \\n Sentence category : {bcolors.ENDC} {category} \\n\")\n\n\t\t\tsentiment = nlp.sentiment_analysis(question)\n\t\t\tprint(f\"{bcolors.OKGREEN} \\n Sentiment : {bcolors.ENDC} mainly {sentiment} \\n\")\n\n\t\t\tintents = req.socket_request(str.encode(question))\n\n\t\t\tprint(f\"{bcolors.OKGREEN}\\n Intentions analysis \\n{bcolors.ENDC}\")\n\n\t\t\tprint(f\" I view the user as {intents['relation']}\")\n\n\t\t\tprint(f\" The user intents is {intents['intends']}\")\n\n\t\t\tprint(f\" The user desire is {intents['desire']}\")\n\n\t\t\tprint(f\" The user need 
{intents['needs']}\")\n\n\t\t\tprint(f\" I believe the user intent is used for {intents['utility']}\")\n\n\t\t\tprint(f\" I believe the expected outcome is a {intents['type']} \\n\")\n\n\t\t\ttext = \"I think you are \"+str(intents['relation'])\n\t\t\ttext += \" and you want to \"+str(intents['desire'])\n\t\t\ttext += \" because it's a \"+str(intents['type'])\n\n\t\t\tlanguage = 'en'\n\t\t\tmyobj = gTTS(text=text, lang=language, slow=False)\n\n\t\t\tmyobj.save(\"test.mp3\")\n\t\t\tAudioPlayer(\"test.mp3\").play(block=True)\n\n\n\t\t\t\n\nif __name__ == '__main__':\n\tmainMicrophone()","repo_name":"ROBOBREIZH/robobreizh_dialog","sub_path":"NLP_Server/inline_request.py","file_name":"inline_request.py","file_ext":"py","file_size_in_byte":5183,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"39999107294","text":"from sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.tree import DecisionTreeClassifier\nfrom myml.tree import DecisionTreeClassifier as myDecisionTreeClassifier\n\n# 加载鸢尾花数据集\niris = load_iris()\nX = iris.data\ny = iris.target\n\n# 将数据集拆分为训练集和测试集\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)\n\n# 创建决策树分类器对象并训练模型\ntree = DecisionTreeClassifier(criterion='gini')\ntree.fit(X_train, y_train)\n\n# 对测试集进行预测\ny_pred = tree.predict(X_test)\n\n# 评估模型性能\naccuracy = accuracy_score(y_test, y_pred)\nprint(\"准确率:\", accuracy)\n\n# 创建决策树分类器对象并训练模型\ntree = myDecisionTreeClassifier(criterion='gain_ratio',max_depth=2)\ntree.fit(X_train, y_train)\n\n# 对测试集进行预测\ny_pred = tree.predict(X_test)\n\n# 评估模型性能\naccuracy = accuracy_score(y_test, y_pred)\nprint(\"准确率:\", accuracy)\n","repo_name":"SleepyVirino/AI_AL","sub_path":"test/tree/iris_classifier.py","file_name":"iris_classifier.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33341763488","text":"\"\"\"\nFind candidate duplex read pairs by examining sequencing summary.\n\nThis script takes a sequencing summary and examines:\n\n * The duration between strands (specifically the duration between the end of\n the current strand and start of next)\n * The proportion between the difference in length between these strands\n\n\"\"\"\n# TODO: rewrite this, its seems a little verbose/contorted\nfrom argparse import ArgumentDefaultsHelpFormatter, ArgumentParser\nimport math\nfrom pathlib import Path\n\nimport pandas as pd\nimport pysam\nfrom tqdm import tqdm\n\nimport duplex_tools\n\n\ndef find_pairs(\n sequencing_summary_path: str,\n outdir: str = \"1d2_pairs_noalign\",\n prefix: str = \"pair\",\n prepend_seqsummary_stem: bool = False,\n max_time_between_reads=20,\n max_seqlen_diff=0.1,\n match_barcodes: bool = False,\n min_qscore: float = None,\n max_abs_seqlen_diff: int = None) -> None:\n \"\"\"Find pairs using metrics stored in a sequencing summary file.\"\"\"\n logger = duplex_tools.get_named_logger(\"FindPairs\")\n logger.info(f'Duplex tools version: {duplex_tools.__version__}')\n outdir, output_pairs, output_intermediate = prepare_output_paths(\n outdir, prefix, prepend_seqsummary_stem, sequencing_summary_path)\n if Path(sequencing_summary_path).suffix in {'.bam', '.sam'}:\n logger.info('Creating seqsummary from bam')\n bamfile = pysam.AlignmentFile(sequencing_summary_path,\n check_sq=False) # Allow uBAM\n records = []\n for read in 
tqdm(bamfile.fetch(until_eof=True)): # Allow uBAM\n records.append({'read_id': read.qname,\n 'duration': read.get_tag('du'),\n 'start_time': read.get_tag('st'),\n 'channel': read.get_tag('ch'),\n 'mux': read.get_tag('mx'),\n 'sequence_length_template': read.query_length,\n 'mean_qscore_template': read.get_tag('qs')\n }\n )\n seqsummary = pd.DataFrame(records)\n seqsummary['start_time'] = (\n pd.to_datetime(seqsummary['start_time']) -\n pd.to_datetime(seqsummary['start_time']).min()\n ).dt.total_seconds().astype(float)\n else:\n logger.info('Loading sequencing summary.')\n dtype = {\n \"read_id\": str,\n \"alignment_genome\": str,\n \"alignment_genome_start\": pd.Int64Dtype(),\n \"alignment_genome_end\": pd.Int64Dtype(),\n \"barcode_arrangement\": str,\n \"start_time\": float,\n \"duration\": float,\n \"channel\": int, \"mux\": int,\n \"sequence_length_template\": int,\n \"mean_qscore_template\": float,\n }\n cols = set(dtype.keys())\n\n def take_column(x):\n return x in cols\n\n seqsummary = pd.read_csv(\n sequencing_summary_path, sep=\"\\t\",\n dtype=dtype, usecols=take_column,\n na_values={\n \"alignment_genome_start\": \"-\",\n \"alignment_genome_end\": \"-\"}\n )\n\n logger.info('Calculating metrics.')\n seqsummary = calculate_metrics_for_next_strand(seqsummary)\n\n try:\n seqsummary = calculate_alignment_metrics(seqsummary)\n except KeyError:\n logger.info(\"No alignment information found for validation.\")\n\n logger.info('Classifying pairs.')\n tempcompsummary = seqsummary_to_tempcompsummary(\n seqsummary,\n max_time_between_reads=max_time_between_reads,\n max_seqlen_diff=max_seqlen_diff,\n match_barcodes=match_barcodes,\n min_qscore=min_qscore,\n max_abs_seqlen_diff=max_abs_seqlen_diff)\n\n candidate_pairs = seqsummary.query('candidate_followon')\n ncandidate_pairs = len(candidate_pairs)\n nstrands = len(seqsummary)\n frac_pairs = 100 * ncandidate_pairs * 2 / nstrands\n logger.info(\n f\"Found {ncandidate_pairs} pairs within {nstrands} reads. 
\"\n f\"({frac_pairs:.1f}% of reads are part of a pair).\")\n logger.info('Values above 100% are allowed since reads can be either '\n 'template or complement')\n logger.info(f'Writing files into {outdir} directory')\n\n # Write\n tempcompsummary.to_csv(output_intermediate, index=False, sep=\"\\t\")\n candidate_pairs[['read_id', 'read_id_next']] \\\n .drop_duplicates() \\\n .to_csv(output_pairs, index=False, sep=\" \", header=False)\n\n\ndef prepare_output_paths(\n outdir, prefix, prepend_seqsummary_stem, sequencing_summary_path):\n \"\"\"Decide output paths.\"\"\"\n sspath = Path(sequencing_summary_path)\n if prepend_seqsummary_stem:\n prefix = f\"{sspath.stem}_{prefix}\"\n outdir = Path(outdir)\n outdir.mkdir(parents=True, exist_ok=True)\n output_intermediate = Path(outdir, f'{prefix}_stats.txt')\n output_pairs = Path(outdir, f'{prefix}_ids.txt')\n return outdir, output_pairs, output_intermediate\n\n\ndef calculate_alignment_metrics(seqsummary) -> pd.DataFrame:\n \"\"\"Calculate alignment metrics for validation.\n\n These are optional and are not used for classification.\n \"\"\"\n seqsummary[\"alignment_genome_next\"] = \\\n seqsummary[\"alignment_genome\"].shift(-1)\n seqsummary[\"alignment_genome_start_next\"] = \\\n seqsummary[\"alignment_genome_start\"].shift(-1)\n seqsummary[\"alignment_genome_end_next\"] = \\\n seqsummary[\"alignment_genome_end\"].shift(-1)\n seqsummary[\"bases_between_read_starts\"] = (\n seqsummary[\"alignment_genome_start_next\"]\n - seqsummary[\"alignment_genome_start\"])\n seqsummary[\"bases_between_read_ends\"] = (\n seqsummary[\"alignment_genome_end_next\"]\n - seqsummary[\"alignment_genome_end\"])\n seqsummary[\"bases_between_read_min\"] = (\n seqsummary[[\"bases_between_read_starts\", \"bases_between_read_ends\"]]\n .abs().min(axis=1))\n return seqsummary\n\n\ndef seqsummary_to_tempcompsummary(\n seqsummary: pd.DataFrame,\n max_time_between_reads: float = 20,\n max_seqlen_diff: float = 0.1,\n match_barcodes: bool = False,\n min_qscore: float = None,\n max_abs_seqlen_diff: int = None,\n ) -> pd.DataFrame:\n \"\"\"Determine read pairs from annotated sequence summary.\"\"\"\n logger = duplex_tools.get_named_logger(\"FindPairs\")\n # Default filtering\n seqsummary[\"candidate_followon\"] = (\n (-1.00 <= seqsummary[\"duration_until_next_start\"])\n & (seqsummary[\"duration_until_next_start\"] < max_time_between_reads))\n logger.info(f'{seqsummary[\"candidate_followon\"].sum()} pairs after '\n f'filtering on duration between reads. (max '\n f'{max_time_between_reads} s)')\n\n seqsummary[\"candidate_followon\"] = (\n seqsummary[\"candidate_followon\"]\n & (seqsummary[\"fraction_missing_from_longest\"] < max_seqlen_diff))\n logger.info(f'{seqsummary[\"candidate_followon\"].sum()} pairs after '\n f'filtering on relative sequence length difference. (max '\n f'{max_seqlen_diff*100}% difference)')\n\n # Additional filtering\n if max_abs_seqlen_diff:\n seqsummary[\"candidate_followon\"] = (\n seqsummary[\"candidate_followon\"]\n & (seqsummary[\"sequence_length_difference\"] < max_abs_seqlen_diff)\n )\n logger.info(f'{seqsummary[\"candidate_followon\"].sum()} pairs after '\n f'absolute sequence length filtering. ('\n f'max {max_abs_seqlen_diff} bp)')\n\n try:\n if min_qscore:\n seqsummary[\"candidate_followon\"] = (\n seqsummary[\"candidate_followon\"]\n & (seqsummary[\"mean_qscore_template\"] > min_qscore)\n & (seqsummary[\"mean_qscore_template_next\"] > min_qscore)\n )\n logger.info(\n f'{seqsummary[\"candidate_followon\"].sum()} pairs after '\n f'qscore filtering. 
(min qscore = {min_qscore})')\n\n    except KeyError:\n        logger.info(\"qscore data not available. Skipping the filter of \"\n                    f\"min_qscore: {min_qscore}\")\n\n    if match_barcodes:\n        first = seqsummary['barcode_arrangement']\n        second = seqsummary[\"barcode_arrangement_next\"]\n        seqsummary[\"candidate_followon\"] = (\n            seqsummary[\"candidate_followon\"] & (first == second))\n\n    # first reads\n    templates = seqsummary[seqsummary['candidate_followon']].copy()\n    templates['pair_id'] = \\\n        templates['read_id'] + ' ' + templates['read_id_next']\n    templates.dropna(inplace=True)\n    # second reads\n    complements = (\n        seqsummary\n        .set_index('read_id')\n        .loc[templates['read_id_next']]\n        .reset_index())\n    complements['fraction_missing_from_longest'] = math.nan\n    complements['duration_until_next_start'] = math.nan\n    complements['sequence_length_difference'] = math.nan\n    try:\n        complements['mean_qscore_template_next'] = math.nan\n    except KeyError:\n        pass\n    complements['pair_id'] = \\\n        complements['read_id_prev'] + ' ' + complements['read_id']\n    # join first and second reads\n    stats_per_read = pd.concat(\n        [\n            templates.assign(strand='template'),\n            complements.assign(strand='complement')]\n    ).sort_values(['pair_id', 'start_time'])\n\n    return stats_per_read.drop(\n        columns=[\n            'candidate_followon', 'read_id_next', 'read_id_prev',\n            'start_time_next', 'sequence_length_template_next'],\n        errors='ignore')\n\n\ndef calculate_metrics_for_next_strand(\n        seqsummary: pd.DataFrame) -> pd.DataFrame:\n    \"\"\"Calculate pairing metrics for read pairs.\n\n    * fraction_missing_from_longest: The difference in sequence length between\n      the pairs. Example: Would be 0.2 if one strand is 1000bp and the\n      following one is 800bp (abs(1000bp-800bp)/1000bp)\n    * duration_until_next_start: The duration between the end of one strand\n      and the start of the next. Example: Read 1 starts at 10s and is 5s long.\n      Read 2 starts at 20s. Duration until next is 20 - (10 + 5) = 5s\n    \"\"\"\n    # ensure table is sorted and annotate next read info\n    logger = duplex_tools.get_named_logger('FindPairs')\n    seqsummary.sort_values(\n        [\"channel\", \"mux\", \"start_time\"], inplace=True)\n    # TODO: this isn't quite right, we need to group by channel and mux\n    # to analyse independently\n    seqsummary[\"read_id_next\"] = seqsummary[\"read_id\"].shift(-1)\n    seqsummary[\"read_id_prev\"] = seqsummary[\"read_id\"].shift(1)\n    seqsummary[\"start_time_next\"] = seqsummary[\"start_time\"].shift(-1)\n    seqsummary[\"sequence_length_template_next\"] = \\\n        seqsummary[\"sequence_length_template\"].shift(-1)\n\n    seqsummary[\"sequence_length_difference\"] = (seqsummary[\n        \"sequence_length_template_next\"] - seqsummary[\n        \"sequence_length_template\"]).abs()\n    try:\n        seqsummary[\"mean_qscore_template_next\"] = \\\n            seqsummary[\"mean_qscore_template\"].shift(-1)\n    except KeyError:\n        logger.debug('qscore not available in the summary. 
Cannot use for '\n 'metrics')\n pass\n\n seqsummary[\"end_time\"] = seqsummary[\"start_time\"] + seqsummary[\"duration\"]\n\n # If there is barcode information (arrangement and scores),\n # then make it available for classification if necessary\n if \"barcode_arrangement\" in seqsummary.columns:\n seqsummary[\"barcode_arrangement_next\"] = \\\n seqsummary[\"barcode_arrangement\"].shift(-1)\n if \"barcode_front_score\" in seqsummary.columns:\n seqsummary[\"barcode_front_score_next\"] = \\\n seqsummary[\"barcode_front_score\"].shift(-1)\n if \"barcode_rear_score\" in seqsummary.columns:\n seqsummary[\"barcode_rear_score_next\"] = \\\n seqsummary[\"barcode_rear_score\"].shift(-1)\n\n # difference between read lengths of pair\n bases_differing = (\n seqsummary[\"sequence_length_template_next\"]\n - seqsummary[\"sequence_length_template\"]).abs()\n # length of longest read in pair\n bases_longest = seqsummary[\n [\"sequence_length_template_next\", \"sequence_length_template\"]\n ].max(axis=1)\n # fractional length difference\n seqsummary[\"fraction_missing_from_longest\"] = \\\n bases_differing / bases_longest\n # duration in seconds between the end of the strand and the start of next\n seqsummary[\"duration_until_next_start\"] = (\n seqsummary[\"start_time_next\"] - seqsummary[\"end_time\"])\n return seqsummary\n\n\ndef add_args(parser):\n \"\"\"Add arguments specific to this process.\"\"\"\n parser.add_argument(\n \"--prefix\", default=\"pair\",\n help=\"\")\n parser.add_argument(\n \"--prepend_seqsummary_stem\", action=\"store_true\",\n help=\"Add filename of the sequencing summary to output files.\")\n parser.add_argument(\n \"--max_time_between_reads\", type=int, default=200000,\n help=(\n \"Maximum time (seconds) between reads for them to be \"\n \"deemed a pair.\"))\n parser.add_argument(\n \"--max_seqlen_diff\", type=float, default=0.1,\n help=(\n \"Maximum ratio (a - b) / a, where a and b are the \"\n \"sequence lengths of a putative pair.\"))\n parser.add_argument(\n \"--max_abs_seqlen_diff\", type=int, default=5000,\n help=(\n \"Maximum sequence length difference between template and \"\n \"complement\"))\n parser.add_argument(\n \"--min_qscore\", type=float, default=6,\n help=(\n \"The minimum simplex qscore required from both template and \"\n \"complement\"))\n parser.add_argument(\n \"--verbose\", action=\"store_true\",\n help=\"Logging level\")\n parser.add_argument(\n \"--match_barcodes\", action=\"store_true\",\n help=\"Require putative pair to contain same barcodes.\")\n return parser\n\n\ndef argparser():\n \"\"\"Create argument parser.\"\"\"\n parser = ArgumentParser(\n \"Create candidate pairs from sequencing summary.\",\n formatter_class=ArgumentDefaultsHelpFormatter,\n parents=[duplex_tools._log_level()], add_help=False)\n parser.add_argument(\n \"sequencing_summary\",\n help=\"Sequencing summary file.\")\n parser.add_argument(\n \"output\",\n help=\"Output directory.\")\n parser = add_args(parser)\n\n return parser\n\n\ndef main(args):\n \"\"\"Entry point.\"\"\"\n find_pairs(\n sequencing_summary_path=args.sequencing_summary,\n outdir=args.output,\n prefix=args.prefix,\n prepend_seqsummary_stem=args.prepend_seqsummary_stem,\n max_time_between_reads=args.max_time_between_reads,\n max_seqlen_diff=args.max_seqlen_diff,\n match_barcodes=args.match_barcodes,\n min_qscore=args.min_qscore,\n 
max_abs_seqlen_diff=args.max_abs_seqlen_diff)\n","repo_name":"nanoporetech/duplex-tools","sub_path":"duplex_tools/pairs_from_summary.py","file_name":"pairs_from_summary.py","file_ext":"py","file_size_in_byte":14830,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"61"} +{"seq_id":"74821103555","text":"import sys \n\nreload(sys) \nsys.setdefaultencoding('utf8')\n\nimport os\nimport io\nimport time\nimport csv \nimport re\nfrom os.path import basename,splitext\nfrom urlparse import urlsplit\nfrom urllib import urlretrieve\nimport urlparse\nfrom bs4 import BeautifulSoup\nimport urllib2\n\nfrom google.cloud import vision\n\nstringy='alt='\nstatic_img_path='/media/sf_codes/tensorFlow_lastLayer/data'\nsaved_image_path = '/media/sf_codes/tensorFlow_lastLayer/saved_images'\nos.environ[\"GCLOUD_PROJECT\"] = \"feisty-card-155322\"\nos.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"]='/home/westy/.config/gcloud/application_default_credentials.json'\nwiki_url = \"http://en.wikipedia.org/wiki/Main_Page\"\ntwitter_url = 'https://twitter.com/search?vertical=default&q=joesphfcox&src=typd'\ncityBmore_url =\"http://www.baltimorecity.gov/\"\nhackathon_url = 'https://www.baltimorehackathon.com/'\nbmorewiki_url = \"https://en.wikipedia.org/wiki/Baltimore\"\nmarket_url = \"http://www.marketwatch.com/\"\n# There is a copy of the credentials in the project home dir, linux needs the above path, but to run on another machine \n# then uncomment the following line\n# os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"]='/home/westy/.config/gcloud/application_default_credentials.json'\n\ndef scrape_images(url = wiki_url,output_dir=saved_image_path): \n imageList = []\n print(\"Scraping images from %s\" % url)\n soup = BeautifulSoup(urllib2.urlopen(url),'lxml')\n # for img in soup.select('a.image > img'):\n for img in soup.select('a.image > img'):\n img_url = urlparse.urljoin(url, img['src'])\n file_name = img['src'].split('/')[-1]\n urlretrieve(img_url, output_dir + '/'+ file_name)\n imageList.append(file_name)\n return imageList\n\ndef scrape_html(url):\n response = urllib2.urlopen(url)\n html = response.read()\n with open('html/file.html','wb') as f:\n f.write(html)\n f.close()\n return html\n\ndef insert_alts(html):\n soup = BeautifulSoup(html)\n for img in soup.findAll('img'):\n img['src'] = 'cid:' + splitext(basename(img['src']))[0]\n my_html_string = str(soup)\n\ndef gatherFiles(img_path=static_img_path):\n files = [file for file in os.listdir(saved_image_path) if not file.startswith(\".\")]\n return files\n\ndef detect_text(path):\n \"\"\"Detects text in the file.\"\"\"\n vision_client = vision.Client()\n\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = vision_client.image(content=content)\n\n texts = image.detect_text()\n # print('Texts:')\n\n # for text in texts:\n # print('\\n\"{}\"'.format(text.description))\n\n # vertices = (['({},{})'.format(bound.x_coordinate, bound.y_coordinate)\n # for bound in text.bounds.vertices])\n\n # print('bounds: {}'.format(','.join(vertices)))\n return texts\n\ndef detect_entity(path):\n \"\"\"Detects web annotations given an image.\"\"\"\n vision_client = vision.Client()\n\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = vision_client.image(content=content)\n\n notes = image.detect_web()\n\n # if notes.web_entities:\n # print ('\\n{} Web entities found: '.format(len(notes.web_entities)))\n\n # for entity in notes.web_entities:\n # print('Score : {}'.format(entity.score))\n 
# print('Description: {}'.format(entity.description))\n return notes.web_entities\n\ndef detect_labels(path):\n \"\"\"Detects labels in the file.\"\"\"\n vision_client = vision.Client()\n\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = vision_client.image(content=content)\n\n labels = image.detect_labels()\n print('Labels:')\n\n # for label in labels\n return labels\n\ndef output(dict):\n for key,value in dict.iteritems():\n print(key,value)\n\ndef findnth(haystack, needle, n):\n parts= haystack.split(needle, n+1)\n if len(parts)<=n+1:\n return -1\n return len(haystack)-len(parts[-1])-len(needle)\n\n\nif __name__=='__main__':\n # print( \"Gathering the images to process from: %s\" % bmorewiki_url)\n img_list = scrape_images(url = bmorewiki_url,output_dir = saved_image_path)\n print(\"%s image(s) were added to the dir %s\" %(len(img_list),saved_image_path))\n content = scrape_html(url = bmorewiki_url)\n # print(\"Raw HTML file was added to html/final.html\")\n\n files = gatherFiles(img_path =saved_image_path)\n # for i in files:\n # print(i)\n holder = dict.fromkeys(files)\n # print(\"Detecting text inside the images via Google Vision API\")\n # for key,value in holder.iteritems():\n # #print(key)\n # txts = detect_text(saved_image_path + '/' + str(key))\n # #txts = detect_text(r'/media/sf_codes/tensorFlow_lastLayer/saved_images/220px-Race_and_ethnicity_2010-_Baltimore_%285559896701%29.png')\n # max_length,longest_element = max([(len(x.description),x.description) for x in txts])\n # holder[key] = [longest_element]\n # print(\"60 Second speed bump initiated so that this program does not exceed the 1 minute API call quota\")\n # time.sleep(60)\n print(\"Detecting web element type via Google Vision API\")\n notes = None\n for key,value in holder.iteritems():\n # print(key)\n while notes is None:\n try:\n notes = detect_entity(saved_image_path + '/' + str(key))\n except:\n pass\n # [print(x.score) for x in notes]\n max_prob, max_element = max([(x.score,x.description) for x in notes])\n\n # max_prob, max_element = max(notes, key= lambda x.score: x.description)\n # holder[key].append(max_element)\n holder[key] = max_element\n notes = None\n print(\"60 Second speed bump initiated so that this program does not exceed the 1 minute API call quota\")\n time.sleep(60)\n\n count = 0\n for img in img_list:\n index = findnth(content,stringy,count)\n content = content[:index+len(stringy)+1] + holder[img] + content[index+len(stringy)+1:]\n count+=1\n with open('html/final.html','wb') as f:\n f.write(''.join(content))\n f.close()\n # print(\"Creating the output file\")\n # with open('output/output.csv','wb') as f:\n # w = csv.DictWriter(f, holder.keys())\n # w.writeheader()\n # w.writerow(holder)\n # output(holder)\n\n","repo_name":"inunotaisho/whitestick","sub_path":"custom_voiceBox3.py","file_name":"custom_voiceBox3.py","file_ext":"py","file_size_in_byte":6317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25573538382","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n'''\nfor two lists, print the intersection of them\n\n2015.05.28 by xnm\n'''\n\nimport sys\nfilename1 = sys.argv[1]\nfilename2 = sys.argv[2]\n\nfile_1 = open(filename1, 'r')\nfile_2 = open(filename2, 'r')\nfile_out = open(filename1[:-4]+'_'+filename2[:-4]+'_intersec.txt','w')\nfile_qua = open(filename1[:-4]+'_'+filename2[:-4]+'_quality.txt','w')\n\ncount_1 = 0\ncount_2 = 0\ncount_insec = 0\n\ndataset2 = file_2.readlines()\nfor line in 
file_1:\n#\tline = line.rstrip() #16.5.22 have to comment this to get results.\n\tcount_1 += 1\n\tfor i in dataset2:\n\t\tif line == i:\n\t\t\tfile_out.write(line)\n\t\t\tcount_insec += 1\n\ncount_2 = len(dataset2)\n\nfile_qua.write(filename1+' size = '+str(count_1)+'\\n')\nfile_qua.write(filename2+' size = '+str(count_2)+'\\n')\nfile_qua.write('intersection size = '+str(count_insec)+'\\n')\ninter_in_1 = round(count_insec*1.0/count_1*100,2)\ninter_in_2 = round(count_insec*1.0/count_2*100,2)\nfile_qua.write('intersection/'+filename1+'% = '+str(inter_in_1)+'\\n')\nfile_qua.write('intersection/'+filename2+'% = '+str(inter_in_2)+'\\n')\n\nfile_1.close()\nfile_2.close()\nfile_qua.close()\nfile_out.close()","repo_name":"x-nm/Python","sub_path":"Python_miRNA/intersecter.py","file_name":"intersecter.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24601151866","text":"\nfrom flask_restful import Resource, reqparse\nfrom flask_jwt import jwt_required\n\nfrom models.favorite import FavoriteModel\n\n\nclass Favorite(Resource):\n\n    parser = reqparse.RequestParser()\n\n    parser.add_argument(\"id_tourist_spot\", \n                        type=int, \n                        help=\"The field 'id_tourist_spot' cannot be left blank!\")\n    parser.add_argument(\"id_user\", \n                        type=int, \n                        help=\"The field 'id_user' cannot be left blank!\")\n    \n    @jwt_required()\n    def get(self, id_user):\n        # GET /tourist-spot//picture\n        return {\"favorite_list\": list(map(lambda x: x.json(), FavoriteModel.find_favorite_by_id_user(id_user)))}\n\n    @jwt_required()    \n    def post(self, id_user):\n        # POST /tourist-spot//picture\n        data = self.parser.parse_args() \n\n        favorite = FavoriteModel(**data)\n        try:\n            favorite.save_to_db()\n        except Exception:\n            return {\"Message\": \"An error occurred inserting the favorite.\"}, 500\n        return favorite.json(), 201\n    ","repo_name":"ClaudioSiervi/backend-challenge-snowman","sub_path":"resources/favorite.py","file_name":"favorite.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"19487340751","text":"import torch\nimport numpy as np\nfrom Discriminator import Discriminator\nfrom Generator import Generator\nfrom viz import *\nimport torch.nn.functional as F\nimport pickle as pkl\n\ndef test(FLAGS):\n\n    sample_size = FLAGS.eval_size\n    z_size = FLAGS.zsize\n    cuda = FLAGS.cuda\n    g_path = FLAGS.gpath\n    d_path = FLAGS.dpath\n    map_location = 'cuda' if cuda else 'cpu'\n\n    # Load the models\n    dckpt = torch.load(d_path, map_location=map_location)\n    gckpt = torch.load(g_path, map_location=map_location)\n\n    D = Discriminator(784, 128, 1)\n    G = Generator(100, 32, 784)\n\n    D.load_state_dict(dckpt['state_dict'])\n    G.load_state_dict(gckpt['state_dict'])\n\n    # Define some latent vectors\n    z = np.random.uniform(-1, 1, size=(sample_size, z_size))\n    z = torch.from_numpy(z).float()\n\n    if cuda:\n        z = z.cuda()\n\n    # Eval mode\n    G.eval()\n\n    rand_images = G(z)\n\n    view_samples(0, [rand_images])\n","repo_name":"iArunava/MNIST-GAN","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"5335482031","text":"from abc import ABC, abstractmethod\nfrom dataclasses import dataclass\nfrom random import Random\nfrom typing import Iterator\n\nfrom river import metrics, utils\n\n\n@dataclass\nclass Arm:\n    \"\"\"An arm in a multi-armed 
bandit.\n\n It may also be referred to as a \"lever\".\n\n \"\"\"\n\n index: int\n metric: metrics.Metric\n n_pulls: int = 0\n\n\nclass Bandit:\n \"\"\"(Multi-armed) bandit (MAB) policy.\n\n A bandit is composed of multiple arms. A policy is in charge of determining the best one.\n\n \"\"\"\n\n def __init__(self, n_arms: int, metric: metrics.Metric):\n self.arms = [Arm(index=i, metric=metric.clone()) for i in range(n_arms)]\n self.metric = metric\n self.best_arm = self.arms[0]\n self.n_pulls = 0\n\n def update(self, arm: Arm, **metric_kwargs):\n self.n_pulls += 1\n arm.n_pulls += 1\n arm.metric.update(**metric_kwargs)\n self.best_arm = max(\n self.arms,\n key=lambda arm: arm.metric.get()\n if self.metric.bigger_is_better\n else -arm.metric.get(),\n )\n\n @property\n def ranking(self):\n return [\n arm.index\n for arm in sorted(\n self.arms,\n key=lambda arm: arm.metric.get(),\n reverse=self.metric.bigger_is_better,\n )\n ]\n\n def __repr__(self):\n return utils.pretty.print_table(\n headers=[\n \"Ranking\",\n self.metric.__class__.__name__,\n \"Pulls\",\n \"Share\",\n ],\n columns=[\n [f\"#{self.ranking.index(arm.index)}\" for arm in self.arms],\n [f\"{arm.metric.get():{self.metric._fmt}}\" for arm in self.arms],\n [f\"{arm.n_pulls:,d}\" for arm in self.arms],\n [f\"{arm.n_pulls / self.n_pulls:.2%}\" for arm in self.arms],\n ],\n )\n\n\nclass BanditPolicy(ABC):\n \"\"\"A policy for solving bandit problems.\"\"\"\n\n def __init__(self, burn_in: int, seed: int):\n self.burn_in = burn_in\n self.seed = seed\n self.rng = Random(seed)\n\n def pull(self, bandit: Bandit) -> Iterator[Arm]:\n burn_in_over = True\n for arm in bandit.arms:\n if arm.n_pulls < self.burn_in:\n yield arm\n burn_in_over = False\n if burn_in_over:\n yield from self._pull(bandit)\n\n @abstractmethod\n def _pull(self, bandit: Bandit) -> Iterator[Arm]:\n ...\n","repo_name":"lucasczz/DAADS","sub_path":"river/river/_bandit/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"36287488272","text":"# coding=utf-8\nimport logging\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom utils.datatable import DatatableView\nfrom soc_user.models import RolePermissions, Permissions, Roles\nfrom soc.serializers import RoleSerializers\nfrom utils.auth import AllowAdminWithPassword\n\nlogger = logging.getLogger(\"console\")\n\n\ndef re_per(per_list, per_tree=list(), parent_list=list(), max_re=5, re=0):\n \"\"\"\n 递归 构建权限树\n :param per_list: 原始权限列表\n :param per_tree: 权限树\n :param parent_list: 当前递归所需的父级权限列表\n :param max_re: 最大递归层数\n :param re: 当前递归层数\n :return:\n \"\"\"\n\n re += 1\n if re > max_re:\n return per_tree\n need_remove = []\n if not per_list:\n return per_tree\n if not per_tree:\n for index, per in enumerate(per_list):\n if per.get('parent_id') is None:\n per['re'] = re\n per_tree.append(per)\n need_remove.append(per)\n parent_list.append(per)\n for i in need_remove:\n per_list.remove(i)\n\n else:\n new_parent_list = []\n for parent in parent_list:\n need_remove = []\n for role in filter(lambda x: x.get(\"parent_id\") == parent.get(\"permission_id\"), per_list):\n role['re'] = re\n parent.get(\"children\").append(role)\n need_remove.append(role)\n new_parent_list.append(role)\n for i in need_remove:\n per_list.remove(i)\n parent_list = new_parent_list\n\n return re_per(per_list, per_tree, parent_list=parent_list, max_re=max_re, re=re)\n\n\nclass 
RolePermissionDetail(APIView):\n \"\"\"权限管理\"\"\"\n permission_classes = (AllowAdminWithPassword,)\n \"\"\"\n {\n \"status\": 200,\n \"data\": [\n {\n \"parent_id\": null,\n \"enable\": 1,\n \"name\": \"首页\",\n \"re\": 1,\n \"url\": \"/api/soc/1\",\n \"permission_id\": 1,\n \"children\": [\n {\n \"parent_id\": 1,\n \"enable\": 1,\n \"name\": \"总览\",\n \"re\": 2,\n \"url\": \"/api/soc/1/1\",\n \"permission_id\": 2,\n \"children\": [\n {\n \"parent_id\": 2,\n \"enable\": 1,\n \"name\": \"总览报表\",\n \"re\": 3,\n \"url\": \"/api/soc/1/1\",\n \"permission_id\": 3,\n \"children\": [],\n \"id\": 3,\n \"method\": \"GET\"\n }\n ],\n \"id\": 2,\n \"method\": \"GET\"\n }\n ],\n \"id\": 1,\n \"method\": \"GET\"\n }\n ]\n}\n\n \"\"\"\n @staticmethod\n def create_permissions(agent_id, role_id):\n if RolePermissions.objects.filter(agent_id=agent_id, role_id=role_id).count() == \\\n Permissions.objects.count():\n return True\n for per in Permissions.objects.all():\n RolePermissions.objects.get_or_create(defaults={\"enable\": per.default_enable},\n permissions=per, agent_id=agent_id, role_id=role_id)\n\n def get(self, request, role_id):\n \"\"\"获取权限管理信息\"\"\"\n agent_id = request.user.userinfo.agent_id\n if not Roles.objects.filter(id=role_id).exists():\n return Response({\"status\": 500, \"msg\": \"Role Id 错误\"})\n self.create_permissions(agent_id, role_id)\n role_permissions = RolePermissions.objects.filter(\n agent_id=agent_id, role_id=role_id)\n permission_list = []\n for role_obj in role_permissions:\n data = {\n \"id\": role_obj.id,\n \"permission_id\": role_obj.permissions_id,\n \"name\": role_obj.permissions.name,\n \"parent_id\": role_obj.permissions.parent_id,\n \"enable\": role_obj.enable,\n \"children\": [],\n \"show\": False\n }\n permission_list.append(data)\n data = re_per(permission_list, [], [])\n return Response({\"status\": 200, \"data\": data})\n\n def put(self, request, role_id):\n \"\"\"修改权限\"\"\"\n agent = request.user.userinfo.agent\n agent_id = agent.id\n try:\n role = Roles.objects.get(id=role_id, agent_id=agent_id)\n except Roles.DoesNotExist:\n return Response({\"status\": 500, \"msg\": \"Role Id 错误\"})\n\n if role.is_admin:\n return Response({\"status\": 500, \"msg\": \"管理员权限不能修改\"})\n\n serializer = RoleSerializers(\n instance=role, data=request.data, context={\"agent\": agent})\n if serializer.is_valid():\n serializer.save()\n else:\n context = {\n \"status\": 500,\n \"msg\": serializer.errors.items()[0][1][0],\n \"errors\": serializer.errors\n }\n return Response(context)\n return Response({\"status\": 200, \"msg\": \"修改成功\"})\n\n def delete(self, request, role_id):\n \"\"\"删除权限\"\"\"\n agent = request.user.userinfo.agent\n try:\n role = Roles.objects.get(id=role_id, agent=agent)\n except Roles.DoesNotExist:\n return Response({\"status\": 500, \"msg\": \"Role Id 错误\"})\n if role.is_admin:\n return Response({\"status\": 500, \"msg\": \"管理员权限不能删除\"})\n\n if role.userinfo_set.count() > 0:\n return Response({\"status\": 500, \"msg\": \"使用中的权限组不能删除\"})\n\n role.delete()\n return Response({\"status\": 200, \"msg\": \"删除成功\"})\n\n\nclass RolePermissionSelectList(APIView):\n \"\"\"权限管理\"\"\"\n\n def get(self, request):\n \"\"\"获取权限管理列表\"\"\"\n agent = request.user.userinfo.agent\n data = Roles.objects.filter(agent=agent, enable=1).values(\"id\", \"name\")\n return Response({\"status\": 200, \"data\": data})\n\n\nclass RolePermissionList(APIView):\n \"\"\"权限管理\"\"\"\n permission_classes = (AllowAdminWithPassword, )\n\n def get(self, request):\n \"\"\"获取管理权限信息\"\"\"\n permission_list = 
[]\n for i in Permissions.objects.values('id', 'parent_id', 'name'):\n i.update({\n \"permission_id\": i['id'],\n \"children\": [],\n \"enable\": 1,\n \"show\": False,\n\n })\n permission_list.append(i)\n data = re_per(permission_list, [], [])\n return Response({\"status\": 200, \"data\": data})\n\n def post(self, request):\n \"\"\"创建角色\"\"\"\n agent = request.user.userinfo.agent\n serializer = RoleSerializers(\n data=request.data, context={\"agent\": agent})\n if serializer.is_valid():\n serializer.save()\n else:\n context = {\n \"status\": 500,\n \"msg\": serializer.errors.items()[0][1][0],\n \"errors\": serializer.errors\n }\n return Response(context)\n\n return Response({\"status\": 200, \"msg\": '创建成功'})\n\n\nclass RoleList(DatatableView):\n \"\"\"角色列表\"\"\"\n render_columns = [\n (\"id\", \"id\", 0),\n (\"name\", \"name\", 1),\n (\"enable\", \"enable\", 0),\n (\"is_admin\", \"is_admin\", 0),\n (\"users\", \"id\", 0),\n ]\n\n model = Roles\n\n def get_initial_queryset(self):\n agent_id = self.request.user.userinfo.agent_id\n inventory = self.model.objects.filter(agent_id=agent_id)\n return inventory\n\n def prepare_results(self, qs):\n \"\"\"\n 格式化输出形式, 最终输出的 data(>1.10)/aaData\n qs 为查询集合\n \"\"\"\n data = []\n # 有 column 的话返回对应 column 值字典\n columns = self.get_columns()\n for item in qs:\n data_dict = {\n self.render_columns[columns.index(column)][0]: self.render_column(item, '.'.join(column.split('__')))\n for column in columns\n }\n data_dict['users'] = item.userinfo_set.count()\n data.append(data_dict)\n return data\n","repo_name":"sundw2015/841","sub_path":"soc/views/permission_views.py","file_name":"permission_views.py","file_ext":"py","file_size_in_byte":8435,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"14784236144","text":"from tkinter import *\nfrom tkinter import messagebox\n\n# Creating Windows and Labels with Tkinter\nroot = Tk()\nroot.title('Tic Tac Toe')\n\nclicked = True\ncount = 0\n\n\n# Def that disable all buttons when the winner exist:\n\ndef disable_all_buttons():\n b1.config(state=DISABLED)\n b2.config(state=DISABLED)\n b3.config(state=DISABLED)\n b4.config(state=DISABLED)\n b5.config(state=DISABLED)\n b6.config(state=DISABLED)\n b7.config(state=DISABLED)\n b8.config(state=DISABLED)\n b9.config(state=DISABLED)\n\n\n# Def that color in green the winner buttons and send a message about winner:\n\ndef green_winners(winner_bt_1, winner_bt_2, winner_bt_3):\n winner_bt_1.config(bg=\"green\")\n winner_bt_2.config(bg=\"green\")\n winner_bt_3.config(bg=\"green\")\n if winner_bt_1['text'] == \"X\":\n messagebox.showinfo(\"Tic Tac Toe\", \"The winner is: X\")\n elif winner_bt_1['text'] == 'O':\n messagebox.showinfo(\"Tic Tac Toe\", \"The winner is: O\")\n\n disable_all_buttons()\n\n\n# Def that verify the if winner exist:\n\ndef if_winner():\n global winner\n winner = False\n if b1['text'] == \"X\" and b2['text'] == 'X' and b3['text'] == 'X' \\\n or b1['text'] == \"O\" and b2['text'] == 'O' and b3['text'] == 'O':\n green_winners(b1, b2, b3)\n winner = True\n\n elif b4['text'] == \"X\" and b5['text'] == 'X' and b6['text'] == 'X' \\\n or b4['text'] == \"O\" and b5['text'] == 'O' and b6['text'] == 'O':\n green_winners(b4, b5, b6)\n winner = True\n\n elif b7['text'] == \"X\" and b8['text'] == 'X' and b9['text'] == 'X' \\\n or b7['text'] == \"O\" and b8['text'] == 'O' and b9['text'] == 'O':\n green_winners(b7, b8, b9)\n winner = True\n\n elif b1['text'] == \"X\" and b4['text'] == 'X' and b7['text'] == 'X' \\\n or 
b1['text'] == \"O\" and b4['text'] == 'O' and b7['text'] == 'O':\n green_winners(b1, b4, b7)\n winner = True\n\n elif b2['text'] == \"X\" and b5['text'] == 'X' and b8['text'] == 'X' \\\n or b2['text'] == \"O\" and b5['text'] == 'O' and b8['text'] == 'O':\n green_winners(b2, b5, b8)\n winner = True\n\n elif b3['text'] == \"X\" and b6['text'] == 'X' and b9['text'] == 'X' \\\n or b3['text'] == \"O\" and b6['text'] == 'O' and b9['text'] == 'O':\n green_winners(b3, b6, b9)\n winner = True\n\n elif b1['text'] == \"X\" and b5['text'] == 'X' and b9['text'] == 'X' \\\n or b1['text'] == \"O\" and b5['text'] == 'O' and b9['text'] == 'O':\n green_winners(b1, b5, b9)\n winner = True\n\n elif b3['text'] == \"X\" and b5['text'] == 'X' and b7['text'] == 'X' \\\n or b3['text'] == \"O\" and b5['text'] == 'O' and b7['text'] == 'O':\n green_winners(b3, b5, b7)\n winner = True\n\n if count == 9 and winner == False:\n messagebox.showinfo(\"Tic Tac Toe\", \"It's a tie!\")\n disable_all_buttons()\n\n\ndef b_click(b):\n global clicked, count\n\n if b['text'] == ' ' and clicked == True:\n b['text'] = 'X'\n clicked = False\n count += 1\n if_winner()\n elif b['text'] == ' ' and clicked == False:\n b['text'] = 'O'\n clicked = True\n count += 1\n if_winner()\n else:\n messagebox.showerror(\"Tic Tac Toe\", \"Please select a blank box\")\n\n\n# Use grid() and columnspan to Complete the User Interface\n\ndef play_again():\n global b1, b2, b3, b4, b5, b6, b7, b8, b9\n global clicked, count\n clicked = True\n count = 0\n\n # Buttons:\n b1 = Button(root, text=\" \", font=(\"Helvetica\", 30), height=3, width=6, bg=\"SystemButtonFace\",\n command=lambda: b_click(b1))\n b2 = Button(root, text=\" \", font=(\"Helvetica\", 30), height=3, width=6, bg=\"SystemButtonFace\",\n command=lambda: b_click(b2))\n b3 = Button(root, text=\" \", font=(\"Helvetica\", 30), height=3, width=6, bg=\"SystemButtonFace\",\n command=lambda: b_click(b3))\n b4 = Button(root, text=\" \", font=(\"Helvetica\", 30), height=3, width=6, bg=\"SystemButtonFace\",\n command=lambda: b_click(b4))\n b5 = Button(root, text=\" \", font=(\"Helvetica\", 30), height=3, width=6, bg=\"SystemButtonFace\",\n command=lambda: b_click(b5))\n b6 = Button(root, text=\" \", font=(\"Helvetica\", 30), height=3, width=6, bg=\"SystemButtonFace\",\n command=lambda: b_click(b6))\n b7 = Button(root, text=\" \", font=(\"Helvetica\", 30), height=3, width=6, bg=\"SystemButtonFace\",\n command=lambda: b_click(b7))\n b8 = Button(root, text=\" \", font=(\"Helvetica\", 30), height=3, width=6, bg=\"SystemButtonFace\",\n command=lambda: b_click(b8))\n b9 = Button(root, text=\" \", font=(\"Helvetica\", 30), height=3, width=6, bg=\"SystemButtonFace\",\n command=lambda: b_click(b9))\n\n # Button grid assign:\n\n b1.grid(row=0, column=0)\n b2.grid(row=0, column=1)\n b3.grid(row=0, column=2)\n\n b4.grid(row=1, column=0)\n b5.grid(row=1, column=1)\n b6.grid(row=1, column=2)\n\n b7.grid(row=2, column=0)\n b8.grid(row=2, column=1)\n b9.grid(row=2, column=2)\n\n\n# Game Menu:\n\ngame_menu = Menu(root)\nroot.config(menu=game_menu)\n\noptions_menu = Menu(game_menu, tearoff=False)\ngame_menu.add_cascade(label=\"Options\", menu=options_menu)\noptions_menu.add_command(label=\"Restart Game\", command=play_again)\n\nplay_again()\nroot.mainloop()\n","repo_name":"pilot01j/Python","sub_path":"Portfolio_Tic_Tac_Toe_Game/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5311,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} 
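Each record in this file packages one source file as a single JSON object: the escaped code lives in the "text" field, with provenance metadata such as "repo_name", "sub_path", "program_lang" and "stars" alongside it. Below is a minimal sketch of how such records can be consumed, assuming a well-formed one-record-per-line JSON-lines file; the path records.jsonl and the helper name iter_records are hypothetical, while the field names come from the records themselves. The lstrip call accounts for the leading '+' diff markers seen when the data is taken straight from a patch.

import json
from collections import Counter

def iter_records(path):
    # Yield one parsed record per non-empty line of a JSON-lines file.
    with open(path, encoding="utf-8") as handle:
        for line in handle:
            line = line.strip()
            if line:
                # Drop a leading '+' diff marker, if present, before parsing.
                yield json.loads(line.lstrip("+"))

if __name__ == "__main__":
    # Example: tally the records by programming language.
    counts = Counter(record["program_lang"] for record in iter_records("records.jsonl"))
    print(counts.most_common())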
+{"seq_id":"16753784204","text":"# -*- coding:utf-8 -*-\nfrom studytribe.studygroup import models\nfrom studytribe.studygroup import serializers\nfrom studytribe.studygroup import forms\nfrom rest_framework import mixins\nfrom rest_framework import generics\nfrom studytribe.studygroup.permissions import IsOwnerOrReadOnly\nfrom rest_framework import permissions\n\n\nfrom django.shortcuts import get_object_or_404\nfrom django.views.generic import ListView\nfrom django.contrib.auth.models import User\n\n#for template dev\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render_to_response\nfrom django.template import loader,RequestContext\nfrom django.views.decorators.csrf import csrf_protect\nfrom guardian.decorators import permission_required\nfrom django.http import HttpResponse,HttpResponseNotAllowed\n\n\ndef get_study_group_list(request,tribe_id,context): \n study_tribe = get_object_or_404(models.StudyTribe, id__iexact=tribe_id)\n context['studygroup_list'] = models.StudyGroup.objects.filter(tribe=study_tribe)\n context['study_tribe'] = study_tribe\n return render_to_response(\"studytribe/studygroup/base.html\",\n context,\n context_instance=RequestContext(request))\n\n\n\n@permission_required('studygroup.enter_studytribe',\n (models.StudyTribe,'id','tribe_id'))\ndef study_group_list(request,tribe_id):\n context = {}\n if request.method == 'GET':\n return get_study_group_list(request,tribe_id,context)\n if request.method == 'POST':\n form = forms.StudyGroupForm(request.POST)\n context['form'] = form\n if form.is_valid():\n #create a studygroup in database\n tribe = models.StudyTribe.objects.get(pk=tribe_id)\n studygroup = form.save(request.user,tribe)\n #fetch all study group in this tribe and render page to client\n return get_study_group_list(request,tribe_id,context)\n else:\n return render_to_response(\"studytribe/studygroup/create_group.html\",\n context,\n context_instance=RequestContext(request))\n\n@permission_required('studygroup.change_studytribe',\n (models.StudyTribe,'id','tribe_id'))\ndef study_group_create_ui(request,tribe_id):\n form = forms.StudyGroupForm()\n form.initial[\"tribe_id\"] = tribe_id\n context = {'form':form}\n if request.method == 'GET':\n return render_to_response(\"studytribe/studygroup/create_group.html\",\n context,\n context_instance=RequestContext(request))\n\n\ndef get_study_group_members_view():\n \"\"\"\n 获得班级成员界面\n \"\"\"\n pass\n \n\ndef study_group_members(request,tribe_id,group_id):\n \"\"\"\n 班级成员主界面\n \"\"\"\n context = {}\n context['form'] = None\n context['study_log_form'] = forms.StudentStudyLogForm()\n if request.method == 'GET':\n context['form'] = forms.StudyGroupMemberForm()\n context['form'].initial[\"group_id\"] = group_id\n context['group_id'] = group_id\n study_group = get_object_or_404(models.StudyGroup,pk=group_id)\n if request.method == 'POST':\n context['form'] = forms.StudyGroupMemberForm(request.POST)\n study_group = models.StudyGroup.objects.get(pk=group_id)\n if context['form'].is_valid():\n #save a student to tribe\n context['form'].save(study_group)\n context['form'] = forms.StudyGroupMemberForm()\n else:\n context['form_visible'] = True\n context['study_group'] = study_group\n return render_to_response(\"studytribe/studygroup/study_group_members.html\",\n context,\n context_instance=RequestContext(request))\n\ndef student_study_log_input(request,group_id,member_id):\n context = {}\n if request.method == 'POST':\n context['study_log_form'] = forms.StudentStudyLogForm(request.POST)\n 
if context['study_log_form'].is_valid():\n #save a study log form and send a email to user\n study_group = models.StudyGroup.objects.get(pk=group_id)\n student = User.objects.get(pk=member_id)\n logger = request.user\n log = context['study_log_form'].save_log_sendmail(student,\n study_group,\n logger)\n return render_to_response(\"studytribe/studygroup/tparts/study_log_form.html\",\n context,\n context_instance=RequestContext(request))\n else:\n return HttpResponse(loader.render_to_string(\n \"studytribe/studygroup/tparts/study_log_form.html\",\n context,\n context_instance=RequestContext(request)\n ),status=400)\n else:\n return HttpResponseNotAllowed(['POST'])\n\ndef study_group_detail(request,study_tribe_id,study_group_id):\n context = {}\n return render_to_response(\"studytribe/studygroup/base.html\",\n context,\n context_instance=RequestContext(request))\n\n\"\"\"\nwrite traditional view for quick and dirty\n\"\"\"\nclass StudyGroupListView(ListView):\n\n context_object_name = \"study_group_list\"\n template_name = \"studytribe/studygroup/studygroup_list.html\"\n\n def get_queryset(self):\n study_tribe = get_object_or_404(Publisher, name__iexact=self.args[0])\n return StudyGroup.objects.filter(publisher=publisher)\n\n\n\"\"\"\nStudyTribe:学习部落\n\"\"\"\nclass StudyTribeListRes(generics.ListCreateAPIView):\n template_name = \"studytribe/studygroup/base.html\"\n model = models.StudyTribe\n serializer_class = serializers.StudyTribeSerializer\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,\n )\n\nclass StudyTribeRes(generics.RetrieveAPIView):\n model = models.StudyTribe\n serializer_class = serializers.StudyTribeSerializer\n\n\"\"\"\nStudyGroup:班级\n\"\"\"\nclass StudyGroupListRes(generics.ListCreateAPIView):\n \"\"\"\n List all studygroup,or create a new studygroup\n \"\"\"\n model = models.StudyGroup\n serializer_class = serializers.StudyGroupSerializer\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,\n IsOwnerOrReadOnly,)\n def pre_save(self, obj):\n obj.tribe = models.StudyTribe.objects.get(pk=self.kwargs['tribe_id'])\n\nclass StudyGroupRes(generics.RetrieveUpdateDestroyAPIView):\n \"\"\"\n Retrieve,update or delete a studygroup\n \"\"\"\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,\n IsOwnerOrReadOnly,)\n model = models.StudyGroup\n serializer_class = serializers.StudyGroupSerializer\n\n\n\n","repo_name":"esperyong/study-tribe","sub_path":"webapp/studytribe-project/studytribe/studygroup/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28419665982","text":"import requests\nimport csv\nimport time\nimport json\nfrom progress.bar import Bar\n\n\nurl_headers = \"/visits?date_from=2018-07-01T00:00:00.000-00:00&date_to=2018-09-30T00:00:00.000-00:00\"\n\ndatos = []\nlineas_documento = len(open('data.txt').readlines())\nbar = Bar('Processing', max=lineas_documento)\n\nwith open('data.txt') as lineas:\n for linea in lineas:\n linea_n = linea.strip('\\n')\n\n datos_item = (f'https://api.mercadolibre.com/items/MLM{linea_n}')\n\n datos_req = (f\"https://api.mercadolibre.com/items/MLM{linea_n}\" + url_headers)\n\n req_item = requests.get(datos_item)\n\n req = requests.get(datos_req)\n\n json_convert_item = json.loads(req_item.text)\n\n json_convert = json.loads(req.text)\n\n category_item = json_convert_item[\"category_id\"]\n seller_id = json_convert_item[\"seller_id\"]\n\n item_id = json_convert[\"item_id\"]\n date_to 
= json_convert[\"date_to\"]\n date_from = json_convert[\"date_from\"]\n total_visits = str(json_convert[\"total_visits\"])\n visits_detail = str(json_convert[\"visits_detail\"])\n\n datos_category = (f\"https://api.mercadolibre.com/categories/{category_item}\")\n req_category = requests.get(datos_category)\n json_convert_category = json.loads(req_category.text)\n nombre_categoria = json_convert_category[\"name\"]\n mega_categoria = json_convert_category[\"path_from_root\"]\n mega_categoria = mega_categoria[0]\n\n\n x = [seller_id, nombre_categoria,mega_categoria, item_id.strip('MCO'), category_item, total_visits]\n\n datos.insert(0, x)\n\n with open('data_out.csv', mode='w') as archivo:\n archivo = csv.writer(archivo, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n archivo.writerow([\"seller_id\", \"categoria\",\"titulo categoria\" , \"id\", \"titulo mega categoria\", \"ene\", \"feb\", \"mar\", \"abr\", \"may\", \"jun\", \"jul\", \"ago\", \"sep\", \"oct\", \"nov\", \"dic\"])\n for i in range(len(datos)):\n archivo.writerow(datos[i])\n\n\n time.sleep(1)\n\n bar.next()\n\nbar.finish()\n","repo_name":"nusspez/proyectos_mercadolibre_final","sub_path":"02_contador_visitas_personalizado/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2086,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34837836574","text":"import random\r\nimport pymysql\r\nfrom sklearn.metrics import classification_report\r\nimport matplotlib.pylab as plt\r\nimport numpy as np\r\nimport re\r\nfrom rw_predict_linreg import *\r\nfrom rw_predict_arima import *\r\nimport multiprocessing\r\n\r\ndef execute_predict_linreg(x):\r\n obj_linreg = predict_linreg()\r\n obj_linreg.predict(x, stopIfFound=True, predictStartDays=range(8, 30))\r\n return obj_linreg\r\n\r\ndef execute_predict_arima(x):\r\n obj_arima = predict_arima()\r\n obj_arima.predict(x, stopIfFound=True, predictStartDays=range(8, 30))\r\n return obj_arima\r\n\r\ndef calc(q_in, calc_result):\r\n result = []\r\n while True:\r\n item = q_in.get()\r\n if item is None:\r\n break\r\n x, idx, len = item[0], item[1], item[2]\r\n print(\"data\" + str(idx + 1) + \"/\" + str(len) + \": \" + str(x))\r\n x = x.reshape(-1,1)\r\n pred = x[x.size - 1]\r\n x = x[0:x.size - 1]\r\n\r\n obj_linreg = execute_predict_linreg(x)\r\n saving_linreg = obj_linreg.predictSaving\r\n obj_arima = execute_predict_arima(x)\r\n saving_arima = obj_arima.predictSaving\r\n if pred == 4:\r\n saving_optimized = 0\r\n elif pred == 2:\r\n saving_optimized = obj_arima.predictSaving\r\n else:\r\n saving_optimized = obj_linreg.predictSaving\r\n\r\n price = obj_arima.price1Obs # same as linreg's\r\n# if obj_arima.predictSaving > obj_linreg.predictSaving:\r\n# calc_result.append([price, saving_linreg, saving_arima, saving_optimized, item])\r\n calc_result.append([price, saving_linreg, saving_arima, saving_optimized, item])\r\n\r\n q_in.task_done()\r\n\r\nif __name__ == \"__main__\":\r\n \r\n print(\"get data from db\")\r\n\r\n dbname = \"project_m\"\r\n host = \"127.0.0.1\"\r\n username = \"root\"\r\n password = \"root\"\r\n\r\n dataSize = 2\r\n rw = []\r\n cursor = None\r\n cnx = None\r\n try:\r\n cnx = pymysql.connect(user=username, password=password, host=host, database=dbname)\r\n cursor = cnx.cursor()\r\n cursor.execute('SELECT rw_detail, type FROM logrw')\r\n rows = cursor.fetchmany(dataSize)\r\n for row in rows:\r\n var = [int(x) for x in row[0].split(',')]\r\n var.append(row[1])\r\n rw.append(var)\r\n finally:\r\n if 
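The visit-counter record above rewrites data_out.csv from scratch on every loop iteration, and its header row (17 month columns) does not match the 6-value rows it writes. A minimal sketch of writing the file once, after the loop, with a header matching the rows; variable names are taken from the record itself, and dropping the per-item checkpoint behaviour is an assumption about intent:

import csv

# after the download loop has filled `datos` with 6-field rows:
with open('data_out.csv', mode='w', newline='') as archivo:
    writer = csv.writer(archivo, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    writer.writerow(['seller_id', 'categoria', 'titulo categoria', 'id',
                     'titulo mega categoria', 'total_visits'])
    writer.writerows(datos)  # one pass instead of one full rewrite per item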
cursor:\r\n cursor.close()\r\n if cnx:\r\n cnx.close()\r\n\r\n rwData = np.array(rw)\r\n\r\n q_in = multiprocessing.JoinableQueue()\r\n for idx, item in enumerate(rwData):\r\n q_in.put([item, idx, dataSize])\r\n\r\n manager = multiprocessing.Manager()\r\n calc_result = manager.list()\r\n processes = 2\r\n for i in range(processes):\r\n c = multiprocessing.Process(target=calc, args=(q_in, calc_result))\r\n c.daemon = True\r\n c.start()\r\n q_in.join()\r\n \r\n originalPrice = 0\r\n predictSaving_linreg = 0\r\n predictSaving_arima = 0\r\n predictSaving_optimized = 0\r\n for result in calc_result:\r\n originalPrice += result[0]\r\n predictSaving_linreg += result[1]\r\n predictSaving_arima += result[2]\r\n predictSaving_optimized += result[3]\r\n print(\"Item \" + str(result[4]))\r\n\r\n print(\"Original Price: %.6f\" % originalPrice)\r\n print(\"Predict Saving with ARIMA: %.6f\" % predictSaving_arima)\r\n print(\"Predict Saving with Linear Regression: %.6f\" % predictSaving_linreg)\r\n print(\"Optimized Saving: %.6f\" % predictSaving_optimized)","repo_name":"atealxt/HyDepot","sub_path":"data-analyzer/storage-predict/rw_algorithm_evaluate.py","file_name":"rw_algorithm_evaluate.py","file_ext":"py","file_size_in_byte":3415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30812784198","text":"import string\nimport random\n\ndef main():\n\tf = open(\"input.txt\", \"w\")\n\tchars = string.ascii_uppercase + string.digits\n\tfor i in range(1000):\n\t\tid = str(i)\n\t\tdatalen = 1024 - 2 - len(id)\n\t\tdata = ''.join(random.choice(chars) for _ in range(datalen))\n\t\tf.write(id + \" \" + data + \"\\n\")\n\tf.close()\n\nif __name__ == \"__main__\":\n\tmain()\n\n","repo_name":"michalborzecki/sysopy8","sub_path":"zad1/gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"546219604","text":"# coding: utf-8\n# standard libraries\nimport logging\nimport re\nimport warnings\n\n# project libraries\nimport VAPr.validation\n\n\ndef _warn_of_unparseable_format_field(field_desc, field_tag, field_value, failure_desc):\n \"\"\"Raise a warning for the input field's issue using a standard wording and format.\n\n Args:\n field_desc (str): A brief human-readable phrase identifying the info indicated by the tag.\n field_tag (str): The two- or three-character key for this field in the VCF format line (e.g., AD, PL, etc).\n field_value (object): The value associated with the field in the VCF format value line; may have been parsed\n to a non-string data type.\n failure_desc (str): A brief human-readable phrase describing the problem identified with the tag_value.\n \"\"\"\n warn_msg = \"The {0} tag value {1} {2} so {3} information could not be captured for the current variant.\".format(\n field_tag, field_value, failure_desc, field_desc)\n warnings.warn(warn_msg)\n\n\ndef _capture_unprocessed_field(field_tag, field_value, genotype_info_to_fill):\n \"\"\"Attempt basic delimiter splitting and/or numeric casting for input field, and store results in VCFGenotypeInfo.\n\n Args:\n field_tag (str): The two- or three-character key for this field in the VCF format line (e.g., AD, PL, etc).\n field_value (str): The string value associated with the field in the VCF format value line.\n genotype_info_to_fill (VCFGenotypeInfo): A partially filled VCFGenotypeInfo object\n\n Returns:\n VCFGenotypeInfo: The input VCFGenotypeInfo with additional entries 
added to unprocessed_info dictionary.\n\n \"\"\"\n try:\n genotype_info_to_fill.unprocessed_info[field_tag] = float(field_value)\n except ValueError: # if the value can't be converted to a float\n split_list = field_value.split(\",\")\n cast_split_list = []\n\n for value in split_list:\n try:\n cast_split_list.append(float(value))\n except ValueError: # if the value can't be converted to a float\n cast_split_list.append(value)\n\n if len(cast_split_list) > 1:\n genotype_info_to_fill.unprocessed_info[field_tag] = cast_split_list\n else:\n genotype_info_to_fill.unprocessed_info[field_tag] = cast_split_list[0]\n\n return genotype_info_to_fill\n\n\ndef _fill_genotype_class(alleles, genotype_info_to_fill):\n \"\"\"Id genotype class (homozygous/heterozygous) and subclass (reference, alt, compound) and store in VCFGenotypeInfo.\n\n Args:\n alleles (List[str]): Results of splitting the value for the GT (genotype) format tag.\n genotype_info_to_fill (VCFGenotypeInfo): A partially filled VCFGenotypeInfo object.\n\n Returns:\n VCFGenotypeInfo: The input VCFGenotypeInfo with genotype_subclass_by_class value set.\n\n \"\"\"\n genotype_class = \"homozygous\"\n genotype_subclass = \"reference\"\n alt_subclass_name = \"alt\"\n\n if alleles[0] != alleles[1]:\n genotype_class = \"heterozygous\"\n alt_subclass_name = \"compound\"\n\n if \"0\" not in alleles:\n genotype_subclass = alt_subclass_name\n\n result = {genotype_class: genotype_subclass}\n genotype_info_to_fill.genotype_subclass_by_class = result\n return result\n\n\ndef _fill_genotype(field_value, genotype_info_to_fill):\n \"\"\"Parse the genotype of this sample at this site and store in the VCFGenotypeInfo.\n\n Args:\n field_value (str): The value associated with the GT tag in the format value string.\n genotype_info_to_fill (VCFGenotypeInfo): A partially filled VCFGenotypeInfo object.\n\n Returns:\n VCFGenotypeInfo: the input VCFGenotypeInfo with additional fields filled in.\n\n From https://gatkforums.broadinstitute.org/gatk/discussion/1268/what-is-a-vcf-and-how-should-i-interpret-it :\n \" GT : The genotype of this sample at this site.\n For a diploid organism, the GT field indicates the two alleles carried by the sample, encoded by a 0 for the REF\n allele, 1 for the first ALT allele, 2 for the second ALT allele, etc. When there's a single ALT allele (by far\n the more common case), GT will be either:\n\n 0/0 - the sample is homozygous reference\n 0/1 - the sample is heterozygous, carrying 1 copy of each of the REF and ALT alleles\n 1/1 - the sample is homozygous alternate\n\n In the three sites shown in the example above, NA12878 is observed with the allele combinations T/G, G/G, and C/T\n respectively.\n\n For non-diploids, the same pattern applies; in the haploid case there will be just a single value in GT; for\n polyploids there will be more, e.g. 
4 values for a tetraploid organism.\"\n \"\"\"\n\n alleles = field_value.split('/')\n if len(alleles) == 1:\n alleles = field_value.split('|')\n\n if len(alleles) != 2:\n _warn_of_unparseable_format_field(\"genotype\", VCFGenotypeParser.GENOTYPE_TAG, field_value,\n \"does not split into exactly two values\")\n return genotype_info_to_fill\n genotype_info_to_fill.genotype = field_value\n _fill_genotype_class(alleles, genotype_info_to_fill)\n return genotype_info_to_fill\n\n\ndef _fill_unfiltered_reads_counts(field_value, genotype_info_to_fill):\n \"\"\"Parse the unfiltered reads counts for this sample at this site and store in the VCFGenotypeInfo.\n\n Args:\n field_value (str): The value associated with the AD tag in the format value string.\n genotype_info_to_fill (VCFGenotypeInfo): A partially filled VCFGenotypeInfo object.\n\n Returns:\n VCFGenotypeInfo: The input VCFGenotypeInfo with new entries added to the alleles list.\n\n From https://gatkforums.broadinstitute.org/gatk/discussion/1268/what-is-a-vcf-and-how-should-i-interpret-it :\n \" AD ... : Allele depth ....\n AD is the unfiltered allele depth, i.e. the number of reads that support each of the reported alleles. All reads\n at the position (including reads that did not pass the variant caller's filters) are included in this number,\n except reads that were considered uninformative. Reads are considered uninformative when they do not provide\n enough statistical evidence to support one allele over another.\"\n \"\"\"\n delimiter = ','\n counts = field_value.split(delimiter)\n if len(counts) < 2:\n _warn_of_unparseable_format_field(\"unfiltered allele depth\", VCFGenotypeParser.UNFILTERED_ALLELE_DEPTH_TAG,\n field_value, \"does not split into at least two values\")\n else:\n for curr_count in counts:\n new_allele = Allele(curr_count)\n genotype_info_to_fill.alleles.append(new_allele)\n\n return genotype_info_to_fill\n\n\ndef _fill_filtered_reads_count(field_value, genotype_info_to_fill):\n \"\"\"Parse the filtered depth of coverage of this sample at this site and store in the VCFGenotypeInfo.\n\n Args:\n field_value (str): The value associated with the DP tag in the format value string.\n genotype_info_to_fill (VCFGenotypeInfo): A partially filled VCFGenotypeInfo object.\n\n Returns:\n VCFGenotypeInfo: The input VCFGenotypeInfo with filter_passing_reads_count filled in.\n\n From https://gatkforums.broadinstitute.org/gatk/discussion/1268/what-is-a-vcf-and-how-should-i-interpret-it :\n \" DP : ... depth of coverage\n DP is the filtered depth, at the sample level. This gives you the number of filtered reads that support each of\n the reported alleles. You can check the variant caller’s documentation to see which filters are applied by\n default. Only reads that passed the variant caller’s filters are included in this number. 
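The GT rules quoted above reduce to a few lines of code. A standalone sketch of the class/subclass split that `_fill_genotype_class` implements; `classify_gt` is a hypothetical helper name, not part of VAPr:

def classify_gt(gt_value):
    # accept phased ('0|1') or unphased ('0/1') diploid genotypes
    alleles = gt_value.replace('|', '/').split('/')
    if len(alleles) != 2:
        raise ValueError('expected a diploid genotype such as 0/1')
    if alleles[0] == alleles[1]:
        gt_class, alt_name = 'homozygous', 'alt'
    else:
        gt_class, alt_name = 'heterozygous', 'compound'
    # any genotype containing the reference allele is subclassed 'reference'
    gt_subclass = 'reference' if '0' in alleles else alt_name
    return {gt_class: gt_subclass}

assert classify_gt('0/0') == {'homozygous': 'reference'}
assert classify_gt('0/1') == {'heterozygous': 'reference'}
assert classify_gt('1/1') == {'homozygous': 'alt'}
assert classify_gt('1|2') == {'heterozygous': 'compound'}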
However, unlike the AD\n calculation, uninformative reads are included in DP.\"\n \"\"\"\n genotype_info_to_fill.filter_passing_reads_count = field_value\n return genotype_info_to_fill\n\n\ndef _fill_genotype_confidence(field_value, genotype_info_to_fill):\n \"\"\"Parse the genotype quality (confidence) of this sample at this site and store in the VCFGenotypeInfo.\n\n Args:\n field_value (str): The value associated with the GQ tag in the format value string.\n genotype_info_to_fill (VCFGenotypeInfo): A partially filled VCFGenotypeInfo object.\n\n Returns:\n VCFGenotypeInfo: The input VCFGenotypeInfo with genotype_confidence filled in.\n\n From https://gatkforums.broadinstitute.org/gatk/discussion/1268/what-is-a-vcf-and-how-should-i-interpret-it :\n \" GQ : Quality of the assigned genotype.\n The Genotype Quality represents the Phred-scaled confidence that the genotype assignment (GT) is correct, derived\n from the genotype PLs. Specifically, the GQ is the difference between the PL of the second most likely genotype,\n and the PL of the most likely genotype. As noted above, the values of the PLs are normalized so that the most\n likely PL is always 0, so the GQ ends up being equal to the second smallest PL, unless that PL is greater than 99.\n In GATK, the value of GQ is capped at 99 because larger values are not more informative, but they take more space\n in the file. So if the second most likely PL is greater than 99, we still assign a GQ of 99.\n\n Basically the GQ gives you the difference between the likelihoods of the two most likely genotypes. If it is low,\n you can tell there is not much confidence in the genotype, i.e. there was not enough evidence to confidently\n choose one genotype over another. See the FAQ article on the Phred scale to get a sense of what would be\n considered low.\n\n Not to be confused with the site-level annotation QUAL; see this FAQ article for an explanation of the differences\n in what they mean and how they should be used.\"\n \"\"\"\n genotype_info_to_fill.genotype_confidence = field_value\n return genotype_info_to_fill\n\n\ndef _fill_genotype_likelihoods(field_value, genotype_info_to_fill):\n \"\"\"Parse the \"normalized\" Phred-scaled likelihoods of possible genotypes of this sample at this site and store.\n\n Args:\n field_value (str): The value associated with the PL tag in the format value string.\n genotype_info_to_fill (VCFGenotypeInfo): A partially filled VCFGenotypeInfo object.\n\n Returns:\n VCFGenotypeInfo: The input VCFGenotypeInfo with additional fields filled in.\n\n Note that this function will MAKE the number of Allele objects implied by the likelihood string if no alleles have\n been filled into the genotype_info_to_fill by the time this function is called. The reason for this is that there\n ARE valid VCF format strings (e.g., 'GT:GQ:PL') that have the PL tag (likelihood) but no AD tag in them, and since\n Allele objects are usually created in the processing of the AD tag, some back-up approach was needed to infer\n Alleles in this situation. 
Of course, the Alleles created in this situation will all have None as their\n read_counts, since the read_counts value comes from the AD tag.\n\n From https://gatkforums.broadinstitute.org/gatk/discussion/1268/what-is-a-vcf-and-how-should-i-interpret-it:\n \" PL : \"Normalized\" Phred-scaled likelihoods of the possible genotypes.\n For the typical case of a monomorphic site (where there is only one ALT allele) in a diploid organism, the PL\n field will contain three numbers, corresponding to the three possible genotypes (0/0, 0/1, and 1/1). The PL values\n are \"normalized\" so that the PL of the most likely genotype (assigned in the GT field) is 0 in the Phred scale. We\n use \"normalized\" in quotes because these are not probabilities. We set the most likely genotype PL to 0 for easy\n reading purpose.The other values are scaled relative to this most likely genotype.\n\n Keep in mind, if you're not familiar with the statistical lingo, that when we say PL is the \"Phred-scaled\n likelihood of the genotype\", we mean it is \"How much less likely that genotype is compared to the best one\".\n \"\n \"\"\"\n generate_alleles = False\n delimiter = ','\n likelihoods = field_value.split(delimiter)\n\n num_expected_alleles = len(genotype_info_to_fill.alleles)\n if num_expected_alleles == 0:\n generate_alleles = True\n genotype_info_to_fill.alleles.append(Allele(None))\n\n allele_number = 0\n likelihood_number = 0\n for index in range(len(likelihoods)):\n if likelihood_number > allele_number:\n allele_number += 1\n likelihood_number = 0\n\n if generate_alleles:\n genotype_info_to_fill.alleles.append(Allele(None))\n\n # NB: The check below looks at the *current* number of alleles in genotype_info_to_fill, which may be\n # different than the value found above and stored in num_expected_alleles due to the alleles.append\n # statements at various points above.\n if allele_number >= len(genotype_info_to_fill.alleles):\n _warn_of_unparseable_format_field(\"'normalized' Phred-scaled likelihoods of possible genotypes\",\n VCFGenotypeParser.NORMALIZED_SCALED_LIKELIHOODS_TAG,\n field_value,\n \"appears to contain information for more alleles than expected\")\n\n # in case of warning, clear any already-parsed genotype likelihoods (they probably can't be trusted)\n # and return out of function\n genotype_info_to_fill.genotype_likelihoods = []\n return genotype_info_to_fill\n # end warn\n # end if likelihood_number > allele_number\n\n new_likelihood = GenotypeLikelihood(likelihood_number, allele_number, likelihoods[index])\n genotype_info_to_fill.genotype_likelihoods.append(new_likelihood)\n likelihood_number += 1\n\n if allele_number < (num_expected_alleles-1) or likelihood_number < num_expected_alleles:\n _warn_of_unparseable_format_field(\"'normalized' Phred-scaled likelihoods of possible genotypes\",\n VCFGenotypeParser.NORMALIZED_SCALED_LIKELIHOODS_TAG,\n field_value, \"appears to contain information for fewer alleles than expected\")\n\n # in case of warning, clear any already-parsed genotype likelihoods (they probably can't be trusted)\n genotype_info_to_fill.genotype_likelihoods = []\n\n return genotype_info_to_fill\n\n\nclass VCFGenotypeInfo(object):\n \"\"\"Store parsed info from VCF genotype fields for a single sample.\n\n Attributes:\n _raw_string (str): The genotype fields values string from a VCF file (e.g., '0/1:173,141:282:99:255,0,255').\n genotype (Optional[`str`]): The type of each of the sample's two alleles, such as 0/0, 0/1, etc.\n alleles (List[Allele]): One Allele object for each 
allele detected for this variant\n (this can be across samples, so there can be more than 2 alleles).\n genotype_likelihoods (List[GenotypeLikelihood]): The GenotypeLikelihood object for each allele.\n unprocessed_info (Dict[str, Any]): Dictionary of field tag and value(s) for any fields not stored in\n dedicated attributes of VCFGenotypeInfo. Values are parsed to lists and/or floats if possible.\n genotype_subclass_by_class (Dict[str, str]): Genotype subclass (reference, alt, compound) keyed by genotype\n class (homozygous/heterozygous).\n \"\"\"\n\n def __init__(self, raw_string):\n \"\"\"Create VCFGenotypeInfo object.\n\n Args:\n raw_string (str): The genotype fields values string from a VCF file (e.g., '0/1:173,141:282:99:255,0,255').\n \"\"\"\n self._raw_string = raw_string\n self._genotype_confidence = None\n self._filter_passing_reads_count = None\n\n # TODO: someday: Probably these should become properties so they are protected from user resetting them, etc.\n self.genotype = None\n self.alleles = [] # 0 is ref, 1 is first alt, etc\n self.genotype_likelihoods = []\n self.unprocessed_info = {}\n self.genotype_subclass_by_class = None\n\n @property\n def genotype_confidence(self):\n \"\"\"str: Genotype quality (confidence) of this sample at this site, from the GQ field.\"\"\"\n return self._genotype_confidence\n\n @genotype_confidence.setter\n def genotype_confidence(self, value):\n # TODO: someday: Determine if genotype confidence value is limited to being a positive or non-negative number\n self._genotype_confidence = VAPr.validation.convert_to_nullable(value, float)\n\n @property\n def filter_passing_reads_count(self):\n \"\"\"int or None: Filtered depth of coverage of this sample at this site from the DP field.\"\"\"\n return self._filter_passing_reads_count\n\n @filter_passing_reads_count.setter\n def filter_passing_reads_count(self, value):\n self._filter_passing_reads_count = VAPr.validation.convert_to_nonneg_int(value, nullable=True)\n\n\n# TODO: someday: refactor to remove Allele object and replace with single value\n# One could argue pretty convincingly that there is no longer a need for a whole Allele object now that\n# (with the removal of database-related properties) it has only one property--surely that could be represented by\n# a single value rather than a class! 
But this would be a non-trivial refactor, so I'm leaving it for some misty future\n# date.\nclass Allele(object):\n \"\"\"Store unfiltered read counts, if any, for a particular allele.\"\"\"\n\n def __init__(self, unfiltered_read_counts=None):\n \"\"\"Create Allele object.\n\n Args:\n unfiltered_read_counts (Optional[str]): Number of unfiltered reads counts for this sample at this site,\n from AD field.\n \"\"\"\n self._unfiltered_read_counts = None\n if unfiltered_read_counts is not None:\n self.unfiltered_read_counts = unfiltered_read_counts\n\n @property\n def unfiltered_read_counts(self):\n \"\"\"int or None: Number of unfiltered reads counts for this sample at this site, from AD field.\"\"\"\n return self._unfiltered_read_counts\n\n @unfiltered_read_counts.setter\n def unfiltered_read_counts(self, value):\n self._unfiltered_read_counts = VAPr.validation.convert_to_nonneg_int(value, nullable=True)\n\n\nclass GenotypeLikelihood(object):\n \"\"\"Store parsed info from VCF genotype likelihood field for a single sample.\"\"\"\n\n @staticmethod\n def _validate_allele_relationship(allele1_number, allele2_number):\n \"\"\"Ensure that allele1_number is not greater than allele2_number.\n\n Args:\n allele1_number (int): The allele identifier (0 for reference, 1 for first alternate, etc) for the left-hand\n allele inferred for this genotype likelihood.\n allele2_number (int): The allele identifier (0 for reference, 1 for first alternate, etc) for the right-hand\n allele inferred for this genotype likelihood.\n\n Raises:\n ValueError: If `allele1_number` is greater than `allele2_number`.\n\n Genotype likelihood strings contain one likelihood for each possible allele combination for this variant at\n this site. The likelihood string does not explicity state the allele combination associated with each\n likelihood; rather, the allele combinations are inferred based on the convention that they are always listed\n in ascending order from lowest allele number to highest allele number. For example, for the likelihood string\n '495,162,123,213,129,175,67,0,46,28.1', the implied allele combinations are 0/0, 0/1, 1/1, 0/2, 1/2, 2/2,\n 0/3, 1/3, 2/3, 3/3 . 
Note that these are COMBINATIONS, not PERMUTATIONS, so each pair of values occurs only\n once and, again by convention, the representation expected is the one where the allele number on the left\n (arbitrarily referred to here as allele1) has a value less than (or equal to) the allele on the right\n (arbitrarily referred to here as allele2).\n\n IF, in parsing the likelihood string and inferring the allele combinations, the code somehow ended up with\n an inferred combination in which the left-hand allele number is GREATER than the right-hand allele number,\n that means something has gone tragically wrong!\n \"\"\"\n\n if allele1_number > allele2_number:\n raise ValueError(\"VCF-format genotypes must have allele 2 number ({0}) \"\n \"greater than or equal to allele 1 number ({1})\".format(allele2_number, allele1_number))\n\n def __init__(self, allele1_number, allele2_number, likelihood_neg_exponent):\n \"\"\"Create GenotypeLikelihood object.\n\n Args:\n allele1_number (int or str): The allele id for the right-hand allele inferred for this genotype likelihood.\n allele2_number (int or str): The allele id for the left-hand allele inferred for this genotype likelihood.\n likelihood_neg_exponent (float or str): The \"normalized\" Phred-scaled likelihood of the genotype represented\n by allele1 and allele2, as a string.\n \"\"\"\n self._allele1_number = None\n self._allele2_number = None\n self._likelihood_neg_exponent = None\n\n self.allele1_number = allele1_number\n self.allele2_number = allele2_number\n self.likelihood_neg_exponent = likelihood_neg_exponent\n\n @property\n def allele1_number(self):\n \"\"\"int: The allele identifier for the left-hand allele inferred for this genotype likelihood.\"\"\"\n return self._allele1_number\n\n @allele1_number.setter\n def allele1_number(self, value):\n int_value = VAPr.validation.convert_to_nonneg_int(value, nullable=True)\n\n if self.allele2_number is not None:\n self._validate_allele_relationship(int_value, self.allele2_number)\n self._allele1_number = int_value\n\n @property\n def allele2_number(self):\n \"\"\"int: The allele identifier for the right-hand allele inferred for this genotype likelihood. 
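The combination ordering spelled out above (0/0, 0/1, 1/1, 0/2, 1/2, 2/2, ...) can be generated mechanically, which also gives the expected PL count of n(n+1)/2 for n alleles. A sketch; `pl_genotype_order` is a hypothetical helper, not VAPr API:

def pl_genotype_order(n_alleles):
    # VCF lists PL entries with allele2 ascending and, within each allele2,
    # allele1 running from 0 up to allele2 (so allele1 <= allele2 always holds)
    return [(a1, a2) for a2 in range(n_alleles) for a1 in range(a2 + 1)]

# three alleles -> 3*4/2 = 6 likelihoods, in the documented order
assert pl_genotype_order(3) == [(0, 0), (0, 1), (1, 1), (0, 2), (1, 2), (2, 2)]

# pairing a PL string with its implied genotypes for a biallelic site
likelihoods = [float(x) for x in '255,0,255'.split(',')]
by_genotype = dict(zip(pl_genotype_order(2), likelihoods))  # {(0, 0): 255.0, (0, 1): 0.0, (1, 1): 255.0}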
\"\"\"\n return self._allele2_number\n\n @allele2_number.setter\n def allele2_number(self, value):\n int_value = VAPr.validation.convert_to_nonneg_int(value, nullable=True)\n\n if self.allele1_number is not None:\n self._validate_allele_relationship(self.allele1_number, int_value)\n self._allele2_number = int_value\n\n @property\n def likelihood_neg_exponent(self):\n \"\"\"float: The \"normalized\" Phred-scaled likelihood of the genotype represented by allele1 and allele2.\"\"\"\n return self._likelihood_neg_exponent\n\n @likelihood_neg_exponent.setter\n def likelihood_neg_exponent(self, value):\n self._likelihood_neg_exponent = VAPr.validation.convert_to_nullable(value, float)\n\n\nclass VCFGenotypeParser(object):\n \"\"\"Mine format string and genotype fields string to create a filled VCFGenotypeInfo object.\"\"\"\n\n @staticmethod\n def is_valid_genotype_fields_string(genotype_fields_string):\n \"\"\"Return true if input has any real genotype fields content, false if is just periods, zeroes, and delimiters.\n\n Args:\n genotype_fields_string (str): A VCF-style genotype fields string, such as 1/1:0,2:2:6:89,6,0 or ./.:.:.:.:.\n\n Returns\n bool: true if input has any real genotype fields content, false if is just periods, zeroes, and delimiters.\n \"\"\"\n result = False\n # this regex means \"one or more characters that is not a comma, period, colon, zero, or forward slash\"\n content_char_match = re.search(r\"[^,.:0\\/]+\", genotype_fields_string)\n # NB: necessary to ALSO check first character of string, even if no match to above regex is found, because\n # \"0/0\" should be a valid genotype (even though all the characters it contains could signal null content in\n # other configurations), and a genotype fields string with nothing but a genotype in it should be legal.\n if content_char_match is not None or not genotype_fields_string.startswith(\".\"):\n result = True\n return result\n\n GENOTYPE_TAG = \"GT\" # str: VCF tag for the genotype of this sample at this site.\n UNFILTERED_ALLELE_DEPTH_TAG = \"AD\" # str: VCF tag for the unfiltered allele depth of this sample at this site.\n FILTERED_ALLELE_DEPTH_TAG = \"DP\" # str: VCF tag for the filtered depth of coverage of this sample at this site.\n GENOTYPE_QUALITY_TAG = \"GQ\" # str: VCF tag for the genotype quality of this sample at this site.\n NORMALIZED_SCALED_LIKELIHOODS_TAG = \"PL\" # str: VCF tag for the genotype likelihoods of this sample at this site.\n\n _DELIMITER = ':' # str: Delimiter between fields in format and genotype fields strings.\n\n # Dict(str, Callable[str, VCFGenotypeInfo]): Special parsing functions by the VCF tag whose value they parse.\n _PARSER_FUNCS = {GENOTYPE_TAG: _fill_genotype, # GT\n UNFILTERED_ALLELE_DEPTH_TAG: _fill_unfiltered_reads_counts, # AD\n FILTERED_ALLELE_DEPTH_TAG: _fill_filtered_reads_count, # DP\n GENOTYPE_QUALITY_TAG: _fill_genotype_confidence, # GQ\n NORMALIZED_SCALED_LIKELIHOODS_TAG: _fill_genotype_likelihoods} # PL\n\n @classmethod\n def parse(cls, format_key_string, format_value_string):\n \"\"\"Parse the input format string and genotype fields string into a filled VCFGenotypeInfo object.\n\n Args:\n format_key_string (str): The VCF format string (e.g., 'GT:AD:DP:GQ:PL') for this sample at this site.\n format_value_string (str): The VCF genotype fields values string (e.g., '1/1:0,34:34:99:1187.2,101,0')\n corresponding to the format_key_string for this sample at this site.\n\n Returns:\n VCFGenotypeInfo or None: A filled VCFGenotypeInfo for this sample at this site unless an error 
was\n            encountered, in which case None is returned.\n\n        \"\"\"\n        result = None\n\n        try:\n            if cls.is_valid_genotype_fields_string(format_value_string):\n                result = VCFGenotypeInfo(format_value_string)\n                format_subkeys = format_key_string.split(cls._DELIMITER)\n                format_values = format_value_string.split(cls._DELIMITER)\n\n                for index, curr_key in enumerate(format_subkeys):\n                    curr_value = format_values[index]\n                    if curr_key in cls._PARSER_FUNCS:\n                        # if this key has a special parsing function associated with it, use that function\n                        parse_func = cls._PARSER_FUNCS[curr_key]\n                        result = parse_func(curr_value, result)\n                    else:\n                        # otherwise, capture this key/value with minimal processing to a catch-all dictionary\n                        result = _capture_unprocessed_field(curr_key, curr_value, result)\n        except Exception as e:\n            warn_msg = \"Encountered error '{0}' so genotype fields information could not be captured for the \" \\\n                       \"current variant.\".format(e)\n            warnings.warn(warn_msg)\n            result = None  # reset result to None, as contents can't be trusted\n\n        return result\n","repo_name":"ucsd-ccbb/VAPr","sub_path":"VAPr/vcf_genotype_fields_parsing.py","file_name":"vcf_genotype_fields_parsing.py","file_ext":"py","file_size_in_byte":27380,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"61"}
+{"seq_id":"43525753660","text":"import asyncio\n\nimport pytest\n\nfrom grpclib.metadata import Deadline\nfrom grpclib.utils import Wrapper, DeadlineWrapper\n\n\nclass CustomError(Exception):\n    pass\n\n\nclass UserAPI:\n\n    def __init__(self, wrapper):\n        self.wrapper = wrapper\n\n    async def foo(self, *, time=.0001):\n        with self.wrapper:\n            await asyncio.sleep(time)\n\n\n@pytest.mark.asyncio\nasync def test_wrapper(loop):\n    api = UserAPI(Wrapper())\n    await api.foo()\n\n    loop.call_soon(lambda: api.wrapper.cancel(CustomError('Some explanation')))\n\n    with pytest.raises(CustomError) as err:\n        await api.foo()\n    err.match('Some explanation')\n\n    with pytest.raises(CustomError):\n        await api.foo()\n\n\n@pytest.mark.asyncio\nasync def test_deadline_wrapper(loop):\n    deadline = Deadline.from_timeout(0.01)\n    deadline_wrapper = DeadlineWrapper()\n    api = UserAPI(deadline_wrapper)\n\n    with deadline_wrapper.start(deadline, loop=loop):\n        await api.foo(time=0.0001)\n\n        with pytest.raises(asyncio.TimeoutError) as err:\n            await api.foo(time=0.1)\n        assert err.match('Deadline exceeded')\n\n        with pytest.raises(asyncio.TimeoutError) as err:\n            await api.foo(time=0.0001)\n        assert err.match('Deadline exceeded')\n","repo_name":"Slyce-Inc/grpclib","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"}
+{"seq_id":"10646970477","text":"#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\ntry:\n    README = open('README.rst').read()\nexcept:\n    README = None\n\ntry:\n    REQUIREMENTS = open('requirements.txt').read()\nexcept:\n    REQUIREMENTS = None\n\nsetup(\n    name='spotify2piratebay',\n    version=\"0.1\",\n    description='Download your Spotify music using the Pirate Bay',\n    long_description=README,\n    install_requires=REQUIREMENTS,\n    author='Mathijs de Bruin',\n    author_email='mathijs@visualspace.nl',\n    url='http://github.com/dokterbob/spotify2piratebay/',\n    packages=find_packages(),\n    include_package_data=True,\n    classifiers=[\n        'Development Status :: 3 - Alpha',\n        'Environment :: Console',\n        'Intended Audience :: Developers',\n        'License :: 
Public Domain',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Utilities'\n ],\n entry_points={\n 'console_scripts': [\n 'spotify2piratebay = spotify2piratebay.runner:main',\n ],\n },\n)\n","repo_name":"dokterbob/spotify2piratebay","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"5204336523","text":"# -*- coding: utf-8 -*-\n\"\"\"\n!! this is in development now. !!\nthis script is partially adapted from https://github.com/jmjeon94/AnoGAN-pytorch\n@Author: Hongzuo Xu \n\"\"\"\n\nfrom deepod.core.base_model import BaseDeepAD\nfrom deepod.core.base_networks import MLPnet\nfrom torch.utils.data import DataLoader\nimport torch\nimport time\n\n\nclass AnoGAN(BaseDeepAD):\n \"\"\" AnoGAN for anomaly detection\n See : for detail\n\n Parameters\n ----------\n epochs: int, optional (default=100)\n Number of training epochs\n\n batch_size: int, optional (default=64)\n Number of samples in a mini-batch\n\n lr: float, optional (default=1e-3)\n Learning rate\n\n rep_dim: int, optional (default=128)\n Dimensionality of the representation space\n\n hidden_dims: list, str or int, optional (default='100,50')\n Number of neural units in hidden layers\n - If list, each item is a layer\n - If str, neural units of hidden layers are split by comma\n - If int, number of neural units of single hidden layer\n\n act: str, optional (default='ReLU')\n activation layer name\n choice = ['ReLU', 'LeakyReLU', 'Sigmoid', 'Tanh']\n\n bias: bool, optional (default=False)\n Additive bias in linear layer\n\n epoch_steps: int, optional (default=-1)\n Maximum steps in an epoch\n - If -1, all the batches will be processed\n\n prt_steps: int, optional (default=10)\n Number of epoch intervals per printing\n\n device: str, optional (default='cuda')\n torch device,\n\n verbose: int, optional (default=1)\n Verbosity mode\n\n random_state: int, optional (default=42)\n the seed used by the random\n\n \"\"\"\n def __init__(self, epochs=100, batch_size=64, lr=1e-3,\n z_dim=128,\n rep_dim=128, hidden_dims='100,50', act='ReLU', bias=False,\n epoch_steps=-1, prt_steps=10, device='cuda',\n verbose=2, random_state=42):\n super(AnoGAN, self).__init__(\n model_name='AnoGAN', epochs=epochs, batch_size=batch_size, lr=lr,\n epoch_steps=epoch_steps, prt_steps=prt_steps, device=device,\n verbose=verbose, random_state=random_state\n )\n self.z_dim = z_dim\n\n self.hidden_dims = hidden_dims\n self.rep_dim = rep_dim\n self.act = act\n self.bias = bias\n return\n\n def training_prepare(self, X, y):\n train_loader = DataLoader(X, batch_size=self.batch_size, shuffle=True)\n\n g_net = MLPnet(\n n_features=self.z_dim,\n n_hidden=self.hidden_dims,\n n_output=self.n_features,\n activation=self.act,\n bias=self.bias,\n ).to(self.device)\n d_net = MLPnet(\n n_features=self.n_features,\n n_hidden = self.hidden_dims,\n n_output = 1,\n activation = self.act,\n bias=self.bias\n ).to(self.device)\n net = (g_net, d_net)\n\n criterion = torch.nn.BCELoss()\n\n if self.verbose >= 2:\n print(g_net)\n print(d_net)\n\n return train_loader, net, criterion\n\n def _training(self):\n optimizer_g = torch.optim.Adam(self.net[0].parameters(),\n lr=self.lr,\n weight_decay=1e-5)\n optimizer_d = torch.optim.Adam(self.net[1].parameters(),\n lr=self.lr,\n weight_decay=1e-5)\n\n for i in range(self.epochs):\n t1 = time.time()\n total_g_loss = 0\n total_d_loss = 0\n cnt = 0\n for batch_x in 
self.train_loader:\n b_size = batch_x.size(0)\n\n # update discriminator network\n self.net[1].zero_grad()\n\n # for real\n batch_x = batch_x.float().to(self.device)\n label = torch.ones(b_size).to(self.device)\n output_real = self.net[1](batch_x).view(-1)\n output_real = torch.sigmoid(output_real)\n err_real = self.criterion(output_real, label)\n err_real.backward()\n\n # for noise\n fake = self.net[0](torch.randn(b_size, self.z_dim, device=self.device))\n label = torch.zeros(b_size).to(self.device)\n output_fake = self.net[1](fake.detach()).view(-1)\n output_fake = torch.sigmoid(output_fake)\n err_fake = self.criterion(output_fake, label)\n err_fake.backward()\n\n err_d = err_fake + err_real\n optimizer_d.step()\n\n # update generative network\n self.net[0].zero_grad()\n label.fill_(1.)\n output = self.net[1](fake).view(-1)\n output = torch.sigmoid(output)\n err_g = self.criterion(output, label)\n\n err_g.backward()\n optimizer_g.step()\n\n total_d_loss += err_d.item()\n total_g_loss += err_g.item()\n cnt += 1\n\n # terminate this epoch when reaching assigned maximum steps per epoch\n if cnt > self.epoch_steps != -1:\n break\n\n t = time.time() - t1\n if self.verbose >= 1 and (i == 0 or (i+1) % self.prt_steps == 0):\n print(f'epoch{i+1}, '\n f'training loss (generative/discriminative): '\n f'{total_g_loss/cnt:.6f} / {total_d_loss/cnt:.6f}, '\n f'time: {t:.1f}s')\n\n if i == 0:\n self.epoch_time = t\n\n self.epoch_update()\n\n return\n\n def inference_prepare(self, X):\n test_loader = DataLoader(X, batch_size=self.batch_size,\n drop_last=False, shuffle=False)\n self.criterion.reduction = 'none'\n return test_loader\n\n def _inference(self):\n self.net[1].eval()\n with torch.no_grad():\n z_lst = []\n score_lst = []\n for batch_x in self.test_loader:\n batch_x = batch_x.float().to(self.device)\n s = self.net[1](batch_x)\n s = s.view(-1)\n\n batch_z = batch_x\n\n z_lst.append(batch_z)\n score_lst.append(s)\n\n z = torch.cat(z_lst).data.cpu().numpy()\n scores = torch.cat(score_lst).data.cpu().numpy()\n\n return z, scores\n\n def training_forward(self, batch_x, net, criterion):\n # implement in _training\n pass\n\n def inference_forward(self, batch_x, net, criterion):\n # implement in _inference\n pass\n","repo_name":"xichie/SpikeLog","sub_path":"logadempirical/deepod/models/anogan.py","file_name":"anogan.py","file_ext":"py","file_size_in_byte":6782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35246993577","text":"import rclpy\nfrom rclpy.node import Node\nfrom geometry_msgs.msg import Twist\nfrom sensor_msgs.msg import LaserScan\nfrom rclpy.qos import QoSProfile\nfrom rclpy.qos import qos_profile_sensor_data\n\nclass ObstacleDetector(Node):\n\n def __init__(self):\n self.safe_dist_ = 1.5\n self.linear_speed = 0.4\n self.angular_speed = 2.5\n qos = QoSProfile(depth=10)\n super().__init__('obstacle_detector')\n self.subscriber_ = self.create_subscription(LaserScan, 'scan', self.callback, qos_profile=qos_profile_sensor_data)\n self.publisher_ = self.create_publisher(Twist, 'cmd_vel', qos)\n\n def callback(self, msg):\n\n move = Twist()\n n = 10 # number of rays \n # We took one ray per 20 degree\n # move.linear.x = 0.4\n # move.angular.z = 0.0\n segments = {\n 'right': min(min(msg.ranges[0:2]), n),\n 'front' : min(min(msg.ranges[3:5]), n),\n 'left' : min(min(msg.ranges[6:9]), n),\n }\n\n if (segments['front'] > self.safe_dist_ and segments['left'] > self.safe_dist_ and segments['right'] > self.safe_dist_):\n move.linear.x = 0.4\n 
move.angular.z = 0.0\n            self.get_logger().warn(\"No obstacles, moving freely\")\n\n        elif(segments['front'] < self.safe_dist_ and segments['left'] < self.safe_dist_ and segments['right'] < self.safe_dist_):\n            move.linear.x = -self.linear_speed\n            move.angular.z = self.angular_speed\n\n        elif (segments['front'] < self.safe_dist_ and segments['left'] > self.safe_dist_ and segments['right'] > self.safe_dist_):\n            move.linear.x = 0.0\n            move.angular.z = self.angular_speed\n        \n        elif (segments['front'] > self.safe_dist_ and segments['left'] > self.safe_dist_ and segments['right'] < self.safe_dist_):\n            move.linear.x = 0.0\n            move.angular.z = self.angular_speed\n        \n        elif (segments['front'] > self.safe_dist_ and segments['left'] < self.safe_dist_ and segments['right'] > self.safe_dist_):\n            move.linear.x = 0.0\n            move.angular.z = -self.angular_speed\n\n        elif (segments['front'] < self.safe_dist_ and segments['left'] > self.safe_dist_ and segments['right'] < self.safe_dist_):\n            move.linear.x = 0.0\n            move.angular.z = self.angular_speed\n        \n        elif (segments['front'] < self.safe_dist_ and segments['left'] < self.safe_dist_ and segments['right'] > self.safe_dist_):\n            move.linear.x = 0.0\n            move.angular.z = -self.angular_speed\n\n        elif (segments['front'] > self.safe_dist_ and segments['left'] < self.safe_dist_ and segments['right'] < self.safe_dist_):\n            move.linear.x = self.linear_speed\n            move.angular.z = 0.0\n\n        else:\n            self.get_logger().error(\"Unknown case encountered; please check the environment!\")\n        self.publisher_.publish(move)\n\n\ndef main(args=None):\n\n    rclpy.init(args=args)\n    node = ObstacleDetector()\n    rclpy.spin(node)\n    rclpy.shutdown()\n\nif __name__ == \"__main__\":\n    main()","repo_name":"Poorna-Sasank/kshoonya-bot","sub_path":"src/my_robot_packages/my_robot_packages/obstacle_detector.py","file_name":"obstacle_detector.py","file_ext":"py","file_size_in_byte":3079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"19053384570","text":"#coding:utf-8\n\"\"\"\nWrite a Python program to calculate magic square.\nA magic square is an arrangement of distinct numbers (i.e., each number is used once), usually integers, in a square grid, where the numbers in each row, and in each column, and the numbers in the main and secondary diagonals, all add up to the same number, called the \"magic constant.\" A magic square has the same number of rows as it has columns, and in conventional math notation, \"n\" stands for the number of rows (and columns) it has. 
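The eight-branch cascade in the obstacle detector above is equivalent to a lookup keyed on three "blocked" flags. A sketch of the same decisions in table form, reusing the node's own speeds (0.4, 2.5) and safe distance (1.5); this is a refactoring suggestion, not code from the repository:

SAFE, LIN, ANG = 1.5, 0.4, 2.5

# (front_blocked, left_blocked, right_blocked) -> (linear.x, angular.z)
DECISIONS = {
    (False, False, False): (LIN, 0.0),   # all clear: drive forward
    (True,  True,  True):  (-LIN, ANG),  # boxed in: reverse while turning
    (True,  False, False): (0.0, ANG),
    (False, False, True):  (0.0, ANG),
    (False, True,  False): (0.0, -ANG),
    (True,  False, True):  (0.0, ANG),
    (True,  True,  False): (0.0, -ANG),
    (False, True,  True):  (LIN, 0.0),   # narrow corridor: keep going straight
}

def decide(segments):
    key = tuple(segments[k] < SAFE for k in ('front', 'left', 'right'))
    return DECISIONS[key]  # covers all 8 cases, so no "unknown case" branch is needed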
Thus, a magic square always contains n^2 numbers, and its size (the number of rows [and columns] it has) is described as being \"of order n\".\nCalculate magic square\"\"\"\ndef magic_square_test(my_matrix):\n    iSize = len(my_matrix[0])\n    sum_list = []\n    \n    # Horizontal part: one sum per row\n    sum_list.extend([sum(lines) for lines in my_matrix]) \n\n    # Vertical part: one sum per column\n    for col in range(iSize):\n        sum_list.append(sum(row[col] for row in my_matrix))\n    \n    # Diagonals: main, then secondary (anti-)diagonal\n    result1 = 0\n    for i in range(0,iSize):\n        result1 += my_matrix[i][i]\n    sum_list.append(result1)  \n    \n    result2 = 0\n    for i in range(iSize-1,-1,-1):\n        result2 += my_matrix[i][iSize-1-i]\n    sum_list.append(result2)\n\n    if len(set(sum_list))>1:\n        return False\n    return True\n\nm=[[7, 12, 1, 14], [2, 13, 8, 11], [16, 3, 10, 5], [9, 6, 15, 4]] \nprint(magic_square_test(m))\n\nm=[[2, 7, 6], [9, 5, 1], [4, 3, 8]]\nprint(magic_square_test(m))\n\nm=[[2, 7, 6], [9, 5, 1], [4, 3, 7]]\nprint(magic_square_test(m))","repo_name":"DonaFidele/PythonExercices","sub_path":"math/exo_20.py","file_name":"exo_20.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"39235704000","text":"\"\"\"\ndata_operations.py\n\nHandles the creation, manipulation, and validation of data structures used \nin the main program workflow.\n\"\"\"\nimport os\nimport logging\nimport json\nimport xml.etree.ElementTree as ET\nfrom pathlib import Path\nfrom geopy.distance import distance as geopy_distance\nfrom geopy.distance import geodesic\n\nfrom file_io import export_json_to_kml\n\n\nlogging.basicConfig(level=logging.DEBUG, filename='../logs/application.log', filemode='a', format='%(asctime)s:%(levelname)s:%(message)s')\n\n\ndef initialize_data():\n    \"\"\"\n    Initializes the data structure for the application.\n\n    This function sets up a dictionary with keys for 'polygon', 'construction_sequence', \n    and 'monument', each initialized to their default values. The 'polygon' key stores \n    the points of a polygon, 'construction_sequence' tracks the order of point additions, \n    and 'monument' holds details about any specific monuments.\n\n    Returns:\n        dict: A dictionary with structured keys for storing polygon and monument data.\n    \"\"\"\n    return {\n        'polygon': [],  # List to store points forming a polygon\n        'construction_sequence': [],  # Tracks the order of construction steps\n        'monument': {\n            'label': None,  # Identifier or name of the monument\n            'lat': None,  # Latitude of the monument\n            'lon': None,  # Longitude of the monument\n            'bearing_from_prev': None,  # Bearing from the previous point\n            'distance_from_prev': None  # Distance from the previous point\n        }\n    }\n\n\ndef update_polygon_data(data, lat, lon, bearing, distance):\n    \"\"\"\n    Updates the data dictionary with the computed point's details and updates the construction sequence.\n\n    This function appends a new point, defined by its latitude (lat), longitude (lon), \n    bearing (in degrees), and distance (in meters), to the 'polygon' key of the data \n    dictionary. 
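With the secondary diagonal fixed above, the predicate can be cross-checked independently in a few lines of NumPy (an editorial sketch, not part of the original exercise):

import numpy as np

def is_magic(m):
    a = np.asarray(m)
    sums = np.concatenate([a.sum(axis=0), a.sum(axis=1),
                           [np.trace(a), np.trace(np.fliplr(a))]])  # cols, rows, both diagonals
    return bool(np.all(sums == sums[0]))

assert is_magic([[2, 7, 6], [9, 5, 1], [4, 3, 8]])       # magic constant 15
assert not is_magic([[2, 7, 6], [9, 5, 1], [4, 3, 7]])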
It also updates the 'construction_sequence' to reflect the addition \n of this new point.\n\n Args:\n - data (dict): The data dictionary to be updated.\n - lat (float): Latitude of the new point.\n - lon (float): Longitude of the new point.\n - bearing (float): Bearing from the previous point to the new point in degrees.\n - distance (float): Distance from the previous point to the new point in meters.\n\n Modifies the 'data' dictionary in place, adding new point details and updating the construction sequence.\n \"\"\"\n # Debugging: Log the state of construction_sequence before update\n logging.debug(f\"Before update - construction_sequence: {data['construction_sequence']}\")\n\n # Validate the input data types (note: assertions are used here for debugging purposes\n # and should be supplemented with robust error handling in production code)\n assert isinstance(data, dict), \"Debug: Data should be a dictionary\"\n assert isinstance(lat, float) and isinstance(lon, float), \"Latitude and Longitude should be floats\"\n\n # Creating a new data point\n data_point = {\n \"lat\": lat, \"lon\": lon,\n \"bearing_from_prev\": bearing, \n \"distance_from_prev\": distance\n }\n\n # Appending the new point to the polygon data\n data[\"polygon\"].append(data_point)\n \n # Generating a unique ID for the new point and updating the construction sequence\n point_id = f\"P{len(data['polygon']) + 1}\" # Assuming sequential IDs (P1, P2, ...)\n data['construction_sequence'].append(point_id)\n\n # Debugging: Log the state of construction_sequence after update\n logging.debug(f\"After update - construction_sequence: {data['construction_sequence']}\")\n return data\n\n\ndef warn_if_polygon_not_closed(data):\n \"\"\"\n Warns the user if the polygon is not closed and updates the construction sequence.\n The function checks if the first and last points of the polygon are within a specified \n proximity threshold (0.05 km) to determine closure. It logs and prints appropriate \n messages based on the closure status and the redundancy of the last point.\n\n Args:\n - data (dict): Data structure containing the polygon points and construction sequence.\n\n Returns:\n - bool: True if the polygon is closed (within the proximity threshold), False otherwise.\n \"\"\"\n points = data['polygon']\n logging.debug(f\"Points before closure check: {points}\")\n\n message = f\"Points list before closure check: {points}\"\n logging.debug(message)\n print(message) # Print to console\n\n # Check if the polygon is closed\n polygon_closed = check_polygon_closure(data)\n\n # Spatial check for the proximity of the last point to the first point\n first_point = (points[0]['lat'], points[0]['lon'])\n last_point = (points[-1]['lat'], points[-1]['lon'])\n proximity_threshold = 0.05 # Adjusted threshold in kilometers\n is_last_point_redundant = geodesic(first_point, last_point).kilometers < proximity_threshold\n\n # Handling based on polygon closure and redundancy of the last point\n if polygon_closed and is_last_point_redundant:\n data['construction_sequence'] = [point['id'] for point in data['polygon'][:-1]]\n message = \"Your polygon is completed and closed. 
Redundant last point excluded.\"\n elif polygon_closed:\n data['construction_sequence'] = [point['id'] for point in data['polygon']]\n message = \"Your polygon is completed.\"\n else:\n data['construction_sequence'] = [point['id'] for point in data['polygon']]\n message = \"Your polygon is not closed.\"\n\n logging.info(message)\n print(message) # Print to console\n\n return polygon_closed\n\n\ndef is_polygon_close_to_being_closed(points, tolerance=10):\n \"\"\"\n Checks if a polygon, defined by a list of points, is close to being closed within a specified tolerance.\n The tolerance is measured in feet. The function also handles conversion of points from dictionary format\n to tuples if necessary.\n\n Args:\n - points (list): List of points (either as tuples or dictionaries) forming the polygon.\n - tolerance (float): The distance tolerance in feet to determine if the polygon is close to being closed.\n\n Returns:\n - bool: True if the polygon is close to being closed (distance between first and last points within tolerance), \n False otherwise.\n \"\"\"\n # Ensure there are at least two points to compare\n if len(points) < 2:\n return False\n\n # Convert points to tuples if they are in dictionary format\n points_as_tuples = [(p['lat'], p['lon']) if isinstance(p, dict) else p for p in points]\n\n # Calculate the distance between the first and last points\n start_point = points_as_tuples[0]\n end_point = points_as_tuples[-1]\n distance = geopy_distance(start_point, end_point).feet\n\n logging.debug(f\"Distance between first and last point: {distance} feet\")\n\n # Check if the distance is within the tolerance for closing the polygon\n if distance <= tolerance:\n logging.info(\"The polygon is close enough to being closed.\")\n return True\n\n return False\n\n\ndef check_polygon_closure(data, reference_point=None):\n \"\"\"\n Checks if the polygon formed by a sequence of points is closed. The polygon is considered closed if \n the distance between the first and last points is less than 0.1 feet. 
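The closure test described above comes down to one geodesic comparison. A minimal sketch mirroring the module's own geopy usage and its 0.05 km proximity threshold (the ring coordinates are made-up sample data):

from geopy.distance import geodesic

def points_coincide(p1, p2, threshold_km=0.05):
    # p1 and p2 are (lat, lon) tuples; True when they are effectively one vertex
    return geodesic(p1, p2).kilometers < threshold_km

ring = [(40.0, -105.0), (40.001, -105.0), (40.001, -105.001), (40.0, -105.0)]
is_closed = points_coincide(ring[0], ring[-1])  # True: last vertex repeats the first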
The function handles points \n in either dictionary or tuple format and logs an error for invalid formats.\n\n If a reference point is provided, the polygon is also considered closed if the last point is within \n 10 feet of the reference point.\n\n Args:\n - data (dict): Data structure containing the polygon points.\n - reference_point (tuple, optional): An optional reference point (latitude, longitude) used in the closure check.\n\n Returns:\n - bool: True if the polygon is closed, False otherwise or if there are insufficient points to form a polygon.\n \"\"\"\n # Extract points list from data\n points = data['polygon']\n\n # Debugging: Log the state of construction_sequence before check\n logging.debug(f\"Before check - construction_sequence: {data['construction_sequence']}\")\n\n if len(points) > 2:\n for idx, point in enumerate(points):\n # Ensure that point is in the correct format (latitude, longitude)\n if isinstance(point, dict):\n lat_lon = (point['lat'], point['lon']) # Extract lat and lon\n elif isinstance(point, tuple) and len(point) == 2:\n lat_lon = point\n else:\n logging.error(f\"Invalid point format at index {idx}: {point}\")\n return False\n\n # If it's the first point, set it as the starting point\n if idx == 0:\n start_lat_lon = lat_lon\n continue\n\n # Check the distance between the current point and the start point\n distance = geopy_distance(start_lat_lon, lat_lon).feet\n logging.debug(f\"Distance between points: {distance} feet\")\n\n # Check distance between last point and the first point\n last_lat_lon = (points[-1]['lat'], points[-1]['lon']) if isinstance(points[-1], dict) else points[-1]\n distance_between_first_and_last = geopy_distance(start_lat_lon, last_lat_lon).feet\n logging.debug(f\"Distance between first and last point: {distance_between_first_and_last} feet\")\n\n is_closed = distance_between_first_and_last < 0.1\n if reference_point:\n distance_from_reference_to_last = geopy_distance(reference_point, last_lat_lon).feet\n is_closed |= distance_from_reference_to_last <= 10\n\n else:\n logging.warning(\"Not enough points to form a polygon.\")\n return False\n\n # Log the state of construction_sequence after check\n logging.debug(f\"After check - construction_sequence: {data['construction_sequence']}\")\n \n return is_closed\n\n\ndef finalize_json_structure(data, tie_point_used, polygon_name):\n \"\"\"\n Finalizes the JSON structure of the data, focusing on the handling of the construction sequence \n and special elements like tie points and monuments. It updates the polygon name, checks if the \n polygon is closed, and adjusts the construction sequence accordingly. 
The function also inserts \n tie points and monument information into the construction sequence when applicable.\n\n Args:\n - data (dict): Data structure containing polygon and related information.\n - tie_point_used (bool): Indicates if a tie point was used in the polygon construction.\n - polygon_name (str): The name of the polygon.\n\n Modifies the 'data' dictionary, updating the construction sequence and handling special cases \n for tie points and monuments.\n \"\"\"\n data['polygon_name'] = polygon_name\n\n # Check if polygon is closed and warn about polygon closure status\n polygon_closed = warn_if_polygon_not_closed(data)\n\n # Handle construction_sequence based on polygon closure\n if polygon_closed:\n # Ensure construction_sequence starts and ends with the same point if closed\n if data['construction_sequence'][0] != data['construction_sequence'][-1]:\n data['construction_sequence'].append(data['construction_sequence'][0])\n else:\n # If polygon is not closed, make sure construction_sequence does not repeat the first point at the end\n if len(data['construction_sequence']) != len(data['polygon']):\n data['construction_sequence'] = [point['id'] for point in data['polygon']]\n\n # Handle tie_point in construction_sequence\n if tie_point_used and 'tie_point' not in data['construction_sequence']:\n data['construction_sequence'].insert(0, 'tie_point')\n\n # Handle monument in construction_sequence\n monument = data.get('monument', {})\n if monument.get('lat') is not None and monument.get('lon') is not None:\n if 'monument' not in data['construction_sequence']:\n data['construction_sequence'].insert(1, 'monument')\n\n # Remove invalid keys if necessary\n if not tie_point_used and 'tie_point' in data:\n del data['tie_point']\n if 'monument' in data and monument.get('lat') is None:\n del data['monument']\n\n return data\n\n\ndef finalize_data(data, use_same_format_for_all, coordinate_format, choice):\n \"\"\"\n Finalizes the data by checking if the polygon is closed or close enough to being closed, \n and if not, it allows for the addition of more points based on user input. The function \n integrates with other modules to compute new points and update the polygon data.\n\n Args:\n - data (dict): Data structure containing polygon and related information.\n - use_same_format_for_all (bool): Indicates whether the same coordinate format is used for all points.\n - coordinate_format (str): The format of the coordinates (e.g., degrees, radians).\n - choice (str): The method choice for point computation.\n\n Utilizes a loop for adding additional points based on user decisions until the polygon is deemed closed or close enough.\n \"\"\"\n from computation import compute_point_based_on_method\n from io_operations import get_add_point_decision, get_bearing_and_distance, get_coordinate_format_only\n from display_operations import display_computed_point\n\n # Check if the polygon is closed or close enough to being closed\n if is_polygon_close_to_being_closed([point for point in data['polygon']]):\n logging.info(\"Polygon is closed or close enough to being closed.\")\n else:\n logging.info(\"Polygon is not closed. 
Adding more points.\")\n while True:\n add_point_decision = get_add_point_decision()\n if add_point_decision == 'yes':\n lat, lon = (data['polygon'][-1]['lat'], data['polygon'][-1]['lon']) if data['polygon'] else (None, None)\n \n if not use_same_format_for_all:\n coordinate_format = get_coordinate_format_only()\n bearing, distance = get_bearing_and_distance(coordinate_format)\n\n if bearing is not None and distance is not None:\n lat, lon = compute_point_based_on_method(choice, lat, lon, bearing, distance)\n if lat is not None and lon is not None:\n data = update_polygon_data(data, lat, lon, bearing, distance)\n display_computed_point(data['polygon'], lat, lon)\n else:\n print(\"An error occurred while computing the point. Please try again.\")\n else:\n break\n elif add_point_decision == 'no':\n break\n else:\n print(\"Invalid choice. Please enter 'yes' or 'no'.\")\n\n data['units'] = 'imperial'\n return data\n\n\ndef generate_kml_from_json(json_data):\n \"\"\"\n Generate KML data from JSON data and save it using export_json_to_kml.\n\n Args:\n json_data (dict): The JSON data to convert.\n \"\"\"\n try:\n kml_file_name = input(\"Enter name for KML file (without extension): \")\n if export_json_to_kml(json_data, kml_file_name):\n print(f\"JSON data successfully converted and saved as KML.\")\n else:\n print(\"Failed to generate KML data.\")\n except Exception as e:\n print(f\"Error generating KML data: {e}\")","repo_name":"KeyArgo/TerraTracer","sub_path":"src/data_operations.py","file_name":"data_operations.py","file_ext":"py","file_size_in_byte":15281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19462743457","text":"import requests\n\nimport logging\nimport random\n\naccount_balance_cache = {}\n\ndef get_logger():\n \"\"\" Get named logger \"\"\"\n return logging.getLogger(__name__)\n\ndef get_account_balance(session, base_url, account):\n try:\n url = base_url + '/account/balance/' + account\n #print('Getting balance for', account, url)\n response = session.get(url)\n if response.status_code == 200:\n return (0, float(response.text))\n return (1, 0)\n except:\n return (2, 0)\n return (3, 0)\n\ndef get_account_balance_cached(timestamp, session, base_url, account):\n balance_cached = 0\n if account in account_balance_cache:\n cache = account_balance_cache[account]\n if ('balance' in cache) and ('valid_until' in cache):\n balance_cached = cache['balance']\n if cache['valid_until'] >= timestamp:\n #print('Balance found in cache', 'bal', balance, 'acc', account)\n return balance_cached\n else:\n # found in cache, but outdated\n balance_cached = cache['balance']\n # not found in cache\n (ret_code, balance) = get_account_balance(session, base_url, account)\n if ret_code == 0:\n # cache it\n validity = timestamp + random.randint(50, 60) * 60\n cache = {\n 'balance': balance,\n 'valid_until': validity,\n }\n account_balance_cache[account] = cache\n if balance != balance_cached:\n get_logger().info('Balance added to cache, ' + str(balance) + \" \" + str(account))\n return balance\n","repo_name":"mikroncoin/mikron_restapi_py","sub_path":"monitor_nodes/balance.py","file_name":"balance.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23561961821","text":"__author__ = \"Jungkyu Park\"\r\n__email__ = \"parkssie@gmail.com\"\r\n\r\n\r\ndef last_tidynumber(str_x: int) -> int:\r\n rv = 0\r\n int_x = int(str_x)\r\n\r\n for i in 
range(int_x, -1, -1):\r\n tmp_x = list(map(int, list(str(i))))\r\n if is_tidy(tmp_x):\r\n rv = i\r\n break\r\n return rv\r\n\r\n\r\ndef is_tidy(list_int_x: list) -> bool:\r\n rv = True\r\n len_x = len(list_int_x) - 1\r\n for i in range(len_x, -1, -1):\r\n if i > 0:\r\n if list_int_x[i] - list_int_x[i - 1] >= 0:\r\n rv = True\r\n else:\r\n rv = False\r\n break\r\n return rv\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n result_list = []\r\n with open('./B-small-attempt0.in', 'r') as file:\r\n seq = 0\r\n seq_max = 1\r\n for line in file:\r\n\r\n input_string = line.replace('\\n', '')\r\n if seq == 0:\r\n seq_max = int(input_string)\r\n else:\r\n input_val = int(input_string)\r\n output = last_tidynumber(int(input_val))\r\n result = 'Case #{}: {}'.format(seq, output)\r\n result_list.append(result)\r\n\r\n if seq > seq_max:\r\n break\r\n seq += 1\r\n\r\n with open('./B-small-attempt0.out', 'w') as file:\r\n for result in result_list:\r\n file.write('{}\\n'.format(result))\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/4566.py","file_name":"4566.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39500491461","text":"# https://leetcode.com/problems/numbers-at-most-n-given-digit-set/\n# https://leetcode.com/problems/numbers-at-most-n-given-digit-set/solution/\n# for dynamic programming solution, refer solution tab.\n# Time: O(K*logM), M = len(D), K is digits of N.\n# Space: O(K)\n\nimport bisect\n\nclass Solution(object):\n def atMostNGivenDigitSet(self, D, N):\n \"\"\"\n :type D: List[str]\n :type N: int\n :rtype: int\n \"\"\"\n res = 0\n num = str(N)\n M, K = len(D), len(num)\n # for < K digits\n for k in xrange(1, K):\n res += M ** k\n # for K digits\n for i, x in enumerate(num):\n pos = bisect.bisect(D, str(x))\n if pos == 0: # x < D[0], no valid numbers\n break\n # D[pos-1] <= x\n if D[pos-1] < str(x):\n res += pos * (M ** (K - i - 1))\n break\n else:\n res += (pos - 1) * (M ** (K - i - 1))\n # If it's last digit, need to handle D[pos-1] == str(x) case\n if i == K - 1:\n res += 1\n return res\n\n","repo_name":"jwyx3/practices","sub_path":"leetcode/math/numbers-at-most-n-given-digit-set.py","file_name":"numbers-at-most-n-given-digit-set.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"5135919129","text":"# Title: \tMean, Median and Mode\n# Author: \tThomas Luong\n# Purpose:\tFinds mean (average), mode and median in a set of numbers without built-in functions for mean, median and mode\n\n# Notes:\tCreate three functions that allow user to find mean, median, and mode\n#\t\t\tNo built-in functions\n\n# Subgoals:\n#\t\t\tIn mean functions, give user option for how many decimal places to round to\n#\t\t\tIf even numbers in list, return both middle numbers for median\n#\t\t\tIf multiple modes, return all of them\n\nclass Statistics():\n\tdef __init__(self, numbers, decimalPlaces):\n\t\tself.numbers = numbers\n\t\tself.numbers.sort()\n\t\tself.decimalPlaces = decimalPlaces\n\t\tself.mean = None\n\t\tself.median = None\n\t\tself.mode = None\n\n\tdef print_mean(self):\n\t\ttotal = 0\n\t\tfor num in self.numbers:\n\t\t\ttotal += num\n\t\tself.mean = round(total / len(self.numbers), self.decimalPlaces)\n\t\tprint(\"The mean is \", self.mean)\n\n\tdef print_mode(self):\n\t\tmodes = []\n\t\tcurrentStreak = 0\n\t\thighestStreak = 0\n\t\tuniqueNums = set(self.numbers) # Eliminates repeating 
elements\n\n\t\tfor i, num in enumerate(uniqueNums):\n\t\t\tcurrentStreak = self.numbers.count(num)\n\t\t\tif (currentStreak > highestStreak):\n\t\t\t\thighestStreak = currentStreak\n\t\t\t\tmodes.clear()\n\t\t\t\tmodes.append(num)\n\n\t\t\telif (currentStreak == highestStreak):\n\t\t\t\tmodes.append(num)\n\n\t\tprint(\"Modes:\")\n\t\tfor mode in modes:\n\t\t\tprint(mode, end=' ')\n\t\tprint(\"\")\n\n\n\tdef print_median(self):\n\t\tlength = len(self.numbers)\n\t\tmiddle = int(length / 2)\n\t\tif (length % 2 == 0):\n\t\t\tprint(self.numbers[middle - 1], self.numbers[middle]) \n\t\telse:\n\t\t\tprint(self.numbers[middle])\n\n\n\n\tdef print_list(self):\n\t\tprint(\"The list is: \")\n\t\tfor i, num in enumerate(self.numbers):\n\t\t\tprint(num, end=' ')\n\t\tprint(\"\")\n\n\n\n\nuser_array = input(\"Please input an array of numbers separated by spaces: \\n\\t\")\nuser_array = user_array.split(\" \")\nuser_array = [int(x) for x in user_array]\n\ndecimals = 2\nnewList = Statistics(user_array, decimals)\n\nchoice = input(\"Type '1' for finding mean, '2' for mode, or '3' for median:\\n\\t\")\nif (choice == '1'):\n\tnewList.print_mean()\nelif (choice == '2'):\n\tnewList.print_mode()\nelif (choice == '3'):\n\tnewList.print_median()\nelse:\n\tprint(\"Sorry I didn't understand your choice. Goodbye\\n\")\n\n","repo_name":"luongthomas/Python-Mini-Projects","sub_path":"MeanMedianMode/meanMedianMode.py","file_name":"meanMedianMode.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"26729030636","text":"from queue import Queue\n\nG= [[0, 4, 0, 0, 0, 0, 0, 8, 0],\n [4, 0, 8, 0, 0, 0, 0, 11, 0],\n [0, 8, 0, 7, 0, 4, 0, 0, 2],\n [0, 0, 7, 0, 9, 14, 0, 0, 0],\n [0, 0, 0, 9, 0, 10, 0, 0, 0],\n [0, 0, 4, 14, 10, 0, 2, 0, 0],\n [0, 0, 0, 0, 0, 2, 0, 1, 6],\n [8, 11, 0, 0, 0, 0, 1, 0, 7],\n [0, 0, 2, 0, 0, 0, 6, 7, 0]]\n\n\ndef shortest_path(G, x, y):\n\t# G as an adjacency matrix\n\tn = len(G)\n\tQ = Queue()\n\n\tparent = [None]*n\n\tvisited = [False]*n\n\n\tQ.put(x)\n\tvisited[x] = True\n\n\twhile not Q.empty():\n\t\tv = Q.get()\n\t\tprint(v)\n\t\tfor vn in [i for i in range(n) if G[v][i]>0]:\n\t\t\tif not visited[vn]:\n\t\t\t\tvisited[vn] = True\n\t\t\t\tparent[vn] = v\n\n\t\t\t\tQ.put(vn)\n\tp = y\n\tpath = []\n\twhile p is not None:\n\t\tpath.append(p)\n\t\tp = parent[p]\n\n\tif x==path[-1]:\n\t\tprint(path[::-1])\n\t\treturn len(path)\n\telse:\n\t\treturn -1\n\nprint(shortest_path(G, 2, 8))\n\n\n","repo_name":"wojtke/agh-asd","sub_path":"graph/simple bfs, dfs/bfs shortest paths.py","file_name":"bfs shortest paths.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33345271248","text":"\"\"\"Classes for use with IGOR Pro\n\nPartially based on asetk module by Leopold Talirz\n(https://github.com/ltalirz/asetk/blob/master/asetk/format/igor.py)\n\"\"\"\n\nimport re\nimport numpy as np\n\ndef read_wave(lines):\n \"\"\"\n Reads the next wave section from the inputted list of lines\n Parsed lines are removed from the list, allowing for subsequent calls\n \"\"\"\n\n line = lines.pop(0)\n while not re.match(\"WAVES\",line):\n if len(lines) == 0:\n return None\n line = lines.pop(0)\n # 1d or 2d?\n d2 = False\n if \"N=\" in line:\n d2 = True\n match = re.search(\"WAVES/N=\\(([\\d, ]+)\\)\",line)\n grid = match.group(1).split(',')\n grid = np.array(grid, dtype=int)\n name = line.split(\")\")[-1].strip()\n else:\n 
name = line.split()[-1]\n\n line = lines.pop(0).strip()\n if not line == \"BEGIN\":\n raise IOError(\"Missing 'BEGIN' statement of data block\")\n\n # read data\n datastring = \"\"\n line = lines.pop(0)\n while not re.match(\"END\",line):\n if len(lines) == 0:\n return None\n if line.startswith(\"X\"):\n return None\n datastring += line\n line = lines.pop(0)\n data = np.array(datastring.split(), dtype=float)\n if d2:\n data = data.reshape(grid)\n \n # read axes\n axes = []\n line = lines.pop(0)\n matches = re.findall(\"SetScale.+?(?:;|$)\", line)\n for match in matches:\n ax = Axis(None,None,None,None)\n ax.read(match)\n axes.append(ax)\n \n if d2:\n # read also the second axis\n # is this necessary? can there be 2 lines with \"SetScale\" ?\n line = lines.pop(0)\n matches = re.findall(\"SetScale.+?(?:;|$)\", line)\n for match in matches:\n ax = Axis(None,None,None,None)\n ax.read(match)\n axes.append(ax)\n return Wave2d(data, axes, name)\n\n return Wave1d(data, axes, name)\n\n\nclass Axis(object):\n \"\"\"Represents an axis of an IGOR wave\"\"\"\n\n def __init__(self, symbol, min_, delta, unit, wavename=None):\n self.symbol = symbol\n self.min = min_\n self.delta = delta\n self.unit = unit\n self.wavename = wavename\n\n def __str__(self):\n \"\"\"Prints axis in itx format\n Note: SetScale/P expects minimum value and step-size\n \"\"\"\n delta = 0 if self.delta is None else self.delta\n s = \"X SetScale/P {symb} {min},{delta}, \\\"{unit}\\\", {name};\\n\"\\\n .format(symb=self.symbol, min=self.min, delta=delta,\\\n unit=self.unit, name=self.wavename)\n return s\n\n def read(self, string):\n \"\"\"Read axis from string\n Format: \n X SetScale/P x 0,2.01342281879195e-11,\"m\", data_00381_Up;\n SetScale d 0,0,\"V\", data_00381_Up\n \"\"\"\n match = re.search(\"SetScale/?P? (.) 
([+-\\.\\de]+),([+-\\.\\de]+),[ ]*\\\"(.*)\\\",\\s*(\\S+)\", string)\n self.symbol = match.group(1)\n self.min = float(match.group(2))\n self.delta = float(match.group(3))\n self.unit = match.group(4)\n self.wavename = match.group(5)\n if self.wavename.endswith(';'):\n self.wavename = self.wavename[:-1]\n\n\nclass Wave(object):\n \"\"\"A class for IGOR waves\"\"\"\n\n def __init__(self, data, axes, name=None):\n \"\"\"Initialize IGOR wave of generic dimension\"\"\"\n self.data = data\n self.axes = axes\n self.name = \"PYTHON_IMPORT\" if name is None else name\n self.dim = len(self.data.shape)\n\n def __str__(self):\n \"\"\"Print IGOR wave\"\"\"\n s = \"\"\n s += \"IGOR\\n\"\n\n dimstring = \"(\"\n for i in range(len(self.data.shape)):\n dimstring += \"{}, \".format(self.data.shape[i])\n dimstring = dimstring[:-2] + \")\" \n\n s += \"WAVES/N={} {}\\n\".format(dimstring, self.name)\n s += \"BEGIN\\n\"\n s += self.print_data()\n s += \"END\\n\"\n for ax in self.axes:\n s += str(ax)\n return s\n\n @property\n def extent(self):\n \"\"\"Returns extent for plotting\"\"\"\n grid = self.data.shape\n extent = []\n for i in range(len(grid)):\n ax = self.axes[i]\n extent.append(ax.min)\n extent.append(ax.min+ax.delta*grid[i])\n\n return np.array(extent)\n\n @property\n def x_min(self):\n return self.axes[0].min\n\n @property\n def dx(self):\n return self.axes[0].delta\n\n @property\n def x_max(self):\n return self.x_min + self.dx * self.data.shape[0]\n\n @property\n def x_arr(self):\n return self.x_min + np.arange(0, self.data.shape[0])*self.dx\n\n @property\n def x_symbol(self):\n return self.axes[0].symbol\n\n @property\n def x_unit(self):\n return self.axes[0].unit\n\n def print_data(self):\n \"\"\"Determines how to print the data block.\n \n To be reimplemented by subclasses.\"\"\"\n pass\n\n def write(self, fname):\n f=open(fname, 'w')\n f.write(str(self))\n f.close()\n \n def csv_header(self):\n header = \"\"\n shape = self.data.shape\n for i_ax in range(len(shape)):\n ax = self.axes[i_ax]\n if header != \"\":\n header += \"\\n\"\n header += \"axis %d: %s [unit: %s] [%.6e, %.6e], delta=%.6e, n=%d\" % (\n i_ax, ax.symbol, ax.unit, ax.min, ax.min+ax.delta*(shape[i_ax]-1), ax.delta, shape[i_ax]\n )\n return header\n \n def write_csv(self, fname, fmt=\"%.6e\"):\n np.savetxt(fname, self.data, delimiter=\",\", header=self.csv_header(), fmt=fmt)\n\n\nclass Wave1d(Wave):\n \"\"\"1d Igor wave\"\"\"\n\n def __init__(self, data, axes, name=\"1d\"):\n \"\"\"Initialize 1d IGOR wave\"\"\"\n super().__init__(data, axes, name)\n\n def print_data(self):\n \"\"\"Determines how to print the data block\"\"\"\n s = \"\"\n for line in self.data:\n s += \"{:12.6e}\\n\".format(float(line))\n return s\n\n\nclass Wave2d(Wave):\n \"\"\"2d Igor wave\"\"\"\n\n def __init__(self, data, axes, name=\"2d\"):\n \"\"\"Initialize 2d IGOR wave\"\"\"\n super().__init__(data, axes, name)\n\n def print_data(self):\n \"\"\"Determines how to print the data block\"\"\"\n s = \"\"\n for line in self.data:\n for x in line:\n s += \"{:12.6e} \".format(x)\n s += \"\\n\"\n return s\n\n @property\n def y_min(self):\n return self.axes[1].min\n\n @property\n def dy(self):\n return self.axes[1].delta\n\n @property\n def y_max(self):\n return self.y_min + self.dy * self.data.shape[1]\n\n @property\n def y_arr(self):\n return self.y_min + np.arange(0, self.data.shape[1])*self.dy\n\n @property\n def y_symbol(self):\n return self.axes[1].symbol\n\n @property\n def y_unit(self):\n return 
self.axes[1].unit","repo_name":"nanotech-empa/igor-tools","sub_path":"igor_tools/igor.py","file_name":"igor.py","file_ext":"py","file_size_in_byte":6823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27834822381","text":"#!/usr/bin/python3\n\n# this script eliminates randomly a percentage of citations from the citation data\n\nfrom random import randrange\n\ninput_file = \"pmid_citations.txt\"\n# percentage of citation that will be left after the process\npercentage_left = 80\noutput_file = \"pmid_citations_\" + str(percentage_left) + \".txt\"\n\nf_in = open(input_file, \"r\")\nf_out = open(output_file, \"w\")\n\noutput_pmid = False\ncurrent_pmid = \"\"\ninput_count = 0\noutput_count = 0\n\n# delete lines based on a random number\nfor line in f_in:\n data = line.split(\"\\t\")\n pmid1 = data[0]\n input_count += 1\n if pmid1 != current_pmid:\n current_pmid = pmid1\n if randrange(100) > percentage_left-1:\n output_pmid = False\n else:\n output_pmid = True\n if output_pmid == True:\n f_out.write(line)\n output_count += 1\n\nprint(\"Input count: \" + str(input_count))\nprint(\"Output count: \" + str(output_count))\n","repo_name":"raroes/scientific-silos","sub_path":"decimate_citations.py","file_name":"decimate_citations.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"20367955226","text":"# I guess I'm bad at reading comprehension... I don't understand this problem~ I'll have to think about it some more!\n# https://assaeunji.github.io/python/2020-05-04-bj1966/\n# https://kyoung-jnn.tistory.com/entry/%EB%B0%B1%EC%A4%801966%EB%B2%88%ED%8C%8C%EC%9D%B4%EC%8D%ACPython-%ED%94%84%EB%A6%B0%ED%84%B0-%ED%81%90\n\ntestCase = int(input())\n\nfor _ in range(testCase):\n N, M = map(int, input().split())\n\n printList = list(map(int, input().split()))\n checkList = [0 for _ in range(N)]\n checkList[M] = 1 # store the position of the document we are interested in\n\n count = 0\n while True:\n if printList[0] == max(printList):\n count += 1\n\n if checkList[0] != 1:\n del printList[0]\n del checkList[0]\n else:\n print(count)\n break\n else:\n printList.append(printList[0])\n checkList.append(checkList[0])\n del printList[0]\n del checkList[0]\n","repo_name":"jeonghyeonee/baekjoon_python","sub_path":"2021/2021.10/2021.10.03/1966.py","file_name":"1966.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39106941896","text":"import os, shutil, audioread, sys\r\n\r\nmodID = \"OshidasMusicMod3\" #Replace with the \"id\" of the mod you wish to make, only letters, numbers and _\r\nmodName = \"Oshida\\'s Music Mod\" # Beware to \"escape\" any special character\r\nmodAuthor = \"Oshida\"\r\n\r\nshutil.rmtree(modID + \"\\\\folderwithtracks\", ignore_errors=True)\r\nshutil.rmtree(modID, ignore_errors=True)\r\n\r\nshutil.copytree(R\"template\", modID) # If an error appears here, delete the mod folder named like modID\r\nfile = open(modID + \"\\\\FileListWithMusicTracks.hpp\", \"w\", encoding=\"UTF-8\")\r\nmusicList = os.listdir(\"Music\")\r\nos.mkdir(modID + \"\\\\folderwithtracks\")\r\nfor i in range(len(musicList)):\r\n if musicList[i].lower().endswith('.ogg'): # Only Copy OGG Files\r\n shutil.copy(\"Music\\\\\" + musicList[i], modID + \"\\\\folderwithtracks\\\\\" + str(i) + \".ogg\")\r\n with audioread.audio_open(modID + \"\\\\folderwithtracks\\\\\" + str(i) + \".ogg\") as audioFile:\r\n length = int(audioFile.duration)\r\n #print(musicList[i] + ' : ' + 
str(length))\r\n file.write(\"class \" + modID + \"Song\" + str(i) + '\\n')\r\n file.write(\"{\\n\")\r\n file.write(\"\\tname = \\\"\" + musicList[i][:-4] + \"\\\";\\n\")\r\n file.write(\"\\tsound[] = {\\\"\" + modID + \"\\\\folderwithtracks\\\\\" + str(i) + \".ogg\\\",db+0,1};\\n\")\r\n file.write(\"\\tduration = \" + str(length) +\";\\n\")\r\n file.write(\"\\tmusicClass = \\\"\" + modID + \"\\\";\\n\")\r\n file.write(\"};\\n\")\r\n print(musicList[i])\r\nfile.close()\r\n\r\nmodCpp = open(modID + \"\\\\mod.cpp\", \"w\", encoding=\"UTF-8\")\r\nmodCpp.write(\"name = \\\"\" + modName + \"\\\";\\n\")\r\nmodCpp.write(\"picture = \\\"logo.paa\\\";\\n\")\r\nmodCpp.write(\"description = \\\"\\\";\\n\")\r\nmodCpp.write(\"logo = \\\"logo.paa\\\";\\n\")\r\nmodCpp.write(\"logoOver = \\\"logo.paa\\\";\\n\")\r\nmodCpp.write(\"tooltip = \\\"\" + modName + \"\\\";\\n\")\r\nmodCpp.write(\"tooltipOwned = \\\"\" + modName + \" Owned\\\";\\n\")\r\nmodCpp.write(\"overview = \\\"\" + modName + \"\\\";\\n\")\r\nmodCpp.write(\"author = \\\"\" + modAuthor + \"\\\";\\n\")\r\nmodCpp.write(\"overviewPicture = \\\"logo.paa\\\";\\n\")\r\nmodCpp.write(\"overviewText = \\\"\" + modName + \"\\\";\\n\")\r\nmodCpp.write(\"overviewFootnote = \\\"\\\";\\n\")\r\nmodCpp.close()\r\n\r\n\r\nconfigCpp = open(modID + \"\\\\config.cpp\", \"w\", encoding=\"UTF-8\")\r\nconfigCpp.write(\"class CfgPatches\\n\")\r\nconfigCpp.write(\"{\\n\")\r\nconfigCpp.write(\"\\tclass \" + modID + \"\\n\")\r\nconfigCpp.write(\"\\t{\\n\")\r\nconfigCpp.write(\"\\t\\tname = \\\"\" + modName + \"\\\";\\n\")\r\nconfigCpp.write(\"\\t\\tauthor = \\\"\" + modAuthor + \"\\\";\\n\")\r\nconfigCpp.write(\"\\t\\trequiredVersion = 1.00;\\n\")\r\nconfigCpp.write(\"\\t\\trequiredAddons[] = {};\\n\")\r\nconfigCpp.write(\"\\t\\tunits[] = {};\\n\")\r\nconfigCpp.write(\"\\t\\tweapons[] = {};\\n\")\r\nconfigCpp.write(\"\\t\\tworlds[] = {};\\n\")\r\nconfigCpp.write(\"\\t};\\n\")\r\nconfigCpp.write(\"};\\n\")\r\nconfigCpp.write(\"class CfgMusic\\n\")\r\nconfigCpp.write(\"{\\n\")\r\nconfigCpp.write(\"\\t#include \\\"FileListWithMusicTracks.hpp\\\"\\n\")\r\nconfigCpp.write(\"};\\n\")\r\nconfigCpp.write(\"class CfgMusicClasses\\n\")\r\nconfigCpp.write(\"{\\n\")\r\nconfigCpp.write(\"\\tclass \" + modID + \"\\n\")\r\nconfigCpp.write(\"\\t{\\n\")\r\nconfigCpp.write(\"\\t\\tdisplayName = \\\"\" + modName + \"\\\";\\n\")\r\nconfigCpp.write(\"\\t};\\n\")\r\nconfigCpp.write(\"};\\n\")\r\nconfigCpp.close()\r\n\r\nif os.path.exists('@' + modID):\r\n print(\"A mod folder already exist, to prevent overwriting the script was stopped, either change the ID or delete the old mod folder then restart the script\")\r\n sys.exit()\r\n\r\nos.makedirs('@' + modID + \"\\\\Addons\")\r\nshutil.copy(modID + \"\\\\logo.paa\", '@' + modID + \"\\\\logo.paa\")\r\nshutil.copy(modID + \"\\\\mod.cpp\", '@' + modID + \"\\\\mod.cpp\")\r\nshutil.copy(modID + \"\\\\steamLogo.png\", '@' + modID + \"\\\\steamLogo.png\")\r\n","repo_name":"OshidaBCF/Arma-3-Music-Mod","sub_path":"makeMusicMod.py","file_name":"makeMusicMod.py","file_ext":"py","file_size_in_byte":3567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9825086715","text":"#Implement TREE_SORT algorithm in a language of your choice,\r\n#but make sure that the INORDER function is implemented iteratively. 
\r\n\r\nclass BinTreeNode(object):\r\n\r\n def __init__(self, value):\r\n \"\"\"initialisation of nodes and attributes\"\"\"\r\n self.value=value\r\n self.left=None\r\n self.right=None\r\n\r\ndef tree_insert( tree, item):\r\n \"\"\"inserts a node into a tree\"\"\"\r\n if tree==None:\r\n tree=BinTreeNode(item)\r\n else:\r\n if(item < tree.value):\r\n if(tree.left==None):\r\n tree.left=BinTreeNode(item)\r\n else:\r\n tree_insert(tree.left,item)\r\n else:\r\n if(tree.right==None):\r\n tree.right=BinTreeNode(item)\r\n else:\r\n tree_insert(tree.right,item)\r\n return tree\r\n\r\ndef postorder(tree):\r\n \"\"\"travseres the tree postorder\"\"\"\r\n if(tree.left!=None):\r\n postorder(tree.left)\r\n if(tree.right!=None):\r\n postorder(tree.right)\r\n print(tree.value)\r\n \r\n#original recursive function\r\n#def in_order(tree):\r\n# if(tree.left!=None):\r\n# in_order(tree.left)\r\n# print(tree.value)\r\n# if(tree.right!=None):\r\n# in_order(tree.right)\r\n\r\n#iteratively\r\ndef in_order(tree):\r\n \"\"\"traverses the graph in ascending order and prints it\"\"\"\r\n stack = []\r\n trv = False #boolean value represtents if the tree is traversed\r\n while(trv == False):\r\n if tree != None:\r\n stack.append(tree)\r\n tree = tree.left\r\n else:\r\n if (len(stack) > 0):\r\n tree = stack.pop()\r\n print(tree.value)\r\n tree = tree.right\r\n else:\r\n trv = True\r\n\r\ndef tree_sort(list1):\r\n \"\"\"makes a tree for given values and traverses them with in_order function\"\"\"\r\n #makes first node the root of the tree\r\n t = tree_insert(None, list1[0])\r\n for i in list1:\r\n #for element in the list insert into the tree\r\n tree_insert(t, i)\r\n #orders the tree once created\r\n in_order(t)\r\n\r\nif __name__ == '__main__':\r\n#initialises function calls\r\n t=tree_insert(None,6);\r\n tree_insert(t,10)\r\n tree_insert(t,5)\r\n tree_insert(t,2)\r\n tree_insert(t,3)\r\n tree_insert(t,4)\r\n tree_insert(t,11)\r\n #postorder(t)\r\n in_order(t)\r\n list1 = [2, 5, 8, 1, 5, 9, 7, 0]\r\n tree_sort(list1)\r\n\r\n","repo_name":"Rick24/210CT-CW","sub_path":"completed/Week 6 Q1 (12).py","file_name":"Week 6 Q1 (12).py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42107948932","text":"import sys\r\n\r\nsys.path.append(\"import/base\")\r\nfrom graphics import *\r\nfrom graphwinplus import *\r\nfrom timer import *\r\n\r\nsys.path.append(\"import\")\r\nfrom gamemap import *\r\nimport equipment\r\nfrom gameobject import *\r\nfrom camera import *\r\nfrom controller import *\r\n\r\nclass CircleWar:\r\n def __init__(self, width, height, gamemap, *players):\r\n self._camera = Camera(\"Circle War !\", width, height)\r\n \r\n self._ts = Timer(1)\r\n self._tf = Timer(1.0/30)\r\n\r\n self.loadMap(gamemap)\r\n for (bornplace, human, controller) in players:\r\n self.addPlayer(bornplace, human, controller)\r\n \r\n def loadMap(self, gamemap):\r\n gamemap.check()\r\n self._map = gamemap\r\n self.emptyPlayer()\r\n\r\n def emptyPlayer(self):\r\n self._human = []\r\n self._ctrl = []\r\n for i in range(self._map.max_player()):\r\n self._human += [[]]\r\n self._ctrl += [[]]\r\n def addPlayer(self, bornplace, human, controller):\r\n human.moveTo(*self._map.getBornPoint(bornplace))\r\n self._human[bornplace] += [human]\r\n self._ctrl[bornplace] += [controller]\r\n if self._camera.isRunning():\r\n self._camera.bindObject(human)\r\n\r\n def _doEveryFrame(self):\r\n #human run\r\n for b in range(len(self._human)):\r\n for i in 
range(len(self._human[b])):\r\n #weapon direction\r\n self._human[b][i].turnTo(self._ctrl[b][i].getDirection())\r\n #move\r\n self._human[b][i].run(self._ctrl[b][i].getMoveFourDirection())\r\n #set shoot state\r\n self._human[b][i].setShoot(self._ctrl[b][i].getShoot())\r\n #make bomb when shooting\r\n if self._human[b][i].isShooting():\r\n self._bomb += [self._human[b][i].makeBomb(self._camera)]\r\n \r\n #can not go out of the bound\r\n if not self._map.catch(*self._human[b][i].center()):\r\n if self._human[b][i].x < 0:\r\n self._human[b][i].move(-self._human[b][i].x,0)\r\n elif self._human[b][i].x > self._map.width:\r\n self._human[b][i].move(self._map.width-self._human[b][i].x,0)\r\n if self._human[b][i].y < 0:\r\n self._human[b][i].move(0, -self._human[b][i].y)\r\n elif self._human[b][i].y > self._map.height:\r\n self._human[b][i].move(0, self._map.height-self._human[b][i].y)\r\n #can not go over the obstacle\r\n for obs in self._obstacle:\r\n if obs.intersect(self._human[b][i]):\r\n self._human[b][i].move(*obs.pushAway(self._human[b][i]))\r\n #bomb fly\r\n delbomblist = []\r\n for i in range(len(self._bomb)):\r\n mark = False #mark for removing it from the game\r\n #fly too far\r\n if not self._bomb[i].fly():\r\n mark = True\r\n #fly over the obstacle\r\n for obs in self._obstacle:\r\n if obs.intersect(self._bomb[i]):\r\n mark = True\r\n break\r\n #fly out of the map\r\n if not self._map.catch(*self._bomb[i].center()):\r\n mark = True\r\n #hit a human\r\n for h in self._human:\r\n for human in h:\r\n if human.isAlive():\r\n if human.team!=self._bomb[i].team:\r\n if human.intersect(self._bomb[i]):\r\n human.hurtBy(self._bomb[i])\r\n mark=True\r\n break\r\n if mark:\r\n delbomblist += [i]\r\n for i in range(len(delbomblist)):\r\n self._camera.unbindObject(self._bomb[delbomblist[i]-i])\r\n del self._bomb[delbomblist[i]-i]\r\n\r\n self._camera.refresh()\r\n \r\n def _doEveryTime(self):\r\n pass\r\n \r\n def start(self, lockplayer = None):\r\n self._obstacle = []\r\n for (x,y,r) in self._map.obstacles:\r\n self._obstacle+=[Obstacle(x,y,r)]\r\n \r\n self._bomb = []\r\n\r\n self._camera.startup(self._map.width, self._map.height)\r\n self._camera.bindObject(*self._obstacle)\r\n for b in range(len(self._human)):\r\n for i in range(len(self._human[b])):\r\n self._camera.bindObject(self._human[b][i])\r\n self._camera.lockObject(lockplayer)\r\n\r\n for ctrls in self._ctrl:\r\n for ctrl in ctrls:\r\n ctrl.start()\r\n self._ts.run()\r\n self._tf.run()\r\n self._fps = 0\r\n framecount = 0\r\n while True:\r\n if self._ts.check():\r\n self._fps=framecount\r\n framecount = 0\r\n self._camera.setTitle(\"Circle War ! 
fps:\"+str(self._fps))\r\n if self._tf.check():\r\n framecount += 1\r\n self._doEveryFrame()\r\n self._doEveryTime()\r\n if self.isGameover():\r\n self.stop()\r\n break\r\n\r\n def isGameover(self):\r\n return False\r\n\r\n def stop(self):\r\n for ctrls in self._ctrl:\r\n for ctrl in ctrls:\r\n ctrl.stop()\r\n self._camera.stop()\r\n\r\ndef main():\r\n #map\r\n m = GameMap(file=\"test\")\r\n \"\"\"m = GameMap(name=\"test\",width=1000,height=700)\r\n m.addPlayer(0,0)\r\n m.addPlayer(800,600)\r\n m.addPlayer(0,700)\r\n m.addPlayer(1000,0)\r\n for i in range(1,5):\r\n for j in range(1,4):\r\n m.addObstacle(i*200, j*200, 40)\"\"\"\r\n \"\"\"m.addObstacle(200,140,1)\r\n m.addObstacle(800,140,1)\r\n m.addObstacle(800,560,1)\r\n m.addObstacle(200,560,1)\r\n\"\"\"\r\n h1=Human(Team.Blue, equipment.LightWeapon)\r\n h2=Human(Team.Red , equipment.LightWeapon)\r\n game=CircleWar(800, 600, m)\r\n game.addPlayer(0, h1, KeyBoardAndMouseB(game))\r\n game.addPlayer(1, h2, RandomRobot(m.copy()))\r\n game.start(h1)\r\n \r\nmain()\r\n\r\n\r\n","repo_name":"lalaticao/CircleWar","sub_path":"src/circlewar.pyw","file_name":"circlewar.pyw","file_ext":"pyw","file_size_in_byte":6084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34934044299","text":"import fridge.Constituent.Constituent as Constituent\nimport fridge.utilities.mcnpCreatorFunctions as mcnpCF\n\n\nclass CoreCoolant(Constituent.Constituent):\n \"\"\"Creates a cylinder encompassing the assemblies.\"\"\"\n def __init__(self, unit_info, void_percent=1.0):\n super().__init__(unit_info, void_percent=void_percent)\n self.coolantRadius = unit_info[1][0]\n self.coolantHeight = unit_info[1][1]\n self.assemblySurfaceList = unit_info[1][2]\n self.get_material_card(unit_info[0][3])\n self.make_component([1])\n\n def make_component(self, unit_info):\n self.surfaceCard = mcnpCF.build_right_circular_cylinder_surface(self.coolantRadius, self.coolantHeight,\n self.position, self.surfaceNum,\n '$Coolant Surrounding Assemblies')\n self.cellCard = mcnpCF.build_concentric_cell(self.cellNum, self.materialNum, self.material.atomDensity,\n self.assemblySurfaceList, self.surfaceNum, '',\n '$Coolant Surrounding Assemblies')\n","repo_name":"ryanstwrt/FRIDGe","sub_path":"fridge/Constituent/CoreCoolant.py","file_name":"CoreCoolant.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17182433080","text":"import cv2\nimport numpy as np\nimport math\nfrom GilAAADataset import *\n\n\n\ndef DTF(img):\n \n rows, cols = img.shape\n\n res= cv2.distanceTransform(img, cv2.DIST_L2, 5) #return 0.0~1.0 range value\n\n res = np.reciprocal(res)\n\n # res100= cv2.normalize(res, None, 100, 255, cv2.NORM_MINMAX, cv2.CV_8UC1, mask=img)\n # res150= cv2.normalize(res, None, 150, 255, cv2.NORM_MINMAX, cv2.CV_8UC1, mask=img)\n # res200= cv2.normalize(res, None, 200, 255, cv2.NORM_MINMAX, cv2.CV_8UC1, mask=img)\n res= cv2.normalize(res, None, 250, 255, cv2.NORM_MINMAX, cv2.CV_8UC1, mask=img)\n\n\n\n\n return res\n\n\n\n\n\nif __name__ == \"__main__\":\n # mask = cv2.imread(\"/media/jihu/data/dataset/AAA/AAAGilDatasetPos/mask/05390853_20200821_0085.png\", cv2.IMREAD_GRAYSCALE)\n # img = cv2.imread(\"/media/jihu/data/dataset/AAA/AAAGilDatasetPos/raw/05390853_20200821_0085.png\")\n \n \n \n # o_mask = mask.copy()\n # o_img = img.copy()\n\n ## Hmm, this isn't working properly?\n \n\n # cv2.imshow('or', o_mask)\n # cv2.imshow(\"te\", res)\n # cv2.waitKey(0)\n \n dataset 
= GilAAADataset('/media/jihu/data/dataset/AAA/AAAGilDatasetPos')\n \n \n for img, target in dataset:\n # print(type(img)) => PIL\n # print(type(target[\"masks\"])) => Tensor\n # print(target[\"labels\"])\n \n img = np.array(img)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n #cv2.imshow(\"or\", img)\n path = target[\"path\"]\n masksss = cv2.imread(\"/media/jihu/data/dataset/AAA/AAAGilDatasetPos/mask/\" + target[\"path\"], cv2.IMREAD_GRAYSCALE)\n\n masksss = DTF(masksss)\n \n # cv2.imshow(\"te230\", masksss)\n # cv2.waitKey(0)\n #exit(0)\n \n \n cv2.imwrite(\"/media/jihu/data/dataset/AAA/AAAGilDatasetPos/WDmask250/\" + target[\"path\"], masksss)\n \n\n #exit(0)\n\n","repo_name":"Gachon-AIRLab/AAA_mask_DistanceWeight","sub_path":"distanceTransform.py","file_name":"distanceTransform.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15252153703","text":"import urllib.request, urllib.parse, urllib.error\nimport json\n\n\nurl = input('Enter valid URL: ')\ndata = \"\"\"{ \"id\" : \"001\",\n \"x\" : \"2\",\n \"name\" : \"Chuck\"\n}\"\"\"\n\ninfo = json.loads(data)\nprint(info)\ntotal = 0\nfor i in range(len(info.get('comments', []))):\n total += int(info['comments'][i]['count'])\nprint(total)\n","repo_name":"Oleksl888/web_samples","sub_path":"13.1-urllib_json.py","file_name":"13.1-urllib_json.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2016697279","text":"import ctypes, requests, random\r\nfrom threading import Thread\r\n\r\nthreadc = 100\r\n\r\nusernames = open('usernames.txt','r',errors='ignore').read().splitlines()\r\ntotal = len(usernames)\r\ns = requests.session()\r\nproxy = set()\r\n\r\nwith open(\"proxies.txt\", \"r\") as f:\r\n file_lines1 = f.readlines()\r\n for line1 in file_lines1:\r\n proxy.add(line1.strip())\r\n \r\nproxies = {\r\n 'http': 'http://'+random.choice(list(proxy))\r\n}\r\n\r\n\r\nr = requests.get('http://www.roblox.com/',proxies=proxies)\r\ndone = 0\r\noutput = []\r\n\r\ndef thread():\r\n global done\r\n while usernames:\r\n username = usernames.pop(0)\r\n try:\r\n r = requests.get(f'https://www.roblox.com/user.aspx?username={username}').url\r\n if 'www.roblox.com/users/' in r:\r\n userid = r.split('/')[-2]\r\n created = requests.get(f'https://users.roblox.com/v1/users/{userid}/').json()['created'].split('-')[0]\r\n lastonline = requests.get(f'https://api.roblox.com/users/{userid}/onlinestatus/').json()['LastOnline'].split('-')[0]\r\n output.append(f'{lastonline}:{created}:{username}\\n')\r\n done += 1\r\n except:\r\n usernames.append(username)\r\n\r\nprint(f'Starting {threadc} threads.')\r\nfor i in range(threadc):\r\n Thread(target=thread).start()\r\n\r\nwhile 1:\r\n finished = done\r\n ctypes.windll.kernel32.SetConsoleTitleW(f'Last Online Scraper | Done: {finished}/{total}')\r\n if finished == total: break\r\n\r\nwith open('out.txt','w',errors='ignore') as f:\r\n f.writelines(output)\r\n\r\ninput('Finished.')\r\n","repo_name":"2rt/Last-Online-and-Creation-Date-Scraper","sub_path":"lastonline&creationdate.py","file_name":"lastonline&creationdate.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31796307306","text":"# from __future__ import absolute_import\r\n\r\nimport os\r\n# from got10k.datasets import *\r\n\r\nfrom siamfc.siamvgg_oc import TrackerSiamvgg_oc\r\nfrom 
siamfc.datasets import TrackingNetCropped\r\nfrom siamfc.datasets import Got10kCropped\r\nfrom siamfc.datasets import ILSVRC2015Cropped\r\nfrom siamfc.datasets import LaSOTCropped\r\nfrom siamfc.datasets import MultipleDatasets\r\n\r\n\r\nif __name__ == '__main__':\r\n # root_dir = os.path.expanduser(r'G:\\dataset\\train_dataset\\Cropped_GOT10K')\r\n # root_dir = os.path.expanduser(r'E:\\Cropped_ILSVRC2015')\r\n # root_dir = os.path.expanduser(r'G:\\dataset\\train_dataset\\Cropped_LaSOT')\r\n # root_dir = os.path.expanduser(r'I:\\Cropped_TrackingNet')\r\n\r\n # train_dataset = Got10kCropped(root_dir, pair_per_seq=1)\r\n # train_dataset = ILSVRC2015Cropped(root_dir, pair_per_seq=1)\r\n # train_dataset = LaSOTCropped(root_dir, pair_per_seq=9)\r\n # train_dataset = TrackingNetCropped(root_dir, pair_per_seq=1)\r\n\r\n # tracker = TrackerSiamFC()\r\n # tracker.train_over(train_dataset)\r\n\r\n # train on multiple datasets\r\n\r\n initial_lr = 1e-2\r\n ultimate_lr = 1e-5\r\n\r\n epoch_num = 50\r\n\r\n datasets = [\r\n # 'ILSVRC15',\r\n 'GOT10K',\r\n # 'LASOT',\r\n # 'TrackingNet'\r\n ]\r\n dataset_paths = {\r\n 'ILSVRC15': r'E:\\Cropped_ILSVRC2015',\r\n 'GOT10K': r'E:\\Cropped_GOT10K',\r\n 'LASOT': r'E:\\Cropped_LaSOT',\r\n 'TrackingNet': r'E:\\Cropped_TrackingNet'\r\n }\r\n pair_per_seqs = {\r\n # 'ILSVRC15': 5 * epoch_num,\r\n 'GOT10K': 1 * epoch_num,\r\n # 'LASOT': 1 * epoch_num,\r\n # 'TrackingNet': 1 * epoch_num,\r\n }\r\n train_dataset = MultipleDatasets(datasets=datasets, dataset_paths=dataset_paths, pair_per_seqs=pair_per_seqs)\r\n\r\n tracker = TrackerSiamvgg_oc(\r\n initial_lr=initial_lr,\r\n ultimate_lr=ultimate_lr,\r\n epoch_num=epoch_num\r\n )\r\n tracker.train_over(train_dataset)\r\n\r\n\r\n\r\n\r\n","repo_name":"conquerhuang/AESiam","sub_path":"tools/train_siamvgg_oc.py","file_name":"train_siamvgg_oc.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"38524473376","text":"import numpy as np\nimport networkx as nx\nfrom matplotlib import pyplot\n\ndef update_datings(cmodel):\n\t# cmodel = Deposit GUI DCModel\n\t# for each relative dating in format \"[culture], [phase]\", add a relation\n\t# [culture] -> contains -> [culture], [phase]\n\t\n\tdef to_general(name):\n\t\t\n\t\tif \",\" in name:\n\t\t\tname = name.split(\",\")[0].strip()\n\t\treturn name\n\t\n\tcls = cmodel.get_class(\"Relative_Dating\")\n\tif cls is None:\n\t\treturn\n\tobj_lookup = {}\n\tgeneral = set()\n\tdetailed = set()\n\tfor obj in cls.get_members(direct_only = True):\n\t\tname = obj.get_descriptor(\"Name\").strip()\n\t\tif not name:\n\t\t\tcontinue\n\t\tobj_lookup[name] = obj\n\t\tif \",\" in name:\n\t\t\tdetailed.add(name)\n\t\t\tgeneral.add(to_general(name))\n\t\t\tobj.set_descriptor(\"General\", 0)\n\t\telse:\n\t\t\tobj.set_descriptor(\"General\", 1)\n\tfor name in general:\n\t\tif name not in obj_lookup:\n\t\t\tobj_lookup[name] = cmodel.add_object_with_descriptors(cls, {\"Name\": name, \"General\": 1})\n\tfor name in detailed:\n\t\tname_general = to_general(name)\n\t\tobj_lookup[name_general].add_relation(obj_lookup[name], \"contains\")\n\ndef get_phasing(cmodel, \n\t\tdating_cls = \"Relative_Dating\", name_descr = \"Name\", general_descr = \"General\", \n\t\tbefore_rel = \"before\", same_as_rel = \"same_as\", contains_rel = \"contains\",\n\t\tgeneral_only = False\n\t):\n\t# cmodel = Deposit GUI DCModel\n\t# returns phasing, names, circulars\n\t# \tphasing = {obj_id: [phase_min, phase_max], ...}; 
phase_min/max = None if no chronological relations found\n\t# \tnames = {obj_id: name, ...}\n\t#\tcirculars = [obj_id, ...]\n\t\n\tdef extract_datings(cmodel, \n\t\t\tdating_cls, name_descr, general_descr, \n\t\t\tbefore_rel, contains_rel, general_only\n\t\t):\n\t\t# returns G = nx.DiGraph\n\t\t# G.nodes[obj_id][\"name\"] = dating name\n\t\t# G.edges() = [(obj_id1, obj_id2), ...]; obj_id1 -> before -> obj_id2\n\t\t\n\t\tG = nx.DiGraph()\n\t\t\n\t\tcls = cmodel.get_class(dating_cls)\n\t\tif cls is None:\n\t\t\treturn G\n\t\t\n\t\tfor obj in cls.get_members(direct_only = True):\n\t\t\tif general_only and (obj.get_descriptor(general_descr) != 1):\n\t\t\t\tcontinue\n\t\t\tG.add_node(obj.id, name = obj.get_descriptor(name_descr))\n\t\tfor obj1 in cls.get_members(direct_only = True):\n\t\t\tif general_only and (obj1.get_descriptor(general_descr) != 1):\n\t\t\t\tcontinue\n\t\t\tobjs1 = set([obj1.id])\n\t\t\tfor obj, rel in obj1.get_relations():\n\t\t\t\tif rel != contains_rel:\n\t\t\t\t\tcontinue\n\t\t\t\tif general_only and (obj.get_descriptor(general_descr) != 1):\n\t\t\t\t\tcontinue\n\t\t\t\tobjs1.add(obj.id)\n\t\t\tobjs2 = set()\n\t\t\tfor obj2, rel in obj1.get_relations():\n\t\t\t\tif rel != before_rel:\n\t\t\t\t\tcontinue\n\t\t\t\tif general_only and (obj2.get_descriptor(general_descr) != 1):\n\t\t\t\t\tcontinue\n\t\t\t\tobjs2.add(obj2.id)\n\t\t\t\tfor obj, rel in obj2.get_relations():\n\t\t\t\t\tif rel != contains_rel:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif general_only and (obj.get_descriptor(general_descr) != 1):\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tobjs2.add(obj.id)\n\t\t\tif not objs2:\n\t\t\t\tcontinue\n\t\t\tfor obj1_id in objs1:\n\t\t\t\tfor obj2_id in objs2:\n\t\t\t\t\tG.add_edge(obj1_id, obj2_id)\n\t\t\n\t\treturn G\n\t\n\tdef find_circulars(chronostrat):\n\t\t# returns [idx, ...]; idx = index in chronostrat\n\t\t\n\t\tcirculars = set([])\n\t\tG = nx.from_numpy_matrix(chronostrat, create_using = nx.DiGraph)\n\t\tfor i, j in G.edges:\n\t\t\tif nx.has_path(G, j, i):\n\t\t\t\tcirculars.add(i)\n\t\t\t\tcirculars.add(j)\n\t\t\n\t\treturn list(circulars)\n\t\n\tdef get_lower_phasing(chronostrat):\n\t\t\n\t\tn_nodes = chronostrat.shape[0]\n\t\tphasing = np.full(n_nodes, np.nan) # phasing[idx] = phase; lower = earlier\n\t\t\n\t\t# assign phase to nodes latest to earliest\n\t\tmask_todo = chronostrat.copy()\n\t\tphase = 0\n\t\twhile mask_todo.any():\n\t\t\tlatest = (mask_todo.any(axis = 0) & ~mask_todo.any(axis = 1))\n\t\t\tphasing[latest] = phase\n\t\t\tmask_todo[:,latest] = False\n\t\t\tphase += 1\n\t\t\n\t\t# assign phases to nodes earliest to latest, if not already assigned\n\t\tmask_todo = chronostrat.copy()\n\t\tphase = n_nodes\n\t\twhile mask_todo.any():\n\t\t\tearliest = (mask_todo.any(axis = 1) & ~mask_todo.any(axis = 0))\n\t\t\tphasing[np.isnan(phasing) & earliest] = phase\n\t\t\tmask_todo[earliest] = False\n\t\t\tphase -= 1\n\t\t\n\t\t# minimize range of phases\n\t\tvals = np.unique(phasing[~np.isnan(phasing)])\n\t\tvals.sort()\n\t\tcollect = phasing.copy()\n\t\tfor val_new, val in enumerate(vals):\n\t\t\tcollect[phasing == val] = val_new\n\t\tphasing = collect\n\t\t\n\t\tmask = (~np.isnan(phasing))\n\t\tif mask.any():\n\t\t\tphasing[mask] = phasing[mask].max() - phasing[mask]\n\t\t\n\t\treturn phasing\n\t\n\tdef get_phasing_limits(idx, phasing_lower, idxs_later, idxs_earlier):\n\t\t\n\t\tphase_min = 0\n\t\tph_later = phasing_lower[idxs_later[idx]]\n\t\tph_later = ph_later[~np.isnan(ph_later)]\n\t\tif ph_later.size:\n\t\t\tphase_max = int(ph_later.min()) - 1\n\t\telse:\n\t\t\tphase_max 
= phasing_lower.max()\n\t\tph_earlier = phasing_lower[idxs_earlier[idx]]\n\t\tph_earlier = ph_earlier[~np.isnan(ph_earlier)]\n\t\tif ph_earlier.size:\n\t\t\tphase_min = int(ph_earlier.max()) + 1\n\t\tif np.isnan(phase_max):\n\t\t\tphase_max = phase_min\n\t\treturn int(phase_min), int(phase_max)\n\t\n\tG = extract_datings(cmodel, \n\t\tdating_cls, name_descr, general_descr, \n\t\tbefore_rel, contains_rel, general_only\n\t)\n\t\n\tnodes = sorted(list(G.nodes()))\n\tn_nodes = len(nodes)\n\tchronostrat = np.zeros((n_nodes, n_nodes), dtype = bool)\n\tfor gi, gj in G.edges():\n\t\tchronostrat[nodes.index(gi), nodes.index(gj)] = True\n\t\n\tnames = dict([(obj_id, G.nodes[obj_id][\"name\"]) for obj_id in G.nodes()])\n\t\n\tcirculars = find_circulars(chronostrat)\n\tif circulars:\n\t\tcirculars = [nodes[idx] for idx in circulars]\n\t\treturn {}, names, circulars\n\t\n\tidxs_later = [np.where(chronostrat[idx])[0] for idx in range(n_nodes)]\n\tidxs_earlier = [np.where(chronostrat[:,idx])[0] for idx in range(n_nodes)]\n\t\n\tphasing_lower = get_lower_phasing(chronostrat)\n\t\n\tphasing = {} # {obj_id: [phase_min, phase_max], ...}\n\tfor idx in range(n_nodes):\n\t\tif (not chronostrat[idx].any()) and (not chronostrat[:,idx].any()):\n\t\t\tphase_min, phase_max = None, None\n\t\telse:\n\t\t\tphase_min, phase_max = get_phasing_limits(idx, phasing_lower, idxs_later, idxs_earlier)\n\t\tobj_id = nodes[idx]\n\t\tphasing[obj_id] = [phase_min, phase_max]\n\t\n\tfor obj_id in phasing:\n\t\tif phasing[obj_id] == [None, None]:\n\t\t\tobj = cmodel.get_object(obj_id)\n\t\t\tfor obj2, rel_ in obj.get_relations():\n\t\t\t\tif rel_ != same_as_rel:\n\t\t\t\t\tcontinue\n\t\t\t\tif phasing[obj2.id] != [None, None]:\n\t\t\t\t\tphasing[obj.id] = phasing[obj2.id].copy()\n\t\t\t\t\tbreak\n\t\n\treturn phasing, names, circulars\n\ndef update_order(cmodel, progress = None):\n\t\n\terrors = []\n\t\n\tupdate_datings(cmodel)\n\tphasing, names, circulars = get_phasing(cmodel)\n\t# phasing = {obj_id: [phase_min, phase_max], ...}\n\t#\tphase_min/max = None if no chronological relations found\n\t# names = {obj_id: name, ...}\n\t# circulars = [obj_id, ...]\n\t\n\tif circulars:\n\t\terrors = [\"Circular relations found between the following datings:\"]\n\t\tfor obj_id in circulars:\n\t\t\terrors.append(\"\\t%s\" % (names[obj_id]))\n\t\treturn errors\n\t\n\tcls = cmodel.get_class(\"Relative_Dating\")\n\tif cls is None:\n\t\treturn [\"Relative_Dating Class not found\"]\n\tfor obj in cls.get_members(direct_only = True):\n\t\tif (progress is not None) and progress.cancel_pressed():\n\t\t\treturn [\"Cancelled by user\"]\n\t\tphase_min, phase_max = phasing[obj.id]\n\t\tobj.set_descriptor(\"Order_Min\", phase_min)\n\t\tobj.set_descriptor(\"Order_Max\", phase_max)\n\t\n\treturn errors\n\ndef vis_order(cmodel, detailed = False):\n\t\n\tphasing, names, circulars = get_phasing(cmodel, general_only = not detailed)\n\t# phasing = {obj_id: [phase_min, phase_max], ...}; phase_min/max = -1 if no chronological relations found\n\t# names = {obj_id: name, ...}\n\t# circulars = [obj_id, ...]\n\t\n\terrors = []\n\t\n\tif circulars:\n\t\terrors = [\"Circular relations found between the following datings:\"]\n\t\tfor obj_id in circulars:\n\t\t\terrors.append(\"\\t%s\" % (names[obj_id]))\n\t\treturn errors\n\t\n\tfor obj_id in list(phasing.keys()):\n\t\tif phasing[obj_id] == [None, None]:\n\t\t\tdel phasing[obj_id]\n\t\n\tphmax = 0\n\tfor i in phasing:\n\t\tphmax = max(phmax, phasing[i][1])\n\n\tnode_ids = sorted(list(phasing.keys()), key = lambda idx: 
phasing[idx][0])\n\t\n\tpyplot.figure(figsize = (12, 6))\n\ty = 0\n\tfor obj_id in node_ids:\n\t\tif (not detailed) and (\",\" in names[obj_id]):\n\t\t\tcontinue\n\t\tx0, x1 = phasing[obj_id]\n\t\tif x0 == x1:\n\t\t\tpyplot.plot([x0], [y], \"o\", color = \"gray\")\n\t\telse:\n\t\t\tpyplot.barh(y = y, width = x1 - x0, left = x0, height = 0.5, color = \"lightgray\")\n\t\tpyplot.text((x0 + x1) / 2, y, names[obj_id], horizontalalignment = \"center\", verticalalignment = \"center\")\n\t\ty += 1\n\tpyplot.xlim(-1, phmax + 1)\n\tpyplot.xticks(list(range(phmax + 1)), list(range(phmax + 2))[1:])\n\tpyplot.yticks([], [])\n\tpyplot.xlabel(\"Order\")\n\tpyplot.tight_layout()\n\tpyplot.show()\n\t\n\treturn errors\n","repo_name":"demjanp/arch14cz_backend","sub_path":"src/arch14cz_backend/utils/fnc_phasing.py","file_name":"fnc_phasing.py","file_ext":"py","file_size_in_byte":8422,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"27479467879","text":"# Stack Big O complexity\n\n# Push: O(1) - Constant Time\n# Pop (remove): O(1) - Constant Time\n# Top (top): O(1) - Constant Time\n# Is Empty: O(1) - Constant Time\n# Size: O(1) - Constant Time\n\n\nclass Emptiness(Exception):\n pass\n\n\nclass Stack:\n def __init__(self):\n self.items = []\n\n def push(self, item):\n self.items.append(item)\n\n def pop(self):\n if self.is_empty():\n raise Emptiness('The Stack is empty')\n\n return self.items.pop()\n\n def is_empty(self):\n return self.size() == 0\n\n def top(self):\n if self.is_empty():\n raise Emptiness('The Stack is empty')\n\n return self.items[-1]\n\n def size(self):\n return len(self.items)\n\n\nstack = Stack()\n\nprint(stack.is_empty())\n\nstack.push(1)\nprint(stack.items)\nstack.push(2)\nprint(stack.items)\nstack.push(3)\nprint(stack.items)\nstack.push(4)\nprint(stack.items)\nstack.push(5)\nprint(stack.items)\n\nprint(stack.is_empty())\nprint(stack.top())\n\nprint(stack.pop())\nprint(stack.pop())\nprint(stack.pop())\nprint(stack.pop())\n\nprint(stack.is_empty())\n\nprint(stack.pop())\n\nprint(stack.is_empty())\n\n# reversing a list\n\n\ndef reverse(bookshelf):\n stack = Stack()\n\n for book in bookshelf:\n stack.push(book)\n\n reversed_bookshelf = []\n\n while not stack.is_empty():\n reversed_bookshelf.append(stack.pop())\n\n return reversed_bookshelf\n\n\nbookshelf = [\n 'Harry Potter',\n 'Atomic Habits',\n 'Leonardo da Vinci',\n 'Sapiens',\n 'Peak'\n]\n\nreversed_bookshelf = reverse(bookshelf)\n\nprint(reversed_bookshelf)\n","repo_name":"imteekay/algorithms","sub_path":"computer_science/data_structures/stack/stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","stars":452,"dataset":"github-code","pt":"61"} +{"seq_id":"37036295499","text":"import csv\nimport sys\n\nmaxInt = sys.maxsize\n\nwhile True:\n try:\n csv.field_size_limit(maxInt)\n break\n except OverflowError:\n maxInt = int(maxInt/10)\ncount = 1\nall = []\nall_likes = []\ndates = []\n\nwith open('fb_profile.csv', 'r', newline='') as file:\n rows = csv.reader(file, delimiter='|')\n # print(len(rows))\n for row in rows:\n try:\n row[37]\n except:\n continue\n link_name_date = []\n friends = row[37]\n friends = friends.strip('[ ]')\n friends_all = friends.split(',')\n likes = row[35]\n likes = likes.strip('[ ]')\n likes_all = likes.split(',')\n date_i = row[4]\n print(date_i)\n date_i = date_i.split('г.')\n date_i = date_i[0].split(',')\n # print(date_i)\n for friend in friends_all:\n all.append(friend)\n # 
print(friend)\n # print(count)\n # count += 1\n for like in likes_all:\n all_likes.append(like)\n # print(len(date))\n if len(date_i) > 1:\n date_i = date_i[1].strip(' ')\n day, month, year = date_i.split(' ')\n if len(day) == 1:\n day = '0' + day\n print(month)\n if month == 'января':\n month = '01'\n elif month == 'февраля':\n month = '02'\n elif month == 'марта':\n month = '03'\n elif month == 'апреля':\n month = '04'\n elif month == 'мая':\n month = '05'\n elif month == 'июня':\n month = '06'\n elif month == 'июля':\n month = '07'\n elif month == 'августа':\n month = '08'\n elif month == 'сентября':\n month = '09'\n elif month == 'октября':\n month = '10'\n elif month == 'ноября':\n month = '11'\n elif month == 'декабря':\n month = '12'\n\n # dates.append(date)\n print(day, month, year)\n date_p = f'{day}-{month}-{year}'\n link_name_date.append(row[0])\n link_name_date.append(row[1])\n link_name_date.append(date_p)\n dates.append(link_name_date)\n\nprint(dates)\nfrom datetime import datetime\n\nfor i in dates:\n date_1 = datetime.strptime(i[2], \"%d-%m-%Y\")\n date_2 = datetime.strptime('05-10-2020', \"%d-%m-%Y\")\n if date_1 >= date_2:\n # print(date_1)\n print(i)\n\n\n\nfrom collections import Counter\n\n\n# c = Counter(all)\n\n# print(type(c))\n# most = c.most_common(40)\n# for mos in most:\n# print(mos)\n#\n# a = Counter(all_likes)\n#\n# print(type(a))\n# most1 = a.most_common(40)\n# for mos in most1:\n# print(mos)\n\nprint(dates)\nc = Counter(dates)\n# print(type(a))\nmost2 = c.most_common(30)\nfor mos in most2:\n print(mos)\n\n\n\n# from datetime import datetime\n# d = datetime.strptime('24 june 2020', \"%d %B %Y\")\n# print(d)\n","repo_name":"aydarbekov/T_live","sub_path":"seleni/fb_analize.py","file_name":"fb_analize.py","file_ext":"py","file_size_in_byte":3068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6372087109","text":"from mingus.core import notes, chords, scales, midi\nfrom mingus.containers import Bar\nfrom mingus.midi import fluidsynth\n\n# Scale, tempo and note-count settings\nscale = scales.get_scale('C', 'major')\nnum_notes = 280\nnum_silences = 280\ntempo = 90\n\n# Create a new bar with the specified key and tempo\nbar = Bar(key='C', time_signature=(3, 4), tempo=tempo)\n\n# Add random notes to the bar\nfor i in range(num_notes):\n note = notes.int_to_note(scale[int(len(scale) * i / num_notes)])\n bar.place_notes(note, 4)\n\n# Add rests to the bar\nfor i in range(num_silences):\n bar.place_rest(4)\n\n# Save the bar to a MIDI file\nmidi_file = 'random_scale.mid'\nmidi.save_composition(midi_file, [bar])\n\n# Play the MIDI file using fluidsynth\nfluidsynth.init('/usr/share/sounds/sf2/FluidR3_GM.sf2', 'alsa')\nfluidsynth.play_MIDI(midi_file)\n","repo_name":"sidineyr/PythonProjects","sub_path":"escalamidimusica.py","file_name":"escalamidimusica.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17236370338","text":"from array import input_array, print_array\nfrom measurements import *\n\nEXIT = 0\nBEAD_SORT = 1\nCOUNTING_SORT = 2\nGNOME_SORT = 3\nMEASUREMENTS = 4\n\nINVALID_CHOICE_MSG = \"Неверный выбор!\"\nINVALID_INPUT_MSG = \"Неверный ввод! 
Размер и элементы массива должны быть целыми числами!\"\n\nMENU_INFO = \\\n'''\nМЕНЮ:\n1 - Сортировка бусинами\n2 - Сортировка подсчетом\n3 - Гномья сортировка\n4 - Замеры времени\n0 - Выход\nВыбор:\n'''\n\ndef print_error(error_msg):\n print(error_msg)\n\ndef run():\n choice = 1\n while choice != EXIT:\n choice = int(input(MENU_INFO))\n if choice == BEAD_SORT:\n try:\n arr = input_array()\n except ValueError:\n print_error(INVALID_INPUT_MSG)\n return\n bead_sort(arr)\n print_array(arr)\n elif choice == COUNTING_SORT:\n try:\n arr = input_array()\n except ValueError:\n print_error(INVALID_INPUT_MSG)\n return\n counting_sort(arr)\n print_array(arr)\n elif choice == GNOME_SORT:\n try:\n arr = input_array()\n except ValueError:\n print_error(INVALID_INPUT_MSG)\n return\n gnome_sort(arr)\n print_array(arr)\n elif choice == MEASUREMENTS:\n time_measurements()\n elif choice != EXIT:\n print_error(INVALID_CHOICE_MSG)\n \n\nif __name__ == \"__main__\":\n run()\n\n","repo_name":"kuzkuss/BMSTU_AA","sub_path":"lab_03/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27481208370","text":"from PyQt5 import QtCore, QtGui\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtWidgets import QLabel\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtCore import pyqtSignal\n\n\nclass MyLabel(QLabel):\n clickedk = pyqtSignal(int,int)\n def __init__(self, img=None):\n super(MyLabel, self).__init__()\n self.piece=-1 #stores piece number\n self.i=0 #stores x coordinate of this label on grid layout\n self.j=0 #stores y coordinate\n\n if img is not None:\n self.pixmap = QPixmap(img)\n else:\n self.pixmap = None\n\n def paintEvent(self, event):\n if self.pixmap is not None:\n size = self.size()\n painter = QtGui.QPainter(self)\n point = QtCore.QPoint(0, 0)\n scaledPix = self.pixmap.scaled(size, Qt.KeepAspectRatio, transformMode=Qt.SmoothTransformation)\n # start painting the label from left upper corner\n point.setX((size.width() - scaledPix.width()) / 2)\n point.setY((size.height() - scaledPix.height()) / 2)\n painter.drawPixmap(point, scaledPix)\n else:\n super(MyLabel, self).paintEvent(event)\n\n def change_pixmap(self, img):\n self.pixmap = QPixmap(img)\n self.repaint()\n\n def mousePressEvent(self, QMouseEvent):\n self.clickedk.emit(self.i,self.j)\n","repo_name":"kushagrasurana/3Knights","sub_path":"mylabel.py","file_name":"mylabel.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"7650229843","text":"from EMORL.misc import policy_similarity, normalize\nfrom EMORL.Individual import Individual\nfrom sklearn.metrics.pairwise import rbf_kernel\nimport numpy as np\n\nnp.set_printoptions(precision=4, suppress=True)\n\na = Individual(0, 60, 10, [], batch_dim=(20,1), trainable=True)\nb = Individual(0, 60, 10, [], batch_dim=(20,1), trainable=True)\nc = Individual(0, 60, 10, [], batch_dim=(20,1), trainable=True)\npop = [a, b, c]\n\na_w = a.genotype['brain'].get_training_params()\na_w['actor_core'][0][0][::2] += np.random.random(a_w['actor_core'][0][0][::2].shape)*0.1\na_w['actor_core'][0][1][::3] -= np.random.random(a_w['actor_core'][0][1][::3].shape)*0.1\n\nb_w = b.genotype['brain'].get_training_params()\nb_w['actor_core'][0][0][::5] -= np.random.random(b_w['actor_core'][0][0][::5].shape)*0\nb_w['actor_core'][0][1][::1] -= 
np.random.random(b_w['actor_core'][0][1][::1].shape)*0\n\nc.genotype['brain'].set_training_params(a_w)\nb.genotype['brain'].set_training_params(b_w)\n\nstate = np.random.random((20,1,60)) * 0.1\nstate += np.random.normal(0,0.1, (20,1, 60))\nout = np.array([i.probabilities_for(state) for i in pop])\nent = [-np.mean(np.sum(p * np.log(p), axis=-1)) for p in out]\nflattened = out.reshape(3, 20*10)\nnormalized = normalize(flattened)\n#print(normalized)\n#print(out, ent, policy_similarity(*normalized[1:], l=5), policy_similarity(*normalized[:-1], l=5), policy_similarity(normalized[0], normalized[2], l=5))\n\nK = rbf_kernel(normalized[:])\nprint(ent)\nprint(K, np.linalg.det(K))\n","repo_name":"villinvic/divergentEMORL","sub_path":"kl_test.py","file_name":"kl_test.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11177145337","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport numpy as np\nimport xarray as xr\n\nfrom sklearn.decomposition import PCA\n\nfrom elm.config import ConfigParser\nfrom elm.config.tests.fixtures import *\nfrom elm.pipeline.tests.util import (test_one_config as tst_one_config,\n tmp_dirs_context)\nfrom elm.sample_util.make_blobs import random_elm_store\nfrom earthio.reshape import *\nfrom earthio import ElmStore\nfrom elm.pipeline import Pipeline\n\nX = random_elm_store()\n\ndata_source = {'X': X}\n\ntrain = {'model_init_class': 'sklearn.cluster:MiniBatchKMeans',\n 'ensemble': 'ens1'}\n\n\ndef make_run(pipeline, data_source):\n run = [{'data_source': 'synthetic',\n 'pipeline': pipeline,\n 'train': 'ex1'}]\n return run\n\n\ndef make_config(pipeline, data_source):\n return {'train': {'ex1': train},\n 'data_sources': {'synthetic': data_source},\n 'run': make_run(pipeline, data_source),\n 'ensembles': {\n 'ens1': {\n 'saved_ensemble_size': 1,\n 'init_ensemble_size': 1\n }\n }\n }\n\n\ndef tst_one_pipeline(pipeline,\n add_na_per_band=0,\n na_fields_as_str=True,\n delim='_'):\n from elm.sample_util.sample_pipeline import make_pipeline_steps\n sample = random_elm_store()\n if add_na_per_band:\n for idx, band in enumerate(sample.data_vars):\n band_arr = getattr(sample, band)\n val = band_arr.values\n inds = np.arange(val.size)\n np.random.shuffle(inds)\n x = inds // val.shape[0]\n y = inds % val.shape[0]\n slc = slice(None, add_na_per_band // 2)\n val[y[slc],x[slc]] = 99 * idx\n band_arr.attrs['missing{}value'.format(delim)] = 99 * idx\n slc = slice(add_na_per_band // 2, add_na_per_band)\n val[y[slc], x[slc]] = 199 * idx\n band_arr.attrs['invalid{}range'.format(delim)] = [198 * idx, 200 * idx]\n band_arr.attrs['valid{}range'.format(delim)] = [-1e12, 1e12]\n if na_fields_as_str:\n for field in ('missing{}value', 'invalid{}range', 'valid{}range'):\n field = field.format(delim)\n v = band_arr.attrs[field]\n if isinstance(v, list):\n band_arr.attrs[field] = ', '.join(map(str,v))\n else:\n band_arr.attrs[field] = str(v)\n assert val[np.isnan(val)].size == 0\n config = ConfigParser(config=make_config(pipeline, data_source))\n pipe = Pipeline(make_pipeline_steps(config, pipeline))\n new_es = pipe.fit_transform(sample)\n return sample, new_es[0]\n\n\ndef test_flat_and_inverse():\n\n flat = [{'flatten': 'C'}, {'inverse_flatten': True}, {'transpose': ['y', 'x']}]\n es, new_es = tst_one_pipeline(flat)\n assert np.all(new_es.band_1.values == es.band_1.values)\n\n\ndef test_agg():\n for dim, axis in zip(('x', 'y'), (1, 0)):\n for r in 
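kl_test.py above flattens each policy's action probabilities and hands them to sklearn's rbf_kernel, whose entries are exp(-gamma * ||x - y||**2). A sketch verifying one entry against a manual computation (the random matrix and gamma value are made up for illustration):

import numpy as np
from sklearn.metrics.pairwise import rbf_kernel

rng = np.random.default_rng(0)
flat = rng.random((3, 8))            # 3 policies, flattened probability vectors

gamma = 0.5
K = rbf_kernel(flat, gamma=gamma)    # pairwise similarity matrix; K[i, i] == 1.0

# Manual check of a single off-diagonal entry:
d2 = np.sum((flat[0] - flat[1]) ** 2)
assert np.isclose(K[0, 1], np.exp(-gamma * d2))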
range(2):\n if r == 0:\n agg = [{'agg': {'dim': dim, 'func': 'mean'}}]\n else:\n agg = [{'agg': {'axis': axis, 'func': 'mean'}}]\n es, new_es = tst_one_pipeline(agg)\n assert dim in es.band_1.dims\n assert dim not in new_es.band_1.dims\n means = np.mean(es.band_1.values, axis=axis)\n new_means = new_es.band_1.values\n diff = np.abs(means - new_means)\n assert np.all(diff < 1e-5)\n\n\ndef test_transpose():\n transpose_examples = {\n 'xy': [{'transpose': ['x', 'y']}],\n 'inv': [{'flatten': 'C'},\n {'transpose': ['band', 'space']},\n {'transpose': ['space', 'band']},\n {'inverse_flatten': True},\n {'transpose': ['y', 'x']},\n ]\n }\n transpose_examples['fl'] = transpose_examples['xy'] + [{'flatten': 'C'}, {'inverse_flatten': True}, ]\n for name, pipeline in sorted(transpose_examples.items()):\n es, new_es = tst_one_pipeline(pipeline)\n if name == 'fl':\n assert es.band_1.values.T.shape == new_es.band_1.values.shape\n assert np.all(es.band_1.values.T == new_es.band_1.values)\n if name == 'xy':\n assert es.band_1.values.shape == (new_es.band_1.values.shape[1], new_es.band_1.values.shape[0])\n assert np.all(es.band_1.values.T == new_es.band_1.values)\n if 'inv' in name:\n assert es.band_1.values.shape == new_es.band_1.values.shape\n diff = es.band_1.values - new_es.band_1.values\n assert np.all(np.abs(diff) < 1e-5)\n\ndef modify_sample_example(es, *args, **kwargs):\n\n new_es = {}\n for band in es.data_vars:\n band_arr = getattr(es, band)\n v = band_arr.values / band_arr.values.mean(axis=0)\n new_es[band] = xr.DataArray(v, coords=band_arr.coords, dims=band_arr.dims)\n v2 = (band_arr.T.values / band_arr.values.mean(axis=1)).T\n new_es[band + '_new'] = xr.DataArray(v2, coords=band_arr.coords, dims=band_arr.dims)\n return ElmStore(new_es, attrs=es.attrs)\n\n\ndef test_modify_sample():\n modify = [{'modify_sample': 'elm.sample_util.tests.test_change_coords:modify_sample_example'}]\n es, new_es = tst_one_pipeline(modify)\n assert np.all([np.all(getattr(es,b).values.shape == getattr(new_es, b).values.shape) for b in es.data_vars])\n new_names = set(es.band_order) - set(new_es.band_order)\n assert all('new' in n for n in new_names)\n flat = flatten(new_es)\n assert not len(set(tuple(flat.flat.band.values)) ^ set(new_es.band_order))\n inv = inverse_flatten(flat)\n for band in inv.data_vars:\n band_arr = getattr(inv, band)\n assert band_arr.values.shape == getattr(new_es, band).values.shape\n\ndef test_agg_inverse_flatten():\n for idx, dims in enumerate((['x', 'y'], ['y', 'x'])):\n for agg_dim in ('x', 'y'):\n agg = {'agg': {'dim': agg_dim, 'func': 'median'}}\n pipeline = [{'transpose': dims},\n {'flatten': 'C'},\n {'inverse_flatten': True},\n {'transpose': dims}]\n es, new_es = tst_one_pipeline(pipeline)\n if idx == 0:\n assert new_es.band_1.shape == es.band_1.values.T.shape\n es, new_es = tst_one_pipeline(pipeline + [agg])\n x1, x2 = (getattr(s.band_1, 'x', None) for s in (es, new_es))\n y1, y2 = (getattr(s.band_1, 'y', None) for s in (es, new_es))\n if agg_dim == 'x':\n assert x1 is not None and x2 is None\n assert y1 is not None and y2 is not None\n else:\n assert y1 is not None and y2 is None\n assert x1 is not None and x2 is not None\n\n\ndef test_set_na_from_meta():\n set_na = [{'modify_sample': 'earthio:set_na_from_meta'}]\n for delim in ('_', '-', ' ', ' '):\n for as_str in (True, False):\n es, new_es = tst_one_pipeline(set_na, add_na_per_band=13,\n na_fields_as_str=as_str,\n delim=delim)\n assert np.all([np.all(getattr(es,b).values.shape == getattr(new_es, b).values.shape) for b in 
es.data_vars])\n for band in es.data_vars:\n has_nan = getattr(new_es, band).values\n assert has_nan.size - 13 == has_nan[~np.isnan(has_nan)].size\n","repo_name":"HKCaesar/elm","sub_path":"elm/sample_util/tests/test_change_coords.py","file_name":"test_change_coords.py","file_ext":"py","file_size_in_byte":7542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"41191492773","text":"import FWCore.ParameterSet.Config as cms\n\nhltForwardBackwardJetsFilterRecoCaloJet = cms.EDFilter('HLTForwardBackwardCaloJetsFilter',\n saveTags = cms.bool(True),\n inputTag = cms.InputTag('hltIterativeCone5CaloJetsRegional'),\n minPt = cms.double(15),\n minEta = cms.double(3),\n maxEta = cms.double(5.1),\n nNeg = cms.uint32(1),\n nPos = cms.uint32(1),\n nTot = cms.uint32(0),\n triggerType = cms.int32(85),\n mightGet = cms.optional.untracked.vstring\n)\n","repo_name":"cms-sw/cmssw-cfipython","sub_path":"HLTrigger/JetMET/hltForwardBackwardJetsFilterRecoCaloJet_cfi.py","file_name":"hltForwardBackwardJetsFilterRecoCaloJet_cfi.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"25680237293","text":"import torch\nimport random\nimport numpy as np\nimport sys\nfrom collections import deque\nfrom snake_game import SnakeGame, Direction, Point\nfrom model import Linear_QNet, QTrainer\nfrom helper import plot\n\nMAX_MEMORY = 100_000\nBATCH_SIZE = 1000\nLEARNING_RATE = 0.001\n\nclass Agent:\n def __init__(self, model_type, learning_rate):\n self.no_games = 0\n self.epsilon = 0\n self.gamma = 0.9\n self.memory = deque(maxlen=MAX_MEMORY)\n self.model = Linear_QNet(11, 256, 3)\n self.model_type = model_type\n self.trainer = QTrainer(self.model, learning_rate=learning_rate, gamma=self.gamma, model_type=model_type)\n\n def get_state(self, game):\n head = game.snake[0]\n point_l = Point(head.x - 20, head.y)\n point_r = Point(head.x + 20, head.y)\n point_u = Point(head.x, head.y - 20)\n point_d = Point(head.x, head.y + 20)\n \n dir_l = game.direction == Direction.LEFT\n dir_r = game.direction == Direction.RIGHT\n dir_u = game.direction == Direction.UP\n dir_d = game.direction == Direction.DOWN\n\n state = [\n # Pericol fata\n (dir_r and game.is_collision(point_r)) or\n (dir_l and game.is_collision(point_l)) or\n (dir_u and game.is_collision(point_u)) or\n (dir_d and game.is_collision(point_d)),\n\n # Pericol dreapta\n (dir_u and game.is_collision(point_r)) or\n (dir_d and game.is_collision(point_l)) or\n (dir_l and game.is_collision(point_u)) or\n (dir_r and game.is_collision(point_d)),\n\n # Pericol stanga\n (dir_d and game.is_collision(point_r)) or\n (dir_u and game.is_collision(point_l)) or\n (dir_r and game.is_collision(point_u)) or\n (dir_l and game.is_collision(point_d)),\n\n # Directie\n dir_l,\n dir_r,\n dir_u,\n dir_d,\n\n # Mancare\n game.food.x < game.head.x,\n game.food.x > game.head.x,\n game.food.y < game.head.y,\n game.food.y > game.head.y\n ]\n \n return np.array(state, dtype=int)\n \n def remember(self, state, action, reward, next_state, done):\n self.memory.append((state, action, reward, next_state, done))\n\n def train_long_memory(self):\n if len(self.memory) > BATCH_SIZE:\n mini_sample = random.sample(self.memory, BATCH_SIZE)\n else:\n mini_sample = self.memory\n \n states, actions, rewards, next_states, dones = zip(*mini_sample)\n self.trainer.train_step(states, actions, rewards, next_states, dones)\n\n def train_short_memory(self, state, action, reward, 
next_state, done):\n self.trainer.train_step(state, action, reward, next_state, done)\n\n def get_action(self, state):\n self.epsilon = 80 - self.no_games\n final_move = [0, 0, 0]\n if random.randint(0, 200) < self.epsilon:\n move = random.randint(0, 2)\n final_move[move] = 1\n else:\n state0 = torch.tensor(state, dtype=torch.float)\n prediction = self.model(state0)\n move = torch.argmax(prediction).item()\n final_move[move] = 1\n \n return final_move\n \ndef train(model_type, learning_rate):\n plot_scores = []\n plot_mean_scores = []\n total_score = 0\n record = 0\n agent = Agent(model_type, learning_rate)\n game = SnakeGame(model_type=model_type)\n while True:\n state_old = agent.get_state(game)\n final_move = agent.get_action(state_old)\n \n reward, done, score = game.play_step(final_move)\n state_new = agent.get_state(game)\n \n agent.train_short_memory(state_old, final_move, reward, state_new, done)\n agent.remember(state_old, final_move, reward, state_new, done)\n \n if done:\n game.reset()\n agent.no_games += 1\n agent.train_long_memory()\n\n if score > record:\n record = score\n agent.model.save('model_' + model_type + '.pth')\n \n print('Game', agent.no_games, 'Score', score, 'Record:', record)\n\n plot_scores.append(score)\n\n total_score += score\n mean_score = total_score / agent.no_games\n plot_mean_scores.append(mean_score)\n\n plot(plot_scores, plot_mean_scores, model_type, learning_rate)\n\nif __name__ == '__main__':\n train(sys.argv[1], float(sys.argv[2]))","repo_name":"mihnealook/aljv-snake-ai","sub_path":"agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":4540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11300912111","text":"\nfrom os import link\n\nimport pyautogui\nimport time\n\n\n#here you can add url code for each websites\ngroup= ['234468325515498','363916517127998','1171843326268182']\n\ntime.sleep(5)\n\n\n\n\n\n\nfor i in range(len(group)):\n pyautogui.hotkey('ctrl','t')\n links= 'www.facebook.com/groups/'+group[i]\n \n pyautogui.typewrite(links) \n pyautogui.typewrite('\\n') \n print(\"waiting for 20 secondds \\n\") \n time.sleep(20)\n pyautogui.hotkey('ctrl','f')\n pyautogui.typewrite(\"Exprimez-vous...\") \n pyautogui.press('enter')\n pyautogui.press('escape')\n pyautogui.press('enter')\n time.sleep(3)\n pyautogui.typewrite(\"Salam alykom <3 .\\n\")\n pyautogui.typewrite(\"disponible iphonet.\\n\")\n pyautogui.typewrite(\"Visit l page 5lilna Message w amml commande.\\n\")\n pyautogui.typewrite('www.facebook.com/profile.php?id=100085250791184')\n time.sleep(10)\n pyautogui.press('Tab',presses=10)\n pyautogui.press('enter')\n \n time.sleep(20)\n\n ","repo_name":"wassim-Laouini/auto-post-script","sub_path":"autopost.py","file_name":"autopost.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12466104441","text":"##Write a program that asks the user to enter a number. If the number is divisible by 4, displays ‘The \n##number is divisible by 4’ and displays the quotient of the division. 
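The snake agent above anneals exploration linearly (epsilon = 80 - no_games, rolled against randint(0, 200)), so random moves stop entirely after 80 games. A stripped-down sketch of that schedule, with the network stubbed out (choose_greedy stands in for the model's argmax forward pass):

import random

def get_action(n_games, choose_greedy, n_actions=3):
    epsilon = 80 - n_games                        # linear decay; <= 0 disables exploration
    if random.randint(0, 200) < epsilon:
        move = random.randint(0, n_actions - 1)   # explore
    else:
        move = choose_greedy()                    # exploit the learned policy
    one_hot = [0] * n_actions
    one_hot[move] = 1
    return one_hot

print(get_action(0, lambda: 0))     # early training: often a random move
print(get_action(120, lambda: 0))   # late training: always the greedy move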
If the number is not divisible by 4, \n##displays ‘The number is not divisible by 4’ and displays the quotient and the remainder of the division.\n\nnumber = int(input(\"Please enter a number: \"))\n\nif number % 4 == 0:\n    print(\"The number is divisible by 4\")\n    print(\"Quotient:\", number // 4)\nelse:\n    print(\"The number is not divisible by 4\")\n    print(\"Quotient:\", number // 4)\n    print(\"Remainder:\", number % 4)","repo_name":"Anvar1999/PythonCoursework","sub_path":"src/Lab/Lab 2_5.py","file_name":"Lab 2_5.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23805818327","text":"# -*- coding: utf-8 -*-\n\"\"\"This is the main module, which conducts the whole process of change making.\n\n\"\"\"\n\n\nfrom change_maker import ChangeMaker\nfrom coins_reader import CoinsReader\nfrom result_printer import ResultPrinter\nimport config as config\n\n\nclass Changer(object):\n\n    @classmethod\n    def run(cls):\n        \"\"\"1. Capture the input target amount\n        2. Read the coin set from the coin-set file\n        3. Make change for the target amount\n        4. Output a list of required coins and their quantities.\n\n        \"\"\"\n\n        try:\n            target_amount = int(input(\"> Enter your change in cents:\\n> \"))\n        except ValueError:\n            print(\"This was not a number, please try again.\")\n            return None\n\n        # read coins set\n        coins_reader = CoinsReader(config.SETTING_DIR + '/coin_set.json')\n        coins = coins_reader.get_coin_set()\n\n        # make change to a target amount\n        change_maker = ChangeMaker(target_amount, coins)\n        change = change_maker.make_change()\n\n        # print formatted result\n        result_printer = ResultPrinter(coins, change)\n        result_printer.print_result()\n\n\nif __name__ == '__main__':\n    changer = Changer()\n    changer.run()\n","repo_name":"egmontsong/money-changer","sub_path":"changer/changer.py","file_name":"changer.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"933687074","text":"import discum\nimport config,time\nfrom discum.gateway.session import guild\n\nguild_id = config.guildId\nchannel_id = config.channelId\nbot = discum.Client(token=config.tokenAcc)\nbot.gateway.fetchMembers(guild_id, channel_id, keep=['username','discriminator','premium_since'],startIndex=0, method='overlap',wait=1)\n@bot.gateway.command\ndef memberTest(resp):\n    if bot.gateway.finishedMemberFetching(guild_id):\n        lenmembersfetched = len(bot.gateway.session.guild(guild_id).members)\n        print(str(lenmembersfetched)+' members fetched')\n        bot.gateway.removeCommand(memberTest)\n        bot.gateway.close()\n\nbot.gateway.run()\n\nwith open('result.txt', 'w', encoding=\"utf-8\") as file :\n    for memberID in bot.gateway.session.guild(guild_id).members:\n        id = str(memberID)\n        #temp = bot.gateway.session.guild(guild_id).members[memberID].get('public_flags')\n        user = str(bot.gateway.session.guild(guild_id).members[memberID].get('username'))\n        disc = str(bot.gateway.session.guild(guild_id).members[memberID].get('discriminator'))\n        username = f'{user}#{disc}'\n        #creation_date = str(time.strftime('%d-%m-%Y %H:%M:%S', time.localtime(((int(id) >> 22) + 1420070400000) / 1000)))\n        #if temp != None:\n            #z = __get_badges(temp)\n            #if len(z) != 0:\n                #badges = ', '.join(z)\n        print(f'ID: @{id} | Username: {username}')\n        file.write(f'ID: @{id} | Username: {username}\\n')\n","repo_name":"rahuljoshua77/ScrapeUsernameMemberChannelDiscord","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} 
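changer.py above delegates the actual computation to ChangeMaker, which is not part of this record. For canonical coin systems such as US denominations, the textbook approach is a greedy pass from the largest coin down; the sketch below is an assumption about what make_change could look like, not the project's actual implementation:

def make_change(amount, coins):
    # Greedy change-making: correct for canonical coin sets only.
    result = {}
    for coin in sorted(coins, reverse=True):
        count, amount = divmod(amount, coin)
        if count:
            result[coin] = count
    return result

print(make_change(287, [1, 5, 10, 25]))  # {25: 11, 10: 1, 1: 2}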
+{"seq_id":"22431508155","text":"import json\n\nfrom api.utils import failure_response\nfrom api.utils import modify_attribute\nfrom api.utils import success_response\nfrom match import match_status\nfrom rest_framework import status\nfrom survey import constants\nfrom survey.models import Survey\n\n\n# TODO(@chalo2000) Convert to celery task\nclass CreateSurveyController:\n def __init__(self, request, data, match_id):\n self._request = request\n self._data = data\n self._match_id = match_id\n\n def process(self):\n # Verify that all required fields are provided\n did_meet = self._data.get(\"did_meet\")\n if did_meet is None:\n return failure_response(\"did_meet\", status.HTTP_400_BAD_REQUEST)\n\n # Get optional fields based on did_meet\n did_meet_reason = self._data.get(\"did_meet_reason\")\n did_not_meet_reasons = self._data.get(\"did_not_meet_reasons\")\n rating = self._data.get(\"rating\")\n if did_meet:\n if rating is None:\n return failure_response(\n \"Rating is required for a completed match\",\n status.HTTP_400_BAD_REQUEST,\n )\n elif rating not in constants.RATINGS:\n return failure_response(\n \"The provided rating is invalid\", status.HTTP_400_BAD_REQUEST\n )\n else:\n if not did_not_meet_reasons:\n return failure_response(\n \"did_not_meet_reasons is required if did_meet is False\",\n status.HTTP_400_BAD_REQUEST,\n )\n if not all(\n map(\n lambda x: x in constants.DID_NOT_MEET.values(), did_not_meet_reasons\n )\n ):\n return failure_response(\n \"did_not_meet_reasons has invalid elements\",\n status.HTTP_400_BAD_REQUEST,\n )\n\n did_not_meet_reasons = list(\n map(lambda x: constants.DID_NOT_MEET_REV[x], did_not_meet_reasons)\n )\n if len(did_not_meet_reasons) > 5:\n return failure_response(\n \"Too many elements in did_not_meet_reasons\",\n status.HTTP_400_BAD_REQUEST,\n )\n\n # Commenting this out because iOS is sending the new match_id as the match_id,\n # but we need the previous week's match id, which we can actually get the\n # user's previous match using their match history. 
Not the most efficient, but\n        #  a temporary fix.\n        #\n        # Verify that required ids are valid\n        # completed_match = Match.objects.filter(id=self._match_id)\n        # if not completed_match:\n        #     return failure_response(\"match_id is invalid\", status.HTTP_404_NOT_FOUND)\n        # completed_match = completed_match[0]\n\n        # The user may be either participant of a match; matches_2 is assumed to\n        # be the reverse accessor for the second slot of the Match model.\n        prev_matches = (\n            self._request.user.matches_1.all() | self._request.user.matches_2.all()\n        ).order_by(\"-created_date\")\n\n        if len(prev_matches) < 2:\n            return failure_response(\n                \"User does not have enough matches\", status.HTTP_400_BAD_REQUEST\n            )\n\n        # index 0 -> current match, 1 -> previous week, etc\n        completed_match = prev_matches[1]\n\n        # Check if the submitting user has already submitted a survey\n        survey = Survey.objects.filter(\n            submitting_person=self._request.user.person, completed_match=completed_match\n        )\n        if survey:\n            return failure_response(\n                \"This user has already submitted feedback to the provided match!\",\n                status.HTTP_403_FORBIDDEN,\n            )\n\n        # Update match status based on feedback\n        if not did_meet:\n            # Cancel a match if any person says they did not meet\n            modify_attribute(completed_match, \"status\", match_status.CANCELED)\n        # Force a match to stay canceled if set before\n        # TODO(@team) Check how often this edge case fails through explanations\n        elif completed_match.status != match_status.CANCELED:\n            modify_attribute(completed_match, \"status\", match_status.INACTIVE)\n        completed_match.save()\n\n        # Create and return a new survey with the given fields\n        survey = Survey.objects.create(\n            did_meet=did_meet,\n            did_meet_reason=did_meet_reason,\n            did_not_meet_reasons=json.dumps(did_not_meet_reasons)\n            if not did_meet\n            else None,\n            rating=rating,\n            submitting_person=self._request.user.person,\n            completed_match=completed_match,\n        )\n        self._request.user.person.pending_feedback = False\n        survey.save()\n        self._request.user.person.save()\n        return success_response(None, status.HTTP_201_CREATED)\n","repo_name":"cuappdev/pear-django-backend","sub_path":"src/survey/controllers/create_survey_controller.py","file_name":"create_survey_controller.py","file_ext":"py","file_size_in_byte":4822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38275780184","text":"import subprocess\nfrom tqdm import tqdm\n# GA \nGAbase = 'cd GA \\n java Application.java'\n\ncrossOverVals = [0.5,0.55,0.6,0.65,0.7]\nmutationVals = [0.001,0.002,0.003,0.004,0.005]\npopulationSize = [1000,1500,2000,2500,3000,3500]\n# crossOverVals = [0.5,0.55]\n# mutationVals = [0.001]\n# populationSize = [1000]\n\n\nfitnesses = []\nprocesses = set()\ncounter = 0\n\nnumThreads = 10  # Be warned: this uses a lot of memory! 10 ~ 10GB of RAM (on my machine). Additionally, it is only really helpful with a many-core CPU. 
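The sweep above juggles Popen handles and drains them in hand-rolled batches of numThreads. A worker pool expresses the same fan-out with less bookkeeping; in this sketch concurrent.futures does the batching, and the echo command is a stand-in for the actual java invocation:

import subprocess
from concurrent.futures import ThreadPoolExecutor
from itertools import product

def run_one(params):
    cross, mut, pop = params
    cmd = f"echo {cross} {mut} {pop}"   # stand-in for the GA command line
    out = subprocess.run(cmd, shell=True, capture_output=True, text=True).stdout
    return f"crossover: {cross} mutation: {mut} popSize: {pop} fitness: {out.strip()}"

grid = product([0.5, 0.55], [0.001, 0.002], [1000, 1500])
with ThreadPoolExecutor(max_workers=10) as pool:
    for line in pool.map(run_one, grid):
        print(line)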
\n\noutPutControlArr = []\n\nfor crossOver in tqdm(crossOverVals, desc= \"cross over values\"):\n    for mutation in (mutationVals):\n        for popSize in (populationSize):\n            \n            processes.add(subprocess.Popen(GAbase + \" \" + str(crossOver) + \" \" + str(mutation) + \" \" + str(popSize), shell=True, stdout=subprocess.PIPE))\n            counter+=1\n            outPutControlArr.append(\"crossover: \" + str(crossOver) + \" mutation: \" + str(mutation) + \" popSize: \" + str(popSize))\n            if(counter%numThreads == 0 and counter != 0):\n                print(\"Starting Thread Batch: \" + str(counter/numThreads) + \"/\" + str(len(crossOverVals)*len(mutationVals)*len(populationSize)/numThreads))\n                outputControlCounter = 0\n                for p in processes:\n                    out = p.communicate()[0].decode(\"utf-8\")\n                    fitnesses.append(outPutControlArr[outputControlCounter] + \" fitness: \" + out)\n                    outputControlCounter+=1\n                processes.clear()\n                outPutControlArr = []\n\n# Clean up any remaining processes; this happens when the total number of runs is not divisible by numThreads\noutputControlCounter = 0\nfor p in processes:\n    out = p.communicate()[0].decode(\"utf-8\")\n    fitnesses.append(outPutControlArr[outputControlCounter] + \" fitness: \" + out)\n    outputControlCounter+=1\n\n\nfitMin = 100000\nsaveFitMin = \"\"\nfor f in fitnesses:\n    if(float(f.split(\" \")[-1]) < fitMin):\n        fitMin = float(f.split(\" \")[-1])\n        saveFitMin = f\n\n# write fitnesses to file\nwith open('GAparamRecomender2.txt', 'w') as f:\n    for item in fitnesses:\n        f.write(item)\n    f.write(\"Recommended parameters for GA: \" + str(saveFitMin))","repo_name":"Liam-Watson/MetaheuristicsCVRPTW","sub_path":"parameterRecomenderGA.py","file_name":"parameterRecomenderGA.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23558612771","text":"#!/usr/bin/python3\ninputFile = \"2_large.in\"\n\ndef processTestCase(number):\n\twhile number > 0:\n\t\tif isTidy(number):\n\t\t\treturn number\n\t\tnumber -= getMinSub(number)\n\ndef isTidy(number):\n\tstrnum = str(number)\n\tfor i in range(len(strnum) - 1):\n\t\tif strnum[i] > strnum[i+1]:\n\t\t\treturn 0\n\treturn 1\n\ndef getMinSub(number):\n\tstrnum = str(number)\n\tfor i in range(len(strnum) - 1):\n\t\tif strnum[i] > strnum[i+1]:\n\t\t\treturn int(strnum[i+1:]) + 1\n\nwith open(inputFile) as f:\n\tcontent = f.read().split(\"\\n\")[1:-1]\n\ti = 1\n\tfor line in content:\n\t\tprint(\"Case #%d: %d\" % (i, processTestCase(int(line))))\n\t\ti += 1\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/3445.py","file_name":"3445.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3507393057","text":"import argparse\n\n\"\"\"\nCompare two hash files and check for different hashes or missing data files. Report missing files on the command line and \nin the output file. \nWritten to compare the output of the md5deep-4.3 software.\nHash files are to be in the format:\nhash1 /path/to/data/file1.exten\nhash2 /path/to/data/file2.exten\n...\n\nExample Usage\n-------------\npython compare_hashes.py examples/hashlist1.txt examples/Atlas_Data_Runt_Mobile5drive.txt examples/hash_comparison.txt\npython compare_hashes.py examples/hashlist1.txt examples/hashlist_tweak.txt examples/hash_comparison.txt\n\n\n\"\"\"\n\n\ndef parse_hashlist(input_filename):\n    \"\"\"Load a dictionary of hashes from the txt file at input_filename. 
This is equivalent to lattice_elasticity.load_params()\n\n    Parameters\n    ----------\n    input_filename: str\n        The path to the txt file with hashes and filenames.\n\n    Returns\n    -------\n    hashd: dict\n        A dictionary mapping each filename to its hash\n\n    \"\"\"\n    hashd = {}\n    with open(input_filename) as f:\n        for line in f:\n            line = line.strip()\n            if line and '# ' not in line:\n                # md5deep separates the hash and the path with whitespace; split\n                # only once so filenames containing spaces stay intact\n                digest, fn = line.split(None, 1)\n                hashd[fn] = digest\n\n    return hashd\n\n\ndef compare_hash_dicts(hd1, hd2):\n    \"\"\"Compare two hash dictionaries for similarity\n\n    Parameters\n    ----------\n    hd1: dict\n        hash dictionary 1\n    hd2: dict\n        hash dictionary 2\n\n    Returns\n    -------\n    comp : dict\n        dictionary with keys:\n        filename : str, filename\n        missing : dict with (keys, values)\n            (filename, hash) : str, str\n        extra : dict with (keys, values)\n            (filename, hash) : str, str\n        different : dict with (keys, values)\n            (filename, [hash, hash_master]) : str, list of strings\n        filename_master : str, filename\n    \"\"\"\n    comp = {'missing': {}, 'extra': {}, 'different': {}}\n    for key in hd1:\n        if key in hd2:\n            if hd1[key] != hd2[key]:\n                comp['different'][key] = [hd1[key], hd2[key]]\n        else:\n            comp['extra'][key] = hd1[key]\n\n    for key in hd2:\n        if key not in hd1:\n            comp['missing'][key] = hd2[key]\n\n    return comp\n\n\ndef write_comparison_dict(dd, outfn, fn1, fn2, padding_var=7):\n    \"\"\"Write the comparison to a text file with easy-to-read formatting\n\n    Parameters\n    ----------\n    dd : dict\n        the comparison dictionary\n    outfn : str\n        file to write the comparison to disk\n\n    Returns\n    -------\n    \"\"\"\n    with open(outfn, 'w') as myfile:\n        myfile.write('# Comparison of ' + fn1 + ' with ' + fn2 + '\\n')\n\n    for mainkey in dd:\n        with open(outfn, 'a') as myfile:\n            myfile.write('\\n\\n# '+mainkey+'\\n')\n\n        subd = dd[mainkey]\n        # print('mainkey = ' + mainkey)\n        # print('subd = ')\n        # print(subd)\n\n        if len(subd) == 0:\n            with open(outfn, 'a') as myfile:\n                myfile.write('none\\n')\n        else:\n            for key in subd:\n                # print('key = ', key)\n                # print('value = ', subd[key])\n                with open(outfn, 'a') as myfile:\n                    if isinstance(subd[key], str) or isinstance(subd[key], list):\n                        myfile.write('{{0: <{}}}'.format(padding_var).format(key)\n                                     + ' = ' + str(subd[key]) + '\\n')\n                    else:\n                        raise RuntimeError('Could not write key,value pair to disk, since not a string.')\n\n\nif __name__ == \"__main__\":\n    # PARSE ARGUMENTS\n    parser = argparse.ArgumentParser(description='Compare two hash lists and report missing files')\n    # Build two positional arguments, one for each file\n    parser.add_argument('file1', type=str, nargs='?',\n                        help='Full or relative path to first hash file to compare',\n                        default='check_string_for_empty')\n    parser.add_argument('file2', type=str, nargs='?',\n                        help='Full or relative path to second hash file to compare',\n                        default='check_string_for_empty')\n    # Build final optional positional argument for output file\n    parser.add_argument('outfn', nargs='?', help='Path to output file with info on the comparison',\n                        type=str, default='./hash_comparison.txt')\n    args = parser.parse_args()\n\n    if args.file1 == 'check_string_for_empty':\n        raise RuntimeError('Must supply filepath for hash list to check')\n    if args.file2 == 'check_string_for_empty':\n        raise RuntimeError('Must supply filepath for master hash list to check against')\n\n    print('Parsing file 1...')\n    hashd1 = parse_hashlist(args.file1)\n    print('Parsing file 2...')\n    hashd2 = 
parse_hashlist(args.file2)\n print('Comparing hashes and filenames...')\n comp = compare_hash_dicts(hashd1, hashd2)\n # print(comp)\n print('Writing to disk')\n write_comparison_dict(comp, args.outfn, args.file1, args.file2, padding_var=7)\n print('Wrote comparison to file: ' + args.outfn)\n\n","repo_name":"npmitchell/spimCode","sub_path":"hash_md5_parsing/compare_hashes.py","file_name":"compare_hashes.py","file_ext":"py","file_size_in_byte":5167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24439598404","text":"import asyncio\nimport logging\nimport os\n\nimport pytz\nfrom aiogram import Bot, Dispatcher\nfrom aiogram.contrib.fsm_storage.memory import MemoryStorage\nfrom aiogram.contrib.fsm_storage.redis import RedisStorage2\nfrom apscheduler.executors.asyncio import AsyncIOExecutor\nfrom apscheduler.jobstores.redis import RedisJobStore\n# from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore\nfrom apscheduler.schedulers.asyncio import AsyncIOScheduler\n\nfrom tgbot.config import load_config\nfrom tgbot.db_api.postgres_db import Database\nfrom tgbot.filters.admin import AdminFilter\nfrom tgbot.handlers.admins.admin_add_item import register_admin_add_item\nfrom tgbot.handlers.admins.admin_change_item import register_admin_change_item\nfrom tgbot.handlers.admins.admin_change_or_delete_item_category import register_admin_change_or_delete_item_category\nfrom tgbot.handlers.admins.admin_main import register_admin_main\n# from tgbot.handlers.echo import register_echo\nfrom tgbot.handlers.error_handler import register_error\nfrom tgbot.handlers.inline_mode import register_inline_mode\nfrom tgbot.handlers.payments.telegram_built_in.telegram_payment import register_telegram_built_in_payments\nfrom tgbot.handlers.user import register_user\nfrom tgbot.middlewares.album import AlbumMiddleware\nfrom tgbot.middlewares.telegraph import IntegrationMiddleware\nfrom tgbot.middlewares.throttling import ThrottlingMiddleware\nfrom tgbot.misc.allowed_updates import get_handled_updates_list\nfrom tgbot.misc.on_startup import notify_admin\nfrom tgbot.misc.set_bot_commands import set_bot_commands\nfrom tgbot.services.integrations.telegraph.abstract import FileUploader\nfrom tgbot.services.integrations.telegraph.service import TelegraphService\n\nlogger = logging.getLogger(__name__)\n\nDATABASE_URL = os.environ.get('DATABASE_URL')\n\n\ndef register_all_middlewares(dp):\n dp.setup_middleware(ThrottlingMiddleware())\n dp.setup_middleware(AlbumMiddleware())\n\n\nasync def close_session_file_uploader(dp: Dispatcher, cur_logger: logging.Logger):\n file_uploader: FileUploader = dp.bot.get('file_uploader')\n await file_uploader.close()\n cur_logger.info('FileUploader session has been closed')\n\n\ndef register_all_filters(dp):\n dp.filters_factory.bind(AdminFilter)\n\n\ndef register_all_handlers(dp):\n register_inline_mode(dp)\n register_admin_main(dp)\n register_admin_add_item(dp)\n register_admin_change_item(dp)\n register_admin_change_or_delete_item_category(dp)\n register_user(dp)\n register_telegram_built_in_payments(dp)\n register_error(dp)\n # register_echo(dp)\n\n\nasync def main():\n logging.basicConfig(\n level=logging.INFO,\n format=u'%(filename)s:%(lineno)d #%(levelname)-8s [%(asctime)s] - %(name)s - %(message)s',\n\n )\n logger.info(\"Starting bot\")\n config = load_config(database_url=DATABASE_URL, path=\".env\")\n\n if config.tg_bot.use_redis:\n storage = RedisStorage2(host=config.redis.redis_host, port=config.redis.redis_port)\n 
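compare_hash_dicts above walks both dictionaries with explicit loops. Python's dict key views support set algebra, so the same three buckets fall out of three comprehensions; a sketch with the record's semantics and toy data:

def compare_hash_dicts(hd1, hd2):
    extra = {fn: hd1[fn] for fn in hd1.keys() - hd2.keys()}
    missing = {fn: hd2[fn] for fn in hd2.keys() - hd1.keys()}
    different = {fn: [hd1[fn], hd2[fn]]
                 for fn in hd1.keys() & hd2.keys() if hd1[fn] != hd2[fn]}
    return {'missing': missing, 'extra': extra, 'different': different}

a = {'f1': 'aaa', 'f2': 'bbb'}
b = {'f1': 'aaa', 'f2': 'ccc', 'f3': 'ddd'}
print(compare_hash_dicts(a, b))
# {'missing': {'f3': 'ddd'}, 'extra': {}, 'different': {'f2': ['bbb', 'ccc']}}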
else:\n        storage = MemoryStorage()\n\n    bot = Bot(token=config.tg_bot.token, parse_mode='HTML')\n    dp = Dispatcher(bot, storage=storage)\n    # jobstores = {\n    #     'default': SQLAlchemyJobStore(url=config.db.url)\n    # }\n    jobstores = {\n        'default': RedisJobStore(\n            db=config.redis.redis_db_jobstore, host=config.redis.redis_host, port=config.redis.redis_port\n        )\n    }\n    executors = {'default': AsyncIOExecutor()}\n    job_defaults = {\"coalesce\": False, \"max_instances\": 3, \"misfire_grace_time\": None}\n    scheduler = AsyncIOScheduler(jobstores=jobstores,\n                                 executors=executors,\n                                 job_defaults=job_defaults,\n                                 timezone=pytz.timezone('Europe/Moscow'))\n\n    db = Database(url=config.db.url)\n\n    file_uploader = TelegraphService()\n\n    bot['config'] = config\n    bot['scheduler'] = scheduler\n    bot['db'] = db\n    bot['file_uploader'] = file_uploader\n\n    dp.setup_middleware(IntegrationMiddleware(file_uploader))\n    register_all_middlewares(dp)\n    register_all_filters(dp)\n    register_all_handlers(dp)\n\n    # start\n    try:\n        await db.connect_to_database()\n        logger.info('Database connection has been completed')\n        await set_bot_commands(dp)\n        logger.info('Bot commands have been set')\n        # await db.drop_table('users')\n        await db.create_table_users()\n        logger.info('Table \"users\" has been created')\n        # await db.drop_table('basket')\n        # await db.drop_table('items')\n        await db.create_table_items()\n        # await db.del_all_items_from_table('items')\n        logger.info('Table \"items\" has been created')\n        await db.create_table_basket()\n        logger.info('Table \"basket\" has been created')\n        scheduler.start()\n        await notify_admin(bot, config)\n        await dp.start_polling(dp, allowed_updates=get_handled_updates_list(dp))\n    finally:\n        scheduler.shutdown()\n        await db.pool.close()\n        await close_session_file_uploader(dp, logger)\n        await dp.storage.close()\n        await dp.storage.wait_closed()\n        await dp.bot.session.close()\n\n\nif __name__ == '__main__':\n    try:\n        asyncio.run(main())\n    except (KeyboardInterrupt, SystemExit):\n        logger.error(\"Bot stopped!\")\n","repo_name":"kolalexart/zavernite_mne_dva_store_2.0","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":5292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35480213831","text":"# -*- coding: utf-8 -*-\ndef fib(n):\n    v = [0, 1]\n\n    if n > 1:\n        for i in range(2, n + 1):\n            f = v[i - 1] + v[i - 2]\n            v.append(f)\n\n    return v[n]\n\nt = int(input())\n\nfor teste in range(t):\n    n = int(input())\n    f = fib(n)\n    print('Fib({}) = {}'.format(n, f))\n","repo_name":"rodolfoghi/urionlinejudge","sub_path":"python/1176.py","file_name":"1176.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"34264269263","text":"N = int(input())\r\n\r\nT = []  # time needed to complete each consultation\r\nP = []  # payment received for completing each consultation\r\ndp = [0] * (N + 1)  # dp[i] holds the maximum profit achievable by day i\r\n\r\nfor _ in range(N):\r\n    t, p = map(int, input().split())\r\n    T.append(t)\r\n    P.append(p)\r\n\r\nfor i in range(N):\r\n    # take the consultation on day i only if it finishes within day N\r\n    if i + T[i] <= N:\r\n        dp[i + T[i]] = max(dp[i + T[i]], dp[i] + P[i])\r\n    # carry the best profit up to day i forward to day i+1\r\n    dp[i + 1] = max(dp[i + 1], dp[i])\r\n\r\nprint(dp[N])\r\n","repo_name":"leeseunghakhello/algorithm","sub_path":"백준/Silver/14501. 
퇴사/퇴사.py","file_name":"퇴사.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35044955747","text":"#!/usr/bin/env python3\nimport scapy.all as scapy\nimport argparse\n\ndef get_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-t\", \"--target\", dest=\"target\", help=\"Target IP range\")\n options = parser.parse_args()\n if not options.target:\n parser.error(\"[-] Please specify a target IP range, use --help for more info.\")\n return options\n\ndef scan(ip):\n arp = scapy.ARP(pdst=ip)\n broadcast = scapy.Ether(dst=\"ff:ff:ff:ff:ff:ff\")\n request = broadcast/arp\n good = scapy.srp(request, timeout=1, verbose=False)[0]\n\n good_list = []\n for i in good:\n good_dic = {\"IP\":i[1].psrc, \"MAC\":i[1].hwsrc}\n good_list.append(good_dic)\n return good_list\n\ndef results(results):\n print(\"IP\\t\\t\\t\\tMAC Address\\n----------------------------------------------------------------------------\")\n for i in results:\n print(i[\"IP\"] + \"\\t\\t\\t\" + i[\"MAC\"])\noptions = get_arguments()\nscan_result = scan(options.target) \nresults(scan_result)\n","repo_name":"piliblu/Scripts","sub_path":"Tooling/network-scannerv2.py","file_name":"network-scannerv2.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8382993849","text":"from Bio import SeqIO\nwith open(\"rosalind_tran.txt\") as h:\n s1, s2 = SeqIO.parse(h, 'fasta')\n s1,s2 = s1.seq, s2.seq\n\nprint(s1,s2, sep=\"\\n\")\n\ntransitions = [(\"A\",\"G\"), (\"G\",\"A\"), (\"C\",\"T\"), (\"T\",\"C\")]\ntransversions = [(\"A\",\"T\"), (\"A\", \"C\"), (\"G\", \"C\"), (\"G\", \"T\"), (\"T\", \"A\"), (\"T\", \"G\"), (\"C\", \"A\"), (\"C\", \"G\")]\n\ntransition_count = 0\ntransversion_count = 0\nfor i in range(len(s1)):\n if (s1[i], s2[i]) in transitions:\n transition_count += 1\n if (s1[i], s2[i]) in transversions:\n transversion_count += 1\n\nprint(transition_count/transversion_count)\n\n","repo_name":"cl3mente/pocs2_assignments","sub_path":"assignment_5/tran.py","file_name":"tran.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2000175233","text":"import json\n\nwith open(\"svampexpFormula_80_test_TQ-SA_2_seed_2.json\",\"r\") as reader:\n reader = json.load(reader)\nresults=reader[\"results\"]\ndic = {}\nfor idx,item in results.items():\n item[\"Question\"]=item[\"Question\"].split(\"Question:\")[-1]\n if item[\"Answer\"] != item[\"prediction\"]:\n dic[idx] = item\n # del dic[idx][\"prompt\"]\n del dic[idx][\"shot_pids\"]\nwith open(\"svampexpFormula_inspect.json\",\"w\") as writer:\n writer.write(json.dumps(dic,indent=4))\n","repo_name":"lanmengye-a/MWP","sub_path":"PromptPG/result/gpt3_rl_formula/postprocess.py","file_name":"postprocess.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23565300111","text":"#!/usr/bin/env python\r\n\r\ndef convertTo9(N,i):\r\n for n in range(i+1):\r\n N[n] = 9\r\n\r\ndef solve(N):\r\n N.reverse()\r\n for i in range(len(N)-1) :\r\n if N[i] < N[i+1] :\r\n convertTo9(N,i)\r\n N[i+1] -= 1\r\n # print(i,d)\r\n \r\n N.reverse()\r\n return int(''.join(map(str,N)))\r\n\r\n\r\ndef main():\r\n case_counter = 1\r\n\r\n T = int(input()) # read a line with a single integer\r\n\r\n for i in 
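The Rosalind TRAN record above enumerates all eight transversion pairs explicitly. Since transitions are exactly A<->G and C<->T, one membership test over zipped sequence pairs is enough; a compact equivalent (the two sample sequences are made up):

def transition_transversion_ratio(s1, s2):
    transitions = {('A', 'G'), ('G', 'A'), ('C', 'T'), ('T', 'C')}
    ti = tv = 0
    for a, b in zip(s1, s2):
        if a != b:
            if (a, b) in transitions:
                ti += 1
            else:
                tv += 1      # any other mismatch is a transversion
    return ti / tv

print(transition_transversion_ratio('GCAACGCACAAC', 'TTATCTGACAAA'))  # 0.2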
range(1, T + 1):\r\n        \r\n        # Read the data\r\n        N = [int(s) for s in list(input())]\r\n        # print(N)\r\n\r\n        print(\"Case #{}: {}\".format(case_counter, solve(N)))\r\n        case_counter += 1\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/755.py","file_name":"755.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8963159602","text":"from .models import Bids, Comments, Watchlist, Item\r\nfrom django.shortcuts import render\r\n\r\n\r\ndef render_listing(item, request):\r\n    owned = 'true' if item.owner == request.user else ''\r\n    bids = Bids.objects.filter(item = item)\r\n    try:\r\n        last_bidder = bids[0].bidder\r\n    except IndexError:\r\n        last_bidder = ''\r\n    print(item.category)\r\n    return render(request, \"auctions/listing.html\", { \r\n        \"item\" : item,\r\n        \"comments\" : Comments.objects.all().filter(item_id=item.id),\r\n        \"watchlist\" : Watchlist.objects.filter(user=request.user, item=item) if not request.user.is_anonymous else '',\r\n        \"owned\" : owned,\r\n        \"bids\" : bids,\r\n        \"last_bidder\" : last_bidder,\r\n    }) \r\n\r\n\r\ndef is_last_bidder(request, user):\r\n    last_bidder = Bids.objects.filter(item = list(Item.objects.all().filter(id = int(request.POST[\"item\"].split()[0])))[0])[0].bidder\r\n    return True if last_bidder == user else False\r\n\r\ndef add_listing(request):\r\n    item = Item()\r\n    item.title = request.POST[\"title\"]\r\n    item.price = request.POST[\"price\"]\r\n    item.description = request.POST[\"description\"]\r\n    item.image = request.POST[\"image\"]\r\n    item.owner = request.user\r\n    item.category = request.POST[\"category\"]\r\n    item.save()\r\n    bid = Bids()\r\n    bid.amount = request.POST[\"price\"]\r\n    bid.bidder = request.user\r\n    bid.item = item\r\n    bid.save()\r\n    return True\r\n\r\n\r\ndef add_comment(request):\r\n    comment = Comments()\r\n    comment.commenter = request.user\r\n    comment.comment = request.POST[\"comment\"]\r\n    comment.item = item = Item.objects.get(id = int(request.POST[\"item\"].split()[0]))\r\n    comment.save()\r\n    return item\r\n\r\n\r\ndef add_watchlist(request):\r\n    watchlist = Watchlist()\r\n    watchlist.item = list(Item.objects.all().filter(id = int(request.POST[\"item\"].split()[0])))[0]\r\n    watchlist.user = request.user\r\n    watchlist.save()\r\n    return 'Added to watchlist'\r\n\r\n\r\ndef add_bid(request):\r\n    # save history of placing the bet\r\n    bid = Bids()\r\n    bid.amount = request.POST[\"amount\"]\r\n    bid.item = listing = Item.objects.get(id = int(request.POST[\"item\"].split()[0]))\r\n    bid.bidder = request.user\r\n    bid.save()\r\n    # update listing price\r\n    listing.price = bid.amount\r\n    listing.save()\r\n    return listing","repo_name":"xaleel/Web-projects","sub_path":"Commerce/auctions/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32072460081","text":"import re\nimport json\n\nwith open(\"britain.txt\") as f:\n    data = f.read()\n\n# Extract the \"基礎情報\" (basic information) infobox\nbase_info_pattern = re.compile(r\"\\{\\{基礎情報 国([\\s\\S]+)\\n\\}\\}\")\nbase_info = base_info_pattern.search(data)[1]\n\n# Extract each field\nfields_text = base_info.split(\"\\n|\")\n\nfields_pattern = re.compile(r\"([^=]+)\\s*=\\s*([\\s\\S]+)\")\nfields_match = [fields_pattern.match(f) for f in fields_text]\n\n# Pack the fields into a dict\n## Patterns for cleaning the values\n### Emphasis markup\nemphasis_pattern1 = 
re.compile(r\"'{5,5}([\\s\\S]+)'{5,5}\")\nemphasis_pattern2 = re.compile(r\"'{3,3}([\\s\\S]+)'{3,3}\")\nemphasis_pattern3 = re.compile(r\"'{2,2}([\\s\\S]+)'{2,2}\")\n### 内部リンク\ninter_link_pattern1 = re.compile(r\"\\[\\[[^\\|\\[\\]]+\\|([^\\[\\]]+\\|)?([^\\[\\]]+)\\]\\]\")\ninter_link_pattern2 = re.compile(r\"\\[\\[([^\\[\\]]+)\\]\\]\")\n### 箇条書き\nitems_pattern = re.compile(r\"\\n\\*+\")\n### ref, br 削除\nref_pattern = re.compile(r\"\\]*\\>\")\nbr_pattern = re.compile(r\"\\
    \")\n### 多言語タグ\nlang_pattern = re.compile(r\"\\{\\{lang\\|[\\w]{2,3}\\|([^\\{\\}]+)\\}\\}\")\n### 外部リ���ク\nouter_link_pattern = re.compile(r\"\\[[^\\[\\]]+\\]\")\n### 数字のフォーマット\nnum_format_pattern = re.compile(r\"\\{\\{0\\}\\}\")\n### {{en icon}}\nicon_pattern = re.compile(r\"\\{\\{en icon\\}\\}\")\n### ファイル\nfile_pattern = re.compile(r\"\\{\\{([\\w]+\\|)?ファイル:([^\\{\\}]+)\\}\\}\")\n### 仮リンク\nany_link_pattern = re.compile(r\"\\{\\{仮リンク\\|[^\\{\\}]+\\|([^\\|\\{\\}]+)\\}\\}\")\n### cite web\ncite_web_pattern = re.compile(r\"\\{\\{Cite web\\|[^\\{\\}]+\\}\\}\")\n\nbase_info_dict = {}\nfor f_match in fields_match:\n if f_match is None:\n continue\n\n non_emphasis = emphasis_pattern1.sub(r\"\\1\", f_match[2])\n non_emphasis = emphasis_pattern2.sub(r\"\\1\", non_emphasis)\n non_emphasis = emphasis_pattern3.sub(r\"\\1\", non_emphasis)\n non_inter_link = inter_link_pattern1.sub(r\"\\2\", non_emphasis)\n non_inter_link = inter_link_pattern2.sub(r\"\\1\", non_inter_link)\n non_items = items_pattern.sub(r\"\\n\", non_inter_link)\n non_tags = ref_pattern.sub(r\"\", non_items)\n non_tags = br_pattern.sub(r\"\", non_tags)\n non_lang = lang_pattern.sub(r\"\\1\", non_tags)\n non_outer_link = outer_link_pattern.sub(r\"\", non_lang)\n non_num_format = num_format_pattern.sub(r\"\", non_outer_link)\n non_icon = icon_pattern.sub(r\"\", non_num_format)\n non_file = file_pattern.sub(r\"\", non_icon)\n non_link = any_link_pattern.sub(r\"\\1\", non_file)\n non_cite = cite_web_pattern.sub(r\"\", non_link)\n base_info_dict[f_match[1]] = non_cite.replace(\"}\", \"\")\n\n\nwith open(\"britain.json\", \"w\") as f:\n json.dump(base_info_dict, f, ensure_ascii=False, indent=4)\n","repo_name":"eteeeeeerminal/practice-nlp100","sub_path":"3/26-28.py","file_name":"26-28.py","file_ext":"py","file_size_in_byte":2619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27158316913","text":"from miasm.analysis.simplifier import IRCFGSimplifier\r\nfrom miasm.arch.x86.arch import mn_x86\r\nfrom miasm.core.locationdb import LocationDB\r\nfrom miasm.analysis.machine import Machine\r\nfrom miasm.analysis.binary import Container\r\nfrom miasm.ir.symbexec import SymbolicExecutionEngine\r\n\r\nloc_db = LocationDB()\r\n\r\n# asm -> binary code\r\ndef binstring(l):\r\n binstr = b''\r\n for asminst in l:\r\n internal_asminst = mn_x86.fromstring(asminst, loc_db, 32)\r\n binstr += mn_x86.asm(internal_asminst)[0]\r\n return binstr\r\n\r\n\r\n\"\"\"\r\n--- from numba execution ---\r\nleaq (%rdx, %rcx), %rax\r\naddq $1010, %rax\r\nmovq %rax, (%rdi)\r\nxorl %eax, %eax\r\nretq\r\n\r\n\"\"\"\r\n\r\n\r\nshellcode = ['LEA EAX, DWORD PTR [EDX+ECX]',\r\n 'ADD EAX, 1010',\r\n 'MOV DWORD PTR [EDI+0], EAX',\r\n 'XOR EAX, EAX',\r\n 'RET']\r\n\r\nmachine = Machine('x86_32')\r\n\r\nprint(\"(1) binary string from list of asm string\")\r\ns = binstring(shellcode)\r\nprint('binstr s =', s)\r\nprint()\r\n\r\nprint(\"(2) disassembled code from s: with next block numbers in cfg\")\r\n# get disassembled shell code\r\nc = Container.from_string(s, loc_db)\r\n\r\n# Instantiate a disassembler engine, using the previous bin_stream and its\r\n# associated location DB. 
The assembly listing will use the binary symbols\r\nmdis = machine.dis_engine(c.bin_stream, loc_db=loc_db)\r\nasmcfg = mdis.dis_multiblock(0)\r\n\r\n\r\n# Display each basic block\r\n# also add asm block to ircfg\r\nprint(\"----------- ASM block_start -----------\")\r\nfor block in asmcfg.blocks:\r\n print(block)\r\nprint(\"ASM block end\")\r\nprint()\r\n\r\n\r\nprint(\"(3) Working with IR, for instance by getting side effects\")\r\nlifter = machine.lifter_model_call(loc_db)\r\nircfg = lifter.new_ircfg_from_asmcfg(asmcfg)\r\nprint()\r\n\r\n\r\n# Initializing the engine with default symbolic values:\r\n# 0 for loc_0, 16 is for loc_10, 19 is for loc_13, 11 is for loc_b\r\n# execution until conditional-jump, otherwise until ret\r\nprint(\"(4) print symbolic execution steps\")\r\nprint(\"----------- Symbolic Engine with Log -----------\")\r\nsb = SymbolicExecutionEngine(lifter, machine.mn.regs.regs_init)\r\nIRCFGSimplifier(lifter).simplify(ircfg, 0)\r\nsymbolic_pc = sb.run_at(ircfg, 0, step=True)\r\n","repo_name":"baekyumi/lang-eq-tool","sub_path":"test/numba_miasm.py","file_name":"numba_miasm.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29733101582","text":"import importlib\nimport logging\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.core.util.event_pb2 import SessionLog\n\nfrom tensorforce import TensorForceError\n\n\nepsilon = 1e-6\n\n\nlog_levels = dict(\n info=logging.INFO,\n debug=logging.DEBUG,\n critical=logging.CRITICAL,\n warning=logging.WARNING,\n fatal=logging.FATAL\n)\n\n\ndef prod(xs):\n \"\"\"Computes the product along the elements in an iterable. Returns 1 for empty iterable.\n\n Args:\n xs: Iterable containing numbers.\n\n Returns: Product along iterable.\n\n \"\"\"\n p = 1\n for x in xs:\n p *= x\n return p\n\n\ndef rank(x):\n return x.get_shape().ndims\n\n\ndef shape(x, unknown=-1):\n return tuple(unknown if dims is None else dims for dims in x.get_shape().as_list())\n\n\ndef np_dtype(dtype):\n \"\"\"Translates dtype specifications in configurations to numpy data types.\n Args:\n dtype: String describing a numerical type (e.g. 'float') or numerical type primitive.\n\n Returns: Numpy data type\n\n \"\"\"\n if dtype == 'float' or dtype == float or dtype == np.float32 or dtype == tf.float32:\n return np.float32\n elif dtype == np.float64 or dtype == tf.float64:\n return np.float64\n elif dtype == np.float16 or dtype == tf.float16:\n return np.float16\n elif dtype == 'int' or dtype == int or dtype == np.int32 or dtype == tf.int32:\n return np.int32\n elif dtype == np.int64 or dtype == tf.int64:\n return np.int64\n elif dtype == np.int16 or dtype == tf.int16:\n return np.int16\n elif dtype == 'bool' or dtype == bool or dtype == np.bool_ or dtype == tf.bool:\n return np.bool_\n else:\n raise TensorForceError(\"Error: Type conversion from type {} not supported.\".format(str(dtype)))\n\n\ndef tf_dtype(dtype):\n \"\"\"Translates dtype specifications in configurations to tensorflow data types.\n\n Args:\n dtype: String describing a numerical type (e.g. 
'float'), numpy data type,\n or numerical type primitive.\n\n Returns: TensorFlow data type\n\n \"\"\"\n # Defaults to 32\n if dtype == 'float' or dtype == float or dtype == np.float32 or dtype == tf.float32:\n return tf.float32\n elif dtype == np.float64 or dtype == tf.float64:\n return tf.float64\n elif dtype == np.float16 or dtype == tf.float16:\n return tf.float16\n elif dtype == 'int' or dtype == int or dtype == np.int32 or dtype == tf.int32:\n return tf.int32\n elif dtype == np.int64 or dtype == tf.int64:\n return tf.int64\n elif dtype == np.int16 or dtype == tf.int16:\n return tf.int16\n elif dtype == 'bool' or dtype == bool or dtype == np.bool_ or dtype == tf.bool:\n return tf.bool\n else:\n raise TensorForceError(\"Error: Type conversion from type {} not supported.\".format(str(dtype)))\n\n\ndef map_tensors(fn, tensors):\n if tensors is None:\n return None\n elif isinstance(tensors, tuple):\n return tuple(map_tensors(fn=fn, tensors=tensor) for tensor in tensors)\n elif isinstance(tensors, list):\n return [map_tensors(fn=fn, tensors=tensor) for tensor in tensors]\n elif isinstance(tensors, dict):\n return {key: map_tensors(fn=fn, tensors=tensor) for key, tensor in tensors.items()}\n elif isinstance(tensors, set):\n return {map_tensors(fn=fn, tensors=tensor) for tensor in tensors}\n else:\n return fn(tensors)\n\n\ndef get_tensor_dependencies(tensor):\n \"\"\"\n Utility method to get all dependencies (including placeholders) of a tensor (backwards through the graph).\n\n Args:\n tensor (tf.Tensor): The input tensor.\n\n Returns: Set of all dependencies (including needed placeholders) for the input tensor.\n \"\"\"\n dependencies = set()\n dependencies.update(tensor.op.inputs)\n for sub_op in tensor.op.inputs:\n dependencies.update(get_tensor_dependencies(sub_op))\n return dependencies\n\n\ndef get_object(obj, predefined_objects=None, default_object=None, kwargs=None):\n \"\"\"\n Utility method to map some kind of object specification to its content,\n e.g. optimizer or baseline specifications to the respective classes.\n\n Args:\n obj: A specification dict (value for key 'type' optionally specifies\n the object, options as follows), a module path (e.g.,\n my_module.MyClass), a key in predefined_objects, or a callable\n (e.g., the class type object).\n predefined_objects: Dict containing predefined set of objects,\n accessible via their key\n default_object: Default object is no other is specified\n kwargs: Arguments for object creation\n\n Returns: The retrieved object\n\n \"\"\"\n args = ()\n kwargs = dict() if kwargs is None else kwargs\n\n if isinstance(obj, dict):\n kwargs.update(obj)\n obj = kwargs.pop('type', None)\n\n if predefined_objects is not None and obj in predefined_objects:\n obj = predefined_objects[obj]\n elif isinstance(obj, str):\n if obj.find('.') != -1:\n module_name, function_name = obj.rsplit('.', 1)\n module = importlib.import_module(module_name)\n obj = getattr(module, function_name)\n else:\n raise TensorForceError(\"Error: object {} not found in predefined objects: {}\".format(\n obj,\n list(predefined_objects or ())\n ))\n elif callable(obj):\n pass\n elif default_object is not None:\n args = (obj,)\n obj = default_object\n else:\n # assumes the object is already instantiated\n return obj\n\n return obj(*args, **kwargs)\n\n\ndef prepare_kwargs(raw, string_parameter='name'):\n \"\"\"\n Utility method to convert raw string/diction input into a dictionary to pass\n into a function. 
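np_dtype and tf_dtype above are long if/elif ladders over type aliases. A table-driven sketch of the same idea, trimmed to a few numpy entries (the full alias set from the record would extend the dict in the same way):

import numpy as np

# Each accepted alias maps to one canonical numpy dtype.
_NP_DTYPES = {
    'float': np.float32, float: np.float32, np.float32: np.float32,
    np.float64: np.float64,
    'int': np.int32, int: np.int32, np.int32: np.int32,
    'bool': np.bool_, bool: np.bool_,
}

def np_dtype(dtype):
    try:
        return _NP_DTYPES[dtype]
    except (KeyError, TypeError):  # TypeError covers unhashable inputs
        raise ValueError('Type conversion from type {} not supported.'.format(dtype))

assert np_dtype('float') is np.float32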
Always returns a dictionary.\n\n Args:\n raw: string or dictionary, string is assumed to be the name of the activation\n activation function. Dictionary will be passed through unchanged.\n\n Returns: kwargs dictionary for **kwargs\n\n \"\"\"\n kwargs = dict()\n\n if isinstance(raw, dict):\n kwargs.update(raw)\n elif isinstance(raw, str):\n kwargs[string_parameter] = raw\n\n return kwargs\n\n\nclass UpdateSummarySaverHook(tf.train.SummarySaverHook):\n\n def __init__(self, model, *args, **kwargs):\n super(UpdateSummarySaverHook, self).__init__(*args, **kwargs)\n self.model = model\n\n def before_run(self, run_context):\n self._request_summary = run_context.original_args[1] is not None and \\\n self.model.is_observe and \\\n (self._next_step is None or self._timer.should_trigger_for_step(self._next_step))\n # run_context.original_args[1].get(self.is_optimizing, False) and \\\n requests = {'global_step': self._global_step_tensor}\n if self._request_summary:\n if self._get_summary_op() is not None:\n requests['summary'] = self._get_summary_op()\n return tf.train.SessionRunArgs(requests)\n\n def after_run(self, run_context, run_values):\n if not self._summary_writer:\n return\n\n stale_global_step = run_values.results['global_step']\n global_step = stale_global_step + 1\n if self._next_step is None or self._request_summary:\n global_step = run_context.session.run(self._global_step_tensor)\n\n if self._next_step is None:\n self._summary_writer.add_session_log(SessionLog(status=SessionLog.START), global_step)\n\n if 'summary' in run_values.results:\n self._timer.update_last_triggered_step(global_step)\n for summary in run_values.results['summary']:\n self._summary_writer.add_summary(summary, global_step)\n\n self._next_step = global_step + 1\n\n\ndef strip_name_scope(name, base_scope):\n if name.startswith(base_scope):\n return name[len(base_scope):]\n else:\n return name\n\n\nclass SavableComponent(object):\n \"\"\"\n Component that can save and restore its own state.\n \"\"\"\n\n def register_saver_ops(self):\n \"\"\"\n Registers the saver operations to the graph in context.\n \"\"\"\n\n variables = self.get_savable_variables()\n if variables is None or len(variables) == 0:\n self._saver = None\n return\n\n base_scope = self._get_base_variable_scope()\n variables_map = {strip_name_scope(v.name, base_scope): v for v in variables}\n\n self._saver = tf.train.Saver(\n var_list=variables_map,\n reshape=False,\n sharded=False,\n max_to_keep=5,\n keep_checkpoint_every_n_hours=10000.0,\n name=None,\n restore_sequentially=False,\n saver_def=None,\n builder=None,\n defer_build=False,\n allow_empty=True,\n write_version=tf.train.SaverDef.V2,\n pad_step_number=False,\n save_relative_paths=True\n )\n\n def get_savable_variables(self):\n \"\"\"\n Returns the list of all the variables this component is responsible to save and restore.\n\n Returns:\n The list of variables that will be saved or restored.\n \"\"\"\n\n raise NotImplementedError()\n\n def save(self, sess, save_path, timestep=None):\n \"\"\"\n Saves this component's managed variables.\n\n Args:\n sess: The session for which to save the managed variables.\n save_path: The path to save data to.\n timestep: Optional, the timestep to append to the file name.\n\n Returns:\n Checkpoint path where the model was saved.\n \"\"\"\n\n if self._saver is None:\n raise TensorForceError(\"register_saver_ops should be called before save\")\n return self._saver.save(\n sess=sess,\n save_path=save_path,\n global_step=timestep,\n write_meta_graph=False,\n 
write_state=True, # Do we need this?\n )\n\n def restore(self, sess, save_path):\n \"\"\"\n Restores the values of the managed variables from disk location.\n\n Args:\n sess: The session for which to save the managed variables.\n save_path: The path used to save the data to.\n \"\"\"\n\n if self._saver is None:\n raise TensorForceError(\"register_saver_ops should be called before restore\")\n self._saver.restore(sess=sess, save_path=save_path)\n\n def _get_base_variable_scope(self):\n \"\"\"\n Returns the portion of the variable scope that is considered a base for this component. The variables will be\n saved with names relative to that scope.\n\n Returns:\n The name of the base variable scope, should always end with \"/\".\n \"\"\"\n\n raise NotImplementedError()\n","repo_name":"nekrald/quadrotor_reinforcement_learning","sub_path":"libraries/tensorforce/tensorforce/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":10817,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"19359872343","text":"'''\r\nGiven a string text, you want to use the characters of text to form as many \r\ninstances of the word \"balloon\" as possible.\r\n\r\nYou can use each character in text at most once. Return the maximum number of \r\ninstances that can be formed.\r\n\r\nExample 1:\r\nInput: text = \"nlaebolko\"\r\nOutput: 1\r\n\r\nInput: text = \"loonbalxballpoon\"\r\nOutput: 2\r\n\r\nInput: text = \"leetcode\"\r\nOutput: 0\r\n'''\r\n\r\ndef MaximumNumberBallons(text):\r\n S = \"balon\"\r\n dir = dict()\r\n \r\n for s in S:\r\n if s in text:\r\n if s == 'l' or s == 'o':\r\n dir[s] = int(text.count(s)/2)\r\n else:\r\n dir[s] = text.count(s)\r\n text = text.replace(s, \"\")\r\n else:\r\n dir[s] = 0\r\n \r\n return min(dir.values())\r\n\r\nprint(MaximumNumberBallons(\"loonbalxballpoon\"))\r\n","repo_name":"newbieeashish/LeetCode_Algo","sub_path":"1st_100_questions/MaximumNumberOfBallons.py","file_name":"MaximumNumberOfBallons.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71679584194","text":"'''\nSite:\nhttps://astrostyle.com/horoscopes/daily/{}/ {} = sign\n\nrequired external plugin:\nrequests\nbeautifulsoup4\n\n'''\nimport re\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef get_date(horoscope_content):\n date = horoscope_content.find('h2').text\n date = date.split()\n date = date[:-1]\n\n return date\n\ndef horoscope(horoscope_content):\n h_text = horoscope_content.find('p').text\n\n return h_text\n\ndef main():\n\n h_sign = ('aries', 'taurus', 'gemini', 'cancer', 'leo', 'virgo', 'libra', 'scorpio', 'sagittarius', 'capricorn', 'aquarius', 'pisces', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12')\n\n user_input = input(\"\"\"Please select your Sign from below: You can either designate the number or type the sign\n 1. Aries (Mar 31 - Apr 19)\n 2. Taurus (Apr 20 - May 20)\n 3. Gemini (May 21 - June 20)\n 4. Cancer (June 21 - July 22)\n 5. Leo (July 23 - Aug 22)\n 6. Virgo (Aug 23- Sep 22)\n 7. Libra (Sep 23 - Oct 22)\n 8. Scorpio (Oct 23 - Nov 21)\n 9. Sagittarius (Nov 22- Dec 21)\n 10. Capricorn (Dec 22 - Jan 19)\n 11. Aquarius (Jan 20 - Feb 18)\n 12. 
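The balloon counter above handles the doubled letters by halving the counts of 'l' and 'o'; the same arithmetic falls out naturally from `collections.Counter`, because the target word already contains each letter the required number of times. A minimal sketch of that alternative, not the author's code:

```python
from collections import Counter

def max_balloons(text):
    # Counter('balloon') already encodes that l and o are needed twice.
    have = Counter(text)
    need = Counter('balloon')
    # Integer division per required letter; the scarcest letter is the limit.
    return min(have[c] // need[c] for c in need)

assert max_balloons('nlaebolko') == 1
assert max_balloons('loonbalxballpoon') == 2
assert max_balloons('leetcode') == 0
```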
Pisces (Feb 19 - Mar 20)\\n\\nInput: \"\"\")\n\n val = False\n\n while not val:\n if user_input.lower() in h_sign:\n try:\n sign = int(user_input)\n if sign == 1:\n sign = 'aries'\n elif sign == 2:\n sign = 'taurus'\n elif sign == 3:\n sign = 'gemini'\n elif sign == 4:\n sign = 'cancer'\n elif sign == 5:\n sign = 'leo'\n elif sign == 6:\n sign = 'virgo'\n elif sign == 7:\n sign = 'libra'\n elif sign == 8:\n sign = 'scorpio'\n elif sign == 9:\n sign = 'sagittarius'\n elif sign == 10:\n sign = 'capricorn'\n elif sign == 11:\n sign = 'aquarius'\n else:\n sign = 'pisces'\n val = True\n except:\n sign = user_input\n val = True\n\n url = 'https://astrostyle.com/horoscopes/daily/{}/'.format(sign)\n page = requests.get(url)\n soup = BeautifulSoup(page.text, 'lxml')\n\n horoscope_content = soup.find(class_='horoscope-content')\n\n date = get_date(horoscope_content)\n h_text = horoscope(horoscope_content)\n\n print(\"Today is: {}\".format(\" \".join(date)))\n print(\"Sign: {}\\n\".format(sign.capitalize()))\n print(\"Horoscope for the day\\n\\n {}\".format(h_text))\n\n\n\nif __name__ == '__main__':\n main()\n\n\n","repo_name":"Kyo88avaris/Horoscope-Python3","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21020732144","text":"###############################################################################\n# #\n# This program is free software: you can redistribute it and/or modify #\n# it under the terms of the GNU General Public License as published by #\n# the Free Software Foundation, either version 3 of the License, or #\n# (at your option) any later version. #\n# #\n# This program is distributed in the hope that it will be useful, #\n# but WITHOUT ANY WARRANTY; without even the implied warranty of #\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #\n# GNU General Public License for more details. #\n# #\n# You should have received a copy of the GNU General Public License #\n# along with this program. If not, see . 
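The horoscope script above turns a numeric menu choice into a sign name with a twelve-branch if/elif chain wrapped in a try/except; indexing into the same tuple does the job in two lines. A sketch using the script's own 1-based menu order; `normalize_sign` is a hypothetical helper name:

```python
SIGNS = ('aries', 'taurus', 'gemini', 'cancer', 'leo', 'virgo',
         'libra', 'scorpio', 'sagittarius', 'capricorn', 'aquarius', 'pisces')

def normalize_sign(user_input):
    """Accept either a menu number ('7') or a sign name ('Libra')."""
    text = user_input.strip().lower()
    if text.isdigit() and 1 <= int(text) <= len(SIGNS):
        return SIGNS[int(text) - 1]   # menu numbers are 1-based
    if text in SIGNS:
        return text
    raise ValueError('unrecognized sign: {!r}'.format(user_input))

assert normalize_sign('7') == 'libra'
assert normalize_sign('Pisces') == 'pisces'
```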
#\n# #\n###############################################################################\n\n__author__ = 'Donovan Parks'\n__copyright__ = 'Copyright 2014'\n__credits__ = ['Donovan Parks']\n__license__ = 'GPL3'\n__maintainer__ = 'Donovan Parks'\n__email__ = 'donovan.parks@gmail.com'\n\nimport random\n\nimport dendropy\n\nfrom biolib.newick import parse_label, create_label\n\n\"\"\"Perform non-parametric bootstrapping on multiple sequence alignment.\"\"\"\n\ndef bootstrap_support(input_tree, replicate_trees, output_tree):\n \"\"\" Calculate support for tree with replicates covering the same taxon set.\n\n Parameters\n ----------\n input_tree : str\n Tree inferred from complete data.\n replicate_trees : iterable\n Files containing replicate trees.\n output_tree: str\n Name of output tree with support values.\n \"\"\"\n # read tree and bootstrap replicates as unrooted, and\n # calculate bootstrap support\n orig_tree = dendropy.Tree.get_from_path(input_tree, \n schema='newick', \n rooting=\"force-unrooted\", \n preserve_underscores=True)\n orig_tree.bipartitions = True\n orig_tree.encode_bipartitions()\n\n rep_trees = dendropy.TreeArray(taxon_namespace=orig_tree.taxon_namespace,\n is_rooted_trees=False,\n ignore_edge_lengths=True,\n ignore_node_ages=True,\n use_tree_weights=False)\n\n rep_trees.read_from_files(files=replicate_trees,\n schema='newick',\n rooting=\"force-unrooted\",\n preserve_underscores=True,\n taxon_namespace=orig_tree.taxon_namespace)\n\n rep_trees.summarize_splits_on_tree(orig_tree,\n is_bipartitions_updated=True,\n add_support_as_node_attribute=True,\n support_as_percentages=True)\n\n for node in orig_tree.internal_nodes():\n if node.label:\n support, taxon, aux_info = parse_label(node.label)\n node.label = create_label(node.support, taxon, aux_info)\n else:\n node.label = str(int(node.support))\n\n orig_tree.write_to_path(output_tree, \n schema='newick', \n suppress_rooting=True, \n unquoted_underscores=True)\n\n\ndef bootstrap_alignment(msa, output_file, frac=1.0):\n \"\"\"Bootstrap multiple sequence alignment.\n\n True bootstrapping requires subsampling an alignment,\n with replacement, to construct new alignments\n with the same length as the input alignment. 
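The `bootstrap_alignment` helper here draws alignment columns with replacement and streams them to a file; the core resampling step can be shown without any I/O. A self-contained sketch of that idea — the toy alignment and the `bootstrap_columns` name are illustrative only:

```python
import random

def bootstrap_columns(msa, frac=1.0, seed=None):
    """Build a pseudo-replicate by drawing columns with replacement."""
    rng = random.Random(seed)
    length = len(next(iter(msa.values())))
    n = int(length * frac)
    # Sampling with replacement: the same column index may repeat.
    cols = [rng.randrange(length) for _ in range(n)]
    return {seq_id: ''.join(seq[c] for c in cols) for seq_id, seq in msa.items()}

msa = {'taxonA': 'ACGT', 'taxonB': 'ACGA'}
replicate = bootstrap_columns(msa, seed=42)
assert len(replicate['taxonA']) == len(msa['taxonA'])
assert set(replicate) == set(msa)
```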
The\n 'frac' parameter allows shorter bootstrap alignments\n to be generated in order to reduce computational\n demands.\n\n Parameters\n ----------\n msa : d[seq_id] -> seq\n Full multiple sequence alignment.\n output_file : str\n File to write bootstrapped alignment.\n frac : float\n Fraction of alignment to subsample.\n \"\"\"\n alignment_len = len(msa[list(msa.keys())[0]])\n sample_len = int(alignment_len * frac)\n cols = [random.randint(0, alignment_len - 1) for _ in range(sample_len)]\n\n fout = open(output_file, 'w')\n for seq_id, seq in msa.items():\n fout.write('>' + seq_id + '\\n')\n for col in cols:\n fout.write(seq[col])\n fout.write('\\n')\n fout.close()\n","repo_name":"jtamames/SqueezeMeta","sub_path":"lib/biolib/bootstrap.py","file_name":"bootstrap.py","file_ext":"py","file_size_in_byte":4769,"program_lang":"python","lang":"en","doc_type":"code","stars":295,"dataset":"github-code","pt":"61"} +{"seq_id":"27968918886","text":"### Packages ###\nfrom torch import float64, cuda, device, tensor\nfrom botorch.acquisition import UpperConfidenceBound\n\n### Custom Scripts ###w\nfrom utils.pprint_nd import pprint\n\n###### Configurable Variables ######\nfrom config.config import Config\nconfig = Config('config/config.ini')\nSIGMA_UCB = config.SIGMA_UCB\n\n\ndef acq_ucb(self, genomes):\n\n dev = device(\"cuda\" if cuda.is_available() else \"cpu\")\n\n genomes = tensor(genomes, dtype=float64, device=dev) # Shape: PARALLEL_BATCH_SIZE x SOL_DIMENSION\n transformed_genomes = genomes.unsqueeze(1) # Shape: PARALLEL_BATCH_SIZE x 1 x SOL_DIMENSION\n\n UCB = UpperConfidenceBound(self.gp_model, beta=SIGMA_UCB)\n ucb_tensor = UCB(transformed_genomes)\n ucb_ndarray = ucb_tensor.detach().numpy()\n\n return ucb_ndarray","repo_name":"patrickab/Maximum-SAIL","sub_path":"sail_xfoil/acq_functions/acq_ucb.py","file_name":"acq_ucb.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"12697398703","text":"class Solution:\n def numIslands2(self, m: 'int', n: 'int', positions: 'List[List[int]]') -> 'List[int]':\n self.table = {}\n self.island = {}\n ans = []\n num_of_island = 0\n for p in positions:\n nbrs = self.neighbors(p, m, n)\n new_island_f = True\n nbr_island = set()\n for nbr in nbrs:\n if nbr in self.table:\n nbr_island.add(self.table[nbr])\n new_island_f = False \n \n if new_island_f:\n self.table[(p[0], p[1])] = num_of_island\n self.island[num_of_island] = [p]\n num_of_island +=1\n else:\n self.update_island(p, nbr_island)\n \n ans.append(len(set(self.island.keys())))\n return ans\n \n \n def update_island(self, p, nbr_island):\n nbr_island = list(nbr_island)\n min_i = min(nbr_island)\n\n lands = [p]\n for i in nbr_island:\n lands += self.island[i]\n for i in nbr_island:\n del self.island[i]\n for l in lands:\n self.table[(l[0], l[1])] = min_i\n self.island[min_i] = lands\n \n \n \n \n def neighbors(self, idx: 'List[int]', m, n):\n \n direct = [[1,0], [0,1], [-1,0], [0,-1]]\n neighbors = [(idx[0]+d[0], idx[1]+d[1]) for d in direct]\n neighbors = [nbr for nbr in neighbors if nbr[0]>=0 and nbr[1]>=0 and nbr[0]\n 'numOfLikes', # \n 'numOfComments', # \n 'hashtags', # \n 'emptyHashtags', # \n 'url', # \n 'tag', # \n 'uploadDatetime', # \n 'commentUsernames', # \n 'imageUrl', # \n 'errorFlag' # \n } \n ]\n '''''''''''''''''''''''''''''''''''''''''''''''''''\n url = _urlAndTag[0]\n tag = _urlAndTag[1]\n req = requests.get(url)\n\n\n # url check\n # 만들어야함. 
An error can occur here if a bad url is passed in.\n\n    soup = BeautifulSoup(req.text,\"html5lib\")\n    \n    # saveHtml(str(soup),'html2.html')\n\n    # img url\n    imageUrl = str(soup.find(\"meta\", property=\"og:image\"))[15:-23]\n    # soup.find returns the full meta tag markup,\n    # so it is sliced down to the bare url\n\n    metaDescription = str(soup.find(\"meta\", property=\"og:description\")) \n\n    id = re.findall(r\"@[a-z_.0-9]*\",metaDescription)[0][1:]\n\n    # scrape the hashtags\n    metaHashtags = soup.find_all(\"meta\", property=\"instapp:hashtags\")\n    hashtags = []\n\n    for metaHashtag in metaHashtags:\n        hashtags.append(str(metaHashtag)[15:-31])\n    # print('-----type is',type(soup.find('script', type='application/ld+json')))\n\n    try:\n        strDateAndComment = soup.find('script', type='application/ld+json').text\n        # scrape the commenter usernames\n        commentUsernames_parser = re.findall('\"alternateName\":\"@.{1,30}\",\"main', strDateAndComment) # usernames are at most 30 characters long\n        commentUsernames = []\n        for commentUsername in commentUsernames_parser:\n            commentUsernames.append(commentUsername[18:-7])\n        uploadDatetime = re.findall('\\d{4}-\\d{2}-\\d{2}.*\\d{2}:\\d{2}:\\d{2}', strDateAndComment)[0]\n\n        space = re.compile('[ ]+')\n        # scrape the number of likes\n        numOfLikes = space.split(re.findall(r\"[0-9]* Likes\",metaDescription)[0])[0] # findall returns a list, hence the [0]\n\n        # scrape the number of comments\n        numOfComments = space.split(re.findall(r\"[0-9]* Comments\",metaDescription)[0])[0] \n        errorFlag = False # False if no information was missed while crawling\n    except Exception as e:\n        commentUsernames = ''\n        uploadDatetime = ''\n        numOfLikes = 0\n        numOfComments = 0 \n        errorFlag = True # True if some information was missed while crawling\n        # print(e,url)\n    # p(strDateAndComment.decode('euc-kr'))\n    # TODO: extract the date, body and comments from strDateAndComment and re-encode the comments as Korean.\n\n    meta = {\n        'id' : id, # author ID\n        'numOfLikes' : int(numOfLikes), # number of likes\n        'numOfComments' : int(numOfComments), # number of comments\n        'hashtags' : hashtags, # hashtags in the body\n        'emptyHashtags' : bool(hashtags), # false when the body has no hashtags (some users put hashtags in a comment instead)\n        'url' : url, # Url of feed\n        'tag' : tag, # description of the first photo of the feed\n        'uploadDatetime' : uploadDatetime, # feed upload time\n        'commentUsernames' : commentUsernames, # list of users who commented\n        'imageUrl' : imageUrl, # photo url\n        'errorFlag' : errorFlag # crawling error flag\n    }\n\n    return json.dumps(meta, ensure_ascii=False)\n\n\n'''''''''''''''''''''\nItems to fetch\n    required  +ID, body text, +hashtag text, +numOfLike, linkOfPic\n    ???  comments, accounts of users who liked, +comment count, \n'''''''''''''''''''''\n","repo_name":"social-media-data-analysis/instagram-crawler","sub_path":"metainfofeed.py","file_name":"metainfofeed.py","file_ext":"py","file_size_in_byte":4515,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"27966218059","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/12/1 16:23\n# @Author : KevinHoo\n# @Site : \n# @File : main.py\n# @Software: PyCharm \n# @Email : hu.rui0530@gmail.com\n\n\n# libraries used for analysis\nimport pandas as pd\nimport numpy as np\nfrom sklearn import svm\n\n# libraries used for visualization\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set(font_scale=1.2) # font scaling factor\n\n# read the prepared ingredient-list csv file\nrecipes = pd.read_csv('./data/muffin_or_cupcakes_real.csv')\nrecipes.head(5)\n# print(recipes)\n\n# first plot two of the ingredients as a 2-D scatter; sugar and butter are chosen because their shares are neither too large nor too small\n# params: x/y columns, dataset, class variable, palette, regression-fit toggle, style kwarg s for marker size\nsns.lmplot(x='Sugar', y='Butter', data=recipes, hue='Type', palette='Set1', fit_reg=False, scatter_kws={\"s\": 70})\n\n# fit the model\n# arrange the inputs the model expects\nsugar_butter = recipes[['Sugar', 'Butter']].values\ntype_label = np.where(recipes['Type'] == 'Muffin', 0, 1)\n\n# SVM model\n# model = svm.SVC(kernel='linear', C=2**-5) # support vector classifier with a linear kernel\nmodel = svm.SVC(kernel='linear', C=1, gamma=1e10) # support vector classifier with a linear kernel\nmodel.fit(sugar_butter, type_label)\n\n# the separating hyperplane is w^T x + b = 0; in two dimensions it reduces to a line of the form y = a*x + b\nw = model.coef_[0]\n# print(model.coef_)\n# from a*x + (1/b)*y + c = 0 we get y = -a*b*x - c*b\na = -w[0] / w[1]\nxx = np.linspace(5, 30) # evenly spaced samples over (5, 30)\n# print(xx)\nyy = a * xx - (model.intercept_[0] / w[1])\n# print(yy)\n\n# draw the dashed lines through the support vectors that mark the width of the margin\nb = model.support_vectors_[0]\nyy_down = a * xx + (b[1] - a * b[0]) # lower dashed line through a support vector, y - y0 = k*(x - x0)\nb = model.support_vectors_[-3]\nyy_up = a * xx + (b[1] - a * b[0]) # upper dashed line through a support vector\n\n# view the hyperplane and the margins together\nsns.lmplot(x='Sugar', y='Butter', data=recipes, hue='Type', palette='Set1', fit_reg=False, scatter_kws={\"s\": 70})\nplt.plot(xx, yy, linewidth=2, color='black') # same x range, draw the hyperplane\nplt.plot(xx, yy_down, 'k--') # same x range, draw the lower margin\nplt.plot(xx, yy_up, 'k--') # same x range, draw the upper margin\n# model.support_vectors_ = model.support_vectors_[:], i.e. it can be indexed like a list\nplt.scatter(model.support_vectors_[:, 0], model.support_vectors_[:, 1], s=80, facecolors='none')\n\n\n# plot the test ingredient data point to see where it lands\nsns.lmplot(x='Sugar', y='Butter', data=recipes, hue='Type', palette='Set1', fit_reg=False, scatter_kws={'s': 70})\nplt.plot(xx, yy, linewidth=2, color='black')\nplt.plot(12, 12, 'yo', markersize='9')\n\n\n# print the classification result\ndef muffin_or_cupcake(butter, sugar):\n    if model.predict([[butter, sugar]]) == 0:\n        print('\\nThis is a muffin recipe!')\n    else:\n        print('\\nThis is a cupcake recipe!')\n\n\nmuffin_or_cupcake(12, 12)\n\n# the C parameter controls how strictly misclassified points are penalized\n# fit the model with a low C value\nmodel = svm.SVC(kernel='linear', C=2**-5)\nmodel.fit(sugar_butter, type_label)\n# fit the model with a high C value\nmodel = svm.SVC(kernel='linear', C=2**5)\nmodel.fit(sugar_butter, type_label)\n\n\n\n\n","repo_name":"SurpassHR/SVM","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3211,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"22108331425","text":"import argparse\r\n\r\nparser = argparse.ArgumentParser('Provide movement instructions file path')\r\nparser.add_argument('instructions', help='A path to a file with submarine instructions')\r\n\r\nargs = parser.parse_args()\r\n\r\ninstructions = ''\r\n\r\nwith open(args.instructions, 'r') as file:\r\n    instructions = file.read().strip()\r\n\r\nposition = [0,0]\r\n\r\nprint('Start:', position, '\\n')\r\n\r\nfor 
instruction in instructions.split('\\n'):\r\n    direction, units = instruction.split(' ')\r\n\r\n    print('MOVE', direction, units, 'UNITS')\r\n\r\n    if direction == 'forward':\r\n        position[0] += int(units)\r\n    elif direction == 'down':\r\n        position[1] += int(units)\r\n    elif direction == 'up':\r\n        position[1] -= int(units)\r\n\r\n    print('New position:', position)\r\n\r\nprint()\r\nprint('Final position:', position)\r\nprint('x and y multiplied =', position[0]*position[1])","repo_name":"padajo/aoc2021","sub_path":"Day 2/calculate_position.py","file_name":"calculate_position.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"3299367354","text":"from CRABClient.UserUtilities import config\nconfig = config()\n\nconfig.General.requestName = 'QCDEnrichedGENSIM'\nconfig.General.workArea = 'QCD_pThat-30_EMEnrichedDijet_TuneCP5_HydjetDrumMB_5p02TeV_pythia8'\nconfig.General.transferOutputs = True\n\nconfig.JobType.pluginName = 'PrivateMC'\nconfig.JobType.psetName = 'HIN-HINPbPbAutumn18GSHIMix-00057_1_cfg.py'\n\nconfig.Data.outputPrimaryDataset = 'QCD_pThat-30_EMEnrichedDijet_TuneCP5_HydjetDrumMB_5p02TeV_pythia8'\nconfig.Data.splitting = 'EventBased'\nconfig.Data.unitsPerJob = 100\nNJOBS = 10000  # This is not a configuration parameter, but an auxiliary variable that we use in the next line\nconfig.Data.totalUnits = config.Data.unitsPerJob * NJOBS\nconfig.JobType.numCores = 8\nconfig.JobType.maxMemoryMB = 20000\n\nconfig.Data.publication = True\nconfig.Data.outputDatasetTag ='QCD_pThat-30_EMEnrichedDijet_TuneCP5_HydjetDrumMB_5p02TeV_pythia8'\nconfig.Data.outLFNDirBase = '/store/user/prebello/'\nconfig.Site.storageSite ='T2_US_Vanderbilt'\n","repo_name":"prebello/HIRun3ECALSettings","sub_path":"GENSIM/newcrabQCDEnriched.py","file_name":"newcrabQCDEnriched.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"1588737045","text":"from rest_framework import status\nfrom rest_framework.generics import CreateAPIView, DestroyAPIView\nfrom rest_framework.permissions import AllowAny, IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework_jwt.authentication import JSONWebTokenAuthentication\n\nfrom pichicm_backend.authentication.serializers import (\n    UserCreateSerializer,\n    UserLoginSerializer,\n)\n\n\nclass UserLoginApi(CreateAPIView):\n    \"\"\"Login a user and get back a JWT token\"\"\"\n\n    permission_classes = (AllowAny,)\n    serializer_class = UserLoginSerializer\n\n    def post(self, request):\n        serializer = self.serializer_class(data=request.data)\n        serializer.is_valid(raise_exception=True)\n        status_code = status.HTTP_200_OK\n        response = {\n            \"success\": True,\n            \"status\": status_code,\n            \"message\": \"Successfully logged in User\",\n            \"token\": serializer.data['token'],\n        }\n        return Response(data=response, status=status_code)\n\n\nclass UserCreateApi(CreateAPIView):\n    \"\"\"Create a new user\"\"\"\n\n    permission_classes = (AllowAny,)\n    serializer_class = UserCreateSerializer\n\n    def post(self, request):\n        serializer = self.serializer_class(data=request.data)\n        serializer.is_valid(raise_exception=True)\n        serializer.save()\n        status_code = status.HTTP_201_CREATED\n        response = {\n            \"success\": True,\n            \"status\": status_code,\n            \"message\": \"Successfully created User\",\n            \"data\": serializer.data,\n        }\n        return Response(data=response, status=status_code)\n\n\nclass 
UserDestroyApi(DestroyAPIView):\n \"\"\"\n De-activate User in order to keep user data\n \"\"\"\n\n permission_classes = (IsAuthenticated,)\n authentication_class = JSONWebTokenAuthentication\n\n def destroy(self, request):\n try:\n user = request.user\n user.is_active = False\n user.save()\n\n status_code = status.HTTP_204_NO_CONTENT\n response = {\n \"success\": True,\n \"status\": status_code,\n \"message\": \"User successfully deleted\",\n }\n except Exception as e:\n status_code = status.HTTP_404_NOT_FOUND\n response = {\n \"success\": False,\n \"status\": status_code,\n \"message\": \"User does not exist\",\n \"error\": str(e),\n }\n\n return Response(data=response, status=status_code)","repo_name":"yesh0907/pichi-course-match-backend","sub_path":"pichicm_backend/authentication/apis.py","file_name":"apis.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40248693563","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 4 12:14:18 2017\n\n@author: benjamin\n\"\"\"\n\nimport pandas as pa\nimport matplotlib.pyplot as pyp\nfrom scipy.optimize import curve_fit\n\nbetaDataPath = '/home/benjamin/Research/QuantumChaos/RobnikQuantum/report/tabela.csv'\n\nalphaDataPath = '/home/benjamin/Research/QuantumChaos/RobnikQuantum/report/transportTimesRobnik.csv'\n\ndef plotBetaVsAlpha(ntcol = 'Nt5', percent = 50, s = 0.11, marksize=3, fontsize=8):\n dfb = pa.read_csv(betaDataPath)\n dfb = dfb[dfb['kmin'].isin([2000,4000])]\n dfa = pa.read_csv(alphaDataPath)\n df = dfb.join(dfa.set_index('lambda'), on='lambda')\n x = (df['kmin'] + df['kmax'])/df[ntcol]\n y = df['beta']\n pyp.plot(x,y,'bo', markersize=marksize)\n ii = x.values.argsort()\n xx = x.values[ii]\n yy = y.values[ii]\n popt, pcov = curve_fit(model, xx, yy, bounds=(0,10))\n pyp.plot(xx, model(xx, 0.98, s), 'r-', label='fit')\n pyp.ylabel(r'$\\beta$')\n pyp.xlabel(r'$\\alpha({0}\\%)$'.format(percent))\n pyp.title(r'Lambda billiard. Model: $A = {0}$, $s = {1:.2f}$'.format(0.98, s), fontsize=fontsize)\n return popt\n\ndef plotAll():\n pyp.figure(1)\n pyp.subplot(221)\n plotBetaVsAlpha('Nt9', 90, 0.55)\n pyp.subplot(222)\n plotBetaVsAlpha('Nt8', 80, 0.26)\n pyp.subplot(223)\n plotBetaVsAlpha('Nt7', 70, 0.15)\n pyp.subplot(224)\n plotBetaVsAlpha('Nt5', 50, 0.06)\n pyp.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)\n pyp.savefig('/home/benjamin/Documents/Temp/lambdaBilliardBvsA.eps', format='eps', dpi=1000)\n \ndef plotCoverVsAlpha(ntcol = 'Nt5', percent = 50, s = 0.11, marksize=3, fontsize=8):\n dfb = pa.read_csv(betaDataPath)\n dfb = dfb[dfb['kmin'].isin([2000,4000])]\n dfa = pa.read_csv(alphaDataPath)\n df = dfb.join(dfa.set_index('lambda'), on='lambda')\n x = (df['kmin'] + df['kmax'])/df[ntcol]\n y = df['corr']\n a = max(y)\n pyp.plot(x,y,'bo', markersize=marksize)\n ii = x.values.argsort()\n xx = x.values[ii]\n yy = y.values[ii]\n b, pcov = curve_fit(lambda x,b: model(x,a,b), xx, yy, bounds=(0,10))\n s = b[0] * 0.75\n pyp.plot(xx, model(xx, a, s), 'r-', label='fit')\n pyp.ylabel(r'$C$')\n pyp.xlabel(r'$\\alpha({0}\\%)$'.format(percent))\n pyp.title(r'Lambda billiard. 
Model: $A = {0}$, $s = {1:.2f}$'.format(a, s), fontsize=fontsize)\n\ndef plotAllCover():\n    pyp.figure(1)\n    pyp.subplot(221)\n    plotCoverVsAlpha('Nt9', 90, 0.55)\n    pyp.subplot(222)\n    plotCoverVsAlpha('Nt8', 80, 0.26)\n    pyp.subplot(223)\n    plotCoverVsAlpha('Nt7', 70, 0.15)\n    pyp.subplot(224)\n    plotCoverVsAlpha('Nt5', 50, 0.06)\n    pyp.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)\n    pyp.savefig('/home/benjamin/Documents/Temp/lambdaBilliardAvsAlpha.eps', format='eps', dpi=1000)\n    \ndef model(x, a, b):\n    return a * b * x / (1 + b * x)","repo_name":"benokit/quantum-billiards","sub_path":"custom/robnikBilliardBetaAnalysisOld.py","file_name":"robnikBilliardBetaAnalysisOld.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"41493450716","text":"#time = 0.02-0.03 sec\nimport cv2\nimport numpy as np\nfrom imutils.perspective import four_point_transform\nimport imagehash\nfrom PIL import Image\nimport time\nimport os\n\n\npedestrian_ref_0 = imagehash.average_hash(Image.open(os.path.join('D:\diplom\diplom','resources\pictures','road_signs\pedestrian_cross_ref_0.jpg')))\npedestrian_ref_90 = imagehash.average_hash(Image.open(os.path.join('D:\diplom\diplom','resources\pictures','road_signs\pedestrian_cross_ref_90.jpg')))\npedestrian_ref_180 = imagehash.average_hash(Image.open(os.path.join('D:\diplom\diplom','resources\pictures','road_signs\pedestrian_cross_ref_180.jpg')))\npedestrian_ref_270 = imagehash.average_hash(Image.open(os.path.join('D:\diplom\diplom','resources\pictures','road_signs\pedestrian_cross_ref_270.jpg')))\nbrick_0 = imagehash.average_hash(Image.open(os.path.join('D:\diplom\diplom', 'resources\pictures', 'road_signs','brick.jpg')))\nbrick_45 = imagehash.average_hash(Image.open(os.path.join('D:\diplom\diplom', 'resources\pictures', 'road_signs','brick_45.jpg')))\nbrick_90 = imagehash.average_hash(Image.open(os.path.join('D:\diplom\diplom', 'resources\pictures', 'road_signs','brick_90.jpg')))\nbrick_135 = imagehash.average_hash(Image.open(os.path.join('D:\diplom\diplom', 'resources\pictures', 'road_signs','brick_135.jpg')))\n\ndef hamming_distance(chaine1, chaine2):\n    return sum(c1 != c2 for c1, c2 in zip(chaine1, chaine2))\n\ndef hash_compare_circle(fragment):\n    segment = Image.fromarray(fragment)\n    frag_hash = imagehash.average_hash(segment)\n    angle0 = hamming_distance(str(frag_hash), str(brick_0))\n    angle1 = hamming_distance(str(frag_hash), str(brick_45))\n    angle2 = hamming_distance(str(frag_hash), str(brick_90))\n    angle3 = hamming_distance(str(frag_hash), str(brick_135))\n    print(angle0,angle1,angle2,angle3)\n    if angle0 <= 11 or angle1 <= 11 or angle2 <= 11 or angle3 <= 11:\n        return True\n    else:\n        return False\n\ndef hash_compare(fragment):\n    segment = Image.fromarray(fragment)\n    frag_hash = imagehash.average_hash(segment)\n    angle0 = hamming_distance(str(frag_hash), str(pedestrian_ref_0))\n    angle1 = hamming_distance(str(frag_hash), str(pedestrian_ref_90))\n    angle2 = hamming_distance(str(frag_hash), str(pedestrian_ref_180))\n    angle3 = hamming_distance(str(frag_hash), str(pedestrian_ref_270))\n    if angle0 <= 0 or angle1 <= 0 or angle2 <= 0 or angle3 <= 0:\n        return True\n    else:\n        return False\n\n\ndef search_square_sign(image):\n    # lower and upper color thresholds\n    hsv_min = np.array((0, 92, 86), np.uint8)\n    hsv_max = np.array((255, 255, 255), np.uint8)\n    #hsv_min = np.array((19, 93, 91), np.uint8)\n    #hsv_max = np.array((220, 255, 255), np.uint8)\n\n    # convert to the hsv color space and blur\n    hsv = cv2.cvtColor(image,cv2.COLOR_BGR2HSV)\n    blur = cv2.medianBlur(hsv, 3)\n    # cv2.imshow('blur',hsv_blur)\n\n    # select the color range of the signs we are interested in\n    thresh = cv2.inRange(blur, hsv_min, hsv_max)\n    cv2.imshow('thresh', thresh)\n\n    # mask = cv2.erode(thresh, None, iterations=3)\n    # mask = cv2.dilate(mask, None, iterations=5)\n    # cv2.imshow('mask', mask)\n\n    # extract edges with the Canny detector\n    canny_detector = cv2.Canny(thresh, 25, 200,apertureSize=3)\n    cv2.imshow('canny',canny_detector)\n\n    # find contours in the image after the detector\n    contours = cv2.findContours(canny_detector, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE,)[0]\n\n    # walk the list of detected contours\n    # and try to fit a rectangle into each of them;\n    # if that succeeds, compute the rectangle's area,\n    # and if it is the largest so far,\n    # remember the coordinates of the shape's vertices\n\n    if len(contours) > 0:\n        for cnt in contours:\n            # rect = cv2.minAreaRect(cnt)\n            # width, height = rect[1]\n            # area = width * height\n            # if area > 8000:\n            #     box = cv2.boxPoints(rect)\n            #     box = np.int0(box)\n            #     cv2.drawContours(image, [box], 0, (0, 255, 0), 2)\n            #     fragment = four_point_transform(image, [box][0])\n            #     flag = hash_compare(fragment)\n            #\n            #     if flag:\n            #         coord = rect[0]\n            #         return box, coord\n            center, radius = cv2.minEnclosingCircle(cnt) # fit an enclosing circle\n            centerx = int(center[0])\n            centery = int(center[1])\n            radius = int(radius)\n            area = 3.14 * radius * radius\n            if area > 4000: # area threshold\n\n                rect = cv2.minAreaRect(cnt) # fit a bounding rectangle there\n                box = cv2.boxPoints(rect) # the rectangle's corner points\n                box = np.int0(box)\n                fragment = four_point_transform(image, [box][0]) # cut out the rectangular region\n                cv2.imshow('s', fragment)\n                flag = hash_compare_circle(fragment) # check whether it is a sign; returns True or False\n\n                if flag:\n                    cv2.circle(image, (centerx, centery), radius, (0, 255, 255), 2)\n\ndef main():\n    # capture = cv2.VideoCapture(1)\n\n    while True:\n        # status, frame = capture.read()\n\n        # if not status:\n        #     print('Failed to load stream')\n        #     break\n\n        # frame = cv2.imread(os.path.join('D:\diplom\diplom','resources\pictures','test\left_61cm.jpg'))\n        frame = cv2.imread(os.path.join('D:\diplom\diplom','resources\pictures','test1\left_62cm.jpg'))\n\n        tic = round(time.time(),3)\n        search_square_sign(frame)\n        toc = round(time.time(),3)\n        # cv2.putText(frame,str(toc - tic), (50, 50),cv2.FONT_HERSHEY_COMPLEX, 2,(0,0,255), lineType=cv2.LINE_AA)\n        cv2.imshow('Frame', frame)\n\n\n        if cv2.waitKey(1) & 0xFF == ord('q'):\n            # capture.release()\n            cv2.destroyAllWindows()\n            break\n\n\nif __name__ == '__main__':\n    main()","repo_name":"rafferty-al/Rangefinder","sub_path":"drafts/search_square_signs.py","file_name":"search_square_signs.py","file_ext":"py","file_size_in_byte":6375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"18599178069","text":"import sys\nfrom collections import deque\nN,East,West,South,North = map(int,sys.stdin.readline().split(\" \"))\n\nboard = [[0 for i in range(2*N+2)] for i in range(2*N+2)]\n\ndirection = [[0,1,East],[0,-1,West],[1,0,South],[-1,0,North]]\nstart_X, start_Y = N,N\nuncomplex_prob = 0\n\ndef Brute_force(current_X,current_Y,prev_X,prev_Y,depth):\n    global uncomplex_prob\n\n    if depth == N:\n        uncomplex_prob += board[current_X][current_Y] / (100**(depth));\n        return\n\n    for dir in direction:\n        if depth != 0:\n            if current_X + dir[0] == prev_X and current_Y + dir[1] == prev_Y:\n                
continue;\n if board[current_X + dir[0]][current_Y + dir[1]] == 0:\n board[current_X + dir[0]][current_Y + dir[1]] = board[current_X][current_Y] * dir[2]\n Brute_force(current_X + dir[0],current_Y + dir[1],current_X,current_Y, depth + 1)\n board[current_X + dir[0]][current_Y + dir[1]] = 0\n\n\nboard[N][N] = 1\n\nBrute_force(N,N,N,N,0)\nprint(uncomplex_prob)\n\n\n\n\n\n\n\n\n","repo_name":"Andrevile/Algorithm","sub_path":"BOJ PS/No.1405 미친 로봇.py","file_name":"No.1405 미친 로봇.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5583061622","text":"\"\"\"\nReorder KGTK file columns (while copying)\n\nTODO: Need KgtkWriterOptions\n\"\"\"\n\nfrom argparse import Namespace, SUPPRESS\nimport typing\n\nfrom kgtk.cli_argparse import KGTKArgumentParser, KGTKFiles\n\ndef parser():\n return {\n 'help': 'Perform calculations on KGTK file columns.',\n 'description': 'This command performs calculations on one or more columns in a KGTK file. ' +\n '\\nIf no input filename is provided, the default is to read standard input. ' +\n '\\n\\nAdditional options are shown in expert help.\\nkgtk --expert rename_columns --help'\n }\n\n# Future:\n\n# Numeric:\nABS_OP: str = \"abs\" # column\nDIV_OP: str = \"div\" # (column / column) or (column / value)\nMOD_OP: str = \"mod\" # (column mod column) or (column mod value)\nPOW_OP: str = \"pow\"\nMINUS_OP: str = \"minus\" # (column - column) or (column - value)\nNEGATE_OP: str = \"negate\"\n\n# String\nCENTER_OP: str = \"center\"\nCOUNT_OP: str = \"count\"\nENDSWITH_OP: str = \"endswidth\"\nEXPANDTABS_OP: str = \"expandtabs\"\nFIND_OP: str = \"find\"\nISALNUM_OP: str = \"isalnum\"\n# ...\nLJUST_OP: str = \"ljust\"\nLSTRIP_OP: str = \"lstrip\"\nPARTITION_OP: str = \"partition\"\nREMOVEPREFIX_OP: str = \"removeprefix\"\nREMOVESUFFIX_OP: str = \"removesuffix\"\nRFIND_OP: str = \"rfind\"\nRJUST_OP: str = \"rjust\"\nRPARTITION_OP: str = \"rpartition\"\nRSPLIT_OP: str = \"rsplit\"\nRSTRIP_OP: str = \"rstrip\"\nSPLIT_OP: str = \" split\"\nSPLITLINES_OP: str = \"splitlines\"\nSTARTSWITH_OP: str = \"startswith\"\nSTRIP_OP: str = \"strip\"\nZFILL_OP: str = \"zfill\"\n\n# Implemented:\n\n# Boolean\nAND_OP: str = \"and\" # (boolean, boolean) -> boolean\nNAND_OP: str = \"nand\" # (boolean, boolean) -> boolean\nNOR_OP: str = \"nor\" # (boolean, boolean) -> boolean\nNOT_OP: str = \"not\" # (boolean, ...) 
-> (boolean, ...)\nOR_OP: str = \"or\" # (boolean, boolean) -> boolean\nXOR_OP: str = \"xor\" # (boolean, boolean) -> boolean\n\n# Numeric\nAVERAGE_OP: str = \"average\"\nMAX_OP: str = \"max\"\nMIN_OP: str = \"min\"\nPERCENTAGE_OP: str = \"percentage\"\nSUM_OP: str = \"sum\" # Sums the columns and the values.\nGE_OP: str = \"ge\" # (column > column) or (column > value) -> boolean\nGT_OP: str = \"gt\" # (column >= column) or (column >= value) -> boolean\nLT_OP: str = \"lt\" # (column < column) or (column < value) -> boolean\nLE_OP: str = \"le\" # (column <= column) or (column <= value) -> boolean\nEQ_OP: str = \"eq\" # (column == column) or (column == value) -> boolean\nNE_OP: str = \"ne\" # (column != column) or (column != value) -> boolean\n\n# String\nCAPITALIZE_OP: str = \"capitalize\"\nCASEFOLD_OP: str = \"casefold\"\nJOIN_OP: str = \"join\"\nLOWER_OP: str = \"lower\"\nREPLACE_OP: str = \"replace\"\nSUBSTITUTE_OP: str = \"substitute\"\nSWAPCASE_OP: str = \"swapcase\"\nTITLE_OP: str = \"title\"\nUPPER_OP: str = \"upper\"\n\n# General\nCOPY_OP: str = \"copy\"\nIS_OP: str = \"is\" # (column == column) or (column == value) -> boolean\nIS_IN_OP: str = \"is_in\" # column in values -> boolean\nIS_NOT_OP: str = \"is_not\" # (column != column) or (column != value) -> boolean\nSET_OP: str = \"set\"\n\n# Date/Time\nFROMISOFORMAT_OP: str = \"fromisoformat\"\n\nOPERATIONS: typing.List[str] = [ AND_OP,\n AVERAGE_OP,\n CAPITALIZE_OP,\n CASEFOLD_OP,\n COPY_OP,\n EQ_OP,\n FROMISOFORMAT_OP,\n GE_OP,\n GT_OP,\n IS_OP,\n IS_IN_OP,\n IS_NOT_OP,\n JOIN_OP,\n LOWER_OP,\n LE_OP,\n LT_OP,\n MAX_OP,\n MIN_OP,\n NAND_OP,\n NE_OP,\n NOR_OP,\n NOT_OP,\n OR_OP,\n PERCENTAGE_OP,\n REPLACE_OP,\n SET_OP,\n SUBSTITUTE_OP,\n SUM_OP,\n SWAPCASE_OP,\n TITLE_OP,\n UPPER_OP,\n XOR_OP,\n ]\n\ndef add_arguments_extended(parser: KGTKArgumentParser, parsed_shared_args: Namespace):\n \"\"\"\n Parse arguments\n Args:\n parser (argparse.ArgumentParser)\n \"\"\"\n # import modules locally\n from kgtk.io.kgtkreader import KgtkReader, KgtkReaderOptions\n from kgtk.utils.argparsehelpers import optional_bool\n from kgtk.value.kgtkvalueoptions import KgtkValueOptions\n\n _expert: bool = parsed_shared_args._expert\n\n # This helper function makes it easy to suppress options from\n # The help message. The options are still there, and initialize\n # what they need to initialize.\n def h(msg: str)->str:\n if _expert:\n return msg\n else:\n return SUPPRESS\n\n parser.add_input_file()\n parser.add_output_file()\n\n parser.add_argument( \"--output-format\", dest=\"output_format\", help=h(\"The file format (default=kgtk)\"), type=str)\n\n parser.add_argument('-c', \"--columns\", dest=\"column_names_list\", nargs='*', metavar=\"COLUMN_NAME\", action='append',\n help=\"The list of source column names, optionally containing '..' for column ranges \" +\n \"and '...' 
for column names not explicitly mentioned.\")\n\n parser.add_argument( \"--into\", dest=\"into_column_names_list\", nargs='+', metavar=\"COLUMN_NAME\", action='append',\n help=\"The name of the column to receive the result of the calculation.\",\n required=True)\n\n parser.add_argument( \"--do\", dest=\"operation\", help=\"The name of the operation.\", required=True,\n choices=OPERATIONS)\n\n parser.add_argument( \"--values\", dest=\"values_list\", nargs='*', metavar=\"VALUES\", action='append',\n help=\"An optional list of values\")\n\n parser.add_argument( \"--with-values\", dest=\"with_values_list\", nargs='*', metavar=\"WITH_VALUES\", action='append',\n help=\"An optional list of additional values\")\n\n parser.add_argument( \"--limit\", dest=\"limit\", type=int,\n help=\"A limit count.\")\n\n parser.add_argument( \"--format\", dest=\"format_string\", help=\"The format string for the calculation.\")\n\n KgtkReader.add_debug_arguments(parser, expert=_expert)\n KgtkReaderOptions.add_arguments(parser, mode_options=True, expert=_expert)\n KgtkValueOptions.add_arguments(parser, expert=_expert)\n\ndef flatten_arg_list(arg: typing.Optional[typing.List[typing.List[str]]])->typing.List[str]:\n result: typing.List[str] = [ ]\n if arg is None:\n return result\n\n arglist: typing.List[str]\n for arglist in arg:\n value: str\n for value in arglist:\n if value is not None:\n result.append(value)\n\n return result\n\ndef run(input_file: KGTKFiles,\n output_file: KGTKFiles,\n output_format: typing.Optional[str],\n\n column_names_list: typing.List[typing.List[str]],\n into_column_names_list: typing.List[typing.List[str]],\n operation: str,\n values_list: typing.List[typing.List[str]],\n with_values_list: typing.List[typing.List[str]],\n limit: typing.Optional[int],\n format_string: typing.Optional[str],\n\n errors_to_stdout: bool = False,\n errors_to_stderr: bool = True,\n show_options: bool = False,\n verbose: bool = False,\n very_verbose: bool = False,\n\n **kwargs # Whatever KgtkFileOptions and KgtkValueOptions want.\n)->int:\n # import modules locally\n import datetime as dt\n from pathlib import Path\n import re\n import sys\n\n from kgtk.exceptions import KGTKException\n from kgtk.kgtkformat import KgtkFormat\n from kgtk.io.kgtkreader import KgtkReader, KgtkReaderOptions\n from kgtk.io.kgtkwriter import KgtkWriter\n from kgtk.value.kgtkvalueoptions import KgtkValueOptions\n from kgtk.value.kgtkvalue import KgtkValue\n\n input_kgtk_file: Path = KGTKArgumentParser.get_input_file(input_file)\n output_kgtk_file: Path = KGTKArgumentParser.get_output_file(output_file)\n\n # Select where to send error messages, defaulting to stderr.\n error_file: typing.TextIO = sys.stdout if errors_to_stdout else sys.stderr\n\n # Build the option structures.\n reader_options: KgtkReaderOptions = KgtkReaderOptions.from_dict(kwargs)\n value_options: KgtkValueOptions = KgtkValueOptions.from_dict(kwargs)\n\n # Flatten the input lists.\n column_names: typing.List[str] = flatten_arg_list(column_names_list)\n into_column_names: typing.List[str] = flatten_arg_list(into_column_names_list)\n values: typing.List[str] = flatten_arg_list(values_list)\n with_values: typing.List[str] = flatten_arg_list(with_values_list)\n\n # Show the final option structures for debugging and documentation.\n if show_options:\n print(\"--input-file=%s\" % str(input_kgtk_file), file=error_file, flush=True)\n print(\"--output-file=%s\" % str(output_kgtk_file), file=error_file, flush=True)\n if output_format is not None:\n print(\"--output-format=%s\" % 
output_format, file=error_file, flush=True)\n if len(column_names) > 0:\n print(\"--columns %s\" % \" \".join(column_names), file=error_file, flush=True)\n if len(into_column_names) > 0:\n print(\"--into %s\" % \" \".join(into_column_names), file=error_file, flush=True)\n print(\"--operation=%s\" % str(operation), file=error_file, flush=True)\n if len(values) > 0:\n print(\"--values %s\" % \" \".join(values), file=error_file, flush=True)\n if len(with_values) > 0:\n print(\"--with-values %s\" % \" \".join(with_values), file=error_file, flush=True)\n if limit is not None:\n print(\"--limit %d\" % limit, file=error_file, flush=True)\n if format_string is not None:\n print(\"--format=%s\" % format_string, file=error_file, flush=True)\n\n reader_options.show(out=error_file)\n value_options.show(out=error_file)\n print(\"=======\", file=error_file, flush=True)\n\n try:\n\n if verbose:\n print(\"Opening the input file %s\" % str(input_kgtk_file), file=error_file, flush=True)\n kr = KgtkReader.open(input_kgtk_file,\n options=reader_options,\n value_options = value_options,\n error_file=error_file,\n verbose=verbose,\n very_verbose=very_verbose,\n )\n\n remaining_names: typing.List[str] = kr.column_names.copy()\n selected_names: typing.List[str] = [ ]\n save_selected_names: typing.Optional[typing.List[str]] = None\n\n ellipses: str = \"...\" # All unmentioned columns\n ranger: str = \"..\" # All columns between two columns.\n\n idx: int\n\n saw_ranger: bool = False\n column_name: str\n for column_name in column_names:\n if column_name == ellipses:\n if save_selected_names is not None:\n raise KGTKException(\"Elipses may appear only once\")\n\n if saw_ranger:\n raise KGTKException(\"Elipses may not appear directly after a range operator ('..').\")\n\n save_selected_names = selected_names\n selected_names = [ ]\n continue\n\n if column_name == ranger:\n if len(selected_names) == 0:\n raise KGTKException(\"The column range operator ('..') may not appear without a preceeding column name.\")\n saw_ranger = True\n continue\n\n if column_name not in kr.column_names:\n raise KGTKException(\"Unknown column name '%s'.\" % column_name)\n if column_name not in remaining_names:\n raise KGTKException(\"Column name '%s' was duplicated in the list.\" % column_name)\n\n if saw_ranger:\n saw_ranger = False\n prior_column_name: str = selected_names[-1]\n prior_column_idx: int = kr.column_name_map[prior_column_name]\n column_name_idx: int = kr.column_name_map[column_name]\n start_idx: int\n end_idx: int\n idx_inc: int\n if column_name_idx > prior_column_idx:\n start_idx = prior_column_idx + 1\n end_idx = column_name_idx - 1\n idx_inc = 1\n else:\n start_idx = prior_column_idx - 1\n end_idx = column_name_idx + 1\n idx_inc = -1\n\n idx = start_idx\n while idx <= end_idx:\n idx_column_name: str = kr.column_names[idx]\n if idx_column_name not in remaining_names:\n raise KGTKException(\"Column name '%s' (%s .. 
%s) was duplicated in the list.\" % (column_name, prior_column_name, column_name))\n \n selected_names.append(idx_column_name)\n remaining_names.remove(idx_column_name)\n idx += idx_inc\n\n selected_names.append(column_name)\n remaining_names.remove(column_name)\n\n if saw_ranger:\n raise KGTKException(\"The column ranger operator ('..') may not end the list of column names.\")\n\n if len(remaining_names) > 0 and save_selected_names is None:\n if verbose:\n print(\"Omitting the following columns: %s\" % \" \".join(remaining_names), file=error_file, flush=True)\n if save_selected_names is not None:\n if len(remaining_names) > 0:\n save_selected_names.extend(remaining_names)\n if len(selected_names) > 0:\n save_selected_names.extend(selected_names)\n selected_names = save_selected_names\n\n sources: typing.List[int] = [ ]\n name: str\n for name in selected_names:\n sources.append(kr.column_name_map[name])\n\n new_column_count: int = 0\n into_column_idxs: typing.List[int] = [ ]\n into_column_idx: int\n output_column_names: typing.List[str] = kr.column_names.copy()\n into_column_name: str\n for idx, into_column_name in enumerate(into_column_names):\n if into_column_name in kr.column_name_map:\n into_column_idx = kr.column_name_map[into_column_name]\n into_column_idxs.append(into_column_idx)\n if verbose:\n print(\"Putting result %d of the calculation into old column %d (%s).\" % (idx + 1, into_column_idx, into_column_name), file=error_file, flush=True)\n else:\n new_column_count += 1\n into_column_idx = len(output_column_names)\n into_column_idxs.append(into_column_idx)\n output_column_names.append(into_column_name)\n if verbose:\n print(\"Putting result %d of the calculation into new column %d (%s).\" % (idx + 1, into_column_idx, into_column_name), file=error_file, flush=True)\n\n if verbose:\n print(\"Opening the output file %s\" % str(output_kgtk_file), file=error_file, flush=True)\n kw: KgtkWriter = KgtkWriter.open(output_column_names,\n output_kgtk_file,\n require_all_columns=True,\n prohibit_extra_columns=True,\n fill_missing_columns=False,\n gzip_in_parallel=False,\n mode=KgtkWriter.Mode[kr.mode.name],\n output_format=output_format,\n verbose=verbose,\n very_verbose=very_verbose,\n )\n\n if limit is None:\n limit = 0\n\n substitute_re: typing.Optional[typing.Pattern] = None\n\n if operation == AND_OP:\n if len(sources) == 0:\n raise KGTKException(\"And needs at least one source, got %d\" % len(sources))\n if len(into_column_idxs) != 1:\n raise KGTKException(\"And needs 1 destination column, got %d\" % len(into_column_idxs))\n\n elif operation == AVERAGE_OP:\n if len(sources) == 0:\n raise KGTKException(\"Average needs at least one source, got %d\" % len(sources))\n if len(into_column_idxs) != 1:\n raise KGTKException(\"Average needs 1 destination column, got %d\" % len(into_column_idxs))\n\n elif operation == CAPITALIZE_OP:\n if len(sources) == 0:\n raise KGTKException(\"Capitalize needs at least one source, got %d\" % len(sources))\n if len(sources) != len(into_column_idxs):\n raise KGTKException(\"Capitalize needs the same number of input columns and into columns, got %d and %d\" % (len(sources), len(into_column_idxs)))\n\n elif operation == CASEFOLD_OP:\n if len(sources) == 0:\n raise KGTKException(\"Casefold needs at least one source, got %d\" % len(sources))\n if len(sources) != len(into_column_idxs):\n raise KGTKException(\"Casefold needs the same number of input columns and into columns, got %d and %d\" % (len(sources), len(into_column_idxs)))\n\n elif operation == COPY_OP:\n if 
len(sources) == 0:\n raise KGTKException(\"Copy needs at least one source, got %d\" % len(sources))\n if len(selected_names) != len(into_column_idxs):\n raise KGTKException(\"Copy needs the same number of input columns and into columns, got %d and %d\" % (len(selected_names), len(into_column_idxs)))\n\n elif operation == EQ_OP:\n if (len(sources) == 2 and len(values) == 0) or (len(sources) == 1 and len(values) == 1):\n raise KGTKException(\"Eq needs two sources or one source and one value, got %d sources and %d values\" % (len(sources), len(values)))\n if len(into_column_idxs) != 1:\n raise KGTKException(\"Eq needs 1 destination columns, got %d\" % len(into_column_idxs))\n\n elif operation == FROMISOFORMAT_OP:\n if len(sources) != 1:\n raise KGTKException(\"Fromisoformat needs one source, got %d\" % len(sources))\n if len(values) != len(into_column_idxs):\n raise KGTKException(\"Fromisoformat needs the same number of values and into columns, got %d and %d\" % (len(values), len(into_column_idxs)))\n\n elif operation == GE_OP:\n if (len(sources) == 2 and len(values) == 0) or (len(sources) == 1 and len(values) == 1):\n raise KGTKException(\"Ge needs two sources or one source and one value, got %d sources and %d values\" % (len(sources), len(values)))\n if len(into_column_idxs) != 1:\n raise KGTKException(\"Ge needs 1 destination columns, got %d\" % len(into_column_idxs))\n\n elif operation == GT_OP:\n if (len(sources) == 2 and len(values) == 0) or (len(sources) == 1 and len(values) == 1):\n raise KGTKException(\"Gt needs two sources or one source and one value, got %d sources and %d values\" % (len(sources), len(values)))\n if len(into_column_idxs) != 1:\n raise KGTKException(\"Gt needs 1 destination columns, got %d\" % len(into_column_idxs))\n\n elif operation == IS_OP:\n if (len(sources) == 2 and len(values) == 0) or (len(sources) == 1 and len(values) == 1):\n raise KGTKException(\"Is needs two sources or one source and one value, got %d sources and %d values\" % (len(sources), len(values)))\n if len(into_column_idxs) != 1:\n raise KGTKException(\"Is needs 1 destination columns, got %d\" % len(into_column_idxs))\n\n elif operation == IS_IN_OP:\n if len(sources) != 1:\n raise KGTKException(\"Is in needs one source, got %d\" % len(sources))\n if len(values) == 0:\n raise KGTKException(\"Is in needs at least one value, got %d\" % len(values))\n if len(into_column_idxs) != 1:\n raise KGTKException(\"Is in needs 1 destination columns, got %d\" % len(into_column_idxs))\n\n elif operation == IS_NOT_OP:\n if (len(sources) == 2 and len(values) == 0) or (len(sources) == 1 and len(values) == 1):\n raise KGTKException(\"Is not needs two sources or one source and one value, got %d sources and %d values\" % (len(sources), len(values)))\n if len(into_column_idxs) != 1:\n raise KGTKException(\"Is not needs 1 destination columns, got %d\" % len(into_column_idxs))\n\n elif operation == JOIN_OP:\n if len(sources) == 0:\n raise KGTKException(\"Join needs at least one source, got %d\" % len(sources))\n if len(into_column_idxs) != 1:\n raise KGTKException(\"Join needs 1 destination columns, got %d\" % len(into_column_idxs))\n if len(values) != 1:\n raise KGTKException(\"Join needs 1 value, got %d\" % len(values))\n\n elif operation == LE_OP:\n if (len(sources) == 2 and len(values) == 0) or (len(sources) == 1 and len(values) == 1):\n raise KGTKException(\"Le needs two sources or one source and one value, got %d sources and %d values\" % (len(sources), len(values)))\n if len(into_column_idxs) != 1:\n raise 
KGTKException(\"Le needs 1 destination columns, got %d\" % len(into_column_idxs))\n\n elif operation == LT_OP:\n if (len(sources) == 2 and len(values) == 0) or (len(sources) == 1 and len(values) == 1):\n raise KGTKException(\"Lt needs two sources or one source and one value, got %d sources and %d values\" % (len(sources), len(values)))\n if len(into_column_idxs) != 1:\n raise KGTKException(\"Lt needs 1 destination columns, got %d\" % len(into_column_idxs))\n\n elif operation == LOWER_OP:\n if len(sources) == 0:\n raise KGTKException(\"Lower needs at least one source, got %d\" % len(sources))\n if len(sources) != len(into_column_idxs):\n raise KGTKException(\"Lower needs the same number of input columns and into columns, got %d and %d\" % (len(sources), len(into_column_idxs)))\n\n elif operation == MAX_OP:\n if len(sources) == 0:\n raise KGTKException(\"Max needs at least one source, got %d\" % len(sources))\n if len(into_column_idxs) != 1:\n raise KGTKException(\"Max needs 1 destination columns, got %d\" % len(into_column_idxs))\n\n elif operation == MIN_OP:\n if len(sources) == 0:\n raise KGTKException(\"Min needs at least one source, got %d\" % len(sources))\n if len(into_column_idxs) != 1:\n raise KGTKException(\"Min needs 1 destination columns, got %d\" % len(into_column_idxs))\n\n elif operation == NE_OP:\n if (len(sources) == 2 and len(values) == 0) or (len(sources) == 1 and len(values) == 1):\n raise KGTKException(\"Ne needs two sources or one source and one value, got %d sources and %d values\" % (len(sources), len(values)))\n if len(into_column_idxs) != 1:\n raise KGTKException(\"Ne needs 1 destination columns, got %d\" % len(into_column_idxs))\n\n elif operation == NOR_OP:\n if len(sources) == 0:\n raise KGTKException(\"Nor needs at least one source, got %d\" % len(sources))\n if len(into_column_idxs) != 1:\n raise KGTKException(\"Nor needs 1 destination column, got %d\" % len(into_column_idxs))\n\n elif operation == NOT_OP:\n if len(sources) == 0:\n raise KGTKException(\"Not needs at least one source, got %d\" % len(sources))\n if len(into_column_idxs) != len(sources):\n raise KGTKException(\"Nand needs the same number of input columns and into colums, got %d and %d\" % (len(sources), len(into_column_idxs)))\n\n elif operation == OR_OP:\n if len(sources) == 0:\n raise KGTKException(\"Or needs at least one source, got %d\" % len(sources))\n if len(into_column_idxs) != 1:\n raise KGTKException(\"Or needs 1 destination column, got %d\" % len(into_column_idxs))\n\n elif operation == PERCENTAGE_OP:\n if len(into_column_idxs) != 1:\n raise KGTKException(\"Percent needs 1 destination columns, got %d\" % len(into_column_idxs))\n if len(selected_names) != 2:\n raise KGTKException(\"Percent needs 2 input columns, got %d\" % len(selected_names))\n\n elif operation == REPLACE_OP:\n if len(into_column_idxs) != 1:\n raise KGTKException(\"Replace needs 1 destination column, got %d\" % len(into_column_idxs))\n if len(selected_names) != 1:\n raise KGTKException(\"Replace needs 1 input column, got %d\" % len(selected_names))\n if len(values) != 1:\n raise KGTKException(\"Replace needs one value, got %d\" % len(values))\n if len(with_values) != 1:\n raise KGTKException(\"Replace needs one with-value, got %d\" % len(with_values))\n\n elif operation == SET_OP:\n if len(sources) != 0:\n raise KGTKException(\"Set needs no sources, got %d\" % len(sources))\n if len(into_column_idxs) == 0:\n raise KGTKException(\"Set needs at least one destination column, got %d\" % len(into_column_idxs))\n if 
len(values) == 0:\n raise KGTKException(\"Set needs at least one value, got %d\" % len(values))\n if len(into_column_idxs) != len(values):\n raise KGTKException(\"Set needs the same number of destination columns and values, got %d and %d\" % (len(into_column_idxs), len(values)))\n\n elif operation == SUBSTITUTE_OP:\n if len(into_column_idxs) != 1:\n raise KGTKException(\"Substitute needs 1 destination column, got %d\" % len(into_column_idxs))\n if len(selected_names) != 1:\n raise KGTKException(\"Substitute needs 1 input column, got %d\" % len(selected_names))\n if len(values) != 1:\n raise KGTKException(\"Substitute needs one value, got %d\" % len(values))\n if len(with_values) != 1:\n raise KGTKException(\"Substitute needs one with-value, got %d\" % len(with_values))\n substitute_re = re.compile(values[0])\n\n elif operation == SUM_OP:\n if len(sources) == 0:\n raise KGTKException(\"Sum needs at least one source, got %d\" % len(sources))\n if len(into_column_idxs) != 1:\n raise KGTKException(\"Sum needs 1 destination columns, got %d\" % len(into_column_idxs))\n\n elif operation == SWAPCASE_OP:\n if len(sources) == 0:\n raise KGTKException(\"Swapcase needs at least one source, got %d\" % len(sources))\n if len(sources) != len(into_column_idxs):\n raise KGTKException(\"Swapcase needs the same number of input columns and into columns, got %d and %d\" % (len(sources), len(into_column_idxs)))\n\n elif operation == TITLE_OP:\n if len(sources) == 0:\n raise KGTKException(\"Title needs at least one source, got %d\" % len(sources))\n if len(sources) != len(into_column_idxs):\n raise KGTKException(\"Title needs the same number of input columns and into columns, got %d and %d\" % (len(sources), len(into_column_idxs)))\n\n elif operation == UPPER_OP:\n if len(sources) == 0:\n raise KGTKException(\"Upper needs at least one source, got %d\" % len(sources))\n if len(sources) != len(into_column_idxs):\n raise KGTKException(\"Upper needs the same number of input columns and into columns, got %d and %d\" % (len(sources), len(into_column_idxs)))\n\n elif operation == XOR_OP:\n if len(sources) == 0:\n raise KGTKException(\"Xor needs at least one source, got %d\" % len(sources))\n if len(into_column_idxs) != 1:\n raise KGTKException(\"Xor needs 1 destination column, got %d\" % len(into_column_idxs))\n\n\n fs: str = format_string if format_string is not None else \"%5.2f\"\n item: str\n item2: str\n kv: KgtkValue\n bresult: bool\n\n into_column_idx = into_column_idxs[0] # for convenience\n\n input_data_lines: int = 0\n row: typing.List[str]\n for row in kr:\n input_data_lines += 1\n\n output_row: typing.List[str] = row.copy()\n for idx in range(new_column_count):\n output_row.append(\"\") # Easiest way to add a new column.\n\n if operation == AND_OP:\n bresult = True\n for idx in sources:\n kv = KgtkValue(row[idx])\n if kv.is_boolean():\n bresult = bresult and kv.is_true()\n\n output_row[into_column_idx] = KgtkValue.to_boolean(bresult)\n\n elif operation == AVERAGE_OP:\n atotal: float = 0\n acount: int = 0\n for idx in sources:\n item = row[idx]\n if len(item) > 0:\n atotal += float(item)\n acount += 1\n output_row[into_column_idx] = (fs % (atotal / float(acount))) if acount > 0 else \"\" \n\n elif operation == CAPITALIZE_OP:\n for idx in range(len(sources)):\n output_row[into_column_idxs[idx]] = row[sources[idx]].capitalize()\n\n elif operation == CASEFOLD_OP:\n for idx in range(len(sources)):\n output_row[into_column_idxs[idx]] = row[sources[idx]].casefold()\n\n elif operation == COPY_OP:\n for idx in 
range(len(sources)):\n output_row[into_column_idxs[idx]] = row[sources[idx]]\n\n elif operation == EQ_OP:\n if len(sources) == 1:\n if len(row[sources[0]]) > 0 and len(row[sources[1]]) > 0:\n output_row[into_column_idx] = KgtkValue.to_boolean(float(row[sources[0]]) == float(row[sources[1]]))\n else:\n output_row[into_column_idx] = \"\"\n else:\n if len(row[sources[0]]) > 0 and len(values[0]) > 0:\n output_row[into_column_idx] = KgtkValue.to_boolean(float(row[sources[0]]) == float(values[0]))\n else:\n output_row[into_column_idx] = \"\"\n\n elif operation == FROMISOFORMAT_OP:\n dtval: str = row[sources[0]]\n if dtval.startswith(KgtkFormat.DATE_AND_TIMES_SIGIL):\n kgtkdatestr: str = row[sources[0]][1:] # Strip the leading ^\n isodatestr: str\n precisionstr: str\n if \"/\" in kgtkdatestr:\n isodatestr, precisionstr = kgtkdatestr.split(\"/\")\n else:\n isodatestr = kgtkdatestr\n precisionstr = \"\"\n if isodatestr.endswith(\"Z\"):\n isodatestr = isodatestr[:-1]\n\n into_idx: int\n value_name: str\n try:\n dtvar: dt.datetime = dt.datetime.fromisoformat(isodatestr)\n for idx in range(len(values)):\n value_name = values[idx]\n into_idx = into_column_idxs[idx]\n \n if value_name == \"year\":\n output_row[into_idx] = str(dtvar.year)\n\n elif value_name == \"month\":\n output_row[into_idx] = str(dtvar.month)\n \n elif value_name == \"day\":\n output_row[into_idx] = str(dtvar.day)\n\n elif value_name == \"hour\":\n output_row[into_idx] = str(dtvar.hour)\n \n elif value_name == \"minute\":\n output_row[into_idx] = str(dtvar.minute)\n \n elif value_name == \"second\":\n output_row[into_idx] = str(dtvar.second)\n \n elif value_name == \"microsecond\":\n output_row[into_idx] = str(dtvar.microsecond)\n\n elif value_name == \"error\":\n output_row[into_idx] = \"\"\n\n else:\n raise KGTKException(\"Unknown date component %s\" % repr(value_name))\n\n except ValueError as e:\n print(\"Error parsing %s in [%s]: %s\" % (repr(isodatestr), \"|\".join([repr(x) for x in row]), str(e)),\n file=error_file, flush=True)\n\n for idx in range(len(values)):\n value_name = values[idx]\n into_idx = into_column_idxs[idx]\n if value_name == \"error\":\n output_row[into_idx] = str(e)\n else:\n output_row[into_idx] = \"\"\n\n else:\n # Not a date/time value, clear the result columns.\n for idx in range(len(values)):\n output_row[into_column_idxs[idx]] = \"\"\n \n elif operation == GE_OP:\n if len(sources) == 1:\n if len(row[sources[0]]) > 0 and len(row[sources[1]]) > 0:\n output_row[into_column_idx] = KgtkValue.to_boolean(float(row[sources[0]]) >= float(row[sources[1]]))\n else:\n output_row[into_column_idx] = \"\"\n else:\n if len(row[sources[0]]) > 0 and len(values[0]) > 0:\n output_row[into_column_idx] = KgtkValue.to_boolean(float(row[sources[0]]) >= float(values[0]))\n else:\n output_row[into_column_idx] = \"\"\n\n elif operation == GT_OP:\n if len(sources) == 1:\n if len(row[sources[0]]) > 0 and len(row[sources[1]]) > 0:\n output_row[into_column_idx] = KgtkValue.to_boolean(float(row[sources[0]]) > float(row[sources[1]]))\n else:\n output_row[into_column_idx] = \"\"\n else:\n if len(row[sources[0]]) > 0 and len(values[0]) > 0:\n output_row[into_column_idx] = KgtkValue.to_boolean(float(row[sources[0]]) > float(values[0]))\n else:\n output_row[into_column_idx] = \"\"\n\n elif operation == IS_OP:\n if len(sources) == 1:\n output_row[into_column_idx] = KgtkValue.to_boolean(row[sources[0]] == row[sources[1]])\n else:\n output_row[into_column_idx] = KgtkValue.to_boolean(row[sources[0]] == values[0])\n\n elif operation == 
IS_IN_OP:\n                bresult = False\n                item = row[sources[0]]\n                for item2 in values:\n                    if item == item2:\n                        bresult = True\n                        break\n                output_row[into_column_idx] = KgtkValue.to_boolean(bresult)\n\n            elif operation == IS_NOT_OP:\n                if len(sources) == 2:\n                    output_row[into_column_idx] = KgtkValue.to_boolean(row[sources[0]] != row[sources[1]])\n                else:\n                    output_row[into_column_idx] = KgtkValue.to_boolean(row[sources[0]] != values[0])\n\n            elif operation == JOIN_OP:\n                output_row[into_column_idx] = values[0].join((row[sources[idx]] for idx in range(len(sources))))\n\n            elif operation == LE_OP:\n                if len(sources) == 2:\n                    if len(row[sources[0]]) > 0 and len(row[sources[1]]) > 0:\n                        output_row[into_column_idx] = KgtkValue.to_boolean(float(row[sources[0]]) <= float(row[sources[1]]))\n                    else:\n                        output_row[into_column_idx] = \"\"\n                else:\n                    if len(row[sources[0]]) > 0 and len(values[0]) > 0:\n                        output_row[into_column_idx] = KgtkValue.to_boolean(float(row[sources[0]]) <= float(values[0]))\n                    else:\n                        output_row[into_column_idx] = \"\"\n\n            elif operation == LT_OP:\n                if len(sources) == 2:\n                    if len(row[sources[0]]) > 0 and len(row[sources[1]]) > 0:\n                        output_row[into_column_idx] = KgtkValue.to_boolean(float(row[sources[0]]) < float(row[sources[1]]))\n                    else:\n                        output_row[into_column_idx] = \"\"\n                else:\n                    if len(row[sources[0]]) > 0 and len(values[0]) > 0:\n                        output_row[into_column_idx] = KgtkValue.to_boolean(float(row[sources[0]]) < float(values[0]))\n                    else:\n                        output_row[into_column_idx] = \"\"\n\n            elif operation == LOWER_OP:\n                for idx in range(len(sources)):\n                    output_row[into_column_idxs[idx]] = row[sources[idx]].lower()\n\n            elif operation == MAX_OP:\n                max_result: typing.Optional[float] = None\n                for idx in sources:\n                    item = row[idx]\n                    if len(item) > 0:\n                        max_value: float = float(item)\n                        if max_result is None or max_value > max_result:\n                            max_result = max_value\n                output_row[into_column_idx] = (fs % max_result) if max_result is not None else \"\"\n\n            elif operation == MIN_OP:\n                min_result: typing.Optional[float] = None\n                for idx in sources:\n                    item = row[idx]\n                    if len(item) > 0:\n                        min_value: float = float(item)\n                        if min_result is None or min_value < min_result:\n                            min_result = min_value\n                output_row[into_column_idx] = (fs % min_result) if min_result is not None else \"\"\n\n            elif operation == NAND_OP:\n                bresult = True\n                for idx in sources:\n                    kv = KgtkValue(row[idx])\n                    if kv.is_boolean():\n                        bresult = bresult and kv.is_true()\n\n                output_row[into_column_idx] = KgtkValue.to_boolean(not bresult)\n\n            elif operation == NE_OP:\n                if len(sources) == 2:\n                    if len(row[sources[0]]) > 0 and len(row[sources[1]]) > 0:\n                        output_row[into_column_idx] = KgtkValue.to_boolean(float(row[sources[0]]) != float(row[sources[1]]))\n                    else:\n                        output_row[into_column_idx] = \"\"\n                else:\n                    if len(row[sources[0]]) > 0 and len(values[0]) > 0:\n                        output_row[into_column_idx] = KgtkValue.to_boolean(float(row[sources[0]]) != float(values[0]))\n                    else:\n                        output_row[into_column_idx] = \"\"\n\n            elif operation == NOR_OP:\n                bresult = False\n                for idx in sources:\n                    kv = KgtkValue(row[idx])\n                    if kv.is_boolean():\n                        bresult = bresult or kv.is_true()\n\n                output_row[into_column_idx] = KgtkValue.to_boolean(not bresult)\n\n            elif operation == NOT_OP:\n                for idx in range(len(sources)):\n                    kv = KgtkValue(row[sources[idx]])\n                    if kv.is_boolean():\n                        output_row[into_column_idxs[idx]] = KgtkValue.to_boolean(not kv.is_true())\n                    else:\n                        output_row[into_column_idxs[idx]] = \"\"\n\n            elif operation == OR_OP:\n                bresult = False\n                for idx in sources:\n                    kv = KgtkValue(row[idx])\n                    if kv.is_boolean():\n                        bresult = bresult or 
kv.is_true()\n\n output_row[into_column_idx] = KgtkValue.to_boolean(bresult)\n\n elif operation == PERCENTAGE_OP:\n output_row[into_column_idx] = fs % (float(row[sources[0]]) * 100 / float(row[sources[1]]))\n\n elif operation == REPLACE_OP:\n if limit == 0:\n output_row[into_column_idx] = row[sources[0]].replace(values[0], with_values[0])\n else:\n output_row[into_column_idx] = row[sources[0]].replace(values[0], with_values[0], limit)\n\n elif operation == SET_OP:\n for idx in range(len(values)):\n output_row[into_column_idxs[idx]] = values[idx]\n\n elif operation == SUBSTITUTE_OP and substitute_re is not None:\n output_row[into_column_idx] = substitute_re.sub(with_values[0], row[sources[0]], count=limit)\n\n elif operation == SUM_OP:\n total: float = 0\n for idx in sources:\n item = row[idx]\n if len(item) > 0:\n total += float(item)\n for item in values:\n if len(item) > 0:\n total += float(item)\n output_row[into_column_idx] = fs % total\n \n elif operation == SWAPCASE_OP:\n for idx in range(len(sources)):\n output_row[into_column_idxs[idx]] = row[sources[idx]].swapcase()\n\n elif operation == TITLE_OP:\n for idx in range(len(sources)):\n output_row[into_column_idxs[idx]] = row[sources[idx]].title()\n\n elif operation == UPPER_OP:\n for idx in range(len(sources)):\n output_row[into_column_idxs[idx]] = row[sources[idx]].upper()\n\n elif operation == XOR_OP:\n bresult = False\n for idx in sources:\n kv = KgtkValue(row[idx])\n if kv.is_boolean():\n bresult = bresult != kv.is_true()\n\n output_row[into_column_idx] = KgtkValue.to_boolean(bresult)\n\n kw.write(output_row)\n\n # Flush the output file so far:\n kw.flush()\n\n if verbose:\n print(\"Read %d data lines from file %s\" % (input_data_lines, input_kgtk_file), file=error_file, flush=True)\n\n kw.close()\n\n return 0\n\n except SystemExit as e:\n raise KGTKException(\"Exit requested\")\n except Exception as e:\n raise KGTKException(str(e))\n\n","repo_name":"VincentWei2021/kgtk","sub_path":"kgtk/cli/calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":43607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"20390895608","text":"from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy \nfrom flask_restful import Resource, Api\nfrom decimal import *\nfrom importlib import import_module\n\nfrom models import Transaction, TransactionSchema\nfrom services import chargeservice as cs\n\napp = Flask(__name__)\nsqldatabase = 'mysql://admin@localhost:3306/main'\n# sqldatabase = 'sqlite:///tmp/main.db'\napp.config.update(\n SQLALCHEMY_DATABASE_URI=sqldatabase,\n SQLALCHEMY_TRACK_MODIFICATIONS=False\n)\ndb = SQLAlchemy(app)\npayservice = Api(app)\n\nclass Home(Resource):\n def __index(self):\n return 'index'\n\n def get(self):\n return self.__index()\n \nclass Payment(Resource):\n def __pay(self):\n credit_card = cs.CreditCard()\n CONSTANTS = import_module('services.constants')\n return credit_card.charge_credit_card(CONSTANTS.amount)\n\n def get(self):\n receipt = self.__pay()\n return receipt\n \nclass TransactionList(Resource):\n def __output(self):\n # initialize_database()\n transaction1 = Transaction(transaction_id='2321112', payment_amt='300')\n transaction2 = Transaction(transaction_id='1326792', payment_amt='422')\n transactions = [transaction1, transaction2]\n transactions_schema = TransactionSchema(many=True)\n json_result = transactions_schema.dumps(transactions)\n return json_result\n\n def get(self):\n return self.__output()\n\nclass PaymentBalance(Resource):\n def 
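# A minimal sketch of the flask_restful pattern this service is built on:
# each Resource subclass maps HTTP verbs to methods and Api.add_resource
# binds it to a route. The names here are invented for the demo.
from flask import Flask
from flask_restful import Api, Resource

demo_app = Flask(__name__)
demo_api = Api(demo_app)

class Ping(Resource):
    def get(self):
        return {"status": "ok"}  # dicts are serialized to JSON automatically

demo_api.add_resource(Ping, "/ping")
# demo_app.run(debug=True)  # GET /ping -> {"status": "ok"}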
__get_balance(self):\n        return 'Your balance is hehe' \n\n    def get(self):\n        return self.__get_balance()\n\n# Requested endpoints\npayservice.add_resource(Home, '/')\npayservice.add_resource(Payment, '/pay')\npayservice.add_resource(TransactionList, '/transactions')\npayservice.add_resource(PaymentBalance, '/balance')\n\nif __name__ == '__main__':\n    db.create_all()\n    app.run(debug=True)","repo_name":"jethro-djan/payment-microsv","sub_path":"payservice.py","file_name":"payservice.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"71682018434","text":"# username = input('username:')\n# passwd = input('passwd:')\n# if username == 'bob':\n# if passwd == '123456':\n# print('login succeeded')\n# else:\n# print('login failed')\n# else:\n# # print('login failed')\n# import getpass\n# username = input('username:')\n# passwd = getpass.getpass('passwd:')\n# if username == 'bob' and passwd == '123456':\n# print('success')\n# else:\n# print('failure')\nimport random\nnumber = random.randint(1,10)\nanswer = int(input('number:'))\nif answer > number:\n    print('too high')\nelif answer < number:\n    print('too low')\nelse:\n    print('correct')\nprint(number)\n","repo_name":"kyo68820405/1808","sub_path":"python/day02/login2.py","file_name":"login2.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"15356261957","text":"# name: Qiting Wu netId: qitwu Student ID: 112064080\nfrom tkinter import *\nfrom tkinter import messagebox\n\nroot = Tk()\nroot.title(\"Calculator\")\nroot.geometry(\"200x180\")\n\nexpre = StringVar()\nexpre.set('0')\n\nexpression = Label(root,textvariable=expre)\nexpression.grid(row=1, column=0,rowspan=1,columnspan=12,sticky=\"E\", padx=5, pady=5)\n\ndef append_num(num):\n    if expre.get()[-1] == \")\":\n        messagebox.showerror(\"Error\", \"Number cannot be right after )\")\n        return\n    if expre.get() == '0':\n        expre.set(num)\n    else:\n        expre.set(expre.get() + num)\n\ndef append_sym(sym):\n    if sym == '(' and expre.get()[-1] in '+-*/(':\n        expre.set(expre.get() + sym)\n    elif sym == ')' and expre.get()[-1] in '0123456789)':\n        if expre.get().count('(') > expre.get().count(')'):\n            expre.set(expre.get() + sym)\n        else:\n            messagebox.showerror(\"Error\", \"Unbalanced parentheses!\")\n    elif sym in '+-*/' and not expre.get()[-1] in '+-*/(':\n        expre.set(expre.get() + sym)\n    else:\n        messagebox.showerror(\"Error\", \"Invalid syntax!\")\n\ndef delete():\n    if expre.get() != '0':\n        expre.set(expre.get()[0:-1])\n    if len(expre.get()) == 0:\n        expre.set(\"0\")\n\ndef calculate():\n    try:\n        expre.set(eval(expre.get()))\n    except:\n        messagebox.showerror(\"Error\", \"Invalid syntax!\")\n\nzero = Button(root, text = \"0\", command=lambda: append_num(\"0\"))\nzero.grid(row = 2, column = 0, rowspan=1, columnspan=3,sticky=\"NSWE\")\none = Button(root, text = \"1\", command=lambda: append_num(\"1\"))\none.grid(row = 2, column = 3, rowspan=1, columnspan=3,sticky=\"NSWE\")\ntwo = Button(root, text = \"2\", command=lambda: append_num(\"2\"))\ntwo.grid(row = 2, column = 6, rowspan=1, columnspan=3,sticky=\"NSWE\")\nthree = Button(root, text = \"3\", command=lambda: append_num(\"3\"))\nthree.grid(row = 2, column = 9, rowspan=1, columnspan=3,sticky=\"NSWE\")\nfour = Button(root, text = \"4\", command=lambda: append_num(\"4\"))\nfour.grid(row = 3, column = 0, rowspan=1, columnspan=3,sticky=\"NSWE\")\nfive = Button(root, text = \"5\", command=lambda: 
append_num(\"5\"))\nfive.grid(row = 3, column = 3, rowspan=1, columnspan=3,sticky=\"NSWE\")\nsix = Button(root, text = \"6\", command=lambda: append_num(\"6\"))\nsix.grid(row = 3, column = 6, rowspan=1, columnspan=3,sticky=\"NSWE\")\nseven = Button(root, text = \"7\", command=lambda: append_num(\"7\"))\nseven.grid(row = 3, column = 9, rowspan=1, columnspan=3,sticky=\"NSWE\")\neight = Button(root, text = \"8\", command=lambda: append_num(\"8\"))\neight.grid(row = 4, column = 0, rowspan=1, columnspan=3,sticky=\"NSWE\")\nnine = Button(root, text = \"9\", command=lambda: append_num(\"9\"))\nnine.grid(row = 4, column = 3, rowspan=1, columnspan=3,sticky=\"NSWE\")\nleft = Button(root, text = \"(\", command=lambda: append_sym(\"(\"))\nleft.grid(row = 4, column = 6, rowspan=1, columnspan=3,sticky=\"NSWE\")\nright = Button(root, text = \")\", command=lambda: append_sym(\")\"))\nright.grid(row = 4, column = 9, rowspan=1, columnspan=3,sticky=\"NSWE\")\nplus = Button(root, text = \"+\", command=lambda: append_sym(\"+\"))\nplus.grid(row = 5, column = 0, rowspan=1, columnspan=3,sticky=\"NSWE\")\nminus = Button(root, text = \"-\", command=lambda: append_sym(\"-\"))\nminus.grid(row = 5, column = 3, rowspan=1, columnspan=3,sticky=\"NSWE\")\nmult = Button(root, text = \"*\", command=lambda: append_sym(\"*\"))\nmult.grid(row = 5, column = 6, rowspan=1, columnspan=3,sticky=\"NSWE\")\ndiv = Button(root, text = \"/\", command=lambda: append_sym(\"/\"))\ndiv.grid(row = 5, column = 9, rowspan=1, columnspan=3,sticky=\"NSWE\")\nclear = Button(root, text = \"CLEAR\", command=lambda: expre.set(\"0\"))\nclear.grid(row = 6, column = 0, rowspan=1, columnspan=4,sticky=\"NSWE\")\ncalc = Button(root, text = \"CALC\", command=lambda: calculate())\ncalc.grid(row = 6, column = 4, rowspan=1, columnspan=4,sticky=\"NSWE\")\ndele = Button(root, text = \"DEL\", command=lambda: delete())\ndele.grid(row = 6, column = 8, rowspan=1, columnspan=4,sticky=\"NSWE\")\n\n\nfor i in range(12):\n root.columnconfigure(i, weight=1)\nfor i in range(1,7):\n root.rowconfigure(0, weight=1)\n\nroot.mainloop()","repo_name":"qitingwu/Django-Training","sub_path":"a04_WuQiting/p3.py","file_name":"p3.py","file_ext":"py","file_size_in_byte":4054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23371275791","text":"from __future__ import with_statement\r\nfrom contextlib import nested\r\nimport sys\r\n\r\ndef count_switches(queries, eng_num):\r\n\tn = 0\r\n\ts = set()\r\n\tfor q in queries:\r\n\t\ts.add(q)\r\n\t\tif len(s) == eng_num:\r\n\t\t\tn += 1\r\n\t\t\ts = set((q,))\r\n\treturn n\r\n\r\ndef do_case(fit):\r\n\teng_num = int(fit.next())\r\n\tfor i in range(eng_num): fit.next()\r\n\tqs_num = int(fit.next())\r\n\treturn count_switches((fit.next().strip('\\n\\r') for i in range(qs_num)), eng_num)\r\n\r\ndef do_all(fin, fout):\r\n\twith nested(file(fin), file(fout, 'w')) as (fi, fo):\r\n\t\tfit = iter(fi)\r\n\t\tcase_num = int(fit.next())\r\n\t\tfor i in range(1, case_num+1):\r\n\t\t\tfo.write(\"Case #%d: %d\\n\" % (i, do_case(fit)))\r\n\r\nif __name__ == '__main__':\r\n\tdo_all(*sys.argv[1:3])\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_1/129.py","file_name":"129.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41566734317","text":"from django.shortcuts import redirect, render,HttpResponse,redirect\nfrom .models import student\nfrom .forms import 
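# The count_switches() function above accumulates distinct queries in a set
# and, whenever all eng_num engines have been seen, records one switch and
# reseeds the set with the current query. A pure-Python restatement:
def demo_count_switches(queries, eng_num):
    n, seen = 0, set()
    for q in queries:
        seen.add(q)
        if len(seen) == eng_num:
            n += 1
            seen = {q}
    return n

# With 2 engines, "A B A" triggers a switch at B and another at the final A.
assert demo_count_switches(["A", "B", "A"], 2) == 2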
stuform\n\n# Create your views here.\ndef home(request):\n    if request.method == 'POST':\n        stu = stuform(request.POST) \n        if stu.is_valid():\n            stu.save()\n    stu = stuform()\n    stu_data=student.objects.all()\n    return render(request, 'home.html',context={\"stuform\":stu,\"stu_data\":stu_data})\n\n\n\ndef delete(request,id):\n    stu=student.objects.get(id=id)\n    stu.delete()\n    return redirect('home')\n\n\ndef update(request,id):\n    instance=student.objects.get(id=id)\n    if request.method == 'POST':\n        stu = stuform(request.POST,instance=instance) \n        if stu.is_valid():\n            stu.save()\n            return redirect(\"home\")\n    stu = stuform(instance=instance)\n    stu_data=student.objects.all()\n    return render(request, 'update.html',context={\"stuform\":stu,\"stu_data\":stu_data})\n","repo_name":"Champ130/student-data-with-CRUD","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"44161981700","text":"# -*- coding: utf-8 -*-\n\n# https://github.com/booktype/python-ooxml\n# https://github.com/libyal/libolecf\n# https://github.com/grierforensics/officedissector\n\nfrom itertools import tee\nimport xmltodict\nimport zipfile\nimport re\nimport struct\nimport tempfile\n\n\nclass OOXML:\n    def __init__(self):\n        self.dictData = \"\"\n        self.meta = {}\n\n    def __dictGet(self, rootxmlKey, key):\n        metaData = self.dictData[rootxmlKey].get(key, \"\")\n        if metaData is None:\n            self.meta[key] = \"\"\n        elif '#text' in metaData:\n            self.meta[key] = metaData['#text']\n        else:\n            self.meta[key] = metaData\n\n    def __parsingCore(self, ooxmlFile, name):\n        xmlData = ooxmlFile.open(name.lstrip('/'))\n        self.dictData = xmltodict.parse(xmlData.read())\n\n        # Metadata that must be included\n        self.__dictGet('cp:coreProperties', 'dc:title')\n        self.__dictGet('cp:coreProperties', 'dc:creator')\n        self.__dictGet('cp:coreProperties', 'dcterms:created')\n        self.__dictGet('cp:coreProperties', 'cp:lastModifiedBy')\n        self.__dictGet('cp:coreProperties', 'dcterms:modified')\n        self.__dictGet('cp:coreProperties', 'cp:lastPrinted')\n        self.__dictGet('cp:coreProperties', 'dc:subject')\n        self.__dictGet('cp:coreProperties', 'dc:description')\n\n        # Additional metadata\n        self.__dictGet('cp:coreProperties', 'cp:revision') # revision count\n        self.__dictGet('cp:coreProperties', 'cp:keywords') \n\n    def __parsingApp(self, ooxmlFile, name):\n        xmlData = ooxmlFile.open(name.lstrip('/'))\n        self.dictData = xmltodict.parse(xmlData.read())\n\n        # Metadata that must be included\n        self.__dictGet('Properties', 'Pages')\n        self.__dictGet('Properties', 'Words')\n        self.__dictGet('Properties', 'Template')\n        self.__dictGet('Properties', 'TotalTime')\n        self.__dictGet('Properties', 'Application')\n        self.__dictGet('Properties', 'AppVersion')\n        self.__dictGet('Properties', 'Slides')\n\n        # Additional metadata\n        self.__dictGet('Properties', 'Lines')\n        self.__dictGet('Properties', 'Notes')\n        self.__dictGet('Properties', 'Paragraphs')\n        self.__dictGet('Properties', 'HiddenSlides')\n        self.__dictGet('Properties', 'Characters')\n        self.__dictGet('Properties', 'CharactersWithSpaces')\n\n    def __remake(self, zipPath, xmlFileName):\n        # extraction app.xml\n        data = open(zipPath, 'rb').read()\n        tf = tempfile.NamedTemporaryFile()\n        if xmlFileName == 'app.xml':\n            iterator = re.finditer(b'docProps/app.xml', data)\n            fileNameLength = 16\n        else:\n            iterator = re.finditer(b'docProps/core.xml', data)\n            fileNameLength = 17\n\n        first_it, second_it = tee(iterator)\n        if sum(1 for _ in first_it) 
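# A sketch of reading the same Dublin Core metadata directly: an OOXML
# document is a ZIP archive whose docProps/core.xml is what __parsingCore
# above parses. The file name "example.docx" is an assumption for the demo.
import zipfile
import xmltodict

def demo_read_creator(path="example.docx"):
    with zipfile.ZipFile(path) as zf:
        props = xmltodict.parse(zf.read("docProps/core.xml"))["cp:coreProperties"]
    return props.get("dc:creator", "")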
!= 2:\n return\n\n for idx, val in enumerate(second_it):\n if idx == 0:\n frFileNameIndex = val.start()\n frExtraFieldLength = struct.unpack(' int:\n # Important\n def search(hash_len, a, modulus, nums):\n h = 0\n L = hash_len\n # calculate initial hash.\n for i in range(hash_len):\n h = (h * a + nums[i]) % modulus\n seen = {h}\n al = (a ** L) % modulus\n\n for i in range(1, n - L + 1):\n # recalculate rolling hash for each slice.\n h = (h*a - nums[i-1]*al + nums[i+L-1]) % modulus\n if h in seen:\n # find repeat\n return i\n seen.add(h)\n\n return -1\n\n n = len(S)\n nums = [ ord(S[i]) - ord('a') for i in range(n) ]\n a = 26\n modulus = 2 ** 24\n\n # start position is important\n l, h = 1, n\n while l <= h:\n hash_len = l + (h - l) // 2\n\n if search(hash_len, a, modulus, nums) == -1:\n h = hash_len - 1\n else:\n l = hash_len + 1\n\n return l-1\n\nif __name__ == \"__main__\":\n solution = Solution()\n # print(solution.longestRepeatingSubstring(\"abbaba\"))\n print(solution.longestRepeatingSubstring(\"abbaba\"))\n","repo_name":"dictator-x/practise_as","sub_path":"algorithm/leetCode/1062_longest_repeating_substring.py","file_name":"1062_longest_repeating_substring.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5091595997","text":"from django.urls import path\nfrom .views import *\n\n\nurlpatterns = [\n #path('', views.index, name='index'),\n\n #Leave as empty string for base url\n\tpath('', store, name=\"store\"),\n\tpath('cart/', cart, name=\"cart\"),\n\tpath('checkout/', checkout, name=\"checkout\"),\n path('login/', login_view, name='login'),\n path('register/', register_view, name='register'),\n\n\n path('showproducts/', product_info_sql, name='showproducts'),\n path('home/', home_view, name='home'),\n path('product//', product_view, name='product'),\n]","repo_name":"DominikSabat/BazyDanych","sub_path":"sklep/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73541740033","text":"import argparse\nimport os.path as osp\nimport random\nfrom time import perf_counter as t\nimport yaml\nfrom yaml import SafeLoader\n\nimport torch\nimport torch_geometric.transforms as T\nimport torch.nn.functional as F\nimport torch.nn as nn\nfrom torch_geometric.datasets import Planetoid, CitationFull\nfrom torch_geometric.utils import dropout_adj, to_undirected, is_undirected\nfrom torch_geometric.nn import GCNConv\n\nimport numpy as np\nfrom torch_geometric.utils import to_undirected, to_scipy_sparse_matrix\n\nfrom datasets import get_citation_dataset\nfrom model_digcl import Encoder, Model, drop_feature\nfrom eval_digcl import label_classification\nfrom get_adj import *\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\ndef train(model: Model, x, edge_index):\n model.train()\n optimizer.zero_grad()\n\n edge_index_1, edge_weight_1 = cal_fast_appr(\n alpha_1, edge_index, x.shape[0], x.dtype)\n edge_index_2, edge_weight_2 = cal_fast_appr(\n alpha_2, edge_index, x.shape[0], x.dtype)\n\n x_1 = drop_feature(x, drop_feature_rate_1)\n x_2 = drop_feature(x, drop_feature_rate_2)\n\n z1 = model(x_1, edge_index_1, edge_weight_1)\n z2 = model(x_2, edge_index_2, edge_weight_2)\n\n loss = model.loss(z1, z2, batch_size=0)\n loss.backward()\n optimizer.step()\n\n return loss.item()\n\n\ndef test(model: Model, dataset, x, edge_index, edge_weight, y, final=False):\n model.eval()\n 
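# A brute-force cross-check for the rolling-hash search above on a tiny
# input. Note the solution's modulus (2**24) admits hash collisions, so a
# direct substring comparison is the ground truth here.
def demo_brute_longest_repeat(s):
    best = 0
    for length in range(1, len(s)):
        seen = set()
        for i in range(len(s) - length + 1):
            sub = s[i:i + length]
            if sub in seen:
                best = length
                break
            seen.add(sub)
    return best

assert demo_brute_longest_repeat("abbaba") == 2  # "ab" and "ba" both repeat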
z = model(x, edge_index, edge_weight)\n label_classification(z, y, data)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset', type=str, default='DBLP')\n parser.add_argument('--gpu_id', type=int, default=0)\n parser.add_argument('--config', type=str, default='config_digcl.yaml')\n parser.add_argument('--alpha', type=float, default=0.1)\n parser.add_argument('--recache', action=\"store_true\",\n help=\"clean up the old adj data\", default=True)\n parser.add_argument('--normalize-features',\n action=\"store_true\", default=True)\n parser.add_argument('--adj-type', type=str, default='or')\n parser.add_argument('--curr-type', type=str, default='log')\n args = parser.parse_args()\n\n assert args.gpu_id in range(0, 8)\n torch.cuda.set_device(args.gpu_id)\n\n config = yaml.load(open(args.config), Loader=SafeLoader)[args.dataset]\n\n torch.manual_seed(config['seed'])\n random.seed(2021)\n\n learning_rate = config['learning_rate']\n num_hidden = config['num_hidden']\n num_proj_hidden = config['num_proj_hidden']\n activation = ({'relu': F.relu, 'prelu': nn.PReLU(), 'rrelu': nn.RReLU()})[\n config['activation']]\n base_model = ({'GCNConv': GCNConv})[config['base_model']]\n num_layers = config['num_layers']\n\n alpha_1 = 0.1\n\n drop_feature_rate_1 = config['drop_feature_rate_1']\n drop_feature_rate_2 = config['drop_feature_rate_2']\n tau = config['tau']\n num_epochs = config['num_epochs']\n weight_decay = config['weight_decay']\n\n path = osp.join(osp.expanduser('.'), 'datasets')\n print(args.normalize_features)\n dataset = get_citation_dataset(\n args.dataset, args.alpha, args.recache, args.normalize_features, args.adj_type)\n print(\"Num of edges \", dataset[0].num_edges)\n\n data = dataset[0]\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n data = data.to(device)\n edge_index_init, edge_weight_init = cal_fast_appr(\n alpha_1, data.edge_index, data.x.shape[0], data.x.dtype)\n\n encoder = Encoder(dataset.num_features, num_hidden, activation,\n base_model=base_model, k=num_layers).to(device)\n model = Model(encoder, num_hidden, num_proj_hidden, tau).to(device)\n optimizer = torch.optim.Adam(\n model.parameters(), lr=learning_rate, weight_decay=weight_decay)\n\n start = t()\n prev = start\n for epoch in range(1, num_epochs + 1):\n a = 0.9\n b = 0.1\n if args.curr_type == 'linear':\n alpha_2 = a-(a-b)/(num_epochs+1)*epoch\n elif args.curr_type == 'exp':\n alpha_2 = a - (a-b)/(np.exp(3)-1) * \\\n (np.exp(3*epoch/(num_epochs+1))-1)\n elif args.curr_type == 'log':\n alpha_2 = a - (a-b)*(1/3*np.log(epoch/(num_epochs+1)+np.exp(-3)))\n elif args.curr_type == 'fixed':\n alpha_2 = 0.9\n else:\n print('wrong curr type')\n exit()\n\n loss = train(model, data.x, data.edge_index)\n\n now = t()\n print(f'(T) | Epoch={epoch:03d}, loss={loss:.4f}, '\n f'this epoch {now - prev:.4f}, total {now - start:.4f}')\n prev = now\n\n print(\"=== Final ===\")\n test(model, dataset, data.x, edge_index_init,\n edge_weight_init, data.y, final=True)\n","repo_name":"flyingtango/DiGCL","sub_path":"code/train_digcl.py","file_name":"train_digcl.py","file_ext":"py","file_size_in_byte":4750,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"61"} +{"seq_id":"35235814002","text":"import numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nfrom typing import List, Dict, Any, Union, Optional\n\n# Logging\nfrom gaze_verification import logging_handler\nlogger = logging_handler.get_logger(__name__,\n 
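# The curriculum in train_digcl.py moves alpha_2 between a=0.9 and b=0.1 at
# different rates (note the log variant exceeds a for small epochs). A
# standalone restatement of the three schedules:
import numpy as np

def demo_alpha2(epoch, num_epochs, kind, a=0.9, b=0.1):
    t = epoch / (num_epochs + 1)
    if kind == 'linear':
        return a - (a - b) * t
    if kind == 'exp':
        return a - (a - b) / (np.exp(3) - 1) * (np.exp(3 * t) - 1)
    if kind == 'log':
        return a - (a - b) * (1 / 3 * np.log(t + np.exp(-3)))
    raise ValueError(kind)

print([round(demo_alpha2(e, 100, 'linear'), 3) for e in (1, 50, 100)])  # ~[0.892, 0.504, 0.108]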
log_to_file=False)\n\n\ndef interpolate_session(session: pd.DataFrame,\n                        columns: List[str],\n                        interpolation_kwargs: Dict[str, Any],\n                        agg_column: Optional[str] = None,\n                        beaten_ratio: float = 30,\n                        min_length: int = 500) -> pd.DataFrame:\n    \"\"\"\n    Replace erroneous samples with interpolated values so that they\n    do not distort the filtering applied later.\n    \"\"\"\n    logger.info(f\"Interpolating erroneous observations in data...\")\n    init_size = session.shape\n    logger.info(f\"Session before interpolation shape: {init_size[0]}\")\n\n    session['bad_sample'] = session.apply(lambda row: 1 if any([row[col] < 0 for col in columns]) else 0, axis=1)\n    logger.info(f\"{session['bad_sample'].sum()} erroneous observations detected in data.\")\n\n    # Mark the invalid samples as NaN so interpolation can fill them\n    for col in columns:\n        session.loc[session['bad_sample'] == 1, col] = np.nan\n\n    # Inside each trial - fill with interpolate values with Pandas splines\n    data_interpolated = []\n    beaten_cnt = 0\n\n    for group_name, trial_data in tqdm(session.groupby(by=['trialId'])):\n        if 100 * (trial_data['bad_sample'].sum() / trial_data.shape[0]) >= beaten_ratio:\n            logger.info(f\"Broken trial with ratio of beaten rows > {beaten_ratio}%\")\n            beaten_cnt += 1\n            continue\n        if trial_data.shape[0] < min_length:\n            logger.info(f\"Too small trial with length < {min_length}: {trial_data.shape[0]}\")\n            beaten_cnt += 1\n            continue\n\n        for col in columns:\n            trial_data[col] = trial_data[col].interpolate(**interpolation_kwargs)\n\n        # Drop any rows interpolation could not fill (e.g. at trial edges)\n        for col in columns:\n            if trial_data[col].isna().sum() > 0:\n                trial_data = trial_data.loc[~trial_data[col].isna()]\n        data_interpolated.append(trial_data.reset_index(drop=True))\n\n    session = pd.concat(data_interpolated, axis=0)\n    logger.info(f\"Session after interpolation shape: {session.shape[0]}, diff: {init_size[0] - session.shape[0]}\")\n    del data_interpolated\n\n    return session\n","repo_name":"IrinaArmstrong/GazeVerification","sub_path":"gaze_verification/parsers/interpolation.py","file_name":"interpolation.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"31681024666","text":"# -*- coding:utf-8 -*-\r\n__author__ = \"Alex Li\"\r\nimport socket\r\nclient = socket.socket()\r\n\r\n#client.connect(('192.168.16.200',9999))\r\nclient.connect(('localhost',9999))\r\n\r\nwhile True:\r\n    cmd = raw_input(\">>:\").strip()\r\n    if len(cmd) == 0: continue\r\n    client.send(cmd.encode(\"utf-8\"))\r\n    cmd_res_size = client.recv(1024) ## receive the size of the command result\r\n    print(u\"command result size:\",cmd_res_size)\r\n    client.send(u\"ready to receive, you can send now\".encode(\"utf-8\"))\r\n    received_size = 0\r\n    received_data = b''\r\n    # the data received and the size received so far\r\n    cmd_size = int(cmd_res_size.decode(\"utf-8\"))\r\n    while received_size < int(cmd_res_size.decode(\"utf-8\")) :\r\n        data = client.recv(1024)\r\n        received_size += len(data)\r\n        # each recv may return fewer than 1024 bytes, so len() must be used to count\r\n        #print(data.decode())\r\n        received_data += data\r\n    else:\r\n        print(\"cmd res receive done...\",received_size)\r\n        print(received_data.decode(\"utf-8\"))\r\n\r\n\r\nclient.close()\r\n\r\n","repo_name":"liulin1840/python-","sub_path":"day8/sock_server_client.py","file_name":"sock_server_client.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"25470731894","text":"#!/usr/bin/env python3\n\"\"\"\nScript for saving a checkpoint of a HuggingFace 
model.\n\"\"\"\n\n\nimport os\n\n\n#MODEL_NAME = \"textattack/bert-base-uncased-SST-2\"\n#from transformers import BertTokenizer as Tokenizer\n#from transformers import BertForSequenceClassification as Model\n\nMODEL_NAME = \"bert-base-uncased\"\nfrom transformers import BertTokenizer as Tokenizer\nfrom transformers import BertForMaskedLM as Model\n\n#MODEL_NAME = \"microsoft/resnet-50\"\n#from transformers import AutoFeatureExtractor as Tokenizer\n#from transformers import ResNetForImageClassification as Model\n\n\ndef save_checkpoint(model, tokenizer, output_dir):\n    \"\"\"\n    This will create output_dir containing:\n        config.json\n        pytorch_model.bin\n        special_tokens_map.json\n        tokenizer_config.json\n        vocab.txt\n\n    See:\n    https://huggingface.co/transformers/v1.2.0/serialization.html#serialization-best-practices\n    \"\"\"\n    assert not os.path.exists(output_dir)\n    model.save_pretrained(output_dir)\n    tokenizer.save_pretrained(output_dir)\n\n\ndef main():\n    model = Model.from_pretrained(MODEL_NAME)\n    tokenizer = Tokenizer.from_pretrained(MODEL_NAME)\n    output_dir = MODEL_NAME\n    print(\"Saving checkpoint for %s\" % output_dir) \n    save_checkpoint(model, tokenizer, output_dir)\n    assert os.path.exists(output_dir)\n    assert os.path.isdir(output_dir)\n    print(\"Done.\")\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"rreece/venv-torch","sub_path":"scripts/save_checkpoint.py","file_name":"save_checkpoint.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"23549587760","text":"\"\"\"Reverse Polish Notation\n\n**Reverse Polish notation**, also referred to as **Polish postfix notation**, is a way of laying out operators and operands. \n\nWhen making mathematical expressions, we typically put arithmetic operators (like `+`, `-`, `*`, and `/`) *between* operands. For example: `5 + 7 - 3 * 8`\n\nHowever, in Reverse Polish Notation, the operators come *after* the operands. For example: `3 1 + 4 *`\n\nThe above expression would be evaluated as `(3 + 1) * 4 = 16`\n\nThe goal of this exercise is to create a function that does the following:\n* Given a *postfix* expression as input, evaluate and return the correct final answer. \n\n**Note**: In Python 3, the division operator `/` is used to perform float division. 
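For instance, evaluating `4 13 5 / +` involves the division `13 / 5`, which yields `2.6` rather than `2`.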
So for this problem, you should use `int()` after every division to convert the answer to an integer.\n\"\"\"\n\n\nclass Stack:\n    \"\"\"Minimal stack, standing in for the Stack class this exercise assumes.\"\"\"\n\n    def __init__(self):\n        self.items = []\n\n    def push(self, item):\n        self.items.append(item)\n\n    def pop(self):\n        return self.items.pop()\n\n\ndef evaluate_post_fix(input_list):\n    stack = Stack()\n    i = 0\n    n = len(input_list)\n    while i < n:\n        if input_list[i] == \"+\":\n            x = stack.pop()\n            y = stack.pop()\n            z = int(y) + int(x)\n            stack.push(z)\n        elif input_list[i] == \"-\":\n            x = stack.pop()  # right operand\n            y = stack.pop()  # left operand\n            z = int(y) - int(x)\n            stack.push(z)\n        elif input_list[i] == \"/\":\n            x = stack.pop()  # right operand\n            y = stack.pop()  # left operand\n            z = int(int(y) / int(x))\n            stack.push(z)\n        elif input_list[i] == \"*\":\n            x = stack.pop()\n            y = stack.pop()\n            z = int(y) * int(x)\n            stack.push(z)\n        else:\n            stack.push(input_list[i])\n        i += 1\n    return stack.pop()","repo_name":"khezam/algos_ds","sub_path":"review/probsUdacity/stack_postfix.py","file_name":"stack_postfix.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"34760706079","text":"#!/usr/bin/env python\n# coding: utf-8\n# # [setup](https://www.kaggle.com/yujiariyasu/plot-3positive-classes )\n#!pip install pandas\n#!pip install pydicom\n#!pip install tqdm\n#!pip install skimage\n#!pip install scikit-image\n#!pip install cv2\n#!pip install opencv\n#!pip install opencv-python\n#!pip install GDCM\n#!pip install pylibjpeg\n#!pip install numpy --upgrade\n#!pip install pylibjpeg-libjpeg\n#!pip install bokeh\n#!pip install sklearn\n#!doskey /HISTORY\nimport numpy as np\nimport pandas as pd\nimport os\nimport pydicom\nfrom glob import glob\nfrom tqdm.notebook import tqdm\nfrom pydicom.pixel_data_handlers.util import apply_voi_lut\nimport matplotlib.pyplot as plt\nfrom skimage import exposure\nimport cv2\nimport warnings\nwarnings.filterwarnings('ignore')\ndataset_dir = 'F:/siim-covid19-detection/'\ndef dicom2array(path, voi_lut=True, fix_monochrome=True):\n    dicom = pydicom.read_file(path)\n    if voi_lut: data = apply_voi_lut(dicom.pixel_array, dicom)\n    else: data = dicom.pixel_array\n    if fix_monochrome and dicom.PhotometricInterpretation == \"MONOCHROME1\": data = np.amax(data) - data\n    data = data - np.min(data)\n    data = data / np.max(data)\n    data = (data * 255).astype(np.uint8)\n    return data\ndef plot_img(img, size=(7, 7), is_rgb=True, title=\"\", cmap='gray'):\n    plt.figure(figsize=size)\n    plt.imshow(img, cmap=cmap)\n    plt.suptitle(title)\n    plt.show()\ndef plot_imgs(imgs, cols=4, size=7, is_rgb=True, title=\"\", cmap='gray', img_size=(500,500)):\n    rows = len(imgs)//cols + 1\n    fig = plt.figure(figsize=(cols*size, rows*size))\n    for i, img in enumerate(imgs):\n        if img_size is not None:\n            img = cv2.resize(img, img_size)\n        fig.add_subplot(rows, cols, i+1)\n        plt.imshow(img, cmap=cmap)\n    plt.suptitle(title)\n    plt.show()\ndicom_paths = glob(f'{dataset_dir}/train/*/*/*.dcm')\nlen(dicom_paths)\ndicom_paths[:4]\n#dicom2array(dicom_paths)\nimgs = [dicom2array(path) for path in dicom_paths[:4]]\nplot_imgs(imgs)\nimgs = [exposure.equalize_hist(img) for img in imgs]\nplot_imgs(imgs)\n\nfrom bokeh.plotting import figure as bokeh_figure\nfrom bokeh.io import output_notebook, show, output_file\nfrom bokeh.models import ColumnDataSource, HoverTool, Panel\nfrom bokeh.models.widgets import Tabs\nimport pandas as pd\nfrom PIL import Image\nfrom sklearn import preprocessing\nimport random\nfrom random import randint\n\ntrain = pd.read_csv(f'{dataset_dir}/train_image_level.csv')\nprint(train)\ntrain_study = pd.read_csv(f'{dataset_dir}/train_study_level.csv')\nprint(train_study)\n\ntrain_study['StudyInstanceUID'] = train_study['id'].apply(lambda x: x.replace('_study', ''))\ndel train_study['id']\ntrain = train.merge(train_study, 
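# The core of dicom2array() above is plain min-max scaling to 8-bit; the
# same math on a toy array, as a self-contained reference (assumes the
# image is not constant, otherwise np.max(data) would be zero):
import numpy as np

def demo_to_uint8(data):
    data = data - np.min(data)
    data = data / np.max(data)
    return (data * 255).astype(np.uint8)

arr = np.array([[0.0, 0.5], [1.0, 2.0]])
assert demo_to_uint8(arr).min() == 0 and demo_to_uint8(arr).max() == 255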
on='StudyInstanceUID')\ntrain.head()\n\n\ngroup_col = 'StudyInstanceUID'\ndf=pd.DataFrame(train.groupby(group_col)['id'].count())\ndf.columns = [f'{group_col}_count']\ntrain=train.merge(df.reset_index(), on=group_col)\none_study_multi_image_df = train[train[f'{group_col}_count'] > 1]\nprint(len(one_study_multi_image_df))\n#https://www.kaggle.com/c/siim-covid19-detection/discussion/239980\ntrain = train[train[f'{group_col}_count'] == 1] # delete 'StudyInstanceUID_count > 1' data\n\ndef bar_plot(train_df, variable):\n var = train_df[variable]\n varValue = var.value_counts()\n plt.figure(figsize = (9,3))\n plt.bar(varValue.index, varValue)\n plt.xticks(varValue.index, varValue.index.values)\n plt.ylabel(\"Frequency\")\n plt.title(variable)\n plt.show()\n print(\"{}: \\n {}\".format(variable,varValue))\n\ntrain['target'] = 'Negative for Pneumonia'\ntrain.loc[train['Typical Appearance']==1, 'target'] = 'Typical Appearance'\ntrain.loc[train['Indeterminate Appearance']==1, 'target'] = 'Indeterminate Appearance'\ntrain.loc[train['Atypical Appearance']==1, 'target'] = 'Atypical Appearance'\nbar_plot(train, 'target')\n\ntrain.boxes.values[0] # x_min, y_min, width, height\ntrain.label.values[0] # x_min, y_min, x_max, y_max\ntrain = train[~train.boxes.isnull()]\nclass_names = ['Typical Appearance', 'Indeterminate Appearance', 'Atypical Appearance'] # we have 3 positive classes\nunique_classes = np.unique(train[class_names].values, axis=0)\nunique_classes # no multi label\n\nimgs = []\nlabel2color = {\n '[1, 0, 0]': [255,0,0], # Typical Appearance\n '[0, 1, 0]': [0,255,0], # Indeterminate Appearance\n '[0, 0, 1]': [0,0,255], # Atypical Appearance\n}\nprint('Typical Appearance: red')\nprint('Indeterminate Appearance: green')\nprint('Atypical Appearance: blue')\nthickness = 3\nscale = 5\nfor _, row in train[train['Negative for Pneumonia']==0].iloc[:8].iterrows():\n study_id = row['StudyInstanceUID']\n img_path = glob(f'{dataset_dir}/train/{study_id}/*/*')[0]\n img = dicom2array(path=img_path)\n img = cv2.resize(img, None, fx=1/scale, fy=1/scale)\n img = np.stack([img, img, img], axis=-1)\n claz = row[class_names].values\n color = label2color[str(claz.tolist())]\n bboxes = []\n bbox = []\n for i, l in enumerate(row['label'].split(' ')):\n if (i % 6 == 0) | (i % 6 == 1):\n continue\n bbox.append(float(l)/scale)\n if i % 6 == 5:\n bboxes.append(bbox)\n bbox = []\n for box in bboxes:\n img = cv2.rectangle(\n img,\n (int(box[0]), int(box[1])),\n (int(box[2]), int(box[3])),\n color, thickness\n )\n img = cv2.resize(img, (500,500))\n imgs.append(img)\n\n\n#plot_imgs(imgs, cmap=None)\nplot_img(img)\n\ntrain\nimgs = []\nfor _, row in train[train['Negative for Pneumonia']==0].iloc[:8].iterrows():\n study_id = row['StudyInstanceUID']\n img_path = glob(f'{dataset_dir}/train/{study_id}/*/*')[0]\n img = dicom2array(path=img_path)\n #img = img.pixel_array\n img.shape\n img = cv2.resize(img, (500,500))\n imgs.append(img)\nlen(imgs)\nN4P=imgs\nimgs = []\nfor _, row in train[train['Typical Appearance']==0].iloc[:8].iterrows():\n study_id = row['StudyInstanceUID']\n img_path = glob(f'{dataset_dir}/train/{study_id}/*/*')[0]\n img = dicom2array(path=img_path)\n #img = img.pixel_array\n img.shape\n img = cv2.resize(img, (500,500))\n imgs.append(img)\nlen(imgs)\nTA=imgs\nimgs = []\nfor _, row in train[train['Indeterminate Appearance']==0].iloc[:8].iterrows():\n study_id = row['StudyInstanceUID']\n img_path = glob(f'{dataset_dir}/train/{study_id}/*/*')[0]\n img = dicom2array(path=img_path)\n #img = img.pixel_array\n img.shape\n 
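# The "i % 6" loop above walks the SIIM label string, a flat sequence of
# 6-token groups ("opacity confidence x_min y_min x_max y_max ..."); tokens
# 0-1 are skipped and tokens 2-5 become one (optionally scaled) box. An
# equivalent, more explicit parse:
def demo_parse_boxes(label, scale=1.0):
    tokens = label.split(" ")
    boxes = []
    for g in range(0, len(tokens), 6):
        boxes.append([float(t) / scale for t in tokens[g + 2:g + 6]])
    return boxes

assert demo_parse_boxes("opacity 1 0 0 10 20") == [[0.0, 0.0, 10.0, 20.0]]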
img = cv2.resize(img, (500,500))\n imgs.append(img)\nlen(imgs)\nIA=imgs\nimgs = []\nfor _, row in train[train['Atypical Appearance']==0].iloc[:8].iterrows():\n study_id = row['StudyInstanceUID']\n img_path = glob(f'{dataset_dir}/train/{study_id}/*/*')[0]\n img = dicom2array(path=img_path)\n #img = img.pixel_array\n img.shape\n img = cv2.resize(img, (500,500))\n imgs.append(img)\nlen(imgs)\nAA=imgs\n\nprint(len(AA),len(IA),len(TA),len(N4P),train.shape)\n#plot_img(TA[1])\n\n#https://github.com/iterative/cml_tensorboard_case/blob/master/train.py\nimport tensorflow as tf\nimport datetime\n#mnist = tf.keras.datasets.mnist\n#(x_train, y_train),(x_test, y_test) = mnist.load_data()\n#x_train, x_test = x_train / 255.0, x_test / 255.0\n\nmodel=tf.keras.models.Sequential([\n tf.keras.layers.Flatten(input_shape=(500, 500)),\n tf.keras.layers.Dense(512, activation='relu'),\n tf.keras.layers.Dropout(0.2),\n tf.keras.layers.Dense(250000, activation='softmax')\n ])\nmodel.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\nlog_dir = \"logs/fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\ntensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)\nimg=TA[1]\n#img.flatten()\nimg=np.reshape(img,(-1,250000))\nmodel.fit(x=img,y=img,epochs=5,validation_data=(img,img),callbacks=[tensorboard_callback])\n\nimgs = []\nthickness = 3\nscale = 5\n\nfor _, row in train[train['Typical Appearance'] == 1].iloc[:16].iterrows():\n study_id = row['StudyInstanceUID']\n img_path = glob(f'{dataset_dir}/train/{study_id}/*/*')[0]\n img = dicom2array(path=img_path)\n img = cv2.resize(img, None, fx=1/scale, fy=1/scale)\n\n claz = row[class_names].values\n color = label2color[str(claz.tolist())]\n\n bboxes = []\n bbox = []\n for i, l in enumerate(row['label'].split(' ')):\n if (i % 6 == 0) | (i % 6 == 1):\n continue\n bbox.append(float(l)/scale)\n if i % 6 == 5:\n bboxes.append(bbox)\n bbox = []\n\n for box in bboxes:\n img = cv2.rectangle(\n img,\n (int(box[0]), int(box[1])),\n (int(box[2]), int(box[3])),\n color, thickness\n )\n img = cv2.resize(img, (500,500))\n imgs.append(img)\n\nplot_imgs(imgs, cmap=None)\n\n\n# In[20]:\n\n\nnp.sum(train['Indeterminate Appearance'].iloc[:16]==1)\n#np.sum(train['Atypical Appearance'].iloc[:16]==1)\n#np.sum(train['Typical Appearance'].iloc[:16]==1)\n\n\n# # Indeterminate Appearance only\n\n# In[21]:\n\n\nimgs = []\nthickness = 3\nscale = 5\n\nfor _, row in train[train['Indeterminate Appearance'] == 1].iloc[:2186].iterrows():\n study_id = row['StudyInstanceUID']\n img_path = glob(f'{dataset_dir}/train/{study_id}/*/*')[0]\n img = dicom2array(path=img_path)\n img = cv2.resize(img, None, fx=1/scale, fy=1/scale)\n img = np.stack([img, img, img], axis=-1)\n\n claz = row[class_names].values\n color = label2color[str(claz.tolist())]\n\n bboxes = []\n bbox = []\n for i, l in enumerate(row['label'].split(' ')):\n if (i % 6 == 0) | (i % 6 == 1):\n continue\n bbox.append(float(l)/scale)\n if i % 6 == 5:\n bboxes.append(bbox)\n bbox = []\n\n for box in bboxes:\n img = cv2.rectangle(\n img,\n (int(box[0]), int(box[1])),\n (int(box[2]), int(box[3])),\n color, thickness\n )\n img = cv2.resize(img, (500,500))\n imgs.append(img)\n\nplot_imgs(imgs, cmap=None)\n\n\n# # Atypical Appearance only\n\n# In[23]:\n\n\nimgs = []\nthickness = 3\nscale = 5\n\nfor _, row in train[train['Atypical Appearance'] == 1].iloc[:16].iterrows():\n study_id = row['StudyInstanceUID']\n img_path = glob(f'{dataset_dir}/train/{study_id}/*/*')[0]\n img = 
dicom2array(path=img_path)\n img = cv2.resize(img, None, fx=1/scale, fy=1/scale)\n img = np.stack([img, img, img], axis=-1)\n\n claz = row[class_names].values\n color = label2color[str(claz.tolist())]\n\n bboxes = []\n bbox = []\n\n for i, l in enumerate(row['label'].split(' ')):\n if (i % 6 == 0) | (i % 6 == 1):\n continue\n bbox.append(float(l)/scale)\n if i % 6 == 5:\n bboxes.append(bbox)\n bbox = []\n\n for box in bboxes:\n img = cv2.rectangle(\n img,\n (int(box[0]), int(box[1])),\n (int(box[2]), int(box[3])),\n color, thickness\n )\n img = cv2.resize(img, (500,500))\n imgs.append(img)\n\nplot_imgs(imgs, cmap=None)\n\n\n# In[24]:\n\n\nsub = pd.read_csv(dataset_dir+'/sample_submission.csv')\nsub.loc[sub['id'].str.endswith('study'), 'PredictionString'] = 'negative 1 0 0 1 1 atypical 1 0 0 1 1 typical 1 0 0 1 1 indeterminate 1 0 0 1 1'\nsub.to_csv('submission.csv', index=False)\nsub\n\n\n# In[38]:\n\n\nnp.sum(train['Atypical Appearance']==1)\n\n\n# In[18]:\n\n\n\n\n# In[6]:\n\n\n# https://codelabs.developers.google.com/tflite-computer-vision-train-model?continue=https%3A%2F%2Fdevelopers.google.com%2Flearn%2Fpathways%2Fgoing-further-image-classification%3Futm_source%3Dgoogle-io%26utm_medium%3Dorganic%26utm_campaign%3Dio21-learninglab%23codelab-https%3A%2F%2Fcodelabs.developers.google.com%2Ftflite-computer-vision-train-model#4\n#Imports and check that we are using TF2.x\nimport numpy as np\nimport os\nimport tensorflow as tf\nprint(tf.__version__)\nfrom tflite_model_maker import configs\nfrom tflite_model_maker import ExportFormat\nfrom tflite_model_maker import model_spec\nfrom tflite_model_maker import image_classifier\nfrom tflite_model_maker.image_classifier import DataLoader\nassert tf.__version__.startswith('2')\ntf.get_logger().setLevel('ERROR')\n#data_path = tf.keras.utils.get_file('flower_photos','https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',untar=True)\ndata_path='F:\\\\Pneumonia\\\\chest_xray\\\\chest_xray\\\\train'\ndata = DataLoader.from_folder(data_path)\ntrain_data, test_data = data.split(0.5)\nmodel = image_classifier.create(train_data)\nprint(model)\n\n\n# In[84]:\n\n\nloss, accuracy = model.evaluate(test_data)\nprint(loss,accuracy)\n\n\n# In[79]:\n\n\ndata_path='F:\\\\Pneumonia\\\\chest_xray\\\\chest_xray\\\\test'\ndata = DataLoader.from_folder(data_path)\ntrain_data, test_data = data.split(0.5)\nprint(train_data, test_data)\n\nloss, accuracy = model.evaluate(test_data)\nprint(loss,accuracy)\n\n\n# In[76]:\n\n\nloss, accuracy = model.evaluate(train_data)\nprint(loss,accuracy)\n\n\n# In[74]:\n\n\nmodel.export(export_dir='.')\n\n\n# In[75]:\n\n\nmodel.evaluate_tflite('model.tflite', test_data)\n\n\n# In[72]:\n\n\nimport pickle\nfilenaM = \"tfliteModel.pkl\"\nwith open(filenaM, 'wb') as file:\n pickle.dump(model, file)\n\nfrom drawdata import draw_scatter\ndraw_scatter()#data.hist()\nimport pandas as pd\ndata=pd.read_clipboard(sep=\",\")\ndata.columns\ndata[\"z\"].hist()\n\n# https://blog.tensorflow.org/2021/05/introducing-tensorflow-decision-forests.html\nget_ipython().system('pip install tensorflow_decision_forests')\n# Load TensorFlow Decision Forests\nimport tensorflow_decision_forests as tfdf\n# Load the training dataset using pandas\nimport pandas\ntrain_df = pandas.read_csv(\"sample.csv\")\n# Convert the pandas dataframe into a TensorFlow dataset\ntrain_ds = tfdf.keras.pd_dataframe_to_tf_dataset(train_df, label=\"class\")\nmodel = tfdf.keras.RandomForestModel()\nmodel.fit(train_ds)\ntest_df = 
pandas.read_csv(\"penguins_test.csv\")\n# Convert it to a TensorFlow dataset\ntest_ds = tfdf.keras.pd_dataframe_to_tf_dataset(test_df, label=\"species\")\n# Evaluate the model\nmodel.compile(metrics=[\"accuracy\"])\nprint(model.evaluate(test_ds))\n# Export the model to a TensorFlow SavedModel\nmodel.save(\"project/model\")\ntfdf.model_plotter.plot_model_in_colab(model, tree_idx=0)\n# Print all the available information about the model\nmodel.summary()\n# Get feature importance as an array\nmodel.make_inspector().variable_importances()[\"MEAN_DECREASE_IN_ACCURACY\"]\n","repo_name":"animesh/scripts","sub_path":"deRSNA.py","file_name":"deRSNA.py","file_ext":"py","file_size_in_byte":14165,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
{"seq_id":"20728377565","text":"n = 1\npar = ímpar = 0\nwhile n != 0:\n    n = int(input(\"Enter a number: \"))\n    if n != 0:\n        if n % 2 == 0:\n            par += 1\n        else:\n            ímpar += 1\nprint(f\"You entered {par} even numbers and {ímpar} odd numbers.\")\n","repo_name":"carolnogueira13/Curso-em-video-python","sub_path":"Python-aulas/aula14b.py","file_name":"aula14b.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
{"seq_id":"17773816192","text":"# Create your models here.\nimport json\nimport random\nfrom collections import defaultdict\n\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.db import models\nfrom django.contrib.auth.models import User\n\n\nclass GetOrNoneManager(models.Manager):\n    \"\"\"Adds get_or_none method to objects\"\"\"\n\n    def get_or_none(self, **kwargs):\n        try:\n            return self.get(**kwargs)\n        except self.model.DoesNotExist:\n            return None\n\n\nclass Game(models.Model):\n    room_name = models.CharField(max_length=50)\n    game_status = models.CharField(max_length=50, default=\"active\")\n    created_at = models.DateTimeField(auto_now_add=True)\n    updated_at = models.DateTimeField(auto_now=True)\n    round_started = models.BooleanField(default=False)\n    is_joinable = models.BooleanField(default=True)\n\n    def as_json(self):\n        return dict(\n            id=self.id,\n            game_status=self.game_status,\n            is_joinable=self.is_joinable,\n            room_name=self.room_name,\n            round_started=self.round_started,\n            users=[u.as_json() for u in self.game_players.all()],\n            messages=[\n                m.as_json()\n                for m in self.messages.all()\n                .exclude(message_type=\"round_recap\")\n                .order_by(\"created_at\")\n            ],\n            round_history=[\n                m.as_json()\n                for m in self.messages.all()\n                .filter(message_type=\"round_recap\")\n                .order_by(\"created_at\")\n            ],\n            current_round=[r.as_json() for r in self.rounds.all().filter(started=True)],\n        )\n\n    def update_player_status(self, player_points):\n        winners = []\n        for player in self.game_players.all():\n            points = player_points[player.id]\n            updated_points = points + player.followers\n\n            # the floor is zero\n            if updated_points <= 0:\n                player.loser = True\n                loser = Winner.objects.get(winner_id=player.user.id)\n                loser.followers = loser.followers + 10\n                loser.save()\n                Message.objects.create(\n                    message=\"{} lost\".format(player.user.username),\n                    message_type=\"round_recap\",\n                    username=player.user.username,\n                    game=self,\n                )\n            else:\n                winners.append(player)\n            player.followers = updated_points\n            player.save()\n        return winners\n\n    def can_start_game(self):\n        \"\"\"See if the round can be started. Requires at least 3 players and
Requires at least 3 players and\n that all players in the room have started\"\"\"\n\n if self.game_players.all().count() < 3:\n self.round_started = False\n self.save()\n return False\n\n for player in self.game_players.all():\n if player.started is False:\n return False\n self.round_started = True\n self.is_joinable = False # game is not joinable if the round started\n self.save()\n return True\n\n def check_joinability(self):\n if self.game_players.all().count() == 6:\n self.is_joinable = False\n elif self.round_started is True:\n self.is_joinable = False\n else:\n self.is_joinable = True\n self.save()\n\n def set_players_as_not_having_started(self):\n for player in self.game_players.all():\n player.started = False\n player.save()\n\n\nclass GamePlayer(models.Model):\n followers = models.IntegerField(default=100)\n selfies = models.IntegerField(default=3) # equivalent to licking a lolly\n go_live = models.IntegerField(default=2) # equivalent to tattle\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n loser = models.BooleanField(default=False)\n\n user = models.ForeignKey(\n User,\n related_name=\"game_players\",\n on_delete=models.CASCADE,\n primary_key=False,\n default=\"\",\n )\n started = models.BooleanField(default=False)\n game = models.ForeignKey(\n Game, related_name=\"game_players\", on_delete=models.CASCADE\n )\n winner = models.BooleanField(default=False)\n objects = GetOrNoneManager()\n\n def as_json(self):\n return dict(\n id=self.id,\n winner=self.winner,\n followers=self.followers,\n selfies=self.selfies,\n loser=self.loser,\n go_live=self.go_live,\n username=self.user.username,\n started=self.started,\n )\n\n\nclass Message(models.Model):\n game = models.ForeignKey(Game, related_name=\"messages\", on_delete=models.CASCADE)\n username = models.CharField(max_length=200, default=None)\n message = models.CharField(max_length=200)\n created_at = models.DateTimeField(auto_now_add=True)\n message_type = models.CharField(max_length=50, default=None)\n\n def as_json(self):\n return dict(\n id=self.id,\n message=self.message,\n message_type=self.message_type,\n created_at=json.dumps(self.created_at, cls=DjangoJSONEncoder),\n username=self.username,\n )\n\n\nclass Round(models.Model):\n game = models.ForeignKey(Game, related_name=\"rounds\", on_delete=models.CASCADE)\n started = models.BooleanField(default=False)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n objects = GetOrNoneManager()\n\n def as_json(self):\n return dict(\n id=self.id,\n started=self.started,\n moves=[m.as_json() for m in self.moves.all()],\n )\n\n def no_one_moved(self):\n \"if no one moved, we want to end the game\"\n for move in self.moves.all():\n if move.action_type != \"no_move\":\n return False\n return True\n\n def everyone_moved(self):\n \"use this function to know if we need to update the clock\"\n if (\n self.moves.all().count()\n == self.game.game_players.all().filter(loser=False).count()\n ):\n return True\n return False\n\n def update_user_message(self, id, action_type, points, extra=None):\n gp = GamePlayer.objects.get(id=id)\n msg = Message.objects.filter(\n game=self.game, message_type=\"round_recap\", username=gp.user.username\n ).last()\n msg.message = self.generate_new_message(\n action_type, points, gp.user.username, extra\n )\n msg.save()\n\n\nclass Move(models.Model):\n round = models.ForeignKey(Round, related_name=\"moves\", on_delete=models.CASCADE)\n action_type = 
models.CharField(max_length=200, default=\"no_move\")\n player = models.ForeignKey(\n GamePlayer, related_name=\"game_player\", on_delete=models.CASCADE\n )\n victim = models.ForeignKey(\n GamePlayer,\n related_name=\"victim\",\n blank=True,\n null=True,\n on_delete=models.CASCADE,\n )\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n objects = GetOrNoneManager()\n\n def as_json(self):\n return dict(\n id=self.id,\n action_type=self.action_type,\n player=self.player.as_json() if self.player else None,\n victim=self.victim.as_json() if self.victim else None,\n )\n\n\nclass Winner(models.Model):\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n winner = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True)\n followers = models.IntegerField(default=0)\n\n def as_json(self):\n return dict(followers=self.followers, username=self.winner.username)\n","repo_name":"aduranil/django-channels-react-multiplayer","sub_path":"app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7825,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"61"} +{"seq_id":"34750083266","text":"import datetime\n\nfrom django.test import TestCase\nfrom django.utils import timezone\n\nfrom .models import Question\n\n# Create your tests here.\n\n\nclass QuestionModelTests(TestCase):\n # Django looks for test methods that begins with \"test\"\n def test_was_published_recently_with_future_question(self):\n \"\"\"\n was_published_recently() returns False for questions whose pub_date\n is in the future.\n \"\"\"\n future_time = timezone.now() + datetime.timedelta(days=7)\n future_question = Question(pub_date=future_time)\n self.assertIs(future_question.was_published_recently(), False)\n\n # (venv) ➜ projects git:(main) ✗ ./manage.py test polls\n # Creating test database for alias 'default'...\n # System check identified no issues (0 silenced).\n # F\n # ======================================================================\n # FAIL: test_was_published_recently_with_future_question (polls.tests.QuestionModelTests)\n # was_published_recently() returns False for questions whose pub_date\n # ----------------------------------------------------------------------\n # Traceback (most recent call last):\n # File \"/Users/hayounlee/Projects/learning/django-for-everybody/projects/polls/tests.py\", line 19, in test_was_published_recently_with_future_question\n # self.assertIs(future_question.was_published_recently(), False)\n # AssertionError: True is not False\n\n # ----------------------------------------------------------------------\n # Ran 1 test in 0.001s\n\n # FAILED (failures=1)\n # Destroying test database for alias 'default'...\n\n def test_was_published_recently_with_old_question(self):\n \"\"\"\n was_published_recently() returns False for questions whose pub_date\n is older than 1 day.\n \"\"\"\n old_time = timezone.now() - datetime.timedelta(days=1, seconds=1)\n old_question = Question(pub_date=old_time)\n self.assertIs(old_question.was_published_recently(), False)\n\n def test_was_published_recently_with_recent_question(self):\n \"\"\"\n was_published_recently() returns True for questions whose pub_date\n is within the last day.\n \"\"\"\n recent_time = timezone.now() - datetime.timedelta(\n hours=23, minutes=59, seconds=59\n )\n recent_question = Question(pub_date=recent_time)\n self.assertIs(recent_question.was_published_recently(), 
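# The three tests above pin down the exact window was_published_recently()
# must implement: True only when now - 1 day <= pub_date <= now. A sketch
# of that predicate with plain datetimes (Django's version uses timezone.now()):
import datetime

def demo_was_published_recently(pub_date, now):
    return now - datetime.timedelta(days=1) <= pub_date <= now

t = datetime.datetime(2023, 1, 2, 12, 0)
assert demo_was_published_recently(t - datetime.timedelta(hours=23, minutes=59, seconds=59), t)
assert not demo_was_published_recently(t - datetime.timedelta(days=1, seconds=1), t)
assert not demo_was_published_recently(t + datetime.timedelta(days=7), t)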
True)\n","repo_name":"sidhlee/django-for-everybody","sub_path":"projects/polls/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"16181537813","text":"from PySide2 import QtCore\nfrom PySide2.QtWidgets import QApplication, QDialog, QVBoxLayout, QGroupBox, QPlainTextEdit, QHBoxLayout, QCheckBox, \\\n    QRadioButton, QSpinBox\nimport sys\n\n\nclass UIMyEditorDialog(QDialog):\n    \"\"\"\n    Summary of key points:\n    Unlike PyQt, PySide2 cannot yet use Slot(, name=\"on_objName_signal\") to overload a slot so that two slots run together\n    \"\"\"\n    def __init__(self, parent=None):\n        super().__init__(parent)\n        self.__setup_ui()\n        QtCore.QMetaObject.connectSlotsByName(self)  # whether this line lives in the parent class or the subclass makes no difference to the result, so keeping it in the parent class keeps the logic part clearer\n\n    def __setup_ui(self):\n        self.v_layout = QVBoxLayout()\n        self.ft_style_group = QGroupBox()\n        self.color_group = QGroupBox()\n        self.editor = QPlainTextEdit()\n        self.spinBox = QSpinBox(self)\n        self.spinBox.setObjectName(\"spinBox\")\n\n        self.v_layout.addWidget(self.editor)\n        self.v_layout.addWidget(self.ft_style_group)\n        self.v_layout.addWidget(self.color_group)\n        self.v_layout.addWidget(self.spinBox)\n        self.setLayout(self.v_layout)\n\n        self.ft_style_layout = QHBoxLayout()\n        self.italicCheckbox = QCheckBox(\"Italic\", self)\n        # This line really cannot be omitted!! Naming the variable alone does not achieve the effect; a decorator might be worth trying instead, which would keep the code more concise and clear\n        # This call sets the object name of italicCheckbox; combined with QtCore.QMetaObject.connectSlotsByName it achieves automatic slot connection.\n        self.italicCheckbox.setObjectName(\"italicCheckbox\")\n        self.bold_checkbox = QCheckBox(\"Bold\",self)\n        self.underline_checkbox = QCheckBox(\"Underline\",self)\n        self.ft_style_layout.addWidget(self.italicCheckbox)\n        self.ft_style_layout.addWidget(self.bold_checkbox)\n        self.ft_style_layout.addWidget(self.underline_checkbox)\n        self.ft_style_group.setLayout(self.ft_style_layout)\n\n        self.color_layout = QHBoxLayout()\n        self.red_radio = QRadioButton(\"Red\", self)\n        self.green_radio = QRadioButton(\"Green\", self)\n        self.blue_radio = QRadioButton(\"Blue\", self)\n        self.color_layout.addWidget(self.red_radio)\n        self.color_layout.addWidget(self.green_radio)\n        self.color_layout.addWidget(self.blue_radio)\n        self.color_group.setLayout(self.color_layout)\n\n\nclass MyDialog(UIMyEditorDialog):\n    def __init__(self, parent):\n        super().__init__(parent)\n\n    @QtCore.Slot(bool)\n    def on_italicCheckbox_clicked(self, is_checked):\n        \"\"\"\n        This function will not take effect either; Slot does not support parameterless functions\n        :return:\n        \"\"\"\n        print(\"italicCheckbox clicked \", is_checked)\n\n    @QtCore.Slot(str)\n    def on_spinBox_valueChanged(self, str_val:str):\n        \"\"\"\n        Whichever is written later takes effect\n        :param str_val:\n        :return:\n        \"\"\"\n        print(\"on_spinBox_valueChanged_str \", str_val)\n\n    @QtCore.Slot(int)\n    def on_spinBox_valueChanged(self, int_val:int):\n        \"\"\"\n        This slot does respond, because it comes last; it shows PySide2 still has some maturing to do compared with PyQt\n        :param int_val:\n        :return:\n        \"\"\"\n        print(\"on_spinBox_valueChanged_int \", int_val)\n\n\nif __name__==\"__main__\":\n    qtapp = QApplication(sys.argv)\n    dialog = MyDialog(None)\n    dialog.show()\n    qtapp.exec_()\n","repo_name":"drunkpig/qt-doc","sub_path":"3.1-重载的slot-pyside2.py","file_name":"3.1-重载的slot-pyside2.py","file_ext":"py","file_size_in_byte":3361,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"20956223383","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# Open In Colab\n\n# # Hypothesis Testing II\n\n# * We are going to apply hypothesis testing to the analysis of air quality data. 
\n# \n# * We will use, as before, openly available data: \n#     * [AEMET meteorological data](https://https://datos.comunidad.madrid/catalogo/dataset/calidad_aire_datos_meteo_historico) \n#     * [pollution data from the Community of Madrid](https://https://datos.comunidad.madrid/catalogo/dataset/calidad_aire_datos_historico).\n\n# ###1. Copying the data repository\n# ---\n\n# * We download the code and data repository to work more comfortably:\n\n# In[ ]:\n\n\n# * We import the extensions we are going to need. \n# \n# * To simplify the task we have packaged the data-reading functions in a separate file (lectura_de_datos.py) \n\n# In[ ]:\n\n\nimport lectura_de_datos # reads Madrid meteorological and pollution data files\nimport matplotlib.pyplot as plt\nfrom scipy import stats\nimport numpy as np\n\n\n# ###2. Inventory of magnitudes\n# ---\n\n# * For convenience, we have put the inventory of pollution measuring stations into \"lectura_de_datos.py\".\n# * We list the fields: **code | municipality | name**\n\n# In[ ]:\n\n\n# We simply write the variable name to display its contents\n# -----------------------------------------------------------------------------\nlectura_de_datos.estalist_com\n\n\n# * We list the registered pollutants.\n# * **code | magnitude | units**\n\n# In[ ]:\n\n\n# We simply write the variable name to display its contents\n# -----------------------------------------------------------------------------\nlectura_de_datos.maglist_com\n\n\n# * And finally, the meteorological magnitudes\n# * **code | magnitude | units**\n\n# In[ ]:\n\n\n# We simply write the variable name to display its contents\n# -----------------------------------------------------------------------------\nlectura_de_datos.maglist_meteo\n\n\n# ###3. Loading the data\n# ---\n\n# * We are going to load data for two different years\n# \n\n# In[ ]:\n\n\n# Example with pollution data\n# ----------------------------------\nanio1 = '2019'\nanio2 = '2020'\nestacion = 28092005 # Móstoles\n\n# Pollutants\n# ----------------------\n\n# First example\n# --------------\nmagnitud = 10 # PM10\n\n# Second example\n# --------------\n#magnitud = 8 # NO2\n\ndf1, magnitud1, unidades1,estacion1 = lectura_de_datos.contaminacion(\n    '../datos/contaminacion/%s.csv' % anio1,\n    codigo_magnitud = magnitud,\n    codigo_estacion = estacion\n    ) \n\ndf2, magnitud2, unidades2,estacion2 = lectura_de_datos.contaminacion(\n    '../datos/contaminacion/%s.csv' % anio2,\n    codigo_magnitud = magnitud,\n    codigo_estacion = estacion\n    ) \n\ndf1.rename(columns={'valor':anio1}, inplace=True)\ndf2.rename(columns={'valor':anio2}, inplace=True)\n\nprint(df1.describe())\nprint(df2.describe())\n\n\n# * Are there differences between the mean values of the two series?\n# * Are those differences ***significant***, or a mere product of chance?\n# * Remember that what we observe is one *realization* out of many similar but not identical possibilities.\n\n# In[ ]:\n\n\n# Difference in the mean\n# -------------------------------------\ndiff = 100*(df2[anio2].mean()/df1[anio1].mean()-1)\n\nprint('Relative to %s, the mean for year %s has changed by %.1f%%' % (anio1,anio2,diff))\n\n\n# ###4. 
Visual inspection\n# ---\n\n# * We take a look at both time series to visually estimate whether there are noticeable changes\n\n# In[ ]:\n\n\n# Each series in its own plot\n# ---------------------------------------\nfig, ax = plt.subplots(nrows=2,ncols=2,figsize=(12,8))\n\n# Time series, year 1\n# -----------------------------------\nax[0,0].plot(df1[anio1],label=anio1)\nax[0,0].set_ylim(0,180)\n\n# Histogram, year 1\n# ------------------------------------\nax[0,1].hist(df1[anio1],label=anio1,bins=30)\nax[0,1].set_xlim(0,100)\nax[0,1].set_ylim(0,2600)\nax[0,1].set_title('median=%.2f media=%.2f' %(df1[anio1].median(),df1[anio1].mean()))\nax[0,1].axvline(df1[anio1].mean(),color='red',lw=2,ls='--')\nax[0,1].axvline(df1[anio1].median(),color='black',lw=2,ls='--')\n\n# Time series, year 2\n# -----------------------------------\nax[1,0].plot(df2[anio2],label=anio2)\nax[1,0].set_ylim(0,180)\n\n# Histogram, year 2\n# -------------------------------------\nax[1,1].hist(df2[anio2],label=anio2,bins=30)\nax[1,1].set_xlim(0,100)\nax[1,1].set_ylim(0,2600)\nax[1,1].set_title('median=%.2f mean=%.2f' %(df2[anio2].median(),df2[anio2].mean()))\nax[1,1].axvline(df2[anio2].mean(),color='red',lw=2,ls='--')\nax[1,1].axvline(df2[anio2].median(),color='black',lw=2,ls='--')\n\nfor a in ax.flatten() :\n    a.grid(True)\n    a.legend()\n\nplt.suptitle('%s %s %s' % (estacion1,magnitud1,unidades1)) \nplt.show()\n\n\n# ###5. Is it the same distribution?\n# ---\n\n# * There are many statistical tests in common use for hypothesis testing\n#     * Some are more robust than others.\n#     * Some work better with samples of a certain size.\n# * The **Mann-Whitney U** test is used to compare distributions\n\n# In[ ]:\n\n\n# Example comparing two \n# samples from the same population\n# ---------------------------------------\nA = np.random.normal(size=1000,loc=0,scale=1)\nB = np.random.normal(size=1000,loc=0,scale=1)\nfig,ax = plt.subplots(1,2)\nax[0].hist(A,bins=50)\nax[1].hist(B,bins=50)\nfor a in ax:\n    a.grid(True)\n    a.set_ylim(0,70)\n    a.set_xlim(-3,3)\nplt.show()\n\nst,p_valor = stats.mannwhitneyu(A,B)\nprint('p_valor=%.4f' % p_valor)\n\n\n# ###6. Hypothesis test I\n# ---\n\n# * We will assume that both series come from the same distribution and that the observed differences in the statistics are due to chance\n# * That will be the starting hypothesis\n# * We will use the Mann-Whitney U test to determine the probability that both series correspond to the same population\n# * We will compare the resulting p-value with the significance level we assign to the test, for example 5%\n# * We will decide whether to accept or reject the null hypothesis\n\n# In[ ]:\n\n\n# We apply the Mann-Whitney U test to our series\n# ------------------------------------------------------\nst,p_valor = stats.mannwhitneyu(df1[anio1],df2[anio2])\nprint('p_valor=%e' % p_valor)\n\n\n# In[ ]:\n\n\n# Significance level\n# ----------------------\nalfa = 0.05\n\n# Result\n# ----------\nif p_valor < alfa :\n    print('We reject the starting hypothesis (there is enough evidence against it)')\nelse :\n    print('We keep the starting hypothesis (there is not enough evidence against it)')\n\n\n# ###7. 
Another example\n# ---\n\n# In[ ]:\n\n\n# Example with meteorological data\n# ----------------------------------\nanio1 = '2020'\nanio2 = '2022'\nestacion = 28092005 # Móstoles\n\n# Meteorological variables\n# -------------------------\n\n# First example\n# --------------\nmagnitud = 83 # Temperature\n\n# Second example (overrides the first)\n# --------------\nmagnitud = 87 # Atmospheric pressure\n\ndf1, magnitud1, unidades1,estacion1 = lectura_de_datos.meteo(\n    '../datos/meteo/%s.csv' % anio1,\n    codigo_magnitud = magnitud,\n    codigo_estacion = estacion\n    ) \n\ndf2, magnitud2, unidades2,estacion2 = lectura_de_datos.meteo(\n    '../datos/meteo/%s.csv' % anio2,\n    codigo_magnitud = magnitud,\n    codigo_estacion = estacion\n    ) \n\ndf1.rename(columns={'valor':anio1}, inplace=True)\ndf2.rename(columns={'valor':anio2}, inplace=True)\n\nprint(df1.describe())\nprint(df2.describe())\n\n\n# ###8. Summary\n# ---\n\n# * In this notebook we have seen a practical case of applying hypothesis testing to decision making.\n# \n# * We compared two annual time series in an entirely subjective way (visually)\n# * We verified that the measures of central tendency show differences\n# * We asked whether the differences are **significant** from a statistical point of view\n# * We used a Mann-Whitney U test to estimate whether both series are samples from the same population\n# * By comparing the test's p-value with the significance level, we made a decision.\n","repo_name":"waveology/aire","sub_path":"local/tema8_contraste_de_hipotesis_parte_2.py","file_name":"tema8_contraste_de_hipotesis_parte_2.py","file_ext":"py","file_size_in_byte":8839,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"23427486031","text":"import sys, string\r\n\r\ndef cookies(c, f, x):\r\n#    print \"running case %f, %f, %f\" % (c, f, x)\r\n    rcur = 2.0\r\n    t = 0.0\r\n    while 1:\r\n        rnext = rcur + f\r\n        dtfact = c / rcur + x / rnext\r\n        dtx = x / rcur\r\n#        print \"round t %f, rcur %f, w/o factory %f, with factory %f\" % (t, rcur, dtx, dtfact)\r\n        if dtfact >= dtx:\r\n            t += dtx\r\n            break\r\n        t += c / rcur\r\n        rcur = rnext\r\n    return t\r\n\r\ndef main(args):\r\n    f = file(args[1])\r\n    ncases = int(f.readline())\r\n    for i in range(ncases):\r\n        line = f.readline()\r\n        line = line.rstrip()\r\n        c, fc, x = map(float, line.split(\" \"))\r\n#        print \"running case %d, %f, %f, %f\" % (i+1, c, f, x)\r\n        ans = cookies(c, fc, x)\r\n        sys.stdout.write(\"Case #%d: %0.7f\\n\" % (i+1, ans))\r\n\r\nif __name__ == \"__main__\":\r\n    main(sys.argv)","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_136/3306.py","file_name":"3306.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"26419532561","text":"#Rule 30 - Correctness of MAP_URL - URL should point to associated building/location.\ndef rule_map_url():\n\timport re\n\timport os\n\timport sys\n\timport json\n\timport openpyxl\n\timport pandas as pd\n\tfrom pandas import ExcelWriter\n\tfrom pandas import ExcelFile\n\timport validators\n\tfrom dateutil.parser import parse\n\n\tfile_name=\"Correctness_of_MAP_URL.py\"\n\tconfigFile = 'C:/Configuration.xlsx'\n\trule=file_name[:file_name.find('.py')]\n\tfile_directory= 'C:/uploads'\n\t\n\tconfig_file=configFile\n\ttarget= 
'C:/Users/105666/projects/pythonProject/angular-python-flask-demo/DataFiles_Rules_Report.xlsx'\n\n\tall_files=os.listdir(file_directory)\n\tfiles=[]\n\n\tconfig=pd.read_excel(config_file)\n\tnewdf=config[config['RULE']==rule]\n\tto_check=''\n\tfor index,row in newdf.iterrows():\n\t\tto_check=row['TO_CHECK']\n\tto_check=json.loads(to_check)\n\tfiles_to_apply=to_check['files_to_apply']\n\tcolumns_to_apply=to_check['columns_to_apply']\n\n\tif(to_check['files_to_apply']=='ALL'):\n\t\tfiles = all_files\n\telse:\n\t\tfor f in files_to_apply:\n\t\t\tfor file in all_files:\n\t\t\t\tif(file.startswith(f)):\n\t\t\t\t\tfiles.append(file)\n\n\tdata=[]\n\n\tfor file in files:\n\t\tdf = pd.read_excel(file_directory+'/'+file)\n\t\tdf.index = range(2,df.shape[0]+2)\n\n\t\tfor index, row in df.iterrows():\n\t\t\tfor column_name in columns_to_apply:\n\t\t\t\tcolumn_value=row[column_name]\n\t\t\t\tif(type(column_value)!=float):\n\t\t\t\t\tif(validators.url(str(column_value))!=True):\n\t\t\t\t\t\tentry=[index,file,column_value+' does not have a valid MAP_URL']\n\t\t\t\t\t\tprint('The row '+str(index)+' in the file '+file+' does not have a valid MAP_URL in the '+column_name+' column')\n\t\t\t\t\t\tdata.append(entry)\n\n\tdf1 = pd.DataFrame(data, columns = ['ROW_NO', 'FILE_NAME', 'COMMENTS'])\n\twith ExcelWriter(target,engine='openpyxl',mode='a') as writer:\n\t\tdf1.to_excel(writer,sheet_name=rule,index=False)","repo_name":"sharad-saurav/thoughtfocus","sub_path":"Correctness_of_MAP_URL.py","file_name":"Correctness_of_MAP_URL.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"3594871798","text":"import sqlite3 #import sqlite3\nconn = sqlite3.connect(r\"E:\\Ronnapoom_python\\example.db\")\nc = conn.cursor() #create a cursor object \n'''c.execute(\"CREATE TABLE users(id integer PRIMARY KEY AUTOINCREMENT,\n              fnme varchar(30) NOT NULL,\n              IName varchar(30) NOT NULL,\n              email varchar(100) NOT NULL)\")'''\nc.execute('''INSERT INTO users (id,fnme,IName,email) VALUES (NULL,\"A\",\"A\",\"A\")''')\nc.execute('''INSERT INTO users VALUES (NULL,\"B\",\"B\",\"B\")''') \nconn.commit() #save (commit) the change\nconn.close() #close the connection when done \n\n","repo_name":"ronnapoom/python","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"3616125483","text":"# References:\r\n# https://www.kite.com/python/answers/how-to-convert-a-list-of-strings-to-ints-in-python\r\n# https://www.geeksforgeeks.org/python-remove-empty-strings-from-list-of-strings/\r\n# https://www.kite.com/python/answers/how-to-rotate-axis-labels-in-matplotlib-in-python\r\n\r\nimport csv\r\nimport sys\r\nimport matplotlib.pyplot as plt\r\n\r\ncsvfile = 'COVID19.csv'\r\nwith open(csvfile,'r') as myfile_obj:\r\n    myfile_reader = csv.reader(myfile_obj)\r\n    header = next(myfile_reader)\r\n    StateIndex = header.index('Province_State')\r\n    ConfirmedIndex = header.index('Confirmed')\r\n    DeathsIndex = header.index('Deaths')\r\n    RecoveredIndex = header.index('Recovered')\r\n    ActiveIndex = header.index('Active')\r\n    FatalityIndex = header.index('Case_Fatality_Ratio')\r\n    State = []\r\n    State2 = []\r\n    Confirmed = []\r\n    Deaths = []\r\n    Recovered = []\r\n    Active = []\r\n    Fatality = []\r\n    for row in myfile_reader:\r\n        State.append(row[StateIndex])\r\n        Confirmed.append(row[ConfirmedIndex])\r\n        Deaths.append(row[DeathsIndex])\r\n        
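# note: several of these columns can be blank in the CSV, so they are cleaned up right after this loop\r\n        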
Recovered.append(row[RecoveredIndex])\r\n        Active.append(row[ActiveIndex])\r\n        Fatality.append(row[FatalityIndex])\r\n        State2.append([row[StateIndex],row[ConfirmedIndex],row[DeathsIndex],row[RecoveredIndex],row[ActiveIndex],row[FatalityIndex]])\r\n    Recovered = [0 if i== '' else i for i in Recovered]\r\n    Active = [0 if i== '' else i for i in Active]\r\n    while(\"\" in Confirmed):\r\n        Confirmed.remove(\"\")\r\n    while(\"\" in Deaths):\r\n        Deaths.remove(\"\")\r\n    Fatality = [0 if i== '' else i for i in Fatality]\r\n    # Converting string to ints and averaging\r\n    Confirmed_map = map(float,Confirmed)\r\n    Confirmedlist = list(Confirmed_map)\r\n    Confirmedavg = round(sum(Confirmedlist)/len(Confirmedlist),3)\r\n    Death_map = map(float,Deaths)\r\n    Deathlist = list(Death_map)\r\n    Deathavg = round(sum(Deathlist)/len(Deathlist),3)\r\n    Recovered_map = map(float,Recovered)\r\n    Recoveredlist = list(Recovered_map)\r\n    Recoveredavg = round(sum(Recoveredlist)/len(Recoveredlist),3)\r\n    Active_map = map(float,Active)\r\n    Activelist = list(Active_map)\r\n    Activeavg = round(sum(Activelist)/len(Activelist),3)\r\n    Fatality_map = map(float,Fatality)\r\n    Fatalitylist = list(Fatality_map)\r\n    Fatalityavg = round(sum(Fatalitylist)/len(Fatalitylist),3)\r\n    # Graphing the different types of plots based on user input\r\n    def ConfirmedListPlot():\r\n        plt.xticks(fontsize = 10, rotation = 90)\r\n        plt.autoscale()\r\n        plt.plot(State,Confirmedlist,color = 'red')\r\n        plt.title('Confirmed Cases of Covid-19 in the US')\r\n        plt.xlabel('State')\r\n        plt.ylabel('Confirmed Cases')\r\n        plt.grid(True)\r\n        plt.show()\r\n    def DeathListPlot():\r\n        plt.xticks(fontsize = 10, rotation = 90)\r\n        plt.autoscale()\r\n        plt.plot(State,Deathlist,color = 'red')\r\n        plt.title('Number of Deaths due to Covid-19 in the US')\r\n        plt.xlabel('State')\r\n        plt.ylabel('Deaths')\r\n        plt.grid(True)\r\n        plt.show()\r\n    def RecoveredListPlot():\r\n        plt.xticks(fontsize = 10, rotation = 90)\r\n        plt.autoscale()\r\n        plt.plot(State,Recoveredlist,color = 'red')\r\n        plt.title('Number of Recovered People of Covid-19 in the US')\r\n        plt.xlabel('State')\r\n        plt.ylabel('Recovered People')\r\n        plt.grid(True)\r\n        plt.show()\r\n    def ActiveListPlot():\r\n        plt.xticks(fontsize = 10, rotation = 90)\r\n        plt.autoscale()\r\n        plt.plot(State,Activelist,color = 'red')\r\n        plt.title('Number of Active cases of Covid-19 in the US')\r\n        plt.xlabel('State')\r\n        plt.ylabel('Active Cases')\r\n        plt.grid(True)\r\n        plt.show()\r\n    def FatalityListPlot():\r\n        plt.xticks(fontsize = 10, rotation = 90)\r\n        plt.autoscale()\r\n        plt.plot(State,Fatalitylist,color = 'red')\r\n        plt.title('Case Fatality Ratio of Covid-19 in the US')\r\n        plt.xlabel('State')\r\n        plt.ylabel('Case Fatality Ratio')\r\n        plt.grid(True)\r\n        plt.show()\r\n    loop = True\r\n    while loop:\r\n        try:\r\n            Selectioninput = int(input(\"[0]-Quit [1]-Choosing a State [2]-Print Table [3]-Graphs [4]-Stats: \"))\r\n            if(Selectioninput == 1):\r\n                Stateinput = input(\"Type a state in the United States: \")\r\n                StateinputIndex = State.index(Stateinput)\r\n                if ((Stateinput in State)):\r\n                    menuinput = int(input(\"Please select one of the options [0]-Quit [1]-Confirmed [2]-Deaths [3]-Recovered [4]-Active [5]-Fatality: \"))\r\n                    if (menuinput == 1):\r\n                        print(f'The number of confirmed cases in {Stateinput} is {Confirmed[StateinputIndex]}')\r\n                    elif (menuinput == 2):\r\n                        print(f'The number of deaths in {Stateinput} is {Deaths[StateinputIndex]}')\r\n                    elif (menuinput == 3):\r\n                        print(f'The number of people who recovered in {Stateinput} is {Recovered[StateinputIndex]}')\r\n                    
elif (menuinput == 4):\r\n                        print(f'The number of active cases in {Stateinput} is {Active[StateinputIndex]}')\r\n                    elif (menuinput == 5):\r\n                        print(f'The number of fatality cases in {Stateinput} is {Fatality[StateinputIndex]}')\r\n                    elif (menuinput == 0):\r\n                        print(\"Good Bye!\")\r\n                        print(\" \")\r\n                        loop = False\r\n                else:\r\n                    print(\"Invalid Entry! Try Again!\")\r\n                    print(\" \")\r\n                    loop = True\r\n            elif (Selectioninput == 2):\r\n                print(\" State Confirmed Deaths Recovered Active Fatality \\n\")\r\n                for col in State2:\r\n                    print(\"%30s\" %col[0],\" %30s \" %col[1],\" %30s \" %col[2],\" %30s \" %col[3],\" %30s \" %col[4],\" %30s \" %col[5])\r\n            elif(Selectioninput == 3):\r\n                loop1 = True\r\n                while loop1:\r\n                    graphmenu = int(input(\"Which Graph [0]-Quit [1]-Confirmed [2]-Deaths [3]-Recovered [4]-Active [5]-Fatality:\"))\r\n                    if(graphmenu == 1):\r\n                        ConfirmedListPlot()\r\n                        loop1 = True\r\n                    elif(graphmenu == 2):\r\n                        DeathListPlot()\r\n                        loop1 = True\r\n                    elif(graphmenu == 3):\r\n                        RecoveredListPlot()\r\n                        loop1 = True\r\n                    elif(graphmenu == 4):\r\n                        ActiveListPlot()\r\n                        loop1 = True\r\n                    elif(graphmenu == 5):\r\n                        FatalityListPlot()\r\n                        loop1 = True\r\n                    elif(graphmenu == 0):\r\n                        loop1 = False\r\n                    else:\r\n                        print(\"Invalid Entry! Try Again!\")\r\n                loop = True\r\n            elif(Selectioninput == 4):\r\n                print(f'The average number of confirmed cases in the United States is {Confirmedavg} cases')\r\n                print(f'The average number of deaths in the United States is {Deathavg} cases')\r\n                print(f'The average number of recovered cases in the United States is {Recoveredavg} cases')\r\n                print(f'The average number of active cases in the United States is {Activeavg} cases')\r\n                print(f'The average case fatality ratio in the United States is {Fatalityavg}')\r\n                loop = True\r\n            elif(Selectioninput == 0):\r\n                print(\"Good Bye!\")\r\n                loop = False\r\n        except:\r\n            print(\"Invalid Entry!\")\r\n","repo_name":"IGopherWins/Data-Analytics","sub_path":"FinalProject.py","file_name":"FinalProject.py","file_ext":"py","file_size_in_byte":7935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"17950484503","text":"# -*- coding: utf-8 -*-\n'''\nThis entity represents the resources that can be used\n'''\n\nclass Resources():\n\n    def __init__(self):\n        self.auxDic = {0:'printer', 1:'scanner', 2:\"modem\", 3:'disk'}\n        self.processesWithResources = []\n        self.busyResource = {'printer':[], 'scanner':[], 'modem':[], 'disk':[]}\n        self.availableResource = {} # something in this format: {'resource': quantity}\n        self.listOfResourcesToCreate = ['printer', 'printer', 'scanner', 'modem', 'disk', 'disk']\n        for item in self.listOfResourcesToCreate:\n            if item in self.availableResource:\n                self.availableResource[item] += 1\n            else:\n                self.availableResource[item] = 1\n\n    def requestResources(self, resourcesToAllocate, processID):\n        if processID in self.processesWithResources:\n            return True\n        else:\n            for resource in range(0,4):\n                if self.availableResource[self.auxDic[resource]] > 0 and resourcesToAllocate[resource] == 1:\n                    self.busyResource[self.auxDic[resource]].append(processID)\n                    self.availableResource[self.auxDic[resource]] -= 1\n                elif self.availableResource[self.auxDic[resource]] == 0:\n                    # deallocate what was already allocated and put the process in the waiting queue\n                    for failed in range(0, resource):\n                        if resourcesToAllocate[failed] == 1:\n                            self.busyResource[self.auxDic[failed]].remove(processID)\n                            self.availableResource[self.auxDic[failed]] += 1\n                    return False\n            self.processesWithResources.append(processID)\n            return 
True\n\n def freeResources(self,processID):\n for resource in self.busyResource:\n try:\n self.busyResource[resource].remove(processID)\n self.availableResource[resource] += 1\n self.processesWithResources.remove(processID)\n except:\n pass\n\n def __str__(self):\n returnString = ''\n for item in self.availableResource:\n returnString += item+' '+str(self.availableResource[item])+'\\n'\n return returnString\n","repo_name":"JonRSP/PseudoSO-SO20192","sub_path":"src/class/resource.py","file_name":"resource.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33148551418","text":"\"\"\"\nThe test manager should:\n1. Accepts Docker Image URL, Target Project URL, commands to install dependencies as input\n2. Uses the above information to create a Docker container to run and monitor the test sequences\n3. Generates the reports at the end\n\"\"\"\n\nimport argparse\nimport os\nimport signal\nimport tarfile\nfrom datetime import datetime\nfrom io import FileIO, StringIO\nfrom typing import List\n\nimport docker\nfrom docker.models.containers import Container\n\nSCRIPT_FOLDER = \"bash\"\nBASIC_DEPENDENCY_INSTALLATION_SCRIPT = \"install_basic_dependency.sh\"\nTEST_RUNNER_SCRIPT = \"test-runner.py\"\nTEST_RUNNER_DEPENDENCY_FILE = \"requirements.txt\"\nTHIS_PROJECT_URL = \"https://github.com/polohan/No-Time-To-Flake\"\nLIBFAKETIME_PROJECT_URL = \"https://github.com/polohan/libfaketime\"\nTHIS_PROJECT_FOLDER = \"tool\"\nRUNNER_SCRIPT = \"test-runner.py\"\nTARGET_PROJECT_FOLDER = \"target\"\nLIBFAKETIME_FOLDER = \"libfaketime\"\n\ndef _create_container(image_url: str) -> Container:\n \"\"\"Create the Docker container to run the test sequence in.\n\n Args:\n image_url (str): the image to run\n\n Returns:\n container: a Docker container obj\n \"\"\"\n client = docker.from_env()\n container = client.containers.run(image_url, 'bash', tty=True, detach=True)\n return container\n\ndef _get_container(container_id: str) -> Container:\n \"\"\"Get the Docker container to run the test sequence in.\n\n Args:\n container_id (str): the container id\n\n Returns:\n container: a Docker container obj\n \"\"\"\n client = docker.from_env()\n container = client.containers.get(container_id)\n return container\n\n# Source: https://stackoverflow.com/a/52716666\ndef _copy_file(container: Container, src: str, dst: str) -> None:\n \"\"\"Copy a file into container.\n\n Args:\n container (Container): the target container to copy the file into\n src (str): source path of the copied file\n dst (str): destination path of the copied file\n \"\"\"\n cwd = os.getcwd()\n file_name = os.path.basename(src)\n os.chdir(os.path.dirname(src))\n tmp_tar_name = f'{file_name}.tar'\n tmp_tar = tarfile.open(tmp_tar_name, mode='w')\n\n try:\n tmp_tar.add(file_name)\n finally:\n tmp_tar.close()\n\n data = open(tmp_tar_name, 'rb').read()\n container.put_archive(dst, data)\n os.remove(tmp_tar_name)\n os.chdir(cwd)\n\ndef _run_cmds(container: Container, commands: List[str], workdir: str = None, stream: bool = False, pipe: FileIO = None, force_stdout: bool = False) -> None:\n \"\"\"Run a command inside the container.\n\n Args:\n container (Container): the container to run the commands in\n commands (List[str]): command to be executed\n workdir (str): the working directory to run the command on\n stream (bool): whether to stream the output\n pipe (FileIO, None): a file-like object to stream the output to. 
Defaults to None\n        force_stdout (bool): whether to also print to stdout when pipe is not None.\n    \"\"\"\n    exit_code, output = container.exec_run(commands, privileged=True, stream=stream, workdir=workdir)\n    if not stream:\n        if exit_code != 0:\n            print(output.decode(), end=\"\")\n            raise Exception(f\"exec_run exits with non-zero exit code: {exit_code}\")\n    else:\n        for data in output:\n            try:\n                decoded_data = data.decode()\n            except UnicodeDecodeError:\n                decoded_data = data\n            if pipe:\n                print(decoded_data, end=\"\", file=pipe)\n                if force_stdout:\n                    print(decoded_data, end=\"\")\n            else:\n                print(decoded_data, end=\"\")\n\ndef _install_faketime(container: Container, workdir: str = '/') -> None:\n    \"\"\"Install libfaketime in this container.\n    If 'make test' fails, add the CFLAG -DFORCE_MONOTONIC_FIX to\n    src/Makefile.\n\n    Args:\n        container (Container): the container to install the libfaketime on\n        workdir (str): the working directory to use when installing libfaketime\n    \"\"\"\n    # download and unzip library\n    dependency_cmd = [\"git\", \"clone\", LIBFAKETIME_PROJECT_URL, LIBFAKETIME_FOLDER]\n    _run_cmds(container, dependency_cmd, workdir)\n\n    # run make test and wait to see if it hangs\n    def _handler(signum, frame):\n        print('Faketime hangs when running make test.')\n        raise TimeoutError('Faketime hangs when running make test.')\n\n    # set the timeout handler\n    signal.signal(signal.SIGALRM, _handler)\n    signal.alarm(60) # wait 60 sec\n\n    fake_file = StringIO()\n    libfaketime_path = os.path.join(workdir, LIBFAKETIME_FOLDER)\n    \n    try:\n        _run_cmds(container, ['make', 'test'], libfaketime_path, True, fake_file)\n    except TimeoutError:\n        output_lines = fake_file.getvalue().splitlines()\n        if \"CLOCK_MONOTONIC\" in output_lines[-1]:\n            print('\"CLOCK_MONOTONIC test\" apparently hangs forever.')\n            print('Adding additional DFORCE_MONOTONIC_FIX CFLAG.')\n            _run_cmds(container, ['sed', '-i', '84 i CFLAGS += -DFORCE_MONOTONIC_FIX', './src/Makefile'],\n                      libfaketime_path)\n        else:\n            print(output_lines)\n            raise TimeoutError(\"Faketime hangs when running make test with unknown cause.\")\n    finally:\n        _run_cmds(container, ['make', 'clean'], libfaketime_path)\n        signal.alarm(0) # cancel alarm\n\n    # install libfaketime\n    print(\"Performing make install.\")\n    _run_cmds(container, ['make', 'install'], libfaketime_path)\n\ndef prepare_container(image_url: str, dependency_file: str, target_project_url: str, project_folder: str = '/home', container_id: str = '') -> None:\n    \"\"\"Create the container for the test sequence.\n\n    Args:\n        image_url (str): the image to run\n        dependency_file (str): the path to the dependency file\n        target_project_url (str): the project to test\n        project_folder (str): the folder to put the project in\n        container_id (str): the id of the container to run the test in. 
Defaults to ''.\n    \"\"\"\n    if not container_id:\n        # create container\n        print(\"Creating container.\")\n        container = _create_container(image_url)\n        print(\"Container created.\")\n    else:\n        # get container\n        print(\"Getting container.\")\n        container = _get_container(container_id)\n        return container\n\n    # install basic dependencies\n    print(\"Installing basic dependencies.\")\n    file_name = BASIC_DEPENDENCY_INSTALLATION_SCRIPT\n    file_path = os.path.join(SCRIPT_FOLDER, file_name)\n    _copy_file(container, file_path, '/')\n    dependency_cmd = [\"bash\", \"-e\", file_name, '/']\n    _run_cmds(container, dependency_cmd)\n    print(\"Basic dependencies installed.\")\n\n    # install libfaketime\n    print(\"Installing libfaketime.\")\n    _install_faketime(container)\n    print(\"libfaketime installed.\")\n\n    # copy other necessary file\n    this_project_path_in_container = os.path.join(project_folder, THIS_PROJECT_FOLDER)\n    _run_cmds(container, [\"mkdir\", THIS_PROJECT_FOLDER], project_folder)\n\n    file_name = TEST_RUNNER_SCRIPT\n    file_path = os.path.join('./', file_name)\n    _copy_file(container, file_path, this_project_path_in_container)\n\n    file_name = TEST_RUNNER_DEPENDENCY_FILE\n    file_path = os.path.join('./', file_name)\n    _copy_file(container, file_path, this_project_path_in_container)\n    _run_cmds(container, [\"pip\", \"install\", \"-r\", \"requirements.txt\"], this_project_path_in_container)\n\n    # download project\n    print(\"Downloading projects from GitHub.\")\n    _run_cmds(container, [\"git\", \"clone\", target_project_url, TARGET_PROJECT_FOLDER], project_folder)\n    print(\"Projects downloaded.\")\n\n    # run dependency_file if it exists\n    if dependency_file and os.path.isfile(dependency_file):\n        print(\"Running dependency script.\")\n        dependency_file = os.path.abspath(dependency_file)\n        file_name = os.path.basename(dependency_file)\n        project_path = os.path.join(project_folder, TARGET_PROJECT_FOLDER)\n        \n        _copy_file(container, dependency_file, project_path)\n\n        # run the file to install additional dependency and install the project (if necessary)\n        dependency_cmd = [\"bash\", \"-e\", file_name]\n        _run_cmds(container, dependency_cmd, project_path, stream=True)\n        print(\"Dependency script finished with no error.\")\n    \n    return container\n\ndef run_test(container: Container, command: str, faketime: str = '', timezone: str = '', project_folder: str = '/home', output_file: str = '', overwrite: bool = False) -> None:\n    \"\"\"Run a single test run in the container.\n\n    Args:\n        container (Container): the target container to run the test in\n        command (str): the command to start the test for target project\n        faketime (str, optional): the faketime format string. Defaults to '' if using actual time.\n        timezone (str, optional): the TZ timezone string. Defaults to '' if using actual timezone.\n        project_folder (str, optional): the folder that contains the target project and tool. Defaults to '/home'.\n        output_file (str, optional): the path of the file to write the output to.\n        overwrite (bool): whether to replace the old run result if an old one already exists. Defaults to False.\n    \"\"\"\n    target_project_path = os.path.join(project_folder, TARGET_PROJECT_FOLDER)\n\n    write_pipe = None\n    if output_file:\n        if os.path.exists(output_file) and not overwrite:\n            print(f\"{output_file} already exists. 
Skipping test.\")\n return\n os.makedirs(os.path.dirname(output_file), exist_ok=True)\n write_pipe = open(output_file, 'w', encoding=\"utf-8\")\n\n _run_cmds(container, ['python3',\n os.path.join('../', THIS_PROJECT_FOLDER, RUNNER_SCRIPT),\n '-f', faketime,\n '-tz', timezone\n ] + [command], target_project_path, True, pipe=write_pipe, force_stdout=True)\n\ndef start(image_url: str, dependency_file: str, target_project_url: str, command: str, output_path: str, container_id: str = '') -> None:\n \"\"\"Start the whole test process\n\n Args:\n image_url (str): the image to run\n dependency_file (str): the path to the dependency file\n target_project_url (str): the project to test\n command (str): the command to run the project\n output_path (str): the dir to put the output files in\n container_id (str): the id of the container to run the test in. Defaults to ''.\n \"\"\"\n if target_project_url[-1] == '/':\n target_project_url = target_project_url[:-1]\n\n container = prepare_container(image_url, dependency_file, target_project_url, container_id=container_id)\n target_project_name = target_project_url.split('/')[-1]\n\n # dry run\n run_test(container, command)\n # singly run it without faking time/timezone\n run_test(container, command, output_file=os.path.join(output_path, target_project_name, 'test-ori.out'))\n # run with faketime but don't actual fake anything\n run_test(container, command, output_file=os.path.join(output_path, target_project_name, 'test-fake-ref.out'), faketime='+0')\n # run with faketime with speed up but don't actually speed up\n run_test(container, command, output_file=os.path.join(output_path, target_project_name, 'test-fake-speed-up-ref.out'), faketime='+0 x1.0')\n # run with faketime with adv increment feature\n run_test(container, command, output_file=os.path.join(output_path, target_project_name, 'test-fake-inc-ref.out'), faketime='+0 i1.0')\n\n # test with different potentially buggy timezones\n curr_year = datetime.now().year\n mock_timezones = [\n (\"\", \"UTC\", \"UTC-ref\"),\n (\"\", \"Asia/Kolkata\", None), # UTC+5:30\n (\"\", \"Australia/Eucla\", None), # UTC+8:45\n (\"\", \"Pacific/Marquesas\", None), # UTC-9:30\n (f\"@{curr_year}-07-01 00:00:00\", \"Pacific/Chatham\", \"Chatham\"), # UTC+12:45\n (f\"@{curr_year}-01-01 00:00:00\", \"Pacific/Chatham\", \"Chatham-DST\"), # UTC+13:45\n ]\n for fake_time, fake_time_zone, name in mock_timezones:\n if not name:\n name = fake_time_zone if '/' not in fake_time_zone else fake_time_zone.split('/')[-1]\n \n run_test(container, command, output_file=os.path.join(output_path, target_project_name, f'test-fake-timezone-{name}.out'), faketime=fake_time, timezone=fake_time_zone)\n\n run_test(container, command, output_file=os.path.join(output_path, target_project_name, 'test-fake-ref.out'), faketime='+0')\n\n # speed up runs\n speed_up_factors = [2, 1000, 10000]\n for factor in speed_up_factors:\n run_test(container, command, output_file=os.path.join(output_path, target_project_name, f'test-fake-speed-up-{factor}x.out'), faketime=f'+0 x{factor}.0')\n\n # advanced increment runs\n increment_factors = [2]\n for factor in increment_factors:\n run_test(container, command, output_file=os.path.join(output_path, target_project_name, f'test-fake-inc-{factor}i.out'), faketime=f'+0 i{factor}.0')\n\n # switching runs\n for gap in range(1, 21):\n run_test(container, command, output_file=os.path.join(output_path, target_project_name, f'test-switch-{gap}.out'), faketime=f's {gap}')\n\nif __name__ == \"__main__\":\n parser = 
argparse.ArgumentParser(description='Run command at different time and in different timezone.')\n    parser.add_argument(\"-i\", \"--image\", type=str, help=\"the Docker image URL\", default=\"ubuntu:20.04\")\n    parser.add_argument(\"-d\", \"--dependency\", type=str,\n        help=\"the path to the file that contains the commands necessary to install all dependencies and the project\")\n    parser.add_argument(\"-p\", \"--path\", type=str, help='the folder to put the output file in', default='./output')\n    parser.add_argument(\"--id\", type=str, help=\"the Docker container ID to run in\")\n    parser.add_argument(\"project\", type=str, help=\"the GitHub project URL\")\n    parser.add_argument(\"command\", type=str, help=\"the command to run the test\")\n\n    args = parser.parse_args()\n    start(args.image, args.dependency, args.project, args.command, args.path, args.id)\n","repo_name":"polohan/No-Time-To-Flake","sub_path":"test-manager.py","file_name":"test-manager.py","file_ext":"py","file_size_in_byte":14248,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"38552080844","text":"import curses\nimport yaml\nimport data_manager\nimport interface\nimport sequence_manager\n\n\ndef run_game(settings, dm, gi, sm):\n    # Set our starting state variable (\"return code\") to 0\n    rc = 0\n\n    # If the state variable ever equals -1, end the program\n    while rc != -1:\n        # Get the user's input\n        c = gi.stdscr.getkey()\n\n        # q: quit\n        if c == \"q\":\n            rc = gi.prompt_quit_game()\n\n        if c == \"m\":\n            rc = gi.show_menu()\n\n        if c == \"t\":\n            sm.run_sequence(\"entering_inn_conv\")\n\n\n\ndef main(stdscr):\n    # Load the game's settings\n    settings = yaml.load(open(\"./settings.yml\", \"r\"), Loader=yaml.FullLoader)\n\n    # Initialize the game's data manager\n    dm = data_manager.DataManager(settings[\"files\"])\n\n    # Initialize the interface object\n    gi = interface.Interface(stdscr, settings[\"display\"])\n\n    # Initialize the sequence manager\n    sm = sequence_manager.SequenceManager(stdscr, gi.tw, dm.stree_data)\n\n    # Run the game\n    run_game(settings, dm, gi, sm)\n\n\nif __name__ == \"__main__\":\n    curses.wrapper(main)\n","repo_name":"biegelk/uriel","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"42705029340","text":"from itertools import combinations\r\nimport sys\r\n\r\nclass Apriori:\r\n    def __init__(self, inputfile,minSupport=0.5, minConfidence=0.7):\r\n        self.transactions = []\r\n        self.itemSet = set([])\r\n        self.allLSetDict = {}\r\n        self.rules = []\r\n        self.readFile(inputfile)\r\n        self.calcLSet(minSupport)\r\n        self.calcAssociationRules(minConfidence)\r\n        self.printResult()\r\n\r\n    def calcSupport(self, itemlist):\r\n        if type(itemlist) != set:\r\n            itemlist = set(itemlist)\r\n        return len(list(filter(lambda transaction: itemlist.issubset(transaction), self.transactions)))/len(self.transactions)\r\n\r\n    def calcLSet(self, minSupport):\r\n        curCSet = self.itemSet\r\n        numOfItems = 1\r\n        curLSetWithSupport = []\r\n        allLSetsWithSupport = []\r\n        while len(curCSet)>0:\r\n            if(numOfItems>1): # curLSet entries have to be frozensets, otherwise they cannot be placed in a set\r\n                curCSet = set([i[0].union(j[0]) for i in curLSetWithSupport for j in curLSetWithSupport if len(i[0].union(j[0]))==numOfItems])\r\n            curCSetWithSupport = list(map(lambda item: (frozenset(item), self.calcSupport(item)), curCSet))\r\n            curLSetWithSupport = list(filter(lambda cset: 
cset[1]>=minSupport,curCSetWithSupport))\r\n            allLSetsWithSupport.extend(curLSetWithSupport)\r\n            numOfItems += 1\r\n        \r\n        for key,val in allLSetsWithSupport:\r\n            self.allLSetDict[key] = val\r\n\r\n    def calcAssociationRules(self, minConfidence):\r\n        for key in self.allLSetDict:\r\n            subsets = [frozenset(item) for i in range(1, len(key)) for item in combinations(key, i)]\r\n            for subset in subsets:\r\n                confidence = self.allLSetDict[key]/self.allLSetDict[subset]\r\n                if confidence >= minConfidence:\r\n                    self.rules.append([subset,key-subset,confidence])\r\n\r\n    def printResult(self):\r\n        print('Transactions:')\r\n        for transaction in self.transactions:\r\n            print(transaction)\r\n        print('Association Rules:')\r\n        for rule in self.rules:\r\n            print('{0}=>{1}'.format(list(rule[0]), list(rule[1])))\r\n\r\n    def readFile(self, inputfile):\r\n        inputFile = open(inputfile, 'r')\r\n        for line in inputFile.readlines():\r\n            items = set(line.strip().split(','))\r\n            if len(items)>0:\r\n                self.transactions.append(items)\r\n                for item in items:\r\n                    self.itemSet.add(frozenset([item]))\r\n        inputFile.close()\r\n\r\nminSupport=float(sys.argv[2])\r\nminConfidence=float(sys.argv[3])\r\nprint('Database with min support {0} and min confidence {1}'.format(minSupport,minConfidence))\r\napriori = Apriori(sys.argv[1],minSupport,minConfidence)\r\n","repo_name":"Xgreedy/homework","sub_path":"phd/cs634/apriori.py","file_name":"apriori.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"23527779991","text":"f = open('A-large.in', 'r')\nF = open('A-large.out', 'w')\ncases = 0\n\nfor line in f.readlines():\n\tif cases==0:\n\t\ttc = line\n\t\tcases+=1\n\t\tcontinue\n\t\t\n\ts = line\n\tans = ''\n\t\n\tL1 = {'Z':'ZERO', 'W':'TWO', 'X':'SIX', 'G':'EIGHT'}\n\tL1digit = {'Z':'0', 'W':'2', 'X':'6', 'G':'8'}\n\t\n\tfor letter in L1.keys():\n\t\tlcount = s.count(letter)\n\t\tans += L1digit[letter] * lcount\n\t\tif lcount>0:\n\t\t\tfor everyLetter in L1[letter]:\n\t\t\t\ts = s.replace(everyLetter, '', lcount)\n\t\n\t## Three\n\tlcount = s.count('T')\n\tans += '3'*lcount\n\tif lcount>0:\n\t\tfor everyLetter in 'THREE':\n\t\t\ts = s.replace(everyLetter, '', lcount)\n\t\n\t## Four\n\tlcount = s.count('R')\n\tans += '4'*lcount\n\tif lcount>0:\n\t\tfor everyLetter in 'FOUR':\n\t\t\ts = s.replace(everyLetter, '', lcount)\n\t\n\t## FIVE\n\tlcount = s.count('F')\n\tans += '5'*lcount\n\tif lcount>0:\n\t\tfor everyLetter in 'FIVE':\n\t\t\ts = s.replace(everyLetter, '', lcount)\n\t\n\t## Seven\n\tlcount = s.count('V')\n\tans += '7'*lcount\n\tif lcount>0:\n\t\tfor everyLetter in 'SEVEN':\n\t\t\ts = s.replace(everyLetter, '', lcount)\n\t\n\t## NINE\n\tlcount = s.count('I')\n\tans += '9'*lcount\n\tif lcount>0:\n\t\tfor everyLetter in 'NINE':\n\t\t\ts = s.replace(everyLetter, '', lcount)\n\t\n\t## ONE\n\tans += '1'* (len(s)/3)\n\t\n\tans = ''.join(sorted(ans))\n\tF.write('Case #' + str(cases) + ': ' + ans + '\\n')\n\tcases += 1\n\nf.close()\nF.close()\n\t\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_184/1312.py","file_name":"1312.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"38544516564","text":"import os, sys, logging\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nimport selenium.webdriver.support.expected_conditions as 
EC\nfrom selenium.common.exceptions import TimeoutException, NoSuchElementException\n\ndef main():\n    driver = openChrome()\n\n    input(\"Press any key to exit...\")\n    driver.quit()\n\ndef openChrome(): # Start chrome browser and open WhatsApp Web\n    # Get username of current active user\n    user = os.environ[\"USERNAME\"]\n\n    # Setup Chrome driver options\n    options = webdriver.ChromeOptions()\n\n    # Setup download preference and user profile to save login state\n    options.add_experimental_option(\"prefs\", {\n        \"download.default_directory\": os.path.join(os.getcwd(), \"downloads\"),\n        \"download.prompt_for_download\": False,\n        \"download.directory_upgrade\": True,\n        \"safebrowsing.enabled\": True})\n    options.add_argument(\"--remote-debugging-port=9222\")\n    options.add_experimental_option('excludeSwitches', ['enable-logging'])\n    options.add_argument(\n        \"user-data-dir=C:\\\\Users\\\\\" + user + \"\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\User Data\\\\Profile 1\")\n    \n    # Initialize browser\n    driver = webdriver.Chrome(options=options)\n\n    driver.get(\"http://web.whatsapp.com\")\n    \n    # Check if WhatsApp Web login is successful\n    try:\n        WebDriverWait(driver, 10).until(\n            EC.presence_of_element_located((By.XPATH, '//*/header/div[1]/div/img')))\n        log.info(\"Connected to WhatsApp Web\")\n    except (TimeoutException, NoSuchElementException):\n        log.critical(\"Unable to connect to WhatsApp Web. Exiting...\")\n        driver.quit()\n        sys.exit()\n\n    return driver\n\ndef logger(): # Setup logging configuration\n    log_file = os.path.join(os.getcwd(), \"logs.log\")\n\n    logger = logging.getLogger(\"WhatsApp-Scrapper\")\n    logger.setLevel(logging.DEBUG)\n\n    file_handler = logging.FileHandler(log_file)\n    console_handler = logging.StreamHandler()\n\n    formatter = logging.Formatter(fmt=\"%(asctime)s - [%(levelname)s] - %(message)s\", datefmt=\"%d/%m/%Y %H:%M:%S\")\n    file_handler.setFormatter(formatter)\n    console_handler.setFormatter(formatter)\n\n    logger.addHandler(file_handler)\n    logger.addHandler(console_handler)\n    logger.propagate = False\n\n    return logger\n\nif __name__ == \"__main__\":\n    log = logger() # Setup logger\n    main()","repo_name":"ayankit/whatsapp-scraper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"2821588382","text":"import logging\nfrom typing import Any\n\nfrom fastapi import APIRouter, HTTPException\n\nfrom app.schemas import ShopUnitSchema\nfrom db.views import get_one_element\n\nlogger = logging.getLogger('uvicorn.error')\n\nrouter = APIRouter(\n    prefix='/nodes',\n    tags=['Nodes'],\n    responses={\n        200: {'description': 'Success'},\n        400: {'description': 'Validation Failed'},\n        404: {'description': 'Item not found'},\n    },\n)\n\n\n@router.get('/{item_id}', response_model=ShopUnitSchema)\nasync def get_nodes(item_id: str) -> Any:\n    logger.info('GET NODE WITH ID = %s', item_id)\n    result = get_one_element(item_id)\n    if result is None:\n        raise HTTPException(status_code=404, detail='Item not found')\n    return result\n","repo_name":"sergey-png/yandex_backend_enroll","sub_path":"app/routes/nodes.py","file_name":"nodes.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"72710113795","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nimport unittest\nfrom tests.magictest import MagicTest as TestCase\n\nfrom textwrap import dedent\n\nimport clevercss\nfrom clevercss 
import convert\nfrom clevercss.line_iterator import LineIterator\n\nfrom clevercss.errors import *\n\ndef eigen_test():\n filename = os.path.join(os.path.dirname(__file__), 'eigentest.ccss')\n ccss = open(filename).read()\n return clevercss.convert(ccss)\n\nclass ConvertTestCase(TestCase):\n def convert(self):\n self.assertEqual(convert('''body:\n color: $color\n ''',{'color':'#eee'}),\n u'body {\\n color: #eeeeee;\\n}')\n\n def convert2(self):\n self.assertEqual(convert('''body:\n background-color: $background_color\n ''', {'background_color': 'red.darken(10)'}),\n u'body {\\n background-color: #cc0000;\\n}')\n\n def convert_rgba(self):\n self._test_attr('background-color','rgba(0, 255, 100%, 0.3)', 'rgba(0, 255, 255, 0.3)')\n\n def convert_rgba_float(self):\n self._test_attr('background-color','rgba(0, 255, 100%, .3)', 'rgba(0, 255, 255, 0.3)')\n\n def convert_float(self):\n self._test_attr('top','.3', '0.3')\n\n def _test_attr(self, attr, ccval, cssval):\n self.assertEqual(convert('body:\\n %s: %s\\n' % (attr, ccval)), 'body {\\n %s: %s;\\n}' % (attr, cssval))\n\n def test_math(self):\n self.assertEqual(convert(dedent(\"\"\"\n div:\n margin: -2px -2px\n padding: 2px + 2px\n top: 1px+1\n left: 5+5px\n right: 4px-5px\n bottom: 0 - 5px\n text-shadow: 0px -1px 8px #fff\n \"\"\")), dedent(\"\"\"\n div {\n margin: -2px -2px;\n padding: 4px;\n top: 2px;\n left: 10px;\n right: -1px;\n bottom: -5px;\n text-shadow: 0px -1px 8px #ffffff;\n }\"\"\").strip())\n\n def test_eigen(self):\n if sys.version_info >= (3, 0):\n # round() behavior changed in Python 3\n # http://docs.python.org/3/whatsnew/3.0.html#builtins\n a_hover_color = '#4c0000'\n else:\n a_hover_color = '#4d0000'\n self.assertEqual(eigen_test(),dedent(\"\"\"\n body {\n font-family: serif, sans-serif, Verdana, 'Times New Roman';\n color: #111111;\n padding-top: 4px;\n padding-right: 5px;\n padding-left: 5px;\n padding-bottom: 4px;\n background-color: #eeeeee;\n }\n\n div.foo {\n width: 220px;\n foo: foo/bar/baz/42;\n }\n\n a {\n color: #ff0000;\n }\n\n a:hover {\n color: %(a_hover_color)s;\n }\n\n a:active {\n color: #ff1a1a;\n }\n\n div.navigation {\n height: 1.2em;\n padding: 0.2em;\n foo: '1 2 3';\n }\n\n div.navigation ul {\n margin: 0;\n padding: 0;\n list-style: none;\n }\n\n div.navigation ul li {\n float: left;\n height: 1.2em;\n }\n\n div.navigation ul li a {\n display: block;\n height: 1em;\n padding: 0.1em;\n }\n \"\"\" % {'a_hover_color': a_hover_color}).strip())\n\n def test_import_line(self):\n \"\"\"\n Tests the @import url() command. assumes the code is running in the main\n directory. (i.e. 
python -c 'from tests import *; main()' from the same\n        dir as clevercss)\n        \"\"\"\n        self.assertEqual(convert(dedent(\"\"\"\n        @import url(tests/example.ccss)\n\n        div:\n            color: $arg\n        \"\"\")), dedent(\"\"\"\n        #test1 {\n          color: blue;\n        }\n\n        #test2 {\n          color: blue;\n        }\n\n        #test3 {\n          color: blue;\n        }\n\n        div {\n          color: blue;\n        }\"\"\").strip())\n\n\n    def test_multiline_rule(self):\n        self.assertEqual(convert(dedent(\"\"\"\n        ul.item1 li.item1,\n        ul.item2 li.item2,\n        ul.item3 li.item3:\n            font-weight: bold\n        \"\"\")), dedent(\"\"\"\n        ul.item1 li.item1,\n        ul.item2 li.item2,\n        ul.item3 li.item3 {\n          font-weight: bold;\n        }\"\"\").strip())\n\n    def backstring(self):\n        self.assertEqual(convert(dedent('''\n        div.round:\n            background-image: `-webkit-gradient(top left, bottom right, from(#fff), to(#000))`\n        ''')), dedent('''\\\n        div.round {\n          background-image: -webkit-gradient(top left, bottom right, from(#fff), to(#000));\n        }'''))\n\n\nclass MacroTestCase(TestCase):\n    def simpleMacro(self):\n        ccss = dedent('''\n        def simple:\n            color: red\n            font-size: 3px+10px\n        body:\n            $simple\n            width:200px\n        .other:\n            $simple\n        ''')\n        css = dedent('''\\\n        body {\n          color: red;\n          font-size: 13px;\n          width: 200px;\n        }\n\n        .other {\n          color: red;\n          font-size: 13px;\n        }''')\n        self.assertEqual(convert(ccss), css)\n\n    def test_undefined_macro(self):\n        ccss = dedent('''\n        body:\n            $simple\n            width:200px\n        .other:\n            $simple\n        ''')\n        self.assertRaises(ParserError, convert, ccss)\n\nclass LineIterTestCase(TestCase):\n    def test_comments(self):\n        line_iter = LineIterator(dedent(\n            \"\"\"\n            /* block */\n            /* multiblock\n            */\n\n            aa, /* comment */bb:\n                x:1 // comment\n\n            \"\"\"))\n        self.assertEqual(\"\\n\".join([s[1] for s in line_iter]),\n            \"aa, bb:\\n    x:1\")\n\n\ndef all_tests():\n    return unittest.TestSuite(case.toSuite() for case in [ConvertTestCase, LineIterTestCase, MacroTestCase])\n\n# vim: et sw=4 sts=4\n","repo_name":"clevercss/clevercss","sub_path":"tests/ccss_to_css.py","file_name":"ccss_to_css.py","file_ext":"py","file_size_in_byte":5692,"program_lang":"python","lang":"en","doc_type":"code","stars":100,"dataset":"github-code","pt":"61"}
{"seq_id":"73639844995","text":"import scipy.signal\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pickle\nfrom liveaudio import *\n\nclass freqHandler():\n\n    def __init__(self,min_freq=1000,max_freq=5000):\n        self.min_freq = min_freq\n        self.max_freq = max_freq\n\n        self.freqs = [[1800,2150],[2150,2400],[2400,2600],[2600,2850],[2850,3200],[3200,3400],\n        [3400,3575],[3575,5000]]\n\n        self.chars = ['C','D','E','F','G','A','B','K']\n\n    def freq_to_char(self,max_freq):\n\n        if max_freq < self.min_freq: return None\n\n        for i, (min_f,max_f) in enumerate(self.freqs):\n            if max_freq > min_f and max_freq <= max_f:\n                return self.chars[i]\n\n    def get_max_strength_freq(self,time_data,plot=False):\n        freqs,powers = scipy.signal.welch(time_data,fs = 44100,nperseg=1000)\n\n        maxfreq = freqs[np.argmax(powers)]\n\n        if plot:\n            plt.plot(freqs,powers)\n            plt.xlim([0,5000])\n            plt.show()\n\n        return maxfreq\n\n    def retune(self,audioHandler,capture_secs=5):\n\n        if audioHandler.open == False:\n            audioHandler.open_stream()\n\n        freqs = []\n        for i in range(len(self.chars)):\n\n            print('Please play note number #%d' % (i+1))\n            frames = audioHandler.record(capture_secs)  # record for capture_secs seconds\n            print('Recorded! Processing: . . 
.')\n maxfreq = self.get_max_strength_freq(frames)\n freqs.append(maxfreq)\n\n\n new_freqs = []\n for i in range(len(freqs)-1):\n new_freqs.append(np.mean((freqs[i],freqs[i+1])))\n freqs = new_freqs\n\n new_ranges = []\n for i in range(len(freqs)):\n if i == 0:\n new_ranges.append([self.min_freq,freqs[0]])\n elif i == len(freqs)-1:\n new_ranges.append([freqs[-1],self.max_freq])\n else:\n new_ranges.append([freqs[i],freqs[i+1]])\n\n self.freqs = new_ranges\n\n\naudioHandler = streamHandler(format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n io=True,\n chunk=CHUNK)\n\nprint(\"* recording\")\n\naudioHandler.open_stream()\n\nframes = audioHandler.record(RECORD_SECONDS)\n\nprint(\"* done recording\")\n\nfreqDomain = freqHandler()\n\nfreqDomain.retune(audioHandler)\n\namplitude = frames\n\nmaxfreq = freqDomain.get_max_strength_freq(amplitude,plot=True)\nsymbol = freqDomain.freq_to_char(maxfreq)\nprint(maxfreq,symbol)\n\naudioHandler.close_stream()\n","repo_name":"Bennibraun/Xython","sub_path":"amplitudeProcessing.py","file_name":"amplitudeProcessing.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72432924993","text":"import logging\nfrom datetime import datetime\nfrom typing import Callable\n\nfrom apscheduler.executors.pool import ProcessPoolExecutor\nfrom apscheduler.jobstores.memory import MemoryJobStore\nfrom apscheduler.jobstores.redis import RedisJobStore\nfrom apscheduler.schedulers.background import BackgroundScheduler\n\nfrom utils.get_config import get_config\n\n\nclass ScheduleBuilder(object):\n\n def __init__(self):\n self.jobstores = {\n 'default': RedisJobStore(),\n 'redis': RedisJobStore()\n }\n self.executors = {\n 'default': ProcessPoolExecutor(max_workers=get_config()[\"scheduler\"].get(\"process_pool_max_workers\", 20))\n }\n self.job_defaults = {\n 'coalesce': True,\n 'max_instances': 1\n }\n self.scheduler = BackgroundScheduler(jobstores=self.jobstores,\n executors=self.executors,\n job_defaults=self.job_defaults)\n\n def manage_action(self, data: dict, action: str, job: Callable):\n params = (data, job)\n getattr(self, action)(*params)\n\n def add(self, data, job):\n schedule_id = str(data[\"id\"])\n schedule_date = data[\"timestamp\"]\n feeder_id = data[\"feeder\"]\n run_date = datetime.strptime(schedule_date, \"%Y-%m-%dT%H:%M:%SZ\")\n\n self.scheduler.add_job(job, \"date\", run_date=run_date, id=schedule_id, args=[feeder_id],\n replace_existing=True)\n\n def remove(self, data, job):\n try:\n schedule_id = str(data[\"id\"])\n self.scheduler.remove_job(schedule_id)\n except Exception:\n logging.exception(f\"Schedule does not exist. 
Data: {data}\")\n\n    def start(self):\n        return self.scheduler.start()\n\n    def shutdown(self):\n        return self.scheduler.shutdown()\n\n# data = {\"id\": 22, \"action\": \"add\", \"timestamp\": \"2020-12-15T11:28:00Z\", \"done\": True, \"feeder\": 1}\n# data = {\"id\": 22, \"action\": \"remove\", \"timestamp\": \"2020-12-15T11:28:00Z\", \"done\": True, \"feeder\": 1}\n","repo_name":"davidelavarga/hungry-falconry-feeders-hub","sub_path":"hub/domain/schedule_builder.py","file_name":"schedule_builder.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"32729809873","text":"import requests\nimport json\nfrom datetime import datetime\nfrom bs4 import BeautifulSoup as bs\n\ndef makeRequest(keyword):\n    url = f'https://www.instagram.com/explore/tags/{keyword}/?hl=ko'\n    count = 0\n    data = None\n    timenow = datetime.now()\n\n    r = requests.get(url)\n\n    log_message = f' time: {timenow.isoformat()}, STATUS CODE: {r.status_code}, keyword: {keyword}, '\n\n    if r.status_code == 200:\n        soup = bs(r.text, 'html.parser')\n        target = soup.find_all('script')\n\n        # get data from target\n        try:\n            data = json.loads(target[3].string[21:-1])\n        except TypeError:\n            data = None\n\n        if data:\n            count = data.get('entry_data').get('TagPage')[0].get('graphql').get('hashtag').get('edge_hashtag_to_media').get('count')\n\n        log_message += f'count: {count}, '\n\n    else:\n        log_message = f'count: failed with status code {r.status_code}, '\n\n    return (count, log_message, data)","repo_name":"ghleokim/cgf-hashtag-counter","sub_path":"source/_request.py","file_name":"_request.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"40199514983","text":"#!/usr/bin/env python3\nfrom __future__ import annotations\n\nimport sys\nimport os\nimport yaml\nimport json\nimport re\n\nfrom yaml.composer import Composer\nfrom yaml.constructor import Constructor\n\nclass Definitions:\n    name: str\n    defns: dict\n    children: dict\n\n    def __init__(self, name = None):\n        self.name = name\n        self.defns = dict()\n        self.children = dict()\n\n    def __getitem__(self, key):\n        return self.defns[key]\n\n    def __setitem__(self, key, value):\n        self.defns[key] = value\n\n    def get_path(self, path):\n        if len(path) == 0:\n            return self\n        if path[0] not in self.children:\n            self.children[path[0]] = Definitions(path[0])\n        return self.children[path[0]].get_path(path[1:])\n\n    def make(self, result, indent_level = 0):\n        if self.name is not None:\n            result.append('')\n            result.append(' '*indent_level+f\"class {self.name}:\")\n            indent_level += 1\n        for defn_name, defn_line in self.defns.items():\n            result.append(' '*indent_level + defn_name + ' = ' + defn_line)\n\n        for child_name, child in self.children.items():\n            child.make(result, indent_level)\n\ndef read_yaml_with_line_numbers(file, src):\n    loader = yaml.Loader(src)\n    def compose_node(parent, index):\n        # the line number where the previous token has ended (plus empty lines)\n        line = loader.line\n        node = Composer.compose_node(loader, parent, index)\n        node.__file__ = file\n        node.__line__ = line + 1\n        return node\n    def construct_mapping(node, deep=False):\n        mapping = Constructor.construct_mapping(loader, node, deep=deep)\n        mapping['__file__'] = file\n        mapping['__line__'] = node.__line__\n        return mapping\n    loader.compose_node = compose_node\n    loader.construct_mapping = construct_mapping\n    return loader.get_single_data()\n\nassets = dict()\nfor yaml_model_path in 
sys.argv[1:]:\n asset_name = os.path.split(re.sub(r'\\.yaml$', '', yaml_model_path))[1]\n assets[asset_name] = dict()\n with open(yaml_model_path, 'rt') as fp:\n models = read_yaml_with_line_numbers(yaml_model_path, fp)\n for model in models:\n if 'define' not in model:\n sys.stderr.write(f'Required top-level property define is not present in {model}\\n')\n else:\n match = re.match(r'(\\S+)\\s*\"(\\S+)\"', model['define'])\n if match:\n defn_name = match.group(2)\n parsed = {\n 'name': defn_name,\n '@class': match.group(1)\n }\n for key, value in model.items():\n if key != 'define':\n parsed[key] = value\n assets[asset_name][defn_name] = parsed\n\nroot = Definitions()\n\nfor asset_name, asset in assets.items():\n for defn_name, model in asset.items():\n #print(f'DEBUG: defined asset {asset_name} model {defn_name} as {json.dumps(model)}')\n if model['@class'] == 'Subject':\n path = [asset_name] + model['name'].split('.')\n defn = root.get_path(path[:-1])\n defn[path[-1]] = f\"definitions.{model['@class']}.Builder().done()\"\n elif model['@class'] == 'Measure':\n path = [asset_name] + model['name'].split('.')\n defn = root.get_path(path[:-1])\n body = f\"definitions.{model['@class']}.Builder()\"\n if 'of' not in model:\n raise RuntimeError(f\"Required property of is missing from {model}\")\n if 'measures' not in model:\n raise RuntimeError(f\"Required property measures is missing from {model}\")\n for key, value in model['of'].items():\n if key not in ('__file__','__line__'):\n if not isinstance(value, list):\n value = [value]\n body += \".of(\"\n body += ', '.join([ \"'\"+key+\"'\"] + value)\n body += \")\"\n for key, value in model['measures'].items():\n if key not in ('__file__','__line__'):\n if not isinstance(value, list):\n value = [value]\n body += \".measures(\"\n body += ', '.join([ \"'\"+key+\"'\"] + value)\n body += \")\"\n body += '.done()'\n defn[path[-1]] = body\n else:\n try:\n path = [asset_name] + model['name'].split('.')\n defn = root.get_path(path[:-1])\n body = f\"definitions.{model['@class']}.Builder()\"\n if 'nouns' in model:\n for key, value in model['nouns'].items():\n if key not in ('__file__','__line__'):\n if not isinstance(value, list):\n value = [value]\n body += \".noun(\"\n body += ', '.join([ \"'\"+key+\"'\"] + value)\n body += \")\"\n if 'measures' in model:\n for key, value in model['measures'].items():\n if key not in ('__file__','__line__'):\n if not isinstance(value, list):\n value = [value]\n body += \".measures(\"\n body += ', '.join([ \"'\"+key+\"'\"] + value)\n body += \")\"\n body += '.done()'\n defn[path[-1]] = body\n except Exception as e:\n raise RuntimeError(f\"{model['__file__']}:{model['__line__']} {e}\")\n\ncode_lines = []\nroot.make(code_lines, 0)\n\nprint(\"\"\"#!/usr/bin/env python3\nfrom __future__ import annotations\n\"\"\")\nprint('\\n'.join(code_lines))\n","repo_name":"pitaman71/semantic-descriptors","sub_path":"python3/semantic_descriptors/gen_builders.py","file_name":"gen_builders.py","file_ext":"py","file_size_in_byte":5863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40970581898","text":"#!/usr/bin/python\nimport socket, threading, time, os, datetime, math, sys, getopt, operator, json\nfrom decimal import Decimal\nfrom functools import reduce\n\nclass Server(object):\n def __init__(self, host, port, file):\n self.host = host\n self.port = port\n self.file = file\n self.running = True\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n 
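# SO_REUSEADDR lets the server rebind this port immediately after a restart, instead of failing while the old socket lingers in TIME_WAIT\n        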
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.sock.bind((self.host, self.port))\n\n def stop(self, running):\n if running:\n self.running = False\n print('\\nSERVER: Stopping')\n return True\n\n def listen(self):\n self.sock.listen(5)\n while True:\n # print('SERVER: Waiting for new connection')\n client, address = self.sock.accept()\n client.settimeout(5)\n threading.Thread(target = self.listenToClient,args = (client,)).start()\n\n def send(self, client):\n ip, port = client.getpeername()\n content = open(self.file, 'rb')\n content = content.read()\n try:\n # print('SERVER: Sent data to '+ip+':'+str(port))\n client.sendall(content)\n return True\n except KeyboardInterrupt:\n print('SERVER: User aborted server in send')\n except Exception as e:\n print('SERVER: Failed connection for '+ip+':'+str(port))\n print(e)\n return False\n\n def listenToClient(self, client):\n # print('SERVER: Client connected')\n while True:\n try:\n if not self.send(client):\n client.close()\n break\n except KeyboardInterrupt:\n print('SERVER: User aborted server in listenToClient')\n except Exception as e:\n # print('SERVER: Client disconnected')\n print(e)\n client.close()\n return False\n time.sleep(2)\n if not self.running:\n break\n\nclass SNMP(object):\n def __init__(self, type, host, port, community, file, interval):\n self.type = type\n self.host = host\n self.port = port\n self.community = community\n self.file = file\n self.interval = int(interval)\n self.running = True\n self.acu_models = {\n 'intellian_v100': {\n 'latitude': '.1.3.6.1.4.1.13745.100.1.1.4.0',\n 'longitude': '.1.3.6.1.4.1.13745.100.1.1.5.0',\n 'heading': '.1.3.6.1.4.1.13745.100.1.1.6.0',\n 'speed': False\n },\n 'sailor_900': {\n 'latitude': '.1.3.6',\n 'longitude': '.1.3.6',\n 'heading': '.1.3.6',\n 'speed': False\n },\n 'seatel_x': {\n 'latitude': '.1.3.6',\n 'longitude': '.1.3.6',\n 'heading': '.1.3.6',\n 'speed': False\n }\n }\n\n def stop(self, running):\n if running:\n self.running = False\n print('\\nSNMP: Stopping')\n return True\n\n def start(self):\n while self.running:\n try:\n # Get data from source\n data = self.collect()\n\n # Generate NMEA string content\n string = self.nmea(data)\n\n # Generate NMEA string checksum\n check = self.checksum(string)\n\n # Build full NEMEA string\n nmeastring = '$'+string+'*'+check\n\n # Print for debug\n # print('NMEA: '+nmeastring)\n\n # Save full NMEA string to file\n self.write(nmeastring)\n\n except KeyboardInterrupt:\n print('SNMP: Fetching aborted')\n sys.exit(2)\n except Exception as e:\n print('SNMP: Failed to fetch proper data')\n print(e)\n finally:\n # Wait for new interval\n for i in range(1, self.interval):\n i += 1\n if not self.running:\n break\n time.sleep(1)\n\n def get(self, oid):\n try:\n stream = os.popen('/etc/pymea/snmpget -v2c -Ovq -c'+self.community+' '+self.host+' '+oid+' 2>/dev/null')\n except Exception as e:\n output = '00.0000 N'\n print('failed to get snmp from acu')\n print(e)\n else:\n output = stream.read().strip().strip('\"')\n return output\n\n def write(self, nmeastring):\n try:\n nmeafile = open(self.file, 'w')\n except Exception as e:\n print('SNMP: Failed to open data file as write')\n print(e)\n else:\n try:\n nmeafile.write(nmeastring)\n nmeafile.write('\\r\\n')\n except Exception as e:\n print('SNMP: Failed to write content to data file')\n print(e)\n finally:\n nmeafile.close()\n\n def collect(self):\n data = {}\n values = ['heading', 'latitude', 'longitude', 'speed']\n for value in values:\n if 
self.acu_models[self.type][value]:\n data[value] = self.get(self.acu_models[self.type][value])\n else:\n data[value] = ''\n # Check if acu output needs conversion\n # Rewrite data if not type = intellian_v100\n values = ['latitude', 'longitude']\n for value in values:\n if self.type == 'intellian_v100':\n # Rewrite the value to correct lenght for NMEA output\n data[value] = str(self.value_to_lenght(data[value].split(' ')[0], value))+' '+data[value].split(' ')[1]\n if not self.type == 'intellian_v100':\n print('SNMP: Trying to rewrite data')\n data_new = 'rewrite data format'\n data = data_new\n return data\n\n def dms(self, data, direction):\n # Convert decimal data to a degree int\n degint = int(float(data))\n # Calculate minutes from the fraction of input degree\n minutes = float((Decimal(str(data)) - Decimal(str(degint))) * Decimal(str(60.0)))\n padding = self.padding_zeros(degint, direction)\n # Put togehter the value\n value = (degint * 100) + minutes\n # Get lenght of the fraction\n fractlen = len(str(int(str(float(Decimal(str(minutes)) - Decimal(str(int(minutes))))).split('.')[1])))\n # Expand the fraction to correct length\n suffix = '0' * (6 - fractlen)\n # Merge the result\n result = str(padding)+str(value)+str(suffix)\n return result\n\n def value_to_lenght(self, data, direction):\n # Convert decimal data to a degree int\n degint = int(float(data))\n # Calculate minutes from the fraction of input degree\n minutes = float(Decimal(str(data)) - Decimal(str(degint)))\n padding = self.padding_zeros(degint, direction)\n # Put togehter the value\n value = (degint * 100) + (minutes * 100)\n # Get lenght of the fraction\n fractlen = len(str(minutes*100).split('.')[1])\n # Expand the fraction to correct length\n suffix = '0' * (6 - fractlen)\n # Merge the result\n result = str(padding)+str(value)+str(suffix)\n return result\n\n def padding_zeros(self, degint, direction):\n # Expand degree to correct value for dms\n deglen = len(str(degint * 100))\n # Depending on direction type, set padding to get correct length\n if direction == 'longitude':\n padding = '0' * (5 - deglen)\n elif direction == 'latitude':\n padding = '0' * (4 - deglen)\n else:\n padding = ''\n return padding\n\n def checksum(self, string):\n string = string.strip().strip('$\\n')\n checksum = reduce(operator.xor, (ord(s) for s in string), 0)\n return '{:02x}'.format(checksum).upper()\n\n def nmea(self, data):\n TYPE = 'GPRMC'\n TIMESTAMP = datetime.datetime.now().strftime('%H%M%S') #HHMMSS\n STATUS = 'A' #A=Valid, V=Invalid\n # LATITUDE = str(self.dms(float(data['latitude'].split(' ')[0]), 'latitude')) #ddmm.mmmm\n LATITUDE = data['latitude'].split(' ')[0] #ddmm.mmmm\n LATIND = data['latitude'].split(' ')[1] #N=North, S=South\n # LONGITUDE = str(self.dms(float(data['longitude'].split(' ')[0]), 'longitude')) #dddmm.mmmm\n LONGITUDE = data['longitude'].split(' ')[0] #dddmm.mmmm\n LONGIND = data['longitude'].split(' ')[1] #W=West, E=East\n SPEED = str(data['speed']) #Speed over ground\n COURSE = str(data['heading']) #Course over ground (heading?)\n DATE = str(datetime.datetime.now().strftime('%d%m%y')) #DDMMYY\n MAGNETIC = str(0) #Magnetic Variation in degrees (E=East, W=West)\n MODE = 'A' #A=Autonomous, D=DGPS, E=DR\n\n string = TYPE+','+TIMESTAMP+','+STATUS+','+LATITUDE+','+LATIND+','+LONGITUDE+','+LONGIND+','+SPEED+','+COURSE+','+DATE+','+MAGNETIC+','+MODE+','\n return string\n\nif __name__ == '__main__':\n print('MAIN: Starting')\n lfile = '/tmp/data.file'\n lport = int(5454)\n file = False\n\n try:\n opts, args = 
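The checksum method above implements the standard NMEA 0183 rule: XOR every character between the leading '$' and the '*' delimiter, rendered as two uppercase hex digits. A standalone sketch for quick verification (the sentence body is the common GPRMC textbook example, not output from this script):

import operator
from functools import reduce

def nmea_checksum(sentence: str) -> str:
    # XOR of all characters between '$' and '*'
    body = sentence.strip().lstrip('$').split('*')[0]
    return '{:02X}'.format(reduce(operator.xor, (ord(ch) for ch in body), 0))

body = 'GPRMC,123519,A,4807.038,N,01131.000,E,022.4,084.4,230394,003.1,W'
print('$%s*%s' % (body, nmea_checksum(body)))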
getopt.getopt(sys.argv[1:],'h:c:a:p:f:i:',['host=','community=','acu=','port=','file','interval'])\n except getopt.GetoptError:\n print('ERROR: Bad arguments. See below example:')\n print('app.py -h 10.224.77.2 -c public -a intellian_v100 -p 161')\n sys.exit(2)\n\n for opt, arg in opts:\n if opt in ('-h', '--host'):\n host = arg\n elif opt in ('-c', '--community'):\n community = arg\n elif opt in ('-a', '--acu'):\n acu = arg\n elif opt in ('-p', '--port'):\n port = arg\n elif opt in ('-f', '--file'):\n file = arg\n elif opt in ('-i', '--interval'):\n interval = arg\n\n # Init PID file\n with open('/var/run/pymea.pid', 'w') as pidfile:\n try:\n pid = str(os.getpid())\n except Exception as e:\n print('ERROR: Failed to get PID')\n print(e)\n sys.exit(2)\n\n try:\n pidfile.write(pid)\n except Exception as e:\n print('ERROR: Failed to save PID')\n print(e)\n sys.exit(2)\n else:\n pidfile.close()\n\n # Check if config file has been specified\n if file:\n # Load config file\n try:\n configdata = open(file, 'r')\n except FileNotFoundError:\n print('ERROR: Config file not found ('+file+')')\n sys.exit(2)\n except Exception as e:\n print('ERROR: Could not read config file')\n print(e)\n sys.exit(2)\n else:\n # Read config values from loaded config file\n try:\n config = json.loads(configdata.read().replace('\\n', ''))\n except Exception as e:\n print('ERROR: Failed to load config data')\n print(e)\n sys.exit(2)\n finally:\n configdata.close()\n\n # Transalte config to variables\n acu = config['acu']\n host = config['host']\n port = config['port']\n community = config['community']\n interval = config['interval']\n else:\n # If not config file check amount of parameters\n if len(sys.argv) != 11:\n print('ERROR: Missing arguments. See below example:')\n print('app.py -h 10.224.77.2 -c public -a intellian_v100 -p 161')\n sys.exit(2)\n\n # Init SNMP\n print('MAIN: Configure SNMP')\n snmp = SNMP(acu, host, port, community, lfile, interval)\n\n # Start thread to fetch data\n print('MAIN: Setup thread for fetching')\n snmpthread = threading.Thread(target = snmp.start)\n print('MAIN: Start SNMP')\n try:\n snmpthread.start()\n except KeyboardInterrupt:\n print('\\nMAIN: SNMP fetching aborted by user')\n snmp.stop(True)\n except Exception as e:\n print('\\nMAIN: SNMP fetching error')\n print(e)\n snmp.stop(True)\n\n print('MAIN: Configure SERVER')\n server = Server('', lport, lfile)\n print('MAIN: Start SERVER')\n try:\n server.listen()\n except KeyboardInterrupt:\n snmp.stop(True)\n server.stop(True)\n except Exception as e:\n print('\\nMAIN: SERVER had an unknown error')\n print(e)\n snmp.stop(True)\n server.stop(True)\n\n print('\\nMAIN: Application has stopped')\n","repo_name":"felipgit/pymea","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":12645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1349281383","text":"import numpy as np\nimport cv2\n\ni = cv2.imread('masoleh.jpg')\nb = i[:, :, 0]\ng = i[:, :, 1]\nr = i[:, :, 2]\ncv2.imshow('Masoleh', i)\n\nwhile 1:\n f = np.zeros(i.shape)\n k = cv2.waitKey()\n if k == ord('o'):\n cv2.imshow('Masoleh', i)\n elif k == ord('b'):\n f[:, :, 0] = b\n cv2.imshow('b', f)\n elif k == ord('g'):\n f[:, :, 1] = g\n cv2.imshow('g', f)\n elif k == ord('r'):\n f[:, :, 2] = r\n cv2.imshow('r', f)\n elif k == ord('q'):\n cv2.destroyAllWindows()\n 
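lab2.py above copies one BGR channel into a fresh np.zeros(i.shape) buffer, which is float64; cv2.imshow interprets float images as 0..1, so nonzero channel values tend to render saturated. A small numpy-only sketch of the channel-isolation idea that keeps the uint8 dtype (function name and sample image are illustrative):

import numpy as np

def isolate_channel(img: np.ndarray, channel: int) -> np.ndarray:
    out = np.zeros_like(img)              # preserves uint8 dtype
    out[:, :, channel] = img[:, :, channel]
    return out

img = np.full((4, 4, 3), 200, dtype=np.uint8)
blue_only = isolate_channel(img, 0)       # OpenCV channel order is B, G, R
print(blue_only[0, 0])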
break\n","repo_name":"ErfanMomeniii/vision","sub_path":"lab2/lab2.py","file_name":"lab2.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11898269710","text":"\"\"\"Basic replication core.\n\"\"\"\n\nimport os\nimport sys\nimport time\nimport fnmatch\n\nfrom logging import Logger\nfrom typing import List, Optional, Dict, Sequence, Mapping, Tuple, Iterator\n\nimport skytools\n\nfrom skytools.basetypes import DictRow, Connection, Cursor\n\nfrom pgq.baseconsumer import EventList\nfrom pgq.event import Event\nfrom pgq.cascade.worker import CascadedWorker\n\nfrom .exec_attrs import ExecAttrs\nfrom .handler import build_handler, BaseHandler\nfrom .handlers import load_handler_modules\n\n__all__ = ['Replicator', 'TableState',\n 'TABLE_MISSING', 'TABLE_IN_COPY', 'TABLE_CATCHING_UP',\n 'TABLE_WANNA_SYNC', 'TABLE_DO_SYNC', 'TABLE_OK']\n\n# state # owner - who is allowed to change\nTABLE_MISSING = 0 # main\nTABLE_IN_COPY = 1 # copy\nTABLE_CATCHING_UP = 2 # copy\nTABLE_WANNA_SYNC = 3 # main\nTABLE_DO_SYNC = 4 # copy\nTABLE_OK = 5 # setup\n\nSYNC_OK = 0 # continue with batch\nSYNC_LOOP = 1 # sleep, try again\nSYNC_EXIT = 2 # nothing to do, exit script\n\nMAX_PARALLEL_COPY = 8 # default number of allowed max parallel copy processes\n\n\ndef is_data_event(ev: Event) -> bool:\n \"\"\"Is it insert/update/delete for one table?\n \"\"\"\n if ev.type in ('I', 'U', 'D'):\n return True\n elif ev.type[:2] in ('I:', 'U:', 'D:', '{\"'):\n return True\n return False\n\n\nclass Counter:\n \"\"\"Counts table statuses.\"\"\"\n\n missing = 0\n copy = 0\n catching_up = 0\n wanna_sync = 0\n do_sync = 0\n ok = 0\n\n def __init__(self, tables: List[\"TableState\"], copy_method_map: Dict[str, Optional[int]]) -> None:\n \"\"\"Counts and sanity checks.\"\"\"\n for t in tables:\n if t.state == TABLE_MISSING:\n self.missing += 1\n elif t.state == TABLE_IN_COPY:\n nthreads = copy_method_map[t.name]\n if nthreads is None:\n self.copy += 1\n else:\n self.copy += nthreads\n elif t.state == TABLE_CATCHING_UP:\n self.catching_up += 1\n elif t.state == TABLE_WANNA_SYNC:\n self.wanna_sync += 1\n elif t.state == TABLE_DO_SYNC:\n self.do_sync += 1\n elif t.state == TABLE_OK:\n self.ok += 1\n\n def get_copy_count(self) -> int:\n return self.copy + self.catching_up + self.wanna_sync + self.do_sync\n\n\nclass TableState:\n \"\"\"Keeps state about one table.\"\"\"\n\n name: str\n dest_table: str\n log: Logger\n state: int\n last_snapshot_tick: Optional[int]\n str_snapshot: Optional[str]\n from_snapshot: Optional[skytools.Snapshot]\n sync_tick_id: Optional[int]\n ok_batch_count: int\n last_tick: Optional[int]\n table_attrs: Mapping[str, Optional[str]]\n copy_role: Optional[str]\n dropped_ddl: Optional[str]\n plugin: Optional[BaseHandler]\n changed: int\n copy_pos: int\n max_parallel_copy: int\n\n def __init__(self, name: str, log: Logger) -> None:\n \"\"\"Init TableState for one table.\"\"\"\n self.name = name\n self.dest_table = name\n self.log = log\n # same as forget:\n self.state = TABLE_MISSING\n self.last_snapshot_tick = None\n self.str_snapshot = None\n self.from_snapshot = None\n self.sync_tick_id = None\n self.ok_batch_count = 0\n self.last_tick = 0\n self.table_attrs = {}\n self.copy_role = None\n self.dropped_ddl = None\n self.plugin = None\n # except this\n self.changed = 0\n # position in parallel copy work order\n self.copy_pos = 0\n # max number of parallel copy processes allowed\n self.max_parallel_copy = 
MAX_PARALLEL_COPY\n\n def forget(self) -> None:\n \"\"\"Reset all info.\"\"\"\n self.state = TABLE_MISSING\n self.last_snapshot_tick = None\n self.str_snapshot = None\n self.from_snapshot = None\n self.sync_tick_id = None\n self.ok_batch_count = 0\n self.last_tick = 0\n self.table_attrs = {}\n self.changed = 1\n self.plugin = None\n self.copy_pos = 0\n self.max_parallel_copy = MAX_PARALLEL_COPY\n\n def change_snapshot(self, str_snapshot: Optional[str], tag_changed: int = 1) -> None:\n \"\"\"Set snapshot.\"\"\"\n if self.str_snapshot == str_snapshot:\n return\n self.log.debug(\"%s: change_snapshot to %s\", self.name, str_snapshot)\n self.str_snapshot = str_snapshot\n if str_snapshot:\n self.from_snapshot = skytools.Snapshot(str_snapshot)\n else:\n self.from_snapshot = None\n\n if tag_changed:\n self.ok_batch_count = 0\n self.last_tick = None\n self.changed = 1\n\n def change_state(self, state: int, tick_id: Optional[int] = None) -> None:\n \"\"\"Set state.\"\"\"\n if self.state == state and self.sync_tick_id == tick_id:\n return\n self.state = state\n self.sync_tick_id = tick_id\n self.changed = 1\n self.log.debug(\"%s: change_state to %s\", self.name, self.render_state())\n\n def render_state(self) -> Optional[str]:\n \"\"\"Make a string to be stored in db.\"\"\"\n\n if self.state == TABLE_MISSING:\n return None\n elif self.state == TABLE_IN_COPY:\n return 'in-copy'\n elif self.state == TABLE_CATCHING_UP:\n return 'catching-up'\n elif self.state == TABLE_WANNA_SYNC:\n return 'wanna-sync:%d' % (self.sync_tick_id or 0)\n elif self.state == TABLE_DO_SYNC:\n return 'do-sync:%d' % (self.sync_tick_id or 0)\n elif self.state == TABLE_OK:\n return 'ok'\n return None\n\n def parse_state(self, merge_state: Optional[str]) -> int:\n \"\"\"Read state from string.\"\"\"\n\n state = -1\n if merge_state is None:\n state = TABLE_MISSING\n elif merge_state == \"in-copy\":\n state = TABLE_IN_COPY\n elif merge_state == \"catching-up\":\n state = TABLE_CATCHING_UP\n elif merge_state == \"ok\":\n state = TABLE_OK\n elif merge_state == \"?\":\n state = TABLE_OK\n else:\n tmp = merge_state.split(':')\n if len(tmp) == 2:\n self.sync_tick_id = int(tmp[1])\n if tmp[0] == 'wanna-sync':\n state = TABLE_WANNA_SYNC\n elif tmp[0] == 'do-sync':\n state = TABLE_DO_SYNC\n\n if state < 0:\n raise Exception(\"Bad table state: %s\" % merge_state)\n\n return state\n\n def loaded_state(self, row: DictRow) -> None:\n \"\"\"Update object with info from db.\"\"\"\n\n self.log.debug(\"loaded_state: %s: %s / %s\",\n self.name, row['merge_state'], row['custom_snapshot'])\n self.change_snapshot(row['custom_snapshot'], 0)\n self.state = self.parse_state(row['merge_state'])\n self.changed = 0\n if row['table_attrs']:\n self.table_attrs = skytools.db_urldecode(row['table_attrs'])\n else:\n self.table_attrs = {}\n self.copy_role = row['copy_role']\n self.dropped_ddl = row['dropped_ddl']\n if row['merge_state'] == \"?\":\n self.changed = 1\n\n self.copy_pos = int(row.get('copy_pos', '0'))\n\n max_parallel_copy = self.table_attrs.get('max_parallel_copy')\n if max_parallel_copy:\n self.max_parallel_copy = int(max_parallel_copy)\n\n if 'dest_table' in row and row['dest_table']:\n self.dest_table = row['dest_table']\n else:\n self.dest_table = self.name\n\n hstr = self.table_attrs.get('handlers', '') # compat\n hstr = self.table_attrs.get('handler', hstr) or ''\n self.plugin = build_handler(self.name, hstr, self.dest_table)\n\n def max_parallel_copies_reached(self) -> bool:\n return self.max_parallel_copy is not None and \\\n self.copy_pos >= 
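TableState.render_state() and parse_state() above round-trip the table state through strings such as 'wanna-sync:123'. A condensed stand-in mirroring that encoding (the numeric codes are the TABLE_* constants from the top of the file):

STATES = {None: 0, 'in-copy': 1, 'catching-up': 2, 'ok': 5, '?': 5}

def parse_state(merge_state):
    if merge_state in STATES:
        return STATES[merge_state], None
    name, _, tick = merge_state.partition(':')
    if name == 'wanna-sync':
        return 3, int(tick)    # TABLE_WANNA_SYNC plus sync tick
    if name == 'do-sync':
        return 4, int(tick)    # TABLE_DO_SYNC plus sync tick
    raise ValueError('Bad table state: %s' % merge_state)

print(parse_state('wanna-sync:123'))  # (3, 123)
print(parse_state('ok'))              # (5, None)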
self.max_parallel_copy\n\n def interesting(self, ev: Event, tick_id: int, copy_thread: bool, copy_table_name: Optional[str]) -> bool:\n \"\"\"Check if table wants this event.\"\"\"\n\n if copy_thread:\n if self.name != copy_table_name:\n return False\n if self.state not in (TABLE_CATCHING_UP, TABLE_DO_SYNC):\n return False\n else:\n if self.state != TABLE_OK:\n return False\n\n # if no snapshot tracking, then accept always\n if not self.from_snapshot:\n return True\n\n # uninteresting?\n if self.from_snapshot.contains(ev.txid):\n return False\n\n # after couple interesting batches there no need to check snapshot\n # as there can be only one partially interesting batch\n if tick_id != self.last_tick:\n self.last_tick = tick_id\n self.ok_batch_count += 1\n\n # disable batch tracking\n if self.ok_batch_count > 3:\n self.change_snapshot(None)\n return True\n\n def gc_snapshot(self, copy_thread: bool, prev_tick: int, cur_tick: int, no_lag: bool) -> None:\n \"\"\"Remove attached snapshot if possible.\n\n If the event processing is in current moment, the snapshot\n is not needed beyond next batch.\n\n The logic is needed for mostly unchanging tables,\n where the .ok_batch_count check in .interesting()\n method can take a lot of time.\n \"\"\"\n\n # check if gc is needed\n if self.str_snapshot is None:\n return\n\n # check if allowed to modify\n if copy_thread:\n if self.state != TABLE_CATCHING_UP:\n return\n else:\n if self.state != TABLE_OK:\n return\n\n # aquire last tick\n if not self.last_snapshot_tick:\n if no_lag:\n self.last_snapshot_tick = cur_tick\n return\n\n # reset snapshot if not needed anymore\n if self.last_snapshot_tick < prev_tick:\n self.change_snapshot(None)\n\n def get_plugin(self) -> BaseHandler:\n if not self.plugin:\n raise ValueError(\"no handler set\")\n return self.plugin\n\n\nclass Replicator(CascadedWorker):\n \"\"\"Replication core.\n\n Config options::\n\n ## Parameters for Londiste ##\n\n # target database\n db = dbname=somedb host=127.0.0.1\n\n # extra connect string parameters to add to node public connect strings.\n # useful values: user= sslmode=\n #remote_extra_connstr =\n\n # how many tables can be copied in parallel\n #parallel_copies = 1\n\n # glob patterns for table names: archive.*, public.*\n #threaded_copy_tables =\n # number of threads in pool\n #threaded_copy_pool_size = 1\n\n # accept only events for locally present tables\n #local_only = false\n # do not load EXECUTE events from source queue when local_only is active\n #local_only_drop_execute = false\n\n ## compare/repair\n # max amount of time table can be locked\n #lock_timeout = 10\n # compare: sql to use\n #compare_sql = select count(1) as cnt, sum(hashtext(t.*::text)) as chksum from only _TABLE_ t\n # workaround for hashtext change between 8.3 and 8.4\n #compare_sql = select count(1) as cnt, sum(('x' ||\n # substr(md5(t.*::text),1,16))::bit(64)::bigint) as chksum from only _TABLE_ t\n #compare_fmt = %(cnt)d rows, checksum=%(chksum)s\n\n ## Parameters for initial node creation: create-root/branch/leaf ##\n\n # These parameters can be given on either command-line or in config\n # command-line values override config values. 
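interesting() above drops events whose transaction is already contained in the COPY snapshot. The real parsing lives in skytools.Snapshot; the stand-in below sketches the visibility rule under the assumed PostgreSQL 'xmin:xmax:xip1,xip2,...' snapshot format:

class Snapshot:
    def __init__(self, text):
        xmin, xmax, xip = text.split(':')
        self.xmin, self.xmax = int(xmin), int(xmax)
        self.xip = {int(x) for x in xip.split(',') if x}

    def contains(self, txid):
        # visible if committed before xmin, or inside the window
        # and not in the list of transactions still in progress
        return txid < self.xmin or (txid < self.xmax and txid not in self.xip)

snap = Snapshot('100:110:103,105')
print(snap.contains(99), snap.contains(103), snap.contains(120))
# True False False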
Those values are\n # used only during create time, otherwise they are loaded from database.\n\n # Name for local node.\n #node_name =\n\n # public connect string for local node, which other nodes will use\n # to connect to this one.\n #public_node_location =\n\n # connect string for existing node to use as provider\n #initial_provider_location =\n\n # filter for table/seq registration\n #register_only_tables =\n #register_only_seqs =\n #register_skip_tables = s.a, s.b, s.c\n #register_skip_seqs =\n \"\"\"\n\n # batch info\n cur_tick: int = 0\n prev_tick: int = 0\n copy_table_name: Optional[str] = None # filled by Copytable()\n sql_list: List[str] = []\n\n current_event: Optional[Event] = None\n\n threaded_copy_tables: Sequence[str]\n threaded_copy_pool_size: int\n copy_method_map: Dict[str, Optional[int]]\n\n register_only_tables: Optional[Sequence[str]] = None\n register_only_seqs: Optional[Sequence[str]] = None\n register_skip_tables: Optional[Sequence[str]] = None\n register_skip_seqs: Optional[Sequence[str]] = None\n\n local_only: bool = False\n local_only_drop_execute: bool = False\n\n table_list: List[TableState]\n table_map: Dict[str, TableState]\n used_plugins: Dict[str, BaseHandler]\n copy_thread: bool\n\n def __init__(self, args: Sequence[str]) -> None:\n \"\"\"Replication init.\"\"\"\n super().__init__('londiste', 'db', args)\n\n self.table_list = []\n self.table_map = {}\n\n self.threaded_copy_tables = self.cf.getlist('threaded_copy_tables', [])\n self.threaded_copy_pool_size = self.cf.getint('threaded_copy_pool_size', 1)\n self.copy_method_map = {}\n\n self.copy_thread = False\n self.set_name = self.queue_name\n self.used_plugins = {}\n\n self.parallel_copies = self.cf.getint('parallel_copies', 1)\n if self.parallel_copies < 1:\n raise Exception('Bad value for parallel_copies: %d' % self.parallel_copies)\n\n self.consumer_filter = None\n\n self.register_only_tables = self.cf.getlist(\"register_only_tables\", [])\n self.register_only_seqs = self.cf.getlist(\"register_only_seqs\", [])\n self.register_skip_tables = self.cf.getlist(\"register_skip_tables\", [])\n self.register_skip_seqs = self.cf.getlist(\"register_skip_seqs\", [])\n\n self.local_only = self.cf.getboolean('local_only', False)\n self.local_only_drop_execute = self.cf.getboolean('local_only_drop_execute', False)\n\n def reload(self) -> None:\n super().reload()\n\n load_handler_modules(self.cf)\n\n self.threaded_copy_tables = self.cf.getlist('threaded_copy_tables', [])\n self.threaded_copy_pool_size = self.cf.getint('threaded_copy_pool_size', 1)\n self.copy_method_map = {}\n\n self.register_only_tables = self.cf.getlist(\"register_only_tables\", [])\n self.register_only_seqs = self.cf.getlist(\"register_only_seqs\", [])\n self.register_skip_tables = self.cf.getlist(\"register_skip_tables\", [])\n self.register_skip_seqs = self.cf.getlist(\"register_skip_seqs\", [])\n\n self.local_only = self.cf.getboolean('local_only', False)\n self.local_only_drop_execute = self.cf.getboolean('local_only_drop_execute', False)\n\n def fill_copy_method(self) -> None:\n for table_name in self.table_map:\n if table_name not in self.copy_method_map:\n for pat in self.threaded_copy_tables:\n if fnmatch.fnmatchcase(table_name, pat):\n self.copy_method_map[table_name] = self.threaded_copy_pool_size\n break\n if table_name not in self.copy_method_map:\n self.copy_method_map[table_name] = None\n\n def connection_hook(self, dbname: str, db: Connection) -> None:\n if dbname == 'db':\n curs = db.cursor()\n curs.execute(\"select 
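fill_copy_method() below in the class maps table names onto copy strategies: names matching a glob in threaded_copy_tables get the thread-pool size, everything else gets None (single-process copy). A standalone sketch of that matching (function name is illustrative):

import fnmatch

def build_copy_method_map(tables, patterns, pool_size):
    out = {}
    for name in tables:
        threads = None
        for pat in patterns:
            if fnmatch.fnmatchcase(name, pat):
                threads = pool_size
                break
        out[name] = threads
    return out

print(build_copy_method_map(
    ['archive.events', 'public.users'], ['archive.*'], 4))
# {'archive.events': 4, 'public.users': None}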
londiste.set_session_replication_role('replica', false)\")\n db.commit()\n\n code_check_done = 0\n def check_code(self, db: Connection) -> None:\n objs = [\n skytools.DBFunction(\"pgq.maint_operations\", 0, sql_file=\"londiste.maint-upgrade.sql\"),\n ]\n skytools.db_install(db.cursor(), objs, self.log)\n db.commit()\n\n def process_remote_batch(self, src_db: Connection, tick_id: int, ev_list: EventList, dst_db: Connection) -> None:\n \"All work for a batch. Entry point from SetConsumer.\"\n\n self.current_event = None\n\n # this part can play freely with transactions\n\n if not self.code_check_done:\n self.check_code(dst_db)\n self.code_check_done = 1\n\n self.sync_database_encodings(src_db, dst_db)\n\n assert self.batch_info\n self.cur_tick = self.batch_info['tick_id']\n self.prev_tick = self.batch_info['prev_tick_id']\n\n dst_curs = dst_db.cursor()\n self.load_table_state(dst_curs)\n self.sync_tables(src_db, dst_db)\n\n self.copy_snapshot_cleanup(dst_db)\n\n # only main thread is allowed to restore fkeys\n assert self._worker_state\n if not self.copy_thread and self._worker_state.process_events:\n self.restore_fkeys(dst_db)\n\n for p in self.used_plugins.values():\n p.reset()\n self.used_plugins = {}\n\n # now the actual event processing happens.\n # they must be done all in one tx in dst side\n # and the transaction must be kept open so that\n # the cascade-consumer can save last tick and commit.\n\n self.sql_list = []\n super().process_remote_batch(src_db, tick_id, ev_list, dst_db)\n self.flush_sql(dst_curs)\n\n for p in self.used_plugins.values():\n p.finish_batch(self.batch_info, dst_curs)\n self.used_plugins = {}\n\n # finalize table changes\n self.save_table_state(dst_curs)\n\n def sync_tables(self, src_db: Connection, dst_db: Connection) -> None:\n \"\"\"Table sync loop.\n\n Calls appropriate handles, which is expected to\n return one of SYNC_* constants.\"\"\"\n\n self.log.debug('Sync tables')\n while True:\n cnt = Counter(self.table_list, self.copy_method_map)\n if self.copy_thread:\n res = self.sync_from_copy_thread(cnt, src_db, dst_db)\n else:\n res = self.sync_from_main_thread(cnt, src_db, dst_db)\n\n if res == SYNC_EXIT:\n self.log.debug('Sync tables: exit')\n if self.copy_thread:\n self.unregister_consumer()\n src_db.commit()\n sys.exit(0)\n elif res == SYNC_OK:\n return\n elif res != SYNC_LOOP:\n raise Exception('Program error')\n\n self.log.debug('Sync tables: sleeping')\n time.sleep(3)\n dst_db.commit()\n self.load_table_state(dst_db.cursor())\n dst_db.commit()\n\n dsync_backup: Optional[Tuple[int, Optional[int], Optional[str]]] = None\n def sync_from_main_thread(self, cnt: Counter, src_db: Connection, dst_db: Connection) -> int:\n \"Main thread sync logic.\"\n\n # This operates on all table, any amount can be in any state\n\n ret = SYNC_OK\n\n if cnt.do_sync:\n # wait for copy thread to catch up\n ret = SYNC_LOOP\n\n # we need to do wanna-sync->do_sync with small batches\n need_dsync = False\n dsync_ok = True\n if self.pgq_min_interval or self.pgq_min_count:\n dsync_ok = False\n elif self.dsync_backup and self.dsync_backup[0] >= self.cur_tick:\n dsync_ok = False\n\n # now check if do-sync is needed\n for t in self.get_tables_in_state(TABLE_WANNA_SYNC):\n # copy thread wants sync, if not behind, do it\n if t.sync_tick_id is not None and self.cur_tick >= t.sync_tick_id:\n if dsync_ok:\n self.change_table_state(dst_db, t, TABLE_DO_SYNC, self.cur_tick)\n ret = SYNC_LOOP\n else:\n need_dsync = True\n\n # tune batch size if needed\n if need_dsync:\n if self.pgq_min_count or 
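sync_tables() above is a retry loop driven by the SYNC_* codes: SYNC_LOOP sleeps and re-reads table state, SYNC_OK proceeds with the batch, SYNC_EXIT ends the process, anything else is a program error. A condensed sketch of that control flow (run_sync and the no-op sleep are illustrative):

SYNC_OK, SYNC_LOOP, SYNC_EXIT = 0, 1, 2

def run_sync(step, sleep=lambda s: None):
    while True:
        res = step()
        if res == SYNC_EXIT:
            return 'exit'
        if res == SYNC_OK:
            return 'continue'
        if res != SYNC_LOOP:
            raise RuntimeError('Program error')
        sleep(3)   # back off, then re-check table states

states = iter([SYNC_LOOP, SYNC_OK])
print(run_sync(lambda: next(states)))  # continue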
self.pgq_min_interval:\n bak = (self.cur_tick, self.pgq_min_count, self.pgq_min_interval)\n self.dsync_backup = bak\n self.pgq_min_count = None\n self.pgq_min_interval = None\n elif self.dsync_backup:\n self.pgq_min_count = self.dsync_backup[1]\n self.pgq_min_interval = self.dsync_backup[2]\n self.dsync_backup = None\n\n # now handle new copies\n npossible = self.parallel_copies - cnt.get_copy_count()\n if cnt.missing and npossible > 0:\n pmap = self.get_state_map(src_db.cursor())\n src_db.commit()\n for t in self.get_tables_in_state(TABLE_MISSING):\n if 'copy_node' in t.table_attrs:\n # should we go and check this node?\n pass\n else:\n # regular provider is used\n if t.name not in pmap:\n self.log.warning(\"Table %s not available on provider\", t.name)\n continue\n pt = pmap[t.name]\n if pt.state != TABLE_OK: # or pt.custom_snapshot: # FIXME: does snapsnot matter?\n self.log.info(\"Table %s not OK on provider, waiting\", t.name)\n continue\n\n # don't allow more copies than configured\n if npossible == 0:\n break\n npossible -= 1\n\n # drop all foreign keys to and from this table\n self.drop_fkeys(dst_db, t.dest_table)\n\n # change state after fkeys are dropped thus allowing\n # failure inbetween\n self.change_table_state(dst_db, t, TABLE_IN_COPY)\n\n # the copy _may_ happen immediately\n self.launch_copy(t)\n\n # there cannot be interesting events in current batch\n # but maybe there's several tables, lets do them in one go\n ret = SYNC_LOOP\n\n return ret\n\n def sync_from_copy_thread(self, cnt: Counter, src_db: Connection, dst_db: Connection) -> int:\n \"Copy thread sync logic.\"\n\n # somebody may have done remove-table in the meantime\n if self.copy_table_name not in self.table_map:\n self.log.error(\"copy_sync: lost table: %s\", self.copy_table_name)\n return SYNC_EXIT\n\n # This operates on single table\n t = self.table_map[self.copy_table_name]\n\n if t.state == TABLE_DO_SYNC:\n # these settings may cause copy to miss right tick\n self.pgq_min_count = None\n self.pgq_min_interval = None\n\n assert t.sync_tick_id\n\n # main thread is waiting, catch up, then handle over\n if self.cur_tick == t.sync_tick_id:\n self.change_table_state(dst_db, t, TABLE_OK)\n return SYNC_EXIT\n elif self.cur_tick < t.sync_tick_id:\n return SYNC_OK\n else:\n self.log.error(\"copy_sync: cur_tick=%d sync_tick=%d\",\n self.cur_tick, t.sync_tick_id)\n raise Exception('Invalid table state')\n elif t.state == TABLE_WANNA_SYNC:\n # wait for main thread to react\n return SYNC_LOOP\n elif t.state == TABLE_CATCHING_UP:\n\n # partition merging\n if t.copy_role in ('wait-replay', 'lead'):\n return SYNC_LOOP\n\n # copy just finished\n if t.dropped_ddl:\n self.restore_copy_ddl(t, dst_db)\n return SYNC_OK\n\n # is there more work?\n if self.work_state:\n return SYNC_OK\n\n # seems we have catched up\n self.change_table_state(dst_db, t, TABLE_WANNA_SYNC, self.cur_tick)\n return SYNC_LOOP\n elif t.state == TABLE_IN_COPY:\n # table is not copied yet, do it\n self.do_copy(t, src_db, dst_db)\n\n # forget previous value\n self.work_state = 1\n\n return SYNC_LOOP\n else:\n # nothing to do\n return SYNC_EXIT\n\n def restore_copy_ddl(self, ts: TableState, dst_db: Connection) -> None:\n self.log.info(\"%s: restoring DDL\", ts.name)\n dst_curs = dst_db.cursor()\n if ts.dropped_ddl:\n for ddl in skytools.parse_statements(ts.dropped_ddl):\n self.log.info(ddl)\n dst_curs.execute(ddl)\n q = \"select * from londiste.local_set_table_struct(%s, %s, NULL)\"\n self.exec_cmd(dst_curs, q, [self.queue_name, ts.name])\n ts.dropped_ddl = 
None\n dst_db.commit()\n\n # analyze\n self.log.info(\"%s: analyze\", ts.name)\n dst_curs.execute(\"analyze \" + skytools.quote_fqident(ts.name))\n dst_db.commit()\n\n def do_copy(self, tbl: TableState, src_db: Connection, dst_db: Connection) -> None:\n \"\"\"Callback for actual copy implementation.\"\"\"\n raise Exception('do_copy not implemented')\n\n def process_remote_event(self, src_curs: Cursor, dst_curs: Cursor, ev: Event) -> None:\n \"\"\"handle one event\"\"\"\n\n self.log.debug(\n \"New event: id=%s / type=%s / data=%s / extra1=%s / extra2=%r / extra3=%r\",\n ev.id, ev.type, ev.data, ev.extra1, ev.extra2, ev.extra3\n )\n\n # set current_event only if processing them one-by-one\n if self.work_state < 0:\n self.current_event = ev\n\n if is_data_event(ev):\n self.handle_data_event(ev, dst_curs)\n elif ev.type == \"R\":\n self.flush_sql(dst_curs)\n self.handle_truncate_event(ev, dst_curs)\n elif ev.type == 'EXECUTE':\n self.flush_sql(dst_curs)\n self.handle_execute_event(ev, dst_curs)\n elif ev.type == 'londiste.add-table':\n self.flush_sql(dst_curs)\n self.add_set_table(dst_curs, ev.data)\n elif ev.type == 'londiste.remove-table':\n self.flush_sql(dst_curs)\n self.remove_set_table(dst_curs, ev.data)\n elif ev.type == 'londiste.remove-seq':\n self.flush_sql(dst_curs)\n self.remove_set_seq(dst_curs, ev.data)\n elif ev.type == 'londiste.update-seq':\n self.flush_sql(dst_curs)\n self.update_seq(dst_curs, ev)\n else:\n super().process_remote_event(src_curs, dst_curs, ev)\n\n # no point keeping it around longer\n self.current_event = None\n\n def handle_data_event(self, ev: Event, dst_curs: Cursor) -> None:\n \"\"\"handle one data event\"\"\"\n t = self.get_table_by_name(ev.extra1)\n if not t or not t.interesting(ev, self.cur_tick, self.copy_thread, self.copy_table_name):\n self.stat_increase('ignored_events')\n return\n\n try:\n p = self.used_plugins[ev.extra1]\n except KeyError:\n p = t.get_plugin()\n self.used_plugins[ev.extra1] = p\n assert self.batch_info\n p.prepare_batch(self.batch_info, dst_curs)\n\n p.process_event(ev, self.apply_sql, dst_curs)\n\n def handle_truncate_event(self, ev: Event, dst_curs: Cursor) -> None:\n \"\"\"handle one truncate event\"\"\"\n t = self.get_table_by_name(ev.extra1)\n if not t or not t.interesting(ev, self.cur_tick, self.copy_thread, self.copy_table_name):\n self.stat_increase('ignored_events')\n return\n\n fqname = skytools.quote_fqident(t.dest_table)\n\n try:\n p = self.used_plugins[ev.extra1]\n except KeyError:\n p = t.get_plugin()\n self.used_plugins[ev.extra1] = p\n assert self.batch_info\n p.prepare_batch(self.batch_info, dst_curs)\n\n if p.conf.get('ignore_truncate'):\n self.log.info(\"ignoring truncate for %s\", fqname)\n return\n\n #\n # Always use CASCADE, because without it the\n # operation cannot work with FKeys, on both\n # slave and master.\n #\n sql = \"TRUNCATE %s CASCADE;\" % fqname\n\n self.flush_sql(dst_curs)\n dst_curs.execute(sql)\n\n def handle_execute_event(self, ev: Event, dst_curs: Cursor) -> None:\n \"\"\"handle one EXECUTE event\"\"\"\n\n if self.copy_thread:\n return\n\n # parse event\n fname = ev.extra1\n s_attrs = ev.extra2\n exec_attrs = ExecAttrs(urlenc=s_attrs)\n sql = ev.data\n\n # fixme: curs?\n dst_curs.execute(\"select londiste.set_session_replication_role('local', true)\")\n\n seq_map = {}\n q = \"select seq_name, local from londiste.get_seq_list(%s) where local\"\n dst_curs.execute(q, [self.queue_name])\n for row in dst_curs.fetchall():\n seq_map[row['seq_name']] = row['seq_name']\n\n tbl_map = {}\n for t in 
self.table_map.values():\n tbl_map[t.name] = t.dest_table\n\n q = \"select * from londiste.execute_start(%s, %s, %s, false, %s)\"\n res = self.exec_cmd(dst_curs, q, [self.queue_name, fname, sql, s_attrs], commit=False)\n ret = res[0]['ret_code']\n if ret > 200:\n self.log.warning(\"Skipping execution of '%s'\", fname)\n dst_curs.execute(\"select londiste.set_session_replication_role('replica', true)\")\n return\n\n if exec_attrs.need_execute(dst_curs, tbl_map, seq_map):\n self.log.info(\"%s: executing sql\")\n xsql = exec_attrs.process_sql(sql, tbl_map, seq_map)\n for stmt in skytools.parse_statements(xsql):\n dst_curs.execute(stmt)\n else:\n self.log.info(\"%s: execution not needed on this node\")\n\n q = \"select * from londiste.execute_finish(%s, %s)\"\n self.exec_cmd(dst_curs, q, [self.queue_name, fname], commit=False)\n dst_curs.execute(\"select londiste.set_session_replication_role('replica', true)\")\n\n def apply_sql(self, sql: str, dst_curs: Cursor) -> None:\n\n # how many queries to batch together, drop batching on error\n limit = 200\n if self.work_state == -1:\n limit = 0\n\n self.sql_list.append(sql)\n if len(self.sql_list) >= limit:\n self.flush_sql(dst_curs)\n\n def flush_sql(self, dst_curs: Cursor) -> None:\n \"\"\"Send all buffered statements to DB.\"\"\"\n\n if len(self.sql_list) == 0:\n return\n\n buf = \"\\n\".join(self.sql_list)\n self.sql_list = []\n\n dst_curs.execute(buf)\n\n def add_set_table(self, dst_curs: Cursor, tbl: str) -> None:\n \"\"\"There was new table added to root, remember it.\"\"\"\n\n if self.register_only_tables and tbl not in self.register_only_tables:\n return\n if self.register_skip_tables and tbl in self.register_skip_tables:\n return\n q = \"select londiste.global_add_table(%s, %s)\"\n dst_curs.execute(q, [self.set_name, tbl])\n\n def remove_set_table(self, dst_curs: Cursor, tbl: str) -> None:\n \"\"\"There was table dropped from root, remember it.\"\"\"\n if tbl in self.table_map:\n t = self.table_map[tbl]\n del self.table_map[tbl]\n self.table_list.remove(t)\n q = \"select londiste.global_remove_table(%s, %s)\"\n dst_curs.execute(q, [self.set_name, tbl])\n\n def remove_set_seq(self, dst_curs: Cursor, seq: str) -> None:\n \"\"\"There was seq dropped from root, remember it.\"\"\"\n\n q = \"select londiste.global_remove_seq(%s, %s)\"\n dst_curs.execute(q, [self.set_name, seq])\n\n def setup_local_only_filter(self) -> None:\n # store event filter\n if self.local_only:\n # create list of tables\n if self.copy_thread:\n _filterlist = skytools.quote_literal(self.copy_table_name)\n else:\n _filterlist = ','.join(map(skytools.quote_literal, self.table_map.keys()))\n\n # build filter\n cond_list = [\n \"ev_type like 'pgq.%'\",\n \"ev_type like 'londiste.%'\",\n ]\n if not self.local_only_drop_execute:\n cond_list.append(\"ev_type = 'EXECUTE'\")\n if _filterlist:\n cond_list.append(f\"ev_extra1 in ({_filterlist})\")\n expr = \" or \".join(cond_list)\n self.consumer_filter = f\"({expr})\"\n else:\n # no filter\n self.consumer_filter = None\n\n def load_table_state(self, curs: Cursor) -> None:\n \"\"\"Load table state from database.\n\n Todo: if all tables are OK, there is no need\n to load state on every batch.\n \"\"\"\n\n q = \"select * from londiste.get_table_list(%s)\"\n curs.execute(q, [self.set_name])\n\n new_list = []\n new_map = {}\n for row in curs.fetchall():\n if not row['local']:\n continue\n t = self.get_table_by_name(row['table_name'])\n if not t:\n t = TableState(row['table_name'], self.log)\n t.loaded_state(row)\n new_list.append(t)\n 
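apply_sql()/flush_sql() above batch generated statements and send them to the database as a single execute() call; after an error the worker replays one event at a time by dropping the batch limit to 0. A self-contained sketch of that buffering (class names and the fake cursor are illustrative):

class SqlBuffer:
    def __init__(self, limit=200):
        self.limit = limit          # 0 means flush after every statement
        self.pending = []

    def add(self, sql, cursor):
        self.pending.append(sql)
        if len(self.pending) >= self.limit:
            self.flush(cursor)

    def flush(self, cursor):
        if self.pending:
            cursor.execute('\n'.join(self.pending))
            self.pending = []

class FakeCursor:
    def execute(self, sql):
        print('EXEC %d statements' % (sql.count('\n') + 1))

buf = SqlBuffer(limit=2)
cur = FakeCursor()
buf.add('insert 1;', cur)
buf.add('insert 2;', cur)   # hits the limit -> EXEC 2 statements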
new_map[t.name] = t\n\n self.table_list = new_list\n self.table_map = new_map\n\n self.fill_copy_method()\n self.setup_local_only_filter()\n\n def refresh_state(self, dst_db: Connection, full_logic: bool = True) -> DictRow:\n res = super().refresh_state(dst_db, full_logic=full_logic)\n\n # make sure local_only filter is loaded on boot\n if self.local_only and self.consumer_filter is None:\n self.load_table_state(dst_db.cursor())\n dst_db.commit()\n\n return res\n\n def get_state_map(self, curs: Cursor) -> Dict[str, TableState]:\n \"\"\"Get dict of table states.\"\"\"\n\n q = \"select * from londiste.get_table_list(%s)\"\n curs.execute(q, [self.set_name])\n\n new_map = {}\n for row in curs.fetchall():\n if not row['local']:\n continue\n t = TableState(row['table_name'], self.log)\n t.loaded_state(row)\n new_map[t.name] = t\n return new_map\n\n def save_table_state(self, curs: Cursor) -> None:\n \"\"\"Store changed table state in database.\"\"\"\n\n for t in self.table_list:\n # backwards compat: move plugin-only dest_table to table_info\n if t.plugin and t.dest_table != t.plugin.dest_table:\n self.log.info(\"Overwriting .dest_table from plugin: tbl=%s dst=%s\",\n t.name, t.plugin.dest_table)\n q = \"update londiste.table_info set dest_table = %s\"\\\n \" where queue_name = %s and table_name = %s\"\n curs.execute(q, [t.plugin.dest_table, self.set_name, t.name])\n\n if not t.changed:\n continue\n merge_state = t.render_state()\n self.log.info(\"storing state of %s: copy:%d new_state:%s\",\n t.name, self.copy_thread, merge_state)\n q = \"select londiste.local_set_table_state(%s, %s, %s, %s)\"\n curs.execute(q, [self.set_name,\n t.name, t.str_snapshot, merge_state])\n t.changed = 0\n\n def change_table_state(self, dst_db: Connection, tbl: TableState, state: int, tick_id: Optional[int] = None) -> None:\n \"\"\"Chage state for table.\"\"\"\n\n tbl.change_state(state, tick_id)\n self.save_table_state(dst_db.cursor())\n dst_db.commit()\n\n self.log.info(\"Table %s status changed to '%s'\", tbl.name, tbl.render_state())\n\n def get_tables_in_state(self, state: int) -> Iterator[TableState]:\n \"get all tables with specific state\"\n\n for t in self.table_list:\n if t.state == state:\n yield t\n\n def get_table_by_name(self, name: str) -> Optional[TableState]:\n \"\"\"Returns cached state object.\"\"\"\n if name.find('.') < 0:\n name = \"public.%s\" % name\n if name in self.table_map:\n return self.table_map[name]\n return None\n\n def launch_copy(self, tbl_stat: TableState) -> None:\n \"\"\"Run parallel worker for copy.\"\"\"\n self.log.info(\"Launching copy process\")\n main_exe = sys.argv[0]\n conf = self.cf.filename or 'undefined'\n cmd: List[str] = [main_exe, conf, 'copy', tbl_stat.name, '-d']\n\n # pass same verbosity options as main script got\n if self.options.quiet:\n cmd.append('-q')\n if self.options.verbose:\n cmd += ['-v'] * self.options.verbose\n\n # let existing copy finish and clean its pidfile,\n # otherwise new copy will exit immediately.\n # FIXME: should not happen on per-table pidfile ???\n copy_pidfile = \"%s.copy.%s\" % (self.pidfile, tbl_stat.name)\n while skytools.signal_pidfile(copy_pidfile, 0):\n self.log.warning(\"Waiting for existing copy to exit\")\n time.sleep(2)\n\n # launch and wait for daemonization result\n self.log.debug(\"Launch args: %r\", cmd)\n res = os.spawnvp(os.P_WAIT, main_exe, cmd)\n self.log.debug(\"Launch result: %r\", res)\n if res != 0:\n self.log.error(\"Failed to launch copy process, result=%d\", res)\n\n def sync_database_encodings(self, src_db: 
Connection, dst_db: Connection) -> None:\n \"\"\"Make sure client_encoding is same on both side.\"\"\"\n\n try:\n # psycopg2\n if src_db.encoding != dst_db.encoding:\n dst_db.set_client_encoding(src_db.encoding)\n except AttributeError:\n # psycopg1\n src_curs = src_db.cursor()\n dst_curs = dst_db.cursor()\n src_curs.execute(\"show client_encoding\")\n src_enc = src_curs.fetchone()[0]\n dst_curs.execute(\"show client_encoding\")\n dst_enc = dst_curs.fetchone()[0]\n if src_enc != dst_enc:\n dst_curs.execute(\"set client_encoding = %s\", [src_enc])\n\n def copy_snapshot_cleanup(self, dst_db: Connection) -> None:\n \"\"\"Remove unnecessary snapshot info from tables.\"\"\"\n no_lag = not self.work_state\n changes = False\n for t in self.table_list:\n t.gc_snapshot(self.copy_thread, self.prev_tick, self.cur_tick, no_lag)\n if t.changed:\n changes = True\n\n if changes:\n self.save_table_state(dst_db.cursor())\n dst_db.commit()\n\n def restore_fkeys(self, dst_db: Connection) -> None:\n \"\"\"Restore fkeys that have both tables on sync.\"\"\"\n dst_curs = dst_db.cursor()\n\n # NOT VALID appreared in 9.1\n q = \"select londiste.version() as ext_version, current_setting('server_version_num')::int < 90100 as compat\"\n dst_curs.execute(q)\n info = dst_curs.fetchone()\n if info[0]:\n ext_version = [int(v) for v in info[0].split('.')]\n do_compat_restore = ext_version < [3, 7] or info[1]\n else:\n do_compat_restore = True\n\n # restore fkeys -- one at a time\n q = \"select * from londiste.get_valid_pending_fkeys(%s)\"\n dst_curs.execute(q, [self.set_name])\n fkey_list = dst_curs.fetchall()\n dst_db.commit()\n\n for row in fkey_list:\n self.log.info('Creating fkey: %s (%s --> %s)', row['fkey_name'], row['from_table'], row['to_table'])\n if do_compat_restore:\n q2 = \"select londiste.restore_table_fkey(%s, %s)\"\n dst_curs.execute(q2, [row['from_table'], row['fkey_name']])\n dst_db.commit()\n else:\n q3 = \"select londiste.restore_table_fkey(%s, %s, true)\"\n done = False\n while not done:\n dst_curs.execute(q3, [row['from_table'], row['fkey_name']])\n sql = dst_curs.fetchone()[0]\n if sql:\n dst_curs.execute(sql)\n else:\n done = True\n dst_db.commit()\n\n def drop_fkeys(self, dst_db: Connection, table_name: str) -> None:\n \"\"\"Drop all foreign keys to and from this table.\n\n They need to be dropped one at a time to avoid deadlocks with user code.\n \"\"\"\n\n dst_curs = dst_db.cursor()\n q = \"select * from londiste.find_table_fkeys(%s)\"\n dst_curs.execute(q, [table_name])\n fkey_list = dst_curs.fetchall()\n for row in fkey_list:\n self.log.info('Dropping fkey: %s', row['fkey_name'])\n q2 = \"select londiste.drop_table_fkey(%s, %s)\"\n dst_curs.execute(q2, [row['from_table'], row['fkey_name']])\n dst_db.commit()\n\n def process_root_node(self, dst_db: Connection) -> None:\n \"\"\"On root node send seq changes to queue.\"\"\"\n\n super().process_root_node(dst_db)\n\n q = \"select * from londiste.root_check_seqs(%s)\"\n self.exec_cmd(dst_db, q, [self.queue_name])\n\n def update_seq(self, dst_curs: Cursor, ev: Event) -> None:\n if self.copy_thread:\n return\n\n val = int(ev.data)\n seq = ev.extra1\n q = \"select * from londiste.global_update_seq(%s, %s, %s)\"\n self.exec_cmd(dst_curs, q, [self.queue_name, seq, val])\n\n def copy_event(self, dst_curs: Cursor, ev: Event, filtered_copy: int) -> None:\n # filtered_copy means merge-leaf\n # send only data events down (skipping seqs also)\n if filtered_copy:\n if ev.type[:9] in ('londiste.',):\n return\n\n if is_data_event(ev):\n t = 
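restore_fkeys() above gates on versions: londiste 3.7+ together with PostgreSQL 9.1+ can add foreign keys as NOT VALID and validate them separately, while older combinations fall back to a plain restore. A small sketch of that gate, mirroring the list-of-ints comparison used above:

def use_compat_restore(ext_version: str, server_older_than_91: bool) -> bool:
    # '3.6' -> [3, 6]; list comparison is elementwise, like the original
    parsed = [int(v) for v in ext_version.split('.')]
    return parsed < [3, 7] or server_older_than_91

print(use_compat_restore('3.6', False))  # True  -> one-shot restore
print(use_compat_restore('3.8', False))  # False -> NOT VALID + VALIDATE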
self.get_table_by_name(ev.extra1)\n if t:\n try:\n p = self.used_plugins[ev.extra1]\n except KeyError:\n p = t.get_plugin()\n self.used_plugins[ev.extra1] = p\n p.prepare_batch(None, dst_curs)\n\n # handler may rewrite or drop the event\n ev2 = p.get_copy_event(ev, self.queue_name)\n if ev2 is None:\n return\n ev = ev2\n\n super().copy_event(dst_curs, ev, filtered_copy)\n\n def exception_hook(self, det: Exception, emsg: str) -> None:\n # add event info to error message\n if self.current_event:\n ev = self.current_event\n info = \"[ev_id=%d,ev_txid=%d] \" % (ev.ev_id, ev.ev_txid)\n emsg = info + emsg\n super().exception_hook(det, emsg)\n\n\nif __name__ == '__main__':\n script = Replicator(sys.argv[1:])\n script.start()\n\n","repo_name":"pgq/londiste","sub_path":"londiste/playback.py","file_name":"playback.py","file_ext":"py","file_size_in_byte":42381,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"61"} +{"seq_id":"59031013","text":"#!/usr/bin/env python\n\"\"\"Stateful programmatic web browsing.\n\nStateful programmatic web browsing, after Andy Lester's Perl module\nWWW::Mechanize.\n\nThe library is layered: mechanize.Browser (stateful web browser),\nmechanize.UserAgent (configurable URL opener), plus urllib2 handlers.\n\nFeatures include: ftp:, http: and file: URL schemes, browser history,\nhigh-level hyperlink and HTML form support, HTTP cookies, HTTP-EQUIV and\nRefresh, Referer [sic] header, robots.txt, redirections, proxies, and\nBasic and Digest HTTP authentication. mechanize's response objects are\n(lazily-) .seek()able and still work after .close().\n\nMuch of the code originally derived from Perl code by Gisle Aas\n(libwww-perl), Johnny Lee (MSIE Cookie support) and last but not least\nAndy Lester (WWW::Mechanize). 
urllib2 was written by Jeremy Hylton.\n\n\"\"\"\n\ndef unparse_version(tup):\n major, minor, bugfix, state_char, pre = tup\n fmt = \"%s.%s.%s\"\n args = [major, minor, bugfix]\n if state_char is not None:\n fmt += \"%s\"\n args.append(state_char)\n if pre is not None:\n fmt += \"-pre%s\"\n args.append(pre)\n return fmt % tuple(args)\n\ndef str_to_tuple(text):\n if text.startswith(\"(\"):\n text = text[1:-1]\n els = [el.strip() for el in text.split(\",\")]\n newEls = []\n for ii in range(len(els)):\n el = els[ii]\n if el == \"None\":\n newEls.append(None)\n elif 0 <= ii < 3:\n newEls.append(int(el))\n else:\n if el.startswith(\"'\") or el.startswith('\"'):\n el = el[1:-1]\n newEls.append(el)\n return tuple(newEls)\n\nimport re\n## VERSION_MATCH = re.search(r'__version__ = \\((.*)\\)',\n## open(\"mechanize/_mechanize.py\").read())\n## VERSION = unparse_version(str_to_tuple(VERSION_MATCH.group(1)))\nVERSION = \"0.1.12\"\nINSTALL_REQUIRES = [\"ClientForm>=0.2.6, ==dev\"]\nNAME = \"mechanize\"\nPACKAGE = True\nLICENSE = \"BSD\" # or ZPL 2.1\nPLATFORMS = [\"any\"]\nZIP_SAFE = True\nCLASSIFIERS = \"\"\"\\\nDevelopment Status :: 5 - Production/Stable\nIntended Audience :: Developers\nIntended Audience :: System Administrators\nLicense :: OSI Approved :: BSD License\nLicense :: OSI Approved :: Zope Public License\nNatural Language :: English\nOperating System :: OS Independent\nProgramming Language :: Python\nProgramming Language :: Python :: 2\nProgramming Language :: Python :: 2.4\nProgramming Language :: Python :: 2.5\nProgramming Language :: Python :: 2.6\nTopic :: Internet\nTopic :: Internet :: File Transfer Protocol (FTP)\nTopic :: Internet :: WWW/HTTP\nTopic :: Internet :: WWW/HTTP :: Browsers\nTopic :: Internet :: WWW/HTTP :: Indexing/Search\nTopic :: Internet :: WWW/HTTP :: Site Management\nTopic :: Internet :: WWW/HTTP :: Site Management :: Link Checking\nTopic :: Software Development :: Libraries\nTopic :: Software Development :: Libraries :: Python Modules\nTopic :: Software Development :: Testing\nTopic :: Software Development :: Testing :: Traffic Generation\nTopic :: System :: Archiving :: Mirroring\nTopic :: System :: Networking :: Monitoring\nTopic :: System :: Systems Administration\nTopic :: Text Processing\nTopic :: Text Processing :: Markup\nTopic :: Text Processing :: Markup :: HTML\nTopic :: Text Processing :: Markup :: XML\n\"\"\"\n\n#-------------------------------------------------------\n# the rest is constant for most of my released packages:\n\nimport sys\n\nif PACKAGE:\n packages, py_modules = [NAME], None\nelse:\n packages, py_modules = None, [NAME]\n\ndoclines = __doc__.split(\"\\n\")\n\nif not hasattr(sys, \"version_info\") or sys.version_info < (2, 3):\n from distutils.core import setup\n _setup = setup\n def setup(**kwargs):\n for key in [\n # distutils >= Python 2.3 args\n # XXX probably download_url came in earlier than 2.3\n \"classifiers\", \"download_url\",\n # setuptools args\n \"install_requires\", \"zip_safe\", \"test_suite\",\n ]:\n if kwargs.has_key(key):\n del kwargs[key]\n # Only want packages keyword if this is a package,\n # only want py_modules keyword if this is a single-file module,\n # so get rid of packages or py_modules keyword as appropriate.\n if kwargs[\"packages\"] is None:\n del kwargs[\"packages\"]\n else:\n del kwargs[\"py_modules\"]\n apply(_setup, (), kwargs)\nelse:\n import ez_setup\n ez_setup.use_setuptools()\n from setuptools import setup\n\ndef main():\n setup(\n name = NAME,\n version = VERSION,\n license = LICENSE,\n platforms = 
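unparse_version() above formats the five-element version tuple used throughout this setup script. The function is copied verbatim below so the formatting can be checked standalone (the sample tuples are illustrative):

def unparse_version(tup):
    major, minor, bugfix, state_char, pre = tup
    fmt = "%s.%s.%s"
    args = [major, minor, bugfix]
    if state_char is not None:
        fmt += "%s"
        args.append(state_char)
    if pre is not None:
        fmt += "-pre%s"
        args.append(pre)
    return fmt % tuple(args)

print(unparse_version((0, 1, 12, None, None)))  # 0.1.12
print(unparse_version((0, 1, 12, 'a', 2)))      # 0.1.12a-pre2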
PLATFORMS,\n classifiers = [c for c in CLASSIFIERS.split(\"\\n\") if c],\n install_requires = INSTALL_REQUIRES,\n zip_safe = ZIP_SAFE,\n test_suite = \"test\",\n author = \"John J. Lee\",\n author_email = \"jjl@pobox.com\",\n description = doclines[0],\n long_description = \"\\n\".join(doclines[2:]),\n url = \"http://wwwsearch.sourceforge.net/%s/\" % NAME,\n download_url = (\"http://wwwsearch.sourceforge.net/%s/src/\"\n \"%s-%s.tar.gz\" % (NAME, NAME, VERSION)),\n py_modules = py_modules,\n packages = packages,\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Almad/Mechanize","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":5206,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"33698804492","text":"\r\nfrom selenium import webdriver\r\nimport time\r\n\r\n# Start the WebDriver\r\nbrowser = webdriver.Chrome('/path/to/chromedriver')\r\n\r\n# Open the YouTube live stream page\r\nbrowser.get('https://www.youtube.com/watch?v=VIDEO_ID')\r\n\r\nwhile True:\r\n # Type \"spam\"\r\n chat_box = browser.find_element_by_xpath('//*[@id=\"chat-input\"]')\r\n chat_box.send_keys('puan')\r\n chat_box.submit()\r\n \r\n # Wait for 5 minutes\r\n time.sleep(300)\r\n","repo_name":"burakgulbe/Youtube-spam-bot","sub_path":"youtubespambot.py","file_name":"youtubespambot.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3176165359","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nrequired_packages = [\n \"torch\", \"torchvision\", \"scikit-learn\", \"pandas\", \"numpy\", \"matplotlib\",\n \"kmeans_pytorch\", \"pytorch_metric_learning\", \"opencv-python\", \"Pillow\",\n \"torchsummary\"\n]\nsetuptools.setup(\n name=\"datl\",\n version=\"0.1\",\n author=\"Christoph Raab\",\n author_email=\"christophraab@outlook.de\",\n description=\n \"DATL: Source Code for the paper Domain Adversarial Tangent Subspace Alignment for explainable domain adaptation.\",\n license=\"MIT\",\n url=\"https://github.com/ChristophRaab/datl\",\n packages=setuptools.find_packages(include=['datl', 'datl.*']),\n python_requires=\">=3.8\",\n install_requires=required_packages,\n package_data={\"\": [\"README.md\", \"LICENSE\"]},\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\"\n ])\n","repo_name":"ChristophRaab/DATL","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"14499701395","text":"import serial\n\n# Configurar a porta serial\nser = serial.Serial('COM3', 9600) \n\ntry:\n while True:\n line = ser.readline().strip()\n if line.startswith(\"pH Val: \"):\n try:\n ph_value = float(line.split(\":\")[1].strip())\n print(\"Valor de pH: %.2f\" % ph_value)\n except ValueError:\n print(\"Valor de pH não pôde ser convertido para float:\", line)\n else:\n print(\"Linha inesperada:\", line)\nexcept KeyboardInterrupt:\n 
ser.close()\n","repo_name":"vicfior/Medidor-de-qualidade-da-agua","sub_path":"secundários/CodigoSensorPh.py","file_name":"CodigoSensorPh.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13768271628","text":"def znajdz_trojke_pitagorejska(przedzial):\n ilosc_iteracji = 0\n ilosc_trojek_zdublowanych = 0\n ilosc_samych_trojek = 0\n print(\"all: once:\")\n for a in range(1,przedzial):\n for b in range(1,przedzial):\n for c in range(1, przedzial):\n ilosc_iteracji +=1\n\n if a**2 + b**2 == c**2:\n ilosc_trojek_zdublowanych +=1\n print(a,\",\",b,\",\",c,\" \",end=\"\",sep=\"\")\n if a**2 + b**2 == c**2 and a > b:\n ilosc_samych_trojek +=1\n print(a,\",\", b,\",\", c, sep=\"\")\n else:\n print()\n\n print(f\"Ilosc iteracji: {ilosc_iteracji} \\nIlosc trojek zdublowanych: {ilosc_trojek_zdublowanych} \\nFaktyczna ilosc trojek: {ilosc_samych_trojek}\")\n\n\n\n\n\ndef main():\n przedzial = int(input(\"Podaj gorny przedzial liczbowy: \"))\n znajdz_trojke_pitagorejska(przedzial)\nmain()\n","repo_name":"PanBard/python_imperatywnie","sub_path":"4.16.py","file_name":"4.16.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2176873399","text":"from datetime import timedelta\nimport os, pendulum , sys\nfrom functools import partial\nfrom airflow import DAG\nfrom airflow.operators.python import PythonOperator\nfrom desafio_2_v2 import main # Aqui se importan las funciones a ejecutar (las del desafio 2)\nfrom airflow.operators.email_operator import EmailOperator\nimport smtplib\nfrom email.mime.text import MIMEText\n\ndef send_email():\n subject = \"Email Subject\"\n body = \"This is the body of the text message\"\n sender = \"o.oaguilera@gmail.com\"\n recipients = [\"o.oaguilera@gmail.com\", \"o.oaguilera2@gmail.com\"]\n password = \"******\" # aqui ingrese su clave segura de 16 digitos de gmail desde el menu clave para aplicaciones\n\n msg = MIMEText(body, 'plain') # Usamos 'plain' como tipo de contenido\n msg['Subject'] = subject\n msg['From'] = sender\n msg['To'] = ', '.join(recipients)\n\n try:\n smtp_server = smtplib.SMTP_SSL('smtp.gmail.com', 465)\n smtp_server.login(sender, password)\n smtp_server.sendmail(sender, recipients, msg.as_string())\n smtp_server.quit()\n print(\"Message sent successfully!\")\n except Exception as e:\n print(f\"Error sending email: {str(e)}\")\n\n\nDAG_ID = 'ingesta_carga_redshift'\nDAG_DESCRIPTION = 'Api_rick_and_morti'\nDAG_SCHEDULE = '00 8 */1 * *'\nDAG_CATCHUP = False\nTAGS = [\"Entrega_3\"]\n\nARGS = {\n 'owner' : 'orlando_aguilera',\n 'start_date' : pendulum.datetime(2022, 8, 24, tz=\"America/Argentina/Buenos_Aires\"),\n 'retries': 3,\n 'retry_delay': timedelta(minutes=5),\n}\n\nwith DAG(dag_id=DAG_ID,\n description=DAG_DESCRIPTION,\n default_args=ARGS,\n schedule_interval=DAG_SCHEDULE,\n catchup=DAG_CATCHUP,\n tags=TAGS,\n max_active_runs=1) as dag:\n\n name_task_1 = PythonOperator(task_id='job_get_data',\n python_callable=main,\n retries=1,\n retry_delay=timedelta(minutes=1))\n\n name_task_2 = PythonOperator(task_id='send_email_task',\n python_callable=send_email,\n retries=1,\n retry_delay=timedelta(minutes=1))\n\n\n\n # name_task_2 = PythonOperator(task_id = 'job_clean_data',\n # python_callable = partial(clean_character_data, data),\n # retries = 1,\n # dag= dag,\n # retry_delay = timedelta(minutes=1))\n \n # name_task_3 = PythonOperator(task_id = 
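The 4.16.py record above brute-forces all (a, b, c) combinations up to a user-supplied bound to find Pythagorean triples, counting iterations and duplicates along the way. A hypothetical leaner variant is sketched below: it derives c from a and b instead of scanning a third loop, and requires a < b so mirrored pairs are skipped (needs Python 3.8+ for math.isqrt):

import math

def pythagorean_triples(limit):
    for a in range(1, limit):
        for b in range(a + 1, limit):          # a < b skips mirrored pairs
            c = math.isqrt(a * a + b * b)
            if c < limit and c * c == a * a + b * b:
                yield (a, b, c)

print(list(pythagorean_triples(20)))
# [(3, 4, 5), (5, 12, 13), (6, 8, 10), (8, 15, 17), (9, 12, 15)]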
    # name_task_3 = PythonOperator(task_id = 'job_insert_data',\n    #                       python_callable = insert_data_into_redshift,\n    #                       retries = 1,\n    #                       retry_delay = timedelta(minutes=1))\n    \n    name_task_1 >> name_task_2 #>> name_task_3\n","repo_name":"rololevy/DE_Coder_House","sub_path":"Enrtrega_4/dags/dag_ingestion.py","file_name":"dag_ingestion.py","file_ext":"py","file_size_in_byte":2924,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"16754193086","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n# Size of the points dataset.\narray = np.loadtxt('ex2data1.txt', delimiter=',')\nm = array.size//3\n\n# Points x-coordinate and dummy value (x0, x1).\nX0 = np.ones((m, 1), int)\nX1 = np.array(array[:, 0]).reshape(m, 1)\nX2 = np.array(array[:, 1]).reshape(m, 1)\nX = np.hstack((X0, X1, X2))\n\n# Points y-coordinate\ny = np.array(array[:, 2]).reshape(m, 1)\n\n# The Learning Rate alpha.\nalpha = 0.01\n\n\ndef sigmoid(x):\n    s = 1 / (1 + np.exp(-x))\n    return s\n\ndef error_function(theta, X, y):\n    '''Squared-error cost J on the linear score; the gradient below is the logistic one.'''\n    diff = np.dot(X, theta) - y\n    return (1./(2*m)) * np.dot(np.transpose(diff), diff)\n\ndef gradient_function(theta, X, y):\n\t'''Gradient of the function J definition.'''\n\tdiff = sigmoid(np.dot(X, theta)) - y\n\treturn (1./m) * np.dot(np.transpose(X), diff)\n\ndef gradient_descent(X, y, alpha):\n\t'''Perform gradient descent. '''\n\ttheta = np.array([1, 1 ,1]).reshape(3, 1)\n\tgradient = gradient_function(theta, X, y)\n\tfor i in range(1, 1000000):\n\t\ttheta = theta - alpha * gradient\n\t\tgradient = gradient_function(theta, X, y)\n\treturn theta\n\n\noptimal = gradient_descent(X, y, alpha)\nprint('optimal:', optimal)\nprint('error function:', error_function(optimal, X, y)[0,0])\nx = array[:, 0]\ny = array[:, 1]\nresult = array[:,2]\nmask = result > 0\nmask1 = result <1\nplt.scatter(x[mask], y[mask], c='b', alpha=0.6)\nplt.scatter(x[mask1], y[mask1], c='g', alpha=0.6)\n\nx1 = np.arange(20, 100, 0.1)\nx2 = -1*optimal[0]/optimal[2]-optimal[1]/optimal[2]*x1\nplt.plot(x1, x2)\nplt.show()","repo_name":"nightzero123/machine_learning","sub_path":"exercise_of_LogicRegresstion/LogicRegression.py","file_name":"LogicRegression.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"42160328548","text":"import cv2\r\nimport numpy as np\r\n\r\n#DEFINITIONS OF FUNCTIONS\r\ndef nothing(x):\r\n    pass\r\n    \r\ndef listX (contour):\r\n    coord = []\r\n    for i in range(len(contour)):\r\n        coord.append(contour[i][0][0])\r\n    return coord\r\n\r\ndef listY (contour):\r\n    coord = []\r\n    for i in range(len(contour)):\r\n        coord.append(contour[i][0][1])\r\n    return coord\r\n\r\ndef findTop1(contour, ly):\r\n    return ly.index(min(ly))\r\n\r\ndef findBottom1(contour, ly):\r\n    return ly.index(max(ly))\r\n\r\ndef findLeft(contour, lx):\r\n    return lx.index(min(lx))\r\n\r\ndef findRight(contour, lx):\r\n    return lx.index(max(lx))\r\n\r\ndef findInfo(tp, bp, lp, rp, lx, ly):\r\n    #middle\r\n    mid = int((lx[lp]+lx[rp])/2)\r\n    #height\r\n    height = ly[bp]-ly[tp]\r\n    #angle\r\n    a = (mid-lx[tp])\r\n    b = (height)\r\n    if a!=0 :\r\n        angle = np.arctan(b/a)\r\n    else:\r\n        angle = 0\r\n    #magnitude\r\n    mod = np.sqrt((height*height)+(mid-lx[tp])*(mid-lx[tp]))\r\n\r\n    return height, angle, mod\r\n\r\ndef checkColor(bound, img):\r\n    for (low, upp) in bound :\r\n        low = np.array(low, dtype ='uint8')\r\n        upp = np.array(upp, 
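In the regression record above, the gradient uses the sigmoid while `error_function` scores a linear prediction with a squared-error term, so the reported cost does not match what the descent actually minimizes. A self-contained sketch of logistic regression with the matching cross-entropy cost, on synthetic data:

```python
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def cross_entropy(theta, X, y):
    # J(theta) = -(1/m) * sum(y*log(h) + (1-y)*log(1-h))
    m = len(y)
    h = sigmoid(X @ theta)
    eps = 1e-12  # avoid log(0)
    return -(y * np.log(h + eps) + (1 - y) * np.log(1 - h + eps)).sum() / m

def fit(X, y, alpha=0.1, iters=5000):
    theta = np.zeros((X.shape[1], 1))
    m = len(y)
    for _ in range(iters):
        grad = X.T @ (sigmoid(X @ theta) - y) / m  # cross-entropy gradient
        theta -= alpha * grad
    return theta

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    x = rng.uniform(-3, 3, size=(200, 1))
    y = (x[:, :1] > 0.5).astype(float)     # separable at x = 0.5
    X = np.hstack([np.ones((200, 1)), x])  # bias column
    theta = fit(X, y)
    preds = (sigmoid(X @ theta) > 0.5).astype(float)
    print("cost:", cross_entropy(theta, X, y))
    print("accuracy:", (preds == y).mean())
```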
dtype ='uint8')\r\n \r\n mask = cv2.inRange(img, low, upp)\r\n outp = cv2.bitwise_and(img, img, mask = mask)\r\n \r\n black_out = cv2.imwrite(\"black.jpg\",mask)\r\n \r\n return mask\r\n \r\ndef infoGetter(filename):\r\n\r\n centres = []\r\n heights = []\r\n angles = []\r\n modules = []\r\n \r\n boundaries = [([0,0,0],[100,100,100])]\r\n \r\n frame = cv2.imread(filename)\r\n #imgCropped = frame[1900:3080,900:1510]\r\n imgCropped = frame[1000:5000,800:5000]\r\n frame = cv2.resize(imgCropped,(1000,500))\r\n \r\n cv2.imwrite(\"imgcrop.jpg\",frame)\r\n \r\n \r\n \r\n bl_out = checkColor(boundaries, frame)\r\n areabl = []\r\n contours_check, hierarchy = cv2.findContours(bl_out,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\r\n for contour in contours_check:\r\n area_check = cv2.contourArea(contour)\r\n areabl.append(area_check)\r\n cv2.imwrite(\"imgcheck.jpg\",bl_out)\r\n if all(v < 100 for v in areabl) :\r\n exit(\"No fluid detected\")\r\n \r\n \r\n \r\n \r\n \r\n ret,thresh1 = cv2.threshold(frame,127,255,cv2.THRESH_BINARY_INV)\r\n\r\n ## parameters selection\r\n l_h = 14\r\n l_s = 16\r\n l_v = 15\r\n u_h = 170\r\n u_s = 255\r\n u_v = 255\r\n N_erode = 4\r\n eps = 2/100\r\n area_min = 100\r\n area_max = 1800\r\n N_erode = N_erode if N_erode>0 else 1\r\n\r\n imgGray = cv2.cvtColor(thresh1,cv2.COLOR_BGR2GRAY)\r\n imgBlur = cv2.GaussianBlur(imgGray,(7,7),1)\r\n imgCanny = cv2.Canny(imgBlur,50,50)\r\n\r\n height=0\r\n angle=0\r\n module=0\r\n p=0\r\n\r\n ## find contours in image based on color mask\r\n contours, hierarchy = cv2.findContours(imgCanny,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\r\n for contour in contours:\r\n area = cv2.contourArea(contour)\r\n perimeter = cv2.arcLength(contour, True)\r\n approx = cv2.approxPolyDP(contour, eps*perimeter, True)\r\n x,y,w,h = cv2.boundingRect(contour)\r\n \r\n if (area_min < area) and (2 cumulated_ppl :\n\t\t\tguests += i -cumulated_ppl\n\t\t\tcumulated_ppl = i\n\t\tcumulated_ppl += int(shyness_array[i])\n\n\treturn guests\n\n\nif __name__ == \"__main__\":\n\n\twith open(\"input\") as f:\n\t\tf.readline()\n\t\ti = 1\n\t\tfor line in f:\n\t\t\tshyness_array = line[1:len(line)-1]\n\t\t\tprint(\"Case #\"+str(i)+\": \"+str(findGuests(shyness_array)))\n\t\t\ti = i+1\n\n\t#res = findGuests(test)","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_155/3417.py","file_name":"3417.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23620313301","text":"import sys\r\n\r\nif not len(sys.argv) == 3:\r\n exit(\"\"\"Wrong usage parameters supplied!\r\n \r\n Usage:\r\n %s input output\"\"\" % __file__\r\n )\r\n\r\nNO_RESULT = \"NO\"\r\n\r\ndef is_mod2_equally_splitable(values):\r\n mod2_sum = 0\r\n for value in values:\r\n mod2_sum ^= value\r\n return not bool(mod2_sum)\r\n\r\ndef get_mod2_equal_parts(values):\r\n values = values[:]\r\n values.sort()\r\n large_part = 0\r\n for value in values[1:]:\r\n large_part += value\r\n return (values[0], large_part)\r\n \r\ndef main(input_filename, output_filename):\r\n \r\n input_f = open(input_filename, \"r\")\r\n output_f = open(output_filename, \"w\")\r\n \r\n try:\r\n TEST_CASES_NUM = int(input_f.readline())\r\n \r\n for test_case_i in xrange(TEST_CASES_NUM):\r\n candy_num = int(input_f.readline())\r\n candy_values = [int(x) for x in input_f.readline().strip().split(\" \")]\r\n \r\n result = None\r\n if (is_mod2_equally_splitable(candy_values)):\r\n (patrick_share, sean_share) = 
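The `findGuests` greedy above rests on one invariant: scanning shyness levels in order, whenever the running count of people already standing falls short of the level index, inviting exactly the shortfall is optimal. A compact restatement with a quick self-check, assuming the classic sample inputs for this problem:

```python
def min_friends(shyness: str) -> int:
    # shyness[k] = number of audience members with shyness level k.
    standing = 0
    friends = 0
    for level, count in enumerate(int(ch) for ch in shyness):
        if level > standing:
            friends += level - standing  # invite just enough to reach `level`
            standing = level
        standing += count
    return friends

if __name__ == "__main__":
    # Sample cases as usually quoted for this problem (assumed).
    assert min_friends("11111") == 0
    assert min_friends("09") == 1
    assert min_friends("110011") == 2
    assert min_friends("1") == 0
    print("all cases pass")
```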
get_mod2_equal_parts(candy_values)\r\n                result = sean_share\r\n            else:\r\n                result = NO_RESULT\r\n            \r\n            output_f.write(\"Case #%d: %s\\n\" % (test_case_i + 1, result))\r\n        \r\n    finally:\r\n        input_f.close()\r\n        output_f.close()\r\n\r\n\r\nmain(sys.argv[1], sys.argv[2])\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_76/379.py","file_name":"379.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"31743930316","text":"from django.urls import path\n\nfrom apps.tienda import views\n\napp_name = 'tienda'\n\nurlpatterns = [\n    path('', views.MostrarTienda.as_view(), name='mostrar-tienda'),\n    path('listar', views.ListaDeTiendas.as_view(), name='listar-tiendas'),\n    path('editar/', views.EditarTienda.as_view(), name='editar-tienda'),\n    path('productos/listar', views.ListaDeProductos.as_view(), name='listar-productos'),\n    path('producto/crear', views.CrearProducto.as_view(), name='crear-producto'),\n    path('producto/editar/', views.EditarProducto.as_view(), name='editar-producto'),\n    path('producto/delete/', views.EliminarProducto.as_view(), name='borrar-producto'),\n    path('/productos', views.VisitarTienda.as_view(), name='visitar-tienda'),\n]\n","repo_name":"alejandroklever/ecommerce-django-template","sub_path":"apps/tienda/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"es","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
{"seq_id":"41986070282","text":"from model.MonedaParModel import MonedaParModel\nfrom model.MonedaModel import MonedaModel\nfrom app import db\nfrom common.AppException import AppException\nfrom datetime import datetime, date\n\nclass MonedaParHandler:\n    def __init__(self):\n        pass\n\n    def add(self, par:MonedaParModel):\n        self._val_add(par)\n        par.ind_activo = 'S'\n        par.fch_registro = date.today()\n        par.fch_audit = datetime.now()\n        db.session.add(par)\n\n    def _val_add(self, par:MonedaParModel):\n        mon_ref = self._get_moneda(par.mon_ref_id)\n        if mon_ref is None:\n            raise AppException(msg=\"Reference currency {0} does not exist in the database\".format(par.mon_ref_id))\n\n        par_encontrado = self._get_par(par)\n        if par_encontrado is not None and par_encontrado.ind_activo == 'S':\n            raise AppException(msg=\"Pair {0} is already registered and active\".format(par.nombre))\n\n    def _get_par(self, in_par:MonedaParModel):\n        par_encontrado = db.session.query(\n            MonedaParModel\n        ).filter(\n            MonedaParModel.mon_base_id == in_par.mon_base_id,\n            MonedaParModel.mon_ref_id == in_par.mon_ref_id\n        ).first()\n        return par_encontrado\n    \n    def _get_moneda(self, moneda_id):\n        moneda = MonedaModel.query.filter_by(\n            moneda_id = moneda_id\n        ).first()\n        return moneda\n\n","repo_name":"ToxidSeed/bagholdercuy","sub_path":"model/bussiness/moneda_par_handler.py","file_name":"moneda_par_handler.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"26300914442","text":"\n\nERROR_INCORRECT_CALL_PARAMS = 0xA003\nERROR_INVALID_WRITE_ACTION = 0xA004\nERROR_INVALID_LENGTH = 0xA005\nERROR_INVALID_COMBINATION = 0xA006\nERROR_INVALID_MONITORING_TIME = 0xA007\nERROR_INVALID_TI = 0xA009\nERROR_INVALID_UNIT = 0xA00A\nERROR_INVALID_FUNCTION_CODE = 0xA00B\nERROR_INVALID_REG_BIT_COUNT = 0xA00D\nERROR_INVALID_DATATYPE = 0xA011\nERROR_INVALID_MSG_ECHO_FN5 = 0xA081\nERROR_INVALID_MSG_ECHO_FN6 = 0xA082\nERROR_UNKNOWN_EXCEPTION = 
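The candy-splitting record above leans on an XOR fact: adding "mod 2 without carries" is XOR, and when the XOR of all values is zero, every nonempty proper split leaves both piles with equal XOR (each side's XOR cancels the other's). The best regular-sum share then keeps everything except the smallest piece. A sketch of that logic with a couple of hand-checked cases:

```python
from functools import reduce
from operator import xor

def best_sean_share(values):
    # If the XOR of all candies is nonzero, no split satisfies Patrick.
    # If it is zero, every split does, so Sean keeps all but the minimum.
    if reduce(xor, values) != 0:
        return None
    return sum(values) - min(values)

if __name__ == "__main__":
    assert best_sean_share([1, 2, 3, 4, 5]) is None  # 1^2^3^4^5 == 1
    assert best_sean_share([3, 5, 6]) == 11          # 3^5^6 == 0 -> 5 + 6
    print("ok")
```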
0xA095\nERROR_BUSY = 0xA083\nERROR_MONITORING_TIME_ELAPSED = 0xA100\n\n\n\nmsg_params = {\n\t\"startingAddress\" : 0,\n\t\"numberOfPoints\" : 0,\n\t\"presetAddress\" : 0,\n\t\"numberPreset\" : 0,\n}\n\nmodbus_area_data_types = {\n\t0 : \"Unused\",\n\t1 : \"Coils\",\n\t2 : \"Inputs\",\n\t3 : \"Holding_Register\",\n\t4 : \"Input_Register\",\n}\n\nNO_ERROR = 0\nILLEGAL_FUNCTION = 1\nILLEGAL_DATA_ADDRESS = 2\nILLEGAL_DATA_VALUE = 3\n\n\nclass ModBusRequestMessage(object) :\n\n\n\tdef __init__(self,functionCode,slaveAddress,t_id,coil_mem,reg_mem) :\n\t\tself.slaveAddress = slaveAddress\n\t\tself.functionCode = functionCode\n\t\tself.transaction_id = t_id\n\t\tself.startingAddressHi = 0\n\t\tself.startingAddressLo = 0\n\t\tself.numberOfPointsHi = 0\n\t\tself.numberOfPointsLo = 0\n\t\tself.presetAddressHi = 0\n\t\tself.presetAddressLo = 0\n\t\tself.presetDataHi = []\n\t\tself.presetDataLo = []\n\t\tself.numberPresetHi = 0\n\t\tself.numberPresetLo = 0\n\t\tself.byteCount = 0\t\n\t\tself.msgCRC = 0\n\t\tself.coil_mem = coil_mem\n\t\tself.reg_mem = reg_mem\n\n\tdef read_coil_status(self,coil_Address) :\n\t\t\n\t\taddr = \"Coils_\" + str(coil_Address)\n\t\tif addr not in self.coil_mem.keys() :\n\t\t\treturn 0\n\n\t\treturn self.coil_mem[addr]\n\t\n\tdef read_holding_register(self,reg_Address) :\n\t\t\n\t\taddr = \"Holding_Register_\" + str(reg_Address)\n\t\tif addr not in self.reg_mem.keys() :\n\t\t\treturn 0\n\n\t\treturn self.reg_mem[addr]\n\t\t\n\n\tdef load_msg_params(self,params) :\n\t\t\n\t\tmsgSum = self.slaveAddress + self.functionCode + self.transaction_id\n\n\t\tif self.functionCode in [1,2,3,4] :\t# Read Coils, Inputs, Holding Register or Input Register\n\t\t\tself.startingAddressHi = params[\"startingAddress\"] >> 8\n\t\t\tmsgSum = msgSum + self.startingAddressHi\n\t\t\tself.startingAddressLo = params[\"startingAddress\"] & 0x00FF\n\t\t\tmsgSum = msgSum + self.startingAddressLo\n\t\t\tself.numberOfPointsHi = params[\"numberOfPoints\"] >> 8\n\t\t\tmsgSum = msgSum + self.numberOfPointsHi\n\t\t\tself.numberOfPointsLo = params[\"numberOfPoints\"] & 0x00FF\n\t\t\tmsgSum = msgSum + self.numberOfPointsLo\n\t\t\t\n\t\telif self.functionCode == 5 :\t\t# Preset Single Coil\n\t\t\tself.presetAddressHi = params[\"presetAddress\"] >> 8\n\t\t\tmsgSum = msgSum + self.presetAddressHi\n\t\t\tself.presetAddressLo = params[\"presetAddress\"] & 0x00FF\n\t\t\tmsgSum = msgSum + self.presetAddressLo\t\t\t\n\t\t\tcoil_status = self.read_coil_status(params[\"presetAddress\"])\n\t\t\t\n\t\t\tif coil_status != 0 :\n\t\t\t\tself.presetDataHi = 0xFF\n\t\t\t\tself.presetDataLo = 0x00\n\t\t\telse :\n\t\t\t\tself.presetDataHi = 0x00\n\t\t\t\tself.presetDataLo = 0x00\n\n\t\t\tmsgSum = msgSum + self.presetDataHi + self.presetDataLo\n\n\t\telif self.functionCode == 6 : \t\t# Preset Single Register\n\t\t\tself.presetAddressHi = params[\"presetAddress\"] >> 8\n\t\t\tmsgSum = msgSum + self.presetAddressHi\n\t\t\tself.presetAddressLo = params[\"presetAddress\"] & 0x00FF\n\t\t\tmsgSum = msgSum + self.presetAddressLo\n\t\t\t\n\t\t\tregister_word_data = self.read_holding_register(params[\"presetAddress\"])\n\t\t\tprint(\"Register word data = \",register_word_data)\n\t\t\tself.presetDataHi = register_word_data >> 8 \n\t\t\tself.presetDataLo = register_word_data & 0x00FF\n\n\t\t\tmsgSum = msgSum + self.presetDataHi + self.presetDataLo\n\n\t\telif self.functionCode == 15 : \t\t# Preset Multiple Coils\n\t\t\tself.presetAddressHi = params[\"presetAddress\"] >> 8\n\t\t\tmsgSum = msgSum + self.presetAddressHi\n\t\t\tself.presetAddressLo 
= params[\"presetAddress\"] & 0x00FF\n\t\t\tmsgSum = msgSum + self.presetAddressLo\n\t\t\tself.numberPresetHi = params[\"numberPreset\"] >> 8\n\t\t\tmsgSum = msgSum + self.numberPresetHi\n\t\t\tself.numberPresetLo = params[\"numberPreset\"] & 0x00FF\n\t\t\tmsgSum = msgSum + self.numberPresetLo\n\n\t\t\tself.presetDataHi = []\n\t\t\tself.presetDataLo = []\n\t\t\t\n\t\t\tbyteOffset = 0\n\t\t\tbitOffset = 0\n\t\t\tn_coils = params[\"numberPreset\"]\n\t\t\tcurrcoilAddress = params[\"presetAddress\"]\n\t\t\tcurrcoilWord = 0\n\t\t\twhile currcoilAddress < params[\"presetAddress\"] + n_coils :\n\t\t\t\tcurr_coil_status = self.read_coil_status(currcoilAddress)\n\t\t\t\tcurrcoilWord = currcoilWord | ( curr_coil_status << bitOffset)\n\t\t\t\tbitOffset = bitOffset + 1\n\t\t\t\tif bitOffset == 8 :\n\t\t\t\t\tbitOffset = 0\n\t\t\t\t\tif byteOffset % 2 == 0 :\n\t\t\t\t\t\tself.presetDataHi.append(currcoilWord & 0xFF)\n\t\t\t\t\telse :\n\t\t\t\t\t\tself.presetDataLo.append(currcoilWord & 0xFF)\n\t\t\t\t\tbyteOffset = byteOffset + 1\n\t\t\t\t\tcurrcoilWord = 0\n\t\t\t\tcurrcoilAddress = currcoilAddress + 1\n\t\t\tif bitOffset > 0 :\n\t\t\t\tif byteOffset % 2 == 0 :\n\t\t\t\t\tself.presetDataHi.append(currcoilWord)\n\t\t\t\telse :\n\t\t\t\t\tself.presetDataLo.append(currcoilWord)\n\t\t\t\t\n\t\t\tself.byteCount = len(self.presetDataHi) + len(self.presetDataLo)\n\t\t\tmsgSum = msgSum + self.byteCount + sum(self.presetDataHi) + sum(self.presetDataLo)\n\n\t\telif self.functionCode == 16 :\n\t\t\tself.presetAddressHi = params[\"presetAddress\"] >> 8\n\t\t\tmsgSum = msgSum + self.presetAddressHi\n\t\t\tself.presetAddressLo = params[\"presetAddress\"] & 0x00FF\n\t\t\tmsgSum = msgSum + self.presetAddressLo\n\t\t\tself.numberPresetHi = params[\"numberPreset\"] >> 8\n\t\t\tmsgSum = msgSum + self.numberPresetHi\n\t\t\tself.numberPresetLo = params[\"numberPreset\"] & 0x00FF\n\t\t\tmsgSum = msgSum + self.numberPresetLo\n\n\t\t\tself.presetDataHi = []\n\t\t\tself.presetDataLo = []\n\t\t\t\n\t\t\tn_regs = params[\"numberPreset\"]\n\t\t\tcurrregAddress = params[\"presetAddress\"]\n\t\t\twhile currregAddress < params[\"presetAddress\"] + n_regs :\n\t\t\t\tcurr_reg_status = self.read_holding_register(currregAddress)\n\t\t\t\tself.presetDataHi.append(curr_reg_status >> 8)\n\t\t\t\tself.presetDataLo.append(curr_reg_status & 0x00FF)\n\t\t\t\tcurrregAddress = currregAddress + 1\n\t\t\t\t\n\t\t\tself.byteCount = len(self.presetDataHi) + len(self.presetDataLo)\n\t\t\tmsgSum = msgSum + self.byteCount + sum(self.presetDataHi) + sum(self.presetDataLo)\n\n\t\tself.msgCRC = msgSum & 0xFFFF\n\t\tself.msgCRC = self.msgCRC%256\n\n\tdef construct_request_message(self,params) :\n\t\t\n\t\tself.load_msg_params(params)\n\t\tself.msg = bytearray()\n\t\tself.msg.append(self.slaveAddress)\n\t\tself.msg.append(self.transaction_id)\n\t\tself.msg.append(self.functionCode)\n\t\tif self.functionCode in [1,2,3,4] :\n\t\t\tself.msg.append(self.startingAddressHi)\n\t\t\tself.msg.append(self.startingAddressLo)\n\t\t\tself.msg.append(self.numberOfPointsHi)\n\t\t\tself.msg.append(self.numberOfPointsLo)\n\t\t\tself.msg.append(self.msgCRC)\n\t\telif self.functionCode == 5 or self.functionCode == 6 :\n\t\t\tself.msg.append(self.presetAddressHi)\n\t\t\tself.msg.append(self.presetAddressLo)\n\t\t\tself.msg.append(self.presetDataHi)\n\t\t\tself.msg.append(self.presetDataLo)\n\t\t\tself.msg.append(self.msgCRC)\n\t\telse 
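The `msgCRC` computed above is a plain byte sum truncated to one byte, which is fine as an internal checksum but is not what Modbus RTU frames carry on the wire: those end with a 16-bit CRC (reflected polynomial 0xA001, initial value 0xFFFF, low byte first). A reference sketch of that checksum, independent of the class above:

```python
def crc16_modbus(frame: bytes) -> int:
    # CRC-16/MODBUS: init 0xFFFF, reflected polynomial 0xA001.
    crc = 0xFFFF
    for byte in frame:
        crc ^= byte
        for _ in range(8):
            if crc & 1:
                crc = (crc >> 1) ^ 0xA001
            else:
                crc >>= 1
    return crc

if __name__ == "__main__":
    # Well-known check value: CRC of b"123456789" is 0x4B37.
    assert crc16_modbus(b"123456789") == 0x4B37
    # The CRC is appended low byte first in an RTU frame.
    frame = bytes([0x01, 0x03, 0x00, 0x00, 0x00, 0x0A])
    crc = crc16_modbus(frame)
    print((frame + bytes([crc & 0xFF, crc >> 8])).hex())
```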
:\n\t\t\tself.msg.append(self.presetAddressHi)\n\t\t\tself.msg.append(self.presetAddressLo)\n\t\t\tself.msg.append(self.numberPresetHi)\n\t\t\tself.msg.append(self.numberPresetLo)\n\t\t\tself.msg.append(self.byteCount)\n\t\t\ti = 0\n\t\t\twhile i < self.byteCount :\n\t\t\t\tif i % 2 == 0 :\n\t\t\t\t\tself.msg.append(self.presetDataHi[int(i/2)])\n\t\t\t\telse :\n\t\t\t\t\tself.msg.append(self.presetDataLo[int(i/2)])\n\t\t\t\ti = i + 1\n\t\t\tself.msg.append(self.msgCRC)\n\n\t\treturn self.msg","repo_name":"Vignesh2208/PLCNet","sub_path":"awlsim/modbus/modbus_msg.py","file_name":"modbus_msg.py","file_ext":"py","file_size_in_byte":7121,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
{"seq_id":"22558599769","text":"\nfrom ....responses import RedirectResponse, TemplateResponse\nfrom ....templates import Template\nfrom ....values import AbsoluteUrl, StateValue\nfrom ....views import View\n\nfrom ...responses import SaveForm\nfrom ...forms import FormFactory\nfrom ...methods import GetFormHandler, PostFormHandler\nfrom ...templates import StateFormContext\n\n\ndef form_view(\n    template_name,\n    context,\n    form,\n    form_args,\n    action=None\n):\n\n    if action is None:\n        action = SaveForm()\n\n    form_factory = FormFactory(form, form_args)\n\n    template = Template(template_name, context)\n\n    return View(\n        GetFormHandler(form_factory, template),\n        PostFormHandler(\n            form_factory,\n            RedirectResponse(\n                AbsoluteUrl(StateValue('object')),\n                action\n            ),\n            TemplateResponse(template, StateFormContext())\n        )\n    )\n","repo_name":"michaeljones/django-scenic","sub_path":"scenic/compat/build/generic/edit.py","file_name":"edit.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"21225392856","text":"# 1. Implement a stack with a get_min method that returns the current minimum value on the stack.\n# The stack must support push, pop and get_min, and every operation must run in O(1) time.\n\n\nclass MinStack:\n    # Use two stacks; min_stack keeps the smallest value seen so far at its top\n    #\n    def __init__(self):\n        self.stack = []\n        self.min_stack = []\n\n    def push(self, x):\n        self.stack.append(x)\n        if len(self.min_stack) == 0 or x <= self.min_stack[-1]:\n            self.min_stack.append(x)\n\n    def pop(self):\n        if len(self.stack) == 0:\n            return None\n        else:\n            if self.stack[-1] == self.min_stack[-1]:\n                self.min_stack.pop()\n            return self.stack.pop()\n\n    def top(self):\n        if len(self.stack) == 0:\n            return None\n        else:\n            return self.stack[-1]\n\n    def get_min(self):\n        if len(self.min_stack) == 0:\n            return None\n        else:\n            return self.min_stack[-1]\n\n# 2. Given two strings, print every string formed by interleaving them while preserving the relative order of characters within each string.\n# For example:\n# Input \"AB\" and \"CD\"; output:\n# ABCD, ACBD, ACDB, CABD, CADB, CDAB\n\ndef print_interval_strings(s1, s2, so_far):\n    if len(s1) == 0 and len(s2) == 0:\n        return\n    if not s1:\n        print(so_far+s2)\n        return\n    if not s2:\n        print(so_far+s1)\n        return\n    print_interval_strings(s1[1:], s2, so_far+s1[0])\n    print_interval_strings(s1, s2[1:], so_far+s2[0])\n\nprint_interval_strings('AB', 'CD', '')","repo_name":"magedus/python-11","sub_path":"Answers/week21/job21.py","file_name":"job21.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"zh","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"}
{"seq_id":"71129482756","text":"# Title: factorial computation problem\n# Author: Ji Seungmin, Dept. of Computer Engineering\n# Date: 2023.09.21\n\ncount_r = 0\ncount_i = 0\n\n\ndef factorial(n):\n    global count_r\n    count_r += 1\n    if n == 0:\n        return 1\n    else:\n        return n * factorial(n-1)\n\n\ndef factorial_iter(n):\n    global count_i\n    count_i += 1\n    result = 1\n
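A quick usage check for the two-stack minimum tracker above. The subtle point is the `<=` in `push`: a duplicate of the current minimum must also be recorded on `min_stack`, otherwise the first pop of that value would lose the minimum. The class below mirrors the record's design so the example is self-contained:

```python
class MinStack:
    # Two-stack design: min_stack records every value that is <= the
    # current minimum, so duplicate minima survive a single pop.
    def __init__(self):
        self.stack, self.min_stack = [], []

    def push(self, x):
        self.stack.append(x)
        if not self.min_stack or x <= self.min_stack[-1]:
            self.min_stack.append(x)

    def pop(self):
        if not self.stack:
            return None
        if self.stack[-1] == self.min_stack[-1]:
            self.min_stack.pop()
        return self.stack.pop()

    def get_min(self):
        return self.min_stack[-1] if self.min_stack else None

if __name__ == "__main__":
    s = MinStack()
    for x in (5, 3, 3, 7):
        s.push(x)
    assert s.get_min() == 3
    s.pop()  # 7
    s.pop()  # first 3: the duplicate keeps the minimum alive
    assert s.get_min() == 3
    s.pop()
    assert s.get_min() == 5
```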
    for k in range(1, n+1):\n        result = result*k\n    return result\n\n\nn = int(input(\"Enter an integer:\"))\n\nprint(f\"Recursive Factorial({n})={factorial(n)} call count: {count_r}\")\nprint(f\"Iterative Factorial({n})={factorial_iter(n)} call count: {count_i}\")\n","repo_name":"jiseungmin/Algorithm","sub_path":"Class/0921/Jiseungmin_0921_02.py","file_name":"Jiseungmin_0921_02.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"388714722","text":"import numpy as np\r\nimport cv2\r\n\r\ndrawing = False\r\nix, iy = -1, -1\r\n\r\nx_up, x_down, y_up, y_down = 0, 0, 0, 0\r\n\r\ndef onMoush(event, x, y, flags, param):\r\n    global ix, iy, drawing, x_up, x_down, y_up, y_down\r\n\r\n    if event == cv2.EVENT_LBUTTONDOWN:\r\n        drawing = True\r\n        ix, iy = x, y\r\n        x_up, y_up = x, y\r\n    elif event == cv2.EVENT_MOUSEMOVE:\r\n        if drawing:\r\n            cv2.rectangle(param, (ix, iy), (x, y), (0, 0, 0), -1)\r\n    elif event == cv2.EVENT_LBUTTONUP:\r\n        drawing = False\r\n        cv2.rectangle(param, (ix, iy), (x, y), (0, 0, 0), -1)\r\n        x_down, y_down = x, y\r\n\r\ndef moushBrush(img_path):\r\n    img = cv2.imread(img_path)\r\n    cv2.namedWindow('draw_box')\r\n    cv2.setMouseCallback('draw_box', onMoush, param=img)\r\n\r\n    while True:\r\n        cv2.imshow('draw_box', img)\r\n        k = cv2.waitKey(1) & 0xFF\r\n\r\n        if k == 27:\r\n            break\r\n\r\n    cv2.destroyAllWindows()\r\n\r\nif __name__ == \"__main__\":\r\n    moushBrush('data/overwatch/frame00617.jpg')\r\n    print(\"(%f, %f), (%f, %f)\" % (x_up, y_up, x_down, y_down))\r\n    print(\"X %.2f Y %.2f\" % ((x_up + x_down) / 2, (y_up + y_down) / 2))","repo_name":"SIGMAOON/graduation","sub_path":"make_coordinate.py","file_name":"make_coordinate.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"16216281859","text":"def sub_str_count(s):\n    nums = 0\n    for i in range(1, len(s)+1):\n        for j in range(len(s)-i+1):\n            sub_string = s[j:j+i]\n            if len(set(sub_string)) == 1:\n                nums += 1\n            elif (len(set(sub_string)) == 2) and (sub_string[::-1] == sub_string):\n                nums += 1\n    return nums\n\n\ndef sub_string_count(s):\n    nums = 0\n    n = len(s)\n    sameChar = [0] * n\n    i = 0\n    while i < n:\n        sameCharNum = 1\n        j = i+1\n        while j < n:\n            if s[i] != s[j]:\n                break\n            sameCharNum += 1\n            j += 1\n        nums += int(sameCharNum*(sameCharNum+1)/2)\n        sameChar[i] = sameCharNum\n        i = j\n    print(sameChar)\n    for j in range(1, n):\n        if s[j] == s[j-1]:\n            sameChar[j] = sameChar[j-1]\n        if (0 < j < n - 1) and s[j-1] == s[j+1] and s[j] != s[j-1]:\n            nums += min(sameChar[j-1], sameChar[j+1])\n    print(sameChar)\n    print(s)\n    return nums\n\n\nif __name__ == '__main__':\n    string = 'aabaaa'\n    print(sub_string_count(string))","repo_name":"xiaoxue11/hank_practice","sub_path":"String/03_substrCount.py","file_name":"03_substrCount.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"32408793415","text":"import urllib.request\nimport json\nimport dml\nimport prov.model\nimport datetime\nimport uuid\n\nclass propetymap(dml.Algorithm):\n    contributor = 'lc546_jofranco'\n    reads = []\n    writes = ['lc546_jofranco.propety']\n\n    #longitude = []\n    #latitude = []\n    @staticmethod\n    def execute(trial = False):\n        startTime = datetime.datetime.now()\n        # Set up the database connection.\n        client = dml.pymongo.MongoClient()\n        repo = client.repo\n        repo.authenticate(\"lc546_jofranco\", \"lc546_jofranco\")\n
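The O(n) pass in `sub_string_count` above credits each maximal run of k equal characters with k(k+1)/2 substrings before handling the aba-style middles. A brute-force cross-check of that run formula on random strings:

```python
import random
from itertools import groupby

def uniform_substrings_fast(s: str) -> int:
    # Each maximal run of k equal characters contributes k*(k+1)//2
    # substrings of the form "a", "aa", "aaa", ...
    return sum(k * (k + 1) // 2
               for k in (len(list(g)) for _, g in groupby(s)))

def uniform_substrings_brute(s: str) -> int:
    n = len(s)
    return sum(1 for i in range(n) for j in range(i + 1, n + 1)
               if len(set(s[i:j])) == 1)

if __name__ == "__main__":
    random.seed(1)
    for _ in range(200):
        s = "".join(random.choice("ab") for _ in range(random.randint(0, 12)))
        assert uniform_substrings_fast(s) == uniform_substrings_brute(s)
    print("run formula verified")
```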
        url = 'https://data.boston.gov/export/cec/df0/cecdf003-9348-4ddb-94e1-673b63940bb8.json'\n        response = urllib.request.urlopen(url).read().decode(\"utf-8\")\n        r = json.loads(response)\n\n        zipcode = []\n        lalo = []\n        street = []\n        for i in r:\n            zipcode.append(i['zipcode'][0:5])\n            lalo.append([i['latitude'], i['longitude']])\n            street.append(i['st_name'])\n\n        total = {'zipcode': zipcode, 'address': lalo, 'street': street}\n        # print(total)\n\n\n\n        s = json.dumps(r, sort_keys= True, indent = 2)\n        # print(type(s))\n        repo.dropCollection(\"propety\")\n        repo.createCollection(\"propety\")\n        repo[\"lc546_jofranco.propety\"].insert_many([total])\n        repo[\"lc546_jofranco.propety\"].metadata({'complete':True})\n        print(repo[\"lc546_jofranco.propety\"].metadata())\n        repo.logout()\n        endTime = datetime.datetime.now()\n        return {\"start\":startTime, \"end\":endTime}\n    @staticmethod\n    def provenance(doc = prov.model.ProvDocument(), startTime = None, endTime = None):\n        client = dml.pymongo.MongoClient()\n        repo = client.repo\n        repo.authenticate(\"lc546_jofranco\", \"lc546_jofranco\")\n        doc.add_namespace('alg', 'http://datamechanics.io/algorithm/') # The scripts are in # format.\n        doc.add_namespace('dat', 'http://datamechanics.io/data/') # The data sets are in # format.\n        doc.add_namespace('ont', 'http://datamechanics.io/ontology#') # 'Extension', 'DataResource', 'DataSet', 'Retrieval', 'Query', or 'Computation'.\n        doc.add_namespace('log', 'http://datamechanics.io/log/') # The event log.\n        doc.add_namespace('bdp', 'https://secure.thepropety.com/data/')\n        this_script = doc.agent('alg:lc546_jofranco#propety', {prov.model.PROV_TYPE:prov.model.PROV['SoftwareAgent'], 'ont:Extension':'py'})\n        resource = doc.entity('bdp:xgbq-327x', {'prov:label':'propety', prov.model.PROV_TYPE:'ont:DataResource', 'ont:Extension':'json'})\n        get_propetyinfo = doc.activity('log:uuid'+str(uuid.uuid4()), startTime, endTime, {prov.model.PROV_LABEL:'propety', prov.model.PROV_TYPE:'ont:DataSet'})\n        doc.wasAssociatedWith(get_propetyinfo, this_script)\n        doc.usage(get_propetyinfo, resource, startTime)\n        propetyinfo = doc.entity('dat:lc546_jofranco#propetyinfo', {prov.model.PROV_LABEL:'propety info', prov.model.PROV_TYPE:'ont:DataSet'})\n        doc.wasAttributedTo(propetyinfo, this_script)\n        doc.wasGeneratedBy(propetyinfo, get_propetyinfo, endTime)\n        doc.wasDerivedFrom(propetyinfo, resource, get_propetyinfo, get_propetyinfo, get_propetyinfo)\n        return doc\n\n\n\npropetymap.execute()\ndoc = propetymap.provenance()\nprint(doc.get_provn())\nprint(json.dumps(json.loads(doc.serialize()), indent=4))\n","repo_name":"data-mechanics/course-2017-fal-proj","sub_path":"lc546_jofranco/propetymap.py","file_name":"propetymap.py","file_ext":"py","file_size_in_byte":3338,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
{"seq_id":"10852037187","text":"from tkinter import *\r\nfrom tkinter import Toplevel,messagebox,filedialog\r\nfrom tkinter.ttk import Treeview\r\nfrom tkinter import ttk\r\nimport pandas\r\nimport pymysql\r\nimport time\r\nimport random\r\n\r\ndef addstudent():\r\n    def submitadd():\r\n        id = idval.get()\r\n        fullname = fullnameval.get()\r\n        username = usernameval.get()\r\n        studentId = studentIdval.get()\r\n        extraInfo = extraInfoval.get()\r\n        updateddate = time.strftime(\"%d/%m/%Y\")\r\n        addeddate = time.strftime(\"%d/%m/%Y\")\r\n        try:\r\n            strr = 'insert into studentdata1 values(%s,%s,%s,%s,%s,%s,%s)'\r\n            mycursor.execute(strr,(id,fullname,username,studentId,extraInfo,addeddate,updateddate))\r\n            con.commit()\r\n            res = 
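The collection loop above gathers zip codes, coordinates and street names into parallel lists. Where per-zip lookups are needed instead, a `defaultdict` keyed on the 5-digit prefix keeps the fields together; a sketch on hypothetical rows shaped like the Boston export:

```python
from collections import defaultdict

# Hypothetical rows shaped like the export used above.
rows = [
    {"zipcode": "02134-1001", "latitude": "42.35", "longitude": "-71.13", "st_name": "Ashford"},
    {"zipcode": "02134", "latitude": "42.36", "longitude": "-71.12", "st_name": "Brighton"},
    {"zipcode": "02215", "latitude": "42.34", "longitude": "-71.10", "st_name": "Beacon"},
]

by_zip = defaultdict(list)
for row in rows:
    by_zip[row["zipcode"][:5]].append(
        {"address": [row["latitude"], row["longitude"]], "street": row["st_name"]}
    )

assert len(by_zip["02134"]) == 2
print(dict(by_zip))
```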
messagebox.askyesnocancel('Notificatrions','Id {} Fullname {} Added sucessfully.. and want to clean the form'.format(id,fullname),parent=addroot)\r\n if(res==True):\r\n idval.set('')\r\n fullnameval.set('')\r\n usernameval.set('')\r\n studentIdval.set('')\r\n extraInfoval.set('')\r\n except:\r\n messagebox.showerror('Notifications','Id Already Exist try another id...',parent=addroot)\r\n strr = 'select * from studentdata1'\r\n mycursor.execute(strr)\r\n datas = mycursor.fetchall()\r\n studenmttable.delete(*studenmttable.get_children())\r\n for i in datas:\r\n vv = [i[0],i[1],i[2],i[3],i[4],i[5],i[6]]\r\n studenmttable.insert('',END,values=vv)\r\n\r\n addroot = Toplevel(master=root)\r\n addroot.grab_set()\r\n addroot.geometry('500x350+600+200')\r\n addroot.title(\"student bazaga ma'lumot qo'shish\")\r\n addroot.config(bg='#8C92AC')\r\n addroot.iconbitmap('./img/student.ico')\r\n addroot.resizable(False,False)\r\n\r\n #Text in center\r\n malumot_label = Label(addroot, text=\"Ma'lumot qo'shish\",font = ('calibre',15,'normal'),bg='#8C92AC',relief=FLAT)\r\n malumot_label.place(x=10, y=10)\r\n #--------------------------------------------------- Add studenmt Labels\r\n idlabel = Label(addroot,text='ID',font = ('calibre',12,'normal'),bg='#8C92AC',relief=FLAT)\r\n idlabel.place(x=300,y=50)\r\n full_name_label = Label(addroot, text=\"Full Name\",font = ('calibre',12,'normal'),bg='#8C92AC',relief=FLAT)\r\n full_name_label.place(x=300, y=100)\r\n username_label = Label(addroot, text=\"Username\",font = ('calibre',12,'normal'),bg='#8C92AC',relief=FLAT)\r\n username_label.place(x=300, y=150)\r\n student_id_label = Label(addroot, text=\"student ID\",font = ('calibre',12,'normal'),bg='#8C92AC',relief=FLAT)\r\n student_id_label.place(x=300, y=200)\r\n extra_info_label = Label(addroot, text=\"Qo'shimcha ma'lumot\",font = ('calibre',12,'normal'),bg='#8C92AC',relief=FLAT)\r\n extra_info_label.place(x=300, y=250)\r\n\r\n ##----------------------------------------------------------- Add student Entry\r\n idval = StringVar()\r\n fullnameval = StringVar()\r\n usernameval = StringVar()\r\n studentIdval = StringVar()\r\n extraInfoval = StringVar()\r\n\r\n identry = Entry(addroot,width=30, font = ('calibre',13,'normal'),textvariable=idval)\r\n identry.place(x=10,y=50)\r\n full_name = Entry(addroot, width=30,font = ('calibre',13,'normal'), textvariable=fullnameval)\r\n full_name.place(x=10, y=100)\r\n username = Entry(addroot, width=30,font = ('calibre',13,'normal'), textvariable=usernameval)\r\n username.place(x=10, y=150)\r\n student_id = Entry(addroot, width=30,font = ('calibre',13,'normal'), textvariable=studentIdval )\r\n student_id.place(x=10, y=200)\r\n extra_info = Entry(addroot, width=30,font = ('calibre',13,'normal'), textvariable=extraInfoval)\r\n extra_info.place(x=10, y=250) \r\n ############------------------------- add button\r\n fayl_qoshish_btn = Button(addroot, text=\"Fayl Qo'shish\",font = ('calibre',10,'normal'), borderwidth=1, bg='#008000', fg='#ffffff')\r\n fayl_qoshish_btn.place(x=10, y=300)\r\n\r\n\r\n qoshish_haqida_label = Label(addroot, text=(\"Tanlangan fayl: \"),font = ('calibre',10,'normal'),bg='#8C92AC',relief=FLAT)\r\n qoshish_haqida_label.place(x=130, y=300)\r\n\r\n qoshish_btn = Button(addroot, text=\"Qo'shish\",font = ('calibre',12,'normal'), borderwidth=1, bg='#008000', fg='#ffffff', command=submitadd)\r\n qoshish_btn.place(x=350, y=300)\r\n\r\n\r\n\r\n addroot.mainloop()\r\n\r\ndef searchstudent():\r\n def search():\r\n id = idval.get()\r\n fullname = fullnameval.get()\r\n 
username = usernameval.get()\r\n studentId = studentIdval.get()\r\n extraInfo = extraInfoval.get()\r\n addeddate = time.strftime(\"%d/%m/%Y\")\r\n if(id != ''):\r\n strr = 'select *from studentdata1 where id=%s'\r\n mycursor.execute(strr,(id))\r\n datas = mycursor.fetchall()\r\n studenmttable.delete(*studenmttable.get_children())\r\n for i in datas:\r\n vv = [i[0], i[1], i[2], i[3], i[4], i[5], i[6]]\r\n studenmttable.insert('', END, values=vv)\r\n elif(fullname != ''):\r\n strr = 'select *from studentdata1 where name=%s'\r\n mycursor.execute(strr,(fullname))\r\n datas = mycursor.fetchall()\r\n studenmttable.delete(*studenmttable.get_children())\r\n for i in datas:\r\n vv = [i[0], i[1], i[2], i[3], i[4], i[5], i[6]]\r\n studenmttable.insert('', END, values=vv)\r\n elif(username != ''):\r\n strr = 'select *from studentdata1 where username=%s'\r\n mycursor.execute(strr,(username))\r\n datas = mycursor.fetchall()\r\n studenmttable.delete(*studenmttable.get_children())\r\n for i in datas:\r\n vv = [i[0], i[1], i[2], i[3], i[4], i[5], i[6]]\r\n studenmttable.insert('', END, values=vv)\r\n elif(studentId != ''):\r\n strr = 'select *from studentdata1 where studentId=%s'\r\n mycursor.execute(strr,(studentId))\r\n datas = mycursor.fetchall()\r\n studenmttable.delete(*studenmttable.get_children())\r\n for i in datas:\r\n vv = [i[0], i[1], i[2], i[3], i[4], i[5], i[6]]\r\n studenmttable.insert('', END, values=vv)\r\n elif(extraInfo != ''):\r\n strr = 'select *from studentdata1 where extraInfo=%s'\r\n mycursor.execute(strr,(extraInfo))\r\n datas = mycursor.fetchall()\r\n studenmttable.delete(*studenmttable.get_children())\r\n for i in datas:\r\n vv = [i[0], i[1], i[2], i[3], i[4], i[5], i[6]]\r\n studenmttable.insert('', END, values=vv)\r\n \r\n elif(addeddate != ''):\r\n strr = 'select *from studentdata1 where addeddate=%s'\r\n mycursor.execute(strr,(addeddate))\r\n datas = mycursor.fetchall()\r\n studenmttable.delete(*studenmttable.get_children())\r\n for i in datas:\r\n vv = [i[0], i[1], i[2], i[3], i[4], i[5], i[6]]\r\n studenmttable.insert('', END, values=vv)\r\n\r\n searchroot = Toplevel(master=root)\r\n searchroot.grab_set()\r\n searchroot.geometry('470x540+220+200')\r\n searchroot.title('Student Management System')\r\n searchroot.config(bg='firebrick1')\r\n # searchroot.iconbitmap('mana.ico')\r\n searchroot.resizable(False,False)\r\n #--------------------------------------------------- Add studenmt Labels\r\n idlabel = Label(searchroot,text='Enter Id : ',bg='gold2',font=('times',20,'bold'),relief=FLAT,borderwidth=3,width=12,anchor='w')\r\n idlabel.place(x=10,y=10)\r\n\r\n namelabel = Label(searchroot,text='Enter Name : ',bg='gold2',font=('times',20,'bold'),relief=GROOVE,borderwidth=3,width=12,anchor='w')\r\n namelabel.place(x=10,y=70)\r\n\r\n usernamelabel = Label(searchroot,text='Enter Username : ',bg='gold2',font=('times',20,'bold'),relief=GROOVE,borderwidth=3,width=12,anchor='w')\r\n usernamelabel.place(x=10,y=130)\r\n\r\n studentIdlabel = Label(searchroot,text='Enter studentId : ',bg='gold2',font=('times',20,'bold'),relief=GROOVE,borderwidth=3,width=12,anchor='w')\r\n studentIdlabel.place(x=10,y=190)\r\n\r\n extraInfolabel = Label(searchroot,text='Enter ExtraInfo : ',bg='gold2',font=('times',20,'bold'),relief=GROOVE,borderwidth=3,width=12,anchor='w')\r\n extraInfolabel.place(x=10,y=250)\r\n\r\n datelabel = Label(searchroot,text='Enter Date : ',bg='gold2',font=('times',20,'bold'),relief=GROOVE,borderwidth=3,width=12,anchor='w')\r\n datelabel.place(x=10,y=430)\r\n\r\n 
    ##----------------------------------------------------------- Add student Entry\r\n    idval = StringVar()\r\n    fullnameval = StringVar()\r\n    usernameval = StringVar()\r\n    studentIdval = StringVar()\r\n    extraInfoval = StringVar() \r\n    dateval = StringVar()\r\n\r\n    identry = Entry(searchroot,font=('roman',15,'bold'),bd=5,textvariable=idval)\r\n    identry.place(x=250,y=10)\r\n\r\n    nameentry = Entry(searchroot,font=('roman',15,'bold'),bd=5,textvariable=fullnameval)\r\n    nameentry.place(x=250,y=70)\r\n\r\n    usernamevalentry = Entry(searchroot,font=('roman',15,'bold'),bd=5,textvariable=usernameval)\r\n    usernamevalentry.place(x=250,y=130)\r\n\r\n    studentIdentry = Entry(searchroot,font=('roman',15,'bold'),bd=5,textvariable=studentIdval)\r\n    studentIdentry.place(x=250,y=190)\r\n\r\n    extraInfoentry = Entry(searchroot,font=('roman',15,'bold'),bd=5,textvariable=extraInfoval)\r\n    extraInfoentry.place(x=250,y=250)\r\n\r\n    dateentry = Entry(searchroot,font=('roman',15,'bold'),bd=5,textvariable=dateval)\r\n    dateentry.place(x=250,y=430)\r\n    ############------------------------- add button\r\n    submitbtn = Button(searchroot,text='Submit',font=('roman',15,'bold'),width=20,bd=5,activebackground='blue',activeforeground='white',\r\n                       bg='red',command=search)\r\n    submitbtn.place(x=150,y=480)\r\n\r\n\r\n\r\n    searchroot.mainloop()\r\ndef deletestudent():\r\n    cc = studenmttable.focus()\r\n    content = studenmttable.item(cc)\r\n    pp = content['values'][0]\r\n    strr = 'delete from studentdata1 where id=%s'\r\n    mycursor.execute(strr,(pp))\r\n    con.commit()\r\n    messagebox.showinfo('Notifications','Id {} deleted successfully...'.format(pp))\r\n    strr = 'select *from studentdata1'\r\n    mycursor.execute(strr)\r\n    datas = mycursor.fetchall()\r\n    studenmttable.delete(*studenmttable.get_children())\r\n    for i in datas:\r\n        vv = [i[0], i[1], i[2], i[3], i[4], i[5], i[6]]\r\n        studenmttable.insert('', END, values=vv)\r\n\r\n\r\ndef updatestudent():\r\n    def update():\r\n        id = idval.get()\r\n        fullname = fullnameval.get()\r\n        username = usernameval.get()\r\n        studentId = studentIdval.get()\r\n        extraInfo = extraInfoval.get()\r\n        date = dateval.get()\r\n        udate = udateval.get()\r\n\r\n        strr = 'update studentdata1 set fullname=%s,username=%s,studentId=%s,extraInfo=%s,date=%s,udate=%s where id=%s'\r\n        mycursor.execute(strr,(fullname,username,studentId,extraInfo,date,udate,id))\r\n        con.commit()\r\n        messagebox.showinfo('Notifications', 'Id {} Modified successfully...'.format(id),parent=updateroot)\r\n        strr = 'select *from studentdata1'\r\n        mycursor.execute(strr)\r\n        datas = mycursor.fetchall()\r\n        studenmttable.delete(*studenmttable.get_children())\r\n        for i in datas:\r\n            vv = [i[0], i[1], i[2], i[3], i[4], i[5], i[6]]\r\n            studenmttable.insert('', END, values=vv)\r\n\r\n    updateroot = Toplevel(master=root)\r\n    updateroot.grab_set()\r\n    updateroot.geometry('500x350+600+200')\r\n    updateroot.title(\"student bazada ma'lumot o'zgartirish\")\r\n    updateroot.config(bg='#8C92AC')\r\n    updateroot.iconbitmap('./img/student.ico')\r\n    updateroot.resizable(False,False)\r\n\r\n    #Text in center\r\n    malumot_label = Label(updateroot, text=\"Ma'lumot o'zgartirish\",font = ('calibre',15,'normal'),bg='#8C92AC',relief=FLAT)\r\n    malumot_label.place(x=10, y=10)\r\n    #--------------------------------------------------- Add studenmt Labels\r\n    idlabel = Label(updateroot,text='ID',font = ('calibre',12,'normal'),bg='#8C92AC',relief=FLAT)\r\n    idlabel.place(x=300,y=50)\r\n
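Every CRUD handler in this script repeats the same fetch, clear and refill sequence on the Treeview. A small helper factors that out; it takes the cursor and table as parameters so it is importable on its own, and it assumes a pymysql cursor and the seven-column `studentdata1` layout used above:

```python
from tkinter import END

def refresh_table(cursor, table, query="select * from studentdata1", params=()):
    # Re-run the query and mirror the result set into the given Treeview.
    cursor.execute(query, params)
    table.delete(*table.get_children())
    for row in cursor.fetchall():
        table.insert("", END, values=list(row[:7]))
```

Each handler then reduces to a single call, e.g. `refresh_table(mycursor, studenmttable)` after a commit.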
    full_name_label = Label(updateroot, text=\"Full Name\",font = ('calibre',12,'normal'),bg='#8C92AC',relief=FLAT)\r\n    full_name_label.place(x=300, y=100)\r\n    username_label = Label(updateroot, text=\"Username\",font = ('calibre',12,'normal'),bg='#8C92AC',relief=FLAT)\r\n    username_label.place(x=300, y=150)\r\n    student_id_label = Label(updateroot, text=\"student ID\",font = ('calibre',12,'normal'),bg='#8C92AC',relief=FLAT)\r\n    student_id_label.place(x=300, y=200)\r\n    extra_info_label = Label(updateroot, text=\"Qo'shimcha ma'lumot\",font = ('calibre',12,'normal'),bg='#8C92AC',relief=FLAT)\r\n    extra_info_label.place(x=300, y=250)\r\n\r\n    ##----------------------------------------------------------- Add student Entry\r\n    idval = StringVar()\r\n    fullnameval = StringVar()\r\n    usernameval = StringVar()\r\n    studentIdval = StringVar()\r\n    extraInfoval = StringVar()\r\n    dateval = StringVar()\r\n    udateval = StringVar()\r\n\r\n    identry = Entry(updateroot,width=30, font = ('calibre',13,'normal'),textvariable=idval)\r\n    identry.place(x=10,y=50)\r\n    full_name = Entry(updateroot, width=30,font = ('calibre',13,'normal'), textvariable=fullnameval)\r\n    full_name.place(x=10, y=100)\r\n    username = Entry(updateroot, width=30,font = ('calibre',13,'normal'), textvariable=usernameval)\r\n    username.place(x=10, y=150)\r\n    student_id = Entry(updateroot, width=30,font = ('calibre',13,'normal'), textvariable=studentIdval )\r\n    student_id.place(x=10, y=200)\r\n    extra_info = Entry(updateroot, width=30,font = ('calibre',13,'normal'), textvariable=extraInfoval)\r\n    extra_info.place(x=10, y=250) \r\n    ############------------------------- add button\r\n    fayl_qoshish_btn = Button(updateroot, text=\"Fayl Qo'shish\",font = ('calibre',10,'normal'), borderwidth=1, bg='#008000', fg='#ffffff')\r\n    fayl_qoshish_btn.place(x=10, y=300)\r\n\r\n\r\n    qoshish_haqida_label = Label(updateroot, text=(\"Tanlangan fayl: \"),font = ('calibre',10,'normal'),bg='#8C92AC',relief=FLAT)\r\n    qoshish_haqida_label.place(x=130, y=300)\r\n\r\n    qoshish_btn = Button(updateroot, text=\"O'zgartirish\",font = ('calibre',12,'normal'), borderwidth=1, bg='#008000', fg='#ffffff', command=update)\r\n    qoshish_btn.place(x=350, y=300)\r\n\r\n    cc = studenmttable.focus()\r\n    content = studenmttable.item(cc)\r\n    pp = content['values']\r\n    if(len(pp) != 0):\r\n        idval.set(pp[0])\r\n        fullnameval.set(pp[1])\r\n        usernameval.set(pp[2])\r\n        studentIdval.set(pp[3])\r\n        extraInfoval.set(pp[4])\r\n        dateval.set(pp[5])\r\n        udateval.set(pp[6])\r\n\r\n    updateroot.mainloop()\r\ndef showstudent():\r\n    strr = 'select * from studentdata1'\r\n    mycursor.execute(strr)\r\n    datas = mycursor.fetchall()\r\n    studenmttable.delete(*studenmttable.get_children())\r\n    for i in datas:\r\n        vv = [i[0], i[1], i[2], i[3], i[4], i[5], i[6]]\r\n        studenmttable.insert('', END, values=vv)\r\n\r\ndef exportstudent():\r\n    ff = filedialog.asksaveasfilename()\r\n    gg = studenmttable.get_children()\r\n    id,fullname,username,studentId,extraInfo,addeddate,updateddate=[],[],[],[],[],[],[]\r\n    for i in gg:\r\n        content = studenmttable.item(i)\r\n        pp = content['values']\r\n        
id.append(pp[0]),fullname.append(pp[1]),username.append(pp[2]),studentId.append(pp[3]),extraInfo.append(pp[4]),\r\n addeddate.append(pp[5]),updateddate.append(pp[6])\r\n dd = ['Id','Fullname','Username','studentId','extraInfo','Added Date','Updated Date']\r\n df = pandas.DataFrame(list(zip(id,fullname,username,studentId,extraInfo,addeddate,updateddate)),columns=dd)\r\n paths = r'{}.csv'.format(ff)\r\n df.to_csv(paths,index=False)\r\n messagebox.showinfo('Notifications', 'Student data is Saved {}'.format(paths))\r\n\r\n\r\ndef exitstudent():\r\n res = messagebox.askyesnocancel('Notification','Do you want to exit?')\r\n if(res == True):\r\n root.destroy()\r\n\r\n\r\n###################################################################################Connecttion of Database\r\ndef Connectdb():\r\n def submitdb():\r\n global con,mycursor\r\n # host = hostval.get()\r\n # user = userval.get()\r\n # password = passwordval.get()\r\n host = 'localhost'\r\n user = 'root'\r\n password = 'root'\r\n try:\r\n con = pymysql.connect(host=host,user=user,password=password)\r\n mycursor = con.cursor()\r\n except:\r\n messagebox.showerror('Notifications','Data is incorrect please try again',parent=dbroot)\r\n return\r\n try:\r\n strr = 'create database studentmanagementsystem4'\r\n mycursor.execute(strr)\r\n strr = 'use studentmanagementsystem4'\r\n mycursor.execute(strr)\r\n strr = 'create table studentdata1(id int,fullname varchar(50),username varchar(50),studentId varchar(30),extraInfo varchar(100),date varchar(50),udate varchar(50))'\r\n mycursor.execute(strr)\r\n strr = 'alter table studentdata1 modify column id int not null'\r\n mycursor.execute(strr)\r\n strr = 'alter table studentdata1 modify column id int primary key'\r\n mycursor.execute(strr)\r\n messagebox.showinfo('Notification','database created and now you are connected connected to the database ....',parent=dbroot)\r\n \r\n \r\n \r\n\r\n except:\r\n strr = 'use studentmanagementsystem4'\r\n mycursor.execute(strr)\r\n messagebox.showinfo('Notification','Now you are connected to the database ....',parent=dbroot)\r\n strr = 'select * from studentdata1'\r\n mycursor.execute(strr)\r\n datas = mycursor.fetchall()\r\n studenmttable.delete(*studenmttable.get_children())\r\n for i in datas:\r\n vv = [i[0], i[1], i[2], i[3], i[4], i[5], i[6]]\r\n studenmttable.insert('', END, values=vv)\r\n dbroot.destroy()\r\n\r\n\r\n\r\n dbroot = Toplevel()\r\n dbroot.grab_set()\r\n dbroot.geometry('350x200+600+200')\r\n dbroot.iconbitmap('./img/student.ico')\r\n dbroot.config(bg='#8C92AC')\r\n dbroot.resizable(False,False)\r\n #-------------------------------Connectdb Labels\r\n \r\n # full_name_label = Label(addroot, text=\"Full Name\",font = ('calibre',12,'normal'))\r\n # full_name_label.place(x=300, y=100)\r\n\r\n hostlabel = Label(dbroot,text=\"Enter Host : \",font=('calibre',12,'normal'),bg='#8C92AC',relief=FLAT,anchor='w')\r\n hostlabel.place(x=10,y=10)\r\n\r\n userlabel = Label(dbroot,text=\"Enter User : \",font=('calibre',12,'normal'),bg='#8C92AC',relief=FLAT,anchor='w')\r\n userlabel.place(x=10,y=60)\r\n\r\n passwordlabel = Label(dbroot,text=\"Enter Password : \",font=('calibre',12,'normal'),bg='#8C92AC',relief=FLAT,anchor='w')\r\n passwordlabel.place(x=10,y=110)\r\n\r\n #-------------------------Connectdb Entry\r\n hostval = StringVar()\r\n userval = StringVar()\r\n passwordval = StringVar()\r\n hostentry = Entry(dbroot, width=20, font=('calibre',13,'normal'),textvariable=hostval)\r\n hostentry.place(x=150,y=10)\r\n\r\n userentry = 
Entry(dbroot,width=20,font=('calibre',13,'normal'),textvariable=userval)\r\n userentry.place(x=150,y=60)\r\n\r\n passwordentry = Entry(dbroot,width=20,font=('calibre',13,'normal'),textvariable=passwordval)\r\n passwordentry.place(x=150,y=110)\r\n\r\n #-------------------------------- Connectdb button\r\n submitbutton = Button(dbroot,text='Submit',font=('calibre',12,'normal'),width=10,borderwidth=1, bg='#008000', fg='#ffffff', command=submitdb)\r\n submitbutton.place(x=120,y=160)\r\n\r\n dbroot.mainloop()\r\n\r\n\r\n##########################################################################################################\r\n\r\n# root = Tk()\r\n# root.title('Student Management System')\r\n# root.config(bg='gold2')\r\n# root.geometry('1174x700+200+50')\r\n# # root.iconbitmap('mana.ico')\r\n# root.resizable(False,False)\r\n# ############################################################################################################ Frames\r\n# ##---------------------------------------------------------------------------- dataentry frame\r\n\r\n# DataEntryFrame = Frame(root,bg='gold2',relief=GROOVE,borderwidth=5)\r\n# DataEntryFrame.place(x=10,y=80,width=500,height=600)\r\n\r\n# addbtn = Button(DataEntryFrame,text='1. Add Student',width=25,font=('chiller',20,'bold'),bd=6,bg='skyblue3',activebackground='blue',relief=RIDGE,\r\n# activeforeground='white',command=addstudent)\r\n# addbtn.pack(side=TOP,expand=True)\r\n\r\n# searchbtn = Button(DataEntryFrame,text='2. Search Student',width=25,font=('chiller',20,'bold'),bd=6,bg='skyblue3',activebackground='blue',relief=RIDGE,\r\n# activeforeground='white',command=searchstudent)\r\n# searchbtn.pack(side=TOP,expand=True)\r\n\r\n# deletebtn = Button(DataEntryFrame,text='3. Delete Student',width=25,font=('chiller',20,'bold'),bd=6,bg='skyblue3',activebackground='blue',relief=RIDGE,\r\n# activeforeground='white',command=deletestudent)\r\n# deletebtn.pack(side=TOP,expand=True)\r\n\r\n# updatebtn = Button(DataEntryFrame,text='4. Update Student',width=25,font=('chiller',20,'bold'),bd=6,bg='skyblue3',activebackground='blue',relief=RIDGE,\r\n# activeforeground='white',command=updatestudent)\r\n# updatebtn.pack(side=TOP,expand=True)\r\n\r\n# showallbtn = Button(DataEntryFrame,text='5. Show All',width=25,font=('chiller',20,'bold'),bd=6,bg='skyblue3',activebackground='blue',relief=RIDGE,\r\n# activeforeground='white',command=showstudent)\r\n# showallbtn.pack(side=TOP,expand=True)\r\n\r\n# exportbtn = Button(DataEntryFrame,text='6. Export data',width=25,font=('chiller',20,'bold'),bd=6,bg='skyblue3',activebackground='blue',relief=RIDGE,\r\n# activeforeground='white',command=exportstudent)\r\n# exportbtn.pack(side=TOP,expand=True)\r\n\r\n# exitbtn = Button(DataEntryFrame,text='7. 
Exit',width=25,font=('chiller',20,'bold'),bd=6,bg='skyblue3',activebackground='blue',relief=RIDGE,\r\n# activeforeground='white',command=exitstudent)\r\n# exitbtn.pack(side=TOP,expand=True)\r\n\r\n# ##-----------------------------------------------------------Show data frame\r\n# ShowDataFrame = Frame(root,bg='gold2',relief=GROOVE,borderwidth=5)\r\n# ShowDataFrame.place(x=550,y=80,width=620,height=600)\r\n\r\n# ##------------------------------------------------- Showdataframe\r\n# style = ttk.Style()\r\n# style.configure('Treeview.Heading',font=('chiller',20,'bold'),foreground='blue')\r\n# style.configure('Treeview',font=('times',15,'bold'),background='cyan',foreground='black')\r\n# scroll_x = Scrollbar(ShowDataFrame,orient=HORIZONTAL)\r\n# scroll_y = Scrollbar(ShowDataFrame,orient=VERTICAL)\r\n# studenmttable = Treeview(ShowDataFrame,columns=('Id','Fullname','Username','studentId','ExtraInfo','Added Date','Updated Date'),\r\n# yscrollcommand=scroll_y.set,xscrollcommand=scroll_x.set)\r\n# scroll_x.pack(side=BOTTOM,fill=X)\r\n# scroll_y.pack(side=RIGHT,fill=Y)\r\n# scroll_x.config(command=studenmttable.xview)\r\n# scroll_y.config(command=studenmttable.yview)\r\n# studenmttable.heading('Id',text='Id')\r\n# studenmttable.heading('Fullname',text='Fullname')\r\n# studenmttable.heading('Username',text='Username')\r\n# studenmttable.heading('studentId',text='studentId')\r\n# studenmttable.heading('ExtraInfo',text='ExtraInfo')\r\n# studenmttable.heading('Added Date',text='Added Date')\r\n# studenmttable.heading('Updated Date',text='Updated Date')\r\n# studenmttable['show'] = 'headings'\r\n# studenmttable.column('Id',width=100)\r\n# studenmttable.column('Fullname',width=200)\r\n# studenmttable.column('Username',width=200)\r\n# studenmttable.column('studentId',width=300)\r\n# studenmttable.column('ExtraInfo',width=200)\r\n# studenmttable.column('Added Date',width=150)\r\n# studenmttable.column('Updated Date',width=150)\r\n# studenmttable.pack(fill=BOTH,expand=1)\r\n\r\n\r\n# ################################################################################################################## ConnectDatabaseButton\r\n# connectbutton = Button(root,text='Connect To Database',width=23,font=('chiller',19,'italic bold'),relief=RIDGE,borderwidth=4,bg='green2',\r\n# activebackground='blue',activeforeground='white',command=Connectdb)\r\n# connectbutton.place(x=930,y=0)\r\nroot = Tk()\r\nroot.title('student baza')\r\nroot.geometry('1200x800+200+50')\r\nroot.iconbitmap('./img/student.ico')\r\nroot.resizable(False,False)\r\n\r\n # create a search box label\r\nsearch_box = Entry(root, width=20,font = ('calibre',15,'normal'))\r\nsearch_box.insert(0, \"Qidirish...\") \r\nsearch_box.place(x=500,y=25,anchor='center')\r\n\r\n#create a search box button\r\nsearch_btn_image = PhotoImage(file = r\"./img/searchicon1.png\")\r\nsearch_box_btn = Button(root, image=search_btn_image, borderwidth=2)\r\nsearch_box_btn.place(x=630,y=25,anchor='center')\r\n\r\n#adding new user button\r\nadd_info_btn = Button(root, text=\"Ma'lumot Qo'shish\",font = ('calibre',13,'normal'), borderwidth=1, bg='#008000', fg='#ffffff', command=addstudent)\r\nadd_info_btn.place(x=830,y=25,anchor='center')\r\n\r\nconnectbutton = Button(root, text=\"Bazaga ulash\",font = ('calibre',13,'normal'), borderwidth=1, bg='#008000', fg='#ffffff', command=Connectdb)\r\nconnectbutton.place(x=1000,y=25,anchor='center')\r\n\r\ndisplayInfobutton = Button(root, text=\"To'liq ma'lumot\",font = ('calibre',13,'normal'), borderwidth=1, bg='#008000', fg='#ffffff', 
command=Connectdb)\r\ndisplayInfobutton.place(x=80,y=25,anchor='center')\r\n\r\neditInfobutton = Button(root, text=\"O'zgartirish\",font = ('calibre',13,'normal'), borderwidth=1, bg='#008000', fg='#ffffff', command=updatestudent)\r\neditInfobutton.place(x=250,y=25,anchor='center')\r\n\r\n\r\n############################################################################################################ Frames\r\nShowDataFrame = Frame(root,relief=GROOVE,borderwidth=5)\r\nShowDataFrame.place(x=0,y=80,width=1200,height=675)\r\nstyle = ttk.Style()\r\nstyle.configure('Treeview.Heading',font=('calibre',15,'normal'))\r\nstyle.configure('Treeview',font=('times',15,'bold'),foreground='black')\r\nscroll_x = Scrollbar(ShowDataFrame,orient=HORIZONTAL)\r\nscroll_y = Scrollbar(ShowDataFrame,orient=VERTICAL)\r\nstudenmttable = Treeview(ShowDataFrame,columns=('Id','Fullname','Usename','studentID','ExtraInfo','Addeddate'),\r\n yscrollcommand=scroll_y.set,xscrollcommand=scroll_x.set)\r\nscroll_x.pack(side=BOTTOM,fill=X)\r\nscroll_y.pack(side=RIGHT,fill=Y)\r\nscroll_x.config(command=studenmttable.xview)\r\nscroll_y.config(command=studenmttable.yview)\r\nstudenmttable.heading('Id',text='Id')\r\nstudenmttable.heading('Fullname',text='Fullname')\r\nstudenmttable.heading('Usename',text='Usename')\r\nstudenmttable.heading('studentID',text='student ID')\r\nstudenmttable.heading('ExtraInfo',text='ExtraInfo')\r\nstudenmttable.heading('Addeddate',text='Added date')\r\nstudenmttable['show'] = 'headings'\r\nstudenmttable.column('Id',width=50)\r\nstudenmttable.column('Fullname',width=300)\r\nstudenmttable.column('Usename',width=200)\r\nstudenmttable.column('studentID',width=100)\r\nstudenmttable.column('ExtraInfo',width=300)\r\nstudenmttable.column('Addeddate',width=100)\r\nstudenmttable.pack(fill=BOTH,expand=1)\r\n# showstudent()\r\n\r\n\r\nroot.mainloop()","repo_name":"Anouts11/python-learn","sub_path":"baza.py","file_name":"baza.py","file_ext":"py","file_size_in_byte":27565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28474119647","text":"import math\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\ndef halton(p, n):\n b=np.zeros(math.ceil(math.log(n)/math.log(p)))\n u=[]\n for j in range(0,n):\n i=0\n b[0]+=1\n while b[i]>(p-1+(10**(-8))):\n b[i]=0\n i+=1\n b[i]+=1\n u.append(0)\n for k in range(0, len(b)):\n u[j]+=b[k]*math.pow(p, -(k+1))\n return u\n\ndef MontoCarlo1(u):\n sumY=0.0\n for i in range(0, len(u)):\n t = u[i]\n ft1=P1(t)\n ft2=P2(t)\n sumY+=(ft2-ft1)\n print('(b): montoCarlo1: %.8f, n=%d'%(sumY/len(u), math.ceil(math.log10(len(ux)))))\n return\n\ndef MontoCarlo2(ux, uy):\n x=[]\n y=[]\n cnt=0\n for i in range(0, len(ux)):\n xi = ux[i]\n yi = uy[i]\n if P2(xi)>=yi and P1(xi)<=yi:\n x.append(xi)\n y.append(yi)\n cnt+=1\n n=math.ceil(math.log10(len(ux)))\n print('(c): montoCarlo2: %.8f, n=%d'%(cnt/len(ux), n))\n if n==4:\n plt.figure()\n plt.plot(x,y,'ro')\n plt.show()\n return\n\ndef P1(x):\n return x*x-x+0.5\n\ndef P2(x):\n return -(x*x)+x+0.5\n\nif __name__ == \"__main__\":\n for k in range(2, 6):\n ux = halton(2, 10**k)\n uy = halton(3, 10**k)\n MontoCarlo1(ux)\n MontoCarlo2(ux, uy)\n","repo_name":"Guanxy-baolitu/NumericalAnalysis","sub_path":"9_2_1.py","file_name":"9_2_1.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"34683906704","text":"import os\nimport sys\nimport inspect\nimport signal\nfrom subprocess import 
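For the parabolas in the Monte Carlo record above, the area between P2 and P1 on [0, 1] is the integral of (-2x^2 + 2x), which is exactly 1/3, so the quasi-random estimate has a closed-form target. A compact sketch using the radical-inverse form of the Halton sequence (base 2), independent of the record's own `halton` helper:

```python
def radical_inverse(base: int, index: int) -> float:
    # Van der Corput radical inverse: digits of `index` in `base`,
    # mirrored around the radix point.
    result, frac = 0.0, 1.0 / base
    while index > 0:
        index, digit = divmod(index, base)
        result += digit * frac
        frac /= base
    return result

def estimate_area(n: int) -> float:
    p1 = lambda x: x * x - x + 0.5
    p2 = lambda x: -x * x + x + 0.5
    xs = (radical_inverse(2, i) for i in range(1, n + 1))
    return sum(p2(x) - p1(x) for x in xs) / n

if __name__ == "__main__":
    exact = 1.0 / 3.0  # integral of (-2x^2 + 2x) over [0, 1]
    for n in (100, 10_000):
        est = estimate_area(n)
        print(n, est, "error:", abs(est - exact))
```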
Popen\n\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparentdir = os.path.dirname(currentdir)\nsys.path.insert(0, parentdir)\nprint(sys.path)\n\nfrom smarts.core.smarts import SMARTS\nfrom smarts.core.scenario import Scenario\nfrom envision.client import Client as Envision\nfrom smarts_imitation import ScenarioZoo\n\n\ndef experiment(scenario_path, scenario_name, traffic_name):\n\n    scenario_iterator = Scenario.scenario_variations(\n        [scenario_path], list([]), shuffle_scenarios=False, circular=False\n    )  # scenarios with different traffic histories.\n    scenario = None\n    for _scenario in scenario_iterator:\n        if _scenario._traffic_history.name == traffic_name:\n            scenario = _scenario\n            break\n    assert scenario is not None\n\n    envision_client = Envision(\n        sim_name=f\"{scenario_name}_{traffic_name}\",\n        output_dir=\"record_data_replay_path\",\n        headless=False,\n    )\n    smarts = SMARTS(\n        agent_interfaces={},\n        traffic_sim=None,\n        envision=envision_client,\n    )\n    smarts.reset(scenario)\n    smarts.step({})\n    while True:\n        smarts.step({})\n        current_vehicles = smarts.vehicle_index.social_vehicle_ids()\n        if len(current_vehicles) == 0:\n            break\n\n\nif __name__ == \"__main__\":\n\n    scenario_name = \"ngsim_us101\"\n    traffic_name = \"us101_0750-0805\"\n    scenario_path = ScenarioZoo.get_scenario(scenario_name)\n\n    # Arguments\n    envision_proc = Popen(\n        f\"scl envision start -s {scenario_path} -p 8081\",\n        shell=True,\n        preexec_fn=os.setsid,\n    )\n\n    try:\n        experiment(scenario_path, scenario_name, traffic_name)\n    except Exception as e:\n        os.killpg(os.getpgid(envision_proc.pid), signal.SIGTERM)\n        envision_proc.wait()\n        raise e\n\n    os.killpg(os.getpgid(envision_proc.pid), signal.SIGTERM)\n    envision_proc.wait()\n","repo_name":"zbzhu99/NGSIM_Imitation","sub_path":"ILSwiss/run_scripts/visualize_smarts_history.py","file_name":"visualize_smarts_history.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"}
{"seq_id":"26850206933","text":"#2.2 (Computing the volume of a cylinder) Write a program that reads the radius and height of a cylinder and computes the base area and volume with the formulas below:\r\n\r\n\r\n             #note: input() returns a string\r\n\r\n# a = input(\"Enter the radius and length of a cylinder: \")\r\n# print(a)\r\n# print(type(a))\r\n\r\nradius, length = eval(input(\"Enter the radius and length of a cylinder: \"))\r\n                    #to return two values, the raw string from input() will not do; eval() converts the string into numbers\r\n                    #also note: when typing several values, separate them with commas, and the comma must be entered in English (half-width) mode\r\n                    # this exercise comes from a foreign textbook, so it does not use Chinese\r\n#print(radius,length)\r\narea = radius ** 2 * 3.14159\r\nvolume = area * length\r\nprint(\"The area is %.4f\" % (area))\r\nprint(\"The volume is %.1f\" % (volume))\r\n\r\n\r\n","repo_name":"LYTXJY/python_full_stack","sub_path":"Code/src/hellopython/第二章/第二章作业/2.2.py","file_name":"2.2.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"23987613845","text":"### helpers \nimport random\nclass Position:\n\tdef __init__(self, value):\n\t\tself.value = value\n\t\tif self.value == 0:\n\t\t\tself.possible = [i for i in range(1, 10)]\n\t\t\trandom.shuffle(self.possible)\n\t\telse:\n\t\t\tself.possible = []","repo_name":"mmarkell/sudoku_solver","sub_path":"pos.py","file_name":"pos.py","file_ext":"py","file_size_in_byte":221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"20000052065","text":"'''\nCreated on Nov 18, 2015\n\n@author: hwl122902\n'''\nfrom __future__ import print_function\n
*\nimport os\nfrom macholib.util import fileview\nfrom parsePlistFile import readInfoFile\nimport json\n\ndef readMachoFile(path):\n rout = {}\n logger.info(\"It is getting the header of the MachoFile\")\n header = getHeader(path)\n descripe = dict(header.header._describe())\n cupType = descripe.get(\"cputype_string\")\n rout[\"MachHeader\"] = descripe\n commands = []\n logger.info(\"It is getting the contents of the MachoFile\")\n for (index,(lc, cmd, data)) in enumerate(header.commands):\n \n lc_name = lc.get_cmd_name()\n if lc_name==44:\n if cupType.find(\"64\")!=-1:\n lc_name = \"LC_ENCRYPTION_INFO_64\"\n else:\n lc_name = \"LC_ENCRYPTION_INFO\"\n desc = cmd.describe()\n \n if lc_name==\"LC_SEGMENT_64\" or lc_name == \"LC_SEGMENT\":\n sec_num = cmd.nsects\n if sec_num > 0:\n for sec in data:\n secDesc = sec.describe()\n strs = ''\n with open(path, 'rb') as fp:\n fh = fileview(fp, sec.addr, sec.size)\n fh.seek(0)\n with open(\"temp\",\"wb\") as f:\n f.write(fh.read())\n \n for s in strings(\"temp\"):\n if isinstance(s, str):\n strs = strs + s + ','\n \n secDesc[\"strings\"] = strs\n desc[str(sec.sectname.rstrip('\\x00'))] = secDesc\n os.remove(\"temp\")\n else:\n desc[\"data\"] = data.rstrip('\\x00')\n commands.append(desc)\n rout[\"loadcommand\"] = commands\n return rout\n \ndef outputResult(path):\n out = {}\n js_macho = readMachoFile(path)\n js_plist = readInfoFile(\"Info.plist\")\n out[\"machoPart\"] = js_macho\n out[\"plistPart\"] = js_plist\n outFinal = json.dumps(out, encoding='latin1')\n with open(\"result\",\"w\") as f:\n f.write(outFinal)\n\nif __name__ == \"__main__\":\n logger = setLogger()\n path = \"machoFiles/IPadQQ\"\n outputResult(path)\n\n\n","repo_name":"houweilong/scanner","sub_path":"scanner/parseMachpFile.py","file_name":"parseMachpFile.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2401009574","text":"from collections import deque\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\nclass Solution:\n def rightSideView(self, root: Optional[TreeNode]) -> List[int]:\n output, found = [], False \n dq1, dq2 = deque(), deque()\n dq1.append(root)\n while len(dq1):\n top = dq1.popleft()\n if top:\n if not found:\n found = True\n output.append(top.val)\n \n dq2.append(top.right)\n dq2.append(top.left)\n \n if not len(dq1):\n dq1, dq2, found = dq2, deque(), False\n \n return output\n","repo_name":"SouradeepSaha/leetcode","sub_path":"199. Binary Tree Right Side View.py","file_name":"199. 
Binary Tree Right Side View.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25272893223","text":"import time\n\nfrom django.core.management import call_command\nfrom django.core.management.base import BaseCommand\nfrom planapp.models import Issue\n\n\nclass Command(BaseCommand):\n help = \"creates tasks from plans\"\n\n def add_arguments(self, parser):\n pass\n\n def handle(self, *args, **options):\n try:\n WAIT_TIME = 1 * 60 * 5\n while True:\n call_command(\"taskify\")\n time.sleep(WAIT_TIME)\n except Exception as e:\n i = Issue.objects.create(\n obj_info=\"crontask: outside of taskify\",\n where=\"crontask\",\n exception_string=str(e)\n )\n i.save()\n","repo_name":"mattwhite180/goalsandplans","sub_path":"server/goalserver/planapp/management/commands/crontask.py","file_name":"crontask.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72314509633","text":"# -*- coding: cp1252 -*-\nimport ctypes, sys, os, array\nif sys.platform == 'win32':\n import _winreg\n\n# ADwin-Exception\nclass ADwinError(Exception):\n def __init__(self, functionName, errorText, errorNumber):\n self.functionName = functionName\n self.errorText = errorText\n self.errorNumber = errorNumber\n def __str__(self):\n if (sys.version_info[0] == 3):\n return 'Function %s, errorNumber %d: %s' % (self.functionName, self.errorNumber, self.errorText.decode())\n else:\n return 'Function ' + self.functionName + ', errorNumber ' + str(self.errorNumber) + ': ' + self.errorText\n\nclass ADwin:\n __err = ctypes.c_long(0)\n __errPointer = ctypes.pointer(__err)\n\n def __init__(self, DeviceNo = 0x150, raiseExceptions = 1):\n\n if sys.platform == 'linux2':\n try:\n if (sys.version_info[0] == 3):\n f = open('/etc/adwin/ADWINDIR', 'r')\n else:\n f = file('/etc/adwin/ADWINDIR', 'r')\n self.ADwindir = f.readline()[:-1] + '/' # without newline at the end\n self.dll = ctypes.CDLL(self.ADwindir + 'lib/libadwin.so')\n except:\n raise ADwinError('__init__', 'shared library libadwin.so not found.', 200)\n f.close()\n self.dll.Set_DeviceNo(DeviceNo)\n elif sys.platform == 'darwin':\n try:\n if (sys.version_info[0] == 3):\n f = open('/etc/adwin/ADWINDIR', 'r')\n else:\n f = file('/etc/adwin/ADWINDIR', 'r')\n self.ADwindir = f.readline()[:-1] + '/' # without newline at the end\n self.dll = ctypes.CDLL('/Library/Frameworks/adwin32.framework/Versions/A/libadwin.5.dylib')\n self.dll.Set_DeviceNo(DeviceNo)\n except:\n raise ADwinError('__init__', 'shared library libadwin.5.dylib not found.', 200)\n else:\n try:\n aReg = _winreg.ConnectRegistry(None,_winreg.HKEY_CURRENT_USER)\n aKey = _winreg.OpenKey(aReg, r\"SOFTWARE\\Jäger Meßtechnik GmbH\\ADwin\\Directory\")\n self.ADwindir = str(_winreg.EnumValue(aKey, 0)[1])\n _winreg.CloseKey(aKey)\n _winreg.CloseKey(aReg)\n except:\n raise ADwinError('__init__', 'Could not read Registry.', 200)\n try:\n self.dll = ctypes.WinDLL('ADWIN32')\n self.dll.DeviceNo = DeviceNo\n except:\n raise ADwinError('__init__', 'Windows-DLL adwin32.dll not found.', 200)\n self.raiseExceptions = raiseExceptions\n self.DeviceNo = DeviceNo\n self.version = '0.6'\n\n\n def __checkError(self, functionName):\n if self.__err.value != 0:\n if self.raiseExceptions != 0:\n raise ADwinError(functionName, self.Get_Last_Error_Text(self.__err.value), self.__err.value)\n\n # system control and system information\n def Boot(self, Filename):\n 
'''Boot initializes the ADwin system and loads the file of the operating system.'''\n if (sys.version_info[0] == 3):\n self.dll.e_ADboot(Filename.encode(), self.DeviceNo, 100000, 0, self.__errPointer)\n else:\n self.dll.e_ADboot(Filename, self.DeviceNo, 100000, 0, self.__errPointer)\n self.__checkError('Boot')\n\n def Test_Version(self):\n '''Test_Version checks, if the correct operating system for the processor has been loaded \n and if the processor can be accessed.'''\n self.dll.e_ADTest_Version.restype = ctypes.c_short\n ret = self.dll.e_ADTest_Version(self.DeviceNo, 0, self.__errPointer)\n return ret\n\n def Processor_Type(self):\n '''Processor_Type returns the processor type of the system.'''\n ret = self.dll.e_ADProzessorTyp(self.DeviceNo, self.__errPointer)\n self.__checkError('Processor_Type')\n if (ret == 1000): ret = 9\n return ret\n\n def Workload(self):\n '''Workload returns the processor workload.'''\n ret = self.dll.e_AD_Workload(0, self.DeviceNo, self.__errPointer)\n self.__checkError('Workload')\n return ret\n\n def Free_Mem(self, Mem_Spec):\n '''Free_Mem determines the free memory for the different memory types.'''\n ret = self.dll.e_AD_Memory_all_byte(Mem_Spec, self.DeviceNo, self.__errPointer)\n self.__checkError('Free_Mem')\n return ret\n\n # Process control\n def Load_Process(self, Filename):\n '''Load_Process loads the binary file of a process into the ADwin system.'''\n if (sys.version_info[0] == 3):\n self.dll.e_ADBload(Filename.encode(), self.DeviceNo, 0, self.__errPointer)\n else:\n self.dll.e_ADBload(Filename, self.DeviceNo, 0, self.__errPointer)\n self.__checkError('Load_Process')\n\n def Start_Process(self, ProcessNo):\n '''Start_Process starts a process.'''\n self.dll.e_ADB_Start(ProcessNo, self.DeviceNo, self.__errPointer)\n self.__checkError('Start_Process')\n\n def Stop_Process(self, ProcessNo):\n '''Stop_Process stops a process.'''\n self.dll.e_ADB_Stop(ProcessNo, self.DeviceNo, self.__errPointer)\n self.__checkError('Stop_Process')\n\n def Clear_Process(self, ProcessNo):\n '''Clear_Process deletes a process from memory.'''\n self.dll.e_Clear_Process(ProcessNo, self.DeviceNo, self.__errPointer)\n self.__checkError('Clear_Process')\n\n def Process_Status(self, ProcessNo):\n '''Process_Status returns the status of a process.'''\n ret = self.dll.e_Get_ADBPar(-100 + ProcessNo, self.DeviceNo, self.__errPointer)\n self.__checkError('Process_Status')\n return ret\n\n def Get_Processdelay(self, ProcessNo):\n '''Get_Processdelay returns the parameter Processdelay for a process.'''\n ret = self.dll.e_Get_ADBPar(-90 + ProcessNo, self.DeviceNo, self.__errPointer)\n self.__checkError('Get_Processdelay')\n return ret\n\n def Set_Processdelay(self, ProcessNo, Processdelay):\n '''Set_Processdelay sets the parameter Globaldelay for a process.'''\n self.dll.e_Set_ADBPar(-90 + ProcessNo, Processdelay, self.DeviceNo, self.__errPointer)\n self.__checkError('Set_Processdelay')\n\n # Transfer of global variables\n def Set_Par(self, Index, Value):\n '''Set_Par sets a global long variable to the specified value.'''\n self.dll.e_Set_ADBPar(Index, Value, self.DeviceNo, self.__errPointer)\n self.__checkError('Set_Par')\n \n def Get_Par(self, no):\n '''Get_Par returns the value of a global long variable.'''\n ret = self.dll.e_Get_ADBPar(no, self.DeviceNo, self.__errPointer)\n self.__checkError('Get_Par')\n return ret\n\n def Get_Par_Block(self, StartIndex, Count):\n '''Get_Par_Block returns a number of global long variables, \n which is to be indicated.'''\n dataType = ctypes.c_long 
* Count\n data = dataType(0)\n self.dll.e_Get_ADBPar_All(StartIndex, Count, data, self.DeviceNo, self.__errPointer)\n self.__checkError('Get_Par_Block')\n return data\n\n def Get_Par_All(self):\n '''Get_Par_All returns all global long variables.'''\n dataType = ctypes.c_long * 80\n data = dataType(0)\n self.dll.e_Get_ADBPar_All(1, 80, data, self.DeviceNo, self.__errPointer)\n self.__checkError('Get_Par_All')\n return data\n\n def Set_FPar(self, Index, Value):\n '''Set_FPar sets a global float variable to a specified value.'''\n _val = ctypes.c_float(Value)\n self.dll.e_Set_ADBFPar(Index, _val, self.DeviceNo, self.__errPointer)\n self.__checkError('Set_FPar')\n\n def Get_FPar(self, Index):\n '''Get_FPar returns the value of a global float variable.'''\n self.dll.e_Get_ADBFPar.restype = ctypes.c_float\n ret = self.dll.e_Get_ADBFPar(Index, self.DeviceNo, self.__errPointer)\n self.__checkError('Get_FPar')\n return ret\n \n def Get_FPar_Block(self, StartIndex, Count):\n '''Get_FPar_Block returns a number of global float variables, \n which is to be indicated.'''\n dataType = ctypes.c_float * Count\n data = dataType(0)\n self.dll.e_Get_ADBFPar_All(StartIndex, Count, data, self.DeviceNo, self.__errPointer)\n self.__checkError('Get_FPar_Block')\n return data\n\n def Get_FPar_All(self):\n '''Get_Par_All returns all global float variables.'''\n dataType = ctypes.c_float * 80\n data = dataType(0)\n self.dll.e_Get_ADBFPar_All(1, 80, data, self.DeviceNo, self.__errPointer)\n self.__checkError('Get_FPar_All')\n return data\n\n # Transfer of data arrays\n def Data_Length(self, Data_No):\n '''Data_Length returns the length of an array, declared under ADbasic,\n that means the number of elements.'''\n ret = self.dll.e_GetDataLength(Data_No, self.DeviceNo, self.__errPointer)\n self.__checkError('Data_Length')\n return ret\n\n def SetData_Long(self, Data, DataNo, Startindex, Count):\n '''SetData_Long transfers long data from the PC into a DATA array\n of the ADwin system.'''\n if (type(Data) == list) or (type(Data) == array.array):\n # convert list to ctypes.c_long_Array\n dataType = ctypes.c_long * Count\n data = dataType(0)\n for i in range(Count):\n data[i] = Data[i]\n else: # ctypes-array\n data = Data\n self.dll.e_Set_Data(data, 2, DataNo, Startindex, Count, self.DeviceNo, self.__errPointer)\n self.__checkError('SetData_Long')\n\n def GetData_Long(self, DataNo, StartIndex, Count):\n '''GetData_Long transfers long data from a DATA array of an ADwin system\n into an array.'''\n dataType = ctypes.c_long * Count\n data = dataType(0)\n self.dll.e_Get_Data(data, 2, DataNo, StartIndex, Count, self.DeviceNo, self.__errPointer)\n self.__checkError('GetData_Long')\n return data\n\n def SetData_Float(self, Data, DataNo, Startindex, Count):\n '''SetData_Float transfers float data from the PC into a DATA array\n of the ADwin system.'''\n if (type(Data) == list) or (type(Data) == array.array):\n # convert list to ctypes.c_float_Array\n dataType = ctypes.c_float * Count\n data = dataType(0)\n for i in range(Count):\n data[i] = Data[i]\n else: # ctypes-array\n data = Data\n self.dll.e_Set_Data(data, 5, DataNo, Startindex, Count, self.DeviceNo, self.__errPointer)\n self.__checkError('SetData_Float')\n\n def GetData_Float(self, DataNo, StartIndex, Count):\n '''GetData_Float transfers float data from a DATA array of an ADwin system\n into an array.'''\n dataType = ctypes.c_float * Count\n data = dataType(0)\n self.dll.e_Get_Data(data, 5, DataNo, StartIndex, Count, self.DeviceNo, self.__errPointer)\n 
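# check the error flag set by the DLL call above; raises ADwinError on failure\n        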
self.__checkError('GetData_Float')\n return data\n\n # Transfer of FIFO Arrays\n def Fifo_Empty(self, FifoNo):\n '''Fifo_Empty provides the number of free elements of a FIFO array.'''\n ret = self.dll.e_Get_Fifo_Empty(FifoNo, self.DeviceNo, self.__errPointer)\n self.__checkError('Fifo_Empty')\n return ret\n\n def Fifo_Full(self, FifoNo):\n '''Fifo_Full provides the number of used elements of a FIFO array.'''\n ret = self.dll.e_Get_Fifo_Count(FifoNo, self.DeviceNo, self.__errPointer)\n self.__checkError('Fifo_Full')\n return ret\n\n def Fifo_Clear(self, FifoNo):\n '''Fifo_Clear initializes the pointer for writing and reading a FIFO array.'''\n self.dll.e_Clear_Fifo(FifoNo, self.DeviceNo, self.__errPointer)\n self.__checkError('Fifo_Clear')\n\n def SetFifo_Long(self, FifoNo, Data, Count):\n '''SetFifo_Long transfers long data from the PC to a FIFO array of the ADwin system.'''\n if (type(Data) == list) or (type(Data) == array.array):\n # convert list to ctypes.c_long_Array\n dataType = ctypes.c_long * Count\n data = dataType(0)\n for i in range(Count):\n data[i] = Data[i]\n else: # ctypes-array\n data = Data\n self.dll.e_Set_Fifo(data, 2, FifoNo, Count, self.DeviceNo, self.__errPointer)\n self.__checkError('SetFifo_Long')\n\n def GetFifo_Long(self, FifoNo, Count):\n '''GetFifo_Long transfers long FIFO data from the ADwin system to the PC.'''\n dataType = ctypes.c_long * Count\n data = dataType(0)\n self.dll.e_Get_Fifo(data, 2, FifoNo, Count, self.DeviceNo, self.__errPointer)\n self.__checkError('GetFifo_Long')\n return data\n\n def SetFifo_Float(self, FifoNo, Data, Count):\n '''SetFifo_Float transfers float data from the PC into a FIFO array of the ADwin system.'''\n if (type(Data) == list) or (type(Data) == array.array):\n # convert list to ctypes.c_float_Array\n dataType = ctypes.c_float * Count\n data = dataType(0)\n for i in range(Count):\n data[i] = Data[i]\n else: # ctypes-array\n data = Data\n self.dll.e_Set_Fifo(data, 5, FifoNo, Count, self.DeviceNo, self.__errPointer)\n self.__checkError('SetFifo_Float')\n\n def GetFifo_Float(self, FifoNo, Count):\n '''GetFifo_Float transfers float FIFO data from the ADwin system to the PC.'''\n dataType = ctypes.c_float * Count\n data = dataType(0)\n self.dll.e_Get_Fifo(data, 5, FifoNo, Count, self.DeviceNo, self.__errPointer)\n self.__checkError('GetFifo_Float')\n return data\n\n # Data arrays with string data\n def String_Length(self, DataNo):\n '''String_Length transfers the length of a data string to a DATA array.'''\n ret = self.dll.e_Get_Data_String_Length(DataNo, self.DeviceNo, self.__errPointer)\n self.__checkError('String_Length')\n return ret\n\n def SetData_String(self, DataNo, String):\n '''transfers a string into a DATA array.'''\n if (sys.version_info[0] == 3):\n self.dll.e_Set_Data_String(String.encode(), DataNo, self.DeviceNo, self.__errPointer)\n else:\n self.dll.e_Set_Data_String(String, DataNo, self.DeviceNo, self.__errPointer)\n self.__checkError('SetData_String')\n \n def GetData_String(self, DataNo, MaxCount):\n '''GetData_String transfers a string from a DATA array into a buffer.'''\n dataType = ctypes.c_char * (MaxCount + 2)\n data = dataType(' ')\n Count = self.dll.e_Get_Data_String(data, MaxCount+1, DataNo, self.DeviceNo, self.__errPointer)\n self.__checkError('GetData_String')\n if (sys.version_info[0] == 3):\n return data.value, Count\n else:\n return data, Count\n \n # Control and error handling\n def Get_Last_Error_Text(self, Last_Error):\n '''Get_Last_Error_Text returns an error text related to an error number.'''\n 
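# the DLL copies the message into a caller-supplied 256-byte buffer\n        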
text = ctypes.create_string_buffer(256)\n pText = ctypes.byref(text)\n self.dll.ADGetErrorText(Last_Error, pText, 256)\n return text.value\n\n def Get_Last_Error(self):\n '''Get_Last_Error returns the number of the last error.'''\n return self.__err.value","repo_name":"heeres/qtlab","sub_path":"source/lib/dll_support/ADwin.py","file_name":"ADwin.py","file_ext":"py","file_size_in_byte":15394,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"61"} +{"seq_id":"37095496956","text":"\r\n#import modules\r\nimport streamlit as st\r\nimport pandas as pd\r\nfrom annotated_text import annotated_text\r\nimport re\r\nimport json\r\nimport os\r\nimport pathlib\r\n\r\n#predefine parameters\r\nphaseChoices = ['Label Data', 'Save Result']\r\nfile = ''\r\n# col = 'Description'\r\nconfirmButton = False\r\nnextButton = False\r\n#current working dir\r\nwd = str(pathlib.Path().absolute())\r\n#regex pattern\r\n# regexPattern = '[\\n\\,\\(\\:]*%s\\,* |^%s '\r\nregexPattern = '[\\n\\,\\(\\:\\s]*%s[\\,\\)\\s\\.]*|^%s[\\s]'\r\n#org - blue color, person - red color\r\ntagColorDict = {'ORG': \"#8ef\", 'PERSON':\"#faa\", 'LOC':\"#fea\", 'BANK':\"#afa\"}\r\ntagTxtFile = './TempResult/tagList.txt'\r\ncontentTxtFile = './TempResult/content.txt'\r\n#default result name\r\nresultJsonFile = 'result.json'\r\ntagResult = []\r\ncontentResult = []\r\nfinalList = []\r\n\r\n#sidebars\r\noption = st.sidebar.selectbox(\"Section\", phaseChoices)\r\n\r\n#input for tag\r\norgTag = st.sidebar.text_input('Organization Tag') \r\npplTag = st.sidebar.text_input('Person Tag')\r\nlocTag = st.sidebar.text_input('Location Tag')\r\nbankTag = st.sidebar.text_input('Bank Tag')\r\n\r\n#create a temp folder if not found\r\nif not os.path.exists('./TempResult'):\r\n os.mkdir('./TempResult')\r\n\r\n#cache func\r\n# @st.cache(hash_funcs={streamlit.uploaded_file_manager.UploadedFile: my_hash_func}, suppress_st_warning = True)\r\n@st.cache(suppress_st_warning = True)\r\ndef load_data(file_uploaded):\r\n return pd.read_csv(file_uploaded, sep=',', encoding='utf-8')\r\n\r\n@st.cache(suppress_st_warning = True)\r\ndef tag_content(desc):\r\n #for annotation and result\r\n tagList = []\r\n# orgList = [i for i in orgTag.split(',') if i!='']\r\n# pplList = [i for i in pplTag.split(',') if i!='']\r\n allTagList = [orgTag, pplTag, locTag, bankTag]\r\n allLabelList = ['ORG', 'PERSON', 'LOC', 'BANK']\r\n allTagNoEmptyList = [[j for j in i.split(',') if j!=''] for i in allTagList]\r\n \r\n for tempList in zip(allTagNoEmptyList, allLabelList):\r\n for item in tempList[0]:\r\n if len(re.findall('[\\n\\,\\(\\:\\s]*%s[\\,\\)\\s\\.]*|^%s[\\s]'%(item, item), desc)) >= 1:\r\n for match in [(i.start(), i.end(), tempList[1]) for i in re.finditer(item, desc)]:\r\n tagList.append(match)\r\n \r\n# for org in orgList:\r\n# if len(re.findall('\\n*%s\\,* |^%s '%(org, org), desc)) >= 1:\r\n# for match in [(i.start(), i.end(), \"ORG\") for i in re.finditer(org, desc)]:\r\n# tagList.append(match)\r\n \r\n# for ppl in pplList:\r\n# if len(re.findall('\\n*%s\\,* |^%s '%(ppl, ppl), desc)) >= 1:\r\n# for match in [(i.start(), i.end(), \"PERSON\") for i in re.finditer(ppl, desc)]:\r\n# tagList.append(match)\r\n \r\n return tagList\r\n\r\n# @st.cache(suppress_st_warning = True)\r\ndef store_tempResult(tempTagList, tempContent):\r\n with open(tagTxtFile, 'a') as myFile:\r\n myFile.write('%s \\n'%json.dumps(tempTagList)) \r\n with open(contentTxtFile, 'a') as myFile:\r\n myFile.write('%s \\n'%json.dumps(tempContent))\r\n \r\ndef 
keyInWorkingPath(key, print = False):\r\n# startPath = \"D:/Users/figohjs/Documents\"\r\n resultWd = st.text_input('Current directory:', wd, key = key)\r\n resultFileName = st.text_input('Filename:', resultJsonFile)\r\n #show how many lablled data\r\n #read text file\r\n if os.path.isfile(tagTxtFile):\r\n with open(tagTxtFile, 'r', encoding='utf-8') as input_file:\r\n for row in input_file.readlines():\r\n tagResult.append(json.loads(row))\r\n numRow = len(set([key for i in tagResult for key,val in i.items()]))\r\n else:\r\n numRow = 0\r\n st.markdown('Number of labelled rows: %s'%numRow)\r\n button = st.button('Save Result', key = 'JN')\r\n resultLoc = resultWd + \"\\\\\" + resultFileName\r\n return resultLoc, button\r\n \r\n#upload file\r\nif option == 'Label Data':\r\n file = st.file_uploader(\"Upload file\", type = ['csv'])\r\n #if user uploaded file\r\n if file:\r\n st.markdown(\"Uploaded file: %s\"%\"YES\")\r\n df = load_data(file)\r\n col = st.selectbox(\"Please select col for description\", sorted(df.columns))\r\n page_number = st.number_input(\r\n label = \"Record\",\r\n min_value = 1,\r\n max_value = df.shape[0],\r\n step = 1,\r\n )\r\n #display text\r\n content = df.loc[page_number - 1, col]\r\n st.write(content)\r\n \r\n #display tag\r\n finalTagList = tag_content(content)\r\n \r\n #if tag list is not empty\r\n if len(finalTagList)!=0:\r\n taggedTermList = [(content[i[0]:i[1]], i[2], tagColorDict[i[2]]) for i in finalTagList]\r\n #only show unique set\r\n annotated_text(*sorted(list(set(taggedTermList)), key = lambda x:x[1]))\r\n else:\r\n taggedTermList = []\r\n \r\n store_tempResult({page_number:taggedTermList}, {page_number:content})\r\n# st.write(taggedTermList)\r\n \r\n else:\r\n st.markdown(\"Please upload file before label data\")\r\n \r\nelif option == 'Save Result':\r\n #save result button\r\n chosenPath, buttonSR = keyInWorkingPath(2)\r\n \r\n #if click save button\r\n if buttonSR: \r\n #check if both txt file exist\r\n if os.path.isfile(tagTxtFile) and os.path.isfile(contentTxtFile):\r\n\r\n #read text file\r\n with open(tagTxtFile, 'r', encoding='utf-8') as input_file:\r\n for row in input_file.readlines():\r\n tagResult.append(json.loads(row))\r\n\r\n with open(contentTxtFile, 'r', encoding='utf-8') as input_file:\r\n for row in input_file.readlines():\r\n contentResult.append(json.loads(row))\r\n\r\n #get latest record for every row\r\n dictKeys = [key for i in tagResult for key,val in i.items()]\r\n for finalKey in set(dictKeys):\r\n tempDict = {}\r\n tagVal = list([i for i in tagResult for key,val in i.items() if key == finalKey][-1].values())[0]\r\n contentVal = list([i for i in contentResult for key,val in i.items() if key == finalKey][-1].values())[0]\r\n tempDict['tagList'] = tagVal\r\n tempDict['content'] = contentVal\r\n finalList.append(tempDict)\r\n \r\n #write final file\r\n with open(chosenPath, 'w', encoding='utf-8') as output_file:\r\n for dic in finalList:\r\n json.dump(dic, output_file) \r\n output_file.write(\"\\n\")\r\n #remove temp file\r\n if os.path.isfile(tagTxtFile):\r\n os.remove(tagTxtFile)\r\n if os.path.isfile(contentTxtFile):\r\n os.remove(contentTxtFile)\r\n #remove temp folder\r\n if os.path.exists(wd + '\\\\' + 'TempResult'):\r\n os.rmdir(wd + '\\\\' + 'TempResult')\r\n \r\n #success msg\r\n st.markdown('Result is saved as %s' %chosenPath)\r\n\r\n else:\r\n st.markdown(\"Please label data before save 
result\")\r\n","repo_name":"yeomanghs/Data-wrangling-and-NLP","sub_path":"textLabel.py","file_name":"textLabel.py","file_ext":"py","file_size_in_byte":7232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"74526463235","text":"# -*- encoding: utf-8 -*-\r\n\"\"\"\r\n@author   :   yykzjh    \r\n@Contact  :   yykzhjh@163.com\r\n@DateTime :   2022/12/1 22:50\r\n@Version  :   1.0\r\n@License  :   (C)Copyright 2022\r\n\"\"\"\r\nimport torch\r\nimport torch.optim as optim\r\n\r\nimport lib.utils as utils\r\n\r\nfrom .DenseVNet import DenseVNet\r\nfrom .UNet3D import UNet3D\r\nfrom .VNet import VNet\r\nfrom .AttentionUNet import AttentionU_Net\r\nfrom .R2UNet import R2U_Net\r\nfrom .R2AttentionUNet import R2AttentionU_Net\r\nfrom .HighResNet3D import HighResNet3D\r\nfrom .DenseVoxelNet import DenseVoxelNet\r\nfrom .MultiResUNet3D import MultiResUNet3D\r\nfrom .DenseASPPUNet import DenseASPPUNet\r\n\r\nfrom .PMFSNet import PMFSNet\r\n\r\n\r\n\r\ndef get_model_optimizer_lr_scheduler(opt):\r\n    # Initialize the network model\r\n    if opt[\"model_name\"] == \"DenseVNet\":\r\n        model = DenseVNet(in_channels=opt[\"in_channels\"], classes=opt[\"classes\"])\r\n\r\n    elif opt[\"model_name\"] == \"UNet3D\":\r\n        model = UNet3D(opt[\"in_channels\"], opt[\"classes\"], final_sigmoid=False)\r\n\r\n    elif opt[\"model_name\"] == \"VNet\":\r\n        model = VNet(in_channels=opt[\"in_channels\"], classes=opt[\"classes\"])\r\n\r\n    elif opt[\"model_name\"] == \"AttentionUNet\":\r\n        model = AttentionU_Net(in_channels=opt[\"in_channels\"], out_channels=opt[\"classes\"])\r\n\r\n    elif opt[\"model_name\"] == \"R2UNet\":\r\n        model = R2U_Net(in_channels=opt[\"in_channels\"], out_channels=opt[\"classes\"])\r\n\r\n    elif opt[\"model_name\"] == \"R2AttentionUNet\":\r\n        model = R2AttentionU_Net(in_channels=opt[\"in_channels\"], out_channels=opt[\"classes\"])\r\n\r\n    elif opt[\"model_name\"] == \"HighResNet3D\":\r\n        model = HighResNet3D(in_channels=opt[\"in_channels\"], classes=opt[\"classes\"])\r\n\r\n    elif opt[\"model_name\"] == \"DenseVoxelNet\":\r\n        model = DenseVoxelNet(in_channels=opt[\"in_channels\"], classes=opt[\"classes\"])\r\n\r\n    elif opt[\"model_name\"] == \"MultiResUNet3D\":\r\n        model = MultiResUNet3D(in_channels=opt[\"in_channels\"], classes=opt[\"classes\"])\r\n\r\n    elif opt[\"model_name\"] == \"DenseASPPUNet\":\r\n        model = DenseASPPUNet(in_channels=opt[\"in_channels\"], classes=opt[\"classes\"])\r\n\r\n    elif opt[\"model_name\"] == \"PMRFNet\":\r\n        model = PMFSNet(in_channels=opt[\"in_channels\"], out_channels=opt[\"classes\"])\r\n\r\n    else:\r\n        raise RuntimeError(f\"{opt['model_name']} is not a supported network model!\")\r\n\r\n\r\n    # Move the model to the GPU\r\n    model = model.to(opt[\"device\"])\r\n\r\n    # Randomly initialize the model parameters\r\n    utils.init_weights(model, init_type=\"kaiming\")\r\n\r\n\r\n    # Initialize the optimizer\r\n    if opt[\"optimizer_name\"] == \"SGD\":\r\n        optimizer = optim.SGD(model.parameters(), lr=opt[\"learning_rate\"], momentum=opt[\"momentum\"],\r\n                              weight_decay=opt[\"weight_decay\"])\r\n\r\n    elif opt[\"optimizer_name\"] == 'Adagrad':\r\n        optimizer = optim.Adagrad(model.parameters(), lr=opt[\"learning_rate\"], weight_decay=opt[\"weight_decay\"])\r\n\r\n    elif opt[\"optimizer_name\"] == \"RMSprop\":\r\n        optimizer = optim.RMSprop(model.parameters(), lr=opt[\"learning_rate\"], weight_decay=opt[\"weight_decay\"],\r\n                                  momentum=opt[\"momentum\"])\r\n\r\n    elif opt[\"optimizer_name\"] == \"Adam\":\r\n        optimizer = optim.Adam(model.parameters(), lr=opt[\"learning_rate\"], weight_decay=opt[\"weight_decay\"])\r\n\r\n    elif opt[\"optimizer_name\"] == \"Adamax\":\r\n        
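# Adamax: a variant of Adam based on the infinity norm\r\n        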
optimizer = optim.Adamax(model.parameters(), lr=opt[\"learning_rate\"], weight_decay=opt[\"weight_decay\"])\r\n\r\n    elif opt[\"optimizer_name\"] == \"Adadelta\":\r\n        optimizer = optim.Adadelta(model.parameters(), lr=opt[\"learning_rate\"], weight_decay=opt[\"weight_decay\"])\r\n\r\n    else:\r\n        raise RuntimeError(\r\n            f\"{opt['optimizer_name']} is not a supported optimizer!\")\r\n\r\n    # Initialize the learning-rate scheduler\r\n    if opt[\"lr_scheduler_name\"] == \"ExponentialLR\":\r\n        lr_scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=opt[\"gamma\"])\r\n\r\n    elif opt[\"lr_scheduler_name\"] == \"StepLR\":\r\n        lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=opt[\"step_size\"], gamma=opt[\"gamma\"])\r\n\r\n    elif opt[\"lr_scheduler_name\"] == \"MultiStepLR\":\r\n        lr_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=opt[\"milestones\"], gamma=opt[\"gamma\"])\r\n\r\n    elif opt[\"lr_scheduler_name\"] == \"CosineAnnealingLR\":\r\n        lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt[\"T_max\"])\r\n\r\n    elif opt[\"lr_scheduler_name\"] == \"CosineAnnealingWarmRestarts\":\r\n        lr_scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=opt[\"T_0\"],\r\n                                                                      T_mult=opt[\"T_mult\"])\r\n\r\n    elif opt[\"lr_scheduler_name\"] == \"OneCycleLR\":\r\n        lr_scheduler = optim.lr_scheduler.OneCycleLR(optimizer, max_lr=opt[\"learning_rate\"],\r\n                                                     steps_per_epoch=opt[\"steps_per_epoch\"], epochs=opt[\"end_epoch\"], cycle_momentum=False)\r\n\r\n    elif opt[\"lr_scheduler_name\"] == \"ReduceLROnPlateau\":\r\n        lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode=opt[\"mode\"], factor=opt[\"factor\"],\r\n                                                            patience=opt[\"patience\"])\r\n    else:\r\n        raise RuntimeError(\r\n            f\"{opt['lr_scheduler_name']} is not a supported learning-rate scheduler!\")\r\n\r\n    return model, optimizer, lr_scheduler\r\n\r\n\r\n\r\ndef get_model(opt):\r\n    # Initialize the network model\r\n    if opt[\"model_name\"] == \"DenseVNet\":\r\n        model = DenseVNet(in_channels=opt[\"in_channels\"], classes=opt[\"classes\"])\r\n\r\n    elif opt[\"model_name\"] == \"UNet3D\":\r\n        model = UNet3D(opt[\"in_channels\"], opt[\"classes\"], final_sigmoid=False)\r\n\r\n    elif opt[\"model_name\"] == \"VNet\":\r\n        model = VNet(in_channels=opt[\"in_channels\"], classes=opt[\"classes\"])\r\n\r\n    elif opt[\"model_name\"] == \"AttentionUNet\":\r\n        model = AttentionU_Net(in_channels=opt[\"in_channels\"], out_channels=opt[\"classes\"])\r\n\r\n    elif opt[\"model_name\"] == \"R2UNet\":\r\n        model = R2U_Net(in_channels=opt[\"in_channels\"], out_channels=opt[\"classes\"])\r\n\r\n    elif opt[\"model_name\"] == \"R2AttentionUNet\":\r\n        model = R2AttentionU_Net(in_channels=opt[\"in_channels\"], out_channels=opt[\"classes\"])\r\n\r\n    elif opt[\"model_name\"] == \"HighResNet3D\":\r\n        model = HighResNet3D(in_channels=opt[\"in_channels\"], classes=opt[\"classes\"])\r\n\r\n    elif opt[\"model_name\"] == \"DenseVoxelNet\":\r\n        model = DenseVoxelNet(in_channels=opt[\"in_channels\"], classes=opt[\"classes\"])\r\n\r\n    elif opt[\"model_name\"] == \"MultiResUNet3D\":\r\n        model = MultiResUNet3D(in_channels=opt[\"in_channels\"], classes=opt[\"classes\"])\r\n\r\n    elif opt[\"model_name\"] == \"DenseASPPUNet\":\r\n        model = DenseASPPUNet(in_channels=opt[\"in_channels\"], classes=opt[\"classes\"])\r\n\r\n    elif opt[\"model_name\"] == \"PMRFNet\":\r\n        model = PMFSNet(in_channels=opt[\"in_channels\"], out_channels=opt[\"classes\"])\r\n\r\n    else:\r\n        raise RuntimeError(f\"{opt['model_name']} is not a supported network model!\")\r\n\r\n    # Move the model to the GPU\r\n    model = model.to(opt[\"device\"])\r\n\r\n    return 
model\r\n","repo_name":"yykzjh/PMFS-Net","sub_path":"my-code/lib/models/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"22515438842","text":"# Main file for AuSOME-ML\n# Input: Name of fsa file\n# Output: Suggested peaks with corresponding length (in bp)\n# Workflow:\n# \t\t\t1. Load model created from Hsc40_model.py\n# \t\t\t2. Read the input fsa file (access channel 1; the data for Hsc 40 loci, and channel 105; data for the size standard/ladder)\n# \t\t\t3. Detect all peaks in the file within the range of the size standard/ladder (35 bp - 500 bp)\n# \t\t\t4. Create a region (-40 +40) around each peak from which the 3 features will be extracted\n# \t\t\t5. Feed each of the regions in the model\n# \t\t\t6. Mark the peaks for which the model returned a value of 1 (true peak) and display the corresponding length (in bp)\n\nfrom Bio import SeqIO\nfrom ladder_fit import convert_to_bp, convert_to_index, find_lower, find_upper\nfrom findpeaks import findpeaks as fp\nimport numpy as np\nimport pandas as pd\nimport pickle\nfrom matplotlib import pyplot as plt\n\nmodel_name = 'Hsc40_model_RandomForest_improved.sav'\t\t\t\t\t\t\t\t\t\t\t\t\t\t# load model\nmodel = pickle.load(open(model_name, 'rb'))\n\nfile = pd.read_csv('test.csv')\na = 'DATA1'\nb = 'DATA2'\nc = 'DATA3'\nd = 'DATA4'\ne = 'DATA105'\ndye = [500, 490, 450, 400, 350, 340, 300, 250, 200, 160, 150, 139, 100, 75, 50, 35]\n\nlabel = []\narea_of_peaks = []\nlength = []\npeaks = []\nheight = []\ncorrect = 0\ntotal = 0\n\nfor i in range(len(file)):\n\tfilename = file.iat[i, 0]\n\talelle1 = file.iat[i, 1]\n\talelle2 = file.iat[i, 2]\n\n\tif filename == 'ROM_12_41_Hos' or filename == 'ROM_12_42_Hos' or filename == 'SAM_12_34_Hos' or filename == 'SAM_12_36_Hos':\n\t\tcontinue\n\n\tif filename != 'POP':\n\t\tif filename[0] == 'A':\n\t\t\tfilename = filename.replace('A_', '')\n\t\tif filename[4] == 'C':\n\t\t\tfilename = filename.replace('_CON', '')\n\t\telif filename[4] == 'T':\n\t\t\tfilename = filename.replace('_TIG', '')\n\t\tabif_file = 'A_' + filename + '.fsa'\n\t\trecord = SeqIO.read(abif_file, 'abi')\n\t\tprint('\\nopening fsa file ({}/{}): {}'.format(i,len(file),abif_file))\n\n\t\tdata = record.annotations['abif_raw'][a]\n\t\tladder = record.annotations['abif_raw'][e]\n\n\t\tif alelle1 == alelle2:\n\t\t\ttotal += 1\n\t\telse:\n\t\t\ttotal += 2\n\n\t\tindex_35 = find_lower(ladder, dye)\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# the index of the 35 bp peak\n\t\tindex_500 = find_upper(ladder, dye)\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# the index of the 500 bp peak\n\n\t\tdetected_peaks = fp.findpeaks(data, spacing=25, limit=25)\n\n\t\tfor i in range(len(detected_peaks)):\n\t\t\tlower_end_of_window = detected_peaks[i] - 40\t\t\t\t\t\t\t\t\t\t\t\t# Create region from which the features will be extracted\n\t\t\tupper_end_of_window = detected_peaks[i] + 40\n\t\t\tif lower_end_of_window < index_35 or upper_end_of_window > index_500:\t\t\t\t\t\t# ignore peaks outside the range of the ladder\n\t\t\t\tcontinue\n\n\t\t\tpeaks = detected_peaks[i]\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# store the index of the current peak\n\t\t\tarea_of_peaks = np.trapz(data[lower_end_of_window:upper_end_of_window])\t\t\t\t\t\t# get area under the curve in the region 'lower_end_of_window' to 'upper_end_of_window'\n\t\t\tlength = round(convert_to_bp(detected_peaks[i], ladder, dye))\n\t\t\theight = data[detected_peaks[i]]\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# the height of the peak (in RFU); 'data' maps each index to an intensity\n\n\t\t\tlabel = 
model.predict([[area_of_peaks, length, height]])\t\t\t\t\t\t\t\t\t# classify the candidate peak region with the trained model\n\n\t\t\tif label == 1:\n\t\t\t\tif length >= alelle1-2 and length <= alelle1+2 or length >= alelle2-2 and length <= alelle2+2:\n\t\t\t\t\tcorrect += 1\n\t\t\t\t# else:\n\t\t\t\t# \tcorrect -= 1\n\n\n\t\t# for i in range(len(peaks)-1):\n\t\t# \tlabel.append(model.predict([[area_of_peaks[i], length[i], height[i]]]))\t\t# feed the model the features of each region\n\n\n\t\t# print(len(label))\n\t\t# print(len(length))\n\t\t# for x in range(len(label)-1):\n\t\t# \tif label[x] == 1:\n\t\t# \t\tif length[x] in [alelle1-1, alelle1+1, alelle2-1, alelle2+1]:\n\t\t# \t\t\tcorrect += 1\n\n\naccuracy = correct/total\nprint(f'\\nTotal no. of correct calls is:\t{correct}')\nprint(f'Total no. of calls:\t\t{total}')\nprint(f'\\nThe accuracy is:\t\t{accuracy}')\n\n# for x in range(len(label)-1):\n# \tif label[x] == 1:\n# \t\tplt.plot(peaks[x], height[x], 'o', markersize=3, color='red', label=str(int(length[x])) + ' bp')\t\t\t# Mark the peak for which the model classified as true peak and display the corresponding length (in bp)\n# \t\tprint(length[x])\n\n\n# plt.plot(data, color='blue')\n# plt.plot(ladder, color='black')\n# plt.xlabel('Index', fontsize=16)\n# plt.ylabel('RFU (Relative Fluorescent Units)', fontsize=16)\n# plt.legend()\n# plt.show()","repo_name":"demapumpum/ladderSizing","sub_path":"src/AuSOME.py","file_name":"AuSOME.py","file_ext":"py","file_size_in_byte":4358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"2645527757","text":"#!/usr/bin/python3\nimport ga10pythonlib as ga10\nimport argparse\ncloseSessionPars = argparse.ArgumentParser()\n\n#add ip address, this is mandatory\ncloseSessionPars.add_argument(\"ip\", help= \":\")\n\ncloseSessionPars.add_argument(\"-s\",\"--session\", type=str, help = \"session id\")\n\ncloseSessionArguments = closeSessionPars.parse_args()\n\n\nga10.closeSession(closeSessionArguments.ip,closeSessionArguments.session)\n \n\n","repo_name":"nokia/AttestationEngine","sub_path":"attAutomation/closeSession.py","file_name":"closeSession.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"61"} {"seq_id":"23404325131","text":"'0 for test, 1 for small, 2 for large'\r\ntrying = 2\r\n\r\ndef solve(A, motes):\r\n    if len(motes) == 0:\r\n        return 0\r\n    index = 0\r\n    while index < len(motes) and A > motes[index]:\r\n        A += motes[index]\r\n        index += 1\r\n    choice1 = len(motes) - index\r\n    \r\n    if choice1 <= 0:\r\n        return 0\r\n    \r\n    numAdds = 0\r\n    if A == 1:\r\n        return choice1\r\n    else:\r\n        while A <= motes[index]:\r\n            A += (A-1)\r\n            numAdds += 1\r\n        choice2 = numAdds + solve(A, motes[index:])\r\n    \r\n    if choice1 < choice2:\r\n        return choice1\r\n    else:\r\n        return choice2\r\n\r\n\r\nif trying == 0:\r\n    fin = open('testA.txt', 'r')\r\nelif trying == 1:\r\n    fin = open('A-small.in', 'r')\r\nelse:\r\n    fin = open('A-large.in', 'r')\r\nfinput = fin.readlines()\r\nfin.close()\r\nit = iter(finput)\r\nnumbCases = (int)(it.next())\r\noutput = \"\"\r\n\r\nfor case in xrange(numbCases):\r\n    #print str(case+1)\r\n    firstline = it.next().rstrip().split()\r\n    A = int(firstline[0])\r\n    N = int(firstline[1])\r\n    motes = it.next().rstrip().split()\r\n    for i in range(len(motes)):\r\n        motes[i] = int(motes[i])\r\n    motes.sort()\r\n    \r\n    answer 
= solve(A, motes)\r\n    output += \"Case #%d: %d\\n\" % (case+1, answer)\r\n\r\n\r\n\r\n\r\nif trying == 0:\r\n    fout = open('testoutA.txt', 'w')\r\nelif trying == 1:\r\n    fout = open('smallA.txt', 'w')\r\nelse:\r\n    fout = open('largeA.txt', 'w')\r\nfout.write(output)\r\nfout.close()","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_123/411.py","file_name":"411.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"17569236313","text":"from Entorno import Entorno\nfrom Domain.objeto import Objeto\nfrom graficador import Graficador\nfrom AgenteGenetico import AgenteGenetico\nfrom time import time\n\n\n#A\nobje1= Objeto(1,1,1,2,2,2,(0,0,0))\n#B\nobje2= Objeto(5,1,2,2,2,4,(0,0,0))\n#C\nobje3= Objeto(2,6,1.5,2,5,3,(0,0,0))\n#D\nobje4= Objeto(7,6,2.5,4,6,5,(0,0,0))\n#E\nobje5= Objeto(7,4,5.5,1,1,1,(0,0,0))\n#F\nobje6= Objeto(7,8,6.5,1,1,3,(0,0,0))\n#G\nobje7= Objeto(2,4,3.5,1,1,1,(0,0,0))\n#H\nobje8= Objeto(2,7,3.5,1,1,1,(0,0,0))\n#I\nobje9= Objeto(2,6,5.5,2,5,3,(0,0,0))\n#J\nobje10= Objeto(2,4,7.5,1,1,1,(0,0,0))\n\nobjetos = [obje1, obje2, obje3, obje4, obje5, obje6, obje7, obje8, obje9, obje10]\nent = Entorno()\nent.objetos = objetos\nent.objetosAlEspacio(objetos)\n\n\nobjeto_test = Objeto(2,4,9,1,1,1,(5,5,5))\n\nagente = AgenteGenetico(256,ent,objeto_test)\nstart_time = time()\nagente.startGenetico()\nelapsed_time = time() - start_time\nprint(\"Execution time: %.10f seconds.\" % elapsed_time)\n\n# print(\"CHILDREN 1\")\n# for i in range(len(agente.evolucion_generacional_hijo1)):\n#     print()\n#     for j in range(len(agente.evolucion_generacional_hijo1[i])):\n#         print(round(agente.evolucion_generacional_hijo1[i][j].fitness, 2))\n\n# print()\n# print(\"CHILDREN 2\")\n# for i in range(len(agente.evolucion_generacional_hijo2)):\n#     print()\n#     for j in range(len(agente.evolucion_generacional_hijo2[i])):\n#         print(round(agente.evolucion_generacional_hijo2[i][j].fitness, 2))\n\n\n# for i in range(len(agente.valores_poblacion_inicial)):\n#     print(str(round(agente.valores_poblacion_inicial[i].fitness, 4)).replace(\".\", \",\"))\n\n# for i in range(100):\n#     agente = AgenteGenetico(8, ent, objeto_test)\n#     bestObjeto = agente.startGenetico(objetos)\n#     print(str(round(bestObjeto.fitness, 4)).replace(\".\", \",\"))\n\n\n","repo_name":"Landriou/SO3D-AR","sub_path":"Codigo/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} {"seq_id":"70163093954","text":"# Function that checks whether a given graph is a pseudograph\ndef is_pseudograph(edges):\n    edge_dict = {}\n    for edge in edges:\n        edge_tuple = tuple(sorted(edge))\n        if edge_tuple in edge_dict:\n            edge_dict[edge_tuple] += 1\n        else:\n            edge_dict[edge_tuple] = 1\n    for i in edge_dict.items():\n        if i[0][0] == i[0][1]:\n            return True\n    return False","repo_name":"Ronnildo/Grafos","sub_path":"functions/is_pseudograph/is_pseudograph.py","file_name":"is_pseudograph.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"11215975420","text":"#\n# [172] Factorial Trailing Zeroes\n#\n# https://leetcode.com/problems/factorial-trailing-zeroes/description/\n#\n# algorithms\n# Easy (36.85%)\n# Total Accepted:    112K\n# Total Submissions: 303.9K\n# Testcase Example:  '0'\n#\n# Given an integer n, return the number of trailing zeroes in n!.\n# \n# Note: 
Your solution should be in logarithmic time complexity.\n# \n# Credits:Special thanks to @ts for adding this problem and creating all test\n# cases.\n#\nclass Solution(object):\n    def trailingZeroes(self, n):\n        \"\"\"\n        :type n: int\n        :rtype: int\n        \"\"\"\n        # 6 stars, very elegant -- a must-know trick\n        # Every trailing zero comes from a factor 2*5, so counting the factors 2 and 5 among all the factors gives the number of trailing zeros.\n        # In practice there are always enough factors of 2, so only the factors of 5 need to be counted.\n        # Note that 25=5*5 carries two factors of 5 and 125=5*5*5 carries three. For example, count the trailing zeros of 135!.\n        # First 135/5 = 27, so there are 27 multiples of 5 up to 135; 27/5 = 5, so there are 5 multiples of 25;\n        # 5/5 = 1, so there is 1 multiple of 125. Adding the repeated counts, the number of factors of 5 up to 135 is 27+5+1 = 33.\n        rs = 0\n        while n > 0:\n            n //= 5\n            rs += n\n        return rs\n        \n        \n","repo_name":"goalong/lc","sub_path":"v1/172.factorial-trailing-zeroes.132728695.ac.py","file_name":"172.factorial-trailing-zeroes.132728695.ac.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"zh","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} {"seq_id":"24525444502","text":"\"\"\"https://leetcode.com/problems/longest-repeating-character-replacement/\n\nInput: a string s and int k\nOps: change one char to another\nOutput: the length of the longest substring containing the same letter after the replacement\n\nExamples:\n\n    ABAB, 2 => AAAA or BBBB => 4\n    AABABBA, 1 => AABBBBA => 4\n\nObservations:\n\n    * When we scan the input string from end to end, we know the count of each alphabet\n    * In the range we scan (window), there must be one longest substring X which contains the same letter\n\"\"\"\n\n# %%\n\nfrom collections import Counter\n\nclass Solution:\n    def characterReplacement(self, s: str, k: int) -> int:\n        win_alph_counter = Counter()\n        max_count = 0\n        win_start = 0\n        for win_end, a in enumerate(s):\n            win_alph_counter[a] += 1\n            max_count = max(max_count, win_alph_counter[a])\n            win_size = win_end - win_start + 1\n            if win_size - max_count > k:\n                win_alph_counter[s[win_start]] -= 1\n                win_start += 1\n\n        return win_end - win_start + 1\n    \n# Solution().characterReplacement(\"ABAB\", 2)\n# Solution().characterReplacement(\"AABABBA\", 1)\n# Solution().characterReplacement(\"AABA\", 0)\n# Solution().characterReplacement(\"BAAAB\", 2)\n","repo_name":"rickchung/Review.BoringExercises","sub_path":"data_structures/q424_longest_repchar_replace.py","file_name":"q424_longest_repchar_replace.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"71271798596","text":"\"\"\"\nCS5250 Assignment 4, Scheduling policies simulator\nSample skeleton program\nInput file:\n    input.txt\nOutput files:\n    FCFS.txt\n    RR.txt\n    SRTF.txt\n    SJF.txt\n\n\nDarren Wee Zhe Yu\nA0147609X\nAY 2018/19 Semester 2\n\"\"\"\n\nfrom typing import Tuple, List, Dict, Set\nfrom collections import deque\nfrom copy import deepcopy\n\n\nclass Process:\n    last_scheduled_time = 0\n\n    def __init__(self, id, arrive_time, burst_time):\n        self.id = id\n        self.arrive_time = arrive_time\n        self.burst_time = burst_time\n        self.time_remaining = burst_time\n        self.predicted_burst = None\n\n    def __repr__(self):\n        if self.predicted_burst is None:\n            return '[id %d : arrival_time %d, burst_time %d/%d]' % (self.id, self.arrive_time, self.time_remaining, self.burst_time)\n        return '[id %d : arrival_time %d, burst_time %d/%d(%s)]' % (self.id, self.arrive_time, self.time_remaining, self.burst_time, self.predicted_burst)\n\n\ndef FCFS_scheduling(process_list) -> Tuple[List[tuple], float]:\n    # store the (switching time, process_id) pair\n    schedule = []\n    current_time = 0\n    waiting_time = 0\n    for process in process_list:\n        if 
current_time < process.arrive_time:\n current_time = process.arrive_time\n schedule.append((current_time, process.id))\n waiting_time = waiting_time + (current_time - process.arrive_time)\n current_time = current_time + process.burst_time\n average_waiting_time = waiting_time / float(len(process_list))\n return schedule, average_waiting_time\n\n\n\"\"\"\nInput: process_list, time_quantum (Positive Integer)\nOutput_1 : Schedule list contains pairs of (time_stamp, proccess_id) indicating the time switching to that proccess_id\nOutput_2 : Average Waiting Time\n\"\"\"\n\n\ndef receive_arrivals(is_completed, processes, current_time, queue):\n # add all arriving tasks to queue\n for p in processes:\n # process already registered\n if p in is_completed:\n continue\n\n if p.arrive_time <= current_time:\n queue.append(p)\n is_completed[p] = False\n return\n\n\ndef RR_scheduling(process_list: List[Process], time_quantum: int = 10) -> Tuple[List[tuple], float]:\n schedule = list() # type: List[tuple]\n completed = dict() # type: Dict[Process, bool]\n last_processed = dict() # type: Dict[Process, int]\n waiting_time = 0\n\n done = 0\n first = process_list[0]\n work_queue = deque([first])\n t = first.arrive_time\n completed[first] = False\n\n prev_pid = None\n while True:\n if done == len(process_list):\n break\n\n # process everything in queue\n # print('\\nt = %3s; queue = %s' % (t, work_queue))\n\n try:\n current_process = work_queue.popleft() # type: Process\n except IndexError:\n # no work to do and no processes are arriving\n t += 1\n receive_arrivals(completed, process_list, t, work_queue)\n continue\n # print('t = %3s: scheduling %s' % (t, current_process))\n if prev_pid != current_process.id:\n schedule.append((t, current_process.id))\n\n # update waiting time between end of last processing period and start of this processing period\n if current_process in last_processed:\n waiting_time += t - last_processed[current_process]\n else:\n waiting_time += t - current_process.arrive_time\n\n if current_process.time_remaining > time_quantum:\n t += time_quantum\n current_process.time_remaining -= time_quantum\n else:\n t += current_process.time_remaining\n current_process.time_remaining = 0\n completed[current_process] = True\n done += 1\n # print('t = %3s: completed %s' % (t, current_process))\n\n # track timestamp of last processing (end of time slice)\n last_processed[current_process] = t\n\n receive_arrivals(completed, process_list, t, work_queue)\n\n prev_pid = current_process.id\n if not completed[current_process]:\n work_queue.append(current_process)\n return schedule, waiting_time / len(process_list)\n\n\ndef SRTF_scheduling(process_list: List[Process]) -> Tuple[List[tuple], float]:\n schedule = list() # type: List[tuple]\n completed = dict() # type: Dict[Process, bool]\n work_queue = deque([])\n waiting_time = 0\n done = 0\n t = 0\n\n previous_process = None\n while True:\n if done == len(process_list):\n break\n\n receive_arrivals(completed, process_list, t, work_queue)\n\n # sort by remaining time, then arrival time, then burst time and id\n work_queue = sorted(work_queue, key=lambda p: (p.time_remaining, p.arrive_time, p.burst_time, p.id))\n work_queue = deque(work_queue)\n # print('t = %3s: %s' % (t, work_queue))\n\n # get the highest priority process to work on\n try:\n current_process = work_queue.popleft() # type: Process\n\n # update waiting time\n waiting_time += len(work_queue)\n except IndexError:\n # no work to do and no processes are arriving\n t += 1\n 
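# re-scan for processes that arrived during the idle tick\n            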
receive_arrivals(completed, process_list, t, work_queue)\n continue\n\n # check if pre-emption/context switch is needed\n if current_process is not previous_process:\n # record new schedule\n schedule.append((t, current_process.id))\n\n current_process.time_remaining -= 1\n\n previous_process = current_process\n t += 1\n\n if current_process.time_remaining == 0:\n done += 1\n continue\n\n # add current process back to queue if not done\n work_queue.append(current_process)\n\n return schedule, waiting_time / len(process_list)\n\n\ndef SJF_scheduling(process_list: List[Process], alpha: float = 0.5, initial_guess: int = 5) -> Tuple[List[tuple], float]:\n schedule = list() # type: List[tuple]\n completed = dict() # type: Dict[Process, bool]\n work_queue = deque([])\n waiting_time = 0\n done = 0\n t = 0\n\n # burst time history on a per-pid basis\n burst_history = dict() # type: Dict[int, List[int]]\n prediction_history = dict() # type: Dict[int, List[float]]\n\n # populate initial guesses for burst\n for p in process_list:\n prediction_history[p.id] = [initial_guess]\n # affix burst history to 0 since the process has never run before\n burst_history[p.id] = [0]\n\n encountered_pids = set() # type: Set[int]\n while True:\n if done == len(process_list):\n break\n\n receive_arrivals(completed, process_list, t, work_queue)\n\n # compute burst predictions for all processes in queue\n for p in work_queue:\n # populate initial predictions\n if p.id not in encountered_pids:\n p.predicted_burst = initial_guess\n encountered_pids.add(p.id)\n\n # compute predictions for subsequent attempts thereafter\n if p.predicted_burst is None:\n prediction = alpha * burst_history[p.id][-1] + (1 - alpha) * prediction_history[p.id][-1]\n # print('pred history = %s' % prediction_history[p.id])\n # print('burst history = %s' % burst_history[p.id])\n # print('predicted burst is %s' % prediction)\n prediction_history[p.id].append(prediction)\n p.predicted_burst = prediction\n\n # sort by predicted burst time, then arrival time, then pid\n work_queue = sorted(work_queue, key=lambda proc: (proc.predicted_burst, proc.arrive_time, proc.id))\n work_queue = deque(work_queue)\n # print('t = %3s: %s' % (t, work_queue))\n\n # get the highest priority process to work on\n try:\n current_process = work_queue.popleft() # type: Process\n # print(current_process)\n except IndexError:\n # no work to do and no processes are arriving\n t += 1\n receive_arrivals(completed, process_list, t, work_queue)\n continue\n\n # schedule shortest predicted job w/o preemption\n schedule.append((t, current_process.id))\n\n # compute waiting time for the newly scheduled process\n waiting_time += t - current_process.arrive_time\n\n # advance scheduler timestamp\n t += current_process.burst_time\n burst_history[current_process.id].append(current_process.burst_time)\n\n done += 1\n\n return schedule, waiting_time / len(process_list)\n\n\ndef read_input(filename: str) -> List[Process]:\n result = []\n with open(filename) as f:\n for line in f:\n array = line.split()\n if len(array) != 3:\n print(\"wrong input format\")\n exit()\n result.append(Process(int(array[0]), int(array[1]), int(array[2])))\n return result\n\n\ndef write_output(file_name: str, schedule: List[Tuple], avg_waiting_time: float):\n with open(file_name, 'w') as f:\n for item in schedule:\n f.write(str(item) + '\\n')\n f.write('average waiting time %.2f \\n' % avg_waiting_time)\n\n\ndef main(input_file: str = 'input.txt'):\n process_list = read_input(input_file)\n # sort by arrival time\n 
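# sorted() is stable, so processes sharing an arrival time keep their input order\n    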
process_list = sorted(process_list, key=lambda p: p.arrive_time)\n\n print(\"printing input ----\")\n for process in process_list:\n print(process)\n\n print(\"simulating FCFS ----\")\n fcfs_schedule, fcfs_avg_waiting_time = FCFS_scheduling(deepcopy(process_list))\n write_output('FCFS.txt', fcfs_schedule, fcfs_avg_waiting_time)\n print('FCFS waiting time = %.5f' % fcfs_avg_waiting_time)\n\n print(\"simulating RR ----\")\n rr_schedule, rr_avg_waiting_time = RR_scheduling(deepcopy(process_list), time_quantum=2)\n write_output('RR.txt', rr_schedule, rr_avg_waiting_time)\n print('RR waiting time = %.5f' % rr_avg_waiting_time)\n\n print(\"simulating SRTF ----\")\n srtf_schedule, srtf_avg_waiting_time = SRTF_scheduling(deepcopy(process_list))\n write_output('SRTF.txt', srtf_schedule, srtf_avg_waiting_time)\n print('SRTF waiting time = %.5f' % srtf_avg_waiting_time)\n\n print(\"simulating SJF ----\")\n sjf_schedule, sjf_avg_waiting_time = SJF_scheduling(process_list, alpha=0.5)\n write_output('SJF.txt', sjf_schedule, sjf_avg_waiting_time)\n print('SJF waiting time = %.5f' % sjf_avg_waiting_time)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"darrenwee/cs5250-advanced-os","sub_path":"assignment-4/simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":10472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27997344054","text":"import random\nimport math\nimport bisect\nfrom typing import List\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom math import log2 as ln\nfrom math import exp\nfrom math import sqrt\nfrom math import pow\nfrom math import cos\nfrom math import sin\nfrom math import pi\nfrom math import floor\n\nfrom continuous import exp_gen\n\n\ndef uniform_discrete_gen(a: int, b: int) -> int:\n assert a <= b\n u = random.random()\n return a + floor((b - a + 1) * u)\n\n\ndef bernoulli_gen(p: float):\n assert 0 <= p <= 1\n u = random.random()\n return 1 if u <= p else 0\n\n\ndef categorical_dgen(probs: List[float]) -> int:\n assert abs(sum(probs) - 1.0) < 0.001\n cum = probs.copy()\n\n for i in range(1, len(cum)):\n cum[i] = cum[i-1] + probs[i]\n\n u = random.random()\n return bisect.bisect(cum, u)\n\n\ndef geometric_d_gen(p: float) -> int:\n success_times = 0\n while not bernoulli_gen(p):\n success_times += 1\n return success_times\n\n\ndef geometric_d_gen2(p: float) -> int:\n u = random.random()\n return floor(ln(u) / ln(1-p))\n\n\ndef hypergeometric_dgen():\n # bisect\n pass\n\ndef binomial_dgen(n: int, p: float) -> int:\n return sum(bernoulli_gen(p) for _ in range(n))\n\n\ndef binomial_geometric_dgen(n: int, p: float) -> int:\n total = 0\n i = 0\n while total <= n:\n y = geometric_d_gen(p)\n total += y + 1\n i += 1\n return i - 1\n\n\ndef binomial_exp_dgen(n: int, p: float) -> int:\n total = 0.0\n i = 0\n threshold = -ln(1-p)\n while total <= threshold:\n y = exp_gen(1.0)\n i += 1\n # print(f'{i}, (n - i + 1) = {(n - i + 1)}')\n if n - i + 1 == 0:\n return n\n total += y / (n - i + 1)\n return i - 1\n\ndef nagative_binomial_dgen(n: int, p: float) -> int:\n return sum(geometric_d_gen(p) for _ in range(n))\n\n\ndef poisson_dgen(lambdda: float) -> int:\n pass\n\n\n\ndef poisson_dgen(lambdda: float) -> int:\n pass\n\ndef plot_compare():\n sns.set_style(\"white\")\n\n data1 = [binomial_exp_dgen(6, 0.8) for i in range(2000)]\n data2 = [binomial_exp_dgen(6, 0.8) for i in range(2000)]\n # data2 = [binomial_geometric_dgen(6, 0.8) for i in range(1000)]\n\n # Plot\n kwargs = 
dict(hist_kws={'alpha':.6}, kde_kws={'linewidth':2, 'shade': True})\n\n plt.figure(figsize=(10,7), dpi= 80)\n sns.distplot(data1, color=\"dodgerblue\", label=\"Compact\", **kwargs)\n sns.distplot(data2, color=\"orange\", label=\"SUV\", **kwargs)\n # plt.xlim(50,75)\n plt.legend();\n print('done')\n\ndef plot_bernoulli(p: float, n=1000):\n # data = [bernoulli_gen(p) for i in range(n)]\n # data = [uniform_discrete_gen(3, 8) for i in range(n)]\n # data = [geometric_d_gen2(0.3) for i in range(n)]\n # data = [binomial_dgen(6, 0.8) for i in range(n)]\n # data = [binomial_geometric_dgen(6, 0.8) for i in range(n)]\n data = [binomial_exp_dgen(6, 0.8) for i in range(n)]\n # data = [nagative_binomial_dgen(6, 0.8) for i in range(n)]\n sns.distplot(data)\n plt.show()\n\n\nif __name__ == \"__main__\":\n plot_compare()\n # plot_bernoulli(0.3)\n # for _ in range(100):\n # print(categorical_dgen([0.1, 0.5, 0.15, 0.15, 0.1]))\n\n","repo_name":"MyEncyclopedia/stats_simulation","sub_path":"distrib_sim/discrete.py","file_name":"discrete.py","file_ext":"py","file_size_in_byte":3139,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"22397035675","text":"import os \r\nprint(\"Your available Hard Disks\")\r\nos.system(\"fdisk -l\")\r\ndisk1 = input(\"Enter the name of disk 1: \")\r\ndisk2 = input(\"Enter the name of disk 2: \")\r\nos.system(\"pvcreate {}\".format(disk1))\r\nos.system(\"pvcreate {}\".format(disk2))\r\nos.system(\"pvdisplay\")\r\nvolgrp = input(\"Enter volume group name: \") \r\nos.system(\"vgcreate {0} {1} {2}\".format(volgrp,disk1,disk2))\r\nlogvol=input(\"Enter the name for logical volume: \")\r\nsize = input(\"Enter the size for Logical Volume: \")\r\nos.system(\"lvcreate --size {0}G --name {1} {2}\".format(size,logvol,volgrp))\r\nos.system(\"mkfs.ext4 /dev/{0}/{1}\".format(volgrp,logvol))\r\nmountdir = input(\"Enter the path of directory to mount: \")\r\nos.system(\"mount /dev/{0}/{1} {2}\".format(volgrp,logvol,mountdir))\r\nos.system(\"df -h\")\r\ninput(\"click enter to continue\")\r\nexit()","repo_name":"rangamani54/Automation-with-Python","sub_path":"lvmorgrem.py","file_name":"lvmorgrem.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15111988498","text":"from django.forms import FloatField, IntegerField\nfrom . 
class AuthorBalanceForm(forms.ModelForm):\n balance = FloatField(disabled=True)\n author = IntegerField(disabled=True)\n WITHDRAW = 2\n REPLENISH = 1\n action = forms.ChoiceField(choices=((REPLENISH, \"Replenish\"), (WITHDRAW, \"Withdraw\")))\n change = FloatField(min_value=0)\n\n class Meta:\n model = models.AuthorBalance\n fields = ['author', 'balance']\n\n def clean(self):\n cleaned_data = super().clean()\n action = cleaned_data.get(\"action\")\n change = cleaned_data.get(\"change\")\n balance = cleaned_data.get(\"balance\")\n # ChoiceField cleans to a string, so compare against the stringified constant\n if action == str(self.WITHDRAW) and change > balance:\n self.add_error('change', 'The amount to withdraw cannot exceed the balance')\n return cleaned_data\n\n\nclass ChoiceForm(forms.ModelForm):\n class Meta:\n model = models.Choice\n fields = ['choice_text', 'votes']\n\n\nclass QuestionFormM(forms.ModelForm):\n class Meta:\n model = models.Question\n fields = ['author', 'question_text', 'pub_date']\n\n\nclass QuestionForm(forms.ModelForm):\n\n class Meta:\n model = models.Question\n exclude = ()\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_tag = True\n self.helper.form_class = 'form-horizontal'\n self.helper.label_class = 'col-md-3 create-label'\n self.helper.field_class = 'col-md-9'\n self.helper.layout = Layout(\n Div(\n Field('author'),\n Field('question_text'),\n Field('pub_date'),\n Fieldset('Add choices',\n Formset('choices')),\n HTML(\"<br>
    \"),\n ButtonHolder(Submit('submit', 'Save')),\n )\n )\n\n\nChoiceInlineFormset = inlineformset_factory(models.Question, models.Choice, extra=3, form=ChoiceForm)\n\n\nclass UserProfileForm(forms.ModelForm):\n class Meta:\n model = models.UserProfile\n fields = ['agreement_accepted']\n\n\n\n","repo_name":"IvanDuyun/django-polls-and-training","sub_path":"polls/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12005062647","text":"from PIL import Image\nimport Assignment5Support\n\ndef CalculateGradientFeatures(gradients):\n grid_length = int(len(gradients) / 3)\n\n features = []\n for i in range(3):\n for j in range(3):\n min_value = 0\n max_value = 0\n average = 0\n for a in range(grid_length):\n for b in range(grid_length):\n value = gradients[i * grid_length + a][j * grid_length + b] / 255\n min_value = min(value, min_value)\n max_value = max(value, max_value)\n average += value\n average /= grid_length * grid_length\n features.append(min_value)\n features.append(max_value)\n features.append(average)\n return features\n\ndef Featurize_Y_Gradient(x_raw):\n x = []\n for sample in x_raw:\n # y-gradient 9 grids of 8x8 pixels\n image = Image.open(sample)\n yGradients = Assignment5Support.Convolution3x3(image, [[1, 2, 1],[0, 0, 0],[-1, -2, -1]])\n yFeatures = CalculateGradientFeatures(yGradients)\n\n x.append(yFeatures)\n return x\n\ndef Featurize_X_Gradient(x_raw):\n x = []\n for sample in x_raw:\n # x-gradient 9 grids of 8x8 pixels\n image = Image.open(sample)\n xGradients = Assignment5Support.Convolution3x3(image, [[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])\n xFeatures = CalculateGradientFeatures(xGradients)\n\n x.append(xFeatures)\n return x\n\ndef CalculateHistogramFeatures(gradients):\n image_length = int(len(gradients))\n features = [0, 0, 0, 0, 0]\n\n gradients_abs = []\n for gradients_row in gradients:\n for value in gradients_row :\n gradients_abs.append(abs(value))\n max_gradient = max(gradients_abs)\n min_gradient = min(gradients_abs)\n\n for value in gradients_abs:\n value = (value - min_gradient)/(max_gradient - min_gradient)\n\n if value < 0.2:\n features[0] += 1\n elif value >= 0.2 and value < 0.4:\n features[1] += 1\n elif value >= 0.4 and value < 0.6:\n features[2] += 1\n elif value >= 0.6 and value < 0.8:\n features[3] += 1\n else:\n features[4] += 1\n\n for i in range(5):\n features[i] /= image_length * image_length\n return features\n\ndef Featurize_Y_Histogram(x_raw):\n x = []\n for sample in x_raw:\n # y-gradient histogram\n image = Image.open(sample)\n yGradients = Assignment5Support.Convolution3x3(image, [[1, 2, 1],[0, 0, 0],[-1, -2, -1]])\n yFeatures = CalculateHistogramFeatures(yGradients)\n\n x.append(yFeatures)\n return x\n\ndef Featurize_X_Histogram(x_raw):\n x = []\n for sample in x_raw:\n # x-gradient histogram\n image = Image.open(sample)\n xGradients = Assignment5Support.Convolution3x3(image, [[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])\n xFeatures = CalculateHistogramFeatures(xGradients)\n\n x.append(xFeatures)\n return x\n\ndef ByIntensities(xTrainRaw, xTestRaw, step = 2):\n xTrain = []\n for sample in xTrainRaw:\n xTrain.append(by_intensities_core(sample, step))\n\n xTest = []\n for sample in xTestRaw:\n xTest.append(by_intensities_core(sample, step))\n\n return (xTrain, xTest)\n\ndef by_intensities_core(path, step = 2):\n (pixels, xSize, ySize) = get_pixels(path)\n\n features = []\n for x in range(0, xSize, step):\n for y in range(0, 
ySize, step):\n features.append(pixels[x,y]/255.0)\n\n return features\n\ndef get_pixels(path):\n image = Image.open(path)\n\n xSize = image.size[0]\n ySize = image.size[1]\n\n return (image.load(), xSize, ySize)\n","repo_name":"BigEggStudy/UW-CSEP-546-Au18-Machine-Learning","sub_path":"Assignment6/Eye Blink Model/Featurize.py","file_name":"Featurize.py","file_ext":"py","file_size_in_byte":3667,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
{"seq_id":"31142637308","text":"import pusher\nfrom django.conf import settings\nfrom django.template.loader import render_to_string\n\npusher_client = pusher.Pusher(\n app_id=settings.PUSHER['ID'],\n key=settings.PUSHER['KEY'],\n secret=settings.PUSHER['SECRET'],\n cluster=settings.PUSHER['CLUSTER'],\n ssl=True\n)\n\n\ndef push_notification(channel, message):\n html = render_to_string('room/components/_message.html', {\n 'message': {'message': message},\n })\n pusher_client.trigger(channel, settings.PUSHER['SEND_MESSAGE'], html)\n\n\ndef push_message(message):\n html = render_to_string('room/components/_message.html', {\n 'message': message,\n 'showTime': True,\n })\n pusher_client.trigger(message.room, settings.PUSHER['SEND_MESSAGE'], html)\n","repo_name":"djpeacher/dice-room","sub_path":"room/pusher.py","file_name":"pusher.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"11487199988","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated by treeloys at 25.04.20\n\"\"\"\nimport socket\nimport sys\nimport threading\nfrom contextlib import closing\n\nfrom flask import Flask\n\n\n\nfrom qtviewer.qtviewer import WebUI\n\n\ndef find_free_port():\n \"\"\"Get a free port (bind to port 0 and let the OS pick one).\"\"\"\n with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:\n s.bind(('', 0))\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n return s.getsockname()[1]\n\n\ndef flaskRun(app, host, port, debug):\n app.run(host, port, debug, use_reloader=False)\n\n\ndef appRun(app, host=\"127.0.0.1\", port=None, debug=True, width=800, height=600, using_win32=False,\n title=\"My Flask app\"):\n port = port if port else find_free_port()\n\n print(host)\n if using_win32:\n import pythoncom\n pythoncom.CoInitialize()\n # app.run(debug=debug, host=host, port=port, use_reloader=False)\n flask_thread = threading.Thread(target=flaskRun, args=(app, host, port, debug,))\n flask_thread.daemon = True\n flask_thread.start()\n print(host, port)\n wu = WebUI(title=title, url=host, port=port, width=width, height=height)\n\n\n\n\nif __name__ == \"__main__\":\n app = Flask(__name__)\n
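 # Demo: serve a minimal page and display it inside the Qt web view.\n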
 @app.route(\"/\")\n def index():\n return \"Welcome to app
    \"\n\n appRun(app, host=\"localhost\", width=100, height=100)","repo_name":"TreeLoys/qtviewer","sub_path":"flaskrun.py","file_name":"flaskrun.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2566979883","text":"import sys\nfrom PyQt4.QtGui import QApplication, QMainWindow, QFileDialog\nfrom PyQt4.QtGui import QImage, QPixmap\nfrom mainWindow import Ui_MainWindow\nfrom subprocess import call\nimport os\n\n__author__ = 'drluke'\n\n\nclass Engraver:\n def __init__(self, ui):\n self.ui = ui\n \n self.imageOrig = None\n \n self.width = self.ui.doubleSpinBoxWidth.value()\n self.height = self.ui.doubleSpinBoxHeight.value()\n self.aspectRatio = None\n self.origAspectRatio = None\n self.aspectChecked = self.ui.checkBoxAspect.checkState()\n self.safez = self.ui.doubleSpinBoxSafez.value()\n self.depth = self.ui.doubleSpinBoxDepth.value()\n self.loweringspeed = self.ui.spinBoxLoweringspeed.value()\n\n self.imageOrig = None\n self.imageR = None\n self.imageRd = None\n self.imageG = None\n self.imageGd = None\n self.imageB = None\n self.imageBd = None\n\n # Connect all signals from ui\n self.ui.doubleSpinBoxWidth.valueChanged.connect(self.onWidthChanged)\n self.ui.doubleSpinBoxHeight.valueChanged.connect(self.onHeightChanged)\n self.ui.checkBoxAspect.stateChanged.connect(self.onAspectChecked)\n self.ui.buttonResetAspect.clicked.connect(self.onResetAspect)\n self.ui.doubleSpinBoxSafez.valueChanged.connect(self.onSafezChanged)\n self.ui.doubleSpinBoxDepth.valueChanged.connect(self.onDepthChanged)\n self.ui.spinBoxLoweringspeed.valueChanged.connect(self.onLoweringspeedChanged)\n\n self.ui.radioButtonOrig.clicked.connect(self.showImage)\n self.ui.radioButtonR.clicked.connect(self.showImage)\n self.ui.radioButtonG.clicked.connect(self.showImage)\n self.ui.radioButtonB.clicked.connect(self.showImage)\n self.ui.checkBoxDither.toggled.connect(self.showImage)\n\n self.ui.buttonLoad.clicked.connect(self.onButtonLoadPressed)\n self.ui.buttonGenerate.clicked.connect(self.onButtonGeneratePressed)\n\n def onWidthChanged(self, newWidth):\n self.width = newWidth\n if(self.aspectRatio is not None):\n self.height = self.width * (1/self.aspectRatio)\n self.ui.doubleSpinBoxHeight.setValue(self.height)\n\n def onHeightChanged(self, newHeight):\n self.height = newHeight\n if(self.aspectRatio is not None):\n self.width = self.height * self.aspectRatio\n self.ui.doubleSpinBoxWidth.setValue(self.width)\n\n def onAspectChecked(self, newState):\n self.aspectChecked = newState\n if self.aspectChecked:\n self.aspectRatio = self.width/self.height\n else:\n self.aspectRatio = None\n\n def onResetAspect(self):\n if self.origAspectRatio is not None:\n if self.aspectChecked:\n self.aspectRatio = self.origAspectRatio\n self.height = self.width * (1/self.origAspectRatio)\n self.ui.doubleSpinBoxHeight.setValue(self.height)\n\n def onSafezChanged(self, newSafez):\n self.safez = newSafez\n\n def onDepthChanged(self, newDepth):\n self.depth = newDepth\n\n def onLoweringspeedChanged(self, newLoweringspeed):\n self.loweringspeed = newLoweringspeed\n\n def onButtonLoadPressed(self):\n filedialog = QFileDialog()\n filedialog.setFileMode(QFileDialog.ExistingFile)\n self.ui.progressBar.setValue(0)\n if(filedialog.exec()):\n self.imageOrig = QImage()\n if self.imageOrig.load(filedialog.selectedFiles()[0]):\n self.imagePath = filedialog.selectedFiles()[0]\n\n self.width = float(self.imageOrig.width())\n 
self.ui.doubleSpinBoxWidth.setValue(self.width)\n self.height = float(self.imageOrig.height())\n self.ui.doubleSpinBoxHeight.setValue(self.height)\n self.origAspectRatio = self.width/self.height\n if(self.aspectChecked):\n self.aspectRatio = self.origAspectRatio\n else:\n self.aspectRatio = None\n\n self.ui.progressBar.setValue(40)\n call([\"convert\", self.imagePath, \"-channel\", \"R\", \"-separate\", self.imagePath[:-4] + \"_R\" + self.imagePath[-4:]])\n self.ui.progressBar.setValue(50)\n self.imageR = QImage()\n self.imageR.load(self.imagePath[:-4] + \"_R\" + self.imagePath[-4:])\n call([\"convert\", self.imagePath[:-4] + \"_\" + \"R\" + self.imagePath[-4:], \"-colorspace\", \"Gray\" ,\"-ordered-dither\", \"o8x8\" , self.imagePath[:-4] + \"_R\" + \"_d\" + self.imagePath[-4:]])\n self.ui.progressBar.setValue(60)\n self.imageRd = QImage()\n self.imageRd.load(self.imagePath[:-4] + \"_R\" + \"_d\" + self.imagePath[-4:])\n\n call([\"convert\", self.imagePath, \"-channel\", \"G\", \"-separate\", self.imagePath[:-4] + \"_G\" + self.imagePath[-4:]])\n self.ui.progressBar.setValue(70)\n self.imageG = QImage()\n self.imageG.load(self.imagePath[:-4] + \"_G\" + self.imagePath[-4:])\n call([\"convert\", self.imagePath[:-4] + \"_\" + \"G\" + self.imagePath[-4:], \"-colorspace\", \"Gray\" ,\"-ordered-dither\", \"o8x8\", self.imagePath[:-4] + \"_G\" + \"_d\" + self.imagePath[-4:]])\n self.ui.progressBar.setValue(80)\n self.imageGd = QImage()\n self.imageGd.load(self.imagePath[:-4] + \"_G\" + \"_d\" + self.imagePath[-4:])\n\n call([\"convert\", self.imagePath, \"-channel\", \"B\", \"-separate\", self.imagePath[:-4] + \"_B\" + self.imagePath[-4:]])\n self.ui.progressBar.setValue(90)\n self.imageB = QImage()\n self.imageB.load(self.imagePath[:-4] + \"_B\" + self.imagePath[-4:])\n call([\"convert\", self.imagePath[:-4] + \"_\" + \"B\" + self.imagePath[-4:], \"-colorspace\", \"Gray\" ,\"-ordered-dither\", \"o8x8\", self.imagePath[:-4] + \"_B\" + \"_d\" + self.imagePath[-4:]])\n self.ui.progressBar.setValue(100)\n self.imageBd = QImage()\n self.imageBd.load(self.imagePath[:-4] + \"_B\" + \"_d\" + self.imagePath[-4:])\n\n self.ui.buttonGenerate.setEnabled(True)\n\n else:\n print(\"Error with loading file '\" + str(filedialog.selectedFiles()[0]) + \"'\")\n\n self.showImage()\n\n def onButtonGeneratePressed(self):\n filedialog = QFileDialog(caption=\"Save GCode as\", directory=os.path.splitext(self.imagePath)[0] + \".ngc\", filter=\"GCode Files (*.ngc);;All Files (*)\")\n filedialog.setFileMode(QFileDialog.AnyFile)\n filedialog.setAcceptMode(QFileDialog.AcceptSave)\n filedialog.setDefaultSuffix(\"nc\")\n\n if(filedialog.exec()):\n with open(os.path.splitext(filedialog.selectedFiles()[0])[0] + \"_r\" + os.path.splitext(filedialog.selectedFiles()[0])[1], \"w\") as f:\n f.write(self.generateGCode(self.imageRd))\n with open(os.path.splitext(filedialog.selectedFiles()[0])[0] + \"_g\" + os.path.splitext(filedialog.selectedFiles()[0])[1], \"w\") as f:\n f.write(self.generateGCode(self.imageGd))\n with open(os.path.splitext(filedialog.selectedFiles()[0])[0] + \"_b\" + os.path.splitext(filedialog.selectedFiles()[0])[1], \"w\") as f:\n f.write(self.generateGCode(self.imageBd))\n\n def generateGCode(self, image):\n gcode = \"\"\n\n gcode += \"( Ditherfraes )\\n\"\n if self.loweringspeed == 0:\n gcode += \"G94\\nG21\\nG90\\nG64 P0.1\\nF100.0\\nM3\\nG00 Z\"+str(self.safez)+\"\\n\\n\\n\"\n else:\n gcode += \"G94\\nG21\\nG90\\nG64 P0.1\\nF\"+str(self.loweringspeed) + \"\\nM3\\nG00 Z\"+str(self.safez)+\"\\n\\n\\n\"\n\n 
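# Visit every white pixel (white = engrave) column by column, reversing every second column to minimise travel moves.\n 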
coordsList = []\n for x in range(image.width()):\n currentLine = []\n for y in range(image.height()):\n curpixel = image.pixel(x,y)\n\n if curpixel == 4294967295: # 4294967295 == White\n currentLine.append( (self.width * (x/image.width()), self.height * (1 - y/image.height())) )\n if((x-1) % 2):\n currentLine.reverse() # Reverse list on every second line\n coordsList += currentLine\n\n for coord in coordsList:\n gcode += \"G0 X\" + str(\"%.2f\" % coord[0]) + \" Y\" + str(\"%.2f\" % coord[1]) + \"\\n\"\n if self.loweringspeed == 0:\n gcode += \"G0 Z-\" + str(\"%.2f\" % self.depth) + \"\\n\"\n else:\n gcode += \"G1 Z-\" + str(\"%.2f\" % self.depth) + \"\\n\"\n gcode += \"G0 Z\" + str(\"%.2f\" % self.safez) + \"\\n\\n\"\n\n gcode += \"M5\\n\"\n gcode += \"M2 (Program End)\\n\"\n\n return gcode\n\n\n\n\n def showImage(self):\n if self.imageOrig is not None:\n if self.ui.checkBoxDither.checkState():\n if self.ui.radioButtonOrig.isChecked():\n self.pixmap = QPixmap.fromImage(self.imageOrig)\n elif self.ui.radioButtonR.isChecked():\n self.pixmap = QPixmap.fromImage(self.imageRd)\n elif self.ui.radioButtonG.isChecked():\n self.pixmap = QPixmap.fromImage(self.imageGd)\n elif self.ui.radioButtonB.isChecked():\n self.pixmap = QPixmap.fromImage(self.imageBd)\n else:\n if self.ui.radioButtonOrig.isChecked():\n self.pixmap = QPixmap.fromImage(self.imageOrig)\n elif self.ui.radioButtonR.isChecked():\n self.pixmap = QPixmap.fromImage(self.imageR)\n elif self.ui.radioButtonG.isChecked():\n self.pixmap = QPixmap.fromImage(self.imageG)\n elif self.ui.radioButtonB.isChecked():\n self.pixmap = QPixmap.fromImage(self.imageB)\n\n self.ui.imageLabel.setPixmap(self.pixmap)\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n window = QMainWindow()\n ui = Ui_MainWindow()\n ui.setupUi(window)\n\n eng = Engraver(ui)\n\n window.show()\n sys.exit(app.exec())\n","repo_name":"DrLuke/acrylengraver","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23567223461","text":"import sys\n\n \ndef load_info(f):\n line = f.readline()\n out = int(line)\n return out \n\ndef my_function(f, case_num):\n \n class counter(dict):\n def __missing__(self,key):\n return 0\n class my_list(list):\n stall_counter = counter()\n def insert_sort(self,x):\n i = -1\n while True:\n if self[i] == x:\n break\n elif self[i] > x:\n if i == -1:\n self.append(x)\n else:\n self.insert(i+1,x)\n break\n i -= 1\n \n print_info = []\n line = f.readline().split(' ')\n N, K = [int(x) for x in line]\n i = 0\n stalls = my_list()\n stalls.append(N)\n stalls.stall_counter[N] += 1\n while i < K-1:\n temp = stalls[0]/2\n stalls.stall_counter[stalls[0]] -= 1\n \n if stalls[0] == 1:\n pass\n elif stalls[0] == 2:\n stalls.insert_sort(1)\n stalls.stall_counter[1] += 1\n elif stalls[0]%2 == 1:\n stalls.insert_sort(temp)\n stalls.stall_counter[temp] += 2\n else:\n stalls.insert_sort(temp)\n stalls.stall_counter[temp] += 1\n stalls.insert_sort(temp-1)\n stalls.stall_counter[temp-1] += 1\n \n if not stalls.stall_counter[stalls[0]]:\n stalls.pop(0)\n \n i += 1\n \n if stalls[0] == 1:\n res = (0,0)\n elif stalls[0] == 2:\n res = (1,0)\n else:\n temp = stalls[0]/2\n if stalls[0]%2 == 1:\n res = (temp,temp)\n else:\n res = (temp,temp-1)\n print_info.append('Case #' + str(case_num+1) + ': %d %d' % (res))\n print_info.append('\\n')\n\n return print_info\n \nif __name__ == '__main__':\n while True:\n 
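# Interactive driver: keep processing input/output file pairs until the user types 'exit'.\n 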
print('\\n---------------------------')\n print(\"Type in the input and output files' names separated by a white space, or 'exit' when you're done, thx. \")\n \n sys.stdout.write('Problem Name > ')\n input_args = str(raw_input())\n try:\n if input_args == 'exit':\n break\n elif input_args == '':\n continue\n else:\n args = input_args.split(' ')\n input_file = args[0]\n output_file = args[1]\n except:\n print('Invalid input. Please try again.')\n else:\n f_in = open(input_file)\n f_out = open(output_file, 'w')\n \n num_of_tests = load_info(f_in)\n try:\n for x in xrange(num_of_tests):\n f_out.writelines(my_function(f_in, x))\n## except:\n## print('something wrong')\n## else:\n## print('File is built successfully!!!!')\n## print('---------------------------\\n')\n finally: \n f_in.close()\n f_out.close()\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/1398.py","file_name":"1398.py","file_ext":"py","file_size_in_byte":3040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33705580539","text":"from core import colors\n\nversion = \"3.1-alpha\"\napiversion = \"1.0.1-alpha\"\nupdate_date = \"3.1.2017\"\ncodename = \"uranium\"\n\nabout = (\"Hakku Framework \"+version+\" \"+codename+\n\"\\nauthor: Noa-Emil Nissinen (4shadoww)\"\n\"\\nemail: 4shadoww0@gmail.com\"\n\"\\ngithub: 4shadoww\")","repo_name":"ethicalhackeragnidhra/hakku","sub_path":"core/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"71791830274","text":"import os\nimport torch\n\nfrom utils.dataset import get_dataloaders, sample_data\nfrom utils.util import load_config, get_confusion_matrix, get_f1_score, get_accuracy\nfrom models.pretrained import build_pretrained_model\nfrom models.model import MyCNN\nfrom models.trainer import Trainer\nfrom visualization.explainer import Explainer\n\n\ndef main(config):\n # Load data loaders and model\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n loader_train, loader_test, loader_valid = get_dataloaders(**config[\"dataloader\"])\n\n if config[\"use_pretrained_model\"]:\n model = build_pretrained_model(config[\"model_name\"])\n else:\n model = MyCNN(im_size=config[\"dataloader\"][\"im_size\"],\n config_block=config[\"model\"][\"block\"],\n n_out_channels=config[\"model\"][\"n_out_channels\"],\n n_hidden_neurons=config[\"model\"][\"n_hidden_neurons\"])\n\n # Train model\n trainer = Trainer(model=model,\n loader_train=loader_train,\n loader_valid=loader_valid,\n params_optim=config[\"params_optim\"],\n params_lr_scheduler=None, # config[\"params_lr_scheduler\"],\n device=device,\n **config[\"trainer\"])\n\n trainer.train(n_iter=config[\"train\"][\"n_iter\"])\n\n # Test model on new observations (to see model's performance in a real life scenario)\n conf_mat_test = get_confusion_matrix(model=trainer.model,\n threshold=config[\"trainer\"][\"threshold\"],\n data_loader=loader_test,\n device=device)\n f1_test = round(get_f1_score(conf_mat_test), 3)\n acc_test = round(get_accuracy(conf_mat_test), 3)\n print(\"On the test set, F1 score = {} and accuracy = {}\".format(f1_test, acc_test))\n\n # Explain some predictions with SHAP values\n e = Explainer(model, loader_train, device, savepath=\"./saved/figures\")\n images_test, label_test = sample_data(loader_test, 5)\n e.fit()\n e.explain(images_test, label_test)\n\n\nif __name__ == \"__main__\":\n 
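# Entry point: read config.yaml from the working directory, then train, evaluate and explain.\n 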
CONFIG_PATH = os.getcwd()\n CONFIG_NAME = \"config.yaml\"\n config = load_config(CONFIG_PATH, CONFIG_NAME)\n main(config)\n","repo_name":"Arnautt/CNN-Malaria-detection","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9921162752","text":"import os\nimport re\n\nfrom flask import Flask, render_template, request, url_for\nfrom pip._vendor import requests\nfrom werkzeug.utils import redirect\nimport os.path\nfrom os import path\nfrom Model import Test\nfrom Model.Test import combine_Lists, combine_Lists_html, generate_xml, format_both, removeHTMLCode, format_hebrew, \\\n format_english\nfrom Model.model import synth_audio_file\n\napp = Flask(__name__)\nlanguage = \"\"\nhebrew_text = \"\"\nenglish_text = \"\"\nboth = \"\"\naudio_filepath = \"\"\nnew_path = \"\"\n\n\n# prime_categories = ['Tanakh', 'Mishnah', 'Talmud', 'Midrash', 'Halakhah', 'Kabbalah', 'Liturgy', 'Jewish Thought',\n# 'Tosefta', 'Chasidut', 'Musar', 'Responsa', 'Second Temple', 'Reference']\n\ndef get_api_url(url):\n source = re.search(r\"[^/]*/[^/]*/[^/]*/([^.]*.[^.]|[^?])\", url)\n source = source.group(1)\n return source\n\n\n@app.route('/', methods=['POST', 'GET'])\ndef index():\n global hebrew, english, language, hebrew_text, english_text, both, audio_filepath, new_path\n if request.method == 'POST':\n url = request.form[\"url_input\"]\n print(url)\n voice = request.form[\"Voices\"]\n print(\"Voice: \" + voice)\n source = get_api_url(url)\n api_url = \"https://www.sefaria.org/api/texts/\" + source\n response = requests.get(api_url)\n hebrew_text = response.json()['he'][:50]\n english_text = response.json()['text'][:50]\n both = '
<br>'.join(combine_Lists_html(hebrew_text, english_text)[:50])\n #############\n hebrew = removeHTMLCode(response.json()['he'][:50])\n english = removeHTMLCode(response.json()['text'][:50])\n # Check voice\n if voice == \"English\":\n xml = format_english(english)\n elif voice == \"Hebrew\":\n xml = format_hebrew(hebrew)\n else:\n xml = format_both(hebrew, english)\n # generate xml, create audio, and relocate audio to static\\audio\n file_name = source + \"+\" + voice\n file_name = file_name.replace(\".\", \"\")\n file_name = file_name + \".wav\"\n if path.exists(app.root_path + '\\\\static\\\\audio\\\\' + file_name):\n print(\"FILE ALREADY EXISTS\", file_name)\n new_path = \"static\\\\audio\\\\\" + file_name\n generate_xml(format_both(hebrew, english))\n else:\n synth_audio_file(generate_xml(xml), file_name)\n print(\"CREATED NEW FILE:\", app.root_path, '\\\\static\\\\audio\\\\', file_name)\n new_abs_path = app.root_path + '\\\\static\\\\audio\\\\' + file_name\n os.rename(app.root_path + '\\\\' + file_name, new_abs_path)\n new_path = \"static\\\\audio\\\\\" + file_name\n return render_template('index.html', language=language, text_he=hebrew_text, text_en=english_text, both=both,\n audio_filepath=new_path)\n\n\nif __name__ == '__main__':\n app.run(debug=False,host='0.0.0.0')\n","repo_name":"gershonb/Sefaria_Flask","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"1640576162","text":"from typing import List\nimport math\n\n\nclass Solution:\n def minSpeedOnTime(self, dist: List[int], hour: float) -> int:\n start, end = 1, 10 ** 7\n max_speed = -1\n\n while start <= end:\n mid = (start + end) // 2\n dist_hours = [math.ceil(distance / mid) for distance in dist[:len(dist) - 1]]\n dist_hours += [dist[-1] / mid]\n dist_sum = sum(dist_hours)\n\n if dist_sum > hour:\n start = mid + 1\n elif dist_sum <= hour:\n end = mid - 1\n max_speed = mid\n\n return max_speed\n\n\nif __name__ == '__main__':\n dist = [1, 3, 2]\n hour = 6\n print(Solution().minSpeedOnTime(dist, hour))","repo_name":"amogchandrashekar/Leetcode","sub_path":"Medium/Minimum Speed to Arrive on Time.py","file_name":"Minimum Speed to Arrive on Time.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
{"seq_id":"44671731","text":"from flask import Flask, request, Response\nimport json, os\nfrom vild import ViLDDetector\n\n\nos.environ[\"DISPLAY\"]=\":1.0\"\n\nvilder = ViLDDetector()\n# vilder = FakeViLDDetector()\napp = Flask(__name__)\n\n@app.route('/api/vild', methods=['GET', 'POST'])\ndef vild():\n if request.method == 'GET':\n response = 'This is the ViLD server created by Xufeng Zhao. Contact: xufeng.zhao@uni-hamburg.de'\n
 else:\n print(request.json)\n found_objects = vilder.detect(**request.json)\n response = json.dumps(found_objects)\n return Response(response=response, status=200, mimetype=\"application/json\")\n\n\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\", port=8848)\n","repo_name":"xf-zhao/Matcha-agent","sub_path":"ViLD/vild_server.py","file_name":"vild_server.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"61"}
{"seq_id":"6268468182","text":"import time\nimport serial #Need to enable uart in /boot/config.txt and disable uart shell communication on raspi-config\n\nclass Oxyreader:\n def __init__(self):\n self.ser = serial.Serial(\n '/dev/serial0', 9600, parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE,\n bytesize=serial.EIGHTBITS\n )\n\n #Set output mode on Poll\n tx = 'M 1\\r\\n'.encode()\n self.ser.write(tx)\n self.ser.readline()\n\n def getppO2(self):\n\n tx = 'O\\r\\n'.encode()\n\n self.ser.write(tx)\n resp = self.ser.readline().decode('UTF-8').split()\n\n return resp[1]\n\n def getpercO2(self):\n\n tx = '%\\r\\n'.encode()\n\n self.ser.write(tx)\n resp = self.ser.readline().decode('UTF-8').split()\n\n return resp[1]\n\n def gettempO2(self):\n\n tx = 'T\\r\\n'.encode()\n\n self.ser.write(tx)\n resp = self.ser.readline().decode('UTF-8').split()\n\n return resp[1]\n\n def getpressO2(self):\n\n tx = 'P\\r\\n'.encode()\n\n self.ser.write(tx)\n resp = self.ser.readline().decode('UTF-8').split()\n\n return resp[1]\n","repo_name":"micvogel/tempcontroller","sub_path":"oxyreader.py","file_name":"oxyreader.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"37998766678","text":"# own modules\nimport sys\nimport matplotlib.pyplot as plt\n\n# --- QISKIT ---\nfrom qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister\nfrom qiskit.quantum_info.operators import Operator # type: ignore\n\nfrom qiskit.quantum_info import Pauli\nfrom qiskit.opflow.list_ops import SummedOp\nfrom qiskit.opflow.primitive_ops import PauliOp\n\n# additional math libs\nimport numpy as np # type: ignore\nfrom scipy.constants import pi # type: ignore\n\nfrom typing import Union, Optional, List, Tuple, Any, Dict\nimport itertools\n\nimport logging\nlogger = logging.getLogger(__name__)\n\ndef init_quantum_circuit(num_qubits, num_cbits=0, name: Optional[str] = None) -> Tuple[QuantumCircuit, QuantumRegister, ClassicalRegister]:\n \"\"\"\n Initializes a QuantumCircuit using num_qubits qubits and num_cbits classical bits.\n\n Args:\n num_qubits (int): The number of qubits.\n num_cbits (int, optional): The number of classical bits. Defaults to 0.\n name (Optional[str], optional): The quantum circuit's name. 
Defaults to None.\n\n Returns:\n Tuple[QuantumCircuit, QuantumRegister, ClassicalRegister]: The initialized QuantumCircuit and its QuantumRegister and ClassicalRegister\n \"\"\" \n # init register for quantum and classical bits\n q_register = QuantumRegister(num_qubits, 'q')\n c_register = ClassicalRegister(num_cbits, 'c') if num_cbits > 0 else None\n\n # init quantum circuit\n circ = QuantumCircuit(num_qubits, num_cbits, name=name) if c_register else QuantumCircuit(num_qubits, name=name)\n return circ, q_register, c_register\n\ndef generate_canonical_circuit_all_neurons(qnn_arch_all:list,\n params: List[Any],\n layer: int) -> QuantumCircuit: \n \"\"\"\n Creates a QuantumCircuit containing the parameterized CAN gates (plus single qubit U3 gates).\n The definition of the CAN gates is taken from https://arxiv.org/abs/1905.13311.\n The parameters should have length self.qnn_arch[0]*self.qnn_arch[1]*6 + self.qnn_arch[1]*6.\n\n Args:\n params (List[Any]): List of parameters for the parametrized gates. ParameterVector or List[float].\n layer (int): Index of the current output layer.\n Returns:\n QuantumCircuit: Quantum circuit containing all the parameterized gates of the respective layer.\n \"\"\"\n # sub-architecture of the layer (length 2)\n qnn_arch = qnn_arch_all[layer-1:layer+1]\n # number of qubits required for the layer\n num_qubits = qnn_arch[0]+qnn_arch[1]\n \n # initialize the quantum circuit\n circ, q_reg, _ = init_quantum_circuit(num_qubits)\n \n # add U3s to input qubits\n circ = add_one_qubit_gates(circ, q_reg[:qnn_arch[0]], params[:qnn_arch[0]*3])\n \n # loop over all neurons\n for i in range(qnn_arch[1]):\n # parameters of the respective \"neuron gates\"\n # (can be larger than needed, overflow will be ignored)\n neuron_params = params[qnn_arch[0]*3 + qnn_arch[0]*3*i:]\n # iterate over all input neurons and apply CAN gates\n for j in range(qnn_arch[0]):\n tx, ty, tz = neuron_params[j*3:(j+1)*3]\n circ.rxx(2*tx, q_reg[j], q_reg[qnn_arch[0]+i])\n circ.ryy(2*ty, q_reg[j], q_reg[qnn_arch[0]+i])\n circ.rzz(2*tz, q_reg[j], q_reg[qnn_arch[0]+i])\n\n return circ\n\ndef add_one_qubit_gates(circ: QuantumCircuit, \n q_reg: QuantumRegister, \n params: List[float]) -> QuantumCircuit:\n \"\"\"\n Adds U3 gates (if u3=True else RX, RY, RX gates) to each qubit in the quantum register.\n\n Args:\n circ (QuantumCircuit): The quantum circuit.\n q_reg (QuantumRegister): The quantum register containing the qubits.\n params (List[float]): List of parameters (used for the one qubit gates). Should be a multiple of 3.\n u3 (bool): Whether U3 gates should be used. 
Defaults to True.\n\n Returns:\n QuantumCircuit: The given quantum circuit including the application of one qubit gates.\n \"\"\"\n for i, qubit in enumerate(q_reg):\n circ.u(params[i*3], params[i*3+1], params[i*3+2], qubit)\n return circ\n\ndef transform_the_hamiltonian(hamiltonian:Any,\n num_qubits:int,\n ep_cont:Any) -> Any:\n \"\"\"\n transform the hamiltonian to the qiskit operator base\n \"\"\"\n Ham_op=[]\n\n ii = 0\n for term in hamiltonian.terms:\n op_str=''\n # for constant terms\n if len(term) == 0:\n for ik in range(num_qubits):\n op_str+='I'\n # for full terms with num_qubits\n elif len(term) == num_qubits:\n for ep_gate in term:\n if ep_gate[1] == 'X':\n op_str=op_str+'X'\n elif ep_gate[1] == 'Y':\n op_str=op_str+'Y'\n elif ep_gate[1] == 'Z':\n op_str=op_str+'Z'\n # for the case which Pauli terms less than num_qubits\n # add the identity gate between them\n elif len(term) < num_qubits:\n idx_prev=0\n for ep_gate in term:\n idx=int(ep_gate[0])\n if idx > idx_prev:\n idiff = idx - idx_prev\n for kk in range(idiff):\n op_str=op_str+'I'\n # choose the right gate\n if ep_gate[1] == 'X':\n op_str=op_str+'X'\n elif ep_gate[1] == 'Y':\n op_str=op_str+'Y'\n elif ep_gate[1] == 'Z':\n op_str=op_str+'Z'\n idx_prev = idx + 1\n\n # for the last check !!\n if idx_prev - 1 <= num_qubits - 1:\n idiff = num_qubits - idx_prev\n for jj in range(idiff):\n op_str=op_str+'I'\n # for the error case\n else: \n print('Error in number of gates !!!') \n sys.exit(0)\n\n Ham_op.append(PauliOp(Pauli(op_str),ep_cont[ii]))\n ii += 1\n return SummedOp(Ham_op)\n\ndef save_data(all_params_epochs: Optional[Any] = None, \n plot_list_cost: Optional[List[List[Union[Union[int,float],float]]]] = None) -> None:\n \"\"\"\n Saves and plots the given data to a file.\n\n Args:\n all_params_epochs (Optional[List[List[Union[float, List[float]]]], optional): The ansatz's parameters per epoch. Defaults to None.\n plot_list_cost (float, optional): Training cost per epoch. Defaults to None.\n \"\"\" \n\n if type(all_params_epochs) is np.ndarray: \n np.savetxt(\"./params.txt\",all_params_epochs)\n \n if plot_list_cost: \n np.savetxt(\"./energy.txt\",plot_list_cost)\n plot_cost(plot_list_cost)\n\ndef plot_cost(plot_list: List[List[float]],\n filename: str = \"energy.pdf\") -> None:\n \"\"\"\n Plot the cost versus the learning epoch.\n\n Args:\n plot_list (List[List[float]]]): List of [epoch, cost].\n filename (str, optional): The name of the output file. Defaults to \"cost.pdf\".\n \"\"\" \n if not (isinstance(plot_list, np.ndarray)): plot_list = np.array(plot_list)\n try:\n plt.figure()\n plt.plot(plot_list[:,0], \"--o\", color=\"b\", label=\"QML\", ms=4)\n plt.plot(plot_list[:,1], \"--o\", color=\"r\", label=\"FCI\", ms=4)\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Energy (Ha)\")\n plt.grid(True)\n plt.legend()\n plt.tight_layout()\n plt.savefig(\"./{}\".format(filename))\n except:\n logger.warning(\"Cost could not be plotted. 
An error occurred.\")\n finally:\n plt.close()\n","repo_name":"xzzeng001/qml_ansatz","sub_path":"qiskit_DM/dqnn_utils.py","file_name":"dqnn_utils.py","file_ext":"py","file_size_in_byte":7616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"45080116357","text":"import os\nimport re\nimport time\nimport logging\nimport datetime\nimport dateutil\nimport textwrap\n\nfrom pytz import timezone\nfrom ast import literal_eval\nfrom dateutil.relativedelta import relativedelta\n\nfrom odoo import _, models, api, fields\nfrom odoo.exceptions import ValidationError # needed by _check_activity_type below\nfrom odoo.tools.safe_eval import safe_eval\nfrom odoo.osv import expression\n\nfrom odoo.addons.web.controllers.main import clean_action\n\n_logger = logging.getLogger(__name__)\n\nclass FileAction(models.Model):\n \n _inherit = 'muk_dms_actions.action'\n \n #----------------------------------------------------------\n # Database\n #----------------------------------------------------------\n \n activity_done = fields.Boolean(\n string=\"Mark all as done\",\n default=False)\n \n activity_create = fields.Boolean(\n string=\"Create a new Activity\",\n default=False)\n \n activity_type = fields.Many2one(\n comodel_name='mail.activity.type', \n string=\"Activity\")\n\n activity_summary = fields.Char(\n string=\"Summary\")\n \n activity_note = fields.Html(\n string=\"Note\"\n )\n \n activity_deadline_value = fields.Integer(\n string=\"Due Date in\")\n \n activity_deadline_unit = fields.Selection(\n selection=[\n ('days', 'Days'),\n ('weeks', 'Weeks'),\n ('months', 'Months'),\n ], \n string=\"Due type\", \n default='days')\n \n assigned_user = fields.Many2one(\n comodel_name='res.users', \n string=\"Assigned to\")\n \n #----------------------------------------------------------\n # Functions\n #----------------------------------------------------------\n \n @api.multi\n def trigger_actions(self, file_ids):\n result = super(FileAction, self).trigger_actions(file_ids)\n files = self.env['muk_dms.file'].browse(file_ids)\n if self.filtered('activity_done'):\n feedback = _(\"Closed by operation!\")\n if len(self) == 1 and self.activity_done:\n feedback = _(\"Closed by operation: %s\") % self.name\n files.mapped('activity_ids').action_feedback(feedback=feedback)\n for record in self.filtered('activity_create'):\n values = {\n 'activity_type_id': record.activity_type.id,\n 'summary': record.activity_summary or '',\n 'note': record.activity_note or '',\n }\n if record.activity_deadline_value > 0:\n today = fields.Date.context_today(record)\n delta_unit = record.activity_deadline_unit\n delta_value = record.activity_deadline_value\n values['date_deadline'] = today + relativedelta(**{delta_unit: delta_value})\n values['user_id'] = record.assigned_user.id if record.assigned_user else self.env.user.id # user_id expects a database id, not a recordset\n files.activity_schedule(**values)\n return result\n \n #----------------------------------------------------------\n # Constrains\n #----------------------------------------------------------\n \n @api.constrains('activity_type', 'activity_create')\n def _check_activity_type(self):\n for record in self:\n if record.activity_create and not record.activity_type:\n raise ValidationError(_('An activity has to have a type!'))","repo_name":"muk-it/muk_dms","sub_path":"muk_dms_mail/models/action.py","file_name":"action.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","stars":90,"dataset":"github-code","pt":"61"}